diff --git a/_distro_map.yml b/_distro_map.yml
index eef18bd20374..f2c39698bb8c 100644
--- a/_distro_map.yml
+++ b/_distro_map.yml
@@ -1,305 +1,11 @@
 ---
-openshift-origin:
-  name: OKD
-  author: OKD Documentation Project
-  site: community
-  site_name: Documentation
-  site_url: https://docs.okd.io/
-  branches:
-    main:
-      name: 4
-      dir: latest
-    enterprise-4.6:
-      name: '4.6'
-      dir: '4.6'
-    enterprise-4.7:
-      name: '4.7'
-      dir: '4.7'
-    enterprise-4.8:
-      name: '4.8'
-      dir: '4.8'
-    enterprise-4.9:
-      name: '4.9'
-      dir: '4.9'
-    enterprise-4.10:
-      name: '4.10'
-      dir: '4.10'
-    enterprise-4.11:
-      name: '4.11'
-      dir: '4.11'
-    enterprise-4.12:
-      name: '4.12'
-      dir: '4.12'
-    enterprise-4.13:
-      name: '4.13'
-      dir: '4.13'
-    enterprise-3.6:
-      name: '3.6'
-      dir: '3.6'
-    enterprise-3.7:
-      name: '3.7'
-      dir: '3.7'
-    enterprise-3.9:
-      name: '3.9'
-      dir: '3.9'
-    enterprise-3.10:
-      name: '3.10'
-      dir: '3.10'
-    enterprise-3.11:
-      name: '3.11'
-      dir: '3.11'
-openshift-online:
-  name: OpenShift Online
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-3.11:
-      name: 'Pro'
-      dir: online/pro
-openshift-enterprise:
-  name: OpenShift Container Platform
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-3.0:
-      name: '3.0'
-      dir: enterprise/3.0
-      distro-overrides:
-        name: OpenShift Enterprise
-    enterprise-3.1:
-      name: '3.1'
-      dir: enterprise/3.1
-      distro-overrides:
-        name: OpenShift Enterprise
-    enterprise-3.2:
-      name: '3.2'
-      dir: enterprise/3.2
-      distro-overrides:
-        name: OpenShift Enterprise
-    enterprise-3.3:
-      name: '3.3'
-      dir: container-platform/3.3
-    enterprise-3.4:
-      name: '3.4'
-      dir: container-platform/3.4
-    enterprise-3.5:
-      name: '3.5'
-      dir: container-platform/3.5
-    enterprise-3.6:
-      name: '3.6'
-      dir: container-platform/3.6
-    enterprise-3.7:
-      name: '3.7'
-      dir: container-platform/3.7
-    enterprise-3.9:
-      name: '3.9'
-      dir: container-platform/3.9
-    enterprise-3.10:
-      name: '3.10'
-      dir: container-platform/3.10
-    enterprise-3.11:
-      name: '3.11'
-      dir: container-platform/3.11
-    enterprise-4.1:
-      name: '4.1'
-      dir: container-platform/4.1
-    enterprise-4.2:
-      name: '4.2'
-      dir: container-platform/4.2
-    enterprise-4.3:
-      name: '4.3'
-      dir: container-platform/4.3
-    enterprise-4.4:
-      name: '4.4'
-      dir: container-platform/4.4
-    enterprise-4.5:
-      name: '4.5'
-      dir: container-platform/4.5
-    enterprise-4.6:
-      name: '4.6'
-      dir: container-platform/4.6
-    enterprise-4.7:
-      name: '4.7'
-      dir: container-platform/4.7
-    enterprise-4.8:
-      name: '4.8'
-      dir: container-platform/4.8
-    enterprise-4.9:
-      name: '4.9'
-      dir: container-platform/4.9
-    enterprise-4.10:
-      name: '4.10'
-      dir: container-platform/4.10
-    enterprise-4.11:
-      name: '4.11'
-      dir: container-platform/4.11
-    enterprise-4.12:
-      name: '4.12'
-      dir: container-platform/4.12
-    enterprise-4.13:
-      name: '4.13'
-      dir: container-platform/4.13
-    enterprise-4.14:
-      name: '4.14'
-      dir: container-platform/4.14
-openshift-dedicated:
-  name: OpenShift Dedicated
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-3.11:
-      name: '3'
-      dir: dedicated/3
-    enterprise-4.13:
-      name: ''
-      dir: dedicated/
-openshift-aro:
-  name: Azure Red Hat OpenShift
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-3.11:
-      name: '3'
-      dir: aro/3
-    enterprise-4.3:
-      name: '4'
-      dir: aro/4
-openshift-rosa:
-  name: Red Hat OpenShift Service on AWS
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.13:
-      name: ''
-      dir: rosa/
-    rosa-preview:
-      name: ''
-      dir: rosa-preview/
-openshift-rosa-portal:
-  name: Red Hat OpenShift Service on AWS
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.13:
-      name: ''
-      dir: rosa-portal/
-partner-roks:
-  name: Red Hat OpenShift on IBM Cloud
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.3:
-      name: '4'
-      dir: roks/4
-openshift-webscale:
-  name: OpenShift Container Platform
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.4:
-      name: '4.4'
-      dir: container-platform-ocp/4.4
-    enterprise-4.5:
-      name: '4.5'
-      dir: container-platform-ocp/4.5
-    enterprise-4.7:
-      name: '4.7'
-      dir: container-platform-ocp/4.7
-    enterprise-4.8:
-      name: '4.8'
-      dir: container-platform-ocp/4.8
-openshift-dpu:
-  name: OpenShift Container Platform
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.10:
-      name: '4.10'
-      dir: container-platform-dpu/4.10
-openshift-acs:
-  name: Red Hat Advanced Cluster Security for Kubernetes
+openshift-gitops:
+  name: Red Hat OpenShift GitOps
   author: OpenShift documentation team
   site: commercial
   site_name: Documentation
   site_url: https://docs.openshift.com/
   branches:
-    rhacs-docs-3.65:
-      name: '3.65'
-      dir: acs/3.65
-    rhacs-docs-3.66:
-      name: '3.66'
-      dir: acs/3.66
-    rhacs-docs-3.67:
-      name: '3.67'
-      dir: acs/3.67
-    rhacs-docs-3.68:
-      name: '3.68'
-      dir: acs/3.68
-    rhacs-docs-3.69:
-      name: '3.69'
-      dir: acs/3.69
-    rhacs-docs-3.70:
-      name: '3.70'
-      dir: acs/3.70
-    rhacs-docs-3.71:
-      name: '3.71'
-      dir: acs/3.71
-    rhacs-docs-3.72:
-      name: '3.72'
-      dir: acs/3.72
-    rhacs-docs-3.73:
-      name: '3.73'
-      dir: acs/3.73
-    rhacs-docs-3.74:
-      name: '3.74'
-      dir: acs/3.74
-    rhacs-docs-4.0:
-      name: '4.0'
-      dir: acs/4.0
-    rhacs-docs-4.1:
-      name: '4.1'
-      dir: acs/4.1
-microshift:
-  name: MicroShift
-  author: OpenShift Documentation Project
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    enterprise-4.12:
-      name: '4.12'
-      dir: microshift/4.12
-    enterprise-4.13:
-      name: '4.13'
-      dir: microshift/4.13
-openshift-serverless:
-  name: Red Hat OpenShift Serverless
-  author: OpenShift documentation team
-  site: commercial
-  site_name: Documentation
-  site_url: https://docs.openshift.com/
-  branches:
-    serverless-docs-1.28:
-      name: '1.28'
-      dir: serverless/1.28
-    serverless-docs-1.29:
-      name: '1.29'
-      dir: serverless/1.29
+    gitops-docs:
+      name: ''
+      dir: gitops
diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml
index 32d779227cc5..d3d70e687d68 100644
--- a/_topic_maps/_topic_map.yml
+++ b/_topic_maps/_topic_map.yml
@@ -1,3911 +1,41 @@
-# This configuration file dictates the organization of the topic groups and
-# topics on the main page of the doc site for this branch.
Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - --- -Name: About -Dir: welcome -Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-online,openshift-dpu -Topics: -- Name: Welcome - File: index -- Name: Learn more about OpenShift Container Platform - File: learn_more_about_openshift - Distros: openshift-enterprise -- Name: About OpenShift Kubernetes Engine - File: oke_about - Distros: openshift-enterprise -- Name: Legal notice - File: legal-notice - Distros: openshift-enterprise,openshift-online ---- -Name: What's new? -Dir: whats_new -Distros: openshift-origin -Topics: -- Name: New features and enhancements - File: new-features -- Name: Deprecated features - File: deprecated-features ---- -Name: Release notes -Dir: release_notes -Distros: openshift-enterprise -Topics: -- Name: OpenShift Container Platform 4.14 release notes - File: ocp-4-14-release-notes ---- - Name: Getting started - Dir: getting_started - Distros: openshift-enterprise - Topics: - - Name: Kubernetes overview - File: kubernetes-overview - - Name: OpenShift Container Platform overview - File: openshift-overview - - Name: Web console walkthrough - File: openshift-web-console - - Name: Command-line walkthrough - File: openshift-cli ---- -Name: Architecture -Dir: architecture -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Architecture overview - File: index -- Name: Product architecture - File: architecture -- Name: Installation and update - Distros: openshift-enterprise,openshift-origin - File: architecture-installation -- Name: Red Hat OpenShift Cluster Manager - Distros: openshift-enterprise - File: ocm-overview-ocp -- Name: About multicluster engine for Kubernetes operator - Distros: openshift-enterprise - File: mce-overview-ocp -- Name: Control plane architecture - File: control-plane - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding OpenShift development - File: understanding-development - Distros: openshift-enterprise -- Name: Understanding OKD development - File: understanding-development - Distros: openshift-origin -- Name: Fedora CoreOS - File: architecture-rhcos - Distros: openshift-origin -- Name: Red Hat Enterprise Linux CoreOS - File: architecture-rhcos - Distros: openshift-enterprise -- Name: Admission plugins - File: admission-plug-ins - Distros: openshift-enterprise,openshift-aro ---- -Name: Installing -Dir: installing -Distros: openshift-origin,openshift-enterprise,openshift-webscale -Topics: -- Name: Installation overview - File: index - Distros: openshift-origin,openshift-enterprise -- Name: Selecting an installation method and preparing a cluster - File: installing-preparing - Distros: 
openshift-origin,openshift-enterprise -- Name: Cluster capabilities - File: cluster-capabilities - Distros: openshift-origin,openshift-enterprise -- Name: Disconnected installation mirroring - Dir: disconnected_install - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About disconnected installation mirroring - File: index - - Name: Creating a mirror registry with mirror registry for Red Hat OpenShift - File: installing-mirroring-creating-registry - - Name: Mirroring images for a disconnected installation - File: installing-mirroring-installation-images - - Name: Mirroring images for a disconnected installation using the oc-mirror plugin - File: installing-mirroring-disconnected -- Name: Installing on Alibaba - Dir: installing_alibaba - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Alibaba Cloud - File: preparing-to-install-on-alibaba - - Name: Creating the required Alibaba Cloud resources - File: manually-creating-alibaba-ram - - Name: Installing a cluster quickly on Alibaba Cloud - File: installing-alibaba-default - - Name: Installing a cluster on Alibaba Cloud with customizations - File: installing-alibaba-customizations - - Name: Installing a cluster on Alibaba Cloud with network customizations - File: installing-alibaba-network-customizations - - Name: Installing a cluster on Alibaba Cloud into an existing VPC - File: installing-alibaba-vpc - - Name: Uninstalling a cluster on Alibaba Cloud - File: uninstall-cluster-alibaba -- Name: Installing on AWS - Dir: installing_aws - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on AWS - File: preparing-to-install-on-aws - - Name: Configuring an AWS account - File: installing-aws-account - - Name: Manually creating IAM - File: manually-creating-iam - - Name: Installing a cluster quickly on AWS - File: installing-aws-default - - Name: Installing a cluster on AWS with customizations - File: installing-aws-customizations - - Name: Installing a cluster on AWS with network customizations - File: installing-aws-network-customizations - - Name: Installing a cluster on AWS in a restricted network - File: installing-restricted-networks-aws-installer-provisioned - - Name: Installing a cluster on AWS into an existing VPC - File: installing-aws-vpc - - Name: Installing a private cluster on AWS - File: installing-aws-private - - Name: Installing a cluster on AWS into a government region - File: installing-aws-government-region - - Name: Installing a cluster on AWS into a Secret or Top Secret Region - File: installing-aws-secret-region - - Name: Installing a cluster on AWS into a China region - File: installing-aws-china - - Name: Installing a cluster on AWS using CloudFormation templates - File: installing-aws-user-infra - - Name: Installing a cluster using AWS Local Zones - File: installing-aws-localzone - - Name: Installing a cluster on AWS in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-aws - - Name: Installing a cluster on AWS with remote workers on AWS Outposts - File: installing-aws-outposts-remote-workers - - Name: Installing a three-node cluster on AWS - File: installing-aws-three-node - - Name: Expanding a cluster with on-premise bare metal nodes - File: installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes - - Name: Uninstalling a cluster on AWS - File: uninstalling-cluster-aws -- Name: Installing on Azure - Dir: installing_azure - Distros: openshift-origin,openshift-enterprise - 
Topics: - - Name: Preparing to install on Azure - File: preparing-to-install-on-azure - - Name: Configuring an Azure account - File: installing-azure-account - - Name: Manually creating IAM - File: manually-creating-iam-azure - - Name: Enabling user-managed encryption on Azure - File: enabling-user-managed-encryption-azure - - Name: Installing a cluster quickly on Azure - File: installing-azure-default - - Name: Installing a cluster on Azure with customizations - File: installing-azure-customizations - - Name: Installing a cluster on Azure with network customizations - File: installing-azure-network-customizations - - Name: Installing a cluster on Azure into an existing VNet - File: installing-azure-vnet - - Name: Installing a private cluster on Azure - File: installing-azure-private - - Name: Installing a cluster on Azure into a government region - File: installing-azure-government-region - - Name: Installing a cluster on Azure using ARM templates - File: installing-azure-user-infra - - Name: Installing a three-node cluster on Azure - File: installing-azure-three-node - - Name: Uninstalling a cluster on Azure - File: uninstalling-cluster-azure -- Name: Installing on Azure Stack Hub - Dir: installing_azure_stack_hub - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Azure Stack Hub - File: preparing-to-install-on-azure-stack-hub - - Name: Configuring an Azure Stack Hub account - File: installing-azure-stack-hub-account - - Name: Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure - File: installing-azure-stack-hub-default - - Name: Installing a cluster on Azure Stack Hub with network customizations - File: installing-azure-stack-hub-network-customizations - - Name: Installing a cluster on Azure Stack Hub using ARM templates - File: installing-azure-stack-hub-user-infra - - Name: Uninstalling a cluster on Azure Stack Hub - File: uninstalling-cluster-azure-stack-hub -- Name: Installing on GCP - Dir: installing_gcp - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on GCP - File: preparing-to-install-on-gcp - - Name: Configuring a GCP project - File: installing-gcp-account - - Name: Manually creating IAM - File: manually-creating-iam-gcp - - Name: Installing a cluster quickly on GCP - File: installing-gcp-default - - Name: Installing a cluster on GCP with customizations - File: installing-gcp-customizations - - Name: Installing a cluster on GCP with network customizations - File: installing-gcp-network-customizations - - Name: Installing a cluster on GCP in a restricted network - File: installing-restricted-networks-gcp-installer-provisioned - - Name: Installing a cluster on GCP into an existing VPC - File: installing-gcp-vpc - - Name: Installing a cluster on GCP into a shared VPC - File: installing-gcp-shared-vpc - - Name: Installing a private cluster on GCP - File: installing-gcp-private - - Name: Installing a cluster on GCP using Deployment Manager templates - File: installing-gcp-user-infra - - Name: Installing a cluster into a shared VPC on GCP using Deployment Manager templates - File: installing-gcp-user-infra-vpc - - Name: Installing a cluster on GCP in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-gcp - - Name: Installing a three-node cluster on GCP - File: installing-gcp-three-node - - Name: Uninstalling a cluster on GCP - File: uninstalling-cluster-gcp -- Name: Installing on IBM Cloud VPC - Dir: 
installing_ibm_cloud_public - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on IBM Cloud VPC - File: preparing-to-install-on-ibm-cloud - - Name: Configuring an IBM Cloud account - File: installing-ibm-cloud-account - - Name: Configuring IAM for IBM Cloud VPC - File: configuring-iam-ibm-cloud - - Name: Installing a cluster on IBM Cloud VPC with customizations - File: installing-ibm-cloud-customizations - - Name: Installing a cluster on IBM Cloud VPC with network customizations - File: installing-ibm-cloud-network-customizations - - Name: Installing a cluster on IBM Cloud VPC into an existing VPC - File: installing-ibm-cloud-vpc - - Name: Installing a private cluster on IBM Cloud VPC - File: installing-ibm-cloud-private - - Name: Uninstalling a cluster on IBM Cloud VPC - File: uninstalling-cluster-ibm-cloud -- Name: Installing on Nutanix - Dir: installing_nutanix - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on Nutanix - File: preparing-to-install-on-nutanix - - Name: Installing a cluster on Nutanix - File: installing-nutanix-installer-provisioned - - Name: Installing a cluster on Nutanix in a restricted network - File: installing-restricted-networks-nutanix-installer-provisioned - - Name: Installing a three-node cluster on Nutanix - File: installing-nutanix-three-node - - Name: Uninstalling a cluster on Nutanix - File: uninstalling-cluster-nutanix -- Name: Installing on bare metal - Dir: installing_bare_metal - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on bare metal - File: preparing-to-install-on-bare-metal - - Name: Installing a user-provisioned cluster on bare metal - File: installing-bare-metal - - Name: Installing a user-provisioned bare metal cluster with network customizations - File: installing-bare-metal-network-customizations - - Name: Installing a user-provisioned bare metal cluster on a restricted network - File: installing-restricted-networks-bare-metal - - Name: Scaling a user-provisioned installation with the bare metal operator - File: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator -- Name: Installing on-premise with Assisted Installer - Dir: installing_on_prem_assisted - Distros: openshift-enterprise - Topics: - - Name: Installing an on-premise cluster using the Assisted Installer - File: installing-on-prem-assisted -- Name: Installing an on-premise cluster with the Agent-based Installer - Dir: installing_with_agent_based_installer - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with Agent-based Installer - File: preparing-to-install-with-agent-based-installer - - Name: Understanding disconnected installation mirroring - File: understanding-disconnected-installation-mirroring - - Name: Installing a cluster with Agent-based Installer - File: installing-with-agent-based-installer - - Name: Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes - File: preparing-an-agent-based-installed-cluster-for-mce -- Name: Installing on a single node - Dir: installing_sno - Distros: openshift-enterprise - Topics: - - Name: Preparing to install OpenShift on a single node - File: install-sno-preparing-to-install-sno - - Name: Installing OpenShift on a single node - File: install-sno-installing-sno -- Name: Deploying installer-provisioned clusters on bare metal - Dir: installing_bare_metal_ipi - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Overview - File: 
ipi-install-overview - - Name: Prerequisites - File: ipi-install-prerequisites - - Name: Setting up the environment for an OpenShift installation - File: ipi-install-installation-workflow - - Name: Post-installation configuration - File: ipi-install-post-installation-configuration - - Name: Expanding the cluster - File: ipi-install-expanding-the-cluster - - Name: Troubleshooting - File: ipi-install-troubleshooting -- Name: Installing bare metal clusters on IBM Cloud - Dir: installing_ibm_cloud - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Prerequisites - File: install-ibm-cloud-prerequisites - - Name: Installation workflow - File: install-ibm-cloud-installation-workflow -- Name: Installing with z/VM on IBM Z and IBM LinuxONE - Dir: installing_ibm_z - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with z/VM on IBM Z and IBM LinuxONE - File: preparing-to-install-on-ibm-z - - Name: Installing a cluster with z/VM on IBM Z and IBM LinuxONE - File: installing-ibm-z - - Name: Restricted network IBM Z installation with z/VM - File: installing-restricted-networks-ibm-z -- Name: Installing with RHEL KVM on IBM Z and IBM LinuxONE - Dir: installing_ibm_z - Distros: openshift-enterprise - Topics: - - Name: Preparing to install with RHEL KVM on IBM Z and IBM LinuxONE - File: preparing-to-install-on-ibm-z-kvm - - Name: Installing a cluster with RHEL KVM on IBM Z and IBM LinuxONE - File: installing-ibm-z-kvm - - Name: Restricted network IBM Z installation with RHEL KVM - File: installing-restricted-networks-ibm-z-kvm -- Name: Installing on IBM Power - Dir: installing_ibm_power - Distros: openshift-enterprise - Topics: - - Name: Preparing to install on IBM Power - File: preparing-to-install-on-ibm-power - - Name: Installing a cluster on IBM Power - File: installing-ibm-power - - Name: Restricted network IBM Power installation - File: installing-restricted-networks-ibm-power -- Name: Installing on IBM Power Virtual Server - Dir: installing_ibm_powervs - Distros: openshift-enterprise - Topics: - - Name: Preparing to install on IBM Power Virtual Server - File: preparing-to-install-on-ibm-power-vs - - Name: Configuring an IBM Cloud account - File: installing-ibm-cloud-account-power-vs - - Name: Creating an IBM Power Virtual Server workspace - File: creating-ibm-power-vs-workspace - - Name: Installing a cluster on IBM Power Virtual Server with customizations - File: installing-ibm-power-vs-customizations - - Name: Installing a cluster on IBM Power Virtual Server into an existing VPC - File: installing-ibm-powervs-vpc - - Name: Installing a private cluster on IBM Power Virtual Server - File: installing-ibm-power-vs-private-cluster - - Name: Installing a cluster on IBM Power Virtual Server in a restricted network - File: installing-restricted-networks-ibm-power-vs - - Name: Uninstalling a cluster on IBM Power Virtual Server - File: uninstalling-cluster-ibm-power-vs -- Name: Installing on OpenStack - Dir: installing_openstack - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on OpenStack - File: preparing-to-install-on-openstack - - Name: Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack - File: installing-openstack-nfv-preparing -# - Name: Installing a cluster on OpenStack -# File: installing-openstack-installer - - Name: Installing a cluster on OpenStack with customizations - File: installing-openstack-installer-custom - - Name: Installing a cluster on OpenStack with Kuryr - File: 
installing-openstack-installer-kuryr - - Name: Installing a cluster on OpenStack on your own infrastructure - File: installing-openstack-user - - Name: Installing a cluster on OpenStack with Kuryr on your own infrastructure - File: installing-openstack-user-kuryr - - Name: Installing a cluster on OpenStack in a restricted network - File: installing-openstack-installer-restricted - - Name: OpenStack Cloud Controller Manager reference guide - File: installing-openstack-cloud-config-reference - # - Name: Load balancing deployments on OpenStack - # File: installing-openstack-load-balancing - - Name: Uninstalling a cluster on OpenStack - File: uninstalling-cluster-openstack - - Name: Uninstalling a cluster on OpenStack from your own infrastructure - File: uninstalling-openstack-user -- Name: Installing on RHV - Dir: installing_rhv - Distros: openshift-enterprise - Topics: - - Name: Preparing to install on RHV - File: preparing-to-install-on-rhv - - Name: Installing a cluster quickly on RHV - File: installing-rhv-default - - Name: Installing a cluster on RHV with customizations - File: installing-rhv-customizations - - Name: Installing a cluster on RHV with user-provisioned infrastructure - File: installing-rhv-user-infra - - Name: Installing a cluster on RHV in a restricted network - File: installing-rhv-restricted-network - - Name: Uninstalling a cluster on RHV - File: uninstalling-cluster-rhv -- Name: Installing on oVirt - Dir: installing_rhv - Distros: openshift-origin - Topics: - - Name: Preparing to install on RHV - File: preparing-to-install-on-rhv - - Name: Installing a cluster quickly on oVirt - File: installing-rhv-default - - Name: Installing a cluster on oVirt with customizations - File: installing-rhv-customizations - - Name: Installing a cluster on oVirt with user-provisioned infrastructure - File: installing-rhv-user-infra - - Name: Installing a cluster on RHV in a restricted network - File: installing-rhv-restricted-network - - Name: Uninstalling a cluster on oVirt - File: uninstalling-cluster-rhv -- Name: Installing on vSphere - Dir: installing_vsphere - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Preparing to install on vSphere - File: preparing-to-install-on-vsphere - - Name: Installing a cluster on vSphere - File: installing-vsphere-installer-provisioned - - Name: Installing a cluster on vSphere with customizations - File: installing-vsphere-installer-provisioned-customizations - - Name: Installing a cluster on vSphere with network customizations - File: installing-vsphere-installer-provisioned-network-customizations - - Name: Installing a cluster on vSphere with user-provisioned infrastructure - File: installing-vsphere - - Name: Installing a cluster on vSphere with user-provisioned infrastructure and network customizations - File: installing-vsphere-network-customizations - - Name: Installing a cluster on vSphere in a restricted network - File: installing-restricted-networks-installer-provisioned-vsphere - - Name: Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure - File: installing-restricted-networks-vsphere - - Name: Installing a three-node cluster on vSphere - File: installing-vsphere-three-node - - Name: Configuring the vSphere connection settings after an installation - File: installing-vsphere-post-installation-configuration - - Name: Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure - File: uninstalling-cluster-vsphere-installer-provisioned - - Name: Using the vSphere 
Problem Detector Operator - File: using-vsphere-problem-detector-operator - - Name: Installation configuration parameters for vSphere - File: installation-config-parameters-vsphere -- Name: Installing on any platform - Dir: installing_platform_agnostic - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Installing a cluster on any platform - File: installing-platform-agnostic -- Name: Installation configuration - Dir: install_config - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Customizing nodes - File: installing-customizing - - Name: Configuring your firewall - File: configuring-firewall - - Name: Enabling Linux control group version 2 (cgroup v2) - File: enabling-cgroup-v2 - Distros: openshift-enterprise -- Name: Validating an installation - File: validating-an-installation - Distros: openshift-origin,openshift-enterprise -- Name: Troubleshooting installation issues - File: installing-troubleshooting - Distros: openshift-origin,openshift-enterprise ---- -Name: Post-installation configuration -Dir: post_installation_configuration -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Post-installation configuration overview - Distros: openshift-enterprise - File: index -- Name: Configuring a private cluster - Distros: openshift-enterprise,openshift-origin - File: configuring-private-cluster -- Name: Bare metal configuration - File: bare-metal-configuration -- Name: Configuring multi-architecture compute machines on an OpenShift cluster - Distros: openshift-enterprise - File: multi-architecture-configuration -- Name: Enabling encryption on a vSphere cluster - File: vsphere-post-installation-encryption -- Name: Machine configuration tasks - File: machine-configuration-tasks -- Name: Cluster tasks - File: cluster-tasks -- Name: Node tasks - File: node-tasks -- Name: Network configuration - File: network-configuration -- Name: Storage configuration - File: storage-configuration -- Name: Preparing for users - File: preparing-for-users -- Name: Configuring alert notifications - File: configuring-alert-notifications -- Name: Converting a connected cluster to a disconnected cluster - File: connected-to-disconnected -- Name: Enabling cluster capabilities - File: enabling-cluster-capabilities - Distros: openshift-origin,openshift-enterprise -- Name: Configuring additional devices in an IBM Z or IBM LinuxONE environment - File: ibmz-post-install -- Name: Regions and zones for a VMware vCenter - File: post-install-vsphere-zones-regions-configuration -- Name: Red Hat Enterprise Linux CoreOS image layering - File: coreos-layering - Distros: openshift-enterprise -- Name: Fedora CoreOS (FCOS) image layering - File: coreos-layering - Distros: openshift-origin ---- -Name: Updating clusters -Dir: updating -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Updating clusters overview - File: index -- Name: Understanding OpenShift updates - Dir: understanding_updates - Distros: openshift-enterprise - Topics: - - Name: Introduction to OpenShift updates - File: intro-to-updates - - Name: How cluster updates work - File: how-updates-work -- Name: Understanding update channels and releases - File: understanding-upgrade-channels-release - Distros: openshift-enterprise -- Name: Understanding OpenShift update duration - File: understanding-openshift-update-duration -- Name: Preparing to update to OpenShift Container Platform 4.13 - File: updating-cluster-prepare - Distros: openshift-enterprise -- Name: Preparing to update to OKD 4.13 - File: 
updating-cluster-prepare - Distros: openshift-origin -- Name: Performing a cluster update - Dir: updating_a_cluster - Topics: - - Name: Updating a cluster using the CLI - File: updating-cluster-cli - - Name: Updating a cluster using the web console - File: updating-cluster-web-console - - Name: Performing an EUS-to-EUS update - File: eus-eus-update - Distros: openshift-enterprise - - Name: Performing a canary rollout update - File: update-using-custom-machine-config-pools - - Name: Updating a cluster that includes RHEL compute machines - File: updating-cluster-rhel-compute - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment - Dir: updating_disconnected_cluster - Topics: - - Name: About cluster updates in a disconnected environment - File: index - - Name: Mirroring OpenShift Container Platform images - File: mirroring-image-repository - - Name: Updating a cluster in a disconnected environment using OSUS - File: disconnected-update-osus - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment without OSUS - File: disconnected-update - Distros: openshift-enterprise - - Name: Updating a cluster in a disconnected environment by using the CLI - File: disconnected-update - Distros: openshift-origin - - Name: Uninstalling OSUS from a cluster - File: uninstalling-osus - Distros: openshift-enterprise - - Name: Updating hardware on nodes running on vSphere - File: updating-hardware-on-nodes-running-on-vsphere - - Name: Migrating to a cluster with multi-architecture compute machines - File: migrating-to-multi-payload - - Name: Updating hosted control planes - File: updating-hosted-control-planes -- Name: Preparing to update a cluster with manually maintained credentials - File: preparing-manual-creds-update -- Name: Preflight validation for Kernel Module Management (KMM) Modules - File: kmm-preflight-validation -# - Name: Troubleshooting an update -# File: updating-troubleshooting ---- -Name: Support -Dir: support -Distros: openshift-enterprise,openshift-online,openshift-origin -Topics: -- Name: Support overview - File: index -- Name: Managing your cluster resources - File: managing-cluster-resources -- Name: Getting support - File: getting-support - Distros: openshift-enterprise -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Opting out of remote health reporting - File: opting-out-of-remote-health-reporting - - Name: Enabling remote health reporting - File: enabling-remote-health-reporting - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster - - Name: Using Insights Operator - File: using-insights-operator - - Name: Using remote health reporting in a restricted network - File: remote-health-reporting-from-restricted-network - - Name: Importing simple content access entitlements with Insights Operator - File: insights-operator-simple-access -- Name: Gathering data about your cluster - File: gathering-cluster-data - Distros: openshift-enterprise,openshift-origin -- Name: Summarizing cluster specifications - File: summarizing-cluster-specifications - Distros: openshift-enterprise,openshift-origin -- Name: Troubleshooting - Dir: troubleshooting - Distros: 
openshift-enterprise,openshift-origin - Topics: - - Name: Troubleshooting installations - File: troubleshooting-installations - - Name: Verifying node health - File: verifying-node-health - - Name: Troubleshooting CRI-O container runtime issues - File: troubleshooting-crio-issues - - Name: Troubleshooting operating system issues - File: troubleshooting-operating-system-issues - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting network issues - File: troubleshooting-network-issues - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting Operator issues - File: troubleshooting-operator-issues - - Name: Investigating pod issues - File: investigating-pod-issues - - Name: Troubleshooting the Source-to-Image process - File: troubleshooting-s2i - - Name: Troubleshooting storage issues - File: troubleshooting-storage-issues - - Name: Troubleshooting Windows container workload issues - File: troubleshooting-windows-container-workload-issues - - Name: Investigating monitoring issues - File: investigating-monitoring-issues - - Name: Diagnosing OpenShift CLI (oc) issues - File: diagnosing-oc-issues ---- -Name: Web console -Dir: web_console -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Web console overview - File: web-console-overview -- Name: Accessing the web console - File: web-console -- Name: Viewing cluster information - File: using-dashboard-to-get-cluster-information -- Name: Adding user preferences - File: adding-user-preferences - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the web console - File: configuring-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Customizing the web console - File: customizing-the-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Dynamic plugins - Dir: dynamic-plugin - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Overview of dynamic plugins - File: overview-dynamic-plugin - - Name: Getting started with dynamic plugins - File: dynamic-plugins-get-started - - Name: Deploy your plugin on a cluster - File: deploy-plugin-cluster - - Name: Dynamic plugin example - File: dynamic-plugin-example - - Name: Dynamic plugin reference - File: dynamic-plugins-reference -- Name: Web terminal - Dir: web_terminal - Distros: openshift-enterprise,openshift-online - Topics: - - Name: Installing the web terminal - File: installing-web-terminal - - Name: Configuring the web terminal - File: configuring-web-terminal - - Name: Using the web terminal - File: odc-using-web-terminal - - Name: Troubleshooting the web terminal - File: troubleshooting-web-terminal - - Name: Uninstalling the web terminal - File: uninstalling-web-terminal -- Name: Disabling the web console - File: disabling-web-console - Distros: openshift-enterprise,openshift-origin -- Name: Creating quick start tutorials - File: creating-quick-start-tutorials - Distros: openshift-enterprise,openshift-origin ---- -Name: CLI tools -Dir: cli_reference -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: CLI tools overview - File: index -- Name: OpenShift CLI (oc) - Dir: openshift_cli - Topics: - - Name: Getting started with the OpenShift CLI - File: getting-started-cli - - Name: Configuring the OpenShift CLI - File: configuring-cli - - Name: Usage of oc and kubectl commands - File: usage-oc-kubectl - - Name: Managing CLI profiles - File: managing-cli-profiles - - Name: Extending the OpenShift CLI with plugins - File: extending-cli-plugins - 
- Name: Managing CLI plugins with Krew - File: managing-cli-plugins-krew - Distros: openshift-enterprise,openshift-origin - - Name: OpenShift CLI developer command reference - File: developer-cli-commands - - Name: OpenShift CLI administrator command reference - File: administrator-cli-commands - Distros: openshift-enterprise,openshift-origin -- Name: Developer CLI (odo) - File: odo-important-update - # Dir: developer_cli_odo - Distros: openshift-enterprise,openshift-origin,openshift-online - # Topics: - # - Name: odo release notes - # File: odo-release-notes - # - Name: Understanding odo - # File: understanding-odo - # - Name: Installing odo - # File: installing-odo - # - Name: Configuring the odo CLI - # File: configuring-the-odo-cli - # - Name: odo CLI reference - # File: odo-cli-reference -- Name: Knative CLI (kn) for use with OpenShift Serverless - File: kn-cli-tools - Distros: openshift-enterprise,openshift-origin -- Name: Pipelines CLI (tkn) - Dir: tkn_cli - Distros: openshift-enterprise - Topics: - - Name: Installing tkn - File: installing-tkn - - Name: Configuring tkn - File: op-configuring-tkn - - Name: Basic tkn commands - File: op-tkn-reference -- Name: opm CLI - Dir: opm - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Installing the opm CLI - File: cli-opm-install - - Name: opm CLI reference - File: cli-opm-ref -- Name: Operator SDK - Dir: osdk - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Installing the Operator SDK CLI - File: cli-osdk-install - - Name: Operator SDK CLI reference - File: cli-osdk-ref ---- -Name: Security and compliance -Dir: security -Distros: openshift-enterprise,openshift-origin,openshift-aro -Topics: -- Name: Security and compliance overview - File: index -- Name: Container security - Dir: container_security - Topics: - - Name: Understanding container security - File: security-understanding - - Name: Understanding host and VM security - File: security-hosts-vms - - Name: Hardening Red Hat Enterprise Linux CoreOS - File: security-hardening - Distros: openshift-enterprise,openshift-aro - - Name: Container image signatures - File: security-container-signature - - Name: Hardening Fedora CoreOS - File: security-hardening - Distros: openshift-origin - - Name: Understanding compliance - File: security-compliance - - Name: Securing container content - File: security-container-content - - Name: Using container registries securely - File: security-registries - - Name: Securing the build process - File: security-build - - Name: Deploying containers - File: security-deploy - - Name: Securing the container platform - File: security-platform - - Name: Securing networks - File: security-network - - Name: Securing attached storage - File: security-storage - - Name: Monitoring cluster events and logs - File: security-monitoring -- Name: Configuring certificates - Dir: certificates - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Replacing the default ingress certificate - File: replacing-default-ingress-certificate - - Name: Adding API server certificates - File: api-server - - Name: Securing service traffic using service serving certificates - File: service-serving-certificate - - Name: Updating the CA bundle - File: updating-ca-bundle -- Name: Certificate types and descriptions - Dir: certificate_types_descriptions - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: User-provided certificates for the API server - File: user-provided-certificates-for-api-server - - Name: Proxy certificates - 
File: proxy-certificates - - Name: Service CA certificates - File: service-ca-certificates - - Name: Node certificates - File: node-certificates - - Name: Bootstrap certificates - File: bootstrap-certificates - - Name: etcd certificates - File: etcd-certificates - - Name: OLM certificates - File: olm-certificates - - Name: Aggregated API client certificates - File: aggregated-api-client-certificates - - Name: Machine Config Operator certificates - File: machine-config-operator-certificates - - Name: User-provided certificates for default ingress - File: user-provided-certificates-for-default-ingress - - Name: Ingress certificates - File: ingress-certificates - - Name: Monitoring and cluster logging Operator component certificates - File: monitoring-and-cluster-logging-operator-component-certificates - - Name: Control plane certificates - File: control-plane-certificates -- Name: Compliance Operator - Dir: compliance_operator - Topics: - - Name: Compliance Operator release notes - File: compliance-operator-release-notes - - Name: Supported compliance profiles - File: compliance-operator-supported-profiles - - Name: Installing the Compliance Operator - File: compliance-operator-installation - - Name: Updating the Compliance Operator - File: compliance-operator-updating - - Name: Compliance Operator scans - File: compliance-scans - - Name: Understanding the Compliance Operator - File: compliance-operator-understanding - - Name: Managing the Compliance Operator - File: compliance-operator-manage - - Name: Tailoring the Compliance Operator - File: compliance-operator-tailor - - Name: Retrieving Compliance Operator raw results - File: compliance-operator-raw-results - - Name: Managing Compliance Operator remediation - File: compliance-operator-remediation - - Name: Performing advanced Compliance Operator tasks - File: compliance-operator-advanced - - Name: Troubleshooting the Compliance Operator - File: compliance-operator-troubleshooting - - Name: Uninstalling the Compliance Operator - File: compliance-operator-uninstallation - - Name: Using the oc-compliance plugin - File: oc-compliance-plug-in-using - - Name: Understanding the Custom Resource Definitions - File: compliance-operator-crd - -- Name: File Integrity Operator - Dir: file_integrity_operator - Topics: - - Name: File Integrity Operator release notes - File: file-integrity-operator-release-notes - - Name: Installing the File Integrity Operator - File: file-integrity-operator-installation - - Name: Updating the File Integrity Operator - File: file-integrity-operator-updating - - Name: Understanding the File Integrity Operator - File: file-integrity-operator-understanding - - Name: Configuring the File Integrity Operator - File: file-integrity-operator-configuring - - Name: Performing advanced File Integrity Operator tasks - File: file-integrity-operator-advanced-usage - - Name: Troubleshooting the File Integrity Operator - File: file-integrity-operator-troubleshooting -- Name: Security Profiles Operator - Dir: security_profiles_operator - Topics: - - Name: Security Profiles Operator overview - File: spo-overview - - Name: Security Profiles Operator release notes - File: spo-release-notes - - Name: Understanding the Security Profiles Operator - File: spo-understanding - - Name: Enabling the Security Profiles Operator - File: spo-enabling - - Name: Managing seccomp profiles - File: spo-seccomp - - Name: Managing SELinux profiles - File: spo-selinux - - Name: Advanced Security Profiles Operator tasks - File: spo-advanced - - Name: 
Troubleshooting the Security Profiles Operator - File: spo-troubleshooting - - Name: Uninstalling the Security Profiles Operator - File: spo-uninstalling -- Name: cert-manager Operator for Red Hat OpenShift - Dir: cert_manager_operator - Distros: openshift-enterprise - Topics: - - Name: cert-manager Operator for Red Hat OpenShift overview - File: index - - Name: cert-manager Operator for Red Hat OpenShift release notes - File: cert-manager-operator-release-notes - - Name: Installing the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-install - - Name: Managing certificates with an ACME issuer - File: cert-manager-operator-issuer-acme - - Name: Enabling monitoring for the cert-manager Operator for Red Hat OpenShift - File: cert-manager-monitoring - - Name: Configuring the egress proxy for the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-proxy - - Name: Customizing cert-manager by using the cert-manager Operator API fields - File: cert-manager-customizing-api-fields - - Name: Authenticating the cert-manager Operator with AWS Security Token Service - File: cert-manager-authenticate-aws - - Name: Configuring log levels for cert-manager and the cert-manager Operator for Red Hat OpenShift - File: cert-manager-log-levels - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift with GCP Workload Identity - File: cert-manager-authenticate-gcp - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift on AWS - File: cert-manager-authentication-non-sts - - Name: Authenticating the cert-manager Operator for Red Hat OpenShift on GCP - File: cert-manager-authenticate-non-sts-gcp - - Name: Uninstalling the cert-manager Operator for Red Hat OpenShift - File: cert-manager-operator-uninstall -- Name: Viewing audit logs - File: audit-log-view -- Name: Configuring the audit log policy - File: audit-log-policy-config -- Name: Configuring TLS security profiles - File: tls-security-profiles -- Name: Configuring seccomp profiles - File: seccomp-profiles -- Name: Allowing JavaScript-based access to the API server from additional hosts - File: allowing-javascript-access-api-server - Distros: openshift-enterprise,openshift-origin -- Name: Encrypting etcd data - File: encrypting-etcd - Distros: openshift-enterprise,openshift-origin -- Name: Scanning pods for vulnerabilities - File: pod-vulnerability-scan - Distros: openshift-enterprise,openshift-origin -- Name: Network-Bound Disk Encryption (NBDE) - Dir: network_bound_disk_encryption - Topics: - - Name: About disk encryption technology - File: nbde-about-disk-encryption-technology - - Name: Tang server installation considerations - File: nbde-tang-server-installation-considerations - - Name: Tang server encryption key management - File: nbde-managing-encryption-keys - - Name: Disaster recovery considerations - File: nbde-disaster-recovery-considerations - Distros: openshift-enterprise,openshift-origin ---- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Authentication and authorization overview - File: index -- Name: Understanding authentication - File: understanding-authentication - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Configuring the internal OAuth server - File: configuring-internal-oauth -- Name: Configuring OAuth clients - File: configuring-oauth-clients -- Name: Managing user-owned OAuth access tokens - File: managing-oauth-access-tokens - Distros: 
openshift-enterprise,openshift-origin -- Name: Understanding identity provider configuration - File: understanding-identity-provider - Distros: openshift-enterprise,openshift-origin -- Name: Configuring identity providers - Dir: identity_providers - Topics: - - Name: Configuring an htpasswd identity provider - File: configuring-htpasswd-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a Keystone identity provider - File: configuring-keystone-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring an LDAP identity provider - File: configuring-ldap-identity-provider - - Name: Configuring a basic authentication identity provider - File: configuring-basic-authentication-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a request header identity provider - File: configuring-request-header-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a GitHub or GitHub Enterprise identity provider - File: configuring-github-identity-provider - - Name: Configuring a GitLab identity provider - File: configuring-gitlab-identity-provider - Distros: openshift-enterprise,openshift-origin - - Name: Configuring a Google identity provider - File: configuring-google-identity-provider - - Name: Configuring an OpenID Connect identity provider - File: configuring-oidc-identity-provider -- Name: Using RBAC to define and apply permissions - File: using-rbac -- Name: Removing the kubeadmin user - File: remove-kubeadmin - Distros: openshift-enterprise,openshift-origin -#- Name: Configuring LDAP failover -# File: configuring-ldap-failover -- Name: Understanding and creating service accounts - File: understanding-and-creating-service-accounts -- Name: Using service accounts in applications - File: using-service-accounts-in-applications -- Name: Using a service account as an OAuth client - File: using-service-accounts-as-oauth-client -- Name: Scoping tokens - File: tokens-scoping -- Name: Using bound service account tokens - File: bound-service-account-tokens -- Name: Managing security context constraints - File: managing-security-context-constraints - Distros: openshift-enterprise,openshift-origin -- Name: Understanding and managing pod security admission - File: understanding-and-managing-pod-security-admission - Distros: openshift-enterprise,openshift-origin -- Name: Impersonating the system:admin user - File: impersonating-system-admin - Distros: openshift-enterprise,openshift-origin -- Name: Syncing LDAP groups - File: ldap-syncing - Distros: openshift-enterprise,openshift-origin -- Name: Managing cloud provider credentials - Dir: managing_cloud_provider_credentials - Topics: - - Name: About the Cloud Credential Operator - File: about-cloud-credential-operator - - Name: Using mint mode - File: cco-mode-mint - - Name: Using passthrough mode - File: cco-mode-passthrough - - Name: Using manual mode - File: cco-mode-manual - - Name: Using manual mode with AWS Security Token Service - File: cco-mode-sts - - Name: Using manual mode with GCP Workload Identity - File: cco-mode-gcp-workload-identity ---- -Name: Networking -Dir: networking -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About networking - File: about-networking -- Name: Understanding networking - File: understanding-networking -- Name: Accessing hosts - File: accessing-hosts -- Name: Networking Operators overview - File: networking-operators-overview -- Name: Understanding the Cluster Network 
Operator - File: cluster-network-operator - Distros: openshift-enterprise,openshift-origin -- Name: Understanding the DNS Operator - File: dns-operator - Distros: openshift-enterprise,openshift-origin -- Name: Understanding the Ingress Operator - File: ingress-operator - Distros: openshift-enterprise,openshift-origin -- Name: Ingress sharding - File: ingress-sharding -- Name: Understanding the Ingress Node Firewall Operator - File: ingress-node-firewall-operator - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the Ingress Controller for manual DNS management - File: ingress-controller-dnsmgt - Distros: openshift-enterprise,openshift-origin -- Name: Configuring the Ingress Controller endpoint publishing strategy - File: nw-ingress-controller-endpoint-publishing-strategies - Distros: openshift-enterprise,openshift-origin -- Name: Verifying connectivity to an endpoint - File: verifying-connectivity-endpoint -- Name: Changing the cluster network MTU - File: changing-cluster-network-mtu -- Name: Configuring the node port service range - File: configuring-node-port-service-range -- Name: Configuring the cluster network IP address range - File: configuring-cluster-network-range -- Name: Configuring IP failover - File: configuring-ipfailover -- Name: Configuring interface-level network sysctls - File: setting-interface-level-network-sysctls -- Name: Using SCTP - File: using-sctp - Distros: openshift-enterprise,openshift-origin -- Name: Using PTP hardware - File: using-ptp -- Name: Developing PTP events consumer applications - File: ptp-cloud-events-consumer-dev-reference -- Name: External DNS Operator - Dir: external_dns_operator - Topics: - - Name: Understanding the External DNS Operator - File: understanding-external-dns-operator - - Name: Installing the External DNS Operator - File: nw-installing-external-dns-operator-on-cloud-providers - - Name: External DNS Operator configuration parameters - File: nw-configuration-parameters - - Name: Creating DNS records on an public hosted zone for AWS - File: nw-creating-dns-records-on-aws - - Name: Creating DNS records on an public zone for Azure - File: nw-creating-dns-records-on-azure - - Name: Creating DNS records on an public managed zone for GCP - File: nw-creating-dns-records-on-gcp - - Name: Creating DNS records on a public DNS zone for Infoblox - File: nw-creating-dns-records-on-infoblox - - Name: Configuring the cluster-wide proxy on the External DNS Operator - File: nw-configuring-cluster-wide-egress-proxy -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Editing a network policy - File: editing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Defining a default network policy for projects - File: default-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: AWS Load Balancer Operator - Dir: aws_load_balancer_operator - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: AWS Load Balancer Operator release notes - File: aws-load-balancer-operator-release-notes - - Name: Understanding the AWS Load Balancer Operator - File: understanding-aws-load-balancer-operator - - Name: Installing the AWS Load Balancer Operator - File: install-aws-load-balancer-operator - - Name: Installing the AWS Load 
Balancer Operator on Security Token Service cluster - File: installing-albo-sts-cluster - - Name: Creating an instance of the AWS Load Balancer Controller - File: create-instance-aws-load-balancer-controller - - Name: Serving Multiple Ingresses through a single AWS Load Balancer - File: multiple-ingress-through-single-alb - - Name: Adding TLS termination on the AWS Load Balancer - File: add-tls-termination - - Name: Configuring cluster-wide proxy on the AWS Load Balancer Operator - File: configure-egress-proxy-aws-load-balancer-operator -- Name: Multiple networks - Dir: multiple_networks - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Understanding multiple networks - File: understanding-multiple-networks - - Name: Configuring an additional network - File: configuring-additional-network - - Name: About virtual routing and forwarding - File: about-virtual-routing-and-forwarding - - Name: Configuring multi-network policy - File: configuring-multi-network-policy - - Name: Attaching a pod to an additional network - File: attaching-pod - - Name: Removing a pod from an additional network - File: removing-pod - - Name: Editing an additional network - File: edit-additional-network - - Name: Removing an additional network - File: remove-additional-network - - Name: Assigning a secondary network to a VRF - File: assigning-a-secondary-network-to-a-vrf -- Name: Hardware networks - Dir: hardware_networks - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About Single Root I/O Virtualization (SR-IOV) hardware networks - File: about-sriov - - Name: Installing the SR-IOV Operator - File: installing-sriov-operator - - Name: Configuring the SR-IOV Operator - File: configuring-sriov-operator - - Name: Configuring an SR-IOV network device - File: configuring-sriov-device - - Name: Configuring an SR-IOV Ethernet network attachment - File: configuring-sriov-net-attach - - Name: Configuring an SR-IOV InfiniBand network attachment - File: configuring-sriov-ib-attach - - Name: Adding a pod to an SR-IOV network - File: add-pod - - Name: Tuning sysctl settings on an SR-IOV network - File: configuring-interface-sysctl-sriov-device - - Name: Using high performance multicast - File: using-sriov-multicast - - Name: Using DPDK and RDMA - File: using-dpdk-and-rdma - - Name: Using pod-level bonding for secondary networks - File: using-pod-level-bonding - - Name: Configuring hardware offloading - File: configuring-hardware-offloading - - Name: Switching Bluefield-2 from NIC to DPU mode - File: switching-bf2-nic-dpu - - Name: Uninstalling the SR-IOV Operator - File: uninstalling-sriov-operator -- Name: OVN-Kubernetes network plugin - Dir: ovn_kubernetes_network_provider - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About the OVN-Kubernetes network plugin - File: about-ovn-kubernetes - - Name: OVN-Kubernetes architecture - File: ovn-kubernetes-architecture-assembly - - Name: OVN-Kubernetes troubleshooting - File: ovn-kubernetes-troubleshooting-sources - - Name: OVN-Kubernetes traffic tracing - File: ovn-kubernetes-tracing-using-ovntrace - - Name: Migrating from the OpenShift SDN network plugin - File: migrate-from-openshift-sdn - - Name: Rolling back to the OpenShift SDN network plugin - File: rollback-to-openshift-sdn - - Name: Migrating from Kuryr - File: migrate-from-kuryr-sdn - - Name: Converting to IPv4/IPv6 dual stack networking - File: converting-to-dual-stack - - Name: Logging for egress firewall and network policy rules - File: logging-network-policy - - 
Name: Configuring IPsec encryption - File: configuring-ipsec-ovn - - Name: Configuring an egress firewall for a project - File: configuring-egress-firewall-ovn - - Name: Viewing an egress firewall for a project - File: viewing-egress-firewall-ovn - - Name: Editing an egress firewall for a project - File: editing-egress-firewall-ovn - - Name: Removing an egress firewall from a project - File: removing-egress-firewall-ovn - - Name: Configuring an egress IP address - File: configuring-egress-ips-ovn - - Name: Assigning an egress IP address - File: assigning-egress-ips-ovn - - Name: Considerations for the use of an egress router pod - File: using-an-egress-router-ovn - - Name: Deploying an egress router pod in redirect mode - File: deploying-egress-router-ovn-redirection - - Name: Enabling multicast for a project - File: enabling-multicast - - Name: Disabling multicast for a project - File: disabling-multicast - - Name: Tracking network flows - File: tracking-network-flows - - Name: Configuring hybrid networking - File: configuring-hybrid-networking -- Name: OpenShift SDN network plugin - Dir: openshift_sdn - Topics: - - Name: About the OpenShift SDN network plugin - File: about-openshift-sdn - - Name: Migrating to the OpenShift SDN network plugin - File: migrate-to-openshift-sdn - - Name: Rolling back to the OVN-Kubernetes network plugin - File: rollback-to-ovn-kubernetes - - Name: Configuring egress IPs for a project - File: assigning-egress-ips - Distros: openshift-origin,openshift-enterprise - - Name: Configuring an egress firewall for a project - File: configuring-egress-firewall - - Name: Viewing an egress firewall for a project - File: viewing-egress-firewall - - Name: Editing an egress firewall for a project - File: editing-egress-firewall - - Name: Removing an egress firewall from a project - File: removing-egress-firewall - - Name: Considerations for the use of an egress router pod - File: using-an-egress-router - - Name: Deploying an egress router pod in redirect mode - File: deploying-egress-router-layer3-redirection - - Name: Deploying an egress router pod in HTTP proxy mode - File: deploying-egress-router-http-redirection - - Name: Deploying an egress router pod in DNS proxy mode - File: deploying-egress-router-dns-redirection - - Name: Configuring an egress router pod destination list from a config map - File: configuring-egress-router-configmap - - Name: Enabling multicast for a project - File: enabling-multicast - Distros: openshift-origin,openshift-enterprise - - Name: Disabling multicast for a project - File: disabling-multicast - Distros: openshift-origin,openshift-enterprise - - Name: Configuring multitenant isolation - File: multitenant-isolation - Distros: openshift-origin,openshift-enterprise - - Name: Configuring kube-proxy - File: configuring-kube-proxy - Distros: openshift-enterprise,openshift-origin -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes -- Name: Configuring ingress cluster traffic - Dir: configuring_ingress_cluster_traffic - Topics: - - Name: Overview - File: overview-traffic - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ExternalIPs for services - File: configuring-externalip - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using an Ingress Controller - File: configuring-ingress-cluster-traffic-ingress-controller - - Name: Configuring ingress cluster traffic using a load balancer 
- File: configuring-ingress-cluster-traffic-load-balancer - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic on AWS - File: configuring-ingress-cluster-traffic-aws - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using a service external IP - File: configuring-ingress-cluster-traffic-service-external-ip - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using a NodePort - File: configuring-ingress-cluster-traffic-nodeport - Distros: openshift-enterprise,openshift-origin - - Name: Configuring ingress cluster traffic using load balancer allowed source ranges - File: configuring-ingress-cluster-traffic-load-balancer-allowed-source-ranges - Distros: openshift-enterprise,openshift-origin - # Kubernetes NMState (TECHNOLOGY PREVIEW) -- Name: Kubernetes NMState - Dir: k8s_nmstate - Topics: - - Name: About the Kubernetes NMState Operator - File: k8s-nmstate-about-the-k8s-nmstate-operator - - Name: Observing node network state - File: k8s-nmstate-observing-node-network-state - - Name: Updating node network configuration - File: k8s-nmstate-updating-node-network-config - - Name: Troubleshooting node network configuration - File: k8s-nmstate-troubleshooting-node-network -- Name: Configuring the cluster-wide proxy - File: enable-cluster-wide-proxy - Distros: openshift-enterprise,openshift-origin -- Name: Configuring a custom PKI - File: configuring-a-custom-pki - Distros: openshift-enterprise,openshift-origin -- Name: Load balancing on OpenStack - File: load-balancing-openstack -- Name: Load balancing with MetalLB - Dir: metallb - Topics: - - Name: About MetalLB and the MetalLB Operator - File: about-metallb - - Name: Installing the MetalLB Operator - File: metallb-operator-install - - Name: Upgrading the MetalLB Operator - File: metallb-upgrading-operator - - Name: Configuring MetalLB address pools - File: metallb-configure-address-pools - - Name: Advertising the IP address pools - File: about-advertising-ipaddresspool - - Name: Configuring MetalLB BGP peers - File: metallb-configure-bgp-peers - - Name: Advertising an IP address pool using the community alias - File: metallb-configure-community-alias - - Name: Configuring MetalLB BFD profiles - File: metallb-configure-bfd-profiles - - Name: Configuring services to use MetalLB - File: metallb-configure-services - - Name: MetalLB logging, troubleshooting, and support - File: metallb-troubleshoot-support -- Name: Associating secondary interfaces metrics to network attachments - File: associating-secondary-interfaces-metrics-to-network-attachments -- Name: Network Observability - Dir: network_observability - Topics: - - Name: Network Observability release notes - File: network-observability-operator-release-notes - - Name: Network Observability overview - File: network-observability-overview - - Name: Installing the Network Observability Operator - File: installing-operators - - Name: Understanding Network Observability Operator - File: understanding-network-observability-operator - - Name: Configuring the Network Observability Operator - File: configuring-operator - - Name: Observing the network traffic - File: observing-network-traffic - - Name: Monitoring the Network Observability Operator - File: network-observability-operator-monitoring - - Name: API reference - File: flowcollector-api - - Name: JSON flows format reference - File: json-flows-format-reference - - Name: Troubleshooting Network Observability - File: 
troubleshooting-network-observability ---- -Name: Storage -Dir: storage -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Storage overview - File: index - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Understanding persistent storage - File: understanding-persistent-storage - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Configuring persistent storage - Dir: persistent_storage - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws - - Name: Persistent storage using Azure Disk - File: persistent-storage-azure - - Name: Persistent storage using Azure File - File: persistent-storage-azure-file - - Name: Persistent storage using Cinder - File: persistent-storage-cinder - - Name: Persistent storage using Fibre Channel - File: persistent-storage-fibre - - Name: Persistent storage using FlexVolume - File: persistent-storage-flexvolume - - Name: Persistent storage using GCE Persistent Disk - File: persistent-storage-gce - - Name: Persistent Storage using iSCSI - File: persistent-storage-iscsi - - Name: Persistent storage using NFS - File: persistent-storage-nfs - - Name: Persistent storage using Red Hat OpenShift Data Foundation - File: persistent-storage-ocs - - Name: Persistent storage using VMware vSphere - File: persistent-storage-vsphere - - Name: Persistent storage using local storage - Dir: persistent_storage_local - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Persistent storage using local volumes - File: persistent-storage-local - - Name: Persistent storage using hostPath - File: persistent-storage-hostpath - - Name: Persistent storage using LVM Storage - File: persistent-storage-using-lvms -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: CSI inline ephemeral volumes - File: ephemeral-storage-csi-inline - - Name: Shared Resource CSI Driver Operator - File: ephemeral-storage-shared-resource-csi-driver-operator - - Name: CSI volume snapshots - File: persistent-storage-csi-snapshots - - Name: CSI volume cloning - File: persistent-storage-csi-cloning - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: CSI automatic migration - File: persistent-storage-csi-migration - - Name: Detach CSI volumes after non-graceful node shutdown - File: persistent-storage-csi-vol-detach-non-graceful-shutdown - - Name: AliCloud Disk CSI Driver Operator - File: persistent-storage-csi-alicloud-disk - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: persistent-storage-csi-aws-efs - - Name: Azure Disk CSI Driver Operator - File: persistent-storage-csi-azure - - Name: Azure File CSI Driver Operator - File: persistent-storage-csi-azure-file - - Name: Azure Stack Hub CSI Driver Operator - File: persistent-storage-csi-azure-stack-hub - - Name: GCP PD CSI Driver Operator - File: persistent-storage-csi-gcp-pd - - Name: GCP Filestore CSI Driver Operator - File: persistent-storage-csi-google-cloud-file - - Name: IBM VPC Block CSI Driver Operator - File: 
persistent-storage-csi-ibm-vpc-block - - Name: IBM Power Virtual Server Block CSI Driver Operator - File: persistent-storage-csi-ibm-powervs-block - - Name: OpenStack Cinder CSI Driver Operator - File: persistent-storage-csi-cinder - - Name: OpenStack Manila CSI Driver Operator - File: persistent-storage-csi-manila - - Name: Red Hat Virtualization CSI Driver Operator - File: persistent-storage-csi-ovirt - - Name: VMware vSphere CSI Driver Operator - File: persistent-storage-csi-vsphere -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols - Distros: openshift-enterprise,openshift-origin,openshift-online -- Name: Expanding persistent volumes - File: expanding-persistent-volumes - Distros: openshift-enterprise,openshift-origin -- Name: Dynamic provisioning - File: dynamic-provisioning - Distros: openshift-enterprise,openshift-origin ---- -Name: Registry -Dir: registry -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in OpenShift Container Platform - File: configuring-registry-operator - Distros: openshift-enterprise -- Name: Image Registry Operator in OKD - File: configuring-registry-operator - Distros: openshift-origin -- Name: Setting up and configuring the registry - Dir: configuring_registry_storage - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Configuring the registry for AWS user-provisioned infrastructure - File: configuring-registry-storage-aws-user-infrastructure - - Name: Configuring the registry for GCP user-provisioned infrastructure - File: configuring-registry-storage-gcp-user-infrastructure - - Name: Configuring the registry for OpenStack user-provisioned infrastructure - File: configuring-registry-storage-openstack-user-infrastructure - - Name: Configuring the registry for Azure user-provisioned infrastructure - File: configuring-registry-storage-azure-user-infrastructure - - Name: Configuring the registry for OpenStack - File: configuring-registry-storage-osp - - Name: Configuring the registry for bare metal - File: configuring-registry-storage-baremetal - - Name: Configuring the registry for vSphere - File: configuring-registry-storage-vsphere - - Name: Configuring the registry for OpenShift Data Foundation - File: configuring-registry-storage-rhodf - Distros: openshift-enterprise,openshift-origin -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry - Distros: openshift-enterprise,openshift-origin ---- -Name: Operators -Dir: operators -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Operators overview - File: index -- Name: Understanding Operators - Dir: understanding - Topics: - - Name: What are Operators? 
- File: olm-what-operators-are - - Name: Packaging format - File: olm-packaging-format - - Name: Common terms - File: olm-common-terms - - Name: Operator Lifecycle Manager (OLM) - Dir: olm - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Concepts and resources - File: olm-understanding-olm - - Name: Architecture - File: olm-arch - - Name: Workflow - File: olm-workflow - - Name: Dependency resolution - File: olm-understanding-dependency-resolution - - Name: Operator groups - File: olm-understanding-operatorgroups - - Name: Multitenancy and Operator colocation - File: olm-colocation - - Name: Operator conditions - File: olm-operatorconditions - - Name: Metrics - File: olm-understanding-metrics - - Name: Webhooks - File: olm-webhooks - - Name: OperatorHub - Distros: openshift-enterprise,openshift-origin - File: olm-understanding-operatorhub - - Name: Red Hat-provided Operator catalogs - Distros: openshift-enterprise - File: olm-rh-catalogs - - Name: Operators in multitenant clusters - Distros: openshift-enterprise,openshift-origin - File: olm-multitenancy - - Name: CRDs - Dir: crds - Topics: - - Name: Extending the Kubernetes API with CRDs - File: crd-extending-api-with-crds - Distros: openshift-origin,openshift-enterprise - - Name: Managing resources from CRDs - File: crd-managing-resources-from-crds - Distros: openshift-origin,openshift-enterprise -- Name: User tasks - Dir: user - Topics: - - Name: Creating applications from installed Operators - File: olm-creating-apps-from-installed-operators - Distros: openshift-enterprise,openshift-origin - - Name: Installing Operators in your namespace - File: olm-installing-operators-in-namespace - Distros: openshift-enterprise,openshift-origin -- Name: Administrator tasks - Dir: admin - Topics: - - Name: Adding Operators to a cluster - File: olm-adding-operators-to-cluster - Distros: openshift-enterprise,openshift-origin - - Name: Updating installed Operators - File: olm-upgrading-operators - Distros: openshift-enterprise,openshift-origin - - Name: Deleting Operators from a cluster - File: olm-deleting-operators-from-cluster - Distros: openshift-enterprise,openshift-origin - - Name: Configuring OLM features - File: olm-config - Distros: openshift-enterprise,openshift-origin - - Name: Configuring proxy support - File: olm-configuring-proxy-support - Distros: openshift-enterprise,openshift-origin - - Name: Viewing Operator status - File: olm-status - Distros: openshift-enterprise,openshift-origin - - Name: Managing Operator conditions - File: olm-managing-operatorconditions - Distros: openshift-origin,openshift-enterprise - - Name: Allowing non-cluster administrators to install Operators - File: olm-creating-policy - Distros: openshift-origin,openshift-enterprise - - Name: Managing custom catalogs - File: olm-managing-custom-catalogs - Distros: openshift-origin,openshift-enterprise - - Name: Using OLM on restricted networks - File: olm-restricted-networks - Distros: openshift-origin,openshift-enterprise - - Name: Catalog source pod scheduling - File: olm-cs-podsched - Distros: openshift-origin,openshift-enterprise - - Name: Managing platform Operators - File: olm-managing-po - Distros: openshift-enterprise,openshift-origin -- Name: Developing Operators - Dir: operator_sdk - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: About the Operator SDK - File: osdk-about - - Name: Installing the Operator SDK CLI - File: osdk-installing-cli - - Name: Go-based Operators - Dir: golang - Topics: - - Name: Getting started - 
File: osdk-golang-quickstart - - Name: Tutorial - File: osdk-golang-tutorial - - Name: Project layout - File: osdk-golang-project-layout - - Name: Updating Go-based projects - File: osdk-golang-updating-projects - - Name: Ansible-based Operators - Dir: ansible - Topics: - - Name: Getting started - File: osdk-ansible-quickstart - - Name: Tutorial - File: osdk-ansible-tutorial - - Name: Project layout - File: osdk-ansible-project-layout - - Name: Updating Ansible-based projects - File: osdk-ansible-updating-projects - - Name: Ansible support - File: osdk-ansible-support - - Name: Kubernetes Collection for Ansible - File: osdk-ansible-k8s-collection - - Name: Using Ansible inside an Operator - File: osdk-ansible-inside-operator - - Name: Custom resource status management - File: osdk-ansible-cr-status - - Name: Helm-based Operators - Dir: helm - Topics: - - Name: Getting started - File: osdk-helm-quickstart - - Name: Tutorial - File: osdk-helm-tutorial - - Name: Project layout - File: osdk-helm-project-layout - - Name: Updating Helm-based projects - File: osdk-helm-updating-projects - - Name: Helm support - File: osdk-helm-support - - Name: Hybrid Helm Operator - File: osdk-hybrid-helm - - Name: Updating Hybrid Helm-based projects - File: osdk-hybrid-helm-updating-projects - - Name: Java-based Operators - Dir: java - Topics: - - Name: Getting started - File: osdk-java-quickstart - - Name: Tutorial - File: osdk-java-tutorial - - Name: Project layout - File: osdk-java-project-layout - - Name: Updating Java-based projects - File: osdk-java-updating-projects - - Name: Defining cluster service versions (CSVs) - File: osdk-generating-csvs - - Name: Working with bundle images - File: osdk-working-bundle-images - - Name: Complying with pod security admission - File: osdk-complying-with-psa - - Name: Validating Operators using the scorecard - File: osdk-scorecard - - Name: Validating Operator bundles - File: osdk-bundle-validate - - Name: High-availability or single-node cluster detection and support - File: osdk-ha-sno - - Name: Configuring built-in monitoring with Prometheus - File: osdk-monitoring-prometheus - - Name: Configuring leader election - File: osdk-leader-election - - Name: Object pruning utility - File: osdk-pruning-utility - - Name: Migrating package manifest projects to bundle format - File: osdk-pkgman-to-bundle - - Name: Operator SDK CLI reference - File: osdk-cli-ref - - Name: Migrating to Operator SDK v0.1.0 - File: osdk-migrating-to-v0-1-0 - Distros: openshift-origin -- Name: Cluster Operators reference - File: operator-reference ---- -Name: CI/CD -Dir: cicd -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: CI/CD overview - File: index -- Name: Builds - Dir: builds - Distros: openshift-enterprise,openshift-origin,openshift-online - Topics: - - Name: Understanding image builds - File: understanding-image-builds - - Name: Understanding build configurations - File: understanding-buildconfigs - - Name: Creating build inputs - File: creating-build-inputs - - Name: Managing build output - File: managing-build-output - - Name: Using build strategies - File: build-strategies - - Name: Custom image builds with Buildah - File: custom-builds-buildah - Distros: openshift-enterprise,openshift-origin - - Name: Performing and configuring basic builds - File: basic-build-operations - Distros: openshift-enterprise,openshift-origin,openshift-online - - Name: Triggering and modifying builds - File: triggering-builds-build-hooks - Distros: 
openshift-enterprise,openshift-origin,openshift-online - - Name: Performing advanced builds - File: advanced-build-operations - Distros: openshift-enterprise,openshift-origin - - Name: Using Red Hat subscriptions in builds - File: running-entitled-builds - Distros: openshift-enterprise,openshift-origin - - Name: Securing builds by strategy - File: securing-builds-by-strategy - Distros: openshift-enterprise,openshift-origin - - Name: Build configuration resources - File: build-configuration - Distros: openshift-enterprise,openshift-origin - - Name: Troubleshooting builds - File: troubleshooting-builds - Distros: openshift-enterprise,openshift-origin - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-enterprise,openshift-origin -- Name: Pipelines - Dir: pipelines - Distros: openshift-enterprise - Topics: - - Name: OpenShift Pipelines release notes - File: op-release-notes - - Name: Understanding OpenShift Pipelines - File: understanding-openshift-pipelines - - Name: Installing OpenShift Pipelines - File: installing-pipelines - - Name: Uninstalling OpenShift Pipelines - File: uninstalling-pipelines - - Name: Creating CI/CD solutions for applications using OpenShift Pipelines - File: creating-applications-with-cicd-pipelines - - Name: Managing non-versioned and versioned cluster tasks - File: managing-nonversioned-and-versioned-cluster-tasks - - Name: Using Tekton Hub with OpenShift Pipelines - File: using-tekton-hub-with-openshift-pipelines - - Name: Specifying remote pipelines and tasks using resolvers - File: remote-pipelines-tasks-resolvers - - Name: Using Pipelines as Code - File: using-pipelines-as-code - - Name: Working with OpenShift Pipelines using the Developer perspective - File: working-with-pipelines-using-the-developer-perspective - - Name: Customizing configurations in the TektonConfig custom resource - File: customizing-configurations-in-the-tektonconfig-cr - - Name: Reducing resource consumption of OpenShift Pipelines - File: reducing-pipelines-resource-consumption - - Name: Setting compute resource quota for OpenShift Pipelines - File: setting-compute-resource-quota-for-openshift-pipelines - - Name: Using pods in a privileged security context - File: using-pods-in-a-privileged-security-context - - Name: Securing webhooks with event listeners - File: securing-webhooks-with-event-listeners - - Name: Authenticating pipelines using git secret - File: authenticating-pipelines-using-git-secret - - Name: Using Tekton Chains for OpenShift Pipelines supply chain security - File: using-tekton-chains-for-openshift-pipelines-supply-chain-security - - Name: Viewing pipeline logs using the OpenShift Logging Operator - File: viewing-pipeline-logs-using-the-openshift-logging-operator - - Name: Unprivileged building of container images using Buildah - File: unprivileged-building-of-container-images-using-buildah -- Name: GitOps - Dir: gitops - Distros: openshift-enterprise - Topics: - - Name: OpenShift GitOps release notes - File: gitops-release-notes - - Name: Understanding OpenShift GitOps - File: understanding-openshift-gitops - - Name: Installing OpenShift GitOps - File: installing-openshift-gitops - - Name: Uninstalling OpenShift GitOps - File: uninstalling-openshift-gitops - - Name: Setting up a new Argo CD instance - File: setting-up-argocd-instance - - Name: Configuring an OpenShift cluster by deploying an application with cluster configurations - File: 
configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations - - Name: Deploying a Spring Boot application with Argo CD - File: deploying-a-spring-boot-application-with-argo-cd - - Name: Argo CD custom resource properties - File: argo-cd-custom-resource-properties - - Name: Monitoring application health status - File: health-information-for-resources-deployment - - Name: Configuring SSO for Argo CD using Dex - File: configuring-sso-on-argo-cd-using-dex - - Name: Configuring SSO for Argo CD using Keycloak - File: configuring-sso-for-argo-cd-using-keycloak - - Name: Configuring Argo CD RBAC - File: configuring-argo-cd-rbac - - Name: Configuring Resource Quota - File: configuring-resource-quota - - Name: Monitoring Argo CD custom resource workloads - File: monitoring-argo-cd-custom-resource-workloads - - Name: Running Control Plane Workloads on Infra nodes - File: run-gitops-control-plane-workload-on-infra-nodes - - Name: Sizing requirements for GitOps Operator - File: about-sizing-requirements-gitops - - Name: Collecting debugging data for a support case - File: collecting-debugging-data-for-support - - Name: Troubleshooting issues in GitOps - File: troubleshooting-issues-in-GitOps -- Name: Jenkins - Dir: jenkins - Distros: openshift-enterprise - Topics: - - Name: Configuring Jenkins images - File: images-other-jenkins - - Name: Jenkins agent - File: images-other-jenkins-agent - - Name: Migrating from Jenkins to OpenShift Pipelines - File: migrating-from-jenkins-to-openshift-pipelines - - Name: Important changes to OpenShift Jenkins images - File: important-changes-to-openshift-jenkins-images ---- -Name: Images -Dir: openshift_images -Distros: openshift-enterprise,openshift-origin,openshift-online -Topics: -- Name: Overview of images - File: index -- Name: Configuring the Cluster Samples Operator - File: configuring-samples-operator - Distros: openshift-enterprise,openshift-origin -- Name: Using the Cluster Samples Operator with an alternate registry - File: samples-operator-alt-registry - Distros: openshift-enterprise,openshift-origin -- Name: Creating images - File: create-images -- Name: Managing images - Dir: managing_images - Topics: - - Name: Managing images overview - File: managing-images-overview - - Name: Tagging images - File: tagging-images - - Name: Image pull policy - File: image-pull-policy - - Name: Using image pull secrets - File: using-image-pull-secrets -- Name: Managing image streams - File: image-streams-manage - Distros: openshift-enterprise,openshift-origin -- Name: Using image streams with Kubernetes resources - File: using-imagestreams-with-kube-resources - Distros: openshift-enterprise,openshift-origin -- Name: Triggering updates on image stream changes - File: triggering-updates-on-imagestream-changes - Distros: openshift-enterprise,openshift-origin -- Name: Image configuration resources - File: image-configuration - Distros: openshift-enterprise,openshift-origin -- Name: Using templates - File: using-templates -- Name: Using Ruby on Rails - File: templates-using-ruby-on-rails -- Name: Using images - Dir: using_images - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Using images overview - File: using-images-overview - - Name: Source-to-image - File: using-s21-images - - Name: Customizing source-to-image images - File: customizing-s2i-images ---- -Name: Building applications -Dir: applications -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Building applications overview - File: index -- Name: 
Projects - Dir: projects - Topics: - - Name: Working with projects - File: working-with-projects - - Name: Creating a project as another user - File: creating-project-other-user - Distros: openshift-enterprise,openshift-origin - - Name: Configuring project creation - File: configuring-project-creation - Distros: openshift-enterprise,openshift-origin -- Name: Creating applications - Dir: creating_applications - Topics: - - Name: Creating applications using the Developer perspective - File: odc-creating-applications-using-developer-perspective - - Name: Creating applications from installed Operators - File: creating-apps-from-installed-operators - - Name: Creating applications using the CLI - File: creating-applications-using-cli -- Name: Viewing application composition using the Topology view - File: odc-viewing-application-composition-using-topology-view -- Name: Exporting applications - File: odc-exporting-applications -- Name: Connecting applications to services - Dir: connecting_applications_to_services - Topics: - - Name: Service Binding Operator release notes - File: sbo-release-notes - - Name: Understanding Service Binding Operator - File: understanding-service-binding-operator - - Name: Installing Service Binding Operator - File: installing-sbo - - Name: Getting started with service binding - File: getting-started-with-service-binding - - Name: Getting started with service binding on IBM Power, IBM Z, and IBM LinuxONE - File: getting-started-with-service-binding-ibm-power-ibm-z - - Name: Exposing binding data from a service - File: exposing-binding-data-from-a-service - - Name: Projecting binding data - File: projecting-binding-data - - Name: Binding workloads using Service Binding Operator - File: binding-workloads-using-sbo - - Name: Connecting an application to a service using the Developer perspective - File: odc-connecting-an-application-to-a-service-using-the-developer-perspective -- Name: Working with Helm charts - Dir: working_with_helm_charts - Topics: - - Name: Understanding Helm - File: understanding-helm - - Name: Installing Helm - File: installing-helm - - Name: Configuring custom Helm chart repositories - File: configuring-custom-helm-chart-repositories - - Name: Working with Helm releases - File: odc-working-with-helm-releases -- Name: Deployments - Dir: deployments - Topics: - - Name: Understanding Deployments and DeploymentConfigs - File: what-deployments-are - - Name: Managing deployment processes - File: managing-deployment-processes - - Name: Using deployment strategies - File: deployment-strategies - - Name: Using route-based deployment strategies - File: route-based-deployment-strategies -- Name: Quotas - Dir: quotas - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Resource quotas per project - File: quotas-setting-per-project - - Name: Resource quotas across multiple projects - File: quotas-setting-across-multiple-projects - Distros: openshift-enterprise,openshift-origin -- Name: Using config maps with applications - File: config-maps -- Name: Monitoring project and application metrics using the Developer perspective - File: odc-monitoring-project-and-application-metrics-using-developer-perspective -- Name: Monitoring application health - File: application-health -- Name: Editing applications - File: odc-editing-applications -- Name: Working with quotas - File: working-with-quotas - Distros: openshift-online -- Name: Pruning objects to reclaim resources - File: pruning-objects - Distros: openshift-origin,openshift-enterprise -- Name: Idling 
applications - File: idling-applications - Distros: openshift-origin,openshift-enterprise -- Name: Deleting applications - File: odc-deleting-applications -- Name: Using the Red Hat Marketplace - File: red-hat-marketplace - Distros: openshift-origin,openshift-enterprise ---- -Name: Machine management -Dir: machine_management -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Overview of machine management - File: index -- Name: Managing compute machines with the Machine API - Dir: creating_machinesets - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Creating a compute machine set on Alibaba Cloud - File: creating-machineset-alibaba - - Name: Creating a compute machine set on AWS - File: creating-machineset-aws - - Name: Creating a compute machine set on Azure - File: creating-machineset-azure - - Name: Creating a compute machine set on Azure Stack Hub - File: creating-machineset-azure-stack-hub - - Name: Creating a compute machine set on GCP - File: creating-machineset-gcp - - Name: Creating a compute machine set on IBM Cloud - File: creating-machineset-ibm-cloud - - Name: Creating a compute machine set on IBM Power Virtual Server - File: creating-machineset-ibm-power-vs - - Name: Creating a compute machine set on Nutanix - File: creating-machineset-nutanix - - Name: Creating a compute machine set on OpenStack - File: creating-machineset-osp - - Name: Creating a compute machine set on RHV - File: creating-machineset-rhv - Distros: openshift-enterprise - - Name: Creating a compute machine set on oVirt - File: creating-machineset-rhv - Distros: openshift-origin - - Name: Creating a compute machine set on vSphere - File: creating-machineset-vsphere - - Name: Creating a compute machine set on bare metal - File: creating-machineset-bare-metal -- Name: Manually scaling a compute machine set - File: manually-scaling-machineset -- Name: Modifying a compute machine set - File: modifying-machineset -- Name: Deleting a machine - File: deleting-machine -- Name: Applying autoscaling to a cluster - File: applying-autoscaling -- Name: Creating infrastructure machine sets - File: creating-infrastructure-machinesets -- Name: Adding a RHEL compute machine - File: adding-rhel-compute - Distros: openshift-enterprise -- Name: Adding more RHEL compute machines - File: more-rhel-compute - Distros: openshift-enterprise -- Name: Managing user-provisioned infrastructure manually - Dir: user_infra - Topics: - - Name: Adding compute machines to clusters with user-provisioned infrastructure manually - File: adding-compute-user-infra-general - - Name: Adding compute machines to AWS using CloudFormation templates - File: adding-aws-compute-user-infra - - Name: Adding compute machines to vSphere manually - File: adding-vsphere-compute-user-infra - - Name: Adding compute machines to a cluster on RHV - File: adding-rhv-compute-user-infra - - Name: Adding compute machines to bare metal - File: adding-bare-metal-compute-user-infra -- Name: Managing machines with the Cluster API - File: capi-machine-management -- Name: Managing control plane machines - Dir: control_plane_machine_management - Topics: - - Name: About control plane machine sets - File: cpmso-about - - Name: Getting started with control plane machine sets - File: cpmso-getting-started - - Name: Control plane machine set configuration - File: cpmso-configuration - - Name: Using control plane machine sets - File: cpmso-using - - Name: Control plane resiliency and recovery - File: cpmso-resiliency - - Name: Troubleshooting the 
control plane machine set - File: cpmso-troubleshooting - - Name: Disabling the control plane machine set - File: cpmso-disabling -- Name: Deploying machine health checks - File: deploying-machine-health-checks ---- -Name: Hosted control planes -Dir: hosted_control_planes -Distros: openshift-enterprise, openshift-origin -Topics: -- Name: Hosted control planes overview - File: index -- Name: Configuring hosted control planes - File: hcp-configuring -- Name: Managing hosted control planes - File: hcp-managing -- Name: Backup, restore, and disaster recovery for hosted control planes - File: hcp-backup-restore-dr ---- -Name: Nodes -Dir: nodes -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Overview of nodes - File: index -- Name: Working with pods - Dir: pods - Topics: - - Name: About pods - File: nodes-pods-using - - Name: Viewing pods - File: nodes-pods-viewing - - Name: Configuring a cluster for pods - File: nodes-pods-configuring - Distros: openshift-enterprise,openshift-origin - - Name: Automatically scaling pods with the horizontal pod autoscaler - File: nodes-pods-autoscaling - - Name: Automatically adjust pod resource levels with the vertical pod autoscaler - File: nodes-pods-vertical-autoscaler - - Name: Providing sensitive data to pods - File: nodes-pods-secrets - - Name: Creating and using config maps - File: nodes-pods-configmaps - - Name: Using Device Manager to make devices available to nodes - File: nodes-pods-plugins - Distros: openshift-enterprise,openshift-origin - - Name: Including pod priority in pod scheduling decisions - File: nodes-pods-priority - Distros: openshift-enterprise,openshift-origin - - Name: Placing pods on specific nodes using node selectors - File: nodes-pods-node-selectors - Distros: openshift-enterprise,openshift-origin - - Name: Run Once Duration Override Operator - Dir: run_once_duration_override - Distros: openshift-enterprise - Topics: - - Name: Run Once Duration Override Operator overview - File: index - - Name: Run Once Duration Override Operator release notes - File: run-once-duration-override-release-notes - - Name: Overriding the active deadline for run-once pods - File: run-once-duration-override-install - - Name: Uninstalling the Run Once Duration Override Operator - File: run-once-duration-override-uninstall -- Name: Automatically scaling pods with the Custom Metrics Autoscaler Operator - Dir: cma - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Custom Metrics Autoscaler Operator overview - File: nodes-cma-autoscaling-custom - - Name: Custom Metrics Autoscaler Operator release notes - File: nodes-cma-autoscaling-custom-rn - - Name: Installing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-install - - Name: Understanding the custom metrics autoscaler triggers - File: nodes-cma-autoscaling-custom-trigger - - Name: Understanding the custom metrics autoscaler trigger authentications - File: nodes-cma-autoscaling-custom-trigger-auth - - Name: Pausing the custom metrics autoscaler - File: nodes-cma-autoscaling-custom-pausing - - Name: Gathering audit logs - File: nodes-cma-autoscaling-custom-audit-log - - Name: Gathering debugging data - File: nodes-cma-autoscaling-custom-debugging - - Name: Viewing Operator metrics - File: nodes-cma-autoscaling-custom-metrics - - Name: Understanding how to add custom metrics autoscalers - File: nodes-cma-autoscaling-custom-adding - - Name: Removing the Custom Metrics Autoscaler Operator - File: nodes-cma-autoscaling-custom-removing -- Name: Controlling pod 
placement onto nodes (scheduling) - Dir: scheduling - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About pod placement using the scheduler - File: nodes-scheduler-about - - Name: Scheduling pods using a scheduler profile - File: nodes-scheduler-profiles - - Name: Placing pods relative to other pods using pod affinity and anti-affinity rules - File: nodes-scheduler-pod-affinity - - Name: Controlling pod placement on nodes using node affinity rules - File: nodes-scheduler-node-affinity - - Name: Placing pods onto overcommitted nodes - File: nodes-scheduler-overcommit - - Name: Controlling pod placement using node taints - File: nodes-scheduler-taints-tolerations - - Name: Placing pods on specific nodes using node selectors - File: nodes-scheduler-node-selectors - - Name: Controlling pod placement using pod topology spread constraints - File: nodes-scheduler-pod-topology-spread-constraints -# - Name: Placing a pod on a specific node by name -# File: nodes-scheduler-node-names -# - Name: Placing a pod in a specific project -# File: nodes-scheduler-node-projects -# - Name: Keeping your cluster balanced using the descheduler -# File: nodes-scheduler-descheduler - - Name: Evicting pods using the descheduler - File: nodes-descheduler - - Name: Secondary scheduler - Dir: secondary_scheduler - Distros: openshift-enterprise - Topics: - - Name: Secondary scheduler overview - File: index - - Name: Secondary Scheduler Operator release notes - File: nodes-secondary-scheduler-release-notes - - Name: Scheduling pods using a secondary scheduler - File: nodes-secondary-scheduler-configuring - - Name: Uninstalling the Secondary Scheduler Operator - File: nodes-secondary-scheduler-uninstalling -- Name: Using Jobs and DaemonSets - Dir: jobs - Topics: - - Name: Running background tasks on nodes automatically with daemonsets - File: nodes-pods-daemonsets - Distros: openshift-enterprise,openshift-origin - - Name: Running tasks in pods using jobs - File: nodes-nodes-jobs -- Name: Working with nodes - Dir: nodes - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Viewing and listing the nodes in your cluster - File: nodes-nodes-viewing - - Name: Working with nodes - File: nodes-nodes-working - - Name: Managing nodes - File: nodes-nodes-managing - - Name: Managing graceful node shutdown - File: nodes-nodes-graceful-shutdown - - Name: Managing the maximum number of pods per node - File: nodes-nodes-managing-max-pods - - Name: Using the Node Tuning Operator - File: nodes-node-tuning-operator - - Name: Remediating, fencing, and maintaining nodes - File: nodes-remediating-fencing-maintaining-rhwa - - Name: Understanding node rebooting - File: nodes-nodes-rebooting - - Name: Freeing node resources using garbage collection - File: nodes-nodes-garbage-collection - - Name: Allocating resources for nodes - File: nodes-nodes-resources-configuring - - Name: Allocating specific CPUs for nodes in a cluster - File: nodes-nodes-resources-cpus - - Name: Configuring the TLS security profile for the kubelet - File: nodes-nodes-tls - Distros: openshift-enterprise,openshift-origin -# - Name: Monitoring for problems in your nodes -# File: nodes-nodes-problem-detector - - Name: Machine Config Daemon metrics - File: nodes-nodes-machine-config-daemon-metrics - - Name: Creating infrastructure nodes - File: nodes-nodes-creating-infrastructure-nodes -- Name: Working with containers - Dir: containers - Topics: - - Name: Understanding containers - File: nodes-containers-using - - Name: Using Init Containers 
to perform tasks before a pod is deployed - File: nodes-containers-init - Distros: openshift-enterprise,openshift-origin - - Name: Using volumes to persist container data - File: nodes-containers-volumes - - Name: Mapping volumes using projected volumes - File: nodes-containers-projected-volumes - - Name: Allowing containers to consume API objects - File: nodes-containers-downward-api - - Name: Copying files to or from a container - File: nodes-containers-copying-files - - Name: Executing remote commands in a container - File: nodes-containers-remote-commands - - Name: Using port forwarding to access applications in a container - File: nodes-containers-port-forwarding - - Name: Using sysctls in containers - File: nodes-containers-sysctls -- Name: Working with clusters - Dir: clusters - Topics: - - Name: Viewing system event information in a cluster - File: nodes-containers-events - - Name: Analyzing cluster resource levels - File: nodes-cluster-resource-levels - Distros: openshift-enterprise,openshift-origin - - Name: Setting limit ranges - File: nodes-cluster-limit-ranges - - Name: Configuring cluster memory to meet container memory and risk requirements - File: nodes-cluster-resource-configure - Distros: openshift-enterprise,openshift-origin - - Name: Configuring your cluster to place pods on overcommitted nodes - File: nodes-cluster-overcommit - Distros: openshift-enterprise,openshift-origin - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-2 - Distros: openshift-enterprise - - Name: Configuring the Linux cgroup version on your nodes - File: nodes-cluster-cgroups-okd - Distros: openshift-origin - - Name: Enabling features using FeatureGates - File: nodes-cluster-enabling-features - Distros: openshift-enterprise,openshift-origin - - Name: Improving cluster stability in high latency environments using worker latency profiles - File: nodes-cluster-worker-latency-profiles - Distros: openshift-enterprise,openshift-origin -- Name: Remote worker nodes on the network edge - Dir: edge - Distros: openshift-enterprise - Topics: - - Name: Using remote worker nodes at the network edge - File: nodes-edge-remote-workers -- Name: Worker nodes for single-node OpenShift clusters - Dir: nodes - Distros: openshift-enterprise - Topics: - - Name: Adding worker nodes to single-node OpenShift clusters - File: nodes-sno-worker-nodes ---- -Name: Windows Container Support for OpenShift -Dir: windows_containers -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Red Hat OpenShift support for Windows Containers overview - File: index -- Name: Red Hat OpenShift support for Windows Containers release notes - File: windows-containers-release-notes-6-x -- Name: Understanding Windows container workloads - File: understanding-windows-container-workloads -- Name: Enabling Windows container workloads - File: enabling-windows-container-workloads -- Name: Creating Windows machine sets - Dir: creating_windows_machinesets - Topics: - - Name: Creating a Windows machine set on AWS - File: creating-windows-machineset-aws - - Name: Creating a Windows machine set on Azure - File: creating-windows-machineset-azure - - Name: Creating a Windows machine set on vSphere - File: creating-windows-machineset-vsphere - - Name: Creating a Windows machine set on GCP - File: creating-windows-machineset-gcp -- Name: Scheduling Windows container workloads - File: scheduling-windows-workloads -- Name: Windows node upgrades - File: windows-node-upgrades -- Name: Using Bring-Your-Own-Host Windows 
instances as nodes - File: byoh-windows-instance -- Name: Removing Windows nodes - File: removing-windows-nodes -- Name: Disabling Windows container workloads - File: disabling-windows-container-workloads ---- -Name: Sandboxed Containers Support for OpenShift -Dir: sandboxed_containers -Distros: openshift-enterprise -Topics: -- Name: OpenShift sandboxed containers documentation has been moved - File: sandboxed-containers-moved ---- -Name: Logging -Dir: logging -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: Logging 5.7 - Dir: v5_7 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.7 Release Notes - File: logging-5-7-release-notes - - Name: Getting started with logging - File: logging-5-7-getting-started - - Name: Understanding Logging - File: logging-5-7-architecture - - Name: Configuring Logging - File: logging-5-7-configuration - - Name: Administering Logging - File: logging-5-7-administration -# Name: Logging Reference -# File: logging-5-7-reference -- Name: Logging 5.6 - Dir: v5_6 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.6 Release Notes - File: logging-5-6-release-notes - - Name: Getting started with logging - File: logging-5-6-getting-started - - Name: Understanding Logging - File: logging-5-6-architecture - - Name: Configuring Logging - File: logging-5-6-configuration - - Name: Administering Logging - File: logging-5-6-administration - - Name: Logging Reference - File: logging-5-6-reference -- Name: Logging 5.5 - Dir: v5_5 - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Logging 5.5 Release Notes - File: logging-5-5-release-notes - - Name: Getting started with logging - File: logging-5-5-getting-started - - Name: Understanding Logging - File: logging-5-5-architecture - - Name: Administering Logging - File: logging-5-5-administration -# - Name: Configuring Logging -# File: logging-5-5-configuration -# - Name: Logging Reference -# File: logging-5-5-reference -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying - Distros: openshift-enterprise,openshift-origin -- Name: Configuring your Logging deployment - Dir: config - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - - Name: Configuring systemd-journald for Logging - File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer - Distros: openshift-enterprise,openshift-origin -- Name: Forwarding logs to third party systems - File: cluster-logging-external - Distros: 
openshift-enterprise,openshift-origin -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter - Distros: openshift-enterprise,openshift-origin -# - Name: Forwarding logs using ConfigMaps -# File: cluster-logging-external-configmap -# Distros: openshift-enterprise,openshift-origin -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: Viewing cluster dashboards - File: cluster-logging-dashboards -- Name: Troubleshooting Logging - Dir: troubleshooting - Distros: openshift-enterprise,openshift-origin - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields - Distros: openshift-enterprise,openshift-origin ---- -Name: Monitoring -Dir: monitoring -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Monitoring overview - File: monitoring-overview -- Name: Configuring the monitoring stack - File: configuring-the-monitoring-stack -- Name: Enabling monitoring for user-defined projects - File: enabling-monitoring-for-user-defined-projects -- Name: Enabling alert routing for user-defined projects - File: enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: managing-metrics -- Name: Querying metrics - File: querying-metrics -- Name: Managing metrics targets - File: managing-metrics-targets -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: reviewing-monitoring-dashboards -- Name: The NVIDIA GPU administration dashboard - File: nvidia-gpu-admin-dashboard -- Name: Monitoring bare-metal events - File: using-rfhe -- Name: Accessing third-party monitoring APIs - File: accessing-third-party-monitoring-apis -- Name: Troubleshooting monitoring issues - File: troubleshooting-monitoring-issues -- Name: Config map reference for the Cluster Monitoring Operator - File: config-map-reference-for-the-cluster-monitoring-operator ---- -Name: Scalability and performance -Dir: scalability_and_performance -Distros: openshift-origin,openshift-enterprise,openshift-webscale,openshift-dpu -Topics: -- Name: Recommended performance and scalability practices - Dir: recommended-performance-scale-practices - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Recommended control plane practices - File: recommended-control-plane-practices - - Name: Recommended infrastructure practices - File: recommended-infrastructure-practices - - Name: Recommended etcd practices - File: recommended-etcd-practices -- Name: Planning your environment according to object maximums - File: planning-your-environment-according-to-object-maximums - Distros: openshift-origin,openshift-enterprise -- Name: Recommended host practices for IBM Z & IBM LinuxONE environments - File: ibm-z-recommended-host-practices - Distros: openshift-enterprise -- Name: Using the Node Tuning Operator - File: using-node-tuning-operator - Distros: openshift-origin,openshift-enterprise -- Name: Using CPU Manager and Topology Manager - File: using-cpu-manager - 
Distros: openshift-origin,openshift-enterprise -- Name: Scheduling NUMA-aware workloads - File: cnf-numa-aware-scheduling - Distros: openshift-origin,openshift-enterprise -- Name: Scalability and performance optimization - Dir: optimization - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Optimizing storage - File: optimizing-storage - - Name: Optimizing routing - File: routing-optimization - - Name: Optimizing networking - File: optimizing-networking - - Name: Optimizing CPU usage - File: optimizing-cpu-usage -- Name: Managing bare metal hosts - File: managing-bare-metal-hosts - Distros: openshift-origin,openshift-enterprise -- Name: What huge pages do and how they are consumed by apps - File: what-huge-pages-do-and-how-they-are-consumed-by-apps - Distros: openshift-origin,openshift-enterprise -- Name: Low latency tuning - File: cnf-low-latency-tuning - Distros: openshift-origin,openshift-enterprise -- Name: Performing latency tests for platform verification - File: cnf-performing-platform-verification-latency-tests -- Name: Improving cluster stability in high latency environments using worker latency profiles - File: scaling-worker-latency-profiles -- Name: Topology Aware Lifecycle Manager for cluster updates - File: cnf-talm-for-cluster-upgrades - Distros: openshift-origin,openshift-enterprise -- Name: Creating a performance profile - File: cnf-create-performance-profiles - Distros: openshift-origin,openshift-enterprise -- Name: Workload partitioning - File: enabling-workload-partitioning - Distros: openshift-origin,openshift-enterprise -- Name: Requesting CRI-O and Kubelet profiling data by using the Node Observability Operator - File: node-observability-operator - Distros: openshift-origin,openshift-enterprise -- Name: Clusters at the network far edge - Dir: ztp_far_edge - Distros: openshift-origin,openshift-enterprise - Topics: - - Name: Challenges of the network far edge - File: ztp-deploying-far-edge-clusters-at-scale - - Name: Preparing the hub cluster for ZTP - File: ztp-preparing-the-hub-cluster - - Name: Installing managed clusters with RHACM and SiteConfig resources - File: ztp-deploying-far-edge-sites - - Name: Configuring managed clusters with policies and PolicyGenTemplate resources - File: ztp-configuring-managed-clusters-policies - - Name: Manually installing a single-node OpenShift cluster with ZTP - File: ztp-manual-install - - Name: Recommended single-node OpenShift cluster configuration for vDU application workloads - File: ztp-reference-cluster-configuration-for-vdu - - Name: Validating cluster tuning for vDU application workloads - File: ztp-vdu-validating-cluster-tuning - - Name: Advanced managed cluster configuration with SiteConfig resources - File: ztp-advanced-install-ztp - - Name: Advanced managed cluster configuration with PolicyGenTemplate resources - File: ztp-advanced-policy-config - - Name: Updating managed clusters with the Topology Aware Lifecycle Manager - File: ztp-talm-updating-managed-policies - - Name: Updating GitOps ZTP - File: ztp-updating-gitops - - Name: Expanding single-node OpenShift clusters with GitOps ZTP - File: ztp-sno-additional-worker-node - - Name: Pre-caching images for single-node OpenShift deployments - File: ztp-precaching-tool ---- -Name: Specialized hardware and driver enablement -Dir: hardware_enablement -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: About specialized hardware and driver enablement - File: about-hardware-enablement -- Name: Driver Toolkit - File: psap-driver-toolkit -- 
Name: Node Feature Discovery Operator - File: psap-node-feature-discovery-operator -- Name: Kernel Module Management Operator - File: kmm-kernel-module-management ---- -Name: Backup and restore -Dir: backup_and_restore -Distros: openshift-origin,openshift-enterprise -Topics: -- Name: Overview of backup and restore operations - File: index -- Name: Shutting down a cluster gracefully - File: graceful-cluster-shutdown -- Name: Restarting a cluster gracefully - File: graceful-cluster-restart -- Name: Application backup and restore - Dir: application_backup_and_restore - Topics: - - Name: OADP release notes - File: oadp-release-notes - - Name: OADP features and plugins - File: oadp-features-plugins - - Name: Installing and configuring OADP - Dir: installing - Topics: - - Name: About installing OADP - File: about-installing-oadp - - Name: Installing and configuring OADP with AWS - File: installing-oadp-aws - - Name: Installing and configuring OADP with Azure - File: installing-oadp-azure - - Name: Installing and configuring OADP with GCP - File: installing-oadp-gcp - - Name: Installing and configuring OADP with MCG - File: installing-oadp-mcg - - Name: Installing and configuring OADP with ODF - File: installing-oadp-ocs - - Name: Uninstalling OADP - File: uninstalling-oadp - - Name: Backing up and restoring - Dir: backing_up_and_restoring - Topics: - - Name: Backing up applications - File: backing-up-applications - - Name: Restoring applications - File: restoring-applications - - Name: Troubleshooting - File: troubleshooting - - Name: OADP API - File: oadp-api - - Name: Advanced OADP features and functionalities - File: oadp-advanced-topics -- Name: Control plane backup and restore - Dir: control_plane_backup_and_restore - Topics: - - Name: Backing up etcd data - File: backing-up-etcd - - Name: Replacing an unhealthy etcd member - File: replacing-unhealthy-etcd-member - - Name: Disaster recovery - Dir: disaster_recovery - Topics: - - Name: About disaster recovery - File: about-disaster-recovery - - Name: Restoring to a previous cluster state - File: scenario-2-restoring-cluster-state - - Name: Recovering from expired control plane certificates - File: scenario-3-expired-certs ---- -Name: Migrating from version 3 to 4 -Dir: migrating_from_ocp_3_to_4 -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Migrating from version 3 to 4 overview - File: index -- Name: About migrating from OpenShift Container Platform 3 to 4 - File: about-migrating-from-3-to-4 - Distros: openshift-enterprise -- Name: About migrating from OKD 3 to 4 - File: about-migrating-from-3-to-4 - Distros: openshift-origin -- Name: Differences between OpenShift Container Platform 3 and 4 - File: planning-migration-3-4 - Distros: openshift-enterprise -- Name: Differences between OKD 3 and 4 - File: planning-migration-3-4 - Distros: openshift-origin -- Name: Network considerations - File: planning-considerations-3-4 -- Name: About MTC - File: about-mtc-3-4 -- Name: Installing MTC - File: installing-3-4 -- Name: Installing MTC in a restricted network environment - File: installing-restricted-3-4 -- Name: Upgrading MTC - File: upgrading-3-4 -- Name: Premigration checklists - File: premigration-checklists-3-4 -- Name: Migrating your applications - File: migrating-applications-3-4 -- Name: Advanced migration options - File: advanced-migration-options-3-4 -- Name: Troubleshooting - File: troubleshooting-3-4 ---- -Name: Migration Toolkit for Containers -Dir: migration_toolkit_for_containers -Distros: 
openshift-enterprise,openshift-origin -Topics: -- Name: About MTC - File: about-mtc -- Name: MTC release notes - File: mtc-release-notes -- Name: Installing MTC - File: installing-mtc -- Name: Installing MTC in a restricted network environment - File: installing-mtc-restricted -- Name: Upgrading MTC - File: upgrading-mtc -- Name: Premigration checklists - File: premigration-checklists-mtc -- Name: Network considerations - File: network-considerations-mtc -- Name: Migrating your applications - File: migrating-applications-with-mtc -- Name: Advanced migration options - File: advanced-migration-options-mtc -- Name: Troubleshooting - File: troubleshooting-mtc ---- -Name: API reference -Dir: rest_api -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: Understanding API tiers - File: understanding-api-support-tiers -- Name: API compatibility guidelines - File: understanding-compatibility-guidelines -- Name: Editing kubelet log level verbosity and gathering logs - File: editing-kubelet-log-level-verbosity -- Name: API list - File: index -- Name: Common object reference - Dir: objects - Topics: - - Name: Index - File: index -- Name: Authorization APIs - Dir: authorization_apis - Topics: - - Name: About Authorization APIs - File: authorization-apis-index - - Name: 'LocalResourceAccessReview [authorization.openshift.io/v1]' - File: localresourceaccessreview-authorization-openshift-io-v1 - - Name: 'LocalSubjectAccessReview [authorization.openshift.io/v1]' - File: localsubjectaccessreview-authorization-openshift-io-v1 - - Name: 'ResourceAccessReview [authorization.openshift.io/v1]' - File: resourceaccessreview-authorization-openshift-io-v1 - - Name: 'SelfSubjectRulesReview [authorization.openshift.io/v1]' - File: selfsubjectrulesreview-authorization-openshift-io-v1 - - Name: 'SubjectAccessReview [authorization.openshift.io/v1]' - File: subjectaccessreview-authorization-openshift-io-v1 - - Name: 'SubjectRulesReview [authorization.openshift.io/v1]' - File: subjectrulesreview-authorization-openshift-io-v1 - - Name: 'TokenRequest [authentication.k8s.io/v1]' - File: tokenrequest-authentication-k8s-io-v1 - - Name: 'TokenReview [authentication.k8s.io/v1]' - File: tokenreview-authentication-k8s-io-v1 - - Name: 'LocalSubjectAccessReview [authorization.k8s.io/v1]' - File: localsubjectaccessreview-authorization-k8s-io-v1 - - Name: 'SelfSubjectAccessReview [authorization.k8s.io/v1]' - File: selfsubjectaccessreview-authorization-k8s-io-v1 - - Name: 'SelfSubjectRulesReview [authorization.k8s.io/v1]' - File: selfsubjectrulesreview-authorization-k8s-io-v1 - - Name: 'SubjectAccessReview [authorization.k8s.io/v1]' - File: subjectaccessreview-authorization-k8s-io-v1 -- Name: Autoscale APIs - Dir: autoscale_apis - Topics: - - Name: About Autoscale APIs - File: autoscale-apis-index - - Name: 'ClusterAutoscaler [autoscaling.openshift.io/v1]' - File: clusterautoscaler-autoscaling-openshift-io-v1 - - Name: 'MachineAutoscaler [autoscaling.openshift.io/v1beta1]' - File: machineautoscaler-autoscaling-openshift-io-v1beta1 - - Name: 'HorizontalPodAutoscaler [autoscaling/v2]' - File: horizontalpodautoscaler-autoscaling-v2 - - Name: 'Scale [autoscaling/v1]' - File: scale-autoscaling-v1 -- Name: Config APIs - Dir: config_apis - Topics: - - Name: About Config APIs - File: config-apis-index - - Name: 'APIServer [config.openshift.io/v1]' - File: apiserver-config-openshift-io-v1 - - Name: 'Authentication [config.openshift.io/v1]' - File: authentication-config-openshift-io-v1 - - Name: 'Build [config.openshift.io/v1]' - 
File: build-config-openshift-io-v1 - - Name: 'ClusterOperator [config.openshift.io/v1]' - File: clusteroperator-config-openshift-io-v1 - - Name: 'ClusterVersion [config.openshift.io/v1]' - File: clusterversion-config-openshift-io-v1 - - Name: 'Console [config.openshift.io/v1]' - File: console-config-openshift-io-v1 - - Name: 'DNS [config.openshift.io/v1]' - File: dns-config-openshift-io-v1 - - Name: 'FeatureGate [config.openshift.io/v1]' - File: featuregate-config-openshift-io-v1 - - Name: 'HelmChartRepository [helm.openshift.io/v1beta1]' - File: helmchartrepository-helm-openshift-io-v1beta1 - - Name: 'Image [config.openshift.io/v1]' - File: image-config-openshift-io-v1 - - Name: 'ImageDigestMirrorSet [config.openshift.io/v1]' - File: imagedigestmirrorset-config-openshift-io-v1 - - Name: 'ImageContentPolicy [config.openshift.io/v1]' - File: imagecontentpolicy-config-openshift-io-v1 - - Name: 'ImageTagMirrorSet [config.openshift.io/v1]' - File: imagetagmirrorset-config-openshift-io-v1 - - Name: 'Infrastructure [config.openshift.io/v1]' - File: infrastructure-config-openshift-io-v1 - - Name: 'Ingress [config.openshift.io/v1]' - File: ingress-config-openshift-io-v1 - - Name: 'Network [config.openshift.io/v1]' - File: network-config-openshift-io-v1 - - Name: 'Node [config.openshift.io/v1]' - File: node-config-openshift-io-v1 - - Name: 'OAuth [config.openshift.io/v1]' - File: oauth-config-openshift-io-v1 - - Name: 'OperatorHub [config.openshift.io/v1]' - File: operatorhub-config-openshift-io-v1 - - Name: 'Project [config.openshift.io/v1]' - File: project-config-openshift-io-v1 - - Name: 'ProjectHelmChartRepository [helm.openshift.io/v1beta1]' - File: projecthelmchartrepository-helm-openshift-io-v1beta1 - - Name: 'Proxy [config.openshift.io/v1]' - File: proxy-config-openshift-io-v1 - - Name: 'Scheduler [config.openshift.io/v1]' - File: scheduler-config-openshift-io-v1 -- Name: Console APIs - Dir: console_apis - Topics: - - Name: About Console APIs - File: console-apis-index - - Name: 'ConsoleCLIDownload [console.openshift.io/v1]' - File: consoleclidownload-console-openshift-io-v1 - - Name: 'ConsoleExternalLogLink [console.openshift.io/v1]' - File: consoleexternalloglink-console-openshift-io-v1 - - Name: 'ConsoleLink [console.openshift.io/v1]' - File: consolelink-console-openshift-io-v1 - - Name: 'ConsoleNotification [console.openshift.io/v1]' - File: consolenotification-console-openshift-io-v1 - - Name: 'ConsolePlugin [console.openshift.io/v1]' - File: consoleplugin-console-openshift-io-v1 - - Name: 'ConsoleQuickStart [console.openshift.io/v1]' - File: consolequickstart-console-openshift-io-v1 - - Name: 'ConsoleYAMLSample [console.openshift.io/v1]' - File: consoleyamlsample-console-openshift-io-v1 -- Name: Extension APIs - Dir: extension_apis - Topics: - - Name: About Extension APIs - File: extension-apis-index - - Name: 'APIService [apiregistration.k8s.io/v1]' - File: apiservice-apiregistration-k8s-io-v1 - - Name: 'CustomResourceDefinition [apiextensions.k8s.io/v1]' - File: customresourcedefinition-apiextensions-k8s-io-v1 - - Name: 'MutatingWebhookConfiguration [admissionregistration.k8s.io/v1]' - File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1 - - Name: 'ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1]' - File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1 -- Name: Image APIs - Dir: image_apis - Topics: - - Name: About Image APIs - File: image-apis-index - - Name: 'Image [image.openshift.io/v1]' - File: image-image-openshift-io-v1 - - Name: 
'ImageSignature [image.openshift.io/v1]' - File: imagesignature-image-openshift-io-v1 - - Name: 'ImageStreamImage [image.openshift.io/v1]' - File: imagestreamimage-image-openshift-io-v1 - - Name: 'ImageStreamImport [image.openshift.io/v1]' - File: imagestreamimport-image-openshift-io-v1 - - Name: 'ImageStreamLayers [image.openshift.io/v1]' - File: imagestreamlayers-image-openshift-io-v1 - - Name: 'ImageStreamMapping [image.openshift.io/v1]' - File: imagestreammapping-image-openshift-io-v1 - - Name: 'ImageStream [image.openshift.io/v1]' - File: imagestream-image-openshift-io-v1 - - Name: 'ImageStreamTag [image.openshift.io/v1]' - File: imagestreamtag-image-openshift-io-v1 - - Name: 'ImageTag [image.openshift.io/v1]' - File: imagetag-image-openshift-io-v1 - - Name: 'SecretList [image.openshift.io/v1]' - File: secretlist-image-openshift-io-v1 -- Name: Machine APIs - Dir: machine_apis - Topics: - - Name: About Machine APIs - File: machine-apis-index - - Name: 'ContainerRuntimeConfig [machineconfiguration.openshift.io/v1]' - File: containerruntimeconfig-machineconfiguration-openshift-io-v1 - - Name: 'ControllerConfig [machineconfiguration.openshift.io/v1]' - File: controllerconfig-machineconfiguration-openshift-io-v1 - - Name: 'ControlPlaneMachineSet [machine.openshift.io/v1]' - File: controlplanemachineset-machine-openshift-io-v1 - - Name: 'KubeletConfig [machineconfiguration.openshift.io/v1]' - File: kubeletconfig-machineconfiguration-openshift-io-v1 - - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]' - File: machineconfigpool-machineconfiguration-openshift-io-v1 - - Name: 'MachineConfig [machineconfiguration.openshift.io/v1]' - File: machineconfig-machineconfiguration-openshift-io-v1 - - Name: 'MachineHealthCheck [machine.openshift.io/v1beta1]' - File: machinehealthcheck-machine-openshift-io-v1beta1 - - Name: 'Machine [machine.openshift.io/v1beta1]' - File: machine-machine-openshift-io-v1beta1 - - Name: 'MachineSet [machine.openshift.io/v1beta1]' - File: machineset-machine-openshift-io-v1beta1 -- Name: Metadata APIs - Dir: metadata_apis - Topics: - - Name: About Metadata APIs - File: metadata-apis-index - - Name: 'APIRequestCount [apiserver.openshift.io/v1]' - File: apirequestcount-apiserver-openshift-io-v1 - - Name: 'Binding [undefined/v1]' - File: binding-v1 - - Name: 'ComponentStatus [undefined/v1]' - File: componentstatus-v1 - - Name: 'ConfigMap [undefined/v1]' - File: configmap-v1 - - Name: 'ControllerRevision [apps/v1]' - File: controllerrevision-apps-v1 - - Name: 'Event [events.k8s.io/v1]' - File: event-events-k8s-io-v1 - - Name: 'Event [undefined/v1]' - File: event-v1 - - Name: 'Lease [coordination.k8s.io/v1]' - File: lease-coordination-k8s-io-v1 - - Name: 'Namespace [undefined/v1]' - File: namespace-v1 -- Name: Monitoring APIs - Dir: monitoring_apis - Topics: - - Name: About Monitoring APIs - File: monitoring-apis-index - - Name: 'Alertmanager [monitoring.coreos.com/v1]' - File: alertmanager-monitoring-coreos-com-v1 - - Name: 'AlertmanagerConfig [monitoring.coreos.com/v1beta1]' - File: alertmanagerconfig-monitoring-coreos-com-v1beta1 - - Name: 'PodMonitor [monitoring.coreos.com/v1]' - File: podmonitor-monitoring-coreos-com-v1 - - Name: 'Probe [monitoring.coreos.com/v1]' - File: probe-monitoring-coreos-com-v1 - - Name: 'Prometheus [monitoring.coreos.com/v1]' - File: prometheus-monitoring-coreos-com-v1 - - Name: 'PrometheusRule [monitoring.coreos.com/v1]' - File: prometheusrule-monitoring-coreos-com-v1 - - Name: 'ServiceMonitor [monitoring.coreos.com/v1]' - File: 
servicemonitor-monitoring-coreos-com-v1 - - Name: 'ThanosRuler [monitoring.coreos.com/v1]' - File: thanosruler-monitoring-coreos-com-v1 -- Name: Network APIs - Dir: network_apis - Topics: - - Name: About Network APIs - File: network-apis-index - - Name: 'CloudPrivateIPConfig [cloud.network.openshift.io/v1]' - File: cloudprivateipconfig-cloud-network-openshift-io-v1 - - Name: 'EgressFirewall [k8s.ovn.org/v1]' - File: egressfirewall-k8s-ovn-org-v1 - - Name: 'EgressIP [k8s.ovn.org/v1]' - File: egressip-k8s-ovn-org-v1 - - Name: 'EgressQoS [k8s.ovn.org/v1]' - File: egressqos-k8s-ovn-org-v1 - - Name: 'Endpoints [undefined/v1]' - File: endpoints-v1 - - Name: 'EndpointSlice [discovery.k8s.io/v1]' - File: endpointslice-discovery-k8s-io-v1 - - Name: 'EgressRouter [network.operator.openshift.io/v1]' - File: egressrouter-network-operator-openshift-io-v1 - - Name: 'Ingress [networking.k8s.io/v1]' - File: ingress-networking-k8s-io-v1 - - Name: 'IngressClass [networking.k8s.io/v1]' - File: ingressclass-networking-k8s-io-v1 - - Name: 'IPPool [whereabouts.cni.cncf.io/v1alpha1]' - File: ippool-whereabouts-cni-cncf-io-v1alpha1 - - Name: 'NetworkAttachmentDefinition [k8s.cni.cncf.io/v1]' - File: networkattachmentdefinition-k8s-cni-cncf-io-v1 - - Name: 'NetworkPolicy [networking.k8s.io/v1]' - File: networkpolicy-networking-k8s-io-v1 - - Name: 'OverlappingRangeIPReservation [whereabouts.cni.cncf.io/v1alpha1]' - File: overlappingrangeipreservation-whereabouts-cni-cncf-io-v1alpha1 - - Name: 'PodNetworkConnectivityCheck [controlplane.operator.openshift.io/v1alpha1]' - File: podnetworkconnectivitycheck-controlplane-operator-openshift-io-v1alpha1 - - Name: 'Route [route.openshift.io/v1]' - File: route-route-openshift-io-v1 - - Name: 'Service [undefined/v1]' - File: service-v1 -- Name: Node APIs - Dir: node_apis - Topics: - - Name: About Node APIs - File: node-apis-index - - Name: 'Node [undefined/v1]' - File: node-v1 - - Name: 'PerformanceProfile [performance.openshift.io/v2]' - File: performanceprofile-performance-openshift-io-v2 - - Name: 'Profile [tuned.openshift.io/v1]' - File: profile-tuned-openshift-io-v1 - - Name: 'RuntimeClass [node.k8s.io/v1]' - File: runtimeclass-node-k8s-io-v1 - - Name: 'Tuned [tuned.openshift.io/v1]' - File: tuned-tuned-openshift-io-v1 -- Name: OAuth APIs - Dir: oauth_apis - Topics: - - Name: About OAuth APIs - File: oauth-apis-index - - Name: 'OAuthAccessToken [oauth.openshift.io/v1]' - File: oauthaccesstoken-oauth-openshift-io-v1 - - Name: 'OAuthAuthorizeToken [oauth.openshift.io/v1]' - File: oauthauthorizetoken-oauth-openshift-io-v1 - - Name: 'OAuthClientAuthorization [oauth.openshift.io/v1]' - File: oauthclientauthorization-oauth-openshift-io-v1 - - Name: 'OAuthClient [oauth.openshift.io/v1]' - File: oauthclient-oauth-openshift-io-v1 - - Name: 'UserOAuthAccessToken [oauth.openshift.io/v1]' - File: useroauthaccesstoken-oauth-openshift-io-v1 -- Name: Operator APIs - Dir: operator_apis - Topics: - - Name: About Operator APIs - File: operator-apis-index - - Name: 'Authentication [operator.openshift.io/v1]' - File: authentication-operator-openshift-io-v1 - - Name: 'CloudCredential [operator.openshift.io/v1]' - File: cloudcredential-operator-openshift-io-v1 - - Name: 'ClusterCSIDriver [operator.openshift.io/v1]' - File: clustercsidriver-operator-openshift-io-v1 - - Name: 'Console [operator.openshift.io/v1]' - File: console-operator-openshift-io-v1 - - Name: 'Config [operator.openshift.io/v1]' - File: config-operator-openshift-io-v1 - - Name: 'Config 
[imageregistry.operator.openshift.io/v1]' - File: config-imageregistry-operator-openshift-io-v1 - - Name: 'Config [samples.operator.openshift.io/v1]' - File: config-samples-operator-openshift-io-v1 - - Name: 'CSISnapshotController [operator.openshift.io/v1]' - File: csisnapshotcontroller-operator-openshift-io-v1 - - Name: 'DNS [operator.openshift.io/v1]' - File: dns-operator-openshift-io-v1 - - Name: 'DNSRecord [ingress.operator.openshift.io/v1]' - File: dnsrecord-ingress-operator-openshift-io-v1 - - Name: 'Etcd [operator.openshift.io/v1]' - File: etcd-operator-openshift-io-v1 - - Name: 'ImageContentSourcePolicy [operator.openshift.io/v1alpha1]' - File: imagecontentsourcepolicy-operator-openshift-io-v1alpha1 - - Name: 'ImagePruner [imageregistry.operator.openshift.io/v1]' - File: imagepruner-imageregistry-operator-openshift-io-v1 - - Name: 'IngressController [operator.openshift.io/v1]' - File: ingresscontroller-operator-openshift-io-v1 - - Name: 'InsightsOperator [operator.openshift.io/v1]' - File: insightsoperator-operator-openshift-io-v1 - - Name: 'KubeAPIServer [operator.openshift.io/v1]' - File: kubeapiserver-operator-openshift-io-v1 - - Name: 'KubeControllerManager [operator.openshift.io/v1]' - File: kubecontrollermanager-operator-openshift-io-v1 - - Name: 'KubeScheduler [operator.openshift.io/v1]' - File: kubescheduler-operator-openshift-io-v1 - - Name: 'KubeStorageVersionMigrator [operator.openshift.io/v1]' - File: kubestorageversionmigrator-operator-openshift-io-v1 - - Name: 'Network [operator.openshift.io/v1]' - File: network-operator-openshift-io-v1 - - Name: 'OpenShiftAPIServer [operator.openshift.io/v1]' - File: openshiftapiserver-operator-openshift-io-v1 - - Name: 'OpenShiftControllerManager [operator.openshift.io/v1]' - File: openshiftcontrollermanager-operator-openshift-io-v1 - - Name: 'OperatorPKI [network.operator.openshift.io/v1]' - File: operatorpki-network-operator-openshift-io-v1 - - Name: 'ServiceCA [operator.openshift.io/v1]' - File: serviceca-operator-openshift-io-v1 - - Name: 'Storage [operator.openshift.io/v1]' - File: storage-operator-openshift-io-v1 -- Name: OperatorHub APIs - Dir: operatorhub_apis - Topics: - - Name: About OperatorHub APIs - File: operatorhub-apis-index - - Name: 'CatalogSource [operators.coreos.com/v1alpha1]' - File: catalogsource-operators-coreos-com-v1alpha1 - - Name: 'ClusterServiceVersion [operators.coreos.com/v1alpha1]' - File: clusterserviceversion-operators-coreos-com-v1alpha1 - - Name: 'InstallPlan [operators.coreos.com/v1alpha1]' - File: installplan-operators-coreos-com-v1alpha1 - - Name: 'OLMConfig [operators.coreos.com/v1]' - File: olmconfig-operators-coreos-com-v1 - - Name: 'Operator [operators.coreos.com/v1]' - File: operator-operators-coreos-com-v1 - - Name: 'OperatorCondition [operators.coreos.com/v2]' - File: operatorcondition-operators-coreos-com-v2 - - Name: 'OperatorGroup [operators.coreos.com/v1]' - File: operatorgroup-operators-coreos-com-v1 - - Name: 'PackageManifest [packages.operators.coreos.com/v1]' - File: packagemanifest-packages-operators-coreos-com-v1 - - Name: 'Subscription [operators.coreos.com/v1alpha1]' - File: subscription-operators-coreos-com-v1alpha1 -- Name: Policy APIs - Dir: policy_apis - Topics: - - Name: About Policy APIs - File: policy-apis-index - - Name: 'Eviction [policy/v1]' - File: eviction-policy-v1 - - Name: 'PodDisruptionBudget [policy/v1]' - File: poddisruptionbudget-policy-v1 -- Name: Project APIs - Dir: project_apis - Topics: - - Name: About Project APIs - File: project-apis-index - - Name: 
'Project [project.openshift.io/v1]' - File: project-project-openshift-io-v1 - - Name: 'ProjectRequest [project.openshift.io/v1]' - File: projectrequest-project-openshift-io-v1 -- Name: Provisioning APIs - Dir: provisioning_apis - Topics: - - Name: About Provisioning APIs - File: provisioning-apis-index - - Name: 'BMCEventSubscription [metal3.io/v1alpha1]' - File: bmceventsubscription-metal3-io-v1alpha1 - - Name: 'BareMetalHost [metal3.io/v1alpha1]' - File: baremetalhost-metal3-io-v1alpha1 - - Name: 'FirmwareSchema [metal3.io/v1alpha1]' - File: firmwareschema-metal3-io-v1alpha1 - - Name: 'HardwareData [metal3.io/v1alpha1]' - File: hardwaredata-metal3-io-v1alpha1 - - Name: 'HostFirmwareSettings [metal3.io/v1alpha1]' - File: hostfirmwaresettings-metal3-io-v1alpha1 - - Name: 'Metal3Remediation [infrastructure.cluster.x-k8s.io/v1beta1]' - File: metal3remediation-infrastructure-cluster-x-k8s-io-v1beta1 - - Name: 'Metal3RemediationTemplate [infrastructure.cluster.x-k8s.io/v1beta1]' - File: metal3remediationtemplate-infrastructure-cluster-x-k8s-io-v1beta1 - - Name: 'PreprovisioningImage [metal3.io/v1alpha1]' - File: preprovisioningimage-metal3-io-v1alpha1 - - Name: 'Provisioning [metal3.io/v1alpha1]' - File: provisioning-metal3-io-v1alpha1 -- Name: RBAC APIs - Dir: rbac_apis - Topics: - - Name: About RBAC APIs - File: rbac-apis-index - - Name: 'ClusterRoleBinding [rbac.authorization.k8s.io/v1]' - File: clusterrolebinding-rbac-authorization-k8s-io-v1 - - Name: 'ClusterRole [rbac.authorization.k8s.io/v1]' - File: clusterrole-rbac-authorization-k8s-io-v1 - - Name: 'RoleBinding [rbac.authorization.k8s.io/v1]' - File: rolebinding-rbac-authorization-k8s-io-v1 - - Name: 'Role [rbac.authorization.k8s.io/v1]' - File: role-rbac-authorization-k8s-io-v1 -- Name: Role APIs - Dir: role_apis - Topics: - - Name: About Role APIs - File: role-apis-index - - Name: 'ClusterRoleBinding [authorization.openshift.io/v1]' - File: clusterrolebinding-authorization-openshift-io-v1 - - Name: 'ClusterRole [authorization.openshift.io/v1]' - File: clusterrole-authorization-openshift-io-v1 - - Name: 'RoleBindingRestriction [authorization.openshift.io/v1]' - File: rolebindingrestriction-authorization-openshift-io-v1 - - Name: 'RoleBinding [authorization.openshift.io/v1]' - File: rolebinding-authorization-openshift-io-v1 - - Name: 'Role [authorization.openshift.io/v1]' - File: role-authorization-openshift-io-v1 -- Name: Schedule and quota APIs - Dir: schedule_and_quota_apis - Topics: - - Name: About Schedule and quota APIs - File: schedule-and-quota-apis-index - - Name: 'AppliedClusterResourceQuota [quota.openshift.io/v1]' - File: appliedclusterresourcequota-quota-openshift-io-v1 - - Name: 'ClusterResourceQuota [quota.openshift.io/v1]' - File: clusterresourcequota-quota-openshift-io-v1 - - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1beta1]' - File: flowschema-flowcontrol-apiserver-k8s-io-v1beta1 - - Name: 'LimitRange [undefined/v1]' - File: limitrange-v1 - - Name: 'PriorityClass [scheduling.k8s.io/v1]' - File: priorityclass-scheduling-k8s-io-v1 - - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta1]' - File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta1 - - Name: 'ResourceQuota [undefined/v1]' - File: resourcequota-v1 -- Name: Security APIs - Dir: security_apis - Topics: - - Name: About Security APIs - File: security-apis-index - - Name: 'CertificateSigningRequest [certificates.k8s.io/v1]' - File: certificatesigningrequest-certificates-k8s-io-v1 - - Name: 'CredentialsRequest 
[cloudcredential.openshift.io/v1]' - File: credentialsrequest-cloudcredential-openshift-io-v1 - - Name: 'PodSecurityPolicyReview [security.openshift.io/v1]' - File: podsecuritypolicyreview-security-openshift-io-v1 - - Name: 'PodSecurityPolicySelfSubjectReview [security.openshift.io/v1]' - File: podsecuritypolicyselfsubjectreview-security-openshift-io-v1 - - Name: 'PodSecurityPolicySubjectReview [security.openshift.io/v1]' - File: podsecuritypolicysubjectreview-security-openshift-io-v1 - - Name: 'RangeAllocation [security.openshift.io/v1]' - File: rangeallocation-security-openshift-io-v1 - - Name: 'Secret [undefined/v1]' - File: secret-v1 - - Name: 'SecurityContextConstraints [security.openshift.io/v1]' - File: securitycontextconstraints-security-openshift-io-v1 - - Name: 'ServiceAccount [undefined/v1]' - File: serviceaccount-v1 -- Name: Storage APIs - Dir: storage_apis - Topics: - - Name: About Storage APIs - File: storage-apis-index - - Name: 'CSIDriver [storage.k8s.io/v1]' - File: csidriver-storage-k8s-io-v1 - - Name: 'CSINode [storage.k8s.io/v1]' - File: csinode-storage-k8s-io-v1 - - Name: 'CSIStorageCapacity [storage.k8s.io/v1]' - File: csistoragecapacity-storage-k8s-io-v1 - - Name: 'PersistentVolume [undefined/v1]' - File: persistentvolume-v1 - - Name: 'PersistentVolumeClaim [undefined/v1]' - File: persistentvolumeclaim-v1 - - Name: 'StorageClass [storage.k8s.io/v1]' - File: storageclass-storage-k8s-io-v1 - - Name: 'StorageState [migration.k8s.io/v1alpha1]' - File: storagestate-migration-k8s-io-v1alpha1 - - Name: 'StorageVersionMigration [migration.k8s.io/v1alpha1]' - File: storageversionmigration-migration-k8s-io-v1alpha1 - - Name: 'VolumeAttachment [storage.k8s.io/v1]' - File: volumeattachment-storage-k8s-io-v1 - - Name: 'VolumeSnapshot [snapshot.storage.k8s.io/v1]' - File: volumesnapshot-snapshot-storage-k8s-io-v1 - - Name: 'VolumeSnapshotClass [snapshot.storage.k8s.io/v1]' - File: volumesnapshotclass-snapshot-storage-k8s-io-v1 - - Name: 'VolumeSnapshotContent [snapshot.storage.k8s.io/v1]' - File: volumesnapshotcontent-snapshot-storage-k8s-io-v1 -- Name: Template APIs - Dir: template_apis - Topics: - - Name: About Template APIs - File: template-apis-index - - Name: 'BrokerTemplateInstance [template.openshift.io/v1]' - File: brokertemplateinstance-template-openshift-io-v1 - - Name: 'PodTemplate [undefined/v1]' - File: podtemplate-v1 - - Name: 'Template [template.openshift.io/v1]' - File: template-template-openshift-io-v1 - - Name: 'TemplateInstance [template.openshift.io/v1]' - File: templateinstance-template-openshift-io-v1 -- Name: User and group APIs - Dir: user_and_group_apis - Topics: - - Name: About User and group APIs - File: user-and-group-apis-index - - Name: 'Group [user.openshift.io/v1]' - File: group-user-openshift-io-v1 - - Name: 'Identity [user.openshift.io/v1]' - File: identity-user-openshift-io-v1 - - Name: 'UserIdentityMapping [user.openshift.io/v1]' - File: useridentitymapping-user-openshift-io-v1 - - Name: 'User [user.openshift.io/v1]' - File: user-user-openshift-io-v1 -- Name: Workloads APIs - Dir: workloads_apis - Topics: - - Name: About Workloads APIs - File: workloads-apis-index - - Name: 'BuildConfig [build.openshift.io/v1]' - File: buildconfig-build-openshift-io-v1 - - Name: 'Build [build.openshift.io/v1]' - File: build-build-openshift-io-v1 - - Name: 'BuildLog [build.openshift.io/v1]' - File: buildlog-build-openshift-io-v1 - - Name: 'BuildRequest [build.openshift.io/v1]' - File: buildrequest-build-openshift-io-v1 - - Name: 'CronJob [batch/v1]' - File: 
cronjob-batch-v1 - - Name: 'DaemonSet [apps/v1]' - File: daemonset-apps-v1 - - Name: 'Deployment [apps/v1]' - File: deployment-apps-v1 - - Name: 'DeploymentConfig [apps.openshift.io/v1]' - File: deploymentconfig-apps-openshift-io-v1 - - Name: 'DeploymentConfigRollback [apps.openshift.io/v1]' - File: deploymentconfigrollback-apps-openshift-io-v1 - - Name: 'DeploymentLog [apps.openshift.io/v1]' - File: deploymentlog-apps-openshift-io-v1 - - Name: 'DeploymentRequest [apps.openshift.io/v1]' - File: deploymentrequest-apps-openshift-io-v1 - - Name: 'Job [batch/v1]' - File: job-batch-v1 - - Name: 'Pod [undefined/v1]' - File: pod-v1 - - Name: 'ReplicationController [undefined/v1]' - File: replicationcontroller-v1 - - Name: 'ReplicaSet [apps/v1]' - File: replicaset-apps-v1 - - Name: 'StatefulSet [apps/v1]' - File: statefulset-apps-v1 ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-enterprise -Topics: -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -- Name: Service Mesh 1.x - Dir: v1x - Topics: - - Name: Service Mesh 1.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing Service Mesh - File: installing-ossm - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Deploying applications on Service Mesh - File: prepare-to-deploy-applications-ossm - - Name: Data visualization and observability - File: ossm-observability - - Name: Custom resources - File: ossm-custom-resources - - Name: 3scale Istio adapter for 1.x - File: threescale-adapter - - Name: Removing Service Mesh - File: removing-ossm ---- -Name: Distributed tracing -Dir: 
distr_tracing -Distros: openshift-enterprise -Topics: -- Name: Distributed tracing release notes - File: distributed-tracing-release-notes -- Name: Distributed tracing architecture - Dir: distr_tracing_arch - Topics: - - Name: Distributed tracing architecture - File: distr-tracing-architecture -- Name: Distributed tracing installation - Dir: distr_tracing_install - Topics: - - Name: Installing distributed tracing - File: distr-tracing-installing - - Name: Configuring the distributed tracing platform - File: distr-tracing-deploying-jaeger - - Name: Configuring distributed tracing data collection - File: distr-tracing-deploying-otel - - Name: Upgrading distributed tracing - File: distr-tracing-updating - - Name: Removing distributed tracing - File: distr-tracing-removing ---- -Name: Virtualization -Dir: virt -Distros: openshift-enterprise,openshift-origin -Topics: -- Name: About - Dir: about_virt - Topics: - - Name: About OpenShift Virtualization - File: about-virt - Distros: openshift-enterprise - - Name: About OKD Virtualization - File: about-virt - Distros: openshift-origin - - Name: Security policies - File: virt-security-policies - - Name: Architecture - File: virt-architecture - Distros: openshift-enterprise -- Name: Release notes - Dir: release_notes - Topics: - - Name: OpenShift Virtualization release notes - File: virt-release-notes-placeholder - Distros: openshift-enterprise - # - Name: OKD Virtualization release notes - # File: virt-release-notes-placeholder - # Distros: openshift-origin -- Name: Getting started - Dir: getting_started - Topics: - - Name: Getting started with OpenShift Virtualization - File: virt-getting-started - Distros: openshift-enterprise - - Name: Getting started with OKD Virtualization - File: virt-getting-started - Distros: openshift-origin - - Name: Using the virtctl and libguestfs CLI tools - File: virt-using-the-cli-tools - - Name: Web console overview - File: virt-web-console-overview - Distros: openshift-enterprise -- Name: Installing - Dir: install - Topics: - - Name: Preparing your cluster - File: preparing-cluster-for-virt - - Name: Installing OpenShift Virtualization - File: installing-virt - Distros: openshift-enterprise - - Name: Installing OKD Virtualization - File: installing-virt - Distros: openshift-origin - - Name: Specifying nodes for OpenShift Virtualization components - File: virt-specifying-nodes-for-virtualization-components - Distros: openshift-enterprise - - Name: Specifying nodes for OKD Virtualization components - File: virt-specifying-nodes-for-virtualization-components - Distros: openshift-origin - - Name: Uninstalling OpenShift Virtualization - File: uninstalling-virt - Distros: openshift-enterprise - - Name: Uninstalling OKD Virtualization - File: uninstalling-virt - Distros: openshift-origin -- Name: Updating - Dir: updating - Topics: - - Name: Updating OpenShift Virtualization - File: upgrading-virt - Distros: openshift-enterprise - - Name: Updating OKD Virtualization - File: upgrading-virt - Distros: openshift-origin -- Name: Virtual machines - Dir: virtual_machines - Topics: -###VIRTUAL MACHINE CHESS SALAD (silly name to highlight that the commented out assemblies need to be checked against merged filenams) - - Name: Creating virtual machines - File: virt-create-vms - - Name: Editing virtual machines - File: virt-edit-vms - - Name: Editing boot order - File: virt-edit-boot-order - - Name: Deleting virtual machines - File: virt-delete-vms - - Name: Exporting virtual machines - File: virt-exporting-vms - - Name: Managing 
virtual machine instances - File: virt-manage-vmis - - Name: Controlling virtual machine states - File: virt-controlling-vm-states - - Name: Accessing virtual machine consoles - File: virt-accessing-vm-consoles - - Name: Automating Windows installation with sysprep - File: virt-automating-windows-sysprep - - Name: Triggering virtual machine failover by resolving a failed node - File: virt-triggering-vm-failover-resolving-failed-node - - Name: Installing the QEMU guest agent and VirtIO drivers - File: virt-installing-qemu-guest-agent - - Name: Viewing the QEMU guest agent information for virtual machines - File: virt-viewing-qemu-guest-agent-web - - Name: Using virtual Trusted Platform Module devices - File: virt-using-vtpm-devices - - Name: Managing virtual machines with OpenShift Pipelines - File: virt-managing-vms-openshift-pipelines - - Name: Advanced virtual machine management - Dir: advanced_vm_management - Topics: -#Advanced virtual machine configuration - - Name: Working with resource quotas for virtual machines - File: virt-working-with-resource-quotas-for-vms - - Name: Specifying nodes for virtual machines - File: virt-specifying-nodes-for-vms - - Name: Configuring certificate rotation - File: virt-configuring-certificate-rotation - - Name: Configuring the default CPU model - File: virt-configuring-default-cpu-model - - Name: UEFI mode for virtual machines - File: virt-uefi-mode-for-vms - - Name: Configuring PXE booting for virtual machines - File: virt-configuring-pxe-booting - - Name: Using huge pages with virtual machines - File: virt-using-huge-pages-with-vms - - Name: Enabling dedicated resources for a virtual machine - File: virt-dedicated-resources-vm - - Name: Scheduling virtual machines - File: virt-schedule-vms - - Name: Configuring PCI passthrough - File: virt-configuring-pci-passthrough - - Name: Configuring vGPU passthrough - File: virt-configuring-vgpu-passthrough - - Name: Configuring mediated devices - File: virt-configuring-mediated-devices - - Name: Enabling descheduler evictions on virtual machines - File: virt-enabling-descheduler-evictions -# Importing virtual machines - - Name: Importing virtual machines - Dir: importing_vms - Topics: - - Name: TLS certificates for data volume imports - File: virt-tls-certificates-for-dv-imports - - Name: Importing virtual machine images with data volumes - File: virt-importing-virtual-machine-images-datavolumes -# Cloning virtual machines - - Name: Cloning virtual machines - Dir: cloning_vms - Topics: - - Name: Enabling user permissions to clone data volumes across namespaces - File: virt-enabling-user-permissions-to-clone-datavolumes - - Name: Cloning a virtual machine disk into a new data volume - File: virt-cloning-vm-disk-into-new-datavolume - - Name: Cloning a virtual machine by using a data volume template - File: virt-cloning-vm-using-datavolumetemplate - - Name: Cloning a virtual machine disk into a new block storage persistent volume claim - File: virt-cloning-vm-disk-into-new-block-storage-pvc -# Virtual machine networking - - Name: Virtual machine networking - Dir: vm_networking - Topics: - - Name: Configuring a virtual machine for the default pod network - File: virt-using-the-default-pod-network-with-virt - Distros: openshift-enterprise - - Name: Configuring a virtual machine for the default pod network with OKD Virtualization - File: virt-using-the-default-pod-network-with-virt - Distros: openshift-origin - - Name: Creating a service to expose a virtual machine - File: virt-creating-service-vm - - Name: 
Connecting a virtual machine to a Linux bridge network - File: virt-attaching-vm-multiple-networks - - Name: Connecting a virtual machine to an SR-IOV network - File: virt-attaching-vm-to-sriov-network - - Name: Connecting a virtual machine to a service mesh - File: virt-connecting-vm-to-service-mesh - - Name: Configuring IP addresses for virtual machines - File: virt-configuring-ip-for-vms - - Name: Viewing the IP address of NICs on a virtual machine - File: virt-viewing-ip-of-vm-nic - - Name: Accessing a virtual machine on a secondary network by using the cluster domain name - File: virt-accessing-vm-secondary-network-fqdn - - Name: Using a MAC address pool for virtual machines - File: virt-using-mac-address-pool-for-vms -#A BETTER NAME THAN 'STORAGE 4 U' - - Name: Virtual machine disks - Dir: virtual_disks - Topics: - - Name: Configuring local storage for virtual machines - File: virt-configuring-local-storage-for-vms - - Name: Creating data volumes - File: virt-creating-data-volumes - - Name: Reserving PVC space for file system overhead - File: virt-reserving-pvc-space-fs-overhead - - Name: Configuring CDI to work with namespaces that have a compute resource quota - File: virt-configuring-cdi-for-namespace-resourcequota - - Name: Managing data volume annotations - File: virt-managing-data-volume-annotations - - Name: Using preallocation for data volumes - File: virt-using-preallocation-for-datavolumes - - Name: Uploading local disk images by using the web console - File: virt-uploading-local-disk-images-web - - Name: Uploading local disk images by using the virtctl tool - File: virt-uploading-local-disk-images-virtctl - - Name: Uploading a local disk image to a block storage persistent volume claim - File: virt-uploading-local-disk-images-block - - Name: Managing virtual machine snapshots - File: virt-managing-vm-snapshots - - Name: Moving a local virtual machine disk to a different node - File: virt-moving-local-vm-disk-to-different-node - - Name: Expanding virtual storage by adding blank disk images - File: virt-expanding-virtual-storage-with-blank-disk-images - - Name: Cloning a data volume using smart-cloning - File: virt-cloning-a-datavolume-using-smart-cloning - - Name: Hot plugging virtual disks - File: virt-hot-plugging-virtual-disks - - Name: Using container disks with virtual machines - File: virt-using-container-disks-with-vms - - Name: Preparing CDI scratch space - File: virt-preparing-cdi-scratch-space - - Name: Re-using statically provisioned persistent volumes - File: virt-reusing-statically-provisioned-persistent-volumes - - Name: Expanding a virtual machine disk - File: virt-expanding-vm-disk -# Templates -- Name: Virtual machine templates - Dir: vm_templates - Topics: - - Name: Creating virtual machine templates - File: virt-creating-vm-template - - Name: Editing virtual machine templates - File: virt-editing-vm-template - - Name: Enabling dedicated resources for a virtual machine template - File: virt-dedicated-resources-vm-template - - Name: Deploying a virtual machine template to a custom namespace - File: virt-deploying-vm-template-to-custom-namespace - - Name: Deleting a virtual machine template - File: virt-deleting-vm-template - - Name: Creating and using boot sources - File: virt-creating-and-using-boot-sources - - Name: Managing automatic boot source updates - File: virt-automatic-bootsource-updates - Distros: openshift-enterprise -# Virtual machine live migration -- Name: Live migration - Dir: live_migration - Topics: - - Name: Virtual machine live migration 
-    File: virt-live-migration
-  - Name: Live migration limits and timeouts
-    File: virt-live-migration-limits
-  - Name: Migrating a virtual machine instance to another node
-    File: virt-migrate-vmi
-  - Name: Migrating a virtual machine over a dedicated additional network
-    File: virt-migrating-vm-on-secondary-network
-  - Name: Cancelling the live migration of a virtual machine instance
-    File: virt-cancel-vmi-migration
-  - Name: Configuring virtual machine eviction strategy
-    File: virt-configuring-vmi-eviction-strategy
-  - Name: Configuring live migration policies
-    File: virt-configuring-live-migration-policies
-# Node maintenance mode
-- Name: Node maintenance
-  Dir: node_maintenance
-  Topics:
-  - Name: About node maintenance
-    File: virt-about-node-maintenance
-  - Name: Automatic renewal of TLS certificates
-    File: virt-automatic-certificates
-  - Name: Managing node labeling for obsolete CPU models
-    File: virt-managing-node-labeling-obsolete-cpu-models
-  - Name: Preventing node reconciliation
-    File: virt-preventing-node-reconciliation
-- Name: Monitoring
-  Dir: monitoring
-  Topics:
-  - Name: Monitoring overview
-    File: virt-monitoring-overview
-  - Name: OpenShift cluster checkup framework
-    File: virt-running-cluster-checkups
-  - Name: Prometheus queries for virtual resources
-    File: virt-prometheus-queries
-  - Name: Virtual machine custom metrics
-    File: virt-exposing-custom-metrics-for-vms
-  - Name: Virtual machine health checks
-    File: virt-monitoring-vm-health
-  - Name: Runbooks
-    File: virt-runbooks
-- Name: Support
-  Dir: support
-  Topics:
-  - Name: Support overview
-    File: virt-support-overview
-  - Name: Collecting data for Red Hat Support
-    File: virt-collecting-virt-data
-    Distros: openshift-enterprise
-  - Name: Troubleshooting
-    File: virt-troubleshooting
-- Name: Backup and restore
-  Dir: backup_restore
-  Topics:
-  - Name: Installing and configuring OADP
-    File: virt-installing-configuring-oadp
-  - Name: Backing up and restoring virtual machines
-    File: virt-backup-restore-overview
-  - Name: Backing up virtual machines
-    File: virt-backing-up-vms
-  - Name: Restoring virtual machines
-    File: virt-restoring-vms
-# - Name: Collecting OKD Virtualization data for community report
-#   File: virt-collecting-virt-data
-#   Distros: openshift-origin
----
-Name: Serverless
-Dir: serverless
-Distros: openshift-enterprise
-Topics:
-- Name: About Serverless
-  Dir: about
-  Topics:
-  - Name: Serverless overview
-    File: about-serverless
+Name: GitOps
+Dir: gitops
+Distros: openshift-gitops
+Topics:
+- Name: OpenShift GitOps release notes
+  File: gitops-release-notes
+- Name: Understanding OpenShift GitOps
+  File: understanding-openshift-gitops
+- Name: Installing OpenShift GitOps
+  File: installing-openshift-gitops
+- Name: Uninstalling OpenShift GitOps
+  File: uninstalling-openshift-gitops
+- Name: Setting up a new Argo CD instance
+  File: setting-up-argocd-instance
+- Name: Configuring an OpenShift cluster by deploying an application with cluster configurations
+  File: configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations
+- Name: Deploying a Spring Boot application with Argo CD
+  File: deploying-a-spring-boot-application-with-argo-cd
+- Name: Argo CD custom resource properties
+  File: argo-cd-custom-resource-properties
+- Name: Monitoring application health status
+  File: health-information-for-resources-deployment
+- Name: Configuring SSO for Argo CD using Dex
+  File: configuring-sso-on-argo-cd-using-dex
+- Name: Configuring SSO for Argo CD using Keycloak
+  File: configuring-sso-for-argo-cd-using-keycloak
+- Name: Configuring Argo CD RBAC
+  File: configuring-argo-cd-rbac
+- Name: Configuring Resource Quota
+  File: configuring-resource-quota
+- Name: Monitoring Argo CD custom resource workloads
+  File: monitoring-argo-cd-custom-resource-workloads
+- Name: Running Control Plane Workloads on Infra nodes
+  File: run-gitops-control-plane-workload-on-infra-nodes
+- Name: Sizing requirements for GitOps Operator
+  File: about-sizing-requirements-gitops
+- Name: Collecting debugging data for a support case
+  File: collecting-debugging-data-for-support
+- Name: Troubleshooting issues in GitOps
+  File: troubleshooting-issues-in-GitOps
\ No newline at end of file
diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml
deleted file mode 100644
index 89ecb5beec1d..000000000000
--- a/_topic_maps/_topic_map_ms.yml
+++ /dev/null
@@ -1,164 +0,0 @@
-# This configuration file dictates the organization of the topic groups and
-# topics on the main page of the doc site for this branch. Each record
-# consists of the following:
-#
-# --- <= Record delimiter
-# Name: Origin of the Species <= Display name of topic group
-# Dir: origin_of_the_species <= Directory name of topic group
-# Topics:
-# - Name: The Majestic Marmoset <= Topic name
-#   File: the_majestic_marmoset <= Topic file under group dir +/-
-# - Name: The Curious Crocodile <= Topic 2 name
-#   File: the_curious_crocodile <= Topic 2 file
-# - Name: The Numerous Nematodes <= Sub-topic group name
-#   Dir: the_numerous_nematodes <= Sub-topic group dir
-#   Topics:
-#   - Name: The Wily Worm <= Sub-topic name
-#     File: the_wily_worm <= Sub-topic file under /
-#   - Name: The Acrobatic Ascarid <= Sub-topic 2 name
-#     File: the_acrobatic_ascarid <= Sub-topic 2 file under /
-#
-# The ordering of the records in this document determines the ordering of the
-# topic groups and topics on the main page.
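For readers unfamiliar with this record format: in addition to the keys described in the comment above, the topic maps in this repository use a Distros key at both the group and the topic level to control which distributions a record, or an individual topic, is published for. A minimal sketch of how the new gitops record could use that key is shown here; the Argo CD sub-topic group and its argo_cd directory name are invented for illustration and are not part of this patch.

---
Name: GitOps
Dir: gitops
Distros: openshift-gitops          # group appears only when building the openshift-gitops distro
Topics:
- Name: Understanding OpenShift GitOps
  File: understanding-openshift-gitops
- Name: Argo CD                    # hypothetical sub-topic group for this sketch
  Dir: argo_cd
  Topics:
  - Name: Setting up a new Argo CD instance
    File: setting-up-argocd-instance
    Distros: openshift-gitops     # a topic-level Distros key narrows visibility further

In the actual change, the gitops record stays flat (no sub-topic groups), which matches the single-directory layout declared for the gitops distro.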
- ---- -Name: About -Dir: welcome -Distros: microshift -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice ---- -Name: Release notes -Dir: microshift_release_notes -Distros: microshift -Topics: -- Name: MicroShift 4.14 release notes - File: microshift-4-14-release-notes ---- -Name: Getting started -Dir: microshift_getting_started -Distros: microshift -Topics: -- Name: Understanding MicroShift - File: microshift-understanding -- Name: Architecture - File: microshift-architecture ---- -Name: Installing -Dir: microshift_install -Distros: microshift -Topics: -- Name: Installing from RPM - File: microshift-install-rpm -- Name: Embedding in a RHEL for Edge image - File: microshift-embed-in-rpm-ostree -- Name: Greenboot health check - File: microshift-greenboot ---- -Name: Updating clusters -Dir: microshift_updating -Distros: microshift -Topics: -- Name: About MicroShift updates - File: microshift-about-updates ---- -Name: Support -Dir: microshift_support -Distros: microshift -Topics: -- Name: MicroShift etcd - File: microshift-etcd -- Name: MicroShift sos report - File: microshift-sos-report ---- -Name: API reference -Dir: microshift_rest_api -Distros: microshift -Topics: -- Name: Understanding API tiers - File: understanding-api-support-tiers -- Name: API compatibility guidelines - File: understanding-compatibility-guidelines -- Name: Network APIs - Dir: network_apis - Topics: - - Name: Route [route.openshift.io/v1] - File: route-route-openshift-io-v1 -- Name: Security APIs - Dir: security_apis - Topics: - - Name: SecurityContextConstraints [security.openshift.io/v1] - File: securitycontextconstraints-security-openshift-io-v1 ---- -Name: CLI tools -Dir: microshift_cli_ref -Distros: microshift -Topics: -- Name: CLI tools introduction - File: microshift-cli-tools-introduction -- Name: Installing the OpenShift CLI - File: microshift-oc-cli-install -- Name: Configuring the OpenShift CLI - File: microshift-oc-config -- Name: Using the OpenShift CLI - File: microshift-cli-using-oc -- Name: Using oc and kubectl - File: microshift-usage-oc-kubectl -- Name: List of oc CLI commands - File: microshift-oc-cli-commands-list ---- -Name: Configuring -Dir: microshift_configuring -Distros: microshift -Topics: -- Name: Using configuration tools - File: microshift-using-config-tools -- Name: Cluster access with kubeconfig - File: microshift-cluster-access-kubeconfig ---- -Name: Networking -Dir: microshift_networking -Distros: microshift -Topics: -- Name: Networking settings - File: microshift-networking -- Name: Firewall configuration - File: microshift-firewall ---- -Name: Storage -Dir: microshift_storage -Distros: microshift -Topics: -- Name: MicroShift storage overview - File: index -- Name: Understanding ephemeral storage for MicroShift - File: understanding-ephemeral-storage-microshift -- Name: Generic ephemeral volumes for MicroShift - File: generic-ephemeral-volumes-microshift -- Name: Understanding persistent storage for MicroShift - File: understanding-persistent-storage-microshift -- Name: Expanding persistent volumes for MicroShift - File: expanding-persistent-volumes-microshift -- Name: Dynamic storage using the LVMS plugin - File: microshift-storage-plugin-overview ---- -Name: Running applications -Dir: microshift_running_apps -Distros: microshift -Topics: -- Name: Application deployment - File: microshift-applications -- Name: Operators - File: microshift-operators ---- -Name: Troubleshooting -Dir: microshift_troubleshooting -Distros: microshift -Topics: -- Name: 
Checking your version - File: microshift-version -- Name: Additional information - File: microshift-things-to-know diff --git a/_topic_maps/_topic_map_osd.yml b/_topic_maps/_topic_map_osd.yml deleted file mode 100644 index d780de27de5e..000000000000 --- a/_topic_maps/_topic_map_osd.yml +++ /dev/null @@ -1,405 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - ---- -Name: About -Dir: welcome -Distros: openshift-dedicated -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice - Distros: openshift-dedicated ---- -Name: Introduction to OpenShift Dedicated -Dir: osd_architecture -Distros: openshift-dedicated -Topics: -- Name: Understanding OpenShift Dedicated - File: osd-understanding -- Name: Architecture concepts - File: osd-architecture -- Name: Policies and service definition - Dir: osd_policy - Distros: openshift-dedicated - Topics: - - Name: OpenShift Dedicated service definition - File: osd-service-definition - - Name: Responsibility assignment matrix - File: policy-responsibility-matrix - - Name: Understanding process and security for OpenShift Dedicated - File: policy-process-security - - Name: About availability for OpenShift Dedicated - File: policy-understand-availability - - Name: Update life cycle - File: osd-life-cycle -- Name: Support for OpenShift Dedicated - File: osd-support - Distros: openshift-dedicated ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-dedicated -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview ---- -Name: Planning your environment -Dir: osd_planning -Distros: openshift-dedicated -Topics: -- Name: Limits and scalability - File: osd-limits-scalability -- Name: Customer Cloud Subscriptions on AWS - File: aws-ccs -- Name: Customer Cloud Subscriptions on GCP - File: gcp-ccs ---- -Name: Getting started -Dir: osd_getting_started -Distros: openshift-dedicated -Topics: -- Name: Understanding your cloud deployment options - File: osd-understanding-your-cloud-deployment-options -- Name: Getting started with OpenShift Dedicated - File: osd-getting-started ---- -Name: Installing, accessing, and deleting OpenShift Dedicated clusters -Dir: osd_install_access_delete_cluster -Distros: openshift-dedicated -Topics: -- Name: Creating a cluster on AWS - File: creating-an-aws-cluster -- Name: Creating a cluster on GCP - File: creating-a-gcp-cluster -- Name: Configuring your identity providers - File: config-identity-providers -- Name: Revoking privileges and access to an OpenShift Dedicated cluster - File: osd-revoking-cluster-privileges -- Name: Deleting 
an OpenShift Dedicated cluster - File: osd-deleting-a-cluster ---- -Name: Cluster administration -Dir: osd_cluster_admin -Distros: openshift-dedicated -Topics: -- Name: Managing administration roles and users - File: osd-admin-roles -- Name: Configuring private connections - Dir: osd_private_connections - Distros: openshift-dedicated - Topics: - - Name: Configuring private connections for AWS - File: aws-private-connections - - Name: Configuring a private cluster - File: private-cluster -- Name: Nodes - Dir: osd_nodes - Distros: openshift-dedicated - Topics: - - Name: About machine pools - File: osd-nodes-machinepools-about - - Name: Managing compute nodes - File: osd-managing-worker-nodes - - Name: About autoscaling nodes on a cluster - File: osd-nodes-about-autoscaling-nodes -- Name: Logging - Dir: osd_logging - Distros: openshift-dedicated - Topics: - - Name: Accessing the service logs - File: osd-accessing-the-service-logs ---- -# Name: Security and compliance -# Dir: security -# Distros: openshift-dedicated -# Topics: -# - Name: Viewing audit logs -# File: audit-log-view -# --- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-dedicated -Topics: -- Name: Managing security context constraints - File: managing-security-context-constraints ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-dedicated -Topics: -- Name: Preparing to upgrade OpenShift Dedicated to 4.9 - File: osd-upgrading-cluster-prepare - Distros: openshift-dedicated -- Name: Upgrading OpenShift Dedicated - File: osd-upgrades - Distros: openshift-dedicated ---- -Name: CI/CD -Dir: cicd -Distros: openshift-dedicated -Topics: -- Name: Builds - Dir: builds - Distros: openshift-dedicated - Topics: - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-dedicated ---- -Name: Add-on services -Dir: adding_service_cluster -Distros: openshift-dedicated -Topics: -- Name: Adding services to a cluster - File: adding-service -- Name: Available services - File: available-services - Distros: openshift-dedicated ---- -Name: Storage -Dir: storage -Distros: openshift-dedicated -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws - - Name: Persistent storage using GCE Persistent Disk - File: persistent-storage-gce -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: osd-persistent-storage-aws-efs-csi - - Name: GCP PD CSI Driver Operator - File: persistent-storage-csi-gcp-pd - #- Name: GCP Filestore CSI Driver Operator - # File: persistent-storage-csi-google-cloud-file -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-dedicated -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in OpenShift Dedicated - File: 
configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry ---- -Name: Networking -Dir: networking -Distros: openshift-dedicated -Topics: -- Name: Understanding the DNS Operator - File: dns-operator -- Name: Understanding the Ingress Operator - File: ingress-operator -- Name: OpenShift SDN default CNI network provider - Dir: openshift_sdn - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: cidr-range-definitions -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Applications -Dir: applications -Distros: openshift-dedicated -Topics: -- Name: Deployments - Dir: deployments - Distros: openshift-dedicated - Topics: - - Name: Custom domains for applications - File: osd-config-custom-domains-applications ---- -Name: Logging -Dir: logging -Distros: openshift-dedicated -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying -- Name: Accessing the service logs - File: sd-accessing-the-service-logs -- Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer - Distros: openshift-dedicated -- Name: Forwarding logs to third party systems - File: cluster-logging-external -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter -# - Name: Forwarding logs using ConfigMaps -# File: cluster-logging-external-configmap -# Distros: openshift-dedicated -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: Viewing cluster dashboards - File: cluster-logging-dashboards 
-- Name: Troubleshooting Logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields ---- -Name: Monitoring user-defined projects -Dir: monitoring -Distros: openshift-dedicated -Topics: -- Name: Understanding the monitoring stack - File: osd-understanding-the-monitoring-stack -- Name: Accessing monitoring for user-defined projects - File: osd-accessing-monitoring-for-user-defined-projects -- Name: Configuring the monitoring stack - File: osd-configuring-the-monitoring-stack -- Name: Enabling alert routing for user-defined projects - File: osd-enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: osd-managing-metrics -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: osd-reviewing-monitoring-dashboards -- Name: Troubleshooting monitoring issues - File: osd-troubleshooting-monitoring-issues ---- -Name: Serverless -Dir: serverless -Distros: openshift-dedicated -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Troubleshooting -Dir: sd_support -Distros: openshift-dedicated -Topics: -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Distros: openshift-dedicated - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster -- Name: Summarizing cluster specifications - File: osd-summarizing-cluster-specifications - Distros: openshift-dedicated -- Name: OpenShift Dedicated managed resources - File: osd-managed-resources - Distros: openshift-dedicated diff --git a/_topic_maps/_topic_map_rosa.yml b/_topic_maps/_topic_map_rosa.yml deleted file mode 100644 index 39aa73cfd331..000000000000 --- a/_topic_maps/_topic_map_rosa.yml +++ /dev/null @@ -1,611 +0,0 @@ -# This configuration file dictates the organization of the topic groups and -# topics on the main page of the doc site for this branch. 
Each record -# consists of the following: -# -# --- <= Record delimiter -# Name: Origin of the Species <= Display name of topic group -# Dir: origin_of_the_species <= Directory name of topic group -# Topics: -# - Name: The Majestic Marmoset <= Topic name -# File: the_majestic_marmoset <= Topic file under group dir +/- -# - Name: The Curious Crocodile <= Topic 2 name -# File: the_curious_crocodile <= Topic 2 file -# - Name: The Numerous Nematodes <= Sub-topic group name -# Dir: the_numerous_nematodes <= Sub-topic group dir -# Topics: -# - Name: The Wily Worm <= Sub-topic name -# File: the_wily_worm <= Sub-topic file under / -# - Name: The Acrobatic Ascarid <= Sub-topic 2 name -# File: the_acrobatic_ascarid <= Sub-topic 2 file under / -# -# The ordering of the records in this document determines the ordering of the -# topic groups and topics on the main page. - ---- -Name: About -Dir: welcome -Distros: openshift-rosa -Topics: -- Name: Welcome - File: index -- Name: Legal notice - File: legal-notice - Distros: openshift-rosa ---- -Name: What's new -Dir: rosa_release_notes -Distros: openshift-rosa -Topics: -- Name: What's new with ROSA - File: rosa-release-notes ---- -Name: Introduction to ROSA -Dir: rosa_architecture -Distros: openshift-rosa -Topics: -- Name: Understanding ROSA - File: rosa-understanding -- Name: ROSA architecture - Dir: rosa_architecture_sub - Distros: openshift-rosa - Topics: - - Name: Architecture concepts - File: rosa-basic-architecture-concepts - - Name: Architecture models - File: rosa-architecture-models -- Name: Policies and service definition - Dir: rosa_policy_service_definition - Distros: openshift-rosa - Topics: - - Name: About availability for ROSA - File: rosa-policy-understand-availability - - Name: Overview of responsibilities for ROSA - File: rosa-policy-responsibility-matrix - - Name: ROSA service definition - File: rosa-service-definition - - Name: ROSA update life cycle - File: rosa-life-cycle - - Name: Understanding process and security for ROSA - File: rosa-policy-process-security -- Name: About IAM resources for ROSA with STS - File: rosa-sts-about-iam-resources -- Name: OpenID Connect Overview - File: rosa-oidc-overview -- Name: Support for ROSA - File: rosa-getting-support -# - Name: Training for ROSA -# File: rosa-training ---- -Name: Getting started -Dir: rosa_getting_started -Distros: openshift-rosa -Topics: -- Name: ROSA quickstart guide - File: rosa-quickstart-guide-ui -- Name: Comprehensive guide to getting started with ROSA - File: rosa-getting-started -- Name: Understanding the ROSA with STS deployment workflow - File: rosa-sts-getting-started-workflow ---- -Name: Prepare your environment -Dir: rosa_planning -Distros: openshift-rosa -Topics: -- Name: AWS prerequisites for ROSA with STS - File: rosa-sts-aws-prereqs -- Name: ROSA IAM role resources - File: rosa-sts-ocm-role -- Name: Limits and scalability - File: rosa-limits-scalability -- Name: Planning your environment - File: rosa-planning-environment -- Name: Required AWS service quotas - File: rosa-sts-required-aws-service-quotas -- Name: Setting up your environment - File: rosa-sts-setting-up-environment ---- -Name: Install ROSA with HCP clusters -Dir: rosa_hcp -Distros: openshift-rosa -Topics: -- Name: Creating ROSA with HCP clusters using the default options - File: rosa-hcp-sts-creating-a-cluster-quickly -- Name: Using the Node Tuning Operator on ROSA with HCP - File: rosa-tuning-config ---- -Name: Install ROSA Classic clusters -Dir: rosa_install_access_delete_clusters -Distros: 
openshift-rosa -Topics: -- Name: Creating a ROSA cluster with STS using the default options - File: rosa-sts-creating-a-cluster-quickly -- Name: Creating a ROSA cluster with STS using customizations - File: rosa-sts-creating-a-cluster-with-customizations -- Name: Interactive cluster creation mode reference - File: rosa-sts-interactive-mode-reference -- Name: Creating an AWS PrivateLink cluster on ROSA - File: rosa-aws-privatelink-creating-cluster -- Name: Accessing a ROSA cluster - File: rosa-sts-accessing-cluster -- Name: Configuring identity providers using Red Hat OpenShift Cluster Manager - File: rosa-sts-config-identity-providers -- Name: Revoking access to a ROSA cluster - File: rosa-sts-deleting-access-cluster -- Name: Deleting a ROSA cluster - File: rosa-sts-deleting-cluster -- Name: Deploying ROSA without AWS STS - Dir: rosa_getting_started_iam - Distros: openshift-rosa - Topics: - - Name: AWS prerequisites for ROSA - File: rosa-aws-prereqs - - Name: Understanding the ROSA deployment workflow - File: rosa-getting-started-workflow - - Name: Required AWS service quotas - File: rosa-required-aws-service-quotas - - Name: Configuring your AWS account - File: rosa-config-aws-account - - Name: Installing the ROSA CLI - File: rosa-installing-rosa - - Name: Creating a ROSA cluster without AWS STS - File: rosa-creating-cluster - - Name: Configuring a private cluster - File: rosa-private-cluster -# - Name: Creating a ROSA cluster using the web console -# File: rosa-creating-cluster-console -# - Name: Accessing a ROSA cluster -# File: rosa-accessing-cluster -# - Name: Configuring identity providers using the Red Hat OpenShift Cluster Manager -# File: rosa-config-identity-providers - - Name: Deleting access to a ROSA cluster - File: rosa-deleting-access-cluster - - Name: Deleting a ROSA cluster - File: rosa-deleting-cluster - - Name: Command quick reference for creating clusters and users - File: rosa-quickstart ---- -Name: ROSA CLI -Dir: rosa_cli -Distros: openshift-rosa -Topics: -# - Name: CLI and web console -# File: rosa-cli-openshift-console -- Name: Getting started with the ROSA CLI - File: rosa-get-started-cli -- Name: Managing objects with the ROSA CLI - File: rosa-manage-objects-cli -- Name: Checking account and version information with the ROSA CLI - File: rosa-checking-acct-version-cli -- Name: Checking logs with the ROSA CLI - File: rosa-checking-logs-cli ---- -Name: Red Hat OpenShift Cluster Manager -Dir: ocm -Distros: openshift-rosa -Topics: -- Name: Red Hat OpenShift Cluster Manager - File: ocm-overview -# - Name: Red Hat OpenShift Cluster Manager -# File: ocm-overview -# - Name: Using the OpenShift web console -# File: rosa-using-openshift-console ---- -Name: Cluster administration -Dir: rosa_cluster_admin -Distros: openshift-rosa -Topics: -# - Name: Cluster configurations -# File: rosa-cluster-config -# - Name: Cluster authentication -# File: rosa-cluster-auth -# - Name: Authorization and RBAC -# File: rosa-auth-rbac -- Name: Configuring private connections - Dir: cloud_infrastructure_access - Distros: openshift-rosa - Topics: - - Name: Configuring private connections - File: rosa-configuring-private-connections - - Name: Configuring AWS VPC peering - File: dedicated-aws-peering - - Name: Configuring AWS VPN - File: dedicated-aws-vpn - - Name: Configuring AWS Direct Connect - File: dedicated-aws-dc -- Name: Nodes - Dir: rosa_nodes - Distros: openshift-rosa - Topics: - - Name: About machine pools - File: rosa-nodes-machinepools-about - - Name: Managing compute nodes - File: 
rosa-managing-worker-nodes - - Name: Configuring machine pools in Local Zones - File: rosa-nodes-machinepools-configuring - Distros: openshift-rosa - - Name: About autoscaling nodes on a cluster - File: rosa-nodes-about-autoscaling-nodes ---- -# Name: Security and compliance -# Dir: security -# Distros: openshift-rosa -# Topics: -# - Name: Viewing audit logs -# File: audit-log-view -# # - Name: Security -# # File: rosa-security -# # - Name: Application and cluster compliance -# # File: rosa-app-security-compliance -# --- -Name: Authentication and authorization -Dir: authentication -Distros: openshift-rosa -Topics: -- Name: Assuming an AWS IAM role for a service account - File: assuming-an-aws-iam-role-for-a-service-account -- Name: Managing security context constraints - File: managing-security-context-constraints ---- -Name: Upgrading -Dir: upgrading -Distros: openshift-rosa -Topics: -- Name: Preparing to upgrade ROSA to 4.9 - File: rosa-upgrading-cluster-prepare - Distros: openshift-rosa -- Name: Upgrading ROSA with STS - File: rosa-upgrading-sts -- Name: Upgrading ROSA - File: rosa-upgrading -- Name: Upgrading ROSA with HCP - File: rosa-hcp-upgrading ---- -Name: CI/CD -Dir: cicd -Distros: openshift-rosa -Topics: -- Name: Builds - Dir: builds - Distros: openshift-rosa - Topics: - - Name: Setting up additional trusted certificate authorities for builds - File: setting-up-trusted-ca - Distros: openshift-rosa ---- - Name: Add-on services - Dir: adding_service_cluster - Distros: openshift-rosa - Topics: - - Name: Adding services to a cluster - File: adding-service - - Name: Available services - File: rosa-available-services ---- -Name: Storage -Dir: storage -Distros: openshift-rosa -Topics: -- Name: Storage overview - File: index -- Name: Understanding ephemeral storage - File: understanding-ephemeral-storage -- Name: Understanding persistent storage - File: understanding-persistent-storage -- Name: Configuring persistent storage - Dir: persistent_storage - Topics: - - Name: Persistent storage using AWS Elastic Block Store - File: persistent-storage-aws -- Name: Using Container Storage Interface (CSI) - Dir: container_storage_interface - Topics: - - Name: Configuring CSI volumes - File: persistent-storage-csi - - Name: Managing the default storage class - File: persistent-storage-csi-sc-manage - - Name: AWS Elastic Block Store CSI Driver Operator - File: persistent-storage-csi-ebs - - Name: AWS Elastic File Service CSI Driver Operator - File: osd-persistent-storage-aws-efs-csi -- Name: Generic ephemeral volumes - File: generic-ephemeral-vols -- Name: Dynamic provisioning - File: dynamic-provisioning ---- -Name: Registry -Dir: registry -Distros: openshift-rosa -Topics: -- Name: Registry overview - File: index -- Name: Image Registry Operator in Red Hat OpenShift Service on AWS - File: configuring-registry-operator -- Name: Accessing the registry - File: accessing-the-registry -- Name: Exposing the registry - File: securing-exposing-registry ---- -Name: Networking -Dir: networking -Distros: openshift-rosa -Topics: -- Name: Understanding the DNS Operator - File: dns-operator -- Name: Understanding the Ingress Operator - File: ingress-operator -- Name: OpenShift SDN default CNI network provider - Dir: openshift_sdn - Topics: - - Name: Enabling multicast for a project - File: enabling-multicast -- Name: Network verification - File: network-verification -- Name: Configuring a cluster-wide proxy during installation - File: configuring-cluster-wide-proxy -- Name: CIDR range definitions - File: 
cidr-range-definitions -- Name: Network policy - Dir: network_policy - Topics: - - Name: About network policy - File: about-network-policy - - Name: Creating a network policy - File: creating-network-policy - - Name: Viewing a network policy - File: viewing-network-policy - - Name: Deleting a network policy - File: deleting-network-policy - - Name: Configuring multitenant isolation with network policy - File: multitenant-network-policy -- Name: Configuring Routes - Dir: routes - Topics: - - Name: Route configuration - File: route-configuration - - Name: Secured routes - File: secured-routes ---- -Name: Application development -Dir: applications -Distros: openshift-rosa -Topics: -- Name: Deployments - Dir: deployments - Distros: openshift-rosa - Topics: - - Name: Custom domains for applications - File: osd-config-custom-domains-applications -# - Name: Application GitOps workflows -# File: rosa-app-gitops-workflows -# - Name: Application logging -# File: rosa-app-logging -# - Name: Applications -# File: rosa-apps -# - Name: Application metrics and alerts -# File: rosa-app-metrics and alerts -# - Name: Projects -# File: rosa-projects -# - Name: Using the internal registry -# File: rosa-using-internal-registry ---- -Name: Backing up and restoring applications -Dir: rosa_backing_up_and_restoring_applications -Distros: openshift-rosa -Topics: -- Name: Installing OADP on ROSA with STS - File: backing-up-applications ---- -Name: Logging -Dir: logging -Distros: openshift-rosa -Topics: -- Name: Release notes - File: cluster-logging-release-notes -- Name: About Logging - File: cluster-logging -- Name: Installing Logging - File: cluster-logging-deploying -- Name: Accessing the service logs - File: sd-accessing-the-service-logs -- Name: Viewing cluster logs in the AWS Console - File: rosa-viewing-logs -- Name: Configuring your Logging deployment - Dir: config - Topics: - - Name: About the Cluster Logging custom resource - File: cluster-logging-configuring-cr - - Name: Configuring the logging collector - File: cluster-logging-collector - - Name: Configuring the log store - File: cluster-logging-log-store - - Name: Configuring the log visualizer - File: cluster-logging-visualizer - - Name: Configuring Logging storage - File: cluster-logging-storage-considerations - - Name: Configuring CPU and memory limits for Logging components - File: cluster-logging-memory - - Name: Using tolerations to control Logging pod placement - File: cluster-logging-tolerations - - Name: Moving the Logging resources with node selectors - File: cluster-logging-moving-nodes - #- Name: Configuring systemd-journald and Fluentd - # File: cluster-logging-systemd - - Name: Maintenance and support - File: cluster-logging-maintenance-support -- Name: Logging with the LokiStack - File: cluster-logging-loki -- Name: Viewing logs for a specific resource - File: viewing-resource-logs -- Name: Viewing cluster logs in Kibana - File: cluster-logging-visualizer -- Name: Forwarding logs to third party systems - File: cluster-logging-external -- Name: Enabling JSON logging - File: cluster-logging-enabling-json-logging -- Name: Collecting and storing Kubernetes events - File: cluster-logging-eventrouter -# - Name: Forwarding logs using ConfigMaps -# File: cluster-logging-external-configmap -- Name: Updating Logging - File: cluster-logging-upgrading -- Name: Viewing cluster dashboards - File: cluster-logging-dashboards -- Name: Troubleshooting Logging - Dir: troubleshooting - Topics: - - Name: Viewing Logging status - File: 
cluster-logging-cluster-status - - Name: Viewing the status of the log store - File: cluster-logging-log-store-status - - Name: Understanding Logging alerts - File: cluster-logging-alerts - - Name: Collecting logging data for Red Hat Support - File: cluster-logging-must-gather - - Name: Troubleshooting for Critical Alerts - File: cluster-logging-troubleshooting-for-critical-alerts -- Name: Uninstalling Logging - File: cluster-logging-uninstall -- Name: Exported fields - File: cluster-logging-exported-fields ---- -Name: Monitoring user-defined projects -Dir: monitoring -Distros: openshift-rosa -Topics: -- Name: Understanding the monitoring stack - File: rosa-understanding-the-monitoring-stack -- Name: Accessing monitoring for user-defined projects - File: rosa-accessing-monitoring-for-user-defined-projects -- Name: Configuring the monitoring stack - File: rosa-configuring-the-monitoring-stack -- Name: Enabling alert routing for user-defined projects - File: rosa-enabling-alert-routing-for-user-defined-projects -- Name: Managing metrics - File: rosa-managing-metrics -- Name: Managing alerts - File: managing-alerts -- Name: Reviewing monitoring dashboards - File: rosa-reviewing-monitoring-dashboards -- Name: Troubleshooting monitoring issues - File: rosa-troubleshooting-monitoring-issues ---- -Name: Service Mesh -Dir: service_mesh -Distros: openshift-rosa -Topics: -- Name: Service Mesh 2.x - Dir: v2x - Topics: - - Name: About OpenShift Service Mesh - File: ossm-about - - Name: Service Mesh 2.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh deployment models - File: ossm-deployment-models - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing the Operators - File: installing-ossm - - Name: Creating the ServiceMeshControlPlane - File: ossm-create-smcp - - Name: Adding workloads to a service mesh - File: ossm-create-mesh - - Name: Enabling sidecar injection - File: prepare-to-deploy-applications-ossm - - Name: Upgrading Service Mesh - File: upgrading-ossm - - Name: Managing users and profiles - File: ossm-profiles-users - - Name: Security - File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Metrics, logs, and traces - File: ossm-observability - - Name: Performance and scalability - File: ossm-performance-scalability - - Name: Deploying to production - File: ossm-deploy-production - - Name: Federation - File: ossm-federation - - Name: Extensions - File: ossm-extensions - - Name: 3scale WebAssembly for 2.1 - File: ossm-threescale-webassembly-module - - Name: 3scale Istio adapter for 2.0 - File: threescale-adapter - - Name: Troubleshooting Service Mesh - File: ossm-troubleshooting-istio - - Name: Control plane configuration reference - File: ossm-reference-smcp - - Name: Kiali configuration reference - File: ossm-reference-kiali - - Name: Jaeger configuration reference - File: ossm-reference-jaeger - - Name: Uninstalling Service Mesh - File: removing-ossm -- Name: Service Mesh 1.x - Dir: v1x - Topics: - - Name: Service Mesh 1.x release notes - File: servicemesh-release-notes - - Name: Service Mesh architecture - File: ossm-architecture - - Name: Service Mesh and Istio differences - File: ossm-vs-community - - Name: Preparing to install Service Mesh - File: preparing-ossm-installation - - Name: Installing Service Mesh - File: installing-ossm - - Name: Security - 
File: ossm-security - - Name: Traffic management - File: ossm-traffic-manage - - Name: Deploying applications on Service Mesh - File: prepare-to-deploy-applications-ossm - - Name: Data visualization and observability - File: ossm-observability - - Name: Custom resources - File: ossm-custom-resources - - Name: 3scale Istio adapter for 1.x - File: threescale-adapter - - Name: Removing Service Mesh - File: removing-ossm ---- -Name: Serverless -Dir: serverless -Distros: openshift-rosa -Topics: -- Name: About Serverless - Dir: about - Topics: - - Name: Serverless overview - File: about-serverless ---- -Name: Troubleshooting -Dir: sd_support -Distros: openshift-rosa -Topics: -- Name: Remote health monitoring with connected clusters - Dir: remote_health_monitoring - Topics: - - Name: About remote health monitoring - File: about-remote-health-monitoring - - Name: Showing data collected by remote health monitoring - File: showing-data-collected-by-remote-health-monitoring - - Name: Using Insights to identify issues with your cluster - File: using-insights-to-identify-issues-with-your-cluster -- Name: Troubleshooting expired offline access tokens - File: rosa-troubleshooting-expired-tokens -- Name: Troubleshooting installations - File: rosa-troubleshooting-installations -- Name: Troubleshooting IAM roles - File: rosa-troubleshooting-iam-resources -- Name: Troubleshooting cluster deployments - File: rosa-troubleshooting-deployments -- Name: Red Hat OpenShift Service on AWS managed resources - File: rosa-managed-resources diff --git a/_unused_topics/README b/_unused_topics/README deleted file mode 100644 index 5636d8245a15..000000000000 --- a/_unused_topics/README +++ /dev/null @@ -1,2 +0,0 @@ -Placeholder file. Any modules that are not included will be placed here -by the `scripts/find_unused.py` script. diff --git a/_unused_topics/adding-new-devices.adoc b/_unused_topics/adding-new-devices.adoc deleted file mode 100644 index 84ec3221b6e7..000000000000 --- a/_unused_topics/adding-new-devices.adoc +++ /dev/null @@ -1,13 +0,0 @@ -[id="adding-new-devices_{context}"] -= Adding new devices - -Adding a new device is semi-automatic. The provisioner periodically checks for new mounts in configured directories. Administrators must create a new subdirectory, mount a device, and allow Pods to use the device by applying the SELinux label, for example: - ----- -$ chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/ ----- - -[WARNING] -==== -Omitting any of these steps may result in the wrong PV being created. -==== diff --git a/_unused_topics/architecture-new-content.adoc b/_unused_topics/architecture-new-content.adoc deleted file mode 100644 index 83d9ddbc1221..000000000000 --- a/_unused_topics/architecture-new-content.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -[id="architecture-updates_{context}"] -= Additional architecture content - -//Please add additional architecture content for the 4.0 release to this file. -//The docs team will edit the content and modularize it to fit the rest of -//the collection. 
\ No newline at end of file diff --git a/_unused_topics/builds-output-image-digest.adoc b/_unused_topics/builds-output-image-digest.adoc deleted file mode 100644 index c4bb8b1e0aa0..000000000000 --- a/_unused_topics/builds-output-image-digest.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * unused_topics/builds-output-image-digest - -[id="builds-output-image-digest_{context}"] -= Output image digest - -Built images can be uniquely identified by their digest, which can -later be used to pull the image by digest regardless of its current tag. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -`Docker` and -endif::[] -`Source-to-Image (S2I)` builds store the digest in -`Build.status.output.to.imageDigest` after the image is pushed to a registry. -The digest is computed by the registry. Therefore, it may not always be present, -for example when the registry did not return a digest, or when the builder image -did not understand its format. - -.Built Image Digest After a Successful Push to the Registry -[source,yaml] ----- -status: - output: - to: - imageDigest: sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 ----- - -[role="_additional-resources"] -.Additional resources -* link:https://docs.docker.com/registry/spec/api/#/content-digests[Docker Registry HTTP API V2: digest] -* link:https://docs.docker.com/engine/reference/commandline/pull/#/pull-an-image-by-digest-immutable-identifier[`docker pull`: pull the image by digest] diff --git a/_unused_topics/cluster-logging-collector-envvar.adoc b/_unused_topics/cluster-logging-collector-envvar.adoc deleted file mode 100644 index d1a96e696399..000000000000 --- a/_unused_topics/cluster-logging-collector-envvar.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -[id="cluster-logging-collector-envvar_{context}"] -= Configuring the logging collector using environment variables - -You can use environment variables to modify the configuration of the Fluentd log -collector. - -See the link:https://github.com/openshift/origin-aggregated-logging/blob/master/fluentd/README.md[Fluentd README] in Github for lists of the -available environment variables. - -.Prerequisites - -* Set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. - -.Procedure - -Set any of the Fluentd environment variables as needed: - ----- -$ oc set env ds/fluentd = ----- - -For example: - ----- -$ oc set env ds/fluentd BUFFER_SIZE_LIMIT=24 ----- diff --git a/_unused_topics/cluster-logging-configuring-node-selector.adoc b/_unused_topics/cluster-logging-configuring-node-selector.adoc deleted file mode 100644 index 05a470114490..000000000000 --- a/_unused_topics/cluster-logging-configuring-node-selector.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-configuring-node-selector_{context}"] -= Specifying a node for cluster logging components using node selectors - -Each component specification allows the component to target a specific node. 
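
For orientation before the full procedure, the following is a minimal sketch of the same idea for a single component, assuming the target nodes already carry a `logging: es` label (the label key and value are only examples); the complete Cluster Logging custom resource with node selectors for all four components is shown in the procedure below.

[source,yaml]
----
apiVersion: "logging.openshift.io/v1"
kind: "ClusterLogging"
metadata:
  name: "instance"
  namespace: "openshift-logging"
spec:
  managementState: "Managed"
  logStore:
    type: "elasticsearch"
    elasticsearch:
      nodeSelector:      # schedule Elasticsearch pods only on nodes labeled logging=es
        logging: es
      nodeCount: 3
      redundancyPolicy: "ZeroRedundancy"
----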
- -.Procedure - -Edit the Cluster Logging Custom Resource (CR) in the `openshift-logging` project: - -[source,yaml] ----- -$ oc edit ClusterLogging instance - -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "nodeselector" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeSelector: <1> - logging: es - nodeCount: 3 - resources: - limits: - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - storage: - size: "20G" - storageClassName: "gp2" - redundancyPolicy: "ZeroRedundancy" - visualization: - type: "kibana" - kibana: - nodeSelector: <2> - logging: kibana - replicas: 1 - curation: - type: "curator" - curator: - nodeSelector: <3> - logging: curator - schedule: "*/10 * * * *" - collection: - logs: - type: "fluentd" - fluentd: - nodeSelector: <4> - logging: fluentd ----- - -<1> Node selector for Elasticsearch. -<2> Node selector for Kibana. -<3> Node selector for Curator. -<4> Node selector for Fluentd. - - diff --git a/_unused_topics/cluster-logging-elasticsearch-admin.adoc b/_unused_topics/cluster-logging-elasticsearch-admin.adoc deleted file mode 100644 index b1b3843deb19..000000000000 --- a/_unused_topics/cluster-logging-elasticsearch-admin.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-elasticsearch-admin_{context}"] -= Performing administrative Elasticsearch operations - -An administrator certificate, key, and CA that can be used to communicate with and perform administrative operations on Elasticsearch are provided within the *elasticsearch* secret in the `openshift-logging` project. - -[NOTE] -==== -To confirm whether your OpenShift Logging installation provides these, run: ----- -$ oc describe secret elasticsearch -n openshift-logging ----- -==== - -. Connect to an Elasticsearch pod that is in the cluster on which you are attempting to perform maintenance. - -. To find a pod in a cluster use: -+ ----- -$ oc get pods -l component=elasticsearch -o name -n openshift-logging | head -1 ----- - -. Connect to a pod: -+ ----- -$ oc rsh ----- - -. Once connected to an Elasticsearch container, you can use the certificates mounted from the secret to communicate with Elasticsearch per its link:https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices.html[Indices APIs documentation]. -+ -Fluentd sends its logs to Elasticsearch using the index format *infra-00000x* or *app-00000x*. -+ -For example, to delete all logs for the openshift-logging index, *app-000001*, we can run: -+ ----- -$ curl --key /etc/elasticsearch/secret/admin-key \ ---cert /etc/elasticsearch/secret/admin-cert \ ---cacert /etc/elasticsearch/secret/admin-ca -XDELETE \ -"https://localhost:9200/app-000001" ----- diff --git a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc deleted file mode 100644 index 3223ed28b26e..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-aushape_{context}"] -= Aushape exported fields - -These are the Aushape fields exported by OpenShift Logging available for searching -from Elasticsearch and Kibana. - -Audit events converted with Aushape. 
For more information, see -link:https://github.com/Scribery/aushape[Aushape]. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `aushape.serial` -|Audit event serial number. - -| `aushape.node` -|Name of the host where the audit event occurred. - -| `aushape.error` -|The error aushape encountered while converting the event. - -| `aushape.trimmed` -|An array of JSONPath expressions relative to the event object, specifying -objects or arrays with the content removed as the result of event size limiting. -An empty string means the event removed the content, and an empty array means -the trimming occurred by unspecified objects and arrays. - -| `aushape.text` -|An array log record strings representing the original audit event. -|=== - -[discrete] -[id="exported-fields-aushape.data_{context}"] -=== `aushape.data` Fields - -Parsed audit event data related to Aushape. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `aushape.data.avc` -|type: nested - -| `aushape.data.execve` -|type: string - -| `aushape.data.netfilter_cfg` -|type: nested - -| `aushape.data.obj_pid` -|type: nested - -| `aushape.data.path` -|type: nested -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-collectd.adoc b/_unused_topics/cluster-logging-exported-fields-collectd.adoc deleted file mode 100644 index 75dfb4c71428..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-collectd.adoc +++ /dev/null @@ -1,993 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-collectd_{context}"] -= `collectd` exported fields - -These are the `collectd` and `collectd-*` fields exported by the logging system and available for searching -from Elasticsearch and Kibana. - -[discrete] -[id="exported-fields-collectd_{context}"] -=== `collectd` Fields - -The following fields represent namespace metrics metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interval` -|type: float - -The `collectd` interval. - -| `collectd.plugin` -|type: string - -The `collectd` plug-in. - -| `collectd.plugin_instance` -|type: string - -The `collectd` plugin_instance. - -| `collectd.type_instance` -|type: string - -The `collectd` `type_instance`. - -| `collectd.type` -|type: string - -The `collectd` type. - -| `collectd.dstypes` -|type: string - -The `collectd` dstypes. -|=== - -[discrete] -[id="exported-fields-collectd.processes_{context}"] -=== `collectd.processes` Fields - -The following field corresponds to the `collectd` processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_state` -|type: integer -The `collectd ps_state` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_ops_{context}"] -=== `collectd.processes.ps_disk_ops` Fields - -The `collectd` `ps_disk_ops` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_ops.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_ops.write` -|type: float - -`TODO` - -| `collectd.processes.ps_vm` -|type: integer - -The `collectd` `ps_vm` type of processes plug-in. - -| `collectd.processes.ps_rss` -|type: integer - -The `collectd` `ps_rss` type of processes plug-in. - -| `collectd.processes.ps_data` -|type: integer - -The `collectd` `ps_data` type of processes plug-in. 
- -| `collectd.processes.ps_code` -|type: integer - -The `collectd` `ps_code` type of processes plug-in. - -| `collectd.processes.ps_stacksize` -| type: integer - -The `collectd` `ps_stacksize` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_cputime_{context}"] -=== `collectd.processes.ps_cputime` Fields - -The `collectd` `ps_cputime` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_cputime.user` -|type: float - -`TODO` - -| `collectd.processes.ps_cputime.syst` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_count_{context}"] -=== `collectd.processes.ps_count` Fields - -The `collectd` `ps_count` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_count.processes` -|type: integer - -`TODO` - -| `collectd.processes.ps_count.threads` -|type: integer - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_pagefaults_{context}"] -=== `collectd.processes.ps_pagefaults` Fields - -The `collectd` `ps_pagefaults` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_pagefaults.majflt` -|type: float - -`TODO` - -| `collectd.processes.ps_pagefaults.minflt` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_octets_{context}"] -=== `collectd.processes.ps_disk_octets` Fields - -The `collectd ps_disk_octets` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_octets.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_octets.write` -|type: float - -`TODO` - -| `collectd.processes.fork_rate` -|type: float - -The `collectd` `fork_rate` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk_{context}"] -=== `collectd.disk` Fields - -Corresponds to `collectd` disk plug-in. - -[discrete] -[id="exported-fields-collectd.disk.disk_merged_{context}"] -=== `collectd.disk.disk_merged` Fields - -The `collectd` `disk_merged` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_merged.read` -|type: float - -`TODO` - -| `collectd.disk.disk_merged.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_octets_{context}"] -=== `collectd.disk.disk_octets` Fields - -The `collectd` `disk_octets` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_octets.read` -|type: float - -`TODO` - -| `collectd.disk.disk_octets.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_time_{context}"] -=== `collectd.disk.disk_time` Fields - -The `collectd` `disk_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_time.read` -|type: float - -`TODO` - -| `collectd.disk.disk_time.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_ops_{context}"] -=== `collectd.disk.disk_ops` Fields - -The `collectd` `disk_ops` type of disk plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_ops.read` -|type: float - -`TODO` - -| `collectd.disk.disk_ops.write` -|type: float - -`TODO` - -| `collectd.disk.pending_operations` -|type: integer - -The `collectd` `pending_operations` type of disk plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_io_time_{context}"] -=== `collectd.disk.disk_io_time` Fields - -The `collectd disk_io_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_io_time.io_time` -|type: float - -`TODO` - -| `collectd.disk.disk_io_time.weighted_io_time` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface_{context}"] -=== `collectd.interface` Fields - -Corresponds to the `collectd` interface plug-in. - -[discrete] -[id="exported-fields-collectd.interface.if_octets_{context}"] -=== `collectd.interface.if_octets` Fields - -The `collectd` `if_octets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_octets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_packets_{context}"] -=== `collectd.interface.if_packets` Fields - -The `collectd` `if_packets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_packets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_errors_{context}"] -=== `collectd.interface.if_errors` Fields - -The `collectd` `if_errors` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_errors.rx` -|type: float - -`TODO` - -| `collectd.interface.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_dropped_{context}"] -=== collectd.interface.if_dropped Fields - -The `collectd` `if_dropped` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.interface.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt_{context}"] -=== `collectd.virt` Fields - -Corresponds to `collectd` virt plug-in. - -[discrete] -[id="exported-fields-collectd.virt.if_octets_{context}"] -=== `collectd.virt.if_octets` Fields - -The `collectd if_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_octets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_packets_{context}"] -=== `collectd.virt.if_packets` Fields - -The `collectd` `if_packets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_packets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_errors_{context}"] -=== `collectd.virt.if_errors` Fields - -The `collectd` `if_errors` type of virt plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_errors.rx` -|type: float - -`TODO` - -| `collectd.virt.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_dropped_{context}"] -=== `collectd.virt.if_dropped` Fields - -The `collectd` `if_dropped` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.virt.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_ops_{context}"] -=== `collectd.virt.disk_ops` Fields - -The `collectd` `disk_ops` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| collectd.virt.disk_ops.read -|type: float - -`TODO` - -| `collectd.virt.disk_ops.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_octets_{context}"] -=== `collectd.virt.disk_octets` Fields - -The `collectd` `disk_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_octets.read` -|type: float - -`TODO` - -| `collectd.virt.disk_octets.write` -|type: float - -`TODO` - -| `collectd.virt.memory` -|type: float - -The `collectd` memory type of virt plug-in. - -| `collectd.virt.virt_vcpu` -|type: float - -The `collectd` `virt_vcpu` type of virt plug-in. - -| `collectd.virt.virt_cpu_total` -|type: float - -The `collectd` `virt_cpu_total` type of virt plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.CPU_{context}"] -=== `collectd.CPU` Fields - -Corresponds to the `collectd` CPU plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.CPU.percent` -|type: float - -The `collectd` type percent of plug-in CPU. -|=== - -[discrete] -[id="exported-fields-collectd.df_{context}"] -=== collectd.df Fields - -Corresponds to the `collectd` `df` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.df.df_complex` -|type: float - -The `collectd` type `df_complex` of plug-in `df`. - -| `collectd.df.percent_bytes` -|type: float - -The `collectd` type `percent_bytes` of plug-in `df`. -|=== - -[discrete] -[id="exported-fields-collectd.entropy_{context}"] -=== `collectd.entropy` Fields - -Corresponds to the `collectd` entropy plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.entropy.entropy` -|type: integer - -The `collectd` entropy type of entropy plug-in. -|=== - -//// -[discrete] -[id="exported-fields-collectd.nfs_{context}"] -=== `collectd.nfs` Fields - -Corresponds to the `collectd` NFS plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.nfs.nfs_procedure` -|type: integer - -The `collectd` `nfs_procedure` type of nfs plug-in. -|=== -//// - -[discrete] -[id="exported-fields-collectd.memory_{context}"] -=== `collectd.memory` Fields - -Corresponds to the `collectd` memory plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.memory.memory` -|type: float - -The `collectd` memory type of memory plug-in. - -| `collectd.memory.percent` -|type: float - -The `collectd` percent type of memory plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.swap_{context}"] -=== `collectd.swap` Fields - -Corresponds to the `collectd` swap plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.swap.swap` -|type: integer - -The `collectd` swap type of swap plug-in. - -| `collectd.swap.swap_io` -|type: integer - -The `collectd swap_io` type of swap plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.load_{context}"] -=== `collectd.load` Fields - -Corresponds to the `collectd` load plug-in. - -[discrete] -[id="exported-fields-collectd.load.load_{context}"] -=== `collectd.load.load` Fields - -The `collectd` load type of load plug-in - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.load.load.shortterm` -|type: float - -`TODO` - -| `collectd.load.load.midterm` -|type: float - -`TODO` - -| `collectd.load.load.longterm` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.aggregation_{context}"] -=== `collectd.aggregation` Fields - -Corresponds to `collectd` aggregation plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.aggregation.percent` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.statsd_{context}"] -=== `collectd.statsd` Fields - -Corresponds to `collectd` `statsd` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.statsd.host_cpu` -|type: integer - -The `collectd` CPU type of `statsd` plug-in. - -| `collectd.statsd.host_elapsed_time` -|type: integer - -The `collectd` `elapsed_time` type of `statsd` plug-in. - -| `collectd.statsd.host_memory` -|type: integer - -The `collectd` memory type of `statsd` plug-in. - -| `collectd.statsd.host_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx` -|type: integer - -The `collectd` `nic_rx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx` -|type: integer - -The `collectd` `nic_tx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_storage` -|type: integer - -The `collectd` storage type of `statsd` plug-in. - -| `collectd.statsd.host_swap` -|type: integer - -The `collectd` swap type of `statsd` plug-in. - -| `collectd.statsd.host_vdsm` -|type: integer - -The `collectd` VDSM type of `statsd` plug-in. - -| `collectd.statsd.host_vms` -|type: integer - -The `collectd` VMS type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_bytes` -|type: integer - -The `collectd` `nic_rx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_bytes` -|type: integer - -The `collectd` `nic_tx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_min` -|type: integer - -The `collectd` `balloon_min` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_max` -|type: integer - -The `collectd` `balloon_max` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_target` -|type: integer - -The `collectd` `balloon_target` type of `statsd` plug-in. 
- -| `collectd.statsd.vm_balloon_cur` -| type: integer - -The `collectd` `balloon_cur` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_sys` -|type: integer - -The `collectd` `cpu_sys` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_usage` -|type: integer - -The `collectd` `cpu_usage` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_ops` -|type: integer - -The `collectd` `disk_read_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_ops` -|type: integer - -The collectd` `disk_write_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_flush_latency` -|type: integer - -The `collectd` `disk_flush_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_apparent_size` -|type: integer - -The `collectd` `disk_apparent_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_bytes` -|type: integer - -The `collectd` `disk_write_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_rate` -|type: integer - -The `collectd` `disk_write_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_true_size` -|type: integer - -The `collectd` `disk_true_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_rate` -|type: integer - -The `collectd` `disk_read_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_latency` -|type: integer - -The `collectd` `disk_write_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_latency` -|type: integer - -The `collectd` `disk_read_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_bytes` -|type: integer - -The `collectd` `disk_read_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_user` -|type: integer - -The `collectd` `cpu_user` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.postgresql_{context}"] -=== `collectd.postgresql Fields` - -Corresponds to `collectd` `postgresql` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.postgresql.pg_n_tup_g` -|type: integer - -The `collectd` type `pg_n_tup_g` of plug-in postgresql. - -| `collectd.postgresql.pg_n_tup_c` -|type: integer - -The `collectd` type `pg_n_tup_c` of plug-in postgresql. - -| `collectd.postgresql.pg_numbackends` -|type: integer - -The `collectd` type `pg_numbackends` of plug-in postgresql. - -| `collectd.postgresql.pg_xact` -|type: integer - -The `collectd` type `pg_xact` of plug-in postgresql. - -| `collectd.postgresql.pg_db_size` -|type: integer - -The `collectd` type `pg_db_size` of plug-in postgresql. - -| `collectd.postgresql.pg_blks` -|type: integer - -The `collectd` type `pg_blks` of plug-in postgresql. 
-|=== diff --git a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc deleted file mode 100644 index d893b804f0cc..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-container_{context}"] -= Container exported fields - -These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. -Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. - - -[discrete] -[id="exported-fields-pipeline_metadata.collector_{context}"] -=== `pipeline_metadata.collector` Fields - -This section contains metadata specific to the collector. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.collector.hostname` -|FQDN of the collector. It might be different from the FQDN of the actual emitter -of the logs. - -| `pipeline_metadata.collector.name` -|Name of the collector. - -| `pipeline_metadata.collector.version` -|Version of the collector. - -| `pipeline_metadata.collector.ipaddr4` -|IP address v4 of the collector server, can be an array. - -| `pipeline_metadata.collector.ipaddr6` -|IP address v6 of the collector server, can be an array. - -| `pipeline_metadata.collector.inputname` -|How the log message was received by the collector whether it was TCP/UDP, or -imjournal/imfile. - -| `pipeline_metadata.collector.received_at` -|Time when the message was received by the collector. - -| `pipeline_metadata.collector.original_raw_message` -|The original non-parsed log message, collected by the collector or as close to the -source as possible. -|=== - -[discrete] -[id="exported-fields-pipeline_metadata.normalizer_{context}"] -=== `pipeline_metadata.normalizer` Fields - -This section contains metadata specific to the normalizer. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.normalizer.hostname` -|FQDN of the normalizer. - -| `pipeline_metadata.normalizer.name` -|Name of the normalizer. - -| `pipeline_metadata.normalizer.version` -|Version of the normalizer. - -| `pipeline_metadata.normalizer.ipaddr4` -|IP address v4 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.ipaddr6` -|IP address v6 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.inputname` -|how the log message was received by the normalizer whether it was TCP/UDP. - -| `pipeline_metadata.normalizer.received_at` -|Time when the message was received by the normalizer. - -| `pipeline_metadata.normalizer.original_raw_message` -|The original non-parsed log message as it is received by the normalizer. - -| `pipeline_metadata.trace` -|The field records the trace of the message. Each collector and normalizer appends -information about itself and the date and time when the message was processed. 
-|=== diff --git a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc deleted file mode 100644 index e26b60808513..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc +++ /dev/null @@ -1,1100 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-default_{context}"] -= Default exported fields - -These are the default fields exported by the logging system and available for searching -from Elasticsearch and Kibana. The default fields are Top Level and `collectd*` - -[discrete] -=== Top Level Fields - -The top level fields are common to every application and can be present in -every record. For the Elasticsearch template, top level fields populate the actual -mappings of `default` in the template's mapping section. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `@timestamp` -| The UTC value marking when the log payload was created, or when the log payload -was first collected if the creation time is not known. This is the log -processing pipeline's best effort determination of when the log payload was -generated. Add the `@` prefix convention to note a field as being reserved for a -particular use. With Elasticsearch, most tools look for `@timestamp` by default. -For example, the format would be 2015-01-24 14:06:05.071000. - -| `geoip` -|This is geo-ip of the machine. - -| `hostname` -|The `hostname` is the fully qualified domain name (FQDN) of the entity -generating the original payload. This field is an attempt to derive this -context. Sometimes the entity generating it knows the context. While other times -that entity has a restricted namespace itself, which is known by the collector -or normalizer. - -| `ipaddr4` -|The IP address V4 of the source server, which can be an array. - -| `ipaddr6` -|The IP address V6 of the source server, if available. - -| `level` -|The logging level as provided by rsyslog (severitytext property), python's -logging module. Possible values are as listed at -link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`misc/sys/syslog.h`] -plus `trace` and `unknown`. For example, _alert crit debug emerg err info notice -trace unknown warning_. Note that `trace` is not in the `syslog.h` list but many -applications use it. - -* You should only use `unknown` when the logging system gets a value it does not -understand, and note that it is the highest level. - -* Consider `trace` as higher or more verbose, than `debug`. - -* `error` is deprecated, use `err`. - -* Convert `panic` to `emerg`. - -* Convert `warn` to `warning`. - -Numeric values from `syslog/journal PRIORITY` can usually be mapped using the -priority values as listed at -link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[misc/sys/syslog.h]. - -Log levels and priorities from other logging systems should be mapped to the -nearest match. See -link:https://docs.python.org/2.7/library/logging.html#logging-levels[python -logging] for an example. - -| `message` -|A typical log entry message, or payload. It can be stripped of metadata pulled -out of it by the collector or normalizer, that is UTF-8 encoded. - -| `pid` -|This is the process ID of the logging entity, if available. 
- -| `service` -|The name of the service associated with the logging entity, if available. For -example, the `syslog APP-NAME` property is mapped to -the service field. - -| `tags` -|Optionally provided operator defined list of tags placed on each log by the -collector or normalizer. The payload can be a string with whitespace-delimited -string tokens, or a JSON list of string tokens. - -| `file` -|Optional path to the file containing the log entry local to the collector `TODO` -analyzer for file paths. - -| `offset` -|The offset value can represent bytes to the start of the log line in the file -(zero or one based), or log line numbers (zero or one based), as long as the -values are strictly monotonically increasing in the context of a single log -file. The values are allowed to wrap, representing a new version of the log file -(rotation). - -| `namespace_name` -|Associate this record with the `namespace` that shares it's name. This value -will not be stored, but it is used to associate the record with the appropriate -`namespace` for access control and visualization. Normally this value will be -given in the tag, but if the protocol does not support sending a tag, this field -can be used. If this field is present, it will override the `namespace` given in -the tag or in `kubernetes.namespace_name`. - -| `namespace_uuid` -|This is the `uuid` associated with the `namespace_name`. This value will not be -stored, but is used to associate the record with the appropriate namespace for -access control and visualization. If this field is present, it will override the -`uuid` given in `kubernetes.namespace_uuid`. This will also cause the Kubernetes -metadata lookup to be skipped for this log record. -|=== - -[discrete] -=== `collectd` Fields - -The following fields represent namespace metrics metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interval` -|type: float - -The `collectd` interval. - -| `collectd.plugin` -|type: string - -The `collectd` plug-in. - -| `collectd.plugin_instance` -|type: string - -The `collectd` plugin_instance. - -| `collectd.type_instance` -|type: string - -The `collectd` `type_instance`. - -| `collectd.type` -|type: string - -The `collectd` type. - -| `collectd.dstypes` -|type: string - -The `collectd` dstypes. -|=== - -[discrete] -[id="exported-fields-collectd.processes_{context}"] -=== `collectd.processes` Fields - -The following field corresponds to the `collectd` processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_state` -|type: integer -The `collectd ps_state` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_ops_{context}"] -=== `collectd.processes.ps_disk_ops` Fields - -The `collectd` `ps_disk_ops` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_ops.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_ops.write` -|type: float - -`TODO` - -| `collectd.processes.ps_vm` -|type: integer - -The `collectd` `ps_vm` type of processes plug-in. - -| `collectd.processes.ps_rss` -|type: integer - -The `collectd` `ps_rss` type of processes plug-in. - -| `collectd.processes.ps_data` -|type: integer - -The `collectd` `ps_data` type of processes plug-in. - -| `collectd.processes.ps_code` -|type: integer - -The `collectd` `ps_code` type of processes plug-in. 
- -| `collectd.processes.ps_stacksize` -| type: integer - -The `collectd` `ps_stacksize` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_cputime_{context}"] -=== `collectd.processes.ps_cputime` Fields - -The `collectd` `ps_cputime` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_cputime.user` -|type: float - -`TODO` - -| `collectd.processes.ps_cputime.syst` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_count_{context}"] -=== `collectd.processes.ps_count` Fields - -The `collectd` `ps_count` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_count.processes` -|type: integer - -`TODO` - -| `collectd.processes.ps_count.threads` -|type: integer - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_pagefaults_{context}"] -=== `collectd.processes.ps_pagefaults` Fields - -The `collectd` `ps_pagefaults` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_pagefaults.majflt` -|type: float - -`TODO` - -| `collectd.processes.ps_pagefaults.minflt` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.processes.ps_disk_octets_{context}"] -=== `collectd.processes.ps_disk_octets` Fields - -The `collectd ps_disk_octets` type of processes plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.processes.ps_disk_octets.read` -|type: float - -`TODO` - -| `collectd.processes.ps_disk_octets.write` -|type: float - -`TODO` - -| `collectd.processes.fork_rate` -|type: float - -The `collectd` `fork_rate` type of processes plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.disk_{context}"] -=== `collectd.disk` Fields - -Corresponds to `collectd` disk plug-in. - -[discrete] -[id="exported-fields-collectd.disk.disk_merged_{context}"] -=== `collectd.disk.disk_merged` Fields - -The `collectd` `disk_merged` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_merged.read` -|type: float - -`TODO` - -| `collectd.disk.disk_merged.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_octets_{context}"] -=== `collectd.disk.disk_octets` Fields - -The `collectd` `disk_octets` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_octets.read` -|type: float - -`TODO` - -| `collectd.disk.disk_octets.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_time_{context}"] -=== `collectd.disk.disk_time` Fields - -The `collectd` `disk_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_time.read` -|type: float - -`TODO` - -| `collectd.disk.disk_time.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_ops_{context}"] -=== `collectd.disk.disk_ops` Fields - -The `collectd` `disk_ops` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_ops.read` -|type: float - -`TODO` - -| `collectd.disk.disk_ops.write` -|type: float - -`TODO` - -| `collectd.disk.pending_operations` -|type: integer - -The `collectd` `pending_operations` type of disk plug-in. 
-|=== - -[discrete] -[id="exported-fields-collectd.disk.disk_io_time_{context}"] -=== `collectd.disk.disk_io_time` Fields - -The `collectd disk_io_time` type of disk plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.disk.disk_io_time.io_time` -|type: float - -`TODO` - -| `collectd.disk.disk_io_time.weighted_io_time` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface_{context}"] -=== `collectd.interface` Fields - -Corresponds to the `collectd` interface plug-in. - -[discrete] -[id="exported-fields-collectd.interface.if_octets_{context}"] -=== `collectd.interface.if_octets` Fields - -The `collectd` `if_octets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_octets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_packets_{context}"] -=== `collectd.interface.if_packets` Fields - -The `collectd` `if_packets` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_packets.rx` -|type: float - -`TODO` - -| `collectd.interface.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_errors_{context}"] -=== `collectd.interface.if_errors` Fields - -The `collectd` `if_errors` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_errors.rx` -|type: float - -`TODO` - -| `collectd.interface.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.interface.if_dropped_{context}"] -=== collectd.interface.if_dropped Fields - -The `collectd` `if_dropped` type of interface plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.interface.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.interface.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt_{context}"] -=== `collectd.virt` Fields - -Corresponds to `collectd` virt plug-in. - -[discrete] -[id="exported-fields-collectd.virt.if_octets_{context}"] -=== `collectd.virt.if_octets` Fields - -The `collectd if_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_octets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_octets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_packets_{context}"] -=== `collectd.virt.if_packets` Fields - -The `collectd` `if_packets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_packets.rx` -|type: float - -`TODO` - -| `collectd.virt.if_packets.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_errors_{context}"] -=== `collectd.virt.if_errors` Fields - -The `collectd` `if_errors` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_errors.rx` -|type: float - -`TODO` - -| `collectd.virt.if_errors.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.if_dropped_{context}"] -=== `collectd.virt.if_dropped` Fields - -The `collectd` `if_dropped` type of virt plug-in. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.if_dropped.rx` -|type: float - -`TODO` - -| `collectd.virt.if_dropped.tx` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_ops_{context}"] -=== `collectd.virt.disk_ops` Fields - -The `collectd` `disk_ops` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_ops.read` -|type: float - -`TODO` - -| `collectd.virt.disk_ops.write` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.virt.disk_octets_{context}"] -=== `collectd.virt.disk_octets` Fields - -The `collectd` `disk_octets` type of virt plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.virt.disk_octets.read` -|type: float - -`TODO` - -| `collectd.virt.disk_octets.write` -|type: float - -`TODO` - -| `collectd.virt.memory` -|type: float - -The `collectd` memory type of virt plug-in. - -| `collectd.virt.virt_vcpu` -|type: float - -The `collectd` `virt_vcpu` type of virt plug-in. - -| `collectd.virt.virt_cpu_total` -|type: float - -The `collectd` `virt_cpu_total` type of virt plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.CPU_{context}"] -=== `collectd.CPU` Fields - -Corresponds to the `collectd` CPU plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.CPU.percent` -|type: float - -The `collectd` type percent of plug-in CPU. -|=== - -[discrete] -[id="exported-fields-collectd.df_{context}"] -=== collectd.df Fields - -Corresponds to the `collectd` `df` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.df.df_complex` -|type: float - -The `collectd` type `df_complex` of plug-in `df`. - -| `collectd.df.percent_bytes` -|type: float - -The `collectd` type `percent_bytes` of plug-in `df`. -|=== - -[discrete] -[id="exported-fields-collectd.entropy_{context}"] -=== `collectd.entropy` Fields - -Corresponds to the `collectd` entropy plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.entropy.entropy` -|type: integer - -The `collectd` entropy type of entropy plug-in. -|=== - -//// -[discrete] -[id="exported-fields-collectd.nfs_{context}"] -=== `collectd.nfs` Fields - -Corresponds to the `collectd` NFS plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.nfs.nfs_procedure` -|type: integer - -The `collectd` `nfs_procedure` type of nfs plug-in. -|=== -//// - -[discrete] -[id="exported-fields-collectd.memory_{context}"] -=== `collectd.memory` Fields - -Corresponds to the `collectd` memory plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.memory.memory` -|type: float - -The `collectd` memory type of memory plug-in. - -| `collectd.memory.percent` -|type: float - -The `collectd` percent type of memory plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.swap_{context}"] -=== `collectd.swap` Fields - -Corresponds to the `collectd` swap plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.swap.swap` -|type: integer - -The `collectd` swap type of swap plug-in. - -| `collectd.swap.swap_io` -|type: integer - -The `collectd swap_io` type of swap plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.load_{context}"] -=== `collectd.load` Fields - -Corresponds to the `collectd` load plug-in. 
- -[discrete] -[id="exported-fields-collectd.load.load_{context}"] -=== `collectd.load.load` Fields - -The `collectd` load type of load plug-in - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.load.load.shortterm` -|type: float - -`TODO` - -| `collectd.load.load.midterm` -|type: float - -`TODO` - -| `collectd.load.load.longterm` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.aggregation_{context}"] -=== `collectd.aggregation` Fields - -Corresponds to `collectd` aggregation plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.aggregation.percent` -|type: float - -`TODO` -|=== - -[discrete] -[id="exported-fields-collectd.statsd_{context}"] -=== `collectd.statsd` Fields - -Corresponds to `collectd` `statsd` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.statsd.host_cpu` -|type: integer - -The `collectd` CPU type of `statsd` plug-in. - -| `collectd.statsd.host_elapsed_time` -|type: integer - -The `collectd` `elapsed_time` type of `statsd` plug-in. - -| `collectd.statsd.host_memory` -|type: integer - -The `collectd` memory type of `statsd` plug-in. - -| `collectd.statsd.host_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx` -|type: integer - -The `collectd` `nic_rx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx` -|type: integer - -The `collectd` `nic_tx` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.host_storage` -|type: integer - -The `collectd` storage type of `statsd` plug-in. - -| `collectd.statsd.host_swap` -|type: integer - -The `collectd` swap type of `statsd` plug-in. - -| `collectd.statsd.host_vdsm` -|type: integer - -The `collectd` VDSM type of `statsd` plug-in. - -| `collectd.statsd.host_vms` -|type: integer - -The `collectd` VMS type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_dropped` -|type: integer - -The `collectd` `nic_tx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_bytes` -|type: integer - -The `collectd` `nic_rx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_bytes` -|type: integer - -The `collectd` `nic_tx_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_min` -|type: integer - -The `collectd` `balloon_min` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_max` -|type: integer - -The `collectd` `balloon_max` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_target` -|type: integer - -The `collectd` `balloon_target` type of `statsd` plug-in. - -| `collectd.statsd.vm_balloon_cur` -| type: integer - -The `collectd` `balloon_cur` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_sys` -|type: integer - -The `collectd` `cpu_sys` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_usage` -|type: integer - -The `collectd` `cpu_usage` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_ops` -|type: integer - -The `collectd` `disk_read_ops` type of `statsd` plug-in. 
- -| `collectd.statsd.vm_disk_write_ops` -|type: integer - -The `collectd` `disk_write_ops` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_flush_latency` -|type: integer - -The `collectd` `disk_flush_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_apparent_size` -|type: integer - -The `collectd` `disk_apparent_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_bytes` -|type: integer - -The `collectd` `disk_write_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_rate` -|type: integer - -The `collectd` `disk_write_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_true_size` -|type: integer - -The `collectd` `disk_true_size` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_rate` -|type: integer - -The `collectd` `disk_read_rate` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_write_latency` -|type: integer - -The `collectd` `disk_write_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_latency` -|type: integer - -The `collectd` `disk_read_latency` type of `statsd` plug-in. - -| `collectd.statsd.vm_disk_read_bytes` -|type: integer - -The `collectd` `disk_read_bytes` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_dropped` -|type: integer - -The `collectd` `nic_rx_dropped` type of `statsd` plug-in. - -| `collectd.statsd.vm_cpu_user` -|type: integer - -The `collectd` `cpu_user` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_rx_errors` -|type: integer - -The `collectd` `nic_rx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_tx_errors` -|type: integer - -The `collectd` `nic_tx_errors` type of `statsd` plug-in. - -| `collectd.statsd.vm_nic_speed` -|type: integer - -The `collectd` `nic_speed` type of `statsd` plug-in. -|=== - -[discrete] -[id="exported-fields-collectd.postgresql_{context}"] -=== `collectd.postgresql Fields` - -Corresponds to `collectd` `postgresql` plug-in. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `collectd.postgresql.pg_n_tup_g` -|type: integer - -The `collectd` type `pg_n_tup_g` of plug-in postgresql. - -| `collectd.postgresql.pg_n_tup_c` -|type: integer - -The `collectd` type `pg_n_tup_c` of plug-in postgresql. - -| `collectd.postgresql.pg_numbackends` -|type: integer - -The `collectd` type `pg_numbackends` of plug-in postgresql. - -| `collectd.postgresql.pg_xact` -|type: integer - -The `collectd` type `pg_xact` of plug-in postgresql. - -| `collectd.postgresql.pg_db_size` -|type: integer - -The `collectd` type `pg_db_size` of plug-in postgresql. - -| `collectd.postgresql.pg_blks` -|type: integer - -The `collectd` type `pg_blks` of plug-in postgresql. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-docker.adoc b/_unused_topics/cluster-logging-exported-fields-docker.adoc deleted file mode 100644 index 26d77f062ca0..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-docker.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-container_{context}"] -= Container exported fields - -These are the Docker fields exported by OpenShift Logging available for searching from Elasticsearch and Kibana. -Namespace for docker container-specific metadata. The docker.container_id is the Docker container ID. - - -[discrete] -[id="pipeline_metadata.collector_{context}"] -=== `pipeline_metadata.collector` Fields - -This section contains metadata specific to the collector. 
- -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.collector.hostname` -|FQDN of the collector. It might be different from the FQDN of the actual emitter -of the logs. - -| `pipeline_metadata.collector.name` -|Name of the collector. - -| `pipeline_metadata.collector.version` -|Version of the collector. - -| `pipeline_metadata.collector.ipaddr4` -|IP address v4 of the collector server, can be an array. - -| `pipeline_metadata.collector.ipaddr6` -|IP address v6 of the collector server, can be an array. - -| `pipeline_metadata.collector.inputname` -|How the log message was received by the collector whether it was TCP/UDP, or -imjournal/imfile. - -| `pipeline_metadata.collector.received_at` -|Time when the message was received by the collector. - -| `pipeline_metadata.collector.original_raw_message` -|The original non-parsed log message, collected by the collector or as close to the -source as possible. -|=== - -[discrete] -[id="exported-fields-pipeline_metadata.normalizer_{context}"] -=== `pipeline_metadata.normalizer` Fields - -This section contains metadata specific to the normalizer. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `pipeline_metadata.normalizer.hostname` -|FQDN of the normalizer. - -| `pipeline_metadata.normalizer.name` -|Name of the normalizer. - -| `pipeline_metadata.normalizer.version` -|Version of the normalizer. - -| `pipeline_metadata.normalizer.ipaddr4` -|IP address v4 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.ipaddr6` -|IP address v6 of the normalizer server, can be an array. - -| `pipeline_metadata.normalizer.inputname` -|how the log message was received by the normalizer whether it was TCP/UDP. - -| `pipeline_metadata.normalizer.received_at` -|Time when the message was received by the normalizer. - -| `pipeline_metadata.normalizer.original_raw_message` -|The original non-parsed log message as it is received by the normalizer. - -| `pipeline_metadata.trace` -|The field records the trace of the message. Each collector and normalizer appends -information about itself and the date and time when the message was processed. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc deleted file mode 100644 index d40a3ddd446e..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc +++ /dev/null @@ -1,83 +0,0 @@ -[id="cluster-logging-exported-fields-kubernetes_{context}"] -= Kubernetes - -The following fields can be present in the namespace for kubernetes-specific metadata. - -== kubernetes.pod_name - -The name of the pod - -[horizontal] -Data type:: keyword - - -== kubernetes.pod_id - -Kubernetes ID of the pod. - -[horizontal] -Data type:: keyword - - -== kubernetes.namespace_name - -The name of the namespace in Kubernetes. - -[horizontal] -Data type:: keyword - - -== kubernetes.namespace_id - -ID of the namespace in Kubernetes. - -[horizontal] -Data type:: keyword - - -== kubernetes.host - -Kubernetes node name - -[horizontal] -Data type:: keyword - - -== kubernetes.master_url - -Kubernetes Master URL - -[horizontal] -Data type:: keyword - - -== kubernetes.container_name - -The name of the container in Kubernetes. 
- -[horizontal] -Data type:: text - - -== kubernetes.annotations - -Annotations associated with the Kubernetes object - -[horizontal] -Data type:: group - - -== kubernetes.labels - -Labels attached to the Kubernetes object Each label name is a subfield of labels field. Each label name is de-dotted: dots in the name are replaced with underscores. - -[horizontal] -Data type:: group - - -== kubernetes.event - -The kubernetes event obtained from kubernetes master API The event is already JSON object and as whole nested under kubernetes field This description should loosely follow 'type Event' in https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#event-v1-core - -[horizontal] -Data type:: group diff --git a/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc deleted file mode 100644 index 6c5dcd5b4470..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-ovirt_{context}"] -= oVirt exported fields - -These are the oVirt fields exported by OpenShift Logging available for searching -from Elasticsearch and Kibana. - -Namespace for oVirt metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `ovirt.entity` -|The type of the data source, hosts, VMS, and engine. - -| `ovirt.host_id` -|The oVirt host UUID. -|=== - -[discrete] -[id="exported-fields-ovirt.engine_{context}"] -=== `ovirt.engine` Fields - -Namespace for metadata related to the {rh-virtualization-engine-name}. The FQDN of the {rh-virtualization-engine-name} is -`ovirt.engine.fqdn` diff --git a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc deleted file mode 100644 index fec43d97ad1a..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-rsyslog_{context}"] -= `rsyslog` exported fields - -These are the `rsyslog` fields exported by the logging system and available for searching -from Elasticsearch and Kibana. - -The following fields are RFC5424 based metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `rsyslog.facility` -|See `syslog` specification for more information on `rsyslog`. - -| `rsyslog.protocol-version` -|This is the `rsyslog` protocol version. - -| `rsyslog.structured-data` -|See `syslog` specification for more information on `syslog` structured-data. - -| `rsyslog.msgid` -|This is the `syslog` msgid field. - -| `rsyslog.appname` -|If `app-name` is the same as `programname`, then only fill top-level field `service`. -If `app-name` is not equal to `programname`, this field will hold `app-name`. -See syslog specifications for more information. 
-|=== diff --git a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc deleted file mode 100644 index 19e1d6a4cdca..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-systemd_{context}"] -= systemd exported fields - -These are the `systemd` fields exported by OpenShift Logging available for searching -from Elasticsearch and Kibana. - -Contains common fields specific to `systemd` journal. -link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html[Applications] -can write their own fields to the journal. These will be available under the -`systemd.u` namespace. `RESULT` and `UNIT` are two such fields. - -[discrete] -[id="exported-fields-systemd.k_{context}"] -=== `systemd.k` Fields - -The following table contains `systemd` kernel-specific metadata. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.k.KERNEL_DEVICE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_DEVICE=[`systemd.k.KERNEL_DEVICE`] -is the kernel device name. - -| `systemd.k.KERNEL_SUBSYSTEM` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_SUBSYSTEM=[`systemd.k.KERNEL_SUBSYSTEM`] -is the kernel subsystem name. - -| `systemd.k.UDEV_DEVLINK` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVLINK=[`systemd.k.UDEV_DEVLINK`] -includes additional symlink names that point to the node. - -| `systemd.k.UDEV_DEVNODE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVNODE=[`systemd.k.UDEV_DEVNODE`] -is the node path of the device. - -| `systemd.k.UDEV_SYSNAME` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_SYSNAME=[ `systemd.k.UDEV_SYSNAME`] -is the kernel device name. - -|=== - -[discrete] -[id="exported-fields-systemd.t_{context}"] -=== `systemd.t` Fields - -`systemd.t Fields` are trusted journal fields, fields that are implicitly added -by the journal, and cannot be altered by client code. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.t.AUDIT_LOGINUID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_LOGINUID`] -is the user ID for the journal entry process. - -| `systemd.t.BOOT_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_BOOT_ID=[`systemd.t.BOOT_ID`] -is the kernel boot ID. - -| `systemd.t.AUDIT_SESSION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_SESSION`] -is the session for the journal entry process. - -| `systemd.t.CAP_EFFECTIVE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_CAP_EFFECTIVE=[`systemd.t.CAP_EFFECTIVE`] -represents the capabilities of the journal entry process. - -| `systemd.t.CMDLINE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.CMDLINE`] -is the command line of the journal entry process. 
- -| `systemd.t.COMM` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.COMM`] -is the name of the journal entry process. - -| `systemd.t.EXE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.EXE`] -is the executable path of the journal entry process. - -| `systemd.t.GID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.GID`] -is the group ID for the journal entry process. - -| `systemd.t.HOSTNAME` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_HOSTNAME=[`systemd.t.HOSTNAME`] -is the name of the host. - -| `systemd.t.MACHINE_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_MACHINE_ID=[`systemd.t.MACHINE_ID`] -is the machine ID of the host. - -| `systemd.t.PID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.PID`] -is the process ID for the journal entry process. - -| `systemd.t.SELINUX_CONTEXT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SELINUX_CONTEXT=[`systemd.t.SELINUX_CONTEXT`] -is the security context, or label, for the journal entry process. - -| `systemd.t.SOURCE_REALTIME_TIMESTAMP` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SOURCE_REALTIME_TIMESTAMP=[`systemd.t.SOURCE_REALTIME_TIMESTAMP`] -is the earliest and most reliable timestamp of the message. This is converted to RFC 3339 NS format. - -| `systemd.t.SYSTEMD_CGROUP` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_CGROUP`] -is the `systemd` control group path. - -| `systemd.t.SYSTEMD_OWNER_UID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_OWNER_UID`] -is the owner ID of the session. - -| `systemd.t.SYSTEMD_SESSION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SESSION`], -if applicable, is the `systemd` session ID. - -| `systemd.t.SYSTEMD_SLICE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SLICE`] -is the slice unit of the journal entry process. - -| `systemd.t.SYSTEMD_UNIT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_UNIT`] -is the unit name for a session. - -| `systemd.t.SYSTEMD_USER_UNIT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_USER_UNIT`], -if applicable, is the user unit name for a session. - -| `systemd.t.TRANSPORT` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_TRANSPORT=[`systemd.t.TRANSPORT`] -is the method of entry by the journal service. This includes, `audit`, `driver`, -`syslog`, `journal`, `stdout`, and `kernel`. - -| `systemd.t.UID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.UID`] -is the user ID for the journal entry process. - -| `systemd.t.SYSLOG_FACILITY` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_FACILITY`] -is the field containing the facility, formatted as a decimal string, for `syslog`. 
- -| `systemd.t.SYSLOG_IDENTIFIER` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.systemd.t.SYSLOG_IDENTIFIER`] -is the identifier for `syslog`. - -| `systemd.t.SYSLOG_PID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`SYSLOG_PID`] -is the client process ID for `syslog`. -|=== - -[discrete] -[id="exported-fields-systemd.u_{context}"] -=== `systemd.u` Fields - -`systemd.u Fields` are directly passed from clients and stored in the journal. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `systemd.u.CODE_FILE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FILE`] -is the code location containing the filename of the source. - -| `systemd.u.CODE_FUNCTION` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FUNCTION`] -is the code location containing the function of the source. - -| `systemd.u.CODE_LINE` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_LINE`] -is the code location containing the line number of the source. - -| `systemd.u.ERRNO` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#ERRNO=[`systemd.u.ERRNO`], -if present, is the low-level error number formatted in numeric value, as a decimal string. - -| `systemd.u.MESSAGE_ID` -|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#MESSAGE_ID=[`systemd.u.MESSAGE_ID`] -is the message identifier ID for recognizing message types. - -| `systemd.u.RESULT` -|For private use only. - -| `systemd.u.UNIT` -|For private use only. -|=== diff --git a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc deleted file mode 100644 index 82724afc1591..000000000000 --- a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-exported-fields.adoc - -[id="cluster-logging-exported-fields-tlog_{context}"] -= Tlog exported fields - -These are the Tlog fields exported by the OpenShift Logging system and available for searching -from Elasticsearch and Kibana. - -Tlog terminal I/O recording messages. For more information see -link:https://github.com/Scribery/tlog[Tlog]. - -[cols="3,7",options="header"] -|=== -|Parameter -|Description - -| `tlog.ver` -|Message format version number. - -| `tlog.user` -|Recorded user name. - -| `tlog.term` -|Terminal type name. - -| `tlog.session` -|Audit session ID of the recorded session. - -| `tlog.id` -|ID of the message within the session. - -| `tlog.pos` -|Message position in the session, milliseconds. - -| `tlog.timing` -|Distribution of this message's events in time. - -| `tlog.in_txt` -|Input text with invalid characters scrubbed. - -| `tlog.in_bin` -|Scrubbed invalid input characters as bytes. - -| `tlog.out_txt` -|Output text with invalid characters scrubbed. - -| `tlog.out_bin` -|Scrubbed invalid output characters as bytes. 
-|=== diff --git a/_unused_topics/cluster-logging-kibana-console-launch.adoc b/_unused_topics/cluster-logging-kibana-console-launch.adoc deleted file mode 100644 index 44b23c483030..000000000000 --- a/_unused_topics/cluster-logging-kibana-console-launch.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-kibana-console.adoc -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-kibana-visualize_{context}"] -= Launching the Kibana interface - -The Kibana interface is a browser-based console -to query, discover, and visualize your Elasticsearch data through histograms, line graphs, -pie charts, heat maps, built-in geospatial support, and other visualizations. - -.Procedure - -To launch the Kibana interface: - -. In the {product-title} console, click *Observe* -> *Logging*. - -. Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. You can now: -+ -* Search and browse your data using the Discover page. -* Chart and map your data using the Visualize page. -* Create and view custom dashboards using the Dashboard page. -+ -Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information, -on using the interface, see the link:https://www.elastic.co/guide/en/kibana/5.6/connect-to-elasticsearch.html[Kibana documentation]. diff --git a/_unused_topics/cluster-logging-log-forwarding-disable.adoc b/_unused_topics/cluster-logging-log-forwarding-disable.adoc deleted file mode 100644 index 680ea9b95686..000000000000 --- a/_unused_topics/cluster-logging-log-forwarding-disable.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-external.adoc - -[id="cluster-logging-log-forwarding-disable_{context}"] -= Disabling the Log Forwarding feature - -To disable the Log Forwarding feature, remove the `clusterlogging.openshift.io/logforwardingtechpreview:enabled` parameter from the Cluster Logging custom resource (CR) and delete the `ClusterLogForwarder` CR. The container and node logs will be forwarded to the internal {product-title} Elasticsearch instance. - -[IMPORTANT] -==== -You cannot disable Log Forwarding by setting the `disableDefaultForwarding` to `false` in the `ClusterLogForwarder` CR. This prevents OpenShift Logging from sending logs to the specified endpoints *and* to default internal {product-title} Elasticsearch instance. -==== - -.Procedure - -To disable the Log Forwarding feature: - -. Edit the OpenShift Logging CR in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- - -. Remove the `clusterlogging.openshift.io/logforwardingtechpreview` annotation: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - annotations: - clusterlogging.openshift.io/logforwardingtechpreview: enabled <1> - name: "instance" - namespace: "openshift-logging" -... ----- -<1> Remove this annotation. - -. 
Delete the `ClusterLogForwarder` CR: -+ -[source,terminal] ----- -$ oc delete LogForwarding instance -n openshift-logging ----- - diff --git a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc deleted file mode 100644 index ec4c0d37eac0..000000000000 --- a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-uninstall.adoc - -[id="cluster-logging-uninstall-ops_{context}"] -= Uninstall the infra cluster - -You can uninstall the infra cluster from OpenShift Logging. -After uninstalling, Fluentd no longer splits logs. - -.Procedure - -To uninstall the infra cluster: - -. - -. - -. diff --git a/_unused_topics/cnv-accessing-vmi-web.adoc b/_unused_topics/cnv-accessing-vmi-web.adoc deleted file mode 100644 index f733d2873fd5..000000000000 --- a/_unused_topics/cnv-accessing-vmi-web.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// - -[id="virt-accessing-vmi-web_{context}"] -= Connecting to a virtual machine with the web console - -You can connect to a virtual machine by using the web console. - -.Procedure - -. Ensure you are in the correct project. If not, click the *Project* -list and select the appropriate project. -. Click *Workloads* -> *Virtual Machines* to display the virtual -machines in the project. -. Select a virtual machine. -. In the *Overview* tab, click the `virt-launcher-` pod. -. Click the *Terminal* tab. If the terminal is blank, click the -terminal and press any key to initiate connection. diff --git a/_unused_topics/completing-installation.adoc b/_unused_topics/completing-installation.adoc deleted file mode 100644 index 911997c61e1a..000000000000 --- a/_unused_topics/completing-installation.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="completing-installation_{context}"] -= Completing and verifying the {product-title} installation - -When the bootstrap node is done with its work and has handed off control to the new {product-title} cluster, the bootstrap node is destroyed. The installation program waits for the cluster to initialize, creates a route to the {product-title} console, and presents the information and credentials you require to log in to the cluster. Here’s an example: - ----- -INFO Install complete!                                 - -INFO Run 'export KUBECONFIG=/home/joe/ocp/auth/kubeconfig' to manage the cluster with 'oc', the {product-title} CLI. - -INFO The cluster is ready when 'oc login -u kubeadmin -p 39RPg-y4c7V-n4bbn-vAF3M' succeeds (wait a few minutes). - -INFO Access the {product-title} web-console here: https://console-openshift-console.apps.mycluster.devel.example.com - -INFO Login to the console with user: kubeadmin, password: 39RPg-y4c7V-n4bbn-vAF3M ----- - -To access the {product-title} cluster from your web browser, log in as kubeadmin with the password (for example, 39RPg-y4c7V-n4bbn-vAF3M), using the URL shown: - -     https://console-openshift-console.apps.mycluster.devel.example.com - -To access the {product-title} cluster from the command line, identify the location of the credentials file (export the KUBECONFIG variable) and log in as kubeadmin with the provided password: ----- -$ export KUBECONFIG=/home/joe/ocp/auth/kubeconfig - -$ oc login -u kubeadmin -p 39RPg-y4c7V-n4bbn-vAF3M ----- - -At this point, you can begin using the {product-title} cluster. 
To understand the management of your {product-title} cluster going forward, you should explore the {product-title} control plane. diff --git a/_unused_topics/con-pod-reset-policy.adoc b/_unused_topics/con-pod-reset-policy.adoc deleted file mode 100644 index b317a1df6495..000000000000 --- a/_unused_topics/con-pod-reset-policy.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[[nodes-configuring-nodes]] -= Understanding Pod restart policy -{product-author} -{product-version} -:data-uri: -:icons: -:experimental: -:toc: macro -:toc-title: - - -//from https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy -A Pod restart policy determines how {product-title} responds when containers in that Pod exit. -The policy applies to all containers in that Pod. - -The possible values are: - -* `Always` - Tries restarting a successfully exited container on the Pod continuously, with an exponential back-off delay (10s, 20s, 40s) until the Pod is restarted. The default is `Always`. -* `OnFailure` - Tries restarting a failed container on the Pod with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes. -* `Never` - Does not try to restart exited or failed containers on the Pod. Pods immediately fail and exit. - -//https://kubernetes-v1-4.github.io/docs/user-guide/pod-states/ -Once bound to a node, a Pod will never be bound to another node. This means that a controller is necessary in order for a Pod to survive node failure: - -[cols="3",options="header"] -|=== - -|Condition -|Controller Type -|Restart Policy - -|Pods that are expected to terminate (such as batch computations) -|xref:../../architecture/core_concepts/deployments.adoc#jobs[Job] -|`OnFailure` or `Never` - -|Pods that are expected to not terminate (such as web servers) -|xref:../../architecture/core_concepts/deployments.adoc#replication-controllers[Replication Controller] -| `Always`. - -|Pods that must run one-per-machine -|xref:../../dev_guide/daemonsets.adoc#dev-guide-daemonsets[Daemonset] -|Any -|=== - -If a container on a Pod fails and the restart policy is set to `OnFailure`, the Pod stays on the node and the container is restarted. If you do not want the container to -restart, use a restart policy of `Never`. - -//https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#handling-pod-and-container-failures -If an entire Pod fails, {product-title} starts a new Pod. Developers must address the possibility that applications might be restarted in a new Pod. In particular, -applications must handle temporary files, locks, incomplete output, and so forth caused by previous runs. - -For details on how {product-title} uses restart policy with failed containers, see -the link:https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#example-states[Example States] in the Kubernetes documentation. - diff --git a/_unused_topics/configuration-resource-configure.adoc b/_unused_topics/configuration-resource-configure.adoc deleted file mode 100644 index a65a1d4bb2fc..000000000000 --- a/_unused_topics/configuration-resource-configure.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - - -[id="configuration-resource-configure_{context}"] -= Configure the Configuration Resource - -To configure the Configuration Resource, you customize the Custom Resource Definition (CRD) that controls its Operator and deploy it to your cluster. - -.Prerequisites -* Deploy an {product-title} cluster. 
-* Review the CRD for the resource and provision any resources that your changes require. -* Access to the right user to do this thing. - -.Procedure - -. From some specific computer, modify the CRD for the resource to describe your intended configuration. Save the file in `whatever-the-location-is`. - -. Run the following command to update the CRD in your cluster: -+ ----- -$ oc something or other -- <1> --<2> ----- -<1> The CRD file that contains customizations for your resource. -<2> However you specify the cluster you’re changing. - -. Confirm that the resource reflects your changes. Run the following command and review the output: -+ ----- -$ oc something or other - -Output -Output -Output ----- -+ -If the output includes , the resource redeployed on your cluster. diff --git a/_unused_topics/configuring-local-provisioner.adoc b/_unused_topics/configuring-local-provisioner.adoc deleted file mode 100644 index 1e46999679b0..000000000000 --- a/_unused_topics/configuring-local-provisioner.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[id="configuring-local-provisioner_{context}"] -= Configuring the local provisioner - -{product-title} depends on an external provisioner to create PVs for local devices and to clean up PVs when they are not in use to enable reuse. - -.Prerequisites - -* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs. - -[NOTE] -==== -The local volume provisioner is different from most provisioners and does not support dynamic provisioning. -==== - -[NOTE] -==== -The local volume provisioner requires administrators to preconfigure the local volumes on each node and mount them under discovery directories. The provisioner then manages the volumes by creating and cleaning up PVs for each volume. -==== - -.Procedure -. Configure the external provisioner using a ConfigMap to relate directories with storage classes, for example: -+ ----- - kind: ConfigMap -metadata: - name: local-volume-config -data: - storageClassMap: | - local-ssd: - hostDir: /mnt/local-storage/ssd - mountDir: /mnt/local-storage/ssd - local-hdd: - hostDir: /mnt/local-storage/hdd - mountDir: /mnt/local-storage/hdd ----- -<1> Name of the storage class. -<2> Path to the directory on the host. It must be a subdirectory of `*/mnt/local-storage*`. -<3> Path to the directory in the provisioner Pod. We recommend using the same directory structure as used on the host and `mountDir` can be omitted in this case. - -. Create a standalone namespace for the local volume provisioner and its configuration, for example: -+ ----- -$ oc new-project local-storage ----- - -With this configuration, the provisioner creates: - -* One PV with storage class `local-ssd` for every subdirectory mounted in the `*/mnt/local-storage/ssd*` directory -* One PV with storage class `local-hdd` for every subdirectory mounted in the `*/mnt/local-storage/hdd*` directory - -[WARNING] -==== -The syntax of the ConfigMap has changed between {product-title} 3.9 and 3.10. Since this feature is in Technology Preview, the ConfigMap is not automatically converted during the update. 
-==== diff --git a/_unused_topics/configuring-user-agent.adoc b/_unused_topics/configuring-user-agent.adoc deleted file mode 100644 index dda5f717be47..000000000000 --- a/_unused_topics/configuring-user-agent.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="configuring-user-agent"] -= Configuring the user agent -include::_attributes/common-attributes.adoc[] -:context: configuring-user-agent - -toc::[] - -include::modules/user-agent-overview.adoc[leveloffset=+1] - -include::modules/user-agent-configuring.adoc[leveloffset=+1] diff --git a/_unused_topics/customize-certificates-api-add-default.adoc b/_unused_topics/customize-certificates-api-add-default.adoc deleted file mode 100644 index a70aeb11709a..000000000000 --- a/_unused_topics/customize-certificates-api-add-default.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/api-server.adoc - -[id="add-default-api-server_{context}"] -= Add an API server default certificate - -To allow clients outside the cluster to validate the API server's -certificate, you can replace the default certificate -with one that is issued by a public or organizational CA. - -.Prerequisites - -* You must have a valid certificate and key in the PEM format. - -.Procedure - -. Create a secret that contains the certificate and key in the -`openshift-config` namespace. -+ ----- -$ oc create secret tls \//<1> - --cert= \//<2> - --key= \//<3> - -n openshift-config ----- -<1> `` is the name of the secret that will contain -the certificate. -<2> `` is the path to the certificate on your -local file system. -<3> `` is the path to the private key associated -with this certificate. - -. Update the API server to reference the created secret. -+ ----- -$ oc patch apiserver cluster \ - --type=merge -p \ - '{"spec": {"servingCerts": {"defaultServingCertificate": - {"name": ""}}}}' <1> ----- -<1> Replace `` with the name used for the secret in -the previous step. - -. Examine the `apiserver/cluster` object and confirm the secret is now -referenced. -+ ----- -$ oc get apiserver cluster -o yaml -... -spec: - servingCerts: - defaultServingCertificate: - name: -... ----- diff --git a/_unused_topics/deploying-local-provisioner.adoc b/_unused_topics/deploying-local-provisioner.adoc deleted file mode 100644 index bfef02c41d1f..000000000000 --- a/_unused_topics/deploying-local-provisioner.adoc +++ /dev/null @@ -1,20 +0,0 @@ -[id="deploying-local-provisioner_{context}"] -= Deploying the local provisioner - -This paragraph is the procedure module introduction: a short description of the procedure. - -.Prerequisites - -* Before starting the provisioner, mount all local devices and create a ConfigMap with storage classes and their directories. - -.Procedure - -. Install the local provisioner from the `*local-storage-provisioner-template.yaml*` file. -. Create a service account that allows running Pods as a root user, using hostPath volumes, and using any SELinux context to monitor, manage, and clean local volumes, for example: -+ ----- -$ oc create serviceaccount local-storage-admin -$ oc adm policy add-scc-to-user privileged -z local-storage-admin ----- -+ -To allow the provisioner Pod to delete content on local volumes created by any Pod, root privileges and any SELinux context are required. hostPath is required to access the `*/mnt/local-storage*` path on the host. 
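The local storage modules above show how to configure the provisioner and grant its service account the required privileges, but they stop short of showing how a workload claims the resulting volumes. The following sketch is a hypothetical `PersistentVolumeClaim` that would bind to one of the PVs the provisioner creates; it assumes the `local-ssd` storage class from the earlier ConfigMap example, and the claim name and requested size are illustrative only.

[source,yaml]
----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-ssd-claim        # hypothetical name for illustration
spec:
  accessModes:
    - ReadWriteOnce            # local volumes are node-local, so single-node access
  storageClassName: local-ssd  # matches the storage class defined in the provisioner ConfigMap
  resources:
    requests:
      storage: 10Gi            # illustrative size; the claim binds to a whole pre-created PV
----

Because each PV created by the provisioner is backed by a directory on a specific node, a pod that mounts this claim generally needs to run on the node that hosts the backing volume.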
diff --git a/_unused_topics/distr-tracing-deploy-otel-collector.adoc b/_unused_topics/distr-tracing-deploy-otel-collector.adoc deleted file mode 100644 index d628b2501f73..000000000000 --- a/_unused_topics/distr-tracing-deploy-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-otel-collector_{context}"] -= Deploying distributed tracing data collection - -The custom resource definition (CRD) defines the configuration used when you deploy an instance of {OTELName}. - -.Prerequisites - -* The {OTELName} Operator has been installed. -//* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the OpenShift web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing distributed tracing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You might have to wait a few moments for the Operators to be copied to the new project. - -. Click the *{OTELName} Operator*. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *OpenTelemetryCollector*, click *Create Instance*. - -. On the *Create OpenTelemetry Collector* page, to install using the defaults, click *Create* to create the {OTELShortName} instance. - -. On the *OpenTelemetryCollectors* page, click the name of the {OTELShortName} instance, for example, `opentelemetrycollector-sample`. - -. On the *Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. - -[id="distr-tracing-deploy-otel-collector-cli_{context}"] -= Deploying {OTELShortName} from the CLI - -Follow this procedure to create an instance of {OTELShortName} from the command line. - -.Prerequisites - -* The {OTELName} Operator has been installed and verified. -+ -//* You have reviewed the instructions for how to customize the deployment. -+ -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login https://:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. 
Create a custom resource file named `opentelemetrycollector.yaml` that contains the following text: -+ -.Example opentelemetrycollector.yaml -[source,yaml] ----- - apiVersion: opentelemetry.io/v1alpha1 - kind: OpenTelemetryCollector - metadata: - name: opentelemetrycollector-sample - namespace: tracing-system - spec: - image: >- - registry.redhat.io/rhosdt/opentelemetry-collector-rhel8@sha256:61934ea5793c55900d09893e8f8b1f2dbd2e712faba8e97684e744691b29f25e - config: | - receivers: - jaeger: - protocols: - grpc: - exporters: - logging: - service: - pipelines: - traces: - receivers: [jaeger] - exporters: [logging] ----- - -. Run the following command to deploy the {OTELShortName} instance: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f opentelemetrycollector.yaml ----- - -. Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -opentelemetrycollector-cdff7897b-qhfdx 2/2 Running 0 24s ----- diff --git a/_unused_topics/exploring-cvo.adoc b/_unused_topics/exploring-cvo.adoc deleted file mode 100644 index 416394623c91..000000000000 --- a/_unused_topics/exploring-cvo.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * - -[id="exploring-cvo_{context}"] -= Exploring the CVO - -To see the current version that your cluster is on, type: - ----- -$ oc get clusterversion - -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.5.4 True False 10h Cluster version is 4.5.4 ----- - -Each release version is represented by a set of images.
To see basic release information and a list of those images, type: - ----- -$ oc adm release info - -Name:          4.0.0-0.7 -Digest:        sha256:641c0e4f550af59ec20349187a31751ae5108270f13332d1771935520ebf34c1 -Created:   2019-03-05 13:33:12 -0500 EST -OS/Arch:   linux/amd64 -Manifests: 248 -Release Metadata: -  Version:  4.0.0-0.7 -  Upgrades: 4.0.0-0.6 -  Metadata: -        description: Beta 2 -Component Versions: -  Kubernetes 1.13.4 -Images: -  NAME                        DIGEST -  aws-machine-controllers     sha256:630e8118038ee97b8b3bbfed7d9b63e06c1346c606e11908064ea3f57bd9ff8e -  cli                         sha256:93e16a8c56ec4031b5fa68683f75910aad57b54160a1e6054b3d3e96d9a4b376 -  cloud-credential-operator   sha256:bbc8d586b2210ac44de554558fd299555e72fb662b6751589d69b173b03aa821 -…​ ----- - -To see the Operators managed on the control plane by the Cluster Version Operator, type: - ----- -$ oc get clusteroperator -NAME                                 VERSION  AVAILABLE PROGRESSING DEGRADED SINCE -cluster-autoscaler                            True      False       False   10h -cluster-storage-operator                      True      False       False   10h -console                                       True      False       False   10h -dns                                           True      False       False   10h -image-registry                                True      False       False   10h -ingress                                       True      False       False   10h -kube-apiserver                                True      False       False   10h -kube-controller-manager                       True      False       False   10h -kube-scheduler                                True      False       False   10h -machine-api                                   True      False       False   10h -machine-config                                True      False       False   10h -marketplace-operator                          True      False       False   10h -monitoring                                    True      False       False   156m -network                                       True      False       False   139m -node-tuning                                   True      False       False   10h -openshift-apiserver                           True      False       False   19m -openshift-authentication                      True      False       False   10h -openshift-cloud-credential-operator           True      False       False   10h -openshift-controller-manager                  True      False       False   10h -openshift-samples                             True      False       False   10h -operator-lifecycle-manager                    True      False       False   10h ----- - -While most of the Cluster Operators listed provide services to the {product-title} cluster, the machine-config Operator in particular is tasked with managing the {op-system} operating systems in the nodes. 
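To drill into any single Operator from that list, describe it or query its status conditions directly. For example, the following commands inspect the machine-config Operator; substitute any Operator name from the previous output, and expect the detailed output to vary by cluster:

[source,terminal]
----
$ oc describe clusteroperator machine-config

$ oc get clusteroperator machine-config -o jsonpath='{.status.conditions[*].type}'
----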
diff --git a/_unused_topics/identity-provider-create-CR.adoc b/_unused_topics/identity-provider-create-CR.adoc deleted file mode 100644 index 8014c4ae6ab6..000000000000 --- a/_unused_topics/identity-provider-create-CR.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-create-CR_{context}"] -= Creating the CR that describes an identity provider - -Before you can add an identity provider to your cluster, create a Custom -Resource (CR) that describes it. - -.Prerequisites - -* Create an {product-title} cluster. - -.Procedure - -Create a CR file to describe the identity provider. A generic file displaying -the structure is below. - ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_identity_provider <1> - mappingMethod: claim <2> - type: <3> - ... ----- -<1> A unique name defining the identity provider. This provider name is -prefixed to provider user names to form an identity name. -<2> Controls how mappings are established between this provider's identities and user objects. -<3> The type of identity provider to be configured. -+ -Provide the parameters that are required for your identity provider type. diff --git a/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc b/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc deleted file mode 100644 index 6d490d6ace10..000000000000 --- a/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="identity-provider-provisioning-user-lookup-mapping_{context}"] -= Manually provisioning a user when using the lookup mapping method - -When using the `lookup` mapping method, user provisioning is done by an external system, via the API. -Typically, identities are automatically mapped to users during login. The 'lookup' mapping method automatically -disables this automatic mapping, which requires you to provision users manually. - - -.Procedure - -If you are using the `lookup` mapping method, use the following steps for each user after configuring -the identity provider: - -. Create an {product-title} User, if not created already: -+ ----- -$ oc create user ----- -+ -For example, the following command creates an {product-title} User `bob`: -+ ----- -$ oc create user bob ----- - -. Create an {product-title} Identity, if not created already. 
Use the name of the identity provider and -the name that uniquely represents this identity in the scope of the identity provider: -+ ----- -$ oc create identity : ----- -+ -The `` is the name of the identity provider in the master configuration, -as shown in the appropriate identity provider section below. -+ -For example, the following commands creates an Identity with identity provider `ldap_provider` and the identity provider user name `bob_s`. -+ ----- -$ oc create identity ldap_provider:bob_s ----- - -. Create a user/identity mapping for the created user and identity: -+ ----- -$ oc create useridentitymapping : ----- -+ -For example, the following command maps the identity to the user: -+ ----- -$ oc create useridentitymapping ldap_provider:bob_s bob ----- diff --git a/_unused_topics/images-s2i-java-pulling-images.adoc b/_unused_topics/images-s2i-java-pulling-images.adoc deleted file mode 100644 index dc9604744b07..000000000000 --- a/_unused_topics/images-s2i-java-pulling-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-java-pulling-images_{context}"] -= Pulling images for Java - -The Red Hat Enterprise Linux (RHEL) 8 image is available through the Red Hat Registry. - -.Procedure - -. To pull the RHEL 8 image, enter the following command: -[source,terminal] ----- -$ podman pull registry.redhat.io/redhat-openjdk-18/openjdk18-openshift ----- - -To use this image on {product-title}, you can either access it directly from the Red Hat Registry or push it into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. - -//// -Your {product-title} resources can then reference the link:https://github.com/jboss-openshift/application-templates/blob/master/jboss-image-streams.json[image stream definition]. -//// diff --git a/_unused_topics/images-s2i-nodejs-pulling-images.adoc b/_unused_topics/images-s2i-nodejs-pulling-images.adoc deleted file mode 100644 index 32fab99ea8ce..000000000000 --- a/_unused_topics/images-s2i-nodejs-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-s2i-nodejs-pulling-images_{context}"] -= Pulling images for Node.js - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Node.js you want: -+ -.Node.js `12` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-12-rhel7:latest ----- -+ -.Node.js `10` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/nodejs-10-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -This image is available on link:quay.io[Quay.io]. 
- -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/nodejs-010-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl-configuration.adoc b/_unused_topics/images-using-images-s2i-perl-configuration.adoc deleted file mode 100644 index 563ba407e4be..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-configuration.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-configuration_{context}"] -= Configuring source-to-image for Perl - -The Perl image supports a number of environment variables which can be set to control the configuration and behavior of the Perl runtime. - -To set these environment variables as part of your image, you can place them into -a `.s2i/environment` file inside your source code repository, or define them in -the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Perl Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`ENABLE_CPAN_TEST` -|When set to `true`, this variable installs all the cpan modules and runs their tests. By default, the testing of the modules is disabled. - -|`CPAN_MIRROR` -|This variable specifies a mirror URL which cpanminus uses to install dependencies. By default, this URL is not specified. - -|`PERL_APACHE2_RELOAD` -|Set this to `true` to enable automatic reloading of modified Perl modules. By default, automatic reloading is disabled. - -|`HTTPD_START_SERVERS` -|The https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers[StartServers] directive sets the number of child server processes created on startup. Default is 8. - -|`HTTPD_MAX_REQUEST_WORKERS` -|Number of simultaneous requests that will be handled by Apache. The default is 256, but it will be automatically lowered if memory is limited. -|=== - -//Verify` oc log` is still valid. diff --git a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc deleted file mode 100644 index de276ad98264..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="images-using-images-s2i-perl-hot-deploying_{context}"] -= Hot deploying for Perl - -Hot deployment allows you to quickly make and deploy changes to your application -without having to generate a new S2I build. To enable hot deployment in this -image, you must set the `PERL_APACHE2_RELOAD` environment variable to `true`. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- -+ -After you enter into the running container, your current directory is set to -`/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc b/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc deleted file mode 100644 index 996f9b752d74..000000000000 --- a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl-pulling-images_{context}"] -= Pulling images for Perl - -//Images comes in two options: - -//* RHEL 8 -//* CentOS 7 - -// *RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Perl you want: -+ -.Perl `5.26` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-526-rhel7:latest ----- -+ -.Perl `5.30` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/perl-530-rhel7:latest ----- - -//// -*CentOS 7 Based Image* - -A CentOS image for Perl 5.16 is available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command: -+ -[source,terminal] ----- -$ podman pull openshift/perl-516-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-perl.adoc b/_unused_topics/images-using-images-s2i-perl.adoc deleted file mode 100644 index 01277ff90a72..000000000000 --- a/_unused_topics/images-using-images-s2i-perl.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-perl_{context}"] -= Perl overview - -{product-title} provides source-to-image (S2I) enabled Perl images for building and running Perl applications. The Perl S2I builder image assembles your application source with any required dependencies to create a new image containing your Perl application. This resulting image can be run either by {product-title} or by a container runtime. 
- -[id="images-using-images-s2i-perl-accessing-logs_{context}"] -== Accessing logs -Access logs are streamed to standard output and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-php-configuration.adoc b/_unused_topics/images-using-images-s2i-php-configuration.adoc deleted file mode 100644 index 7e2ec6f6d7fd..000000000000 --- a/_unused_topics/images-using-images-s2i-php-configuration.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-configuration_{context}"] -= Configuring source-to-image for PHP - -The PHP image supports a number of environment variables which can be set to control the configuration and behavior of the PHP runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -The following environment variables set their equivalent property value in the -`php.ini` file: - -.PHP Environment Variables -[cols="4a,6a,6a",options="header"] -|=== - -|Variable Name |Description |Default - -|`ERROR_REPORTING` -|Informs PHP of the errors, warnings, and notices for which you would like it to -take action. -|`E_ALL & ~E_NOTICE` - -|`DISPLAY_ERRORS` -|Controls if and where PHP outputs errors, notices, and warnings. -|`ON` - -|`DISPLAY_STARTUP_ERRORS` -|Causes any display errors that occur during PHP's startup sequence to be -handled separately from display errors. -|`OFF` - -|`TRACK_ERRORS` -|Stores the last error/warning message in `$php_errormsg` (boolean). -|`OFF` - -|`HTML_ERRORS` -|Links errors to documentation that is related to the error. -|`ON` - -|`INCLUDE_PATH` -|Path for PHP source files. -|`.:/opt/openshift/src:/opt/rh/php55/root/usr/share/pear` - -|`SESSION_PATH` -|Location for session data files. -|`/tmp/sessions` - -|`DOCUMENTROOT` -|Path that defines the document root for your application (for example, `/public`). -|`/` -|=== - -The following environment variable sets its equivalent property value in the -`opcache.ini` file: - -.Additional PHP settings -[cols="3a,6a,1a",options="header"] -|=== - -|Variable Name |Description |Default - -|`OPCACHE_MEMORY_CONSUMPTION` -|The link:http://php.net/manual/en/book.opcache.php[OPcache] shared memory -storage size. -|`16M` - -|`OPCACHE_REVALIDATE_FREQ` -|How often to check script time stamps for updates, in seconds. `0` results in -link:http://php.net/manual/en/book.opcache.php[OPcache] checking for updates on -every request. 
-|`2` -|=== - -You can also override the entire directory used to load the PHP configuration by setting: - -.Additional PHP settings -[cols="3a,6a",options="header"] -|=== - -| Variable Name | Description - -|`PHPRC` -|Sets the path to the `php.ini` file. - -|`*PHP_INI_SCAN_DIR*` -|Path to scan for additional `.ini` configuration files -|=== - -You can use a custom composer repository mirror URL to download packages instead of the default `packagist.org`: - -.Composer Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable Name |Description - -|`COMPOSER_MIRROR` -|Set this variable to use a custom Composer repository mirror URL to download required packages during the build process. -Note: This only affects packages listed in `composer.json`. -|=== - -[id="images-using-images-s2i-php-apache-configuration_{context}"] -== Apache configuration - -If the `DocumentRoot` of the application is nested in the source directory `/opt/openshift/src`, you can provide your own `.htaccess` file to override the default Apache behavior and specify how application requests should be handled. The `.htaccess` file must be located at the root of the application source. diff --git a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc deleted file mode 100644 index f8a852dd3447..000000000000 --- a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-hot-deploying_{context}"] -= Hot deploying for PHP - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. In order to immediately pick up changes made in your application source code, you must run your built image with the `OPCACHE_REVALIDATE_FREQ=0` environment variable. - -You can use the `oc env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc b/_unused_topics/images-using-images-s2i-php-pulling-images.adoc deleted file mode 100644 index 51691eb98a56..000000000000 --- a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php-pulling-images_{context}"] -= Pulling images for PHP - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -The RHEL 8 images are available through the Red Hat Registry. 
- -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of PHP you want: - -.PHP `8.1` -[source,terminal] ----- -$ podman pull registry.redhat.io/ubi9/php-81:latest ----- -+ -.PHP `7.3` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/php-73-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -CentOS images for PHP 5.5 and 5.6 are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Node.js you want: -+ -.PHP `5.5` -[source,terminal] ----- -$ podman pull openshift/php-55-centos7 ----- -+ -.PHP `5.6` -[source,terminal] ----- -$ podman pull openshift/php-56-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-php.adoc b/_unused_topics/images-using-images-s2i-php.adoc deleted file mode 100644 index 116276a93b06..000000000000 --- a/_unused_topics/images-using-images-s2i-php.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-php_{context}"] -= PHP overview - -{product-title} provides source-to-image (S2I) enabled PHP images for building and running PHP applications.The PHP S2I builder image assembles your application source with any required dependencies to create a new image containing your PHP application. This resulting image can be run either by {product-title} or by a container runtime. - -[id="images-using-images-s2i-php-accessing-logs_{context}"] -== Accessing logs - -Access logs are streamed to standard out and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container. diff --git a/_unused_topics/images-using-images-s2i-python-configuration.adoc b/_unused_topics/images-using-images-s2i-python-configuration.adoc deleted file mode 100644 index f2dfd34cbbb9..000000000000 --- a/_unused_topics/images-using-images-s2i-python-configuration.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-configuration_{context}"] -= Configuring source-to-image for Python - -The Python image supports a number of environment variables which can be set to control the configuration and behavior of the Python runtime. - -To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. 
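For example, the relevant fragment of a build configuration's `spec.strategy` might look like the following sketch; the builder image stream tag and the variable values are placeholders rather than recommended settings:

[source,yaml]
----
strategy:
  type: Source
  sourceStrategy:
    from:
      kind: ImageStreamTag
      name: python:3.8   # placeholder builder image stream tag
    env:
    - name: DISABLE_COLLECTSTATIC
      value: "1"
    - name: WEB_CONCURRENCY
      value: "8"
----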
- -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Python Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`APP_FILE` -|This variable specifies the file name passed to the Python interpreter which is responsible for launching the application. This variable is set to `app.py` by default. - -|`APP_MODULE` -|This variable specifies the WSGI callable. It follows the pattern `$(MODULE_NAME):$(VARIABLE_NAME)`, where the module name is a full dotted path and the variable name refers to a function inside the specified module. If you use `setup.py` for installing the application, then the module name can be read from that file and the variable defaults to `application`. - -|`APP_CONFIG` -|This variable indicates the path to a valid Python file with a http://docs.gunicorn.org/en/latest/configure.html[gunicorn configuration]. - -|`DISABLE_COLLECTSTATIC` -|Set it to a nonempty value to inhibit the execution of `manage.py collectstatic` during the build. Only affects Django projects. - -|`DISABLE_MIGRATE` -|Set it to a nonempty value to inhibit the execution of `manage.py migrate` when the produced image is run. Only affects Django projects. - -|`*PIP_INDEX_URL*` -| Set this variable to use a custom index URL or mirror to download required -packages during build process. This only affects packages listed in the -*_requirements.txt_* file. - -| `WEB_CONCURRENCY` -| Set this to change the default setting for the number of http://docs.gunicorn.org/en/stable/settings.html#workers[workers]. By default, this is set to the number of available cores times 4. -|=== diff --git a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc deleted file mode 100644 index 03989935aebb..000000000000 --- a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-hot-deploying_{context}"] -= Hot deploying - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. If you are using Django, hot deployment works out of the box. - -To enable hot deployment while using Gunicorn, ensure you have a Gunicorn -configuration file inside your repository with https://gunicorn-docs.readthedocs.org/en/latest/settings.html#reload[the `reload` option set to `true`. Specify your configuration file using the `APP_CONFIG` environment variable. For example, see the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -. Procedure - -To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. 
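For example, after a Gunicorn configuration file with `reload` enabled is committed to the repository, you can point the `APP_CONFIG` variable at it on an existing deployment configuration. The object name and file name below are placeholders:

[source,terminal]
----
$ oc set env dc/<application_name> APP_CONFIG=gunicorn_conf.py
----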
diff --git a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc b/_unused_topics/images-using-images-s2i-python-pulling-images.adoc deleted file mode 100644 index 0d90476cf0b7..000000000000 --- a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python-pulling-images_{context}"] -= Pulling images for Python - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Python you want: -+ -.Python `2.7` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/python-27-rhel7:latest ----- -+ -.Python `3.9` -[source,terminal] ----- -$ podman pull registry.redhat.io/ubi9/python-39:latest ----- -+ -.Python `3.8` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/python-38-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -These images are available on link:quay.io[Quay.io]. - -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Python you want: -+ -.Python `2.7` -[source,terminal] ----- -$ podman pull centos/python-27-centos7 ----- -+ -.Python `3.3` -[source,terminal] ----- -$ podman pull openshift/python-33-centos7 ----- -+ -.Python `3.4` -[source,terminal] ----- -$ podman pull centos/python-34-centos7 ----- -+ -.Python `3.5` -[source,terminal] ----- -$ podman pull centos/python-35-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-python.adoc b/_unused_topics/images-using-images-s2i-python.adoc deleted file mode 100644 index 92c996b56fa7..000000000000 --- a/_unused_topics/images-using-images-s2i-python.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-python_{context}"] -= Python overview - -{product-title} provides source-to-image (S2I) enabled Python images for building and running Python applications. The Python S2I builder image assembles your application source with any required dependencies to create a new image containing your Python application. This resulting image can be run either by {product-title} or by a container runtime. diff --git a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc b/_unused_topics/images-using-images-s2i-ruby-configuration.adoc deleted file mode 100644 index 07841e122384..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused.
Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-configuration_{context}"] -= Configuring source-to-image for Ruby - -The Ruby image supports a number of environment variables which can be set to control the configuration and behavior of the Ruby runtime. - -To set these environment variables as part of your image, you can place them into a `_.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition. - -You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations. - -[NOTE] -==== -Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps. -==== - -.Ruby Environment Variables -[cols="4a,6a",options="header"] -|=== - -|Variable name |Description - -|`RACK_ENV` -|This variable specifies the environment within which the Ruby application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RACK_ENV` is set to `production`. The default value is `production`. - -|`RAILS_ENV` -|This variable specifies the environment within which the Ruby on Rails application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RAILS_ENV` is set to `production`. This variable is set to `${RACK_ENV}` by default. - -|`DISABLE_ASSET_COMPILATION` -|When set to `true`, this variable disables the process of asset compilation. Asset compilation only happens when the application runs in a production environment. Therefore, you can use this variable when assets have already been compiled. - -|`PUMA_MIN_THREADS`, `PUMA_MAX_THREADS` -|This variable indicates the minimum and maximum number of threads that will be available in Puma's thread pool. - -|`PUMA_WORKERS` -|This variable indicates the number of worker processes to be launched in Puma's clustered mode, when Puma runs more than two processes. If not explicitly set, the default behavior sets `PUMA_WORKERS` to a value that is appropriate for the memory available to the container and the number of cores on the host. - -|`RUBYGEM_MIRROR` -|Set this variable to use a custom RubyGems mirror URL to download required gem packages during the build process. This environment variable is only available for Ruby 2.2+ images. -|=== diff --git a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc deleted file mode 100644 index 6463af2986fb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="images-using-images-s2i-ruby-hot-deploying_{context}"] -== Hot deploying for Ruby - -Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. The method for enabling hot deployment in this image differs based on the application type. - -*Ruby on Rails applications* - -.Procedure - -For Ruby on Rails application, run the built Rails application with the `RAILS_ENV=development` environment variable passed to the running pod. - -* For an existing deployment configuration, you can use the `oc set env` command: -+ -[source,terminal] ----- -$ oc set env dc/rails-app RAILS_ENV=development ----- - -*Other Types of Ruby applications such as Sinatra or Padrino* - -For other types of Ruby applications, your application must be built with a gem that can reload the server every time a change to the source code is made inside the running container. Those gems are: - -* Shotgun -* Rerun -* Rack-livereload - -To be able to run your application in development mode, you must modify the S2I `run` script so that the web server is launched by the chosen gem, which checks for changes in the source code. - -After you build your application image with your version of the S2I `run` script, run the image with the `RACK_ENV=development` environment variable. For example, you can use the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects. - -[WARNING] -==== -You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment. -==== - -.Procedure - -. To change your source code in a running pod, use the `oc rsh` command to enter the container: -+ -[source,terminal] ----- -$ oc rsh ----- - -After you enter into the running container, your current directory is set to `/opt/app-root/src`, where the source code is located. diff --git a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc b/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc deleted file mode 100644 index 9829367e28eb..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby-pulling-images_{context}"] -= Pulling images for Ruby - -//These images come in two options: - -//* RHEL 8 -//* CentOS 7 - -//*RHEL 8 Based Images* - -The RHEL 8 images are available through the Red Hat Registry. - -.Procedure - -* To pull the RHEL 8 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.5` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-25-rhel7:latest ----- -+ -.Ruby `2.6` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-26-rhel7:latest ----- -+ -.Ruby `2.7` -[source,terminal] ----- -$ podman pull registry.redhat.io/rhscl/ruby-27-rhel7:latest ----- - -//// -*CentOS 7 Based Images* - -These images are available on link:quay.io[Quay.io]. 
- -.Procedure - -* To pull the CentOS 7 image, enter the following command for the version of Ruby you want: -+ -.Ruby `2.0` -[source,terminal] ----- -$ podman pull openshift/ruby-20-centos7 ----- -+ -.Ruby `2.2` -[source,terminal] ----- -$ podman pull openshift/ruby-22-centos7 ----- -+ -.Ruby `2.3` -[source,terminal] ----- -$ podman pull centos/ruby-23-centos7 ----- -//// - -To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-registry}. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the -image stream. diff --git a/_unused_topics/images-using-images-s2i-ruby.adoc b/_unused_topics/images-using-images-s2i-ruby.adoc deleted file mode 100644 index feed3359d273..000000000000 --- a/_unused_topics/images-using-images-s2i-ruby.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="images-using-images-s2i-ruby_{context}"] -= Ruby overview - -{product-title} provides source-to-image (S2I) enabled Ruby images for building and running Ruby applications. The Ruby S2I builder image assembles your application source with any required dependencies to create a new image containing your Ruby application. This resulting image can be run either by {product-title} or by a container runtime. diff --git a/_unused_topics/installation-about-custom.adoc b/_unused_topics/installation-about-custom.adoc deleted file mode 100644 index 8e26117c63b6..000000000000 --- a/_unused_topics/installation-about-custom.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="installation-about-custom_{context}"] -= About the custom installation - -You can use the {product-title} installation program to customize four levels -of the program: - -* {product-title} itself -* The cluster platform -* Kubernetes -* The cluster operating system - -Changes to {product-title} and its platform are managed and supported, but -changes to Kubernetes and the cluster operating system currently are not. If -you customize unsupported levels program levels, future installation and -upgrades might fail. - -When you select values for the prompts that the installation program presents, -you customize {product-title}. You can further modify the cluster platform -by modifying the `install-config.yaml` file that the installation program -uses to deploy your cluster. In this file, you can make changes like setting the -number of machines that the control plane uses, the type of virtual machine -that the cluster deploys, or the CIDR range for the Kubernetes service network. - -It is possible, but not supported, to modify the Kubernetes objects that are injected into the cluster. -A common modification is additional manifests in the initial installation. -No validation is available to confirm the validity of any modifications that -you make to these manifests, so if you modify these objects, you might render -your cluster non-functional. -[IMPORTANT] -==== -Modifying the Kubernetes objects is not supported. -==== - -Similarly it is possible, but not supported, to modify the -Ignition config files for the bootstrap and other machines. 
No validation is -available to confirm the validity of any modifications that -you make to these Ignition config files, so if you modify these objects, you might render -your cluster non-functional. - -[IMPORTANT] -==== -Modifying the Ignition config files is not supported. -==== - -To complete a custom installation, you use the installation program to generate -the installation files and then customize them. -The installation status is stored in a hidden -file in the asset directory and contains all of the installation files. diff --git a/_unused_topics/installation-creating-worker-machineset.adoc b/_unused_topics/installation-creating-worker-machineset.adoc deleted file mode 100644 index fab07717826c..000000000000 --- a/_unused_topics/installation-creating-worker-machineset.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * none - -[id="installation-creating-worker-machineset_{context}"] -= Creating worker nodes that the cluster manages - -After your cluster initializes, you can create workers that are controlled by -a MachineSet in your Amazon Web Services (AWS) user-provisioned Infrastructure -cluster. - -.Prerequisites - -* Install a cluster on AWS using infrastructer that you provisioned. - -.Procedure - -. Optional: Launch worker nodes that are controlled by the machine API. -. View the list of MachineSets in the `openshift-machine-api` namespace: -+ ----- -$ oc get machinesets --namespace openshift-machine-api -NAME DESIRED CURRENT READY AVAILABLE AGE -test-tkh7l-worker-us-east-2a 1 1 11m -test-tkh7l-worker-us-east-2b 1 1 11m -test-tkh7l-worker-us-east-2c 1 1 11m ----- -+ -Note the `NAME` of each MachineSet. Because you use a different subnet than the -installation program expects, the worker MachineSets do not use the correct -network settings. You must edit each of these MachineSets. - -. 
Edit each worker MachineSet to provide the correct values for your cluster: -+ ----- -$ oc edit machineset --namespace openshift-machine-api test-tkh7l-worker-us-east-2a -o yaml -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - creationTimestamp: 2019-03-14T14:03:03Z - generation: 1 - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: test-tkh7l-worker-us-east-2a - namespace: openshift-machine-api - resourceVersion: "2350" - selfLink: /apis/machine.openshift.io/v1beta1/namespaces/openshift-machine-api/machinesets/test-tkh7l-worker-us-east-2a - uid: e2a6c8a6-4661-11e9-a9b0-0296069fd3a2 -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: test-tkh7l - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: test-tkh7l-worker-us-east-2a - spec: - metadata: - creationTimestamp: null - providerSpec: - value: - ami: - id: ami-07e0e0e0035b5a3fe <1> - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - ebs: - iops: 0 - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: test-tkh7l-worker-profile - instanceType: m4.large - kind: AWSMachineProviderConfig - metadata: - creationTimestamp: null - placement: - availabilityZone: us-east-2a - region: us-east-2 - publicIp: null - securityGroups: - - filters: - - name: tag:Name - values: - - test-tkh7l-worker-sg <2> - subnet: - filters: - - name: tag:Name - values: - - test-tkh7l-private-us-east-2a - tags: - - name: kubernetes.io/cluster/test-tkh7l - value: owned - userDataSecret: - name: worker-user-data - versions: - kubelet: "" -status: - fullyLabeledReplicas: 1 - observedGeneration: 1 - replicas: 1 ----- -<1> Specify the {op-system-first} AMI to use for your worker nodes. Use the same -value that you specified in the parameter values for your control plane and -bootstrap templates. -<2> Specify the name of the worker security group that you created in the form -`-worker-sg`. `` is the same -infrastructure name that you extracted from the Ignition config metadata, -which has the format `-`. - -//// -. Optional: Replace the `subnet` stanza with one that specifies the subnet -to deploy the machines on: -+ ----- -subnet: - filters: - - name: tag: <1> - values: - - test-tkh7l-private-us-east-2a <2> ----- -<1> Set the `` of the tag to `Name`, `ID`, or `ARN`. -<2> Specify the `Name`, `ID`, or `ARN` value for the subnet. This value must -match the `tag` type that you specify. -//// - -. 
View the machines in the `openshift-machine-api` namespace and confirm that -they are launching: -+ ----- -$ oc get machines --namespace openshift-machine-api -NAME INSTANCE STATE TYPE REGION ZONE AGE -test-tkh7l-worker-us-east-2a-hxlqn i-0e7f3a52b2919471e pending m4.4xlarge us-east-2 us-east-2a 3s ----- diff --git a/_unused_topics/installation-osp-troubleshooting.adoc b/_unused_topics/installation-osp-troubleshooting.adoc deleted file mode 100644 index 8b5bcff20bd9..000000000000 --- a/_unused_topics/installation-osp-troubleshooting.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * n/a - -[id="installation-osp-customizing_{context}"] - -= Troubleshooting {product-title} on OpenStack installations - -// Structure as needed in the end. This is very much a WIP. -// A few more troubleshooting and/or known issues blurbs incoming - -Unfortunately, there will always be some cases where {product-title} fails to install properly. In these events, it is helpful to understand the likely failure modes as well as how to troubleshoot the failure. - -This document discusses some troubleshooting options for {rh-openstack}-based -deployments. For general tips on troubleshooting the installation program, see the [Installer Troubleshooting](../troubleshooting.md) guide. - -== View instance logs - -{rh-openstack} CLI tools must be installed, then: - ----- -$ openstack console log show ----- - -== Connect to instances via SSH - -Get the IP address of the machine on the private network: -``` -openstack server list | grep master -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | -``` - -And connect to it using the control plane machine currently holding the API as a jumpbox: - -``` -ssh -J core@${floating IP address}<1> core@ -``` -<1> The floating IP address assigned to the control plane machine. diff --git a/_unused_topics/looking-inside-nodes.adoc b/_unused_topics/looking-inside-nodes.adoc deleted file mode 100644 index 26ab2dccd6be..000000000000 --- a/_unused_topics/looking-inside-nodes.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// - -[id="looking-inside-openshift-nodes_{context}"] -= Looking inside {product-title} nodes - -Directly accessing a node is strongly discouraged. Nodes are meant to be managed entirely from the cluster and that are considered tainted if you log in to a node and change anything. That said, there might be times when you want to troubleshoot a problem on a node or simply go onto a node in a test environment to see how things work. - -For debugging purposes, the oc debug command lets you go inside any pod and look around. For nodes, in particular, you open a tools pod on the node, then chroot to the node’s host filesystem. At that point, you are effectively working on the node. 
Here’s how to do that: - ----- -$ oc get nodes - -NAME STATUS ROLES AGE VERSION - -ip-10-0-0-1.us-east-2.compute.internal Ready worker 3h19m v1.25.0 - -ip-10-0-0-39.us-east-2.compute.internal Ready master 3h37m v1.25.0 - -… - -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ ----- - ----- -$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal - -Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …​ - -To use host binaries, run chroot /host - -If you don’t see a command prompt, try pressing enter. - -sh-4.3# ----- - -As noted, you can change to the root of the node’s filesystem by typing chroot /host and running commands from the host on that filesystem as though you were logged in directly from the host. Here are some examples of commands you can run to see what is happening on the node: - -* crictl: This CRI-O client command provides many of the same operations for examining images images and containers that the docker CLI offers the Docker Container Engine. One difference is that crictl can also act on pods. If you are debugging issues with containers run the {product-title} users or the {product-title} control plane, crictl is the best tool to use. -* podman: Provides many of the same features as crictl and docker CLI tools, but requires no container engine. On a node, podman can be useful for debugging container issues if the CRI-O runtime isn’t working. -* skopeo: Copy, delete, and inspect container images with skopeo. -* rpm-ostree: Use e.g. rpm-ostree status to look at the operating system state. -* journalctl: The standard journalctl command can be very useful for querying the system journal for messages that provide information about applications running on the system. - -Because the nodes are {op-system} Linux-based systems, you can use standard Linux commands to explore the nodes as well. These include ps, netstat, ip, route, rpm, and many others. You can change to the /etc directory on the host and look into configuration files for services running directly on the host. For example, look at /etc/crio/crio.conf for CRI-O settings, /etc/resolv.conf for DNS server settings, and /etc/ssh for SSH service configuration and keys. - -If you are unable to reach the nodes with oc debug, because something is wrong with the {product-title} cluster, you might be able to debug the nodes by setting up a bastion host on the cluster. For information on setting up a bastion host for {product-title}, see https://github.com/eparis/ssh-bastion[ssh-bastion]. diff --git a/_unused_topics/machine-configs-and-pools.adoc b/_unused_topics/machine-configs-and-pools.adoc deleted file mode 100644 index d627624ec571..000000000000 --- a/_unused_topics/machine-configs-and-pools.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="machine-configs-and-pools_{context}"] -= Machine Configs and Machine Config Pools -Machine Config Pools manage a cluster of nodes and their corresponding -Machine Configs. Machine Configs contain configuration information for a -cluster. 
- -To list all Machine Config Pools that are known: - ----- -$ oc get machineconfigpools -NAME CONFIG UPDATED UPDATING DEGRADED -master master-1638c1aea398413bb918e76632f20799 False False False -worker worker-2feef4f8288936489a5a832ca8efe953 False False False ----- - -To list all Machine Configs: ----- -$ oc get machineconfig -NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED OSIMAGEURL -00-master 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-master-ssh 4.0.0-0.150.0.0-dirty 16m -00-worker 4.0.0-0.150.0.0-dirty 2.2.0 16m -00-worker-ssh 4.0.0-0.150.0.0-dirty 16m -01-master-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -01-worker-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m -master-1638c1aea398413bb918e76632f20799 4.0.0-0.150.0.0-dirty 2.2.0 16m -worker-2feef4f8288936489a5a832ca8efe953 4.0.0-0.150.0.0-dirty 2.2.0 16m ----- - -To list all KubeletConfigs: - ----- -$ oc get kubeletconfigs ----- - -To get more detailed information about a KubeletConfig, including the reason for -the current condition: - ----- -$ oc describe kubeletconfig ----- - -For example: - ----- -# oc describe kubeletconfig set-max-pods - -Name: set-max-pods <1> -Namespace: -Labels: -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: KubeletConfig -Metadata: - Creation Timestamp: 2019-02-05T16:27:20Z - Generation: 1 - Resource Version: 19694 - Self Link: /apis/machineconfiguration.openshift.io/v1/kubeletconfigs/set-max-pods - UID: e8ee6410-2962-11e9-9bcc-664f163f5f0f -Spec: - Kubelet Config: <2> - Max Pods: 100 - Machine Config Pool Selector: <3> - Match Labels: - Custom - Kubelet: small-pods -Events: ----- - -<1> The name of the KubeletConfig. -<2> The user defined configuration. -<3> The Machine Config Pool selector to apply the KubeletConfig to. \ No newline at end of file diff --git a/_unused_topics/managing-dedicated-readers-group.adoc b/_unused_topics/managing-dedicated-readers-group.adoc deleted file mode 100644 index 511dc8313ab6..000000000000 --- a/_unused_topics/managing-dedicated-readers-group.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// administering_a_cluster/dedicated-admin-role.adoc - -[id="dedicated-managing-dedicated-readers-group_{context}"] -= Managing the dedicated-readers group - -Users with a `dedicated-reader` role are granted edit and view access to the -`dedicated-reader` project and view-only access to the other projects. - -To view a list of current dedicated readers by user name, you can use the -following command: - ----- -$ oc describe group dedicated-readers ----- - -To add a new member to the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups add-users dedicated-readers ----- - -To remove an existing user from the `dedicated-readers` group, if you have -`dedicated-admin` access: - ----- -$ oc adm groups remove-users dedicated-readers ----- diff --git a/_unused_topics/metering-resources.adoc b/_unused_topics/metering-resources.adoc deleted file mode 100644 index 7b0f67114a9a..000000000000 --- a/_unused_topics/metering-resources.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-install-metering.adoc - -[id="metering-resources_{context}"] -= Metering resources - -Metering has many resources, which can be used to manage the deployment and installation of Metering, as well as the reporting functionality Metering provides. 
- -Metering is managed using the following CustomResourceDefinitions (CRDs): - -[cols="1,7"] -|=== - -|*MeteringConfig* |Configures the Metering stack for deployment. Contains customizations and configuration options to control each component that makes up the Metering stack. - -|*Reports* |Controls what query to use, when, and how often the query should be run, and where to store the results. - -|*ReportQueries* |Contains the SQL queries used to perform analysis on the data contained with in ReportDataSources. - -|*ReportDataSources* |Controls the data available to ReportQueries and Reports. Allows configuring access to different databases for use within Metering. - -|=== diff --git a/_unused_topics/microshift-adding-containers-to-blueprint.adoc b/_unused_topics/microshift-adding-containers-to-blueprint.adoc deleted file mode 100644 index d4eb48d33d08..000000000000 --- a/_unused_topics/microshift-adding-containers-to-blueprint.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="adding-microshift-container-images_{context}"] -= Adding the {product-title} container images - -You can embed {product-title}'s container images into the {op-system-ostree} images so that they are immediately available to the CRI-O container engine after booting. Embedded container images are not pulled over the network from a container registry. In Image Builder, a container image is embedded by adding a reference to it to the Image Builder blueprint. - -The following syntax must be used to add a configuration section to the blueprint file. You can then add your the container image to embed in the {op-system-ostree} image. - -.Example syntax for adding a container image to a blueprint - -[source,toml] ----- -[[containers]] -source = "" ----- - -.Prerequisites - -* You have installed jq. - -.Procedure - -You must have the exact list of container image references used by the {product-title} version to embed {product-title}'s container images. Use the `microshift-release-info` RPM package matching the version of the `microshift` RPM in your blueprint. Use the following procedure. - -. Download the `microshift-release-info` RPM package matching your {product-title} version by running the following commands: -+ -[source,terminal] ----- -$ VERSION=$(sudo yum list | awk "/^microshift\./ {print \$2;}") ----- -+ -[source,terminal] ----- -$ yum download microshift-release-info-${VERSION} ----- - -. Extract the release info by running the following command: -+ -[source,terminal] ----- -$ rpm2cpio microshift-release-info-${VERSION}.noarch.rpm | cpio -idmv -./usr/share/microshift/release/release-aarch64.json -./usr/share/microshift/release/release-x86_64.json ----- - -. Generate the lines to append to your blueprint using the release info for your CPU architecture by running the following command: -+ -[source,terminal] ----- -$ jq -r '.images | .[] | ("[[containers]]\nsource = \"" + . 
+ "\"\n")' ./usr/share/microshift/release/release-$(uname -i).json ----- -+ -.Brief output sample -+ -[source, toml] ----- -[[containers]] -source = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9945c3f5475a37e145160d2fe6bb21948f1024a856827bc9e7d5bc882f44a750" - -[[containers]] -source = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:82cfef91557f9a70cff5a90accba45841a37524e9b93f98a97b20f6b2b69e5db" ----- diff --git a/_unused_topics/microshift-man-config-ovs-bridge.adoc b/_unused_topics/microshift-man-config-ovs-bridge.adoc deleted file mode 100644 index bf6226f997ae..000000000000 --- a/_unused_topics/microshift-man-config-ovs-bridge.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//FIXME: need updated config procedure for customers that will persist across reboots -//this module content is unused as of 4.13 - -//=== Manually configuring OVS bridge br-ex -//.Procedure -//Manually configure the OVS bridge br-ex by running the following commands. - -//* Initiate OVS: -//+ -//[source, terminal] -//---- -//$ sudo systemctl enable openvswitch --now -//---- -//* Add the network bridge: -//+ -//[source, terminal] -//---- -//$ sudo ovs-vsctl add-br br-ex -//---- -//* Add the interface to the network bridge: -//+ -//[source, terminal] -//---- -//$ sudo ovs-vsctl add-port br-ex -//---- -//The `` is the network interface name where the node IP address is assigned. -//* Get the bridge up and running: -//+ -//[source, terminal] -//---- -//$ sudo ip link set br-ex up -//---- -//* After `br-ex up` is running, assign the node IP address to `br-ex` bridge: -//[source, terminal] -//---- -//$ sudo ... -//---- -//[NOTE] -//Adding a physical interface to `br-ex` bridge will disconnect the ssh connection on the node IP address. \ No newline at end of file diff --git a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc b/_unused_topics/microshift-nodeport-unreachable-workaround.adoc deleted file mode 100644 index 4bef2a62fce3..000000000000 --- a/_unused_topics/microshift-nodeport-unreachable-workaround.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * module may be unused in 4.13 - -:_content-type: PROCEDURE -[id="microshift-nodeport-unreachable-workaround_{context}"] -= Manually restarting the `ovnkube-master` pod to resume node port traffic - -After you install {product-title}, NodePort service traffic might stop. To troubleshoot this issue, manually restart the `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* A cluster installed on infrastructure configured with the Open Virtual Network (OVN)-Kubernetes network plugin. -* Access to the `kubeconfig` file. -* The KUBECONFIG environment variable is set. - -.Procedure - -Run the commands listed in each step that follows to restore the `NodePort` service traffic after you install{product-title}: - -. Find the name of the ovn-master pod that you want to restart by running the following command: -+ -[source, terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}') ----- - -. Force a restart of the of the ovnkube-master pod by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- - -. 
Optional: To confirm that the ovnkube-master pod restarted, run the following command: -+ -[source, terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -If the pod restarted, the listing of the running pods shows a different ovnkube-master pod name and age consistent with the procedure you just completed. - -. Verify that the `NodePort` service can now be reached. - diff --git a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc b/_unused_topics/monitoring-configuring-etcd-monitoring.adoc deleted file mode 100644 index 66e1144babb9..000000000000 --- a/_unused_topics/monitoring-configuring-etcd-monitoring.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="configuring-etcd-monitoring_{context}"] -= Configuring etcd monitoring - -If the `etcd` service does not run correctly, successful operation of the whole {product-title} cluster is in danger. Therefore, it is reasonable to configure monitoring of `etcd`. - -.Procedure - -. Verify that the monitoring stack is running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get pods -NAME READY STATUS RESTARTS AGE -alertmanager-main-0 3/3 Running 0 34m -alertmanager-main-1 3/3 Running 0 33m -alertmanager-main-2 3/3 Running 0 33m -cluster-monitoring-operator-67b8797d79-sphxj 1/1 Running 0 36m -grafana-c66997f-pxrf7 2/2 Running 0 37s -kube-state-metrics-7449d589bc-rt4mq 3/3 Running 0 33m -node-exporter-5tt4f 2/2 Running 0 33m -node-exporter-b2mrp 2/2 Running 0 33m -node-exporter-fd52p 2/2 Running 0 33m -node-exporter-hfqgv 2/2 Running 0 33m -prometheus-k8s-0 4/4 Running 1 35m -prometheus-k8s-1 0/4 ContainerCreating 0 21s -prometheus-operator-6c9fddd47f-9jfgk 1/1 Running 0 36m ----- - -. Open the configuration file for the cluster monitoring stack: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Under `config.yaml: |+`, add the `etcd` section. -+ -.. If you run `etcd` in static pods on your control plane nodes (also known as master nodes), you can specify the `etcd` nodes using the selector: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - selector: - openshift.io/component: etcd - openshift.io/control-plane: "true"* ----- -+ -.. If you run `etcd` on separate hosts, you must specify the nodes using IP addresses: -+ -[subs="quotes"] ----- -... -data: - config.yaml: |+ - ... - *etcd: - targets: - ips: - - "127.0.0.1" - - "127.0.0.2" - - "127.0.0.3"* ----- -+ -If `etcd` nodes IP addresses change, you must update this list. - -. Verify that the `etcd` service monitor is now running: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get servicemonitor -NAME AGE -alertmanager 35m -*etcd 1m* -kube-apiserver 36m -kube-controllers 36m -kube-state-metrics 34m -kubelet 36m -node-exporter 34m -prometheus 36m -prometheus-operator 37m ----- -+ -It might take up to a minute for the `etcd` service monitor to start. - -. Now you can navigate to the Web interface to see more information about status of `etcd` monitoring: -+ -.. To get the URL, run: -+ -[subs="quotes"] ----- -$ oc -n openshift-monitoring get routes -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -... -prometheus-k8s prometheus-k8s-openshift-monitoring.apps.msvistun.origin-gce.dev.openshift.com prometheus-k8s web reencrypt None ----- -+ -.. Using `https`, navigate to the URL listed for `prometheus-k8s`. Log in. - -. Ensure the user belongs to the `cluster-monitoring-view` role. 
This role provides access to viewing cluster monitoring UIs. For example, to add user `developer` to `cluster-monitoring-view`, run: - - $ oc adm policy add-cluster-role-to-user cluster-monitoring-view developer -+ - -. In the Web interface, log in as the user belonging to the `cluster-monitoring-view` role. - -. Click *Status*, then *Targets*. If you see an `etcd` entry, `etcd` is being monitored. -+ -image::etcd-no-certificate.png[] - -While `etcd` is being monitored, Prometheus is not yet able to authenticate against `etcd`, and so cannot gather metrics. To configure Prometheus authentication against `etcd`: - -. Copy the `/etc/etcd/ca/ca.crt` and `/etc/etcd/ca/ca.key` credentials files from the control plane node to the local machine: -+ -[subs="quotes"] ----- -$ ssh -i gcp-dev/ssh-privatekey cloud-user@35.237.54.213 -... ----- - -. Create the `openssl.cnf` file with these contents: -+ ----- -[ req ] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[ req_distinguished_name ] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, keyEncipherment, digitalSignature -extendedKeyUsage=serverAuth, clientAuth ----- - -. Generate the `etcd.key` private key file: -+ -[subs="quotes"] ----- -$ openssl genrsa -out etcd.key 2048 ----- - -. Generate the `etcd.csr` certificate signing request file: -+ -[subs="quotes"] ----- -$ openssl req -new -key etcd.key -out etcd.csr -subj "/CN=etcd" -config openssl.cnf ----- - -. Generate the `etcd.crt` certificate file: -+ -[subs="quotes"] ----- -$ openssl x509 -req -in etcd.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd.crt -days 365 -extensions v3_req -extfile openssl.cnf ----- - -. Put the credentials into format used by {product-title}: -+ ----- -cat <<-EOF > etcd-cert-secret.yaml -apiVersion: v1 -data: - etcd-client-ca.crt: "$(cat ca.crt | base64 --wrap=0)" - etcd-client.crt: "$(cat etcd.crt | base64 --wrap=0)" - etcd-client.key: "$(cat etcd.key | base64 --wrap=0)" -kind: Secret -metadata: - name: kube-etcd-client-certs - namespace: openshift-monitoring -type: Opaque -EOF ----- -+ -This creates the *_etcd-cert-secret.yaml_* file - -. Apply the credentials file to the cluster: - ----- -$ oc apply -f etcd-cert-secret.yaml ----- - -. Visit the "Targets" page of the Web interface again. Verify that `etcd` is now being correctly monitored. It might take several minutes for changes to take effect. -+ -image::etcd-monitoring-working.png[] diff --git a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc b/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc deleted file mode 100644 index 830af9f53b55..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch-pagerduty_{context}"] -== Dead man's switch PagerDuty - -https://www.pagerduty.com/[PagerDuty] supports "Dead man's switch" through an integration called https://deadmanssnitch.com/[Dead Man's Snitch]. You can enable it. - -.Procedure - -* Add a `PagerDuty` configuration to the default `deadmansswitch` receiver. -+ -For example, you can configure Dead Man's Snitch to page the operator if the "Dead man's switch" alert is silent for 15 minutes. With the default Alertmanager configuration, the Dead man's switch alert is repeated every five minutes. If Dead Man's Snitch triggers after 15 minutes, it indicates that the notification has been unsuccessful at least twice. 
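As a rough, hypothetical sketch of what such a configuration might look like (the receiver name and the repeat interval follow the defaults described above; the `service_key` value is a placeholder for the integration key you obtain from Dead Man's Snitch):

[source,yaml]
----
route:
  routes:
  # Route the always-firing "Dead man's switch" alert to its own receiver.
  - match:
      alertname: DeadMansSwitch
    repeat_interval: 5m
    receiver: deadmansswitch
receivers:
# Hypothetical PagerDuty (Dead Man's Snitch) receiver; replace the placeholder key.
- name: deadmansswitch
  pagerduty_configs:
  - service_key: <dead-mans-snitch-integration-key>
----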
- -[role="_additional-resources"] -.Additional resources - -// FIXME describe the procedure instead of linking * To learn how to add a `PagerDuty` configuration to the default `deadmansswitch` receiver, see LINK. -* To learn how to configure Dead Man's Snitch for PagerDuty, see https://www.pagerduty.com/docs/guides/dead-mans-snitch-integration-guide/[Dead Man’s Snitch Integration Guide]. diff --git a/_unused_topics/monitoring-dead-mans-switch.adoc b/_unused_topics/monitoring-dead-mans-switch.adoc deleted file mode 100644 index db473bd2202e..000000000000 --- a/_unused_topics/monitoring-dead-mans-switch.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="dead-mans-switch_{context}"] -== Dead man's switch - -{product-title} Monitoring ships with a "Dead man's switch" to ensure the availability of the monitoring infrastructure. - -The "Dead man's switch" is a simple Prometheus alerting rule that always triggers. The Alertmanager continuously sends notifications for the dead man's switch to the notification provider that supports this functionality. This also ensures that communication between the Alertmanager and the notification provider is working. - -This mechanism is supported by PagerDuty to issue alerts when the monitoring system itself is down. - diff --git a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc b/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc deleted file mode 100644 index ea4c252779cf..000000000000 --- a/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-dynamically-provisioned-storage_{context}"] -= Enabling dynamically-provisioned storage - -Instead of statically-provisioned storage, you can use dynamically-provisioned storage. - -.Procedure - -. To enable dynamic storage for Prometheus and Alertmanager, set the following parameters to `true` in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_enabled` (Default: false) -* `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` (Default: false) -+ -. Optional: After you enable dynamic storage, you can also set the `storageclass` for the persistent volume claim for each component in the following parameters in the Ansible inventory file: -+ -* `openshift_cluster_monitoring_operator_prometheus_storage_class_name` (default: "") -* `openshift_cluster_monitoring_operator_alertmanager_storage_class_name` (default: "") -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - -[role="_additional-resources"] -.Additional resources - -* See https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/[Dynamic Volume Provisioning] for details. 
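A minimal sketch of the relevant Ansible inventory entries, assuming a hypothetical `gp2` storage class name, might look like the following:

----
# Enable dynamically provisioned storage for Prometheus and Alertmanager.
openshift_cluster_monitoring_operator_prometheus_storage_enabled=true
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true

# Optional: pin the persistent volume claims to a specific storage class.
# "gp2" is only an example; use a storage class that exists in your cluster.
openshift_cluster_monitoring_operator_prometheus_storage_class_name=gp2
openshift_cluster_monitoring_operator_alertmanager_storage_class_name=gp2
----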
diff --git a/_unused_topics/monitoring-enabling-persistent-storage.adoc b/_unused_topics/monitoring-enabling-persistent-storage.adoc deleted file mode 100644 index b9bd16207584..000000000000 --- a/_unused_topics/monitoring-enabling-persistent-storage.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="enabling-persistent-storage_{context}"] -= Enabling persistent storage - -By default, persistent storage is disabled for both Prometheus time-series data and for Alertmanager notifications and silences. You can configure the cluster to persistently store any one of them or both. - -.Procedure - -* To enable persistent storage of Prometheus time-series data, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -+ -To enable persistent storage of Alertmanager notifications and silences, set this variable to `true` in the Ansible inventory file: -+ -`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` - diff --git a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc b/_unused_topics/monitoring-full-list-of-configuration-variables.adoc deleted file mode 100644 index 65fa04e17809..000000000000 --- a/_unused_topics/monitoring-full-list-of-configuration-variables.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="full-list-of-configuration-variables_{context}"] -= Full list of configuration variables - -This table contains the full list of the inventory file variables for configuring the Cluster Monitoring Operator: - -.Cluster Monitoring Operator Ansible variables -[options="header"] -|=== - -|Variable |Description - -|`openshift_cluster_monitoring_operator_install` -| Deploy the Cluster Monitoring Operator if `true`. Otherwise, undeploy. This variable is set to `true` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_capacity` -| The persistent volume claim size for each of the Prometheus instances. This variable applies only if `openshift_cluster_monitoring_operator_prometheus_storage_enabled` is set to `true`. Defaults to `50Gi`. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_capacity` -| The persistent volume claim size for each of the Alertmanager instances. This variable applies only if `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` is set to `true`. Defaults to `2Gi`. - -|`openshift_cluster_monitoring_operator_node_selector` -| Set to the desired, existing [node selector] to ensure that pods are placed onto nodes with specific labels. Defaults to `node-role.kubernetes.io/infra=true`. - -|`openshift_cluster_monitoring_operator_alertmanager_config` -| Configures Alertmanager. - -|`openshift_cluster_monitoring_operator_prometheus_storage_enabled` -| Enable persistent storage of Prometheus' time-series data. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_enabled` -| Enable persistent storage of Alertmanager notifications and silences. This variable is set to `false` by default. - -|`openshift_cluster_monitoring_operator_prometheus_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_prometheus_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. 
Defaults to `none`, which applies the default storage class name. - -|`openshift_cluster_monitoring_operator_alertmanager_storage_class_name` -| If you enabled the `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` option, set a specific StorageClass to ensure that pods are configured to use the `PVC` with that `storageclass`. Defaults to `none`, which applies the default storage class name. -|=== - -[role="_additional-resources"] -.Additional resources -// FIXME add link once doc is available -// Used to point to ../admin_guide/scheduling/node_selector.adoc[Advanced Scheduling and Node Selectors] -// * See LINK for more information on node selectors. diff --git a/_unused_topics/monitoring-grouping-alerts.adoc b/_unused_topics/monitoring-grouping-alerts.adoc deleted file mode 100644 index ad71f4988995..000000000000 --- a/_unused_topics/monitoring-grouping-alerts.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="grouping-alerts_{context}"] -== Grouping alerts - -Once alerts are firing against the Alertmanager, it must be configured to know how to logically group them. This procedure shows how to configure alert grouping: - -.Procedure - -_FIXME get missing info and complete the procedure_ - -For this example, a new route will be added to reflect alert routing of the "frontend" team. - -. Add new routes. Multiple routes may be added beneath the original route, typically to define the receiver for the notification. This example uses a matcher to ensure that only alerts coming from the service `example-app` are used: -+ - global: - resolve_timeout: 5m - route: - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: default - routes: - - match: - alertname: DeadMansSwitch - repeat_interval: 5m - receiver: deadmansswitch - - match: - service: example-app - routes: - - match: - severity: critical - receiver: team-frontend-page - receivers: - - name: default - - name: deadmansswitch -+ -The sub-route matches only on alerts that have a severity of `critical`, and sends them via the receiver called `team-frontend-page`. As the name indicates, someone should be paged for alerts that are critical. - - diff --git a/_unused_topics/monitoring-monitoring-overview.adoc b/_unused_topics/monitoring-monitoring-overview.adoc deleted file mode 100644 index 8b1096b717b0..000000000000 --- a/_unused_topics/monitoring-monitoring-overview.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="monitoring-overview_{context}"] -= Monitoring overview - -{product-title} ships with a pre-configured, pre-installed, and self-updating monitoring stack that is based on the link:https://prometheus.io/[Prometheus] open source project and its wider eco-system. It provides monitoring of cluster components and ships with a set of alerts to immediately notify the cluster administrator about any occurring problems and a set of link:https://grafana.com/[Grafana] dashboards. - -The monitoring stack includes these components: - -* Cluster Monitoring Operator -* Prometheus Operator -* Prometheus -* Prometheus Adapter -* Alertmanager -* kube-state-metrics -* node-exporter -* Grafana - -The {product-title} Cluster Monitoring Operator (CMO) is the central component of the stack. It watches over the deployed monitoring components and resources and ensures that they are always up to date. 
- -The Prometheus Operator (PO) creates, configures, and manages Prometheus and Alertmanager instances. It also automatically generates monitoring target configurations based on familiar Kubernetes label queries. - -The Prometheus Adapter exposes cluster resource metrics (CPU and memory utilization) API for horizontal pod autoscaling. - -Node-exporter is an agent deployed on every node to collect metrics about it. - -The kube-state-metrics exporter agent converts Kubernetes objects to metrics consumable by Prometheus. - -All the components of the monitoring stack are monitored by the stack. Additionally, the stack monitors: - -* cluster-version-operator -* image-registry -* kube-apiserver -* kube-apiserver-operator -* kube-controller-manager -* kube-controller-manager-operator -* kube-scheduler -* kubelet -* monitor-sdn -* openshift-apiserver -* openshift-apiserver-operator -* openshift-controller-manager -* openshift-controller-manager-operator -* openshift-svcat-controller-manager-operator -* telemeter-client - -All these components are automatically updated. - -Other {product-title} framework components might be exposing metrics as well. See their respective documentation. - -[NOTE] -==== -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. -==== - diff --git a/_unused_topics/monitoring-setting-persistent-storage-size.adoc b/_unused_topics/monitoring-setting-persistent-storage-size.adoc deleted file mode 100644 index 619f6133c9bc..000000000000 --- a/_unused_topics/monitoring-setting-persistent-storage-size.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-monitoring-stack.adoc - -[id="setting-persistent-storage-size_{context}"] -= Setting persistent storage size - -You can specify the size of the persistent volume claim for Prometheus and Alertmanager. - -.Procedure - -* Change these Ansible variables: -+ --- -* `openshift_cluster_monitoring_operator_prometheus_storage_capacity` (default: 50Gi) -* `openshift_cluster_monitoring_operator_alertmanager_storage_capacity` (default: 2Gi) --- -+ -Each of these variables applies only if its corresponding `storage_enabled` variable is set to `true`. - diff --git a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc b/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc deleted file mode 100644 index 4f42970573fc..000000000000 --- a/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring.adoc - -[id="update-and-compatibility-guarantees_{context}"] -= Update and compatibility guarantees - -To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. This document describes known pitfalls of which types of configuration and customization are unsupported, as well as misuse of resources provided by {product-title} Monitoring. All configuration options described in this topic are explicitly supported. - -*Modification of {product-title} monitoring resources* - -The {product-title} Monitoring stack ensures its resources are _always_ in the state it expects them to be. If they are modified, {product-title} Monitoring will ensure that this will be reset. 
Nonetheless, it is possible to pause this behavior by setting the `paused` field in the `AppVersion` called `openshift-monitoring`. Setting the {product-title} Monitoring stack to be paused stops all future updates and allows the otherwise managed resources to be modified. If resources are modified in an uncontrolled manner, this will cause undefined behavior during updates.
-
-To ensure compatible and functioning updates, the `paused` field must be set to `false` on upgrades.
-
-*Usage of resources created by {product-title} monitoring*
-
-{product-title} Monitoring creates a number of resources. These resources are not meant to be used by any other resources, as there are no guarantees about their backward compatibility. For example, a `ClusterRole` called `prometheus-k8s` is created, and it grants only the specific permissions that the cluster monitoring Prometheus pods need to access the resources they require. All of these resources have no compatibility guarantees going forward. While some of these resources might incidentally contain the necessary information for RBAC purposes, for example, they can be subject to change in any upcoming release, with no backward compatibility.
-
-If you need similar `Role` or `ClusterRole` objects, we recommend creating a new object that has exactly the permissions required for the case at hand, rather than using the resources created and maintained by {product-title} Monitoring.
diff --git a/_unused_topics/mounting-local-volumes.adoc b/_unused_topics/mounting-local-volumes.adoc
deleted file mode 100644
index c0278465a113..000000000000
--- a/_unused_topics/mounting-local-volumes.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-[id="mounting-local-volumes_{context}"]
-= Mounting local volumes
-
-This procedure describes how to mount local volumes so that {product-title} can consume them as persistent volumes (PVs).
-
-.Prerequisites
-
-* All local volumes must be manually mounted before they can be consumed by {product-title} as PVs.
-
-.Procedure
-
-. Mount all volumes into the `*/mnt/local-storage//*` path:
-+
----
-# device name # mount point # FS # options # extra
-/dev/sdb1 /mnt/local-storage/ssd/disk1 ext4 defaults 1 2
-/dev/sdb2 /mnt/local-storage/ssd/disk2 ext4 defaults 1 2
-/dev/sdb3 /mnt/local-storage/ssd/disk3 ext4 defaults 1 2
-/dev/sdc1 /mnt/local-storage/hdd/disk1 ext4 defaults 1 2
-/dev/sdc2 /mnt/local-storage/hdd/disk2 ext4 defaults 1 2
----
-+
-Administrators must create local devices as needed using any method, such as disk partitioning or LVM, create suitable file systems on these devices, and mount these devices using a script or /etc/fstab entries.
-
-. 
Make all volumes accessible to the processes running within the Docker containers: -+ ----- -$ chcon -R unconfined_u:object_r:svirt_sandbox_file_t:s0 /mnt/local-storage/ ----- diff --git a/_unused_topics/nodes-cluster-disabling-features-list.adoc b/_unused_topics/nodes-cluster-disabling-features-list.adoc deleted file mode 100644 index ea94c7d6509d..000000000000 --- a/_unused_topics/nodes-cluster-disabling-features-list.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-disabling-features.adoc - -[id="nodes-cluster-disabling-features-list_{context}"] -= List of feature gates - -Use the following list to determine the name of the feature you want to disable: - -[options="header"] -|=== -| Feature gate| Description | Default - -| *AdvancedAuditing* -| Enables a more general API auditing pipeline, which includes support for pluggable output backends and an audit policy specifying how different requests should be audited. -| True - -| *APIListChunking* -| Enables the API clients to retrieve LIST or GET resources from API server in chunks. -| True - -| *APIResponseCompression* -| Enables the compression of API responses for LIST or GET requests. -| False - -| *AppArmor* -| Enables AppArmor-based mandatory access control on Linux nodes when using Docker. For more information, see the link:https://kubernetes.io/docs/tutorials/clusters/apparmor/[Kubernetes AppArmor documentation]. -| True - -| *AttachVolumeLimit* -| Adds support for volume plugins to report node specific volume limits. -| True - -| *BalanceAttachedNodeVolumes* -| Includes volume count on node to be considered for balanced resource allocation while scheduling. A node which has closer CPU, memory utilization, and volume count is favored by scheduler while making decisions. -| False - -| *BlockVolume* -| Enables the definition and consumption of raw block devices in pods. For more information, see -the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#raw-block-volume-support[Kubernetes Raw Block Volume Support]. -| False - -| *CPUManager* -| Enables Container-level CPU affinity support. -For more information, see Using CPU Manager. -| True - -| *CPUCFSQuotaPeriod* -| Enables nodes to change CPUCFSQuotaPeriod. -| False - -| *CRIcontainerLogRotation* -| Enables Container log rotation for the CRI Container runtime. -| True - -| *CSIBlockVolume* -| Enables CSI to use raw block storage volumes. -| False - -| *CSIDriverRegistry* -| Enables all logic related to the CSIDriver API object in csi.storage.k8s.io. -| False - -| *CSINodeInfo* -| Enables all logic related to the CSINodeInfo API object in csi.storage.k8s.io. -| False - -| *CSIPersistentVolume* -| Enables discovering and mounting volumes provisioned through a CSI (Container Storage Interface) compatible volume plugin. For more information, -see the link:https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md[CSI Volume Plugins in Kubernetes Design Documentation]. -| True - -| *CustomPodDNS* -| Enables customizing the DNS settings for a pod using the *dnsConfig* property. -| True - -| *Debugcontainers* -| Enables running a debugging Container in a pod namespace to troubleshoot a running Pod. -| False - -| *DevicePlugins* -| Enables device plug-in-based resource provisioning on nodes. -| True - -| *DryRun* -| Allows requests to be processed but not stored, so that validation, merging, mutation can be tested without committing. 
-| False - -| *DynamicKubeletConfig* -| Enables the dynamic configuration in a cluster. -| True - -| *EnableEquivalenceClassCache* -| Enables the scheduler to cache equivalence of nodes when scheduling Pods. -| False - -| *ExpandPersistentVolumes* -| Enables the ability to Expand persistent volumes. -| True - -| *ExpandInUsePersistentVolumes* -| Enables the ability to expand persistent volumes' file system without unmounting volumes. -| False - -| *ExperimentalHostUserNamespaceDefaultingGate* -| Enables the disabling of user namespaces. This is for Containers that are using other host projects, host mounts, or Containers that are privileged or using specific non-project capabilities, such as MKNODE, SYS_MODULE, and so forth. This should only be enabled if user project remapping is enabled in the Docker daemon. -| False - -| *GCERegionalPersistentDisk* -| Enables the GCE Persistent Disk feature. -| True - -| *HugePages* -| Enables the allocation and consumption of pre-allocated huge pages. -| True - -| *HyperVcontainer* -| Enables Hyper-V isolation for Windows Containers. -| False - -| *Intializers* -| Enables the dynamic admission control as an extension to the built-in admission controllers. -| False - -| *KubeletPluginsWatcher* -| Enables probe based plugin watcher utility for discovering Kubelet plugins. -| True - -| *LocalStorageCapacityIsolation* -| Enables the consumption of local ephemeral storage and the `sizeLimit` property of an *emptyDir* volume. -| False - -| *Mountcontainers* -| Enables using utility Containers on the host as the volume mount. -| False - -| *MountPropagation* -| Enables sharing a volume mounted by one Container to other Containers or pods. -| True - -| *NodeLease* -| Kubelet uses the new Lease API to report node heartbeats, (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. -| False - -| *PersistentLocalVolumes* -| Enables the usage of local volume pods. Pod affinity has to be specified if requesting a local volume. -| True - -| *PodPriority* -| Enables the descheduling and preemption of pods based on their priorities. -| True - -| *PodReadinessGates* -| Supports Pod Readiness. -| True - -| *PodShareProcessNamespace* -| Allows all containers in a pod to share a process namespace. -| True - -| *ProcMountType* -| Enables control over ProcMountType for containers. -| False - -| *QOSReserved* -| Allows resource reservations at the QoS level preventing pods at lower QoS levels from bursting into resources requested at higher QoS levels (memory only for now). -| False - -| *ResourceLimitsPriorityFunction* -| Enables a scheduler priority function that assigns a lowest possible score of `1` to a node that satisfies at least one of the input pod CPU and memory limits. The intent is to break ties between nodes with same scores. -| False - -| *ResourceQuotaScopeSelectors* -| Enables resource quota scope selectors. -| True - -| *RotateKubeletClientCertificate* -| Enables the rotation of the client TLS certificate on the cluster. -| True - -| *RotateKubeletServerCertificate* -| Enables the rotation of the server TLS certificate on the cluster. -| True - -| *RunAsGroup* -| Enables control over the primary group ID set on the init processes of Containers. -| False - -| *RuntimeClass* -| Enables RuntimeClass, for selecting between multiple runtimes to run a pod. -| False - -| *ScheduleDaemonSetPods* -| Enables DaemonSet pods to be scheduled by the default scheduler instead of the DaemonSet controller. 
-| True - -| *SCTPSupport* -| Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition. -| False - -| *ServiceNodeExclusion* -| Enables the exclusion of nodes from load balancers created by a cloud provider. -| False - -| *StorageObjectInUseProtection* -| Enables postponing the deletion of persistent volume or persistent volume claim objects if they are still being used. -| True - -| *StreamingProxyRedirects* -| Instructs the API server to intercept and follow redirects from the backend kubelet for streaming requests. -| True - -| *SupportIPVSProxyMode* -| Enables providing in-cluster service load balancing using IP virtual servers. -| True - -| *SupportPodPidsLimit* -| Enables support for limiting the number of processes (PIDs) running in a pod. -| True - -| *Sysctls* -| Enables pods to set sysctls on a pod. -| True - -| *TaintBasedEvictions* -| Enables evicting pods from nodes based on taints on nodes and tolerations on pods. -| False - -| *TaintNodesByCondition* -| Enables automatic tainting nodes based on node conditions. -| True - -| *TokenRequest* -| Enables the TokenRequest endpoint on service account resources. -| True - -| *TokenRequestProjection* -| Enables ServiceAccountTokenVolumeProjection support in ProjectedVolumes. -| True - -| *TTLAfterFinished* -| Allows TTL controller to clean up Pods and Jobs after they finish. -| False - -| *ValidateProxyRedirects* -| Controls whether the apiserver should validate that redirects are only followed to the same host. Only used if StreamingProxyRedirects is enabled. -| False - -| *VolumeScheduling* -| Enables volume-topology-aware scheduling and make the persistent volume claim (PVC) binding aware of scheduling decisions. It also enables the usage of local volumes types when used together with the *PersistentLocalVolumes* feature gate. -| True - -| *VolumeSnapshotDataSource* -| Enables volume snapshot data source support. -| False - -| *VolumeSubpath* -| Allows mounting a subpath of a volume in a container. Do not remove this feature gate even though it's GA. -| True - -| *VolumeSubpathEnvExpansion* -| Allows subpath environment variable substitution. Only applicable if the VolumeSubpath feature is also enabled. -| False - -|=== diff --git a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc b/_unused_topics/nodes-cluster-overcommit-node-memory.adoc deleted file mode 100644 index e1da11dcbb73..000000000000 --- a/_unused_topics/nodes-cluster-overcommit-node-memory.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc - -[id="nodes-cluster-overcommit-node-memory_{context}"] - -= Reserving memory across quality of service tiers - -You can use the `qos-reserved` parameter to specify a percentage of memory to be reserved -by a pod in a particular QoS level. This feature attempts to reserve requested resources to exclude pods -from lower OoS classes from using resources requested by pods in higher QoS classes. - -By reserving resources for higher QOS levels, pods that do not have resource limits are prevented from encroaching on the resources -requested by pods at higher QoS levels. - -.Prerequisites - -. Obtain the label associated with the static Machine Config Pool CRD for the type of node you want to configure. -Perform one of the following steps: - -.. 
View the Machine Config Pool: -+ ----- -$ oc describe machineconfigpool ----- -+ -For example: -+ -[source,yaml] ----- -$ oc describe machineconfigpool worker - -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: 2019-02-08T14:52:39Z - generation: 1 - labels: - custom-kubelet: small-pods <1> ----- -<1> If a label has been added it appears under `labels`. - -.. If the label is not present, add a key/value pair: -+ ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - labels: - custom-kubelet: small-pods - name: worker ----- -==== - -.Procedure - -. Create a Custom Resource (CR) for your configuration change. -+ -.Sample configuration for a disabling CPU limits -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: disable-cpu-units <1> -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: small-pods <2> - kubeletConfig: - cgroups-per-qos: - - true - cgroup-driver: - - 'systemd' - cgroup-root: - - '/' - qos-reserved: <3> - - 'memory=50%' ----- -<1> Assign a name to CR. -<2> Specify the label to apply the configuration change. -<3> Specifies how pod resource requests are reserved at the QoS level. -{product-title} uses the `qos-reserved` parameter as follows: -- A value of `qos-reserved=memory=100%` will prevent the `Burstable` and `BestEffort` QOS classes from consuming memory -that was requested by a higher QoS class. This increases the risk of inducing OOM -on `BestEffort` and `Burstable` workloads in favor of increasing memory resource guarantees -for `Guaranteed` and `Burstable` workloads. -- A value of `qos-reserved=memory=50%` will allow the `Burstable` and `BestEffort` QOS classes -to consume half of the memory requested by a higher QoS class. -- A value of `qos-reserved=memory=0%` -will allow a `Burstable` and `BestEffort` QoS classes to consume up to the full node -allocatable amount if available, but increases the risk that a `Guaranteed` workload -will not have access to requested memory. This condition effectively disables this feature. diff --git a/_unused_topics/nodes-containers-using-about.adoc b/_unused_topics/nodes-containers-using-about.adoc deleted file mode 100644 index 0213c6acecc1..000000000000 --- a/_unused_topics/nodes-containers-using-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-using.adoc - -[id="nodes-containers-using-about_{context}"] -= Understanding Containers - -The basic units of {product-title} applications are called _containers_. -link:https://access.redhat.com/articles/1353593[Linux container technologies] -are lightweight mechanisms for isolating running processes so that they are -limited to interacting with only their designated resources. - -Many application instances can be running in containers on a single host without -visibility into each others' processes, files, network, and so on. Typically, -each container provides a single service (often called a "micro-service"), such -as a web server or a database, though containers can be used for arbitrary -workloads. - -The Linux kernel has been incorporating capabilities for container technologies -for years. 
{product-title} and -Kubernetes add the ability to orchestrate containers across -multi-host installations. diff --git a/_unused_topics/nodes-containers-using-ssh.adoc b/_unused_topics/nodes-containers-using-ssh.adoc deleted file mode 100644 index 868386626226..000000000000 --- a/_unused_topics/nodes-containers-using-ssh.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-using.adoc - -[id="nodes-containers-using-ssh_{context}"] -= Opening a Remote Shell to Containers - -The `oc rsh` command allows you to locally access and manage tools that are on -the system. The secure shell (SSH) is the underlying technology and industry -standard that provides a secure connection to the application. Access to -applications with the shell environment is protected and restricted with -Security-Enhanced Linux (SELinux) policies. - -While in the remote shell, you can issue commands as if you are inside the -container and perform local operations like monitoring, debugging, and using CLI -commands specific to what is running in the container. - -For example, in a MySQL container, you can count the number of records in the -database by invoking the `mysql` command, then using the prompt to type in the `SELECT` command. You can -also use commands like `ps(1)` and `ls(1)` for validation. - -`BuildConfigs` and `DeployConfigs` map out how you want things to look and -pods (with containers inside) are created and dismantled as needed. Your changes -are not persistent. If you make changes directly within the container and that -container is destroyed and rebuilt, your changes will no longer exist. - -[NOTE] -==== -You can use the `oc exec` c to execute a command remotely. However, the `oc rsh` command provides an easier way -to keep a remote shell open persistently. -==== - -.Procedure - -. Open a console on a system networked to connect to the node where your pod is located. - -. Open a remote shell session to a container: -+ ----- -$ oc rsh ----- - -[NOTE] -==== -For help with usage, options, and to see examples: ----- -$ oc rsh -h ----- -==== diff --git a/_unused_topics/nodes-nodes-audit-log-advanced.adoc b/_unused_topics/nodes-nodes-audit-log-advanced.adoc deleted file mode 100644 index e790f3ec446c..000000000000 --- a/_unused_topics/nodes-nodes-audit-log-advanced.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-audit-log.adoc - -[id="nodes-nodes-audit-log-advanced_{context}"] -= Advanced Audit - -*DEPRECATED for the moment* - -The advanced audit feature provides several improvements over the -basic audit functionality, including fine-grained events filtering and multiple output back ends. - -To enable the advanced audit feature, provide the following values in the -`openshift_master_audit_config` parameter: - ----- -openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/lib/origin/oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5, "policyFile": "/etc/origin/master/adv-audit.yaml", "logFormat":"json"} ----- - -[IMPORTANT] -==== -The policy file *_/etc/origin/master/adv-audit.yaml_* must be available on each control plane node. -==== - - -The following table contains additional options you can use. - -.Advanced Audit Configuration Parameters - -[cols="3a,6a",options="header"] -|=== -| Parameter Name | Description - -|`policyFile` -|Path to the file that defines the audit policy configuration. 
- -|`policyConfiguration` -|An embedded audit policy configuration. - -|`logFormat` -|Specifies the format of the saved audit logs. Allowed values are `legacy` (the -format used in basic audit), and `json`. - -|`webHookKubeConfig` -|Path to a `.kubeconfig`-formatted file that defines the audit webhook -configuration, where the events are sent to. - -|`webHookMode` -|Specifies the strategy for sending audit events. Allowed values are `block` -(blocks processing another event until the previous has fully processed) and -`batch` (buffers events and delivers in batches). -|=== - -[IMPORTANT] -==== -To enable the advanced audit feature, you must provide either `policyFile` *or* -`policyConfiguration` describing the audit policy rules: -==== - -.Sample Audit Policy Configuration -[source,yaml] ----- -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: - - # Do not log watch requests by the "system:kube-proxy" on endpoints or services - - level: None <1> - users: ["system:kube-proxy"] <2> - verbs: ["watch"] <3> - resources: <4> - - group: "" - resources: ["endpoints", "services"] - - # Do not log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] <5> - nonResourceURLs: <6> - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] <7> - - # Log configmap and secret changes in all other namespaces at the metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata <1> - - # Log login failures from the web console or CLI. Review the logs and refine your policies. - - level: Metadata - nonResourceURLs: - - /login* <8> - - /oauth* <9> ----- -<1> There are four possible levels every event can be logged at: -+ -* `None` - Do not log events that match this rule. -+ -* `Metadata` - Log request metadata (requesting user, time stamp, resource, verb, -etc.), but not request or response body. This is the same level as the one used -in basic audit. -+ -* `Request` - Log event metadata and request body, but not response body. -+ -* `RequestResponse` - Log event metadata, request, and response bodies. -<2> A list of users the rule applies to. An empty list implies every user. -<3> A list of verbs this rule applies to. An empty list implies every verb. This is - Kubernetes verb associated with API requests (including `get`, `list`, `watch`, - `create`, `update`, `patch`, `delete`, `deletecollection`, and `proxy`). -<4> A list of resources the rule applies to. An empty list implies every resource. -Each resource is specified as a group it is assigned to (for example, an empty for -Kubernetes core API, batch, build.openshift.io, etc.), and a resource list from -that group. -<5> A list of groups the rule applies to. An empty list implies every group. -<6> A list of non-resources URLs the rule applies to. -<7> A list of namespaces the rule applies to. An empty list implies every namespace. 
<8> Endpoint used by the web console.
-<9> Endpoint used by the CLI.
-
-For more information on advanced audit, see the
-link:https://kubernetes.io/docs/tasks/debug-application-cluster/audit[Kubernetes
-documentation]
diff --git a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc b/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc
deleted file mode 100644
index 8c8b62e45eeb..000000000000
--- a/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-nodes-resources-configuring.adoc
-
-[id="nodes-nodes-resources-configuring-setting_{context}"]
-= Viewing Node Allocatable Resources and Capacity
-
-As an administrator, you can view the current capacity and allocatable resources of a specific node.
-
-.Procedure
-
-To see a node's current capacity and allocatable resources:
-
-. Run the following command:
-
----
-$ oc get node/ -o yaml
----
-
-. Locate the following section in the output:
-+
-[source,yaml]
----
-...
-status:
-...
- allocatable:
- cpu: "4"
- memory: 8010948Ki
- pods: "110"
- capacity:
- cpu: "4"
- memory: 8010948Ki
- pods: "110"
-...
----
diff --git a/_unused_topics/nodes-nodes-working-adding.adoc b/_unused_topics/nodes-nodes-working-adding.adoc
deleted file mode 100644
index 5efb65bc72ba..000000000000
--- a/_unused_topics/nodes-nodes-working-adding.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Module included in the following assemblies:
-//
-// * nodes/nodes-nodes-working.adoc
-
-[id="nodes-nodes-working-adding_{context}"]
-= Adding new nodes to your cluster
-
-////
-this entire section is obsolete for 4.0. nodes are added to the cluster using MachineSets in 4.0.
-https://github.com/openshift/openshift-docs/pull/12964#discussion_r242781872
-////
diff --git a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc b/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc
deleted file mode 100644
index e2271d47ee3d..000000000000
--- a/_unused_topics/nodes-pods-autoscaling-custom-metrics.adoc
+++ /dev/null
@@ -1,83 +0,0 @@
-== Supported metrics
-
-KEDA emits the following Kubernetes events:
-
-.Events
-[cols="3a,5a,5a",options="header"]
-|===
-
-|Event |Type |Description
-
-|ScaledObjectReady
-|Normal
-|On the first time a ScaledObject is ready, or if the previous ready condition status of the object was Unknown or False
-
-|ScaledJobReady
-|Normal
-|On the first time a ScaledJob is ready, or if the previous ready condition status of the object was Unknown or False
-
-|ScaledObjectCheckFailed
-|Warning
-|If the check validation for a ScaledObject fails
-
-|ScaledJobCheckFailed
-|Warning
-|If the check validation for a ScaledJob fails
-
-|ScaledObjectDeleted
-|Normal
-|When a ScaledObject is deleted and removed from KEDA watch
-
-|ScaledJobDeleted
-|Normal
-|When a ScaledJob is deleted and removed from KEDA watch
-
-|KEDAScalersStarted
-|Normal
-|When the scaler watch loop has started for a ScaledObject or ScaledJob
-
-|KEDAScalersStopped
-|Normal
-|When the scaler watch loop has stopped for a ScaledObject or a ScaledJob
-
-|KEDAScalerFailed
-|Warning
-|When a Scaler fails to create or check its event source
-
-|KEDAScaleTargetActivated
-|Normal
-|When the scale target (Deployment, StatefulSet, etc) of a ScaledObject is scaled to 1
-
-|KEDAScaleTargetDeactivated
-|Normal
-|When the scale target (Deployment, StatefulSet, etc) of a ScaledObject is scaled to 0
-
-|KEDAScaleTargetActivationFailed
-|Warning
-|When KEDA fails to scale the scale
target of a ScaledObject to 1 - -|KEDAScaleTargetDeactivationFailed -|Warning -|When KEDA fails to scale the scale target of a ScaledObject to 0 - -|KEDAJobsCreated -|Normal -|When KEDA creates jobs for a ScaledJob - -|TriggerAuthenticationAdded -|Normal -|When a new TriggerAuthentication is added - -|TriggerAuthenticationDeleted -|Normal -|When a TriggerAuthentication is deleted - -|ClusterTriggerAuthenticationAdded -|Normal -|When a new ClusterTriggerAuthentication is added - -|ClusterTriggerAuthenticationDeleted -|Normal -|When a ClusterTriggerAuthentication is deleted - -|=== diff --git a/_unused_topics/nodes-pods-daemonsets-pods.adoc b/_unused_topics/nodes-pods-daemonsets-pods.adoc deleted file mode 100644 index 74520493154b..000000000000 --- a/_unused_topics/nodes-pods-daemonsets-pods.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-daemonsets.adoc - -[id="nodes-pods-daemonsets-pods_{context}"] -= About Scheduling DaemonSets with the default scheduler - -In {product-title}, the scheduler selects the Node that a Pod runs on. However, in previous versions of {product-title}, DaemonSet pods were created and scheduled by the DaemonSet controller. - -The `ScheduleDaemonSetPods` feature, enabled by default, forces {product-title} to schedule DaemonSets using the default scheduler, instead of the DaemonSet controller. -The DaemonSet controller adds the `NodeAffinity` parameter to the DaemonSet pods, instead of the `.spec.nodeName` parameter. The default scheduler then binds the pod to the target host. If the DaemonSet pod is already configured for node affinity, the affinity is replaced. The DaemonSet controller only performs these operations when creating or modifying DaemonSet pods, and no changes are made to the `spec.template` parameter of the DaemonSet. - ----- -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchFields: - - key: metadata.name - operator: In - values: - - target-host-name ----- - -In addition, the DaemonSet controller adds the `node.kubernetes.io/unschedulable:NoSchedule` toleration to DaemonSet Pods. The default scheduler ignores unschedulable Nodes when scheduling DaemonSet Pods. diff --git a/_unused_topics/nodes-pods-priority-examples.adoc b/_unused_topics/nodes-pods-priority-examples.adoc deleted file mode 100644 index 92d898eb2d3f..000000000000 --- a/_unused_topics/nodes-pods-priority-examples.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-priority.adoc - -[id="nodes-pods-priority-examples_{context}"] -= Pod priority example scenarios - -Pod priority and preemption assigns a priority to pods for scheduling. The scheduler will preempt (evict) lower-priority pods to schedule higher-priority pods. - -Typical preemption scenario:: -*Pod P* is a pending pod. - -. The scheduler locates *Node N*, where the removal of one or more pods enables *Pod P* to be scheduled on that node. - -. The scheduler deletes the lower-priority pods from the *Node N* and schedules *Pod P* on the node. - -. The `nominatedNodeName` field of *Pod P* is set to the name of *Node N*. - -[NOTE] -==== -*Pod P* is not necessarily scheduled to the nominated node. -==== - -Preemption and termination periods:: -The preempted pod has a long termination period. - -. The scheduler preempts a lower-priority pod on *Node N*. - -. The scheduler waits for the pod to gracefully terminate. - -. 
For other scheduling reasons, *Node M* becomes available. - -. The scheduler can then schedule *Pod P* on *Node M*. - -//// -Under consideration for future release -Pod priority and cross-node preemption:: -*Pod P* is being considered for *Node N*. - -. *Pod Q* is running on another node in the same zone as *Node N*. - -. *Pod P* has zone-wide anti-affinity with *Pod Q*, meaning *Pod P* cannot be scheduled in the same zone as *Pod Q*. -+ -There are no other cases of anti-affinity between *Pod P* and other pods in the zone. - -. To schedule *Pod P* on *Node N*, the scheduler must preempt *Pod Q* to remove the pod anti-affinity violation, allowing the scheduler to schedule *Pod P* on *Node N*. - -The scheduler can preempt *Pod Q*, but scheduler does not perform cross-node preemption. So, Pod P will be deemed unschedulable on Node N. -//// - diff --git a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc b/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc deleted file mode 100644 index 083d94dfc6eb..000000000000 --- a/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -[id="nodes-scheduler-node-affinity-configuring_{context}"] -= Configuring node affinity rules - -You can configure two types of node affinity rules: required and preferred. - -== Configuring a required node affinity rule - -Required rules *must* be met before a pod can be scheduled on a node. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - e2e-az-name: e2e-az1 ----- -==== - -. In the pod specification, use the `nodeAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify the key and values that must be met. If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node. -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node: -+ ----- -spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 ----- - -. Create the pod: -+ ----- -$ oc create -f e2e-az2.yaml ----- - -== Configuring a Preferred Node Affinity Rule - -Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rules, but does not guarantee enforcement. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node. - -. Add a label to a node using the `oc label node` command: -+ ----- -$ oc label node node1 e2e-az-name=e2e-az3 ----- - -. In the pod specification, use the `nodeAffinity` stanza to configure the `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify a weight for the node, as a number 1-100. The node with highest weight is preferred. -+ -.. Specify the key and values that must be met. 
If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node: -+ ----- - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az3 ----- - -. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the operator `In` to require the label to be in the node. - -. Create the pod. -+ ----- -$ oc create -f e2e-az3.yaml ----- diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc deleted file mode 100644 index 43a1eb49af61..000000000000 --- a/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-taints-tolerations.adoc - -[id="nodes-scheduler-taints-tolerations-examples_{context}"] -= Example taint and toleration scenarios - -Taints and tolerations are a flexible way to steer pods away from nodes or evict pods that should not be running on a node. A few of typical scenarios are: - -* Dedicating a node for a user -* Binding a user to a node -* Dedicating nodes with special hardware - -[id="nodes-scheduler-taints-tolerations-examples-user_{context}"] -== Dedicating a Node for a User - -You can specify a set of nodes for exclusive use by a particular set of users. - -.Procedure - -To specify dedicated nodes: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -Only the pods with the tolerations are allowed to use the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-binding_{context}"] -== Binding a User to a Node - -You can configure a node so that particular users can use only the dedicated nodes. - -.Procedure - -To configure a node so that users can use only that node: - -. Add a taint to those nodes: -+ -For example: -+ ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule - ... ----- -==== - -. Add a corresponding toleration to the pods by writing a custom admission controller. -+ -The admission controller should add a node affinity to require that the pods can only schedule onto nodes labeled with the `key:value` label (`dedicated=groupName`). - -. Add a label similar to the taint (such as the `key:value` label) to the dedicated nodes. - -[id="nodes-scheduler-taints-tolerations-examples-special_{context}"] -== Nodes with Special Hardware - -In a cluster where a small subset of nodes have specialized hardware (for example GPUs), you can use taints and tolerations to keep pods that do not need the specialized hardware off of those nodes, leaving the nodes for pods that do need the specialized hardware. 
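The scenarios above show the node-side taint; the pod side of the contract is a matching toleration. The original module defers that step to a custom admission controller, so the following is only a minimal sketch of the toleration stanza such a controller might inject for the `dedicated=groupName` taint shown earlier (the pod name and image are illustrative):

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: dedicated-workload        # illustrative pod name
spec:
  tolerations:
  - key: dedicated                # must match the taint key on the node
    operator: Equal
    value: groupName              # must match the taint value
    effect: NoSchedule
  containers:
  - name: app
    image: registry.example.com/app:latest   # illustrative image
----

An analogous toleration with `key: disktype` would apply to the specialized-hardware taint used in the procedure that follows.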
You can also require pods that need specialized hardware to use specific nodes. - -.Procedure - -To ensure pods are blocked from the specialized hardware: - -. Taint the nodes that have the specialized hardware using one of the following commands: -+ ----- -$ oc adm taint nodes disktype=ssd:NoSchedule -$ oc adm taint nodes disktype=ssd:PreferNoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: disktype - value: ssd - effect: PreferNoSchedule - ... ----- -==== - -. Adding a corresponding toleration to pods that use the special hardware using an admission controller. - -For example, the admission controller could use some characteristic(s) of the pod to determine that the pod should be allowed to use the special nodes by adding a toleration. - -To ensure pods can only use the specialized hardware, you need some additional mechanism. For example, you could label the nodes that have the special hardware and use node affinity on the pods that need the hardware. diff --git a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc deleted file mode 100644 index a8a788cb0aa8..000000000000 --- a/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-taints-tolerations.adoc - -[id="nodes-scheduler-taints-tolerations-seconds_{context}"] -= Setting a default value for toleration seconds - -When using taints and tolerations, if taints are added to an existing node, non-matching pods on that node will be evicted. You can modify the time allowed before pods are evicted using the toleration seconds plug-in, which sets the eviction period at five minutes, by default. - -.Procedure - -To enable Default Toleration Seconds: - -Create an *AdmissionConfiguration* object: -+ ----- -kind: AdmissionConfiguration -apiVersion: apiserver.k8s.io/v1alpha1 -plugins: -- name: DefaultTolerationSeconds -...---- diff --git a/_unused_topics/osdk-updating-projects.adoc b/_unused_topics/osdk-updating-projects.adoc deleted file mode 100644 index ee2cf2600ae0..000000000000 --- a/_unused_topics/osdk-updating-projects.adoc +++ /dev/null @@ -1,353 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc -// * operators/operator_sdk/ansible/osdk-ansible-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc -// * operators/operator_sdk/helm/osdk-hybrid-helm-updating-projects.adoc - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:golang: -:type: Go -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:ansible: -:type: Ansible -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:helm: -:type: Helm -endif::[] -ifeval::["{context}" == "osdk-hybrid-helm-updating-projects"] -:hybrid: -:type: Hybrid Helm -endif::[] - -:osdk_ver: v1.25.0 -:osdk_ver_n1: v1.22.0 - -:_content-type: PROCEDURE -[id="osdk-upgrading-projects_{context}"] -= Updating {type}-based Operator projects for Operator SDK {osdk_ver} - -The following procedure updates an existing {type}-based Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -* Operator SDK {osdk_ver} installed. -* An Operator project created or maintained with Operator SDK {osdk_ver_n1}. 
- -.Procedure - -. Make the following changes to the `config/default/manager_auth_proxy_patch.yaml` file: -+ -[source,yaml] ----- -... -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.12 <1> - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" <2> -... -resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi <3> ----- -<1> Update the tag version from `v4.11` to `v4.12`. -<2> Reduce the debugging log level from `--v=10` to `--v=0`. -<3> Add resource requests and limits. - -. Make the following changes to your `Makefile`: - -.. Enable support for image digests by adding the following environment variables to your `Makefile`: -+ -.Old `Makefile` -[source,terminal] ----- -BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) -... ----- -+ -.New `Makefile` -[source,terminal] ----- -BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) - -# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command -BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) - -# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests -# You can enable this value if you would like to use SHA Based Digests -# To enable set flag to true -USE_IMAGE_DIGESTS ?= false -ifeq ($(USE_IMAGE_DIGESTS), true) - BUNDLE_GEN_FLAGS += --use-image-digests -endif ----- - -.. Edit your `Makefile` to replace the bundle target with the `BUNDLE_GEN_FLAGS` environment variable: -+ -.Old `Makefile` -[source,terminal] ----- -$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) ----- -+ -.New `Makefile` -[source,terminal] ----- -$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS) ----- - -.. Edit your `Makefile` to update `opm` to version 1.23.0: -+ -[source,terminal] ----- -.PHONY: opm -OPM = ./bin/opm -opm: ## Download opm locally if necessary. -ifeq (,$(wildcard $(OPM))) -ifeq (,$(shell which opm 2>/dev/null)) - @{ \ - set -e ;\ - mkdir -p $(dir $(OPM)) ;\ - OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ - curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\ <1> - chmod +x $(OPM) ;\ - } -else -OPM = $(shell which opm) -endif -endif ----- -<1> Replace `v1.19.1` with `v1.23.0`. - -ifdef::golang[] -.. Edit your `Makefile` to replace the `go get` targets with `go install` targets: -+ -.Old `Makefile` -[source,terminal] ----- -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -.PHONY: controller-gen -controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0) - -KUSTOMIZE = $(shell pwd)/bin/kustomize -.PHONY: kustomize -kustomize: ## Download kustomize locally if necessary. - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) - -ENVTEST = $(shell pwd)/bin/setup-envtest -.PHONY: envtest -envtest: ## Download envtest-setup locally if necessary. - $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) - -# go-get-tool will 'go get' any package $2 and install it to $1. 
-PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef ----- -+ -.New `Makefile` -[source,terminal] ----- -##@ Build Dependencies - -## Location to install dependencies to -LOCALBIN ?= $(shell pwd)/bin -$(LOCALBIN): - mkdir -p $(LOCALBIN) - -## Tool Binaries -KUSTOMIZE ?= $(LOCALBIN)/kustomize -CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen -ENVTEST ?= $(LOCALBIN)/setup-envtest - -## Tool Versions -KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.8.0 - -KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. -$(KUSTOMIZE): $(LOCALBIN) - curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN) - -.PHONY: controller-gen -controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. -$(CONTROLLER_GEN): $(LOCALBIN) - GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) - -.PHONY: envtest -envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. -$(ENVTEST): $(LOCALBIN) - GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest ----- -endif::[] - -ifdef::golang,hybrid[] -.. Update `ENVTEST_K8S_VERSION` and `controller-gen` fields in your `Makefile` to support Kubernetes 1.24: -+ -[source,terminal] ----- -... -ENVTEST_K8S_VERSION = 1.24 <1> -... -sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.0 <2> ----- -<1> Update version `1.22` to `1.24`. -<2> Update version `0.7.0` to `0.9.0`. -endif::[] - -.. Apply the changes to your `Makefile` and rebuild your Operator by entering the following command: -+ -[source,terminal] ----- -$ make ----- - -ifdef::golang,hybrid[] -. Make the following changes to the `go.mod` file to update Go and its dependencies: -+ -[source,golang] ----- -go 1.18 <1> - -require ( - github.com/onsi/ginkgo v1.16.5 <2> - github.com/onsi/gomega v1.18.1 <3> - k8s.io/api v0.24.0 <4> - k8s.io/apimachinery v0.24.0 <4> - k8s.io/client-go v0.24.0 <4> - sigs.k8s.io/controller-runtime v0.12.1 <5> -) ----- -<1> Update version `1.16` to `1.18`. -<2> Update version `v1.16.4` to `v1.16.5`. -<3> Update version `v1.15.0` to `v1.18.1`. -<4> Update version `v0.22.1` to `v0.24.0`. -<5> Update version `v0.10.0` to `v0.12.1`. -endif::golang,hybrid[] - -ifdef::hybrid[] -. Edit your `go.mod` file to update the Helm Operator plugins: -+ -[source,golang] ----- -github.com/operator-framework/helm-operator-plugins v0.0.11 <1> ----- -<1> Update version `v0.0.8` to `v0.0.11`. -endif::[] - -ifdef::golang,hybrid[] -. Download and clean up the dependencies by entering the following command: -+ -[source,terminal] ----- -$ go mod tidy ----- -endif::[] - -ifdef::golang[] -. If you use the `api/webhook_suitetest.go` and `controllers/suite_test.go` suite test files, make the following changes: -+ -.Old suite test file -[source,golang] ----- -cfg, err := testEnv.Start() ----- -+ -.New suite test file -[source,golang] ----- -var err error -// cfg is defined in this file globally. -cfg, err = testEnv.Start() ----- - -. If you use the Kubernetes declarative plugin, update your Dockerfile with the following changes: - -.. 
Add the following changes below the line that begins `COPY controllers/ controllers/`: -+ -[source,terminal] ----- -# https://github.com/kubernetes-sigs/kubebuilder-declarative-pattern/blob/master/docs/addon/walkthrough/README.md#adding-a-manifest -# Stage channels and make readable -COPY channels/ /channels/ -RUN chmod -R a+rx /channels/ ----- - -.. Add the following changes below the line that begins `COPY --from=builder /workspace/manager .`: -+ -[source,terminal] ----- -# copy channels -COPY --from=builder /channels /channels ----- -endif::[] - -ifdef::ansible[] -. Update your `requirements.yml` file as shown in the following example: -+ -[source,yaml] ----- -collections: - - name: community.kubernetes - version: "2.0.1" <1> - - name: operator_sdk.util - version: "0.4.0" <2> - - name: kubernetes.core - version: "2.3.1" <3> - - name: cloud.common <4> - version: "2.1.1" ----- -<1> Update version `1.2.1` to `2.0.1`. -<2> Update version `0.3.1` to `0.4.0`. -<3> Update version `2.2.0` to `2.3.1`. -<4> Add support for the Operator Ansible SDK by adding the `cloud.common` collection. -+ -[IMPORTANT] -==== -As of version 2.0.0, the `community.kubernetes` collection was renamed to `kubernetes.core`. The `community.kubernetes` collection has been replaced by deprecated redirects to `kubernetes.core`. If you use fully qualified collection names (FQCNs) that begin with `community.kubernetes`, you must update the FQCNs to use `kubernetes.core`. -==== -endif::[] - -:!osdk_ver: -:!osdk_ver_n1: - -ifeval::["{context}" == "osdk-golang-updating-projects"] -:!golang: -:!type: -endif::[] -ifeval::["{context}" == "osdk-ansible-updating-projects"] -:!ansible: -:!type: -endif::[] -ifeval::["{context}" == "osdk-helm-updating-projects"] -:!type: -:!helm: -endif::[] -ifeval::["{context}" == "osdk-hybrid-helm-updating-projects"] -:!hybrid: -:!type: -endif::[] diff --git a/_unused_topics/osdk-updating-v1101-to-v1160.adoc b/_unused_topics/osdk-updating-v1101-to-v1160.adoc deleted file mode 100644 index 11450144ef89..000000000000 --- a/_unused_topics/osdk-updating-v1101-to-v1160.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.16.0 -:osdk_ver_n1: v1.10.1 - -:_content-type: PROCEDURE -[id="osdk-upgrading-v1101-to-v1160_{context}"] -= Updating projects for Operator SDK {osdk_ver} - -The following procedure updates an existing Operator project for compatibility with {osdk_ver}. - -[IMPORTANT] -==== -* Operator SDK v1.16.0 supports Kubernetes 1.22. - -* Many deprecated `v1beta1` APIs were removed in Kubernetes 1.22, including `sigs.k8s.io/controller-runtime v0.10.0` and `controller-gen v0.7`. - -* Updating projects to Kubernetes 1.22 is a breaking change if you need to scaffold `v1beta1` APIs for custom resource definitions (CRDs) or webhooks to publish your project into older cluster versions. - -See link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-osdk-k8s-api-bundle-validate[Validating bundle manifests for APIs removed from Kubernetes 1.22] and link:https://docs.openshift.com/container-platform/4.9/release_notes/ocp-4-9-release-notes.html#ocp-4-9-removed-kube-1-22-apis[Beta APIs removed from Kubernetes 1.22] for more information about changes introduced in Kubernetes 1.22. -==== - -.Prerequisites - -* Operator SDK {osdk_ver} installed. -* An Operator project created or maintained with Operator SDK {osdk_ver_n1}. - -.Procedure - -. 
Add the `protocol` field in the `config/default/manager_auth_proxy_patch.yaml` and `config/rbac/auth_proxy_service.yaml` files: -+ -[source,diff] ----- -... - ports: - - containerPort: 8443 -+ protocol: TCP - name: https ----- - -. Make the following changes to the `config/manager/manager.yaml` file: - -.. Increase the CPU and memory resource limits: -+ -[source,diff] ----- -resources: - limits: -- cpu: 100m -- memory: 30Mi -+ cpu: 200m -+ memory: 100Mi ----- - -.. Add an annotation to specify the default container manager: -+ -[source,yaml] ----- -... -template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager -... ----- - -. Add `PHONY` targets to all of the targets in your `Makefile` file. - -. For Go-based Operator projects, make the following changes: - -.. Install the `setup-envtest` binary. - -.. Change your `go.mod` file to update the dependencies: -+ -[source,golang] ----- -k8s.io/api v0.22.1 -k8s.io/apimachinery v0.22.1 -k8s.io/client-go v0.22.1 -sigs.k8s.io/controller-runtime v0.10.0 ----- - -.. Run the `go mod tidy` command to download the dependencies: -+ -[source,terminal] ----- -$ go mod tidy ----- - -.. Make the following changes to your `Makefile` file: -+ -[source,diff] ----- -... - -+ ENVTEST_K8S_VERSION = 1.22 - - test: manifests generate fmt vet envtest ## Run tests. -- go test ./... -coverprofile cover.out -+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out -... - -- $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases -+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases -... - -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -- CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" -... -- admissionReviewVersions={v1,v1beta1} -+ admissionReviewVersions=v1 -... - -+ ifndef ignore-not-found -+ ignore-not-found = false -+ endif - -##@ Deployment -... -- sh kubectl delete -f - -+ sh kubectl delete --ignore-not-found=$(ignore-not-found) -f - ----- - -.. Run the `make manifest` command to generate your manifests with the updated version of Kubernetes: -+ -[source,terminal] ----- -$ make manifest ----- - -. For Ansible-based Operator projects, make the following changes: -+ -.. Change your `requirements.yml` file to include the following: - -... Replace the `community.kubernetes` collection with the `kubernetes.core` collection: -+ -[source,yaml] ----- -... -- name: kubernetes.core - version: "2.2.0" -... ----- - -... Update the `operator_sdk.util` utility from version `0.2.0` to `0.3.1`: -+ -[source,yaml] ----- -... -- name: operator_sdk.util - version: "0.3.1" ----- - -.. Verify the default resource limits in the `config/manager/manager.yaml` file: -+ -[source,yaml] ----- -... - # TODO(user): Configure the resources accordingly based on the project requirements. - # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -resources: - limits: - cpu: 500m - memory: 768Mi - requests: - cpu: 10m - memory: 256Mi ----- -+ -[IMPORTANT] -==== -Operator SDK scaffolds these values as a reasonable default setting. Operator authors should set and optimize resource limits based on the requirements of their project. -==== - -.. Optional: Make the following changes if you want to run your Ansible-based Operator locally by using the `make run` command: - -... 
Change the run target in the `Makefile` file: -+ -[source,terminal] ----- -ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run ----- - -... Update the local version of `ansible-runner` to 2.0.2 or later. -+ -[IMPORTANT] -==== -As of version 2.0, the `ansible-runner` tool includes changes in the command signature that are not compatible with earlier versions. -==== - -:!osdk_ver: -:!osdk_ver_n1: diff --git a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc b/_unused_topics/osdk-upgrading-v180-to-v1101.adoc deleted file mode 100644 index 91a7d73f2957..000000000000 --- a/_unused_topics/osdk-upgrading-v180-to-v1101.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-upgrading-projects.adoc - -:osdk_ver: v1.10.1 -:osdk_ver_n1: v1.8.0 - -:_content-type: PROCEDURE -[id="osdk-upgrading-v180-to-v1101_{context}"] -= Upgrading projects for Operator SDK {osdk_ver} - -The following upgrade steps must be performed to upgrade an existing Operator project for compatibility with {osdk_ver}. - -.Prerequisites - -- Operator SDK {osdk_ver} installed -- Operator project that was previously created or maintained with Operator SDK {osdk_ver_n1} - -.Procedure - -* For Ansible-based Operator projects, update the command in the `Set pull policy` section of the `molecule/default/prepare.yml` file: -+ -.`molecule/default/prepare.yml` file diff -[%collapsible] -==== -[source,diff] ----- - - name: Set pull policy -- command: '{{ "{{ kustomize }}" }} edit add patch pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' -+ command: '{{ "{{ kustomize }}" }} edit add patch --path pull_policy/{{ "{{ operator_pull_policy }}" }}.yaml' ----- -==== -+ -Ansible projects are now scaffolded with Kustomize version 3.8.7. This version of Kustomize requires that the path to patch files be provided with the `--path` flag in the `add patch` command. - -Your Operator project is now compatible with Operator SDK {osdk_ver}. - -:!osdk_ver: -:!osdk_ver_n1: diff --git a/_unused_topics/pod-using-a-different-service-account.adoc b/_unused_topics/pod-using-a-different-service-account.adoc deleted file mode 100644 index 29794c9f858c..000000000000 --- a/_unused_topics/pod-using-a-different-service-account.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="pod-using-a-different-service-account_{context}"] -= Running a pod with a different service account - -You can run a pod with a service account other than the default: - -.Prerequisites - -* Install the `oc` command line interface. -* Configure a service account. -* Create a DeploymentConfig. - -.Procedure - -. Edit the DeploymentConfig: -+ ----- -$ oc edit dc/ ----- - -. 
Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` -field, and specify the service account that you want to use: -+ ----- -spec: - securityContext: {} - serviceAccount: - serviceAccountName: ----- diff --git a/_unused_topics/rbac-updating-policy-definitions.adoc b/_unused_topics/rbac-updating-policy-definitions.adoc deleted file mode 100644 index 1a2e45a62e90..000000000000 --- a/_unused_topics/rbac-updating-policy-definitions.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[id="updating-policy-definitions_{context}"] -= Updating policy definitions - -During a cluster upgrade, and on every restart of any master, the -default cluster roles are automatically reconciled to restore any missing permissions. - -If you customized default cluster roles and want to ensure a role reconciliation -does not modify them, you must take the following actions. - -.Procedure - -. Protect each role from reconciliation: -+ ----- -$ oc annotate clusterrole.rbac --overwrite rbac.authorization.kubernetes.io/autoupdate=false ----- -+ -[WARNING] -==== -You must manually update the roles that contain this setting to include any new -or required permissions after upgrading. -==== - -. Generate a default bootstrap policy template file: -+ ----- -$ oc adm create-bootstrap-policy-file --filename=policy.json ----- -+ -[NOTE] -==== -The contents of the file vary based on the {product-title} version, but the file -contains only the default policies. -==== - -. Update the *_policy.json_* file to include any cluster role customizations. - -. Use the policy file to automatically reconcile roles and role bindings that -are not reconcile protected: -+ ----- -$ oc auth reconcile -f policy.json ----- - -. Reconcile Security Context Constraints: -+ ----- -# oc adm policy reconcile-sccs \ - --additive-only=true \ - --confirm ----- -endif::[] diff --git a/_unused_topics/running-modified-installation.adoc b/_unused_topics/running-modified-installation.adoc deleted file mode 100644 index f2c75c4d0d68..000000000000 --- a/_unused_topics/running-modified-installation.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="running-modified-installation_{context}"] -= Running a modified {product-title} installation - -Running a default {product-title} {product-version} cluster is the best way to ensure that the {product-title} cluster you get will be easy to install, maintain, and upgrade going forward. However, because you may want to add to or change your {product-title} cluster, openshift-install offers several ways to modify the default installation or add to it later. These include: - -* Creating an install-config file: Changing the contents of the install-config file, to identify things like the cluster name and credentials, is fully supported. -* Creating ignition-config files: Viewing ignition-config files, which define how individual nodes are configured when they are first deployed, is fully supported. However, changing those files is not supported. -* Creating Kubernetes (manifests) and {product-title} (openshift) manifest files: You can view manifest files in the manifests and openshift directories to see how Kubernetes and {product-title} features are configured, respectively. Changing those files is not supported. 
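For reference against the first item in the preceding list, a minimal install-config file for an AWS cluster might look like the following sketch; the domain, cluster name, and region are illustrative, and the exact fields vary by platform and {product-title} version:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com           # illustrative base domain
metadata:
  name: mycluster                 # illustrative cluster name
compute:
- name: worker
  replicas: 3
controlPlane:
  name: master
  replicas: 3
platform:
  aws:
    region: us-east-1             # illustrative AWS region
pullSecret: '{"auths": ...}'      # pull secret obtained from Red Hat
sshKey: ssh-ed25519 AAAA...       # public key used for node access
----

Editing values such as these is the supported customization path described in the first item; the generated Ignition configs and manifests remain view-only.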
- -Whether you want to change your {product-title} installation or simply gain a deeper understanding of the details of the installation process, the goal of this section is to step you through an {product-title} installation. Along the way, it covers: - -* The underlying activities that go on under the covers to bring up an {product-title} cluster -* Major components that are leveraged ({op-system}, Ignition, Terraform, and so on) -* Opportunities to customize the install process (install configs, Ignition configs, manifests, and so on) diff --git a/_unused_topics/security-context-constraints-restore-defaults.adoc b/_unused_topics/security-context-constraints-restore-defaults.adoc deleted file mode 100644 index 089b41a32468..000000000000 --- a/_unused_topics/security-context-constraints-restore-defaults.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="security-context-constraints-restore-defaults_{context}"] -= Restoring the default Security Context Constraints - -If the default Security Context Constraints (SCCs) are not present when the -master restarts, they are created again. To reset SCCs to the default values or -update existing SCCs to new default definitions after an upgrade you can either: - -. Delete any SCC you want to reset and restart the master. -. Use the `oc adm policy reconcile-sccs` command. - -The `oc adm policy reconcile-sccs` command sets all SCC policies to the default -values but retains any additional users, groups, labels, annotations, and -priorities you set. - -To view which SCCs will be changed, you can run the command with no options or -by specifying your preferred output with the `-o ` option. - -After reviewing it is recommended that you back up your existing SCCs and then -use the `--confirm` option to persist the data. - -[NOTE] -==== -If you want to reset priorities and grants, use the `--additive-only=false` option. -==== - -[NOTE] -==== -If you customized settings other than priority, users, groups, labels, or annotations in an -SCC, you lose those settings when you reconcile. -==== diff --git a/_unused_topics/security-overview.adoc b/_unused_topics/security-overview.adoc deleted file mode 100644 index 4e9ea7cfc726..000000000000 --- a/_unused_topics/security-overview.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * orphaned - -[id="security-overview_{context}"] -= Security in {product-title} - -The {product-title} and Kubernetes APIs authenticate users who present -credentials and then authorizes them based on their role. Both developers and -administrators can be authenticated through a number of means, primarily -OAuth tokens and X.509 client certificates. OAuth tokens are signed with JSON -Web Algorithm _RS256_, which is RSA signature algorithm PKCS#1 v1.5 with SHA-256. - -Developers, the clients of the system, typically make REST API calls from a -client program like `oc` or to the {product-title} web console through their browser. -Both methods use OAuth bearer tokens for most communication. Infrastructure components. -like nodes, use client certificates that are generated by the system that contain their -identities. Infrastructure components that run in containers use a token that is -associated with their service account to connect to the API. - -Authorization is handled in the {product-title} policy engine, which defines -actions like `create pod` or `list services`, and groups them into roles in a -policy document. 
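To make that model concrete, the following is a small sketch in standard Kubernetes RBAC terms; the role name, namespace, and user are invented for illustration and are not taken from the original module:

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-viewer                # illustrative role that groups allowed actions
  namespace: example-project      # illustrative namespace
rules:
- apiGroups: [""]
  resources: ["pods", "services"]
  verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pod-viewer-binding
  namespace: example-project
subjects:
- kind: User
  name: developer                 # illustrative user identifier
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-viewer
  apiGroup: rbac.authorization.k8s.io
----

Cluster-wide grants use `ClusterRole` and `ClusterRoleBinding` in the same way.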
Roles are bound to users or groups by the user or group -identifier. When a user or service account attempts an action, the policy engine -checks for one or more of the roles assigned to the user, such as a cluster -administrator or administrator of the current project, before allowing it to -continue. - -ifdef::openshift-origin,openshift-online,openshift-enterprise,openshift-webscale[] -Since every container that runs on the cluster is associated with a service -account, it is also possible to associate secrets to those service accounts and have them -automatically delivered into the container. This secret delivery enables the infrastructure to -manage secrets for pulling and pushing images, builds, and the deployment -components and also allows application code to use those secrets. -endif::[] - -[id="architecture-overview-tls-support_{context}"] -== TLS Support - -All communication channels with the REST API, as well as between master -components such as etcd and the API server, are secured with TLS. TLS provides -strong encryption, data integrity, and authentication of servers with X.509 -server certificates and public key infrastructure. -ifdef::openshift-origin,openshift-enterprise[] -By default, a new internal PKI is created for each deployment of -{product-title}. The internal PKI uses 2048 bit RSA keys and SHA-256 signatures. -endif::[] -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -Custom certificates for public hosts are supported as well. -endif::[] - -{product-title} uses Golang’s standard library implementation of -link:https://golang.org/pkg/crypto/tls/[*crypto/tls*] and does not depend on any -external crypto and TLS libraries. Additionally, the client depends on external -libraries for GSSAPI authentication and OpenPGP signatures. GSSAPI is typically -provided by either MIT Kerberos or Heimdal Kerberos, which both use OpenSSL's -libcrypto. OpenPGP signature verification is handled by libgpgme and GnuPG. - -The insecure versions SSL 2.0 and SSL 3.0 are unsupported and not available. The -{product-title} server and `oc` client only provide TLS 1.2 by default. TLS 1.0 -and TLS 1.1 can be enabled in the server configuration. Both server and client -prefer modern cipher suites with authenticated encryption algorithms and perfect -forward secrecy. Cipher suites with deprecated and insecure algorithms such as -RC4, 3DES, and MD5 are disabled. Some internal clients, like LDAP -authentication, have less restrict settings with TLS 1.0 to 1.2 and more cipher -suites enabled. - -.Supported TLS Versions -[cols="4*", options="header"] -|=== -|TLS Version -|{product-title} Server -|`oc` Client -|Other Clients - -|SSL 2.0 -|Unsupported -|Unsupported -|Unsupported - -|SSL 3.0 -|Unsupported -|Unsupported -|Unsupported - -|TLS 1.0 -|No footnoteref:[tlsconfig,Disabled by default, but can be enabled in the server configuration.] -|No footnoteref:[tlsconfig] -|Maybe footnoteref:[otherclient,Some internal clients, such as the LDAP client.] - -|TLS 1.1 -|No footnoteref:[tlsconfig] -|No footnoteref:[tlsconfig] -|Maybe footnoteref:[otherclient] - -|TLS 1.2 -|*Yes* -|*Yes* -|*Yes* - -|TLS 1.3 -|N/A footnoteref:[tls13,TLS 1.3 is still under development.] 
-|N/A footnoteref:[tls13] -|N/A footnoteref:[tls13] -|=== - -The following list of enabled cipher suites of {product-title}'s server and `oc` -client are sorted in preferred order: - -- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` -- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` -- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` -- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` -- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` -- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` -- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256` -- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256` -- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA` -- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA` -- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA` -- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA` -- `TLS_RSA_WITH_AES_128_GCM_SHA256` -- `TLS_RSA_WITH_AES_256_GCM_SHA384` -- `TLS_RSA_WITH_AES_128_CBC_SHA` -- `TLS_RSA_WITH_AES_256_CBC_SHA` diff --git a/_unused_topics/serverless-creating-kubeconfig-file.adoc b/_unused_topics/serverless-creating-kubeconfig-file.adoc deleted file mode 100644 index cecdb2fbba4f..000000000000 --- a/_unused_topics/serverless-creating-kubeconfig-file.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module is included in the following assemblies: -// -// serverless/knative-client.adoc - -[id="create-kubeconfig-file_{contect}"] -= Creating a `kubeconfig` file - -Use `kubeconfig` files to organize information about clusters, users, namespaces, and authentication mechanisms. The CLI tool uses `kubeconfig` files to communicate with the API server of a cluster. - -.Procedure -* Create a basic `kubeconfig` file from client certificates. Use the following command: - ----- -$ oc adm create-kubeconfig \ - --client-certificate=/path/to/client.crt \ - --client-key=/path/to/client.key \ - --certificate-authority=/path/to/ca.crt ----- \ No newline at end of file diff --git a/_unused_topics/serverless-rn-template-module.adoc b/_unused_topics/serverless-rn-template-module.adoc deleted file mode 100644 index 2b373d05d109..000000000000 --- a/_unused_topics/serverless-rn-template-module.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies -// -// * /serverless/serverless-release-notes.adoc - -:_content-type: REFERENCE -[id="serverless-rn-_{context}"] -= Release notes for Red Hat {ServerlessProductName} -// add a version, e.g. 1.20.0 -//update the to match the filename and IDs, then remove these comments - -{ServerlessProductName} is now available. New features, changes, and known issues that pertain to {ServerlessProductName} on {product-title} are included in this topic. - -[id="new-features-_{context}"] -== New features -// add a version, e.g. 1-20-0 - -* {ServerlessProductName} now uses Knative Serving 0.x. -* {ServerlessProductName} now uses Knative Eventing 0.x. -* {ServerlessProductName} now uses Kourier 0.x. -* {ServerlessProductName} now uses Knative (`kn`) CLI 0.x. -* {ServerlessProductName} now uses Knative Kafka 0.x. -* The `kn func` CLI plug-in now uses `func` 0.x. - -[id="fixed-issues-_{context}"] -== Fixed issues -// add a version, e.g. 1-20-0 - -[id="known-issues-_{context}"] -== Known issues -// add a version, e.g. 
1-20-0 diff --git a/_unused_topics/service-accounts-adding-secrets.adoc b/_unused_topics/service-accounts-adding-secrets.adoc deleted file mode 100644 index 11d925ea62c7..000000000000 --- a/_unused_topics/service-accounts-adding-secrets.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/using-service-accounts.adoc - -[id="service-accounts-managing-secrets_{context}"] -== Managing secrets on a service account's pod - -In addition to providing API credentials, a pod's service account determines -which secrets the pod is allowed to use. - -Pods use secrets in two ways: - -* image pull secrets, providing credentials used to pull images for the pod's containers -* mountable secrets, injecting the contents of secrets into containers as files - -To allow a secret to be used as an image pull secret by a service account's -pods, run: - ----- -$ oc secrets link --for=pull ----- - -To allow a secret to be mounted by a service account's pods, run: - ----- -$ oc secrets link --for=mount ----- - -[NOTE] -==== -Limiting secrets to only the service accounts that reference them is disabled by -default. This means that if `serviceAccountConfig.limitSecretReferences` is set -to `false` (the default setting) in the master configuration file, mounting -secrets to a service account's pods with the `--for=mount` option is not -required. However, using the `--for=pull` option to enable using an image pull -secret is required, regardless of the -`serviceAccountConfig.limitSecretReferences` value. -==== - -This example creates and adds secrets to a service account: - ----- -$ oc create secret generic secret-plans \ - --from-file=plan1.txt \ - --from-file=plan2.txt -secret/secret-plans - -$ oc create secret docker-registry my-pull-secret \ - --docker-username=mastermind \ - --docker-password=12345 \ - --docker-email=mastermind@example.com -secret/my-pull-secret - -$ oc secrets link robot secret-plans --for=mount - -$ oc secrets link robot my-pull-secret --for=pull - -$ oc describe serviceaccount robot -Name: robot -Labels: -Image pull secrets: robot-dockercfg-624cx - my-pull-secret - -Mountable secrets: robot-token-uzkbh - robot-dockercfg-624cx - secret-plans - -Tokens: robot-token-8bhpp - robot-token-uzkbh ----- diff --git a/_unused_topics/service-accounts-managing-secrets.adoc b/_unused_topics/service-accounts-managing-secrets.adoc deleted file mode 100644 index cae0fb9bf790..000000000000 --- a/_unused_topics/service-accounts-managing-secrets.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/using-service-accounts.adoc - -[id="service-accounts-managing-secrets_{context}"] -= Managing allowed secrets - -You can use the service account's secrets in your application's pods for: - -* Image pull secrets, providing credentials used to pull images for the pod's containers -* Mountable secrets, injecting the contents of secrets into containers as files - -.Procedure - -. Create a secret: -+ ----- -$ oc create secret generic \ - --from-file=.txt - -secret/ ----- - -. To allow a secret to be used as an image pull secret by a service account's -pods, run: -+ ----- -$ oc secrets link --for=pull ----- - -. To allow a secret to be mounted by a service account's pods, run: -+ ----- -$ oc secrets link --for=mount ----- - -. 
Confirm that the secret was added to the service account: -+ ----- -$ oc describe serviceaccount -Name: -Labels: -Image pull secrets: robot-dockercfg-624cx - my-pull-secret - -Mountable secrets: robot-token-uzkbh - robot-dockercfg-624cx - secret-plans - -Tokens: robot-token-8bhpp - robot-token-uzkbh ----- - -//// -[NOTE] -==== -Limiting secrets to only the service accounts that reference them is disabled by -default. This means that if `serviceAccountConfig.limitSecretReferences` is set -to `false` (the default setting) in the master configuration file, mounting -secrets to a service account's pods with the `--for=mount` option is not -required. However, using the `--for=pull` option to enable using an image pull -secret is required, regardless of the -`serviceAccountConfig.limitSecretReferences` value. -==== -//// diff --git a/_unused_topics/understanding-installation.adoc b/_unused_topics/understanding-installation.adoc deleted file mode 100644 index dbd19c82853d..000000000000 --- a/_unused_topics/understanding-installation.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="understanding-installation_{context}"] -= Understanding {product-title} installation - -{product-title} installation is designed to quickly spin up an {product-title} cluster, with the user starting the cluster required to provide as little information as possible. diff --git a/_unused_topics/understanding-workers-masters.adoc b/_unused_topics/understanding-workers-masters.adoc deleted file mode 100644 index b0028c61b6b4..000000000000 --- a/_unused_topics/understanding-workers-masters.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * - -[id="understanding-workers-masters_{context}"] -= Understanding {product-title} workers and masters - -With installation complete, the cluster is now fully in charge of managing itself. Management of worker (compute) and master (control plane) nodes is done from within the cluster. So, before moving on to what the {product-title} cluster does to help you develop and deploy applications, you should explore how an {product-title} cluster manages itself. For that, we focus on three things; workers, masters (the control plane) and Operators. - -To see which workers and masters are running on your cluster, type: - ----- -$ oc get nodes - -NAME STATUS ROLES AGE VERSION -ip-10-0-0-1.us-east-2.compute.internal Ready worker 4h20m v1.25.0 -ip-10-0-0-2.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0.3.us-east-2.compute.internal Ready worker 4h20m v1.25.0 -ip-10-0-0-4.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0-5.us-east-2.compute.internal Ready master 4h39m v1.25.0 -ip-10-0-0-6.us-east-2.compute.internal Ready worker 4h20m v1.25.0 ----- - -To see more information about internal and external IP addresses, the type of operating system ({op-system}), kernel version, and container runtime (CRI-O), add the `-o wide` option. - ----- -$ oc get nodes -o wide - -NAME                                       STATUS ROLES  AGE  VERSION  INTERNAL-IP   EXTERNAL-IP  OS-IMAGE             KERNEL-VERSION             CONTAINER-RUNTIME -ip-10-0-134-252.us-east-2.compute.internal Ready worker 17h v1.25.0 10.0.134.252 Red Hat CoreOS 4.0 3.10.0-957.5.1.el7.x86_64 cri-o://1.25.0-1.rhaos4.0.git2f0cb0d.el7 - -.... 
----- diff --git a/_unused_topics/upgrade-cluster-version-definition.adoc b/_unused_topics/upgrade-cluster-version-definition.adoc deleted file mode 100644 index 15f39773dd38..000000000000 --- a/_unused_topics/upgrade-cluster-version-definition.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * none - -[id="upgrade-cluster-version-definition_{context}"] -= ClusterVersion definition - -You can review the `ClusterVersion` definition to see the update history -for your cluster. You can also apply overrides to this definition if your -cluster is not for production or during debugging. - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: ClusterVersion -metadata: - creationTimestamp: 2019-03-22T14:26:41Z - generation: 1 - name: version - resourceVersion: "16740" - selfLink: /apis/config.openshift.io/v1/clusterversions/version - uid: 82f9f2c4-4cae-11e9-90b7-06dc0f62ad38 -spec: - channel: stable-4.3 <1> - overrides: "" <2> - clusterID: 0b1cf91f-c3fb-4f9e-aa02-e0d70c71f6e6 - status: <3> - availableUpdates: null <4> - conditions: <5> - - lastTransitionTime: 2019-05-22T07:13:26Z - status: "True" - type: RetrievedUpdates - - lastTransitionTime: 2019-05-22T07:13:26Z - message: Done applying 4.0.0-0.alpha-2019-03-22-124110 - status: "True" - type: Available - - lastTransitionTime: 2019-05-22T07:12:26Z - status: "False" - type: Failing - - lastTransitionTime: 2019-05-22T07:13:26Z - message: Cluster version is 4.0.0-0.alpha-2019-03-22-124110 - status: "False" - type: Progressing ----- -<1> Specify the channel to use to apply non-standard updates to the -cluster. If you do not change the value, the CVO uses the default channel. -+ -[IMPORTANT] -==== -The default channel contains stable updates. Do not modify the -`ClusterVersionSpec.channel` value on production clusters. If you update your -cluster from a different channel without explicit direction from Red Hat -support, your cluster is no longer supported. -==== -<2> A list of overrides for components that the CVO manages. Mark -components as `unmanaged` to prevent the CVO from creating or updating the object. -+ -[IMPORTANT] -==== -Set the `ClusterVersionSpec.overrides` parameter value only during cluster -debugging. Setting this value can prevent successful upgrades and is not -supported for production clusters. -==== -<3> The status of available updates and any in-progress updates. These values display -the version that the cluster is reconciling to, and the conditions -array reports whether the update succeeded, is in progress, or is failing. -All of the `ClusterVersionStatus` values are set by the cluster itself, and you -cannot modify them. -<4> The list of appropriate updates for the cluster. This list is empty if no -updates are recommended, the update service is unavailable, or you specified -an invalid channel. -<5> The condition of the CVO. This section contains both the reason that the -cluster entered its current condition and a message that provides more -information about the condition. - -* `Available` means that the upgrade to the `desiredUpdate` value completed. -* `Progressing` means that an upgrade is in progress. -* `Failing` means that an update is blocked by a temporary or permanent error. diff --git a/_unused_topics/using-images-source-to-image-java.adoc b/_unused_topics/using-images-source-to-image-java.adoc deleted file mode 100644 index 06933b38c65f..000000000000 --- a/_unused_topics/using-images-source-to-image-java.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. 
Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image"] -= Java -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image -toc::[] - -This topic includes information on the source-to-image (S2I) supported Java images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-java.adoc[leveloffset=+1] -include::modules/images-s2i-java-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-s2i-java-configuration.adoc[leveloffset=+1] -include::modules/images-s2i-java-build-deploy-applications.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-nodejs.adoc b/_unused_topics/using-images-source-to-image-nodejs.adoc deleted file mode 100644 index 5b176a926821..000000000000 --- a/_unused_topics/using-images-source-to-image-nodejs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-nodejs"] -= Node.js -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-nodejs -toc::[] - -This topic includes information on the source-to-image (S2I) supported Node.js images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-nodejs.adoc[leveloffset=+1] -include::modules/images-s2i-nodejs-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-nodejs-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-nodejs-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-perl.adoc b/_unused_topics/using-images-source-to-image-perl.adoc deleted file mode 100644 index f49d044ab927..000000000000 --- a/_unused_topics/using-images-source-to-image-perl.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-perl"] -= Perl -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-perl -toc::[] - -This topic includes information on the source-to-image (S2I) supported Perl images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-perl.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-perl-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-php.adoc b/_unused_topics/using-images-source-to-image-php.adoc deleted file mode 100644 index 275223464506..000000000000 --- a/_unused_topics/using-images-source-to-image-php.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. 
- -[id="using-images-source-to-image-php"] -= PHP -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-php -toc::[] - -This topic includes information on the source-to-image (S2I) supported PHP images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-php.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-php-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-python.adoc b/_unused_topics/using-images-source-to-image-python.adoc deleted file mode 100644 index f72452a4b3e7..000000000000 --- a/_unused_topics/using-images-source-to-image-python.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-python"] -= Python -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-python -toc::[] - -This topic includes information on the source-to-image (S2I) supported Python images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-python.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-python-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/using-images-source-to-image-ruby.adoc b/_unused_topics/using-images-source-to-image-ruby.adoc deleted file mode 100644 index b96681837cc7..000000000000 --- a/_unused_topics/using-images-source-to-image-ruby.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used. - -[id="using-images-source-to-image-ruby"] -= Ruby -include::_attributes/common-attributes.adoc[] -:context: using-images-source-to-image-ruby -toc::[] - -This topic includes information on the source-to-image (S2I) supported Ruby images available for {product-title} users. - -//Add link to Build -> S21 following updates - -include::modules/images-using-images-s2i-ruby.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-pulling-images.adoc[leveloffset=+1] -include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-configuration.adoc[leveloffset=+1] -include::modules/images-using-images-s2i-ruby-hot-deploying.adoc[leveloffset=+1] diff --git a/_unused_topics/windows-machine-config-operator.adoc b/_unused_topics/windows-machine-config-operator.adoc deleted file mode 100644 index f315ccefb886..000000000000 --- a/_unused_topics/windows-machine-config-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="windows-machine-config-operator_{context}"] -= Windows Machine Config Operator - -[discrete] -== Purpose - -The Windows Machine Config Operator (WMCO) orchestrates the process of deploying and managing Windows workloads on a cluster. 
The WMCO configures Windows machines into compute nodes, enabling Windows container workloads to run in {product-title} clusters. This is done by creating a compute machine set that uses a Windows image with the Docker-formatted container runtime installed. The WMCO completes all necessary steps to configure the underlying Windows VM so that it can join the cluster as a compute node. - -[discrete] -== Project - -link:https://github.com/openshift/windows-machine-config-operator[windows-machine-config-operator] diff --git a/adding_service_cluster/adding-service.adoc b/adding_service_cluster/adding-service.adoc deleted file mode 100644 index bceea84dd308..000000000000 --- a/adding_service_cluster/adding-service.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="adding-service"] -= Adding services to a cluster using {cluster-manager-first} console -:context: adding-service - -toc::[] - -You can add, access, and remove add-on services for your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -ifdef::openshift-rosa[] -== Prerequisites -* For the Amazon CloudWatch service, you must first install the `cluster-logging-operator` using the ROSA CLI (`rosa`). -endif::[] - -include::modules/adding-service-existing.adoc[leveloffset=+1] -include::modules/access-service.adoc[leveloffset=+1] -include::modules/deleting-service.adoc[leveloffset=+1] -//include::modules/deleting-service-cli.adoc[leveloffset=+1] - -ifdef::openshift-rosa[] -[role="_additional-resources"] -== Additional resources -* For information about the `cluster-logging-operator` and the AWS CloudWatch log forwarding service, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] -endif::[] diff --git a/adding_service_cluster/available-services.adoc b/adding_service_cluster/available-services.adoc deleted file mode 100644 index e1404beb62ea..000000000000 --- a/adding_service_cluster/available-services.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="available-services"] -= Add-on services available for {product-title} -:context: available-services - -toc::[] - -You can add services to your existing {product-title} cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. 
- -include::modules/osd-rhoam.adoc[leveloffset=+1] diff --git a/adding_service_cluster/images b/adding_service_cluster/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/adding_service_cluster/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/adding_service_cluster/modules b/adding_service_cluster/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/adding_service_cluster/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/adding_service_cluster/rosa-available-services.adoc b/adding_service_cluster/rosa-available-services.adoc deleted file mode 100644 index bc7325278d6f..000000000000 --- a/adding_service_cluster/rosa-available-services.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="rosa-available-services"] -= Add-on services available for {product-title} -:context: rosa-available-services - - -You can add services to your existing {product-title} (ROSA) cluster using the xref:../adding_service_cluster/adding-service.adoc#adding-service[{cluster-manager-first} console]. - -These services can also be installed xref:../rosa_cli/rosa-manage-objects-cli.adoc#rosa-managing-objects-cli[using the ROSA CLI (`rosa`)]. - - -include::modules/aws-cloudwatch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://aws.amazon.com/cloudwatch/[Amazon CloudWatch product information] -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] - -include::modules/osd-rhoam.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_api_management[Red Hat OpenShift API Management] documentation - -//// -include::modules/rosa-rhoda.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-database-access[Red Hat OpenShift Database Access] product page -//// -// This module and additional resource are no longer included in the document due to OSDOCS-5817. - -include::modules/rosa-rhods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_science/1[Red Hat OpenShift Data Science] documentation -* link:https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-data-science[Red Hat OpenShift Data Science] product page diff --git a/applications/PLACEHOLDER b/applications/PLACEHOLDER deleted file mode 100644 index 985a0e1895b7..000000000000 --- a/applications/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please leave this file until after Node PRs merge, as is it needed for the topic_yaml. 
Subtopics are not allowed, apparently, without at least one topic in the TOC - diff --git a/applications/_attributes b/applications/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/applications/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/applications/application-health.adoc b/applications/application-health.adoc deleted file mode 100644 index de694b2f8e2c..000000000000 --- a/applications/application-health.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -:context: application-health -[id="application-health"] -= Monitoring application health by using health checks -include::_attributes/common-attributes.adoc[] - -toc::[] - - -In software systems, components can become unhealthy due to transient issues such as temporary connectivity loss, configuration errors, or problems with external dependencies. {product-title} applications have a number of options to detect and handle unhealthy containers. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/application-health-about.adoc[leveloffset=+1] - -include::modules/application-health-configuring.adoc[leveloffset=+1] - -include::modules/odc-monitoring-application-health-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-adding-health-checks.adoc[leveloffset=+1] - -include::modules/odc-editing-health-checks.adoc[leveloffset=+1] - -include::modules/odc-monitoring-health-checks.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For details on switching to the *Developer* perspective in the web console, see xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[About the *Developer* perspective]. -* For details on adding health checks while creating and deploying an application, see *Advanced Options* in the xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] section. diff --git a/applications/config-maps.adoc b/applications/config-maps.adoc deleted file mode 100644 index 39f675a19c14..000000000000 --- a/applications/config-maps.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="config-maps"] -= Using config maps with applications -include::_attributes/common-attributes.adoc[] -:context: config-maps - -toc::[] - -Config maps allow you to decouple configuration artifacts from image content to keep containerized applications portable. - -The following sections define config maps and how to create and use them. - -For information on creating config maps, see xref:../nodes/pods/nodes-pods-configmaps.adoc[Creating and using config maps]. - -include::modules/nodes-pods-configmap-overview.adoc[leveloffset=+1] - -[id="nodes-pods-config-maps-consuming-configmap-in-pods"] -== Use cases: Consuming config maps in pods - -The following sections describe some uses cases when consuming `ConfigMap` -objects in pods. 
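Before the individual use cases that follow, a minimal sketch of the most common one: consuming a config map key as an environment variable. It assumes a config map named `example-config` with a `log_level` key already exists in the project; the pod name, image, and key are illustrative only and not taken from the referenced modules:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: configmap-env-example
spec:
  restartPolicy: Never
  containers:
  - name: app
    image: registry.access.redhat.com/ubi9/ubi-minimal
    command: ["/bin/sh", "-c", "echo LOG_LEVEL=$LOG_LEVEL"]
    env:
    - name: LOG_LEVEL              # environment variable injected into the container
      valueFrom:
        configMapKeyRef:
          name: example-config     # assumed ConfigMap name
          key: log_level           # assumed key in that ConfigMap
----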
- -include::modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc[leveloffset=+2] - -include::modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc[leveloffset=+2] diff --git a/applications/connecting_applications_to_services/_attributes b/applications/connecting_applications_to_services/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/connecting_applications_to_services/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc b/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc deleted file mode 100644 index 3a22f233cb3d..000000000000 --- a/applications/connecting_applications_to_services/binding-workloads-using-sbo.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="binding-workloads-using-sbo"] -= Binding workloads using Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: binding-workloads-using-sbo - -toc::[] - -Application developers must bind a workload to one or more backing services by using a binding secret. This secret is generated for the purpose of storing information to be consumed by the workload. - -As an example, consider that the service you want to connect to is already exposing the binding data. In this case, you would also need a workload to be used along with the `ServiceBinding` custom resource (CR). By using this `ServiceBinding` CR, the workload sends a binding request with the details of the services to bind with. - -.Example of `ServiceBinding` CR -[source,yaml] ----- -apiVersion: binding.operators.coreos.com/v1alpha1 -kind: ServiceBinding -metadata: - name: spring-petclinic-pgcluster - namespace: my-petclinic -spec: - services: <1> - - group: postgres-operator.crunchydata.com - version: v1beta1 - kind: PostgresCluster - name: hippo - application: <2> - name: spring-petclinic - group: apps - version: v1 - resource: deployments ----- -<1> Specifies a list of service resources. -<2> The sample application that points to a Deployment or any other similar resource with an embedded PodSpec. - -As shown in the previous example, you can also directly use a `ConfigMap` or a `Secret` itself as a service resource to be used as a source of binding data. - -include::modules/sbo-naming-strategies.adoc[leveloffset=+1] -include::modules/sbo-advanced-binding-options.adoc[leveloffset=+1] -include::modules/sbo-binding-workloads-that-are-not-compliant-with-PodSpec.adoc[leveloffset=+1] -include::modules/sbo-unbinding-workloads-from-a-backing-service.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_binding-workloads-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#binding-a-workload-together-with-a-backing-service_understanding-service-binding-operator[Binding a workload together with a backing service]. -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#connecting-the-spring-petclinic-sample-application-to-the-postgresql-database-service[Connecting the Spring PetClinic sample application to the PostgreSQL database service]. 
-* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-creating-custom-resources-from-file_crd-managing-resources-from-crds[Creating custom resources from a file] -* link:https://redhat-developer.github.io/service-binding-operator/userguide/binding-workloads-using-sbo/custom-path-injection.html#_workload_resource_mapping[Example schema of the ClusterWorkloadResourceMapping resource]. - diff --git a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc deleted file mode 100644 index 50c305d5f699..000000000000 --- a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="exposing-binding-data-from-a-service"] -= Exposing binding data from a service -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: exposing-binding-data-from-a-service - -toc::[] - -[role="_abstract"] -Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider requires a different way to access their secrets and consume them in a workload. - -The {servicebinding-title} enables application developers to easily bind workloads together with operator-managed backing services, without any manual procedures to configure the binding connection. For the {servicebinding-title} to provide the binding data, as an Operator provider or user who creates backing services, you must expose the binding data to be automatically detected by the {servicebinding-title}. Then, the {servicebinding-title} automatically collects the binding data from the backing service and shares it with a workload to provide a consistent and predictable experience. - -include::modules/sbo-methods-of-exposing-binding-data.adoc[leveloffset=+1] -include::modules/sbo-data-model.adoc[leveloffset=+1] -include::modules/sbo-setting-annotations-mapping-optional.adoc[leveloffset=+1] -include::modules/sbo-rbac-requirements.adoc[leveloffset=+1] -include::modules/sbo-categories-of-exposable-binding-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_exposing-binding-data"] -== Additional resources -// * link:https://github.com/openshift/console/blob/master/frontend/packages/operator-lifecycle-manager/src/components/descriptors/reference/reference.md[OLM Descriptor Reference]. -// When OLM descriptors are supported again, add this additional resource. -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-generating-csvs[Defining cluster service versions (CSVs)]. -* xref:../../applications/connecting_applications_to_services/projecting-binding-data.adoc#projecting-binding-data[Projecting binding data]. 
\ No newline at end of file diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc deleted file mode 100644 index 001b8a622b0e..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="getting-started-with-service-binding-ibm-power-ibm-z"] -= Getting started with service binding on {ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding-ibm-power-ibm-z - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service. - -// Prerequisites for getting started with Service Binding Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed the `oc` CLI. -* You have installed the {servicebinding-title} from OperatorHub. - -//Deploying PostgreSQL operator -include::modules/sbo-deploying-a-postgresql-database-operator-power-z.adoc[leveloffset=+1] - -//Creating a PostgreSQL database instance -include::modules/sbo-creating-a-postgresql-database-instance-power-z.adoc[leveloffset=+1] - -//Deploying the Spring PetClinic sample application -include::modules/sbo-deploying-the-spring-petclinic-sample-application-power-z.adoc[leveloffset=+1] - -//Connecting the Spring PetClinic sample application to the PostgreSQL database service -include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service-power-z.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_getting-started-with-service-binding-ibm-power-ibm-z"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator] -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] -* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions] diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc deleted file mode 100644 index 6fb5ac0f69ab..000000000000 --- a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="getting-started-with-service-binding"] -= Getting started with service binding -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: getting-started-with-service-binding - -toc::[] - -[role="_abstract"] -The {servicebinding-title} manages the data plane for workloads and backing services. 
This guide provides instructions with examples to help you create a database instance, deploy an application, and use the {servicebinding-title} to create a binding connection between the application and the database service. - -// Prerequisites for getting started with Service Binding Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed the `oc` CLI. -* You have installed {servicebinding-title} from OperatorHub. -* You have installed the 5.1.2 version of the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-petclinic` namespace. -+ -[NOTE] -==== -You can create the namespace using the `oc create namespace my-petclinic` command. -==== - -//Creating a PostgreSQL database instance -include::modules/sbo-creating-a-postgresql-database-instance.adoc[leveloffset=+1] - -//Deploying the Spring PetClinic sample application -include::modules/sbo-deploying-the-spring-petclinic-sample-application.adoc[leveloffset=+1] - -//Connecting the Spring PetClinic sample application to the PostgreSQL database service -include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_getting-started-sbo"] -== Additional Resources -* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator]. -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective]. -* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions]. -* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/images b/applications/connecting_applications_to_services/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/applications/connecting_applications_to_services/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/applications/connecting_applications_to_services/installing-sbo.adoc b/applications/connecting_applications_to_services/installing-sbo.adoc deleted file mode 100644 index afd97908dad6..000000000000 --- a/applications/connecting_applications_to_services/installing-sbo.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-sbo"] -= Installing Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: installing-sbo - -toc::[] - -[role="_abstract"] -This guide walks cluster administrators through the process of installing the {servicebinding-title} to an {product-title} cluster. - -You can install {servicebinding-title} on {product-title} 4.7 and later. - -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. 
-* Your cluster has the xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc#enabling-cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. - - -//Installing Service Binding Operator using web console - -include::modules/op-installing-sbo-operator-using-the-web-console.adoc[leveloffset=+1] - - -== Additional Resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. \ No newline at end of file diff --git a/applications/connecting_applications_to_services/modules b/applications/connecting_applications_to_services/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/applications/connecting_applications_to_services/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc deleted file mode 100644 index 5ea513d917dd..000000000000 --- a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-connecting-an-application-to-a-service-using-the-developer-perspective"] -= Connecting an application to a service using the Developer perspective -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: odc-connecting-an-application-to-a-service-using-the-developer-perspective - -toc::[] - -[role="_abstract"] - -Use the *Topology* view for the following purposes: - -** Group multiple components within an application. - -** Connect components with each other. - -** Connect multiple resources to services with labels. - -You can either use a binding or a visual connector to connect components. - - -A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tool-tip which appears when you drag an arrow to such a target node. When an application is connected to a service by using a binding connector a `ServiceBinding` resource is created. Then, the {servicebinding-title} controller projects the necessary binding data into the application deployment. After the request is successful, the application is redeployed establishing an interaction between the connected components. - -A visual connector establishes only a visual connection between the components, depicting an intent to connect. No interaction between the components is established. If the target node is not an Operator-backed service the *Create a visual connector* tool-tip is displayed when you drag an arrow to a target node. 
- -include::modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc[leveloffset=+1] -include::modules/odc-creating-a-visual-connection-between-components.adoc[leveloffset=+1] -include::modules/odc-creating-a-binding-connection-between-components.adoc[leveloffset=+1] -include::modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc[leveloffset=+1] -include::modules/odc-visualizing-the-binding-connections-to-resources.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-odc-connecting-an-application-to-a-service-using-the-developer-perspective"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. -* link:https://github.com/redhat-developer/service-binding-operator#known-bindable-operators[Known bindable Operators]. diff --git a/applications/connecting_applications_to_services/projecting-binding-data.adoc b/applications/connecting_applications_to_services/projecting-binding-data.adoc deleted file mode 100644 index c27b55566bc1..000000000000 --- a/applications/connecting_applications_to_services/projecting-binding-data.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="projecting-binding-data"] -= Projecting binding data -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: projecting-binding-data - -toc::[] - -[role="_abstract"] -This section provides information on how you can consume the binding data. - -== Consumption of binding data -After the backing service exposes the binding data, for a workload to access and consume this data, you must project it into the workload from a backing service. {servicebinding-title} automatically projects this set of data into the workload in the following methods: - -. By default, as files. -. As environment variables, after you configure the `.spec.bindAsFiles` parameter from the `ServiceBinding` resource. - -include::modules/sbo-configuration-of-directory-path-to-project-binding-data.adoc[leveloffset=+1] -include::modules/sbo-projecting-the-binding-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_projecting-binding-data-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc#exposing-binding-data-from-a-service[Exposing binding data from a service]. -* link:https://redhat-developer.github.io/service-binding-operator/userguide/using-projected-bindings/using-projected-bindings.html[Using the projected binding data in the source code of the application]. 
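A hedged sketch of the second projection method, environment variables: starting from the `ServiceBinding` example shown earlier for the Spring PetClinic sample, setting `.spec.bindAsFiles` to `false` asks the Operator to project the collected binding data as environment variables instead of files. The service and workload names are carried over from that example and remain assumptions:

[source,yaml]
----
apiVersion: binding.operators.coreos.com/v1alpha1
kind: ServiceBinding
metadata:
  name: spring-petclinic-pgcluster
  namespace: my-petclinic
spec:
  bindAsFiles: false               # project binding data as environment variables instead of files
  services:
    - group: postgres-operator.crunchydata.com
      version: v1beta1
      kind: PostgresCluster
      name: hippo
  application:
    name: spring-petclinic
    group: apps
    version: v1
    resource: deployments
----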
\ No newline at end of file diff --git a/applications/connecting_applications_to_services/sbo-release-notes.adoc b/applications/connecting_applications_to_services/sbo-release-notes.adoc deleted file mode 100644 index c4a67d04d8c8..000000000000 --- a/applications/connecting_applications_to_services/sbo-release-notes.adoc +++ /dev/null @@ -1,73 +0,0 @@ -//OpenShift Service Binding Release Notes -:_content-type: ASSEMBLY -[id="servicebinding-release-notes"] -= Release notes for {servicebinding-title} -:context: servicebinding-release-notes -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] - -toc::[] - -The {servicebinding-title} consists of a controller and an accompanying custom resource definition (CRD) for service binding. It manages the data plane for workloads and backing services. The Service Binding Controller reads the data made available by the control plane of backing services. Then, it projects this data to workloads according to the rules specified through the `ServiceBinding` resource. - -With {servicebinding-title}, you can: - -* Bind your workloads together with Operator-managed backing services. -* Automate configuration of binding data. -* Provide service operators a low-touch administrative experience to provision and manage access to services. -* Enrich development lifecycle with a consistent and declarative service binding method that eliminates discrepancies in cluster environments. - -The custom resource definition (CRD) of the {servicebinding-title} supports the following APIs: - -* *Service Binding* with the `binding.operators.coreos.com` API group. -* *Service Binding (Spec API)* with the `servicebinding.io` API group. - -[id="support-matrix"] -== Support matrix - -Some features in the following table are in link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. These experimental features are not intended for production use. - -In the table, features are marked with the following statuses: - -- *TP*: _Technology Preview_ - -- *GA*: _General Availability_ - -Note the following scope of support on the Red Hat Customer Portal for these features: - -.Support matrix -[options="header"] -|=== -|*{servicebinding-title}* 2+|*API Group and Support Status*|*OpenShift Versions* - -|*Version*|*`binding.operators.coreos.com`* |*`servicebinding.io`* | -|1.3.3 |GA |GA |4.9-4.12 -|1.3.1 |GA |GA |4.9-4.11 -|1.3 |GA |GA |4.9-4.11 -|1.2 |GA |GA |4.7-4.11 -|1.1.1 |GA |TP |4.7-4.10 -|1.1 |GA |TP |4.7-4.10 -|1.0.1 |GA |TP |4.7-4.9 -|1.0 |GA |TP |4.7-4.9 - -|=== - -[id="servicebinding-inclusive-language"] -== Making open source more inclusive - -Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see link:https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[Red Hat CTO Chris Wright's message]. 
- -// Modules included, most to least recent -include::modules/sbo-release-notes-1-3-3.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-3-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-3.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-2.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-1-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-0-1.adoc[leveloffset=+1] -include::modules/sbo-release-notes-1-0.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_release-notes-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Understanding Service Binding Operator]. diff --git a/applications/connecting_applications_to_services/snippets b/applications/connecting_applications_to_services/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/connecting_applications_to_services/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc deleted file mode 100644 index 5a6c5500fe95..000000000000 --- a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-service-binding-operator"] -= Understanding Service Binding Operator -include::_attributes/common-attributes.adoc[] -include::_attributes/servicebinding-document-attributes.adoc[] -:context: understanding-service-binding-operator - -toc::[] - -[role="_abstract"] -Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider suggests a different way to access their secrets and consume them in a workload. In addition, manual configuration and maintenance of this binding together of workloads and backing services make the process tedious, inefficient, and error-prone. - -The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. - -include::modules/sbo-service-binding-terminology.adoc[leveloffset=+1] -include::modules/sbo-about-service-binding-operator.adoc[leveloffset=+1] -include::modules/sbo-key-features.adoc[leveloffset=+1] -include::modules/sbo-api-differences.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_understanding-sbo"] -== Additional resources -* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding]. 
\ No newline at end of file diff --git a/applications/creating_applications/_attributes b/applications/creating_applications/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/creating_applications/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/creating_applications/creating-applications-using-cli.adoc b/applications/creating_applications/creating-applications-using-cli.adoc deleted file mode 100644 index 10357412a63d..000000000000 --- a/applications/creating_applications/creating-applications-using-cli.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-applications-using-cli"] -= Creating applications using the CLI -include::_attributes/common-attributes.adoc[] -:context: creating-applications-using-cli - -toc::[] - -You can create an {product-title} application from components that include -source or binary code, images, and templates by using the {product-title} -CLI. - -The set of objects created by `new-app` depends on the artifacts passed as -input: source repositories, images, or templates. - -include::modules/applications-create-using-cli-source-code.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-image.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-template.adoc[leveloffset=+1] - -include::modules/applications-create-using-cli-modify.adoc[leveloffset=+1] diff --git a/applications/creating_applications/creating-apps-from-installed-operators.adoc b/applications/creating_applications/creating-apps-from-installed-operators.adoc deleted file mode 100644 index 728bb3c441d3..000000000000 --- a/applications/creating_applications/creating-apps-from-installed-operators.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-apps-from-installed-operators"] -= Creating applications from installed Operators -include::_attributes/common-attributes.adoc[] -:context: creating-apps-from-installed-operators - -toc::[] - -_Operators_ are a method of packaging, deploying, and managing a Kubernetes -application. You can create applications on {product-title} using Operators that -have been installed by a cluster administrator. - -This guide walks developers through an example of creating applications from an -installed Operator using the {product-title} web console. - -[role="_additional-resources"] -.Additional resources - -* See the -xref:../../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Operators] -guide for more on how Operators work and how the Operator Lifecycle Manager is -integrated in {product-title}. 
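To give a feel for what the walkthrough that follows produces, an installed Operator is typically driven by a custom resource such as the classic etcd example below. This is a sketch only; the exact API version and supported fields depend on the Operator release installed on your cluster:

[source,yaml]
----
apiVersion: etcd.database.coreos.com/v1beta2
kind: EtcdCluster
metadata:
  name: example
  namespace: my-project          # assumed project name
spec:
  size: 3                        # number of etcd members the Operator maintains
  version: 3.2.13                # etcd version the Operator deploys
----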
- -include::modules/olm-creating-etcd-cluster-from-operator.adoc[leveloffset=+1] diff --git a/applications/creating_applications/images b/applications/creating_applications/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/applications/creating_applications/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/applications/creating_applications/modules b/applications/creating_applications/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/applications/creating_applications/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc deleted file mode 100644 index 4d93abbdfb4d..000000000000 --- a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-creating-applications-using-developer-perspective"] -= Creating applications using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: odc-creating-applications-using-developer-perspective - -toc::[] - -The *Developer* perspective in the web console provides you the following options from the *+Add* view to create applications and associated services and deploy them on {product-title}: - -* *Getting started resources*: Use these resources to help you get started with Developer Console. You can choose to hide the header using the *Options* menu {kebab}. -** *Creating applications using samples*: Use existing code samples to get started with creating applications on the {product-title}. -** *Build with guided documentation*: Follow the guided documentation to build applications and familiarize yourself with key concepts and terminologies. -** *Explore new developer features*: Explore the new features and resources within the *Developer* perspective. - -* *Developer catalog*: Explore the Developer Catalog to select the required applications, services, or source to image builders, and then add it to your project. -** *All Services*: Browse the catalog to discover services across {product-title}. -** *Database*: Select the required database service and add it to your application. -** *Operator Backed*: Select and deploy the required Operator-managed service. -** *Helm chart*: Select the required Helm chart to simplify deployment of applications and services. -** *Devfile*: Select a devfile from the *Devfile registry* to declaratively define a development environment. -** *Event Source*: Select an event source to register interest in a class of events from a particular system. -+ -[NOTE] -==== -The Managed services option is also available if the RHOAS Operator is installed. -==== - -* *Git repository*: Import an existing codebase, Devfile, or Dockerfile from your Git repository using the *From Git*, *From Devfile*, or *From Dockerfile* options respectively, to build and deploy an application on {product-title}. - -* *Container images*: Use existing images from an image stream or registry to deploy it on to the {product-title}. - -* *Pipelines*: Use Tekton pipeline to create CI/CD pipelines for your software delivery process on the {product-title}. - -* *Serverless*: Explore the *Serverless* options to create, build, and deploy stateless and serverless applications on the {product-title}. 
-** *Channel*: Create a Knative channel to create an event forwarding and persistence layer with in-memory and reliable implementations. - -* *Samples*: Explore the available sample applications to create, build, and deploy an application quickly. - -* *Quick Starts*: Explore the quick start options to create, import, and run applications with step-by-step instructions and tasks. - -* *From Local Machine*: Explore the *From Local Machine* tile to import or upload files on your local machine for building and deploying applications easily. -** *Import YAML*: Upload a YAML file to create and define resources for building and deploying applications. -** *Upload JAR file*: Upload a JAR file to build and deploy Java applications. - -* *Share my Project*: Use this option to add or remove users to a project and provide accessibility options to them. - -* *Helm Chart repositories*: Use this option to add Helm Chart repositories in a namespace. - -* *Re-ordering of resources*: Use these resources to re-order pinned resources added to your navigation pane. The drag-and-drop icon is displayed on the left side of the pinned resource when you hover over it in the navigation pane. The dragged resource can be dropped only in the section where it resides. - -ifdef::openshift-enterprise,openshift-webscale[] -Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the xref:../../cicd/pipelines/installing-pipelines.adoc#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt.adoc#virt-subscribing-cli_installing-virt[OpenShift Virtualization Operator] are installed, respectively. -endif::[] - -[id="prerequisites_odc-creating-applications-using-developer-perspective"] -== Prerequisites - -To create applications using the *Developer* perspective ensure that: - -* You have xref:../../web_console/web-console.adoc#web-console[logged in to the web console]. -* You have created a project or have access to a project with the appropriate xref:../../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] to create applications and other workloads in {product-title}. - -ifdef::openshift-enterprise,openshift-webscale[] - -To create serverless applications, in addition to the preceding prerequisites, ensure that: - -* You have link:https://docs.openshift.com/serverless/1.28/install/install-serverless-operator.html#install-serverless-operator[installed the {ServerlessOperatorName}]. -* You have link:https://docs.openshift.com/serverless/1.28/install/installing-knative-serving.html#installing-knative-serving[created a `KnativeServing` resource in the `knative-serving` namespace]. 
- -endif::[] - -include::modules/odc-creating-sample-applications.adoc[leveloffset=+1] - -include::modules/odc-using-quickstarts.adoc[leveloffset=+1] - -include::modules/odc-importing-codebase-from-git-to-create-application.adoc[leveloffset=+1] - -include::modules/odc-deploying-container-image.adoc[leveloffset=+1] - -include::modules/odc-deploying-java-applications.adoc[leveloffset=+1] - -include::modules/odc-using-the-devfile-registry.adoc[leveloffset=+1] - -include::modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_odc-creating-applications-using-developer-perspective"] -== Additional resources - -* For more information about Knative routing settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/external-ingress-routing/routing-overview.html#routing-overview[Routing]. -* For more information about domain mapping settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/config-custom-domains/serverless-custom-domains.html#serverless-custom-domains[Configuring a custom domain for a Knative service]. -* For more information about Knative autoscaling settings for {ServerlessProductName}, see link:https://docs.openshift.com/serverless/1.28/knative-serving/autoscaling/serverless-autoscaling-developer.html#serverless-autoscaling-developer[Autoscaling]. -* For more information about adding a new user to a project, see xref:../projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Working with projects]. -* For more information about creating a Helm Chart repository, see xref:../working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#odc-creating-helm-releases-using-developer-perspective_configuring-custom-helm-chart-repositories[Creating Helm Chart repositories]. diff --git a/applications/creating_applications/snippets b/applications/creating_applications/snippets deleted file mode 120000 index 7bf6da9a51d0..000000000000 --- a/applications/creating_applications/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets \ No newline at end of file diff --git a/applications/deployments/_attributes b/applications/deployments/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/deployments/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc deleted file mode 100644 index 5d18c9f72dba..000000000000 --- a/applications/deployments/deployment-strategies.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="deployment-strategies"] -= Using deployment strategies -include::_attributes/common-attributes.adoc[] -:context: deployment-strategies - -toc::[] - -_Deployment strategies_ are used to change or upgrade applications without downtime so that users barely notice a change. - -Because users generally access applications through a route handled by a router, deployment strategies can focus on `DeploymentConfig` object features or routing features. Strategies that focus on `DeploymentConfig` object features impact all routes that use the application. Strategies that use router features target individual routes. 
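For orientation, a hedged sketch of where a strategy is declared on a `DeploymentConfig` object; the individual strategy types are described in the sections that follow. The rolling parameters shown mirror common defaults rather than recommendations, and the application name and image are placeholders:

[source,yaml]
----
apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
  name: example-app
spec:
  replicas: 3
  selector:
    app: example-app
  strategy:
    type: Rolling                # alternative types include Recreate and Custom
    rollingParams:
      maxSurge: 25%
      maxUnavailable: 25%
      timeoutSeconds: 600        # how long to wait for a readiness check before failing the rollout
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
      - name: example-app
        image: image-registry.openshift-image-registry.svc:5000/my-project/example-app:latest  # placeholder image
----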
- -Most deployment strategies are supported through the `DeploymentConfig` object, and some additional strategies are supported through router features. - -[id="choosing-deployment-strategies"] -== Choosing a deployment strategy - -Consider the following when choosing a deployment strategy: - -- Long-running connections must be handled gracefully. -- Database conversions can be complex and must be done and rolled back along with the application. -- If the application is a hybrid of microservices and traditional components, downtime might be required to complete the transition. -- You must have the infrastructure to do this. -- If you have a non-isolated test environment, you can break both new and old versions. - -A deployment strategy uses readiness checks to determine if a new pod is ready for use. If a readiness check fails, the `DeploymentConfig` object retries to run the pod until it times out. The default timeout is `10m`, a value set in `TimeoutSeconds` in `dc.spec.strategy.*params`. - -// Rolling strategies -include::modules/deployments-rolling-strategy.adoc[leveloffset=+1] -include::modules/deployments-canary-deployments.adoc[leveloffset=+2] -// Creating rolling deployments -include::modules/creating-rolling-deployments-CLI.adoc[leveloffset=+2] -// Editing a deployment -:context: rolling-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] -// Starting a deployment -include::modules/odc-starting-rolling-deployment.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective] -* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view] - -// Recreate strategies -include::modules/deployments-recreate-strategy.adoc[leveloffset=+1] -// Editing a deployment -:context: recreate-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] -// Starting a deployment -include::modules/odc-starting-recreate-deployment.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective] -* xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view] - -// Custom strategies -include::modules/deployments-custom-strategy.adoc[leveloffset=+1] -// Editing a deployment -:context: custom-strategy -include::modules/odc-editing-deployments.adoc[leveloffset=+2] - -include::modules/deployments-lifecycle-hooks.adoc[leveloffset=+1] diff --git a/applications/deployments/images b/applications/deployments/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/deployments/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/deployments/managing-deployment-processes.adoc 
b/applications/deployments/managing-deployment-processes.adoc deleted file mode 100644 index 5226f871c93a..000000000000 --- a/applications/deployments/managing-deployment-processes.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="deployment-operations"] -= Managing deployment processes -include::_attributes/common-attributes.adoc[] -:context: deployment-operations - -toc::[] - -[id="deploymentconfig-operations"] -== Managing DeploymentConfig objects - -`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or using the `oc` CLI. The following procedures show CLI usage unless otherwise stated. - -include::modules/deployments-starting-deployment.adoc[leveloffset=+2] -include::modules/deployments-viewing-deployment.adoc[leveloffset=+2] -include::modules/deployments-retrying-deployment.adoc[leveloffset=+2] -include::modules/deployments-rolling-back.adoc[leveloffset=+2] -include::modules/deployments-exec-cmd-in-container.adoc[leveloffset=+2] -include::modules/deployments-viewing-logs.adoc[leveloffset=+2] -include::modules/deployments-triggers.adoc[leveloffset=+2] -include::modules/deployments-setting-triggers.adoc[leveloffset=+3] -include::modules/deployments-setting-resources.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about resource limits and requests, see xref:../../nodes/clusters/nodes-cluster-resource-configure.adoc#nodes-cluster-resource-configure-about_nodes-cluster-resource-configure[Understanding managing application memory]. - -include::modules/deployments-scaling-manually.adoc[leveloffset=+2] -include::modules/deployments-accessing-private-repos.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/deployments-assigning-pods-to-nodes.adoc[leveloffset=+2] -endif::[] - -ifndef::openshift-online[] -include::modules/deployments-running-pod-svc-acct.adoc[leveloffset=+2] -endif::[] - -//// -== Managing Deployments - -Need docs on managing Deployment objects. -//// diff --git a/applications/deployments/modules b/applications/deployments/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/deployments/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/deployments/osd-config-custom-domains-applications.adoc b/applications/deployments/osd-config-custom-domains-applications.adoc deleted file mode 100644 index e652e9b7e075..000000000000 --- a/applications/deployments/osd-config-custom-domains-applications.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="osd-config-custom-domains-applications"] -= Custom domains for applications -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: osd-config-custom-domains-applications - -toc::[] - -You can configure a custom domain for your applications. Custom domains are specific wildcard domains that can be used with {product-title} applications. 
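As a loosely held sketch of what the modules that follow configure, custom domains are typically declared through a `CustomDomain` custom resource. The API group, version, and field names below are assumptions drawn from memory of the Custom Domains Operator and should be checked against the CRD installed in your cluster; the domain and certificate references are placeholders:

[source,yaml]
----
apiVersion: managed.openshift.io/v1alpha1   # assumed API group and version
kind: CustomDomain
metadata:
  name: acme-apps
spec:
  domain: apps.acme.example.com             # placeholder wildcard application domain
  certificate:
    name: acme-apps-tls                     # placeholder TLS secret name
    namespace: my-project                   # placeholder namespace containing the secret
----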
- -include::modules/osd-applications-config-custom-domains.adoc[leveloffset=+1] -include::modules/osd-applications-renew-custom-domains.adoc[leveloffset=+1] diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc deleted file mode 100644 index 87df7e2548e8..000000000000 --- a/applications/deployments/route-based-deployment-strategies.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="route-based-deployment-strategies"] -= Using route-based deployment strategies -include::_attributes/common-attributes.adoc[] -:context: route-based-deployment-strategies - -toc::[] - -Deployment strategies provide a way for the application to evolve. Some strategies use `Deployment` objects to make changes that are seen by users of all routes that resolve to the application. Other advanced strategies, such as the ones described in this section, use router features in conjunction with `Deployment` objects to impact specific routes. - -//// -This link keeps breaking Travis for some reason. - -[NOTE] -==== -See -xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using deployment strategies] -for more on the basic strategy types. -==== -//// - -The most common route-based strategy is to use a _blue-green deployment_. The new version (the green version) is brought up for testing and evaluation, while the users still use the stable version (the blue version). When ready, the users are switched to the green version. If a problem arises, you can switch back to the blue version. - -A common alternative strategy is to use _A/B versions_ that are both active at the same time and some users use one version, and some users use the other version. This can be used for experimenting with user interface changes and other features to get user feedback. It can also be used to verify proper operation in a production context where problems impact a limited number of users. - -A canary deployment tests the new version but when a problem is detected it quickly falls back to the previous version. This can be done with both of the above strategies. - -The route-based deployment strategies do not scale the number of pods in the services. To maintain desired performance characteristics the deployment configurations might have to be scaled. 
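A minimal sketch of the blue-green switch described above, assuming the two versions run behind services named `example-blue` and `example-green` (placeholder names). A single route initially sends all traffic to the blue service; cutting over to green is a matter of changing `spec.to.name`:

[source,yaml]
----
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: example
spec:
  host: example.apps.mycluster.example.com  # placeholder application hostname
  to:
    kind: Service
    name: example-blue                      # change to example-green to cut over
    weight: 100
----

For A/B style rollouts, the same route can split traffic by adding `alternateBackends` entries with their own weights.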
- -include::modules/deployments-proxy-shards.adoc[leveloffset=+1] -include::modules/deployments-n1-compatibility.adoc[leveloffset=+1] -include::modules/deployments-graceful-termination.adoc[leveloffset=+1] -include::modules/deployments-blue-green.adoc[leveloffset=+1] -include::modules/deployments-ab-testing.adoc[leveloffset=+1] -include::modules/deployments-ab-testing-lb.adoc[leveloffset=+2] diff --git a/applications/deployments/snippets b/applications/deployments/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/deployments/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc deleted file mode 100644 index 09654a5d92f6..000000000000 --- a/applications/deployments/what-deployments-are.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="what-deployments-are"] -= Understanding Deployment and DeploymentConfig objects -include::_attributes/common-attributes.adoc[] -:context: what-deployments-are - -toc::[] - -The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects: - -* A `DeploymentConfig` or `Deployment` object, either of which describes the desired state of a particular component of the application as a pod template. -* `DeploymentConfig` objects involve one or more _replication controllers_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `Deployment` objects involve one or more _replica sets_, a successor of replication controllers. -* One or more pods, which represent an instance of a particular version of an application. - -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -xref:../../applications/deployments/advanced_deployment_strategies.adoc#graceful-termination[graceful shutdown] -xref:../../applications/basic_deployment_operations.adoc#triggers[Triggers] -xref:../../applications/deployment_strategies.adoc#strategies[strategies] -xref:../../applications/deployment_strategies.adoc#lifecycle-hooks[hooks] -xref:../../applications/basic_deployment_operations.adoc#rolling-back-a-deployment[rollbacks] -xref:../../applications/basic_deployment_operations.adoc#scaling[scaling] -xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling] -//// - -[id="what-deployments-are-build-blocks"] -== Building blocks of a deployment - -Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks. - -Users do not have to manipulate replication controllers, replica sets, or pods owned by `DeploymentConfig` objects or deployments. The deployment systems ensure changes are propagated appropriately. - -[TIP] -==== -If the existing deployment strategies are not suited for your use case and you must run manual steps during the lifecycle of your deployment, then you should consider creating a custom deployment strategy. -==== - -The following sections provide further details on these objects. 
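To make the building blocks above concrete, a minimal `Deployment` sketch: the object records a replica count and a pod template, and the controller creates a `ReplicaSet` that in turn owns the pods. The name, labels, and image are placeholders:

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-openshift
spec:
  replicas: 2                    # desired number of pods
  selector:
    matchLabels:
      app: hello-openshift
  template:                      # pod template captured by each ReplicaSet revision
    metadata:
      labels:
        app: hello-openshift
    spec:
      containers:
      - name: hello-openshift
        image: quay.io/openshift/origin-hello-openshift  # sample image; substitute your own
        ports:
        - containerPort: 8080
----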
- -include::modules/deployments-replicationcontrollers.adoc[leveloffset=+2] -include::modules/deployments-replicasets.adoc[leveloffset=+2] - -include::modules/deployments-deploymentconfigs.adoc[leveloffset=+1] -include::modules/deployments-kube-deployments.adoc[leveloffset=+1] -include::modules/deployments-comparing-deploymentconfigs.adoc[leveloffset=+1] -//// -Update when converted: -[role="_additional-resources"] -.Additional resources - -- xref:../../dev_guide/managing_images.adoc#dev-guide-managing-images[Imagestreams] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#lifecycle-hooks[Lifecycle hooks] -- xref:../../dev_guide/deployments/deployment_strategies.adoc#custom-strategy[Custom deployment strategies] -//// diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc deleted file mode 100644 index 179701df2a45..000000000000 --- a/applications/idling-applications.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="idling-applications"] -= Idling applications -include::_attributes/common-attributes.adoc[] -:context: idling-applications - -toc::[] - -Cluster administrators can idle applications to reduce resource consumption. This is useful when the cluster is deployed on a public cloud where cost is related to resource consumption. - -If any scalable resources are not in use, {product-title} discovers and idles them by scaling their replicas to `0`. The next time network traffic is directed to the resources, the resources are unidled by scaling up the replicas, and normal operation continues. - -Applications are made of services, as well as other scalable resources, such as deployment configs. The action of idling an application involves idling all associated resources. - -include::modules/idle-idling-applications.adoc[leveloffset=+1] -include::modules/idle-unidling-applications.adoc[leveloffset=+1] diff --git a/applications/images b/applications/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/index.adoc b/applications/index.adoc deleted file mode 100644 index 7f3a761b6feb..000000000000 --- a/applications/index.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="building-applications-overview"] -= Building applications overview -include::_attributes/common-attributes.adoc[] -:context: building-applications-overview - -toc::[] - -Using {product-title}, you can create, edit, delete, and manage applications using the web console or command line interface (CLI). - -[id="working-on-a-project"] -== Working on a project - -Using projects, you can organize and manage applications in isolation. You can manage the entire project lifecycle, including xref:../applications/projects/working-with-projects.adoc#working-with-projects[creating, viewing, and deleting a project] in {product-title}. - -After you create the project, you can xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[grant or revoke access to a project] and xref:../applications/projects/working-with-projects.adoc#odc-customizing-available-cluster-roles-using-developer-perspective_projects[manage cluster roles] for the users using the Developer perspective. 
You can also xref:../applications/projects/configuring-project-creation.adoc#configuring-project-creation[edit the project configuration resource] while creating a project template that is used for automatic provisioning of new projects. - -Using the CLI, you can xref:../applications/projects/creating-project-other-user.adoc#creating-project-other-user[create a project as a different user] by impersonating a request to the {product-title} API. When you make a request to create a new project, {product-title} uses an endpoint to provision the project according to a customizable template. As a cluster administrator, you can choose to xref:../applications/projects/configuring-project-creation.adoc#disabling-project-self-provisioning_configuring-project-creation[prevent an authenticated user group from self-provisioning new projects]. - -[id="working-on-application"] -== Working on an application - -[id="creating-application"] -=== Creating an application - -To create applications, you must have created a project or have access to a project with the appropriate roles and permissions. You can create an application by using either xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[the Developer perspective in the web console], xref:../applications/creating_applications/creating-apps-from-installed-operators.adoc#creating-apps-from-installed-operators[installed Operators], or xref:../applications/creating_applications/creating-applications-using-cli.adoc#creating-applications-using-cli[the {product-title} CLI]. You can source the applications to be added to the project from Git, JAR files, devfiles, or the developer catalog. - -You can also use components that include source or binary code, images, and templates to create an application by using the {product-title} CLI. With the {product-title} web console, you can create an application from an Operator installed by a cluster administrator. - -[id="maintaining-application"] -=== Maintaining an application - -After you create the application, you can use the web console to xref:../applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc#odc-monitoring-project-and-application-metrics-using-developer-perspective[monitor your project or application metrics]. You can also xref:../applications/odc-editing-applications.adoc#odc-editing-applications[edit] or xref:../applications/odc-deleting-applications.adoc#odc-deleting-applications[delete] the application using the web console. -When the application is running, not all application resources are used. As a cluster administrator, you can choose to xref:../applications/idling-applications.adoc#idling-applications[idle these scalable resources] to reduce resource consumption. - -[id="connecting-application"] -=== Connecting an application to services - -An application uses backing services to build and connect workloads, which vary according to the service provider. Using the xref:../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Service Binding Operator], as a developer, you can bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection.
You can apply service binding also on xref:../applications/connecting_applications_to_services/getting-started-with-service-binding-ibm-power-ibm-z.adoc#getting-started-with-service-binding-ibm-power-ibm-z[{ibmpowerProductName}, {ibmzProductName}, and {linuxoneProductName} environments]. - -[id="deploying-application"] -=== Deploying an application -You can deploy your application using xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`Deployment` or `DeploymentConfig`] objects and xref:../applications/deployments/managing-deployment-processes.adoc#deployment-operations[manage] them from the web console. You can create xref:../applications/deployments/deployment-strategies.adoc#deployment-strategies[deployment strategies] that help reduce downtime during a change or an upgrade to the application. - -You can also use xref:../applications/working_with_helm_charts/understanding-helm.adoc#understanding-helm[Helm], a software package manager that simplifies deployment of applications and services to {product-title} clusters. - -[id="redhat-marketplace"] -== Using the Red Hat Marketplace - -The xref:../applications/red-hat-marketplace.adoc#red-hat-marketplace[Red Hat Marketplace] is an open cloud marketplace where you can discover and access certified software for container-based environments that run on public clouds and on-premises. \ No newline at end of file diff --git a/applications/modules b/applications/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/odc-deleting-applications.adoc b/applications/odc-deleting-applications.adoc deleted file mode 100644 index 6082134feb51..000000000000 --- a/applications/odc-deleting-applications.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-deleting-applications"] -= Deleting applications -include::_attributes/common-attributes.adoc[] -:context: odc-deleting-applications - -toc::[] - -You can delete applications created in your project. - -include::modules/odc-deleting-applications-using-developer-perspective.adoc[leveloffset=+1] diff --git a/applications/odc-editing-applications.adoc b/applications/odc-editing-applications.adoc deleted file mode 100644 index 6c27e73f4e89..000000000000 --- a/applications/odc-editing-applications.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-editing-applications"] -= Editing applications -include::_attributes/common-attributes.adoc[] -:context: odc-editing-applications - -toc::[] - -You can edit the configuration and the source code of the application you create using the *Topology* view. - -== Prerequisites - -* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create and modify applications in {product-title}. -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective]. -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. 
- -include::modules/odc-editing-source-code-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-editing-application-configuration-using-developer-perspective.adoc[leveloffset=+1] diff --git a/applications/odc-exporting-applications.adoc b/applications/odc-exporting-applications.adoc deleted file mode 100644 index ebef502465a4..000000000000 --- a/applications/odc-exporting-applications.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-exporting-applications"] -= Exporting applications -include::_attributes/common-attributes.adoc[] -:context: odc-exporting-applications - -toc::[] - -As a developer, you can export your application in the ZIP file format. Based on your needs, import the exported application to another project in the same cluster or a different cluster by using the *Import YAML* option in the *+Add* view. Exporting your application helps you reuse your application resources and saves you time. - -[id="prerequisites_odc-exporting-applications"] -== Prerequisites - -* You have installed the gitops-primer Operator from the OperatorHub. -+ -[NOTE] -==== -The *Export application* option is disabled in the *Topology* view even after installing the gitops-primer Operator. -==== - -* You have created an application in the *Topology* view to enable *Export application*. - -[id="odc-exporting-applications-procedure"] -== Procedure - -. In the *Developer* perspective, perform one of the following steps: -.. Navigate to the *+Add* view and click *Export application* in the *Application portability* tile. -.. Navigate to the *Topology* view and click *Export application*. - -. Click *OK* in the *Export Application* dialog box. A notification opens to confirm that the export of resources from your project has started. - -. Optional: Depending on your scenario, perform any of the following steps: -+ -* If you have started exporting an incorrect application, click *Export application* -> *Cancel Export*. -* If your export is already in progress and you want to start a fresh export, click *Export application* -> *Restart Export*. -* If you want to view logs associated with exporting an application, click *Export application* and the *View Logs* link. -+ -image::export-application-dialog-box.png[] - -. After a successful export, click *Download* in the dialog box to download application resources in ZIP format onto your machine. diff --git a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc deleted file mode 100644 index b70a8da2de8b..000000000000 --- a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-monitoring-project-and-application-metrics-using-developer-perspective"] -= Monitoring project and application metrics using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: monitoring-project-and-application-metrics-using-developer-perspective - -toc::[] - - -The *Observe* view in the *Developer* perspective provides options to monitor your project or application metrics, such as CPU, memory, and bandwidth usage, and network-related information.
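The metrics shown in the *Observe* view can also be cross-checked from the CLI. A minimal sketch, assuming a project named `my-app` exists and cluster metrics are available:

[source,terminal]
----
$ oc adm top pods -n my-app
----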
- -[id="prerequisites_odc-monitoring-project-and-application-metrics-using-developer-perspective"] -== Prerequisites - -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed applications on {product-title}]. -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - -include::modules/odc-monitoring-your-project-metrics.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-application-metrics.adoc[leveloffset=+1] - -include::modules/odc-image-vulnerabilities-breakdown.adoc[leveloffset=+1] - -include::modules/odc-monitoring-your-app-vulnerabilities.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-odc-monitoring-project-and-application-metrics-using-developer-perspective"] -== Additional resources -* xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] diff --git a/applications/odc-viewing-application-composition-using-topology-view.adoc b/applications/odc-viewing-application-composition-using-topology-view.adoc deleted file mode 100644 index 576a2d56f718..000000000000 --- a/applications/odc-viewing-application-composition-using-topology-view.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-viewing-application-composition-using-topology-view"] -= Viewing application composition using the Topology view -include::_attributes/common-attributes.adoc[] -:context: viewing-application-composition-using-topology-view - -toc::[] - -The *Topology* view in the *Developer* perspective of the web console provides a visual representation of all the applications within a project, their build status, and the components and services associated with them. - -== Prerequisites -To view your applications in the *Topology* view and interact with them, ensure that: - -* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console]. -* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create applications and other workloads in {product-title}. -* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective]. -* You are in xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. 
- -include::modules/odc-viewing-application-topology.adoc[leveloffset=+1] - -include::modules/odc-interacting-with-applications-and-components.adoc[leveloffset=+1] - -include::modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc[leveloffset=+1] - -include::modules/odc-adding-components-to-an-existing-project.adoc[leveloffset=+1] - -include::modules/odc-grouping-multiple-components.adoc[leveloffset=+1] - -include::modules/odc-adding-services-to-application.adoc[leveloffset=+1] - -include::modules/odc-removing-services-from-application.adoc[leveloffset=+1] - -include::modules/odc-labels-and-annotations-used-for-topology-view.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* See xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] for more information on creating an application from Git. -* See xref:../applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc#odc-connecting-an-application-to-a-service-using-the-developer-perspective[Connecting an application to a service using the Developer perspective]. -* See xref:../applications/odc-exporting-applications.adoc#odc-exporting-applications[Exporting applications] diff --git a/applications/projects/_attributes b/applications/projects/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/projects/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/projects/configuring-project-creation.adoc b/applications/projects/configuring-project-creation.adoc deleted file mode 100644 index 1d9aa09de82f..000000000000 --- a/applications/projects/configuring-project-creation.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-project-creation"] -= Configuring project creation -include::_attributes/common-attributes.adoc[] -:context: configuring-project-creation - -toc::[] - -In {product-title}, _projects_ are used to group and isolate related objects. -When a request is made to create a new project using the web console or `oc -new-project` command, an endpoint in {product-title} is used to provision the -project according to a template, which can be customized. - -As -a cluster administrator, you can allow and configure how developers and service -accounts can create, or _self-provision_, their own projects. - -include::modules/about-project-creation.adoc[leveloffset=+1] -include::modules/modifying-template-for-new-projects.adoc[leveloffset=+1] -include::modules/disabling-project-self-provisioning.adoc[leveloffset=+1] -include::modules/customizing-project-request-message.adoc[leveloffset=+1] diff --git a/applications/projects/creating-project-other-user.adoc b/applications/projects/creating-project-other-user.adoc deleted file mode 100644 index 49c9844f7e0a..000000000000 --- a/applications/projects/creating-project-other-user.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-project-other-user"] -= Creating a project as another user -include::_attributes/common-attributes.adoc[] -:context: creating-project-other-user - -toc::[] - -Impersonation allows you to create a project as a different user. 
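For example, a minimal sketch of creating a project while impersonating another user follows; the project and user names are placeholders:

[source,terminal]
----
$ oc new-project <project> --as=<user> \
    --as-group=system:authenticated --as-group=system:authenticated:oauth
----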
- -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-project-creation.adoc[leveloffset=+1] diff --git a/applications/projects/images b/applications/projects/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/projects/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/projects/modules b/applications/projects/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/projects/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/projects/snippets b/applications/projects/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/projects/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc deleted file mode 100644 index a155c0139e4a..000000000000 --- a/applications/projects/working-with-projects.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-projects"] -= Working with projects -include::_attributes/common-attributes.adoc[] -:context: projects - -toc::[] - -A _project_ allows a community of users to organize and manage their content in -isolation from other communities. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these projects using the `oc adm new-project` command. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. 
-==== - -include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/odc-creating-projects-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/creating-a-project-using-the-CLI.adoc[leveloffset=+1] - -include::modules/viewing-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/viewing-a-project-using-the-CLI.adoc[leveloffset=+1] - -include::modules/odc-providing-project-permissions-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/odc-customizing-available-cluster-roles-using-developer-perspective.adoc[leveloffset=+1] - -include::modules/adding-to-a-project.adoc[leveloffset=+1] - -include::modules/checking-project-status-using-the-web-console.adoc[leveloffset=+1] - -include::modules/checking-project-status-using-the-CLI.adoc[leveloffset=+1] - -include::modules/deleting-a-project-using-the-web-console.adoc[leveloffset=+1] - -include::modules/deleting-a-project-using-the-CLI.adoc[leveloffset=+1] diff --git a/applications/pruning-objects.adoc b/applications/pruning-objects.adoc deleted file mode 100644 index 0b42fd73b34e..000000000000 --- a/applications/pruning-objects.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="pruning-objects"] -= Pruning objects to reclaim resources -include::_attributes/common-attributes.adoc[] -:context: pruning-objects - -toc::[] - -Over time, API objects created in {product-title} can accumulate in the -cluster's etcd data store through normal user operations, such as when building -and deploying applications. - -Cluster administrators can periodically prune older versions of objects from the -cluster that are no longer required. For example, by pruning images you can delete -older images and layers that are no longer in use, but are still taking up disk -space. - -include::modules/pruning-basic-operations.adoc[leveloffset=+1] -include::modules/pruning-groups.adoc[leveloffset=+1] -include::modules/pruning-deployments.adoc[leveloffset=+1] -include::modules/pruning-builds.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../cicd/builds/advanced-build-operations.adoc#builds-build-pruning-advanced-build-operations[Performing advanced builds -> Pruning builds] - -include::modules/pruning-images.adoc[leveloffset=+1] -include::modules/pruning-images-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../registry/accessing-the-registry.adoc#accessing-the-registry[Accessing the registry] -* xref:../registry/securing-exposing-registry.adoc#securing-exposing-registry[Exposing the registry] -* See -xref:../registry/configuring-registry-operator.adoc#configuring-registry-operator[Image -Registry Operator in {product-title}] for information on how to create a -registry route. 
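As an orientation to the pruning operations that the modules above cover, the following is a minimal CLI sketch of pruning older deployments and images. The retention values are illustrative assumptions, not recommendations, and image pruning requires registry access as described in the registry resources above.

[source,terminal]
----
$ oc adm prune deployments --orphans --keep-complete=5 --keep-failed=1 --keep-younger-than=60m --confirm

$ oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
----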
- -include::modules/pruning-hard-pruning-registry.adoc[leveloffset=+1] -include::modules/pruning-cronjobs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs_nodes-nodes-jobs[Running tasks in pods using jobs] -* xref:../applications/quotas/quotas-setting-across-multiple-projects.adoc#setting-quotas-across-multiple-projects[Resource quotas across multiple projects] -* xref:../authentication/using-rbac.adoc#using-rbac[Using RBAC to define and apply permissions] diff --git a/applications/quotas/_attributes b/applications/quotas/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/quotas/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/quotas/images b/applications/quotas/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/applications/quotas/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/applications/quotas/modules b/applications/quotas/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/applications/quotas/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc deleted file mode 100644 index 471a0343a6ec..000000000000 --- a/applications/quotas/quotas-setting-across-multiple-projects.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-quotas-across-multiple-projects"] -= Resource quotas across multiple projects -include::_attributes/common-attributes.adoc[] -:context: setting-quotas-across-multiple-projects - -toc::[] - -A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas to be shared across multiple projects. Resources used in each selected project are aggregated and that aggregate is used to limit resources across all the selected projects. - -This guide describes how cluster administrators can set and manage resource quotas across multiple projects. - -include::modules/quotas-selecting-projects.adoc[leveloffset=+1] -include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1] -include::modules/quotas-selection-granularity.adoc[leveloffset=+1] diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc deleted file mode 100644 index daf75d747225..000000000000 --- a/applications/quotas/quotas-setting-per-project.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="quotas-setting-per-project"] -= Resource quotas per project -include::_attributes/common-attributes.adoc[] -:context: quotas-setting-per-project - -toc::[] - -A _resource quota_, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per project. It can limit the quantity of objects that can be created in a project by type, as well as the total amount of compute resources and storage that might be consumed by resources in that project. - -This guide describes how resource quotas work, how cluster administrators can set and manage resource quotas on a per project basis, and how developers and cluster administrators can view them. 
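For example, a cluster administrator can create a simple per-project quota directly from the CLI; the quota name, limits, and project name below are illustrative assumptions:

[source,terminal]
----
$ oc create quota example-quota --hard=cpu=2,memory=4Gi,pods=10 -n my-project
----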
- -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] -include::modules/quotas-sample-resource-quotas-def.adoc[leveloffset=+1] -include::modules/quotas-creating-a-quota.adoc[leveloffset=+1] -include::modules/quotas-creating-object-count-quotas.adoc[leveloffset=+2] -include::modules/setting-resource-quota-for-extended-resources.adoc[leveloffset=+2] -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-requiring-explicit-quota.adoc[leveloffset=+1] diff --git a/applications/quotas/snippets b/applications/quotas/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/quotas/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/red-hat-marketplace.adoc b/applications/red-hat-marketplace.adoc deleted file mode 100644 index d875e3eeaac7..000000000000 --- a/applications/red-hat-marketplace.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="red-hat-marketplace"] -= Using the Red Hat Marketplace -include::_attributes/common-attributes.adoc[] -:context: red-hat-marketplace - -toc::[] - -The link:https://marketplace.redhat.com[Red Hat Marketplace] is an open cloud marketplace that makes it easy to discover and access certified software for container-based environments that run on public clouds and on-premises. - -include::modules/red-hat-marketplace-features.adoc[leveloffset=+1] diff --git a/applications/snippets b/applications/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/applications/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/applications/working-with-quotas.adoc b/applications/working-with-quotas.adoc deleted file mode 100644 index dfa5dfd6b866..000000000000 --- a/applications/working-with-quotas.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-quotas"] -= Working with quotas -include::_attributes/common-attributes.adoc[] -:context: working-with-quotas - -toc::[] - -A _resource quota_, defined by a ResourceQuota object, provides constraints that -limit aggregate resource consumption per project. It can limit the quantity of -objects that can be created in a project by type, as well as the total amount of -compute resources and storage that may be consumed by resources in that project. - -An _object quota count_ places a defined quota on all standard namespaced resource -types. When using a resource quota, an object is charged against the quota if it -exists in server storage. These types of quotas are useful to protect against -exhaustion of storage resources. - -This guide describes how resource quotas work and how developers can work with -and view them. 
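As a developer, you can list and inspect the quotas that apply to your project from the CLI, for example (assuming a project named `my-project`):

[source,terminal]
----
$ oc get resourcequota -n my-project

$ oc describe resourcequota <quota_name> -n my-project
----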
- -include::modules/quotas-viewing-quotas.adoc[leveloffset=+1] -include::modules/quotas-resources-managed.adoc[leveloffset=+1] -include::modules/quotas-scopes.adoc[leveloffset=+1] -include::modules/quotas-enforcement.adoc[leveloffset=+1] -include::modules/quotas-requests-vs-limits.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/_attributes b/applications/working_with_helm_charts/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/applications/working_with_helm_charts/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc deleted file mode 100644 index 933193d52908..000000000000 --- a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-custom-helm-chart-repositories"] -= Configuring custom Helm chart repositories -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-helm-chart-repositories - -toc::[] - -[role="_abstract"] -You can create Helm releases on an {product-title} cluster using the following methods: - -* The CLI. -* The *Developer* perspective of the web console. - -The *Developer Catalog*, in the *Developer* perspective of the web console, displays the Helm charts available in the cluster. By default, it lists the Helm charts from the Red Hat OpenShift Helm chart repository. For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat `Helm index` file]. - -As a cluster administrator, you can add multiple cluster-scoped and namespace-scoped Helm chart repositories, separate from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -As a regular user or project member with the appropriate role-based access control (RBAC) permissions, you can add multiple namespace-scoped Helm chart repositories, apart from the default cluster-scoped Helm repository, and display the Helm charts from these repositories in the *Developer Catalog*. - -In the *Developer* perspective of the web console, you can use the *Helm* page to: - -* Create Helm Releases and Repositories using the *Create* button. - -* Create, update, or delete a cluster-scoped or namespace-scoped Helm chart repository. - -* View the list of the existing Helm chart repositories in the Repositories tab, which can also be easily distinguished as either cluster scoped or namespace scoped. - -include::modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/odc-creating-helm-releases-using-developer-perspective.adoc[leveloffset=+1] - -== Using Helm in the web terminal - -You can use Helm by xref:../../web_console/web_terminal/odc-using-web-terminal.adoc#odc-access-web-terminal_odc-using-web-terminal[Accessing the web terminal] in the *Developer* perspective of the web console. 
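As a minimal CLI counterpart to the repository workflow described above, the following adds the Red Hat Helm chart repository and installs a chart from it. The release name `example-app` and the chart name `nodejs-ex-k` are assumptions for illustration; substitute a chart listed in the repository index.

[source,terminal]
----
$ helm repo add openshift-helm-charts https://charts.openshift.io/

$ helm install example-app openshift-helm-charts/nodejs-ex-k
----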
- -include::modules/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1] - -include::modules/helm-adding-helm-chart-repositories.adoc[leveloffset=+1] - -include::modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc[leveloffset=+1] - -include::modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc[leveloffset=+1] - -include::modules/helm-filtering-helm-charts-by-certification-level.adoc[leveloffset=+1] - -include::modules/helm-disabling-helm-chart-repositories.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/images b/applications/working_with_helm_charts/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/applications/working_with_helm_charts/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/applications/working_with_helm_charts/installing-helm.adoc b/applications/working_with_helm_charts/installing-helm.adoc deleted file mode 100644 index 59a50498563a..000000000000 --- a/applications/working_with_helm_charts/installing-helm.adoc +++ /dev/null @@ -1,106 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-helm"] -= Installing Helm -include::_attributes/common-attributes.adoc[] -:context: installing-helm - -toc::[] - -The following section describes how to install Helm on different platforms using the CLI. - -You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*. - -.Prerequisites -* You have installed Go, version 1.13 or higher. - -== On Linux - -. Download the Helm binary and add it to your path: - -* Linux (x86_64, amd64) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm ----- - -* Linux on {ibmzProductName} and {linuxoneProductName} (s390x) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-s390x -o /usr/local/bin/helm ----- - -* Linux on {ibmpowerProductName} (ppc64le) -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-ppc64le -o /usr/local/bin/helm ----- - -. Make the binary file executable: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/helm ----- - -. Check the installed version: -+ -[source,terminal] ----- -$ helm version ----- -+ -.Example output -[source,terminal] ----- -version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"} ----- - -== On Windows 7/8 - -. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put in a directory of your preference. -. Right click *Start* and click *Control Panel*. -. Select *System and Security* and then click *System*. -. From the menu on the left, select *Advanced systems settings* and click *Environment Variables* at the bottom. -. Select *Path* from the *Variable* section and click *Edit*. -. Click *New* and type the path to the folder with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*. - -== On Windows 10 - -. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put in a directory of your preference. -. Click *Search* and type `env` or `environment`. -. 
Select *Edit environment variables for your account*. -. Select *Path* from the *Variable* section and click *Edit*. -. Click *New* and type the path to the directory with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*. - - -== On macOS -. Download the Helm binary and add it to your path: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-darwin-amd64 -o /usr/local/bin/helm ----- - - -. Make the binary file executable: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/helm ----- - -. Check the installed version: -+ -[source,terminal] ----- -$ helm version ----- -+ -.Example output -[source,terminal] ----- -version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"} ----- diff --git a/applications/working_with_helm_charts/modules b/applications/working_with_helm_charts/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/applications/working_with_helm_charts/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc deleted file mode 100644 index cfecb6b7e9b3..000000000000 --- a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="odc-working-with-helm-releases"] -= Working with Helm releases -include::_attributes/common-attributes.adoc[] -:context: working-with-helm-releases - -toc::[] - -You can use the *Developer* perspective in the web console to update, roll back, or delete a Helm release. - -== Prerequisites - -* You have logged in to the web console and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - - -include::modules/odc-upgrading-helm-release.adoc[leveloffset=+1] - -include::modules/odc-rolling-back-helm-release.adoc[leveloffset=+1] - -include::modules/odc-deleting-helm-release.adoc[leveloffset=+1] diff --git a/applications/working_with_helm_charts/snippets b/applications/working_with_helm_charts/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/applications/working_with_helm_charts/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/applications/working_with_helm_charts/understanding-helm.adoc b/applications/working_with_helm_charts/understanding-helm.adoc deleted file mode 100644 index 83aead71501f..000000000000 --- a/applications/working_with_helm_charts/understanding-helm.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-helm"] -= Understanding Helm -include::_attributes/common-attributes.adoc[] -:context: understanding-helm - -toc::[] - -[role="_abstract"] -Helm is a software package manager that simplifies deployment of applications and services to {product-title} clusters. - -Helm uses a packaging format called _charts_. -A Helm chart is a collection of files that describes the {product-title} resources. - -Installing a chart in a cluster creates a running instance of the chart, known as a _release_. - -Each time a chart is installed, or a release is upgraded or rolled back, an incremental revision is created.
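Because every upgrade or rollback of a release produces a new revision, you can inspect that history and revert to an earlier revision from the CLI. A brief sketch, assuming a release named `example-app`:

[source,terminal]
----
$ helm history example-app

$ helm rollback example-app 1
----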
- - -== Key features - -Helm provides the ability to: - -* Search through a large collection of charts stored in the chart repository. -* Modify existing charts. -* Create your own charts with {product-title} or Kubernetes resources. -* Package and share your applications as charts. - -//[NOTE] -//==== -// In {product-title} 4.10 and 4.11, Helm is disabled for the xref:../../web_console/web-console.adoc#multi-cluster-about_web-console[Multicluster Console] (Technology Preview). -//==== - -== Red Hat Certification of Helm charts for OpenShift - -You can have Red Hat verify and certify your Helm charts for all of the components that you deploy on {product-title}. Charts go through an automated Red Hat OpenShift certification workflow that ensures security compliance, as well as optimal integration and experience with the platform. Certification assures the integrity of the chart and ensures that the Helm chart works seamlessly on Red Hat OpenShift clusters. - -[role="_additional-resources"] -== Additional resources -* For more information on how to certify your Helm charts as a Red Hat partner, see link:https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/helm-chart-certification/overview[Red Hat Certification of Helm charts for OpenShift]. -* For more information on OpenShift and Container certification guides for Red Hat partners, see link:https://access.redhat.com/documentation/en-us/red_hat_software_certification/8.51/html-single/red_hat_software_certification_workflow_guide/index#con_container-certification_openshift-sw-cert-workflow-introduction-to-redhat-openshift-operator-certification[Partner Guide for OpenShift and Container Certification]. -* For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat `Helm index` file]. -* You can view the available charts at the link:https://marketplace.redhat.com/en-us/documentation/access-red-hat-marketplace[Red Hat Marketplace]. For more information, see xref:../../applications/red-hat-marketplace.adoc#red-hat-marketplace[Using the Red Hat Marketplace].
diff --git a/architecture/_attributes b/architecture/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/architecture/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/architecture/admission-plug-ins.adoc b/architecture/admission-plug-ins.adoc deleted file mode 100644 index c20f406a8c97..000000000000 --- a/architecture/admission-plug-ins.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="admission-plug-ins"] -= Admission plugins -include::_attributes/common-attributes.adoc[] -:context: admission-plug-ins - -toc::[] - -// Concept modules -include::modules/admission-plug-ins-about.adoc[leveloffset=+1] - -include::modules/admission-plug-ins-default.adoc[leveloffset=+1] - -include::modules/admission-webhooks-about.adoc[leveloffset=+1] - -include::modules/admission-webhook-types.adoc[leveloffset=+1] - -// Procedure module -include::modules/configuring-dynamic-admission.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="admission-plug-ins-additional-resources"] -== Additional resources - -ifdef::openshift-enterprise,openshift-webscale[] -* xref:../networking/hardware_networks/configuring-sriov-operator.adoc#configuring-sriov-operator[Limiting custom network resources managed by the SR-IOV network device plugin] -endif::[] - -* xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Defining tolerations that enable taints to qualify which pods should be scheduled on a node] - -* xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority class validation] diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc deleted file mode 100644 index 39786a08a45d..000000000000 --- a/architecture/architecture-installation.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-installation"] -= Installation and update -include::_attributes/common-attributes.adoc[] -:context: architecture-installation - -toc::[] - -include::modules/installation-overview.adoc[leveloffset=+1] - -include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+2] - -include::modules/installation-process.adoc[leveloffset=+2] - -[discrete] -=== Installation scope - -The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. - -[role="_additional-resources"] -.Additional resources - -* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources. 
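For orientation, the installation program described in this assembly is typically invoked as follows. The directory is a placeholder, and the command assumes that an `install-config.yaml` file has already been created in it:

[source,terminal]
----
$ openshift-install create cluster --dir <installation_directory> --log-level=info
----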
- -include::modules/update-service-overview.adoc[leveloffset=+1] - -include::modules/unmanaged-operators.adoc[leveloffset=+1] - -[id="architecture-installation-next-steps"] -== Next steps - -* xref:../installing/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] diff --git a/architecture/architecture-rhcos.adoc b/architecture/architecture-rhcos.adoc deleted file mode 100644 index 50eacc440e5a..000000000000 --- a/architecture/architecture-rhcos.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-rhcos"] -= {op-system-first} -include::_attributes/common-attributes.adoc[] -:context: architecture-rhcos - -toc::[] - -include::modules/rhcos-about.adoc[leveloffset=+1] -include::modules/ignition-config-viewing.adoc[leveloffset=+1] -include::modules/digging-into-machine-config.adoc[leveloffset=+1] diff --git a/architecture/architecture.adoc b/architecture/architecture.adoc deleted file mode 100644 index 9ab4f4fa36cc..000000000000 --- a/architecture/architecture.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture"] -= {product-title} architecture -include::_attributes/common-attributes.adoc[] -:context: architecture - -toc::[] - -include::modules/architecture-platform-introduction.adoc[leveloffset=+1] - -include::modules/architecture-kubernetes-introduction.adoc[leveloffset=+2] - -include::modules/architecture-container-application-benefits.adoc[leveloffset=+2] - -include::modules/architecture-platform-benefits.adoc[leveloffset=+2] -//// -== User facing components -* Workloads (Deployments, Jobs, ReplicaSets, etc) -* Operator Lifecycle Manager -* xref:../cicd/builds/understanding-image-builds.adoc[Builds] - The build component -provides an API and infrastructure for producing new container images using a -variety of techniques including industry standard Dockerfiles and publishing -them to either the cluster image registry, or an external registry. It also -provides integration with Jenkins based pipeline continuous integration -workflows. -* xref:../registry/index.adoc[Image Registry] - -The image registry provides a scalable repository for storing and retrieving -container images that are produced by and run on the cluster. Image access is -integrated with the cluster's role-based access controls and user authentication -system. -* xref:../openshift_images/images-understand.adoc[Image -streams] - The imagestream API provides an abstraction over container images -that exist in registries. It allows workloads to reference an image indirectly, -retains a history of the images that have been referenced, and allows -notification when an image is updated with a new version. -//// - -include::modules/cluster-entitlements.adoc[leveloffset=+2] diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc deleted file mode 100644 index a8b4c5d1f258..000000000000 --- a/architecture/argocd.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="argocd"] -= Using ArgoCD with {product-title} -include::_attributes/common-attributes.adoc[] - -:context: argocd - -toc::[] - -[id="argocd-what"] -== What does ArgoCD do? - -ArgoCD is a declarative continuous delivery tool that leverages GitOps to maintain cluster resources. ArgoCD is implemented as a controller that continuously monitors application definitions and configurations defined in a Git repository and compares the specified state of those configurations with their live state on the cluster. 
Configurations that deviate from their specified state in the Git repository are classified as OutOfSync. ArgoCD reports these differences and allows administrators to automatically or manually resync configurations to the defined state. - -ArgoCD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters. - -[id="argocd-support"] -== Statement of support - -Red Hat does not provide support for this tool. To obtain support for ArgoCD, see link:https://argoproj.github.io/argo-cd/SUPPORT/[Support] in the ArgoCD documentation. - -[id="argocd-documentation"] -== ArgoCD documentation - -For more information about using ArgoCD, see the link:https://argoproj.github.io/argo-cd/[ArgoCD documentation]. diff --git a/architecture/cicd_gitops.adoc b/architecture/cicd_gitops.adoc deleted file mode 100644 index 09bee1d19c1b..000000000000 --- a/architecture/cicd_gitops.adoc +++ /dev/null @@ -1,60 +0,0 @@ -:_content-type: ASSEMBLY -[id="cicd_gitops"] -= The CI/CD methodology and practice -include::_attributes/common-attributes.adoc[] -:context: cicd_gitops - -toc::[] - -Using a _continuous integration/continuous delivery_ (CI/CD) methodology enables you to regularly deliver applications to customers by introducing automation into the stages of application development, from integration and testing phases to delivery and deployment. The CI/CD process is often referred to as a "CI/CD pipeline." The main concepts attributed to CI/CD are continuous integration, continuous delivery, and continuous deployment. - -[id="cicd_admin"] -== CI/CD for cluster administration and application configuration management - -_Continuous integration_ is an automation process for developers. Code changes to an application are regularly built, tested, and merged to a shared repository. - -_Continuous delivery_ and _continuous deployment_ are closely related concepts that are sometimes used interchangeably and refer to automation of the pipeline. -Continuous delivery uses automation to ensure that a developer's changes to an application are tested and sent to a repository, where an operations team can deploy them to a production environment. Continuous deployment enables the release of changes, starting from the repository and ending in production. Continuous deployment speeds up application delivery and prevents the operations team from getting overloaded. - -[id="cicd_gitops_methodology"] -== The GitOps methodology and practice - -_GitOps_ is a set of practices that use Git pull requests to manage infrastructure and application configurations. The Git repository in GitOps is the only source of truth for system and application configuration. The repository contains the entire state of the system so that the trail of changes to the system state are visible and auditable. GitOps enables you to implement a DevOps methodology. - -You can use GitOps tooling to create repeatable and predictable processes for managing and recreating {product-title} clusters and applications. By using GitOps, you can address the issues of infrastructure and application configuration sprawl. It simplifies the propagation of infrastructure and application configuration changes across multiple clusters by defining your infrastructure and applications definitions as “code.” Implementing GitOps for your cluster configuration files can make automated installation easier and allow you to configure automated cluster customizations. 
You can apply the core principles of developing and maintaining software in a Git repository to the creation and management of your cluster and application configuration files. - -By using {product-title} to automate both your cluster configuration and container development process, you can pick and choose where and when to adopt GitOps practices. Using a CI pipeline that pairs with your GitOps strategy and execution plan is ideal. {product-title} provides the flexibility to choose when and how you integrate this methodology into your business practices and pipelines. - -With GitOps integration, you can declaratively configure and store your {product-title} cluster configuration. - -GitOps works well with {product-title} because you can both declaratively configure clusters and store the state of the cluster configuration in Git. For more information, see xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations]. - -[id="cicd_gitops_cluster_administration"] -=== GitOps for single-cluster and multi-cluster administration - -Whether you need one or more independent or cooperative {product-title} clusters, you can use a GitOps strategy to manage the following tasks: - -* Ensure that the clusters have similar states for configuration, monitoring, or storage. -* Recover or recreate clusters from a known state. -* Create clusters with a known state. -* Apply or revert configuration changes to multiple {product-title} clusters. -* Associate templated configuration with different environments. - -[id="cicd_gitops_application_configuration"] -=== GitOps for application configuration management - -You can also use GitOps practices to manage application configuration. This practice ensures consistency in applications when you deploy them to different clusters in different environments, like development, stage, and production. Managing application configuration with GitOps is also beneficial when you must deploy applications across multiple clusters, whether on-cloud or on-premises, for availability and scalability purposes. - -You can use a GitOps strategy to: - -* Promote applications across clusters, from stage to production. -* Apply or revert application changes to multiple {product-title} clusters. - -[id="cicd_gitops_integrators"] -=== GitOps technology providers and integrators - -There are several community offerings and third-party vendors that provide a high level of integration with {product-title}. - -You can integrate GitOps into {product-title} with the following community partners and third-party integrators: - -* xref:../architecture/argocd.adoc#argocd[ArgoCD] diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc deleted file mode 100644 index ea482bd64c3f..000000000000 --- a/architecture/control-plane.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="control-plane"] -= Control plane architecture -include::_attributes/common-attributes.adoc[] -:context: control-plane - -toc::[] - -The _control plane_, which is composed of control plane machines, manages the {product-title} cluster. The control plane machines manage workloads on the compute machines, which are also known as worker machines. The cluster itself manages all upgrades to the machines by the actions of the Cluster Version Operator (CVO), the Machine Config Operator, and a set of individual Operators.
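As a quick way to see the Operators that this description refers to, you can list the cluster version and the cluster Operators from the CLI; this sketch assumes cluster-admin access:

[source,terminal]
----
$ oc get clusterversion

$ oc get clusteroperators
----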
- -include::modules/architecture-machine-config-pools.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-drift-detection_post-install-machine-configuration-tasks[Understanding configuration drift detection]. - -include::modules/architecture-machine-roles.adoc[leveloffset=+1] - -include::modules/operators-overview.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/arch-cluster-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-operators-ref[Cluster Operators reference] -endif::[] - -include::modules/arch-olm-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* For more details on running add-on Operators in {product-title}, see the _Operators_ guide sections on xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager (OLM)] and xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[OperatorHub]. -* For more details on the Operator SDK, see xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators]. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/arch-platform-operators.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/admin/olm-managing-po.adoc#olm-managing-po[Managing platform Operators] -* xref:../operators/admin/olm-managing-po.adoc#olm-po-techpreview_olm-managing-po[Technology Preview restrictions for platform Operators] -* xref:../operators/understanding/olm-packaging-format.adoc#olm-rukpak-about_olm-packaging-format[RukPak component and packaging format] -* xref:../installing/cluster-capabilities.adoc#cluster-capabilities[Cluster capabilities] -endif::[] - -include::modules/understanding-machine-config-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For more information about detecting configuration drift, see xref:../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-drift-detection_post-install-machine-configuration-tasks[Understanding configuration drift detection]. - -* For information about preventing the control plane machines from rebooting after the Machine Config Operator makes changes to the machine configuration, see xref:../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-disabling-autoreboot-mco_troubleshooting-operator-issues[Disabling Machine Config Operator from automatically rebooting]. 
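The machine config pools and the Machine Config Operator covered by the modules above can also be inspected directly. A minimal sketch, assuming cluster-admin access:

[source,terminal]
----
$ oc get machineconfigpools

$ oc describe machineconfigpool/worker
----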
- -include::modules/etcd-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc#recommended-etcd-practices[Recommended etcd practices] -* xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd[Backing up etcd] - -include::modules/hosted-control-planes-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-addon-intro[HyperShift add-on (Technology Preview)] - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes (Technology Preview)] - -include::modules/hosted-control-planes-concepts-personas.adoc[leveloffset=+2] -include::modules/hosted-control-planes-version-support.adoc[leveloffset=+2] diff --git a/architecture/images b/architecture/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/architecture/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/architecture/index.adoc b/architecture/index.adoc deleted file mode 100644 index 08892de20d31..000000000000 --- a/architecture/index.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: ASSEMBLY -[id="architecture-overview"] -= Architecture overview -include::_attributes/common-attributes.adoc[] -:context: architecture-overview - -toc::[] - -{product-title} is a cloud-based Kubernetes container platform. -The foundation of {product-title} is based on Kubernetes and therefore shares the same technology. -To learn more about {product-title} and Kubernetes, see xref:../architecture/architecture.adoc#architecture[product architecture]. - -include::modules/openshift-architecture-common-terms.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information on networking, see xref:../networking/understanding-networking.adoc#understanding-networking[{product-title} networking]. -* For more information on storage, see xref:../storage/index.adoc#index[{product-title} storage]. -* For more information on authentication, see xref:../authentication/index.adoc#index[{product-title} authentication]. -* For more information on Operator Lifecycle Manager (OLM), see xref:../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[OLM]. -* For more information on logging, see xref:../logging/viewing-resource-logs.adoc#viewing-resource-logs[{product-title} Logging]. -* For more information on over-the-air (OTA) updates, see xref:../updating/index.adoc#index[Updating {product-title} clusters]. - -[id="about-installation-and-updates"] -== About installation and updates - -As a cluster administrator, you can use the {product-title} xref:../architecture/architecture-installation.adoc#architecture-installation[installation program] to install and deploy a cluster by using one of the following methods: - -* Installer-provisioned infrastructure -* User-provisioned infrastructure - -[id="about-control-planes"] -== About the control plane - -The xref:../architecture/control-plane.adoc#control-plane[control plane] manages the worker nodes and the pods in your cluster. You can configure nodes with the use of machine config pools (MCPs). 
-MCPs are groups of machines, such as control plane components or user workloads, that are based on the resources that they handle. -{product-title} assigns different roles to hosts. These roles define the function of a machine in a cluster. -The cluster contains definitions for the standard control plane and worker role types. - -You can use Operators to package, deploy, and manage services on the control plane. -Operators are important components in {product-title} because they provide the following services: - -* Perform health checks -* Provide ways to watch applications -* Manage over-the-air updates -* Ensure applications stay in the specified state - -[id="about-containerized-applications-for-developers"] -== About containerized applications for developers - -As a developer, you can use different tools, methods, and formats to xref:../architecture/understanding-development.adoc#understanding-development[develop your containerized application] based on your unique requirements, for example: - -* Use various build-tool, base-image, and registry options to build a simple container application. -* Use supporting components such as OperatorHub and templates to develop your application. -* Package and deploy your application as an Operator. - -You can also create a Kubernetes manifest and store it in a Git repository. -Kubernetes works on basic units called pods. A pod is a single instance of a running process in your cluster. Pods can contain one or more containers. -You can create a service by grouping a set of pods and their access policies. -Services provide permanent internal IP addresses and host names for other applications to use as pods are created and destroyed. Kubernetes defines workloads based on the type of your application. - -[id="coreos-and-ignition"] -== About {op-system-first} and Ignition - -As a cluster administrator, you can perform the following {op-system-first} tasks: - -** Learn about the next generation of xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[single-purpose container operating system technology]. -** Choose how to configure {op-system-first} -** Choose how to deploy {op-system-first}: -*** Installer-provisioned deployment -*** User-provisioned deployment - -The {product-title} installation program creates the Ignition configuration files that you need to deploy your cluster. -{op-system-first} uses Ignition during the initial configuration to perform common disk tasks, such as partitioning, formatting, writing files, and configuring users. -During the first boot, Ignition reads its configuration from the installation media or the location that you specify and applies the configuration to the machines. - -You can learn how xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[Ignition works], the process for a {op-system-first} machine in an {product-title} cluster, view Ignition configuration files, and change Ignition configuration after an installation. - -[id="about-admission-plug-ins"] -== About admission plugins -You can use xref:../architecture/admission-plug-ins.adoc#admission-plug-ins[admission plugins] to regulate how {product-title} functions. After a resource request is authenticated and authorized, admission plugins intercept the resource request to the master API to validate resource requests and to ensure that scaling policies are adhered to. -Admission plugins are used to enforce security policies, resource limitations, or configuration requirements. 
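Webhook admission plugins are one way this interception mechanism is extended. The following is a minimal sketch of a `ValidatingWebhookConfiguration` that sends Deployment create and update requests to an in-cluster service for validation; the webhook name, namespace, and service details are hypothetical placeholders.

[source,yaml]
----
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: example-policy-webhook              # hypothetical name
webhooks:
- name: policy.example.com                  # hypothetical webhook identifier
  admissionReviewVersions: ["v1"]
  sideEffects: None
  failurePolicy: Fail                       # reject requests if the webhook cannot be reached
  clientConfig:
    service:
      namespace: example-policy             # hypothetical namespace and service that serve the webhook
      name: policy-webhook
      path: /validate
  rules:
  - apiGroups: ["apps"]
    apiVersions: ["v1"]
    operations: ["CREATE", "UPDATE"]
    resources: ["deployments"]
----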
diff --git a/architecture/mce-overview-ocp.adoc b/architecture/mce-overview-ocp.adoc deleted file mode 100644 index 869f970703e4..000000000000 --- a/architecture/mce-overview-ocp.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="mce-overview-ocp"] -= About multicluster engine for Kubernetes operator -include::_attributes/common-attributes.adoc[] -:context: mce-overview-ocp - -toc::[] - -One of the challenges of scaling Kubernetes environments is managing the lifecycle of a growing fleet. To meet that challenge, you can use multicluster engine for Kubernetes operator (MCE). The operator delivers full lifecycle capabilities for managed {product-title} clusters and partial lifecycle management for other Kubernetes distributions. It is available in two ways: - -* As a standalone operator that you install as part of your {product-title} or {oke} subscription -* As part of link:https://access.redhat.com/products/red-hat-advanced-cluster-management-for-kubernetes[Red Hat Advanced Cluster Management for Kubernetes] - -[id="mce-on-ocp"] -== Cluster management with multicluster engine on {product-title} - -When you enable multicluster engine on {product-title}, you gain the following capabilities: - -* xref:../architecture/control-plane.adoc#hosted-control-planes-overview_control-plane[Hosted control planes], which is a feature that is based on the HyperShift project. With a centralized hosted control plane, you can operate {product-title} clusters in a hyperscale manner. -* Hive, which provisions self-managed {product-title} clusters to the hub and completes the initial configurations for those clusters. -* klusterlet agent, which registers managed clusters to the hub. -* Infrastructure Operator, which manages the deployment of the Assisted Service to orchestrate on-premise bare metal and vSphere installations of {product-title}, such as SNO on bare metal. The Infrastructure Operator includes xref:../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-challenges-of-far-edge-deployments_ztp-deploying-far-edge-clusters-at-scale[{ztp-first}], which fully automates cluster creation on bare metal and vSphere provisioning with GitOps workflows to manage deployments and configuration changes. -* Open cluster management, which provides resources to manage Kubernetes clusters. - -The multicluster engine is included with your {product-title} support subscription and is delivered separately from the core payload. To start to use multicluster engine, you deploy the {product-title} cluster and then install the operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#mce-install-intro[Installing and upgrading multicluster engine operator]. - -[id="mce-on-rhacm"] -== Cluster management with Red Hat Advanced Cluster Management - -If you need cluster management capabilities beyond what {product-title} with multicluster engine can provide, consider Red Hat Advanced Cluster Management. The multicluster engine is an integral part of Red Hat Advanced Cluster Management and is enabled by default. 
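The install step mentioned earlier is normally an Operator Lifecycle Manager subscription. The following sketch assumes the package is named `multicluster-engine`, is published in the `redhat-operators` catalog source, and is installed into a namespace that already has a matching `OperatorGroup`; the channel shown is a placeholder, so confirm the real package and channel names in OperatorHub before using anything like this.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: multicluster-engine
  namespace: multicluster-engine            # assumed target namespace with a matching OperatorGroup
spec:
  name: multicluster-engine                 # assumed package name; confirm in OperatorHub
  channel: stable-2.x                       # placeholder channel
  source: redhat-operators                  # assumed catalog source
  sourceNamespace: openshift-marketplace
  installPlanApproval: Automatic
----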
- -[id="mce-additional-resources-ocp"] -== Additional resources - -For the complete documentation for multicluster engine, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#doc-wrapper[Cluster lifecycle with multicluster engine documentation], which is part of the product documentation for Red Hat Advanced Cluster Management. diff --git a/architecture/modules b/architecture/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/architecture/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/architecture/ocm-overview-ocp.adoc b/architecture/ocm-overview-ocp.adoc deleted file mode 100644 index d1eaf4095dd4..000000000000 --- a/architecture/ocm-overview-ocp.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="ocm-overview-ocp"] -= Red Hat OpenShift Cluster Manager -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: ocm-overview-ocp -toc::[] - -{cluster-manager-first} is a managed service where you can install, modify, operate, and upgrade your Red Hat OpenShift clusters. This service allows you to work with all of your organization’s clusters from a single dashboard. - -{cluster-manager} guides you to install {OCP}, Red Hat OpenShift Service on AWS (ROSA), and {product-short-name} clusters. It is also responsible for managing both {OCP} clusters after self-installation as well as your ROSA and {product-short-name} clusters. - -You can use {cluster-manager} to do the following actions: - -* Create new clusters -* View cluster details and metrics -* Manage your clusters with tasks such as scaling, changing node labels, networking, and authentication -* Manage access control -* Monitor clusters -* Schedule upgrades - -include::modules/ocm-accessing.adoc[leveloffset=+1] - -[id="ocm-general-actions-ocp"] -== General actions - -On the top right of the cluster page, there are some actions that a user can perform on the entire cluster: - -* **Open console** launches a web console so that the cluster owner can issue commands to the cluster. -* **Actions** drop-down menu allows the cluster owner to change the display name of the cluster, change the number of load balancers and persistent storage on the cluster, if applicable, manually set the node count, and delete the cluster. -* **Refresh** icon forces a refresh of the cluster. - -[id="ocm-cluster-tabs-ocp"] -== Cluster tabs - -Selecting an active, installed cluster shows tabs associated with that cluster. The following tabs display after the cluster's installation completes: - -* Overview -* Access control -* Add-ons -* Networking -* Insights Advisor -* Machine pools -* Support -* Settings - -include::modules/ocm-overview-tab.adoc[leveloffset=+2] -include::modules/ocm-accesscontrol-tab.adoc[leveloffset=+2] -include::modules/ocm-addons-tab.adoc[leveloffset=+2] -include::modules/ocm-insightsadvisor-tab.adoc[leveloffset=+2] -include::modules/ocm-machinepools-tab.adoc[leveloffset=+2] -include::modules/ocm-support-tab.adoc[leveloffset=+2] -include::modules/ocm-settings-tab.adoc[leveloffset=+2] - -[id="ocm-additional-resources-ocp"] -== Additional resources - -* For the complete documentation for {cluster-manager}, see link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2022/html-single/managing_clusters/index[{cluster-manager} documentation].
diff --git a/architecture/snippets b/architecture/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/architecture/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc deleted file mode 100644 index 0d7313848322..000000000000 --- a/architecture/understanding-development.adoc +++ /dev/null @@ -1,382 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-development"] -= Understanding {product-title} development -include::_attributes/common-attributes.adoc[] -:context: understanding-development - -toc::[] - -To fully leverage the capability of containers when developing and running -enterprise-quality applications, ensure your environment is supported by tools -that allow containers to be: - -* Created as discrete microservices that can be connected to other -containerized, and non-containerized, services. For example, you might want to -join your application with a database or attach a monitoring application to it. - -* Resilient, so if a server crashes or needs to go down for maintenance or to be -decommissioned, containers can start on another machine. - -* Automated to pick up code changes automatically and then start and deploy new -versions of themselves. - -* Scaled up, or replicated, to have more instances serving clients as demand -increases and then spun down to fewer instances as demand declines. - -* Run in different ways, depending on the type of application. For example, one -application might run once a month to produce a report and then exit. Another -application might need to run constantly and be highly available to clients. - -* Managed so you can watch the state of your application and react when -something goes wrong. - -Containers’ widespread acceptance, and the resulting requirements for tools and -methods to make them enterprise-ready, resulted in many options for them. - -The rest of this section explains options for -assets you can create when you build and deploy containerized Kubernetes -applications in {product-title}. It also describes which approaches you might -use for different kinds of applications and development requirements. - -[id="developing-containerized-applications"] -== About developing containerized applications - -You can approach application development with containers in many ways, and -different approaches might be more appropriate for different situations. To -illustrate some of this variety, the series of approaches that is presented -starts with developing a single container and ultimately deploys that container -as a mission-critical application for a large enterprise. These approaches -show different tools, formats, and methods that you can employ with containerized -application development. This topic describes: - -* Building a simple container and storing it in a registry -* Creating a Kubernetes manifest and saving it to a Git repository -* Making an Operator to share your application with others - -[id="building-simple-container"] -== Building a simple container - -You have an idea for an application and you want to containerize it. - -First you require a tool for building a container, like buildah or docker, -and a file that describes what goes in your container, which is typically a -link:https://docs.docker.com/engine/reference/builder/[Dockerfile]. - -Next, you require a location to push the resulting container image so you can -pull it to run anywhere you want it to run. 
This location is a container -registry. - -Some examples of each of these components are installed by default on most -Linux operating systems, except for the Dockerfile, which you provide yourself. - -The following diagram displays the process of building and pushing an image: - -.Create a simple containerized application and push it to a registry -image::create-push-app.png[Creating and pushing a containerized application] - -If you use a computer that runs {op-system-base-full} as the operating -system, the process of creating a containerized application requires the -following steps: - -. Install container build tools: {op-system-base} contains a set of tools that includes -podman, buildah, and skopeo that you use to build and manage containers. -. Create a Dockerfile to combine base image and software: Information about -building your container goes into a file that is named `Dockerfile`. In that -file, you identify the base image you build from, the software packages you -install, and the software you copy into the container. You also identify -parameter values like network ports that you expose outside the container and -volumes that you mount inside the container. Put your Dockerfile and the -software you want to containerize in a directory on your {op-system-base} system. -. Run buildah or docker build: Run the `buildah build-using-dockerfile` or -the `docker build` command to pull your chosen base image to the local system and -create a container image that is stored locally. You can also build container images -without a Dockerfile by using buildah. -. Tag and push to a registry: Add a tag to your new container image that -identifies the location of the registry in which you want to store and share -your container. Then push that image to the registry by running the -`podman push` or `docker push` command. -. Pull and run the image: From any system that has a container client tool, -such as podman or docker, run a command that identifies your new image. -For example, run the `podman run <image_name>` or `docker run <image_name>` -command. Here `<image_name>` is the name of your new container image, which -resembles `quay.io/myrepo/myapp:latest`. The registry might require credentials -to push and pull images. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -For more details on the process of building container images, pushing them to -registries, and running them, see -xref:../cicd/builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah]. -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -[id="container-build-tool-options"] -=== Container build tool options - -Building and managing containers with buildah, podman, and skopeo results in industry standard container images that include features specifically tuned for deploying containers in {product-title} or other Kubernetes environments. These tools are daemonless and can run without root privileges, requiring less overhead to run them. - -[IMPORTANT] -==== -Support for Docker Container Engine as a container runtime is deprecated in Kubernetes 1.20 and will be removed in a future release. However, Docker-produced images will continue to work in your cluster with all runtimes, including CRI-O. For more information, see the link:https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/[Kubernetes blog announcement]. -==== - -When you ultimately run your containers in {product-title}, you use the -link:https://cri-o.io/[CRI-O] container engine.
CRI-O runs on every worker and -control plane machine in an {product-title} cluster, but CRI-O is not yet supported as -a standalone runtime outside of {product-title}. - -[id="base-image-options"] -=== Base image options - -The base image you choose to build your application on contains a set of -software that resembles a Linux system to your application. When you build your -own image, your software is placed into that file system and sees that file -system as though it were looking at its operating system. Choosing this base -image has major impact on how secure, efficient and upgradeable your container -is in the future. - -Red Hat provides a new set of base images referred to as -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI). -These images are based on Red Hat Enterprise Linux and are similar to base -images that Red Hat has offered in the past, with one major difference: they -are freely redistributable without a Red Hat subscription. As a result, you can -build your application on UBI images without having to worry about how they -are shared or the need to create different images for different environments. - -These UBI images have standard, init, and minimal versions. You can also use the -link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections] -images as a foundation for applications that rely on specific runtime -environments such as Node.js, Perl, or Python. Special versions of some of -these runtime base images are referred to as Source-to-Image (S2I) images. With -S2I images, you can insert your code into a base image environment that is ready -to run that code. - -S2I images are available for you to use directly from the {product-title} web UI -by selecting *Catalog* -> *Developer Catalog*, as shown in the following figure: - -.Choose S2I base images for apps that need specific runtimes -image::developer-catalog.png[{product-title} Developer Catalog] - -[id="understanding-development-registry-options"] -=== Registry options - -Container registries are where you store container images so you can share them -with others and make them available to the platform where they ultimately run. -You can select large, public container registries that offer free accounts or a -premium version that offer more storage and special features. You can also -install your own registry that can be exclusive to your organization or -selectively shared with others. - -To get Red Hat images and certified partner images, you can draw from the -Red Hat Registry. The Red Hat Registry is represented by two locations: -`registry.access.redhat.com`, which is unauthenticated and deprecated, and -`registry.redhat.io`, which requires authentication. You can learn about the Red -Hat and partner images in the Red Hat Registry from the -link:https://catalog.redhat.com/software/containers/explore[Container images section of the Red Hat Ecosystem Catalog]. -Besides listing Red Hat container images, it also shows extensive information -about the contents and quality of those images, including health scores that are -based on applied security updates. - -Large, public registries include link:https://hub.docker.com/[Docker Hub] and -link:https://quay.io/[Quay.io]. 
The Quay.io registry is owned and managed by Red -Hat. Many of the components used in {product-title} are stored in Quay.io, -including container images and the Operators that are used to deploy -{product-title} itself. Quay.io also offers the means of storing other types of -content, including Helm charts. - -If you want your own, private container registry, {product-title} itself -includes a private container registry that is installed with {product-title} -and runs on its cluster. Red Hat also offers a private version of the Quay.io -registry called link:https://access.redhat.com/products/red-hat-quay[Red Hat Quay]. -Red Hat Quay includes geo replication, Git build triggers, Clair image scanning, -and many other features. - -All of the registries mentioned here can require credentials to download images -from those registries. Some of those credentials are presented on a cluster-wide -basis from {product-title}, while other credentials can be assigned to individuals. - -[id="creating-kubernetes-manifest-openshift"] -== Creating a Kubernetes manifest for {product-title} - -While the container image is the basic building block for a containerized -application, more information is required to manage and deploy that application -in a Kubernetes environment such as {product-title}. The typical next steps after -you create an image are to: - -* Understand the different resources you work with in Kubernetes manifests -* Make some decisions about what kind of an application you are running -* Gather supporting components -* Create a manifest and store that manifest in a Git repository so you can store -it in a source versioning system, audit it, track it, promote and deploy it -to the next environment, roll it back to earlier versions, if necessary, and -share it with others - -[id="understanding-kubernetes-pods"] -=== About Kubernetes pods and services - -While the container image is the basic unit with docker, the basic units that -Kubernetes works with are called -link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[pods]. -Pods represent the next step in building out an application. A pod can contain -one or more than one container. The key is that the pod is the single unit -that you deploy, scale, and manage. - -Scalability and namespaces are probably the main items to consider when determining -what goes in a pod. For ease of deployment, you might want to deploy a container -in a pod and include its own logging and monitoring container in the pod. Later, -when you run the pod and need to scale up an additional instance, those other -containers are scaled up with it. For namespaces, containers in a pod share the -same network interfaces, shared storage volumes, and resource limitations, -such as memory and CPU, which makes it easier to manage the contents of the pod -as a single unit. Containers in a pod can also communicate with each other by -using standard inter-process communications, such as System V semaphores or -POSIX shared memory. - -While individual pods represent a scalable unit in Kubernetes, a -link:https://kubernetes.io/docs/concepts/services-networking/service/[service] -provides a means of grouping together a set of pods to create a complete, stable -application that can complete tasks such as load balancing. A service is also -more permanent than a pod because the service remains available from the same -IP address until you delete it. 
When the service is in use, it is requested by -name and the {product-title} cluster resolves that name into the IP addresses -and ports where you can reach the pods that compose the service. - -By their nature, containerized applications are separated from the operating -systems where they run and, by extension, their users. Part of your Kubernetes -manifest describes how to expose the application to internal and external -networks by defining -link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[network policies] -that allow fine-grained control over communication with your containerized -applications. To connect incoming requests for HTTP, HTTPS, and other services -from outside your cluster to services inside your cluster, you can use an -link:https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`] -resource. - -If your container requires on-disk storage instead of database storage, which -might be provided through a service, you can add -link:https://kubernetes.io/docs/concepts/storage/volumes/[volumes] -to your manifests to make that storage available to your pods. You can configure -the manifests to create persistent volumes (PVs) or dynamically create volumes that -are added to your `Pod` definitions. - -After you define a group of pods that compose your application, you can define -those pods in -link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[`Deployment`] -and xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects. - -[id="application-types"] -=== Application types - -Next, consider how your application type influences how to run it. - -Kubernetes defines different types of workloads that are appropriate for -different kinds of applications. To determine the appropriate workload for your -application, consider if the application is: - -* Meant to run to completion and be done. An example is an application that -starts up to produce a report and exits when the report is complete. The -application might not run again then for a month. Suitable {product-title} -objects for these types of applications include -link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[`Job`] -and https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[`CronJob`] objects. -* Expected to run continuously. For long-running applications, you can write a -xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[deployment]. -* Required to be highly available. If your application requires high -availability, then you want to size your deployment to have more than one -instance. A `Deployment` or `DeploymentConfig` object can incorporate a -link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[replica set] -for that type of application. With replica sets, pods run across multiple nodes -to make sure the application is always available, even if a worker goes down. -* Need to run on every node. Some types of Kubernetes applications are intended -to run in the cluster itself on every master or worker node. DNS and monitoring -applications are examples of applications that need to run continuously on every -node. You can run this type of application as a -link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[daemon set]. -You can also run a daemon set on a subset of nodes, based on node labels. -* Require life-cycle management. 
When you want to hand off your application so -that others can use it, consider creating an -link:https://www.openshift.com/learn/topics/operators[Operator]. Operators let you build in -intelligence, so that your application can handle tasks like backups and upgrades automatically. -Coupled with the Operator Lifecycle Manager (OLM), cluster managers can expose -Operators to selected namespaces so that users in the cluster can run them. -* Have identity or numbering requirements. An application might have identity -requirements or numbering requirements. For example, you might be -required to run exactly three instances of the application and to name the -instances `0`, `1`, and `2`. A -https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[stateful set] -is suitable for this application. Stateful sets are most useful for applications -that require independent storage, such as databases and ZooKeeper clusters. - -[id="supporting-components"] -=== Available supporting components - -The application you write might need supporting components, like a database or -a logging component. To fulfill that need, you might be able to obtain the -required component from the following catalogs that are available in the -{product-title} web console: - -* OperatorHub, which is available in each {product-title} {product-version} -cluster. The OperatorHub makes Operators from Red Hat, -certified Red Hat partners, and community members available to the cluster operator. The -cluster operator can make those Operators available in all or selected -namespaces in the cluster, so developers can launch them and configure them -with their applications. -* Templates, which are useful for a one-off type of application, where the -lifecycle of a component is not important after it is installed. A template provides an easy -way to get started developing a Kubernetes application with minimal overhead. -A template can be a list of resource definitions, which could be `Deployment`, -`Service`, `Route`, or other objects. If you want to change names or resources, -you can set these values as parameters in the template. - -You can configure the supporting Operators and -templates to the specific needs of your development team and then make them -available in the namespaces in which your developers work. Many people add -shared templates to the `openshift` namespace because it is accessible from all -other namespaces. - -[id="applying-manifest"] -=== Applying the manifest - -Kubernetes manifests let you create a more complete picture of the components -that make up your Kubernetes applications. You write these manifests as YAML -files and deploy them by applying them to the cluster, for example, by running -the `oc apply` command. - -[id="manifest-next-steps"] -=== Next steps - -At this point, consider ways to automate your container development process. -Ideally, you have some sort of CI pipeline that builds the images and pushes -them to a registry. In particular, a GitOps pipeline integrates your container -development with the Git repositories that you use to store the software that -is required to build your applications. - -The workflow to this point might look like the following: - -* Day 1: You write some YAML. You then run the `oc apply` command to apply that -YAML to the cluster and test that it works. -* Day 2: You put your YAML container configuration file into your own Git -repository. From there, people who want to install that app, or help you improve -it, can pull down the YAML and apply it to their cluster to run the app.
-* Day 3: Consider writing an Operator for your application. - -[id="develop-for-operators"] -== Develop for Operators - -Packaging and deploying your application as an Operator might be preferred -if you make your application available for others to run. As noted earlier, -Operators add a lifecycle component to your application that acknowledges that -the job of running an application is not complete as soon as it is installed. - -When you create an application as an Operator, you can build in your own -knowledge of how to run and maintain the application. You can build in features -for upgrading the application, backing it up, scaling it, or keeping track of -its state. If you configure the application correctly, maintenance tasks, -like updating the Operator, can happen automatically and invisibly to the -Operator's users. - -An example of a useful Operator is one that is set up to automatically back up -data at particular times. Having an Operator manage an application's backup at -set times can save a system administrator from remembering to do it. - -Any application maintenance that has traditionally been completed manually, -like backing up data or rotating certificates, can be completed automatically -with an Operator. diff --git a/authentication/_attributes b/authentication/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/authentication/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc b/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc deleted file mode 100644 index 80c43b7cb1c8..000000000000 --- a/authentication/assuming-an-aws-iam-role-for-a-service-account.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="assuming-an-aws-iam-role-for-a-service-account"] -= Assuming an AWS IAM role for a service account -include::_attributes/common-attributes.adoc[] -ifdef::openshift-rosa,openshift-dedicated[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::openshift-rosa,openshift-dedicated[] -:context: assuming-an-aws-iam-role-for-a-service-account - -toc::[] - -[role="_abstract"] -ifdef::openshift-rosa[] -{product-title} clusters that use the AWS Security Token Service (STS) include a pod identity webhook for use with pods that run in user-defined projects. -endif::openshift-rosa[] - -You can use the pod identity webhook to enable a service account to automatically assume an AWS Identity and Access Management (IAM) role in your own pods. If the assumed IAM role has the required AWS permissions, the pods can run AWS SDK operations by using temporary STS credentials. 
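To make the workflow easier to picture, the following is a minimal sketch of a service account annotated with the IAM role that its pods assume. The project, service account name, and role ARN are placeholders, and the `eks.amazonaws.com/role-arn` annotation key is the one conventionally read by the pod identity webhook, so verify it against the setup module included below.

[source,yaml]
----
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-aws-sdk-sa                  # hypothetical service account used by your pods
  namespace: example-project                # hypothetical user-defined project
  annotations:
    # Placeholder ARN of the IAM role that pods running under this service account assume.
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/example-pod-role
----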
- -include::modules/understanding-pod-identity-webhook-workflow-in-user-defined-projects.adoc[leveloffset=+1] -include::modules/assuming-an-aws-iam-role-in-your-own-pods.adoc[leveloffset=+1] -include::modules/setting-up-an-aws-iam-role-a-service-account.adoc[leveloffset=+2] -include::modules/creating-a-service-account-in-your-project.adoc[leveloffset=+2] -include::modules/creating-an-example-aws-sdk-container-image.adoc[leveloffset=+2] -include::modules/deploying-a-pod-that-includes-an-aws-sdk.adoc[leveloffset=+2] -include::modules/verifying-the-assumed-iam-role-in-your-pod.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_assuming-an-aws-iam-role-for-a-service-account"] -== Additional resources - -* For more information about using AWS IAM roles with service accounts, see link:https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html[IAM roles for service accounts] in the AWS documentation. - -* For information about AWS IAM role delegation, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-service.html[Creating a role to delegate permissions to an AWS service] in the AWS documentation. - -* For details about AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. - -* For more information about installing and using the AWS Boto3 SDK for Python, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. - -ifdef::openshift-rosa,openshift-dedicated[] -* For general information about webhook admission plugins for OpenShift, see link:https://docs.openshift.com/container-platform/4.13/architecture/admission-plug-ins.html#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] in the OpenShift Container Platform documentation. -endif::openshift-rosa,openshift-dedicated[] diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc deleted file mode 100644 index c16fda7a3f0d..000000000000 --- a/authentication/bound-service-account-tokens.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="bound-service-account-tokens"] -= Using bound service account tokens -include::_attributes/common-attributes.adoc[] -:context: bound-service-account-tokens - -toc::[] - -You can use bound service account tokens, which improve the ability to integrate with cloud provider identity and access management (IAM) services, such as AWS IAM.
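As an illustration of the volume projection approach that the following modules configure, this sketch mounts a bound token with an explicit audience and lifetime into a pod. The pod name, image, service account, and audience are placeholders.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                          # hypothetical pod
spec:
  serviceAccountName: example-sa             # hypothetical service account whose token is projected
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    volumeMounts:
    - name: bound-token
      mountPath: /var/run/secrets/tokens
      readOnly: true
  volumes:
  - name: bound-token
    projected:
      sources:
      - serviceAccountToken:
          path: token                        # file name inside the mounted directory
          audience: openshift                # placeholder audience that the token is bound to
          expirationSeconds: 3600            # the kubelet rotates the token before it expires
----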
- -// About bound service account tokens -include::modules/bound-sa-tokens-about.adoc[leveloffset=+1] - -// Configuring bound service account tokens using volume projection -include::modules/bound-sa-tokens-configuring.adoc[leveloffset=+1] - -// Creating bound service account tokens outside the pod -include::modules/bound-sa-tokens-configuring-externally.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-rebooting.adoc#nodes-nodes-rebooting-gracefully_nodes-nodes-rebooting[Rebooting a node gracefully] - -* xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[Creating service accounts] - -// TODO: Verify distros: openshift-enterprise,openshift-webscale,openshift-origin diff --git a/authentication/configuring-internal-oauth.adoc b/authentication/configuring-internal-oauth.adoc deleted file mode 100644 index 7e3f86c9e602..000000000000 --- a/authentication/configuring-internal-oauth.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-internal-oauth"] -= Configuring the internal OAuth server -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/oauth-server-overview.adoc[leveloffset=+1] - -include::modules/oauth-internal-tokens.adoc[leveloffset=+1] - -include::modules/oauth-internal-options.adoc[leveloffset=+1] - -include::modules/oauth-configuring-internal-oauth.adoc[leveloffset=+1] - -include::modules/oauth-configuring-token-inactivity-timeout.adoc[leveloffset=+1] - -include::modules/oauth-customizing-the-oauth-server-URL.adoc[leveloffset=+1] - -include::modules/oauth-server-metadata.adoc[leveloffset=+1] - -include::modules/oauth-troubleshooting-api-events.adoc[leveloffset=+1] diff --git a/authentication/configuring-ldap-failover.adoc b/authentication/configuring-ldap-failover.adoc deleted file mode 100644 index ede202898a53..000000000000 --- a/authentication/configuring-ldap-failover.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-ldap-failover"] -= Configuring LDAP failover -include::_attributes/common-attributes.adoc[] -:context: sssd-ldap-failover - -toc::[] - -include::modules/ldap-failover-overview.adoc[] - -include::modules/ldap-failover-prereqs.adoc[leveloffset=+1] - -include::modules/ldap-failover-generate-certs.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-sssd.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-apache.adoc[leveloffset=+1] - -include::modules/ldap-failover-configure-openshift.adoc[leveloffset=+1] diff --git a/authentication/configuring-oauth-clients.adoc b/authentication/configuring-oauth-clients.adoc deleted file mode 100644 index 2059ef4293b6..000000000000 --- a/authentication/configuring-oauth-clients.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-oauth-clients"] -= Configuring OAuth clients -include::_attributes/common-attributes.adoc[] -:context: configuring-oauth-clients - -toc::[] - -Several OAuth clients are created by default in {product-title}. You can also register and configure additional OAuth clients. 
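For orientation, registering an additional client, which the module below covers in detail, amounts to creating an `OAuthClient` object along the lines of the following sketch. The client name, secret, and redirect URI are placeholders.

[source,yaml]
----
apiVersion: oauth.openshift.io/v1
kind: OAuthClient
metadata:
  name: example-oauth-client                 # hypothetical client ID presented during authorization
secret: "example-client-secret-value"        # placeholder secret used when exchanging authorization codes
redirectURIs:
- "https://app.example.com/oauth/callback"   # placeholder redirect URI for the client
grantMethod: prompt                          # prompt the user to approve the grant; auto approves silently
----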
- -// Default OAuth clients -include::modules/oauth-default-clients.adoc[leveloffset=+1] - -// Register an additional OAuth client -include::modules/oauth-register-additional-client.adoc[leveloffset=+1] - -// Configuring token inactivity timeout for OAuth clients -include::modules/oauth-configuring-token-inactivity-timeout-clients.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../rest_api/oauth_apis/oauthclient-oauth-openshift-io-v1.adoc#oauthclient-oauth-openshift-io-v1[OAuthClient [oauth.openshift.io/v1]] diff --git a/authentication/dedicated-understanding-authentication.adoc b/authentication/dedicated-understanding-authentication.adoc deleted file mode 100644 index 7d27e9512e64..000000000000 --- a/authentication/dedicated-understanding-authentication.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. 
- -|=== diff --git a/authentication/identity_providers/_attributes b/authentication/identity_providers/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/identity_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc deleted file mode 100644 index bad3240b4239..000000000000 --- a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-basic-authentication-identity-provider"] -= Configuring a basic authentication identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-basic-authentication-identity-provider - -toc::[] - -Configure the `basic-authentication` identity provider for users to log in to {product-title} with credentials validated against a remote identity provider. Basic authentication is a generic back-end integration mechanism. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-basic-authentication.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/example-apache-httpd-configuration.adoc[leveloffset=+1] - -include::modules/identity-provider-basic-authentication-troubleshooting.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-github-identity-provider.adoc b/authentication/identity_providers/configuring-github-identity-provider.adoc deleted file mode 100644 index 76a1b23f4c54..000000000000 --- a/authentication/identity_providers/configuring-github-identity-provider.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-github-identity-provider"] -= Configuring a GitHub or GitHub Enterprise identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-github-identity-provider - -toc::[] - -Configure the `github` identity provider to validate user names and passwords against GitHub or GitHub Enterprise's OAuth authentication server. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. - -You can use the GitHub integration to connect to either GitHub or GitHub Enterprise. For GitHub Enterprise integrations, you must provide the `hostname` of your instance and can optionally provide a `ca` certificate bundle to use in requests to the server. - -[NOTE] -==== -The following steps apply to both GitHub and GitHub Enterprise unless noted. 
-==== - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-github-about.adoc[leveloffset=+1] - -include::modules/identity-provider-registering-github.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-github-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc deleted file mode 100644 index 023dd8dec0fb..000000000000 --- a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-gitlab-identity-provider"] -= Configuring a GitLab identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-gitlab-identity-provider - -toc::[] - -Configure the `gitlab` identity provider using link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-gitlab-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-gitlab-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-google-identity-provider.adoc b/authentication/identity_providers/configuring-google-identity-provider.adoc deleted file mode 100644 index 90faa932bff9..000000000000 --- a/authentication/identity_providers/configuring-google-identity-provider.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-google-identity-provider"] -= Configuring a Google identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-google-identity-provider - -toc::[] - -Configure the `google` identity provider using the link:https://developers.google.com/identity/protocols/OpenIDConnect[Google OpenID Connect integration]. 
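As a sketch of the end result, the Google provider is added as an entry in the cluster `OAuth` resource similar to the following; the provider name, secret name, client ID, and hosted domain are placeholders, and the authoritative field reference is in the modules included below.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: googleidp                          # placeholder name shown on the login page
    mappingMethod: claim
    type: Google
    google:
      clientID: "<google_client_id>"         # placeholder OAuth client ID from the Google console
      clientSecret:
        name: google-secret                  # assumed secret in the openshift-config namespace
      hostedDomain: "example.com"            # placeholder; restricts logins to this domain
----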
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-google-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-google-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc deleted file mode 100644 index f13cfb919b85..000000000000 --- a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-htpasswd-identity-provider"] -= Configuring an htpasswd identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-htpasswd-identity-provider - -toc::[] - -Configure the `htpasswd` identity provider to allow users to log in to {product-title} with credentials from an htpasswd file. - -To define an htpasswd identity provider, perform the following tasks: - -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#creating-htpasswd-file[Create an `htpasswd` file] to store the user and password information. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-secret_{context}[Create -a secret] to represent the `htpasswd` file. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-htpasswd-CR_{context}[Define an htpasswd identity provider resource] that references the secret. -. xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#add-identity-provider_{context}[Apply the resource] to -the default OAuth configuration to add the identity provider. 
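To connect those steps, the following is a minimal sketch of the resulting custom resource. It assumes the secret created from your htpasswd file is named `htpass-secret` in the `openshift-config` namespace, and the provider name is a placeholder.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider               # placeholder name shown on the login page
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret                  # assumed secret created from the htpasswd file
----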
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-htpasswd-about.adoc[leveloffset=+1] - -[id="creating-htpasswd-file"] -== Creating the htpasswd file - -See one of the following sections for instructions about how to create the htpasswd file: - -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-linux_configuring-htpasswd-identity-provider[Creating an htpasswd file using Linux] -* xref:../../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#identity-provider-creating-htpasswd-file-windows_configuring-htpasswd-identity-provider[Creating an htpasswd file using Windows] - -include::modules/identity-provider-creating-htpasswd-file-linux.adoc[leveloffset=+2] - -include::modules/identity-provider-creating-htpasswd-file-windows.adoc[leveloffset=+2] - -include::modules/identity-provider-htpasswd-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-htpasswd-update-users.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-keystone-identity-provider.adoc b/authentication/identity_providers/configuring-keystone-identity-provider.adoc deleted file mode 100644 index 1bac30ad85a5..000000000000 --- a/authentication/identity_providers/configuring-keystone-identity-provider.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-keystone-identity-provider"] -= Configuring a Keystone identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-keystone-identity-provider - -toc::[] - -Configure the `keystone` identity provider to integrate your {product-title} cluster with Keystone to enable shared authentication with an OpenStack Keystone v3 server configured to store users in an internal database. This configuration allows users to log in to {product-title} with their Keystone credentials. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-about.adoc[leveloffset=+1] - -include::modules/identity-provider-secret-tls.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-keystone-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. 
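For reference alongside those common parameters, a Keystone entry in the cluster `OAuth` resource generally resembles the following sketch; the provider name, domain, server URL, and config map name are placeholders.

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: keystoneidp                        # placeholder provider name
    mappingMethod: claim
    type: Keystone
    keystone:
      domainName: default                    # placeholder Keystone v3 domain
      url: https://keystone.example.com:5000 # placeholder Keystone server URL
      ca:
        name: ca-config-map                  # assumed config map holding the server CA certificate
----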
- -include::modules/identity-provider-add.adoc[leveloffset=+1] diff --git a/authentication/identity_providers/configuring-ldap-identity-provider.adoc b/authentication/identity_providers/configuring-ldap-identity-provider.adoc deleted file mode 100644 index b659386195b6..000000000000 --- a/authentication/identity_providers/configuring-ldap-identity-provider.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-ldap-identity-provider"] -= Configuring an LDAP identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-ldap-identity-provider - -toc::[] - -Configure the `ldap` identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-about-ldap.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-ldap-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-ldap-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-oidc-identity-provider.adoc b/authentication/identity_providers/configuring-oidc-identity-provider.adoc deleted file mode 100644 index 2c3e74fba472..000000000000 --- a/authentication/identity_providers/configuring-oidc-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-oidc-identity-provider"] -= Configuring an OpenID Connect identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-oidc-identity-provider - -toc::[] - -Configure the `oidc` identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. 
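-
-A minimal OpenID Connect identity provider entry might look like the following sketch; the provider name, client ID, client secret name, and issuer URL are placeholders, and the modules in this assembly explain the supported claims and options.
-
-.Example OpenID Connect provider entry in the `OAuth` custom resource
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: OAuth
-metadata:
-  name: cluster
-spec:
-  identityProviders:
-  - name: oidcidp
-    mappingMethod: claim
-    type: OpenID
-    openID:
-      clientID: <client_id>
-      clientSecret:
-        name: oidc-client-secret
-      claims:
-        preferredUsername:
-        - preferred_username
-        name:
-        - name
-        email:
-        - email
-      issuer: https://oidc.example.com
-----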
- -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -include::modules/identity-provider-overview.adoc[leveloffset=+1] -endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -include::modules/identity-provider-oidc-about.adoc[leveloffset=+1] - -ifdef::openshift-enterprise[] -include::modules/identity-provider-oidc-supported.adoc[leveloffset=+1] -endif::openshift-enterprise[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/identity-provider-secret.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-oidc-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc deleted file mode 100644 index ee20455070b4..000000000000 --- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-request-header-identity-provider"] -= Configuring a request header identity provider -include::_attributes/common-attributes.adoc[] -:context: configuring-request-header-identity-provider - -toc::[] - -Configure the `request-header` identity provider to identify users from request header values, such as `X-Remote-User`. It is typically used in combination with an authenticating proxy, which sets the request header value. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -include::modules/identity-provider-about-request-header.adoc[leveloffset=+1] - -include::modules/identity-provider-config-map.adoc[leveloffset=+1] - -include::modules/identity-provider-request-header-CR.adoc[leveloffset=+1] - -// Included here so that it is associated with the above module -[role="_additional-resources"] -.Additional resources - -* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers. - -include::modules/identity-provider-add.adoc[leveloffset=+1] - -[id="example-apache-auth-config-using-request-header"] -== Example Apache authentication configuration using request header - -This example configures an Apache authentication proxy for the {product-title} -using the request header identity provider. 
- -[discrete] -include::modules/identity-provider-apache-custom-proxy-configuration.adoc[leveloffset=+2] - -[discrete] -include::modules/identity-provider-configuring-apache-request-header.adoc[leveloffset=+2] diff --git a/authentication/identity_providers/images b/authentication/identity_providers/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/identity_providers/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/identity_providers/modules b/authentication/identity_providers/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/identity_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/identity_providers/snippets b/authentication/identity_providers/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/authentication/identity_providers/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/images b/authentication/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/authentication/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/authentication/impersonating-system-admin.adoc b/authentication/impersonating-system-admin.adoc deleted file mode 100644 index 32843c9f3a2d..000000000000 --- a/authentication/impersonating-system-admin.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="impersonating-system-admin"] -= Impersonating the system:admin user -include::_attributes/common-attributes.adoc[] -:context: impersonating-system-admin - -toc::[] - -include::modules/authentication-api-impersonation.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-user.adoc[leveloffset=+1] - -include::modules/impersonation-system-admin-group.adoc[leveloffset=+1] diff --git a/authentication/index.adoc b/authentication/index.adoc deleted file mode 100644 index 691ea227852a..000000000000 --- a/authentication/index.adoc +++ /dev/null @@ -1,62 +0,0 @@ -[id="overview-of-authentication-authorization"] -= Overview of authentication and authorization -include::_attributes/common-attributes.adoc[] -:context: overview-of-authentication-authorization - -toc::[] - -include::modules/authentication-authorization-common-terms.adoc[leveloffset=+1] - -[id="authentication-overview"] -== About authentication in {product-title} -To control access to an {product-title} cluster, a cluster administrator can configure xref:../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] and ensure only approved users access the cluster. - -To interact with an {product-title} cluster, users must first authenticate to the {product-title} API in some way. You can authenticate by providing an xref:../authentication/understanding-authentication.adoc#rbac-api-authentication_understanding-authentication[OAuth access token or an X.509 client certificate] in your requests to the {product-title} API. - -[NOTE] -==== -If you do not present a valid access token or certificate, your request is unauthenticated and you receive an HTTP 401 error. -==== -An administrator can configure authentication through the following tasks: - -* Configuring an identity provider: You can define any xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[supported identity provider in {product-title}] and add it to your cluster. 
-* xref:../authentication/configuring-internal-oauth.adoc#configuring-internal-oauth[Configuring the internal OAuth server]: The {product-title} control plane includes a built-in OAuth server that determines the user’s identity from the configured identity provider and creates an access token. You can configure the token duration and inactivity timeout, and customize the internal OAuth server URL. -+ -[NOTE] -==== -Users can xref:../authentication/managing-oauth-access-tokens.adoc#managing-oauth-access-tokens[view and manage OAuth tokens owned by them]. -==== -* Registering an OAuth client: {product-title} includes several xref:../authentication/configuring-oauth-clients.adoc#oauth-default-clients_configuring-oauth-clients[default OAuth clients]. You can xref:../authentication/configuring-oauth-clients.adoc#oauth-register-additional-client_configuring-oauth-clients[register and configure additional OAuth clients]. -+ -[NOTE] -==== -When users send a request for an OAuth token, they must specify either a default or custom OAuth client that receives and uses the token. -==== - -* Managing cloud provider credentials using the xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[Cloud Credentials Operator]: Cluster components use cloud provider credentials to get permissions required to perform cluster-related tasks. -* Impersonating a system admin user: You can grant cluster administrator permissions to a user by xref:../authentication/impersonating-system-admin.adoc#impersonating-system-admin[impersonating a system admin user]. - -[id="authorization-overview"] -== About authorization in {product-title} -Authorization involves determining whether the identified user has permissions to perform the requested action. - -Administrators can define permissions and assign them to users using the xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC objects, such as rules, roles, and bindings]. To understand how authorization works in {product-title}, see xref:../authentication/using-rbac.adoc#evaluating-authorization_using-rbac[Evaluating authorization]. - -You can also control access to an {product-title} cluster through xref:../authentication/using-rbac.adoc#rbac-projects-namespaces_using-rbac[projects and namespaces]. - -Along with controlling user access to a cluster, you can also control the actions a pod can perform and the resources it can access using xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[security context constraints (SCCs)]. - -You can manage authorization for {product-title} through the following tasks: - -* Viewing xref:../authentication/using-rbac.adoc#viewing-local-roles_using-rbac[local] and xref:../authentication/using-rbac.adoc#viewing-cluster-roles_using-rbac[cluster] roles and bindings. -* Creating a xref:../authentication/using-rbac.adoc#creating-local-role_using-rbac[local role] and assigning it to a user or group. -* Creating a cluster role and assigning it to a user or group: {product-title} includes a set of xref:../authentication/using-rbac.adoc#default-roles_using-rbac[default cluster roles]. You can create additional xref:../authentication/using-rbac.adoc#creating-cluster-role_using-rbac[cluster roles] and xref:../authentication/using-rbac.adoc#adding-roles_using-rbac[add them to a user or group]. -* Creating a cluster-admin user: By default, your cluster has only one cluster administrator called `kubeadmin`. 
You can xref:../authentication/using-rbac.adoc#creating-cluster-admin_using-rbac[create another cluster administrator]. Before creating a cluster administrator, ensure that you have configured an identity provider. -+ -[NOTE] -==== -After creating the cluster admin user, xref:../authentication/remove-kubeadmin.adoc#removing-kubeadmin_removing-kubeadmin[delete the existing kubeadmin user] to improve cluster security. -==== -* Creating service accounts: xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-overview_understanding-service-accounts[Service accounts] provide a flexible way to control API access without sharing a regular user’s credentials. A user can xref:../authentication/understanding-and-creating-service-accounts.adoc#service-accounts-managing_understanding-service-accounts[create and use a service account in applications] and also as xref:../authentication/using-service-accounts-as-oauth-client.adoc#using-service-accounts-as-oauth-client[an OAuth client]. -* xref:../authentication/tokens-scoping.adoc#tokens-scoping[Scoping tokens]: A scoped token is a token that identifies as a specific user who can perform only specific operations. You can create scoped tokens to delegate some of your permissions to another user or a service account. -* Syncing LDAP groups: You can manage user groups in one place by xref:../authentication/ldap-syncing.adoc#ldap-syncing[syncing the groups stored in an LDAP server] with the {product-title} user groups. diff --git a/authentication/ldap-syncing.adoc b/authentication/ldap-syncing.adoc deleted file mode 100644 index 9d788b1540e9..000000000000 --- a/authentication/ldap-syncing.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="ldap-syncing"] -= Syncing LDAP groups -include::_attributes/common-attributes.adoc[] -:context: ldap-syncing-groups - -toc::[] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, -endif::[] -you can use groups to manage users, change -their permissions, and enhance collaboration. Your organization may have already -created user groups and stored them in an LDAP server. {product-title} can sync -those LDAP records with internal {product-title} records, enabling you to manage -your groups in one place. {product-title} currently supports group sync with -LDAP servers using three common schemas for defining group membership: RFC 2307, -Active Directory, and augmented Active Directory. - -For more information on configuring LDAP, see -xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider]. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[NOTE] -==== -You must have `cluster-admin` privileges to sync groups. 
-==== -endif::[] - -include::modules/ldap-syncing-about.adoc[leveloffset=+1] -include::modules/ldap-syncing-config-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-config-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-running.adoc[leveloffset=+1] -include::modules/ldap-syncing-running-all-ldap.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-openshift.adoc[leveloffset=+2] -include::modules/ldap-syncing-running-subset.adoc[leveloffset=+2] -include::modules/ldap-syncing-pruning.adoc[leveloffset=+1] - -// Automatically syncing LDAP groups -include::modules/ldap-auto-syncing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider] -* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs-creating-cron_nodes-nodes-jobs[Creating cron jobs] - -include::modules/ldap-syncing-examples.adoc[leveloffset=+1] -include::modules/ldap-syncing-rfc2307.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined.adoc[leveloffset=+2] -include::modules/ldap-syncing-rfc2307-user-defined-error.adoc[leveloffset=+2] -include::modules/ldap-syncing-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-augmented-activedir.adoc[leveloffset=+2] -include::modules/ldap-syncing-nesting.adoc[leveloffset=+2] -include::modules/ldap-syncing-spec.adoc[leveloffset=+1] diff --git a/authentication/managing-oauth-access-tokens.adoc b/authentication/managing-oauth-access-tokens.adoc deleted file mode 100644 index 10867b018e6f..000000000000 --- a/authentication/managing-oauth-access-tokens.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-oauth-access-tokens"] -= Managing user-owned OAuth access tokens -include::_attributes/common-attributes.adoc[] -:context: managing-oauth-access-tokens - -toc::[] - -Users can review their own OAuth access tokens and delete any that are no longer needed. - -// Listing user-owned OAuth access tokens -include::modules/oauth-list-tokens.adoc[leveloffset=+1] - -// Viewing the details of a user-owned OAuth access token -include::modules/oauth-view-details-tokens.adoc[leveloffset=+1] - -// Deleting user-owned OAuth access tokens -include::modules/oauth-delete-tokens.adoc[leveloffset=+1] diff --git a/authentication/managing-security-context-constraints.adoc b/authentication/managing-security-context-constraints.adoc deleted file mode 100644 index 638cbba748b4..000000000000 --- a/authentication/managing-security-context-constraints.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-pod-security-policies"] -= Managing security context constraints -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -In {product-title}, you can use security context constraints (SCCs) to control permissions for the pods in your cluster. - -Default SCCs are created during installation and when you install some Operators or other components. As a cluster administrator, you can also create your own SCCs by using the OpenShift CLI (`oc`). - -[IMPORTANT] -==== -Do not modify the default SCCs. Customizing the default SCCs can lead to issues when some of the platform pods deploy or -ifndef::openshift-rosa[] -{product-title} -endif::[] -ifdef::openshift-rosa[] -ROSA -endif::openshift-rosa[] -is upgraded. 
Additionally, the default SCC values are reset to the defaults during some cluster upgrades, which discards all customizations to those SCCs. -ifdef::openshift-origin,openshift-enterprise,openshift-webscale,openshift-dedicated,openshift-rosa[] - -Instead of modifying the default SCCs, create and modify your own SCCs as needed. For detailed steps, see xref:../authentication/managing-security-context-constraints.adoc#security-context-constraints-creating_configuring-internal-oauth[Creating security context constraints]. -endif::[] -==== - -ifdef::openshift-dedicated[] -[NOTE] -==== -In {product-title} deployments, you can create your own SCCs only for clusters that use the Customer Cloud Subscription (CCS) model. You cannot create SCCs for {product-title} clusters that use a Red Hat cloud account, because SCC resource creation requires `cluster-admin` privileges. -==== -endif::openshift-dedicated[] - -include::modules/security-context-constraints-about.adoc[leveloffset=+1] -include::modules/security-context-constraints-pre-allocated-values.adoc[leveloffset=+1] -include::modules/security-context-constraints-example.adoc[leveloffset=+1] -include::modules/security-context-constraints-creating.adoc[leveloffset=+1] -include::modules/security-context-constraints-rbac.adoc[leveloffset=+1] -include::modules/security-context-constraints-command-reference.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-internal-oauth"] -== Additional resources - -ifndef::openshift-dedicated,openshift-rosa[] -* xref:../support/getting-support.adoc#getting-support[Getting support] -endif::[] -ifdef::openshift-dedicated[] -* xref:../osd_architecture/osd-support.adoc#osd-getting-support[Getting support] -endif::[] -ifdef::openshift-rosa[] -* xref:../rosa_architecture/rosa-getting-support.adoc#rosa-getting-support[Getting support] -endif::[] diff --git a/authentication/managing_cloud_provider_credentials/_attributes b/authentication/managing_cloud_provider_credentials/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/authentication/managing_cloud_provider_credentials/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc deleted file mode 100644 index 5a608835d7c3..000000000000 --- a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc +++ /dev/null @@ -1,115 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-cloud-credential-operator"] -= About the Cloud Credential Operator -include::_attributes/common-attributes.adoc[] -:context: about-cloud-credential-operator - -toc::[] - -The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run. - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. 
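-
-For example, an `install-config.yaml` file that explicitly selects a CCO mode sets the top-level `credentialsMode` field. The following excerpt is a sketch for an AWS cluster; the domain, cluster name, and region values are placeholders and the rest of the installation configuration is omitted.
-
-.Example `credentialsMode` setting in `install-config.yaml`
-[source,yaml]
-----
-apiVersion: v1
-baseDomain: example.com
-credentialsMode: Mint <1>
-metadata:
-  name: example-cluster
-platform:
-  aws:
-    region: us-east-1
-----
-<1> Valid values are `Mint`, `Passthrough`, `Manual`, or an empty string (`""`) for the default mode.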
- -[id="about-cloud-credential-operator-modes_{context}"] -== Modes - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in _mint_, _passthrough_, or _manual_ mode. These options provide transparency and flexibility in how the CCO uses cloud credentials to process `CredentialsRequest` CRs in the cluster, and allow the CCO to be configured to suit the security requirements of your organization. Not all CCO modes are supported for all cloud providers. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#cco-mode-mint[Mint]**: In mint mode, the CCO uses the provided admin-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc#cco-mode-passthrough[Passthrough]**: In passthrough mode, the CCO passes the provided cloud credential to the components that request cloud credentials. - -* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual]**: In manual mode, a user manages cloud credentials instead of the CCO. - -** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Manual with AWS Security Token Service]**: In manual mode, you can configure an AWS cluster to use Amazon Web Services Security Token Service (AWS STS). With this configuration, the CCO uses temporary credentials for different components. - -** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Manual with GCP Workload Identity]**: In manual mode, you can configure a GCP cluster to use GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components. - -.CCO mode support matrix -[cols="<.^2,^.^1,^.^1,^.^1"] -|==== -|Cloud provider |Mint |Passthrough |Manual - -|{alibaba} -| -| -|X - -|Amazon Web Services (AWS) -|X -|X -|X - - -|Microsoft Azure -| -|X ^[1]^ -|X - -|Google Cloud Platform (GCP) -|X -|X -|X - -|IBM Cloud -| -| -|X - -|Nutanix -| -| -|X - -|{rh-openstack-first} -| -|X -| - -|{rh-virtualization-first} -| -|X -| - -|VMware vSphere -| -|X -| - -|==== -[.small] --- -1. Manual mode is the only supported CCO configuration for Microsoft Azure Stack Hub. --- - -[id="cco-determine-mode_{context}"] -== Determining the Cloud Credential Operator mode - -For platforms that support using the CCO in multiple modes, you can determine what mode the CCO is configured to use by using the web console or the CLI. - -.Determining the CCO configuration -image::334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png[Decision tree showing how to determine the configured CCO credentials mode for your cluster.] - -//Determining the Cloud Credential Operator mode by using the web console -include::modules/cco-determine-mode-gui.adoc[leveloffset=+2] - -//Determining the Cloud Credential Operator mode by using the CLI -include::modules/cco-determine-mode-cli.adoc[leveloffset=+2] - -[id="about-cloud-credential-operator-default_{context}"] -== Default behavior -For platforms on which multiple modes are supported (AWS, Azure, and GCP), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs. 
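-
-For reference, a `CredentialsRequest` CR declares the cloud permissions that a component needs and the secret that the CCO populates for that component. The following abbreviated sketch is for a hypothetical AWS component; the names, namespace, and permission list are placeholders.
-
-.Example `CredentialsRequest` CR for AWS
-[source,yaml]
-----
-apiVersion: cloudcredential.openshift.io/v1
-kind: CredentialsRequest
-metadata:
-  name: example-component <1>
-  namespace: openshift-cloud-credential-operator
-spec:
-  providerSpec:
-    apiVersion: cloudcredential.openshift.io/v1
-    kind: AWSProviderSpec
-    statementEntries:
-    - effect: Allow
-      action:
-      - ec2:DescribeInstances <2>
-      resource: "*"
-  secretRef:
-    name: example-component-credentials <3>
-    namespace: openshift-example-component
-----
-<1> Placeholder name of the request.
-<2> Placeholder permission; real components request the specific actions that they require.
-<3> The secret that the CCO creates, or that you create in manual mode, in the namespace where the component runs.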
- -By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs. - -If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installer fails early in the process and indicates which required permissions are missing. Other providers might not provide specific information about the cause of the error until errors are encountered. - -If the credentials are changed after a successful installation and the CCO determines that the new credentials are insufficient, the CCO puts conditions on any new `CredentialsRequest` CRs to indicate that it cannot process them because of the insufficient credentials. - -To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can manually create IAM for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], and xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. - -[role="_additional-resources"] -[id="additional-resources_about-cloud-credential-operator_{context}"] -== Additional resources - -* xref:../../operators/operator-reference.adoc#cloud-credential-operator_cluster-operators-ref[Cluster Operators reference page for the Cloud Credential Operator] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc deleted file mode 100644 index ced6e811400a..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc +++ /dev/null @@ -1,124 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-gcp-workload-identity"] -= Using manual mode with GCP Workload Identity -include::_attributes/common-attributes.adoc[] -:context: cco-mode-gcp-workload-identity - -toc::[] - -Manual mode with GCP Workload Identity is supported for Google Cloud Platform (GCP). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="gcp-workload-identity-mode-about_{context}"] -== About manual mode with GCP Workload Identity - -In manual mode with GCP Workload Identity, the individual {product-title} cluster components can impersonate IAM service accounts using short-term, limited-privilege credentials. - -Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider, combined with IAM service accounts. {product-title} signs service account tokens that are trusted by GCP, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour by default. 
- -.Workload Identity authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity] - -Using manual mode with GCP Workload Identity changes the content of the GCP credentials that are provided to individual {product-title} components. - -.GCP secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -data: - service_account.json: <3> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The Base64 encoded service account. - -.Content of the Base64 encoded `service_account.json` file using long-lived credentials - -[source,json] ----- -{ - "type": "service_account", <1> - "project_id": "", - "private_key_id": "", - "private_key": "", <2> - "client_email": "", - "client_id": "", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/" -} ----- -<1> The credential type is `service_account`. -<2> The private RSA key that is used to authenticate to GCP. This key must be kept secure and is not rotated. - -.Content of the Base64 encoded `service_account.json` file using GCP Workload Identity - -[source,json] ----- -{ - "type": "external_account", <1> - "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/test-pool/providers/test-provider", <2> - "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", - "token_url": "https://sts.googleapis.com/v1/token", - "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/:generateAccessToken", <3> - "credential_source": { - "file": "", <4> - "format": { - "type": "text" - } - } -} ----- -<1> The credential type is `external_account`. -<2> The target audience is the GCP Workload Identity provider. -<3> The resource URL of the service account that can be impersonated with these credentials. -<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with GCP Workload Identity -[id="gcp-workload-identity-mode-installing"] -== Installing an {product-title} cluster configured for manual mode with GCP Workload Identity - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with GCP Workload Identity: - -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-configuring_cco-mode-gcp-workload-identity[Configure the Cloud Credential Operator utility]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-ccoctl-creating-at-once_cco-mode-gcp-workload-identity[Create the required GCP resources]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-manual-run-installer_cco-mode-gcp-workload-identity[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#sts-mode-installing-verifying_cco-mode-gcp-workload-identity[Verify that the cluster is using short-lived credentials]. 
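-
-Because this credentials strategy is selected at installation time, the `install-config.yaml` file that you use when you run the installer sets `credentialsMode` to `Manual`. The following excerpt is a sketch for a GCP cluster; the domain, cluster name, project, and region values are placeholders and the rest of the installation configuration is omitted.
-
-.Example `install-config.yaml` excerpt for manual mode
-[source,yaml]
-----
-apiVersion: v1
-baseDomain: example.com
-credentialsMode: Manual
-metadata:
-  name: example-cluster
-platform:
-  gcp:
-    projectID: example-project
-    region: us-central1
-----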
- -[NOTE] -==== -Because the cluster is operating in manual mode when using GCP Workload Identity, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new GCP permission requirements. Before upgrading a cluster that is using GCP Workload Identity, the cluster administrator must manually ensure that the GCP permissions are sufficient for existing components and available to any new components. -==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -//Task part 2: Creating the required GCP resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc deleted file mode 100644 index 5b403c38979a..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-manual"] -= Using manual mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-manual - -toc::[] - -Manual mode is supported for Alibaba Cloud, Amazon Web Services (AWS), Microsoft Azure, IBM Cloud, and Google Cloud Platform (GCP). - -In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. 
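-
-In practice, satisfying a `CredentialsRequest` CR in manual mode means creating a secret with the name and namespace from the `secretRef` of that CR and the key format that the cloud provider expects. The following sketch shows the general shape for an AWS credential; the namespace, name, and key values are placeholders taken from a hypothetical `CredentialsRequest` CR.
-
-.Example manually created AWS credential secret
-[source,yaml]
-----
-apiVersion: v1
-kind: Secret
-metadata:
-  namespace: <target_namespace> <1>
-  name: <target_secret_name> <2>
-data:
-  aws_access_key_id: <base64_encoded_access_key_id>
-  aws_secret_access_key: <base64_encoded_secret_access_key>
-----
-<1> The namespace from the `secretRef` of the `CredentialsRequest` CR.
-<2> The secret name from the `secretRef` of the `CredentialsRequest` CR.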
- -For information about configuring your cloud provider to use manual mode, see the manual credentials management options for your cloud provider: - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud] -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud] -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] - -[id="manual-mode-sts-blurb"] -== Manual mode with cloud credentials created and managed outside of the cluster - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. With this configuration, the CCO uses temporary credentials for different components. - -For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service] or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity]. 
- -//Updating cloud provider resources with manually maintained credentials -include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] - -//Indicating that the cluster is ready to upgrade -include::modules/cco-manual-upgrade-annotation.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_cco-mode-manual"] -== Additional resources - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Manually creating RAM resources for Alibaba Cloud] -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with Amazon Web Services Security Token Service] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity] -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud] -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc deleted file mode 100644 index fd756d2e74d6..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-mint"] -= Using mint mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-mint - -toc::[] - -Mint mode is supported for Amazon Web Services (AWS) and Google Cloud Platform (GCP). - -Mint mode is the default mode on the platforms for which it is supported. In this mode, the Cloud Credential Operator (CCO) uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -If the credential is not removed after installation, it is stored and used by the CCO to process `CredentialsRequest` CRs for components in the cluster and create new credentials for each with only the specific permissions that are required. The continuous reconciliation of cloud credentials in mint mode allows actions that require additional credentials or permissions, such as upgrading, to proceed. - -Mint mode stores the administrator-level credential in the cluster `kube-system` namespace. If this approach does not meet the security requirements of your organization, see _Alternatives to storing administrator-level secrets in the kube-system project_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[AWS] or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[GCP]. 
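-
-As a sketch, the administrator-level AWS credential that mint mode stores in the `kube-system` namespace generally has the following shape; the key values are placeholders, and the exact secret name and format for each supported cloud are described in the root secret format section later in this assembly.
-
-.Example administrator-level credential secret for AWS
-[source,yaml]
-----
-apiVersion: v1
-kind: Secret
-metadata:
-  namespace: kube-system
-  name: aws-creds <1>
-data:
-  aws_access_key_id: <base64_encoded_access_key_id>
-  aws_secret_access_key: <base64_encoded_secret_access_key>
-----
-<1> The root secret name for AWS; other clouds use a different secret name.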
- -[id="mint-mode-permissions"] -== Mint mode permissions requirements -When using the CCO in mint mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials are not sufficient for mint mode, the CCO cannot create an IAM user. - -[id="mint-mode-permissions-aws"] -=== Amazon Web Services (AWS) permissions -The credential you provide for mint mode in AWS must have the following permissions: - -* `iam:CreateAccessKey` -* `iam:CreateUser` -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:DeleteUserPolicy` -* `iam:GetUser` -* `iam:GetUserPolicy` -* `iam:ListAccessKeys` -* `iam:PutUserPolicy` -* `iam:TagUser` -* `iam:SimulatePrincipalPolicy` - -[id="mint-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for mint mode in GCP must have the following permissions: - -* `resourcemanager.projects.get` -* `serviceusage.services.list` -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.roles.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] - -//Mint Mode with removal or rotation of the admin credential -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] - -//Removing cloud provider credentials -include::modules/manually-removing-cloud-creds.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[Alternatives to storing administrator-level secrets in the kube-system project] for AWS -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[Alternatives to storing administrator-level secrets in the kube-system project] for GCP diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc deleted file mode 100644 index f0d19bfa73d4..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc +++ /dev/null @@ -1,117 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-passthrough"] -= Using passthrough mode -include::_attributes/common-attributes.adoc[] -:context: cco-mode-passthrough - -toc::[] - -Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere. - -In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode. 
- -[NOTE] -==== -xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode] is the only supported CCO configuration for Microsoft Azure Stack Hub. -==== - -[id="passthrough-mode-permissions"] -== Passthrough mode permissions requirements -When using the CCO in passthrough mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials the CCO passes to a component that creates a `CredentialsRequest` CR are not sufficient, that component will report an error when it tries to call an API that it does not have permissions for. - -[id="passthrough-mode-permissions-aws"] -=== Amazon Web Services (AWS) permissions -The credential you provide for passthrough mode in AWS must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]. - -[id="passthrough-mode-permissions-azure"] -=== Microsoft Azure permissions -The credential you provide for passthrough mode in Azure must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]. - -[id="passthrough-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. - -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]. - -[id="passthrough-mode-permissions-rhosp"] -=== {rh-openstack-first} permissions -To install an {product-title} cluster on {rh-openstack}, the CCO requires a credential with the permissions of a `member` user role. 
- -[id="passthrough-mode-permissions-rhv"] -=== {rh-virtualization-first} permissions -To install an {product-title} cluster on {rh-virtualization}, the CCO requires a credential with the following privileges: - -* `DiskOperator` -* `DiskCreator` -* `UserTemplateBasedVm` -* `TemplateOwner` -* `TemplateCreator` -* `ClusterAdmin` on the specific cluster that is targeted for {product-title} deployment - -[id="passthrough-mode-permissions-vsware"] -=== VMware vSphere permissions -To install an {product-title} cluster on VMware vSphere, the CCO requires a credential with the following vSphere privileges: - -.Required vSphere privileges -[cols="1,2"] -|==== -|Category |Privileges - -|Datastore -|_Allocate space_ - -|Folder -|_Create folder_, _Delete folder_ - -|vSphere Tagging -|All privileges - -|Network -|_Assign network_ - -|Resource -|_Assign virtual machine to resource pool_ - -|Profile-driven storage -|All privileges - -|vApp -|All privileges - -|Virtual machine -|All privileges - -|==== - -//Admin credentials root secret format -include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] - -[id="passthrough-mode-maintenance"] -== Passthrough mode credential maintenance -If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. - -//Rotating cloud provider credentials manually -include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc[vSphere CSI Driver Operator] - -[id="passthrough-mode-reduce-permissions"] -== Reducing permissions after installation -When using passthrough mode, each component has the same permissions used by all other components. If you do not reduce the permissions after installing, all components have the broad permissions that are required to run the installer. - -After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using. - -To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP]. 
- -[role="_additional-resources"] -== Additional resources - -* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] -* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] -* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc deleted file mode 100644 index ad0c756d57f8..000000000000 --- a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc +++ /dev/null @@ -1,114 +0,0 @@ -:_content-type: ASSEMBLY -[id="cco-mode-sts"] -= Using manual mode with Amazon Web Services Security Token Service -include::_attributes/common-attributes.adoc[] -:context: cco-mode-sts - -toc::[] - -Manual mode with STS is supported for Amazon Web Services (AWS). - -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -[id="sts-mode-about_{context}"] -== About manual mode with AWS Security Token Service - -In manual mode with STS, the individual {product-title} cluster components use AWS Security Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. - -Requests for new and refreshed credentials are automated by using an appropriately configured AWS IAM OpenID Connect (OIDC) identity provider, combined with AWS IAM roles. {product-title} signs service account tokens that are trusted by AWS IAM, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour. - -.STS authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_AWS.png[Detailed authentication flow between AWS and the cluster when using AWS STS] - -Using manual mode with STS changes the content of the AWS credentials that are provided to individual {product-title} components. - -.AWS secret format using long-lived credentials - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -data: - aws_access_key_id: - aws_secret_access_key: ----- -<1> The namespace for the component. -<2> The name of the component secret. - -.AWS secret format with STS - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: <1> - name: <2> -stringData: - credentials: |- - [default] - sts_regional_endpoints = regional - role_name: <3> - web_identity_token_file: <4> ----- -<1> The namespace for the component. -<2> The name of the component secret. -<3> The IAM role for the component. -<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. - -//Supertask: Installing an OCP cluster configured for manual mode with STS -[id="sts-mode-installing_{context}"] -== Installing an {product-title} cluster configured for manual mode with STS - -To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with STS: - -//[pre-4.8]. 
xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-config_cco-mode-sts[Create the required AWS resources] -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-configuring_cco-mode-sts[Configure the Cloud Credential Operator utility]. -. Create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-run-installer_cco-mode-sts[Run the {product-title} installer]. -. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-verifying_cco-mode-sts[Verify that the cluster is using short-lived credentials]. - -[NOTE] -==== -Because the cluster is operating in manual mode when using STS, it is not able to create new credentials for components with the permissions that they require. When upgrading to a different minor version of {product-title}, there are often new AWS permission requirements. Before upgrading a cluster that is using STS, the cluster administrator must manually ensure that the AWS permissions are sufficient for existing components and available to any new components. -==== - -[role="_additional-resources"] -.Additional resources - -* xref:../../updating/preparing-manual-creds-update.adoc#cco-ccoctl-configuring_preparing-manual-creds-update[Configuring the Cloud Credential Operator utility for a cluster update] - -//[pre-4.8]Task part 1: Creating AWS resources manually -//include::modules/sts-mode-installing-manual-config.adoc[leveloffset=+2] - -//Task part 1: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -[id="sts-mode-create-aws-resources-ccoctl_{context}"] -=== Creating AWS resources with the Cloud Credential Operator utility - -You can use the CCO utility (`ccoctl`) to create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command]. 
- -//Task part 2a: Creating the required AWS resources individually -include::modules/cco-ccoctl-creating-individually.adoc[leveloffset=+3] - -//Task part 2b: Creating the required AWS resources all at once -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] - -//Task part 3: Run the OCP installer -include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2] - -//Task part 4: Verify that the cluster is using short-lived credentials -include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] diff --git a/authentication/managing_cloud_provider_credentials/images b/authentication/managing_cloud_provider_credentials/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/authentication/managing_cloud_provider_credentials/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/modules b/authentication/managing_cloud_provider_credentials/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/authentication/managing_cloud_provider_credentials/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/authentication/managing_cloud_provider_credentials/snippets b/authentication/managing_cloud_provider_credentials/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/authentication/managing_cloud_provider_credentials/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/authentication/modules b/authentication/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/authentication/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/authentication/remove-kubeadmin.adoc b/authentication/remove-kubeadmin.adoc deleted file mode 100644 index 7557e91e4820..000000000000 --- a/authentication/remove-kubeadmin.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="removing-kubeadmin"] -= Removing the kubeadmin user -include::_attributes/common-attributes.adoc[] -:context: removing-kubeadmin - -toc::[] - -include::modules/authentication-kubeadmin.adoc[leveloffset=+1] - -include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] diff --git a/authentication/snippets b/authentication/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/authentication/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/authentication/tokens-scoping.adoc b/authentication/tokens-scoping.adoc deleted file mode 100644 index 126481d224da..000000000000 --- a/authentication/tokens-scoping.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="tokens-scoping"] -= Scoping tokens -include::_attributes/common-attributes.adoc[] -:context: configuring-internal-oauth - -toc::[] - -include::modules/tokens-scoping-about.adoc[leveloffset=+1] diff --git a/authentication/understanding-and-creating-service-accounts.adoc b/authentication/understanding-and-creating-service-accounts.adoc deleted file mode 100644 index 86fb149c7c2e..000000000000 --- a/authentication/understanding-and-creating-service-accounts.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY 
-[id="understanding-and-creating-service-accounts"] -= Understanding and creating service accounts -include::_attributes/common-attributes.adoc[] -:context: understanding-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -// include::modules/service-accounts-enabling-authentication.adoc[leveloffset=+1] - -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-configuration-parameters.adoc[leveloffset=+1] - -include::modules/service-accounts-granting-roles.adoc[leveloffset=+1] diff --git a/authentication/understanding-and-managing-pod-security-admission.adoc b/authentication/understanding-and-managing-pod-security-admission.adoc deleted file mode 100644 index 4bf7dcd953c2..000000000000 --- a/authentication/understanding-and-managing-pod-security-admission.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-and-managing-pod-security-admission"] -= Understanding and managing pod security admission -include::_attributes/common-attributes.adoc[] -:context: understanding-and-managing-pod-security-admission - -toc::[] - -Pod security admission is an implementation of the link:https://kubernetes.io/docs/concepts/security/pod-security-standards/[Kubernetes pod security standards]. Use pod security admission to restrict the behavior of pods. - -// Security context constraint synchronization with pod security standards -include::modules/security-context-constraints-psa-synchronization.adoc[leveloffset=+1] - -// Controlling pod security admission synchronization -include::modules/security-context-constraints-psa-opting.adoc[leveloffset=+1] - -// About pod security admission alerts -include::modules/security-context-constraints-psa-rectifying.adoc[leveloffset=+1] - -// Identifying pod security violations -include::modules/security-context-constraints-psa-alert-eval.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_managing-pod-security-admission"] -== Additional resources - -* xref:../security/audit-log-view.adoc#nodes-nodes-audit-log-basic-viewing_audit-log-view[Viewing audit logs] -* xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints] diff --git a/authentication/understanding-authentication.adoc b/authentication/understanding-authentication.adoc deleted file mode 100644 index 6f438338cc28..000000000000 --- a/authentication/understanding-authentication.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-authentication"] -= Understanding authentication -include::_attributes/common-attributes.adoc[] -:context: understanding-authentication - -toc::[] - -For users to interact with {product-title}, they must first authenticate -to the cluster. The authentication layer identifies the user associated with requests to the -{product-title} API. The authorization layer then uses information about the -requesting user to determine if the request is allowed. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -As an administrator, you can configure authentication for {product-title}. 
-endif::[] - -include::modules/rbac-users.adoc[leveloffset=+1] - -include::modules/rbac-groups.adoc[leveloffset=+1] - -include::modules/rbac-api-authentication.adoc[leveloffset=+1] - -include::modules/oauth-server-overview.adoc[leveloffset=+2] - -include::modules/oauth-token-requests.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/authentication-api-impersonation.adoc[leveloffset=+3] - -include::modules/authentication-prometheus-system-metrics.adoc[leveloffset=+3] -endif::[] diff --git a/authentication/understanding-identity-provider.adoc b/authentication/understanding-identity-provider.adoc deleted file mode 100644 index 1c3ee54695a3..000000000000 --- a/authentication/understanding-identity-provider.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-identity-provider"] -= Understanding identity provider configuration -include::_attributes/common-attributes.adoc[] -:context: understanding-identity-provider - -toc::[] - -The {product-title} master includes a built-in OAuth server. Developers and -administrators obtain OAuth access tokens to authenticate themselves to the API. - -As an administrator, you can configure OAuth to specify an identity provider -after you install your cluster. - -include::modules/identity-provider-overview.adoc[leveloffset=+1] - -[id="supported-identity-providers"] -== Supported identity providers - -You can configure the following types of identity providers: - -[cols="2a,8a",options="header"] -|=== - -|Identity provider -|Description - -|xref:../authentication/identity_providers/configuring-htpasswd-identity-provider.adoc#configuring-htpasswd-identity-provider[htpasswd] -|Configure the `htpasswd` identity provider to validate user names and passwords -against a flat file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -|xref:../authentication/identity_providers/configuring-keystone-identity-provider.adoc#configuring-keystone-identity-provider[Keystone] -|Configure the `keystone` identity provider to integrate -your {product-title} cluster with Keystone to enable shared authentication with -an OpenStack Keystone v3 server configured to store users in an internal -database. - -|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP] -|Configure the `ldap` identity provider to validate user names and passwords -against an LDAPv3 server, using simple bind authentication. - -|xref:../authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc#configuring-basic-authentication-identity-provider[Basic authentication] -|Configure a `basic-authentication` identity provider for users to log in to -{product-title} with credentials validated against a remote identity provider. -Basic authentication is a generic backend integration mechanism. - -|xref:../authentication/identity_providers/configuring-request-header-identity-provider.adoc#configuring-request-header-identity-provider[Request header] -|Configure a `request-header` identity provider to identify users from request -header values, such as `X-Remote-User`. It is typically used in combination with -an authenticating proxy, which sets the request header value. 
- -|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise] -|Configure a `github` identity provider to validate user names and passwords -against GitHub or GitHub Enterprise's OAuth authentication server. - -|xref:../authentication/identity_providers/configuring-gitlab-identity-provider.adoc#configuring-gitlab-identity-provider[GitLab] -|Configure a `gitlab` identity provider to use -link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity -provider. - -|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google] -|Configure a `google` identity provider using -link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect] -|Configure an `oidc` identity provider to integrate with an OpenID Connect -identity provider using an -link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -|=== - -Once an identity provider has been defined, you can -xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[use RBAC to define and apply permissions]. - -include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1] - -include::modules/identity-provider-parameters.adoc[leveloffset=+1] - -include::modules/identity-provider-default-CR.adoc[leveloffset=+1] diff --git a/authentication/using-rbac.adoc b/authentication/using-rbac.adoc deleted file mode 100644 index 83d9ee01c65d..000000000000 --- a/authentication/using-rbac.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-rbac"] -= Using RBAC to define and apply permissions -include::_attributes/common-attributes.adoc[] -:context: using-rbac - -toc::[] - -include::modules/rbac-overview.adoc[leveloffset=+1] - -include::modules/rbac-projects-namespaces.adoc[leveloffset=+1] - -include::modules/rbac-default-projects.adoc[leveloffset=+1] - -include::modules/rbac-viewing-cluster-roles.adoc[leveloffset=+1] - -include::modules/rbac-viewing-local-roles.adoc[leveloffset=+1] - -include::modules/rbac-adding-roles.adoc[leveloffset=+1] - -include::modules/rbac-creating-local-role.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/rbac-creating-cluster-role.adoc[leveloffset=+1] -endif::[] - -include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+1] - -include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+1] -endif::[] diff --git a/authentication/using-service-accounts-as-oauth-client.adoc b/authentication/using-service-accounts-as-oauth-client.adoc deleted file mode 100644 index e6f0834fd958..000000000000 --- a/authentication/using-service-accounts-as-oauth-client.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-service-accounts-as-oauth-client"] -= Using a service account as an OAuth client -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts-as-oauth-client - -toc::[] - -include::modules/service-accounts-as-oauth-clients.adoc[leveloffset=+1] diff --git a/authentication/using-service-accounts-in-applications.adoc 
b/authentication/using-service-accounts-in-applications.adoc deleted file mode 100644 index 73527664f998..000000000000 --- a/authentication/using-service-accounts-in-applications.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-service-accounts"] -= Using service accounts in applications -include::_attributes/common-attributes.adoc[] -:context: using-service-accounts - -toc::[] - -include::modules/service-accounts-overview.adoc[leveloffset=+1] - -include::modules/service-accounts-default.adoc[leveloffset=+1] - -// remove these links for 4.12+ - -.Additional resources - -* For information about requesting bound service account tokens, see xref:../authentication/bound-service-account-tokens.html#bound-sa-tokens-configuring_bound-service-account-tokens[Configuring bound service account tokens using volume projection] - -* For information about creating a service account token secret, see xref:../nodes/pods/nodes-pods-secrets.html#nodes-pods-secrets-creating-sa_nodes-pods-secrets[Creating a service account token secret]. - -include::modules/service-accounts-creating.adoc[leveloffset=+1] - -// include::modules/service-accounts-using-credentials-inside-a-container.adoc[leveloffset=+1] diff --git a/backup_and_restore/_attributes b/backup_and_restore/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/_attributes b/backup_and_restore/application_backup_and_restore/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/backup_and_restore/application_backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc deleted file mode 100644 index 64f83e388436..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc +++ /dev/null @@ -1,129 +0,0 @@ -:_content-type: ASSEMBLY -[id="backing-up-applications"] -= Backing up applications -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -You back up applications by creating a `Backup` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR]. - -The `Backup` CR creates backup files for Kubernetes resources and internal images, on S3 object storage, and snapshots for persistent volumes (PVs), if the cloud provider uses a native snapshot API or the Container Storage Interface (CSI) to create snapshots, such as {rh-storage} 4. 
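A `Backup` CR is a namespaced Kubernetes object that you create in the OADP Operator's namespace. The following is a minimal, hypothetical sketch for orientation only; the namespace assumes the default OADP installation namespace, and the backup name, application namespace, and retention period are placeholders:

[source,yaml]
----
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: example-backup           # placeholder backup name
  namespace: openshift-adp       # assumes the default OADP Operator namespace
spec:
  includedNamespaces:
  - example-app                  # placeholder application namespace to back up
  ttl: 720h0m0s                  # placeholder retention period for the backup
----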
- -For more information about CSI volume snapshots, see xref:../../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots]. - -:FeatureName: The `CloudStorage` API for S3 storage -include::snippets/technology-preview.adoc[] - -* If your cloud provider has a native snapshot API or supports CSI snapshots, the `Backup` CR backs up persistent volumes (PVs) by creating snapshots. For more information about working with CSI snapshots, see xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-pvs-csi_backing-up-applications[Backing up persistent volumes with CSI snapshots]. - -* If your cloud provider does not support snapshots or if your applications are on NFS data volumes, you can create backups by using Restic. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic]. - -[IMPORTANT] -==== -The {oadp-first} does not support backing up volume snapshots that were created by other software. -==== - -You can create backup hooks to run commands before or after the backup operation. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Creating backup hooks]. - -You can schedule backups by creating a `Schedule` CR instead of a `Backup` CR. See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduling backups]. - -include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1] -include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+1] -include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+1] -include::modules/oadp-using-data-mover-for-csi-snapshots.adoc[leveloffset=+1] - -[id="oadp-12-data-mover-ceph"] -== Using OADP 1.2 Data Mover with Ceph storage - -You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both. - -OADP 1.2 Data Mover leverages Ceph features that support large-scale environments. One of these is the shallow copy method, which is available for {product-title} 4.12 and later. This feature supports backing up and restoring data by using a `StorageClass` and `AccessMode` other than those found on the source persistent volume claim (PVC). - -[IMPORTANT] -==== -The CephFS shallow copy feature is a backup feature. It is not part of restore operations. -==== - -include::modules/oadp-ceph-prerequisites.adoc[leveloffset=+2] - -[id="defining-crs-for-12-data-mover"] -=== Defining custom resources for use with OADP 1.2 Data Mover - -When you install {rh-storage-first}, it automatically creates default CephFS and a CephRBD `StorageClass` and `VolumeSnapshotClass` custom resources (CRs). You must define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover. - -After you define the CRs, you must make several other changes to your environment before you can perform your backup and restore operations.
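As an illustration of the kind of CR definition involved, the following is a hedged sketch of a CephFS `VolumeSnapshotClass` that is labeled so that the Velero CSI plugin can select it. The class name is a placeholder, the driver name assumes a default {rh-storage} deployment, and the `parameters` block is expected to be copied from the automatically created default snapshot class rather than written by hand:

[source,yaml]
----
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: example-cephfs-snapclass-velero            # placeholder name
  labels:
    velero.io/csi-volumesnapshot-class: "true"     # lets the Velero CSI plugin select this class
driver: openshift-storage.cephfs.csi.ceph.com      # assumes the default CephFS CSI driver name
deletionPolicy: Retain                             # Retain is typically required so snapshot content survives for data movement
# parameters: copy this block from the default CephFS VolumeSnapshotClass on the cluster
----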
- -include::modules/oadp-ceph-preparing-cephfs-crs.adoc[leveloffset=+2] -include::modules/oadp-ceph-preparing-cephrbd-crs.adoc[leveloffset=+2] -include::modules/oadp-ceph-preparing-crs-additional.adoc[leveloffset=+2] - -[id="oadp-ceph-back-up-restore-cephfs"] -=== Backing up and restoring data using OADP 1.2 Data Mover and CephFS storage - -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage by enabling the shallow copy feature of CephFS. - -include::snippets/oadp-ceph-cr-prerequisites.adoc[] - -:context: !backing-up-applications - -:context: cephfs - -include::modules/oadp-ceph-cephfs-back-up-dba.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2] - -[id="oadp-ceph-split"] -=== Backing up and restoring data using OADP 1.2 Data Mover and split volumes (CephFS and Ceph RBD) - -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data in an environment that has _split volumes_, that is, an environment that uses both CephFS and CephRBD. - -include::snippets/oadp-ceph-cr-prerequisites.adoc[] - -:context: !cephfs - -:context: split - -include::modules/oadp-ceph-split-back-up-dba.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-back-up.adoc[leveloffset=+2] -include::modules/oadp-ceph-cephfs-restore.adoc[leveloffset=+2] - -:context: !split - -:context: backing-up-applications - -[id="oadp-cleaning-up-after-data-mover-1-1-backup"] -== Cleaning up after a backup using OADP 1.1 Data Mover - -For OADP 1.1 Data Mover, you must perform a data cleanup after you perform a backup. - -The cleanup consists of deleting the following resources: - -* Snapshots in a bucket -* Cluster resources -* Volume snapshot backups (VSBs) after a backup procedure that is either run by a schedule or is run repetitively - -include::modules/oadp-cleaning-up-after-data-mover-snapshots.adoc[leveloffset=+2] - -[id="deleting-cluster-resources"] -=== Deleting cluster resources - -OADP 1.1 Data Mover might leave cluster resources whether or not it successfully backs up your container storage interface (CSI) volume snapshots to a remote object store. 
- -include::modules/oadp-deleting-cluster-resources-following-success.adoc[leveloffset=+3] -include::modules/oadp-deleting-cluster-resources-following-failure.adoc[leveloffset=+3] - -include::modules/oadp-vsb-cleanup-after-scheduler.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators on clusters for administrators] -* xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Installing Operators in namespaces for non-administrators] - -include::modules/oadp-creating-backup-hooks.adoc[leveloffset=+1] -include::modules/oadp-scheduling-backups.adoc[leveloffset=+1] -include::modules/oadp-deleting-backups.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#velero-obtaining-by-downloading_oadp-troubleshooting[Downloading the Velero CLI tool] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc deleted file mode 100644 index ab3ec37d6077..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="restoring-applications"] -= Restoring applications -include::_attributes/common-attributes.adoc[] -:context: restoring-applications - -toc::[] - -You restore application backups by creating a `Restore` custom resource (CR). See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. - -You can create restore hooks to run commands in a container in a pod while restoring your application by editing the `Restore` (CR). 
See xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[Creating restore hooks] - -include::modules/oadp-creating-restore-cr.adoc[leveloffset=+1] -include::modules/oadp-creating-restore-hooks.adoc[leveloffset=+1] diff --git a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets b/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/images b/backup_and_restore/application_backup_and_restore/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/application_backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/_attributes b/backup_and_restore/application_backup_and_restore/installing/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc deleted file mode 100644 index 0292eafc004c..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-installing-oadp"] -= About installing OADP -include::_attributes/common-attributes.adoc[] -:context: about-installing-oadp - -toc::[] - -As a cluster administrator, you install the OpenShift API for Data Protection (OADP) by installing the OADP Operator. The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -To back up Kubernetes resources and internal images, you must have object storage as a backup location, such as one of the following storage types: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] -* AWS S3 compatible object storage, such as Noobaa or Minio - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You can back up persistent volumes (PVs) by using snapshots or Restic. 
- -To back up PVs with snapshots, you must have a cloud provider that supports either a native snapshot API or Container Storage Interface (CSI) snapshots, such as one of the following cloud providers: - -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] -* CSI snapshot-enabled cloud provider, such as xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#installing-oadp-ocs[OpenShift Data Foundation] - -include::snippets/oadp-ocp-compat.adoc[] - -If your cloud provider does not support snapshots or if your storage is NFS, you can back up applications with xref:../../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups] on object storage. - -You create a default `Secret` and then you install the Data Protection Application. - -include::modules/oadp-s3-compatible-backup-storage-providers.adoc[leveloffset=+1] - -include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+1] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://{velero-domain}/docs/v{velero-version}/locations/[Overview of backup and snapshot locations in the Velero documentation] - -include::modules/about-oadp-update-channels.adoc[leveloffset=+1] -include::modules/about-installing-oadp-on-multiple-namespaces.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../operators/understanding/olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[Cluster service version] diff --git a/backup_and_restore/application_backup_and_restore/installing/images b/backup_and_restore/application_backup_and_restore/installing/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc deleted file mode 100644 index 64b8b58df482..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-aws"] -= Installing and configuring the OpenShift API for Data Protection with Amazon Web Services -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-aws -:installing-oadp-aws: -:credentials: cloud-credentials -:provider: aws - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Amazon Web Services (AWS) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure AWS for Velero, create a default `Secret`, and then install the Data Protection Application. 
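The default `Secret` mentioned above is a plain Kubernetes `Secret` whose `cloud` key holds an AWS credentials profile. A minimal sketch, assuming the default secret name for the AWS provider (`cloud-credentials`) and the default OADP Operator namespace; the access key values are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: cloud-credentials        # default secret name for the aws provider
  namespace: openshift-adp       # default OADP Operator namespace
type: Opaque
stringData:
  cloud: |                       # key name that Velero expects for the credentials file
    [default]
    aws_access_key_id=<AWS_ACCESS_KEY_ID>
    aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
----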
- -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-aws"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:!installing-oadp-aws: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc deleted file mode 100644 index 3077b98b3b71..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-azure"] -= Installing and configuring the OpenShift API for Data Protection with Microsoft Azure -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-azure -:installing-oadp-azure: -:credentials: cloud-credentials-azure -:provider: azure - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Microsoft Azure by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure Azure for Velero, create a default `Secret`, and then install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-azure.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-azure"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. 
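The Velero resource allocations mentioned above are declared on the `DataProtectionApplication` (DPA) CR. The following is a minimal, hypothetical excerpt; the field path matches the `podConfig.resourceAllocations` entries in the OADP API tables later in this document, and the CPU and memory values are placeholders rather than recommendations:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: example-dpa              # placeholder DPA name
  namespace: openshift-adp       # default OADP Operator namespace
spec:
  configuration:
    velero:
      podConfig:
        resourceAllocations:     # standard Kubernetes resource requirements
          requests:
            cpu: 500m
            memory: 256Mi
          limits:
            cpu: "1"
            memory: 512Mi
----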
- -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-azure!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc deleted file mode 100644 index 6b688c72a17a..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-gcp"] -= Installing and configuring the OpenShift API for Data Protection with Google Cloud Platform -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-gcp -:installing-oadp-gcp: -:credentials: cloud-credentials-gcp -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Google Cloud Platform (GCP) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -include::snippets/oadp-mtc-operator.adoc[] - -You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-gcp.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-gcp"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-gcp!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc deleted file mode 100644 index ff7180e2410e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-mcg"] -= Installing and configuring the OpenShift API for Data Protection with Multicloud Object Gateway -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-mcg -:installing-oadp-mcg: -:credentials: cloud-credentials -:provider: aws - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with Multicloud Object Gateway (MCG) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. 
- -include::snippets/oadp-mtc-operator.adoc[] - -You configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] as a backup location. -MCG is a component of {rh-storage}. You configure MCG as a backup location in the `DataProtectionApplication` custom resource (CR). - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You create a `Secret` for the backup location and then you install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/migration-configuring-mcg.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-mcg"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -:installing-oadp-mcg!: diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc deleted file mode 100644 index 651717695045..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-oadp-ocs"] -= Installing and configuring the OpenShift API for Data Protection with OpenShift Data Foundation -include::_attributes/common-attributes.adoc[] -:context: installing-oadp-ocs -:credentials: cloud-credentials -:provider: gcp - -toc::[] - -You install the OpenShift API for Data Protection (OADP) with {rh-storage} by installing the OADP Operator and configuring a backup location and a snapshot location. Then, you install the Data Protection Application. - -include::snippets/oadp-mtc-operator.adoc[] - -You can configure xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] or any S3-compatible object storage as a backup location. - -:FeatureName: The `CloudStorage` API, which automates the creation of a bucket for object storage, -include::snippets/technology-preview.adoc[] - -You create a `Secret` for the backup location and then you install the Data Protection Application. - -To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. For details, see xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]. 
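Because MCG and other S3-compatible object storage are declared as a backup location on the `DataProtectionApplication` CR, a hedged sketch of that part of the spec follows. The bucket, region, and endpoint URL are placeholders, and the credential name assumes the default `cloud-credentials` secret:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: example-dpa              # placeholder DPA name
  namespace: openshift-adp       # default OADP Operator namespace
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - aws                      # the aws plugin handles S3-compatible endpoints
  backupLocations:
  - velero:
      provider: aws
      default: true
      objectStorage:
        bucket: <bucket_name>    # placeholder bucket
        prefix: velero
      config:
        region: <region>         # placeholder region string
        s3ForcePathStyle: "true" # path-style addressing for S3-compatible gateways
        s3Url: <s3_endpoint_url> # placeholder object gateway endpoint
      credential:
        key: cloud
        name: cloud-credentials  # assumes the default secret name
----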
- -include::modules/oadp-installing-operator.adoc[leveloffset=+1] -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-ocs"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa.adoc[leveloffset=+1] -include::modules/oadp-configuring-noobaa-for-dr.adoc[leveloffset=+2] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - diff --git a/backup_and_restore/application_backup_and_restore/installing/modules b/backup_and_restore/application_backup_and_restore/installing/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/snippets b/backup_and_restore/application_backup_and_restore/installing/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc deleted file mode 100644 index 49f3c9b02f2e..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/uninstalling-oadp.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-oadp"] -= Uninstalling the OpenShift API for Data Protection -include::_attributes/common-attributes.adoc[] -:context: uninstalling-oadp - -toc::[] - -You uninstall the OpenShift API for Data Protection (OADP) by deleting the OADP Operator. See xref:../../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-cluster[Deleting Operators from a cluster] for details. diff --git a/backup_and_restore/application_backup_and_restore/modules b/backup_and_restore/application_backup_and_restore/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/application_backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc b/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc deleted file mode 100644 index 26d17d149167..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-advanced-topics"] -= Advanced OADP features and functionalities -include::_attributes/common-attributes.adoc[] -:context: oadp-advanced-topics - -toc::[] - -This document provides information about advanced features and functionalities of OpenShift API for Data Protection (OADP). 
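One of the workflows covered below, working with different Kubernetes API versions on the same cluster, depends on the `EnableAPIGroupVersions` Velero feature flag. A minimal, hypothetical `DataProtectionApplication` excerpt follows; the `featureFlags` field name comes from the OADP API tables later in this document:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: example-dpa              # placeholder DPA name
  namespace: openshift-adp       # default OADP Operator namespace
spec:
  configuration:
    velero:
      featureFlags:
      - EnableAPIGroupVersions   # enables handling of multiple API group versions
----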
- -[id="oadp-different-kubernetes-api-versions"] -== Working with different Kubernetes API versions on the same cluster - -include::modules/oadp-checking-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-about-enable-api-group-versions.adoc[leveloffset=+2] -include::modules/oadp-using-enable-api-group-versions.adoc[leveloffset=+2] - -[id="backing-up-data-one-cluster-restoring-another-cluster"] -== Backing up data from one cluster and restoring it to another cluster - -include::modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] -include::modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_oadp-advanced-topics"] -== Additional resources - -For more information about API group versions, see xref:../../backup_and_restore/application_backup_and_restore/oadp-advanced-topics.adoc#oadp-different-kubernetes-api-versions[Working with different Kubernetes API versions on the same cluster]. - -For more information about OADP Data Mover, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-using-data-mover-for-csi-snapshots_backing-up-applications[Using Data Mover for CSI snapshots]. - -For more information about using Restic with OADP, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Backing up applications with Restic]. - -:!oadp-advanced-topics: diff --git a/backup_and_restore/application_backup_and_restore/oadp-api.adoc b/backup_and_restore/application_backup_and_restore/oadp-api.adoc deleted file mode 100644 index 6ac2bd278c3f..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-api.adoc +++ /dev/null @@ -1,251 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-api"] -= APIs used with OADP -include::_attributes/common-attributes.adoc[] -:context: oadp-api -:namespace: openshift-adp -:local-product: OADP -:velero-domain: velero.io - -toc::[] - -The document provides information about the following APIs that you can use with OADP: - -* Velero API -* OADP API - -[id="velero-api"] -== Velero API - -Velero API documentation is maintained by Velero, not by Red Hat. It can be found at link:https://velero.io/docs/main/api-types/[Velero API types]. - -[id="oadp-api-tables"] -== OADP API - -The following tables provide the structure of the OADP API: - -.DataProtectionApplicationSpec -[options="header"] -|=== -|Property|Type|Description - -|`backupLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[`BackupLocation`] -|Defines the list of configurations to use for `BackupStorageLocations`. - -|`snapshotLocations` -|[] link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[`SnapshotLocation`] -|Defines the list of configurations to use for `VolumeSnapshotLocations`. - -|`unsupportedOverrides` -|map [ link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#UnsupportedImageKey[UnsupportedImageKey] ] link:https://pkg.go.dev/builtin#string[string] -|Can be used to override the deployed dependent images for development. Options are `veleroImageFqin`, `awsPluginImageFqin`, `openshiftPluginImageFqin`, `azurePluginImageFqin`, `gcpPluginImageFqin`, `csiPluginImageFqin`, `dataMoverImageFqin`, `resticRestoreImageFqin`, `kubevirtPluginImageFqin`, and `operator-type`. 
- -|`podAnnotations` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Used to add annotations to pods deployed by Operators. - -|`podDnsPolicy` -|link:https://pkg.go.dev/k8s.io/api/core/v1#DNSPolicy[`DNSPolicy`] -|Defines the configuration of the DNS of a pod. - -|`podDnsConfig` -|link:https://pkg.go.dev/k8s.io/api/core/v1#PodDNSConfig[`PodDNSConfig`] -|Defines the DNS parameters of a pod in addition to those generated from `DNSPolicy`. - -|`backupImages` -|*link:https://pkg.go.dev/builtin#bool[bool] -|Used to specify whether or not you want to deploy a registry for enabling backup and restore of images. - -|`configuration` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[`ApplicationConfig`] -|Used to define the data protection application's server configuration. - -|`features` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[`Features`] -|Defines the configuration for the DPA to enable the Technology Preview features. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataProtectionApplicationSpec[Complete schema definitions for the OADP API]. - -.BackupLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocationSpec[velero.BackupStorageLocationSpec] -|Location to store volume snapshots, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#BackupStorageLocation[Backup Storage Location]. - -|`bucket` -| *link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CloudStorageLocation[CloudStorageLocation] -| [Technology Preview] Automates creation of a bucket at some cloud storage providers for use as a backup storage location. -|=== - -:FeatureName: The `bucket` parameter -include::snippets/technology-preview.adoc[leveloffset=+1] - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#BackupLocation[Complete schema definitions for the type `BackupLocation`]. - -.SnapshotLocation -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocationSpec[VolumeSnapshotLocationSpec] -|Location to store volume snapshots, as described in link:https://pkg.go.dev/github.com/vmware-tanzu/velero/pkg/apis/velero/v1#VolumeSnapshotLocation[Volume Snapshot Location]. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#SnapshotLocation[Complete schema definitions for the type `SnapshotLocation`]. - -.ApplicationConfig -[options="header"] -|=== -|Property|Type|Description - -|`velero` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[VeleroConfig] -|Defines the configuration for the Velero server. - -|`restic` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[ResticConfig] -|Defines the configuration for the Restic server. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ApplicationConfig[Complete schema definitions for the type `ApplicationConfig`]. - -.VeleroConfig -[options="header"] -|=== -|Property|Type|Description - -|`featureFlags` -|[] link:https://pkg.go.dev/builtin#string[string] -|Defines the list of features to enable for the Velero instance. 
- -|`defaultPlugins` -|[] link:https://pkg.go.dev/builtin#string[string] -|The following types of default Velero plugins can be installed: `aws`, `azure`, `csi`, `gcp`, `kubevirt`, and `openshift`. - -|`customPlugins` -|[]link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[CustomPlugin] -|Used for installation of custom Velero plugins. - -Default and custom plugins are described in xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins#oadp-features-plugins[OADP plugins]. - -|`restoreResourcesVersionPriority` -|link:https://pkg.go.dev/builtin#string[string] -|Represents a config map that is created if defined for use in conjunction with the `EnableAPIGroupVersions` feature flag. Defining this field automatically adds `EnableAPIGroupVersions` to the Velero server feature flag. - -|`noDefaultBackupLocation` -|link:https://pkg.go.dev/builtin#bool[bool] -|To install Velero without a default backup storage location, you must set the `noDefaultBackupLocation` flag to confirm installation. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Velero` pod. - -|`logLevel` -|link:https://pkg.go.dev/builtin#string[string] -|Velero server’s log level (use `debug` for the most granular logging, leave unset for Velero default). Valid options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`, and `panic`. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#VeleroConfig[Complete schema definitions for the type `VeleroConfig`]. - -.CustomPlugin -[options="header"] -|=== -|Property|Type|Description - -|`name` -|link:https://pkg.go.dev/builtin#string[string] -|Name of custom plugin. - -|`image` -|link:https://pkg.go.dev/builtin#string[string] -|Image of custom plugin. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#CustomPlugin[Complete schema definitions for the type `CustomPlugin`]. - -.ResticConfig -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|*link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, enables backup and restore using Restic. If set to `false`, snapshots are needed. - -|`supplementalGroups` -|[]link:https://pkg.go.dev/builtin#int64[int64] -|Defines the Linux groups to be applied to the `Restic` pod. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string that defines the Restic timeout. Default value is `1hr` (1 hour). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. - -|`podConfig` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[`PodConfig`] -|Defines the configuration of the `Restic` pod. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#ResticConfig[Complete schema definitions for the type `ResticConfig`]. - -.PodConfig -[options="header"] -|=== -|Property|Type|Description - -|`nodeSelector` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Defines the `nodeSelector` to be supplied to a `Velero` `podSpec` or a `Restic` `podSpec`.
- -|`tolerations` -|[]link:https://pkg.go.dev/k8s.io/api/core/v1#Toleration[Toleration] -|Defines the list of tolerations to be applied to a Velero deployment or a Restic `daemonset`. - -|`resourceAllocations` -|link:https://pkg.go.dev/k8s.io/api/core/v1#ResourceRequirements[ResourceRequirements] -|Set specific resource `limits` and `requests` for a `Velero` pod or a `Restic` pod as described in xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#oadp-setting-resource-limits-and-requests_installing-oadp-aws[Setting Velero CPU and memory resource allocations]. - -|`labels` -|map [ link:https://pkg.go.dev/builtin#string[string] ] link:https://pkg.go.dev/builtin#string[string] -|Labels to add to pods. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#PodConfig[Complete schema definitions for the type `PodConfig`]. - -.Features -[options="header"] -|=== -|Property|Type|Description - -|`dataMover` -|*link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#DataMover[`DataMover`] -|Defines the configuration of the Data Mover. -|=== - -link:https://pkg.go.dev/github.com/openshift/oadp-operator/api/v1alpha1#Features[Complete schema definitions for the type `Features`]. - -.DataMover -[options="header"] -|=== -|Property|Type|Description - -|`enable` -|link:https://pkg.go.dev/builtin#bool[bool] -|If set to `true`, deploys the volume snapshot mover controller and a modified CSI Data Mover plugin. If set to `false`, these are not deployed. - -|`credentialName` -|link:https://pkg.go.dev/builtin#string[string] -|User-supplied Restic `Secret` name for Data Mover. - -|`timeout` -|link:https://pkg.go.dev/builtin#string[string] -|A user-supplied duration string for `VolumeSnapshotBackup` and `VolumeSnapshotRestore` to complete. Default is `10m` (10 minutes). A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `300ms`, `-1.5h`, or `2h45m`. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. -|=== - -The OADP API is more fully detailed in link:https://pkg.go.dev/github.com/openshift/oadp-operator[OADP Operator]. - diff --git a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc b/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc deleted file mode 100644 index 417275375f02..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-features-plugins"] -= OADP features and plugins -include::_attributes/common-attributes.adoc[] -:context: oadp-features-plugins - -toc::[] - -OpenShift API for Data Protection (OADP) features provide options for backing up and restoring applications. - -The default plugins enable Velero to integrate with certain cloud providers and to back up and restore {product-title} resources. - -include::modules/oadp-features.adoc[leveloffset=+1] -include::modules/oadp-plugins.adoc[leveloffset=+1] -include::modules/oadp-configuring-velero-plugins.adoc[leveloffset=+1] - -[id="oadp-support-for-ibm-power-and-ibm-z"] -== OADP support for IBM Power and {ibmzProductName} - -OpenShift API for Data Protection (OADP) is platform neutral. The information that follows relates only to IBM Power and to {ibmzProductName}. - -OADP 1.1.0 was tested successfully against {product-title} 4.11 for both IBM Power and {ibmzProductName}.
The sections that follow give testing and support information for OADP 1.1.0 in terms of backup locations for these systems. - -include::modules/oadp-ibm-power-test-support.adoc[leveloffset=+2] -include::modules/oadp-ibm-z-test-support.adoc[leveloffset=+2] - -:!oadp-features-plugins: diff --git a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc b/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc deleted file mode 100644 index 50fff6ad65a5..000000000000 --- a/backup_and_restore/application_backup_and_restore/oadp-release-notes.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="oadp-release-notes"] -= OADP release notes -include::_attributes/common-attributes.adoc[] -:context: oadp-release-notes - -toc::[] - -The release notes for OpenShift API for Data Protection (OADP) describe new features and enhancements, deprecated features, product recommendations, known issues, and resolved issues. - - -include::modules/oadp-release-notes-1-2-0.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-4.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-2.adoc[leveloffset=+1] - -include::modules/oadp-release-notes-1-1-1.adoc[leveloffset=+1] - -:!oadp-release-notes: diff --git a/backup_and_restore/application_backup_and_restore/snippets b/backup_and_restore/application_backup_and_restore/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/backup_and_restore/application_backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc b/backup_and_restore/application_backup_and_restore/troubleshooting.adoc deleted file mode 100644 index 41b79da4646a..000000000000 --- a/backup_and_restore/application_backup_and_restore/troubleshooting.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: oadp-troubleshooting -:namespace: openshift-adp -:local-product: OADP -:must-gather: registry.redhat.io/oadp/oadp-mustgather-rhel8:v1.1 - -toc::[] - -You can debug Velero custom resources (CRs) by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-debugging-oc-cli_oadp-troubleshooting[OpenShift CLI tool] or the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-debugging-velero-resources_oadp-troubleshooting[Velero CLI tool]. The Velero CLI tool provides more detailed logs and information. - -You can check xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-installation-issues_oadp-troubleshooting[installation issues], xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-backup-restore-cr-issues_oadp-troubleshooting[backup and restore CR issues], and xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#oadp-restic-issues_oadp-troubleshooting[Restic issues]. - -You can collect logs, CR information, and Prometheus metric data by using the xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#migration-using-must-gather_oadp-troubleshooting[`must-gather` tool]. 
- -You can obtain the Velero CLI tool by: - -* Downloading the Velero CLI tool -* Accessing the Velero binary in the Velero deployment in the cluster - -include::modules/velero-obtaining-by-downloading.adoc[leveloffset=+1] -include::modules/velero-obtaining-by-accessing-binary.adoc[leveloffset=+1] - -include::modules/oadp-debugging-oc-cli.adoc[leveloffset=+1] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+1] - - - -[id="oadp-pod-crash-resource-request"] -== Pods crash or restart due to lack of memory or CPU - -If a Velero or Restic pod crashes due to a lack of memory or CPU, you can set specific resource requests for either of those resources. - -include::modules/oadp-pod-crash-set-resource-request-velero.adoc[leveloffset=+2] -include::modules/oadp-pod-crash-set-resource-request-restic.adoc[leveloffset=+2] - -[IMPORTANT] -==== -The values for the resource request fields must follow the same format as Kubernetes resource requirements. -Also, if you do not specify `configuration.velero.podConfig.resourceAllocations` or `configuration.restic.podConfig.resourceAllocations`, the default `resources` specification for a Velero pod or a Restic pod is as follows: - -[source,yaml] ----- -requests: - cpu: 500m - memory: 128Mi ----- -==== - -[id="issues-with-velero-and-admission-workbooks"] -== Issues with Velero and admission webhooks - -Velero has limited abilities to resolve admission webhook issues during a restore. If you have workloads with admission webhooks, you might need to use an additional Velero plugin or make changes to how you restore the workload. - -Typically, workloads with admission webhooks require you to create a resource of a specific kind first. This is especially true if your workload has child resources because admission webhooks typically block child resources. - -For example, creating or restoring a top-level object such as `service.serving.knative.dev` typically creates child resources automatically. If you do this first, you will not need to use Velero to create and restore these resources. This avoids the problem of child resources being blocked by an admission webhook that Velero might use. - -[id="velero-restore-workarounds-for-workloads-with-admission-webhooks"] -=== Restoring workarounds for Velero backups that use admission webhooks - -This section describes the additional steps required to restore resources for several types of Velero backups that use admission webhooks. 
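Before the product-specific workarounds, a general pattern is to restore only the top-level resource first so that its controller recreates the child resources that an admission webhook would otherwise block. The following is a minimal sketch, assuming a backup named `example-backup` and the Knative resource mentioned above; adjust the restore name, backup name, and resource to your workload.

[source,terminal]
----
$ velero restore create restore-top-level \
    --from-backup example-backup \
    --include-resources service.serving.knative.dev
----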
- -include::modules/migration-debugging-velero-admission-webhooks-knative.adoc[leveloffset=+3] -include::modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources - -* xref:../../architecture/admission-plug-ins.adoc[Admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhooks-about_admission-plug-ins[Webhook admission plugins] -* xref:../../architecture/admission-plug-ins.adoc#admission-webhook-types_admission-plug-ins[Types of webhook admission plugins] - -include::modules/oadp-installation-issues.adoc[leveloffset=+1] -include::modules/oadp-backup-restore-cr-issues.adoc[leveloffset=+1] -include::modules/oadp-restic-issues.adoc[leveloffset=+1] - -include::modules/migration-using-must-gather.adoc[leveloffset=+1] - -:!oadp-troubleshooting: diff --git a/backup_and_restore/control_plane_backup_and_restore/_attributes b/backup_and_restore/control_plane_backup_and_restore/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc deleted file mode 100644 index a8bc6819d7a5..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="backup-etcd"] -= Backing up etcd -include::_attributes/common-attributes.adoc[] -:context: backup-etcd - -toc::[] - -etcd is the key-value store for {product-title}, which persists the state of all resource objects. - -Back up your cluster's etcd data regularly and store in a secure location ideally outside the {product-title} environment. Do not take an etcd backup before the first certificate rotation completes, which occurs 24 hours after installation, otherwise the backup will contain expired certificates. It is also recommended to take etcd backups during non-peak usage hours because the etcd snapshot has a high I/O cost. - -Be sure to take an etcd backup after you upgrade your cluster. This is important because when you restore your cluster, you must use an etcd backup that was taken from the same z-stream release. For example, an {product-title} 4.y.z cluster must use an etcd backup that was taken from 4.y.z. - -[IMPORTANT] -==== -Back up your cluster's etcd data by performing a single invocation of the backup script on a control plane host. Do not take a backup for each control plane host. -==== - -After you have an etcd backup, you can xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state]. 
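As a rough sketch of what that single invocation looks like, the included procedure runs a backup script from a debug shell on one control plane host; the node name and output directory below are placeholders.

[source,terminal]
----
$ oc debug node/<control_plane_node>      # open a debug shell on one control plane host
sh-4.4# chroot /host
sh-4.4# /usr/local/bin/cluster-backup.sh /home/core/assets/backup
----

The script writes an etcd snapshot and the static pod resources for the current state into the target directory.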
- -// Backing up etcd data -include::modules/backup-etcd.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_backup-etcd"] -== Additional resources -* xref:../../hosted_control_planes/hcp-backup-restore-dr.adoc#hcp-backup-restore[Backing up and restoring etcd on a hosted cluster] diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc deleted file mode 100644 index 38baebe6e0c6..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-dr"] -= About disaster recovery -include::_attributes/common-attributes.adoc[] -:context: about-dr - -toc::[] - -The disaster recovery documentation provides information for administrators on -how to recover from several disaster situations that might occur with their -{product-title} cluster. As an administrator, you might need to follow one or -more of the following procedures to return your cluster to a working -state. - -[IMPORTANT] -==== -Disaster recovery requires you to have at least one healthy control plane host. -==== - -xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]:: -This solution handles situations where you want to restore your cluster to -a previous state, for example, if an administrator deletes something critical. -This also includes situations where you have lost the majority of your control plane hosts, leading to etcd quorum loss and the cluster going offline. As long as you have taken an etcd backup, you can follow this procedure to restore your cluster to a previous state. -+ -If applicable, you might also need to xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates]. -+ -[WARNING] -==== -Restoring to a previous cluster state is a destructive and destablizing action to take on a running cluster. This procedure should only be used as a last resort. - -Prior to performing a restore, see xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-scenario-2-restoring-cluster-state-about_dr-restoring-cluster-state[About restoring cluster state] for more information on the impact to the cluster. -==== -+ -[NOTE] -==== -If you have a majority of your masters still available and have an etcd quorum, then follow the procedure to xref:../../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace a single unhealthy etcd member]. 
-==== - -xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]:: -This solution handles situations where your control plane certificates have -expired. For example, if you shut down your cluster before the first certificate -rotation, which occurs 24 hours after installation, your certificates will not -be rotated and will expire. You can follow this procedure to recover from -expired control plane certificates. diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc deleted file mode 100644 index 4400fc6492a2..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="dr-restoring-cluster-state"] -= Restoring to a previous cluster state -include::_attributes/common-attributes.adoc[] -:context: dr-restoring-cluster-state - -toc::[] - -To restore the cluster to a previous state, you must have previously xref:../../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state. 
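For orientation, the restore ultimately runs a restore script against that snapshot directory on a recovery control plane host, roughly as in the following sketch; the path is a placeholder, and the included modules describe the required steps around it.

[source,terminal]
----
$ sudo -E /usr/local/bin/cluster-restore.sh /home/core/assets/backup
----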
- -// About restoring to a previous cluster state -include::modules/dr-restoring-cluster-state-about.adoc[leveloffset=+1] - -// Restoring to a previous cluster state -include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_dr-restoring-cluster-state"] -== Additional resources - -* xref:../../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Installing a user-provisioned cluster on bare metal] -* xref:../../../networking/accessing-hosts.adoc#accessing-hosts[Creating a bastion host to access {product-title} instances and the control plane nodes with SSH] -* xref:../../../installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#replacing-a-bare-metal-control-plane-node_ipi-install-expanding[Replacing a bare-metal control plane node] - -include::modules/dr-scenario-cluster-state-issues.adoc[leveloffset=+1] - - diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc deleted file mode 100644 index a15d6765d198..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="dr-recovering-expired-certs"] -= Recovering from expired control plane certificates -include::_attributes/common-attributes.adoc[] -:context: dr-recovering-expired-certs - -toc::[] - -// Recovering from expired control plane certificates -include::modules/dr-recover-expired-control-plane-certs.adoc[leveloffset=+1] diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/images b/backup_and_restore/control_plane_backup_and_restore/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/modules b/backup_and_restore/control_plane_backup_and_restore/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc deleted file mode 100644 index 5ad22219dc1b..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="replacing-unhealthy-etcd-member"] -= Replacing an unhealthy etcd member -include::_attributes/common-attributes.adoc[] -:context: replacing-unhealthy-etcd-member - -toc::[] - -This document describes the process to replace a single unhealthy etcd member. 
- -This process depends on whether the etcd member is unhealthy because the machine is not running or the node is not ready, or whether it is unhealthy because the etcd pod is crashlooping. - -[NOTE] -==== -If you have lost the majority of your control plane hosts, follow the disaster recovery procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state] instead of this procedure. - -If the control plane certificates are not valid on the member being replaced, then you must follow the procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates] instead of this procedure. - -If a control plane node is lost and a new one is created, the etcd cluster Operator handles generating the new TLS certificates and adding the node as an etcd member. -==== - -== Prerequisites - -* Take an xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to replacing an unhealthy etcd member. - -// Identifying an unhealthy etcd member -include::modules/restore-identify-unhealthy-etcd-member.adoc[leveloffset=+1] - -// Determining the state of the unhealthy etcd member -include::modules/restore-determine-state-etcd-member.adoc[leveloffset=+1] - -== Replacing the unhealthy etcd member - -Depending on the state of your unhealthy etcd member, use one of the following procedures: - -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose machine is not running or whose node is not ready] -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-crashlooping-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose etcd pod is crashlooping] -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-baremetal-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy stopped baremetal etcd member] - -// Replacing an unhealthy etcd member whose machine is not running or whose node is not ready -include::modules/restore-replace-stopped-etcd-member.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc#cpmso-ts-etcd-degraded_cpmso-troubleshooting[Recovering a degraded etcd Operator] - -// Replacing an unhealthy etcd member whose etcd pod is crashlooping -include::modules/restore-replace-crashlooping-etcd-member.adoc[leveloffset=+2] - -// Replacing an unhealthy baremetal stopped etcd member -include::modules/restore-replace-stopped-baremetal-etcd-member.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_replacing-unhealthy-etcd-member"] -== Additional resources -* xref:../../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion-etcd_deleting-machine[Quorum protection with machine lifecycle hooks] \ No newline at end of file diff --git a/backup_and_restore/control_plane_backup_and_restore/snippets b/backup_and_restore/control_plane_backup_and_restore/snippets deleted file mode 120000 index 
5a3f5add140e..000000000000 --- a/backup_and_restore/control_plane_backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/backup_and_restore/graceful-cluster-restart.adoc b/backup_and_restore/graceful-cluster-restart.adoc deleted file mode 100644 index da115c5bf6e8..000000000000 --- a/backup_and_restore/graceful-cluster-restart.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="graceful-restart-cluster"] -= Restarting the cluster gracefully -include::_attributes/common-attributes.adoc[] -:context: graceful-restart-cluster - -toc::[] - -This document describes the process to restart your cluster after a graceful shutdown. - -Even though the cluster is expected to be functional after the restart, the cluster might not recover due to unexpected conditions, for example: - -* etcd data corruption during shutdown -* Node failure due to hardware -* Network connectivity issues - -If your cluster fails to recover, follow the steps to xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state]. - -== Prerequisites - -* You have xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[gracefully shut down your cluster]. - -// Restarting the cluster -include::modules/graceful-restart.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] for how to use an etcd backup to restore if your cluster failed to recover after restarting. diff --git a/backup_and_restore/graceful-cluster-shutdown.adoc b/backup_and_restore/graceful-cluster-shutdown.adoc deleted file mode 100644 index d5fc8860f78d..000000000000 --- a/backup_and_restore/graceful-cluster-shutdown.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="graceful-shutdown-cluster"] -= Shutting down the cluster gracefully -include::_attributes/common-attributes.adoc[] -:context: graceful-shutdown-cluster - -toc::[] - -This document describes the process to gracefully shut down your cluster. You might need to temporarily shut down your cluster for maintenance reasons, or to save on resource costs. - -== Prerequisites - -* Take an xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster. 
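Beyond the prerequisite, the shutdown itself amounts to powering nodes off in an orderly way. As a minimal sketch (the included module has the supported procedure), shutting down all nodes with a short delay might look like the following; treat the exact loop and delay value as assumptions.

[source,terminal]
----
$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do
    oc debug node/${node} -- chroot /host shutdown -h 1
  done
----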
- -// Shutting down the cluster -include::modules/graceful-shutdown.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_restarting-restoring-cluster"] -== Additional resources - -* xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[Restarting the cluster gracefully] - -* xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restore to a previous cluster state] \ No newline at end of file diff --git a/backup_and_restore/images b/backup_and_restore/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/backup_and_restore/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/backup_and_restore/index.adoc b/backup_and_restore/index.adoc deleted file mode 100644 index 4784c633da6d..000000000000 --- a/backup_and_restore/index.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="backup-restore-overview"] -= Backup and restore -include::_attributes/common-attributes.adoc[] -:context: backup-restore-overview -:backup-restore-overview: - -toc::[] - -[id="control-plane-backup-restore-operations-overview"] -== Control plane backup and restore operations - -As a cluster administrator, you might need to stop an {product-title} cluster for a period and restart it later. Some reasons for restarting a cluster are that you need to perform maintenance on a cluster or want to reduce resource costs. In {product-title}, you can perform a xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[graceful shutdown of a cluster] so that you can easily restart the cluster later. - -You must xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[back up etcd data] before shutting down a cluster; etcd is the key-value store for {product-title}, which persists the state of all resource objects. An etcd backup plays a crucial role in disaster recovery. In {product-title}, you can also xref:../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace an unhealthy etcd member]. - -When you want to get your cluster running again, xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[restart the cluster gracefully]. - -[NOTE] -==== -A cluster's certificates expire one year after the installation date. You can shut down a cluster and expect it to restart gracefully while the certificates are still valid. Although the cluster automatically retrieves the expired control plane certificates, you must still xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[approve the certificate signing requests (CSRs)]. -==== - -You might run into several situations where {product-title} does not work as expected, such as: - -* You have a cluster that is not functional after the restart because of unexpected conditions, such as node failure, or network connectivity issues. -* You have deleted something critical in the cluster by mistake. -* You have lost the majority of your control plane hosts, leading to etcd quorum loss. 
- -You can always recover from a disaster situation by xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restoring your cluster to its previous state] using the saved etcd snapshots. - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion-etcd_deleting-machine[Quorum protection with machine lifecycle hooks] - -[id="application-backup-restore-operations-overview"] -== Application backup and restore operations - -As a cluster administrator, you can back up and restore applications running on {product-title} by using the OpenShift API for Data Protection (OADP). - -OADP backs up and restores Kubernetes resources and internal images, at the granularity of a namespace, by using the version of Velero that is appropriate for the version of OADP you install, according to the table in xref:../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#velero-obtaining-by-downloading_oadp-troubleshooting[Downloading the Velero CLI tool]. OADP backs up and restores persistent volumes (PVs) by using snapshots or Restic. For details, see xref:../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-features_oadp-features-plugins[OADP features]. - -[id="oadp-requirements"] -=== OADP requirements - -OADP has the following requirements: - -* You must be logged in as a user with a `cluster-admin` role. -* You must have object storage for storing backups, such as one of the following storage types: - -** OpenShift Data Foundation -** Amazon Web Services -** Microsoft Azure -** Google Cloud Platform -** S3-compatible object storage - -include::snippets/oadp-ocp-compat.adoc[] - -:FeatureName: The `CloudStorage` API for S3 storage -include::snippets/technology-preview.adoc[] - -* To back up PVs with snapshots, you must have cloud storage that has a native snapshot API or supports Container Storage Interface (CSI) snapshots, such as the following providers: - -** Amazon Web Services -** Microsoft Azure -** Google Cloud Platform -** CSI snapshot-enabled cloud storage, such as Ceph RBD or Ceph FS - -[NOTE] -==== -If you do not want to back up PVs by using snapshots, you can use link:https://restic.net/[Restic], which is installed by the OADP Operator by default. -==== - -[id="backing-up-and-restoring-applications"] -=== Backing up and restoring applications - -You back up applications by creating a `Backup` custom resource (CR). See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-cr_backing-up-applications[Creating a Backup CR].You can configure the following backup options: - -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-creating-backup-hooks_backing-up-applications[Backup hooks] to run commands before or after the backup operation -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-scheduling-backups_backing-up-applications[Scheduled backups] -* xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#oadp-backing-up-applications-restic_backing-up-applications[Restic backups] - -You restore application backups by creating a `Restore` (CR). 
See xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-cr_restoring-applications[Creating a Restore CR]. You can configure xref:../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#oadp-creating-restore-hooks_restoring-applications[restore hooks] to run commands in init containers or in the application container during the restore operation. - -:backup-restore-overview!: diff --git a/backup_and_restore/modules b/backup_and_restore/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/backup_and_restore/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/backup_and_restore/snippets b/backup_and_restore/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/backup_and_restore/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/cicd/_attributes b/cicd/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cicd/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cicd/builds/_attributes b/cicd/builds/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/builds/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/builds/advanced-build-operations.adoc b/cicd/builds/advanced-build-operations.adoc deleted file mode 100644 index c8a9279320d0..000000000000 --- a/cicd/builds/advanced-build-operations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-build-operations"] -= Performing advanced builds -include::_attributes/common-attributes.adoc[] -:context: advanced-build-operations - -toc::[] - -The following sections provide instructions for advanced build operations including -setting build resources and maximum duration, assigning builds to nodes, chaining -builds, build pruning, and build run policies. - -include::modules/builds-setting-build-resources.adoc[leveloffset=+1] - -include::modules/builds-setting-maximum-duration.adoc[leveloffset=+1] - -include::modules/builds-assigning-builds-to-nodes.adoc[leveloffset=+1] - -include::modules/builds-chaining-builds.adoc[leveloffset=+1] - -include::modules/builds-build-pruning.adoc[leveloffset=+1] - -include::modules/builds-build-run-policy.adoc[leveloffset=+1] diff --git a/cicd/builds/basic-build-operations.adoc b/cicd/builds/basic-build-operations.adoc deleted file mode 100644 index 5e63cd49638b..000000000000 --- a/cicd/builds/basic-build-operations.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="basic-build-operations"] -= Performing and configuring basic builds -include::_attributes/common-attributes.adoc[] -:context: basic-build-operations - -toc::[] - -The following sections provide instructions for basic build operations, including starting and canceling builds, editing `BuildConfigs`, deleting `BuildConfigs`, viewing build details, and accessing build logs. 
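For quick reference, the everyday operations covered by the modules below map to a handful of `oc` commands, sketched here with placeholder names.

[source,terminal]
----
$ oc start-build <buildconfig_name>           # start a new build
$ oc start-build <buildconfig_name> --follow  # start a build and stream its log
$ oc cancel-build <build_name>                # cancel a running build
$ oc logs -f bc/<buildconfig_name>            # access logs of the latest build
$ oc describe bc <buildconfig_name>           # view build configuration details
----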
- -include::modules/builds-basic-start-build.adoc[leveloffset=+1] -include::modules/builds-basic-start-re-run.adoc[leveloffset=+2] -include::modules/builds-basic-start-logs.adoc[leveloffset=+2] -include::modules/builds-basic-start-environment-variable.adoc[leveloffset=+2] -include::modules/builds-basic-start-source.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-build.adoc[leveloffset=+1] -include::modules/builds-basic-cancel-multiple.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-all.adoc[leveloffset=+2] -include::modules/builds-basic-cancel-all-state.adoc[leveloffset=+2] -include::modules/builds-basic-edit-buildconfig.adoc[leveloffset=+1] -include::modules/builds-basic-delete-buildconfig.adoc[leveloffset=+1] -include::modules/builds-basic-view-build-details.adoc[leveloffset=+1] -include::modules/builds-basic-access-build-logs.adoc[leveloffset=+1] -include::modules/builds-basic-access-buildconfig-logs.adoc[leveloffset=+2] -include::modules/builds-basic-access-buildconfig-version-logs.adoc[leveloffset=+2] -include::modules/builds-basic-access-build-verbosity.adoc[leveloffset=+2] diff --git a/cicd/builds/build-configuration.adoc b/cicd/builds/build-configuration.adoc deleted file mode 100644 index a73773137d95..000000000000 --- a/cicd/builds/build-configuration.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="build-configuration"] -= Build configuration resources -include::_attributes/common-attributes.adoc[] -:context: build-configuration - -toc::[] - -Use the following procedure to configure build settings. - -include::modules/builds-configuration-parameters.adoc[leveloffset=+1] - -include::modules/builds-configuration-file.adoc[leveloffset=+1] diff --git a/cicd/builds/build-strategies.adoc b/cicd/builds/build-strategies.adoc deleted file mode 100644 index 3bc1fd41cb51..000000000000 --- a/cicd/builds/build-strategies.adoc +++ /dev/null @@ -1,91 +0,0 @@ -:_content-type: ASSEMBLY -[id="build-strategies"] -= Using build strategies -include::_attributes/common-attributes.adoc[] -:context: build-strategies - -toc::[] - -The following sections define the primary supported build strategies, and how to -use them. 
- -// Docker build strategy - -include::modules/builds-strategy-docker-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-docker-from-image.adoc[leveloffset=+2] - -include::modules/builds-strategy-dockerfile-path.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-build-arguments.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-squash-layers.adoc[leveloffset=+2] - -:context: build-strategies-docker - -include::modules/builds-using-build-volumes.adoc[leveloffset=+2] - - -// S2I build strategy - -include::modules/builds-strategy-s2i-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-s2i-incremental-builds.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-override-builder-image-scripts.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-environment-files.adoc[leveloffset=+3] - -include::modules/builds-strategy-s2i-buildconfig-environment.adoc[leveloffset=+3] - -include::modules/builds-strategy-s2i-ignore-source-files.adoc[leveloffset=+2] - -include::modules/images-create-s2i.adoc[leveloffset=+2] - -include::modules/images-create-s2i-build.adoc[leveloffset=+3] - -include::modules/images-create-s2i-scripts.adoc[leveloffset=+3] - -:context: build-strategies-s2i - -include::modules/builds-using-build-volumes.adoc[leveloffset=+2] - -// Custom build strategy - -include::modules/builds-strategy-custom-build.adoc[leveloffset=+1] - -include::modules/builds-strategy-custom-from-image.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-secrets.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-environment-variables.adoc[leveloffset=+2] - -include::modules/images-custom.adoc[leveloffset=+2] - -// Pipeline build strategy - -include::modules/builds-strategy-pipeline-build.adoc[leveloffset=+1] - -include::modules/builds-understanding-openshift-pipeline.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-providing-jenkinsfile.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc[leveloffset=+3] - -include::modules/builds-tutorial-pipeline.adoc[leveloffset=+2] - -//include::modules/builds-strategy-force-pull-procedure.adoc[leveloffset=+1] - -//include::modules/builds-strategy-docker-force-pull-example.adoc[leveloffset=+2] - -//include::modules/builds-strategy-s2i-force-pull-example.adoc[leveloffset=+2] - -include::modules/builds-strategy-secrets-web-console.adoc[leveloffset=+1] - -include::modules/builds-strategy-enable-pulling-pushing.adoc[leveloffset=+1] diff --git a/cicd/builds/creating-build-inputs.adoc b/cicd/builds/creating-build-inputs.adoc deleted file mode 100644 index 32354cd232c9..000000000000 --- a/cicd/builds/creating-build-inputs.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-build-inputs"] -= Creating build inputs -include::_attributes/common-attributes.adoc[] -:context: creating-build-inputs - -toc::[] - -Use the following sections for an overview of build inputs, instructions on how -to use inputs to provide source content for builds to operate on, and how to use -build environments and create secrets. 
- -include::modules/builds-define-build-inputs.adoc[leveloffset=+1] - -include::modules/builds-dockerfile-source.adoc[leveloffset=+1] - -include::modules/builds-image-source.adoc[leveloffset=+1] - -include::modules/builds-source-code.adoc[leveloffset=+1] - -include::modules/builds-using-proxy-git-cloning.adoc[leveloffset=+2] - -include::modules/builds-adding-source-clone-secrets.adoc[leveloffset=+2] - -include::modules/builds-automatically-add-source-clone-secrets.adoc[leveloffset=+3] - -include::modules/builds-manually-add-source-clone-secrets.adoc[leveloffset=+3] - -include::modules/builds-gitconfig-file.adoc[leveloffset=+3] - -include::modules/builds-gitconfig-file-secured-git.adoc[leveloffset=+3] - -include::modules/builds-source-secret-basic-auth.adoc[leveloffset=+3] - -include::modules/builds-source-secret-ssh-key-auth.adoc[leveloffset=+3] - -include::modules/builds-source-secret-trusted-ca.adoc[leveloffset=+3] - -include::modules/builds-source-secret-combinations.adoc[leveloffset=+3] - -include::modules/builds-source-secret-combinations-ssh-gitconfig.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-gitconfig-ca.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-ca.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc[leveloffset=+4] - -include::modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc[leveloffset=+4] - -include::modules/builds-binary-source.adoc[leveloffset=+1] - -include::modules/builds-input-secrets-configmaps.adoc[leveloffset=+1] - -include::modules/builds-secrets-overview.adoc[leveloffset=+2] - -include::modules/builds-creating-secrets.adoc[leveloffset=+2] - -include::modules/builds-using-secrets.adoc[leveloffset=+2] - -include::modules/builds-adding-input-secrets-configmaps.adoc[leveloffset=+2] - -include::modules/builds-source-to-image.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] - -include::modules/builds-docker-strategy.adoc[leveloffset=+2] - -include::modules/builds-custom-strategy.adoc[leveloffset=+2] -endif::[] - -include::modules/builds-using-external-artifacts.adoc[leveloffset=+1] - -include::modules/builds-docker-credentials-private-registries.adoc[leveloffset=+1] - -include::modules/builds-build-environment.adoc[leveloffset=+1] - -include::modules/builds-using-build-fields-as-environment-variables.adoc[leveloffset=+2] - -include::modules/builds-using-secrets-as-environment-variables.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cicd/builds/creating-build-inputs.adoc#builds-input-secrets-configmaps_creating-build-inputs[Input secrets and config maps] - -include::modules/builds-service-serving-certificate-secrets.adoc[leveloffset=+1] - -include::modules/builds-secrets-restrictions.adoc[leveloffset=+1] diff --git a/cicd/builds/custom-builds-buildah.adoc b/cicd/builds/custom-builds-buildah.adoc deleted file mode 100644 index 9ea928151cb0..000000000000 --- a/cicd/builds/custom-builds-buildah.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: ASSEMBLY -[id="custom-builds-buildah"] -= Custom image builds with Buildah -include::_attributes/common-attributes.adoc[] -:context: custom-builds-buildah - -toc::[] - - -With {product-title} {product-version}, a docker socket will not be present on the host -nodes. 
This means the _mount docker socket_ option of a custom build is not -guaranteed to provide an accessible docker socket for use within a custom build -image. - -If you require this capability in order to build and push images, add the Buildah -tool your custom build image and use it to build and push the image within your -custom build logic. The following is an example of how to run custom builds with -Buildah. - -[NOTE] -==== -Using the custom build strategy requires permissions that normal users do -not have by default because it allows the user to execute arbitrary code inside -a privileged container running on the cluster. This level of access can be used -to compromise the cluster and therefore should be granted only to users who are -trusted with administrative privileges on the cluster. -==== - -== Prerequisites - -* Review how to xref:../../cicd/builds/securing-builds-by-strategy.adoc#securing-builds-by-strategy[grant custom build permissions]. - - -include::modules/builds-create-custom-build-artifacts.adoc[leveloffset=+1] -include::modules/builds-build-custom-builder-image.adoc[leveloffset=+1] -include::modules/builds-use-custom-builder-image.adoc[leveloffset=+1] diff --git a/cicd/builds/images b/cicd/builds/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/builds/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/builds/managing-build-output.adoc b/cicd/builds/managing-build-output.adoc deleted file mode 100644 index 1378cd27f6e5..000000000000 --- a/cicd/builds/managing-build-output.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-build-output"] -= Managing build output -include::_attributes/common-attributes.adoc[] -:context: managing-build-output - -toc::[] - - -Use the following sections for an overview of and instructions for managing -build output. - -include::modules/builds-docker-source-build-output.adoc[leveloffset=+1] - -include::modules/builds-output-image-environment-variables.adoc[leveloffset=+1] - -include::modules/builds-output-image-labels.adoc[leveloffset=+1] diff --git a/cicd/builds/modules b/cicd/builds/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/builds/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/builds/running-entitled-builds.adoc b/cicd/builds/running-entitled-builds.adoc deleted file mode 100644 index 4eef8f5985dc..000000000000 --- a/cicd/builds/running-entitled-builds.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="running-entitled-builds"] -= Using Red Hat subscriptions in builds -include::_attributes/common-attributes.adoc[] -:context: running-entitled-builds - -toc::[] - -[role="_abstract"] -Use the following sections to run entitled builds on {product-title}. 
- -include::modules/builds-create-imagestreamtag.adoc[leveloffset=+1] - -include::modules/builds-source-secrets-entitlements.adoc[leveloffset=+1] - -== Running builds with Subscription Manager - -include::modules/builds-strategy-docker-entitled-subman.adoc[leveloffset=+2] - -== Running builds with Red Hat Satellite subscriptions - -include::modules/builds-source-input-satellite-config.adoc[leveloffset=+2] - -include::modules/builds-strategy-docker-entitled-satellite.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/solutions/5847331[How to use builds with Red Hat Satellite subscriptions and which certificate to use] - -// Beginning of "Running entitled builds with SharedSecret objects" section - -include::modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc[leveloffset=+1] - -// End of "Running entitled builds with SharedSecret objects" section - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/insights-operator-simple-access.adoc#insights-operator-simple-access[Importing simple content access certificates with Insights Operator] -* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] -* xref:../../openshift_images/image-streams-manage.adoc#image-streams-managing[Managing image streams] -* xref:../../cicd/builds/build-strategies.adoc#build-strategies[build strategy] diff --git a/cicd/builds/securing-builds-by-strategy.adoc b/cicd/builds/securing-builds-by-strategy.adoc deleted file mode 100644 index 9809d2327602..000000000000 --- a/cicd/builds/securing-builds-by-strategy.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="securing-builds-by-strategy"] -= Securing builds by strategy -include::_attributes/common-attributes.adoc[] -:context: securing-builds-by-strategy - -toc::[] - -Builds in {product-title} are run in privileged containers. Depending on the build strategy used, if you have privileges, you can run builds to escalate their permissions on the cluster and host nodes. And as a security measure, it limits who can run builds and the strategy that is used for those builds. Custom builds are inherently less safe than source builds, because they can execute any code within a privileged container, and are disabled by default. Grant docker build permissions with caution, because a vulnerability in the Dockerfile processing logic could result in a privileges being granted on the host node. - -By default, all users that can create builds are granted permission to use the docker and Source-to-image (S2I) build strategies. Users with cluster administrator privileges can enable the custom build strategy, as referenced in the restricting build strategies to a user globally section. - -You can control who can build and which build strategies they can use by using an authorization policy. Each build strategy has a corresponding build subresource. A user must have permission to create a build and permission to create on the build strategy subresource to create builds using that strategy. Default roles are provided that grant the create permission on the build strategy subresource. 
- -.Build Strategy Subresources and Roles -[options="header"] -|=== - -|Strategy |Subresource |Role - -|Docker -|builds/docker -|system:build-strategy-docker - -|Source-to-Image -|builds/source -|system:build-strategy-source - -|Custom -|builds/custom -|system:build-strategy-custom - -|JenkinsPipeline -|builds/jenkinspipeline -|system:build-strategy-jenkinspipeline - -|=== - -include::modules/builds-disabling-build-strategy-globally.adoc[leveloffset=+1] -include::modules/builds-restricting-build-strategy-globally.adoc[leveloffset=+1] -include::modules/builds-restricting-build-strategy-to-user.adoc[leveloffset=+1] diff --git a/cicd/builds/setting-up-trusted-ca.adoc b/cicd/builds/setting-up-trusted-ca.adoc deleted file mode 100644 index 6adc4e59cb6e..000000000000 --- a/cicd/builds/setting-up-trusted-ca.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-up-trusted-ca"] -= Setting up additional trusted certificate authorities for builds -ifndef::openshift-dedicated,openshift-rosa[] -include::_attributes/common-attributes.adoc[] -endif::[] -ifdef::openshift-dedicated,openshift-rosa[] -include::_attributes/attributes-openshift-dedicated.adoc[] -endif::[] -:context: setting-up-trusted-ca - -toc::[] - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -Use the following sections to set up additional certificate authorities (CA) to be trusted by builds when pulling images from an image registry. - -The procedure requires a cluster administrator to create a `ConfigMap` and add additional CAs as keys in the `ConfigMap`. - -* The `ConfigMap` must be created in the `openshift-config` namespace. -* `domain` is the key in the `ConfigMap` and `value` is the PEM-encoded certificate. -** Each CA must be associated with a domain. The domain format is `hostname[..port]`. -* The `ConfigMap` name must be set in the `image.config.openshift.io/cluster` cluster scoped configuration resource's `spec.additionalTrustedCA` field. -//* No longer needs single PEM bundle - -include::modules/configmap-adding-ca.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap[Create a `ConfigMap`] -* link:https://kubectl.docs.kubernetes.io/guides/config_management/secrets_configmaps/[Secrets and `ConfigMaps`] -ifndef::openshift-rosa,openshift-dedicated[] -* xref:../../networking/configuring-a-custom-pki.adoc#configuring-a-custom-pki[Configuring a custom PKI] -endif::[] -endif::[] diff --git a/cicd/builds/snippets b/cicd/builds/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/builds/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/builds/triggering-builds-build-hooks.adoc b/cicd/builds/triggering-builds-build-hooks.adoc deleted file mode 100644 index adca76892240..000000000000 --- a/cicd/builds/triggering-builds-build-hooks.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="triggering-builds-build-hooks"] -= Triggering and modifying builds -include::_attributes/common-attributes.adoc[] -:context: triggering-builds-build-hooks - -toc::[] - -The following sections outline how to trigger builds and modify builds using build hooks. 
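To make the idea concrete before the module details, the fragment below shows one way a `BuildConfig` can combine a webhook trigger, an image change trigger, and a post-commit hook; the secret name and test command are placeholders.

[source,yaml]
----
spec:
  triggers:
  - type: GitHub
    github:
      secretReference:
        name: example-github-webhook-secret   # secret that guards the webhook URL
  - type: ImageChange
    imageChange: {}                           # rebuild when the builder image is updated
  postCommit:
    script: "bundle exec rake test"           # runs in a temporary container after each build
----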
- -include::modules/builds-triggers.adoc[leveloffset=+1] - -include::modules/builds-webhook-triggers.adoc[leveloffset=+2] - -include::modules/builds-using-github-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-gitlab-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-bitbucket-webhooks.adoc[leveloffset=+3] - -include::modules/builds-using-generic-webhooks.adoc[leveloffset=+3] - -include::modules/builds-displaying-webhook-urls.adoc[leveloffset=+3] - -include::modules/builds-using-image-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-identifying-image-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-configuration-change-triggers.adoc[leveloffset=+2] - -include::modules/builds-setting-triggers-manually.adoc[leveloffset=+3] - -include::modules/builds-build-hooks.adoc[leveloffset=+1] - -include::modules/builds-configuring-post-commit-build-hooks.adoc[leveloffset=+2] - -include::modules/builds-using-cli-post-commit-build-hooks.adoc[leveloffset=+2] diff --git a/cicd/builds/troubleshooting-builds.adoc b/cicd/builds/troubleshooting-builds.adoc deleted file mode 100644 index 92cd14bfd15d..000000000000 --- a/cicd/builds/troubleshooting-builds.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-builds_{context}"] -= Troubleshooting builds -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-builds - -toc::[] - -Use the following to troubleshoot build issues. - -include::modules/builds-troubleshooting-access-resources.adoc[leveloffset=+1] - -include::modules/builds-troubleshooting-service-certificate-generation.adoc[leveloffset=+1] diff --git a/cicd/builds/understanding-buildconfigs.adoc b/cicd/builds/understanding-buildconfigs.adoc deleted file mode 100644 index bf87540e52ac..000000000000 --- a/cicd/builds/understanding-buildconfigs.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-buildconfigs"] -= Understanding build configurations -include::_attributes/common-attributes.adoc[] -:context: understanding-builds - -toc::[] - -The following sections define the concept of a build, build configuration, and outline the primary build strategies available. 
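As a minimal sketch of the object described below, a `BuildConfig` ties together a source, a build strategy, and an output image; all names and references here are placeholders.

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: example-nodejs-app
spec:
  source:
    type: Git
    git:
      uri: https://github.com/example/nodejs-app.git   # where the source comes from
  strategy:
    type: Source
    sourceStrategy:
      from:
        kind: ImageStreamTag
        name: nodejs:latest            # builder image for Source-to-Image
        namespace: openshift
  output:
    to:
      kind: ImageStreamTag
      name: example-nodejs-app:latest  # where the built image is pushed
  triggers:
  - type: ConfigChange                 # rebuild when this BuildConfig changes
----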
- -include::modules/builds-buildconfig.adoc[leveloffset=+1] diff --git a/cicd/builds/understanding-image-builds.adoc b/cicd/builds/understanding-image-builds.adoc deleted file mode 100644 index 6483bac9c0b7..000000000000 --- a/cicd/builds/understanding-image-builds.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-image-builds"] -= Understanding image builds -include::_attributes/common-attributes.adoc[] -:context: understanding-image-builds - -toc::[] - -include::modules/builds-about.adoc[leveloffset=+1] - -include::modules/builds-strategy-docker-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-s2i-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-custom-build.adoc[leveloffset=+2] - -include::modules/builds-strategy-pipeline-build.adoc[leveloffset=+2] diff --git a/cicd/gitops/_attributes b/cicd/gitops/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/gitops/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images deleted file mode 120000 index 4dd3347de19a..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images +++ /dev/null @@ -1 +0,0 @@ -../../../images \ No newline at end of file diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/cicd/gitops/images b/cicd/gitops/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/gitops/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/gitops/modules b/cicd/gitops/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/gitops/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/gitops/snippets b/cicd/gitops/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/gitops/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/jenkins/_attributes b/cicd/jenkins/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/jenkins/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/jenkins/images b/cicd/jenkins/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/jenkins/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/jenkins/images-other-jenkins-agent.adoc 
b/cicd/jenkins/images-other-jenkins-agent.adoc deleted file mode 100644 index 68f0628b5bf8..000000000000 --- a/cicd/jenkins/images-other-jenkins-agent.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="images-other-jenkins-agent"] -= Jenkins agent -include::_attributes/common-attributes.adoc[] -:context: images-other-jenkins-agent - -toc::[] - -{product-title} provides a base image for use as a Jenkins agent. - -The Base image for Jenkins agents does the following: - -* Pulls in both the required tools, headless Java, the Jenkins JNLP client, and the useful ones, including `git`, `tar`, `zip`, and `nss`, among others. -* Establishes the JNLP agent as the entry point. -* Includes the `oc` client tool for invoking command line operations from within Jenkins jobs. -* Provides Dockerfiles for both Red Hat Enterprise Linux (RHEL) and `localdev` images. - -[IMPORTANT] -==== -Use a version of the agent image that is appropriate for your {product-title} release version. Embedding an `oc` client version that is not compatible with the {product-title} version can cause unexpected behavior. -==== - -The {product-title} Jenkins image also defines the following sample `java-builder` pod template to illustrate how you can use the agent image with the Jenkins Kubernetes plugin. - -The `java-builder` pod template employs two containers: -* A `jnlp` container that uses the {product-title} Base agent image and handles the JNLP contract for starting and stopping Jenkins agents. -* A `java` container that uses the `java` {product-title} Sample ImageStream, which contains the various Java binaries, including the Maven binary `mvn`, for building code. - -include::modules/images-other-jenkins-agent-images.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-env-var.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-memory.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-gradle.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-agent-pod-retention.adoc[leveloffset=+1] diff --git a/cicd/jenkins/images-other-jenkins.adoc b/cicd/jenkins/images-other-jenkins.adoc deleted file mode 100644 index 87aeeabeb003..000000000000 --- a/cicd/jenkins/images-other-jenkins.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="images-other-jenkins"] -= Configuring Jenkins images -include::_attributes/common-attributes.adoc[] -:context: images-other-jenkins - -toc::[] - -{product-title} provides a container image for running Jenkins. This image provides a Jenkins server instance, which can be used to set up a basic flow for continuous testing, integration, and delivery. - -The image is based on the Red Hat Universal Base Images (UBI). - -{product-title} follows the link:https://jenkins.io/changelog-stable/[LTS] release of Jenkins. {product-title} provides an image that contains Jenkins 2.x. - -The {product-title} Jenkins images are available on link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io]. - -For example: - -[source,terminal] ----- -$ podman pull registry.redhat.io/ocp-tools-4/jenkins-rhel8: ----- - -To use these images, you can either access them directly from these registries or push them into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the image stream. 
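For example, creating such an image stream might look like the following sketch; the image tag and project name are placeholders.

[source,terminal]
----
$ oc import-image jenkins:<image_tag> \
    --from=registry.redhat.io/ocp-tools-4/jenkins-rhel8:<image_tag> \
    --confirm -n <project_name>
----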
- -But for convenience, {product-title} provides image streams in the `openshift` namespace for the core Jenkins image as well as the example Agent images provided for {product-title} integration with Jenkins. - -[id="images-other-jenkins-config-customization_{context}"] -== Configuration and customization - -You can manage Jenkins authentication in two ways: - -* {product-title} OAuth authentication provided by the {product-title} Login plugin. -* Standard authentication provided by Jenkins. - -include::modules/images-other-jenkins-oauth-auth.adoc[leveloffset=+2] - -include::modules/images-other-jenkins-auth.adoc[leveloffset=+2] - -include::modules/images-other-jenkins-env-var.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-cross-project.adoc[leveloffset=+1] - -[id="images-other-jenkins-cross-volume-mount_{context}"] -== Jenkins cross volume mount points - -The Jenkins image can be run with mounted volumes to enable persistent storage for the configuration: - -* `/var/lib/jenkins` is the data directory where Jenkins stores configuration files, including job definitions. - -include::modules/images-other-jenkins-customize-s2i.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-config-kubernetes.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] - -include::modules/images-other-jenkins-permissions.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-create-service.adoc[leveloffset=+1] - -include::modules/images-other-jenkins-kubernetes-plugin.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] - -include::modules/images-other-jenkins-memory.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* See xref:../../architecture/understanding-development.adoc#base-image-options[Base image options] for more information about the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI). -* xref:../../cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc#important-changes-to-openshift-jenkins-images[Important changes to OpenShift Jenkins images] diff --git a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc b/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc deleted file mode 100644 index 6590c40201fd..000000000000 --- a/cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="important-changes-to-openshift-jenkins-images"] -= Important changes to OpenShift Jenkins images -include::_attributes/common-attributes.adoc[] -:context: important-changes-to-openshift-jenkins-images - -toc::[] - -{product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io`. 
It also removes the OpenShift Jenkins Maven and NodeJS Agent images from its payload: - -* {product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io` so that Red Hat can produce and update the images outside the {product-title} lifecycle. Previously, these images were in the {product-title} install payload and the `openshift4` repository at `registry.redhat.io`. - -* {product-title} 4.10 deprecated the OpenShift Jenkins Maven and NodeJS Agent images. {product-title} 4.11 removes these images from its payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -These changes support the {product-title} 4.10 recommendation to use xref:../../cicd/jenkins/images-other-jenkins.adoc#images-other-jenkins-config-kubernetes_images-other-jenkins[multiple container Pod Templates with the Jenkins Kubernetes Plugin]. - -include::modules/relocation-of-openshift-jenkins-images.adoc[leveloffset=+1] - -include::modules/customizing-the-jenkins-image-stream-tag.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_important-changes-to-openshift-jenkins-images_{context}"] -== Additional resources - -* xref:../../openshift_images/managing_images/tagging-images.adoc#images-add-tags-to-imagestreams_tagging-images[Adding tags to image streams] -* xref:../../openshift_images/image-streams-manage.adoc#images-imagestream-import_image-streams-managing[Configuring periodic importing of image stream tags] -* xref:../../cicd/jenkins/images-other-jenkins-agent.adoc#images-other-jenkins-agent[Jenkins agent] -* link:https://catalog.redhat.com/software/containers/search?q=Jenkins%202&p=1[Certified `jenkins` images] -* link:https://catalog.redhat.com/software/containers/search?q=Jenkins%20Agent%20Base&p=1[Certified `jenkins-agent-base` images] -* link:https://catalog.redhat.com/software/containers/search?q=jenkins-agent-maven&p=1[Certified `jenkins-agent-maven` images] -// Writer, remove this line in 4.12 -* link:https://catalog.redhat.com/software/containers/search?q=jenkins-agent-nodejs&p=1[Certified `jenkins-agent-nodejs` images] -// Writer, remove this line in 4.12 diff --git a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc b/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc deleted file mode 100644 index 7f8242f2ddbc..000000000000 --- a/cicd/jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -//Jenkins-Tekton-Migration -[id="migrating-from-jenkins-to-openshift-pipelines_{context}"] -= Migrating from Jenkins to {pipelines-shortname} or Tekton -include::_attributes/common-attributes.adoc[] -:context: migrating-from-jenkins-to-openshift-pipelines - -toc::[] - -You can migrate your CI/CD workflows from Jenkins to xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[{pipelines-title}], a cloud-native CI/CD experience based on the Tekton project. 
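To make the Jenkins-to-Tekton mapping more concrete before the comparison modules that follow, a single Jenkinsfile stage roughly corresponds to a Tekton task with one or more steps. The following sketch is illustrative only; the task name, builder image, and command are assumptions and are not taken from the migration modules.

[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: maven-build                  # hypothetical task, comparable to a Jenkinsfile "Build" stage
spec:
  workspaces:
  - name: source                     # the cloned repository is expected in this workspace
  steps:
  - name: build
    image: registry.access.redhat.com/ubi8/openjdk-17  # hypothetical builder image; any image that provides `mvn` works
    workingDir: $(workspaces.source.path)
    script: |
      mvn -B package                 # equivalent of `sh 'mvn -B package'` in a Jenkinsfile
----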
- -include::modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc[leveloffset=+1] - -include::modules/jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines.adoc[leveloffset=+1] - -include::modules/jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks.adoc[leveloffset=+1] - -include::modules/jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts.adoc[leveloffset=+1] - -include::modules/jt-comparison-of-jenkins-openshift-pipelines-execution-models.adoc[leveloffset=+1] - -include::modules/jt-examples-of-common-use-cases.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources -* xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}] -* xref:../../authentication/using-rbac.adoc#using-rbac[Role-based Access Control] diff --git a/cicd/jenkins/modules b/cicd/jenkins/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/jenkins/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/jenkins/snippets b/cicd/jenkins/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/jenkins/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/pipelines/_attributes b/cicd/pipelines/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cicd/pipelines/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc b/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc deleted file mode 100644 index 4f575e125149..000000000000 --- a/cicd/pipelines/authenticating-pipelines-using-git-secret.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="authenticating-pipelines-using-git-secret"] -= Authenticating pipelines using git secret -include::_attributes/common-attributes.adoc[] -:context: authenticating-pipelines-using-git-secret - -toc::[] - -A Git secret consists of credentials to securely interact with a Git repository, and is often used to automate authentication. In {pipelines-title}, you can use Git secrets to authenticate pipeline runs and task runs that interact with a Git repository during execution. - -A pipeline run or a task run gains access to the secrets through the associated service account. {pipelines-shortname} support the use of Git secrets as annotations (key-value pairs) for basic authentication and SSH-based authentication. 
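As a concrete illustration of the annotation-based approach described above, a basic-authentication Git secret typically looks like the following sketch. The secret name, host, and credential placeholders are assumptions; the modules that follow describe the supported configurations in detail.

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: basic-auth-git-secret              # hypothetical secret name
  annotations:
    tekton.dev/git-0: https://github.com   # host that these credentials apply to
type: kubernetes.io/basic-auth
stringData:
  username: <username>
  password: <personal_access_token>
----

The secret takes effect only after you link it to the service account that the pipeline run or task run uses, for example the default `pipeline` service account.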
- -include::modules/op-understanding-credential-selection.adoc[leveloffset=+1] - -include::modules/op-configuring-basic-authentication-for-git.adoc[leveloffset=+1] - -include::modules/op-configuring-ssh-authentication-for-git.adoc[leveloffset=+1] - -include::modules/op-using-ssh-authentication-in-git-type-tasks.adoc[leveloffset=+1] - -include::modules/op-using-secrets-as-a-nonroot-user.adoc[leveloffset=+1] - -include::modules/op-limiting-secret-access-to-specific-steps.adoc[leveloffset=+1] diff --git a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc deleted file mode 100644 index 732c493b8662..000000000000 --- a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-applications-with-cicd-pipelines"] -= Creating CI/CD solutions for applications using {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: creating-applications-with-cicd-pipelines - -toc::[] - -With {pipelines-title}, you can create a customized CI/CD solution to build, test, and deploy your application. - -To create a full-fledged, self-serving CI/CD pipeline for an application, perform the following tasks: - -* Create custom tasks, or install existing reusable tasks. -* Create and define the delivery pipeline for your application. -* Provide a storage volume or filesystem that is attached to a workspace for the pipeline execution, using one of the following approaches: -** Specify a volume claim template that creates a persistent volume claim -** Specify a persistent volume claim -* Create a `PipelineRun` object to instantiate and invoke the pipeline. -* Add triggers to capture events in the source repository. - -This section uses the `pipelines-tutorial` example to demonstrate the preceding tasks. The example uses a simple application which consists of: - -* A front-end interface, `pipelines-vote-ui`, with the source code in the link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] Git repository. -* A back-end interface, `pipelines-vote-api`, with the source code in the link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repository. -* The `apply-manifests` and `update-deployment` tasks in the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository. - -== Prerequisites - -* You have access to an {product-title} cluster. -* You have installed xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname}] using the {pipelines-title} Operator listed in the OpenShift OperatorHub. After it is installed, it is applicable to the entire cluster. -* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[{pipelines-shortname} CLI]. -* You have forked the front-end link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] and back-end link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repositories using your GitHub ID, and have administrator access to these repositories. -* Optional: You have cloned the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository. 
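As a rough sketch of the workspace and `PipelineRun` items in the task list above, a pipeline run that provides storage through a volume claim template can look like the following example. The pipeline name, workspace name, and parameter are illustrative assumptions rather than values defined by the tutorial modules that follow.

[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  generateName: build-and-deploy-run-   # hypothetical name prefix; use `oc create -f` because of generateName
spec:
  pipelineRef:
    name: build-and-deploy              # hypothetical pipeline name
  params:
  - name: git-url                       # hypothetical parameter
    value: https://github.com/<your_github_id>/pipelines-vote-api.git
  workspaces:
  - name: shared-workspace              # hypothetical workspace name
    volumeClaimTemplate:                # creates a persistent volume claim for this run
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 500Mi
----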
- - -include::modules/op-creating-project-and-checking-pipeline-service-account.adoc[leveloffset=+1] - -include::modules/op-creating-pipeline-tasks.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc#managing-nonversioned-and-versioned-cluster-tasks[Managing non-versioned and versioned cluster tasks] - -include::modules/op-assembling-a-pipeline.adoc[leveloffset=+1] - -include::modules/op-mirroring-images-to-run-pipelines-in-restricted-environment.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry] - -include::modules/op-running-a-pipeline.adoc[leveloffset=+1] - -include::modules/op-adding-triggers.adoc[leveloffset=+1] - -include::modules/op-configuring-eventlisteners-to-serve-multiple-namespaces.adoc[leveloffset=+1] - -include::modules/op-creating-webhooks.adoc[leveloffset=+1] - -include::modules/op-triggering-a-pipelinerun.adoc[leveloffset=+1] - -include::modules/op-enabling-monitoring-of-event-listeners-for-triggers-for-user-defined-projects.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] - -include::modules/op-configuring-pull-request-capabilities-in-GitHub-interceptor.adoc[leveloffset=+1] - -include::modules/op-filtering-pull-requests-using-GitHub-interceptor.adoc[leveloffset=+2] - -include::modules/op-validating-pull-requests-using-GitHub-interceptors.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="pipeline-addtl-resources"] -== Additional resources - -* To include {pac} along with the application source code in the same repository, see xref:../../cicd/pipelines/using-pipelines-as-code.adoc#using-pipelines-as-code[Using {pac}]. -* For more details on pipelines in the *Developer* perspective, see the xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#working-with-pipelines-using-the-developer-perspective[working with pipelines in the *Developer* perspective] section. -* To learn more about Security Context Constraints (SCCs), see the xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing Security Context Constraints] section. -* For more examples of reusable tasks, see the link:https://github.com/openshift/pipelines-catalog[OpenShift Catalog] repository. Additionally, you can also see the Tekton Catalog in the Tekton project. -* To install and deploy a custom instance of Tekton Hub for reusable tasks and pipelines, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}]. -* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination]. 
-* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section. diff --git a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc b/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc deleted file mode 100644 index ed728694b569..000000000000 --- a/cicd/pipelines/customizing-configurations-in-the-tektonconfig-cr.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="customizing-configurations-in-the-tektonconfig-cr"] -= Customizing configurations in the TektonConfig custom resource -include::_attributes/common-attributes.adoc[] -:context: customizing-configurations-in-the-tektonconfig-cr - -toc::[] - -In {pipelines-title}, you can customize the following configurations by using the `TektonConfig` custom resource (CR): - -* Configuring the {pipelines-title} control plane -* Changing the default service account -* Disabling the service monitor -* Configuring pipeline resolvers -* Disabling cluster tasks and pipeline templates -* Disabling the integration of {tekton-hub} -* Disabling the automatic creation of RBAC resources -* Pruning of task runs and pipeline runs - -[id="prerequisites_customizing-configurations-in-the-tektonconfig-cr"] -== Prerequisites - -* You have installed the {pipelines-title} Operator. - -include::modules/op-configuring-pipelines-control-plane.adoc[leveloffset=+1] - -include::modules/op-modifiable-fields-with-default-values.adoc[leveloffset=+2] - -include::modules/op-optional-configuration-fields.adoc[leveloffset=+2] - -include::modules/op-changing-default-service-account.adoc[leveloffset=+1] - -include::modules/op-disabling-the-service-monitor.adoc[leveloffset=+1] - -include::modules/op-configuring-pipeline-resolvers.adoc[leveloffset=+1] - -include::modules/op-disabling-cluster-tasks-and-pipeline-templates.adoc[leveloffset=+1] - -include::modules/op-disabling-the-integretion-of-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-disabling-automatic-creation-of-rbac-resources.adoc[leveloffset=+1] - -include::modules/op-automatic-pruning-taskrun-pipelinerun.adoc[leveloffset=+1] - -include::modules/op-default-pruner-configuration.adoc[leveloffset=+2] - -include::modules/op-annotations-for-automatic-pruning-taskruns-pipelineruns.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_customizing-configurations-in-the-tektonconfig-cr"] -== Additional resources - -* xref:../../cicd/pipelines/authenticating-pipelines-using-git-secret.adoc#op-configuring-ssh-authentication-for-git_authenticating-pipelines-using-git-secret[Configuring SSH authentication for Git] -* xref:../../cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc#managing-nonversioned-and-versioned-cluster-tasks[Managing non-versioned and versioned cluster tasks] -* xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#using-custom-pipeline-template-for-git-import_working-with-pipelines-using-the-developer-perspective[Using a custom pipeline template for creating and deploying an application from a Git repository] -* xref:../../applications/pruning-objects.adoc#pruning-objects[Pruning objects to reclaim resources] diff --git a/cicd/pipelines/images b/cicd/pipelines/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/cicd/pipelines/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/cicd/pipelines/installing-pipelines.adoc 
b/cicd/pipelines/installing-pipelines.adoc deleted file mode 100644 index 03ed8d7b277e..000000000000 --- a/cicd/pipelines/installing-pipelines.adoc +++ /dev/null @@ -1,58 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-pipelines"] -= Installing {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: installing-pipelines - -toc::[] - -[role="_abstract"] -This guide walks cluster administrators through the process of installing the {pipelines-title} Operator to an {product-title} cluster. - -// Prerequisites for installing OpenShift Operator -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have installed `oc` CLI. -* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[{pipelines-shortname} (`tkn`) CLI] on your local system. -* Your cluster has the xref:../../installing/cluster-capabilities.adoc#marketplace-operator_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in the xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-obtaining-installer_installing-gcp-customizations[Obtaining the installation program] to install this Operator. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in xref:../../post_installation_configuration/preparing-for-users.adoc#olm-installing-operators-from-operatorhub-configure_post-install-preparing-for-users[Configuring {product-title} to use Red Hat Operators]. -endif::[] - - -//Installing pipelines Operator using web console - -include::modules/op-installing-pipelines-operator-in-web-console.adoc[leveloffset=+1] - -// Installing pipelines Operator using CLI - -include::modules/op-installing-pipelines-operator-using-the-cli.adoc[leveloffset=+1] - -// {pipelines-title} Operator in a restricted environment - -include::modules/op-pipelines-operator-in-restricted-environment.adoc[leveloffset=+1] - -include::modules/op-performance-tuning-using-tektonconfig-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* You can learn more about installing Operators on {product-title} in the xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[adding Operators to a cluster] section. - -* To install {tekton-chains} using the {pipelines-title} Operator, see xref:../../cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc#using-tekton-chains-for-openshift-pipelines-supply-chain-security[Using {tekton-chains} for {pipelines-title} supply chain security]. - -* To install and deploy in-cluster {tekton-hub}, see xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using {tekton-hub} with {pipelines-title}]. 
- -* For more information on using pipelines in a restricted environment, see: - -** xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.html#op-mirroring-images-to-run-pipelines-in-restricted-environment_creating-applications-with-cicd-pipelines[Mirroring images to run pipelines in a restricted environment] - -** xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster] - -** xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry] - diff --git a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc b/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc deleted file mode 100644 index ed9a3f96c984..000000000000 --- a/cicd/pipelines/managing-nonversioned-and-versioned-cluster-tasks.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-nonversioned-and-versioned-cluster-tasks"] -= Managing non-versioned and versioned cluster tasks -include::_attributes/common-attributes.adoc[] -:context: managing-nonversioned-and-versioned-cluster-tasks - -toc::[] - -As a cluster administrator, installing the {pipelines-title} Operator creates variants of each default cluster task known as _versioned cluster tasks_ (VCT) and _non-versioned cluster tasks_ (NVCT). For example, installing the {pipelines-title} Operator v1.7 creates a `buildah-1-7-0` VCT and a `buildah` NVCT. - -Both NVCT and VCT have the same metadata, behavior, and specifications, including `params`, `workspaces`, and `steps`. However, they behave differently when you disable them or upgrade the Operator. - -[IMPORTANT] -==== -In {pipelines-title} 1.10, cluster task functionality is deprecated and is planned to be removed in a future release. -==== - -include::modules/op-differences-between-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] - -include::modules/op-advantages-and-disadvantages-of-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] - -include::modules/op-disabling-non-versioned-and-versioned-cluster-tasks.adoc[leveloffset=+1] diff --git a/cicd/pipelines/modules b/cicd/pipelines/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cicd/pipelines/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cicd/pipelines/op-release-notes.adoc b/cicd/pipelines/op-release-notes.adoc deleted file mode 100644 index 3784aff18fc5..000000000000 --- a/cicd/pipelines/op-release-notes.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -//OpenShift Pipelines Release Notes -include::_attributes/common-attributes.adoc[] -[id="op-release-notes"] -= {pipelines-title} release notes -:context: op-release-notes - -toc::[] - -{pipelines-title} is a cloud-native CI/CD experience based on the Tekton project which provides: - -* Standard Kubernetes-native pipeline definitions (CRDs). -* Serverless pipelines with no CI server management overhead. -* Extensibility to build images using any Kubernetes tool, such as S2I, Buildah, JIB, and Kaniko. -* Portability across any Kubernetes distribution. -* Powerful CLI for interacting with pipelines. -* Integrated user experience with the *Developer* perspective of the {product-title} web console. 
- -For an overview of {pipelines-title}, see xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding {pipelines-shortname}]. - -include::modules/op-tkn-pipelines-compatibility-support-matrix.adoc[leveloffset=+1] - -include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] - -// Modules included, most to least recent -include::modules/op-release-notes-1-11.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-10.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-9.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-8.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-7.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-6.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-5.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-4.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-3.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-2.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-1.adoc[leveloffset=+1] - -include::modules/op-release-notes-1-0.adoc[leveloffset=+1] diff --git a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc deleted file mode 100644 index 764f7790ac1a..000000000000 --- a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="reducing-pipelines-resource-consumption"] -= Reducing resource consumption of {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: reducing-pipelines-resource-consumption - -toc::[] - -If you use clusters in multi-tenant environments you must control the consumption of CPU, memory, and storage resources for each project and Kubernetes object. This helps prevent any one application from consuming too many resources and affecting other applications. - -To define the final resource limits that are set on the resulting pods, {pipelines-title} use resource quota limits and limit ranges of the project in which they are executed. - -To restrict resource consumption in your project, you can: - -* xref:../../applications/quotas/quotas-setting-per-project.html[Set and manage resource quotas] to limit the aggregate resource consumption. -* Use xref:../../nodes/clusters/nodes-cluster-limit-ranges.html[limit ranges to restrict resource consumption] for specific objects, such as pods, images, image streams, and persistent volume claims. 
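A minimal limit range for the second approach in the list above might look like the following sketch; the name, namespace, and values are illustrative assumptions and must be tuned for your workloads.

[source,yaml]
----
apiVersion: v1
kind: LimitRange
metadata:
  name: pipeline-limit-range        # hypothetical name
  namespace: my-pipelines-project   # hypothetical namespace
spec:
  limits:
  - type: Container
    defaultRequest:                 # applied when a step does not set resource requests
      cpu: 100m
      memory: 256Mi
    default:                        # applied when a step does not set resource limits
      cpu: "1"
      memory: 1Gi
----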
- -include::modules/op-understanding-pipelines-resource-consumption.adoc[leveloffset=+1] - -include::modules/op-mitigating-extra-pipeline-resource-consumption.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_reducing-pipelines-resource-consumption"] -== Additional resources - -* xref:../../cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc#setting-compute-resource-quota-for-openshift-pipelines[Setting compute resource quota for {pipelines-shortname}] -* xref:../../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[Resource quotas per project] -* xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc#nodes-cluster-limit-ranges[Restricting resource consumption using limit ranges] -* link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources[Resource requests and limits in Kubernetes] diff --git a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc b/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc deleted file mode 100644 index 80ca525603ed..000000000000 --- a/cicd/pipelines/remote-pipelines-tasks-resolvers.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="remote-pipelines-tasks-resolvers"] -= Specifying remote pipelines and tasks using resolvers -include::_attributes/common-attributes.adoc[] -:context: remote-pipelines-tasks-resolvers - -toc::[] - -Pipelines and tasks are reusable blocks for your CI/CD processes. You can reuse pipelines or tasks that you previously developed, or that were developed by others, without having to copy and paste their definitions. These pipelines or tasks can be available from several types of sources, from other namespaces on your cluster to public catalogs. - -In a pipeline run resource, you can specify a pipeline from an existing source. In a pipeline resource or a task run resource, you can specify a task from an existing source. - -In these cases, the _resolvers_ in {pipelines-title} retrieve the pipeline or task definition from the specified source at run time. - -The following resolvers are available in a default installation of {pipelines-title}: - -Hub resolver:: Retrieves a task or pipeline from the Pipelines Catalog available on {artifact-hub} or {tekton-hub}. -Bundles resolver:: Retrieves a task or pipeline from a Tekton bundle, which is an OCI image available from any OCI repository, such as an OpenShift container repository. -Cluster resolver:: Retrieves a task or pipeline that is already created on the same {product-title} cluster in a specific namespace. -Git resolver:: Retrieves a task or pipeline binding from a Git repository. You must specify the repository, the branch, and the path. - -[id="resolver-hub_{context}"] -== Specifying a remote pipeline or task from a Tekton catalog -You can specify a remote pipeline or task that is defined in a public Tekton catalog, either link:https://artifacthub.io/[{artifact-hub}] or link:https://hub.tekton.dev/[{tekton-hub}], by using the hub resolver. - -include::modules/op-resolver-hub-config.adoc[leveloffset=+2] -include::modules/op-resolver-hub.adoc[leveloffset=+2] - -[id="resolver-bundles_{context}"] -== Specifying a remote pipeline or task from a Tekton bundle - -You can specify a remote pipeline or task from a Tekton bundle by using the bundles resolver. A Tekton bundle is an OCI image available from any OCI repository, such as an OpenShift container repository.
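For illustration, a task run that pulls its task from a Tekton bundle by using the bundles resolver can look like the following sketch. The bundle reference and task name are hypothetical; the configuration modules included next describe the supported parameters.

[source,yaml]
----
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: remote-bundle-task-run            # hypothetical name
spec:
  taskRef:
    resolver: bundles
    params:
    - name: bundle                        # hypothetical OCI reference for the bundle image
      value: registry.example.com/my-org/tekton-catalog:1.0
    - name: name                          # name of the task inside the bundle
      value: git-clone
    - name: kind
      value: task
----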
- -include::modules/op-resolver-bundle-config.adoc[leveloffset=+2] -include::modules/op-resolver-bundle.adoc[leveloffset=+2] - -[id="resolver-cluster_{context}"] -== Specifying a remote pipeline or task from the same cluster - -You can specify a remote pipeline or task that is defined in a namespace on the {product-title} cluster where {pipelines-title} is running by using the cluster resolver. - -include::modules/op-resolver-cluster-config.adoc[leveloffset=+2] -include::modules/op-resolver-cluster.adoc[leveloffset=+2] - -[id="resolver-git_{context}"] -== Specifying a remote pipeline or task from a Git repository - -You can specify a remote pipeline or task from a Git repository by using the Git resolver. The repository must contain a YAML file that defines the pipeline or task. The Git resolver can access a repository either by cloning it anonymously or by using the authenticated SCM API. - -include::modules/op-resolver-git-config-anon.adoc[leveloffset=+2] -include::modules/op-resolver-git-config-scm.adoc[leveloffset=+2] -include::modules/op-resolver-git.adoc[leveloffset=+2] diff --git a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc deleted file mode 100644 index 338c5d82c53f..000000000000 --- a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="securing-webhooks-with-event-listeners"] -= Securing webhooks with event listeners -include::_attributes/common-attributes.adoc[] -:context: securing-webhooks-with-event-listeners - -toc::[] - -As an administrator, you can secure webhooks with event listeners. After creating a namespace, you enable HTTPS for the `EventListener` resource by adding the `operator.tekton.dev/enable-annotation=enabled` label to the namespace. Then, you create a `Trigger` resource and a secured route using the re-encrypted TLS termination. - -Triggers in {pipelines-title} support insecure HTTP and secure HTTPS connections to the `EventListener` resource. HTTPS secures connections within and outside the cluster. - -{pipelines-title} runs a `tekton-operator-proxy-webhook` pod that watches for the labels in the namespace. When you add the label to the namespace, the webhook sets the `service.beta.openshift.io/serving-cert-secret-name=` annotation on the `EventListener` object. This, in turn, creates secrets and the required certificates. - -[source,terminal,subs="attributes+"] ----- -service.beta.openshift.io/serving-cert-secret-name= ----- - -In addition, you can mount the created secret into the `EventListener` pod to secure the request. - -include::modules/op-providing-secure-connection.adoc[leveloffset=+1] - -include::modules/op-sample-eventlistener-resource.adoc[leveloffset=+1] diff --git a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc b/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc deleted file mode 100644 index 293db99e61cf..000000000000 --- a/cicd/pipelines/setting-compute-resource-quota-for-openshift-pipelines.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="setting-compute-resource-quota-for-openshift-pipelines"] -= Setting compute resource quota for {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: setting-compute-resource-quota-for-openshift-pipelines - -toc::[] - -A `ResourceQuota` object in {pipelines-title} controls the total resource consumption per namespace.
You can use it to limit the quantity of objects created in a namespace, based on the type of the object. In addition, you can specify a compute resource quota to restrict the total amount of compute resources consumed in a namespace. - -However, you might want to limit the amount of compute resources consumed by pods resulting from a pipeline run, rather than setting quotas for the entire namespace. Currently, {pipelines-title} does not enable you to directly specify the compute resource quota for a pipeline. - -include::modules/op-alternative-approaches-compute-resource-quota-pipelines.adoc[leveloffset=+1] - -[NOTE] -==== -When using {pipelines-title} in a namespace configured with a `ResourceQuota` object, the pods resulting from task runs and pipeline runs might fail with an error, such as: `failed quota: must specify cpu, memory`. - -To avoid this error, do any one of the following: - -* (Recommended) Specify a limit range for the namespace. -* Explicitly define requests and limits for all containers. - -For more information, refer to the link:https://issues.redhat.com/browse/SRVKP-1801[issue] and the link:https://access.redhat.com/solutions/2841971[resolution]. -==== - -If your use case is not addressed by these approaches, you can implement a workaround by using a resource quota for a priority class. - -include::modules/op-specifying-pipelines-resource-quota-using-priority-class.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_setting-compute-resource-quota-for-pipelines"] -== Additional resources - -* xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc#nodes-cluster-limit-ranges[Restrict resource consumption with limit ranges] -* link:https://kubernetes.io/docs/concepts/policy/resource-quotas/[Resource quotas in Kubernetes] -* link:https://kubernetes.io/docs/concepts/policy/limit-range/[Limit ranges in Kubernetes] -* link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources[Resource requests and limits in Kubernetes] diff --git a/cicd/pipelines/snippets b/cicd/pipelines/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cicd/pipelines/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cicd/pipelines/understanding-openshift-pipelines.adoc b/cicd/pipelines/understanding-openshift-pipelines.adoc deleted file mode 100644 index 7aa1be6a3f91..000000000000 --- a/cicd/pipelines/understanding-openshift-pipelines.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-openshift-pipelines"] -= Understanding {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: understanding-openshift-pipelines - -toc::[] - -:FeatureName: OpenShift Pipelines - -{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple platforms by abstracting away the underlying implementation details. Tekton introduces a number of standard custom resource definitions (CRDs) for defining CI/CD pipelines that are portable across Kubernetes distributions. - -[id="op-key-features"] -== Key features - -* {pipelines-title} is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers. -* {pipelines-title} are designed for decentralized teams that work on microservice-based architecture. 
-* {pipelines-title} use standard CI/CD pipeline definitions that are easy to extend and integrate with the existing Kubernetes tools, enabling you to scale on-demand. -* You can use {pipelines-title} to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform. -* You can use the {product-title} web console *Developer* perspective to create Tekton resources, view logs of pipeline runs, and manage pipelines in your {product-title} namespaces. - -[id="op-detailed-concepts"] -== {pipelines-shortname} Concepts -This guide provides a detailed view of the various pipeline concepts. - -//About tasks -include::modules/op-about-tasks.adoc[leveloffset=+2] -//About when expression -include::modules/op-about-whenexpression.adoc[leveloffset=+2] -//About final tasks -include::modules/op-about-finally_tasks.adoc[leveloffset=+2] -//About task run -include::modules/op-about-taskrun.adoc[leveloffset=+2] -//About pipelines -include::modules/op-about-pipelines.adoc[leveloffset=+2] -//About pipeline run -include::modules/op-about-pipelinerun.adoc[leveloffset=+2] -//About workspace -include::modules/op-about-workspace.adoc[leveloffset=+2] -//About triggers -include::modules/op-about-triggers.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* For information on installing {pipelines-shortname}, see xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}]. -* For more details on creating custom CI/CD solutions, see xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#creating-applications-with-cicd-pipelines[Creating CI/CD solutions for applications using {pipelines-shortname}]. -* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination]. -* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section. diff --git a/cicd/pipelines/uninstalling-pipelines.adoc b/cicd/pipelines/uninstalling-pipelines.adoc deleted file mode 100644 index c3489bd15c38..000000000000 --- a/cicd/pipelines/uninstalling-pipelines.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-pipelines"] -= Uninstalling {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: uninstalling-pipelines - -toc::[] - -Cluster administrators can uninstall the {pipelines-title} Operator by performing the following steps: - -. Delete the Custom Resources (CRs) that were added by default when you installed the {pipelines-title} Operator. -. Delete the CRs of the optional components, such as {tekton-chains}, that are dependent on the Operator. -+ -[CAUTION] -==== -If you uninstall the Operator without removing the CRs of optional components, you cannot remove them later. -==== -. Uninstall the {pipelines-title} Operator. - -Uninstalling only the Operator will not remove the {pipelines-title} components created by default when the Operator is installed. 
- -include::modules/op-deleting-the-pipelines-component-and-custom-resources.adoc[leveloffset=+1] - -include::modules/op-uninstalling-the-pipelines-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster] section. diff --git a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc b/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc deleted file mode 100644 index 48734d9ea68c..000000000000 --- a/cicd/pipelines/unprivileged-building-of-container-images-using-buildah.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="unprivileged-building-of-container-images-using-buildah"] -= Building of container images using Buildah as a non-root user -include::_attributes/common-attributes.adoc[] -:context: unprivileged-building-of-container-images-using-buildah - -toc::[] - -Running {pipelines-shortname} as the root user on a container can expose the container processes and the host to other potentially malicious resources. You can reduce this type of exposure by running the workload as a specific non-root user in the container. To run builds of container images using Buildah as a non-root user, you can perform the following steps: - -* Define custom service account (SA) and security context constraint (SCC). -* Configure Buildah to use the `build` user with id `1000`. -* Start a task run with a custom config map, or integrate it with a pipeline run. - -include::modules/op-configuring-custom-sa-and-scc.adoc[leveloffset=+1] -include::modules/op-configuring-buildah-to-use-build-user.adoc[leveloffset=+1] -include::modules/op-starting-a-task-run-pipeline-run-build-user.adoc[leveloffset=+1] -include::modules/op-limitations-of-unprivileged-builds.adoc[leveloffset=+1] - - -.Additional resources - -* xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints (SCCs)] diff --git a/cicd/pipelines/using-pipelines-as-code.adoc b/cicd/pipelines/using-pipelines-as-code.adoc deleted file mode 100644 index ddf4f6948ee9..000000000000 --- a/cicd/pipelines/using-pipelines-as-code.adoc +++ /dev/null @@ -1,148 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-pipelines-as-code"] -= Using {pac} -include::_attributes/common-attributes.adoc[] -:context: using-pipelines-as-code - -toc::[] - - -// :FeatureName: Pipelines as Code -[role="_abstract"] -With {pac}, cluster administrators and users with the required privileges can define pipeline templates as part of source code Git repositories. When triggered by a source code push or a pull request for the configured Git repository, {pac} runs the pipeline and reports the status. - -[id="pac-key-features"] -== Key features -{pac} supports the following features: - -* Pull request status and control on the platform hosting the Git repository. -* GitHub Checks API to set the status of a pipeline run, including rechecks. -* GitHub pull request and commit events. -* Pull request actions in comments, such as `/retest`. -* Git events filtering and a separate pipeline for each event. -* Automatic task resolution in {pipelines-shortname}, including local tasks, Tekton Hub, and remote URLs. -* Retrieval of configurations using GitHub blobs and objects API. 
-* Access Control List (ACL) over a GitHub organization, or using a Prow style `OWNER` file. -* The `tkn pac` CLI plugin for managing bootstrapping and {pac} repositories. -* Support for GitHub App, GitHub Webhook, Bitbucket Server, and Bitbucket Cloud. - -include::modules/op-installing-pipelines-as-code-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/op-installing-pipelines-as-code-cli.adoc[leveloffset=+1] - -[id="using-pipelines-as-code-with-a-git-repository-hosting-service-provider"] -== Using {pac} with a Git repository hosting service provider - -[role="_abstract"] -After installing {pac}, cluster administrators can configure a Git repository hosting service provider. Currently, the following services are supported: - -* GitHub App -* GitHub Webhook -* GitLab -* Bitbucket Server -* Bitbucket Cloud - -[NOTE] -==== -GitHub App is the recommended service for using with {pac}. -==== - -include::modules/op-using-pipelines-as-code-with-a-github-app.adoc[leveloffset=+1] - -include::modules/op-creating-a-github-application-in-administrator-perspective.adoc[leveloffset=+2] - -include::modules/op-scoping-github-token.adoc[leveloffset=+2] - -include::modules/op-using-pipelines-as-code-with-github-webhook.adoc[leveloffset=+1] - -.Additional resources - -* link:https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks[GitHub Webhook documentation on GitHub] -* link:https://docs.github.com/en/rest/guides/getting-started-with-the-checks-api[GitHub Check Runs documentation on GitHub] -* link:https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token[Creating a personal access token on GitHub] -* link:https://github.com/settings/tokens/new?description=pipelines-as-code-token&scopes=repo[Classic tokens with pre-filled permissions] - -include::modules/op-using-pipelines-as-code-with-gitlab.adoc[leveloffset=+1] - -.Additional resources - -* link:https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html[GitLab Webhook documentation on GitLab] - -include::modules/op-using-pipelines-as-code-with-bitbucket-cloud.adoc[leveloffset=+1] - -.Additional resources - -* link:https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/[Creating app password on Bitbucket Cloud] -* link:https://developer.atlassian.com/cloud/bitbucket/bitbucket-api-changes-gdpr/#introducing-atlassian-account-id-and-nicknames[Introducing Altassian Account ID and Nicknames] - -include::modules/op-using-pipelines-as-code-with-bitbucket-server.adoc[leveloffset=+1] - -.Additional resources - -* link:https://confluence.atlassian.com/bitbucketserver/personal-access-tokens-939515499.html[Creating personal tokens on Bitbucket Server] -* link:https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/#Create-webhooks[Creating webhooks on Bitbucket server] - -include::modules/op-interfacing-pipelines-as-code-with-custom-certificates.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object[Enabling the cluster-wide proxy] - -include::modules/op-using-repository-crd-with-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-setting-concurrency-limits-in-repository-crd.adoc[leveloffset=+2] - -include::modules/op-changing-source-branch-in-repository-crd.adoc[leveloffset=+2] - -include::modules/op-custom-parameter-expansion.adoc[leveloffset=+2] - -include::modules/op-using-pipelines-as-code-resolver.adoc[leveloffset=+1] - 
-include::modules/op-using-remote-task-annotations-with-pipelines-as-code.adoc[leveloffset=+2] - -include::modules/op-using-remote-pipeline-annotations-with-pipelines-as-code.adoc[leveloffset=+2] - -include::modules/op-creating-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/google/cel-spec/blob/master/doc/langdef.md[CEL language specification] - -include::modules/op-running-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-monitoring-pipeline-run-status-using-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/chmouel/tekton-slack-task-status[An example task to send Slack messages on success or failure] -* link:https://github.com/openshift-pipelines/pipelines-as-code/blob/7b41cc3f769af40a84b7ead41c6f037637e95070/.tekton/push.yaml[An example of a pipeline run with `finally` tasks triggered on push events] - -include::modules/op-using-private-repositories-with-pipelines-as-code.adoc[leveloffset=+1] - -.Additional resources - -* link:https://github.com/openshift-pipelines/pipelines-as-code/blob/main/test/testdata/pipelinerun_git_clone_private.yaml[An example of the `git-clone` task used for cloning private repositories] - -include::modules/op-cleaning-up-pipeline-run-using-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-using-incoming-webhook-with-pipelines-as-code.adoc[leveloffset=+1] - -include::modules/op-customizing-pipelines-as-code-configuration.adoc[leveloffset=+1] - -include::modules/op-pipelines-as-code-command-reference.adoc[leveloffset=+1] - -include::modules/op-splitting-pipelines-as-code-logs-by-namespace.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-pac"] -== Additional resources - -* link:https://github.com/openshift-pipelines/pipelines-as-code/tree/main/.tekton[An example of the `.tekton/` directory in the Pipelines as Code repository] - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] - -* xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[Installing tkn] - -* xref:../../cicd/pipelines/op-release-notes.adoc#op-release-notes[{pipelines-title} release notes] - -* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-the-developer-perspective[Creating applications using the Developer perspective] diff --git a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc deleted file mode 100644 index 323b4b627d52..000000000000 --- a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-pods-in-a-privileged-security-context"] -= Using pods in a privileged security context -include::_attributes/common-attributes.adoc[] -:context: using-pods-in-a-privileged-security-context - -toc::[] - -The default configuration of {pipelines-shortname} 1.3.x and later versions does not allow you to run pods with privileged security context, if the pods result from pipeline run or task run. -For such pods, the default service account is `pipeline`, and the security context constraint (SCC) associated with the `pipeline` service account is `pipelines-scc`. 
The `pipelines-scc` SCC is similar to the `anyuid` SCC, but with minor differences as defined in the YAML file for the SCC of pipelines: - -.Example `pipelines-scc.yaml` snippet -[source,yaml,subs="attributes+"] ----- -apiVersion: security.openshift.io/v1 -kind: SecurityContextConstraints -... -allowedCapabilities: - - SETFCAP -... -fsGroup: - type: MustRunAs -... ----- - -In addition, the `Buildah` cluster task, shipped as part of {pipelines-shortname}, uses `vfs` as the default storage driver. - -include::modules/op-running-pipeline-and-task-run-pods-with-privileged-security-context.adoc[leveloffset=+1] - -include::modules/op-running-pipeline-run-and-task-run-with-custom-scc-and-service-account.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-references_using-pods-in-a-privileged-security-context"] -== Additional resources - -* For information on managing SCCs, refer to xref:../../authentication/managing-security-context-constraints.adoc[Managing security context constraints]. diff --git a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc b/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc deleted file mode 100644 index a1c864d12d04..000000000000 --- a/cicd/pipelines/using-tekton-chains-for-openshift-pipelines-supply-chain-security.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-tekton-chains-for-openshift-pipelines-supply-chain-security"] -= Using Tekton Chains for {pipelines-shortname} supply chain security -include::_attributes/common-attributes.adoc[] -:context: using-tekton-chains-for-openshift-pipelines-supply-chain-security - -toc::[] - -[role="_abstract"] -{tekton-chains} is a Kubernetes Custom Resource Definition (CRD) controller. You can use it to manage the supply chain security of the tasks and pipelines created using {pipelines-title}. - -By default, {tekton-chains} observes all task run executions in your {product-title} cluster. When the task runs complete, {tekton-chains} takes a snapshot of the task runs. It then converts the snapshot to one or more standard payload formats, and finally signs and stores all artifacts. - -To capture information about task runs, {tekton-chains} uses `Result` objects. When the objects are unavailable, {tekton-chains} uses the URLs and qualified digests of the OCI images. - -[id="tc-key-features"] -== Key features -* You can sign task runs, task run results, and OCI registry images with cryptographic keys that are generated by tools such as `cosign` and `skopeo`. -* You can use attestation formats such as `in-toto`. -* You can securely store signatures and signed artifacts using an OCI repository as a storage backend.
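As a sketch of what the configuration covered in the following modules typically involves, {tekton-chains} settings are expressed as key-value pairs. Assuming that the Operator exposes these settings under the `chain` field of the `TektonConfig` custom resource, and using key names from the upstream Tekton Chains configuration, an example might look like this.

[source,yaml]
----
apiVersion: operator.tekton.dev/v1alpha1
kind: TektonConfig
metadata:
  name: config
spec:
  chain:                                  # assumption: Chains settings live under spec.chain
    artifacts.taskrun.format: in-toto     # payload format for task run attestations
    artifacts.taskrun.storage: oci        # store signed task run payloads in an OCI registry
    artifacts.oci.storage: oci            # store signed image attestations in an OCI registry
----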
- -include::modules/op-configuring-tekton-chains.adoc[leveloffset=+1] - -include::modules/op-supported-parameters-tekton-chains-configuration.adoc[leveloffset=+2] - -include::modules/op-signing-secrets-in-tekton-chains.adoc[leveloffset=+1] -include::modules/op-chains-signing-secrets-cosign.adoc[leveloffset=+2] -include::modules/op-chains-signing-secrets-skopeo.adoc[leveloffset=+2] -include::modules/op-chains-resolving-existing-secret.adoc[leveloffset=+2] - -include::modules/op-authenticating-to-an-oci-registry.adoc[leveloffset=+1] - -include::modules/op-creating-and-verifying-task-run-signatures-without-any-additional-authentication.adoc[leveloffset=+1] -=== Additional resources - -* xref:signing-secrets-in-tekton-chains_{context}[] -* xref:configuring-tekton-chains_{context}[] - -include::modules/op-using-tekton-chains-to-sign-and-verify-image-and-provenance.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-tekton-chains"] -== Additional resources - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] diff --git a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc b/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc deleted file mode 100644 index c4b1c5f07da0..000000000000 --- a/cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-tekton-hub-with-openshift-pipelines"] -= Using Tekton Hub with {pipelines-shortname} -include::_attributes/common-attributes.adoc[] -:context: using-tekton-hub-with-openshift-pipelines - -toc::[] - -:FeatureName: Tekton Hub -include::snippets/technology-preview.adoc[] - -[role="_abstract"] -{tekton-hub} helps you discover, search, and share reusable tasks and pipelines for your CI/CD workflows. A public instance of {tekton-hub} is available at link:https://hub.tekton.dev/[hub.tekton.dev]. Cluster administrators can also install and deploy a custom instance of {tekton-hub} by modifying the configurations in the `TektonHub` custom resource (CR). 
- -include::modules/op-installing-and-deploying-tekton-hub-on-an-openshift-cluster.adoc[leveloffset=+1] - -include::modules/op-installing-tekton-hub-without-login-and-rating.adoc[leveloffset=+2] - -include::modules/op-installing-tekton-hub-with-login-and-rating.adoc[leveloffset=+2] - -include::modules/op-using-a-custom-database-in-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-installing-crunchy-postgres-database-and-tekton-hub.adoc[leveloffset=+2] - -include::modules/op-migrating-tekton-hub-data-to-an-existing-crunchy-postgres-database.adoc[leveloffset=+2] - -include::modules/op-updating-tekton-hub-with-custom-categories-and-catalogs.adoc[leveloffset=+1] - -include::modules/op-modifying-catalog-refresh-interval-tekton-hub.adoc[leveloffset=+1] - -include::modules/op-adding-new-users-in-tekton-hub-configuration.adoc[leveloffset=+1] - -include::modules/op-disabling-tekton-hub-authorization-after-upgrade.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-tekton-hub"] -== Additional resources - -* GitHub repository of link:https://github.com/tektoncd/hub[Tekton Hub] - -* xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing {pipelines-shortname}] - -* xref:../../cicd/pipelines/op-release-notes.adoc#op-release-notes[{pipelines-title} release notes] \ No newline at end of file diff --git a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc deleted file mode 100644 index 0a4b75bc2866..000000000000 --- a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="viewing-pipeline-logs-using-the-openshift-logging-operator"] -= Viewing pipeline logs using the OpenShift Logging Operator -include::_attributes/common-attributes.adoc[] -:context: viewing-pipeline-logs-using-the-openshift-logging-operator - -toc::[] - -The logs generated by pipeline runs, task runs, and event listeners are stored in their respective pods. It is useful to review and analyze logs for troubleshooting and audits. - -However, retaining the pods indefinitely leads to unnecessary resource consumption and cluttered namespaces. - -To eliminate any dependency on the pods for viewing pipeline logs, you can use the OpenShift Elasticsearch Operator and the OpenShift Logging Operator. These Operators help you to view pipeline logs by using the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Elasticsearch Kibana] stack, even after you have deleted the pods that contained the logs. - -[id="prerequisites_viewing-pipeline-logs-using-the-openshift-logging-operator"] -== Prerequisites - -Before trying to view pipeline logs in a Kibana dashboard, ensure the following: - -* The steps are performed by a cluster administrator. -* Logs for pipeline runs and task runs are available. -* The OpenShift Elasticsearch Operator and the OpenShift Logging Operator are installed. 
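One way to confirm the Operator prerequisites is to list the installed ClusterServiceVersions. This is a sketch that assumes the default `openshift-logging` and `openshift-operators-redhat` namespaces:

[source,terminal]
----
$ oc get csv -n openshift-logging
$ oc get csv -n openshift-operators-redhat
----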
- -include::modules/op-viewing-pipeline-logs-in-kibana.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_viewing-pipeline-logs-using-the-openshift-logging-operator"] -== Additional resources - -* xref:../../logging/cluster-logging-deploying.adoc[Installing OpenShift Logging] -* xref:../../logging/viewing-resource-logs.adoc[Viewing logs for a resource] -* xref:../../logging/cluster-logging-visualizer.adoc[Viewing cluster logs by using Kibana] diff --git a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc b/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc deleted file mode 100644 index f81e5c6f689b..000000000000 --- a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-pipelines-using-the-developer-perspective"] -= Working with {pipelines-title} using the Developer perspective -include::_attributes/common-attributes.adoc[] -:context: working-with-pipelines-using-the-developer-perspective - -toc::[] - -[role="_abstract"] -You can use the *Developer* perspective of the {product-title} web console to create CI/CD pipelines for your software delivery process. - -In the *Developer* perspective: - -* Use the *Add* -> *Pipeline* -> *Pipeline builder* option to create customized pipelines for your application. -* Use the *Add* -> *From Git* option to create pipelines using operator-installed pipeline templates and resources while creating an application on {product-title}. - -After you create the pipelines for your application, you can view and visually interact with the deployed pipelines in the *Pipelines* view. You can also use the *Topology* view to interact with the pipelines created using the *From Git* option. You must apply custom labels to pipelines created using the *Pipeline builder* to see them in the *Topology* view. - -[discrete] -== Prerequisites - -* You have access to an {product-title} cluster and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. -* You have the xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[{pipelines-shortname} Operator installed] in your cluster. -* You are a cluster administrator or a user with create and edit permissions. -* You have created a project. 
- - -include::modules/op-constructing-pipelines-using-pipeline-builder.adoc[leveloffset=+1] - -include::modules/op-creating-pipelines-along-with-applications.adoc[leveloffset=+1] - -include::modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc[leveloffset=+1] - -include::modules/op-interacting-with-pipelines-using-the-developer-perspective.adoc[leveloffset=+1] - -include::modules/op-using-custom-pipeline-template-for-git-import.adoc[leveloffset=+1] - -include::modules/op-starting-pipelines-from-pipelines-view.adoc[leveloffset=+1] - -include::modules/op-starting-pipelines-from-topology-view.adoc[leveloffset=+1] - -include::modules/op-interacting-pipelines-from-topology-view.adoc[leveloffset=+1] - -include::modules/op-editing-pipelines.adoc[leveloffset=+1] - -include::modules/op-deleting-pipelines.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-working-with-pipelines-using-the-developer-perspective"] -== Additional resources - -* xref:../../cicd/pipelines/using-tekton-hub-with-openshift-pipelines.adoc#using-tekton-hub-with-openshift-pipelines[Using Tekton Hub with {pipelines-shortname}] diff --git a/cicd/snippets b/cicd/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/cicd/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/cli_reference/_attributes b/cli_reference/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cli_reference/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/_attributes b/cli_reference/developer_cli_odo/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/developer_cli_odo/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/attributes b/cli_reference/developer_cli_odo/attributes deleted file mode 120000 index 5b32de1e01e7..000000000000 --- a/cli_reference/developer_cli_odo/attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc deleted file mode 100644 index db390d2e31ca..000000000000 --- a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc +++ /dev/null @@ -1,27 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='configuring-the-odo-cli'] -= Configuring the odo CLI -include::_attributes/common-attributes.adoc[] -:context: configuring-the-odo-cli - -toc::[] - -// Comment out per https://issues.redhat.com/browse/RHDEVDOCS-3594 -// include::modules/developer-cli-odo-using-command-completion.adoc[leveloffset=+1] - -You can find the global settings for `odo` in the `preference.yaml` file which is located by default in your `$HOME/.odo` directory. - -You can set a different location for the `preference.yaml` file by exporting the `GLOBALODOCONFIG` variable. 
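For example, to point `odo` at a preference file in a non-default location, export the variable before running `odo` commands. The path shown is only a placeholder:

[source,terminal]
----
$ export GLOBALODOCONFIG=/home/user/myodo/preference.yaml
----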
- -// view config -include::modules/developer-cli-odo-view-config.adoc[leveloffset=+1] -// set key -include::modules/developer-cli-odo-set-config.adoc[leveloffset=+1] -// unset key -include::modules/developer-cli-odo-unset-config.adoc[leveloffset=+1] -// preference ref table -include::modules/developer-cli-odo-preference-table.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-ignoring-files-or-patterns.adoc[leveloffset=+1] -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc deleted file mode 100644 index cfa8616d9b80..000000000000 --- a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-instances-of-services-managed-by-operators] -= Creating instances of services managed by Operators -include::_attributes/common-attributes.adoc[] -:context: creating-instances-of-services-managed-by-operators - -toc::[] - -Operators are a method of packaging, deploying, and managing Kubernetes services. With `{odo-title}`, you can create instances of services from the custom resource definitions (CRDs) provided by the Operators. You can then use these instances in your projects and link them to your components. - -To create services from an Operator, you must ensure that the Operator has valid values defined in its `metadata` to start the requested service. `{odo-title}` uses the `metadata.annotations.alm-examples` YAML file of an Operator to start -the service. If this YAML has placeholder values or sample values, a service cannot start. You can modify the YAML file and start the service with the modified values. To learn how to modify YAML files and start services from it, see xref:../../cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc#creating-services-from-yaml-files_creating-instances-of-services-managed-by-operators[Creating services from YAML files]. - -== Prerequisites -* Install the `oc` CLI and log in to the cluster. -** Note that the configuration of the cluster determines the services available to you. To access the Operator services, a cluster administrator must install the respective Operator on the cluster first. To learn more, see xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Adding Operators to the cluster]. -* Install the `{odo-title}` CLI. 
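As a quick check of which Operator-backed services are available in your current project, you can list them from the catalog. This sketch assumes `odo` 2.x command syntax:

[source,terminal]
----
$ odo catalog list services
----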
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] -include::modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-a-service-from-an-operator.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-services-from-yaml-files.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc deleted file mode 100644 index e30e041083b7..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-a-java-application-with-a-database] -= Creating a Java application with a database -include::_attributes/common-attributes.adoc[] -:context: creating-a-java-application-with-a-database -toc::[] - -This example describes how to deploy a Java application by using devfile and connect it to a database service. - -.Prerequisites - -* A running cluster. -* `{odo-title}` is installed. -* A Service Binding Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub]. -* A Dev4Devs PostgreSQL Operator Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub]. 
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-database-with-odo.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc deleted file mode 100644 index 6904fe98e44d..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='creating-a-multicomponent-application-with-odo'] -= Creating a multicomponent application with `{odo-title}` -:context: creating-a-multicomponent-application-with-odo - -toc::[] - -`{odo-title}` allows you to create a multicomponent application, modify it, and link its components in an easy and automated way. - -This example describes how to deploy a multicomponent application - a shooter game. The application consists of a front-end Node.js component and a back-end Java component. - -.Prerequisites - -* `{odo-title}` is installed. -* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. -* Maven is installed. - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-back-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-linking-both-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-exposing-the-components-to-the-public.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-modifying-the-running-application.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc deleted file mode 100644 index a5cbc0653c55..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='creating-a-single-component-application-with-odo'] -= Creating a single-component application with {odo-title} - -:context: creating-a-single-component-application-with-odo - -toc::[] - -With `{odo-title}`, you can create and deploy applications on clusters. - -.Prerequisites - -* `{odo-title}` is installed. -* You have a running cluster. You can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. 
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-modifying-your-application-code.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc deleted file mode 100644 index af8e2d948469..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id=creating-an-application-with-a-database] -= Creating an application with a database -include::_attributes/common-attributes.adoc[] -:context: creating-an-application-with-a-database - -toc::[] - -This example describes how to deploy and connect a database to a front-end application. - -.Prerequisites - -* `{odo-title}` is installed. -* `oc` client is installed. -* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}] to deploy a local cluster quickly. -* The Service Catalog is installed and enabled on your cluster. -+ -[NOTE] -==== -Service Catalog is deprecated on {product-title} 4 and later. -==== - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-deploying-a-database-manually.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-connecting-the-database.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc deleted file mode 100644 index 67d9f4478a59..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id='debugging-applications-in-odo'] -= Debugging applications in `{odo-title}` -:context: debugging-applications-in-odo - -toc::[] - -With `{odo-title}`, you can attach a debugger to remotely debug your application. This feature is only supported for NodeJS and Java components. - -Components created with `{odo-title}` run in the debug mode by default. A debugger agent runs on the component, on a specific port. To start debugging your application, you must start port forwarding and attach the local debugger bundled in your Integrated development environment (IDE). 
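As an example of the port-forwarding step, assuming `odo` 2.x command syntax, where the local port value is only an illustration:

[source,terminal]
----
$ odo debug port-forward --local-port 5858
----

You can then attach the debugger in your IDE to the forwarded local port.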
- -include::modules/developer-cli-odo-debugging-an-application.adoc[leveloffset=+1] -include::modules/developer-cli-odo-configuring-debugging-parameters.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc deleted file mode 100644 index fba30e7d3f48..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id='deleting-applications'] -= Deleting applications -include::_attributes/common-attributes.adoc[] -:context: deleting-applications - -toc::[] - -You can delete applications and all components associated with the application in your project. - -include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules deleted file mode 120000 index 5be29a99c161..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc deleted file mode 100644 index 962f39a629ad..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="sample-applications"] -= Sample applications -include::_attributes/common-attributes.adoc[] -:context: using-sample-applications - -toc::[] - -`{odo-title}` offers partial compatibility with any language or runtime listed within the {product-title} catalog of component types. For example: - -[source,terminal] ----- -NAME PROJECT TAGS -dotnet openshift 3.1,latest -httpd openshift 2.4,latest -java openshift 8,latest -nginx openshift 1.10,1.12,1.8,latest -nodejs openshift 0.10,4,6,8,latest -perl openshift 5.16,5.20,5.24,latest -php openshift 5.5,5.6,7.0,7.1,latest -python openshift 2.7,3.3,3.4,3.5,3.6,latest -ruby openshift 2.0,2.2,2.3,2.4,latest -wildfly openshift 10.0,10.1,8.1,9.0,latest ----- - -[NOTE] -==== -For `{odo-title}` Java and Node.js are the officially supported component types. -Run `odo catalog list components` to verify the officially supported component types. -==== - -To access the component over the web, create a URL using `odo url create`. 
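For example, assuming `odo` 2.x command syntax, where the URL name and port are placeholders:

[source,terminal]
----
$ odo url create myurl --port 8080
$ odo push
----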
- - - -include::modules/developer-cli-odo-sample-applications-git.adoc[leveloffset=+1] -include::modules/developer-cli-odo-sample-applications-binary.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc deleted file mode 100644 index fec5f98f977a..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-devfiles-in-odo"] -= Using devfiles in {odo-title} -include::_attributes/common-attributes.adoc[] -:context: creating-applications-by-using-devfiles - -toc::[] - -include::modules/developer-cli-odo-about-devfiles-in-odo.adoc[leveloffset=+1] - -== Creating a Java application by using a devfile - -.Prerequisites - -* You have installed `{odo-title}`. -* You must know your ingress domain cluster name. Contact your cluster administrator if you do not know it. For example, `apps-crc.testing` is the cluster domain name for https://access.redhat.com/documentation/en-us/red_hat_openshift_local/[{openshift-local-productname}]. - -[NOTE] -==== -Currently odo does not support creating devfile components with `--git` or `--binary` flags. You can only create S2I components when using these flags. -==== - -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-listing-available-devfile-components.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc[leveloffset=+2] - -include::modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc deleted file mode 100644 index bc36206cd0d9..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="working-with-projects"] -= Working with projects -include::_attributes/common-attributes.adoc[] -:context: working-with-projects - -toc::[] - -Project keeps your source code, tests, and libraries organized in a separate single unit. 
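For example, to create a project, assuming `odo` 2.x command syntax, where the project name is a placeholder:

[source,terminal]
----
$ odo project create myproject
----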
- -include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc deleted file mode 100644 index 87ecdf93ac03..000000000000 --- a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id='working-with-storage'] -= Working with storage -include::_attributes/common-attributes.adoc[] -:context: working-with-storage - -toc::[] - -Persistent storage keeps data available between restarts of `{odo-title}`. - -include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../storage/understanding-ephemeral-storage.adoc#storage-ephemeral-storage-overview_understanding-ephemeral-storage[Understanding ephemeral storage]. -* xref:../../../storage/understanding-persistent-storage.adoc#persistent-storage-overview_understanding-persistent-storage[Understanding persistent storage] diff --git a/cli_reference/developer_cli_odo/images b/cli_reference/developer_cli_odo/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/developer_cli_odo/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/installing-odo.adoc b/cli_reference/developer_cli_odo/installing-odo.adoc deleted file mode 100644 index 813a0f156199..000000000000 --- a/cli_reference/developer_cli_odo/installing-odo.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='installing-odo'] -= Installing odo -include::_attributes/common-attributes.adoc[] -:context: installing-odo - -toc::[] - -// The following section describes how to install `{odo-title}` on different platforms using the CLI or the Visual Studio Code (VS Code) IDE. - -You can install the `{odo-title}` CLI on Linux, Windows, or macOS by downloading a binary. You can also install the OpenShift VS Code extension, which uses both the `{odo-title}` and the `oc` binaries to interact with your OpenShift Container Platform cluster. For {op-system-base-full}, you can install the `{odo-title}` CLI as an RPM. - -[NOTE] -==== -Currently, `{odo-title}` does not support installation in a restricted network environment. 
-==== - -// You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools* - -include::modules/developer-cli-odo-installing-odo-on-linux.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-windows.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-macos.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-vs-code.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc[leveloffset=+1] -//// diff --git a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc deleted file mode 100644 index 1b37feceb671..000000000000 --- a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id='managing-environment-variables'] -= Managing environment variables -include::_attributes/common-attributes.adoc[] -:context: managing-environment-variables - -toc::[] - -`{odo-title}` stores component-specific configurations and environment variables in the `config` file. You can use the `odo config` command to set, unset, and list environment variables for components without the need to modify the `config` file. - -include::modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/modules b/cli_reference/developer_cli_odo/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/cli_reference/developer_cli_odo/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/odo-architecture.adoc b/cli_reference/developer_cli_odo/odo-architecture.adoc deleted file mode 100644 index 9a68934dc11e..000000000000 --- a/cli_reference/developer_cli_odo/odo-architecture.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="odo-architecture"] -= odo architecture -include::_attributes/common-attributes.adoc[] -:context: odo-architecture - -toc::[] - -This section describes `{odo-title}` architecture and how `{odo-title}` manages resources on a cluster. 
- -include::modules/developer-cli-odo-developer-setup.adoc[leveloffset=+1] -include::modules/developer-cli-odo-openshift-source-to-image.adoc[leveloffset=+1] -include::modules/developer-cli-odo-openshift-cluster-objects.adoc[leveloffset=+1] -include::modules/developer-cli-odo-push-workflow.adoc[leveloffset=+1] - -// [role="_additional-resources"] -// == Additional resources diff --git a/cli_reference/developer_cli_odo/odo-cli-reference.adoc b/cli_reference/developer_cli_odo/odo-cli-reference.adoc deleted file mode 100644 index faa2e5650493..000000000000 --- a/cli_reference/developer_cli_odo/odo-cli-reference.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='odo-cli-reference'] -= odo CLI reference -include::_attributes/common-attributes.adoc[] -:context: odo-cli-reference - -toc::[] - -include::modules/developer-cli-odo-ref-build-images.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-catalog.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-create.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-delete.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-deploy.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-link.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-service.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-storage.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-flags.adoc[leveloffset=+1] -include::modules/developer-cli-odo-ref-json-output.adoc[leveloffset=+1] -//// diff --git a/cli_reference/developer_cli_odo/odo-release-notes.adoc b/cli_reference/developer_cli_odo/odo-release-notes.adoc deleted file mode 100644 index 645fca7f25c9..000000000000 --- a/cli_reference/developer_cli_odo/odo-release-notes.adoc +++ /dev/null @@ -1,74 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id='odo-release-notes'] -= `{odo-title}` release notes -include::_attributes/common-attributes.adoc[] -:context: odo-release-notes - -toc::[] - -[id="odo-notable-improvements_{context}"] -== Notable changes and improvements in `{odo-title}` version 2.5.0 - -// #5238 -* Creates unique routes for each component, using `adler32` hashing -// #5252 -* Supports additional fields in the devfile for assigning resources: -** cpuRequest -** cpuLimit -** memoryRequest -** memoryLimit -// #5276 -* Adds the `--deploy` flag to the `odo delete` command, to remove components deployed using the `odo deploy` command: -+ -[source,terminal] ----- -$ odo delete --deploy ----- -// #5237 -* Adds mapping support to the `odo link` command -// #5279 -* Supports ephemeral volumes using the `ephemeral` field in `volume` components -// #5270 -* Sets the default answer to `yes` when asking for telemetry opt-in -// #5260 -* Improves metrics by sending additional telemetry data to the devfile registry -// #5287 -* Updates the bootstrap image to `registry.access.redhat.com/ocp-tools-4/odo-init-container-rhel8:1.1.11` -// #5308 -* The upstream repository is available at link:https://github.com/redhat-developer/odo[] - - - -[id="odo-fixed-issues_{context}"] -== Bug fixes -// #5294 -* Previously, `odo deploy` would fail if the `.odo/env` file did not exist. The command now creates the `.odo/env` file if required. -// #5286 -* Previously, interactive component creation using the `odo create` command would fail if disconnected from the cluster. This issue is fixed in the latest release.
- - -[id="odo-getting-support_{context}"] -== Getting support - -.For Product - -If you find an error, encounter a bug, or have suggestions for improving the functionality of `{odo-title}`, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose *OpenShift Developer Tools and Services* as a product type and *odo* as a component. - -Provide as many details in the issue description as possible. - -.For Documentation - -If you find an error or have suggestions for improving the documentation, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose the *{product-title}* product type and the *Documentation* component type. - - - - - -////[id="odo-known-issues_{context}"] -== Known issues -//// - -//[id="odo-technology-preview_{context}"] -//== Technology Preview features `{odo-title}` -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/snippets b/cli_reference/developer_cli_odo/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/developer_cli_odo/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/understanding-odo.adoc b/cli_reference/developer_cli_odo/understanding-odo.adoc deleted file mode 100644 index c9901902f1df..000000000000 --- a/cli_reference/developer_cli_odo/understanding-odo.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//// -:_content-type: ASSEMBLY -[id="understanding-odo"] -= Understanding odo -include::_attributes/common-attributes.adoc[] -:context: understanding-odo - -toc::[] - -Red Hat OpenShift Developer CLI (`odo`) is a tool for creating applications on {product-title} and Kubernetes. With `{odo-title}`, you can develop, test, debug, and deploy microservices-based applications on a Kubernetes cluster without having a deep understanding of the platform. - -`{odo-title}` follows a _create and push_ workflow. As a user, when you _create_, the information (or manifest) is stored in a configuration file. When you _push_, the corresponding resources are created on the Kubernetes cluster. All of this configuration is stored in the Kubernetes API for seamless accessibility and functionality. - -`{odo-title}` uses _service_ and _link_ commands to link components and services together. `{odo-title}` achieves this by creating and deploying services based on Kubernetes Operators in the cluster. Services can be created using any of the Operators available on the Operator Hub. After linking a service, `odo` injects the service configuration into the component. Your application can then use this configuration to communicate with the Operator-backed service. 
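As a minimal sketch of the create and push workflow described above, assuming `odo` 2.x command syntax, where the component type and name are placeholders:

[source,terminal]
----
$ odo create nodejs mynode
$ odo push
----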
- -include::modules/odo-key-features.adoc[leveloffset=+1] -include::modules/odo-core-concepts.adoc[leveloffset=+1] -include::modules/odo-listing-components.adoc[leveloffset=+1] -include::modules/odo-telemetry.adoc[leveloffset=+1] -//// \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes deleted file mode 120000 index bf7c2529fdb4..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../../_attributes/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc deleted file mode 100644 index 594c44db70b1..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="about-odo-in-a-restricted-environment"] -= About {odo-title} in a restricted environment -:context: about-odo-in-a-restricted-environment - -toc::[] - - -To run `{odo-title}` in a disconnected cluster or a cluster provisioned in a restricted environment, you must ensure that a cluster administrator has created a cluster with a mirrored registry. - - -To start working in a disconnected cluster, you must first xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[push the `odo` init image to the registry of the cluster] and then overwrite the `odo` init image path using the `ODO_BOOTSTRAPPER_IMAGE` environment variable. - - -After you push the `odo` init image, you must xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#mirroring-a-supported-builder-image_creating-and-deploying-a-component-to-the-disconnected-cluster[mirror a supported builder image] from the registry, xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#overwriting-the-mirror-registry_creating-and-deploying-a-component-to-the-disconnected-cluster[overwrite a mirror registry] and then xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#creating-a-nodejs-application-with-odo_creating-and-deploying-a-component-to-the-disconnected-cluster[create your application]. A builder image is necessary to configure a runtime environment for your application and also contains the build tool needed to build your application, for example npm for Node.js or Maven for Java. A mirror registry contains all the necessary dependencies for your application. 
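For example, after the init image is available in your mirror registry, you can point `odo` at it by exporting the variable mentioned above. The registry host, repository path, and tag are placeholders:

[source,terminal]
----
$ export ODO_BOOTSTRAPPER_IMAGE=registry.example.com/openshiftdo/odo-init-image:1.2.3
----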
- -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise,openshift-webscale[] -* xref:../../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Mirroring images for a disconnected installation] -endif::[] -* xref:../../../registry/accessing-the-registry.adoc#registry-accessing-directly_accessing-the-registry[Accessing the registry] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc deleted file mode 100644 index 5904d648335f..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-and-deploying-a-component-to-the-disconnected-cluster"] -= Creating and deploying a component to the disconnected cluster -include::_attributes/common-attributes.adoc[] -:context: creating-and-deploying-a-component-to-the-disconnected-cluster - -toc::[] - -After you push the `init` image to a cluster with a mirrored registry, you must mirror a supported builder image for your application with the `oc` tool, overwrite the mirror registry using the environment variable, and then create your component. - -== Prerequisites - -* Install `oc` on the client operating system. -* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system. -* Access to a restricted cluster with a configured {product-registry} or a mirror registry. -* xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[Push the `odo` init image to your cluster registry].
- -include::modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc[leveloffset=+1] -include::modules/developer-cli-odo-overwriting-a-mirror-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc deleted file mode 100644 index c4c0e7da0e0f..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-and-deploying-devfile-components-to-the-disconnected-cluster"] -= Creating and deploying devfile components to the disconnected cluster -include::_attributes/common-attributes.adoc[] -:context: creating-and-deploying-a-component-to-the-disconnected-cluster - -toc::[] - -include::modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1] - -include::modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images deleted file mode 120000 index 4399cbb3c0f3..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images +++ /dev/null @@ -1 +0,0 @@ -../../../images/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules deleted file mode 120000 index 7e8b50bee77a..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules +++ /dev/null @@ -1 +0,0 @@ -../../../modules/ \ No newline at end of file diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc deleted file mode 100644 index 30cecbd5d8e5..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="pushing-the-odo-init-image-to-the-restricted-cluster-registry"] -include::_attributes/common-attributes.adoc[] -= Pushing the {odo-title} init image to the restricted cluster registry -:context: pushing-the-odo-init-image-to-the-restricted-cluster-registry - -toc::[] - -Depending on the configuration of your cluster and your operating system you can either push the `odo` init image to a mirror registry or directly to an {product-registry}. - -== Prerequisites - -* Install `oc` on the client operating system. -* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system. -* Access to a restricted cluster with a configured {product-registry} or a mirror registry. 
- -include::modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc[leveloffset=+1] -include::modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc[leveloffset=+1] diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets deleted file mode 120000 index ce62fd7c41e2..000000000000 --- a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/snippets +++ /dev/null @@ -1 +0,0 @@ -../../../snippets/ \ No newline at end of file diff --git a/cli_reference/images b/cli_reference/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cli_reference/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cli_reference/index.adoc b/cli_reference/index.adoc deleted file mode 100644 index 6b4bbc18d69a..000000000000 --- a/cli_reference/index.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-tools-overview"] -= {product-title} CLI tools overview -include::_attributes/common-attributes.adoc[] -:context: cli-tools-overview - -toc::[] - -A user performs a range of operations while working on {product-title} such as the following: - -* Managing clusters -* Building, deploying, and managing applications -* Managing deployment processes -* Developing Operators -* Creating and maintaining Operator catalogs - -{product-title} offers a set of command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal. -These tools expose simple commands to manage the applications, as well as interact with each component of the system. - -[id="cli-tools-list"] -== List of CLI tools - -The following set of CLI tools are available in {product-title}: - -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI (oc)]: This is the most commonly used CLI tool by {product-title} users. It helps both cluster administrators and developers to perform end-to-end operations across {product-title} using the terminal. Unlike the web console, it allows the user to work directly with the project source code using command scripts. - -* xref:../cli_reference/kn-cli-tools.adoc#kn-cli-tools[Knative CLI (kn)]: The Knative (`kn`) CLI tool provides simple and intuitive terminal commands that can be used to interact with OpenShift Serverless components, such as Knative Serving and Eventing. - -* xref:../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[Pipelines CLI (tkn)]: OpenShift Pipelines is a continuous integration and continuous delivery (CI/CD) solution in {product-title}, which internally uses Tekton. The `tkn` CLI tool provides simple and intuitive commands to interact with OpenShift Pipelines using the terminal. - -* xref:../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[opm CLI]: The `opm` CLI tool helps the Operator developers and cluster administrators to create and maintain the catalogs of Operators from the terminal. - -* xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Operator SDK]: The Operator SDK, a component of the Operator Framework, provides a CLI tool that Operator developers can use to build, test, and deploy an Operator from the terminal. It simplifies the process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge. 
diff --git a/cli_reference/kn-cli-tools.adoc b/cli_reference/kn-cli-tools.adoc deleted file mode 100644 index e5d4c358d7ff..000000000000 --- a/cli_reference/kn-cli-tools.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="kn-cli-tools"] -= Knative CLI for use with {ServerlessProductName} -:context: kn-cli-tools - -toc::[] - -The Knative (`kn`) CLI enables simple interaction with Knative components on {product-title}. - -[id="kn-cli-tools-key-features"] -== Key features - -The Knative (`kn`) CLI is designed to make serverless computing tasks simple and concise. -Key features of the Knative CLI include: - -* Deploy serverless applications from the command line. -* Manage features of Knative Serving, such as services, revisions, and traffic-splitting. -* Create and manage Knative Eventing components, such as event sources and triggers. -* Create sink bindings to connect existing Kubernetes applications and Knative services. -* Extend the Knative CLI with flexible plugin architecture, similar to the `kubectl` CLI. -* Configure autoscaling parameters for Knative services. -* Scripted usage, such as waiting for the results of an operation, or deploying custom rollout and rollback strategies. - -[id="kn-cli-tools-installing-kn"] -== Installing the Knative CLI - -See link:https://docs.openshift.com/serverless/1.28/install/installing-kn.html#installing-kn[Installing the Knative CLI]. diff --git a/cli_reference/modules b/cli_reference/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/cli_reference/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/cli_reference/odo-important-update.adoc b/cli_reference/odo-important-update.adoc deleted file mode 100644 index bbb939cd21f1..000000000000 --- a/cli_reference/odo-important-update.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-important-update.adoc - -:_content-type: CONCEPT -[id="odo-important_update_{context}"] -include::_attributes/attributes-openshift-dedicated.adoc[] -include::_attributes/common-attributes.adoc[] -= Important update on `{odo-title}` -:context: odo-important-update - -toc::[] - -Red Hat does not provide information about `{odo-title}` on the {OCP} documentation site. See the link:https://odo.dev/docs/introduction[documentation] maintained by Red Hat and the upstream community for documentation information related to `{odo-title}`. - -[IMPORTANT] -==== -For the materials maintained by the upstream community, Red Hat provides support under link:https://access.redhat.com/solutions/5893251[Cooperative Community Support]. 
-==== - diff --git a/cli_reference/openshift_cli/_attributes b/cli_reference/openshift_cli/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/openshift_cli/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/administrator-cli-commands.adoc b/cli_reference/openshift_cli/administrator-cli-commands.adoc deleted file mode 100644 index 3b5c3da2e911..000000000000 --- a/cli_reference/openshift_cli/administrator-cli-commands.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-administrator-commands"] -= OpenShift CLI administrator command reference -include::_attributes/common-attributes.adoc[] -:context: cli-administrator-commands - -toc::[] - -This reference provides descriptions and example commands for OpenShift CLI (`oc`) administrator commands. You must have `cluster-admin` or equivalent permissions to use these commands. - -For developer commands, see the xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference]. - -Run `oc adm -h` to list all administrator commands or run `oc --help` to get additional details for a specific command. - -// The following file is auto-generated from the openshift/oc repository -// OpenShift CLI (oc) administrator commands -include::modules/oc-adm-by-example-content.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cli-administrator-commands"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference] diff --git a/cli_reference/openshift_cli/configuring-cli.adoc b/cli_reference/openshift_cli/configuring-cli.adoc deleted file mode 100644 index 0eace0d03e61..000000000000 --- a/cli_reference/openshift_cli/configuring-cli.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-configuring-cli"] -= Configuring the OpenShift CLI -include::_attributes/common-attributes.adoc[] -:context: cli-configuring-cli - -toc::[] - -[id="cli-enabling-tab-completion"] -== Enabling tab completion - -You can enable tab completion for the Bash or Zsh shells. - -// Enabling tab completion for Bash -include::modules/cli-configuring-completion.adoc[leveloffset=+2] - -// Enabling tab completion for Zsh -include::modules/cli-configuring-completion-zsh.adoc[leveloffset=+2] diff --git a/cli_reference/openshift_cli/developer-cli-commands.adoc b/cli_reference/openshift_cli/developer-cli-commands.adoc deleted file mode 100644 index 517e2cb7a777..000000000000 --- a/cli_reference/openshift_cli/developer-cli-commands.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-developer-commands"] -= OpenShift CLI developer command reference -include::_attributes/common-attributes.adoc[] -:context: cli-developer-commands - -toc::[] - -This reference provides descriptions and example commands for OpenShift CLI (`oc`) developer commands. -ifdef::openshift-enterprise,openshift-origin[] -For administrator commands, see the xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference]. -endif::openshift-enterprise,openshift-origin[] - -Run `oc help` to list all commands or run `oc --help` to get additional details for a specific command. 
- -// The following file is auto-generated from the openshift/oc repository -// OpenShift CLI (oc) developer commands -include::modules/oc-by-example-content.adoc[leveloffset=+1] - -ifdef::openshift-enterprise,openshift-origin[] - -[role="_additional-resources"] -[id="additional-resources_cli-developer-commands"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference] - -endif::openshift-enterprise,openshift-origin[] diff --git a/cli_reference/openshift_cli/extending-cli-plugins.adoc b/cli_reference/openshift_cli/extending-cli-plugins.adoc deleted file mode 100644 index 549e986f667f..000000000000 --- a/cli_reference/openshift_cli/extending-cli-plugins.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-extend-plugins"] -= Extending the OpenShift CLI with plugins -include::_attributes/common-attributes.adoc[] -:context: cli-extend-plugins - -toc::[] - -You can write and install plugins to build on the default `oc` commands, -allowing you to perform new and more complex tasks with the {product-title} CLI. - -// Writing CLI plugins -include::modules/cli-extending-plugins-writing.adoc[leveloffset=+1] - -// Installing and using CLI plugins -include::modules/cli-extending-plugins-installing.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/getting-started-cli.adoc b/cli_reference/openshift_cli/getting-started-cli.adoc deleted file mode 100644 index bff334b936e1..000000000000 --- a/cli_reference/openshift_cli/getting-started-cli.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-getting-started"] -= Getting started with the OpenShift CLI -include::_attributes/common-attributes.adoc[] -:context: cli-developer-commands - -toc::[] - -// About the CLI -include::modules/cli-about-cli.adoc[leveloffset=+1] - -[id="installing-openshift-cli"] -== Installing the OpenShift CLI - -You can install the OpenShift CLI (`oc`) either by downloading the binary or by using an RPM. 
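After installing by either method, a quick way to confirm that the binary is available on your `PATH` is to check the client version, as a minimal sketch:

[source,terminal]
----
$ oc version --client
----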
- -// Installing the CLI by downloading the binary -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -// Installing the CLI by using the web console -include::modules/cli-installing-cli-web-console.adoc[leveloffset=+2] - -// Installing the CLI on Linux by using the web console -include::modules/cli-installing-cli-web-console-linux.adoc[leveloffset=+3] - -// Installing the CLI on Windows by using the web console -include::modules/cli-installing-cli-web-console-windows.adoc[leveloffset=+3] - -// Installing the CLI on macOS by using the web console -include::modules/cli-installing-cli-web-console-macos.adoc[leveloffset=+3] - -ifndef::openshift-origin[] -// Installing the CLI by using an RPM -include::modules/cli-installing-cli-rpm.adoc[leveloffset=+2] -endif::[] - -// Installing the CLI by using Homebrew -include::modules/cli-installing-cli-brew.adoc[leveloffset=+2] - -// Logging in to the CLI -include::modules/cli-logging-in.adoc[leveloffset=+1] - -// Using the CLI -include::modules/cli-using-cli.adoc[leveloffset=+1] - -// Getting help -include::modules/cli-getting-help.adoc[leveloffset=+1] - -// Logging out of the CLI -include::modules/cli-logging-out.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/images b/cli_reference/openshift_cli/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/openshift_cli/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc b/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc deleted file mode 100644 index 25d0e4420b04..000000000000 --- a/cli_reference/openshift_cli/managing-cli-plugins-krew.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-cli-plugin-krew"] -= Managing CLI plugins with Krew -include::_attributes/common-attributes.adoc[] -:context: managing-cli-plugins-krew - -toc::[] - -You can use Krew to install and manage plugins for the OpenShift CLI (`oc`). - -:FeatureName: Using Krew to install and manage plugins for the OpenShift CLI -include::snippets/technology-preview.adoc[] - -// Installing a CLI plugin with Krew -include::modules/cli-krew-install-plugin.adoc[leveloffset=+1] - -// Updating a CLI plugin with Krew -include::modules/cli-krew-update-plugin.adoc[leveloffset=+1] - -// Removing a CLI plugin with Krew -include::modules/cli-krew-remove-plugin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_managing-cli-plugins-krew"] -== Additional resources - -* link:https://krew.sigs.k8s.io/[Krew] -* xref:../../cli_reference/openshift_cli/extending-cli-plugins.adoc#cli-extend-plugins[Extending the OpenShift CLI with plugins] diff --git a/cli_reference/openshift_cli/managing-cli-profiles.adoc b/cli_reference/openshift_cli/managing-cli-profiles.adoc deleted file mode 100644 index 8978acebfc22..000000000000 --- a/cli_reference/openshift_cli/managing-cli-profiles.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="managing-cli-profiles"] -= Managing CLI profiles -include::_attributes/common-attributes.adoc[] -:context: managing-cli-profiles - -toc::[] - -A CLI configuration file allows you to configure different profiles, or contexts, for use with the xref:../../cli_reference/index.adoc#cli-tools-overview[CLI tools overview]. 
A context consists of -ifndef::microshift[] -xref:../../authentication/understanding-authentication.adoc#understanding-authentication[user authentication] -endif::[] -ifdef::microshift[] -user authentication -endif::[] -and {product-title} server information associated with a _nickname_. - -include::modules/about-cli-profiles-switch.adoc[leveloffset=+1] - -include::modules/manual-configuration-of-cli-profiles.adoc[leveloffset=+1] - -include::modules/load-and-merge-rules.adoc[leveloffset=+1] diff --git a/cli_reference/openshift_cli/modules b/cli_reference/openshift_cli/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/openshift_cli/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/snippets b/cli_reference/openshift_cli/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/openshift_cli/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/openshift_cli/usage-oc-kubectl.adoc b/cli_reference/openshift_cli/usage-oc-kubectl.adoc deleted file mode 100644 index a6c2cf909381..000000000000 --- a/cli_reference/openshift_cli/usage-oc-kubectl.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="usage-oc-kubectl"] -= Usage of oc and kubectl commands -include::_attributes/common-attributes.adoc[] -:context: usage-oc-kubectl - -The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with {product-title}, or you can gain extended functionality by using the `oc` binary. - -== The oc binary - -The `oc` binary offers the same capabilities as the `kubectl` binary, but it extends to natively support additional {product-title} features, including: - -* **Full support for {product-title} resources** -+ -Resources such as `DeploymentConfig`, `BuildConfig`, `Route`, `ImageStream`, and `ImageStreamTag` objects are specific to {product-title} distributions, and build upon standard Kubernetes primitives. -+ -* **Authentication** -+ -ifndef::microshift[] -The `oc` binary offers a built-in `login` command for authentication and lets you work with {product-title} projects, which map Kubernetes namespaces to authenticated users. -Read xref:../../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] for more information. -endif::[] -+ -ifdef::microshift[] -The `oc` binary offers a built-in `login` command for authentication to {product-title}. -endif::[] -+ -* **Additional commands** -+ -The additional command `oc new-app`, for example, makes it easier to get new applications started using existing source code or pre-built images. Similarly, the additional command `oc new-project` makes it easier to start a project that you can switch to as your default. - -[IMPORTANT] -==== -If you installed an earlier version of the `oc` binary, you cannot use it to complete all of the commands in {product-title} {product-version}. If you want the latest features, you must download and install the latest version of the `oc` binary corresponding to your {product-title} server version. -==== - -Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. 
A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server. - -.Compatibility Matrix - -[cols="1,1,1"] -|=== - -| -|*X.Y* (`oc` Client) -|*X.Y+N* footnote:versionpolicyn[Where *N* is a number greater than or equal to 1.] (`oc` Client) - -|*X.Y* (Server) -|image:redcircle-1.png[] -|image:redcircle-3.png[] - -|*X.Y+N* footnote:versionpolicyn[] (Server) -|image:redcircle-2.png[] -|image:redcircle-1.png[] - -|=== -image:redcircle-1.png[] Fully compatible. - -image:redcircle-2.png[] `oc` client might not be able to access server features. - -image:redcircle-3.png[] `oc` client might provide options and features that might not be compatible with the accessed server. - -== The kubectl binary - -The `kubectl` binary is provided as a means to support existing workflows and scripts for new {product-title} users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the {product-title} cluster. - -You can install the supported `kubectl` binary by following the steps to xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Install the OpenShift CLI]. The `kubectl` binary is included in the archive if you download the binary, or is installed when you install the CLI by using an RPM. - -For more information, see the link:https://kubernetes.io/docs/reference/kubectl/overview/[kubectl documentation]. diff --git a/cli_reference/opm/_attributes b/cli_reference/opm/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/opm/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/opm/cli-opm-install.adoc b/cli_reference/opm/cli-opm-install.adoc deleted file mode 100644 index 70d2c01c01bf..000000000000 --- a/cli_reference/opm/cli-opm-install.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-opm-install"] -= Installing the opm CLI -include::_attributes/common-attributes.adoc[] -:context: cli-opm-install - -toc::[] - -include::modules/olm-about-opm.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for more information about the bundle format. -* To create a bundle image using the Operator SDK, see -xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-working-bundle-images[Working with bundle images]. - -include::modules/olm-installing-opm.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="opm-addtl-resources"] -== Additional resources - -* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for `opm` procedures including creating, updating, and pruning catalogs. 
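After installing `opm` by either method, you can confirm that the binary is available on your `PATH` (illustrative; the version string in the output varies by release):

[source,terminal]
----
$ opm version
----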
diff --git a/cli_reference/opm/cli-opm-ref.adoc b/cli_reference/opm/cli-opm-ref.adoc deleted file mode 100644 index ae7d744ef70f..000000000000 --- a/cli_reference/opm/cli-opm-ref.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-opm-ref"] -= opm CLI reference -include::_attributes/common-attributes.adoc[] -:context: cli-opm-ref - -toc::[] - -The `opm` command-line interface (CLI) is a tool for creating and maintaining Operator catalogs. - -.`opm` CLI syntax -[source,terminal] ----- -$ opm <command> [<subcommand>] [<argument>] [<flags>] ----- - -.Global flags -[options="header",cols="1,3"] -|=== -|Flag |Description - -|`--skip-tls-verify` -|Skip TLS certificate verification for container image registries while pulling bundles or indexes. - -|`--use-http` -|When you pull bundles, use plain HTTP for container image registries. - -|=== - -:FeatureName: The SQLite-based catalog format, including the related CLI commands, -include::snippets/deprecated-feature.adoc[] - -include::modules/opm-cli-ref-generate.adoc[leveloffset=+1] -include::modules/opm-cli-ref-index.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/understanding/olm-packaging-format.adoc#olm-file-based-catalogs_olm-packaging-format[Operator Framework packaging format] -* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs-fb[Managing custom catalogs] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] - -include::modules/opm-cli-ref-init.adoc[leveloffset=+1] -include::modules/opm-cli-ref-migrate.adoc[leveloffset=+1] -include::modules/opm-cli-ref-render.adoc[leveloffset=+1] -include::modules/opm-cli-ref-serve.adoc[leveloffset=+1] -include::modules/opm-cli-ref-validate.adoc[leveloffset=+1] \ No newline at end of file diff --git a/cli_reference/opm/images b/cli_reference/opm/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/opm/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/opm/modules b/cli_reference/opm/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/opm/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/opm/snippets b/cli_reference/opm/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/opm/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/osdk/_attributes b/cli_reference/osdk/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/osdk/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/osdk/cli-osdk-install.adoc b/cli_reference/osdk/cli-osdk-install.adoc deleted file mode 100644 index 085ad64eb12b..000000000000 --- a/cli_reference/osdk/cli-osdk-install.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-osdk-install"] -= Installing the Operator SDK CLI -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-install - -toc::[] - -The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators.
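For example, once the CLI is installed, scaffolding a new Operator project is a single command (a sketch only; the directory, domain, and repository values below are placeholders, not defaults):

[source,terminal]
----
$ mkdir example-operator && cd example-operator
$ operator-sdk init --domain example.com --repo github.com/example/example-operator
----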
- -Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, Java, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work. - -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. - -[NOTE] -==== -{product-title} {product-version} supports Operator SDK {osdk_ver}. -==== - -include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1] - -include::modules/osdk-installing-cli-macos.adoc[leveloffset=+1] \ No newline at end of file diff --git a/cli_reference/osdk/cli-osdk-ref.adoc b/cli_reference/osdk/cli-osdk-ref.adoc deleted file mode 100644 index 10b97dfa8c90..000000000000 --- a/cli_reference/osdk/cli-osdk-ref.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-osdk-ref"] -= Operator SDK CLI reference -include::_attributes/common-attributes.adoc[] -:context: cli-osdk-ref - -toc::[] - -The Operator SDK command-line interface (CLI) is a development kit designed to make writing Operators easier. - -.Operator SDK CLI syntax -[source,terminal] ----- -$ operator-sdk <command> [<subcommand>] [<argument>] [<flags>] ----- - -See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK. - -include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-completion.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-create.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-deploy-olm_osdk-working-bundle-images[Bundling an Operator and deploying with Operator Lifecycle Manager] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand. - -include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2] - -include::modules/osdk-cli-ref-init.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run.adoc[leveloffset=+1] -include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes. - -include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2] -include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool.
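For instance, running the scorecard against a locally generated bundle typically looks like the following (a sketch; `./bundle` is an assumed directory created by the `make bundle` command):

[source,terminal]
----
$ operator-sdk scorecard ./bundle
----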
diff --git a/cli_reference/osdk/images b/cli_reference/osdk/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/cli_reference/osdk/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/cli_reference/osdk/modules b/cli_reference/osdk/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/cli_reference/osdk/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/cli_reference/osdk/snippets b/cli_reference/osdk/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/osdk/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cli_reference/snippets b/cli_reference/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/cli_reference/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/cli_reference/tkn_cli/_attributes b/cli_reference/tkn_cli/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/cli_reference/tkn_cli/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/cli_reference/tkn_cli/images b/cli_reference/tkn_cli/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cli_reference/tkn_cli/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cli_reference/tkn_cli/installing-tkn.adoc b/cli_reference/tkn_cli/installing-tkn.adoc deleted file mode 100644 index 5ea19aa0ff94..000000000000 --- a/cli_reference/tkn_cli/installing-tkn.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id='installing-tkn'] -= Installing tkn -include::_attributes/common-attributes.adoc[] -:context: installing-tkn - -toc::[] - -Use the CLI tool to manage {pipelines-title} from a terminal. The following section describes how to install the CLI tool on different platforms. - -You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*. 
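Whichever installation method you choose, you can verify afterwards that the client is available (illustrative; when connected to a cluster, the output typically also reports the installed component versions):

[source,terminal]
----
$ tkn version
----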
- -:FeatureName: Running {pipelines-title} on ARM hardware -include::snippets/technology-preview.adoc[] - -[NOTE] -==== -Both the archives and the RPMs contain the following executables: - -* tkn -* tkn-pac -* opc -==== - -:FeatureName: Running {pipelines-title} with the `opc` CLI tool -include::snippets/technology-preview.adoc[] - -// Install tkn on Linux -include::modules/op-installing-tkn-on-linux.adoc[leveloffset=+1] - -// Install tkn on Linux using RPM -include::modules/op-installing-tkn-on-linux-using-rpm.adoc[leveloffset=+1] - -//Install tkn on Windows -include::modules/op-installing-tkn-on-windows.adoc[leveloffset=+1] - -//Install tkn on macOS -include::modules/op-installing-tkn-on-macos.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/modules b/cli_reference/tkn_cli/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/cli_reference/tkn_cli/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/cli_reference/tkn_cli/op-configuring-tkn.adoc b/cli_reference/tkn_cli/op-configuring-tkn.adoc deleted file mode 100644 index 675db608c42c..000000000000 --- a/cli_reference/tkn_cli/op-configuring-tkn.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="op-configuring-tkn"] -= Configuring the OpenShift Pipelines tkn CLI -include::_attributes/common-attributes.adoc[] -:context: configuring-tkn - -toc::[] - -Configure the {pipelines-title} `tkn` CLI to enable tab completion. - -// Enabling tab completion -include::modules/op-tkn-enabling-tab-completion.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/op-tkn-reference.adoc b/cli_reference/tkn_cli/op-tkn-reference.adoc deleted file mode 100644 index 66e36f075552..000000000000 --- a/cli_reference/tkn_cli/op-tkn-reference.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id='op-tkn-reference'] -= OpenShift Pipelines tkn reference -include::_attributes/common-attributes.adoc[] -:context: op-tkn-reference - -toc::[] - - -This section lists the basic `tkn` CLI commands. 
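For orientation, two representative invocations are shown below (a sketch; the pipeline run name and namespace are placeholders):

[source,terminal]
----
$ tkn pipeline list
$ tkn pipelinerun logs <pipelinerun_name> -f -n <namespace>
----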
- -== Basic syntax -`tkn [command or options] [arguments...]` - -== Global options -`--help, -h` - -// Utility commands -include::modules/op-tkn-utility-commands.adoc[leveloffset=+1] - -// Pipeline management commands -include::modules/op-tkn-pipeline-management.adoc[leveloffset=+1] - -// Pipeline run commands -include::modules/op-tkn-pipeline-run.adoc[leveloffset=+1] - -// Task management commands -include::modules/op-tkn-task-management.adoc[leveloffset=+1] - -// Task run commands -include::modules/op-tkn-task-run.adoc[leveloffset=+1] - -// Condition management commands -include::modules/op-tkn-condition-management.adoc[leveloffset=+1] - -// Pipeline resources commands -include::modules/op-tkn-pipeline-resource-management.adoc[leveloffset=+1] - -// ClusterTask management commands -include::modules/op-tkn-clustertask-management.adoc[leveloffset=+1] - -// Trigger management commands -include::modules/op-tkn-trigger-management.adoc[leveloffset=+1] - -// Hub interaction commands -include::modules/op-tkn-hub-interaction.adoc[leveloffset=+1] diff --git a/cli_reference/tkn_cli/snippets b/cli_reference/tkn_cli/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/cli_reference/tkn_cli/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/cloud_providers/PLACEHOLDER b/cloud_providers/PLACEHOLDER deleted file mode 100644 index 4020393e57eb..000000000000 --- a/cloud_providers/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please delete this file once you have assemblies here. - diff --git a/cloud_providers/_attributes b/cloud_providers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/cloud_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/cloud_providers/images b/cloud_providers/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/cloud_providers/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/cloud_providers/modules b/cloud_providers/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/cloud_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/contributing_to_docs/contributing.adoc b/contributing_to_docs/contributing.adoc deleted file mode 100644 index 3f051908ad0d..000000000000 --- a/contributing_to_docs/contributing.adoc +++ /dev/null @@ -1,231 +0,0 @@ -[id="contributing-to-docs-contributing"] -= Contribute to OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Basic information about the OpenShift GitHub repository -:imagesdir: ../images - -toc::[] - -== Different ways to contribute - -There are a few different ways you can contribute to OpenShift documentation: - -* Email the OpenShift Docs team at openshift-docs@redhat.com. -* Create a link:https://github.com/openshift/openshift-docs/issues/new[GitHub] or link:https://issues.redhat.com/secure/CreateIssueDetails!init.jspa?pid=12332330&summary=Documentation_issue&issuetype=1&components=12367614&priority=10200&versions=12385624[Jira issue] for the most relevant documentation component. -* Submit a pull request (PR). You can create a local clone of your own fork of the link:https://github.com/openshift/openshift-docs[openshift-docs repository], make your changes, and submit a PR. This option is best if you have substantial changes. If you open a PR, be sure that all of its contents are related and apply to the same versions. 
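If you take the pull request route, the initial setup usually looks like the following sketch (replace `<your_github_username>` with the account that holds your fork; the `upstream` remote name is only a convention):

----
$ git clone git@github.com:<your_github_username>/openshift-docs.git
$ cd openshift-docs
$ git remote add upstream https://github.com/openshift/openshift-docs.git
----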
- -*What happens when you submit a PR?* - -The following diagram outlines the OpenShift documentation contribution process: - -image::osdocs-contribution-workflow.png[OpenShift documentation contribution workflow] - -When you submit a PR, the https://github.com/orgs/openshift/teams/team-documentation[OpenShift Docs team] reviews the PR and arranges further reviews by Quality Engineering (QE), subject matter experts (SMEs), and others, as required. If the PR requires changes, updates, or corrections, the reviewers add comments in the PR. We might request that you make the changes, or let you know that we incorporated your content in a different PR. Occasionally, we might add commits to the original PR directly. When the PR has been reviewed, all updates are complete, and all commits are squashed, the documentation team merges the PR and applies it to the valid versions. - -For a more detailed description of the contribution workflow, see link:create_or_edit_content.adoc#understanding-the-contribution-workflow[Understanding the contribution workflow]. - -== Repository organization -Each top directory in the OpenShift documentation repository can include a -collection of top level assemblies and subdirectories that contain more -assemblies. The exceptions to this rule are directories whose names -start with an underscore (like `_builder_lib` and `_javascripts`), which contain -the assets used to generate the finished documentation. - -Each top level `` directory contains AsciiDoc assembly files, any `` -subdirectories, and symlinks to the `images` and `modules` directories that -contain all the images and modules for the collection. - ----- -/ -/topic_dir1 -/subtopic_dir1 -/subtopic_dirN -/topic_dir/assembly1.adoc -/topic_dir/assemblyN.adoc -/topic_dir/subtopic_dir1/assembly1.adoc -/topic_dir/subtopic_dirN/assemblyN.adoc -/topic_dir/~images -/topic_dir/~modules -... -/topic_dir2 ----- - -== Version management -Most of the content applies to all five OpenShift products: OKD, OpenShift -Online, OpenShift Dedicated, ROSA and OpenShift Container Platform. While a large -amount of content is reused for all product collections, some information -applies to only specific collections. Content inclusion and exclusion is managed -on the assembly level by specifying distributions in the -`_topic_map.yml` files in the `_topic_maps` folder or by using `ifdef/endif` statements in individual -files. - -//// -While it is _possible_ -to accomplish this solely with Git branches to maintain slightly different -versions of a given topic, doing so would make the task of maintaining internal -consistency extremely difficult for content contributors. - -Git branching is still extremely valuable, and serves the important role of -tracking the release versions of documentation for the various OpenShift -products. -//// - -=== Conditional text between products -OpenShift documentation uses AsciiDoc's `ifdef/endif` macro to conditionalize -and reuse content across the different OpenShift products, down to the -single-line level. - -The supported distribution attributes used with the OpenShift build mechanism -are: - -* _openshift-origin_ -* _openshift-online_ -* _openshift-enterprise_ -* _openshift-dedicated_ -* _openshift-aro_ -* _openshift-webscale_ -* _openshift-rosa_ - -These attributes can be used by themselves or in conjunction to conditionalize -text within an assembly or module. - -Here is an example of this concept in use: - ----- -This first line is unconditionalized, and will appear for all versions. 
- -\ifdef::openshift-online[] -This line will only appear for OpenShift Online. -\endif::[] - -ifdef::openshift-enterprise -This line will only appear for OpenShift Container Platform. -\endif::[] - -ifdef::openshift-origin,openshift-enterprise -This line will appear for OKD and OpenShift Container Platform, but not for OpenShift Online or OpenShift Dedicated. -\endif::[] ----- - -Note that the following limitation exists when conditionalizing text: - -* While the `ifdef/endif` blocks have no size limit, do not use them to -to conditionalize an entire file. If an entire file is specific to a -only some OpenShift distributions, specify them in the `_topic_map.yml` -file. - -== Release branches - -With the combination of conditionalizing content within files with -`ifdef/endif` and conditionalizing whole files in the `_topic_map.yml` -file, the `main` branch of -this repository always contains a complete set of documentation for all -OpenShift products. However, when and as new versions of an OpenShift product -are released, the `main` branch is merged down to new or existing release -branches. Here is the general naming scheme used in the branches: - -* `main` - This is our *working* branch. -* `enterprise-N.N` - OpenShift Container Platform support releases. The docs -for OpenShift Online and OpenShift Dedicated are based on the appropriate -`enterprise-N.N` branch. - -On a 12-hourly basis, the documentation web sites are rebuilt for each of these -branches. This way the published content for each released version of an -OpenShift product will remain the same while development continues on the -`main` branch. Additionally, any corrections or additions that are -"cherry-picked" into the release branches will show up in the published -documentation after 12 hours. - -[NOTE] -==== -All OpenShift content development for the 4.x stream occurs on the `main`, or - *working* branch. -Therefore, when submitting your work the PR must be created against the `main` -branch. After it is reviewed, a writer will apply the content to the relevant -release branches. If you know which branches a change applies to, be sure to -specify it in your PR. - -When adding or updating content for version 3.11, you should create a feature -branch against enterprise-3.11 to submit your changes. -==== - -== Adding files to the collection -After you create assembly files, you must add them to the `_topic_map.yml` so -that the build system can render them. The documentation build system reads -the `_distro_map.yml` from the main branch to determine -which branches to build and then the `_topic_map.yml` file -for each of the branches -to construct the content from the source files and publish to the relevant -product site at https://docs.openshift.com. The build system _only_ reads this -file to determine which topic files to include. Therefore, all new assemblies that -are created must be included in the `_topic_map.yml` file in -order to be processed by the build system. - -For all supported versions, the topic map files are available in the `_topic_maps` folder. Older versions support `_topic_map.yml` file in the root folder. - -OpenShift Dedicated and OpenShift ROSA have their own topic maps: `_topic_map_osd.yml` and `_topic_map_rosa.yml`. Edits to these files should be coordinated with Service Delivery documentation team members as that team is primarily responsible for maintaining this content. - -[NOTE] -==== -Module files are included in the appropriate assembly files. 
Modules are not added directly to the `_topic_map.yml` file. -==== - -=== Topic map file format -For supported branches the `_topic_map.yml` is based in the `_topic_maps` folder in the root directory and are organized (primarily) by distributions. - -The `_topic_map.yml` file uses the following format: - ----- ---- //<1> -Name: Origin of the Species <2> -Dir: origin_of_the_species <3> -Distros: all <4> -Topics: - - Name: The Majestic Marmoset <5> - File: the_majestic_marmoset <6> - Distros: all - - Name: The Curious Crocodile - File: the_curious_crocodile - Distros: openshift-online,openshift-enterprise <4> - - Name: The Numerous Nematodes - Dir: the_numerous_nematodes <7> - Topics: - - Name: The Wily Worm <8> - File: the_wily_worm - - Name: The Acrobatic Ascarid <= Sub-topic 2 name - File: the_acrobatic_ascarid <= Sub-topic 2 file under / ----- -<1> Record separator at the top of each topic group. -<2> Display name of topic group. -<3> Directory name of topic group. -<4> Which OpenShift versions this topic group is part of. -* The *Distros* setting is optional for topic groups and topic items. By -default, if the *Distros* setting is not used, it is processed as if it was set -to *Distros: all* for that particular topic or topic group. This means that -topic or topic group will appear in all product documentation versions. -* The *all* value for *Distros* is a synonym for -_openshift-origin,openshift-enterprise,openshift-online,openshift-dedicated,openshift-aro,openshift-webscale_. -* The *all* value overrides other values, so _openshift-online,all_ is processed -as *all*. -* Do not use _openshift-dedicated_ or _openshift-rosa_ in the main `_topic_map.yml` file. Use the distribution specific topic map file. -<5> Assembly name. -<6> Assembly file under the topic group dir without `.adoc`. -<7> This topic is actually a subtopic group. Instead of a `File` path it has a -`Dir` path and `Topics`, just like a top-level topic group. -<8> Assemblies belonging to a subtopic group are listed just like regular assemblies -with a `Name` and `File`. - - - -== Next steps -* First, you should link:tools_and_setup.adoc[install and set up the tools and software] -on your workstation so that you can contribute. -* Next, link:doc_guidelines.adoc[review the documentation guidelines] to -understand some basic guidelines to keep things consistent -across our content. -* If you are ready to create content, or want to edit existing content, the -link:create_or_edit_content.adoc[create or edit content] topic describes how -you can do this by creating a working branch. diff --git a/contributing_to_docs/contributing_user_stories.adoc b/contributing_to_docs/contributing_user_stories.adoc deleted file mode 100644 index 3a3f9ae6878d..000000000000 --- a/contributing_to_docs/contributing_user_stories.adoc +++ /dev/null @@ -1,97 +0,0 @@ -[[contributing-user-stories]] -= Contribute user stories to OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Basic information about how to create user stories for OpenShift GitHub repository - -toc::[] - -== Modularization backstory -OpenShift docs are modularized, starting from OpenShift 4.1. -All existing content has been replaced with content that is based on user stories and -complies with the modularization guidelines. All future content must both -support a user story and be modular. - -== How do I contribute modularized content? 
-To contribute modularized content, you need to write a user story, create -documentation modules to support the user story, and create an assembly for the -story. - -== What if I don't want to write in modules? -If you don't want to write the modules yourself but have a content change, -write a user story, provide details to support the story, and reach out to the -OpenShift docs team. - -== How do I write a user story? Is there a template? -Instead of a template, we have a series of questions for you to answer to -create the user story. Follow the same steps if you are writing the modules -yourself or if you plan to work with the docs team. - -The basic format of a user story is: - ----- -As a <type of user>, I want to <perform some action> because <reason>. ----- - -For example, "As a cluster administrator, I want to enable an Auto Scaling group to manage my OpenShift Enterprise -cluster deployed on AWS because I want my node count to scale based on application demand." - -Use the following questions to guide you in providing the context for your user story and the necessary technical details to start a draft. -You don't have to answer all of these questions, only the ones that make sense to your particular user story. - -=== Feature info -* What is the feature being developed? What does it do? -* How does it work? -* Are there any configuration files/settings/parameters being added or modified? Are any new commands being added or modified? -* What tools or software does the docs team need to test how this feature works? Does the docs team need to update any installed software? -* Are there any existing blogs, Wiki posts, Kbase articles, or BZs involving this feature? Or any other existing information that may help to understand this feature? - -=== Customer impact -* Who is the intended audience for this feature? If it's for Enterprise, does it apply to developers, admins, or both? -* Why is it important for our users? Why would they want to use this feature? How does it benefit them? -* How will the customer use it? Is there a use case? -* How will the customer interact with this feature? Client tools? Web console? REST API? - -=== Product info -* Is this feature being developed for Online? Enterprise? Dedicated? OKD? All? -* Will this feature be rolled back to previous versions? -* If it's for Online, what type of plan do users need to use this feature? -* Is it user-facing, or more behind-the-scenes admin stuff? -* What tools or software does the docs team need to test how this feature works? - -== How do I write in modules? -The full guidelines for writing modules are in the Customer Content Services (CCS) -link:https://redhat-documentation.github.io/modular-docs/[modularization guide]. - -The main concepts of writing in modules are: - -* Each assembly contains the information required for a user to achieve a single -goal. -* Assemblies contain primarily `include` statements, which are references to -smaller, targeted module files. -* Modules can contain conceptual information, reference information, or steps, -but not a combination of the types. - -For example, a simple assembly might contain the following three modules (an illustrative skeleton follows this list): - -* A concept module that contains background information about the feature -that the user will configure -* A reference module that contains an annotated sample YAML file that the user -needs to modify -* A procedure module that contains the prerequisites that the user needs to -complete before they start configuring and steps that the user takes to -complete the configuration.
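An illustrative skeleton of such an assembly is shown below (the file and module names are invented for this example; real files must follow the metadata rules in the documentation guidelines):

----
:_content-type: ASSEMBLY
[id="configuring-example-feature"]
= Configuring the example feature
include::_attributes/common-attributes.adoc[]
:context: configuring-example-feature

toc::[]

// Concept: background about the feature being configured
include::modules/about-example-feature.adoc[leveloffset=+1]

// Reference: annotated sample YAML file that the user modifies
include::modules/example-feature-sample-yaml.adoc[leveloffset=+1]

// Procedure: prerequisites and configuration steps
include::modules/configuring-example-feature.adoc[leveloffset=+1]
----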
- -The `enterprise-4.1` branch contains sample assemblies that explain how to -get started with modular documentation for OpenShift and that serve as -references for including modules in assemblies. The -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/mod_docs_guide/mod-docs-conventions-ocp.adoc[Modular Docs OpenShift conventions] -assembly contains the -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/mod-docs-ocp-conventions.adoc[Modular docs OpenShift conventions] -reference module, and the -link:https://github.com/openshift/openshift-docs/blob/enterprise-4.1/mod_docs_guide/getting-started-modular-docs-ocp.adoc[Getting started with modular docs on OpenShift] -assembly contains the -link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/creating-your-first-content.adoc[Creating your first content] -procedure module. diff --git a/contributing_to_docs/create_or_edit_content.adoc b/contributing_to_docs/create_or_edit_content.adoc deleted file mode 100644 index 57c0605697fe..000000000000 --- a/contributing_to_docs/create_or_edit_content.adoc +++ /dev/null @@ -1,270 +0,0 @@ -[id="contributing-to-docs-create-or-edit-content"] -= Create content or edit existing content -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:description: Create feature branch to contribute new content or updates -:imagesdir: ../images - -toc::[] - -== Before you begin -Before you create or edit content: - -* Read and review the link:contributing.adoc[Contribute to OpenShift documentation] -topic to understand some basics -* link:tools_and_setup.adoc[Install and set up the tools and software] -required to contribute -* Read and review the link:doc_guidelines.adoc[Documentation guidelines] topic -to understand the basic guidelines for consistency - -== Understanding the contribution workflow - -The following diagram outlines the steps required to add content to the OpenShift documentation: - -image::osdocs-contribution-workflow.png[OpenShift documentation contribution workflow] - -After you have identified a documentation requirement and created a ticket, you can contribute to the documentation directly or the OpenShift Docs team can create the content. - -When you contribute content directly, you must create a feature branch in a local clone of your own fork of the link:https://github.com/openshift/openshift-docs[openshift-docs repository]. After gathering stakeholder input and completing your technical testing, you can develop your documentation draft in your local feature branch. For more information about working with feature branches, see link:https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches[the GitHub documentation]. By using AsciiBinder on your local machine, you can create a local preview to review your updates. - -To submit your content for review, you must push your local feature branch to your openshift-docs fork on GitHub and create a pull request (PR). The PR is a request for the updates in your feature branch on your fork to be merged into the relevant branch in the mainstream openshift-docs repository. In the *Open a pull request* dialog, you can add a description, review the content updates, and create the PR. After your PR is created, the Netlify bot automatically generates a preview build and provides a preview link in a PR comment. - -The OpenShift Docs team reviews the PR and the Netlify preview build. 
The team also requests reviews from Quality Engineering (QE), subject matter experts (SMEs), and others, depending on the content that is submitted. You can apply any suggested changes by updating the original commit in your local feature branch. If you have multiple commits in your PR, you must squash them into one commit. After you push the additional updates to your fork, the PR and the Netlify preview are updated automatically. - -When all of the required approvals are in place, the OpenShift Docs team merges the PR and cherry picks the content to the relevant branches. When the PR is merged and cherry picked, the content is automatically published after a short while. The OpenShift Docs team then checks the published content, add links in the documentation ticket, and closes the ticket to complete the request. - -The following sections in this document provide detailed steps to create or edit OpenShift documentation content. - -== Ensure your local repository is in sync with the remote -Before you create a local feature branch, it is good practice to ensure that -your local source branch is in sync with the remote and that you have all the -latest changes. You must also ensure that your forked repository is also in sync -with the remote repository. - -[NOTE] -==== -Because most changes in this repository must be committed to the `main` -branch, the following process uses `main` as the name of the source -branch. If you must use another branch as the source for your change, make -sure that you consistently use that branch name instead of `main`. -==== - -1. From your local repository, make sure you have the `main` branch checked -out: -+ ----- -$ git checkout main ----- - -2. Fetch the current state of the OpenShift documentation repository: -+ ----- -$ git fetch upstream ----- - -3. Incorporate the commits from the remote repository, in this case -`openshift/openshift-docs`, into your local repository: -+ ----- -$ git rebase upstream/main ----- - -4. Push the latest updates to your forked repository so that it is also in sync -with the remote: -+ ----- -$ git push origin main ----- - -== Add content or update existing content on local branch -With your local and forked repositories in sync with the remote, you can now -create a local feature branch where you will make all your updates, or create -any new content. - -*Step 1:* Create local feature branch - -The following command creates a local feature branch from the branch that you are currently on, and checks it out -automatically. Be sure to replace `` with a suitable name. -Also, be sure that the changes made on this branch are closely related. -You must create separate PRs for bugfix changes (for an old or current release) -and enhancement changes (for an upcoming new release). - ----- -$ git checkout -b ----- - -[NOTE] -==== -This command creates a new specified branch and also checks it out, so you will -automatically switch to the new branch. -==== - -*Step 2:* Create content or update existing content as required - -With the local feature branch created and checked out, you can now edit any content or -start adding new content. - -Ensure that any new file contains the required metadata as described -in the link:doc_guidelines.adoc[documentation guidelines] topic, including -naming and title conventions. 
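If you want to review the rendered output before committing, you can build a local preview (a sketch, assuming the `ascii_binder` gem is installed; the output location can differ between AsciiBinder versions):

----
$ asciibinder build    # renders the docs into the _preview/ directory
----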
- -*Step 3:* Add all of your changes to a pending commit - -When you are finished making all of your changes, used asciibinder to build -the updated or new content, and reviewed the rendered changes, run the following -command to add those changes to a pending commit: - ----- -$ git add . ----- - -*Step 4:* Commit your changes - -After adding your changes to a pending commit, run the following command to -commit those changes locally: - ----- -$ git commit -am "Detailed comments about what changes were made; for example, fixed typo" ----- - -*Step 5:* Rebase updates from `main` into your feature branch - -Remember that you must rebase against the branch that you created this feature -branch from. In most cases, it will be the main branch for the 4.x stream. - ----- -$ git rebase upstream/main ----- - -[NOTE] -==== -If you find any conflicts you must fix those, and repeat steps 3 and 4. -==== - -*Step 6:* Push all changes to your GitHub account - -After you have rebased, fixed any conflicts, and committed your changes, you can -push them to your GitHub account. This command adds your local feature branch to -your GitHub repository: - ----- -$ git push origin ----- - -[id="submit-PR"] -== Submit PR to merge your work - -When you have pushed your changes to your GitHub account, you can submit a PR to -have your work from your GitHub fork to the `main` branch of the OpenShift -documentation repository. The documentation team will review the work, advise of -any further changes that are required, and finally merge your work. - -1. Go to your forked GitHub repository on the GitHub website, and you should see -your feature branch that includes all of your work. -2. Click on *Pull Request* to submit the PR against the `main` branch of the -`openshift-docs` repository. -3. Fill out the information requested on the template. -** If you know which product versions your change applies to, include a comment -that specifies the minimum version that the change applies to. The docs team -maintains these branches for all active and future distros and your PR will be -applied to one or more of these branches. -*** PR applies to all versions after a specific version (e.g. 4.8): 4.8+ -*** PR applies to the in-development version (e.g. 4.12) and future versions: 4.12+ -*** PR applies only to a specific single version (e.g. 4.10): 4.10 -*** PR applies to multiple specific versions (e.g. 4.8-4.10): 4.8, 4.9, 4.10 -** Provide a link to the preview. Automatic preview functionality is currently only available for some branches. For PRs that update the rendered build in any way against branches that do not create an automated preview: -*** OpenShift documentation team members (core and aligned) must include a link to a locally generated preview. -*** External contributors can request a generated preview from the OpenShift documentation team. -** All documentation changes that impact the meaning of the docs must be verified by a QE team associate before merging. -** Provide any other information about the change that the docs team might need to understand it. -4. Make sure that you squash to one commit before submitting your PR. -5. Ask for review from the OpenShift docs team: -** For community authors: Request a review by tagging @openshift/team-documentation or @kalexand-rh in a GitHub comment. 
-** For Red Hat associates: -*** For normal peer requests, add a comment that contains this text: /label peer-review-needed -*** For normal merge review requests, add a comment that contains this text: /label merge-review-needed -*** For urgent peer review requests, ping @peer-review-squad requesting a review in the #forum-docs-review channel (CoreOS Slack workspace) and provide the following information: -**** A link to the PR. -**** The size of the PR that the GitHub bot assigns (ex: XS, S, M, L, XL). -**** Details about how the PR is urgent. -*** For urgent merge requests, ping @merge-review-squad in the #forum-docs-review channel (CoreOS Slack workspace). -*** Except for changes that do not impact the meaning of the content, QE review is required before content is merged. - -== Confirm your changes have been merged - -When your PR has been merged into the `main` branch, you should confirm and -then sync your local and GitHub repositories with the `main` branch. - -1. On your workstation, switch to the `main` branch: -+ ----- -$ git checkout main ----- - -2. Pull the latest changes from `main`: -+ ----- -$ git fetch upstream ----- - -3. Incorporate the commits from the remote repository, in this case -`openshift/openshift-docs`, into your local repository: -+ ----- -$ git rebase upstream/main ----- - -4. After confirming in your rebased local repository that your changes have been -merged, push the latest changes, including your work, to your GitHub account: -+ ----- -$ git push origin main ----- - -== Add changes to an existing PR, if required -In some cases you might have to make changes to a PR that you have already -submitted. The following instructions describe how to make changes to an -existing PR you have already submitted. - -1. Commit whatever updates you have made to the feature branch by creating a new -commit: -+ ----- -$ git commit -am "Detailed message as noted earlier" ----- - -2. Rebase your PR and squash multiple commits into one commit. Before you push -your changes in the next step, follow the instructions here to rebase and squash: -https://github.com/edx/edx-platform/wiki/How-to-Rebase-a-Pull-Request - -3. After you have rebased and squashed, push the latest updates to the local -feature branch to your GitHub account. -+ ----- -$ git push origin --force ----- - -The `--force` flag ignores whatever is on the remote server and replaces -everything with the local copy. You should now see the new commits in the -existing PR. Sometimes a refresh of your browser may be required. - -== Delete the local feature branch - -When you have confirmed that all of your changes have been accepted and merged, -and you have pulled the latest changes on `main` and pushed them to your -GitHub account, you can delete the local feature branch. Ensure you are in your -local repository before proceeding. - -1. Delete the local feature branch from your workstation. -+ ----- -$ git branch -D ----- - -2. 
Delete the feature branch from your GitHub account: -+ ----- -$ git push origin : ----- diff --git a/contributing_to_docs/doc_guidelines.adoc b/contributing_to_docs/doc_guidelines.adoc deleted file mode 100644 index 337cd36370e1..000000000000 --- a/contributing_to_docs/doc_guidelines.adoc +++ /dev/null @@ -1,2065 +0,0 @@ -[id="contributing-to-docs-doc-guidelines"] -= Documentation guidelines -include::_attributes/common-attributes.adoc -:toc: macro - -The documentation guidelines for OpenShift 4 build on top of the -link:https://redhat-documentation.github.io/modular-docs/[_Red Hat modular docs reference guide_]. - -[NOTE] -==== -These _Documentation guidelines_ are primarily concerned with the modular structure and AsciiDoc / AsciiBinder requirements for building OpenShift documention. For general style guidelines in OpenShift docs, see the following: - -* Primary source: link:https://www.ibm.com/docs/en/ibm-style[_IBM Style_] -* Supplementary source: link:https://redhat-documentation.github.io/supplementary-style-guide/[_Red Hat supplementary style guide for product documentation_] - -When looking for style guidance, reference the _Red Hat supplementary style guide for product documentation_ first, because it overrides certain guidance from the _IBM Style_ guide. -==== - -toc::[] - -== General file guidelines - -* Set your editor to strip trailing whitespace. -* Do *not* hard wrap lines at 80 characters (or at any other length). -+ -It is not necessary to update existing content to unwrap lines, but you can remove existing hard wrapping from any lines that you are currently working in. -+ -[TIP] -==== -In the Atom editor, you can use `Ctrl`+`J` to undo hard wrapping on a paragraph. -==== - -[id="assembly-file-metadata"] -== Assembly file metadata -Every assembly file should contain the following metadata at the top, with no line spacing in between, except where noted: - ----- -:_content-type: ASSEMBLY <1> -[id=""] <2> -= Assembly title <3> -include::_attributes/common-attributes.adoc[] <4> -:context: <5> - <6> -toc::[] <7> ----- - -<1> The content type for the file. For assemblies, always use `:_content-type: ASSEMBLY`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID. -<2> A unique (within OpenShift docs) anchor ID for this assembly. Use lowercase. Example: cli-developer-commands -<3> Human readable title (notice the `=` top-level header) -<4> Includes attributes common to OpenShift docs. -+ -[NOTE] -==== -The `{product-title}` and `{product-version}` common attributes are not defined in the `_attributes/common-attributes.adoc` file. Those attributes are pulled by AsciiBinder from the distro mapping definitions in the https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] file. See xref:product-name-and-version[Product title and version] and xref:attribute-files[attribute files] for more information on this topic. -==== -+ -<5> Context used for identifying headers in modules that is the same as the anchor ID. Example: cli-developer-commands. -<6> A blank line. You *must* have a blank line here before the toc. -<7> The table of contents for the current assembly. - -After the heading block and a single whitespace line, you can include any content for this assembly. - -[NOTE] -==== -The assembly title, which is the first line of the document, is the only level 1 ( = ) title. -Section headers within the assembly must be level 2 ( == ) or lower. 
When you include modules, you must add -leveloffsets in the include statements. You can manually add more level 2 or lower section headers in the assembly. -==== - -[id="module-file-metadata"] -== Module file metadata -Every module should be placed in the modules folder and should contain the following metadata at the top: - ----- -// Module included in the following assemblies: -// -// * list of assemblies where this module is included <1> - -:_content-type: <2> -[id="_{context}"] <3> -= Module title <4> ----- - -<1> The content type for the file. Replace `` with the actual type of the module, `CONCEPT`, `REFERENCE`, or `PROCEDURE`. Place this attribute before the anchor ID or, if present, the conditional that contains the anchor ID. -<2> List of assemblies in which this module is included. -<3> A module anchor with {context} that must be lowercase and must match the module's file name. -<4> Human readable title. To ensure consistency in the results of the -leveloffset values in include statements, you must use a level one heading -( = ) for the module title. - -Example: - ----- -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/developer-cli-commands.adoc - -:_content-type: REFERENCE -[id="cli-basic-commands_{context}"] -= Basic CLI commands ----- - -[id="snippet-file-metadata"] -== Text snippet file metadata -Every text snippet should be placed in the `snippets/` folder and should contain the following metadata at the top: - ----- -// Text snippet included in the following assemblies: <1> -// -// * list of assemblies where this text snippet is included -// -// Text snippet included in the following modules: <2> -// -// * list of modules where this text snippet is included - -:_content-type: SNIPPET <3> ----- -<1> List of assemblies in which this text snippet is included. -<2> List of modules in which this text snippet is included. -<3> The content type for the file. For snippets, always use `:_content-type: SNIPPET`. Place this attribute before the anchor ID, the conditional that contains the anchor ID, or the first line of body text. - -[NOTE] -==== -An anchor ID and human readable title are not required metadata. This type of component is text only and not intended to be published or cross referenced on its own. See <>. -==== - -Example: - ----- -// Text snippet included in the following assemblies: -// -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_gcp/installing-gcp-default.adoc - -:_content-type: SNIPPET -In {product-title} version {product-version}, you can install a cluster on {cloud-provider-first} ({cloud-provider}) that uses the default configuration options. ----- - -== Content type attributes - -Each `.adoc` file must contain a `:_content-type:` attribute in its metadata that indicates its file type. This information is used by some publication processes to sort and label files. - -Add the attribute from the following list that corresponds to your file type: - -* `:_content-type: ASSEMBLY` -* `:_content-type: CONCEPT` -* `:_content-type: PROCEDURE` -* `:_content-type: REFERENCE` -* `:_content-type: SNIPPET` - -Place the attribute in the file metadata. The following list describes the best attribute placement options: - -. Directly before the first anchor ID in a file -. If the first anchor ID is enclosed in a conditional, before the conditional -. 
Between the list of assemblies in which this module is included and the first line of body text - -The metadata examples contain sample placement for each file type, xref:assembly-file-metadata[assembly], xref:module-file-metadata[module], and xref:snippet-file-metadata[snippet]. - -[id="attribute-files"] -== Attribute files - -All attribute files must be placed in the `_attributes` directory. In most cases involving OpenShift Container Platform or OKD, add attributes to the `common-attributes.adoc` file instead of creating or using a separate attributes file. Before you add an attribute, review the contents of the `common-attributes.adoc` file to ensure that it is not already defined. - -[IMPORTANT] -==== -If you think that you need a separate attributes file, check with the docs team before you create it. -==== - -It is acceptable to group related attributes in the `common-attributes.adoc` file under a comment, as shown in the following example: - ----- -//gitops -:gitops-title: Red Hat OpenShift GitOps -:gitops-shortname: GitOps ----- - -It is also acceptable to enclose attributes in a xref:product-name-and-version[distro-based] conditional, but you must place attribute definitions for the `openshift-enterprise` distro first. The following example shows how to set a different value for the `:op-system-base:` attribute for OKD: - ----- -:op-system-base: RHEL -ifdef::openshift-origin[] -:op-system-base: Fedora -endif::[] ----- - -== Assembly/module file names - -Try to shorten the file name as much as possible _without_ abbreviating important terms that may cause confusion. For example, the `managing-authorization-policies.adoc` file name would be appropriate for an assembly titled "Managing Authorization Policies". - -== Directory names - -If you create a directory with a multiple-word name, separate each word with an underscore, for example `backup_and_restore`. - -[NOTE] -==== -Do not italicize user-replaced values. This guideline is an exception to the link:https://redhat-documentation.github.io/supplementary-style-guide/#user-replaced-values[_Red Hat supplementary style guide for product documentation_]. -==== - -Do not create or rename a top-level directory in the repository and topic map without checking with the docs program manager first. - -Avoid creating two levels of subdirectories because the link:https://github.com/openshift/openshift-docs/issues/52149[breadcrumb bar on docs.openshift.com breaks]. If you have a valid use case for two levels of subdirectories, talk with your DPM/CS (and, for aligned teams, the OpenShift DPM) for approval before creating it. - -When creating a new directory or subdirectory, you must create four symbolic links in it: - -* An `images` symbolic link to the top-level `images/` directory -* A `modules` symbolic link to the top-level `modules/` directory -* A `snippets` symbolic link to the top-level `snippets/` directory -* An `_attributes` symbolic link to the top-level `_attributes/` directory - -If the directory that contains an assembly does not have the `images` symbolic link, any images in that assembly or its modules will not be included properly when building the docs. - -[TIP] -==== -To create the symbolic links: - -. Navigate to the directory that you need to add the links in. -. 
Use the following command to create a symbolic link: -+ ----- -$ ln -s ----- -+ -For example, if you are creating the links in a directory that is two levels deep, such as `cli_reference/openshift_cli`, use the following commands: -+ ----- -$ ln -s ../../images/ images -$ ln -s ../../modules/ modules -$ ln -s ../../snippets/ snippets -$ ln -s ../../_attributes/ _attributes ----- -+ -Be sure to adjust the number of levels to back up (`../`) depending on how deep your directory is. - -If you accidentally create an incorrect link, you can remove that link by using `unlink `. -==== - -== Assembly/Module titles and section headings - -Use sentence case in all titles and section headings. See http://www.titlecase.com/ or https://convertcase.net/ for a conversion tool. - -Try to be as descriptive as possible with the title or section headings -without making them unnecessarily long. For assemblies and task modules, -use a gerund form in headings, such as: - -* Creating -* Managing -* Using - -Do not use "Overview" as a heading. - -Do not use backticks or other markup in assembly or module headings. - -Use only one level 1 heading (`=`) in any file. - -=== Discrete headings - -If you have a section heading that you do not want to appear in the TOC (like if you think that some section is not worth showing up or if there are already too many nested levels), you can use a discrete (or floating) heading: - -https://docs.asciidoctor.org/asciidoc/latest/blocks/discrete-headings/ - -A discrete heading also will not get a section number in the Customer Portal build of the doc. Previously, we would use plain bold mark-up around a heading like this, but discrete headings also allow you to ignore section nesting rules (like jumping from a `==` section level to a `====` level if you wanted for some style reason). - -To use a discrete heading, just add `[discrete]` to the line before your unique ID. For example: - ----- -[discrete] -[id="managing-authorization-policies_{context}"] -== Managing authorization policies ----- - -== Anchoring titles and section headings - -All titles and section headings must have an anchor ID. The anchor ID must be similar to the title or section heading. - -=== Anchoring in assembly files - -The following is an example anchor ID in an assembly file: - ----- -[id="configuring-alert-notifications"] -= Configuring alert notifications ----- - -[NOTE] -==== -Do not include line spaces between the anchor ID and the section title. -==== - -=== Anchoring in module files - -You must add the `{context}` variable to the end of each anchor ID in module files. When called, the `{context}` variable is resolved into the value declared in the `:context:` attribute in the corresponding assembly file. This enables cross-referencing to module IDs in context to a specific assembly and is useful when a module is included in multiple assemblies. - -[NOTE] -==== -The `{context}` variable must be preceded by an underscore (`_`) when declared in an anchor ID. 
-==== - -The following is an example of an anchor ID for a module file title: - ----- -[id="sending-notifications-to-external-systems_{context}"] -= Sending notifications to external systems ----- - -The following is an example of an anchor ID for a second level (`==`) heading: - ----- -[id="deployment-scaling-benefits_{context}"] -== Deployment and scaling benefits ----- - -=== Anchoring "Prerequisites", "Additional resources", and "Next steps" titles in assemblies - -Use unique IDs for "Prerequisites", "Additional resources", and "Next steps" titles in assemblies. You can add the prefixes `prerequisites_`, `additional-resources_`, or `next-steps_` to a unique string that describes the assembly topic. The unique string can match the value assigned to the `:context:` attribute in the assembly. - -[NOTE] -==== -The `prerequisites_`, `additional-resources_`, and `next-steps_` prefixes must end with an underscore (`_`) when declared in an anchor ID in an assembly. -==== - -The following examples include IDs that are unique to the "Configuring alert notifications" assembly: - -*Example unique ID for a "Prerequisites" title* - ----- -[id="prerequisites_configuring-alert-notifications"] -== Prerequisites ----- - -*Example unique ID for an "Additional resources" title* - ----- -[role="_additional-resources"] -[id="additional-resources_configuring-alert-notifications"] -== Additional resources ----- - -*Example unique ID for a "Next steps" title* - ----- -[id="next-steps_configuring-alert-notifications"] -== Next steps ----- - -== Writing assemblies -An _assembly_ is a collection of modules that describes how to accomplish a user story. - -Avoid link:https://redhat-documentation.github.io/modular-docs/#nesting-assemblies[nesting assemblies] in other assembly files. You can create more complicated document structures by modifying the link:https://github.com/openshift/openshift-docs/tree/main/_topic_maps[topic maps]. - -For more information about forming assemblies, see the -link:https://redhat-documentation.github.io/modular-docs/#forming-assemblies[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_ASSEMBLY_a-collection-of-modules.adoc[assembly template]. - -[NOTE] -==== -When using the "Prerequisites", "Next steps", or "Additional resources" headings in an assembly, use `==` formatting, such as `== Prerequisites` or `== Additional resources`. Use of this heading syntax at the assembly level indicates that the sections relate to the whole assembly. - -Only use `.` formatting (`.Additional resources`) to follow a module in an assembly. Because you cannot use the xrefs in modules, this functions as a _trailing include_ at the assembly level, where the `.` formatting of the `include` statement indicates that the resource applies specifically to the module and not to the assembly. -==== - -== Writing concepts -A _concept_ contains information to support the tasks that users want to do and -must not include task information like commands or numbered steps. In most -cases, create your concepts as individual modules and include them in -appropriate assemblies. - -Avoid using gerunds in concept titles. "About " -is a common concept module title. 
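For illustration only, here is a minimal sketch of a concept module header that follows the module file metadata described earlier; the file name, anchor ID, and title are hypothetical and not taken from the repository:

----
// Module included in the following assemblies:
//
// * monitoring/managing-alerts.adoc

:_content-type: CONCEPT
[id="about-alert-routing_{context}"]
= About alert routing

Alert routing determines where alert notifications are sent.
----

Note that the body is plain explanatory text, with no commands or numbered steps.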
- -For more information about creating concept modules, see the -link:https://redhat-documentation.github.io/modular-docs/#creating-concept-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_CONCEPT_concept-explanation.adoc[concept template]. - -== Writing procedures -A _procedure_ contains the steps that users follow to complete a process or task. Procedures contain ordered steps and explicit commands. In most cases, create your procedures as individual modules and include them in appropriate assemblies. - -Use a gerund in the procedure title, such as "Creating". - -For more information about writing procedures, see the -link:https://redhat-documentation.github.io/modular-docs/#creating-procedure-modules[_Red Hat modular docs reference guide_] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template]. - -[NOTE] -==== -When needed, use `.Prerequisites`, `.Next steps`, or `.Additional resources` syntax to suppress TOC formatting within a module. Do not use `==` syntax for these headings in modules. Because you cannot use the xrefs in modules, if you need to include a link under one of these headings, place the entire subsection in the assembly instead. -==== - -[id="writing-text-snippets"] -== Writing text snippets -A _text snippet_ is an optional component that lets you reuse content in multiple modules and assemblies. Text snippets are not a substitute for modules but instead are a more granular form of content reuse. While a module is content that a reader can understand on its own (like an article) or as part of a larger body of work (like an assembly), a text snippet is not self-contained and is not intended to be published or cross referenced on its own. - -In the context of modules and assemblies, text snippets do not include headings or anchor IDs. Text snippets also cannot contain xrefs. This type of component is text only. Examples include the following: - -* Admonitions that appear in multiple modules. -* An introductory paragraph that appears in multiple assemblies. -* The same series of steps that appear in multiple procedure modules. -* A deprecation statement that appears in multiple sets of release notes. - -Example: - -You could write the following paragraph once and include it in each assembly that explains how to install a cluster using the installer-provisioned default values: - -[source,text] ----- -In {product-title} version {product-version}, you can install a cluster on {cloud-provider-first} ({cloud-provider}) that uses the default configuration options. ----- - -[NOTE] -==== -In the example, `cloud-provider-first` and `cloud-provider` are not defined by the `common-attributes` module. If you use an attribute that is not common to OpenShift docs, make sure to define it locally in either the assembly or module, depending on where the text snippet is included. Because of this, consider adding all attributes that you add to snippets to the `common-attributes.adoc` file. -==== - -For more information about creating text snippets, see the -link:https://redhat-documentation.github.io/modular-docs/#using-text-snippets[_Red Hat modular docs reference guide_]. 
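As a sketch, assuming a hypothetical snippet file named `snippets/installing-default-intro.adoc`, the snippet is pulled into an assembly or module with a plain include statement:

----
\include::snippets/installing-default-intro.adoc[]
----

Because a text snippet has no anchor ID or title, it renders as if the text were written directly in the including file.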
- -[id="Auto-generated-content"] -== Auto-generated content - -The following content is auto-generated in each release and must not be manually edited: - -* The OpenShift CLI (`oc`) command references `modules/oc-by-example-content.adoc` and `modules/oc-adm-by-example-content.adoc`. -* The following API references content in the `rest_api` folder: the contents of all `_apis` subfolders and the `rest_api/objects/index.adoc` and `rest_api/index.adoc` assemblies. -* OpenShift Virtualization runbook modules: `modules/virt-runbook-.adoc`. - -[NOTE] -==== -If the content in these files needs to be updated, the update must be made in the applicable code repository where these files are generated from. The updates are reflected when the files are generated the next time, for example a future release. For help with where to make the updates, you can contact https://github.com/bergerhoffer[Andrea Hoffer] for the CLI docs, https://github.com/jboxman-rh[Jason Boxman] for the API docs, or https://github.com/apinnick[Avital Pinnick] for the OpenShift Virtualization runbooks. -==== - -[id="using-conscious-language"] -== Using conscious language - -To assist with the removal of the problematic word "master" from the documentation, use the following terminology when referring to OpenShift control plane nodes: - -[options="header"] -|=== -|Branch |Control plane node reference - -|`main`, `enterprise-4.9`, and later enterprise versions -|Control plane node - -|`enterprise-4.8` and earlier enterprise versions -|Control plane (also known as master) node - -|`enterprise-3.11` -|Master node - -|=== - -You can replace "node" in the preceding examples with "machine", "host", or another suitable description. - -In general text, use the term "control plane machine" in place of "master machine"; use the term "compute machine" in place of "worker machine". Be mindful of certain valid code entities, such as `master` role, `worker` role, and `infra` role. - -[NOTE] -==== -If you are cherry picking from `main` to `enterprise-4.8` or earlier, you must manually cherry pick to include the “(also known as master)” phrasing. This is required only if the phrase “control plane” is introduced for the first time in an assembly or module. -==== - -[id="adding-a-subsection-on-making-open-source-more-inclusive"] -=== Adding a subsection on making open source more inclusive - -If you create a release notes assembly for a sub-product within the `openshift/openshift-docs` repo, you might include a "Making open source more inclusive" statement. Instead of pasting the statement from the OpenShift Release Notes, use the following module, which is available in the `enterprise-4.8` branch and later: - -[source,text] ----- -\include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] ----- - -[id="product-name-and-version"] -== Product title and version - -When possible, generalize references to the product name and/or version by using -the `{product-title}` and/or `{product-version}` attributes. These attributes -are pulled by AsciiBinder from the OpenShift distribution, or _distro_, mapping definitions in the -https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] -file. - -The `{product-title}` comes from the first `name:` field in a distro mapping, -while the associated `{product-version}` comes from the `name:` fields on any -`branches:` defined. - -How these attributes render is dependent on which distro and branch build you -are viewing. 
The following table shows the current distros and the -possible values for `{product-title}` and `{product-version}`, depending on the branch: - -[options="header"] -|=== -|Distro |`{product-title}` |`{product-version}` - -|`openshift-origin` -|OKD -a|* 3.6, 3.7, 3.9, 3.10, 3.11 -* 4.8, 4.9, 4.10, 4.11, 4.12, 4.13 -* 4 for the `latest/` build from the `main` branch - -|`openshift-enterprise` -|OpenShift Container Platform -a|* 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.9, 3.10, 3.11 -* 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 4.10, 4.11, 4.12, 4.13, 4.14 - -|`openshift-dedicated` -|OpenShift Dedicated -a|* No value set for the latest `dedicated/` build from the `enterprise-4.13` branch -* 3 for the `dedicated/3` build from the `enterprise-3.11` branch - -|`openshift-rosa` -|Red Hat OpenShift Service on AWS -|No value set for the `rosa/` build from the `enterprise-4.13` branch - -|`openshift-online` -|OpenShift Online -|Pro -|=== - -For example: - ----- -You can deploy applications on {product-title}. ----- - -This is a safe statement that could appear in probably any of the builds, so an -https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/contributing.adoc#conditional-text-between-products[ifdef/endif -statement] is not necessary. For example, if you were viewing a build for the -`openshift-enterprise` distro (for any of the distro-defined branches), this -would render as: - -> You can deploy applications on OpenShift Container Platform. - -And for the `openshift-origin` distro: - -> You can deploy applications on OKD. - -Considering that we use distinct branches to keep content for product versions separated, global use of `{product-version}` across all branches is probably less useful, but it is available if you come across a requirement for it. Just consider how it will render across any branches that the content appears in. - -If it makes more sense in context to refer to the major version of the product instead of a specific minor version (for example, if comparing how something in OpenShift Container Platform 4 differs from OpenShift Container Platform 3), just use the major version number. Do not prepend with a `v`, as in `v3` or `v4`. - -[NOTE] -==== -Other common attribute values are defined in the `_attributes/common-attributes.adoc` file. Where possible, generalize references to those values by using the common attributes. For example, use `{cluster-manager-first}` to refer to Red Hat OpenShift Cluster Manager. If you need to add an attribute to the `_attributes/common-attributes.adoc` file, open a pull request to add it to the attribute list. Do not create a separate attributes file without first consulting the docs team. -==== - -//CANARY -[id="conditional-content"] -== Conditional content - -You can use ifdef and ifeval statements to control the way content displays in different distributions and assemblies. - -NOTE: You can nest conditional statements that involve distribution and assembly context, but you must ensure that you close the if statements correctly. - -Because we maintain separate branches for each OpenShift Container Platform version, do not use if statements that are based on product version to vary content. - -[id="conditionals-for-distributions"] -=== Conditionals for distributions - -Use ifdef and ifndef statements to control content based on distribution, as described in the previous section. 
For example, the following content renders differently in OKD (`openshift-origin`) and OpenShift Container Platform (`openshift-enterprise`): - ----- -\ifdef::openshift-origin[] -You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID -Connect identity provider for {product-title}. -\endif::[] - -\ifdef::openshift-enterprise[] -You can -link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/[configure Red Hat Single Sign-On] -as an OpenID Connect identity provider for {product-title}. -\endif::[] ----- - -In OKD, this section renders as the following text: - -> You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID -Connect identity provider for OKD. - -In OpenShift Container Platform, this section renders as the following text: - -> You can -link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/[configure Red Hat Single Sign-On] -as an OpenID Connect identity provider for OpenShift Container Platform. - - -[id="conditionals-for-assemblies"] -=== Conditionals for different assemblies - -Use a combination of ifdef and ifeval statements to control content that needs to vary between assemblies. These conditional statements rely on a combination of the context attribute for each assembly and specific temporary attributes within each module to control content. - -The following sample shows a simple example. In the assembly that contains the `context` attribute `updating-restricted-network-cluster`, an extra paragraph is displayed. - ----- -\ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -\endif::[] - -... - -\ifdef::restricted[] -If you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to. -\endif::restricted[] - -... - -\ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -\endif::[] ----- - -Note that you must set and unset each temporary attribute that you introduce to an assembly. Use the temporary attributes in the applicable ifdef and ifndef statements to vary text between the assemblies. The preceding example uses `restricted` as the temporary attribute to display an additional paragraph for the assembly with the `updating-restricted-network-cluster` context attribute. - -== Node names - -Do not use internal company server names in commands or example output. Provide generic OpenShift Container Platform node name examples that are not provider-specific, unless required. Where possible, use the example.com domain name when providing fully qualified domain names (FQDNs). - -The following table includes example OpenShift Container Platform 4 node names and their corresponding role types: - -[options="header"] -|=== - -|Node name |Role type - -|*node-1.example.com* -.3+.^|You can use this format for nodes that do not need role-specific node names. - -|*node-2.example.com* - -|*node-3.example.com* - -|*control-plane-1.example.com* -.3+.^|You can use this format if you need to describe the control plane role type within a node name. - -|*control-plane-2.example.com* - -|*control-plane-3.example.com* - -|*compute-1.example.com* -.2+.^|You can use this format if you need to describe the compute node role type within a node name. - -|*compute-2.example.com* - -|*bootstrap.example.com* -|You can use this format if you need to describe the bootstrap node role type within a node name.
-|=== - -This example lists the status of cluster nodes that use the node name formatting guidelines: - -.... -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -compute-1.example.com Ready worker 33m v1.19.0+9f84db3 -control-plane-1.example.com Ready master 41m v1.19.0+9f84db3 -control-plane-2.example.com Ready master 45m v1.19.0+9f84db3 -compute-2.example.com Ready worker 38m v1.19.0+9f84db3 -compute-3.example.com Ready worker 33m v1.19.0+9f84db3 -control-plane-3.example.com Ready master 41m v1.19.0+9f84db3 ----- -.... - -[NOTE] -==== -Some provider-formatted hostnames include IPv4 addresses. An OpenShift Container Platform node name typically reflects the hostname of a node. If node names in your output need to be provider-specific and require this format, use private IPv4 addresses. For example, you could use `ip-10-0-48-9.example.com` as a node name that includes a private IPv4 address. -==== - -== IP addresses - -You may include IPv4 addresses from test clusters in examples in the documentation, as long as they are private. Private IPv4 addresses fall into one of the following ranges: - -* 10.0.0.0 to 10.255.255.255 (class A address block 10.0.0.0/8) -* 172.16.0.0 to 172.31.255.255 (class B address block 172.16.0.0/12) -* 192.168.0.0 to 192.168.255.255 (class C address block 192.168.0.0/16) - -Replace all public IP addresses with an address from the following blocks. These address blocks are reserved for documentation: - -* 192.0.2.0 to 192.0.2.255 (TEST-NET-1 address block 192.0.2.0/24) -* 198.51.100.0 to 198.51.100.255 (TEST-NET-2 address block 198.51.100.0/24) -* 203.0.113.0 to 203.0.113.255 (TEST-NET-3 address block 203.0.113.0/24) - -[NOTE] -==== -There might be advanced networking examples that require specific IP addresses, or cloud provider-specific examples that require a public IP address. Contact a subject matter expert if you need assistance with replacing IP addresses. -==== - -== Links, hyperlinks, and cross references -Links can be used to cross-reference internal assemblies or send readers to external information resources for further reading. - -In OpenShift docs: - -* All links to internal content is created using `xref` and **must have an anchor ID**. -* Only use `xref` in assemblies, not in modules. -* All links to external websites are created using `link`. - -[IMPORTANT] -==== -Do not split link paths across lines when wrapping text. This will cause issues with the doc builds. -==== - -=== Example URLs -To provide an example URL path that you do not want to render as a hyperlink, use this format: - -.... -`\https://www.example.com` -.... - -=== Internal cross-references - -Use the relative file path (from the file you are editing to the file you are linking to), even if you are linking to the same directory that you are writing in. This makes search and replace operations to fix broken links much easier. - -For example, if you are writing in `architecture/core_concepts/deployments.adoc` and you want to link to `architecture/core_concepts/routes.adoc`, then you must include the path back to the first level of the assembly directory: - ----- -xref:../../architecture/networking/routes.adoc#architecture-core-concepts-routes ----- - -[NOTE] -==== -In OpenShift docs, you can only use `xref` in assemblies, not in modules. -==== - -.Markup example of cross-referencing ----- -For more information, see xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[Creating an application]. 
- -Rollbacks can be performed using the REST API or the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI]. ----- - -.Rendered output of cross-referencing -> For more information, see xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[Creating an application]. -> -> Rollbacks can be performed using the REST API or the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI]. - -=== Links to external websites - -If you want to link to a different website, use: - ----- -link:http://othersite.com/otherpath[friendly reference text] ----- - -IMPORTANT: You must use `link:` before the start of the URL. - -IMPORTANT: You cannot link to a repository that is hosted on www.github.com. - -TIP: If you want to build a link from a URL _without_ changing the text from the actual URL, just print the URL without adding a `[friendly text]` block at the end; it will automatically be rendered as a link. - -=== Links to internal content -There are two scenarios for linking to other assemblies: - -1. Link to another file that exists in the same directory. -2. Link to another file that exists in a separate directory. - -The following examples use the example directory structure shown here: -.... -/ -/foo -/foo/bar.adoc -/baz -/baz/zig.adoc -/baz/zag.adoc -.... - -*Link to assembly in same directory* - ----- -xref:#anchor-id[friendly title] ----- - -You must use the `.adoc` file extension. The document processor will correctly link this to the resulting HTML file. - -For example, using the above syntax, if you are working on `zig.adoc` and want to link to `zag.adoc`, do it this way: - ----- -xref:../zag.adoc#baz-zag[comment] ----- - -where `baz-zag` is the anchor ID at the top of the file `zag.adoc`. - -*Link to assembly in different directory* - ----- -xref:../dir/.adoc#anchor-id[friendly title] ----- - -For example, if you are working on `bar.adoc` and you want to link to `zig.adoc`, do it this way: - ----- -For more information, see the xref:../baz/zig.adoc#baz-zig[ZIG manual]. ----- - -[NOTE] -==== -You must use the `.adoc` extension in order for the link to work correctly and you must specify an anchor ID. -==== - -== Embedding an external file - -You can embed content hosted outside the link:https://github.com/openshift/openshift-docs[openshift-docs] -GitHub repository by using the `include` directive to target the URI of a raw -file. This is helpful for cases where content frequently changes; you embed the raw -file and the content auto-updates based on the changes made to the content on its -host site. - -[IMPORTANT] -==== -You are restricted to only embed files from GitHub repositories managed by the -`openshift` GitHub user. You must also prefix your external file URI with `https`. -URIs beginning with `http` are forbidden for security reasons and will fail the -documentation build. -==== - -For example, if you want to embed the link:https://github.com/openshift/installer/blob/release-4.8/upi/azure/01_vnet.json[01_vnet.json] template, include the URI of its raw file version like this: - -``` -.`01_vnet.json` ARM template -[source,json] ----- -\include::https://raw.githubusercontent.com/openshift/installer/release-4.8/upi/azure/01_vnet.json[] ----- -``` - -[NOTE] -==== -Embedding external files is restricted for files that change frequently, like templates. You must ensure that embedded files are QE verified before they are updated on their host site. 
-==== - -[NOTE] -==== -You must get approval from the Engineering, QE, and Docs teams before embedding an external file. -==== - -== Embedding a local YAML file - -You can embed local YAML files in AsciiDoc modules. -Consider embedding a local YAML file when you have a complete and valid YAML file that you want to use. -This is useful when you want to include a complete YAML CR in the docs. -The YAML file that you include must be a local file maintained in the link:https://github.com/openshift/openshift-docs[openshift-docs] GitHub repository. -Use the `include` directive to target the local file. - -To use a local YAML file, add it to the `snippets/` folder, and include it in your module. For example: - -[source,yaml] ----- -\include::snippets/install-config.yaml[] ----- - -[NOTE] -==== -Do not include link:https://docs.asciidoctor.org/asciidoc/latest/directives/include-lines/[lines by content ranges]. This approach can lead to content errors when the included file is subsequently updated. -==== - -[IMPORTANT] -==== -If the YAML file you want to include is from a GitHub repository that is managed by the `openshift` GitHub user, link to the file directly rather than copying the file to the `/openshift-docs` folder. -==== - -[discrete] -=== Using AsciiDoc callouts in the YAML - -You can use AsciiDoc callouts in the YAML file. -Comment out the callout in the YAML file to ensure that file can still be parsed as valid YAML. -Asciidoctor recognises the commented callout and renders it correctly in the output. -For example: - -[source,yaml] ----- -apiVersion: v1 # <1> ----- - -[discrete] -=== Version and upgrade implications - -Carefully consider the version and upgrade implications of including the local YAML file in your content. Including a local YAML file can increase the maintenance overhead for the content. -If you have a doubt, talk to your content strategist or docs team lead. - -[discrete] -=== Validating the local YAML file - -Before you include the YAML file, use a YAML linter or the `oc` CLI to verify that the YAML is valid. -For example, to validate the `snippets/SiteConfig.yaml` file using `oc`, log in to a cluster and run the following command from a terminal opened in the `openshift-docs/` folder: - -[source,terminal] ----- -$ oc apply -f snippets/SiteConfig.yaml --dry-run=client ----- - -.Example output -[source,terminal] ----- -siteconfig.ran.openshift.io/example-sno created (dry run) ----- - -Running `oc` with the `--dry-run=client` switch does not succeed with an invalid YAML file. - -== Indicating Technology Preview features - -To indicate that a feature is in Technology Preview, include the `snippets/technology-preview.adoc` file in the feature's assembly or module to keep the supportability wording consistent across Technology Preview features. Provide a value for the `:FeatureName:` variable before you include this module. - -[source,text] ----- -:FeatureName: The XYZ plug-in -\include::snippets/technology-preview.adoc[] ----- - -== Indicating deprecated features - -To indicate that a feature is deprecated, include the `modules/deprecated-feature.adoc` file in the feature's assembly, or to each relevant assembly such as for a deprecated Operator, to keep the supportability wording consistent across deprecated features. Provide a value for the `:FeatureName:` variable before you include this module. - -For more information on how this is applied, see link:https://github.com/openshift/openshift-docs/pull/31776/files[this example PR]. 
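As a sketch that mirrors the Technology Preview pattern above, the markup in an assembly might look like the following; the feature name and the leveloffset value are hypothetical:

----
:FeatureName: The XYZ plug-in
\include::modules/deprecated-feature.adoc[leveloffset=+1]
----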
- -== Verification of your content -All documentation changes must be verified by a QE team associate before merging. This includes executing all "Procedure" changes and confirming expected results. There are exceptions for typo-level changes, formatting-only changes, and other negotiated documentation sets and distributions. - -If a documentation change is due to a Bugzilla bug or Jira issue, the bug/issue should be put on ON_QA when you have a PR ready. After QE approval is given (either in the bug/issue or in the PR), the QE associate should move the bug/issue status to VERIFIED, at which point the associated PR can be merged. It is also ok for the assigned writer to change the status of the bug/issue to VERIFIED if approval for the changes has been provided in another forum (slack, PR, or email). The writer should indicate that the QE team approved the change as a comment in the bug/issue. - -== Images - -=== Image format - -Use `*.png` format images. - -=== Block images - -To include a block image (an image on its own line): - -1. Put the image file in the `images` folder. -+ -Ensure that the folder containing your assembly contains an `images` symbolic link to the top-level `images/` directory, otherwise the image will not be found when building the docs. - -2. In the `.adoc` content, use this format to link to the image: -+ ----- -image::[] ----- -+ -Note the double `::` instead of a single `:`, as seen in inline image usage. -You only have to specify `` itself and not the full file path; -the build mechanism automatically expands this appropriately. - -=== Inline images (icons) - -Inline images can be used to indicate graphic items in the web console, such as -buttons or menu icons. - -==== Inserting reusable images inline - -To simplify reuse, the following common SVGs (the OpenShift web console uses the -Font Awesome icon set) have already been added to the `images` folder with a -user-defined entity added to the `common-attributes.adoc` module: - -|=== -|Icon |Entity |Alt text |File name - -|Kebab -|`:kebab:` -|Options menu -|`ellipsis-v.svg` - -|=== - -When using inline, include the image after the UI element name. For example: - ----- -Click the *Options* menu {kebab}. ----- - -==== Inserting images inline without reuse - -If you are inserting an image that is not part of the `common-attributes.adoc` -module, then include the image using this formatting: - ----- -image:[title=""] ----- - -Note the single `:` instead of a double `::`, as seen in block image usage. - -For example: - ----- -image:manage-columns.png[title="Manage Columns icon"] ----- - -== Formatting - -For all of the system blocks including table delimiters, use four characters. For example: - -.... -|=== for tables ----- for code blocks -.... - -[NOTE] -==== -You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title. -==== - -=== Code blocks, command syntax, and example output - -Code blocks are generally used to show examples of command syntax, example -screen output, and configuration files. - -The main distinction between showing command syntax and a command example is -that a command syntax shows readers how to use the command without real values. -An example command, however, shows the command with actual values with an -example output of that command, where applicable. - -For example: - -.... 
-In the following example, the `oc get` operation returns a complete list of services that are currently defined: - -[source,terminal] ----- -$ oc get se ----- - -.Example output -[source,terminal] ----- -NAME LABELS SELECTOR IP PORT -kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443 -kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80 -docker-registry name=registrypod 172.30.17.158 5001 ----- -.... - -This renders as: - -> In the following example, the `oc get` operation returns a complete list of services that are currently defined: -> -> ---- -> $ oc get se -> ---- -> -> .Example output -> ---- -> NAME LABELS SELECTOR IP PORT -> kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443 -> kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80 -> docker-registry name=registrypod 172.30.17.158 5001 -> ---- - -The following guidelines go into more detail about specific requirements and -recommendations when using code blocks: - -* If a step in a procedure is to run a command, make sure that the step -text includes an explicit instruction to "run" or "enter" the command. In most cases, -use one of the following patterns to introduce the code block: - -** by running the following command: -** by entering the following command: -** , run the following command: -** , enter the following command: - -* Do NOT use any markup in code blocks; code blocks generally do not accept any markup. - -* For all code blocks, you must include an empty line above a code block (unless -that line is introducing block metadata, such as `[source,terminal]` for syntax -highlighting). -+ -Acceptable: -+ -.... -Lorem ipsum - ----- -$ lorem.sh ----- -.... -+ -Not acceptable: -+ -.... -Lorem ipsum ----- -$ lorem.sh ----- -.... -+ -Without the line spaces, the content is likely to be not parsed correctly. - -* Use `[source,terminal]` for `oc` commands or any terminal commands to enable -syntax highlighting. Any `[source]` metadata must go on the line directly before -the code block. For example: -+ -.... -[source,terminal] ----- -$ oc get nodes ----- -.... -+ -If you are also showing a code block for the output of the command, use -`[source,terminal]` for that code block as well. - -* Use source tags for the programming language used in the code block to enable -syntax highlighting. For example: - -** `[source,yaml]` -** `[source,go]` -** `[source,javascript]` -** `[source,jsx]` - -* Do not use more than one command per code block. For example, the following must -be split up into three separate code blocks: -+ -.... -To create templates you can modify, run the following commands: - -[source,terminal] ----- -$ oc adm create-login-template > login.html ----- - -[source,terminal] ----- -$ oc adm create-provider-selection-template > providers.html ----- - -[source,terminal] ----- -$ oc adm create-error-template > errors.html ----- -.... - -* If your command contains multiple lines and uses callout annotations, you must comment out the callout(s) in the codeblock, as shown in the following example: -+ -.... -To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing object: - -[source,terminal] ----- -$ oc autoscale / \// <1> - --min \// <2> - --max \// <3> - --cpu-percent= <4> ----- -<1> Specify the type and name of the object to autoscale. -<2> Optional: Specify the minimum number of replicas when scaling down. -<3> Specify the maximum number of replicas when scaling up. 
-<4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. -.... - -* Separate a command and its related example output into individual code blocks. -This allows the command to be easily copied using the button on -+++docs.openshift.com+++. -+ -In addition, prepend the code block for the output with the title `.Example output` -to make it consistently clear across the docs when this is being represented. A -lead-in sentence explaining the example output is optional. For example: -+ -.... -Use the `oc new-project` command to create a new project: - -[source,terminal] ----- -$ oc new-project my-project ----- - -The output verifies that a new project was created: - -.Example output -[source,terminal] ----- -Now using project "my-project" on server "https://openshift.example.com:6443". ----- -.... - -* To mark up command syntax, use the code block and wrap any replaceable values in angle brackets (`<>`) with the required command parameter, using underscores (`_`) between words as necessary for legibility. Do not italicize user-replaced values. For example: -+ -.... -To view a list of objects for the specified object type, enter the following command: - -[source,terminal] ----- -$ oc get ----- -.... -+ -This renders as: -+ --- -> To view a list of objects for the specified object type, enter the following command: -> -> ---- -> $ oc get -> ---- --- -+ -NOTE: Avoid using full command syntax inline with sentences. - -* When you specify link:https://kubernetes.io/docs/reference/kubectl/#resource-types[resource names] in `oc` commands, use the full name of the resource type by default. You can use the abbreviation of the resource type name if it improves readability, such as with very long commands, or to be consistent with existing content in the same assembly. -+ -For example, use `namespaces` instead of `ns` and `poddisruptionbudgets` instead of `pdb`. - -* When referring to a path to a location that the user has selected or created, treat the part of the path that the user chose as a replaceable value. For example: -+ -.... -Create a secret that contains the certificate and key in the `openshift-config` namespace: - -[source,terminal] ----- -$ oc create secret tls --cert=/cert.crt --key=/cert.key -n openshift-config ----- -.... -+ -This renders as: -+ --- -> Create a secret that contains the certificate and key in the `openshift-config` namespace: -> -> ---- -> $ oc create secret tls --cert=/cert.crt --key=/cert.key -n openshift-config -> ---- --- -+ -The following example shows a more complex use of user-chosen elements and prescriptive placement: -+ -.... -/providers/Microsoft.Compute/diskEncryptionSets/ -.... - -* If you must provide additional information on what a line of a code block -represents, use callouts (`<1>`, `<2>`, etc.) to provide that information. -+ -Use this format when embedding callouts into the code block: -+ -[subs=-callouts] -.... ----- -code example 1 <1> -code example 2 <2> ----- -<1> A note about the first example value. -<2> A note about the second example value. -.... - -* If you must provide additional information on what a line of a code block -represents and the use of callouts is impractical, you can use a description list -to provide information about the variables in the code block. Using callouts -might be impractical if a code block contains too many conditional statements to -easily use numbered callouts or if the same note applies to multiple lines of the codeblock. -+ -.... 
----- -code -code ----- -+ -where: - -:: Specifies the explanation of the first variable. -:: Specifies the explanation of the first variable. -.... -+ -Be sure to introduce the description list with "where:" and start each variable -description with "Specifies." - -* For long lines of code that you want to break up among multiple lines, use a -backslash to show the line break. For example: -+ ----- -$ oc get endpoints --all-namespaces --template \ - '{{ range .items }}{{ .metadata.namespace }}:{{ .metadata.name }} \ - {{ range .subsets }}{{ range .addresses }}{{ .ip }} \ - {{ end }}{{ end }}{{ "\n" }}{{ end }}' | awk '/ 172\.30\./ { print $1 }' ----- - -* If the user must run a command as root, use a number sign (`#`) at the start of the command instead of a dollar sign (`$`). For example: -+ ----- -# subscription-manager list ----- - -* For snippets or sections of a file, use an ellipsis (`...` or `# ...` for YAML) to show that the file continues before or after the quoted block. -+ ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: liveness -# ... ----- -+ -or -+ ----- -Name: ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Roles: worker -... -Taints: node-role.kubernetes.io/infra:NoSchedule -... ----- -+ -Do not use `[...]`, ``, or any other variant. - -* Do not use `jq` in commands (unless it is truly required), because this requires users to install the `jq` tool. Oftentimes, the same or similar result can be accomplished using `jsonpath` for `oc` commands. -+ -For example, this command that uses `jq`: -+ ----- -$ oc get clusterversion -o json|jq ".items[0].spec" ----- -+ -can be updated to use `jsonpath` instead: -+ ----- -$ oc get clusterversion -o jsonpath='{.items[0].spec}{"\n"}' ----- - -* For Bash "here" documents use `[source,terminal]`, such as the following example: -+ -.... -[source,terminal] ----- -$ cat < ` command: -+ -.... -[source,text] ----- -Name: node1.example.com -Roles: worker -Labels: kubernetes.io/arch=amd64 -... -Annotations: cluster.k8s.io/machine: openshift-machine-api/ahardin-worker-us-east-2a-q5dzc -... -CreationTimestamp: Wed, 13 Feb 2019 11:05:57 -0500 ----- -.... - -=== YAML formatting for Kubernetes and OpenShift API objects -The following formatting guidelines apply to YAML manifests, but do not apply to the installation configuration YAML specified by `install-config.yaml`. - -When possible, ensure that YAML is valid in a running cluster. You can validate YAML with `oc apply` with the following invocation: - ----- -$ oc apply -f test.yaml --dry-run=client ----- - -==== Required fields - -- Include the `apiVersion` and `kind` so that a user always knows the context of the YAML. -- Include the full hierarchy to a deeply nested key. -- For objects that are in the global scope, such as for `config.openshift.io` API group, always include the `metadata.name` for the object, which is usually `cluster`. - -.Example API object in the global scope ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -# ... -spec: - defaultNodeSelector: node-role.kubernetes.io/app= -# ... ----- - -.Example deeply nested key with full context for `.ports` array ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - namespace: default -spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - protocol: TCP ----- - -==== Formatting -The following conventions govern the layout of YAML for API objects: - -- Begin YAML at the beginning of the left margin. -- Use two-space indentation. 
-- Indent arrays at the same depth as the parent field. -- Include a space immediately after the colon for keys. -- Use block style for complex strings, such as embedded JSON or text blocks. You can enable block style by specifying `|` or `|-` after a field and indenting the field content by two spaces, such as in the following example: -+ ----- -fieldName: |- - This is a string. - And it can be on multiple lines. ----- -- When truncating YAML, comment out the ellipsis (`# ...`) because three dots (`...`) in YAML is actually a link:https://yaml.org/spec/1.2.2/#22-structures[document end marker]. -- Use three hyphens (`---`) to separate YAML definitions in a single YAML file. - -.Example with array indentation flush with parent field ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - labels: - - key1: val1 - - key2: val2 -spec: -# ... ----- - -.Example with block string for annotation ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod1 - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "net" - } - ] -spec: -# ... ----- - -=== Inline code or commands -Do NOT show full commands or command syntax inline within a sentence. The next section covers how to show commands and command syntax. - -Only use case for inline commands would be general commands and operations, without replaceables and command options. In this case an inline command is marked up using the back ticks: - -.... -Use the `GET` operation to do x. -.... - -This renders as: - -> Use the `GET` operation to do x. - -=== System messages - -System messages include error, warning, confirmation, and information messages that are presented to the user in places such as the GUI, CLI, or system logs. - -If a message is short enough to include inline, enclose it in back ticks: - -.... -Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice. -.... - -This renders as: - -> Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice. - -If a message is too long to include inline, put it inside a code block with `[source,text]` metadata: - -.... -Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message: - -[source,text] ----- -When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent. ----- - -Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message. -.... - -This renders as: - -> Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message: -> -> ---- -> When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent. 
-> ---- -> -> Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message. - -NOTE: Always refer to a message with the type of message it is, followed by the word "message". For example, refer to an error message as an "error message", and not simply as an "error". - -=== Lists -Lists are created as shown in this example: - -.... -. Item 1 (2 spaces between the period and the first character) - -. Item 2 - -. Item 3 -.... - -This renders as: - -> . Item 1 -> . Item 2 -> . Item 3 - -If you must add any text, admonitions, or code blocks you have to add the continuous +, as shown in the example: - -.... -. Item 1 -+ ----- -some code block ----- - -. Item 2 - -. Item 3 -.... - -This renders as: - -> . Item 1 -> + -> ---- -> some code block -> ---- -> . Item 2 -> . Item 3 - -=== Footnotes - -Avoid footnotes when possible. - -If you reference a footnote from only a single location, use the following syntax: - -.Footnote -.... -footnote:[This is the footnote text.] -.... - -If you reference a footnote from multiple locations, set an attribute with the footnote text. As a consequence, this will duplicate the footnote text at bottom of the page. - -.Footnote with text set by an attribute -.... -:note-text: This is a footnote. - -This text has a footnote qualifier attached footnote:[{note-text}]. - -But this other text uses the same qualifier elsewhere footnote:[{note-text}]. -.... - -Avoid using `footnoteref`. - -[IMPORTANT] -==== -The `footnoteref` directive is deprecated in asciidoctor and causes a build warning when `ascii_binder` is run. -==== - -.Footnote with reference -.... -footnoteref:[ref-string, This is the footnote text.] -.... - -==== Alternative footnote styling in tables - -For footnotes in tables, use the following syntax to mimic Asciidoctor's -styling: - -.... -[cols="3",options="header"] -|=== -|Header 1 -|Header 2 -|Header 3 - -|Item A ^[1]^ -|Item B -|Item C ^[2]^ - -|Item D -|Item E ^[3]^ -|Item F ^[3]^ -|=== -[.small] --- -1. A description. -2. Another description. -3. Two items relate to this description. --- -.... - -The notes are kept immediately after the table, instead of moved to the bottom of the rendered assembly. This manual method also allows you to reuse the same footnote number for multiple references as needed. - -Note the following: - -* Add a space before the superscripted numbers with square brackets. -* To match the table cell's font size, start the ordered list with a `[.small]` -style and wrap it in a `--` block. - -[id="collapsible-content"] -=== Collapsible content -You can collapse sections of content by using the `collapsible` option, which converts the Asciidoctor markup to HTML `details` and `summary` sections. The `collapsible` option is used at the writer's discretion and is appropriate for considerably long code blocks, lists, or other such content that significantly increases the length of a module or assembly. - -[NOTE] -==== -You must set a title for the `summary` section. If a title is not set, the default title is "Details." -==== - -Collapsible content is formatted as shown: - -.... -.Title of the `summary` dropdown -[%collapsible] -==== -This is content within the `details` section. -==== -.... - -This renders as a dropdown with collapsed content: - -.Title of the `summary` dropdown -[%collapsible] -==== -This is content within the `details` section. 
-==== - -If your collapsible content includes an admonition such as a note or warning, the admonition must be nested: - -.... -.Collapsible content that includes an admonition -[%collapsible] -==== -This content includes an admonition. - -[source,terminal] ----- -$ oc whoami ----- - -[NOTE] -===== -Nest admonitions when using the `collapsible` option. -===== -==== -.... - -This renders as: - -.Collapsible content that includes an admonition -[%collapsible] -==== -This content includes an admonition. - -[source,terminal] ----- -$ oc whoami ----- - -[NOTE] -===== -Nest admonitions when using the `collapsible` option. -===== -==== - -=== Quick reference - -.User accounts and info -[option="header"] -|=== -|Markup in command syntax |Description |Substitute value in Example block - -|`` -|Name of user account -|user@example.com - -|`` -|User password -|password -|=== - -.Projects and applications -[option="header"] -|=== -|Markup in command syntax |Description |Substitute value in Example block - -|`` -|Name of project -|myproject - -|`` -|Name of an application -|myapp -|=== - -=== Additional resources sections - -The following guidelines apply to all "Additional resources" sections: - -* You must include the `[role="_additional-resources"]` attribute declaration before the section heading. -* You must not include paragraphs in the section. Use an unordered list. -* The links and xrefs in the unordered list must contain human-readable text between the square brackets. -* Each item in the unordered list must contain a minimum of text besides the link or xref. - -Additionally, in an assembly, use `==` formatting for the section heading (`== Additional resources`). Use of this heading syntax at the assembly level indicates that the sections relate to the whole assembly. For example: - ----- -[role="_additional-resources"] -[id="additional-resources_configuring-alert-notifications"] -== Additional resources -* link:some-url.com[Human readable label] -* xref:some_xref[Human readable label] -* xref:some_other_xref[Human readable label] ----- - -Only use `.` formatting (`.Additional resources`) in a module or to follow a module in an assembly. Because you cannot use the xrefs in modules, this functions as a _trailing include_ at the assembly level, where the `.` formatting of the `include` statement indicates that the resource applies specifically to the module and not to the assembly. For example: - ----- -[role="_additional-resources"] -.Additional resources -* link:some-url.com[Human readable label] -* xref:some_xref[Human readable label] -* xref:some_other_xref[Human readable label] ----- - -== Admonitions -Admonitions such as notes and warnings are formatted as shown: - -.... -[ADMONITION] -==== -Text for admonition -==== -.... - -See the link:https://redhat-documentation.github.io/supplementary-style-guide/#admonitions[Red Hat Supplementary style guide] for the valid admonition types and their definitions. - -[id="api-object-formatting"] -== API object formatting - -For terms that are API objects, the way they are written depends on whether the term is a general reference or an actual reference to the object. - -[id="api-object-general-references"] -=== General references - -A general reference is any time you are speaking conceptually, or generally, about these components in a cluster. - -When referring to API object terms in general usage, use lowercase and separate multi-word API objects. 
*Default to following this guidance unless you are specifically interacting with/referring to the API object (see xref:api-object-object-references[Object references]).* - -For example: - -* pod -* node -* daemon set -* config map -* deployment -* image stream -* persistent volume claim - -.Examples of general references -.... -Kubernetes runs your workload by placing containers into pods to run on nodes. - -You must have at least one secret, config map, or service account. - -The total number of persistent volume claims in a project. -.... - -Note that if an object uses an acronym or other special capitalization, then its general reference should honor that. For example, general references to `APIService` should be written as "API service", not "api service". Any other exceptions or special guidance are noted in the xref:../contributing_to_docs/term_glossary.adoc[glossary]. - -[id="api-object-object-references"] -=== Object references - -An object reference is when you are referring to the actual instance of an API object, where the object name is important. - -When referring to actual instances of API objects, use link:https://en.wikipedia.org/wiki/Camel_case#Variations_and_synonyms[PascalCase] and mark it up as monospace in backticks (````). - -[NOTE] -==== -Do not use backticks or other markup in assembly or module headings. You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title. -==== - -Be sure to match the proper object type (or `kind` in Kubernetes terms); for example, do not add an "s" to make it plural. *Only follow this guidance if you are explicitly referring to the API object (for example, when editing an object in the CLI or viewing an object in the web console).* - -For example: - -* `Pod` -* `Node` -* `DaemonSet` -* `ConfigMap` -* `Deployment` -* `ImageStream` -* `PersistentVolumeClaim` - -.Examples of API object references -.... -After you create a `Node` object, or the kubelet on a node self-registers, the control plane checks whether the new `Node` object is valid. - -The default amount of CPU that a container can use if not specified in the `Pod` spec. - -Create a file, `pvc.yaml`, with the `PersistentVolumeClaim` object definition. -.... - -[NOTE] -==== -Use "object", "resource", "custom resource", "spec", etc. as appropriate after the object reference. This helps with clarity and readability. - -Another situation where this is necessary is when referring to the plural version of objects. Do not add an "s" to the end of an object name reference to make it plural. Use only the official `kind` of object (for example, seen when you run `oc api-resources`). - -For example, the object `kind` for a node is `Node`, not `Nodes`. So do not write "You can create `Nodes` using `kubectl`." Instead, rewrite to something like "You can create `Node` objects using `kubectl`." -==== - -[id="operator-name-capitalization"] -=== Operator capitalization - -The term "Operator" is always capitalized. For example: - ----- -= Support policy for unmanaged Operators - -Individual Operators have a `managementState` parameter in their configuration. ----- - -An Operator's full name must be a proper noun, with each word initially -capitalized. If it includes a product name, defer the product's capitalization -style guidelines. 
For example: - -- Red Hat OpenShift Logging Operator -- Prometheus Operator -- etcd Operator -- Node Tuning Operator -- Cluster Version Operator - -[NOTE] -==== -Red Hat Brand and Legal guidance for Operator names will likely differ. For marketing materials, they prefer lowercase names for anything that is not a Red Hat product. - -However, the Brand team recognizes that there are different standards for marketing materials versus technical content. For this reason, the title case capitalization for Operator names in technical product documentation and OperatorHub is acceptable. - -The "Naming" page by Red Hat Brand on the Source provides an overview of naming slide deck that also confirms this difference. -==== - -== Declarative config examples - -Many of our procedures provide imperative `oc` commands (which cannot be stored in a Git repo). Due to efforts around improving the experience for GitOps users, we sometimes also want to provide a declarative YAML example that achieves the same configuration. This allows users to store these YAML configurations in a Git repo and follow GitOps practices to configure OpenShift. - -[IMPORTANT] -==== -When adding declarative examples to procedures, do not completely replace the imperative command with the declarative YAML example. Some users might still prefer the imperative option. -==== - -To add a declarative YAML example to a procedure step with an existing imperative command, add it in a "TIP" admonition by following the template in the example below. This example uses an imperative command (`oc create configmap`) to create a config map, and then provides the declarative YAML example of the `ConfigMap` object afterward. - -.... -* Define a `ConfigMap` object containing the certificate authority by using the following command: -+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -type: Opaque -data: - ca.crt: ----- -==== -.... - -This renders as: - -> * Define a `ConfigMap` object containing the certificate authority by using the following command: -> + -> [source,terminal] -> ---- -> $ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config -> ---- -> + -> [TIP] -> ==== -> You can alternatively apply the following YAML to create the config map: -> -> [source,yaml] -> ---- -> apiVersion: v1 -> kind: ConfigMap -> metadata: -> name: ca-config-map -> namespace: openshift-config -> type: Opaque -> data: -> ca.crt: -> ---- -> ==== - -[NOTE] -==== -If you are adding a particularly long YAML block, you can optionally use the xref:collapsible-content[`%collapsible`] feature to allow users to collapse the code block. -==== - -== Quick markup reference - -|=== -|Convention |Markup |Example rendered output - -|Code blocks - -a| -.... -Use the following syntax for the `oc` command: - ----- -$ oc ----- -.... 
- -a| -> Use the following syntax for the `oc` command: -> -> ---- -> $ oc -> ---- - -a|Use backticks for all non-GUI "system items", including: - -* Inline commands, operations, literal values, variables, parameters, settings, -flags, environment variables, user input -* System term/item, user names, unique or example names for individual API -objects/resources (e.g., a pod named `mypod`), daemon, service, or software -package -* RPM packages -* File names or directory paths - -a| -.... -`oc get` - -Set the `upgrade` variable to `true`. - -Use the `--amend` flag. - -Answer by typing `Yes` or `No` when prompted. - -`user_name` - -`service_name` - -`package_name` - -`filename` -.... - -a| -> Use the `oc get services` command to get a list of services that are currently defined. -> ->   -> -> Use the `--amend` flag. -> ->   -> -> Set the `upgrade` variable to `true`. -> ->   -> -> Answer by typing `Yes` or `No` when prompted. -> ->   -> -> `cluster-admin` user -> ->   -> -> `firewalld` service -> ->   -> -> `rubygems` RPM package -> ->   -> -> The `express.conf` configuration file is located in the `/usr/share` directory. - -|System or software variable to be replaced by the user -a| -.... -`` - -`` - -`` -.... - -a| -> Use the following command to roll back a Deployment, specifying the Deployment name: -> -> `oc rollback ` -> ->   -> -> Apply the new configuration file: -> -> `oc apply -f /.yaml` - -|Use single asterisks for web console / GUI items (menus, buttons, page titles, etc.). -Use two characters to form the arrow in a series of menu items (`$$->$$`). - -a| -.... -Choose *Cluster Console* from the list. - -Navigate to the *Operators* -> *Catalog Sources* page. - -Click *Create Subscription*. -.... - -a| -> Choose *Cluster Console* from the list. -> ->   -> -> Navigate to the *Operators* -> *Catalog Sources* page. -> ->   -> -> Click *Create Subscription*. - -|Use underscores to emphasize the first appearance of a new term. - -a| -.... -An _Operator_ is a method of packaging, deploying, -and managing a Kubernetes application. -.... - -a| -> An _Operator_ is a method of packaging, deploying, and managing a Kubernetes application. - -|Use of underscores for general emphasis is allowed but should only be used -very sparingly. Let the writing, instead of font usage, create the emphasis -wherever possible. - -a| -.... -Do _not_ delete the file. -.... - -a| -> Do _not_ delete the file. - -|Footnotes - -|A footnote is created with the footnote macro. If you plan to reference a footnote more than once, use the ID footnoteref macro. The Customer Portal does not support spaces in the footnoteref. For example, "dynamic PV" should be "dynamicPV". - -|For footnote and footnoteref syntax, see link:http://asciidoctor.org/docs/user-manual/#user-footnotes[AsciiDoctor documentation]. 
- -|=== diff --git a/contributing_to_docs/docs_production_deployment.adoc b/contributing_to_docs/docs_production_deployment.adoc deleted file mode 100644 index b3ba9c39f34c..000000000000 --- a/contributing_to_docs/docs_production_deployment.adoc +++ /dev/null @@ -1,51 +0,0 @@ -[id="contributing-to-docs-docs-production-deployment"] -= Production deployment of the OpenShift documentation -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:linkattrs: -:description: How to deploy the entire set of documentation - -toc::[] - -== Source-to-image pipeline -OpenShift documentation is built and deployed on an https://cloud.redhat.com/products/dedicated/[OpenShift Dedicated cluster] -using a https://github.com/openshift/source-to-image[source-to-image] build pipeline. - -The source-to-image builder image is built from a https://github.com/openshift-cs/docs-builder/[community project in GitHub] -and published to https://quay.io/repository/openshift-cs/docs-builder. - -== Documentation deployment -Deploying the OpenShift documentation is simplified by using a -https://github.com/openshift-cs/docs-builder/blob/main/template.yaml[pre-built OpenShift template YAML]. - -You can use the following command to deploy the OpenShift Container Platform (commercial) documentation: - -[source,terminal] ----- -oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \ - -p NAME=docs-openshift-com \ - -p PACKAGE=commercial \ - -p APPLICATION_DOMAIN=docs.openshift.com \ - -p BUILD_REPO=https://github.com/openshift/openshift-docs.git \ - -p BUILD_BRANCH=main ----- - -You can use the following command to deploy the OKD (community) documentation - -[source,terminal] ----- -oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \ - -p NAME=docs-openshift-com \ - -p PACKAGE=community \ - -p APPLICATION_DOMAIN=docs.openshift.com \ - -p BUILD_REPO=https://github.com/openshift/openshift-docs.git \ - -p BUILD_BRANCH=main ----- - -== Deployment customization -It's possible to change the documentation source repository to another repository for development by changing the -`BUILD_REPO` parameter in the `oc new-app` command. - -To change the builder image, provide the `BUILDER_IMAGE` parameter in the `oc new-app` command. 
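For example, a development deployment that points at a fork and overrides the builder image could look like the following sketch. The fork URL is a placeholder, and the `BUILDER_IMAGE` tag is an assumption based on the builder image repository noted above:

[source,terminal]
----
oc new-app https://raw.githubusercontent.com/openshift-cs/docs-builder/main/template.yaml \
  -p NAME=docs-openshift-com \
  -p PACKAGE=commercial \
  -p APPLICATION_DOMAIN=docs.openshift.com \
  -p BUILD_REPO=https://github.com/<your_username>/openshift-docs.git \
  -p BUILD_BRANCH=main \
  -p BUILDER_IMAGE=quay.io/openshift-cs/docs-builder:latest
----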
diff --git a/contributing_to_docs/images b/contributing_to_docs/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/contributing_to_docs/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/contributing_to_docs/modules b/contributing_to_docs/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/contributing_to_docs/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/contributing_to_docs/snippets b/contributing_to_docs/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/contributing_to_docs/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/contributing_to_docs/term_glossary.adoc b/contributing_to_docs/term_glossary.adoc deleted file mode 100644 index 94538e00962e..000000000000 --- a/contributing_to_docs/term_glossary.adoc +++ /dev/null @@ -1,745 +0,0 @@ -[id="contributing-to-docs-term-glossary"] -= OpenShift glossary of terms -{product-author} -{product-version} -:data-uri: -:icons: -:experimental: -:toc: macro -:toc-title: - -toc::[] - -== Usage of OpenShift terms - -This topic provides guidelines for referring to the various components of -OpenShift 4 and objects of a running OpenShift system in our documentation. The -goal is to standardize terminology across OpenShift content and be consistent in -the usage of our terminology when referring to OpenShift components or -architecture. - -For terms that are also API objects, there is different guidance for general usage of the term versus referencing the actual API object. This glossary mainly defines the general usage guideline (lowercase, separating words), but be sure to use the object formatting (PascalCase, in monospace) when referring to the actual object. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more information. - -[NOTE] -==== -If you want to add terms or other content to this document, or if anything must -be fixed, send an email to openshift-docs@redhat.com or submit a PR -on GitHub. -==== - -== A - -'''' -=== action - -Usage: action - -An action consists of _project_, _verb_, and _resource_: - -* *Project* is the project containing the resource that is to be acted upon. -* *Verb* is a get, list, create, or update operation. -* *Resource* is the API endpoint being accessed. This is distinct from the -referenced resource itself, which can be a pod, deployment, build, etc. - -'''' -=== API server - -Usage: API server(s) - -A REST API endpoint for interacting with the system. New deployments and -configurations can be created with this endpoint, and the state of the system -can be interrogated through this endpoint as well. - -'''' -=== API service - -Usage: API service(s) - -When referencing the actual object, write as `APIService`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== app - -Usage: app(s) - -Acceptable when referring to a mobile or web xref:application[application]. - -'''' -[id="application"] -=== application - -Usage: application(s) - -Although the term application is no longer an official noun in OpenShift, -customers still create and host applications on OpenShift, and using the term -within certain contexts is acceptable. For example, the term application might -refer to some combination of an image, a Git repository, or a replication -controller, and this application might be running PHP, MySQL, Ruby, JBoss, or -something else. 
- -.Examples of correct usage -==== -OpenShift runs your applications. - -The `new-app` command creates a new application from the components you specify. - -My application has two Ruby web services connected to a database back end and a RabbitMQ message queue, as well as a python worker framework. - -You can check the health of your application by adding probes to the various parts. - -You can host a WordPress application on OpenShift. -==== - -'''' -=== Assisted Installer - -Usage: Assisted Installer - -In Red Hat OpenShift, the Assisted Installer is an installation solution that is offered on the Red Hat Hybrid Cloud Console to provide Software-as-a-Service functionality for cluster installations. - -Do not use: AI, assisted installer - -'''' -=== authorization - -Usage: authorization - -An authorization determines whether an _identity_ is allowed to perform any -action. It consists of _identity_ and _action_. - -== B - -'''' -=== boot image - -Usage: boot image(s) - -* A boot image is a disk image that contains a bootable operating system (OS) and all the configuration settings for the OS, such as drivers. - -'''' -=== build - -Usage: build(s), or when speaking generally about `Build` objects. - -* A build is the process of transforming input parameters into a resulting object. -* A `Build` object encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the pod that executed the build. - -When referencing the actual object, write as "``Build`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== build configuration - -Usage: build configuration(s) when speaking generally about `BuildConfig` objects. - -A `BuildConfig` object is the definition of the entire build process. A build configuration describes a single build definition and a set of triggers for when a new build is created. - -When referencing the actual object, write as "``BuildConfig`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -== C - -'''' -=== cluster - -Usage: cluster - -The collection of controllers, pods, and services and related DNS and networking -routing configuration that are defined on the system. - -'''' -=== cluster service version - -Usage: cluster service version - -Operator Lifecycle Manager (OLM), part of the Operator Framework, uses a cluster service version (CSV) to define the metadata that accompanies an Operator container image and assist in running the Operator in a cluster. This metadata is defined in a `ClusterServiceVersion` API object used to populate user interfaces with information such as its logo, description, and version. It is also a source of technical information that is required to run the Operator, like the RBAC rules it requires and which custom resources (CRs) it manages or depends on. - -This is commonly abbreviated as a CSV. - -'''' -=== config map - -Usage: config map(s) - -Config maps hold configuration data for pods to consume. - -When referencing the actual object, write as `ConfigMap`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -Do not use: configuration map(s) - -'''' -=== container - -Usage: container(s) - -'''' -=== containerize - -Usage: containerize(d) - -Use "containerized" as an adjective when referring to applications made up of -multiple services that are distributed in containers. 
"Containerized" can be -used interchangeably with "container-based." - -'''' -=== container group - -Usage: container group - -'''' -=== control plane - -Usage: control plane - -The control plane, which is composed of control plane machines, manages the {product-title} cluster. The control plane machines manage workloads on the compute machines, which are also known as worker machines. - -Note that the OpenShift "control plane" was previously known as "master" and could still be in the code. - -'''' -=== custom resource - -Usage: custom resource (CR) - -A resource implemented through the Kubernetes `CustomResourceDefinition` API. A custom resource is distinct from the built-in Kubernetes resources, such as the pod and service resources. Every CR is part of an API group. - -Do not capitalize. - -'''' -=== custom resource definition (CRD) - -Usage: custom resource definition (CRD) for the first time reference; CRD thereafter. - -Create a custom resource definition to define a new custom resource. - -This is commonly abbreviated as a CRD. - -== D - -'''' -=== deployment - -Usage: deployment(s) when speaking generally about `Deployment` or `DeploymentConfig` objects - -* A `Deployment` is a Kubernetes-native object that provides declarative updates for pods and -replica sets. -* A `DeploymentConfig` is an OpenShift-specific object that defines the template for a pod and manages -deploying new images or configuration changes. Uses replication controllers. Predates Kubernetes `Deployment` objects. - -When referencing the actual object, write as `Deployment` or `DeploymentConfig` as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -To avoid further confusion, do not refer to an overall OpenShift installation / -instance / cluster as an "OpenShift deployment". - -Do not use: deployment configuration(s), deployment config(s) - -'''' -=== disconnected - -Usage: disconnected environment, disconnected installation - -Use "disconnected" when discussing installing a cluster in an environment that does not have an active connection to the internet. Use "disconnected" regardless of whether the restriction is physical or logical. - -"Disconnected" is the preferred term over "restricted", "air-gapped", or "offline". - -'''' -=== Dockerfile - -Usage: Dockerfile; wrapped with [filename] markup. See -link:doc_guidelines.adoc[Documentation Guidelines] for markup information. - -Docker can build images automatically by reading the instructions from a -Dockerfile. A Dockerfile is a text document that contains all the commands you -would normally execute manually to build a docker image. - -Source: https://docs.docker.com/reference/builder/ - -.Examples of correct usage -==== -Open the [filename]#Dockerfile# and make the following changes. - -Create a [filename]#Dockerfile# at the root of your repository. -==== - -== E - -'''' -=== event - -Usage: event(s) - -An event is a data record expressing an occurrence and its context, based on the CNCF CloudEvents specification. -Events contain two types of information: the event data representing the occurrence, and the context metadata providing contextual information about the occurrence. -Events are routed from an event producer, or source, to connected event consumers. - -Routing can be performed based on information contained in the event, but an event will not identify a specific routing destination. 
-Events can be delivered through various industry standard protocols such as HTTP, AMQP, MQTT, or SMTP, or through messaging and broker systems, such as Kafka, NATS, AWS Kinesis, or Azure Event Grid. - -When referencing the actual object, write as `Event`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -// NOTE: This is inconsistently used, e.g. https://docs.openshift.com/container-platform/4.5/rest_api/metadata_apis/event-core-v1.html -See: link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#event-v1-core[Event v1 core API], link:https://github.com/cloudevents/spec/blob/master/primer.md#cloudevents-concepts[CloudEvents concepts], and link:https://github.com/cloudevents/spec/blob/master/spec.md#event[CloudEvents specification]. - -== F - -== G - -'''' -=== group/version/kind (GVK) - -Usage: group/version/kind (GVK) for the first time reference; GVK thereafter. - -A unique identifier for a Kubernetes API, specifying its _group_ (a collection of related APIs), _version_ (defines the release and level of stability), and _kind_ (an individual API type or name). - -While "GroupVersionKind" does appear in the API guide, typically there should not be a reason to mark up in reference to a specific object. Favor simply "GVK", or "GVKs" for pluralization, after the first time reference as much as possible. Avoid pluralizing the long form (e.g., group/version/kinds or groups/versions/kinds). - -== H - -== I - -'''' -=== identity - -Usage: identity or identities - -Both the user name and list of groups the user belongs to. - -'''' -=== image - -Usage: image(s) - -'''' -=== image stream - -Usage: image stream(s) - -Image streams provide a means of creating and updating container images in an ongoing way. - -'''' -=== Ignition config - -Usage: Ignition config file or Ignition config files - -The file that Ignition uses to configure Red Hat Enterprise Linux CoreOS (RHCOS) during -operating system initialization. The installation program generates different -Ignition config files to initialize bootstrap, control plane, and worker nodes. - -'''' - -=== Ingress - -Usage: Ingress - -API object that allows developers to expose services through an HTTP(S) aware -load balancing and proxy layer via a public DNS entry. The Ingress resource may -further specify TLS options and a certificate, or specify a public CNAME that -the OpenShift Ingress Controller should also accept for HTTP and HTTPS traffic. -An administrator typically configures their Ingress Controller to be visible -outside the cluster firewall, and might also add additional security, caching, or -traffic controls on the service content. - -'''' - -=== Ingress Controller - -Usage: Ingress Controller(s) - -A resource that forwards traffic to endpoints of services. The Ingress Controller -replaces router from {product-title} 3 and earlier. - -'''' -=== installer-provisioned infrastructure - -Usage: installer-provisioned infrastructure - -If the installation program deploys and configures the infrastructure that the -cluster runs on, it is an installer-provisioned infrastructure installation. - -Do not use: IPI - -== J - -== K - -'''' -=== kubelet - -Usage: kubelet(s) - -The agent that controls a Kubernetes node. Each node runs a kubelet, which -handles starting and stopping containers on a node, based on the desired state -defined by the control plane (also known as master). 
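As an illustration only (this command is not part of the glossary entry), the kubelet version that each node reports is shown in the `VERSION` column of standard `oc` output:

[source,terminal]
----
$ oc get nodes -o wide
----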
- -'''' -=== Kubernetes API server - -Usage: Kubernetes API server - -== L - -== M - -'''' -=== MetalLB - -Usage: MetalLB, MetalLB Operator, MetalLB project - -MetalLB is an open source project that provides a way to add services of type `LoadBalancer` to clusters that are not installed on infrastructure from a cloud provider. MetalLB primarily targets on-premise, bare-metal clusters, but any infrastructure that does not include a native load-balancing capability is a candidate. - -"MetalLB" always has the first letter and last two letters capitalized in general text. Do not use "Metallb." - -'''' -=== minion - -Usage: Deprecated. Use link:#node[node] instead. - -== N - -'''' -=== node - -Usage: node(s) - -A -http://docs.openshift.org/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node[node] -provides the runtime environments for containers. - -'''' -=== namespace - -Usage: namespace - -Typically synonymous with link:#project[project] in OpenShift parlance, which is -preferred. - -== O - -'''' -=== OpenShift - -Usage: OpenShift Container Platform, OpenShift Online, OpenShift Dedicated, -OpenShift Container Engine - -The OpenShift product name should be paired with its product distribution / -variant name whenever possible. Previously, the upstream distribution was called -OpenShift Origin, however it is now called OKD; use of the OpenShift Origin name -is deprecated. - -Avoid using the name "OpenShift" on its own when referring to something that -applies to all distributions, as OKD does not have OpenShift in its name. -However, the following components currently use "OpenShift" in the name and are -allowed for use across all distribution documentation: - -- OpenShift Pipeline -- OpenShift SDN -- OpenShift Ansible Broker (deprecated in 4.2 / removed in 4.4) - -'''' -=== OpenShift API server - -Usage: OpenShift API server - -'''' -=== OpenShift CLI - -Usage: OpenShift CLI (`oc`) - -The `oc` tool is the command-line interface of OpenShift 3 and 4. - -When referencing as a prerequisite for a procedure module, use the following -construction: Install the OpenShift CLI (`oc`). - -'''' -=== Operator - -Usage: Operator(s) - -An Operator is a method of packaging, deploying and managing a Kubernetes -application. A Kubernetes application is an application that is both deployed on -a Kubernetes cluster (including OpenShift clusters) and managed using the -Kubernetes APIs and `kubectl` or `oc` tooling. - -The term "Operator" is always captalized. - -While "containerized" is allowed, do not use "Operatorize" to refer to building an -Operator that packages an application. - -.Examples of correct usage -==== -Install the etcd Operator. - -Build an Operator using the Operator SDK. -==== - -See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for -more on Operator naming. - -'''' -=== OperatorHub - -Usage: OperatorHub - -'''' -=== Operator Lifecycle Manager (OLM) -Usage: Operator Lifecycle Manager, OLM - -Refer to this component without a preceding article ("the"). - -.Examples of correct usage -==== -You can use OpenShift Lifecycle Manager (OLM) to manually or automatically upgrade an Operator. -==== - -'''' -=== Options menu - -Usage: Options menu; use sparingly; not to be confused with Actions menu, which -signifies a specific menu seen in the web console. - -This describes a menu type commonly called a "kebab", "hamburger", or "overflow" -menu that does not have hover text or a given name or label in the web console. 
- -'''' - -== P - -'''' -=== persistent volume (PV) - -Usage: persistent volume - -Developers can use a persistent volume claim (PVC) to request a persistent volume (PV) resource without having specific knowledge of the underlying storage infrastructure. - -'''' -=== persistent volume claim (PVC) - -Usage: persistent volume claim - -Developers can use a persistent volume claim (PVC) to request a persistent volume (PV) resource without having specific knowledge of the underlying storage infrastructure. - -'''' -=== pod - -Usage: pod(s) - -Kubernetes object that groups related Docker containers that have to share -network, file system, or memory together for placement on a node. Multiple -instances of a pod can run to provide scaling and redundancy. - -When referencing the actual object, write as `Pod`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -'''' -=== project - -Usage: project(s) - -A project allows a community of users to organize and manage their content in -isolation from other communities. It is an extension of the `Namespace` object -from Kubernetes. - -When referencing the actual object, write as `Project`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -== Q - -'''' -=== quick start - -Usage: quick start(s) - -There are two types of quick starts in OpenShift: - -* quick starts that are guided tutorials in the web console -* quick start templates that allow users to quickly get started creating a new application - -Be sure to provide context about which type of quick start you are referring to. - -== R - -'''' -=== replica set - -Usage: replica set(s) - -Similar to a replication controller, a replica set is a native Kubernetes API -object that ensures a specified number of pod replicas are running at any given -time. Used by `Deployment` objects. - -When referencing the actual object, write as `ReplicaSet`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -See link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet - Kubernetes]. - -'''' -=== replication controller - -Usage: replication controller(s) - -Kubernetes object that ensures N (as specified by the user) instances of a given -pod are running at all times. Used by deployment configs. - -'''' -=== route - -Usage: route(s) - -OpenShift-specific API object that allows developers to expose services through -an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The -route might further specify TLS options and a certificate, or specify a public -CNAME that the OpenShift Ingress Controller should also accept for HTTP and -HTTPS traffic. An administrator typically configures their Ingress Controller to -be visible outside the cluster firewall, and might also add additional security, -caching, or traffic controls on the service content. - -== S - -'''' -=== scheduler - -Usage: scheduler(s) - -Component of the Kubernetes control plane or OpenShift control plane that manages the state of -the system, places pods on nodes, and ensures that all containers that are -expected to be running are actually running. - -'''' -=== secret - -Usage: secret(s) - -Kubernetes API object that holds secret data of a certain type. - -See link:https://kubernetes.io/docs/concepts/configuration/secret/[Secrets - Kubernetes]. 
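For illustration only (the names here are placeholders, not part of the glossary entry), a generic secret can be created from literal values with the CLI:

[source,terminal]
----
$ oc create secret generic example-secret \
  --from-literal=username=exampleuser \
  --from-literal=password=examplepassword \
  -n example-namespace
----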
- -'''' -=== security context constraints (SCC) - -Usage: security context constraints - -Security context constraints govern the ability to make requests that affect the security context that will be applied to a container. - -When referencing the actual object, write as `SecurityContextConstraints`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -This is commonly abbreviated as SCC. - -'''' -=== service - -Usage: service(s) - -Kubernetes native API object that serves as an internal load balancer. It -identifies a set of replicated pods to proxy the connections it -receives to them. Backing pods can be added to or removed from a service -arbitrarily while the service remains consistently available, enabling anything -that depends on the service to refer to it at a consistent address. - -A service is a named abstraction of software service (for example, `mysql`) -consisting of local port (for example `3306`) that the proxy listens on, and the -selector that determines which pods will answer requests sent through the proxy. - -Do not confuse with link:https://www.openservicebrokerapi.org/[Open Service Broker API related objects]. -See -link:https://docs.openshift.com/container-platform/3.11/architecture/service_catalog/index.html#service-catalog-concepts-terminology[Service Catalog Concepts and Terminology]. - -'''' -=== service account - -Usage: service account(s) - -A service account binds together: - -* a name, understood by users, and perhaps by peripheral systems, for an identity -* a principal that can be authenticated and authorized -* a set of secrets - -'''' -=== single-node OpenShift - -Usage: single-node OpenShift - -Single-node OpenShift (or {product-title} on a single-node cluster) is a deployment footprint that provides control plane and worker node capabilities in a single server for deployments in constrained environments. - -Do not use: Single Node Openshift (SNO). - -'''' -=== three-node OpenShift - -Usage: three-node OpenShift - -Three-node OpenShift is a compact cluster deployment footprint on three nodes for deployments in constrained environments. It provides three control plane nodes that you configure as schedulable for workloads. - -Do not use: Three Node Openshift. - -'''' -=== SkyDNS - -Usage: SkyDNS - -Component of the Kubernetes control plane or OpenShift control plane that provides -cluster-wide DNS resolution of internal hostnames for services and pods. - -'''' -=== Source-to-Image (S2I) - -Usage: Source-to-Image for the first time reference; S2I thereafter. - -Deprecated abbreviation (do not use): STI - -'''' -=== spec - -Usage: spec(s) - -In addition to "spec file" being allowed related to RPM spec files, general -usage of "spec" is allowed when describing Kubernetes or OpenShift object specs -/ manifests / definitions. - -.Examples of correct usage -==== -Update the `Pod` spec to reflect the changes. -==== - -'''' -=== storage class - -Usage: storage class(es) - -Kubernetes API object that describes the parameters for a class of storage for -which persistent volumes can be dynamically provisioned. storage classes are -non-namespaced; the name of the storage class according to etcd is in -`ObjectMeta.Name`. - -When referencing the actual object, write as `StorageClass`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details. - -See link:https://kubernetes.io/docs/concepts/storage/storage-classes/[Storage Classes - Kubernetes]. 
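As an illustration of the object reference (not part of the original entry), the cluster-scoped `StorageClass` objects and their provisioners can be listed with:

[source,terminal]
----
$ oc get storageclass
----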
- -== T - -== U - -'''' -=== update - -Usage: update - -Use "update" when referring to updating the cluster to a new version. Although "upgrade" is sometimes used interchangeably, "update" is the preferred term to use, for consistency. - - -'''' -=== user-provisioned infrastructure - -Usage: user-provisioned infrastructure - -If the user must deploy and configure separate virtual or physical hosts as part of -the cluster deployment process, it is a user-provisioned infrastructure -installation. - -Do not use: UPI - -'''' - -== V - -== W - -== X - -== Y - -== Z diff --git a/contributing_to_docs/tools_and_setup.adoc b/contributing_to_docs/tools_and_setup.adoc deleted file mode 100644 index 9f4bc22452c8..000000000000 --- a/contributing_to_docs/tools_and_setup.adoc +++ /dev/null @@ -1,185 +0,0 @@ -[id="contributing-to-docs-tools-and-setup"] -= Install and set up the tools and software -:icons: -:toc: macro -:toc-title: -:toclevels: 1 -:linkattrs: -:description: How to set up and install the tools to contribute - -toc::[] - -== Create a GitHub account -Before you can contribute to OpenShift documentation, you must -https://www.github.com/join[sign up for a GitHub account]. - -== Set up authentication -When you have your account set up, follow the instructions to -https://help.github.com/articles/generating-ssh-keys/[generate and set up SSH -keys on GitHub] for proper authentication between your workstation and GitHub. - -Confirm authentication is working correctly with the following command: - ----- -$ ssh -T git@github.com ----- - -== Fork and clone the OpenShift documentation repository -You must fork and set up the OpenShift documentation repository on your -workstation so that you can create PRs and contribute. These steps must only -be performed during initial setup. - -. Fork the https://github.com/openshift/openshift-docs repository into your -GitHub account from the GitHub UI. You can do this by clicking on *Fork* in the -upper right-hand corner. - -. In the terminal on your workstation, change into the directory where you want -to clone the forked repository. - -. Clone the forked repository onto your workstation with the following -command, replacing __ with your actual GitHub username. -+ ----- -$ git clone git@github.com:/openshift-docs.git ----- - -. Change into the directory for the local repository you just cloned. -+ ----- -$ cd openshift-docs ----- - -. Add an upstream pointer back to the OpenShift's remote repository, in this -case _openshift-docs_. -+ ----- -$ git remote add upstream git@github.com:openshift/openshift-docs.git ----- - -This ensures that you are tracking the remote repository to keep your local -repository in sync with it. - -== Install AsciiBinder and dependencies -When you have the documentation repository cloned and set up, you are ready to -install the software and tools you will use to create the content. All OpenShift -documentation is created in AsciiDoc, and is processed with https://github.com/redhataccess/ascii_binder[AsciiBinder], -which is an http://asciidoctor.org/[AsciiDoctor]-based docs management system. 
- - -=== What you require -The following are minimum requirements: - -* A bash shell environment (Linux and OS X include a bash shell environment out -of the box, but if you are on Windows you can use http://cygwin.com/[Cygwin]) -* https://www.ruby-lang.org/en/[Ruby] -* http://www.git-scm.com/[Git] -* A web browser (Firefox, Chrome, or Safari) -* An editor that can strip trailing whitespace, such as -link:https://code.visualstudio.com/[Visual Studio Code]. - -=== Install the required software dependencies on a Linux system -The following instructions describe how to install all the required tools to do -live content editing on a Fedora Linux system. - -1. Install the _RubyGems_ package with `yum install rubygems` -+ -[NOTE] -==== -On certain systems, `yum` installs an older version of RubyGems that can cause issues. As an alternative, you can install RubyGems by using RVM. The following example is referenced from the link:https://rvm.io/rvm/install[RVM site]: - -[source,terminal] ----- -$ curl -sSL https://get.rvm.io | bash -s stable --ruby ----- -==== - -2. Install _Ruby_ development packages with `yum install ruby-devel` -3. Install _gcc_ with `yum install gcc-c++` -4. Install _redhat-rpm-config_ with `yum install redhat-rpm-config` -5. Install _make_ with `yum install make` -6. Install _asciidoctor-diagram_ with `gem install asciidoctor-diagram` -7. Install the _ascii_binder_ gem with `gem install ascii_binder` - -NOTE: If you already have AsciiBinder installed, you might be due for an update. -These directions assume that you are using AsciiBinder 0.2.0 or newer. To check -and update if necessary, simply run `gem update ascii_binder`. Note that you might require root permissions. - -=== Install the required software dependencies in a toolbox container on Linux - -You can use link:https://containertoolbx.org/[`toolbx`] to create a Fedora-based container for our tools on most Linux distributions, including RHEL. By using Fedora as the base, you have access to relatively recent versions of required software packages. - -.Prerequisites - -* Your distro has link:https://podman.io/[Podman] 1.4.0 or greater. - -.Procedure - -. If you don't already have `toolbx`, link:https://containertoolbx.org/install/[install it]. - -. To create a Fedora 37 container, on a command line, enter: -+ -[source,terminal] ----- -$ toolbox create --distro fedora --release f37 ----- -+ -where: - -:: Specifies the name that you want to give your toolbox container. - -. Enter the container. From the command line, run: -+ -[source,terminal] ----- -$ toolbox enter ----- - -. Install dependencies for our tools. Within the toolbox that you entered, run: -+ -[source,terminal] ----- -[toolbox] $ sudo dnf install ruby-devel gcc-c++ redhat-rpm-config make ----- - -. Install the required Ruby gems: -+ -[source,terminal] ----- -[toolbox] $ gem install ascii_binder asciidoctor-diagram ----- - -You now have a toolbox container that you can use to build our documentation no matter which distribution you use. - -NOTE: Press *Ctrl + D* or enter `exit` to exit the container. To use AsciiBinder or update the software in the container, remember to `toolbox enter ` first. - -=== Building the collection -With the initial setup complete, you are ready to build the collection. - -1. From the `openshift-docs` directory, run an initial build: -+ ----- -$ cd openshift-docs -$ asciibinder build ----- -2. Open the generated HTML file in your web browser. 
This will be located in the -`openshift-docs/_preview//` directory, with the same path and -filename as the original `.adoc` file you edited, only it will be with the -`.html` extension. - -== Clean up -The `.gitignore` file is set up to prevent anything under the `_preview` and -`_package` directories from being committed. However, you can reset the -environment manually by running: - ----- -$ asciibinder clean ----- - -== Next steps -With the repository and tools set up on your workstation, you can now either -edit existing content or create assemblies and modules. - -* link:doc_guidelines.adoc[Review the documentation guidelines] to understand -some basic guidelines to keep things consistent across our content. -* link:create_or_edit_content.adoc[Create a local working branch] on your -workstation to edit existing content or create content. diff --git a/distr_tracing/_attributes b/distr_tracing/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/distr_tracing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/_attributes b/distr_tracing/distr_tracing_arch/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_arch/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc b/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc deleted file mode 100644 index ff905a8f4c22..000000000000 --- a/distr_tracing/distr_tracing_arch/distr-tracing-architecture.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-architecture"] -= Distributed tracing architecture -include::_attributes/common-attributes.adoc[] -:context: distributed-tracing-architecture - -toc::[] - -Every time a user takes an action in an application, a request is executed by the architecture that may require dozens of different services to participate to produce a response. -{DTProductName} lets you perform distributed tracing, which records the path of a request through various microservices that make up an application. - -_Distributed tracing_ is a technique that is used to tie the information about different units of work together — usually executed in different processes or hosts — to understand a whole chain of events in a distributed transaction. -Developers can visualize call flows in large microservice architectures with distributed tracing. -It is valuable for understanding serialization, parallelism, and sources of latency. - -{DTProductName} records the execution of individual requests across the whole stack of microservices, and presents them as traces. A _trace_ is a data/execution path through the system. An end-to-end trace is comprised of one or more spans. - -A _span_ represents a logical unit of work in {DTProductName} that has an operation name, the start time of the operation, and the duration, as well as potentially tags and logs. Spans may be nested and ordered to model causal relationships. 
- -include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] - -include::modules/distr-tracing-features.adoc[leveloffset=+1] - -include::modules/distr-tracing-architecture.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_arch/images b/distr_tracing/distr_tracing_arch/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_arch/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/modules b/distr_tracing/distr_tracing_arch/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_arch/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_arch/snippets b/distr_tracing/distr_tracing_arch/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_arch/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/_attributes b/distr_tracing/distr_tracing_config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/images b/distr_tracing/distr_tracing_config/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_config/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/modules b/distr_tracing/distr_tracing_config/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_config/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_config/snippets b/distr_tracing/distr_tracing_config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/_attributes b/distr_tracing/distr_tracing_install/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/distr_tracing/distr_tracing_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc b/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc deleted file mode 100644 index dd2a9dfb445f..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc +++ /dev/null @@ -1,94 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-deploying"] -= Configuring and deploying distributed tracing -include::_attributes/common-attributes.adoc[] -:context: deploying-distr-tracing-platform - -toc::[] - -The {JaegerName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {JaegerShortName} resources. You can either install the default configuration or modify the file to better suit your business requirements. - -{JaegerName} has predefined deployment strategies. You specify a deployment strategy in the custom resource file. When you create a {JaegerShortName} instance the Operator uses this configuration file to create the objects necessary for the deployment. 
-
-.Jaeger custom resource file showing deployment strategy
-[source,yaml]
-----
-apiVersion: jaegertracing.io/v1
-kind: Jaeger
-metadata:
-  name: MyConfigFile
-spec:
-  strategy: production <1>
-----
-
-<1> The {JaegerName} Operator currently supports the following deployment strategies:
-
-* *allInOne* (Default) - This strategy is intended for development, testing, and demo purposes; it is not intended for production use. The main backend components, Agent, Collector, and Query service, are all packaged into a single executable that is configured, by default, to use in-memory storage.
-+
-[NOTE]
-====
-In-memory storage is not persistent, which means that if the {JaegerShortName} instance shuts down, restarts, or is replaced, your trace data is lost. In-memory storage also cannot be scaled, because each pod has its own memory. For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage.
-====
-
-* *production* - The production strategy is intended for production environments where long-term storage of trace data is important and a more scalable, highly available architecture is required. Each of the backend components is therefore deployed separately. The Agent can be injected as a sidecar on the instrumented application. The Query and Collector services are configured with a supported storage type, currently Elasticsearch. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes.
-
-* *streaming* - The streaming strategy is designed to augment the production strategy by providing a streaming capability that effectively sits between the Collector and the Elasticsearch backend storage. This reduces the pressure on the backend storage under high load and enables other trace post-processing capabilities to tap into the real-time span data directly from the streaming platform (https://access.redhat.com/documentation/en-us/red_hat_amq/7.6/html/using_amq_streams_on_openshift/index[AMQ Streams]/ https://kafka.apache.org/documentation/[Kafka]).
-+
-[NOTE]
-====
-The streaming strategy requires an additional Red Hat subscription for AMQ Streams.
-====
-
-[NOTE]
-====
-The streaming deployment strategy is currently unsupported on {ibmzProductName}.
-====
-
-[NOTE]
-====
-There are two ways to install and use {DTProductName}: as part of a service mesh or as a standalone component. If you have installed {DTShortName} as part of {SMProductName}, you can perform basic configuration as part of the xref:../../service_mesh/v2x/installing-ossm.adoc#installing-ossm[ServiceMeshControlPlane], but for complete control you should configure a Jaeger CR and then xref:../../service_mesh/v2x/ossm-observability.html#ossm-config-external-jaeger_observability[reference your distributed tracing configuration file in the ServiceMeshControlPlane].
- -==== - -include::modules/distr-tracing-deploy-default.adoc[leveloffset=+1] - -include::modules/distr-tracing-deploy-production-es.adoc[leveloffset=+1] - -include::modules/distr-tracing-deploy-streaming.adoc[leveloffset=+1] - -[id="validating-your-jaeger-deployment"] -== Validating your deployment - -include::modules/distr-tracing-accessing-jaeger-console.adoc[leveloffset=+2] - -[id="customizing-your-deployment"] -== Customizing your deployment - -include::modules/distr-tracing-deployment-best-practices.adoc[leveloffset=+2] - -ifdef::openshift-enterprise,openshift-dedicated[] -For information about configuring persistent storage, see xref:../../storage/understanding-persistent-storage.adoc[Understanding persistent storage] and the appropriate configuration topic for your chosen storage option. -endif::[] - -include::modules/distr-tracing-config-default.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-jaeger-collector.adoc[leveloffset=+2] - -//include::modules/distr-tracing-config-otel-collector.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-sampling.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-storage.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-query.adoc[leveloffset=+2] - -include::modules/distr-tracing-config-ingester.adoc[leveloffset=+2] - -[id="injecting-sidecars"] -== Injecting sidecars - -{JaegerName} relies on a proxy sidecar within the application's pod to provide the agent. The {JaegerName} Operator can inject Agent sidecars into Deployment workloads. You can enable automatic sidecar injection or manage it manually. - -include::modules/distr-tracing-sidecar-automatic.adoc[leveloffset=+2] - -include::modules/distr-tracing-sidecar-manual.adoc[leveloffset=+2] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc b/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc deleted file mode 100644 index e1e19f61dc1e..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-deploying-otel"] -= Configuring and deploying distributed tracing data collection -include::_attributes/common-attributes.adoc[] -:context: deploying-distr-tracing-data-collection - -toc::[] - -The {OTELName} Operator uses a custom resource definition (CRD) file that defines the architecture and configuration settings to be used when creating and deploying the {OTELName} resources. You can either install the default configuration or modify the file to better suit your business requirements. - -include::modules/distr-tracing-config-otel-collector.adoc[leveloffset=+1] diff --git a/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc deleted file mode 100644 index 9a64b8c418e9..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-installing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-distributed-tracing"] -= Installing distributed tracing -include::_attributes/common-attributes.adoc[] -:context: install-distributed-tracing - -toc::[] - -You can install {DTProductName} on {product-title} in either of two ways: - -* You can install {DTProductName} as part of {SMProductName}. Distributed tracing is included by default in the Service Mesh installation. 
To install {DTProductName} as part of a service mesh, follow the xref:../../service_mesh/v2x/preparing-ossm-installation.adoc#preparing-ossm-installation[Red Hat Service Mesh Installation] instructions. You must install {DTProductName} in the same namespace as your service mesh, that is, the `ServiceMeshControlPlane` and the {DTProductName} resources must be in the same namespace. - -* If you do not want to install a service mesh, you can use the {DTProductName} Operators to install {DTShortName} by itself. To install {DTProductName} without a service mesh, use the following instructions. - -== Prerequisites - -Before you can install {DTProductName}, review the installation activities, and ensure that you meet the prerequisites: - -* Possess an active {product-title} subscription on your Red Hat account. If you do not have a subscription, contact your sales representative for more information. - -* Review the xref:../../architecture/architecture-installation.adoc#installation-overview_architecture-installation[{product-title} {product-version} overview]. -* Install {product-title} {product-version}. - -** xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Install {product-title} {product-version} on AWS] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Install {product-title} {product-version} on user-provisioned AWS] -** xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Install {product-title} {product-version} on bare metal] -** xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[Install {product-title} {product-version} on vSphere] -* Install the version of the OpenShift CLI (`oc`) that matches your {product-title} version and add it to your path. - -* An account with the `cluster-admin` role. - -include::modules/distr-tracing-install-overview.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-elasticsearch.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-jaeger-operator.adoc[leveloffset=+1] - -include::modules/distr-tracing-install-otel-operator.adoc[leveloffset=+1] - -//// -== Next steps -* xref:../../distr_tracing/distr_tracing_install/distr-tracing-deploying.adoc#deploying-distributed-tracing[Deploy {DTProductName}]. -//// diff --git a/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc b/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc deleted file mode 100644 index f4d9b3d26524..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-removing.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="removing-distributed-tracing"] -= Removing distributed tracing -include::_attributes/common-attributes.adoc[] -:context: removing-distributed-tracing - -toc::[] - -The steps for removing {DTProductName} from an {product-title} cluster are as follows: - -. Shut down any {DTProductName} pods. -. Remove any {DTProductName} instances. -. Remove the {JaegerName} Operator. -. Remove the {OTELName} Operator. - -include::modules/distr-tracing-removing-instance.adoc[leveloffset=+1] - -include::modules/distr-tracing-removing-instance-cli.adoc[leveloffset=+1] - - -== Removing the {DTProductName} Operators - -.Procedure - -. Follow the instructions for xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster]. - -* Remove the {JaegerName} Operator. 
- -//* Remove the {OTELName} Operator. - -* After the {JaegerName} Operator has been removed, if appropriate, remove the OpenShift Elasticsearch Operator. diff --git a/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc b/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc deleted file mode 100644 index 9de6057bcebf..000000000000 --- a/distr_tracing/distr_tracing_install/distr-tracing-updating.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-distributed-tracing"] -= Upgrading distributed tracing -include::_attributes/common-attributes.adoc[] -:context: upgrading-distributed-tracing - -toc::[] - -Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. OLM runs by default in {product-title}. -OLM queries for available Operators as well as upgrades for installed Operators. -For more information about how {product-title} handles upgrades, see the xref:../../operators/understanding/olm/olm-understanding-olm.adoc#olm-understanding-olm[Operator Lifecycle Manager] documentation. - -During an update, the {DTProductName} Operators upgrade the managed {DTShortName} instances to the version associated with the Operator. Whenever a new version of the {JaegerName} Operator is installed, all the {JaegerShortName} application instances managed by the Operator are upgraded to the Operator's version. For example, after upgrading the Operator from 1.10 to 1.11, the Operator scans for running {JaegerShortName} instances and upgrades them to 1.11 as well. - -For specific instructions on how to update the OpenShift Elasticsearch Operator, see xref:../../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading_cluster-logging-upgrading[Updating OpenShift Logging]. - -include::modules/distr-tracing-change-operator-20.adoc[leveloffset=+1] - -[IMPORTANT] -==== -If you have not already updated your OpenShift Elasticsearch Operator as described in xref:../../logging/cluster-logging-upgrading.adoc[Updating OpenShift Logging], complete that update before updating your {JaegerName} Operator. -==== - -For instructions on how to update the Operator channel, see xref:../../operators/admin/olm-upgrading-operators.adoc[Updating installed Operators].
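The channel that OLM tracks for an Operator is set in its `Subscription` object. The following is a minimal sketch of what such a `Subscription` can look like; the subscription name, namespace, and package name shown here are illustrative assumptions rather than values taken from this assembly, so substitute the values that apply to your cluster.

[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: jaeger-product                      # assumed subscription name
  namespace: openshift-distributed-tracing  # assumed installation namespace
spec:
  channel: stable                # update channel that OLM follows for this Operator
  installPlanApproval: Automatic # upgrades are applied without manual approval
  name: jaeger-product           # assumed package name in the catalog
  source: redhat-operators
  sourceNamespace: openshift-marketplace
----

Changing `spec.channel`, for example with `oc edit subscription <subscription_name> -n <namespace>`, prompts OLM to resolve and install the latest version that is available in the new channel.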
diff --git a/distr_tracing/distr_tracing_install/images b/distr_tracing/distr_tracing_install/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/distr_tracing/distr_tracing_install/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/modules b/distr_tracing/distr_tracing_install/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/distr_tracing_install/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/distr_tracing_install/snippets b/distr_tracing/distr_tracing_install/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/distr_tracing/distr_tracing_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/distr_tracing/distributed-tracing-release-notes.adoc b/distr_tracing/distributed-tracing-release-notes.adoc deleted file mode 100644 index fcac0d5f7275..000000000000 --- a/distr_tracing/distributed-tracing-release-notes.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="distr-tracing-release-notes"] -= Distributed tracing release notes -include::_attributes/common-attributes.adoc[] -:context: distributed-tracing-release-notes - -toc::[] - -include::modules/distr-tracing-product-overview.adoc[leveloffset=+1] - -include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] - -include::modules/support.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-new-features.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-technology-preview.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-known-issues.adoc[leveloffset=+1] - -include::modules/distr-tracing-rn-fixed-issues.adoc[leveloffset=+1] diff --git a/distr_tracing/images b/distr_tracing/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/distr_tracing/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/distr_tracing/modules b/distr_tracing/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/distr_tracing/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/distr_tracing/snippets b/distr_tracing/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/distr_tracing/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/getting_started/_attributes b/getting_started/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/getting_started/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/getting_started/accessing-your-services.adoc b/getting_started/accessing-your-services.adoc deleted file mode 100644 index 93c9fb6080fb..000000000000 --- a/getting_started/accessing-your-services.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="accessing-your-services"] -= Accessing your services -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -Once you have an {product-title} subscription, you can access your services. - -include::modules/dedicated-creating-your-cluster.adoc[leveloffset=+1] - -include::modules/dedicated-accessing-your-cluster.adoc[leveloffset=+1] - -//// - -== Receiving status updates - -Access the status portal at link:https://status-dedicated.openshift.com[]. You -can also subscribe to notifications via email, SMS, or RSS by changing your -preferences in the status portal. 
- -//// - -== Requesting support - -If you have questions about your environment or must open a support ticket, -you can open or view a support case in the -link:https://access.redhat.com/support/cases/#/case/list[Red Hat Customer -Portal]. diff --git a/getting_started/dedicated-networking.adoc b/getting_started/dedicated-networking.adoc deleted file mode 100644 index 5f3e4a324521..000000000000 --- a/getting_started/dedicated-networking.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="dedicated-networking"] -= Networking -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-configuring-your-application-routes.adoc[leveloffset=+1] -include::modules/dedicated-exposing-TCP-services.adoc[leveloffset=+1] diff --git a/getting_started/deleting-your-cluster.adoc b/getting_started/deleting-your-cluster.adoc deleted file mode 100644 index f06749fd3a62..000000000000 --- a/getting_started/deleting-your-cluster.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="deleting-your-cluster"] -= Deleting your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -To delete your {product-title} cluster: - -. From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click - on the cluster you want to delete. - -. Click the *Actions* button, then *Delete Cluster*. - -. Type the name of the cluster highlighted in bold, then click *Delete*. - -Cluster deletion occurs automatically. diff --git a/getting_started/images b/getting_started/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/getting_started/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/getting_started/kubernetes-overview.adoc b/getting_started/kubernetes-overview.adoc deleted file mode 100644 index 54d4654afabd..000000000000 --- a/getting_started/kubernetes-overview.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="kubernetes-overview"] -= Kubernetes overview -include::_attributes/common-attributes.adoc[] -:context: kubernetes-overview - -toc::[] - -Kubernetes is an open source container orchestration tool developed by Google. You can run and manage container-based workloads by using Kubernetes. The most common Kubernetes use case is to deploy an array of interconnected microservices, building an application in a cloud native way. You can create Kubernetes clusters that can span hosts across on-premises, public, private, or hybrid clouds. - -Traditionally, applications were deployed on top of a single operating system. With virtualization, you can split the physical host into several virtual hosts. Working on virtual instances on shared resources is not optimal for efficiency and scalability. Because a virtual machine (VM) consumes as many resources as a physical machine, providing resources to a VM such as CPU, RAM, and storage can be expensive. Also, you might see your application degrading in performance due to virtual instance usage on shared resources. - -.Evolution of container technologies for classical deployments -image::247-OpenShift-Kubernetes-Overview.png[] - -To solve this problem, you can use containerization technologies that segregate applications in a containerized environment. Similar to a VM, a container has its own filesystem, vCPU, memory, process space, dependencies, and more. Containers are decoupled from the underlying infrastructure, and are portable across clouds and OS distributions.
Containers are inherently much lighter than a fully-featured OS, and are lightweight isolated processes that run on the operating system kernel. VMs are slower to boot, and are an abstraction of physical hardware. VMs run on a single machine with the help of a hypervisor. - -You can perform the following actions by using Kubernetes: - -* Sharing resources -* Orchestrating containers across multiple hosts -* Installing new hardware configurations -* Running health checks and self-healing applications -* Scaling containerized applications - -include::modules/kubernetes-components.adoc[leveloffset=+1] - -include::modules/kubernetes-resources.adoc[leveloffset=+1] - -.Architecture of Kubernetes -image::247_OpenShift_Kubernetes_Overview-2.png[] - -A cluster is a single computational unit consisting of multiple nodes in a cloud environment. A Kubernetes cluster includes a control plane and worker nodes. You can run Kubernetes containers across various machines and environments. The control plane node controls and maintains the state of a cluster. You can run the Kubernetes application by using worker nodes. You can use the Kubernetes namespace to differentiate cluster resources in a cluster. Namespace scoping is applicable for resource objects, such as deployment, service, and pods. You cannot use namespace for cluster-wide resource objects such as storage class, nodes, and persistent volumes. - -include::modules/kubernetes-conceptual-guidelines.adoc[leveloffset=+1] diff --git a/getting_started/modules b/getting_started/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/getting_started/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/getting_started/openshift-cli.adoc b/getting_started/openshift-cli.adoc deleted file mode 100644 index 3852a6598f34..000000000000 --- a/getting_started/openshift-cli.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-cli"] -= Creating and building an application using the CLI -include::_attributes/common-attributes.adoc[] -:context: openshift-cli - -toc::[] - -[id="openshift-cli-before-you-begin"] - -== Before you begin - -* Review xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-about-cli_cli-developer-commands[About the OpenShift CLI]. -* You must be able to access a running instance of {product-title}. If you do not have access, contact your cluster administrator. -* You must have the OpenShift CLI (`oc`) xref:../cli_reference/openshift_cli/getting-started-cli.adoc#installing-openshift-cli[downloaded and installed]. 
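Before working through the modules that follow, it can help to see the overall shape of the CLI workflow in one place. The commands below are a minimal sketch of that flow; the project name, image reference, and application name are placeholders chosen for illustration and do not come from the modules themselves.

[source,terminal]
----
# Log in with your own credentials and cluster API URL.
$ oc login -u <username> -p <password> https://api.<cluster_domain>:6443

# Create a project to hold the example application.
$ oc new-project my-sample-project

# Deploy a container image and watch the pods come up.
$ oc new-app quay.io/<namespace>/<image>:<tag> --name=my-app
$ oc get pods

# Expose the application outside the cluster with a secured edge route.
$ oc create route edge --service=my-app
$ oc get route my-app
----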
- -include::modules/getting-started-cli-login.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-login[oc login] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-logout[oc logout] - -include::modules/getting-started-cli-creating-new-project.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-project[oc new-project] - -include::modules/getting-started-cli-granting-permissions.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] -* xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC overview] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-policy-add-role-to-user[oc policy add-role-to-user] - -include::modules/getting-started-cli-deploying-first-image.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-app[oc new-app] - - -include::modules/getting-started-cli-creating-route.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-create-route-edge[oc create route edge] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] - - -include::modules/getting-started-cli-examining-pod.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-describe[oc describe] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-label[oc label] -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#viewing-pods[Viewing pods] -* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#viewing-pod-logs[Viewing pod logs] - -include::modules/getting-started-cli-scaling-app.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-scale[oc scale] - -include::modules/getting-started-cli-deploying-python-app.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-app[oc new-app] - -include::modules/getting-started-cli-connecting-a-database.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-new-project[oc new-project] - -include::modules/getting-started-cli-creating-secret.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-create-secret-generic[oc create secret generic] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-set-env[oc set env] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-rollout-status[oc rollout status] - -include::modules/getting-started-cli-load-data-output.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-exec[oc exec] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-label[oc 
label] -* xref:../cli_reference/openshift_cli/developer-cli-commands.adoc#oc-get[oc get] diff --git a/getting_started/openshift-overview.adoc b/getting_started/openshift-overview.adoc deleted file mode 100644 index 82aebc6bbe42..000000000000 --- a/getting_started/openshift-overview.adoc +++ /dev/null @@ -1,115 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-overview"] -= {product-title} overview -include::_attributes/common-attributes.adoc[] -:context: openshift-overview - -toc::[] - -{product-title} is a cloud-based Kubernetes container platform. The foundation of {product-title} is based on Kubernetes and therefore shares the same technology. It is designed to allow applications and the data centers that support them to expand from just a few machines and applications to thousands of machines that serve millions of clients. - -{product-title} enables you to do the following: - -* Provide developers and IT organizations with cloud application platforms that can be used for deploying applications on secure and scalable resources. -* Require minimal configuration and management overhead. -* Bring the Kubernetes platform to customer data centers and cloud. -* Meet security, privacy, compliance, and governance requirements. - -With its foundation in Kubernetes, {product-title} incorporates the same technology that serves as the engine for massive telecommunications, streaming video, gaming, banking, and other applications. Its implementation in open Red Hat technologies lets you extend your containerized applications beyond a single cloud to on-premise and multi-cloud environments. - -include::modules/getting-started-openshift-common-terms.adoc[leveloffset=+1] -include::modules/understanding-openshift.adoc[leveloffset=+1] - - -[id="openshift-overview-install-openshift"] -== Installing {product-title} - -The {product-title} installation program offers you flexibility. You can use the installation program to deploy a cluster on infrastructure that the installation program provisions and the cluster maintains or deploy a cluster on infrastructure that you prepare and maintain. - -For more information about the installation process, the supported platforms, and choosing a method of installing and preparing your cluster, see the following: - -* xref:../installing/index.adoc#installation-overview_ocp-installation-overview[OpenShift Container Platform installation overview] -* xref:../installing/index.adoc#installation-process_ocp-installation-overview[Installation process] -* xref:../installing/index.adoc#supported-platforms-for-openshift-clusters_ocp-installation-overview[Supported platforms for OpenShift Container Platform clusters] -* xref:../installing/installing-preparing.adoc#installing-preparing-selecting-cluster-type[Selecting a cluster installation type] - -include::modules/installation-openshift-local.adoc[leveloffset=+2] - -[id="openshift-next-steps"] -== Next Steps -=== For developers -Develop and deploy containerized applications with {product-title}. {product-title} is a platform for developing and deploying containerized applications. {product-title} documentation helps you: - -* **xref:../architecture/understanding-development.adoc#understanding-development[Understand {product-title} development]**: Learn the different types of containerized applications, from simple containers to advanced Kubernetes deployments and Operators. 
- -* **xref:../applications/projects/working-with-projects.adoc#working-with-projects[Work with projects]**: Create projects from the {product-title} web console or OpenShift CLI (`oc`) to organize and share the software you develop. - -* **xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Work with applications]**: - -Use xref:../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective] in the {product-title} web console to -xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[create and deploy applications]. - -Use the -xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[*Topology* view] -to see your applications, monitor status, connect and group components, and modify your code base. - -* ** xref:../cli_reference/odo-important-update.adoc#odo-important_update[Use the developer CLI tool (`odo`)]**: -The `odo` CLI tool lets developers create single or multi-component applications and automates deployment, build, and service route configurations. It abstracts complex Kubernetes and {product-title} concepts, allowing you to focus on developing your applications. - -* **xref:../cicd/pipelines/understanding-openshift-pipelines.adoc#op-key-features[Create CI/CD Pipelines]**: Pipelines are serverless, cloud-native, continuous integration, and continuous deployment systems that run in isolated containers. -They use standard Tekton custom resources to automate deployments and are designed for decentralized teams working on microservices-based architecture. - -* **Deploy Helm charts**: -xref:../applications/working_with_helm_charts/understanding-helm.adoc#understanding-helm[Helm 3] -is a package manager that helps developers define, install, and update -application packages on Kubernetes. A Helm chart is a packaging format that -describes an application that can be deployed using the Helm CLI. - -* **xref:../cicd/builds/understanding-image-builds.adoc#understanding-image-builds[Understand image builds]**: Choose from different build strategies (Docker, S2I, custom, and pipeline) that can include different kinds of source materials (Git repositories, local binary inputs, and external artifacts). Then, follow examples of build types from basic builds to advanced builds. - -* **xref:../openshift_images/create-images.adoc#create-images[Create container images]**: A container image is the most basic building block in {product-title} (and Kubernetes) applications. Defining image streams lets you gather multiple versions of an image in one place as you continue its development. S2I containers let you insert your source code into a base container that is set up to run code of a particular type, such as Ruby, Node.js, or Python. - -* **xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[Create deployments]**: Use `Deployment` and `DeploymentConfig` objects to exert fine-grained management over applications. - xref:../applications/deployments/managing-deployment-processes.adoc#deployment-operations[Manage deployments] using the *Workloads* page or OpenShift CLI (`oc`). Learn xref:../applications/deployments/deployment-strategies.adoc#deployment-strategies[rolling, recreate, and custom] deployment strategies. 
- -* **xref:../openshift_images/using-templates.adoc#using-templates[Create templates]**: Use existing templates or create your own templates that describe how an application is built or deployed. A template can combine images with descriptions, parameters, replicas, exposed ports and other content that defines how an application can be run or built. - -* **xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understand Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn about the Operator Framework and how to deploy applications using installed Operators into your projects. - -* **xref:../operators/operator_sdk/osdk-about.adoc#osdk-about[Develop Operators]**: Operators are the preferred method for creating on-cluster applications for {product-title} {product-version}. Learn the workflow for building, testing, and deploying Operators. Then, create your own Operators based on xref:../operators/operator_sdk/ansible/osdk-ansible-support.adoc#osdk-ansible-support[Ansible] or -xref:../operators/operator_sdk/helm/osdk-helm-support.adoc#osdk-helm-support[Helm], or configure xref:../operators/operator_sdk/osdk-monitoring-prometheus.adoc#osdk-monitoring-prometheus[built-in Prometheus monitoring] using the Operator SDK. - -* **xref:../rest_api/index.adoc#api-index[REST API reference]**: Learn about {product-title} application programming interface endpoints. - -=== For administrators -* **xref:../architecture/architecture.adoc#architecture-overview-architecture[Understand {product-title} management]**: Learn about components -of the {product-title} {product-version} control plane. See how {product-title} control plane and worker nodes are managed and updated through the xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machine-api-overview_creating-machineset-aws[Machine API] and xref:../architecture/control-plane.adoc#operators-overview_control-plane[Operators]. - -* **xref:../authentication/understanding-authentication.adoc#understanding-authentication[Manage users and groups]**: Add users and groups with different levels of permissions to use or modify clusters. - -* **xref:../authentication/understanding-authentication.adoc#understanding-authentication[Manage authentication]**: Learn how user, group, and API authentication -works in {product-title}. {product-title} supports multiple identity providers. - -* **xref:../networking/understanding-networking.adoc#understanding-networking[Manage networking]**: The cluster network in {product-title} is managed by the xref:../networking/cluster-network-operator.adoc#cluster-network-operator[Cluster Network Operator] (CNO). The CNO uses iptables rules in xref:../networking/openshift_sdn/configuring-kube-proxy.adoc#configuring-kube-proxy[kube-proxy] to direct traffic between nodes and pods running on those nodes. The Multus Container Network Interface adds the capability to attach xref:../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[multiple network interfaces] to a pod. Using -xref:../networking/network_policy/about-network-policy.adoc#about-network-policy[network policy] features, you can isolate your pods or permit selected traffic. - -* **xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Manage storage]**: {product-title} allows cluster administrators to configure persistent storage. 
- -* **xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Manage Operators]**: Lists of Red Hat, ISV, and community Operators can -be reviewed by cluster administrators and xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[installed on their clusters]. After you install them, you can xref:../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[run], xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[upgrade], back up, or otherwise manage the Operator on your cluster. - -* **xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-extending-api-with-crds[Use custom resource definitions (CRDs) to modify the cluster]**: Cluster features implemented with Operators can be modified with CRDs. Learn to xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-creating-custom-resources-definition_crd-extending-api-with-crds[create a CRD] and xref:../operators/understanding/crds/crd-managing-resources-from-crds.adoc#crd-managing-resources-from-crds[manage resources from CRDs]. - -* **xref:../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[Set resource quotas]**: Choose from CPU, memory, and other system resources to xref:../applications/quotas/quotas-setting-per-project.adoc#quotas-setting-per-project[set quotas]. - -* **xref:../applications/pruning-objects.adoc#pruning-objects[Prune and reclaim resources]**: Reclaim space by pruning unneeded Operators, groups, deployments, builds, images, registries, and cron jobs. - -* **xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc#scaling-cluster-monitoring-operator[Scale] and xref:../scalability_and_performance/using-node-tuning-operator.adoc#using-node-tuning-operator[tune] clusters**: Set cluster limits, tune nodes, scale cluster monitoring, and optimize networking, storage, and routes for your environment. - -* **xref:../updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc#update-service-overview_updating-restricted-network-cluster-osus[Using the OpenShift Update Service in a disconnected environement]**: Learn about installing and managing a local OpenShift Update Service for recommending {product-title} updates in disconnected environments. - -* **xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitor clusters]**: -Learn to xref:../monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack[configure the monitoring stack]. -After configuring monitoring, use the web console to access xref:../monitoring/reviewing-monitoring-dashboards.adoc#reviewing-monitoring-dashboards[monitoring dashboards]. In addition to infrastructure metrics, you can also scrape and view metrics for your own services. - -* **xref:../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring_about-remote-health-monitoring[Remote health monitoring]**: {product-title} collects anonymized aggregated information about your cluster. Using Telemetry and the Insights Operator, this data is received by Red Hat and used to improve {product-title}. 
You can view the xref:../support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc#showing-data-collected-by-remote-health-monitoring_showing-data-collected-by-remote-health-monitoring[data collected by remote health monitoring]. diff --git a/getting_started/openshift-web-console.adoc b/getting_started/openshift-web-console.adoc deleted file mode 100644 index f34f48e3b1c9..000000000000 --- a/getting_started/openshift-web-console.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="openshift-web-console"] -= Creating and building an application using the web console -include::_attributes/common-attributes.adoc[] -:context: openshift-web-console - -toc::[] - -[id="openshift-web-console-before-you-begin"] - -== Before you begin -* Review xref:../web_console/web-console.adoc#web-console-overview[Accessing the web console]. -* You must be able to access a running instance of {product-title}. If you do not have access, contact your cluster administrator. - -include::modules/getting-started-web-console-login.adoc[leveloffset=+1] - -include::modules/getting-started-web-console-creating-new-project.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/using-rbac.adoc#default-roles_using-rbac[Default cluster roles] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-granting-permissions.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../authentication/understanding-authentication.adoc#rbac-users_understanding-authentication[Understanding authentication] -* xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[RBAC overview] - -include::modules/getting-started-web-console-deploying-first-image.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc[Creating applications using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-examining-pod.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-interacting-with-applications-and-components_viewing-application-composition-using-topology-view[Interacting with applications and components] -* 
xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-scaling-application-pods-and-checking-builds-and-routes_viewing-application-composition-using-topology-view[Scaling application pods and checking builds and routes] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-labels-and-annotations-used-for-topology-view_viewing-application-composition-using-topology-view[Labels and annotations used for the Topology view] - -include::modules/getting-started-web-console-scaling-app.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-scale-practices_cluster-scaling[Recommended practices for scaling the cluster] -* xref:../nodes/pods/nodes-pods-autoscaling.adoc#nodes-pods-autoscaling-about_nodes-pods-autoscaling[Understanding horizontal pod autoscalers] -* xref:../nodes/pods/nodes-pods-vertical-autoscaler.adoc#nodes-pods-vertical-autoscaler-about_nodes-pods-vertical-autoscaler[About the Vertical Pod Autoscaler Operator] - -include::modules/getting-started-web-console-deploying-python-app.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-adding-services-to-your-application_viewing-application-composition-using-topology-view[Adding services to your application] -* xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-connecting-a-database.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-adding-services-to-your-application_viewing-application-composition-using-topology-view[Adding services to your application] -* xref:../applications/projects/working-with-projects.adoc#viewing-a-project-using-the-web-console_projects[Viewing a project using the web console] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing the topology of your application] -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/projects/working-with-projects.adoc#deleting-a-project-using-the-web-console_projects[Deleting a project using the web console] - -include::modules/getting-started-web-console-creating-secret.adoc[leveloffset=+2] -[role="_additional-resources"] 
-.Additional resources -* xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about_nodes-pods-secrets[Understanding secrets] - -include::modules/getting-started-web-console-load-data-output.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../applications/projects/working-with-projects.adoc#odc-providing-project-permissions-using-developer-perspective_projects[Providing access permissions to your project using the Developer perspective] -* xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-labels-and-annotations-used-for-topology-view_viewing-application-composition-using-topology-view[Labels and annotations used for the Topology view] diff --git a/getting_started/scaling-your-cluster.adoc b/getting_started/scaling-your-cluster.adoc deleted file mode 100644 index 0468c0c63a78..000000000000 --- a/getting_started/scaling-your-cluster.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="scaling-your-cluster"] -= Scaling your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-scaling-your-cluster.adoc[leveloffset=+1] diff --git a/getting_started/snippets b/getting_started/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/getting_started/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/adding_service_cluster/_attributes b/gitops/_attributes similarity index 100% rename from adding_service_cluster/_attributes rename to gitops/_attributes diff --git a/cicd/gitops/about-sizing-requirements-gitops.adoc b/gitops/about-sizing-requirements-gitops.adoc similarity index 100% rename from cicd/gitops/about-sizing-requirements-gitops.adoc rename to gitops/about-sizing-requirements-gitops.adoc diff --git a/cicd/gitops/argo-cd-custom-resource-properties.adoc b/gitops/argo-cd-custom-resource-properties.adoc similarity index 100% rename from cicd/gitops/argo-cd-custom-resource-properties.adoc rename to gitops/argo-cd-custom-resource-properties.adoc diff --git a/cicd/gitops/collecting-debugging-data-for-support.adoc b/gitops/collecting-debugging-data-for-support.adoc similarity index 100% rename from cicd/gitops/collecting-debugging-data-for-support.adoc rename to gitops/collecting-debugging-data-for-support.adoc diff --git a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc b/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc similarity index 76% rename from cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc rename to gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc index 05240fc23661..c0b5be6a8ed8 100644 --- a/cicd/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc +++ b/gitops/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc @@ -22,8 +22,8 @@ include::modules/go-run-argo-cd-instance-on-infrastructure-nodes.adoc[leveloffse [role="_additional-resources"] .Additional resources -* To learn more about taints and tolerations, see xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-* For more information on infrastructure machine sets, see xref:../../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Creating infrastructure machine sets]. +* To learn more about taints and tolerations, see link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. +* For more information on infrastructure machine sets, see link:https://docs.openshift.com/container-platform/latest/machine_management/creating-infrastructure-machinesets.html#creating-infrastructure-machinesets[Creating infrastructure machine sets]. include::modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc[leveloffset=+1] diff --git a/cicd/gitops/configuring-argo-cd-rbac.adoc b/gitops/configuring-argo-cd-rbac.adoc similarity index 100% rename from cicd/gitops/configuring-argo-cd-rbac.adoc rename to gitops/configuring-argo-cd-rbac.adoc diff --git a/cicd/gitops/configuring-resource-quota.adoc b/gitops/configuring-resource-quota.adoc similarity index 100% rename from cicd/gitops/configuring-resource-quota.adoc rename to gitops/configuring-resource-quota.adoc diff --git a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc b/gitops/configuring-sso-for-argo-cd-on-openshift.adoc similarity index 100% rename from cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc rename to gitops/configuring-sso-for-argo-cd-on-openshift.adoc diff --git a/cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc b/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc similarity index 100% rename from cicd/gitops/configuring-sso-for-argo-cd-using-keycloak.adoc rename to gitops/configuring-sso-for-argo-cd-using-keycloak.adoc diff --git a/cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc b/gitops/configuring-sso-on-argo-cd-using-dex.adoc similarity index 100% rename from cicd/gitops/configuring-sso-on-argo-cd-using-dex.adoc rename to gitops/configuring-sso-on-argo-cd-using-dex.adoc diff --git a/cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc b/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc similarity index 100% rename from cicd/gitops/deploying-a-spring-boot-application-with-argo-cd.adoc rename to gitops/deploying-a-spring-boot-application-with-argo-cd.adoc diff --git a/cicd/gitops/gitops-release-notes.adoc b/gitops/gitops-release-notes.adoc similarity index 60% rename from cicd/gitops/gitops-release-notes.adoc rename to gitops/gitops-release-notes.adoc index e5dffe5ab484..fa376967e7b4 100644 --- a/cicd/gitops/gitops-release-notes.adoc +++ b/gitops/gitops-release-notes.adoc @@ -16,7 +16,7 @@ toc::[] * Associate templated configuration with different environments * Promote applications across clusters, from staging to production -For an overview of {gitops-title}, see xref:../../cicd/gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps]. +For an overview of {gitops-title}, see xref:../gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps]. 
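For readers who are new to the declarative model that these release notes assume, the following is a minimal sketch of an Argo CD `Application` resource of the kind that {gitops-title} reconciles. The repository URL, path, and target namespace are illustrative placeholders, not values from this documentation.

[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cluster-configs        # illustrative application name
  namespace: openshift-gitops  # namespace of the default Argo CD instance
spec:
  project: default
  source:
    repoURL: https://github.com/<org>/<repo>.git  # Git repository that holds the manifests
    path: environments/production                 # placeholder path within the repository
    targetRevision: main
  destination:
    server: https://kubernetes.default.svc        # deploy to the cluster that Argo CD runs on
    namespace: my-app                             # placeholder target namespace
  syncPolicy:
    automated:
      prune: true     # remove resources that no longer exist in Git
      selfHeal: true  # revert manual changes back to the state declared in Git
----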
include::modules/go-compatibility-and-support-matrix.adoc[leveloffset=+1] @@ -24,12 +24,6 @@ include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1] // Modules included, most to least recent include::modules/gitops-release-notes-1-9-0.adoc[leveloffset=+1] -// 1.25.0 additional resources, OCP docs -ifdef::openshift-enterprise[] -[role="_additional-resources"] -.Additional resources -* xref:../../operators/admin/olm-configuring-proxy-support.adoc#olm-inject-custom-ca_olm-configuring-proxy-support[Injecting a custom CA certificate] -endif::[] include::modules/gitops-release-notes-1-8-3.adoc[leveloffset=+1] @@ -73,38 +67,3 @@ include::modules/gitops-release-notes-1-5-1.adoc[leveloffset=+1] include::modules/gitops-release-notes-1-5-0.adoc[leveloffset=+1] -include::modules/gitops-release-notes-1-4-13.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-12.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-11.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-6.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-5.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-3.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-4-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-7.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-6.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-3-0.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2-1.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-2.adoc[leveloffset=+1] - -include::modules/gitops-release-notes-1-1.adoc[leveloffset=+1] diff --git a/cicd/gitops/health-information-for-resources-deployment.adoc b/gitops/health-information-for-resources-deployment.adoc similarity index 100% rename from cicd/gitops/health-information-for-resources-deployment.adoc rename to gitops/health-information-for-resources-deployment.adoc diff --git a/cicd/images b/gitops/images similarity index 100% rename from cicd/images rename to gitops/images diff --git a/cicd/index.adoc b/gitops/index.adoc similarity index 100% rename from cicd/index.adoc rename to gitops/index.adoc diff --git a/cicd/gitops/installing-openshift-gitops.adoc b/gitops/installing-openshift-gitops.adoc similarity index 82% rename from cicd/gitops/installing-openshift-gitops.adoc rename to gitops/installing-openshift-gitops.adoc index 2435189f3b21..e9adb288ba07 100644 --- a/cicd/gitops/installing-openshift-gitops.adoc +++ b/gitops/installing-openshift-gitops.adoc @@ -15,7 +15,7 @@ toc::[] * You have access to the {product-title} web console. * You are logged in as a user with the `cluster-admin` role. * You are logged in to the {product-title} cluster as an administrator. -* Your cluster has the xref:../../installing/cluster-capabilities.adoc#marketplace-operator_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. 
+* Your cluster has the link:https://docs.openshift.com/container-platform/latest/installing/cluster-capabilities.html#marketplace-operator_cluster-capabilities[Marketplace capability] enabled or the Red Hat Operator catalog source configured manually. [WARNING] ==== diff --git a/cicd/modules b/gitops/modules similarity index 100% rename from cicd/modules rename to gitops/modules diff --git a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc b/gitops/monitoring-argo-cd-custom-resource-workloads.adoc similarity index 82% rename from cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc rename to gitops/monitoring-argo-cd-custom-resource-workloads.adoc index 800b71dc6de3..f456d2f1b272 100644 --- a/cicd/gitops/monitoring-argo-cd-custom-resource-workloads.adoc +++ b/gitops/monitoring-argo-cd-custom-resource-workloads.adoc @@ -19,7 +19,7 @@ You can enable and disable the setting for monitoring Argo CD custom resource wo * {gitops-title} is installed in your cluster. * The monitoring stack is configured in your cluster in the `openshift-monitoring` project. In addition, the Argo CD instance is in a namespace that you can monitor through Prometheus. * The `kube-state-metrics` service is running in your cluster. -* Optional: If you are enabling monitoring for an Argo CD instance already present in a user-defined project, ensure that the monitoring is xref:../../monitoring/enabling-monitoring-for-user-defined-projects.html#enabling-monitoring-for-user-defined-projects_enabling-monitoring-for-user-defined-projects[enabled for user-defined projects] in your cluster. +* Optional: If you are enabling monitoring for an Argo CD instance already present in a user-defined project, ensure that the monitoring is link:https://docs.openshift.com/container-platform/latest/monitoring/enabling-monitoring-for-user-defined-projects.html#enabling-monitoring-for-user-defined-projects_enabling-monitoring-for-user-defined-projects[enabled for user-defined projects] in your cluster. + [NOTE] ==== @@ -35,4 +35,4 @@ include::modules/gitops-disabling-monitoring-for-argo-cd-custom-resource-workloa [role="_additional-resources"] [id="additional-resources_monitoring-argo-cd-custom-resource-workloads"] == Additional resources -* xref:../../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] +* link:https://docs.openshift.com/container-platform/latest/monitoring/enabling-monitoring-for-user-defined-projects.html#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] diff --git a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc b/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc similarity index 66% rename from cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc rename to gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc index 21122fe29469..3ceba7622516 100644 --- a/cicd/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc +++ b/gitops/run-gitops-control-plane-workload-on-infra-nodes.adoc @@ -20,5 +20,5 @@ include::modules/go-add-infra-nodes.adoc[leveloffset=+1] [role="_additional-resources"] [id="additional-resources_run-gitops-control-plane-workload-on-infra-nodes"] == Additional resources -* To learn more about taints and tolerations, see xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-* For more information on infrastructure machine sets, see xref:../../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Creating infrastructure machine sets]. +* To learn more about taints and tolerations, see link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. +* For more information on infrastructure machine sets, see link:https://docs.openshift.com/container-platform/latest/machine_management/creating-infrastructure-machinesets.html#creating-infrastructure-machinesets[Creating infrastructure machine sets]. diff --git a/cicd/gitops/setting-up-argocd-instance.adoc b/gitops/setting-up-argocd-instance.adoc similarity index 100% rename from cicd/gitops/setting-up-argocd-instance.adoc rename to gitops/setting-up-argocd-instance.adoc diff --git a/adding_service_cluster/snippets b/gitops/snippets similarity index 100% rename from adding_service_cluster/snippets rename to gitops/snippets diff --git a/cicd/gitops/troubleshooting-issues-in-GitOps.adoc b/gitops/troubleshooting-issues-in-GitOps.adoc similarity index 100% rename from cicd/gitops/troubleshooting-issues-in-GitOps.adoc rename to gitops/troubleshooting-issues-in-GitOps.adoc diff --git a/cicd/gitops/understanding-openshift-gitops.adoc b/gitops/understanding-openshift-gitops.adoc similarity index 100% rename from cicd/gitops/understanding-openshift-gitops.adoc rename to gitops/understanding-openshift-gitops.adoc diff --git a/cicd/gitops/uninstalling-openshift-gitops.adoc b/gitops/uninstalling-openshift-gitops.adoc similarity index 77% rename from cicd/gitops/uninstalling-openshift-gitops.adoc rename to gitops/uninstalling-openshift-gitops.adoc index 17e3ecadfa01..e82a01ea7fd3 100644 --- a/cicd/gitops/uninstalling-openshift-gitops.adoc +++ b/gitops/uninstalling-openshift-gitops.adoc @@ -19,5 +19,4 @@ include::modules/go-uninstalling-gitops-operator.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources - -* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster] section. +* You can learn more about uninstalling Operators on {product-title} in the link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-deleting-operators-from-cluster.html#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster] section. diff --git a/hardware_enablement/_attributes b/hardware_enablement/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/hardware_enablement/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/hardware_enablement/about-hardware-enablement.adoc b/hardware_enablement/about-hardware-enablement.adoc deleted file mode 100644 index 36cef34b0431..000000000000 --- a/hardware_enablement/about-hardware-enablement.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-hardware-enablement"] -= About specialized hardware and driver enablement -include::_attributes/common-attributes.adoc[] -:context: about-hardware-enablement - -toc::[] - -The Driver Toolkit (DTK) is a container image in the {product-title} payload which is meant to be used as a base image on which to build driver containers. 
The Driver Toolkit image contains the kernel packages commonly required as dependencies to build or install kernel modules as well as a few tools needed in driver containers. The version of these packages will match the kernel version running on the RHCOS nodes in the corresponding {product-title} release. - -Driver containers are container images used for building and deploying out-of-tree kernel modules and drivers on container operating systems such as :op-system-first:. Kernel modules and drivers are software libraries running with a high level of privilege in the operating system kernel. They extend the kernel functionalities or provide the hardware-specific code required to control new devices. Examples include hardware devices like field-programmable gate arrays (FPGA) or graphics processing units (GPU), and software-defined storage solutions, which all require kernel modules on client machines. Driver containers are the first layer of the software stack used to enable these technologies on {product-title} deployments. \ No newline at end of file diff --git a/hardware_enablement/images b/hardware_enablement/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/hardware_enablement/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/hardware_enablement/kmm-kernel-module-management.adoc b/hardware_enablement/kmm-kernel-module-management.adoc deleted file mode 100644 index 6db1495f3d79..000000000000 --- a/hardware_enablement/kmm-kernel-module-management.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: ASSEMBLY -[id="kernel-module-management-operator"] -= Kernel Module Management Operator -include::_attributes/common-attributes.adoc[] -:context: kernel-module-management-operator - -toc::[] - -Learn about the Kernel Module Management (KMM) Operator and how you can use it to deploy out-of-tree kernel modules and device plugins on {product-title} clusters. - -:FeatureName: Kernel Module Management Operator - -include::modules/kmm-about-kmm.adoc[leveloffset=+1] -include::modules/kmm-installation.adoc[leveloffset=+1] -include::modules/kmm-installing-using-web-console.adoc[leveloffset=+2] -include::modules/kmm-installing-using-cli.adoc[leveloffset=+2] -include::modules/kmm-installing-older-versions.adoc[leveloffset=+2] -include::modules/kmm-deploying-modules.adoc[leveloffset=+1] -include::modules/kmm-creating-module-cr.adoc[leveloffset=+2] -include::modules/kmm-security.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../authentication/understanding-and-managing-pod-security-admission.adoc#understanding-and-managing-pod-security-admission[Understanding and managing pod security admission]. - -include::modules/kmm-example-module-cr.adoc[leveloffset=+2] -include::modules/kmm-creating-moduleloader-image.adoc[leveloffset=+1] -include::modules/kmm-running-depmod.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../hardware_enablement/psap-driver-toolkit.adoc#driver-toolkit[Driver Toolkit]. - -include::modules/kmm-building-in-cluster.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../cicd/builds/build-configuration.adoc#build-configuration[Build configuration resources]. - -include::modules/kmm-using-driver-toolkit.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../hardware_enablement/psap-driver-toolkit.adoc#driver-toolkit[Driver Toolkit]. 
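To give a sense of what the `Module` custom resource referenced in the modules above describes, here is a minimal sketch. The field names follow the upstream Kernel Module Management API (`kmm.sigs.x-k8s.io/v1beta1`) as closely as can be reproduced here, and the module name and image are placeholders; verify the exact schema against the `Module` CRD that is installed in your cluster.

[source,yaml]
----
apiVersion: kmm.sigs.x-k8s.io/v1beta1
kind: Module
metadata:
  name: my-kmod                    # placeholder module name
spec:
  moduleLoader:
    container:
      modprobe:
        moduleName: my-kmod        # kernel module that modprobe loads on each selected node
      kernelMappings:
        - regexp: '^.+$'           # match any kernel version running on the selected nodes
          containerImage: quay.io/<org>/my-kmod:${KERNEL_FULL_VERSION}  # prebuilt ModuleLoader image
  selector:
    node-role.kubernetes.io/worker: ""  # schedule the ModuleLoader only on worker nodes
----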
- -//Deploying kernel modules (Might just leave this short intro in the assembly and put further module below it) -// * Running ModuleLoader images (CONCEPT, or could be included in the assembly with the intro) -// * Using the device plugin (CONCEPT, or could be included in the assembly with the intro) -// * Creating the Module Custom Resource (PROCEDURE? Seems like not a process the user does after reading it. Maybe a REFERENCE) -// * Security and permissions (CONCEPT or REFERENCE) -// * ServiceAccounts and SecurityContextConstraints (can include in Security and permissions) -// * Pod Security Standards (can include in Security and permissions) -// * Example Module CR (REFERENCE) - -// Added for TELCODOCS-1065 -include::modules/kmm-using-signing-with-kmm.adoc[leveloffset=+1] -include::modules/kmm-adding-the-keys-for-secureboot.adoc[leveloffset=+1] -include::modules/kmm-checking-the-keys.adoc[leveloffset=+2] -include::modules/kmm-signing-a-prebuilt-driver-container.adoc[leveloffset=+1] -include::modules/kmm-building-and-signing-a-moduleloader-container-image.adoc[leveloffset=+1] -.Additional resources -For information on creating a service account, see xref:https://docs.openshift.com/container-platform/4.12/authentication/understanding-and-creating-service-accounts.html#service-accounts-managing_understanding-service-accounts[Creating service accounts]. - -include::modules/kmm-debugging-and-troubleshooting.adoc[leveloffset=+1] - -// Added for TELCODOCS-1067 -include::modules/kmm-firmware-support.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../hardware_enablement/kmm-kernel-module-management.adoc#kmm-creating-moduleloader-image_kernel-module-management-operator[Creating a ModuleLoader image]. - -include::modules/kmm-configuring-the-lookup-path-on-nodes.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Machine Config Operator]. - -include::modules/kmm-building-a-moduleloader-image.adoc[leveloffset=+2] -include::modules/kmm-tuning-the-module-resource.adoc[leveloffset=+2] - -// Added for TELCODOCS-1059 -include::modules/kmm-troubleshooting.adoc[leveloffset=+1] -include::modules/kmm-must-gather-tool.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../support/gathering-cluster-data.adoc#about-must-gather_gathering-cluster-data[About the must-gather tool] - -include::modules/kmm-gathering-data-for-kmm.adoc[leveloffset=+3] -include::modules/kmm-gathering-data-for-kmm-hub.adoc[leveloffset=+3] diff --git a/hardware_enablement/modules b/hardware_enablement/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/hardware_enablement/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/hardware_enablement/psap-driver-toolkit.adoc b/hardware_enablement/psap-driver-toolkit.adoc deleted file mode 100644 index 9817ddbde47f..000000000000 --- a/hardware_enablement/psap-driver-toolkit.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="driver-toolkit"] -= Driver Toolkit -include::_attributes/common-attributes.adoc[] -:context: driver-toolkit - -toc::[] - -Learn about the Driver Toolkit and how you can use it as a base image for driver containers for enabling special software and hardware devices on {product-title} deployments. 
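As a quick illustration before the modules that follow, the commands below show one way to resolve and pull the Driver Toolkit image for a given release; the release version and pull secret path are placeholders.

[source,terminal]
----
# Resolve the driver-toolkit image that ships in a specific release payload.
$ oc adm release info quay.io/openshift-release-dev/ocp-release:<version>-x86_64 --image-for=driver-toolkit

# Pull the resolved image with the cluster pull secret so that it can serve as a build base.
$ podman pull --authfile=<path_to_pull_secret> <driver_toolkit_image>
----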
- -
-:FeatureName: The Driver Toolkit - -include::modules/psap-driver-toolkit.adoc[leveloffset=+1] - -include::modules/psap-driver-toolkit-pulling.adoc[leveloffset=+1] - -include::modules/psap-driver-toolkit-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_driver-toolkit-id"] -== Additional resources - -* For more information about configuring registry storage for your cluster, see xref:../registry/configuring-registry-operator.adoc#registry-removed_configuring-registry-operator[Image Registry Operator in OpenShift Container Platform]. diff --git a/hardware_enablement/psap-node-feature-discovery-operator.adoc b/hardware_enablement/psap-node-feature-discovery-operator.adoc deleted file mode 100644 index e35b19794cf8..000000000000 --- a/hardware_enablement/psap-node-feature-discovery-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="node-feature-discovery-operator"] -= Node Feature Discovery Operator -include::_attributes/common-attributes.adoc[] -:context: node-feature-discovery-operator - -toc::[] - -Learn about the Node Feature Discovery (NFD) Operator and how you can use it to expose node-level information by orchestrating Node Feature Discovery, a Kubernetes add-on for detecting hardware features and system configuration. - -include::modules/psap-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-installing-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-using-node-feature-discovery-operator.adoc[leveloffset=+1] - -include::modules/psap-configuring-node-feature-discovery.adoc[leveloffset=+1] - -include::modules/psap-node-feature-discovery-using-topology-updater.adoc[leveloffset=+1] - -include::modules/psap-node-feature-discovery-topology-updater-command-reference.adoc[leveloffset=+2] diff --git a/hardware_enablement/snippets b/hardware_enablement/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/hardware_enablement/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/hosted_control_planes/_attributes b/hosted_control_planes/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/hosted_control_planes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/hosted_control_planes/hcp-backup-restore-dr.adoc b/hosted_control_planes/hcp-backup-restore-dr.adoc deleted file mode 100644 index 4464b822f02c..000000000000 --- a/hosted_control_planes/hcp-backup-restore-dr.adoc +++ /dev/null @@ -1,159 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-backup-restore-dr"] -= Backup, restore, and disaster recovery for hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-backup-restore-dr - -toc::[] - -If you need to back up and restore etcd on a hosted cluster or provide disaster recovery for a hosted cluster, see the following procedures. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -[id="hcp-backup-restore"] -== Backing up and restoring etcd on a hosted cluster - -If you use hosted control planes on {product-title}, the process to back up and restore etcd is different from xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[the usual etcd backup process].
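As a rough, heavily hedged sketch of that difference (the namespace, pod name, certificate paths, and snapshot path below are placeholders, not values taken from the included module): on a hosted cluster, the snapshot is taken inside the etcd pod of the hosted control plane namespace on the management cluster, rather than on a control plane node.

[source,terminal]
----
# Placeholder values throughout: adjust the namespace (typically
# <hosted-cluster-namespace>-<hosted-cluster-name>), certificate paths, and output path.
$ oc exec -n clusters-my-hosted -c etcd etcd-0 -- env ETCDCTL_API=3 etcdctl \
    --cacert /path/to/etcd-ca.crt \
    --cert /path/to/etcd-client.crt \
    --key /path/to/etcd-client.key \
    --endpoints=https://localhost:2379 \
    snapshot save /var/lib/snapshot.db
----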
- -
-// Backing up etcd on a hosted cluster -include::modules/backup-etcd-hosted-cluster.adoc[leveloffset=+2] - -// Restoring an etcd snapshot on a hosted cluster -include::modules/restoring-etcd-snapshot-hosted-cluster.adoc[leveloffset=+2] - -[id="hcp-dr-aws"] -== Disaster recovery for a hosted cluster within an AWS region - -If you need disaster recovery (DR) for a hosted cluster, you can recover the hosted cluster to the same region within AWS. For example, you need DR when the upgrade of a management cluster fails and the hosted cluster is in a read-only state. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -The DR process involves three main steps: - -. Backing up the hosted cluster on the source management cluster -. Restoring the hosted cluster on a destination management cluster -. Deleting the hosted cluster from the source management cluster - -Your workloads remain running during the process. The Cluster API might be unavailable for a period, but that will not affect the services that are running on the worker nodes. - -[IMPORTANT] -==== -Both the source management cluster and the destination management cluster must have the `--external-dns` flags to maintain the API server URL, as shown in this example: - -.Example: External DNS flags -[source,terminal] ----- ---external-dns-provider=aws \ ---external-dns-credentials= \ ---external-dns-domain-filter= ----- - -That way, the server URL ends with `https://api-sample-hosted.sample-hosted.aws.openshift.com`. - -If you do not include the `--external-dns` flags to maintain the API server URL, the hosted cluster cannot be migrated. -==== - -[id="dr-hosted-cluster-env-context"] -=== Example environment and context - -Consider a scenario where you have three clusters to restore. Two are management clusters, and one is a hosted cluster. You can restore either the control plane only or the control plane and the nodes. Before you begin, you need the following information: - -* Source MGMT Namespace: The source management namespace -* Source MGMT ClusterName: The source management cluster name -* Source MGMT Kubeconfig: The source management `kubeconfig` file -* Destination MGMT Kubeconfig: The destination management `kubeconfig` file -* HC Kubeconfig: The hosted cluster `kubeconfig` file -* SSH key file: The SSH public key -* Pull secret: The pull secret file to access the release images -* AWS credentials -* AWS region -* Base domain: The DNS base domain to use as an external DNS -* S3 bucket name: The bucket in the AWS region where you plan to upload the etcd backup - -This information is shown in the following example environment variables.
- -
-.Example environment variables -[source,terminal] ----- -SSH_KEY_FILE=${HOME}/.ssh/id_rsa.pub -BASE_PATH=${HOME}/hypershift -BASE_DOMAIN="aws.sample.com" -PULL_SECRET_FILE="${HOME}/pull_secret.json" -AWS_CREDS="${HOME}/.aws/credentials" -AWS_ZONE_ID="Z02718293M33QHDEQBROL" - -CONTROL_PLANE_AVAILABILITY_POLICY=SingleReplica -HYPERSHIFT_PATH=${BASE_PATH}/src/hypershift -HYPERSHIFT_CLI=${HYPERSHIFT_PATH}/bin/hypershift -HYPERSHIFT_IMAGE=${HYPERSHIFT_IMAGE:-"quay.io/${USER}/hypershift:latest"} -NODE_POOL_REPLICAS=${NODE_POOL_REPLICAS:-2} - -# MGMT Context -MGMT_REGION=us-west-1 -MGMT_CLUSTER_NAME="${USER}-dev" -MGMT_CLUSTER_NS=${USER} -MGMT_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT_CLUSTER_NS}-${MGMT_CLUSTER_NAME}" -MGMT_KUBECONFIG="${MGMT_CLUSTER_DIR}/kubeconfig" - -# MGMT2 Context -MGMT2_CLUSTER_NAME="${USER}-dest" -MGMT2_CLUSTER_NS=${USER} -MGMT2_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT2_CLUSTER_NS}-${MGMT2_CLUSTER_NAME}" -MGMT2_KUBECONFIG="${MGMT2_CLUSTER_DIR}/kubeconfig" - -# Hosted Cluster Context -HC_CLUSTER_NS=clusters -HC_REGION=us-west-1 -HC_CLUSTER_NAME="${USER}-hosted" -HC_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" -HC_KUBECONFIG="${HC_CLUSTER_DIR}/kubeconfig" -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -BUCKET_NAME="${USER}-hosted-${MGMT_REGION}" - -# DNS -AWS_ZONE_ID="Z07342811SH9AA102K1AC" -EXTERNAL_DNS_DOMAIN="hc.jpdv.aws.kerbeross.com" ----- - -[id="dr-hosted-cluster-process"] -=== Overview of the backup and restore process - -The backup and restore process works as follows: - -. On management cluster 1, which you can think of as the source management cluster, the control plane and workers interact by using the external DNS API. The external DNS API is accessible, and a load balancer sits between the management clusters. -+ -image::298_OpenShift_Backup_Restore_0123_00.png[Diagram that shows the workers accessing the external DNS API and the external DNS API pointing to the control plane through a load balancer] - -. You take a snapshot of the hosted cluster, which includes etcd, the control plane, and the worker nodes. During this process, the worker nodes continue to try to access the external DNS API even if it is not accessible, the workloads are running, the control plane is saved in a local manifest file, and etcd is backed up to an S3 bucket. The data plane is active and the control plane is paused. -+ -image::298_OpenShift_Backup_Restore_0123_01.png[] - -. On management cluster 2, which you can think of as the destination management cluster, you restore etcd from the S3 bucket and restore the control plane from the local manifest file. During this process, the external DNS API is stopped, the hosted cluster API becomes inaccessible, and any workers that use the API are unable to update their manifest files, but the workloads are still running. -+ -image::298_OpenShift_Backup_Restore_0123_02.png[] - -. The external DNS API is accessible again, and the worker nodes use it to move to management cluster 2. The external DNS API can access the load balancer that points to the control plane. -+ -image::298_OpenShift_Backup_Restore_0123_03.png[] - -. On management cluster 2, the control plane and worker nodes interact by using the external DNS API. The resources are deleted from management cluster 1, except for the S3 backup of etcd. If you try to set up the hosted cluster again on management cluster 1, it will not work.
-+ -image::298_OpenShift_Backup_Restore_0123_04.png[] - -You can manually back up and restore your hosted cluster, or you can run a script to complete the process. For more information about the script, see "Running a script to back up and restore a hosted cluster". - -// Backing up the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-backup.adoc[leveloffset=+2] - -// Restoring the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-restore.adoc[leveloffset=+2] - -// Deleting the hosted cluster -include::modules/dr-hosted-cluster-within-aws-region-delete.adoc[leveloffset=+2] - -//Helper script -include::modules/dr-hosted-cluster-within-aws-region-script.adoc[leveloffset=+2] - diff --git a/hosted_control_planes/hcp-configuring.adoc b/hosted_control_planes/hcp-configuring.adoc deleted file mode 100644 index 5dfa05a5fe1e..000000000000 --- a/hosted_control_planes/hcp-configuring.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-configuring"] -= Configuring hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-configuring - -toc::[] - -To get started with hosted control planes for {product-title}, you first configure your hosted cluster on the provider that you want to use. Then, you complete a few management tasks. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -You can view the procedures by selecting from one of the following providers: - -[id="hcp-configuring-aws"] -== Amazon Web Services (AWS) - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosting-service-cluster-configure-aws[Configuring the hosting cluster on AWS (Technology Preview)]: The tasks to configure a hosted cluster on AWS include creating the AWS S3 OIDC secret, creating a routable public zone, enabling external DNS, enabling AWS PrivateLink, enabling the hosted control planes feature, and installing the hosted control planes CLI. -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-aws[Managing hosted control plane clusters on AWS (Technology Preview)]: Management tasks include creating, importing, accessing, or deleting a hosted cluster on AWS. - -[id="hcp-configuring-bm"] -== Bare metal - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#configuring-hosting-service-cluster-configure-bm[Configuring the hosting cluster on bare metal (Technology Preview)]: Configure DNS before you create a hosted cluster. -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-bm[Managing hosted control plane clusters on bare metal (Technology Preview)]: Create a hosted cluster, create an `InfraEnv` resource, add agents, access the hosted cluster, scale the `NodePool` object, handle Ingress, enable node auto-scaling, or delete a hosted cluster. 
- -[id="hcp-configuring-virt"] -== {VirtProductName} - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-manage-kubevirt[Managing hosted control plane clusters on OpenShift Virtualization (Technology Preview)]: Create {product-title} clusters with worker nodes that are hosted by KubeVirt virtual machines. - -// To be added after ACM 2.9 goes live: - -//{ibmpowerProductName} - - - - diff --git a/hosted_control_planes/hcp-managing.adoc b/hosted_control_planes/hcp-managing.adoc deleted file mode 100644 index 8b02751060d2..000000000000 --- a/hosted_control_planes/hcp-managing.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-managing"] -= Managing hosted control planes -include::_attributes/common-attributes.adoc[] -:context: hcp-managing - -toc::[] - -After you configure your environment for hosted control planes and create a hosted cluster, you can further manage your clusters and nodes. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -include::modules/updates-for-hosted-control-planes.adoc[leveloffset=+1] -include::modules/updating-node-pools-for-hcp.adoc[leveloffset=+1] -include::modules/configuring-node-pools-for-hcp.adoc[leveloffset=+1] -//restarting hosted control plane components -//pausing reconciliation -//debugging why nodes have not joined the cluster -//using service-level DNS for control plane services -//configuring metrics sets -include::modules/node-tuning-hosted-cluster.adoc[leveloffset=+1] -include::modules/sriov-operator-hosted-control-planes.adoc[leveloffset=+1] -//automated machine management -include::modules/delete-hosted-cluster.adoc[leveloffset=+1] - diff --git a/hosted_control_planes/images b/hosted_control_planes/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/hosted_control_planes/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/hosted_control_planes/index.adoc b/hosted_control_planes/index.adoc deleted file mode 100644 index c045590ad6aa..000000000000 --- a/hosted_control_planes/index.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="hcp-overview"] -= Hosted control planes overview -include::_attributes/common-attributes.adoc[] -:context: hcp-overview - -You can deploy {product-title} clusters by using two different control plane configurations: standalone or hosted control planes. The standalone configuration uses dedicated virtual machines or physical machines to host the control plane. With hosted control planes for {product-title}, you create control planes as pods on a hosting cluster without the need for dedicated virtual or physical machines for each control plane. 
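One hedged way to see that distinction in practice (the namespace naming shown is an assumption; hosted control plane namespaces are commonly named <clusters-namespace>-<hosted-cluster-name> on the management cluster):

[source,terminal]
----
# On the management (hosting) cluster, each hosted control plane runs as
# ordinary pods in its own namespace instead of on dedicated machines.
$ oc get hostedclusters -n clusters
$ oc get pods -n clusters-my-hosted
----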
- -toc::[] - -include::modules/hosted-control-planes-overview.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-addon-intro[HyperShift add-on (Technology Preview)] - -* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hosted-control-planes-intro[Hosted control planes (Technology Preview)] - -include::modules/hosted-control-planes-concepts-personas.adoc[leveloffset=+1] -include::modules/hosted-control-planes-version-support.adoc[leveloffset=+1] diff --git a/hosted_control_planes/modules b/hosted_control_planes/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/hosted_control_planes/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/hosted_control_planes/snippets b/hosted_control_planes/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/hosted_control_planes/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/identity_providers/_attributes b/identity_providers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/identity_providers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/identity_providers/images b/identity_providers/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/identity_providers/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/identity_providers/modules b/identity_providers/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/identity_providers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/identity_providers/snippets b/identity_providers/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/identity_providers/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/images/135_OpenShift_Distributed_Unit_0121.svg b/images/135_OpenShift_Distributed_Unit_0121.svg deleted file mode 100644 index bac62a3ed04f..000000000000 --- a/images/135_OpenShift_Distributed_Unit_0121.svg +++ /dev/null @@ -1 +0,0 @@ -RAN boundaryCentral unit (CU),control planeCentral unit (CU),user plane135_OpenShift_0121Distributed unit (DU)Radio unit (RU) \ No newline at end of file diff --git a/images/150_OpenShift_VMware_on_AWS_0321_arch.svg b/images/150_OpenShift_VMware_on_AWS_0321_arch.svg deleted file mode 100644 index 963242149269..000000000000 --- a/images/150_OpenShift_VMware_on_AWS_0321_arch.svg +++ /dev/null @@ -1 +0,0 @@ -RWO PVCsVMware Cloud on AWSOpenShiftvSphere 7OpenShift SDNOpenShift integrated load balancer and ingressNSX-TVSANvCenterContainersAppAppAppControl plane VMInfra VMWorker VM150_OpenShift_0321 \ No newline at end of file diff --git a/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png b/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png deleted file mode 100644 index 846a46474508..000000000000 Binary files a/images/150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png and /dev/null differ diff --git a/images/152_OpenShift_Config_NTP_0421.png b/images/152_OpenShift_Config_NTP_0421.png deleted file mode 100644 index d576af889b45..000000000000 Binary files a/images/152_OpenShift_Config_NTP_0421.png and /dev/null differ diff --git 
a/images/152_OpenShift_Config_NTP_0421.svg b/images/152_OpenShift_Config_NTP_0421.svg deleted file mode 100644 index d9e21af5f437..000000000000 --- a/images/152_OpenShift_Config_NTP_0421.svg +++ /dev/null @@ -1 +0,0 @@ -Baremetal networkInternet accessIngress VIPAPI VIPControl plane nodes x3Worker nodes xNNTP serverNTP clientOptionalOptional152_OpenShift_0421External NTP serverRouter \ No newline at end of file diff --git a/images/156_OpenShift_ROSA_Arch_0621_arch.svg b/images/156_OpenShift_ROSA_Arch_0621_arch.svg deleted file mode 100644 index bcbd1c06340e..000000000000 --- a/images/156_OpenShift_ROSA_Arch_0621_arch.svg +++ /dev/null @@ -1 +0,0 @@ -Control plane nodes (x3)apiserveretcdcontrollerWorker nodes (xN)Compute (xN)Persistent storagePublicnetworkPrivate networkInternal(API) NLBRed Hat(Console) ELBExternal/internalApp ELBRed Hat(API) ELBExternal/internal(API) NLBRoute53DNSInfra nodes (x2, x3)registryroutermonitoringAWS VPCAvailability zone(x1, x3)Availability zone(x1, x3)Availability zone(x1, x3)InternetRed HatManagementDeveloper156_OpenShift_0621 \ No newline at end of file diff --git a/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg b/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg deleted file mode 100644 index 37f9712650fb..000000000000 --- a/images/156_OpenShift_ROSA_Arch_0621_privatelink.svg +++ /dev/null @@ -1,258 +0,0 @@ - - - - - - Private network - - - - - - - - - - - Route53DNS - - - - - - - - - - - - - - - - - - - - - Developer - - - - - - - Red HatManagement - - - - - - - PrivateLink - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Control plane nodes (x3) - - apiserver - - etcd - - controller - - - - - - - - - - - - Worker nodes (xN) - - Compute (xN) - - Persistent storage - - API NLB - - - - - - - - - - - - - App ELB - - - - - - - - - - - - - - - - - - - - - - - - - - Infra nodes (x2, x3) - - registry - - router - - monitoring - - AWS VPC - - - - - - Availability zone(x1, x3) - Availability zone(x1, x3) - Availability zone(x1, x3) - - 156_OpenShift_1221 - - - diff --git a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png deleted file mode 100644 index a9a171c35912..000000000000 Binary files a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png and /dev/null differ diff --git a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg deleted file mode 100644 index 4a5508c8c975..000000000000 --- a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg +++ /dev/null @@ -1 +0,0 @@ -Out-of-Band Managementnetwork (optional)RouterControl plane nodes x3Baremetal networkProvisioning network (optional)RouterDHCP serverDNS server161_OpenShift_0521Internet accessProvisioning nodeBootstrap VMWorker nodes xNProvisioning bridgeeno1Baremetal bridgeeno2Ingress VIPAPI VIP \ No newline at end of file diff --git a/images/175_OpenShift_ACM_0821_1.png b/images/175_OpenShift_ACM_0821_1.png deleted file mode 100644 index 61743c591d6f..000000000000 Binary files a/images/175_OpenShift_ACM_0821_1.png and /dev/null differ diff --git a/images/175_OpenShift_ACM_0821_2.png b/images/175_OpenShift_ACM_0821_2.png deleted file mode 100644 index 7f517282613e..000000000000 Binary files a/images/175_OpenShift_ACM_0821_2.png and /dev/null differ diff --git a/images/176_OpenShift_zero_touch_provisioning_0821.png b/images/176_OpenShift_zero_touch_provisioning_0821.png deleted file mode 100644 index 
766887b66ada..000000000000 Binary files a/images/176_OpenShift_zero_touch_provisioning_0821.png and /dev/null differ diff --git a/images/177_OpenShift_cluster_provisioning_0821.png b/images/177_OpenShift_cluster_provisioning_0821.png deleted file mode 100644 index eff4a6d28fc7..000000000000 Binary files a/images/177_OpenShift_cluster_provisioning_0821.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_1.png b/images/179_OpenShift_NBDE_implementation_0821_1.png deleted file mode 100644 index 7007bcbeb721..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_1.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_2.png b/images/179_OpenShift_NBDE_implementation_0821_2.png deleted file mode 100644 index 2fe21e0832db..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_2.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_3.png b/images/179_OpenShift_NBDE_implementation_0821_3.png deleted file mode 100644 index f70ddba3da14..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_3.png and /dev/null differ diff --git a/images/179_OpenShift_NBDE_implementation_0821_4.png b/images/179_OpenShift_NBDE_implementation_0821_4.png deleted file mode 100644 index 67d574b2c50a..000000000000 Binary files a/images/179_OpenShift_NBDE_implementation_0821_4.png and /dev/null differ diff --git a/images/183_OpenShift_ZTP_0921.png b/images/183_OpenShift_ZTP_0921.png deleted file mode 100644 index 6ec2ceac46ce..000000000000 Binary files a/images/183_OpenShift_ZTP_0921.png and /dev/null differ diff --git a/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png b/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png deleted file mode 100644 index a6afcf0b6404..000000000000 Binary files a/images/193_OpenShift_Cluster_Install_updates_1021_oVirt.png and /dev/null differ diff --git a/images/202_OpenShift_Ingress_0222_load_balancer.png b/images/202_OpenShift_Ingress_0222_load_balancer.png deleted file mode 100644 index 3d74aa80574e..000000000000 Binary files a/images/202_OpenShift_Ingress_0222_load_balancer.png and /dev/null differ diff --git a/images/202_OpenShift_Ingress_0222_node_port.png b/images/202_OpenShift_Ingress_0222_node_port.png deleted file mode 100644 index 6474dd2be45b..000000000000 Binary files a/images/202_OpenShift_Ingress_0222_node_port.png and /dev/null differ diff --git a/images/209_OpenShift_BGP_0122.png b/images/209_OpenShift_BGP_0122.png deleted file mode 100644 index 551f452b43b4..000000000000 Binary files a/images/209_OpenShift_BGP_0122.png and /dev/null differ diff --git a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png b/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png deleted file mode 100644 index 2aa781eb2fe7..000000000000 Binary files a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png and /dev/null differ diff --git a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png b/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png deleted file mode 100644 index 59ea76d55f95..000000000000 Binary files a/images/210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png and /dev/null differ diff --git a/images/211_OpenShift_Redfish_dataflow_0222.png b/images/211_OpenShift_Redfish_dataflow_0222.png deleted file mode 100644 index 6aa8ce8cbe42..000000000000 Binary files a/images/211_OpenShift_Redfish_dataflow_0222.png and /dev/null differ diff --git 
a/images/211_OpenShift_Redfish_dataflow_0822.png b/images/211_OpenShift_Redfish_dataflow_0822.png deleted file mode 100644 index b115a9e66190..000000000000 Binary files a/images/211_OpenShift_Redfish_dataflow_0822.png and /dev/null differ diff --git a/images/216_OpenShift_Topology-aware_Scheduling_0222.png b/images/216_OpenShift_Topology-aware_Scheduling_0222.png deleted file mode 100644 index 510a625a0204..000000000000 Binary files a/images/216_OpenShift_Topology-aware_Scheduling_0222.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png deleted file mode 100644 index 112ac405fb5c..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_1.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png deleted file mode 100644 index d2f623fb0fc2..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_2.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png deleted file mode 100644 index 13b98bb110cb..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_0222_3.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png deleted file mode 100644 index b435ca39da53..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_1.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png deleted file mode 100644 index ef7f4f36756b..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png and /dev/null differ diff --git a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png b/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png deleted file mode 100644 index c1d3cfc56f50..000000000000 Binary files a/images/217_OpenShift_Zero_Touch_Provisioning_updates_1022_3.png and /dev/null differ diff --git a/images/218_OpenShift_PTP_events_0222.png b/images/218_OpenShift_PTP_events_0222.png deleted file mode 100644 index 6d0d0025a89b..000000000000 Binary files a/images/218_OpenShift_PTP_events_0222.png and /dev/null differ diff --git a/images/247-OpenShift-Kubernetes-Overview.png b/images/247-OpenShift-Kubernetes-Overview.png deleted file mode 100644 index 7ba598c3064f..000000000000 Binary files a/images/247-OpenShift-Kubernetes-Overview.png and /dev/null differ diff --git a/images/247_OpenShift_Kubernetes_Overview-1.png b/images/247_OpenShift_Kubernetes_Overview-1.png deleted file mode 100644 index 927cf769907c..000000000000 Binary files a/images/247_OpenShift_Kubernetes_Overview-1.png and /dev/null differ diff --git a/images/247_OpenShift_Kubernetes_Overview-2.png b/images/247_OpenShift_Kubernetes_Overview-2.png deleted file mode 100644 index e645a2335a53..000000000000 Binary files a/images/247_OpenShift_Kubernetes_Overview-2.png and /dev/null differ diff --git a/images/261_OpenShift_DPDK_0722.png b/images/261_OpenShift_DPDK_0722.png deleted file mode 100644 index 3eaf53747c80..000000000000 Binary files a/images/261_OpenShift_DPDK_0722.png and /dev/null differ diff --git 
a/images/264_OpenShift_CNI_plugin_chain_0622.png b/images/264_OpenShift_CNI_plugin_chain_0622.png deleted file mode 100644 index 06bb772db181..000000000000 Binary files a/images/264_OpenShift_CNI_plugin_chain_0622.png and /dev/null differ diff --git a/images/264_OpenShift_CNI_plugin_chain_0722.png b/images/264_OpenShift_CNI_plugin_chain_0722.png deleted file mode 100644 index 47a35e5bdfa4..000000000000 Binary files a/images/264_OpenShift_CNI_plugin_chain_0722.png and /dev/null differ diff --git a/images/267_OpenShift_on_AWS_Access_Networking_1222.png b/images/267_OpenShift_on_AWS_Access_Networking_1222.png deleted file mode 100644 index f62d20794e5c..000000000000 Binary files a/images/267_OpenShift_on_AWS_Access_Networking_1222.png and /dev/null differ diff --git a/images/291_OpenShift_on_AWS_Intro_1122_docs.png b/images/291_OpenShift_on_AWS_Intro_1122_docs.png deleted file mode 100644 index 51ffbb751dac..000000000000 Binary files a/images/291_OpenShift_on_AWS_Intro_1122_docs.png and /dev/null differ diff --git a/images/292_OpenShift_Configuring_multi-network_policy_1122.png b/images/292_OpenShift_Configuring_multi-network_policy_1122.png deleted file mode 100644 index e1bc9d767e77..000000000000 Binary files a/images/292_OpenShift_Configuring_multi-network_policy_1122.png and /dev/null differ diff --git a/images/292_OpenShift_Configuring_multiple-network_policy_1122.png b/images/292_OpenShift_Configuring_multiple-network_policy_1122.png deleted file mode 100644 index 7f3b2d116535..000000000000 Binary files a/images/292_OpenShift_Configuring_multiple-network_policy_1122.png and /dev/null differ diff --git a/images/295_OpenShift_Nodes_Overview_1222.png b/images/295_OpenShift_Nodes_Overview_1222.png deleted file mode 100644 index 36eb557368f5..000000000000 Binary files a/images/295_OpenShift_Nodes_Overview_1222.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_00.png b/images/298_OpenShift_Backup_Restore_0123_00.png deleted file mode 100644 index f0b2c2c1a93e..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_00.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_01.png b/images/298_OpenShift_Backup_Restore_0123_01.png deleted file mode 100644 index fde99c740f0a..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_01.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_02.png b/images/298_OpenShift_Backup_Restore_0123_02.png deleted file mode 100644 index 627b468cdfdc..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_02.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_03.png b/images/298_OpenShift_Backup_Restore_0123_03.png deleted file mode 100644 index d055d8fe1884..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_03.png and /dev/null differ diff --git a/images/298_OpenShift_Backup_Restore_0123_04.png b/images/298_OpenShift_Backup_Restore_0123_04.png deleted file mode 100644 index b28819019140..000000000000 Binary files a/images/298_OpenShift_Backup_Restore_0123_04.png and /dev/null differ diff --git a/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png b/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png deleted file mode 100644 index 456a330217f3..000000000000 Binary files a/images/299_OpenShift_OVN-Kubernetes_arch_0123_2.png and /dev/null differ diff --git a/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png b/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png deleted file mode 100644 index 
7bda732b17e0..000000000000 Binary files a/images/299_OpenShift_OVN-Kubernetes_arch_0223_1.png and /dev/null differ diff --git a/images/302_OpenShift_Bare_Metal_Operator_0223.png b/images/302_OpenShift_Bare_Metal_Operator_0223.png deleted file mode 100644 index d6c5a973cdd6..000000000000 Binary files a/images/302_OpenShift_Bare_Metal_Operator_0223.png and /dev/null differ diff --git a/images/310_OpenShift_machine_deletion_hooks_0223.png b/images/310_OpenShift_machine_deletion_hooks_0223.png deleted file mode 100644 index 4acaf0f3d13c..000000000000 Binary files a/images/310_OpenShift_machine_deletion_hooks_0223.png and /dev/null differ diff --git a/images/311_RHDevice_Edge_Overview_0223_1.png b/images/311_RHDevice_Edge_Overview_0223_1.png deleted file mode 100644 index b30a00edcc6c..000000000000 Binary files a/images/311_RHDevice_Edge_Overview_0223_1.png and /dev/null differ diff --git a/images/311_RHDevice_Edge_Overview_0223_2.png b/images/311_RHDevice_Edge_Overview_0223_2.png deleted file mode 100644 index fecd5e4bc039..000000000000 Binary files a/images/311_RHDevice_Edge_Overview_0223_2.png and /dev/null differ diff --git a/images/317_RHbM_OVN_topology_0323.png b/images/317_RHbM_OVN_topology_0323.png deleted file mode 100644 index 84d45a07986e..000000000000 Binary files a/images/317_RHbM_OVN_topology_0323.png and /dev/null differ diff --git a/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png b/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png deleted file mode 100644 index a62c45c03c38..000000000000 Binary files a/images/319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png and /dev/null differ diff --git a/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png b/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png deleted file mode 100644 index bb7e4449b29f..000000000000 Binary files a/images/319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png and /dev/null differ diff --git a/images/324_RHbM_Certificate_Rotation_0323_long-term.png b/images/324_RHbM_Certificate_Rotation_0323_long-term.png deleted file mode 100644 index c57664f62d06..000000000000 Binary files a/images/324_RHbM_Certificate_Rotation_0323_long-term.png and /dev/null differ diff --git a/images/324_RHbM_Certificate_Rotation_0323_short-term.png b/images/324_RHbM_Certificate_Rotation_0323_short-term.png deleted file mode 100644 index 9c8fcbcd8358..000000000000 Binary files a/images/324_RHbM_Certificate_Rotation_0323_short-term.png and /dev/null differ diff --git a/images/325_OpenShift_vSphere_Deployment_updates_0323.png b/images/325_OpenShift_vSphere_Deployment_updates_0323.png deleted file mode 100644 index 5f8b2e26aa7a..000000000000 Binary files a/images/325_OpenShift_vSphere_Deployment_updates_0323.png and /dev/null differ diff --git a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png b/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png deleted file mode 100644 index ac969c08eb96..000000000000 Binary files a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_A.png and /dev/null differ diff --git a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png b/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png deleted file mode 100644 index 054e7b078181..000000000000 Binary files a/images/334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png and /dev/null differ diff --git a/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png 
b/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png deleted file mode 100644 index 85dba21558bf..000000000000 Binary files a/images/347_OpenShift_credentials_with_STS_updates_0623_AWS.png and /dev/null differ diff --git a/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png b/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png deleted file mode 100644 index 968b0285f989..000000000000 Binary files a/images/347_OpenShift_credentials_with_STS_updates_0623_GCP.png and /dev/null differ diff --git a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png deleted file mode 100644 index cd53a0a5dcc6..000000000000 Binary files a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png and /dev/null differ diff --git a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png deleted file mode 100644 index 9ddf2df1f1ff..000000000000 Binary files a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png and /dev/null differ diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg deleted file mode 100644 index 04effb30efd1..000000000000 --- a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg +++ /dev/null @@ -1 +0,0 @@ -Baremetal networkProvisioning network (optional)71_OpenShift_1020Internet accessBootstrap VMProvisioning nodeIngress VIPAPI VIPOut-of-Band Managementnetwork (optional)RouterControl plane nodes x3Worker nodes xNRouterDHCP serverDNS serverProvisioning bridgeeno1Baremetal bridgeeno2 \ No newline at end of file diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg deleted file mode 100644 index 3f07dcb5af48..000000000000 --- a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg +++ /dev/null @@ -1 +0,0 @@ -Out-of-Band Managementnetwork (optional)RouterControl plane nodes x3Baremetal networkProvisioning network (optional)RouterDHCP serverDNS server71_OpenShift_1020Internet accessProvisioning nodeBootstrap VMWorker nodes xNProvisioning bridgeeno1Baremetal bridgeeno2Ingress VIPAPI VIP \ No newline at end of file diff --git a/images/92_OpenShift_Cluster_Install_RHV_0520.png b/images/92_OpenShift_Cluster_Install_RHV_0520.png deleted file mode 100644 index 885d338abe38..000000000000 Binary files a/images/92_OpenShift_Cluster_Install_RHV_0520.png and /dev/null differ diff --git a/images/CLI-list.png b/images/CLI-list.png deleted file mode 100644 index 0e4c462d0e16..000000000000 Binary files a/images/CLI-list.png and /dev/null differ diff --git a/images/Git-access-token-secret.png b/images/Git-access-token-secret.png deleted file mode 100644 index 6c732263b879..000000000000 Binary files a/images/Git-access-token-secret.png and /dev/null differ diff --git a/images/Git-access-token.png b/images/Git-access-token.png deleted file mode 100644 index 7d533fc1c7f8..000000000000 Binary files a/images/Git-access-token.png and /dev/null differ diff --git a/images/Github-app-details.png b/images/Github-app-details.png deleted file mode 100644 index d047e1a4e279..000000000000 Binary files a/images/Github-app-details.png and /dev/null differ diff --git a/images/HPAflow.png b/images/HPAflow.png deleted file mode 100644 index 0d7e1335e860..000000000000 Binary files a/images/HPAflow.png and /dev/null differ diff --git a/images/OCP_3_to_4_App_migration.png 
b/images/OCP_3_to_4_App_migration.png deleted file mode 100644 index a1f14cd4dd89..000000000000 Binary files a/images/OCP_3_to_4_App_migration.png and /dev/null differ diff --git a/images/Operator_Icon-OpenShift_Virtualization-5.png b/images/Operator_Icon-OpenShift_Virtualization-5.png deleted file mode 100644 index 7fd768c9155b..000000000000 Binary files a/images/Operator_Icon-OpenShift_Virtualization-5.png and /dev/null differ diff --git a/images/VPC-Diagram.png b/images/VPC-Diagram.png deleted file mode 100644 index 862355d985f6..000000000000 Binary files a/images/VPC-Diagram.png and /dev/null differ diff --git a/images/add-serverless-app-dev.png b/images/add-serverless-app-dev.png deleted file mode 100644 index 1e69d3206d9c..000000000000 Binary files a/images/add-serverless-app-dev.png and /dev/null differ diff --git a/images/admin-console-create-binding-event-source-1.png b/images/admin-console-create-binding-event-source-1.png deleted file mode 100644 index 0f30f0cb4399..000000000000 Binary files a/images/admin-console-create-binding-event-source-1.png and /dev/null differ diff --git a/images/admin-console-create-binding-event-source-2.png b/images/admin-console-create-binding-event-source-2.png deleted file mode 100644 index 5b9b512c7baf..000000000000 Binary files a/images/admin-console-create-binding-event-source-2.png and /dev/null differ diff --git a/images/admin-console-create-role-event-source.png b/images/admin-console-create-role-event-source.png deleted file mode 100644 index 621b1b555223..000000000000 Binary files a/images/admin-console-create-role-event-source.png and /dev/null differ diff --git a/images/admin-console-create-sa-event-source.png b/images/admin-console-create-sa-event-source.png deleted file mode 100644 index a1501d594768..000000000000 Binary files a/images/admin-console-create-sa-event-source.png and /dev/null differ diff --git a/images/after-k8s-mount-propagation.png b/images/after-k8s-mount-propagation.png deleted file mode 100644 index c06e4d7b91a3..000000000000 Binary files a/images/after-k8s-mount-propagation.png and /dev/null differ diff --git a/images/agent-based-installer-workflow.png b/images/agent-based-installer-workflow.png deleted file mode 100644 index f3167a13b1d6..000000000000 Binary files a/images/agent-based-installer-workflow.png and /dev/null differ diff --git a/images/agent-tui-home.png b/images/agent-tui-home.png deleted file mode 100644 index f983aede0dda..000000000000 Binary files a/images/agent-tui-home.png and /dev/null differ diff --git a/images/api-admission-chain.png b/images/api-admission-chain.png deleted file mode 100644 index 8c483eab6d3c..000000000000 Binary files a/images/api-admission-chain.png and /dev/null differ diff --git a/images/app-launcher.png b/images/app-launcher.png deleted file mode 100644 index 6a5ac43a54c8..000000000000 Binary files a/images/app-launcher.png and /dev/null differ diff --git a/images/architecture_overview.png b/images/architecture_overview.png deleted file mode 100644 index 55f1e2cee849..000000000000 Binary files a/images/architecture_overview.png and /dev/null differ diff --git a/images/before-k8s-mount-propagation.png b/images/before-k8s-mount-propagation.png deleted file mode 100644 index 5e527ca799fb..000000000000 Binary files a/images/before-k8s-mount-propagation.png and /dev/null differ diff --git a/images/bringing_it_all_together.png b/images/bringing_it_all_together.png deleted file mode 100644 index 557d6196f208..000000000000 Binary files a/images/bringing_it_all_together.png and 
/dev/null differ diff --git a/images/build_process1.png b/images/build_process1.png deleted file mode 100644 index c721722629e3..000000000000 Binary files a/images/build_process1.png and /dev/null differ diff --git a/images/build_process2.png b/images/build_process2.png deleted file mode 100644 index 8edbaf557cc9..000000000000 Binary files a/images/build_process2.png and /dev/null differ diff --git a/images/click-question-mark.png b/images/click-question-mark.png deleted file mode 100644 index bf34abb4a202..000000000000 Binary files a/images/click-question-mark.png and /dev/null differ diff --git a/images/cluster-configuration-general-tab.png b/images/cluster-configuration-general-tab.png deleted file mode 100644 index ff333d488818..000000000000 Binary files a/images/cluster-configuration-general-tab.png and /dev/null differ diff --git a/images/cluster-settings-console.png b/images/cluster-settings-console.png deleted file mode 100644 index 206559d5f787..000000000000 Binary files a/images/cluster-settings-console.png and /dev/null differ diff --git a/images/cnv_components_cdi-operator.png b/images/cnv_components_cdi-operator.png deleted file mode 100644 index 26e52340e409..000000000000 Binary files a/images/cnv_components_cdi-operator.png and /dev/null differ diff --git a/images/cnv_components_cluster-network-addons-operator.png b/images/cnv_components_cluster-network-addons-operator.png deleted file mode 100644 index ffd3e871fc78..000000000000 Binary files a/images/cnv_components_cluster-network-addons-operator.png and /dev/null differ diff --git a/images/cnv_components_hco-operator.png b/images/cnv_components_hco-operator.png deleted file mode 100644 index 36adf5bb9e7e..000000000000 Binary files a/images/cnv_components_hco-operator.png and /dev/null differ diff --git a/images/cnv_components_hpp-operator.png b/images/cnv_components_hpp-operator.png deleted file mode 100644 index 57f9a4ada49c..000000000000 Binary files a/images/cnv_components_hpp-operator.png and /dev/null differ diff --git a/images/cnv_components_main.png b/images/cnv_components_main.png deleted file mode 100644 index 45577eb8a51d..000000000000 Binary files a/images/cnv_components_main.png and /dev/null differ diff --git a/images/cnv_components_ssp-operator.png b/images/cnv_components_ssp-operator.png deleted file mode 100644 index 68166b451d7d..000000000000 Binary files a/images/cnv_components_ssp-operator.png and /dev/null differ diff --git a/images/cnv_components_tekton-tasks-operator.png b/images/cnv_components_tekton-tasks-operator.png deleted file mode 100644 index e817ff35e15d..000000000000 Binary files a/images/cnv_components_tekton-tasks-operator.png and /dev/null differ diff --git a/images/cnv_components_virt-operator.png b/images/cnv_components_virt-operator.png deleted file mode 100644 index 844fd378c8db..000000000000 Binary files a/images/cnv_components_virt-operator.png and /dev/null differ diff --git a/images/create-event-sink.png b/images/create-event-sink.png deleted file mode 100644 index 14e6cf205459..000000000000 Binary files a/images/create-event-sink.png and /dev/null differ diff --git a/images/create-eventing-namespace.png b/images/create-eventing-namespace.png deleted file mode 100644 index cd316ed23011..000000000000 Binary files a/images/create-eventing-namespace.png and /dev/null differ diff --git a/images/create-nodes-okd.png b/images/create-nodes-okd.png deleted file mode 100644 index 2abc14c89aca..000000000000 Binary files a/images/create-nodes-okd.png and /dev/null differ diff --git 
a/images/create-nodes.png b/images/create-nodes.png deleted file mode 100644 index 07d9ea6a5fbe..000000000000 Binary files a/images/create-nodes.png and /dev/null differ diff --git a/images/create-push-app.png b/images/create-push-app.png deleted file mode 100644 index a81d88d2fa1c..000000000000 Binary files a/images/create-push-app.png and /dev/null differ diff --git a/images/create-serving-namespace.png b/images/create-serving-namespace.png deleted file mode 100644 index 9d8ef2abb9ce..000000000000 Binary files a/images/create-serving-namespace.png and /dev/null differ diff --git a/images/create-silence.png b/images/create-silence.png deleted file mode 100644 index 4085666a23df..000000000000 Binary files a/images/create-silence.png and /dev/null differ diff --git a/images/create-sub-ODC.png b/images/create-sub-ODC.png deleted file mode 100644 index 54a81308a898..000000000000 Binary files a/images/create-sub-ODC.png and /dev/null differ diff --git a/images/csi-arch-rev1.png b/images/csi-arch-rev1.png deleted file mode 100644 index 5ebab3f41b7c..000000000000 Binary files a/images/csi-arch-rev1.png and /dev/null differ diff --git a/images/csi-arch.png b/images/csi-arch.png deleted file mode 100644 index ffaa509ae5ab..000000000000 Binary files a/images/csi-arch.png and /dev/null differ diff --git a/images/cso-namespace-vulnerable.png b/images/cso-namespace-vulnerable.png deleted file mode 100644 index 948a6dc81276..000000000000 Binary files a/images/cso-namespace-vulnerable.png and /dev/null differ diff --git a/images/cso-registry-vulnerable.png b/images/cso-registry-vulnerable.png deleted file mode 100644 index c9b147d11cf3..000000000000 Binary files a/images/cso-registry-vulnerable.png and /dev/null differ diff --git a/images/custom_4.5.png b/images/custom_4.5.png deleted file mode 100644 index 7a8d1607d2cd..000000000000 Binary files a/images/custom_4.5.png and /dev/null differ diff --git a/images/custom_4.5.svg b/images/custom_4.5.svg deleted file mode 100644 index 8116cb5e88eb..000000000000 --- a/images/custom_4.5.svg +++ /dev/null @@ -1 +0,0 @@ -namespace/openshift-authenticationsecrets/v4-0-config-system-router-certs11namespace/openshift-consoleconfigmaps/default-ingress-cert11configmaps/serviceacount-ca11namespace/openshift-kube-controller-managerconfigmaps/serviceacount-ca11namespace/openshift-kube-schedulerconfigmaps/proxy-ca10configmaps/my-certificatenamespace/openshift-ingressnamespace/openshift-ingress-operatornamespace/openshift-config-manageddeployments/router-default3secrets/my-certificate2ingresscontrollers/defaultReferenceContents are copied0namespace/openshift-config8proxies/cluster9User7cluster-scopedsecrets/router-certs4configmaps/default-ingress-cert5configmaps/trusted-ca-bundle6Requesting namespaces75_OpenShift_0520 \ No newline at end of file diff --git a/images/customizing-user-perspective.png b/images/customizing-user-perspective.png deleted file mode 100644 index ffa0c674ddce..000000000000 Binary files a/images/customizing-user-perspective.png and /dev/null differ diff --git a/images/darkcircle-0.png b/images/darkcircle-0.png deleted file mode 100644 index 5ab465076d8f..000000000000 Binary files a/images/darkcircle-0.png and /dev/null differ diff --git a/images/darkcircle-1.png b/images/darkcircle-1.png deleted file mode 100644 index 7b16d8ed9932..000000000000 Binary files a/images/darkcircle-1.png and /dev/null differ diff --git a/images/darkcircle-10.png b/images/darkcircle-10.png deleted file mode 100644 index dfdc4f8d4ea4..000000000000 Binary files 
a/images/darkcircle-10.png and /dev/null differ diff --git a/images/darkcircle-11.png b/images/darkcircle-11.png deleted file mode 100644 index 9bdcfea71d14..000000000000 Binary files a/images/darkcircle-11.png and /dev/null differ diff --git a/images/darkcircle-12.png b/images/darkcircle-12.png deleted file mode 100644 index 303bcd41f52d..000000000000 Binary files a/images/darkcircle-12.png and /dev/null differ diff --git a/images/darkcircle-2.png b/images/darkcircle-2.png deleted file mode 100644 index a537be6f42c8..000000000000 Binary files a/images/darkcircle-2.png and /dev/null differ diff --git a/images/darkcircle-3.png b/images/darkcircle-3.png deleted file mode 100644 index a22625c683a8..000000000000 Binary files a/images/darkcircle-3.png and /dev/null differ diff --git a/images/darkcircle-4.png b/images/darkcircle-4.png deleted file mode 100644 index 27d03e8c1f30..000000000000 Binary files a/images/darkcircle-4.png and /dev/null differ diff --git a/images/darkcircle-5.png b/images/darkcircle-5.png deleted file mode 100644 index 8a59e47b1498..000000000000 Binary files a/images/darkcircle-5.png and /dev/null differ diff --git a/images/darkcircle-6.png b/images/darkcircle-6.png deleted file mode 100644 index c8b686908d54..000000000000 Binary files a/images/darkcircle-6.png and /dev/null differ diff --git a/images/darkcircle-7.png b/images/darkcircle-7.png deleted file mode 100644 index 2503523947d4..000000000000 Binary files a/images/darkcircle-7.png and /dev/null differ diff --git a/images/darkcircle-8.png b/images/darkcircle-8.png deleted file mode 100644 index b14edd088c2c..000000000000 Binary files a/images/darkcircle-8.png and /dev/null differ diff --git a/images/darkcircle-9.png b/images/darkcircle-9.png deleted file mode 100644 index 5cc237f70578..000000000000 Binary files a/images/darkcircle-9.png and /dev/null differ diff --git a/images/default_4.5.png b/images/default_4.5.png deleted file mode 100644 index 9aa577d33900..000000000000 Binary files a/images/default_4.5.png and /dev/null differ diff --git a/images/default_4.5.svg b/images/default_4.5.svg deleted file mode 100644 index 42ea9d8ad941..000000000000 --- a/images/default_4.5.svg +++ /dev/null @@ -1 +0,0 @@ -namespace/openshift-authenticationsecrets/v4-0-config-system-router-certs11configmaps/detault-ingress-cert11configmaps/serviceacount-ca11namespace/openshift-kube-controller-managerconfigmaps/serviceacount-ca11namespace/openshift-kube-schedulernamespace/openshift-ingressnamespace/openshift-ingress-operatornamespace/openshift-config-manageddeployments/router-default3secrets/router-certs-default2ingresscontrollers/defaultReferenceContents are copied0secrets/router-certs4configmaps/default-ingress-cert5secrets/router-ca1namespace/openshift-console75_OpenShift_0520 \ No newline at end of file diff --git a/images/delete-apiserversource-odc.png b/images/delete-apiserversource-odc.png deleted file mode 100644 index eec08d930913..000000000000 Binary files a/images/delete-apiserversource-odc.png and /dev/null differ diff --git a/images/delete.png b/images/delete.png deleted file mode 100644 index cec427f39142..000000000000 Binary files a/images/delete.png and /dev/null differ diff --git a/images/developer-catalog.png b/images/developer-catalog.png deleted file mode 100644 index e72996af91aa..000000000000 Binary files a/images/developer-catalog.png and /dev/null differ diff --git a/images/dpdk_line_rate.png b/images/dpdk_line_rate.png deleted file mode 100644 index b417377ded23..000000000000 Binary files 
a/images/dpdk_line_rate.png and /dev/null differ diff --git a/images/ellipsis-v.svg b/images/ellipsis-v.svg deleted file mode 100644 index c3074e62602a..000000000000 --- a/images/ellipsis-v.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/images/etcd-monitoring-working.png b/images/etcd-monitoring-working.png deleted file mode 100644 index 0c8d1ded9b5e..000000000000 Binary files a/images/etcd-monitoring-working.png and /dev/null differ diff --git a/images/etcd-no-certificate.png b/images/etcd-no-certificate.png deleted file mode 100644 index 7af701b81cf7..000000000000 Binary files a/images/etcd-no-certificate.png and /dev/null differ diff --git a/images/etcd-operator-overview.png b/images/etcd-operator-overview.png deleted file mode 100644 index 72ae3f7901b5..000000000000 Binary files a/images/etcd-operator-overview.png and /dev/null differ diff --git a/images/etcd-operator-resources.png b/images/etcd-operator-resources.png deleted file mode 100644 index dce86b5e8eec..000000000000 Binary files a/images/etcd-operator-resources.png and /dev/null differ diff --git a/images/event-sources-odc.png b/images/event-sources-odc.png deleted file mode 100644 index d3a13fea55a3..000000000000 Binary files a/images/event-sources-odc.png and /dev/null differ diff --git a/images/eventing-YAML-HA.png b/images/eventing-YAML-HA.png deleted file mode 100644 index c928146179ba..000000000000 Binary files a/images/eventing-YAML-HA.png and /dev/null differ diff --git a/images/eventing-conditions-true.png b/images/eventing-conditions-true.png deleted file mode 100644 index ba904f8fcf61..000000000000 Binary files a/images/eventing-conditions-true.png and /dev/null differ diff --git a/images/eventing-overview.png b/images/eventing-overview.png deleted file mode 100644 index 4264c5bc4d61..000000000000 Binary files a/images/eventing-overview.png and /dev/null differ diff --git a/images/export-application-dialog-box.png b/images/export-application-dialog-box.png deleted file mode 100644 index e069ae402239..000000000000 Binary files a/images/export-application-dialog-box.png and /dev/null differ diff --git a/images/filtered-messages.png b/images/filtered-messages.png deleted file mode 100644 index a2849401fe99..000000000000 Binary files a/images/filtered-messages.png and /dev/null differ diff --git a/images/flow1.png b/images/flow1.png deleted file mode 100644 index 0c1201620c33..000000000000 Binary files a/images/flow1.png and /dev/null differ diff --git a/images/flow2.png b/images/flow2.png deleted file mode 100644 index 105d2dbea3c2..000000000000 Binary files a/images/flow2.png and /dev/null differ diff --git a/images/flow3.png b/images/flow3.png deleted file mode 100644 index af0b1c94c442..000000000000 Binary files a/images/flow3.png and /dev/null differ diff --git a/images/flow4.png b/images/flow4.png deleted file mode 100644 index dcf737241600..000000000000 Binary files a/images/flow4.png and /dev/null differ diff --git a/images/getting-started-create-route-view-app.png b/images/getting-started-create-route-view-app.png deleted file mode 100644 index fe45d96de039..000000000000 Binary files a/images/getting-started-create-route-view-app.png and /dev/null differ diff --git a/images/getting-started-examine-pod.png b/images/getting-started-examine-pod.png deleted file mode 100644 index 18114693fa6b..000000000000 Binary files a/images/getting-started-examine-pod.png and /dev/null differ diff --git a/images/getting-started-map-national-parks.png b/images/getting-started-map-national-parks.png 
deleted file mode 100644 index 5c15da96beb5..000000000000 Binary files a/images/getting-started-map-national-parks.png and /dev/null differ diff --git a/images/getting-started-perspective-selector.png b/images/getting-started-perspective-selector.png deleted file mode 100644 index 492a3e5cec12..000000000000 Binary files a/images/getting-started-perspective-selector.png and /dev/null differ diff --git a/images/getting-started-scaling-pod.png b/images/getting-started-scaling-pod.png deleted file mode 100644 index 10078565c764..000000000000 Binary files a/images/getting-started-scaling-pod.png and /dev/null differ diff --git a/images/grid.png b/images/grid.png deleted file mode 100644 index 57998b4e58e7..000000000000 Binary files a/images/grid.png and /dev/null differ diff --git a/images/hosted-control-planes-diagram.png b/images/hosted-control-planes-diagram.png deleted file mode 100644 index aa2d465d8be8..000000000000 Binary files a/images/hosted-control-planes-diagram.png and /dev/null differ diff --git a/images/icon-link.png b/images/icon-link.png deleted file mode 100644 index 530de47f03c4..000000000000 Binary files a/images/icon-link.png and /dev/null differ diff --git a/images/icon-pencil.png b/images/icon-pencil.png deleted file mode 100644 index 3104d3b60ebc..000000000000 Binary files a/images/icon-pencil.png and /dev/null differ diff --git a/images/image_security.png b/images/image_security.png deleted file mode 100644 index a1254d2a273a..000000000000 Binary files a/images/image_security.png and /dev/null differ diff --git a/images/img_power.png b/images/img_power.png deleted file mode 100644 index 419fa79a55a1..000000000000 Binary files a/images/img_power.png and /dev/null differ diff --git a/images/ingress-certificates-workflow.png b/images/ingress-certificates-workflow.png deleted file mode 100644 index 77edf2652acf..000000000000 Binary files a/images/ingress-certificates-workflow.png and /dev/null differ diff --git a/images/installconfig.png b/images/installconfig.png deleted file mode 100644 index cf813b8d0e9c..000000000000 Binary files a/images/installconfig.png and /dev/null differ diff --git a/images/kafka-YAML-HA.png b/images/kafka-YAML-HA.png deleted file mode 100644 index 10252db282f2..000000000000 Binary files a/images/kafka-YAML-HA.png and /dev/null differ diff --git a/images/kebab.png b/images/kebab.png deleted file mode 100644 index 81893bd4ad10..000000000000 Binary files a/images/kebab.png and /dev/null differ diff --git a/images/knative-admin-health-status-dash.png b/images/knative-admin-health-status-dash.png deleted file mode 100644 index 90854acc3679..000000000000 Binary files a/images/knative-admin-health-status-dash.png and /dev/null differ diff --git a/images/knative-kafka-overview.png b/images/knative-kafka-overview.png deleted file mode 100644 index 2be315547a64..000000000000 Binary files a/images/knative-kafka-overview.png and /dev/null differ diff --git a/images/knative-service-architecture.png b/images/knative-service-architecture.png deleted file mode 100644 index 4d171664eea1..000000000000 Binary files a/images/knative-service-architecture.png and /dev/null differ diff --git a/images/knative-serving-created.png b/images/knative-serving-created.png deleted file mode 100644 index d6e4f6a47720..000000000000 Binary files a/images/knative-serving-created.png and /dev/null differ diff --git a/images/knative-serving-overview.png b/images/knative-serving-overview.png deleted file mode 100644 index 7980664456f3..000000000000 Binary files 
a/images/knative-serving-overview.png and /dev/null differ diff --git a/images/microshift-cert-rotation.png b/images/microshift-cert-rotation.png deleted file mode 100644 index 49497bbf5f5b..000000000000 Binary files a/images/microshift-cert-rotation.png and /dev/null differ diff --git a/images/microshift_ovn_topology.png b/images/microshift_ovn_topology.png deleted file mode 100644 index 55db68eeed96..000000000000 Binary files a/images/microshift_ovn_topology.png and /dev/null differ diff --git a/images/migration-PV-copy.png b/images/migration-PV-copy.png deleted file mode 100644 index a5b5baa39d98..000000000000 Binary files a/images/migration-PV-copy.png and /dev/null differ diff --git a/images/migration-PV-move.png b/images/migration-PV-move.png deleted file mode 100644 index 291fce220cf7..000000000000 Binary files a/images/migration-PV-move.png and /dev/null differ diff --git a/images/migration-architecture.png b/images/migration-architecture.png deleted file mode 100644 index 3ba8f035d15c..000000000000 Binary files a/images/migration-architecture.png and /dev/null differ diff --git a/images/mixed-windows-linux-workloads.png b/images/mixed-windows-linux-workloads.png deleted file mode 100644 index c4b9748df19c..000000000000 Binary files a/images/mixed-windows-linux-workloads.png and /dev/null differ diff --git a/images/monitoring-alert-overview.png b/images/monitoring-alert-overview.png deleted file mode 100644 index c12d2f088e41..000000000000 Binary files a/images/monitoring-alert-overview.png and /dev/null differ diff --git a/images/monitoring-alerting-rule-overview.png b/images/monitoring-alerting-rule-overview.png deleted file mode 100644 index 812668b53be5..000000000000 Binary files a/images/monitoring-alerting-rule-overview.png and /dev/null differ diff --git a/images/monitoring-alerting-rules-screen.png b/images/monitoring-alerting-rules-screen.png deleted file mode 100644 index 401365332e17..000000000000 Binary files a/images/monitoring-alerting-rules-screen.png and /dev/null differ diff --git a/images/monitoring-alerts-screen.png b/images/monitoring-alerts-screen.png deleted file mode 100644 index 0ed404546a33..000000000000 Binary files a/images/monitoring-alerts-screen.png and /dev/null differ diff --git a/images/monitoring-architecture.png b/images/monitoring-architecture.png deleted file mode 100644 index b3d15f0cbbeb..000000000000 Binary files a/images/monitoring-architecture.png and /dev/null differ diff --git a/images/monitoring-dashboard-administrator.png b/images/monitoring-dashboard-administrator.png deleted file mode 100644 index 1138a2fc4722..000000000000 Binary files a/images/monitoring-dashboard-administrator.png and /dev/null differ diff --git a/images/monitoring-dashboard-compute-resources.png b/images/monitoring-dashboard-compute-resources.png deleted file mode 100644 index ca1cdd7e209e..000000000000 Binary files a/images/monitoring-dashboard-compute-resources.png and /dev/null differ diff --git a/images/monitoring-dashboard-developer.png b/images/monitoring-dashboard-developer.png deleted file mode 100644 index f59847857a21..000000000000 Binary files a/images/monitoring-dashboard-developer.png and /dev/null differ diff --git a/images/monitoring-diagram.png b/images/monitoring-diagram.png deleted file mode 100644 index 3e76816e5ecf..000000000000 Binary files a/images/monitoring-diagram.png and /dev/null differ diff --git a/images/monitoring-metrics-developer.png b/images/monitoring-metrics-developer.png deleted file mode 100644 index cbd0abaa88c0..000000000000 
Binary files a/images/monitoring-metrics-developer.png and /dev/null differ diff --git a/images/monitoring-metrics-screen.png b/images/monitoring-metrics-screen.png deleted file mode 100644 index 4058f7f196dd..000000000000 Binary files a/images/monitoring-metrics-screen.png and /dev/null differ diff --git a/images/monitoring-silences-screen.png b/images/monitoring-silences-screen.png deleted file mode 100644 index e46f5925cbad..000000000000 Binary files a/images/monitoring-silences-screen.png and /dev/null differ diff --git a/images/monitoring-yaml-screen.png b/images/monitoring-yaml-screen.png deleted file mode 100644 index f18af6b0cc02..000000000000 Binary files a/images/monitoring-yaml-screen.png and /dev/null differ diff --git a/images/node-add-hpa-action.png b/images/node-add-hpa-action.png deleted file mode 100644 index 01f7c73fd779..000000000000 Binary files a/images/node-add-hpa-action.png and /dev/null differ diff --git a/images/node-tuning-operator-workflow-revised.png b/images/node-tuning-operator-workflow-revised.png deleted file mode 100644 index a272e5854265..000000000000 Binary files a/images/node-tuning-operator-workflow-revised.png and /dev/null differ diff --git a/images/not-placetools.png b/images/not-placetools.png deleted file mode 100644 index cb70ff8c0325..000000000000 Binary files a/images/not-placetools.png and /dev/null differ diff --git a/images/nw-egress-ips-diagram.svg b/images/nw-egress-ips-diagram.svg deleted file mode 100644 index 3c996e5648dc..000000000000 --- a/images/nw-egress-ips-diagram.svg +++ /dev/null @@ -1 +0,0 @@ -192.168.126.102Node 3meta:name: node3labels:k8s.ovn.org/egress-assignable: ""Pod network10.128.0.0/14Infrastructure network192.168.126.0/18Externalservice192.168.126.10pod4pod2pod1Node 1meta:name: node1labels:k8s.ovn.org/egress-assignable: ""namespace1namespace2121_OpenShift_1020Node 2meta:name: node2pod3 \ No newline at end of file diff --git a/images/nw-ipsec-encryption.png b/images/nw-ipsec-encryption.png deleted file mode 100644 index 0d1fc46201b1..000000000000 Binary files a/images/nw-ipsec-encryption.png and /dev/null differ diff --git a/images/nw-metallb-layer2.png b/images/nw-metallb-layer2.png deleted file mode 100644 index 3cc343622e17..000000000000 Binary files a/images/nw-metallb-layer2.png and /dev/null differ diff --git a/images/nw-sharding-namespace-labels.png b/images/nw-sharding-namespace-labels.png deleted file mode 100644 index 64b4e5cc3125..000000000000 Binary files a/images/nw-sharding-namespace-labels.png and /dev/null differ diff --git a/images/nw-sharding-route-labels.png b/images/nw-sharding-route-labels.png deleted file mode 100644 index 995e2c137c0d..000000000000 Binary files a/images/nw-sharding-route-labels.png and /dev/null differ diff --git a/images/observe-dashboard-developer.png b/images/observe-dashboard-developer.png deleted file mode 100644 index d502fbf9c064..000000000000 Binary files a/images/observe-dashboard-developer.png and /dev/null differ diff --git a/images/odc-binding-connector.png b/images/odc-binding-connector.png deleted file mode 100644 index a4606e850ced..000000000000 Binary files a/images/odc-binding-connector.png and /dev/null differ diff --git a/images/odc-delete-service-binding.png b/images/odc-delete-service-binding.png deleted file mode 100644 index b8e40f63b61e..000000000000 Binary files a/images/odc-delete-service-binding.png and /dev/null differ diff --git a/images/odc-label-selector-sb-details.png b/images/odc-label-selector-sb-details.png deleted file mode 100644 index 
110aa7dd7aff..000000000000 Binary files a/images/odc-label-selector-sb-details.png and /dev/null differ diff --git a/images/odc-label-selector-topology-side-panel.png b/images/odc-label-selector-topology-side-panel.png deleted file mode 100644 index 0d25dc9a5447..000000000000 Binary files a/images/odc-label-selector-topology-side-panel.png and /dev/null differ diff --git a/images/odc-recreate-update.png b/images/odc-recreate-update.png deleted file mode 100644 index a0fda1238410..000000000000 Binary files a/images/odc-recreate-update.png and /dev/null differ diff --git a/images/odc-rolling-update.png b/images/odc-rolling-update.png deleted file mode 100644 index 1fa736639419..000000000000 Binary files a/images/odc-rolling-update.png and /dev/null differ diff --git a/images/odc-sbc-modal.png b/images/odc-sbc-modal.png deleted file mode 100644 index c8fcf755fd75..000000000000 Binary files a/images/odc-sbc-modal.png and /dev/null differ diff --git a/images/odc-serverless-app.png b/images/odc-serverless-app.png deleted file mode 100644 index 8378acde156f..000000000000 Binary files a/images/odc-serverless-app.png and /dev/null differ diff --git a/images/odc-serverless-revisions.png b/images/odc-serverless-revisions.png deleted file mode 100644 index b373b4753bdc..000000000000 Binary files a/images/odc-serverless-revisions.png and /dev/null differ diff --git a/images/odc-view-broker.png b/images/odc-view-broker.png deleted file mode 100644 index d8ed44717f51..000000000000 Binary files a/images/odc-view-broker.png and /dev/null differ diff --git a/images/odc-wto-icon.png b/images/odc-wto-icon.png deleted file mode 100644 index 2c323700f609..000000000000 Binary files a/images/odc-wto-icon.png and /dev/null differ diff --git a/images/odc_add_to_project.png b/images/odc_add_to_project.png deleted file mode 100644 index 5b276655a9e4..000000000000 Binary files a/images/odc_add_to_project.png and /dev/null differ diff --git a/images/odc_add_view.png b/images/odc_add_view.png deleted file mode 100644 index d343a614fc41..000000000000 Binary files a/images/odc_add_view.png and /dev/null differ diff --git a/images/odc_app_grouping_label.png b/images/odc_app_grouping_label.png deleted file mode 100644 index d30ed4ea8a30..000000000000 Binary files a/images/odc_app_grouping_label.png and /dev/null differ diff --git a/images/odc_app_metrics.png b/images/odc_app_metrics.png deleted file mode 100644 index d81ae7902451..000000000000 Binary files a/images/odc_app_metrics.png and /dev/null differ diff --git a/images/odc_application_topology.png b/images/odc_application_topology.png deleted file mode 100644 index f418a45cfe97..000000000000 Binary files a/images/odc_application_topology.png and /dev/null differ diff --git a/images/odc_build_canceled.png b/images/odc_build_canceled.png deleted file mode 100644 index 0e4da383baf5..000000000000 Binary files a/images/odc_build_canceled.png and /dev/null differ diff --git a/images/odc_build_completed.png b/images/odc_build_completed.png deleted file mode 100644 index fa55d761cf79..000000000000 Binary files a/images/odc_build_completed.png and /dev/null differ diff --git a/images/odc_build_failed.png b/images/odc_build_failed.png deleted file mode 100644 index eb5a62cd455f..000000000000 Binary files a/images/odc_build_failed.png and /dev/null differ diff --git a/images/odc_build_new.png b/images/odc_build_new.png deleted file mode 100644 index 52cb97fd6b2c..000000000000 Binary files a/images/odc_build_new.png and /dev/null differ diff --git a/images/odc_build_pending.png 
b/images/odc_build_pending.png deleted file mode 100644 index a9d4615733d3..000000000000 Binary files a/images/odc_build_pending.png and /dev/null differ diff --git a/images/odc_build_running.png b/images/odc_build_running.png deleted file mode 100644 index a0aaa83590f0..000000000000 Binary files a/images/odc_build_running.png and /dev/null differ diff --git a/images/odc_che_workspace.png b/images/odc_che_workspace.png deleted file mode 100644 index fd17944d96c3..000000000000 Binary files a/images/odc_che_workspace.png and /dev/null differ diff --git a/images/odc_cluster_console.png b/images/odc_cluster_console.png deleted file mode 100644 index 0dd37fbade82..000000000000 Binary files a/images/odc_cluster_console.png and /dev/null differ diff --git a/images/odc_connecting_multiple_applications.png b/images/odc_connecting_multiple_applications.png deleted file mode 100644 index 9d18ea3cca47..000000000000 Binary files a/images/odc_connecting_multiple_applications.png and /dev/null differ diff --git a/images/odc_connector.png b/images/odc_connector.png deleted file mode 100644 index 6add9a7dfc26..000000000000 Binary files a/images/odc_connector.png and /dev/null differ diff --git a/images/odc_context_menu.png b/images/odc_context_menu.png deleted file mode 100644 index ef8905e784a8..000000000000 Binary files a/images/odc_context_menu.png and /dev/null differ diff --git a/images/odc_context_operator.png b/images/odc_context_operator.png deleted file mode 100644 index 112c608ecc78..000000000000 Binary files a/images/odc_context_operator.png and /dev/null differ diff --git a/images/odc_context_project.png b/images/odc_context_project.png deleted file mode 100644 index 8342c69d3dbc..000000000000 Binary files a/images/odc_context_project.png and /dev/null differ diff --git a/images/odc_create_project.png b/images/odc_create_project.png deleted file mode 100644 index 23a8dda1916f..000000000000 Binary files a/images/odc_create_project.png and /dev/null differ diff --git a/images/odc_customizing_developer_catalog.png b/images/odc_customizing_developer_catalog.png deleted file mode 100644 index 8fd236feb74b..000000000000 Binary files a/images/odc_customizing_developer_catalog.png and /dev/null differ diff --git a/images/odc_deleting_deployment.png b/images/odc_deleting_deployment.png deleted file mode 100644 index 400329ce5794..000000000000 Binary files a/images/odc_deleting_deployment.png and /dev/null differ diff --git a/images/odc_devcatalog_toplogy.png b/images/odc_devcatalog_toplogy.png deleted file mode 100644 index 951ede330b87..000000000000 Binary files a/images/odc_devcatalog_toplogy.png and /dev/null differ diff --git a/images/odc_developer_perspective.png b/images/odc_developer_perspective.png deleted file mode 100644 index ad24511c19c0..000000000000 Binary files a/images/odc_developer_perspective.png and /dev/null differ diff --git a/images/odc_edit_app.png b/images/odc_edit_app.png deleted file mode 100644 index bdf41a27b477..000000000000 Binary files a/images/odc_edit_app.png and /dev/null differ diff --git a/images/odc_edit_redeploy.png b/images/odc_edit_redeploy.png deleted file mode 100644 index 3708f2868262..000000000000 Binary files a/images/odc_edit_redeploy.png and /dev/null differ diff --git a/images/odc_git_repository.png b/images/odc_git_repository.png deleted file mode 100644 index 4b22c5d567c1..000000000000 Binary files a/images/odc_git_repository.png and /dev/null differ diff --git a/images/odc_helm_chart_devcatalog.png b/images/odc_helm_chart_devcatalog.png deleted file 
mode 100644 index f9e8bd6ab935..000000000000 Binary files a/images/odc_helm_chart_devcatalog.png and /dev/null differ diff --git a/images/odc_helm_chart_devcatalog_new.png b/images/odc_helm_chart_devcatalog_new.png deleted file mode 100644 index aadfb8c8aff9..000000000000 Binary files a/images/odc_helm_chart_devcatalog_new.png and /dev/null differ diff --git a/images/odc_helm_chart_repo_filter.png b/images/odc_helm_chart_repo_filter.png deleted file mode 100644 index bfc44325b1a2..000000000000 Binary files a/images/odc_helm_chart_repo_filter.png and /dev/null differ diff --git a/images/odc_helm_chart_select_chart_ver.png b/images/odc_helm_chart_select_chart_ver.png deleted file mode 100644 index 6d7b52aa2d92..000000000000 Binary files a/images/odc_helm_chart_select_chart_ver.png and /dev/null differ diff --git a/images/odc_helm_revision_history.png b/images/odc_helm_revision_history.png deleted file mode 100644 index 1cee05afb284..000000000000 Binary files a/images/odc_helm_revision_history.png and /dev/null differ diff --git a/images/odc_image_vulnerabilities.png b/images/odc_image_vulnerabilities.png deleted file mode 100644 index 484788a032e5..000000000000 Binary files a/images/odc_image_vulnerabilities.png and /dev/null differ diff --git a/images/odc_info.png b/images/odc_info.png deleted file mode 100644 index c59cfc70e233..000000000000 Binary files a/images/odc_info.png and /dev/null differ diff --git a/images/odc_list_view_icon.png b/images/odc_list_view_icon.png deleted file mode 100644 index 4a6013aba005..000000000000 Binary files a/images/odc_list_view_icon.png and /dev/null differ diff --git a/images/odc_namespace_helm_chart_repo_filter.png b/images/odc_namespace_helm_chart_repo_filter.png deleted file mode 100644 index 77400ca84046..000000000000 Binary files a/images/odc_namespace_helm_chart_repo_filter.png and /dev/null differ diff --git a/images/odc_observe_dashboard.png b/images/odc_observe_dashboard.png deleted file mode 100644 index 199b96cd7354..000000000000 Binary files a/images/odc_observe_dashboard.png and /dev/null differ diff --git a/images/odc_open_url.png b/images/odc_open_url.png deleted file mode 100644 index ad5498ec94a2..000000000000 Binary files a/images/odc_open_url.png and /dev/null differ diff --git a/images/odc_pod_failed.png b/images/odc_pod_failed.png deleted file mode 100644 index 94bb0a686b09..000000000000 Binary files a/images/odc_pod_failed.png and /dev/null differ diff --git a/images/odc_pod_not_ready.png b/images/odc_pod_not_ready.png deleted file mode 100644 index 5b0f76daa2f8..000000000000 Binary files a/images/odc_pod_not_ready.png and /dev/null differ diff --git a/images/odc_pod_pending.png b/images/odc_pod_pending.png deleted file mode 100644 index c0181dfdba37..000000000000 Binary files a/images/odc_pod_pending.png and /dev/null differ diff --git a/images/odc_pod_running.png b/images/odc_pod_running.png deleted file mode 100644 index 75c015fab634..000000000000 Binary files a/images/odc_pod_running.png and /dev/null differ diff --git a/images/odc_pod_succeeded.png b/images/odc_pod_succeeded.png deleted file mode 100644 index 257b956f58dc..000000000000 Binary files a/images/odc_pod_succeeded.png and /dev/null differ diff --git a/images/odc_pod_terminating.png b/images/odc_pod_terminating.png deleted file mode 100644 index f4f3fcdd3772..000000000000 Binary files a/images/odc_pod_terminating.png and /dev/null differ diff --git a/images/odc_pod_unknown.png b/images/odc_pod_unknown.png deleted file mode 100644 index d4d0b65664ff..000000000000 
Binary files a/images/odc_pod_unknown.png and /dev/null differ diff --git a/images/odc_pod_warning.png b/images/odc_pod_warning.png deleted file mode 100644 index 8a6d5afb5bfa..000000000000 Binary files a/images/odc_pod_warning.png and /dev/null differ diff --git a/images/odc_project_alerts.png b/images/odc_project_alerts.png deleted file mode 100644 index 7266b3776829..000000000000 Binary files a/images/odc_project_alerts.png and /dev/null differ diff --git a/images/odc_project_dashboard.png b/images/odc_project_dashboard.png deleted file mode 100644 index 03da036fa655..000000000000 Binary files a/images/odc_project_dashboard.png and /dev/null differ diff --git a/images/odc_project_events.png b/images/odc_project_events.png deleted file mode 100644 index a24cc1e0903a..000000000000 Binary files a/images/odc_project_events.png and /dev/null differ diff --git a/images/odc_project_metrics.png b/images/odc_project_metrics.png deleted file mode 100644 index bd6ded4a54b4..000000000000 Binary files a/images/odc_project_metrics.png and /dev/null differ diff --git a/images/odc_project_permissions.png b/images/odc_project_permissions.png deleted file mode 100644 index b2f34484fd72..000000000000 Binary files a/images/odc_project_permissions.png and /dev/null differ diff --git a/images/odc_quick_search.png b/images/odc_quick_search.png deleted file mode 100644 index 59fbb1b9d6cc..000000000000 Binary files a/images/odc_quick_search.png and /dev/null differ diff --git a/images/odc_serverless_app.png b/images/odc_serverless_app.png deleted file mode 100644 index 2b748c403b17..000000000000 Binary files a/images/odc_serverless_app.png and /dev/null differ diff --git a/images/odc_topology_view_icon.png b/images/odc_topology_view_icon.png deleted file mode 100644 index 76b517a24145..000000000000 Binary files a/images/odc_topology_view_icon.png and /dev/null differ diff --git a/images/odc_verified_icon.png b/images/odc_verified_icon.png deleted file mode 100644 index 36c754bd9347..000000000000 Binary files a/images/odc_verified_icon.png and /dev/null differ diff --git a/images/oke-about-ocp-stack-image.png b/images/oke-about-ocp-stack-image.png deleted file mode 100644 index b74324a82ebc..000000000000 Binary files a/images/oke-about-ocp-stack-image.png and /dev/null differ diff --git a/images/oke-about.png b/images/oke-about.png deleted file mode 100644 index f8055fb1a51f..000000000000 Binary files a/images/oke-about.png and /dev/null differ diff --git a/images/oke-arch-ocp-stack.png b/images/oke-arch-ocp-stack.png deleted file mode 100644 index 92af5f6f7b9e..000000000000 Binary files a/images/oke-arch-ocp-stack.png and /dev/null differ diff --git a/images/olm-catalog-sources.png b/images/olm-catalog-sources.png deleted file mode 100644 index 2a8e7dbbc0da..000000000000 Binary files a/images/olm-catalog-sources.png and /dev/null differ diff --git a/images/olm-catalogsource.png b/images/olm-catalogsource.png deleted file mode 100644 index 7d7401cf7836..000000000000 Binary files a/images/olm-catalogsource.png and /dev/null differ diff --git a/images/olm-channels.png b/images/olm-channels.png deleted file mode 100644 index 845d72a77560..000000000000 Binary files a/images/olm-channels.png and /dev/null differ diff --git a/images/olm-manualapproval.png b/images/olm-manualapproval.png deleted file mode 100644 index 01f91f101de9..000000000000 Binary files a/images/olm-manualapproval.png and /dev/null differ diff --git a/images/olm-operator-delete.png b/images/olm-operator-delete.png deleted file mode 100644 index 
1bde1467f2c2..000000000000 Binary files a/images/olm-operator-delete.png and /dev/null differ diff --git a/images/olm-operatorhub.png b/images/olm-operatorhub.png deleted file mode 100644 index 713179f2461e..000000000000 Binary files a/images/olm-operatorhub.png and /dev/null differ diff --git a/images/olm-replaces.png b/images/olm-replaces.png deleted file mode 100644 index 5394fee620c0..000000000000 Binary files a/images/olm-replaces.png and /dev/null differ diff --git a/images/olm-skipping-updates.png b/images/olm-skipping-updates.png deleted file mode 100644 index dae054588cff..000000000000 Binary files a/images/olm-skipping-updates.png and /dev/null differ diff --git a/images/olm-uptodate.png b/images/olm-uptodate.png deleted file mode 100644 index 2176cd097594..000000000000 Binary files a/images/olm-uptodate.png and /dev/null differ diff --git a/images/olm-workflow.png b/images/olm-workflow.png deleted file mode 100644 index aeb48f3ecb0e..000000000000 Binary files a/images/olm-workflow.png and /dev/null differ diff --git a/images/olm-z-stream.png b/images/olm-z-stream.png deleted file mode 100644 index b55a5ffa0c83..000000000000 Binary files a/images/olm-z-stream.png and /dev/null differ diff --git a/images/op-install-subscription.png b/images/op-install-subscription.png deleted file mode 100644 index 5f41c09482f3..000000000000 Binary files a/images/op-install-subscription.png and /dev/null differ diff --git a/images/op-installed-tile.png b/images/op-installed-tile.png deleted file mode 100644 index dfd4af39fca7..000000000000 Binary files a/images/op-installed-tile.png and /dev/null differ diff --git a/images/op-pipeline-builder-task-details.png b/images/op-pipeline-builder-task-details.png deleted file mode 100644 index 34b2f08e3fa0..000000000000 Binary files a/images/op-pipeline-builder-task-details.png and /dev/null differ diff --git a/images/op-pipeline-builder.png b/images/op-pipeline-builder.png deleted file mode 100644 index 9b19bebe7802..000000000000 Binary files a/images/op-pipeline-builder.png and /dev/null differ diff --git a/images/op-pipeline-details.png b/images/op-pipeline-details.png deleted file mode 100644 index 01cf2d79227b..000000000000 Binary files a/images/op-pipeline-details.png and /dev/null differ diff --git a/images/op-pipeline-details1.png b/images/op-pipeline-details1.png deleted file mode 100644 index 74b671989e55..000000000000 Binary files a/images/op-pipeline-details1.png and /dev/null differ diff --git a/images/op-pipeline-yaml.png b/images/op-pipeline-yaml.png deleted file mode 100644 index cdf3a2e43bae..000000000000 Binary files a/images/op-pipeline-yaml.png and /dev/null differ diff --git a/images/op_pipeline_run.png b/images/op_pipeline_run.png deleted file mode 100644 index 145915eff5e6..000000000000 Binary files a/images/op_pipeline_run.png and /dev/null differ diff --git a/images/op_pipeline_run2.png b/images/op_pipeline_run2.png deleted file mode 100644 index 5b61837a51d9..000000000000 Binary files a/images/op_pipeline_run2.png and /dev/null differ diff --git a/images/op_pipeline_topology.png b/images/op_pipeline_topology.png deleted file mode 100644 index 94d65ea12616..000000000000 Binary files a/images/op_pipeline_topology.png and /dev/null differ diff --git a/images/op_pipeline_topology1.png b/images/op_pipeline_topology1.png deleted file mode 100644 index 0462c8884019..000000000000 Binary files a/images/op_pipeline_topology1.png and /dev/null differ diff --git a/images/openshift-on-openstack-provider-network.png 
b/images/openshift-on-openstack-provider-network.png deleted file mode 100644 index 9700c13b2972..000000000000 Binary files a/images/openshift-on-openstack-provider-network.png and /dev/null differ diff --git a/images/operator-maturity-model.png b/images/operator-maturity-model.png deleted file mode 100644 index c4f745c299f8..000000000000 Binary files a/images/operator-maturity-model.png and /dev/null differ diff --git a/images/orchestration.png b/images/orchestration.png deleted file mode 100644 index 4e77da1c5d8f..000000000000 Binary files a/images/orchestration.png and /dev/null differ diff --git a/images/osd-monitoring-architecture.svg b/images/osd-monitoring-architecture.svg deleted file mode 100644 index 9a648fdc3788..000000000000 --- a/images/osd-monitoring-architecture.svg +++ /dev/null @@ -1 +0,0 @@ -DeployDeployQueriesInstalled by defaultPrometheusOperatorNEPAGrafanaKSMOSMTelemeterClientDeployDeployAlertsDeployUserAlertmanagerOpenShiftProjectsUser-DefinedProjectsAlertsThanos RulerPrometheusThanosQuerierQueriesPrometheusPrometheusOperatorPlatform118_OpenShift_0920DeployDeployClusterMonitoringOperatorClusterVersionOperator \ No newline at end of file diff --git a/images/osd-nodes-machinepools-about-f7619.png b/images/osd-nodes-machinepools-about-f7619.png deleted file mode 100644 index 80c5e309b255..000000000000 Binary files a/images/osd-nodes-machinepools-about-f7619.png and /dev/null differ diff --git a/images/osdk-workflow.png b/images/osdk-workflow.png deleted file mode 100644 index 2fb5236bcef3..000000000000 Binary files a/images/osdk-workflow.png and /dev/null differ diff --git a/images/osdocs-contribution-workflow.png b/images/osdocs-contribution-workflow.png deleted file mode 100644 index c24d49c16b8d..000000000000 Binary files a/images/osdocs-contribution-workflow.png and /dev/null differ diff --git a/images/ossm-adding-project-using-label-selector.png b/images/ossm-adding-project-using-label-selector.png deleted file mode 100644 index 566c4f9ae83d..000000000000 Binary files a/images/ossm-adding-project-using-label-selector.png and /dev/null differ diff --git a/images/ossm-adding-project-using-smm.png b/images/ossm-adding-project-using-smm.png deleted file mode 100644 index f0e017331271..000000000000 Binary files a/images/ossm-adding-project-using-smm.png and /dev/null differ diff --git a/images/ossm-adding-project-using-smmr.png b/images/ossm-adding-project-using-smmr.png deleted file mode 100644 index 82b7189bf869..000000000000 Binary files a/images/ossm-adding-project-using-smmr.png and /dev/null differ diff --git a/images/ossm-architecture.png b/images/ossm-architecture.png deleted file mode 100644 index 552e5a2ba61d..000000000000 Binary files a/images/ossm-architecture.png and /dev/null differ diff --git a/images/ossm-federated-mesh.png b/images/ossm-federated-mesh.png deleted file mode 100644 index c7af735dcf28..000000000000 Binary files a/images/ossm-federated-mesh.png and /dev/null differ diff --git a/images/ossm-federation-export-service.png b/images/ossm-federation-export-service.png deleted file mode 100644 index 242ef179763d..000000000000 Binary files a/images/ossm-federation-export-service.png and /dev/null differ diff --git a/images/ossm-federation-import-service.png b/images/ossm-federation-import-service.png deleted file mode 100644 index b5f5dcec2bef..000000000000 Binary files a/images/ossm-federation-import-service.png and /dev/null differ diff --git a/images/ossm-grafana-control-plane-dashboard.png b/images/ossm-grafana-control-plane-dashboard.png 
deleted file mode 100644 index fd6b32e7db5e..000000000000 Binary files a/images/ossm-grafana-control-plane-dashboard.png and /dev/null differ diff --git a/images/ossm-grafana-dashboard-no-traffic.png b/images/ossm-grafana-dashboard-no-traffic.png deleted file mode 100644 index 7c2182c016b3..000000000000 Binary files a/images/ossm-grafana-dashboard-no-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-home-screen.png b/images/ossm-grafana-home-screen.png deleted file mode 100644 index 0690720c6a47..000000000000 Binary files a/images/ossm-grafana-home-screen.png and /dev/null differ diff --git a/images/ossm-grafana-mesh-no-traffic.png b/images/ossm-grafana-mesh-no-traffic.png deleted file mode 100644 index c5f717e266a0..000000000000 Binary files a/images/ossm-grafana-mesh-no-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-mesh-with-traffic.png b/images/ossm-grafana-mesh-with-traffic.png deleted file mode 100644 index 37e53db9e305..000000000000 Binary files a/images/ossm-grafana-mesh-with-traffic.png and /dev/null differ diff --git a/images/ossm-grafana-services.png b/images/ossm-grafana-services.png deleted file mode 100644 index dec1e32dcd1c..000000000000 Binary files a/images/ossm-grafana-services.png and /dev/null differ diff --git a/images/ossm-grafana-workloads.png b/images/ossm-grafana-workloads.png deleted file mode 100644 index a32e69e3e64f..000000000000 Binary files a/images/ossm-grafana-workloads.png and /dev/null differ diff --git a/images/ossm-icon-missing-sidecar.png b/images/ossm-icon-missing-sidecar.png deleted file mode 100644 index 5d50229f1810..000000000000 Binary files a/images/ossm-icon-missing-sidecar.png and /dev/null differ diff --git a/images/ossm-kiali-graph-badge-security.png b/images/ossm-kiali-graph-badge-security.png deleted file mode 100644 index 440bff3ea42b..000000000000 Binary files a/images/ossm-kiali-graph-badge-security.png and /dev/null differ diff --git a/images/ossm-kiali-graph-bookinfo.png b/images/ossm-kiali-graph-bookinfo.png deleted file mode 100644 index e34dd3d0d04c..000000000000 Binary files a/images/ossm-kiali-graph-bookinfo.png and /dev/null differ diff --git a/images/ossm-kiali-masthead-mtls-enabled.png b/images/ossm-kiali-masthead-mtls-enabled.png deleted file mode 100644 index 1ffe26bcd3d4..000000000000 Binary files a/images/ossm-kiali-masthead-mtls-enabled.png and /dev/null differ diff --git a/images/ossm-kiali-masthead-mtls-partial.png b/images/ossm-kiali-masthead-mtls-partial.png deleted file mode 100644 index 5e9302bea975..000000000000 Binary files a/images/ossm-kiali-masthead-mtls-partial.png and /dev/null differ diff --git a/images/ossm-kiali-overview.png b/images/ossm-kiali-overview.png deleted file mode 100644 index 7b36d12ca495..000000000000 Binary files a/images/ossm-kiali-overview.png and /dev/null differ diff --git a/images/ossm-node-badge-missing-sidecar.svg b/images/ossm-node-badge-missing-sidecar.svg deleted file mode 100644 index f8005984ad1e..000000000000 --- a/images/ossm-node-badge-missing-sidecar.svg +++ /dev/null @@ -1,119 +0,0 @@ - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - diff --git a/images/ossm-prometheus-home-screen.png b/images/ossm-prometheus-home-screen.png deleted file mode 100644 index c570ae4b41cc..000000000000 Binary files a/images/ossm-prometheus-home-screen.png and /dev/null differ diff --git a/images/ossm-prometheus-metrics.png b/images/ossm-prometheus-metrics.png deleted file mode 100644 index c115e31d2441..000000000000 Binary files 
a/images/ossm-prometheus-metrics.png and /dev/null differ diff --git a/images/pod-identity-webhook-workflow-in-user-defined-projects.png b/images/pod-identity-webhook-workflow-in-user-defined-projects.png deleted file mode 100644 index 70d811428708..000000000000 Binary files a/images/pod-identity-webhook-workflow-in-user-defined-projects.png and /dev/null differ diff --git a/images/product-workflow-overview.png b/images/product-workflow-overview.png deleted file mode 100644 index b6a7872c0f20..000000000000 Binary files a/images/product-workflow-overview.png and /dev/null differ diff --git a/images/question-circle.png b/images/question-circle.png deleted file mode 100644 index a505ba1fcadd..000000000000 Binary files a/images/question-circle.png and /dev/null differ diff --git a/images/quick-start-conclusion.png b/images/quick-start-conclusion.png deleted file mode 100644 index f93257032c6d..000000000000 Binary files a/images/quick-start-conclusion.png and /dev/null differ diff --git a/images/quick-start-description.png b/images/quick-start-description.png deleted file mode 100644 index e8829706d2e9..000000000000 Binary files a/images/quick-start-description.png and /dev/null differ diff --git a/images/quick-start-display-name.png b/images/quick-start-display-name.png deleted file mode 100644 index c6b6aa3579ca..000000000000 Binary files a/images/quick-start-display-name.png and /dev/null differ diff --git a/images/quick-start-duration.png b/images/quick-start-duration.png deleted file mode 100644 index 7d88be59b935..000000000000 Binary files a/images/quick-start-duration.png and /dev/null differ diff --git a/images/quick-start-icon.png b/images/quick-start-icon.png deleted file mode 100644 index 1b1c7c0b8e8a..000000000000 Binary files a/images/quick-start-icon.png and /dev/null differ diff --git a/images/quick-start-introduction.png b/images/quick-start-introduction.png deleted file mode 100644 index 450cb8dc63b9..000000000000 Binary files a/images/quick-start-introduction.png and /dev/null differ diff --git a/images/rbac.png b/images/rbac.png deleted file mode 100644 index 63312ea53f2f..000000000000 Binary files a/images/rbac.png and /dev/null differ diff --git a/images/redcircle-1.png b/images/redcircle-1.png deleted file mode 100644 index 4cbb364a2d76..000000000000 Binary files a/images/redcircle-1.png and /dev/null differ diff --git a/images/redcircle-2.png b/images/redcircle-2.png deleted file mode 100644 index 23f4a0d23f93..000000000000 Binary files a/images/redcircle-2.png and /dev/null differ diff --git a/images/redcircle-3.png b/images/redcircle-3.png deleted file mode 100644 index 06ac69f6a99c..000000000000 Binary files a/images/redcircle-3.png and /dev/null differ diff --git a/images/s2i-flow.png b/images/s2i-flow.png deleted file mode 100644 index b7ec58fb6353..000000000000 Binary files a/images/s2i-flow.png and /dev/null differ diff --git a/images/secure_deployments.png b/images/secure_deployments.png deleted file mode 100644 index 57add95959b2..000000000000 Binary files a/images/secure_deployments.png and /dev/null differ diff --git a/images/serverless-create-namespaces.png b/images/serverless-create-namespaces.png deleted file mode 100644 index 292db72db060..000000000000 Binary files a/images/serverless-create-namespaces.png and /dev/null differ diff --git a/images/serverless-create-service-admin.png b/images/serverless-create-service-admin.png deleted file mode 100644 index b3c99c8ef11f..000000000000 Binary files a/images/serverless-create-service-admin.png and /dev/null 
differ diff --git a/images/serverless-event-broker-workflow.png b/images/serverless-event-broker-workflow.png deleted file mode 100644 index aea669722c38..000000000000 Binary files a/images/serverless-event-broker-workflow.png and /dev/null differ diff --git a/images/serverless-event-channel-workflow.png b/images/serverless-event-channel-workflow.png deleted file mode 100644 index 99957bc3aed7..000000000000 Binary files a/images/serverless-event-channel-workflow.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example-dashboard.png b/images/serverless-monitoring-service-example-dashboard.png deleted file mode 100644 index d9c29422ac29..000000000000 Binary files a/images/serverless-monitoring-service-example-dashboard.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example1.png b/images/serverless-monitoring-service-example1.png deleted file mode 100644 index 27c73173cfc5..000000000000 Binary files a/images/serverless-monitoring-service-example1.png and /dev/null differ diff --git a/images/serverless-monitoring-service-example2.png b/images/serverless-monitoring-service-example2.png deleted file mode 100644 index f0149cd5dd11..000000000000 Binary files a/images/serverless-monitoring-service-example2.png and /dev/null differ diff --git a/images/serverless-verify-broker-odc.png b/images/serverless-verify-broker-odc.png deleted file mode 100644 index 9a7a9b057275..000000000000 Binary files a/images/serverless-verify-broker-odc.png and /dev/null differ diff --git a/images/service-yaml-admin.png b/images/service-yaml-admin.png deleted file mode 100644 index 28823e44a019..000000000000 Binary files a/images/service-yaml-admin.png and /dev/null differ diff --git a/images/serving-YAML-HA.png b/images/serving-YAML-HA.png deleted file mode 100644 index caf0b5b1261a..000000000000 Binary files a/images/serving-YAML-HA.png and /dev/null differ diff --git a/images/serving-conditions-true.png b/images/serving-conditions-true.png deleted file mode 100644 index 22f38a447bab..000000000000 Binary files a/images/serving-conditions-true.png and /dev/null differ diff --git a/images/serving-overview.png b/images/serving-overview.png deleted file mode 100644 index c0e6478a9530..000000000000 Binary files a/images/serving-overview.png and /dev/null differ diff --git a/images/silence-overview.png b/images/silence-overview.png deleted file mode 100644 index 5b2093a3a89f..000000000000 Binary files a/images/silence-overview.png and /dev/null differ diff --git a/images/targets-and-dependencies.png b/images/targets-and-dependencies.png deleted file mode 100644 index 91fa65dbaf9a..000000000000 Binary files a/images/targets-and-dependencies.png and /dev/null differ diff --git a/images/telmetry-and-insights-operator-data-flow.svg b/images/telmetry-and-insights-operator-data-flow.svg deleted file mode 100644 index 4d0820cdd731..000000000000 --- a/images/telmetry-and-insights-operator-data-flow.svg +++ /dev/null @@ -1 +0,0 @@ -132_OpenShift_1220KubernetesAPIPrometheusAPIHTTPS (443)HTTPS (443)TelemeterClientOpenShiftContainer PlatformRed HatSupportSubscriptionmanagementOpenShiftCluster ManagerInsightsanalysis enginecloud.redhat.comapi.openshift.comRed HatWebconsoleInsightsOperator \ No newline at end of file diff --git a/images/toplogy-odc-apiserver.png b/images/toplogy-odc-apiserver.png deleted file mode 100644 index 22e6532e2091..000000000000 Binary files a/images/toplogy-odc-apiserver.png and /dev/null differ diff --git a/images/trustedsupplychain.png 
b/images/trustedsupplychain.png deleted file mode 100644 index e62e0f70824d..000000000000 Binary files a/images/trustedsupplychain.png and /dev/null differ diff --git a/images/update-runlevels.png b/images/update-runlevels.png deleted file mode 100644 index 309e195a9c58..000000000000 Binary files a/images/update-runlevels.png and /dev/null differ diff --git a/images/verify-channel-odc.png b/images/verify-channel-odc.png deleted file mode 100644 index 43a36617d790..000000000000 Binary files a/images/verify-channel-odc.png and /dev/null differ diff --git a/images/verify-kafka-ODC.png b/images/verify-kafka-ODC.png deleted file mode 100644 index 1f94c0e028c5..000000000000 Binary files a/images/verify-kafka-ODC.png and /dev/null differ diff --git a/images/verify-pingsource-ODC.png b/images/verify-pingsource-ODC.png deleted file mode 100644 index dac0464b36cf..000000000000 Binary files a/images/verify-pingsource-ODC.png and /dev/null differ diff --git a/images/verify-sinkbinding-odc.png b/images/verify-sinkbinding-odc.png deleted file mode 100644 index 91039cf2ab1a..000000000000 Binary files a/images/verify-sinkbinding-odc.png and /dev/null differ diff --git a/images/verify-subscription-odc.png b/images/verify-subscription-odc.png deleted file mode 100644 index c621e6d34f18..000000000000 Binary files a/images/verify-subscription-odc.png and /dev/null differ diff --git a/images/virt-icon.png b/images/virt-icon.png deleted file mode 100644 index dc1439e8d389..000000000000 Binary files a/images/virt-icon.png and /dev/null differ diff --git a/images/web_console_perspectives.png b/images/web_console_perspectives.png deleted file mode 100644 index 1018d33dbabc..000000000000 Binary files a/images/web_console_perspectives.png and /dev/null differ diff --git a/images/whatarecontainers.png b/images/whatarecontainers.png deleted file mode 100644 index 8c3bd20379d5..000000000000 Binary files a/images/whatarecontainers.png and /dev/null differ diff --git a/images/wmco-design.png b/images/wmco-design.png deleted file mode 100644 index 17245c45b7c2..000000000000 Binary files a/images/wmco-design.png and /dev/null differ diff --git a/installing/_attributes b/installing/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/installing/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/installing/cluster-capabilities.adoc b/installing/cluster-capabilities.adoc deleted file mode 100644 index b59bb330d228..000000000000 --- a/installing/cluster-capabilities.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-capabilities"] -= Cluster capabilities -include::_attributes/common-attributes.adoc[] -:context: cluster-capabilities - -toc::[] - -Cluster administrators can use cluster capabilities to enable or disable optional components prior to installation. Cluster administrators can also enable cluster capabilities at any time after installation. - -[NOTE] -==== -Cluster administrators cannot disable a cluster capability after it is enabled. 
-==== - -include::modules/selecting-cluster-capabilities.adoc[leveloffset=+1] - -include::snippets/capabilities-table.adoc[] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] -* xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] - -include::modules/explanation-of-capabilities.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-operator-reference[Cluster Operators reference] - -include::modules/cluster-bare-metal-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[Deploying installer-provisioned clusters on bare metal] -* xref:../installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc#preparing-to-install-on-bare-metal[Preparing for bare metal cluster installation] -* xref:../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -include::modules/cluster-storage-operator.adoc[leveloffset=+2] - -include::modules/console-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../web_console/web-console-overview.adoc#web-console-overview[Web console overview] - -include::modules/cluster-csi-snapshot-controller-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[CSI volume snapshots] - -include::modules/insights-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../support/remote_health_monitoring/using-insights-operator.adoc#using-insights-operator[Using Insights Operator] - -include::modules/operator-marketplace.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs[Red Hat-provided Operator catalogs] - -include::modules/node-tuning-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../scalability_and_performance/using-node-tuning-operator.adoc#using-node-tuning-operator[Using the Node Tuning Operator] - -include::modules/cluster-samples-operator.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../openshift_images/configuring-samples-operator.adoc#configuring-samples-operator[Configuring the Cluster Samples Operator] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources -* xref:../post_installation_configuration/enabling-cluster-capabilities.adoc#enabling-cluster-capabilities[Enabling cluster capabilities after installation] \ No newline at end of file diff --git a/installing/disconnected_install/_attributes b/installing/disconnected_install/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/disconnected_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/disconnected_install/images b/installing/disconnected_install/images deleted file mode 120000 index 5fa6987088da..000000000000 --- 
a/installing/disconnected_install/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/disconnected_install/index.adoc b/installing/disconnected_install/index.adoc deleted file mode 100644 index b00aa205c287..000000000000 --- a/installing/disconnected_install/index.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-disconnected-about"] -= About disconnected installation mirroring -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-disconnected-about - -toc::[] - -You can use a mirror registry to ensure that your clusters only use container images that satisfy your organizational controls on external content. Before you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. - -[id="creating-mirror-registry"] -== Creating a mirror registry - -If you already have a container image registry, such as Red Hat Quay, you can use it as your mirror registry. If you do not already have a registry, you can xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[create a mirror registry using the _mirror registry for Red Hat OpenShift_]. - -[id="mirroring-images-disconnected-install"] -== Mirroring images for a disconnected installation - -You can use one of the following procedures to mirror your {product-title} image repository to your mirror registry: - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[Mirroring images for a disconnected installation] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] diff --git a/installing/disconnected_install/installing-mirroring-creating-registry.adoc b/installing/disconnected_install/installing-mirroring-creating-registry.adoc deleted file mode 100644 index 94c454d0a942..000000000000 --- a/installing/disconnected_install/installing-mirroring-creating-registry.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-creating-registry"] -= Creating a mirror registry with mirror registry for Red Hat OpenShift -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-creating-registry - -toc::[] - -The _mirror registry for Red Hat OpenShift_ is a small and streamlined container registry that you can use as a target for mirroring the required container images of {product-title} for disconnected installations. - -If you already have a container image registry, such as Red Hat Quay, you can skip this section and go straight to xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-mirror-repository_installing-mirroring-installation-images[Mirroring the OpenShift Container Platform image repository]. - -[id="prerequisites_installing-mirroring-creating-registry"] -== Prerequisites - -* An {product-title} subscription. -* {op-system-base-full} 8 and 9 with Podman 3.4.2 or later and OpenSSL installed. -* Fully qualified domain name for the Red Hat Quay service, which must resolve through a DNS server. -* Key-based SSH connectivity on the target host. SSH keys are automatically generated for local installs. 
For remote hosts, you must generate your own SSH keys. -* 2 or more vCPUs. -* 8 GB of RAM. -* About 12 GB for {product-title} {product-version} release images, or about 358 GB for {product-title} {product-version} release images and {product-title} {product-version} Red Hat Operator images. Up to 1 TB per stream or more is suggested. -+ -[IMPORTANT] -==== -These requirements are based on local testing results with only release images and Operator images. Storage requirements can vary based on your organization's needs. You might require more space, for example, when you mirror multiple z-streams. You can use standard link:https://access.redhat.com/documentation/en-us/red_hat_quay/3[Red Hat Quay functionality] or the proper link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index#deletefulltag[API callout] to remove unnecessary images and free up space. -==== - -include::modules/mirror-registry-introduction.adoc[leveloffset=+1] -include::modules/mirror-registry-localhost.adoc[leveloffset=+1] -include::modules/mirror-registry-localhost-update.adoc[leveloffset=+1] -include::modules/mirror-registry-remote.adoc[leveloffset=+1] -include::modules/mirror-registry-remote-host-update.adoc[leveloffset=+1] -include::modules/mirror-registry-uninstall.adoc[leveloffset=+1] -include::modules/mirror-registry-flags.adoc[leveloffset=+1] -include::modules/mirror-registry-release-notes.adoc[leveloffset=+1] -include::modules/mirror-registry-troubleshooting.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/using-ssl-to-protect-quay[Using SSL to protect connections to Red Hat Quay] - -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/using-ssl-to-protect-quay#configuring_the_system_to_trust_the_certificate_authority[Configuring the system to trust the certificate authority] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-mirror-repository_installing-mirroring-installation-images[Mirroring the OpenShift Container Platform image repository] - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#olm-mirror-catalog_installing-mirroring-installation-images[Mirroring Operator catalogs for use with disconnected clusters] diff --git a/installing/disconnected_install/installing-mirroring-disconnected.adoc b/installing/disconnected_install/installing-mirroring-disconnected.adoc deleted file mode 100644 index 69d9298a3a57..000000000000 --- a/installing/disconnected_install/installing-mirroring-disconnected.adoc +++ /dev/null @@ -1,149 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-disconnected"] -= Mirroring images for a disconnected installation using the oc-mirror plugin -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-disconnected - -toc::[] - -Running your cluster in a restricted network without direct internet connectivity is possible by installing the cluster from a mirrored set of {product-title} container images in a private registry. This registry must be running at all times as long as the cluster is running. See the xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#prerequisites_installing-mirroring-disconnected[Prerequisites] section for more information. 
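The mirrored registry only pays off once the cluster is configured to pull from it. As a rough orientation, a restricted-network `install-config.yaml` typically points the installation program at the mirror through an `imageContentSources` stanza; in the following minimal sketch, the registry hostname and repository path are placeholders, while the `source` entries are the standard public release repositories:

[source,yaml]
----
imageContentSources:
- mirrors:
  - mirror.registry.example.com:8443/ocp/release # placeholder mirror host and repository
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.registry.example.com:8443/ocp/release
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----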
- -You can use the oc-mirror OpenShift CLI (`oc`) plugin to mirror images to a mirror registry in your fully or partially disconnected environments. You must run oc-mirror from a system with internet connectivity in order to download the required images from the official Red Hat registries. - -The following steps outline the high-level workflow on how to use the oc-mirror plugin to mirror images to a mirror registry: - -. Create an image set configuration file. -. Mirror the image set to the mirror registry by using one of the following methods: -** Mirror an image set directly to the mirror registry. -** Mirror an image set to disk, transfer the image set to the target environment, then upload the image set to the target mirror registry. -. Configure your cluster to use the resources generated by the oc-mirror plugin. -. Repeat these steps to update your mirror registry as necessary. - -// About the oc-mirror plugin -include::modules/oc-mirror-about.adoc[leveloffset=+1] - -// oc-mirror compatibility and support -include::modules/oc-mirror-support.adoc[leveloffset=+1] - -// About the mirror registry -include::modules/installation-about-mirror-registry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For information about viewing the CRI-O logs to view the image source, see xref:../../installing/validating-an-installation.adoc#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source]. - -[id="prerequisites_installing-mirroring-disconnected"] -== Prerequisites - -* You must have a container image registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2] in the location that will host the {product-title} cluster, such as Red Hat Quay. -+ -[NOTE] -==== -If you use Red Hat Quay, you must use version 3.6 or later with the oc-mirror plugin. If you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.6/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/[for proof-of-concept purposes] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.6/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat Support. -==== -+ -If you do not already have an existing solution for a container image registry, subscribers of {product-title} are provided a xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[mirror registry for Red Hat OpenShift]. The _mirror registry for Red Hat OpenShift_ is included with your subscription and is a small-scale container registry that can be used to mirror the required container images of {product-title} in disconnected installations. - -[id="mirroring-preparing-your-hosts"] -== Preparing your mirror hosts - -Before you can use the oc-mirror plugin to mirror images, you must install the plugin and create a container image registry credentials file to allow the mirroring from Red Hat to your mirror. 
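The high-level workflow above starts from an image set configuration file. The following minimal sketch shows the general shape of one: the storage registry URL is a placeholder, and the release channel, Operator package, and additional image are illustrative selections only.

[source,yaml]
----
kind: ImageSetConfiguration
apiVersion: mirror.openshift.io/v1alpha2
storageConfig:
  registry:
    imageURL: mirror.registry.example.com:8443/oc-mirror-metadata # placeholder
    skipTLS: false
mirror:
  platform:
    channels:
    - name: stable-4.13
      type: ocp
  operators:
  - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13
    packages:
    - name: serverless-operator
      channels:
      - name: stable
  additionalImages:
  - name: registry.redhat.io/ubi8/ubi:latest
----

The same configuration file drives both the direct mirror-to-mirror path and the disk-staged path used in fully disconnected environments.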
- -// Installing the oc-mirror OpenShift CLI plugin -include::modules/oc-mirror-installing-plugin.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../cli_reference/openshift_cli/extending-cli-plugins.adoc#cli-installing-plugins_cli-extend-plugins[Installing and using CLI plugins] - -// Configuring credentials that allow images to be mirrored -include::modules/installation-adding-registry-pull-secret.adoc[leveloffset=+2] - -// Creating the image set configuration -include::modules/oc-mirror-creating-image-set-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-imageset-config-params_installing-mirroring-disconnected[Image set configuration parameters] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-image-set-examples_installing-mirroring-disconnected[Image set configuration examples] -* xref:../../updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc#update-service-overview_updating-restricted-network-cluster-osus[Using the OpenShift Update Service in a disconnected environment] - -[id="mirroring-image-set"] -== Mirroring an image set to a mirror registry - -You can use the oc-mirror CLI plugin to mirror images to a mirror registry in a xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-partial[partially disconnected environment] or in a xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-full[fully disconnected environment]. - -These procedures assume that you already have your mirror registry set up. - -[id="mirroring-image-set-partial"] -=== Mirroring an image set in a partially disconnected environment - -In a partially disconnected environment, you can mirror an image set directly to the target mirror registry. - -// Mirroring from mirror to mirror -include::modules/oc-mirror-mirror-to-mirror.adoc[leveloffset=+3] - -[id="mirroring-image-set-full"] -=== Mirroring an image set in a fully disconnected environment - -To mirror an image set in a fully disconnected environment, you must first xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-mirror-to-disk_installing-mirroring-disconnected[mirror the image set to disk], then xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-disk-to-mirror_installing-mirroring-disconnected[mirror the image set file on disk to a mirror]. - -// Mirroring from mirror to disk -include::modules/oc-mirror-mirror-to-disk.adoc[leveloffset=+3] - -// Mirroring from disk to mirror in a disconnected environment -include::modules/oc-mirror-disk-to-mirror.adoc[leveloffset=+3] - -// Configuring your cluster to use the resources generated by oc-mirror -include::modules/oc-mirror-updating-cluster-manifests.adoc[leveloffset=+1] - -[id="updating-mirror-registry-content"] -== Keeping your mirror registry content updated - -After your target mirror registry is populated with the initial image set, be sure to update it regularly so that it has the latest content. You can optionally set up a cron job, if possible, so that the mirror registry is updated on a regular basis. - -Ensure that you update your image set configuration to add or remove {product-title} and Operator releases as necessary. 
Any images that are removed are pruned from the mirror registry. - -// About updating your mirror registry content -include::modules/oc-mirror-updating-registry-about.adoc[leveloffset=+2] - -// Updating your mirror registry content -include::modules/oc-mirror-differential-updates.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-image-set-examples_installing-mirroring-disconnected[Image set configuration examples] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-partial[Mirroring an image set in a partially disconnected environment] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#mirroring-image-set-full[Mirroring an image set in a fully disconnected environment] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#oc-mirror-updating-cluster-manifests_installing-mirroring-disconnected[Configuring your cluster to use the resources generated by oc-mirror] - -// Performing a dry run -include::modules/oc-mirror-dry-run.adoc[leveloffset=+1] - -// Including local OCI Operator catalogs -include::modules/oc-mirror-oci-format.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// TODO: This title might need to update per sebastian's PR -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.html#oc-mirror-updating-cluster-manifests_installing-mirroring-disconnected[Configuring your cluster to use the resources generated by oc-mirror] - -// Image set configuration parameters -include::modules/oc-mirror-imageset-config-params.adoc[leveloffset=+1] - -// Image set configuration examples -include::modules/oc-mirror-image-set-config-examples.adoc[leveloffset=+1] - -// Command reference for oc-mirror -include::modules/oc-mirror-command-reference.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-mirroring-disconnected"] -== Additional resources - -* xref:../../updating/updating_a_cluster/updating_disconnected_cluster/index.adoc#about-restricted-network-updates[About cluster updates in a disconnected environment] diff --git a/installing/disconnected_install/installing-mirroring-installation-images.adoc b/installing/disconnected_install/installing-mirroring-installation-images.adoc deleted file mode 100644 index ac4512cd355e..000000000000 --- a/installing/disconnected_install/installing-mirroring-installation-images.adoc +++ /dev/null @@ -1,154 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mirroring-installation-images"] -= Mirroring images for a disconnected installation -include::_attributes/common-attributes.adoc[] -:context: installing-mirroring-installation-images - -toc::[] - -You can ensure your clusters only use container images that satisfy your organizational controls on external content. Before you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. - -// TODO: Is this procedure going to be marked deprecated for 4.10 so that it could be removed in the future? -// TODO: Add a link to the TP procedure? -// TODO: Consider updating the title of this one to indicate the difference? 
Or wait to make any changes like that til GA, til we know if it'll stick around or be completely replaced by the oc-mirror one? - -[IMPORTANT] -==== -You must have access to the internet to obtain the necessary container images. -In this procedure, you place your mirror registry on a mirror host -that has access to both your network and the internet. If you do not have access -to a mirror host, use the xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#olm-mirror-catalog_installing-mirroring-installation-images[Mirroring Operator catalogs for use with disconnected clusters] procedure to copy images to a device you can move across network boundaries with. -==== - -[id="prerequisites_installing-mirroring-installation-images"] -== Prerequisites - -* You must have a container image registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2] in the location that will host the {product-title} cluster, such as one of the following registries: -+ --- -** link:https://www.redhat.com/en/technologies/cloud-computing/quay[Red Hat Quay] -** link:https://jfrog.com/artifactory/[JFrog Artifactory] -** link:https://www.sonatype.com/products/repository-oss?topnav=true[Sonatype Nexus Repository] -** link:https://goharbor.io/[Harbor] --- -+ -If you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.5/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/[for proof-of-concept purposes] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.5/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/[by using the Quay Operator]. If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat support. - -* If you do not already have an existing solution for a container image registry, subscribers of {product-title} are provided a xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[mirror registry for Red Hat OpenShift]. The _mirror registry for Red Hat OpenShift_ is included with your subscription and is a small-scale container registry that can be used to mirror the required container images of {product-title} in disconnected installations. - -include::modules/installation-about-mirror-registry.adoc[leveloffset=+1] - -.Additional information - -For information about viewing the CRI-O logs to view the image source, see xref:../../installing/validating-an-installation.adoc#viewing-the-image-pull-source_validating-an-installation[Viewing the image pull source]. - -[id="installing-preparing-mirror"] -== Preparing your mirror host - -Before you perform the mirror procedure, you must prepare the host to retrieve content -and push it to the remote location. - -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -include::modules/installation-adding-registry-pull-secret.adoc[leveloffset=+1] - -//This command seems out of place. Where should it really go? -//// -[id="installing-performing-connected-mirror"] -== Performing a mirror while connected to the internet - -$ oc adm release mirror OPENSHIFT_VERSION --to MIRROR_REPOSITORY -//// - -//// -[id="installing-restricted-networks-preparations-mirroring"] -== Mirroring the content - -In production environments, add the required images to a registry in your restricted network. 
For non-production environments, you can use the images without a separate registry. - - modules/installation-performing-disconnected-mirror.adoc[leveloffset=+2] - - modules/installation-performing-disconnected-mirror-without-registry.adoc[leveloffset=+2] -//// - -include::modules/installation-mirror-repository.adoc[leveloffset=+1] - -[id="installing-preparing-samples-operator"] -== The Cluster Samples Operator in a disconnected environment - -In a disconnected environment, you must take additional steps after you install a cluster to configure the Cluster Samples Operator. Review the following information in preparation. - -include::modules/installation-images-samples-disconnected-mirroring-assist.adoc[leveloffset=+2] - -include::modules/olm-mirroring-catalog.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] - -[id="olm-mirror-catalog-prerequisites_installing-mirroring-installation-images"] -=== Prerequisites - -Mirroring Operator catalogs for use with disconnected clusters has the following prerequisites: - -* Workstation with unrestricted network access. -* `podman` version 1.9.3 or later. -* If you want to filter, or _prune_, an existing catalog and selectively mirror only a subset of Operators, see the following sections: -** xref:../../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[Installing the opm CLI] -** xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-filtering-fbc_olm-managing-custom-catalogs[Updating or filtering a file-based catalog image] -ifndef::openshift-origin[] -* If you want to mirror a Red Hat-provided catalog, run the following command on your workstation with unrestricted network access to authenticate with `registry.redhat.io`: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- -endif::[] -* Access to a mirror registry that supports -link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker v2-2]. -* On your mirror registry, decide which repository, or namespace, to use for storing mirrored Operator content. For example, you might create an `olm-mirror` repository. -* If your mirror registry does not have internet access, connect removable media to your workstation with unrestricted network access. -* If you are working with private registries, including `registry.redhat.io`, set the `REG_CREDS` environment variable to the file path of your registry credentials for use in later steps. 
For example, for the `podman` CLI: -+ -[source,terminal] ----- -$ REG_CREDS=${XDG_RUNTIME_DIR}/containers/auth.json ----- - -include::modules/olm-mirroring-catalog-extracting.adoc[leveloffset=+2] -include::modules/olm-mirroring-catalog-colocated.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - -include::modules/olm-mirroring-catalog-airgapped.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources -* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#olm-arch-os-support_osdk-generating-csvs[Architecture and operating system support for Operators] - -include::modules/olm-mirroring-catalog-manifests.adoc[leveloffset=+2] -include::modules/olm-mirroring-catalog-post.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../post_installation_configuration/preparing-for-users.adoc#post-install-mirrored-catalogs[Populating OperatorHub from mirrored Operator catalogs] -* xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-filtering-fbc_olm-managing-custom-catalogs[Updating or filtering a file-based catalog image] - -[id="next-steps_installing-mirroring-installation-images"] -== Next steps - -//* TODO need to add the registry secret to the machines, which is different - -* Install a cluster on infrastructure that you provision in your restricted network, such as on -xref:../../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[VMware vSphere], -xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal], or xref:../../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Amazon Web Services]. - -[role="_additional-resources"] -[id="restricted-networks-additional-resources"] -== Additional resources - -* See xref:../../support/gathering-cluster-data.adoc#gathering-data-specific-features_gathering-cluster-data[Gathering data about specific features] for more information about using must-gather. 
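As context for the Operator catalog mirroring described above, the generated manifests typically include a `CatalogSource` that points the cluster at the mirrored index image. The following is only an illustrative sketch with placeholder names and a placeholder mirror registry, not output produced by the mirroring tooling:

[source,yaml]
----
# Illustrative sketch: the catalog name and mirror registry path are placeholders.
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: my-operator-catalog          # placeholder name
  namespace: openshift-marketplace
spec:
  sourceType: grpc
  image: registry.example.com/olm-mirror/redhat-operator-index:v4.13  # mirrored index image
  displayName: Mirrored Red Hat Operators
  publisher: example
  updateStrategy:
    registryPoll:
      interval: 30m                  # re-poll the mirrored index for updates
----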
diff --git a/installing/disconnected_install/modules b/installing/disconnected_install/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/disconnected_install/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/disconnected_install/snippets b/installing/disconnected_install/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/disconnected_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/images b/installing/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/installing/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/installing/index.adoc b/installing/index.adoc deleted file mode 100644 index 79643bc8a691..000000000000 --- a/installing/index.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="ocp-installation-overview"] -= {product-title} installation overview -include::_attributes/common-attributes.adoc[] -:context: ocp-installation-overview - -toc::[] - -include::modules/installation-overview.adoc[leveloffset=+1] - - -include::modules/install-openshift-common-terms.adoc[leveloffset=+2] - -include::modules/installation-process.adoc[leveloffset=+2] - -include::modules/ipi-verifying-nodes-after-installation.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../post_installation_configuration/bare-metal-configuration.adoc#getting-the-baremetalhost-resource_post-install-bare-metal-configuration[Getting the BareMetalHost resource] - -* xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-troubleshooting-following-the-installation_ipi-install-installation-workflow[Following the installation] - -* xref:../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation] - -* xref:../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Agent-based Installer] - -* link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[Assisted Installer for OpenShift Container Platform] - -[discrete] -=== Installation scope - -The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more configuration tasks after installation completes. - -[role="_additional-resources"] -.Additional resources - -* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources. - -include::modules/installation-openshift-local.adoc[leveloffset=+2] - -include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../installing/installing-preparing.adoc#supported-installation-methods-for-different-platforms[Supported installation methods for different platforms] for more information about the types of installations that are available for each supported platform. 
- -* See xref:../installing/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users] for information about choosing an installation method and preparing the required resources. diff --git a/installing/install_config/_attributes b/installing/install_config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/install_config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/install_config/configuring-custom-ca.adoc b/installing/install_config/configuring-custom-ca.adoc deleted file mode 100644 index 290bfc593a76..000000000000 --- a/installing/install_config/configuring-custom-ca.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-custom-ca"] -= Configuring a custom certificate authority -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-ca - -toc::[] - -If you install {product-title} with a proxy or in a restricted network, -you might need to configure a custom certificate authority (CA). - -//include::modules/configuring-firewall.adoc[leveloffset=+1] diff --git a/installing/install_config/configuring-firewall.adoc b/installing/install_config/configuring-firewall.adoc deleted file mode 100644 index 5a55a7b1e345..000000000000 --- a/installing/install_config/configuring-firewall.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-firewall"] -= Configuring your firewall -include::_attributes/common-attributes.adoc[] -:context: configuring-firewall - -toc::[] - -If you use a firewall, you must configure it so that {product-title} can access the sites that it requires to function. You must always grant access to some sites, and you grant access to more if you use -Red Hat Insights, the Telemetry service, a cloud to host your cluster, and certain build strategies. - -include::modules/configuring-firewall.adoc[leveloffset=+1] diff --git a/installing/install_config/enabling-cgroup-v2.adoc b/installing/install_config/enabling-cgroup-v2.adoc deleted file mode 100644 index 84b1f5bc8e41..000000000000 --- a/installing/install_config/enabling-cgroup-v2.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -:context: nodes-cluster-cgroups-2 -[id="enabling-cgroup-v2"] -= Enabling Linux control group version 2 (cgroup v2) -include::_attributes/common-attributes.adoc[] - -toc::[] - - -ifndef::openshift-origin[] -By default, {product-title} uses link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1) in your cluster. You can enable link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) upon installation. Enabling cgroup v2 in {product-title} disables all cgroup version 1 controllers and hierarchies in your cluster. - -cgroup v2 is the next version of the Linux cgroup API. cgroup v2 offers several improvements over cgroup v1, including a unified hierarchy, safer sub-tree delegation, new features such as link:https://www.kernel.org/doc/html/latest/accounting/psi.html[Pressure Stall Information], and enhanced resource management and isolation. - -You can switch between cgroup v1 and cgroup v2, as needed, by editing the `node.config` object. For more information, see "Configuring the Linux cgroup on your nodes" in the "Additional resources" of this section. 
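As a sketch of the `node.config` edit mentioned above, and assuming the cluster-scoped resource is named `cluster` as in current {product-title} releases, switching the cgroup version comes down to a single field:

[source,yaml]
----
# Sketch only: edit with `oc edit nodes.config/cluster` (assumed resource name).
apiVersion: config.openshift.io/v1
kind: Node
metadata:
  name: cluster
spec:
  cgroupMode: "v2"   # set to "v1" to switch back to cgroup v1
----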
-endif::openshift-origin[] - -ifdef::openshift-origin[] -By default, {product-title} uses link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) in your cluster. You can switch to link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1), if needed. - -cgroup v2 is the next version of the kernel link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01[control group] and offers multiple improvements. However, it can have some unwanted effects on your nodes. -endif::openshift-origin[] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -ifndef::openshift-origin[] -include::modules/nodes-clusters-cgroups-2-install.adoc[leveloffset=+1] -endif::openshift-origin[] - -ifdef::openshift-origin[] -include::modules/nodes-clusters-cgroups-okd-configure.adoc[leveloffset=+1] -endif::openshift-origin[] - -.Additional resources - -* xref:../../installing/index.adoc#ocp-installation-overview[OpenShift Container Platform installation overview] -* xref:../../nodes/clusters/nodes-cluster-cgroups-2.adoc#nodes-clusters-cgroups-2_nodes-cluster-cgroups-2[Configuring the Linux cgroup on your nodes] diff --git a/installing/install_config/images b/installing/install_config/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/install_config/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/install_config/installing-customizing.adoc b/installing/install_config/installing-customizing.adoc deleted file mode 100644 index f83ad75f6289..000000000000 --- a/installing/install_config/installing-customizing.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-customizing"] -= Customizing nodes -include::_attributes/common-attributes.adoc[] -:context: installing-customizing - -toc::[] - -Although directly making changes to {product-title} nodes is discouraged, -there are times when it is necessary to implement a -required low-level security, redundancy, networking, or performance feature. -Direct changes to {product-title} nodes can be done by: - -* Creating machine configs that are included in manifest files -to start up a cluster during `openshift-install`. - -* Creating machine configs that are passed to running -{product-title} nodes via the Machine Config Operator. - -* Creating an Ignition config that is passed to `coreos-installer` -when installing bare-metal nodes. - -The following sections describe features that you might want to -configure on your nodes in this way. 
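As a minimal sketch of the machine config approach described above, using Butane, which the next section covers (the `version` value, file path, and file contents here are assumptions, not values from this document), a config that adds a file to worker nodes could look like the following and would be converted to a `MachineConfig` with a command such as `butane example.bu -o 99-worker-custom.yaml`:

[source,yaml]
----
# Minimal Butane sketch; version, path, and contents are placeholders.
variant: openshift
version: 4.13.0
metadata:
  name: 99-worker-custom
  labels:
    machineconfiguration.openshift.io/role: worker
storage:
  files:
  - path: /etc/example/motd-note
    mode: 0644
    contents:
      inline: |
        This node was customized with a machine config.
----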
- -include::modules/installation-special-config-butane.adoc[leveloffset=+1] -include::modules/installation-special-config-butane-about.adoc[leveloffset=+2] -include::modules/installation-special-config-butane-install.adoc[leveloffset=+2] -include::modules/installation-special-config-butane-create.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-kmod_installing-customizing[Adding kernel modules to nodes] -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-storage_installing-customizing[Encrypting and mirroring disks during installation] - -include::modules/installation-special-config-kargs.adoc[leveloffset=+1] -ifdef::openshift-webscale[] -include::modules/installation-special-config-rtkernel.adoc[leveloffset=+1] -endif::openshift-webscale[] -include::modules/installation-special-config-kmod.adoc[leveloffset=+1] -include::modules/installation-special-config-storage.adoc[leveloffset=+1] -include::modules/installation-special-config-raid.adoc[leveloffset=+1] -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* For information on Butane, see xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane]. - -//// -ifndef::openshift-origin[] -* For information on FIPS support, see xref:../../installing/installing-fips.adoc#installing-fips[Support for FIPS cryptography]. -endif::[] - -//// diff --git a/installing/install_config/modules b/installing/install_config/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/install_config/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/install_config/snippets b/installing/install_config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/install_config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing-fips.adoc b/installing/installing-fips.adoc deleted file mode 100644 index 2ba5b1770fbf..000000000000 --- a/installing/installing-fips.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-fips"] -= Support for FIPS cryptography -include::_attributes/common-attributes.adoc[] -:context: installing-fips - -toc::[] - -You can install an {product-title} cluster that uses FIPS Validated / Modules in Process cryptographic libraries on `x86_64`, `ppc64le`, and `s390x` architectures. - -For the {op-system-first} machines in your cluster, this change is applied when the machines are deployed based on the status of an option in the `install-config.yaml` file, which governs the cluster options that a user can change during cluster deployment. With {op-system-base-full} machines, you must enable FIPS mode when you install the operating system on the machines that you plan to use as worker machines. These configuration methods ensure that your cluster meets the requirements of a FIPS compliance audit: only FIPS Validated / Modules in Process cryptography packages are enabled before the initial system boot. - -Because FIPS must be enabled before the operating system that your cluster uses boots for the first time, you cannot enable FIPS after you deploy a cluster.
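As a sketch of the `install-config.yaml` option referred to above, enabling FIPS mode amounts to setting a single top-level field; every other value in this fragment is a placeholder and depends on your platform:

[source,yaml]
----
# Fragment of install-config.yaml; all values except `fips` are placeholders.
apiVersion: v1
baseDomain: example.com
metadata:
  name: example-cluster
fips: true        # must be set before the cluster is deployed
pullSecret: '{"auths": ...}'
sshKey: ssh-ed25519 AAAA...
----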
- -[id="installation-about-fips-validation_{context}"] -== FIPS validation in {product-title} - -{product-title} uses certain FIPS Validated / Modules in Process modules within {op-system-base} and {op-system} for the operating system components that it uses. See link:https://access.redhat.com/articles/3655361[RHEL8 core crypto components]. For example, when users SSH into {product-title} clusters and containers, those connections are properly encrypted. - -{product-title} components are written in Go and built with Red Hat's golang compiler. When you enable FIPS mode for your cluster, all {product-title} components that require cryptographic signing call {op-system-base} and {op-system} cryptographic libraries. - -.FIPS mode attributes and limitations in {product-title} {product-version} -[cols="8a,8a",options="header"] -|=== - -|Attributes -|Limitations - -|FIPS support in {op-system-base} 8 and {op-system} operating systems. -.3+|The FIPS implementation does not offer a single function that both computes hash functions and validates the keys that are based on that hash. This limitation will continue to be evaluated and improved in future {product-title} releases. - -|FIPS support in CRI-O runtimes. -|FIPS support in {product-title} services. - -|FIPS Validated / Modules in Process cryptographic module and algorithms that are obtained from {op-system-base} 8 and {op-system} binaries and images. -| - -|Use of FIPS compatible golang compiler. -|TLS FIPS support is not complete but is planned for future {product-title} releases. - -|FIPS support across multiple architectures. -|FIPS is currently only supported on {product-title} deployments using `x86_64`, `ppc64le`, and `s390x` architectures. - -|=== - -[id="installation-about-fips-components_{context}"] -== FIPS support in components that the cluster uses - -Although the {product-title} cluster itself uses FIPS Validated / Modules in Process modules, ensure that the systems that support your {product-title} cluster use FIPS Validated / Modules in Process modules for cryptography. - -[id="installation-about-fips-components-etcd_{context}"] -=== etcd - -To ensure that the secrets that are stored in etcd use FIPS Validated / Modules in Process encryption, boot the node in FIPS mode. After you install the cluster in FIPS mode, you can xref:../security/encrypting-etcd.adoc#encrypting-etcd[encrypt the etcd data] by using the FIPS-approved `aes cbc` cryptographic algorithm. - -[id="installation-about-fips-components-storage_{context}"] -=== Storage - -For local storage, use {op-system-base}-provided disk encryption or Container Native Storage that uses {op-system-base}-provided disk encryption. By storing all data in volumes that use {op-system-base}-provided disk encryption and enabling FIPS mode for your cluster, both data at rest and data in motion, or network data, are protected by FIPS Validated / Modules in Process encryption. -You can configure your cluster to encrypt the root filesystem of each node, as described -in xref:../installing/install_config/installing-customizing.adoc#installing-customizing[Customizing nodes]. - -[id="installation-about-fips-components-runtimes_{context}"] -=== Runtimes - -To ensure that containers know that they are running on a host that is using FIPS Validated / Modules in Process cryptography modules, use CRI-O to manage your runtimes. CRI-O supports FIPS mode, in that it configures the containers to know that they are running in FIPS mode. 
- -[id="installing-fips-mode_{context}"] -== Installing a cluster in FIPS mode - -To install a cluster in FIPS mode, follow the instructions to install a customized cluster on your preferred infrastructure. Ensure that you set `fips: true` in the `install-config.yaml` file before you deploy your cluster. - -* xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Amazon Web Services] -* xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Alibaba Cloud] -* xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Microsoft Azure] -* xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Bare metal] -* xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Google Cloud Platform] -* xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[IBM Cloud VPC] -* xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[{ibmpowerProductName}] -* xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[{ibmzProductName} and {linuxoneProductName}] -* xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[{ibmzProductName} and {linuxoneProductName} with {op-system-base} KVM] -* xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[{rh-openstack-first}] -* xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[VMware vSphere] - -[NOTE] -==== -If you are using Azure File storage, you cannot enable FIPS mode. -==== - -To apply `AES CBC` encryption to your etcd data store, follow the xref:../security/encrypting-etcd.adoc#encrypting-etcd[Encrypting etcd data] process after you install your cluster. - -If you add {op-system-base} nodes to your cluster, ensure that you enable FIPS mode on the machines before their initial boot. See xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Adding RHEL compute machines to an {product-title} cluster] and link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/using-the-system-wide-cryptographic-policies_security-hardening#enabling-fips-mode-in-a-container_using-the-system-wide-cryptographic-policies[Enabling FIPS Mode] in the {op-system-base} 8 documentation. diff --git a/installing/installing-preparing.adoc b/installing/installing-preparing.adoc deleted file mode 100644 index 577c06e79be9..000000000000 --- a/installing/installing-preparing.adoc +++ /dev/null @@ -1,640 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-preparing"] -= Selecting a cluster installation method and preparing it for users -include::_attributes/common-attributes.adoc[] -:context: installing-preparing - -toc::[] - -Before you install {product-title}, decide what kind of installation process to follow and verify that you have all of the required resources to prepare the cluster for users. - -[id="installing-preparing-selecting-cluster-type"] -== Selecting a cluster installation type -Before you install an {product-title} cluster, you need to select the best installation instructions to follow. Think about your answers to the following questions to select the best option. 
- -[id="installing-preparing-install-manage"] -=== Do you want to install and manage an {product-title} cluster yourself? - -If you want to install and manage {product-title} yourself, you can install it on the following platforms: - -* Alibaba Cloud -* Amazon Web Services (AWS) on 64-bit x86 instances -ifndef::openshift-origin[] -* Amazon Web Services (AWS) on 64-bit ARM instances -endif::openshift-origin[] -* Microsoft Azure on 64-bit x86 instances -* Microsoft Azure on 64-bit ARM instances -* Microsoft Azure Stack Hub -* Google Cloud Platform (GCP) -* {rh-openstack-first} -* {rh-virtualization-first} -* IBM Cloud VPC -* {ibmzProductName} or {linuxoneProductName} -* {ibmzProductName} or {linuxoneProductName} for {op-system-base-full} KVM -* {ibmpowerProductName} -* {ibmpowerProductName} Virtual Server -* Nutanix -* VMware vSphere -* Bare metal or other platform agnostic infrastructure -// might want a note about single node here - -You can deploy an {product-title} 4 cluster to both on-premise hardware and to cloud hosting services, but all of the machines in a cluster must be in the same data center or cloud hosting service. - -If you want to use {product-title} but do not want to manage the cluster yourself, you have several managed service options. If you want a cluster that is fully managed by Red Hat, you can use link:https://www.openshift.com/products/dedicated/[OpenShift Dedicated] or link:https://www.openshift.com/products/online/[OpenShift Online]. You can also use OpenShift as a managed service on Azure, AWS, IBM Cloud VPC, or Google Cloud. For more information about managed services, see the link:https://www.openshift.com/products[OpenShift Products] page. If you install an {product-title} cluster with a cloud virtual machine as a virtual bare metal, the corresponding cloud-based storage is not supported. - -[id="installing-preparing-migrate"] -=== Have you used {product-title} 3 and want to use {product-title} 4? - -If you used {product-title} 3 and want to try {product-title} 4, you need to understand how different {product-title} 4 is. {product-title} 4 weaves the Operators that package, deploy, and manage Kubernetes applications and the operating system that the platform runs on, {op-system-first}, together seamlessly. Instead of deploying machines and configuring their operating systems so that you can install {product-title} on them, the {op-system} operating system is an integral part of the {product-title} cluster. Deploying the operating system for the cluster machines as part of the installation process for {product-title}. See xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#migration-comparing-ocp-3-4[Differences between {product-title} 3 and 4]. - -Because you need to provision machines as part of the {product-title} cluster installation process, you cannot upgrade an {product-title} 3 cluster to {product-title} 4. Instead, you must create a new {product-title} 4 cluster and migrate your {product-title} 3 workloads to them. For more information about migrating, see xref:../migrating_from_ocp_3_to_4/index.adoc#migration-from-version-3-to-4-overview[Migrating from {product-title} 3 to 4 overview]. Because you must migrate to {product-title} 4, you can use any type of production cluster installation process to create your new cluster. - -[id="installing-preparing-existing-components"] -=== Do you want to use existing components in your cluster? 
- -Because the operating system is integral to {product-title}, it is easier to let the installation program for {product-title} stand up all of the infrastructure. These are called _installer provisioned infrastructure_ installations. In this type of installation, you can provide some existing infrastructure to the cluster, but the installation program deploys all of the machines that your cluster initially needs. - -You can deploy an installer-provisioned infrastructure cluster without specifying any customizations to the cluster or its underlying machines to xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[Alibaba Cloud], xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[AWS], xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Azure], xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Azure Stack Hub], xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[GCP], xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Nutanix]. - -If you need to perform basic configuration for your installer-provisioned infrastructure cluster, such as the instance type for the cluster machines, you can customize an installation for xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Alibaba Cloud], xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[AWS], xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Azure], xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[GCP], xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Nutanix]. - -For installer-provisioned infrastructure installations, you can use an existing xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[VPC in AWS], xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[vNet in Azure], or xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[VPC in GCP]. You can also reuse part of your networking infrastructure so that your cluster in xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[AWS], xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Azure], xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[GCP] can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. If you have existing accounts and credentials on these clouds, you can re-use them, but you might need to modify the accounts to have the required permissions to install {product-title} clusters on them. 
- -You can use the installer-provisioned infrastructure method to create appropriate machine instances on your hardware for xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[{rh-openstack}], xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[{rh-openstack} with Kuryr], xref:../installing/installing_rhv/installing-rhv-default.adoc#installing-rhv-default[{rh-virtualization}], xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[vSphere], and xref:../installing/installing_bare_metal_ipi/ipi-install-overview#ipi-install-overview[bare metal]. Additionally, for xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[vSphere], you can also customize additional network parameters during installation. - -If you want to reuse extensive cloud infrastructure, you can complete a _user-provisioned infrastructure_ installation. With these installations, you manually deploy the machines that your cluster requires during the installation process. If you perform a user-provisioned infrastructure installation on xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS], xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Azure], xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Azure Stack Hub], you can use the provided templates to help you stand up all of the required components. You can also reuse a shared xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[VPC on GCP]. Otherwise, you can use the xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[provider-agnostic] installation method to deploy a cluster into other clouds. - - -You can also complete a user-provisioned infrastructure installation on your existing hardware. If you use xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[{rh-openstack}], xref:../installing/installing_rhv/installing-rhv-user-infra.adoc#installing-rhv-user-infra[{rh-virtualization}], xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[{ibmzProductName} or {linuxoneProductName}], xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[{ibmzProductName} and {linuxoneProductName} with {op-system-base} KVM], xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[IBM Power], or xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[vSphere], use the specific installation instructions to deploy your cluster. If you use other supported hardware, follow the xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[bare metal installation] procedure. 
For some of these platforms, such as xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[{rh-openstack}], xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[vSphere], and xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[bare metal], you can also customize additional network parameters during installation. - -[id="installing-preparing-security"] -=== Do you need extra security for your cluster? - -If you use a user-provisioned installation method, you can configure a proxy for your cluster. The instructions are included in each installation procedure. - -If you want to prevent your cluster on a public cloud from exposing endpoints externally, you can deploy a private cluster with installer-provisioned infrastructure on xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[AWS], xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Azure], or xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[GCP]. - -If you need to install your cluster that has limited access to the internet, such as a disconnected or restricted network cluster, you can xref:../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[mirror the installation packages] and install the cluster from them. Follow detailed instructions for user provisioned infrastructure installations into restricted networks for xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[GCP], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[{ibmzProductName} or {linuxoneProductName}], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[{ibmzProductName} or {linuxoneProductName} with {op-system-base} KVM], xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[IBM Power], xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[vSphere], or xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal]. 
You can also install a cluster into a restricted network using installer-provisioned infrastructure by following detailed instructions for xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[AWS], xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[GCP], xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[Nutanix], xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[{rh-openstack}], xref:../installing/installing_rhv/installing-rhv-restricted-network.adoc#installing-rhv-restricted-network[{rh-virtualization}], and xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[vSphere]. - -If you need to deploy your cluster to an xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[AWS GovCloud region], xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[AWS China region], or xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Azure government region], you can configure those custom regions during an installer-provisioned infrastructure installation. - -//// -ifndef::openshift-origin[] -You can also configure the cluster machines to use xref:../installing/installing-fips.adoc#installing-fips[FIPS Validated / Modules in Process cryptographic libraries] during installation. - -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== - -endif::[] -//// - -//// -[id="installing-preparing-single-node"] -=== Are you installing single-node clusters at the edge? - -You can use the assisted installer to deploy xref:../installing/installing_sno/install-sno-installing-sno.adoc#installing-sno[single node] clusters for edge workloads. -//// - -[id="installing-preparing-cluster-for-users"] -== Preparing your cluster for users after installation - -Some configuration is not required to install the cluster but is recommended before your users access the cluster. You can customize the cluster itself by xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[customizing] the Operators that make up your cluster and integrate your cluster with other required systems, such as an identity provider. -//This link will change when we consolidate the customizations page with the post-installation activities.
- -For a production cluster, you must configure the following integrations: - -* xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Persistent storage] -* xref:../authentication/understanding-identity-provider.adoc#understanding-identity-provider[An identity provider] -* xref:../monitoring/configuring-the-monitoring-stack.adoc#configuring-the-monitoring-stack[Monitoring core OpenShift Container Platform components] - -[id="installing-preparing-cluster-for-workloads"] -== Preparing your cluster for workloads - -Depending on your workload needs, you might need to take extra steps before you begin deploying applications. For example, after you prepare infrastructure to support your application xref:../cicd/builds/build-strategies.adoc#build-strategies[build strategy], you might need to make provisions for xref:../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-low-latency-tuning[low-latency] workloads or to xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets[protect sensitive workloads]. You can also configure xref:../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[monitoring] for application workloads. -If you plan to run xref:../windows_containers/enabling-windows-container-workloads.adoc#enabling-windows-container-workloads[Windows workloads], you must enable xref:../networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.adoc#configuring-hybrid-networking[hybrid networking with OVN-Kubernetes] during the installation process; hybrid networking cannot be enabled after your cluster is installed. - -[id="supported-installation-methods-for-different-platforms"] -== Supported installation methods for different platforms - -You can perform different types of installations on different platforms. - -[NOTE] -==== -Not all installation options are supported for all platforms, as shown in the following tables. A checkmark indicates that the option is supported and links to the relevant section. -==== - -.Installer-provisioned infrastructure options -//This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. 
-ifndef::openshift-origin[] -|=== -||Alibaba |AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM)|Azure Stack Hub |GCP |Nutanix |{rh-openstack} |RHV |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |IBM Cloud VPC |{ibmzProductName} |{ibmpowerProductName} |{ibmpowerProductName} Virtual Server - -|Default -|xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[✓] -|xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -| -|xref:../installing/installing_rhv/installing-rhv-default.adoc#installing-rhv-default[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| -| - -|Custom -|xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[✓] -|xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[✓] -|xref:../installing/installing_rhv/installing-rhv-customizations.adoc#installing-rhv-customizations[✓] -| -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[✓] - - -|Network customization -|xref:../installing/installing_alibaba/installing-alibaba-network-customizations.adoc#installing-alibaba-network-customizations[✓] 
-|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc#installing-azure-stack-hub-network-customizations[✓] -|xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[✓] -| -|xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[✓] -| -| -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[✓] -| -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -| -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[✓] -|xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[✓] -|xref:../installing/installing_rhv/installing-rhv-restricted-network.adoc#installing-rhv-restricted-network[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-installation-workflow[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-install-installation-workflow[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[✓] -| -| -| -|xref:../installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc#installing-restricted-networks-ibm-power-vs[✓] - -|Private clusters -| -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -| -|xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[✓] -| -| -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc#installing-ibm-power-vs-private-cluster[✓] - -|Existing virtual private networks -| 
-|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -| -|xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[✓] -| -| -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[✓] -| -| -|xref:../installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc#installing-ibm-powervs-vpc[✓] - -|Government regions -| -|xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[✓] -| -|xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -|Secret regions -| -|xref:../installing/installing_aws/installing-aws-secret-region.adoc#installing-aws-secret-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| - -|China regions -| -|xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. -ifdef::openshift-origin[] -|=== -||Alibaba |AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |oVirt |Bare metal |vSphere |IBM Cloud VPC |{ibmzProductName} |{ibmpowerProductName} - -|Default -|xref:../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[✓] -|xref:../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] -| -|xref:../installing/installing_rhv/installing-rhv-default.adoc#installing-rhv-default[✓] -|xref:../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[✓] -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| - -|Custom -|xref:../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[✓] -|xref:../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[✓] -|xref:../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[✓] -|xref:../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[✓] -|xref:../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[✓] -|xref:../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[✓] 
-|xref:../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[✓] -|xref:../installing/installing_rhv/installing-rhv-customizations.adoc#installing-rhv-customizations[✓] -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[✓] -| -| - -|Network customization -|xref:../installing/installing_alibaba/installing-alibaba-network-customizations.adoc#installing-alibaba-network-customizations[✓] -|xref:../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[✓] -|xref:../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc#installing-azure-stack-hub-network-customizations[✓] -|xref:../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[✓] -| -|xref:../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[✓] -| -| -|xref:../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[✓] -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[✓] -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[✓] -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[✓] -|xref:../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[✓] -|xref:../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[✓] -|xref:../installing/installing_rhv/installing-rhv-restricted-network.adoc#installing-rhv-restricted-network[✓] -| -|xref:../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[✓] -| -| -| - -|Private clusters -| -|xref:../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[✓] -|xref:../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[✓] -| -|xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[✓] -| -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[✓] -| -| - -|Existing virtual private networks -| -|xref:../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[✓] -|xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[✓] -| -|xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[✓] -| -| -| -| -| -| -|xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[✓] -| -| - -|Government regions -| 
-|xref:../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[✓] -|xref:../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[✓] -| -| -| -| -| -| -| -| -| -| -| - -|Secret regions -| -|xref:../installing/installing_aws/installing-aws-secret-region.adoc#installing-aws-secret-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| - -|China regions -| -|xref:../installing/installing_aws/installing-aws-china.adoc#installing-aws-china-region[✓] -| -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -.User-provisioned infrastructure options -//This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. -ifndef::openshift-origin[] -|=== -||Alibaba |AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM) |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |RHV |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |IBM Cloud VPC |{ibmzProductName} |{ibmzProductName} with {op-system-base} KVM |{ibmpowerProductName} |Platform agnostic - -|Custom -| -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[✓] -|xref:../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[✓] -| -|xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[✓] -|xref:../installing/installing_rhv/installing-rhv-user-infra.adoc#installing-rhv-user-infra[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[✓] -|xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[✓] - -// Add RHV UPI link when docs are available: https://github.com/openshift/openshift-docs/pull/26484 - - -|Network customization -| -| -| -| -| -| -| -| -|xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[✓] -| -|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[✓] -| -| -| -| -| - -|Restricted network -| 
-|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -| -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[✓] -| -| -|xref:../installing/installing_rhv/installing-rhv-restricted-network.adoc#installing-rhv-restricted-network[✓] -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[✓] -| - -|Shared VPC hosted outside of cluster project -| -| -| -| -| -| -|xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[✓] -| -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. -ifdef::openshift-origin[] -|=== -||Alibaba |AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |oVirt |Bare metal |vSphere |IBM Cloud VPC |{ibmzProductName} |{ibmzProductName} with {op-system-base} KVM |{ibmpowerProductName} |Platform agnostic - -|Custom -| -|xref:../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[✓] -|xref:../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[✓] -|xref:../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[✓] -|xref:../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[✓] -| -|xref:../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[✓] -|xref:../installing/installing_rhv/installing-rhv-user-infra.adoc#installing-rhv-user-infra[✓] -|xref:../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[✓] -|xref:../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[✓] - -// Add RHV UPI link when docs are available: https://github.com/openshift/openshift-docs/pull/26484 - -|Network customization -| -| -| -| -| -| -|xref:../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[✓] -| 
-|xref:../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[✓] -|xref:../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[✓] -| -| -| -| -| - -|Restricted network -| -|xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[✓] -| -| -|xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[✓] -| -| -| -|xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[✓] -|xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[✓] -| -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[✓] -|xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[✓] -|xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[✓] -| - -|Shared VPC hosted outside of cluster project -| -| -| -| -|xref:../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[✓] -| -| -| -| -| -| -| -| -| -| -| -|=== -endif::openshift-origin[] - -//// -.Special use cases -|=== -|Single Node - -|xref:../installing/installing_sno/install-sno-installing-sno.adoc#installing-sno[✓] - - -|=== -//// -// sync diff --git a/installing/installing-troubleshooting.adoc b/installing/installing-troubleshooting.adoc deleted file mode 100644 index f2330d138a3b..000000000000 --- a/installing/installing-troubleshooting.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-troubleshooting"] -= Troubleshooting installation issues -include::_attributes/common-attributes.adoc[] -:context: installing-troubleshooting - -toc::[] - -To assist in troubleshooting a failed {product-title} installation, you can gather logs from the bootstrap and control plane machines. You can also get debug information from the installation program. If you are unable to resolve the issue using the logs and debug information, see xref:../support/troubleshooting/troubleshooting-installations.adoc#determining-where-installation-issues-occur_troubleshooting-installations[Determining where installation issues occur] for component-specific troubleshooting. - -[NOTE] -==== -If your {product-title} installation fails and the debug output or logs contain network timeouts or other connectivity errors, review the guidelines for xref:../installing/install_config/configuring-firewall.adoc#configuring-firewall[configuring your firewall]. Gathering logs from your firewall and load balancer can help you diagnose network-related errors. -==== - -== Prerequisites - -* You attempted to install an {product-title} cluster and the installation failed. 
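For reference, the troubleshooting assembly above centers on gathering logs from the bootstrap and control plane machines and on raising the installer's log verbosity. A minimal sketch of that flow, assuming the standard `openshift-install` binary and the original installation directory, is shown below; exact flags and output names can vary by release.

[source,terminal]
----
# Collect bootstrap and control plane logs over SSH into a compressed
# log bundle; requires the installation directory that openshift-install
# originally wrote.
$ ./openshift-install gather bootstrap --dir <installation_directory>

# Re-run the wait step with debug logging to capture more detail about
# where the installation stalls.
$ ./openshift-install wait-for install-complete --dir <installation_directory> --log-level=debug
----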
- -include::modules/installation-bootstrap-gather.adoc[leveloffset=+1] - -include::modules/manually-gathering-logs-with-ssh.adoc[leveloffset=+1] - -include::modules/manually-gathering-logs-without-ssh.adoc[leveloffset=+1] - -include::modules/installation-getting-debug-information.adoc[leveloffset=+1] - -include::modules/restarting-installation.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../installing/index.adoc#ocp-installation-overview[Installing an {product-title} cluster] diff --git a/installing/installing_alibaba/_attributes b/installing/installing_alibaba/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_alibaba/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_alibaba/images b/installing/installing_alibaba/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_alibaba/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_alibaba/installing-alibaba-customizations.adoc b/installing/installing_alibaba/installing-alibaba-customizations.adoc deleted file mode 100644 index 3fc08a24a635..000000000000 --- a/installing/installing_alibaba/installing-alibaba-customizations.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-customizations"] -= Installing a cluster on Alibaba Cloud with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on Alibaba Cloud. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more {product-title} configuration tasks after an installation completes. -==== - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. 
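For context on the customization step described above, the changes are made in `install-config.yaml` before the cluster is created. The fragment below is only an illustration: the `platform.alibabacloud` field names and values are assumptions and should be checked against the generated file and the configuration parameters module included later in the assembly.

[source,yaml]
----
# Illustrative install-config.yaml fragment; not a complete file.
apiVersion: v1
baseDomain: example.com            # assumed example domain
metadata:
  name: my-cluster                 # hypothetical cluster name
compute:
- name: worker
  replicas: 3                      # customized worker count
controlPlane:
  name: master
  replicas: 3
platform:
  alibabacloud:
    region: us-east-1              # assumed field name and region value
credentialsMode: Manual            # Alibaba Cloud installs use manually maintained credentials
----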
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -[id="next-steps_installing-alibaba-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_alibaba/installing-alibaba-default.adoc b/installing/installing_alibaba/installing-alibaba-default.adoc deleted file mode 100644 index 2655c5bb74d2..000000000000 --- a/installing/installing_alibaba/installing-alibaba-default.adoc +++ /dev/null @@ -1,60 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-default"] -= Installing a cluster quickly on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Alibaba Cloud that uses the default configuration options. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-default"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You have xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[created the required Alibaba Cloud resources]. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the kube-system namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-alibaba-default"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. 
-//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials] diff --git a/installing/installing_alibaba/installing-alibaba-network-customizations.adoc b/installing/installing_alibaba/installing-alibaba-network-customizations.adoc deleted file mode 100644 index 16a4be1ef188..000000000000 --- a/installing/installing_alibaba/installing-alibaba-network-customizations.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-network-customizations"] -= Installing a cluster on Alibaba Cloud with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-network-customizations - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Alibaba Cloud with customized network configuration options. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and -VXLAN configurations. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. 
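As an illustration of the network customization this assembly covers, the `networking` stanza of `install-config.yaml` is where the cluster, machine, and service CIDRs are set so that they do not collide with existing IP address allocations. The values below are the commonly documented defaults; adjust them for your environment.

[source,yaml]
----
# Illustrative networking stanza for install-config.yaml.
networking:
  networkType: OVNKubernetes
  clusterNetwork:
  - cidr: 10.128.0.0/14   # pod network; must not overlap existing ranges
    hostPrefix: 23
  machineNetwork:
  - cidr: 10.0.0.0/16     # node subnet
  serviceNetwork:
  - 172.30.0.0/16         # service VIP range
----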
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/nw-network-config.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//Networking-specific customization module -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] - -//Networking-specific customization module -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -[id="next-steps_installing-alibaba-network-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. \ No newline at end of file diff --git a/installing/installing_alibaba/installing-alibaba-vpc.adoc b/installing/installing_alibaba/installing-alibaba-vpc.adoc deleted file mode 100644 index ab7e6baa1500..000000000000 --- a/installing/installing_alibaba/installing-alibaba-vpc.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-alibaba-vpc"] -= Installing a cluster on Alibaba Cloud into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-alibaba-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Alibaba Virtual Private Cloud (VPC) on Alibaba Cloud Services. 
The installation program provisions the required infrastructure, which can then be customized. To customize the VPC installation, modify the parameters in the 'install-config.yaml' file before you install the cluster. - -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. You can complete many more {product-title} configuration tasks after an installation completes. -==== - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-alibaba-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#installation-alibaba-dns_preparing-to-install-on-alibaba[registered your domain]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud Resource Access Management (RAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[manually create and maintain Resource Access Management (RAM) credentials]. - -include::modules/installation-custom-alibaba-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-alibaba-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-creating-alibaba-manifests.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2] - -include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console - -[id="next-steps_installing-alibaba-vpc"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. 
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -//Given that manual mode is required to install on Alibaba Cloud, I do not believe this xref is necessary. -//* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. - diff --git a/installing/installing_alibaba/manually-creating-alibaba-ram.adoc b/installing/installing_alibaba/manually-creating-alibaba-ram.adoc deleted file mode 100644 index 8c4caa5e92d1..000000000000 --- a/installing/installing_alibaba/manually-creating-alibaba-ram.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-alibaba-ram"] -= Creating the required Alibaba Cloud resources -include::_attributes/common-attributes.adoc[] -:context: manually-creating-alibaba-ram - -toc::[] - -Before you install {product-title}, you must use the Alibaba Cloud console to create a Resource Access Management (RAM) user that has sufficient permissions to install {product-title} into your Alibaba Cloud. This user must also have permissions to create new RAM users. You can also configure and use the `ccoctl` tool to create new credentials for the {product-title} components with the permissions that they require. - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -//Task part 1: Manually creating the required RAM user -include::modules/manually-creating-alibaba-ram-user.adoc[leveloffset=+1] - -//Task part 2: Configuring the Cloud Credential Operator utility -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -//Task part 3: Creating Alibaba resources with a single command -// modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+1] - -[id="next-steps_manually-creating-alibaba-ram"] -== Next steps - -* Install a cluster on Alibaba Cloud infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -** **xref:../../installing/installing_alibaba/installing-alibaba-default.adoc#installing-alibaba-default[Installing a cluster quickly on Alibaba Cloud]**: You can install a cluster quickly by using the default configuration options. - -** **xref:../../installing/installing_alibaba/installing-alibaba-customizations.adoc#installing-alibaba-customizations[Installing a customized cluster on Alibaba Cloud]**: The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. 
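To make the `ccoctl` step above more concrete, the usual flow is to extract the credentials requests from the release image and then have `ccoctl` create the per-component RAM users. The flag names below are recalled from the documented workflow rather than verified here, so confirm them with `ccoctl alibabacloud create-ram-users --help`; the paths and release image are placeholders.

[source,terminal]
----
# Pull the CredentialsRequest objects for Alibaba Cloud out of the
# release image.
$ oc adm release extract --credentials-requests --cloud=alibabacloud \
    --to=<credrequests_dir> <release_image>

# Create component-scoped RAM users and write the resulting secret
# manifests for the installer to pick up.
$ ccoctl alibabacloud create-ram-users \
    --name <cluster_name> \
    --region <region> \
    --credentials-requests-dir <credrequests_dir> \
    --output-dir <output_dir>
----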
- diff --git a/installing/installing_alibaba/modules b/installing/installing_alibaba/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_alibaba/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc b/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc deleted file mode 100644 index 1df21d71d2fc..000000000000 --- a/installing/installing_alibaba/preparing-to-install-on-alibaba.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-alibaba"] -= Preparing to install on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-alibaba - -toc::[] - -:FeatureName: Alibaba Cloud on {product-title} -include::snippets/technology-preview.adoc[] - -[id="prerequisites_preparing-to-install-on-alibaba"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-alibaba"] -== Requirements for installing {product-title} on Alibaba Cloud - -Before installing {product-title} on Alibaba Cloud, you must configure and register your domain, create a Resource Access Management (RAM) user for the installation, and review the supported Alibaba Cloud data center regions and zones for the installation. - -include::modules/installation-alibaba-dns.adoc[leveloffset=+1] - -// include modules/installation-alibaba-limits.adoc[leveloffset=+1] - -// include modules/installation-alibaba-ram-user.adoc[leveloffset=+1] - -include::modules/installation-alibaba-regions.adoc[leveloffset=+1] - -[id="next-steps_preparing-to-install-on-alibaba"] -== Next steps - -* xref:../../installing/installing_alibaba/manually-creating-alibaba-ram.adoc#manually-creating-alibaba-ram[Create the required Alibaba Cloud resources]. - diff --git a/installing/installing_alibaba/snippets b/installing/installing_alibaba/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_alibaba/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_alibaba/uninstall-cluster-alibaba.adoc b/installing/installing_alibaba/uninstall-cluster-alibaba.adoc deleted file mode 100644 index 9b4fbbb22346..000000000000 --- a/installing/installing_alibaba/uninstall-cluster-alibaba.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-alibaba"] -= Uninstalling a cluster on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-alibaba - -toc::[] - -You can remove a cluster that you deployed to Alibaba Cloud. 
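For reference, the uninstall assembly that begins here hands off to the generic cloud uninstall module; the operative command is sketched below, assuming the original installation directory (containing `metadata.json`) is still available.

[source,terminal]
----
# Deletes the cloud resources recorded in the installation directory's
# metadata.json.
$ ./openshift-install destroy cluster --dir <installation_directory> --log-level info
----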
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_aws/_attributes b/installing/installing_aws/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_aws/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_aws/images b/installing/installing_aws/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_aws/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_aws/installing-aws-account.adoc b/installing/installing_aws/installing-aws-account.adoc deleted file mode 100644 index cda3cd0adcc9..000000000000 --- a/installing/installing_aws/installing-aws-account.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-account"] -= Configuring an AWS account -include::_attributes/common-attributes.adoc[] -:context: installing-aws-account - -toc::[] - -Before you can install {product-title}, you must configure an -Amazon Web Services (AWS) account. - -include::modules/installation-aws-route53.adoc[leveloffset=+1] - -include::modules/nw-endpoint-route53.adoc[leveloffset=+2] - -include::modules/installation-aws-limits.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+1] - -include::modules/installation-aws-iam-user.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] for steps to set the Cloud Credential Operator (CCO) to manual mode prior to installation. Use this mode in environments where the cloud identity and access management (IAM) APIs are not reachable, or if you prefer not to store an administrator-level credential secret in the cluster `kube-system` project. - -include::modules/installation-aws-iam-policies-about.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions-iam-roles.adoc[leveloffset=+2] -include::modules/installation-aws-add-iam-roles.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* See xref:../../installing/installing_aws/installing-aws-customizations.adoc#installation-launching-installer_installing-aws-customizations[Deploying the cluster]. 
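As a concrete companion to the IAM user module included above, one possible way to create an installer identity with the AWS CLI is sketched below. The user name is hypothetical, and attaching `AdministratorAccess` is simply the broadest option; the permissions module in this assembly lists narrower policies you can attach instead.

[source,terminal]
----
# Create a dedicated IAM user for the installer (name is illustrative).
$ aws iam create-user --user-name ocp-installer

# Broadest policy choice; substitute finer-grained policies if preferred.
$ aws iam attach-user-policy --user-name ocp-installer \
    --policy-arn arn:aws:iam::aws:policy/AdministratorAccess

# Generate the long-lived access key that the installer will use.
$ aws iam create-access-key --user-name ocp-installer
----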
- -include::modules/installation-aws-access-analyzer.adoc[leveloffset=+2] - -include::modules/installation-aws-marketplace.adoc[leveloffset=+1] - -include::modules/installation-aws-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Quickly install a cluster] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] -** xref:../../installing/installing_aws/installing-aws-outposts-remote-workers.adoc#installing-aws-outposts-remote-workers[Installing a cluster on AWS with remote workers on AWS Outposts] diff --git a/installing/installing_aws/installing-aws-china.adoc b/installing/installing_aws/installing-aws-china.adoc deleted file mode 100644 index bda46256d655..000000000000 --- a/installing/installing_aws/installing-aws-china.adoc +++ /dev/null @@ -1,74 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-china-region"] -= Installing a cluster on AWS China -include::_attributes/common-attributes.adoc[] -:context: installing-aws-china-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster to the following Amazon Web Services (AWS) China regions: - -* `cn-north-1` (Beijing) -* `cn-northwest-1` (Ningxia) - -== Prerequisites - -* You have an Internet Content Provider (ICP) license. -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. 
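To illustrate the region and custom AMI handling that this assembly covers, the relevant `install-config.yaml` fields are sketched below. The AMI ID is a placeholder for the RHCOS image that you upload yourself, as described by the custom RHCOS AMI module included later in the assembly.

[source,yaml]
----
# Illustrative fragment for an AWS China install; not a complete file.
platform:
  aws:
    region: cn-north-1               # or cn-northwest-1
    amiID: ami-xxxxxxxxxxxxxxxxx     # placeholder for your uploaded RHCOS AMI
----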
You can supply the keys when you run the installation program. -==== - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-customizations.adoc b/installing/installing_aws/installing-aws-customizations.adoc deleted file mode 100644 index 365b0b5c2b0d..000000000000 --- a/installing/installing_aws/installing-aws-customizations.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-customizations"] -= Installing a cluster on AWS with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-aws-customizations -:platform: AWS - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Amazon Web Services (AWS). To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -[NOTE] -==== -The scope of the {product-title} installation configurations is intentionally narrow. It is designed for simplicity and ensured success. 
You can complete many more {product-title} configuration tasks after an installation completes. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. 
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-default.adoc b/installing/installing_aws/installing-aws-default.adoc deleted file mode 100644 index 30daa9bf5e18..000000000000 --- a/installing/installing_aws/installing-aws-default.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-default"] -= Installing a cluster quickly on AWS -include::_attributes/common-attributes.adoc[] -:context: installing-aws-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) that uses the default configuration options. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. Manual mode can also be used in environments where the cloud IAM APIs are not reachable. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. 
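Tying the profile note above to the install step, a minimal run of the default installation looks like the sketch below; the profile name is hypothetical and simply selects credentials from `~/.aws/credentials`.

[source,terminal]
----
# Select an AWS profile (hypothetical name) and run the default install.
$ export AWS_PROFILE=openshift-installer
$ ./openshift-install create cluster --dir <installation_directory> --log-level=info
----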
- -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc b/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc deleted file mode 100644 index eb3cea8854fd..000000000000 --- a/installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="expanding-a-cluster-with-on-premise-bare-metal-nodes"] -= Expanding a cluster with on-premise bare metal nodes -include::_attributes/common-attributes.adoc[] -:context: expanding-a-cluster-with-on-premise-bare-metal-nodes - -toc::[] - -You can expand an {product-title} cluster deployed on AWS by adding bare-metal nodes to the cluster. By default, a cluster deployed on AWS with {product-title} 4.11 or earlier has the Baremetal Operator (BMO) disabled. In {product-title} 4.12 and later releases, the BMO is enabled to support a hybrid cloud consisting of AWS control plane nodes and worker nodes with additional on-premise bare-metal worker nodes. - -Expanding an {product-title} cluster deployed on AWS requires using virtual media with bare-metal nodes that meet the xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#node-requirements_ipi-install-prerequisites[node requirements] and xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[firmware requirements] for installing with virtual media. A `provisioning` network is not required, and if present, should be xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#modifying-install-config-for-no-provisioning-network_ipi-install-installation-workflow[disabled]. 
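As a sketch only: if a `provisioning` network is present on a running cluster, one way to disable it is to patch the cluster's `Provisioning` resource. This assumes the default resource name `provisioning-configuration`; verify the name in your cluster before applying the patch.

[source,terminal]
----
# Inspect the current provisioning network setting.
$ oc get provisioning provisioning-configuration -o jsonpath='{.spec.provisioningNetwork}{"\n"}'

# Disable the provisioning network so that virtual media is used instead.
$ oc patch provisioning provisioning-configuration --type merge \
    --patch '{"spec":{"provisioningNetwork":"Disabled"}}'
----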
- -include::modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-aws-vpc"] -.Additional resources - -* link:https://docs.aws.amazon.com/vpc/?icmpid=docs_homepage_featuredsvcs[Amazon VPC] -* link:https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html[VPC peering] - -include::modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc[leveloffset=+1] - -After you have the networking configured, you can proceed with xref:../installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc#ipi-install-expanding-the-cluster[expanding the cluster]. \ No newline at end of file diff --git a/installing/installing_aws/installing-aws-government-region.adoc b/installing/installing_aws/installing-aws-government-region.adoc deleted file mode 100644 index fe35c5e19120..000000000000 --- a/installing/installing_aws/installing-aws-government-region.adoc +++ /dev/null @@ -1,79 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-government-region"] -= Installing a cluster on AWS into a government region -include::_attributes/common-attributes.adoc[] -:context: installing-aws-government-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) into a government region. To configure the -region, modify parameters in the `install-config.yaml` file before you -install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. 
- -include::modules/installation-aws-about-government-region.adoc[leveloffset=+1] - -include::modules/installation-prereq-aws-private-cluster.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-localzone.adoc b/installing/installing_aws/installing-aws-localzone.adoc deleted file mode 100644 index 2e6120ddaeeb..000000000000 --- a/installing/installing_aws/installing-aws-localzone.adoc +++ /dev/null @@ -1,159 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-localzone"] -= Installing a cluster using AWS Local Zones -include::_attributes/common-attributes.adoc[] -:context: installing-aws-localzone - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) into an existing VPC, extending workers to the edge of the Cloud Infrastructure using AWS Local Zones. - -After you create an Amazon Web Service (AWS) Local Zone environment, and you deploy your cluster, you can use edge worker nodes to create user workloads in Local Zone subnets. 
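For example, because edge worker nodes are typically labeled and tainted to keep regular workloads off of them, a user workload that targets Local Zone nodes might select and tolerate them as follows. The `node-role.kubernetes.io/edge` key shown here is an assumption based on the edge compute pool convention; verify the label and taint on your nodes.

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: edge-app                                   # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: edge-app
  template:
    metadata:
      labels:
        app: edge-app
    spec:
      nodeSelector:
        node-role.kubernetes.io/edge: ""           # assumed edge node label
      tolerations:
      - key: node-role.kubernetes.io/edge          # assumed edge node taint
        operator: Exists
        effect: NoSchedule
      containers:
      - name: app
        image: registry.example.com/edge-app:latest   # placeholder image
----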
- -AWS Local Zones are a type of infrastructure that place Cloud Resources close to the metropolitan regions. For more information, see the link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones[AWS Local Zones Documentation]. - -{product-title} can be installed in existing VPCs with Local Zone subnets. The Local Zone subnets can be used to extend the regular workers' nodes to the edge networks. The edge worker nodes are dedicated to running user workloads. - -One way to create the VPC and subnets is to use the provided CloudFormation templates. You can modify the templates to customize your infrastructure or use the information that they contain to create AWS objects according to your company's policies. - -[IMPORTANT] -==== -The steps for performing an installer-provisioned infrastructure installation are provided as an example only. Installing a cluster with VPC you provide requires knowledge of the cloud provider and the installation process of {product-title}. The CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You noted the region and supported link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/locations[AWS Local Zones locations] to create the network resources in. -* You read the link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/[Features] for each AWS Local Zones location. -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or UNIX)] in the AWS documentation. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. -* Add permission for the user who creates the cluster to modify the Local Zone group with `ec2:ModifyAvailabilityZoneGroup`. For example: -+ -.An example of a permissive IAM policy to attach to a user or role -[source,yaml] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "ec2:ModifyAvailabilityZoneGroup" - ], - "Effect": "Allow", - "Resource": "*" - } - ] -} ----- - -include::modules/cluster-limitations-local-zone.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/understanding-persistent-storage.html#pvc-storage-class_understanding-persistent-storage[Storage classes] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc-localzone.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc-localzone.adoc[leveloffset=+2] - -include::modules/installation-aws-add-local-zone-locations.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-subnet-localzone.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-subnet-localzone.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/[AWS Local Zones features] in the AWS documentation for more information about AWS Local Zones and the supported instances types and services. - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] -// Suggest to standarize edge-pool's specific files with same prefixes, like: machine-edge-pool-[...] or compute-edge-pool-[...] 
(which is more compatible with install-config.yaml/compute) -include::modules/machines-edge-machine-pool.adoc[leveloffset=+2] -include::modules/edge-machine-pools-aws-local-zones.adoc[leveloffset=+3] - -[role="_additional-resources"] -.Additional resources - -* xref:../../networking/changing-cluster-network-mtu.adoc#mtu-value-selection_changing-cluster-network-mtu[Changing the MTU for the cluster network] -* xref:../../networking/changing-cluster-network-mtu.adoc#nw-ovn-ipsec-enable_configuring-ipsec-ovn[Enabling IPsec encryption] - -include::modules/install-creating-install-config-aws-local-zones.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -//include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//Put this back if QE validates it. - -// Verify removal due to automation. -// include::modules/installation-localzone-generate-k8s-manifest.adoc[leveloffset=+2] - - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -.Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#installation-extend-edge-nodes-aws-local-zones_post-install-cluster-tasks[Creating user workloads in AWS Local Zones] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/machine-edge-pool-review-nodes.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -[id="installing-aws-localzone-next-steps"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#installation-extend-edge-nodes-aws-local-zones_post-install-cluster-tasks[Creating user workloads in AWS Local Zones]. -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
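As a supplementary illustration of the Local Zone opt-in step that the prerequisites above call out (the `ec2:ModifyAvailabilityZoneGroup` permission), the region and zone group name below are examples only.

[source,terminal]
----
# List Local Zones available in the region, including zones that are not yet opted in.
$ aws ec2 describe-availability-zones --region us-east-1 \
    --all-availability-zones --filters Name=zone-type,Values=local-zone

# Opt the account in to an example Local Zone group.
$ aws ec2 modify-availability-zone-group --region us-east-1 \
    --group-name us-east-1-nyc-1 --opt-in-status opted-in
----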
diff --git a/installing/installing_aws/installing-aws-network-customizations.adoc b/installing/installing_aws/installing-aws-network-customizations.adoc deleted file mode 100644 index 9e7874e03cfa..000000000000 --- a/installing/installing_aws/installing-aws-network-customizations.adoc +++ /dev/null @@ -1,100 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-network-customizations"] -= Installing a cluster on AWS with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-aws-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Amazon Web Services (AWS) with customized network configuration options. By -customizing your network configuration, your cluster can coexist with existing -IP address allocations in your environment and integrate with existing MTU and -VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. 
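For reference, a minimal sketch of the `networking` stanza that you customize in the `install-config.yaml` file follows; the CIDR ranges are placeholders and must not overlap with existing IP address allocations in your environment.

[source,yaml]
----
networking:
  networkType: OVNKubernetes     # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14          # pod network, placeholder range
    hostPrefix: 23
  serviceNetwork:
  - 172.30.0.0/16                # service network, placeholder range
  machineNetwork:
  - cidr: 10.0.0.0/16            # must match the VPC CIDR
----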
-// TODO -// Concept that describes networking - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/nw-network-config.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using a Network Load Balancer (NLB) on AWS, see xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc#configuring-ingress-cluster-traffic-aws-network-load-balancer[Configuring Ingress cluster traffic on AWS using a Network Load Balancer]. -==== - -include::modules/nw-aws-nlb-new-cluster.adoc[leveloffset=+1] - -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. -==== - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
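As a sketch of the Network Load Balancer option mentioned above, a cluster can be installed with an Ingress Controller manifest similar to the following; treat the exact field layout as an assumption and verify it against the Ingress Operator documentation for your version.

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  endpointPublishingStrategy:
    type: LoadBalancerService
    loadBalancer:
      scope: External
      providerParameters:
        type: AWS
        aws:
          type: NLB              # use an AWS Network Load Balancer instead of a Classic Load Balancer
----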
diff --git a/installing/installing_aws/installing-aws-outposts-remote-workers.adoc b/installing/installing_aws/installing-aws-outposts-remote-workers.adoc
deleted file mode 100644
index 9655931e0278..000000000000
--- a/installing/installing_aws/installing-aws-outposts-remote-workers.adoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:_content-type: ASSEMBLY
-[id="installing-aws-outposts-remote-workers"]
-= Installing a cluster on AWS with remote workers on AWS Outposts
-include::_attributes/common-attributes.adoc[]
-:context: installing-aws-outposts-remote-workers
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a cluster on
-Amazon Web Services (AWS) with remote workers running in AWS Outposts.
-You can achieve this by customizing the default AWS installation and performing some manual steps.
-
-For more information about AWS Outposts, see the link:https://docs.aws.amazon.com/outposts/index.html[AWS Outposts Documentation].
-
-[IMPORTANT]
-====
-To install a cluster with remote workers in AWS Outposts, all worker instances must be located within the same Outpost instance and cannot be located in an AWS region. The cluster cannot have instances in both AWS Outposts and an AWS region. In addition, control plane nodes must not be schedulable.
-====
-
-== Prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster.
-* You are familiar with the instance types that are supported in the AWS Outpost instance you use. You can validate them with the link:https://docs.aws.amazon.com/cli/latest/reference/outposts/get-outpost-instance-types.html[get-outpost-instance-types AWS CLI command].
-* You are familiar with the AWS Outpost instance details, such as `OutpostArn` and `AvailabilityZone`. You can validate these details with the link:https://docs.aws.amazon.com/cli/latest/reference/outposts/list-outposts.html[list-outposts AWS CLI command].
-+
-[IMPORTANT]
-====
-Because the cluster uses the provided AWS credentials to create AWS resources for its entire life cycle, the credentials must be key-based and long-lived. Therefore, if you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. For more information about generating the appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program.
-====
-* You have access to an existing Amazon Virtual Private Cloud (VPC) in Amazon Web Services (AWS). See the section "About using a custom VPC" for more information.
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials].
-
-include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1]
-
-include::modules/cluster-entitlements.adoc[leveloffset=+1]
-
-include::modules/ssh-agent-using.adoc[leveloffset=+1]
-
-include::modules/installation-obtaining-installer.adoc[leveloffset=+1]
-
-include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+1]
-
-include::modules/installation-identify-supported-aws-outposts-instance-types.adoc[leveloffset=+1]
-
-include::modules/installation-initializing.adoc[leveloffset=+1]
-
-include::modules/installation-configuration-parameters.adoc[leveloffset=+2]
-
-include::modules/installation-aws-config-yaml.adoc[leveloffset=+2]
-
-include::modules/installation-aws-editing-manifests.adoc[leveloffset=+1]
-
-include::modules/installation-launching-installer.adoc[leveloffset=+1]
-
-include::modules/cli-installing-cli.adoc[leveloffset=+1]
-
-include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
-
-include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1]
-
-include::modules/cluster-telemetry.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console.
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service.
-
-== Cluster limitations
-
-[IMPORTANT]
-====
-Network Load Balancer (NLB) and Classic Load Balancer are not supported on AWS Outposts. After the cluster is created, all load balancers are created in the AWS region. To use load balancers created inside the Outpost instance, you must use Application Load Balancers. You can use the AWS Load Balancer Operator to achieve that goal.
-
-If you want to use a public subnet located in the Outpost instance for the ALB, you must remove the special tag (`kubernetes.io/cluster/.*-outposts: owned`) that was added earlier during the VPC creation. Removing this tag prevents you from creating new services of type LoadBalancer (Network Load Balancer).
-
-See xref:../../networking/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc[Understanding the AWS Load Balancer Operator] for more information.
-====
-
-[IMPORTANT]
-====
-Persistent storage using AWS Elastic Block Store limitations
-
-* AWS Outposts does not support Amazon Elastic Block Store (EBS) gp3 volumes. After installation, the cluster includes two storage classes, `gp3-csi` and `gp2-csi`, with `gp3-csi` as the default storage class. Always use `gp2-csi` on Outposts. You can change the default storage class by using the following OpenShift CLI (`oc`) commands:
-+
-[source,terminal]
-----
-$ oc annotate --overwrite storageclass gp3-csi storageclass.kubernetes.io/is-default-class=false
-$ oc annotate --overwrite storageclass gp2-csi storageclass.kubernetes.io/is-default-class=true
-----
-* To create a volume in the Outpost instance, the CSI driver determines the Outpost ARN based on the topology keys stored on the `CSINode` objects.
To ensure that the CSI driver uses the correct topology values, use the `WaitForFirstConsumer` volume binding mode and do not set allowed topologies on any new storage class that you create.
-====
-
-== Next steps
-
-* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation].
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting].
-* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials].
diff --git a/installing/installing_aws/installing-aws-private.adoc b/installing/installing_aws/installing-aws-private.adoc
deleted file mode 100644
index 5dfe06670e8f..000000000000
--- a/installing/installing_aws/installing-aws-private.adoc
+++ /dev/null
@@ -1,75 +0,0 @@
-:_content-type: ASSEMBLY
-[id="installing-aws-private"]
-= Installing a private cluster on AWS
-include::_attributes/common-attributes.adoc[]
-:context: installing-aws-private
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a private cluster into an existing VPC on Amazon Web Services (AWS). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify
-parameters in the `install-config.yaml` file before you install the cluster.
-
-== Prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster.
-+
-[IMPORTANT]
-====
-If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program.
-====
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials].
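Returning to the AWS Outposts storage guidance above, a new storage class that follows it might look like the following sketch; the class name is hypothetical, and the binding mode must be `WaitForFirstConsumer` so that the driver resolves the Outpost topology from the node that the pod is scheduled to.

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gp2-outposts                       # hypothetical name
provisioner: ebs.csi.aws.com               # AWS EBS CSI driver
parameters:
  type: gp2                                # gp3 volumes are not supported on Outposts
volumeBindingMode: WaitForFirstConsumer    # let the driver derive the Outpost ARN from the node
# Do not set allowedTopologies, per the guidance above.
----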
-
-include::modules/private-clusters-default.adoc[leveloffset=+1]
-
-include::modules/private-clusters-about-aws.adoc[leveloffset=+2]
-
-include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1]
-
-include::modules/cluster-entitlements.adoc[leveloffset=+1]
-
-include::modules/ssh-agent-using.adoc[leveloffset=+1]
-
-include::modules/installation-obtaining-installer.adoc[leveloffset=+1]
-
-include::modules/installation-initializing-manual.adoc[leveloffset=+1]
-
-include::modules/installation-configuration-parameters.adoc[leveloffset=+2]
-
-include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2]
-
-include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2]
-include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2]
-
-include::modules/installation-aws-config-yaml.adoc[leveloffset=+2]
-
-include::modules/installation-configure-proxy.adoc[leveloffset=+2]
-
-include::modules/installation-launching-installer.adoc[leveloffset=+1]
-
-include::modules/cli-installing-cli.adoc[leveloffset=+1]
-
-include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
-
-include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console.
-
-include::modules/cluster-telemetry.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service.
-
-== Next steps
-
-* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation].
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting].
-* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials].
diff --git a/installing/installing_aws/installing-aws-secret-region.adoc b/installing/installing_aws/installing-aws-secret-region.adoc
deleted file mode 100644
index 0247f5f4f9bb..000000000000
--- a/installing/installing_aws/installing-aws-secret-region.adoc
+++ /dev/null
@@ -1,78 +0,0 @@
-:_content-type: ASSEMBLY
-[id="installing-aws-secret-region"]
-= Installing a cluster on AWS into a Secret or Top Secret Region
-include::_attributes/common-attributes.adoc[]
-:context: installing-aws-secret-region
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) into the following secret regions:
-
-* Secret Commercial Cloud Services (SC2S)
-* Commercial Cloud Services (C2S)
-
-To configure a cluster in either region, you change parameters in the `install-config.yaml` file before you install the cluster.
-
-[id="prerequisites_installing-aws-secret-region"]
-== Prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multifactor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-aws-about-government-region.adoc[leveloffset=+1] - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] -include::modules/private-clusters-about-aws.adoc[leveloffset=+2] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-supported-aws-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-aws-secret-region_console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-aws-secret-region_telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-aws-secret-region"] -== Next steps -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. 
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-three-node.adoc b/installing/installing_aws/installing-aws-three-node.adoc deleted file mode 100644 index 4bc6d680896c..000000000000 --- a/installing/installing_aws/installing-aws-three-node.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-three-node"] -= Installing a three-node cluster on AWS -include::_attributes/common-attributes.adoc[] -:context: installing-aws-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Amazon Web Services (AWS). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -[NOTE] -==== -Deploying a three-node cluster using an AWS Marketplace image is not supported. -==== - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] -* xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] diff --git a/installing/installing_aws/installing-aws-user-infra.adoc b/installing/installing_aws/installing-aws-user-infra.adoc deleted file mode 100644 index 3969e2298acf..000000000000 --- a/installing/installing_aws/installing-aws-user-infra.adoc +++ /dev/null @@ -1,222 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-user-infra"] -= Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates -include::_attributes/common-attributes.adoc[] -:context: installing-aws-user-infra -:platform: AWS - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) that uses infrastructure that you provide. - -One way to create this infrastructure is to use the provided CloudFormation templates. You can modify the templates to customize your infrastructure or use the information that they contain to create AWS objects according to your company's policies. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or UNIX)] in the AWS documentation. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-supported-aws-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-user-infra-requirements.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+2] - -include::modules/installation-aws-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-creating-aws-dns.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-dns.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -* You can view details about your hosted zones by navigating to the link:https://console.aws.amazon.com/route53/[AWS Route 53 console]. - -* See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html[Listing public hosted zones] in the AWS documentation for more information about listing public hosted zones. - -include::modules/installation-creating-aws-security.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-security.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. 
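The CloudFormation-based steps above all follow the same pattern; as a generic, hedged example with placeholder stack, template, and parameter file names:

[source,terminal]
----
# Launch a stack from one of the provided templates (placeholder file and stack names).
# The IAM capability is only needed for templates that create IAM resources.
$ aws cloudformation create-stack --stack-name <cluster_name>-vpc \
    --template-body file://vpc-template.yaml \
    --parameters file://vpc-parameters.json \
    --capabilities CAPABILITY_NAMED_IAM

# Wait for the stack to finish and review its outputs.
$ aws cloudformation describe-stacks --stack-name <cluster_name>-vpc \
    --query 'Stacks[0].Outputs'
----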
- -include::modules/installation-aws-ami-stream-metadata.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-aws-regions-with-no-ami.adoc[leveloffset=+2] - -include::modules/installation-aws-upload-custom-rhcos-ami.adoc[leveloffset=+2] - -include::modules/installation-creating-aws-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-bootstrap.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -* See xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installation-aws-user-infra-rhcos-ami_installing-aws-user-infra[{op-system} AMIs for the AWS infrastructure] for details about the {op-system-first} AMIs for the AWS zones. - -include::modules/installation-creating-aws-control-plane.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-control-plane.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-creating-aws-worker.adoc[leveloffset=+1] - -//// -[id="installing-workers-aws-user-infra"] -== Creating worker nodes - -You can either manually create worker nodes or use a MachineSet to create worker -nodes after the cluster deploys. If you use a MachineSet to create and maintain -the workers, you can allow the cluster to manage them. This allows you to easily -scale, manage, and upgrade your workers. -//// - -include::modules/installation-cloudformation-worker.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* You can view details about the CloudFormation stacks that you create by navigating to the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. - -include::modules/installation-aws-user-infra-bootstrap.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.html#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for details about monitoring the installation, bootstrap, and control plane logs as an {product-title} installation progresses. - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#gathering-bootstrap-diagnostic-data_troubleshooting-installations[Gathering bootstrap node diagnostic data] for information about troubleshooting issues related to the bootstrap process. - -* You can view details about the running instances that are created by using the link:https://console.aws.amazon.com/ec2[AWS EC2 console]. - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -You can configure registry storage for user-provisioned infrastructure in AWS to deploy {product-title} to hidden regions. 
See xref:../../registry/configuring_registry_storage/configuring-registry-storage-aws-user-infrastructure.adoc#configuring-registry-storage-aws-user-infrastructure[Configuring the registry for AWS user-provisioned infrastructure] for more information. - -include::modules/registry-configuring-storage-aws-user-infra.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-aws-user-infra-delete-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-installation.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -[role="_additional-resources"] -[id="installing-aws-user-infra-additional-resources"] -== Additional resources - -* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks. - -[id="installing-aws-user-infra-next-steps"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/installing-aws-vpc.adoc b/installing/installing_aws/installing-aws-vpc.adoc deleted file mode 100644 index 53f1a4e4259f..000000000000 --- a/installing/installing_aws/installing-aws-vpc.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-aws-vpc"] -= Installing a cluster on AWS into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-aws-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Amazon Virtual Private Cloud (VPC) on Amazon Web Services (AWS). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. 
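As a concrete reference for the assembly above, the following is a minimal, hypothetical `install-config.yaml` excerpt for an installation into an existing VPC. The domain, cluster name, region, and subnet IDs are placeholders, and the full set of supported fields is described by the configuration parameters module that the assembly includes; this sketch only shows where the existing subnets are declared.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com              # placeholder base domain
metadata:
  name: my-cluster                   # placeholder cluster name
platform:
  aws:
    region: us-west-2                # region that contains the existing VPC
    subnets:                         # IDs of the existing private and public subnets to reuse
    - subnet-0aaaaaaaaaaaaaaaa
    - subnet-0bbbbbbbbbbbbbbbb
pullSecret: '{"auths": ...}'         # truncated placeholder
sshKey: ssh-ed25519 AAAA...          # truncated placeholder
----

Because the installation program does not create the VPC in this scenario, the listed subnets must already provide the availability zone coverage and routing that the cluster needs.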
diff --git a/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc b/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc deleted file mode 100644 index 5aea22557e62..000000000000 --- a/installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-aws-installer-provisioned"] -= Installing a cluster on AWS in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-aws-installer-provisioned - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Amazon Web Services (AWS) in a restricted network by creating an internal mirror of the installation release content on an existing Amazon Virtual Private Cloud (VPC). - -[id="prerequisites_installing-restricted-networks-aws-installer-provisioned"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in AWS. When installing to a restricted network using installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)] in the AWS documentation. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -If you are configuring a proxy, be sure to also review this site list. 
-==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/installation-custom-aws-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-aws-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-restricted-networks-aws-installer-provisioned"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_aws/installing-restricted-networks-aws.adoc b/installing/installing_aws/installing-restricted-networks-aws.adoc deleted file mode 100644 index 79133000e7fd..000000000000 --- a/installing/installing_aws/installing-restricted-networks-aws.adoc +++ /dev/null @@ -1,206 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-aws"] -= Installing a cluster on AWS in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-aws - -toc::[] - -In {product-title} version {product-version}, you can install a -cluster on Amazon Web Services (AWS) using infrastructure that you provide and -an internal mirror of the installation release content. 
- -[IMPORTANT] -==== -While you can install an {product-title} cluster by using mirrored installation -release content, your cluster still requires internet access to use the AWS APIs. -==== - -One way to create this infrastructure is to use the provided -CloudFormation templates. You can modify the templates to customize your -infrastructure or use the information that they contain to create AWS objects -according to your company's policies. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several CloudFormation templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[configured an AWS account] to host the cluster. -+ -[IMPORTANT] -==== -If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use key-based, long-lived credentials. To generate appropriate keys, see link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing Access Keys for IAM Users] in the AWS documentation. You can supply the keys when you run the installation program. -==== -* You downloaded the AWS CLI and installed it on your computer. See link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)] in the AWS documentation. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[manually create and maintain IAM credentials]. 
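To make the mirroring prerequisite above more concrete, the following hypothetical sketch shows how the `imageContentSources` data and the mirror registry certificate are typically carried in `install-config.yaml`. The registry host name, repository path, and certificate contents are placeholders; the real values come from the output of the mirroring procedure referenced in the prerequisites.

[source,yaml]
----
additionalTrustBundle: |                        # CA bundle that signs the mirror registry certificate
  -----BEGIN CERTIFICATE-----
  ...placeholder certificate data...
  -----END CERTIFICATE-----
imageContentSources:                            # generated when you mirror the release images
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4     # placeholder mirror repository
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----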
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-aws-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-aws-arm-tested-machine-types.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-supported-aws-machine-types.adoc[leveloffset=+2] - -include::modules/installation-aws-user-infra-requirements.adoc[leveloffset=+1] - -include::modules/installation-aws-permissions.adoc[leveloffset=+2] - -//You extract the installation program from the mirrored content. - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-generate-aws-user-infra-install-config.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html[Configuration and credential file settings] in the AWS documentation for more information about AWS profile and credential configuration. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-vpc.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-vpc.adoc[leveloffset=+2] - -include::modules/installation-creating-aws-dns.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-dns.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html[Listing public hosted zones] in the AWS documentation for more information about listing public hosted zones. - -include::modules/installation-creating-aws-security.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-security.adoc[leveloffset=+2] - -include::modules/installation-aws-ami-stream-metadata.adoc[leveloffset=+1] - -include::modules/installation-aws-user-infra-rhcos-ami.adoc[leveloffset=+1] - -include::modules/installation-creating-aws-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-cloudformation-bootstrap.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installation-aws-user-infra-rhcos-ami_installing-aws-user-infra[{op-system} AMIs for the AWS infrastructure] for details about the {op-system-first} AMIs for the AWS zones. 
-
-include::modules/installation-creating-aws-control-plane.adoc[leveloffset=+1]
-
-include::modules/installation-cloudformation-control-plane.adoc[leveloffset=+2]
-
-include::modules/installation-creating-aws-worker.adoc[leveloffset=+1]
-
-////
-[id="installing-workers-aws-user-infra"]
-== Creating worker nodes
-
-You can either manually create worker nodes or use a MachineSet to create worker nodes after the cluster deploys. If you use a MachineSet to create and maintain the workers, you can allow the cluster to manage them. This allows you to easily scale, manage, and upgrade your workers.
-////
-
-include::modules/installation-cloudformation-worker.adoc[leveloffset=+2]
-
-include::modules/installation-aws-user-infra-bootstrap.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for details about monitoring the installation, bootstrap, and control plane logs as an {product-title} installation progresses.
-
-* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#gathering-bootstrap-diagnostic-data_troubleshooting-installations[Gathering bootstrap node diagnostic data] for information about troubleshooting issues related to the bootstrap process.
-
-//You can install the CLI on the mirror host.
-
-include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
-
-include::modules/installation-approve-csrs.adoc[leveloffset=+1]
-
-include::modules/installation-operators-config.adoc[leveloffset=+1]
-
-include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2]
-
-include::modules/installation-registry-storage-config.adoc[leveloffset=+2]
-
-include::modules/registry-configuring-storage-aws-user-infra.adoc[leveloffset=+3]
-
-include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3]
-
-include::modules/installation-aws-user-infra-delete-bootstrap.adoc[leveloffset=+1]
-
-include::modules/installation-create-ingress-dns-records.adoc[leveloffset=+1]
-
-include::modules/installation-aws-user-infra-installation.adoc[leveloffset=+1]
-
-include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console.
-
-include::modules/cluster-telemetry.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service.
-
-[role="_additional-resources"]
-[id="installing-restricted-networks-aws-additional-resources"]
-== Additional resources
-
-* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks.
-
-[id="installing-restricted-networks-aws-next-steps"]
-== Next steps
-
-* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation].
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster].
-* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_aws/manually-creating-iam.adoc b/installing/installing_aws/manually-creating-iam.adoc deleted file mode 100644 index 11eaf4d626dc..000000000000 --- a/installing/installing_aws/manually-creating-iam.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-aws"] -= Manually creating IAM for AWS -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-aws - -//TO-DO: this should be one file for AWS, Azure, and GCP with conditions for specifics. - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -// AWS only. Condition out if combining topic for AWS/Azure/GCP. -* To learn how to use the CCO utility (`ccoctl`) to configure the CCO to use the AWS STS, see xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with STS]. - -// Not supported in Azure. Condition out if combining topic for AWS/Azure/GCP. -* To learn how to rotate or remove the administrator-level credential secret after installing {product-title}, see xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-rotate-remove-cloud-creds[Rotating or removing cloud provider credentials]. - -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. 
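As a point of reference for the module that follows, putting the CCO into manual mode is ordinarily a single field in `install-config.yaml`. The sketch below is illustrative only; every other value is a placeholder, and the credentials themselves are created by the manual procedure in the next module.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # placeholder
credentialsMode: Manual        # the CCO does not create or store component credentials in kube-system
metadata:
  name: my-cluster             # placeholder
# ...remaining installation configuration is unchanged...
----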
- -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -include::modules/mint-mode.adoc[leveloffset=+1] - -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -[id="manually-creating-iam-aws-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Installing a cluster quickly on AWS] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates] diff --git a/installing/installing_aws/modules b/installing/installing_aws/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/installing/installing_aws/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/installing/installing_aws/preparing-to-install-on-aws.adoc b/installing/installing_aws/preparing-to-install-on-aws.adoc deleted file mode 100644 index 18a9a35bd265..000000000000 --- a/installing/installing_aws/preparing-to-install-on-aws.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-aws"] -= Preparing to install on AWS -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-aws - -toc::[] - -[id="preparing-to-install-on-aws-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-aws"] -== Requirements for installing {product-title} on AWS - -Before installing {product-title} on Amazon Web Services (AWS), you must create an AWS account. See xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Configuring an AWS account] for details about configuring an account, account limits, account permissions, IAM user setup, and supported AWS regions. - -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS] for other options, including xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[configuring the Cloud Credential Operator (CCO) to use the Amazon Web Services Security Token Service (AWS STS)]. 
- -[id="choosing-an-method-to-install-ocp-on-aws"] -== Choosing a method to install {product-title} on AWS - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-aws-single-node"] -=== Installing a cluster on a single node - -Installing {product-title} on a single node alleviates some of the requirements for high availability and large scale clusters. However, you must address the xref:../../installing/installing_sno/install-sno-preparing-to-install-sno.adoc#install-sno-requirements-for-installing-on-a-single-node_install-sno-preparing[requirements for installing on a single node], and the xref:../../installing/installing_sno/install-sno-installing-sno.adoc#additional-requirements-for-installing-on-a-single-node-on-aws_install-sno-installing-sno-with-the-assisted-installer[additional requirements for installing on a single node on AWS]. After addressing the requirements for single node installation, use the xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a customized cluster on AWS] procedure to install the cluster. The xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-sno-installing-sno-manually[installing single-node OpenShift manually] section contains an exemplary `install-config.yaml` file when installing an {product-title} cluster on a single node. - -[id="choosing-an-method-to-install-ocp-on-aws-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on AWS infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_aws/installing-aws-default.adoc#installing-aws-default[Installing a cluster quickly on AWS]**: You can install {product-title} on AWS infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a customized cluster on AWS]**: You can install a customized cluster on AWS infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. 
- -* **xref:../../installing/installing_aws/installing-aws-network-customizations.adoc#installing-aws-network-customizations[Installing a cluster on AWS with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[Installing a cluster on AWS in a restricted network]**: You can install {product-title} on AWS on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. - -* **xref:../../installing/installing_aws/installing-aws-vpc.adoc#installing-aws-vpc[Installing a cluster on an existing Virtual Private Cloud]**: You can install {product-title} on an existing AWS Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_aws/installing-aws-private.adoc#installing-aws-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing AWS VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -* **xref:../../installing/installing_aws/installing-aws-government-region.adoc#installing-aws-government-region[Installing a cluster on AWS into a government or secret region]**: {product-title} can be deployed into AWS regions that are specifically designed for US government agencies at the federal, state, and local level, as well as contractors, educational institutions, and other US customers that must run sensitive workloads in the cloud. - -[id="choosing-an-method-to-install-ocp-on-aws-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on AWS infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[Installing a cluster on AWS infrastructure that you provide]**: You can install {product-title} on AWS infrastructure that you provide. You can use the provided CloudFormation templates to create stacks of AWS resources that represent each of the components required for an {product-title} installation. - -* **xref:../../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[Installing a cluster on AWS in a restricted network with user-provisioned infrastructure]**: You can install {product-title} on AWS infrastructure that you provide by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the AWS APIs. 
- -[id="preparing-to-install-on-aws-next-steps"] -== Next steps - -* xref:../../installing/installing_aws/installing-aws-account.adoc#installing-aws-account[Configuring an AWS account] diff --git a/installing/installing_aws/snippets b/installing/installing_aws/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_aws/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_aws/uninstalling-cluster-aws.adoc b/installing/installing_aws/uninstalling-cluster-aws.adoc deleted file mode 100644 index 1a856732afb2..000000000000 --- a/installing/installing_aws/uninstalling-cluster-aws.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-aws"] -= Uninstalling a cluster on AWS -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-aws - -toc::[] - -You can remove a cluster that you deployed to Amazon Web Services (AWS). - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-deleting-sts-resources.adoc[leveloffset=+1] - -include::modules/installation-aws-delete-cluster.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="installing-localzone-additional-resources"] -.Additional resources - -* See link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[Working with stacks] in the AWS documentation for more information about AWS CloudFormation stacks. -* link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#opt-in-local-zone[Opt into AWS Local Zones] -* link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/locations[AWS Local Zones available locations] -* link:https://aws.amazon.com/about-aws/global-infrastructure/localzones/features[AWS Local Zones features] \ No newline at end of file diff --git a/installing/installing_azure/_attributes b/installing/installing_azure/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_azure/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_azure/enabling-user-managed-encryption-azure.adoc b/installing/installing_azure/enabling-user-managed-encryption-azure.adoc deleted file mode 100644 index efa301c928c7..000000000000 --- a/installing/installing_azure/enabling-user-managed-encryption-azure.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="enabling-user-managed-encryption-azure"] -= Enabling user-managed encryption for Azure -include::_attributes/common-attributes.adoc[] -:context: enabling-user-managed-encryption-azure - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a user-managed encryption key in Azure. To enable this feature, you can prepare an Azure DiskEncryptionSet before installation, modify the `install-config.yaml` file, and then complete the installation. 
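For orientation before the preparation module that follows, the snippet below sketches how a prepared disk encryption set is typically referenced from `install-config.yaml`. The resource group, name, and subscription ID are placeholders, and the exact field layout should be confirmed against the installation configuration parameters for your release.

[source,yaml]
----
controlPlane:
  platform:
    azure:
      osDisk:
        diskEncryptionSet:              # the DiskEncryptionSet prepared before installation
          resourceGroup: example-des-rg # placeholder resource group
          name: example-des             # placeholder disk encryption set name
          subscriptionId: 00000000-0000-0000-0000-000000000000   # placeholder subscription ID
compute:
- name: worker
  platform:
    azure:
      osDisk:
        diskEncryptionSet:              # reuse the same set for compute machines
          resourceGroup: example-des-rg
          name: example-des
          subscriptionId: 00000000-0000-0000-0000-000000000000
----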
- -include::modules/installation-azure-preparing-diskencryptionsets.adoc[leveloffset=+1] - -[id="enabling-disk-encryption-sets-azure-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Install a cluster with customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[Install a cluster into an existing VNet on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Install a private cluster on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Install a cluster into an government region on installer-provisioned infrastructure] diff --git a/installing/installing_azure/images b/installing/installing_azure/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_azure/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_azure/installing-azure-account.adoc b/installing/installing_azure/installing-azure-account.adoc deleted file mode 100644 index 95f2c8420bfb..000000000000 --- a/installing/installing_azure/installing-azure-account.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-account"] -= Configuring an Azure account -include::_attributes/common-attributes.adoc[] -:context: installing-azure-account - -toc::[] - -Before you can install {product-title}, you must configure a Microsoft Azure -account. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to -resource name restrictions, and you cannot create resources that use certain -terms. For a list of terms that Azure restricts, see -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] -in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-network-config.adoc[leveloffset=+1] - -include::modules/installation-azure-increasing-limits.adoc[leveloffset=+1] - -include::modules/installation-azure-permissions.adoc[leveloffset=+1] - -include::modules/minimum-required-permissions-ipi-azure.adoc[leveloffset=+1] - -include::modules/installation-azure-service-principal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. - -include::modules/installation-azure-marketplace.adoc[leveloffset=+1] - -include::modules/installation-azure-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster on Azure. 
You can -xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[install a customized cluster] -or -xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[quickly install a cluster] with default options. diff --git a/installing/installing_azure/installing-azure-customizations.adoc b/installing/installing_azure/installing-azure-customizations.adoc deleted file mode 100644 index 13967aa1cd82..000000000000 --- a/installing/installing_azure/installing-azure-customizations.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-customizations"] -= Installing a cluster on Azure with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-customizations -:platform: Azure - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Microsoft Azure. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. 
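The customizations themselves are applied by editing `install-config.yaml` before you create the cluster. The following is a hypothetical excerpt only; the VM sizes, region, and resource group name are placeholders, and the supported values are listed in the configuration parameters and tested machine types modules included below.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com                           # placeholder
controlPlane:
  name: master
  platform:
    azure:
      type: Standard_D8s_v3                       # placeholder control plane VM size
  replicas: 3
compute:
- name: worker
  platform:
    azure:
      type: Standard_D4s_v3                       # placeholder compute VM size
  replicas: 3
metadata:
  name: my-cluster                                # placeholder
platform:
  azure:
    region: centralus                             # placeholder region
    baseDomainResourceGroupName: example-dns-rg   # placeholder resource group that hosts the DNS zone
----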
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-user-defined-tags-azure.adoc[leveloffset=+1] - -include::modules/querying-user-defined-tags-azure.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-default.adoc b/installing/installing_azure/installing-azure-default.adoc deleted file mode 100644 index 39dd5e3ea0da..000000000000 --- a/installing/installing_azure/installing-azure-default.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-default"] -= Installing a cluster quickly on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Microsoft Azure that uses the default configuration options. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-government-region.adoc b/installing/installing_azure/installing-azure-government-region.adoc deleted file mode 100644 index 6bed17ab0878..000000000000 --- a/installing/installing_azure/installing-azure-government-region.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-government-region"] -= Installing a cluster on Azure into a government region -include::_attributes/common-attributes.adoc[] -:context: installing-azure-government-region - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Microsoft Azure into a government region. To configure the government region, -you modify parameters in the `install-config.yaml` file before you install the -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated government region to deploy the cluster to. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/installation-azure-about-government-region.adoc[leveloffset=+1] - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-azure.adoc[leveloffset=+2] - -include::modules/installation-azure-user-defined-routing.adoc[leveloffset=+2] - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
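As a closing illustration for this assembly, a government-region deployment is usually distinguished in `install-config.yaml` by the cloud name and region. The excerpt below is a sketch with placeholder values rather than a complete configuration.

[source,yaml]
----
platform:
  azure:
    cloudName: AzureUSGovernmentCloud             # targets the Azure US Government endpoints
    region: usgovvirginia                         # placeholder government region
    baseDomainResourceGroupName: example-dns-rg   # placeholder
publish: Internal                                 # government clusters are commonly deployed as private clusters
----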
diff --git a/installing/installing_azure/installing-azure-network-customizations.adoc b/installing/installing_azure/installing-azure-network-customizations.adoc deleted file mode 100644 index b16643bce98c..000000000000 --- a/installing/installing_azure/installing-azure-network-customizations.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-network-customizations"] -= Installing a cluster on Azure with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program -provisions on Microsoft Azure. By customizing your network configuration, your -cluster can coexist with existing IP address allocations in your environment and -integrate with existing MTU and VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. Manual mode can also be used in environments where the cloud IAM APIs are not reachable. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. 
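For context before the Cluster Network Operator modules later in this assembly, the installation-time network choices are expressed in the `networking` stanza of `install-config.yaml`. The sketch below uses the common default ranges as placeholders; your values depend on the existing IP address allocations mentioned in the introduction.

[source,yaml]
----
networking:
  networkType: OVNKubernetes      # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14           # pod network range (placeholder)
    hostPrefix: 23                # subnet size allocated to each node
  machineNetwork:
  - cidr: 10.0.0.0/16             # range that contains the cluster machines (placeholder)
  serviceNetwork:
  - 172.30.0.0/16                 # service IP range (placeholder)
----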
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. -==== - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-private.adoc b/installing/installing_azure/installing-azure-private.adoc deleted file mode 100644 index 6e67fcf4d30a..000000000000 --- a/installing/installing_azure/installing-azure-private.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-private"] -= Installing a private cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing Azure Virtual Network (VNet) on Microsoft Azure. The installation program provisions the rest of the required infrastructure, which you can further customize. 
To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-azure.adoc[leveloffset=+2] - -include::modules/installation-azure-user-defined-routing.adoc[leveloffset=+2] - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
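Tying back to the `install-config.yaml` customization described at the start of this assembly, a private cluster in an existing VNet is expressed through the `publish` setting and the Azure platform fields. The following sketch uses placeholder resource names and assumes user-defined outbound routing:

[source,yaml]
----
publish: Internal
platform:
  azure:
    region: <region>
    networkResourceGroupName: <vnet_resource_group>
    virtualNetwork: <vnet_name>
    controlPlaneSubnet: <control_plane_subnet_name>
    computeSubnet: <compute_subnet_name>
    outboundType: UserDefinedRouting
----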
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_azure/installing-azure-three-node.adoc b/installing/installing_azure/installing-azure-three-node.adoc deleted file mode 100644 index d7c5e24d8bc4..000000000000 --- a/installing/installing_azure/installing-azure-three-node.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-three-node"] -= Installing a three-node cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Microsoft Azure. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -[NOTE] -==== -Deploying a three-node cluster using an Azure Marketplace image is not supported. -==== - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Installing a cluster on Azure with customizations] -* xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Installing a cluster on Azure using ARM templates] diff --git a/installing/installing_azure/installing-azure-user-infra.adoc b/installing/installing_azure/installing-azure-user-infra.adoc deleted file mode 100644 index c54858c8f497..000000000000 --- a/installing/installing_azure/installing-azure-user-infra.adoc +++ /dev/null @@ -1,143 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-user-infra"] -= Installing a cluster on Azure using ARM templates -include::_attributes/common-attributes.adoc[] -:context: installing-azure-user-infra -:platform: Azure - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure by using infrastructure that you provide. - -Several link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview[Azure Resource Manager] (ARM) templates are provided to assist in completing these steps or to help model your own. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several ARM templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster. -* You downloaded the Azure CLI and installed it on your computer. See link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest[Install the Azure CLI] in the Azure documentation. The documentation below was last tested using version `2.38.0` of the Azure CLI. Azure CLI commands might perform differently based on the version you use. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-azure-user-infra-config-project"] -== Configuring your Azure project - -Before you can install {product-title}, you must configure an Azure project to host it. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage] - -include::modules/installation-azure-network-config.adoc[leveloffset=+2] - -You can view Azure's DNS solution by visiting this xref:installation-azure-create-dns-zones_{context}[example for creating DNS zones]. - -include::modules/installation-azure-increasing-limits.adoc[leveloffset=+2] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-azure-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-azure.adoc[leveloffset=+2] -include::modules/installation-azure-service-principal.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. 
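As a rough illustration of the service principal creation that the `installation-azure-service-principal` module covers, an Azure CLI command of the following shape can be used. The role and scope shown here are assumptions; the module defines the exact roles that the installation program requires.

[source,terminal]
----
$ az ad sp create-for-rbac --role Contributor \
    --name <service_principal_name> \
    --scopes /subscriptions/<subscription_id>
----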
- -include::modules/installation-azure-regions.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables-arm-templates.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -include::modules/installation-azure-create-resource-group-and-identity.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-azure-create-dns-zones.adoc[leveloffset=+1] - -You can learn more about xref:installation-azure-network-config_{context}[configuring a public DNS zone in Azure] by visiting that section. 
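For the public DNS zone discussed above, the zone itself is typically created with the Azure CLI before installation, for example (the resource group and domain are placeholders):

[source,terminal]
----
$ az network dns zone create -g <base_domain_resource_group> -n <base_domain>
----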
- -include::modules/installation-creating-azure-vnet.adoc[leveloffset=+1] -include::modules/installation-arm-vnet.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-deploying-rhcos.adoc[leveloffset=+1] -include::modules/installation-arm-image-storage.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-dns.adoc[leveloffset=+1] -include::modules/installation-arm-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-bootstrap.adoc[leveloffset=+1] -include::modules/installation-arm-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-control-plane.adoc[leveloffset=+1] -include::modules/installation-arm-control-plane.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-worker.adoc[leveloffset=+1] -include::modules/installation-arm-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-azure-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service diff --git a/installing/installing_azure/installing-azure-vnet.adoc b/installing/installing_azure/installing-azure-vnet.adoc deleted file mode 100644 index 0cc586737a1e..000000000000 --- a/installing/installing_azure/installing-azure-vnet.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-vnet"] -= Installing a cluster on Azure into an existing VNet -include::_attributes/common-attributes.adoc[] -:context: installing-azure-vnet - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Azure Virtual Network (VNet) on Microsoft Azure. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[configured an Azure account] to host the cluster and determined the tested and validated region to deploy the cluster to. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. 
-* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[manually create and maintain IAM credentials]. -* If you use customer-managed encryption keys, you xref:../../installing/installing_azure/enabling-user-managed-encryption-azure.adoc#enabling-user-managed-encryption-azure[prepared your Azure environment for encryption]. - -include::modules/installation-about-custom-azure-vnet.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-azure-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-arm-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-azure-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more details about Accelerated Networking, see xref:../../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-azure-accelerated-networking_creating-machineset-azure[Accelerated Networking for Microsoft Azure VMs]. - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
diff --git a/installing/installing_azure/manually-creating-iam-azure.adoc b/installing/installing_azure/manually-creating-iam-azure.adoc deleted file mode 100644 index 879beea678f3..000000000000 --- a/installing/installing_azure/manually-creating-iam-azure.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-azure"] -= Manually creating IAM for Azure -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -[id="manually-creating-iam-azure-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Installing a cluster quickly on Azure] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/installing/installing_azure/modules b/installing/installing_azure/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_azure/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_azure/preparing-to-install-on-azure.adoc b/installing/installing_azure/preparing-to-install-on-azure.adoc deleted file mode 100644 index 0fdcaa209b57..000000000000 --- a/installing/installing_azure/preparing-to-install-on-azure.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-azure"] -= Preparing to install on Azure -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-azure - -toc::[] - -[id="preparing-to-install-on-azure-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
- -[id="requirements-for-installing-ocp-on-azure"] -== Requirements for installing {product-title} on Azure - -Before installing {product-title} on Microsoft Azure, you must configure an Azure account. See xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[Configuring an Azure account] for details about account configuration, account limits, public DNS zone configuration, required roles, creating service principals, and supported Azure regions. - -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure] for other options. - -[id="choosing-an-method-to-install-ocp-on-azure"] -== Choosing a method to install {product-title} on Azure - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-azure-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on Azure infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_azure/installing-azure-default.adoc#installing-azure-default[Installing a cluster quickly on Azure]**: You can install {product-title} on Azure infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_azure/installing-azure-customizations.adoc#installing-azure-customizations[Installing a customized cluster on Azure]**: You can install a customized cluster on Azure infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_azure/installing-azure-network-customizations.adoc#installing-azure-network-customizations[Installing a cluster on Azure with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[Installing a cluster on Azure into an existing VNet]**: You can install {product-title} on an existing Azure Virtual Network (VNet) on Azure. 
You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_azure/installing-azure-private.adoc#installing-azure-private[Installing a private cluster on Azure]**: You can install a private cluster into an existing Azure Virtual Network (VNet) on Azure. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -* **xref:../../installing/installing_azure/installing-azure-government-region.adoc#installing-azure-government-region[Installing a cluster on Azure into a government region]**: {product-title} can be deployed into Microsoft Azure Government (MAG) regions that are specifically designed for US government agencies at the federal, state, and local level, as well as contractors, educational institutions, and other US customers that must run sensitive workloads on Azure. - -[id="choosing-an-method-to-install-ocp-on-azure-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on Azure infrastructure that you provision, by using the following method: - -* **xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installing-azure-user-infra[Installing a cluster on Azure using ARM templates]**: You can install {product-title} on Azure by using infrastructure that you provide. You can use the provided Azure Resource Manager (ARM) templates to assist with an installation. - -[id="preparing-to-install-on-azure-next-steps"] -== Next steps - -* xref:../../installing/installing_azure/installing-azure-account.adoc#installing-azure-account[Configuring an Azure account] diff --git a/installing/installing_azure/snippets b/installing/installing_azure/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_azure/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_azure/uninstalling-cluster-azure.adoc b/installing/installing_azure/uninstalling-cluster-azure.adoc deleted file mode 100644 index 2b0264be291c..000000000000 --- a/installing/installing_azure/uninstalling-cluster-azure.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-azure"] -= Uninstalling a cluster on Azure -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-azure - -toc::[] - -You can remove a cluster that you deployed to Microsoft Azure. 
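In broad terms, the removal comes down to running the installation program's destroy command from the directory that holds the cluster's installation assets, for example:

[source,terminal]
----
$ ./openshift-install destroy cluster --dir <installation_directory> --log-level info
----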
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_azure_stack_hub/_attributes b/installing/installing_azure_stack_hub/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_azure_stack_hub/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/images b/installing/installing_azure_stack_hub/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/installing/installing_azure_stack_hub/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc deleted file mode 100644 index 222d71807560..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-account"] -= Configuring an Azure Stack Hub account -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-account - -toc::[] - -Before you can install {product-title}, you must configure a Microsoft Azure account. - -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-stack-hub-network-config.adoc[leveloffset=+1] - -include::modules/installation-azure-stack-hub-permissions.adoc[leveloffset=+1] - -include::modules/installation-azure-service-principal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. - -[id="next-steps_installing-azure-stack-hub-account"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster quickly on Azure Stack Hub]. -** Install an {product-title} cluster on Azure Stack Hub with user-provisioned infrastructure by following xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]. 
diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc deleted file mode 100644 index 299e756de4d7..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-default"] -= Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure Stack Hub with an installer-provisioned infrastructure. However, you must manually configure the `install-config.yaml` file to specify values that are specific to Azure Stack Hub. - -[NOTE] -==== -While you can select `azure` when using the installation program to deploy a cluster using installer-provisioned infrastructure, this option is only supported for the Azure Public Cloud. -==== - -[id="prerequisites_installing-azure-stack-hub-default"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You verified that you have approximately 16 GB of local disk space. Installing the cluster requires that you download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. Decompressing the VHD files requires this amount of local disk space. 
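As a sketch of the Azure Stack Hub-specific values that you add to the `install-config.yaml` file manually, the platform stanza takes roughly the following shape. All values are placeholders, and the included `installation-azure-stack-hub-config-yaml` module is the authoritative reference:

[source,yaml]
----
platform:
  azure:
    armEndpoint: https://management.<region>.<fqdn>
    region: <region>
    cloudName: AzureStackCloud
    baseDomainResourceGroupName: <resource_group>
    clusterOSImage: https://<storage_account>.blob.<endpoint>/<container>/rhcos.vhd
----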
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-cco"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#manually-maintained-credentials-upgrade_preparing-manual-creds-update[Updating cloud provider resources with manually maintained credentials] - -include::modules/azure-stack-hub-internal-ca.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-default-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-azure-stack-hub-default"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc deleted file mode 100644 index 393738d4fb93..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc +++ /dev/null @@ -1,88 +0,0 @@ -[id="installing-azure-stack-hub-network-customizations"] -= Installing a cluster on Azure Stack Hub with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a customized network configuration on infrastructure that the installation program provisions on Azure Stack Hub. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. 
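Network settings such as the MTU are carried in the Cluster Network Operator configuration that the operator modules later in this assembly describe. A minimal sketch of such a manifest, assuming the OVN-Kubernetes network type and illustrative values, might be:

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      mtu: 1400
      genevePort: 6081
----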
- -[NOTE] -==== -While you can select `azure` when using the installation program to deploy a cluster using installer-provisioned infrastructure, this option is only supported for the Azure Public Cloud. -==== - -[id="prerequisites_installing-azure-stack-hub-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You verified that you have approximately 16 GB of local disk space. Installing the cluster requires that you download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. Decompressing the VHD files requires this amount of local disk space. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-cco"] -.Additional resources -* xref:../../updating/updating_a_cluster/updating-cluster-web-console.adoc#manually-maintained-credentials-upgrade_updating-cluster-web-console[Updating a cluster using the web console] -* xref:../../updating/updating_a_cluster/updating-cluster-cli.adoc#manually-maintained-credentials-upgrade_updating-cluster-cli[Updating a cluster using the CLI] - -include::modules/azure-stack-hub-internal-ca.adoc[leveloffset=+1] - -//include::modules/installation-launching-installer.adoc[leveloffset=+1] -//Leaving this stubbed in case future might remove the requirement to manually configure the install configuration file. - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -include::modules/configuring-hybrid-ovnkubernetes.adoc[leveloffset=+1] - - -[NOTE] -==== -For more information on using Linux and Windows nodes in the same cluster, see xref:../../windows_containers/understanding-windows-container-workloads.adoc#understanding-windows-container-workloads[Understanding Windows container workloads]. 
-==== - - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/logging-in-by-using-the-web-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console]. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-azure-stack-hub-network-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-azure-stack-hub-network-customizations"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If necessary, you can xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#manually-removing-cloud-creds_cco-mode-mint[remove cloud provider credentials]. diff --git a/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc b/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc deleted file mode 100644 index f96cdaca9dee..000000000000 --- a/installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc +++ /dev/null @@ -1,121 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-azure-stack-hub-user-infra"] -= Installing a cluster on Azure Stack Hub using ARM templates -include::_attributes/common-attributes.adoc[] -:context: installing-azure-stack-hub-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Microsoft Azure Stack Hub by using infrastructure that you provide. - -Several link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview[Azure Resource Manager] (ARM) templates are provided to assist in completing these steps or to help model your own. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several ARM templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -[id="prerequisites_installing-azure-stack-hub-user-infra"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[configured an Azure Stack Hub account] to host the cluster. -* You downloaded the Azure CLI and installed it on your computer. See link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest[Install the Azure CLI] in the Azure documentation. The documentation below was tested using version `2.28.0` of the Azure CLI. Azure CLI commands might perform differently based on the version you use. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-azure-stack-hub-user-infra-config-project"] -== Configuring your Azure Stack Hub project - -Before you can install {product-title}, you must configure an Azure project to host it. - -[IMPORTANT] -==== -All Azure Stack Hub resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure Stack Hub restricts, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] in the Azure documentation. -==== - -include::modules/installation-azure-limits.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -include::modules/installation-azure-stack-hub-network-config.adoc[leveloffset=+2] - -You can view Azure's DNS solution by visiting this xref:installation-azure-create-dns-zones_{context}[example for creating DNS zones]. - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-azure-stack-hub-permissions.adoc[leveloffset=+2] -include::modules/installation-azure-service-principal.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* For more information about CCO modes, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator-modes[About the Cloud Credential Operator]. 
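The ARM template modules later in this assembly are deployed with the Azure CLI. As a generic illustration only (the template file and parameter names here are placeholders, not the names used by the provided templates):

[source,terminal]
----
$ az deployment group create \
    --resource-group <resource_group> \
    --template-file <template>.json \
    --parameters <parameter_name>=<value>
----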
- -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-initializing-manual.adoc[leveloffset=+2] -include::modules/installation-azure-stack-hub-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables-arm-templates.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] - -include::modules/installation-azure-create-resource-group-and-identity.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-uploading-rhcos.adoc[leveloffset=+1] - -include::modules/installation-azure-create-dns-zones.adoc[leveloffset=+1] - -You can learn more about xref:installation-azure-stack-hub-network-config_{context}[configuring a DNS zone in Azure Stack Hub] by visiting that section. - -include::modules/installation-creating-azure-vnet.adoc[leveloffset=+1] -include::modules/installation-arm-vnet.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-deploying-rhcos.adoc[leveloffset=+1] -include::modules/installation-arm-image-storage.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-dns.adoc[leveloffset=+1] -include::modules/installation-arm-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-bootstrap.adoc[leveloffset=+1] -include::modules/installation-arm-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-azure-control-plane.adoc[leveloffset=+1] -include::modules/installation-arm-control-plane.adoc[leveloffset=+2] - -include::modules/installation-azure-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-azure-worker.adoc[leveloffset=+1] -include::modules/installation-arm-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-azure-create-ingress-dns-records.adoc[leveloffset=+1] - -include::modules/installation-azure-user-infra-completing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service. diff --git a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc deleted file mode 100644 index 9beb6096fff2..000000000000 --- a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-azure-stack-hub"] -= Manually creating IAM for Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure-stack-hub - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 
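In the `install-config.yaml` file, manual mode is expressed through the top-level `credentialsMode` field; a minimal excerpt, with all other required fields omitted, looks like this:

[source,yaml]
----
apiVersion: v1
credentialsMode: Manual
----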
- -//// -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. -//// -// Until ASH supports other credential scenarios besides manual mode, the tone for this article will be manual mode use only. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -// I was going to update this but I think the assembly is no longer used and will ask install team if I can get rid of it entirely. -include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] - -[id="next-steps_manually-creating-iam-azure-stack-hub"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster quickly on Azure Stack Hub]. -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]. diff --git a/installing/installing_azure_stack_hub/modules b/installing/installing_azure_stack_hub/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/installing/installing_azure_stack_hub/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc deleted file mode 100644 index 2ca66634c32b..000000000000 --- a/installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-azure-stack-hub"] -= Preparing to install on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-azure-stack-hub - -toc::[] - -[id="preparing-to-install-on-ash-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You have installed Azure Stack Hub version 2008 or later. - -[id="requirements-for-installing-ocp-on-ash"] -== Requirements for installing {product-title} on Azure Stack Hub - -Before installing {product-title} on Microsoft Azure Stack Hub, you must configure an Azure account. - -See xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[Configuring an Azure Stack Hub account] for details about account configuration, account limits, DNS zone configuration, required roles, and creating service principals. 
- -[id="choosing-a-method-to-install-ocp-on-ash"] -== Choosing a method to install {product-title} on Azure Stack Hub - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-a-method-to-install-ocp-on-ash-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on Azure Stack Hub infrastructure that is provisioned by the {product-title} installation program, by using the following method: - -* **xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster on Azure Stack Hub with an installer-provisioned infrastructure]**: You can install {product-title} on Azure Stack Hub infrastructure that is provisioned by the {product-title} installation program. - -[id="choosing-a-method-to-install-ocp-on-ash-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on Azure Stack Hub infrastructure that you provision, by using the following method: - -* **xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]**: You can install {product-title} on Azure Stack Hub by using infrastructure that you provide. You can use the provided Azure Resource Manager (ARM) templates to assist with an installation. - -[id="preparing-to-install-on-ash-next-steps"] -== Next steps - -* xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc#installing-azure-stack-hub-account[Configuring an Azure Stack Hub account] diff --git a/installing/installing_azure_stack_hub/snippets b/installing/installing_azure_stack_hub/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_azure_stack_hub/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc deleted file mode 100644 index 72ede5bd654f..000000000000 --- a/installing/installing_azure_stack_hub/uninstalling-cluster-azure-stack-hub.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-azure-stack-hub"] -= Uninstalling a cluster on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: uninstall-cluster-azure-stack-hub - -toc::[] - -You can remove a cluster that you deployed to Azure Stack Hub. 
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal/_attributes b/installing/installing_bare_metal/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_bare_metal/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_bare_metal/images b/installing/installing_bare_metal/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_bare_metal/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc b/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc deleted file mode 100644 index 5ba1910e9cb2..000000000000 --- a/installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc +++ /dev/null @@ -1,197 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-bare-metal-network-customizations"] -= Installing a user-provisioned bare metal cluster with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-bare-metal-network-customizations - -toc::[] - -In {product-title} {product-version}, you can install a cluster on bare -metal infrastructure that you provision with customized network configuration -options. By customizing your network configuration, your cluster can coexist -with existing IP address allocations in your environment and integrate with -existing MTU and VXLAN configurations. - -When you customize {product-title} networking, you must set most of the network configuration parameters during installation. You can modify only `kubeProxy` network configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network] for more information about performing a restricted network installation on bare metal infrastructure that you provision. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
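The DNS and load balancing modules included below spell out the exact records and ports that user-provisioned infrastructure must provide. As a rough, illustrative sketch of the forward DNS records only (the cluster name, domain, and addresses are placeholders):

[source,text]
----
; Example forward records for cluster ocp4 in zone example.com
api.ocp4.example.com.            IN A 192.168.1.5
api-int.ocp4.example.com.        IN A 192.168.1.5
*.apps.ocp4.example.com.         IN A 192.168.1.6
bootstrap.ocp4.example.com.      IN A 192.168.1.96
control-plane0.ocp4.example.com. IN A 192.168.1.97
compute0.ocp4.example.com.       IN A 192.168.1.11
----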
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-three-node-cluster_installing-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-approve-csrs_installing-bare-metal-network-customizations[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-provisioned-validating-dns_installing-bare-metal-network-customizations[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* 
xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - 
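Among the modules included above are the steps that generate the installation assets: the installation program renders Kubernetes manifests, you add the Cluster Network Operator configuration, and then you produce the Ignition config files that the {op-system} machines consume. A rough command-line sketch of that flow, with the directory name as a placeholder:

[source,terminal]
----
$ ./openshift-install create manifests --dir <installation_directory>
# Add or edit the Cluster Network Operator manifest here (commonly documented as
# <installation_directory>/manifests/cluster-network-03-config.yml) before continuing.
$ ./openshift-install create ignition-configs --dir <installation_directory>
----

Note that `create ignition-configs` consumes the manifests directory, so make any network configuration changes before running it.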
-include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+2] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_bare_metal/installing-bare-metal.adoc b/installing/installing_bare_metal/installing-bare-metal.adoc deleted file mode 100644 index bc2b6acc29eb..000000000000 --- a/installing/installing_bare_metal/installing-bare-metal.adoc +++ /dev/null @@ -1,224 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-bare-metal"] -= Installing a user-provisioned cluster on bare metal -include::_attributes/common-attributes.adoc[] -:context: installing-bare-metal - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -bare metal infrastructure that you provision. - -[IMPORTANT] -==== -While you might be able to follow this procedure to deploy a cluster on -virtualized or cloud environments, you must be aware of additional -considerations for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] -before you attempt to install an {product-title} cluster in such an environment. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network] for more information about performing a restricted network installation on bare metal infrastructure that you provision. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-three-node-cluster_installing-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-approve-csrs_installing-bare-metal[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. 
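The certificate signing request approval referenced in the bullet above usually comes down to listing pending requests and approving them with the CLI. A minimal sketch (the CSR name is a placeholder):

[source,terminal]
----
$ oc get csr
$ oc adm certificate approve <csr_name>
----

Both the client and server CSRs for a new machine must be approved before the node reports `Ready`.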
- -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See 
xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. -* See xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc[Enabling cluster capabilities] for more information on enabling cluster capabilities that were disabled prior to installation. -* See xref:../../installing/cluster-capabilities.html#explanation_of_capabilities_cluster-capabilities[Optional cluster capabilities in {product-title} {product-version}] for more information about the features provided by each capability. - - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates. - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] for more information on using special `coreos.inst.*` arguments to direct the live installer. 
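The `coreos.inst.*` arguments mentioned in the bullet above are passed on the kernel command line to point the live installer at a target disk and an Ignition config. A representative sketch for a PXE-booted worker; the HTTP server, image name, and device path are placeholders and depend on your environment:

[source,terminal]
----
coreos.live.rootfs_url=http://<http_server>/<rootfs_image>.img \
coreos.inst.install_dev=/dev/sda \
coreos.inst.ignition_url=http://<http_server>/worker.ign
----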
- -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc b/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc deleted file mode 100644 index 6887619c59b3..000000000000 --- a/installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc +++ /dev/null @@ -1,226 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-bare-metal"] -= Installing a user-provisioned bare metal cluster on a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-bare-metal - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -bare metal infrastructure that you provision in a restricted network. 
- -[IMPORTANT] -==== -While you might be able to follow this procedure to deploy a cluster on -virtualized or cloud environments, you must be aware of additional -considerations for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] -before you attempt to install an {product-title} cluster in such an environment. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide ReadWriteMany access modes. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-three-node-cluster_installing-restricted-networks-bare-metal[Configuring a three-node cluster] for details about deploying three-node clusters in bare metal environments. -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-approve-csrs_installing-bare-metal[Approving the certificate signing requests for your machines] for more information about approving cluster certificate signing requests after installation. 
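The `imageContentSources` data called out in the prerequisites above is added to `install-config.yaml` so that the cluster pulls release images from the mirror instead of the internet. A representative sketch, with the mirror host and repository as placeholders; the `source` values shown are the usual upstream release repositories:

[source,yaml]
----
imageContentSources:
- mirrors:
  - <mirror_host_name>:<port>/<repository>/release
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - <mirror_host_name>:<port>/<repository>/release
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----

The mirror registry's CA certificate is typically supplied alongside this in the `additionalTrustBundle` field.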
- -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-requirements-user-infra_installing-bare-metal[Requirements for a cluster with user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#creating-machines-bare-metal_installing-bare-metal[Installing {op-system} and starting the {product-title} bootstrap process] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-host-names-dhcp-user-infra_installing-bare-metal[Setting the cluster node hostnames through DHCP] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-infra-machines-advanced_installing-bare-metal[Advanced RHCOS installation configuration] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-network-user-infra_installing-bare-metal[Networking requirements for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-user-provisioned-validating-dns_installing-bare-metal[Validating DNS resolution for user-provisioned infrastructure] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-dns-user-infra_installing-bare-metal[User-provisioned DNS requirements] -* xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
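The commented-out notes above refer to obtaining the installation program from the mirrored content rather than from the internet. One commonly documented way to do that is to extract the `openshift-install` binary from the mirrored release image; a sketch with placeholder values:

[source,terminal]
----
$ oc adm release extract -a <pull_secret_file> --command=openshift-install \
    <mirror_host_name>:<port>/<repository>/release:<tag>
----

Extracting the binary from the mirrored release keeps the installer and the mirrored images at the same version.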
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-load-balancing-user-infra_installing-bare-metal[Load balancing requirements for user-provisioned infrastructure] for more information on the API and application ingress load balancing requirements. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates] for more information about recovering kubelet certificates. - -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced-console-configuration.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc[leveloffset=+3] - -:boot-media: ISO image -:boot: iso -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -:boot-media: PXE environment -:boot: pxe -include::modules/installation-user-infra-machines-advanced-customizing-live.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc[leveloffset=+4] - -include::modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc[leveloffset=+4] -:boot-media!: -:boot!: - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/rhcos-enabling-multipath.adoc[leveloffset=+2] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#monitoring-installation-progress_troubleshooting-installations[Monitoring installation progress] for more information about monitoring the installation logs and retrieving diagnostic data if installation issues arise. 
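Once the machines are booted, the deployment module included above waits for the bootstrap and installation phases to finish. A minimal sketch of the two wait commands, with the asset directory as a placeholder:

[source,terminal]
----
$ ./openshift-install wait-for bootstrap-complete --dir <installation_directory> --log-level=info
$ ./openshift-install wait-for install-complete --dir <installation_directory>
----

After the bootstrap phase completes, you can remove the bootstrap machine and its load balancer entries before waiting for the full installation to complete.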
- -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/troubleshooting/troubleshooting-installations.adoc#installation-bootstrap-gather_troubleshooting-installations[Gathering logs from a failed installation] for details about gathering data in the event of a failed {product-title} installation. -* See xref:../../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for steps to check Operator pod health across the cluster and gather Operator logs for diagnosis. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-change-management-state.adoc[leveloffset=+3] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validating an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
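For the Next steps item above about a mirror registry with a trusted CA, one commonly documented pattern is to store the CA in a config map in the `openshift-config` namespace and reference it from the cluster image configuration. A sketch with placeholder names and paths; the config map key is the registry hostname, with `..` in place of `:` when a port is included:

[source,terminal]
----
$ oc create configmap registry-config \
    --from-file=<mirror_registry_hostname>..<port>=/path/to/ca.crt \
    -n openshift-config
$ oc patch image.config.openshift.io/cluster --type merge \
    --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}'
----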
diff --git a/installing/installing_bare_metal/modules b/installing/installing_bare_metal/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_bare_metal/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc b/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc deleted file mode 100644 index f828cc638052..000000000000 --- a/installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-bare-metal"] -= Preparing for bare metal cluster installation -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-bare-metal - -toc::[] - -[id="preparing_preparing-to-install-on-bare-metal"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -include::modules/virt-planning-bare-metal-cluster-for-ocp-virt.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../virt/getting_started/virt-getting-started.adoc#virt-getting-started[Getting started with {VirtProductName}] -* xref:../../virt/install/preparing-cluster-for-virt.adoc#preparing-cluster-for-virt[Preparing your cluster for {VirtProductName}] -* xref:../../networking/hardware_networks/about-sriov.adoc#about-sriov[About Single Root I/O Virtualization (SR-IOV) hardware networks] -* xref:../../virt/virtual_machines/vm_networking/virt-attaching-vm-to-sriov-network.adoc#virt-attaching-vm-to-sriov-network[Connecting a virtual machine to an SR-IOV network] - -include::modules/nw-sriov-dual-nic-con.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#agent-install-sample-config-bond-sriov_preparing-to-install-with-agent-based-installer[Example: Bonds and SR-IOV dual-nic node network configuration] - -* xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-dual-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Optional: Configuring host network interfaces for dual port NIC] - -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#bonding-multiple-sriov-network-interfaces-to-dual-port_installing-bare-metal[Bonding multiple SR-IOV network interfaces to a dual port NIC interface] - -[id="choosing-a-method-to-install-ocp-on-bare-metal"] -== Choosing a method to install {product-title} on bare metal - -The {product-title} installation program offers four methods for deploying a cluster: - -* *Interactive*: You can deploy a cluster with the web-based link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. This is the recommended approach for clusters with networks connected to the internet. The {ai-full} is the easiest way to install {product-title}, it provides smart defaults, and it performs pre-flight validations before installing the cluster. 
It also provides a RESTful API for automation and advanced configuration scenarios. - -* *Local Agent-based*: You can deploy a cluster locally with the xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[agent-based installer] for air-gapped or restricted networks. It provides many of the benefits of the {ai-full}, but you must download and configure the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. Configuration is done with a command-line interface. - -* *Automated*: You can xref:../../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[deploy a cluster on installer-provisioned infrastructure] that the cluster then maintains. The installer uses each cluster host's baseboard management controller (BMC) for provisioning. You can deploy clusters in connected, air-gapped, or restricted network environments. - -* *Full control*: You can deploy a cluster on xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[infrastructure that you prepare and maintain], which provides maximum customizability. You can deploy clusters in connected, air-gapped, or restricted network environments. - -The clusters have the following characteristics: - -* Highly available infrastructure with no single points of failure is available by default. -* Administrators maintain control over what updates are applied and when. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-a-method-to-install-ocp-on-bare-metal-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on bare metal infrastructure that is provisioned by the {product-title} installation program, by using the following method: - -* **xref:../../installing/installing_bare_metal_ipi/ipi-install-overview.adoc#ipi-install-overview[Installing an installer-provisioned cluster on bare metal]**: You can install {product-title} on bare metal by using installer provisioning. - -[id="choosing-a-method-to-install-ocp-on-bare-metal-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on bare metal infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[Installing a user-provisioned cluster on bare metal]**: You can install {product-title} on bare metal infrastructure that you provision. For a cluster that contains user-provisioned infrastructure, you must deploy all of the required machines. - -* **xref:../../installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[Installing a user-provisioned bare metal cluster with network customizations]**: You can install a bare metal cluster on user-provisioned infrastructure with network customizations. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. Most of the network customizations must be applied at the installation stage. 
- -* **xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[Installing a user-provisioned bare metal cluster on a restricted network]**: You can install a user-provisioned bare metal cluster on a restricted or disconnected network by using a mirror registry. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc b/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc deleted file mode 100644 index 907fdb418584..000000000000 --- a/installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="scaling-a-user-provisioned-cluster-with-the-bare-metal-operator"] -= Scaling a user-provisioned cluster with the Bare Metal Operator -include::_attributes/common-attributes.adoc[] -:context: scaling-a-user-provisioned-cluster-with-the-bare-metal-operator - -toc::[] - -After deploying a user-provisioned infrastructure cluster, you can use the Bare Metal Operator (BMO) and other metal3 components to scale bare-metal hosts in the cluster. This approach helps you to scale a user-provisioned cluster in a more automated way. - -include::modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc[leveloffset=+1] -include::modules/upi-prerequisites-for-scaling-a-upi-cluster.adoc[leveloffset=+2] -include::modules/upi-limitations-for-scaling-a-upi-cluster.adoc[leveloffset=+2] -include::modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc[leveloffset=+1] -include::modules/upi-provisioning-new-hosts-in-a-upi-cluster.adoc[leveloffset=+1] -include::modules/upi-managing-existing-hosts-in-a-upi-cluster.adoc[leveloffset=+1] -include::modules/upi-removing-hosts-from-a-upi-cluster.adoc[leveloffset=+1] - - diff --git a/installing/installing_bare_metal/snippets b/installing/installing_bare_metal/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_bare_metal/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/_attributes b/installing/installing_bare_metal_ipi/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_bare_metal_ipi/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/images b/installing/installing_bare_metal_ipi/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_bare_metal_ipi/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc b/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc deleted file mode 100644 index 7d9217fd1f34..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc +++ /dev/null @@ -1,41 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-expanding-the-cluster"] -= Expanding the cluster -include::_attributes/common-attributes.adoc[] -:context: ipi-install-expanding - -toc::[] - -After deploying an installer-provisioned {product-title} 
cluster, you can use the following procedures to expand the number of worker nodes. Ensure that each prospective worker node meets the prerequisites. - -[NOTE] -==== -Expanding the cluster using RedFish Virtual Media involves meeting minimum firmware requirements. See *Firmware requirements for installing with virtual media* in the *Prerequisites* section for additional details when expanding the cluster using RedFish Virtual Media. -==== - -include::modules/ipi-install-preparing-the-bare-metal-node.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Optional: Configuring host network interfaces in the install-config.yaml file] for details on configuring the NMState syntax. -* See xref:../../scalability_and_performance/managing-bare-metal-hosts.adoc#automatically-scaling-machines-to-available-bare-metal-hosts_managing-bare-metal-hosts[Automatically scaling machines to the number of available bare metal hosts] for details on automatically scaling machines. - -include::modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-the-unhealthy-etcd-member[Replacing an unhealthy etcd member] - -* xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[Backing up etcd] - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing] - -include::modules/ipi-install-preparing-to-deploy-with-virtual-media-on-the-baremetal-network.adoc[leveloffset=+1] - -include::modules/ipi-install-diagnosing-duplicate-mac-address.adoc[leveloffset=+1] - -include::modules/ipi-install-provisioning-the-bare-metal-node.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc b/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc deleted file mode 100644 index 8ad6e40b0335..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc +++ /dev/null @@ -1,117 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-installation-workflow"] -= Setting up the environment for an OpenShift installation -include::_attributes/common-attributes.adoc[] -:context: ipi-install-installation-workflow - -toc::[] - -include::modules/ipi-install-installing-rhel-on-the-provisioner-node.adoc[leveloffset=+1] - -include::modules/ipi-install-preparing-the-provisioner-node-for-openshift-install.adoc[leveloffset=+1] - -include::modules/ipi-install-configuring-networking.adoc[leveloffset=+1] - -include::modules/ipi-install-retrieving-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-creating-an-rhcos-images-cache.adoc[leveloffset=+1] - -[id="ipi-install-configuration-files"] -[id="additional-resources_config"] -== Configuring the install-config.yaml file - -include::modules/ipi-install-configuring-the-install-config-file.adoc[leveloffset=+2] - 
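The module included above builds out the `install-config.yaml` file for an installer-provisioned bare-metal cluster. The bare-metal specifics live under `platform.baremetal`; an abridged sketch with placeholder values follows (field names follow the installer's documented schema, but the exact set you need depends on your environment and version):

[source,yaml]
----
platform:
  baremetal:
    apiVIPs:
    - <api_ip>
    ingressVIPs:
    - <wildcard_ingress_ip>
    hosts:
    - name: openshift-master-0
      role: master
      bmc:
        address: ipmi://<out_of_band_ip>   # or a Redfish/virtual media URL
        username: <user>
        password: <password>
      bootMACAddress: <nic1_mac_address>
      rootDeviceHints:
        deviceName: /dev/sda
----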
-include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-dell-idrac.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc[leveloffset=+2] - -include::modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc[leveloffset=+2] - -include::modules/ipi-install-root-device-hints.adoc[leveloffset=+2] - -include::modules/ipi-install-setting-proxy-settings-within-install-config.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-host-network-interfaces-in-the-install-config.yaml-file.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-host-dual-network-interfaces-in-the-install-config.yaml-file.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_networking/configuring-network-bonding_configuring-and-managing-networking[Configuring network bonding] - -include::modules/ipi-install-configure-multiple-cluster-nodes.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc[leveloffset=+2] - -[id="ipi-install-manifest-configuration-files"] -== Manifest configuration files - -include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+2] - -include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+2] - -include::modules/ipi-install-deploying-routers-on-worker-nodes.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-the-bios.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_bare_metal_config"] -.Additional resources - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -include::modules/ipi-install-configuring-the-raid.adoc[leveloffset=+2] - -include::modules/ipi-install-configuring-storage-on-nodes.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_raid_config"] -.Additional resources - -* xref:../../post_installation_configuration/bare-metal-configuration.adoc#post-install-bare-metal-configuration[Bare metal configuration] - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/managing_storage_devices/index#partition-naming-scheme_disk-partitions[Partition naming scheme] - -include::modules/ipi-install-creating-a-disconnected-registry.adoc[leveloffset=+1] - -[discrete] -[id="prerequisites_ipi-disconnected-registry"] -=== Prerequisites - -* If you have already prepared a mirror registry for xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#prerequisites_installing-mirroring-installation-images[Mirroring images for a disconnected installation], you can skip directly to xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#ipi-modify-install-config-for-a-disconnected-registry_ipi-install-installation-workflow[Modify the install-config.yaml file to use the disconnected 
registry]. - -include::modules/ipi-install-preparing-a-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-install-mirroring-for-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-modify-install-config-for-a-disconnected-registry.adoc[leveloffset=+2] - -include::modules/ipi-install-validation-checklist-for-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-following-the-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-verifying-static-ip-address-configuration.adoc[leveloffset=+1] - -include::modules/ipi-preparing-reinstall-cluster-bare-metal.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_creating_manifest_ignition"] -== Additional resources -* xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installation-user-infra-generate-k8s-manifest-ignition_installing-bare-metal[{product-title} Creating the Kubernetes manifest and Ignition config files] -* xref:../../updating/understanding-upgrade-channels-release.adoc#understanding-upgrade-channels-releases[Understanding update channels and releases] diff --git a/installing/installing_bare_metal_ipi/ipi-install-overview.adoc b/installing/installing_bare_metal_ipi/ipi-install-overview.adoc deleted file mode 100644 index 734d0349e999..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-overview.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-overview"] -= Overview -include::_attributes/common-attributes.adoc[] -:context: ipi-install - -toc::[] - -Installer-provisioned installation on bare metal nodes deploys and configures the infrastructure that a {product-title} cluster runs on. This guide provides a methodology to achieving a successful installer-provisioned bare-metal installation. The following diagram illustrates the installation environment in phase 1 of deployment: - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png[Deployment phase one] - -For the installation, the key elements in the previous diagram are: - -- **Provisioner**: A physical machine that runs the installation program and hosts the bootstrap VM that deploys the controller of a new {product-title} cluster. -- **Bootstrap VM**: A virtual machine used in the process of deploying an {product-title} cluster. -- **Network bridges**: The bootstrap VM connects to the bare metal network and to the provisioning network, if present, via network bridges, `eno1` and `eno2`. - -In phase 2 of the deployment, the provisioner destroys the bootstrap VM automatically and moves the virtual IP addresses (VIPs) to the appropriate nodes. The API VIP moves to the control plane nodes and the Ingress VIP moves to the worker nodes. - -The following diagram illustrates phase 2 of deployment: - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png[Deployment phase two] - -After this point, the node used by the provisioner can be removed or repurposed. From here, all additional provisioning tasks are carried out by controllers. - -[IMPORTANT] -==== -The provisioning network is optional, but it is required for PXE booting. If you deploy without a provisioning network, you must use a virtual media baseboard management controller (BMC) addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. 
-==== diff --git a/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc b/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc deleted file mode 100644 index ae35096b1bfe..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-post-installation-configuration"] -= Installer-provisioned post-installation configuration -include::_attributes/common-attributes.adoc[] -:context: ipi-install-post-installation-configuration - -toc::[] - -After successfully deploying an installer-provisioned cluster, consider the following post-installation procedures. - -include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+1] - -include::modules/nw-enabling-a-provisioning-network-after-installation.adoc[leveloffset=+1] - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc b/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc deleted file mode 100644 index 1c4c9c5a7a85..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-prerequisites"] -= Prerequisites -include::_attributes/common-attributes.adoc[] -:context: ipi-install-prerequisites - -toc::[] - -Installer-provisioned installation of {product-title} requires: - -ifdef::openshift-origin[. One provisioner node with {op-system-first} installed. The provisioner can be removed after installation.] -ifndef::openshift-origin[. One provisioner node with {op-system-base-full} 8.x installed. The provisioner can be removed after installation.] -. Three control plane nodes -. Baseboard management controller (BMC) access to each node -. At least one network: -.. One required routable network -.. One optional provisioning network -.. One optional management network - -Before starting an installer-provisioned installation of {product-title}, ensure the hardware environment meets the following requirements. 
- -include::modules/ipi-install-node-requirements.adoc[leveloffset=+1] - -include::modules/virt-planning-bare-metal-cluster-for-ocp-virt.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../virt/install/preparing-cluster-for-virt.adoc#preparing-cluster-for-virt[Preparing your cluster for {VirtProductName}] -* xref:../../networking/hardware_networks/about-sriov.adoc#about-sriov[About Single Root I/O Virtualization (SR-IOV) hardware networks] -* xref:../../virt/virtual_machines/vm_networking/virt-attaching-vm-to-sriov-network.adoc#virt-attaching-vm-to-sriov-network[Connecting a virtual machine to an SR-IOV network] - -include::modules/ipi-install-firmware-requirements-for-installing-with-virtual-media.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -xref:../../installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc#unable-to-discover-new-bare-metal-hosts-using-the-bmc_ipi-install-troubleshooting[Unable to discover new bare metal hosts using the BMC] - -include::modules/ipi-install-network-requirements.adoc[leveloffset=+1] - -include::modules/ipi-install-configuring-nodes.adoc[leveloffset=+1] - -include::modules/ipi-install-out-of-band-management.adoc[leveloffset=+1] - -include::modules/ipi-install-required-data-for-installation.adoc[leveloffset=+1] - -include::modules/ipi-install-validation-checklist-for-nodes.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc b/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc deleted file mode 100644 index 7f5548f1e1b7..000000000000 --- a/installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="ipi-install-troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: ipi-install-troubleshooting - -toc::[] - - -== Troubleshooting the installer workflow - -Prior to troubleshooting the installation environment, it is critical to understand the overall flow of the installer-provisioned installation on bare metal. The diagrams below provide a troubleshooting flow with a step-by-step breakdown for the environment. - -image:flow1.png[Flow-Diagram-1] - -_Workflow 1 of 4_ illustrates a troubleshooting workflow when the `install-config.yaml` file has errors or the {op-system-first} images are inaccessible. Troubleshooting suggestions can be found at xref:ipi-install-troubleshooting-install-config_{context}[Troubleshooting `install-config.yaml`]. - -image:flow2.png[Flow-Diagram-2] - -_Workflow 2 of 4_ illustrates a troubleshooting workflow for xref:ipi-install-troubleshooting-bootstrap-vm_{context}[ bootstrap VM issues], xref:ipi-install-troubleshooting-bootstrap-vm-cannot-boot_{context}[ bootstrap VMs that cannot boot up the cluster nodes], and xref:ipi-install-troubleshooting-bootstrap-vm-inspecting-logs_{context}[ inspecting logs]. When installing an {product-title} cluster without the `provisioning` network, this workflow does not apply. - -image:flow3.png[Flow-Diagram-3] - -_Workflow 3 of 4_ illustrates a troubleshooting workflow for xref:ipi-install-troubleshooting-cluster-nodes-will-not-pxe_{context}[ cluster nodes that will not PXE boot]. If installing using RedFish Virtual Media, each node must meet minimum firmware requirements for the installer to deploy the node. See *Firmware requirements for installing with virtual media* in the *Prerequisites* section for additional details. 
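When a virtual media deployment stalls at this stage, it can also help to confirm that the BMC actually exposes Redfish virtual media before suspecting the installer. The following generic Redfish queries are a hedged sketch, not a documented {product-title} procedure; the credentials, BMC address, and manager ID are placeholders.

[source,terminal]
----
# List the managers that the BMC exposes over the Redfish API.
$ curl -k -u <username>:<password> https://<bmc_address>/redfish/v1/Managers/

# Inspect the virtual media devices of a specific manager, for example manager "1".
$ curl -k -u <username>:<password> https://<bmc_address>/redfish/v1/Managers/1/VirtualMedia/
----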
- -image:flow4.png[Flow-Diagram-4] - -_Workflow 4 of 4_ illustrates a troubleshooting workflow from -xref:ipi-install-troubleshooting-api-not-accessible_{context}[ a non-accessible API] to a xref:ipi-install-troubleshooting-reviewing-the-installation_{context}[validated installation]. - - -include::modules/ipi-install-troubleshooting-install-config.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-bootstrap-vm.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-bootstrap-vm-cannot-boot.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-bootstrap-vm-inspecting-logs.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-cluster-nodes-will-not-pxe.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting_unable-to-discover-new-bare-metal-hosts-using-the-bmc.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-api-not-accessible.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting_proc_worker-nodes-cannot-join-the-cluster.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-cleaning-up-previous-installations.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-registry-issues.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-misc-issues.adoc[leveloffset=+1] -include::modules/ipi-install-troubleshooting-failed-ignition-during-firstboot.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc[leveloffset=+2] -include::modules/ipi-install-troubleshooting-reviewing-the-installation.adoc[leveloffset=+1] - diff --git a/installing/installing_bare_metal_ipi/modules b/installing/installing_bare_metal_ipi/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_bare_metal_ipi/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_bare_metal_ipi/snippets b/installing/installing_bare_metal_ipi/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_bare_metal_ipi/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_gcp/_attributes b/installing/installing_gcp/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_gcp/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_gcp/images b/installing/installing_gcp/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_gcp/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_gcp/installing-gcp-account.adoc b/installing/installing_gcp/installing-gcp-account.adoc deleted file mode 100644 index d8344c97ab56..000000000000 --- a/installing/installing_gcp/installing-gcp-account.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-account"] -= Configuring a GCP project -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-account - -toc::[] - -Before you can install {product-title}, you must configure a -Google Cloud Platform (GCP) project to host it. 
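The modules included below describe each project requirement in detail. As a quick, hedged orientation only, configuring the project generally amounts to `gcloud` commands of the following shape; the project ID, service account name, key file name, and the broad `roles/owner` binding are placeholders and assumptions, so follow the included modules for the supported APIs and roles.

[source,terminal]
----
# Enable a representative subset of the required GCP APIs.
$ gcloud services enable compute.googleapis.com dns.googleapis.com \
    iam.googleapis.com cloudresourcemanager.googleapis.com --project <project_id>

# Create a service account for the installation program.
$ gcloud iam service-accounts create openshift-installer \
    --display-name="OpenShift installer" --project <project_id>

# Grant the service account a role. The Owner role is the simplest option;
# finer-grained roles are listed in the permissions modules.
$ gcloud projects add-iam-policy-binding <project_id> \
    --member="serviceAccount:openshift-installer@<project_id>.iam.gserviceaccount.com" \
    --role="roles/owner"

# Create a key file for the installation program to consume.
$ gcloud iam service-accounts keys create gcp-key.json \
    --iam-account="openshift-installer@<project_id>.iam.gserviceaccount.com"
----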
- -include::modules/installation-gcp-project.adoc[leveloffset=+1] - -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+1] - -include::modules/installation-gcp-dns.adoc[leveloffset=+1] - -include::modules/installation-gcp-limits.adoc[leveloffset=+1] - -include::modules/installation-gcp-service-account.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-create-iam_manually-creating-iam-gcp[Manually creating IAM] for more details about using manual credentials mode. - -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] - -include::modules/minimum-required-permissions-ipi-gcp.adoc[leveloffset=+2] - -include::modules/minimum-required-permissions-ipi-gcp-xpn.adoc[leveloffset=+2] - -include::modules/installation-gcp-regions.adoc[leveloffset=+1] - -== Next steps - -* Install an {product-title} cluster on GCP. You can -xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[install a customized cluster] -or xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[quickly install a cluster] -with default options. diff --git a/installing/installing_gcp/installing-gcp-customizations.adoc b/installing/installing_gcp/installing-gcp-customizations.adoc deleted file mode 100644 index 782a9759fb8e..000000000000 --- a/installing/installing_gcp/installing-gcp-customizations.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-customizations"] -= Installing a cluster on GCP with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-customizations -:platform: GCP - -toc::[] - -In {product-title} version {product-version}, you can install a customized -cluster on infrastructure that the installation program provisions on -Google Cloud Platform (GCP). To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
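As the introduction to this assembly states, you customize the installation by editing the `install-config.yaml` file before you create the cluster. The following minimal sketch shows the general GCP shape of that file; the domain, cluster name, machine type, region, and project ID are placeholders, and the configuration parameters module included below is the authoritative reference.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com                # public DNS zone domain, placeholder
metadata:
  name: mycluster                      # cluster name, placeholder
controlPlane:
  name: master
  replicas: 3
compute:
- name: worker
  replicas: 3
  platform:
    gcp:
      type: n2-standard-4              # example machine type, see the tested machine types module
platform:
  gcp:
    projectID: <project_id>
    region: us-central1
networking:
  networkType: OVNKubernetes           # optional; set explicitly only when customizing networking
pullSecret: '{"auths": ...}'           # paste your pull secret
sshKey: ssh-ed25519 AAAA...            # optional SSH public key
----

If you generate the file with `openshift-install create install-config`, most of these values are prompted for interactively, and you then edit only the parameters you need to customize.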
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-gcp-marketplace.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-default.adoc b/installing/installing_gcp/installing-gcp-default.adoc deleted file mode 100644 index ba1b623354c8..000000000000 --- a/installing/installing_gcp/installing-gcp-default.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-default"] -= Installing a cluster quickly on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-default - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -Google Cloud Platform (GCP) that uses the default configuration options. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-network-customizations.adoc b/installing/installing_gcp/installing-gcp-network-customizations.adoc deleted file mode 100644 index 6b01f8625928..000000000000 --- a/installing/installing_gcp/installing-gcp-network-customizations.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-network-customizations"] -= Installing a cluster on GCP with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program -provisions on Google Cloud Platform (GCP). By customizing your network -configuration, your cluster can coexist with existing IP address allocations in -your environment and integrate with existing MTU and VXLAN configurations. To -customize the installation, you modify parameters in the `install-config.yaml` -file before you install the cluster. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
diff --git a/installing/installing_gcp/installing-gcp-private.adoc b/installing/installing_gcp/installing-gcp-private.adoc deleted file mode 100644 index 9620370b0bdb..000000000000 --- a/installing/installing_gcp/installing-gcp-private.adoc +++ /dev/null @@ -1,80 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-private"] -= Installing a private cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
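As the introduction above notes, a private cluster is produced by modifying `install-config.yaml` before installation. As a hedged sketch, the parameters that most distinguish a private GCP cluster are the `publish` setting and the references to your existing VPC; the network and subnet names are placeholders, and the modules that follow document the exact fields.

[source,yaml]
----
platform:
  gcp:
    projectID: <project_id>
    region: us-central1
    network: <existing_vpc_name>
    controlPlaneSubnet: <control_plane_subnet_name>
    computeSubnet: <compute_subnet_name>
publish: Internal    # expose the cluster only on internal load balancers
----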
- -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-gcp.adoc[leveloffset=+2] - -include::modules/installation-about-custom-gcp-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-shared-vpc.adoc b/installing/installing_gcp/installing-gcp-shared-vpc.adoc deleted file mode 100644 index a05b11fb1b5f..000000000000 --- a/installing/installing_gcp/installing-gcp-shared-vpc.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-shared-vpc"] -= Installing a cluster on GCP into a shared VPC -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-shared-vpc -:FeatureName: Installing a cluster on GCP into a shared VPC - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP). In this installation method, the cluster is configured to use a VPC from a different GCP project. A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. 
You can communicate within the organization securely and efficiently by using internal IP addresses from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview in the GCP documentation]. - -The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="installation-gcp-shared-vpc-prerequisites_{context}"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -* You have a GCP host project which contains a shared VPC network. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. This project, known as the service project, must be attached to the host project. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#create-shared[Attaching service projects in the GCP documentation]. -* You have a GCP service account that has the xref:../../installing/installing_gcp/installing-gcp-account.adoc#minimum-required-permissions-ipi-gcp-xpn[required GCP permissions] in both the host and service projects. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-shared-vpc-config.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="installation-gcp-shared-vpc-next-steps_{context}"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. \ No newline at end of file diff --git a/installing/installing_gcp/installing-gcp-three-node.adoc b/installing/installing_gcp/installing-gcp-three-node.adoc deleted file mode 100644 index 63d7298e7ed6..000000000000 --- a/installing/installing_gcp/installing-gcp-three-node.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-three-node"] -= Installing a three-node cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Google Cloud Platform (GCP). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. - -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates] diff --git a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc deleted file mode 100644 index 97fadb727e5c..000000000000 --- a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +++ /dev/null @@ -1,167 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-user-infra-vpc"] -= Installing a cluster into a shared VPC on GCP using Deployment Manager templates -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-user-infra-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on Google Cloud Platform (GCP) that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed. - -A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IPs from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview] in the GCP documentation. 
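Before working through the steps that follow, it can be useful to confirm the shared VPC relationship from the command line. These generic `gcloud` commands are a hedged sketch rather than part of the documented procedure; the service project, host project, and network names are placeholders.

[source,terminal]
----
# Confirm which host project the service project is attached to.
$ gcloud compute shared-vpc get-host-project <service_project_id>

# List the subnets that the host project shares on the network you plan to use.
$ gcloud compute networks subnets list --project <host_project_id> \
    --filter="network:<shared_vpc_network>"
----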
- -The steps for performing a user-provided infrastructure installation into a shared VPC are outlined here. Several -link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in -completing these steps or to help model your own. You are also free to create -the required resources through other methods. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/csr-management.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-gcp-user-infra-config-project-vpc"] -== Configuring the GCP project that hosts your cluster - -Before you can install {product-title}, you must configure a Google Cloud -Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+3] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
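The modules included next define the required machine counts, the minimum resource requirements, and the tested machine types. As a small illustration only, a compute pool that uses a GCP custom machine type is expressed in `install-config.yaml` roughly as follows; the sizing is a placeholder and must still satisfy the minimum resource requirements.

[source,yaml]
----
compute:
- name: worker
  replicas: 3
  platform:
    gcp:
      type: custom-6-20480    # 6 vCPUs and 20480 MB of memory, placeholder sizing
----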
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-config-host-project-vpc.adoc[leveloffset=+1] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+3] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -//// -[role="_additional-resources"] -.Additional resources -//// - -[id="installation-gcp-user-infra-exporting-common-variables-vpc"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - -[id="installation-gcp-user-infra-vpc-adding-firewall-rules"] -== Adding ingress firewall rules -The cluster requires several firewall rules. 
If you do not use a shared VPC, these rules are created by the Ingress Controller via the GCP cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters. - -If you choose to create each rule based on events, you must create firewall rules after you provision the cluster and during the life of the cluster when the console notifies you that rules are missing. Events that are similar to the following event are displayed, and you must add the firewall rules that are required: - -[source,terminal] ----- -$ oc get events -n openshift-ingress --field-selector="reason=LoadBalancerManualChange" ----- - -.Example output -[source,terminal] ----- -Firewall change required by security admin: `gcloud compute firewall-rules create k8s-fw-a26e631036a3f46cba28f8df67266d55 --network example-network --description "{\"kubernetes.io/service-name\":\"openshift-ingress/router-default\", \"kubernetes.io/service-ip\":\"35.237.236.234\"}\" --allow tcp:443,tcp:80 --source-ranges 0.0.0.0/0 --target-tags exampl-fqzq7-master,exampl-fqzq7-worker --project example-project` ----- - -If you encounter issues when creating these rule-based events, you can configure the cluster-wide firewall rules while your cluster is running. - -include::modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc[leveloffset=+2] - -//include::modules/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-gcp-user-infra.adoc b/installing/installing_gcp/installing-gcp-user-infra.adoc deleted file mode 100644 index c4293fa39835..000000000000 --- a/installing/installing_gcp/installing-gcp-user-infra.adoc +++ /dev/null @@ -1,135 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-user-infra"] -= Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-user-infra -:platform: GCP - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide. - -The steps for performing a user-provided infrastructure install are outlined here. Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. 
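To make the Deployment Manager workflow concrete, the following hedged sketch shows how a template is typically applied after the installation files exist. The deployment name, configuration file name, and environment variable are illustrative placeholders, not the exact artifacts that the later sections generate.

[source,terminal]
----
# Extract the infrastructure ID that prefixes the cluster resources.
$ export INFRA_ID=$(jq -r .infraID <installation_directory>/metadata.json)

# Apply one of the Deployment Manager configurations.
$ gcloud deployment-manager deployments create ${INFRA_ID}-vpc \
    --config 01_vpc.yaml

# Review the resources that the deployment created.
$ gcloud deployment-manager deployments describe ${INFRA_ID}-vpc
----

If a deployment fails, deleting it with `gcloud deployment-manager deployments delete` removes the resources it created, which keeps retries clean.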
- -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the cloud provider and the installation process of {product-title}. Several Deployment Manager templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods; the templates are just an example. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/csr-management.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-gcp-user-infra-config-project"] -== Configuring your GCP project - -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it. - -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-gcp.adoc[leveloffset=+2] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installation-gcp-user-infra-adding-ingress_installing-gcp-user-infra[Optional: Adding the ingress DNS records] - -[id="installation-gcp-user-infra-exporting-common-variables"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See 
xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../networking/ingress-operator.adoc#nw-ingress-controller-configuration-gcp-global-access_configuring-ingress[Configure Global Access for an Ingress Controller on GCP]. diff --git a/installing/installing_gcp/installing-gcp-vpc.adoc b/installing/installing_gcp/installing-gcp-vpc.adoc deleted file mode 100644 index 9d63c3642a84..000000000000 --- a/installing/installing_gcp/installing-gcp-vpc.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-gcp-vpc"] -= Installing a cluster on GCP into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-gcp-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify -parameters in the `install-config.yaml` file before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
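The module that follows describes the requirements that your existing VPC must meet. As a hedged sketch, you can inspect the VPC and its subnets with generic `gcloud` commands before you start; the names and region are placeholders.

[source,terminal]
----
# Review the existing VPC.
$ gcloud compute networks describe <vpc_name> --project <project_id>

# Review the subnets that the cluster will use.
$ gcloud compute networks subnets describe <control_plane_subnet_name> \
    --region <region> --project <project_id>
$ gcloud compute networks subnets describe <compute_subnet_name> \
    --region <region> --project <project_id>
----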
- -include::modules/installation-custom-gcp-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - - -[role="_additional-resources"] -== Additional resources - -* xref:../../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-gcp[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc b/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc deleted file mode 100644 index 4aeea86ef656..000000000000 --- a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-gcp-installer-provisioned"] -= Installing a cluster on GCP in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-gcp-installer-provisioned - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Google Cloud Platform (GCP) in a restricted network by creating an internal mirror of the installation release content on an existing Google Virtual Private Cloud (VPC). - -[IMPORTANT] -==== -You can install an {product-title} cluster by using mirrored installation release content, but your cluster will require internet access to use the GCP APIs. 
-==== - -[id="prerequisites_installing-restricted-networks-gcp-installer-provisioned"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in GCP. While installing a cluster in a restricted network that uses installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. While you might need to grant access to more sites, you must grant access to `*.googleapis.com` and `accounts.google.com`. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. 
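The prerequisites above reference the `imageContentSources` data that the mirroring procedure produces. As a hedged sketch, that data and the mirror registry certificate usually end up in `install-config.yaml` in roughly the following shape; the registry host and repository path are placeholders, and the exact stanzas come from the output of your mirroring procedure.

[source,yaml]
----
additionalTrustBundle: |
  -----BEGIN CERTIFICATE-----
  <certificate_for_the_mirror_registry>
  -----END CERTIFICATE-----
imageContentSources:
- mirrors:
  - <mirror_registry_host>:5000/<repository>/release
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - <mirror_registry_host>:5000/<repository>/release
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
----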
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] - -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] - -include::modules/installation-gcp-config-yaml.adoc[leveloffset=+2] - -include::modules/nw-gcp-installing-global-access-configuration.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[id="next-steps_installing-restricted-networks-gcp-installer-provisioned"] -== Next steps - -* xref:../../installing/validating-an-installation.adoc#validating-an-installation[Validate an installation]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/installing-restricted-networks-gcp.adoc b/installing/installing_gcp/installing-restricted-networks-gcp.adoc deleted file mode 100644 index 4cc8a767501e..000000000000 --- a/installing/installing_gcp/installing-restricted-networks-gcp.adoc +++ /dev/null @@ -1,144 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-gcp"] -= Installing a cluster on GCP in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-gcp - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide and an internal mirror of the installation release content. 
- -[IMPORTANT] -==== -While you can install an {product-title} cluster by using mirrored installation release content, your cluster still requires internet access to use the GCP APIs. -==== - -The steps for performing a user-provisioned infrastructure installation are outlined here. Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure that you provide requires knowledge of the cloud provider and the installation process of {product-title}. The Deployment Manager templates are provided only as an example; you can also create the required resources through other methods. -==== - - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. While you might need to grant access to more sites, you must grant access to `*.googleapis.com` and `accounts.google.com`. -* If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, you can xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[manually create and maintain IAM credentials]. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-restricted-networks-gcp-user-infra-config-project"] -== Configuring your GCP project - -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it.
- -include::modules/installation-gcp-project.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] -include::modules/installation-gcp-dns.adoc[leveloffset=+2] -include::modules/installation-gcp-limits.adoc[leveloffset=+2] -include::modules/installation-gcp-service-account.adoc[leveloffset=+2] -include::modules/installation-gcp-permissions.adoc[leveloffset=+2] -include::modules/minimum-required-permissions-upi-gcp.adoc[leveloffset=+2] -include::modules/installation-gcp-regions.adoc[leveloffset=+2] -include::modules/installation-gcp-install-cli.adoc[leveloffset=+2] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-gcp-tested-machine-types.adoc[leveloffset=+2] -include::modules/installation-using-gcp-custom-machine-types.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate.adoc[leveloffset=+1] -include::modules/installation-disk-partitioning-upi-templates.adoc[leveloffset=+2] -include::modules/installation-initializing.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] -include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installation-gcp-user-infra-adding-ingress_installing-gcp-user-infra[Optional: Adding the ingress DNS records] - -[id="installation-restricted-networks-gcp-user-infra-exporting-common-variables"] -== Exporting common variables - -include::modules/installation-extracting-infraid.adoc[leveloffset=+2] -include::modules/installation-user-infra-exporting-common-variables.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-vpc.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-lb.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-ext-lb.adoc[leveloffset=+2] -include::modules/installation-deployment-manager-int-lb.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-private-dns.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-private-dns.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-firewall-rules-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-firewall-rules.adoc[leveloffset=+2] - -include::modules/installation-creating-gcp-iam-shared-vpc.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-iam-shared-vpc.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-rhcos.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-bootstrap.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] - 
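The Deployment Manager template modules referenced throughout this assembly are driven by small YAML configurations that import a template and set its properties. The fragment below is a purely illustrative sketch of that pattern; the template file name, resource name, and properties are hypothetical and do not correspond to a specific provided template.

[source,yaml]
----
# Hypothetical Deployment Manager configuration. 02_example.py and its
# properties stand in for one of the provided templates and its inputs.
imports:
- path: 02_example.py
resources:
- name: cluster-example
  type: 02_example.py
  properties:
    infra_id: 'mycluster-x7k2p'   # placeholder infrastructure ID
    region: 'us-central1'         # placeholder GCP region
----

A configuration like this is typically applied with `gcloud deployment-manager deployments create <name> --config <file>`.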
-include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] - -include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] - -include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] -include::modules/installation-deployment-manager-worker.adoc[leveloffset=+2] - -//You install the CLI on the mirror host. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] - -include::modules/installation-gcp-user-infra-completing.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_gcp/manually-creating-iam-gcp.adoc b/installing/installing_gcp/manually-creating-iam-gcp.adoc deleted file mode 100644 index de4865932e9c..000000000000 --- a/installing/installing_gcp/manually-creating-iam-gcp.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-creating-iam-gcp"] -= Manually creating IAM for GCP -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-gcp - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 
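Manual mode is selected through the installation configuration before the installer runs. A minimal sketch, assuming only the standard `credentialsMode` field and placeholder values everywhere else:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com         # placeholder base domain
metadata:
  name: mycluster               # placeholder cluster name
credentialsMode: Manual         # stops the CCO from creating cloud credentials automatically
platform:
  gcp:
    projectID: example-project  # placeholder GCP project ID
    region: us-central1         # placeholder region
pullSecret: '{"auths": ...}'    # elided
----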
- -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc#cco-mode-gcp-workload-identity[Using manual mode with GCP Workload Identity] -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-rotate-remove-cloud-creds[Rotating or removing cloud provider credentials] - -For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] - -include::modules/mint-mode.adoc[leveloffset=+1] - -include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1] - -[id="manually-creating-iam-gcp-next-steps"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP] with default options on installer-provisioned infrastructure -** xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Install a cluster with cloud customizations on installer-provisioned infrastructure] -** xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[Install a cluster with network customizations on installer-provisioned infrastructure] diff --git a/installing/installing_gcp/modules b/installing/installing_gcp/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_gcp/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_gcp/preparing-to-install-on-gcp.adoc b/installing/installing_gcp/preparing-to-install-on-gcp.adoc deleted file mode 100644 index 3f92abef99df..000000000000 --- a/installing/installing_gcp/preparing-to-install-on-gcp.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-gcp"] -= Preparing to install on GCP -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-gcp - -toc::[] - -[id="{context}-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-gcp"] -== Requirements for installing {product-title} on GCP - -Before installing {product-title} on Google Cloud Platform (GCP), you must create a service account and configure a GCP project. See xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] for details about creating a project, enabling API services, configuring DNS, GCP account limits, and supported GCP regions. 
- -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP] for other options. - -[id="choosing-an-method-to-install-ocp-on-gcp"] -== Choosing a method to install {product-title} on GCP - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-gcp-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on GCP infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP]**: You can install {product-title} on GCP infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. - -* **xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a customized cluster on GCP]**: You can install a customized cluster on GCP infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-network-customizations[Installing a cluster on GCP with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[Installing a cluster on GCP in a restricted network]**: You can install {product-title} on GCP on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the GCP APIs. - -* **xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[Installing a cluster into an existing Virtual Private Cloud]**: You can install {product-title} on an existing GCP Virtual Private Cloud (VPC). 
You can use this installation method if you have constraints set by the guidelines of your company, such as limits on creating new accounts or infrastructure. - -* **xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing GCP VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -[id="choosing-an-method-to-install-ocp-on-gcp-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on GCP infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on GCP with user-provisioned infrastructure]**: You can install {product-title} on GCP infrastructure that you provide. You can use the provided Deployment Manager templates to assist with the installation. - -* **xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[Installing a cluster with shared VPC on user-provisioned infrastructure in GCP]**: You can use the provided Deployment Manager templates to create GCP resources in a shared VPC infrastructure. - -* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[Installing a cluster on GCP in a restricted network with user-provisioned infrastructure]**: You can install {product-title} on GCP in a restricted network with user-provisioned infrastructure. By creating an internal mirror of the installation release content, you can install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. - -[id="preparing-to-install-on-gcp-next-steps"] -== Next steps - -* xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] diff --git a/installing/installing_gcp/snippets b/installing/installing_gcp/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_gcp/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_gcp/uninstalling-cluster-gcp.adoc b/installing/installing_gcp/uninstalling-cluster-gcp.adoc deleted file mode 100644 index b8bcb4feaaeb..000000000000 --- a/installing/installing_gcp/uninstalling-cluster-gcp.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-gcp"] -= Uninstalling a cluster on GCP -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-gcp - -toc::[] - -You can remove a cluster that you deployed to Google Cloud Platform (GCP). 
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] - -include::modules/cco-ccoctl-deleting-sts-resources.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/_attributes b/installing/installing_ibm_cloud/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_cloud/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/images b/installing/installing_ibm_cloud/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_cloud/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc deleted file mode 100644 index 56652129d615..000000000000 --- a/installing/installing_ibm_cloud/install-ibm-cloud-installation-workflow.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-ibm-cloud-installation-workflow"] -= Setting up the environment for an {product-title} installation -include::_attributes/common-attributes.adoc[] -:context: install-ibm-cloud-installation-workflow - -toc::[] - -include::modules/install-ibm-cloud-preparing-the-provisioner-node.adoc[leveloffset=+1] - -include::modules/install-ibm-cloud-configuring-the-public-subnet.adoc[leveloffset=+1] - -include::modules/ipi-install-retrieving-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/install-ibm-cloud-configuring-the-install-config-file.adoc[leveloffset=+1] - -include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+1] - -include::modules/ipi-install-root-device-hints.adoc[leveloffset=+1] - -include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+1] - -include::modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc[leveloffset=+1] - -include::modules/ipi-install-following-the-installation.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc b/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc deleted file mode 100644 index 77bb83f89424..000000000000 --- a/installing/installing_ibm_cloud/install-ibm-cloud-prerequisites.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-ibm-cloud-prerequisites"] -= Prerequisites -include::_attributes/common-attributes.adoc[] -:context: install-ibm-cloud - -toc::[] - -You can use installer-provisioned installation to install {product-title} on IBM Cloud® nodes. This document describes the prerequisites and procedures for installing {product-title} on IBM Cloud nodes. - -[IMPORTANT] -==== -Red Hat supports IPMI and PXE on the provisioning network only. Red Hat has not tested Redfish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. A provisioning network is required. -==== - -Installer-provisioned installation of {product-title} requires: - -* One node with {op-system-first} 8.x installed, for running the provisioner -* Three control plane nodes -* One routable network -* One provisioning network - -Before starting an installer-provisioned installation of {product-title} on IBM Cloud, address the following prerequisites and requirements.
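Because only IPMI and PXE are supported on the provisioning network for these deployments, the BMC entries in the installation configuration normally use `ipmi://` addresses. The fragment below is a hypothetical sketch of part of the bare-metal platform section of `install-config.yaml`; the host name, address, credentials, and interface name are placeholders, and the install-config modules included in the workflow assembly above document the full set of parameters.

[source,yaml]
----
platform:
  baremetal:
    provisioningNetworkInterface: enp1s0   # placeholder provisioning NIC name
    hosts:
    - name: control-plane-0                # placeholder host name
      role: master
      bmc:
        address: ipmi://10.196.130.145     # placeholder IPMI address
        username: root                     # placeholder BMC credentials
        password: <password>
      bootMACAddress: 00:e0:22:83:3a:11    # placeholder MAC address
----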
- -include::modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_cloud/modules b/installing/installing_ibm_cloud/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_cloud/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud/snippets b/installing/installing_ibm_cloud/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_cloud/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/_attributes b/installing/installing_ibm_cloud_public/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_cloud_public/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc deleted file mode 100644 index d39411d07672..000000000000 --- a/installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="configuring-iam-ibm-cloud"] -= Configuring IAM for IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: configuring-iam-ibm-cloud - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-iam-ibm-cloud-about-cco"] -.Additional resources -* xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator] - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-iam-ibm-cloud-refreshing-ids"] -.Additional resources -* xref:../../post_installation_configuration/cluster-tasks.adoc#refreshing-service-ids-ibm-cloud_post-install-cluster-tasks[Rotating API keys for IBM Cloud VPC] - -[id="next-steps_configuring-iam-ibm-cloud"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[Installing a cluster on IBM Cloud VPC with customizations] - -[role="_additional-resources"] -[id="additional-resources_{context}"] -== Additional resources - -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/images b/installing/installing_ibm_cloud_public/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/installing/installing_ibm_cloud_public/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc deleted file mode 100644 index 3af38d584212..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc +++ /dev/null @@ -1,36 +0,0 @@ 
-:_content-type: ASSEMBLY -[id="installing-ibm-cloud-account"] -= Configuring an IBM Cloud account -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-account - -toc::[] - -Before you can install {product-title}, you must configure an IBM Cloud account. - -[id="prerequisites_installing-ibm-cloud-account"] -== Prerequisites - -* You have an IBM Cloud account with a subscription. You cannot install {product-title} on a free or trial IBM Cloud account. - -include::modules/quotas-and-limits-ibm-cloud.adoc[leveloffset=+1] - -[id="configuring-dns-resolution"] -== Configuring DNS resolution - -How you configure DNS resolution depends on the type of {product-title} cluster you are installing: - -* If you are installing a public cluster, you use IBM Cloud Internet Services (CIS). -* If you are installing a private cluster, you use IBM Cloud DNS Services (DNS Services) - -include::modules/installation-cis-ibm-cloud.adoc[leveloffset=+2] -include::modules/installation-dns-ibm-cloud.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-iam-policies-api-key.adoc[leveloffset=+1] -include::modules/installation-ibm-cloud-creating-api-key.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-regions.adoc[leveloffset=+1] - -[id="next-steps_installing-ibm-cloud-account"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC] diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc deleted file mode 100644 index fd1d36d2e204..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-customizations"] -= Installing a cluster on IBM Cloud VPC with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on IBM Cloud VPC. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. 
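As a point of reference for the kind of customization this assembly describes, the following is a minimal, hypothetical `install-config.yaml` sketch for IBM Cloud VPC. The domain, cluster name, region, and replica counts are placeholders, and the configuration-parameter module included below documents the complete set of fields.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com        # placeholder base domain
metadata:
  name: mycluster              # placeholder cluster name
credentialsMode: Manual        # cloud credentials are maintained manually for IBM Cloud VPC
controlPlane:
  name: master
  replicas: 3
compute:
- name: worker
  replicas: 3                  # example customization: compute replica count
platform:
  ibmcloud:
    region: us-south           # placeholder region
pullSecret: '{"auths": ...}'   # elided
sshKey: ssh-ed25519 AAAA...    # elided public key
----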
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -//.Additional resources - -//* ../../machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-ibm-cloud[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-customizations"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc deleted file mode 100644 index c71411a8717c..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-network-customizations"] -= Installing a cluster on IBM Cloud VPC with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster with a -customized network configuration on infrastructure that the installation program provisions on IBM Cloud VPC. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. 
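The network customizations mentioned here live in the `networking` stanza of `install-config.yaml`. A minimal sketch using the commonly documented fields; the CIDR values are illustrative defaults rather than recommendations for your environment, and only the `kubeProxy`-related settings can still be changed after installation.

[source,yaml]
----
networking:
  networkType: OVNKubernetes   # cluster network plugin
  clusterNetwork:
  - cidr: 10.128.0.0/14        # pod network CIDR (illustrative)
    hostPrefix: 23             # per-node subnet size
  serviceNetwork:
  - 172.30.0.0/16              # service network CIDR (illustrative)
  machineNetwork:
  - cidr: 10.0.0.0/16          # machine network CIDR (illustrative)
----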
- -[id="prerequisites_installing-ibm-cloud-network-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -//.Additional resources - -//* ../../machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc#machineset-enabling-customer-managed-encryption_creating-machineset-ibm-cloud[Enabling customer-managed encryption keys for a compute machine set] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-network-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-network-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-network-customizations"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
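As the introduction to this assembly notes, `kubeProxy` settings are the one part of the network configuration that can still be tuned on a running cluster, through the Cluster Network Operator configuration. The following is a hedged sketch of such a change; the values are illustrative, and the `kubeProxyConfig` stanza is only meaningful when the chosen network plugin actually runs kube-proxy.

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  kubeProxyConfig:
    iptablesSyncPeriod: 30s        # illustrative sync period
    proxyArguments:
      iptables-min-sync-period:    # illustrative kube-proxy argument
      - 0s
----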
diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc deleted file mode 100644 index 00f4a1b208a2..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-private"] -= Installing a private cluster on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-private - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-private"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. 
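A private cluster ultimately comes down to a few settings in `install-config.yaml`. A minimal, hypothetical sketch; the region is a placeholder, and the existing VPC and its subnets are referenced through additional platform fields that the included modules describe.

[source,yaml]
----
publish: Internal      # keeps the API server and Ingress endpoints off the public internet
platform:
  ibmcloud:
    region: eu-gb      # placeholder region
    # The existing VPC and subnets are selected with additional ibmcloud
    # platform fields; see the configuration-parameter module for the names.
----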
- -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-cloud-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-private-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-private-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-private"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc b/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc deleted file mode 100644 index f4acc0477718..000000000000 --- a/installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-vpc"] -= Installing a cluster on IBM Cloud VPC into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. The installation program provisions the rest of the required infrastructure, which you can then further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -[id="prerequisites_installing-ibm-cloud-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -include::modules/installation-custom-ibm-cloud-vpc.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-vpc-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-cloud-vpc-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-cloud-vpc"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting]. diff --git a/installing/installing_ibm_cloud_public/modules b/installing/installing_ibm_cloud_public/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/installing/installing_ibm_cloud_public/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc deleted file mode 100644 index 1907b825a6f8..000000000000 --- a/installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-cloud"] -= Preparing to install on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-cloud - -toc::[] - -The installation workflows documented in this section are for IBM Cloud VPC infrastructure environments. IBM Cloud Classic is not supported at this time. 
For more information about the difference between Classic and VPC infrastructures, see the IBM link:https://cloud.ibm.com/docs/cloud-infrastructure?topic=cloud-infrastructure-compare-infrastructure[documentation]. - -[id="prerequisites_preparing-to-install-on-ibm-cloud"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="requirements-for-installing-ocp-on-ibm-cloud"] -== Requirements for installing {product-title} on IBM Cloud VPC - -Before installing {product-title} on IBM Cloud VPC, you must create a service account and configure an IBM Cloud account. See xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[Configuring an IBM Cloud account] for details about creating an account, enabling API services, configuring DNS, IBM Cloud account limits, and supported IBM Cloud VPC regions. - -You must manually manage your cloud credentials when installing a cluster to IBM Cloud VPC. Do this by configuring the Cloud Credential Operator (CCO) for manual mode before you install the cluster. For more information, see xref:../../installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for IBM Cloud VPC]. - -[id="choosing-a-method-to-install-ocp-on-ibm-cloud"] -== Choosing a method to install {product-title} on IBM Cloud VPC - -You can install {product-title} on IBM Cloud VPC using installer-provisioned infrastructure. This process involves using an installation program to provision the underlying infrastructure for your cluster. Installing {product-title} on IBM Cloud VPC using user-provisioned infrastructure is not supported at this time. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-ibm-cloud-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on IBM Cloud VPC infrastructure that is provisioned by the {product-title} installation program by using one of the following methods: - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[Installing a customized cluster on IBM Cloud VPC]**: You can install a customized cluster on IBM Cloud VPC infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc#installing-ibm-cloud-network-customizations[Installing a cluster on IBM Cloud VPC with network customizations]**: You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. 
- -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc#installing-ibm-cloud-vpc[Installing a cluster on IBM Cloud VPC into an existing VPC]**: You can install {product-title} on an existing IBM Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc#installing-ibm-cloud-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing Virtual Private Cloud (VPC). You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -[id="next-steps_preparing-to-install-on-ibm-cloud"] -== Next steps -* xref:../../installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc#installing-ibm-cloud-account[Configuring an IBM Cloud account] diff --git a/installing/installing_ibm_cloud_public/snippets b/installing/installing_ibm_cloud_public/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_cloud_public/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc b/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc deleted file mode 100644 index 3cec6a033333..000000000000 --- a/installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-ibm-cloud"] -= Uninstalling a cluster on IBM Cloud VPC -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-ibm-cloud - -toc::[] - -You can remove a cluster that you deployed to IBM Cloud VPC. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_power/_attributes b/installing/installing_ibm_power/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_power/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_power/images b/installing/installing_ibm_power/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_power/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_power/installing-ibm-power.adoc b/installing/installing_ibm_power/installing-ibm-power.adoc deleted file mode 100644 index 35256a6de020..000000000000 --- a/installing/installing_ibm_power/installing-ibm-power.adoc +++ /dev/null @@ -1,125 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power"] -= Installing a cluster on IBM Power -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -IBM Power infrastructure that you provision. - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. 
-==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/recommended-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[id="creating-machines-bare-metal-power"] -== Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on IBM Power infrastructure that you provision, you must install {op-system-first} on the machines. 
When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -Follow either the steps to use an ISO image or network PXE booting to install {op-system} on the machines. - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/rhcos-enabling-multipath-day-1-power.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc b/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc deleted file mode 100644 index 99d092efd7a0..000000000000 --- a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc +++ /dev/null @@ -1,129 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-power"] -= Installing a cluster on IBM Power in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-power - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -IBM Power infrastructure that you provision in a restricted network. - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry for installation in a restricted network] and obtained the `imageContentSources` data for your version of {product-title}. -* Before you begin the installation process, you must move or remove any existing installation files. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are performed on a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/recommended-ibm-power-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. 
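The module included next walks through creating the `install-config.yaml` file manually. As a hedged sketch of the disconnected-specific addition only (the mirror host `mirror.example.com:5000` and the repository `ocp4/openshift4` are placeholders, and the exact values come from your mirroring procedure rather than from this assembly), you might append the `imageContentSources` stanza that the mirror step produced:

[source,terminal]
----
$ cat <<'EOF' >> <installation_directory>/install-config.yaml
imageContentSources:
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - mirror.example.com:5000/ocp4/openshift4
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
EOF
----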
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -[id="creating-machines-ibm-power-restricted-network"] -== Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on IBM Power infrastructure that you provision, you must install {op-system-first} on the machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -Follow either the steps to use an ISO image or network PXE booting to install {op-system} on the machines. - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/rhcos-enabling-multipath-day-1-power.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-change-management-state.adoc[leveloffset=+3] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
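For the last step above, a minimal sketch of adding a mirror registry CA as an additional trust store follows; the registry host and certificate path are hypothetical, and the xref above remains the authoritative procedure:

[source,terminal]
----
$ oc create configmap registry-config \
    --from-file=mirror.example.com..5000=/path/to/mirror-ca.crt \
    -n openshift-config
$ oc patch image.config.openshift.io/cluster --type=merge \
    --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}'
----

The `..` in the config map key stands in for the `:` of the registry port, because colons are not allowed in config map keys.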
diff --git a/installing/installing_ibm_power/modules b/installing/installing_ibm_power/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_power/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc b/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc deleted file mode 100644 index fbc5222b7c09..000000000000 --- a/installing/installing_ibm_power/preparing-to-install-on-ibm-power.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-power"] -= Preparing to install on IBM Power -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-power - -toc::[] - -[id="preparing-to-install-on-ibm-power-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-power"] -== Choosing a method to install {product-title} on IBM Power - -You can install a cluster on IBM Power infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[Installing a cluster on IBM Power]**: You can install {product-title} on IBM Power infrastructure that you provision. - -* **xref:../../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[Installing a cluster on IBM Power in a restricted network]**: You can install {product-title} on IBM Power infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. 
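For the restricted network method described above, the internal mirror of the release content is typically created with `oc adm release mirror`. A rough sketch follows; the registry host, repository, and release version are placeholders, and the full procedure lives in the mirroring documentation that the assembly references:

[source,terminal]
----
$ oc adm release mirror -a pull-secret.json \
    --from=quay.io/openshift-release-dev/ocp-release:<version>-ppc64le \
    --to=mirror.example.com:5000/ocp4/openshift4 \
    --to-release-image=mirror.example.com:5000/ocp4/openshift4:<version>-ppc64le
----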
diff --git a/installing/installing_ibm_power/snippets b/installing/installing_ibm_power/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_power/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/_attributes b/installing/installing_ibm_powervs/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_powervs/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc b/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc deleted file mode 100644 index 9c21524a0e22..000000000000 --- a/installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-ibm-power-vs-workspace"] -= Creating an {ibmpowerProductName} Virtual Server workspace -include::_attributes/common-attributes.adoc[] -:context: creating-ibm-power-vs-workspace - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -include::modules/creating-ibm-power-vs-workspace-procedure.adoc[leveloffset=+1] - - -[id="next-steps_creating-ibm-power-vs-workspace"] -== Next steps -* xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[Installing a cluster on {ibmpowerProductName} Virtual Server with customizations] diff --git a/installing/installing_ibm_powervs/images b/installing/installing_ibm_powervs/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_powervs/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc b/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc deleted file mode 100644 index 3aaa32c147b6..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-cloud-account-power-vs"] -= Configuring an IBM Cloud account -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-cloud-account-power-vs - -toc::[] - -Before you can install {product-title}, you must configure an IBM Cloud account. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-cloud-account-power-vs"] -== Prerequisites - -* You have an IBM Cloud account with a subscription. You cannot install {product-title} on a free or on a trial IBM Cloud account. - -include::modules/quotas-and-limits-ibm-power-vs.adoc[leveloffset=+1] - -[id="configuring-dns-resolution-powervs"] -== Configuring DNS resolution - -How you configure DNS resolution depends on the type of {product-title} cluster you are installing: - -* If you are installing a public cluster, you use IBM Cloud Internet Services (CIS). -* If you are installing a private cluster, you use IBM Cloud DNS Services (DNS Services). 
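Before moving on to the CIS steps that follow, a quick way to confirm that a public cluster's base domain is delegated to IBM Cloud Internet Services is to query its name servers. This check is illustrative only and is not part of the original assembly; the domain is a placeholder:

[source,terminal]
----
$ dig +short NS example.com
# Expect the name servers that your CIS instance assigned to the domain
----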
- -include::modules/installation-cis-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-iam-policies-api-key.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-creating-api-key.adoc[leveloffset=+2] - -include::modules/installation-ibm-cloud-regions.adoc[leveloffset=+1] - -[id="next-steps_installing-ibm-cloud-account-power-vs"] -== Next steps -* xref:../../installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc#creating-ibm-power-vs-workspace[Creating an {ibmpowerProductName} Virtual Server workspace] diff --git a/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc b/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc deleted file mode 100644 index 0fb0e4bcedd8..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power-vs-customizations"] -= Installing a cluster on {ibmpowerProductName} Virtual Server with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power-vs-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on infrastructure that the installation program provisions on {ibmpowerProductName} Virtual Server. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-powervs-customizations"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. 
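As a quick illustration of the customization workflow that this assembly describes (the installation directory name is arbitrary), you generate the configuration file, edit it, and then create the cluster:

[source,terminal]
----
$ mkdir powervs-cluster
$ ./openshift-install create install-config --dir powervs-cluster
$ vi powervs-cluster/install-config.yaml    # customize parameters before installing
$ ./openshift-install create cluster --dir powervs-cluster --log-level=info
----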
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-customizations-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-customizations-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-power-vs-customizations"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc b/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc deleted file mode 100644 index b06b4d76a556..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-power-vs-private-cluster"] -= Installing a private cluster on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-power-vs-private-cluster - -toc::[] - -In {product-title} version {product-version}, you can install a private cluster into an existing VPC and {ibmpowerProductName} Virtual Server Workspace. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-power-vs-private-cluster"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. - -include::modules/private-clusters-default.adoc[leveloffset=+1] - -include::modules/private-clusters-about-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-private-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-private-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - - -[id="next-steps_installing-ibm-power-vs-private-cluster"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc b/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc deleted file mode 100644 index 31daad2354b2..000000000000 --- a/installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc +++ /dev/null @@ -1,66 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-powervs-vpc"] -= Installing a cluster on {ibmpowerProductName} Virtual Server into an existing VPC -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-powervs-vpc - -toc::[] - -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. The installation program provisions the rest of the required infrastructure, which you can then further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. 
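Because the installation program consumes `install-config.yaml` when it creates the cluster, a common precaution, sketched here with hypothetical paths, is to keep a backup of the customized file before launching the installation:

[source,terminal]
----
$ cp powervs-vpc-cluster/install-config.yaml ~/install-config.yaml.bak
$ ./openshift-install create cluster --dir powervs-vpc-cluster --log-level=info
----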
- -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-powervs-vpc"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-powervs-vpc-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-powervs-vpc-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-powervs-vpc"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] diff --git a/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc b/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc deleted file mode 100644 index d603c9e2c5cd..000000000000 --- a/installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY 
-[id="installing-restricted-networks-ibm-power-vs"] -= Installing a cluster on {ibmpowerProductName} Virtual Server in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-power-vs - -toc::[] - -In {product-title} {product-version}, you can install a cluster on IBM Cloud VPC in a restricted network by creating an internal mirror of the installation release content on an existing Virtual Private Cloud (VPC) on IBM Cloud VPC. - -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="prerequisites_installing-ibm-power-vs-restricted"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[configured an IBM Cloud account] to host the cluster. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You have an existing VPC in IBM Cloud VPC. When installing a cluster in a restricted network, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: -** Contains the mirror registry -** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -* You configured the `ccoctl` utility before you installed the cluster. For more information, see xref:../../installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc#cco-ccoctl-configuring_preparing-to-install-on-ibm-power-vs[Configuring the Cloud Credential Operator utility]. 
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/installation-custom-ibm-power-vs.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-ibm-cloud-export-variables.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] - -include::modules/installation-ibm-power-vs-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/manually-create-iam-ibm-cloud.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-restricted-console"] -.Additional resources -* xref:../../web_console/web-console.adoc#web-console[Accessing the web console] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_installing-ibm-power-vs-restricted-telemetry"] -.Additional resources -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -[id="next-steps_installing-ibm-power-vs-restricted"] -== Next steps -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* Optional: xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] \ No newline at end of file diff --git a/installing/installing_ibm_powervs/modules b/installing/installing_ibm_powervs/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_powervs/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc b/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc deleted file mode 100644 index 75542226e3ac..000000000000 --- a/installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.adoc +++ /dev/null @@ -1,58 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-power-vs"] -= Preparing to install on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-power-vs - -toc::[] - -The installation workflows documented in this section are for {ibmpowerProductName} Virtual Server infrastructure environments. - -[id="prerequisites_preparing-to-install-on-ibm-power-vs"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
- -:FeatureName: {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure -include::snippets/technology-preview.adoc[] - -[id="requirements-for-installing-ocp-on-ibm-power-vs"] -== Requirements for installing {product-title} on {ibmpowerProductName} Virtual Server - -Before installing {product-title} on {ibmpowerProductName} Virtual Server, you must create a service account and configure an IBM Cloud account. See xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[Configuring an IBM Cloud account] for details about creating an account, configuring DNS and supported {ibmpowerProductName} Virtual Server regions. - -You must manually manage your cloud credentials when installing a cluster to {ibmpowerProductName} Virtual Server. Do this by configuring the Cloud Credential Operator (CCO) for manual mode before you install the cluster. - -[id="choosing-a-method-to-install-ocp-on-ibm-power-vs"] -== Choosing a method to install {product-title} on {ibmpowerProductName} Virtual Server - -You can install {product-title} on {ibmpowerProductName} Virtual Server using installer-provisioned infrastructure. This process involves using an installation program to provision the underlying infrastructure for your cluster. Installing {product-title} on {ibmpowerProductName} Virtual Server using user-provisioned infrastructure is not supported at this time. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-power-vs-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on {ibmpowerProductName} Virtual Server infrastructure that is provisioned by the {product-title} installation program by using one of the following methods: - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc#installing-ibm-power-vs-customizations[Installing a customized cluster on {ibmpowerProductName} Virtual Server]**: You can install a customized cluster on {ibmpowerProductName} Virtual Server infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc#installing-ibm-powervs-vpc[Installing a cluster on {ibmpowerProductName} Virtual Server into an existing VPC]**: You can install {product-title} on {ibmpowerProductName} Virtual Server into an existing Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. - -* **xref:../../installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc#installing-ibm-power-vs-private-cluster[Installing a private cluster on {ibmpowerProductName} Virtual Server]**: You can install a private cluster on {ibmpowerProductName} Virtual Server. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. 
- -* **xref:../../installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc#installing-restricted-networks-ibm-power-vs[Installing a cluster on {ibmpowerProductName} Virtual Server in a restricted network]**: You can install {product-title} on {ibmpowerProductName} Virtual Server on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. - -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configuring-ibm-cloud-refreshing-ids"] - -.Additional resources -* xref:../../post_installation_configuration/cluster-tasks.adoc#refreshing-service-ids-ibm-cloud_post-install-cluster-tasks[Rotating API keys] - -[id="next-steps_preparing-to-install-on-ibm-power-vs"] -== Next steps -* xref:../../installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc#installing-ibm-cloud-account-power-vs[Configuring an IBM Cloud account] \ No newline at end of file diff --git a/installing/installing_ibm_powervs/snippets b/installing/installing_ibm_powervs/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_powervs/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc b/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc deleted file mode 100644 index ffcbdb93a46a..000000000000 --- a/installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-ibm-power-vs"] -= Uninstalling a cluster on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-ibm-power-vs - -toc::[] - -You can remove a cluster that you deployed to {ibmpowerProductName} Virtual Server. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] \ No newline at end of file diff --git a/installing/installing_ibm_z/_attributes b/installing/installing_ibm_z/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_ibm_z/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_ibm_z/images b/installing/installing_ibm_z/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_ibm_z/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_ibm_z/installing-ibm-z-kvm.adoc b/installing/installing_ibm_z/installing-ibm-z-kvm.adoc deleted file mode 100644 index 46e24611c254..000000000000 --- a/installing/installing_ibm_z/installing-ibm-z-kvm.adoc +++ /dev/null @@ -1,144 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-z-kvm"] -= Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-z-kvm - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -[NOTE] -==== -While this document refers only to {ibmzProductName}, all information in it also applies to {linuxoneProductName}. 
-==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* You provisioned a {op-system-base} Kernel Virtual Machine (KVM) system that is hosted on the logical partition (LPAR) and based on {op-system-base} 8.6 or later. See link:https://access.redhat.com/support/policy/updates/errata#RHEL8_and_9_Life_Cycle[Red Hat Enterprise Linux 8 and 9 Life Cycle]. - - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-requirements-user-infra-ibm-z-kvm.adoc[leveloffset=+1] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-recommended-host-practices"] -.Additional resources - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-chrony-time-service"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - 
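The modules that follow generate the Kubernetes manifests and Ignition config files. As a brief sketch of those commands (the installation directory name is a placeholder):

[source,terminal]
----
$ ./openshift-install create manifests --dir <installation_directory>
$ ./openshift-install create ignition-configs --dir <installation_directory>
$ ls <installation_directory>/*.ign    # bootstrap.ign, master.ign, worker.ign
----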
-include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc[leveloffset=+1] - -include::modules/ibm-z-secure-execution.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_Linux-as-an-IBM-Secure-Execution-host-or-guest"] -.Additional resources - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution[Introducing IBM Secure Execution for Linux] - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=ibmz-secure-execution[Linux as an IBM Secure Execution host or guest] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z-kvm"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-remote-health-monitoring"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within OpenShift4 nodes without SSH]. - -[id="next-steps_ibmz-kvm"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. - -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_z/installing-ibm-z.adoc b/installing/installing_ibm_z/installing-ibm-z.adoc deleted file mode 100644 index efef77e3ae95..000000000000 --- a/installing/installing_ibm_z/installing-ibm-z.adoc +++ /dev/null @@ -1,147 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-ibm-z"] -= Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: installing-ibm-z - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -[NOTE] -==== -While this document refers only to {ibmzProductName}, all information in it also applies to {linuxoneProductName}. 
-==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Before you begin the installation process, you must clean the installation directory. This ensures that the required installation files are created and updated during the installation process. -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/preferred-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-requirements"] -.Additional resources - -* See link:https://www.ibm.com/docs/en/zvm/7.1?topic=networks-bridging-hipersockets-lan-zvm-virtual-switch[Bridging a HiperSockets LAN with a z/VM Virtual Switch] in IBM Documentation. - -* See link:http://public.dhe.ibm.com/software/dw/linux390/perf/zvm_hpav00.pdf[Scaling HyperPAV alias devices on Linux guests on z/VM] for performance optimization. - -* See link:https://www.vm.ibm.com/library/presentations/lparperf.pdf[Topics in LPAR performance] for LPAR weight management and entitlements. 
- -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-chrony-time-service"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-user-infra-machines-iso.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-remote-health-monitoring"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within OpenShift4 nodes without SSH]. - -[id="next-steps_ibmz-vm"] -== Next steps - -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#rhcos-enabling-multipath_post-install-machine-configuration-tasks[Enabling multipathing with kernel arguments on {op-system}]. 
- -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. - -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc b/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc deleted file mode 100644 index 69529ec1d7ab..000000000000 --- a/installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc +++ /dev/null @@ -1,147 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-z-kvm"] -= Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-z-kvm - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted network. - -[NOTE] -==== -While this document refers to only {ibmzProductName}, all information in it also applies to {linuxoneProductName}. -==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -* You must move or remove any existing installation files, before you begin the installation process. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are done from a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== -* You provisioned a {op-system-base} Kernel Virtual Machine (KVM) system that is hosted on the logical partition (LPAR) and based on {op-system-base} 8.6 or later. See link:https://access.redhat.com/support/policy/updates/errata#RHEL8_and_9_Life_Cycle[Red Hat Enterprise Linux 8 and 9 Life Cycle]. 
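A quick sanity check of the {op-system-base} KVM host before you continue, assuming you have shell access to it; this check is illustrative and not part of the original assembly:

[source,terminal]
----
$ grep VERSION_ID /etc/os-release    # expect 8.6 or later
$ sudo systemctl is-active libvirtd
----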
- -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-requirements-user-infra-ibm-z-kvm.adoc[leveloffset=+1] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-restricted-recommended-host-practices"] -.Additional resources - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_ibmz-network-user-infra"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc[leveloffset=+1] - -include::modules/ibm-z-secure-execution.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_Linux-as-an-IBM-Secure-Execution-host-or-guest-restricted"] -.Additional resources - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=virtualization-secure-execution[Introducing IBM Secure Execution for Linux] - -* link:https://www.ibm.com/docs/en/linux-on-systems?topic=ibmz-secure-execution[Linux as an IBM Secure Execution host or guest] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources_configure-nbde-ibm-z-kvm-restricted"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - 
-include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_ibmz-kvm-restricted-sosreport"] -.Additional resources - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within {product-title} version 4 nodes without SSH]. - -[id="next-steps_ibmz-kvm-restricted"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. diff --git a/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc b/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc deleted file mode 100644 index 68550f8a3746..000000000000 --- a/installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc +++ /dev/null @@ -1,146 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-ibm-z"] -= Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-ibm-z - -toc::[] - -[role="_abstract"] -In {product-title} version {product-version}, you can install a cluster on -{ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted network. - -[NOTE] -==== -While this document refers to only {ibmzProductName}, all information in it also applies to {linuxoneProductName}. -==== - -[IMPORTANT] -==== -Additional considerations exist for non-bare metal platforms. Review the information in the -link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you install an {product-title} cluster. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a mirror registry for installation in a restricted network] and obtained the `imageContentSources` data for your version of {product-title}. -* Before you begin the installation process, you must move or remove any existing installation files. This ensures that the required installation files are created and updated during the installation process. -+ -[IMPORTANT] -==== -Ensure that installation steps are done from a machine with access to the installation media. -==== -* You provisioned xref:../../storage/persistent_storage/persistent-storage-ocs.adoc#persistent-storage-ocs[persistent storage using {rh-storage}] or other supported storage protocols for your cluster. To deploy a private image registry, you must set up persistent storage with `ReadWriteMany` access. 
-* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/minimum-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/preferred-ibm-z-system-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See link:https://www.ibm.com/docs/en/zvm/7.1?topic=networks-bridging-hipersockets-lan-zvm-virtual-switch[Bridging a HiperSockets LAN with a z/VM Virtual Switch] in IBM Documentation. - -* See link:http://public.dhe.ibm.com/software/dw/linux390/perf/zvm_hpav00.pdf[Scaling HyperPAV alias devices on Linux guests on z/VM] for performance optimization. - -* See link:https://www.vm.ibm.com/library/presentations/lparperf.pdf[Topics in LPAR performance] for LPAR weight management and entitlements. - -* xref:../../scalability_and_performance/ibm-z-recommended-host-practices.adoc#ibm-z-recommended-host-practices[Recommended host practices for {ibmzProductName} & {linuxoneProductName} environments] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You install the CLI on the mirror host. 
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/ibmz-configure-nbde-with-static-ip.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_Configure-nbde-ibm-z-restricted"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-butane_installing-customizing[Creating machine configs with Butane] - -include::modules/installation-ibm-z-user-infra-machines-iso.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/solutions/4387261[How to generate SOSREPORT within {product-title} version 4 nodes without SSH]. - -[id="next-steps_ibmz-restricted"] -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. diff --git a/installing/installing_ibm_z/modules b/installing/installing_ibm_z/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_ibm_z/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc b/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc deleted file mode 100644 index b06398172c81..000000000000 --- a/installing/installing_ibm_z/preparing-to-install-on-ibm-z-kvm.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-z-kvm"] -= Preparing to install with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-z-kvm - -toc::[] - -[id="preparing-to-install-on-ibm-z-kvm-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-z-kvm"] -== Choosing a method to install {product-title} with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} - -You can install a cluster with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_z/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[Installing a cluster with RHEL KVM on {ibmzProductName} and {linuxoneProductName}]**: You can install {product-title} with KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision. - -* **xref:../../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[Installing a cluster with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} in a restricted network]**: You can install {product-title} with {op-system-base} KVM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc b/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc deleted file mode 100644 index 336a00f1fe74..000000000000 --- a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-ibm-z"] -= Preparing to install with z/VM on {ibmzProductName} and {linuxoneProductName} -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-ibm-z - -toc::[] - -[id="preparing-to-install-on-ibm-z-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-ibm-z"] -== Choosing a method to install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} - -You can install a cluster with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName}]**: You can install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision. 
- -* **xref:../../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName} in a restricted network]**: You can install {product-title} with z/VM on {ibmzProductName} or {linuxoneProductName} infrastructure that you provision in a restricted or disconnected network, by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. diff --git a/installing/installing_ibm_z/snippets b/installing/installing_ibm_z/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_ibm_z/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_nutanix/_attributes b/installing/installing_nutanix/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_nutanix/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_nutanix/images b/installing/installing_nutanix/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_nutanix/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc b/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc deleted file mode 100644 index 06c5269109e6..000000000000 --- a/installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-nutanix-installer-provisioned"] -= Installing a cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: installing-nutanix-installer-provisioned -:platform: Nutanix - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your Nutanix instance with two methods: - -* Using the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}] hosted at link:http://console.redhat.com[console.redhat.com]. This method requires no setup for the installer, and is ideal for connected environments like Nutanix. Installing with the {ai-full} also provides integration with Nutanix, enabling autoscaling. See xref:../../installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc#installing-on-prem-assisted[Installing an on-premise cluster using the {ai-full}] for additional details. - -* Using installer-provisioned infrastructure. Use the procedures in the following sections to use installer-provisioned infrastructure. Installer-provisioned infrastructure is ideal for installing in environments with air-gapped/restricted networks. - -== Prerequisites - -* You have reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* If you use a firewall, you have configured it to xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[grant access] to the sites that {product-title} requires. 
This includes the use of Telemetry. -* If your Nutanix environment is using the default self-signed SSL certificate, replace it with a certificate that is signed by a CA. The installation program requires a valid CA-signed certificate to access the Prism Central API. For more information about replacing the self-signed certificate, see the https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_1:mul-security-ssl-certificate-pc-t.html[Nutanix AOS Security Guide]. -+ -[IMPORTANT] -==== -Use 2048-bit certificates. The installation fails if you use 4096-bit certificates with Prism Central 2022.x. -==== -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/nutanix-entitlements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-nutanix-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-nutanix-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/manually-configure-iam-nutanix.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -== Configuring the default storage container -After you install the cluster, you must install the Nutanix CSI Operator and configure the default storage container for the cluster. - -For more information, see the Nutanix documentation for link:https://opendocs.nutanix.com/openshift/operators/csi/[installing the CSI Operator] and link:https://opendocs.nutanix.com/openshift/install/ipi/#openshift-image-registry-configuration[configuring registry storage]. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -== Next steps -* xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] diff --git a/installing/installing_nutanix/installing-nutanix-three-node.adoc b/installing/installing_nutanix/installing-nutanix-three-node.adoc deleted file mode 100644 index 611d2d75be0b..000000000000 --- a/installing/installing_nutanix/installing-nutanix-three-node.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-nutanix-three-node"] -= Installing a three-node cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: installing-nutanix-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on Nutanix. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient cluster for cluster administrators and developers to use for testing, development, and production.
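To make the three-node topology concrete, a minimal `install-config.yaml` sketch (all values are placeholders, not taken from this change) sets the compute pool to zero replicas so that the three control plane machines are the only nodes and also schedule workloads:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com # placeholder
metadata:
  name: three-node-cluster # placeholder
compute:
- name: worker
  replicas: 0 # no dedicated compute machines
controlPlane:
  name: master
  replicas: 3 # control plane machines also run workloads
----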
- -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Installing a cluster on Nutanix] diff --git a/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc b/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc deleted file mode 100644 index 8dfa556dfd93..000000000000 --- a/installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-nutanix-installer-provisioned"] -= Installing a cluster on Nutanix in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-nutanix-installer-provisioned - -toc::[] - -In {product-title} {product-version}, you can install a cluster on Nutanix infrastructure in a restricted network by creating an internal mirror of the installation release content. - -== Prerequisites - -* You have reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* If you use a firewall, you have configured it to xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[grant access] to the sites that {product-title} requires. This includes the use of Telemetry. -* If your Nutanix environment is using the default self-signed SSL/TLS certificate, replace it with a certificate that is signed by a CA. The installation program requires a valid CA-signed certificate to access the Prism Central API. For more information about replacing the self-signed certificate, see the https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_1:mul-security-ssl-certificate-pc-t.html[Nutanix AOS Security Guide]. -+ -[IMPORTANT] -==== -Use 2048-bit certificates. The installation fails if you use 4096-bit certificates with Prism Central 2022.x. -==== -* You have a container image registry, such as Red Hat Quay. If you do not already have a registry, you can create a mirror registry using xref:../../installing/disconnected_install/installing-mirroring-creating-registry.adoc#installing-mirroring-creating-registry[_mirror registry for Red Hat OpenShift_]. -* You have used the xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[oc-mirror OpenShift CLI (oc) plugin] to mirror all of the required {product-title} content and other images, including the Nutanix CSI Operator, to your mirror registry. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps.
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-adding-nutanix-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-nutanix-download-rhcos.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-nutanix-config-yaml.adoc[leveloffset=+2] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/manually-configure-iam-nutanix.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -== Post installation -Complete the following steps to complete the configuration of your cluster. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] -include::modules/oc-mirror-updating-restricted-cluster-manifests.adoc[leveloffset=+2] -include::modules/registry-configuring-storage-nutanix.adoc[leveloffset=+2] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] - -== Next steps -* xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[Opt out of remote health reporting] -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] diff --git a/installing/installing_nutanix/modules b/installing/installing_nutanix/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_nutanix/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc b/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc deleted file mode 100644 index 94c3aebe6ebe..000000000000 --- a/installing/installing_nutanix/preparing-to-install-on-nutanix.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-nutanix"] -= Preparing to install on Nutanix -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-nutanix - -toc::[] - -Before you install an {product-title} cluster, be sure that your Nutanix environment meets the following requirements. 
- -include::modules/installation-nutanix-infrastructure.adoc[leveloffset=+1] -include::modules/installation-nutanix-installer-infra-reqs.adoc[leveloffset=+1] -include::modules/cco-ccoctl-configuring.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../updating/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials] \ No newline at end of file diff --git a/installing/installing_nutanix/snippets b/installing/installing_nutanix/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_nutanix/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc b/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc deleted file mode 100644 index c06ce809a46b..000000000000 --- a/installing/installing_nutanix/uninstalling-cluster-nutanix.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-nutanix"] -= Uninstalling a cluster on Nutanix -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-nutanix - -toc::[] - -You can remove a cluster that you deployed to Nutanix. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_on_prem_assisted/_attributes b/installing/installing_on_prem_assisted/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_on_prem_assisted/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/assisted-installer-installing.adoc b/installing/installing_on_prem_assisted/assisted-installer-installing.adoc deleted file mode 100644 index bd6b65dcf992..000000000000 --- a/installing/installing_on_prem_assisted/assisted-installer-installing.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-with-ai"] -= Installing with the Assisted Installer -include::_attributes/common-attributes.adoc[] -:context: assisted-installer-installing - -toc::[] - -After you ensure the cluster nodes and network requirements are met, you can begin installing the cluster. - -include::modules/assisted-installer-pre-installation-considerations.adoc[leveloffset=+1] - -include::modules/assisted-installer-setting-the-cluster-details.adoc[leveloffset=+1] - -include::modules/assisted-installer-configuring-host-network-interfaces.adoc[leveloffset=+1] - -[role="_additional_resources"] -.Additional resources -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-host-network-interfaces-in-the-install-config-yaml-file_ipi-install-installation-workflow[Configuring network interfaces] - -* link:http://nmstate.io[NMState version 2.1.4] - -include::modules/assisted-installer-adding-hosts-to-the-cluster.adoc[leveloffset=+1] - -include::modules/installing-with-usb-media.adoc[leveloffset=+1] - -include::modules/assisted-installer-booting-with-a-usb-drive.adoc[leveloffset=+1] - -include::modules/install-booting-from-an-iso-over-http-redfish.adoc[leveloffset=+1] - -[role="_additional_resources"] -.Additional resources - -* xref:../installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing]. 
- -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[Firmware requirements for installing with virtual media] - -include::modules/assisted-installer-configuring-hosts.adoc[leveloffset=+1] - -include::modules/assisted-installer-configuring-networking.adoc[leveloffset=+1] - -include::modules/assisted-installer-installing-the-cluster.adoc[leveloffset=+1] - -include::modules/assisted-installer-completing-the-installation.adoc[leveloffset=+1] - - -[role="_additional_resources"] -[id="ai-saas-installing-additional-resources_{context}"] -== Additional resources - -* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Installing the OpenShift CLI]. - -* xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-logging-in_cli-developer-commands[Logging in to the OpenShift CLI] - -* xref:../../post_installation_configuration/preparing-for-users.adoc#creating-cluster-admin_post-install-preparing-for-users[Creating a cluster admin] - -* xref:../../post_installation_configuration/preparing-for-users.adoc#removing-kubeadmin_post-install-preparing-for-users[Removing the kubeadmin user] diff --git a/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc b/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc deleted file mode 100644 index e3e20a19b38b..000000000000 --- a/installing/installing_on_prem_assisted/assisted-installer-preparing-to-install.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-with-ai"] -= Preparing to install with the Assisted Installer -include::_attributes/common-attributes.adoc[] -:context: assisted-installer-preparing-to-install - -toc::[] - -Before installing a cluster, you must ensure the cluster nodes and network meet the requirements. - -[id="assisted-installer-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you must xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configure it] so that {ai-full} can access the resources it requires to function. 
- -include::modules/assisted-installer-assisted-installer-prerequisites.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="ai-saas-preparing--to-install-additional-resources_{context}"] -== Additional resources - -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.adoc#ipi-install-firmware-requirements-for-installing-with-virtual-media_ipi-install-prerequisites[Firmware requirements for installing with virtual media] - -* xref:../installing_bare_metal_ipi/ipi-install-prerequisites.html#network-requirements-increase-mtu_ipi-install-prerequisites[Increase the network MTU] diff --git a/installing/installing_on_prem_assisted/images b/installing/installing_on_prem_assisted/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_on_prem_assisted/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc b/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc deleted file mode 100644 index 336ca53b4041..000000000000 --- a/installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-on-prem-assisted"] -= Installing an on-premise cluster using the {ai-full} -include::_attributes/common-attributes.adoc[] -:context: installing-on-prem-assisted - -toc::[] - -You can install {product-title} on on-premise hardware or on-premise VMs using the {ai-full}. Installing {product-title} using the {ai-full} supports x86_64, AArch64, ppc64le, and s390x CPU architectures. - -[NOTE] -==== -Installing {product-title} on {ibmzProductName} (s390x) is supported only with RHEL KVM installations. -==== - -include::modules/assisted-installer-using-the-assisted-installer.adoc[leveloffset=+1] - -[id="assisted-installer-api-support-policy"] -== API support for the {ai-full} - -Supported APIs for the {ai-full} are stable for a minimum of three months from the announcement of deprecation. 
diff --git a/installing/installing_on_prem_assisted/modules b/installing/installing_on_prem_assisted/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_on_prem_assisted/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_on_prem_assisted/snippets b/installing/installing_on_prem_assisted/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_on_prem_assisted/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_openstack/_attributes b/installing/installing_openstack/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_openstack/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_openstack/images b/installing/installing_openstack/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_openstack/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc b/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc deleted file mode 100644 index 83cb9f26fe91..000000000000 --- a/installing/installing_openstack/installing-openstack-cloud-config-reference.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-cloud-config-reference"] -= OpenStack Cloud Controller Manager reference guide -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-cloud-config-reference - -toc::[] - -include::modules/nw-openstack-external-ccm.adoc[leveloffset=+1] -include::modules/cluster-cloud-controller-config-osp.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/installing-openstack-installer-custom.adoc b/installing/installing_openstack/installing-openstack-installer-custom.adoc deleted file mode 100644 index 48c5ae2c18cb..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-custom.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-custom"] -= Installing a cluster on OpenStack with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-custom - -toc::[] - -In {product-title} version {product-version}, you can install a customized cluster on -{rh-openstack-first}. To customize the installation, modify parameters in the `install-config.yaml` before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. 
-* You have a storage service installed in {rh-openstack}, such as block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* You have the metadata service enabled in {rh-openstack}. - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-registry-osp-creating-custom-pvc.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -See xref:../installing_openstack/installing-openstack-installer-custom.adoc#installation-configuration-parameters_installing-openstack-installer-custom[*Installation configuration parameters* section] for more information about the available parameters. - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. 
-==== - -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-external-lb-config.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc b/installing/installing_openstack/installing-openstack-installer-kuryr.adoc deleted file mode 100644 index c8e26373a009..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-kuryr.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-kuryr"] -= Installing a cluster on OpenStack with Kuryr -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-kuryr - -toc::[] - -:FeatureName: Kuryr -include::modules/deprecated-feature.adoc[leveloffset=+1] - -In {product-title} version {product-version}, you can install a customized cluster on -{rh-openstack-first} that uses Kuryr SDN. To customize the installation, modify parameters in the `install-config.yaml` before you install the cluster. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have a storage service installed in {rh-openstack}, such as block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-title} registry cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] - -You can xref:../../networking/load-balancing-openstack.adoc#installation-osp-kuryr-octavia-configure[configure your cluster to use the Octavia OVN driver] after your {rh-openstack} cloud is upgraded from version 13 to version 16. 
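The statement above assumes that the Octavia OVN provider is actually available in the {rh-openstack} deployment before you switch to it. One way to verify this, sketched here with purely illustrative output, is to list the load balancer providers with the OpenStack CLI:

[source,terminal]
----
$ openstack loadbalancer provider list
----

.Example output (illustrative)
[source,terminal]
----
+---------+-------------------------------+
| name    | description                   |
+---------+-------------------------------+
| amphora | The Octavia Amphora driver.   |
| ovn     | Octavia OVN driver.           |
+---------+-------------------------------+
----

If `ovn` does not appear in the list, the driver is not enabled in Octavia and the cluster cannot be switched to it.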
- -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-external-lb-config.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. -==== - -include::modules/installation-osp-kuryr-port-pools.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-settings-installing.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc b/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc deleted file mode 100644 index 702d8369c8b4..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-ovs-dpdk"] -= Installing a cluster on OpenStack that supports OVS-DPDK-connected compute machines -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-ovs-dpdk - -toc::[] - -If your {rh-openstack-first} deployment has Open vSwitch with the Data Plane Development Kit (OVS-DPDK) enabled, you can install an {product-title} cluster on it. Clusters that run on such {rh-openstack} deployments use OVS-DPDK features by providing access to link:https://doc.dpdk.org/guides/prog_guide/poll_mode_drv.html[poll mode drivers]. - -== Prerequisites - -* Review details about the -xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] -processes. -** Verify that {product-title} {product-version} is compatible with your {rh-openstack} version by using the "Supported platforms for OpenShift clusters" section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. - -* Have a storage service installed in {rh-openstack}, like block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-registry} cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -* Have the metadata service enabled in {rh-openstack}. - -* Plan your {rh-openstack} OVS-DPDK deployment by referring to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/assembly_ovsdpdk_parameters[Planning your OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. - -* Configure your {rh-openstack} OVS-DPDK deployment according to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure[Configuring an OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. 
- -** You must complete link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure#p-ovs-dpdk-flavor-deploy-instance[Creating a flavor and deploying an instance for OVS-DPDK] before you install a cluster on {rh-openstack}. - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -The cluster is operational. Before you can add OVS-DPDK compute machines though, you must perform additional tasks. - -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-binding-vfio-pci.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-exposing-host-interface.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../networking/multiple_networks/configuring-additional-network.adoc#nw-multus-host-device-object_configuring-additional-network[Creating an additional network attachment with the Cluster Network Operator] - -The cluster is installed and prepared for configuration. You must now perform the OVS-DPDK configuration tasks in <>. 
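The additional resource above points at creating an additional network attachment through the Cluster Network Operator. Purely as an orientation sketch, assuming a host device identified by the placeholder PCI address `0000:00:05.0` and a placeholder network name `dpdk-net` (neither value comes from this change), such an attachment is typically declared on the cluster `Network` operator configuration:

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  additionalNetworks:
  - name: dpdk-net # placeholder network name
    namespace: default
    type: Raw
    rawCNIConfig: |-
      {
        "cniVersion": "0.3.1",
        "name": "dpdk-net",
        "type": "host-device",
        "pciBusID": "0000:00:05.0"
      }
----

The `host-device` plugin with a `pciBusID` selector is what lets a vfio-pci-bound interface be handed to a pod; the exact values depend on how the DPDK interface was exposed on the node.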
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-installer-ovs-dpdk"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-openstack-installer-ovs-dpdk"] -== Next steps - -* To complete OVS-DPDK configuration for your cluster, xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure huge pages support]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer-restricted.adoc b/installing/installing_openstack/installing-openstack-installer-restricted.adoc deleted file mode 100644 index 2fb483d74035..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-restricted.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer-restricted"] -= Installing a cluster on OpenStack in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-restricted - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} in a restricted network by creating an internal mirror of the installation release content. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. 
-* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* You have the metadata service enabled in {rh-openstack}. - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-setting-cloud-provider-options.adoc[leveloffset=+1] -include::modules/installation-creating-image-restricted.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] -include::modules/installation-osp-restricted-config-yaml.adoc[leveloffset=+2] -// include::modules/installation-osp-setting-worker-affinity.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-installer.adoc b/installing/installing_openstack/installing-openstack-installer.adoc deleted file mode 100644 index 3dbb132d0a41..000000000000 --- a/installing/installing_openstack/installing-openstack-installer.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-installer"] -= Installing a cluster on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first}. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* On {rh-openstack}, you have access to an external network that does not overlap these CIDR ranges: -** `10.0.0.0/16` -** `172.30.0.0/16` -** `10.128.0.0/14` -+ -If the external network overlaps these ranges, go to xref:./installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. 
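For example, after you log in with the `oc` CLI, you can print the web console URL that the previous resource describes; this is an optional convenience check, not a required installation step:

[source,terminal]
----
$ oc whoami --show-console
----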
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. diff --git a/installing/installing_openstack/installing-openstack-load-balancing.adoc b/installing/installing_openstack/installing-openstack-load-balancing.adoc deleted file mode 100644 index 6b1b879a9508..000000000000 --- a/installing/installing_openstack/installing-openstack-load-balancing.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-load-balancing"] -= Load balancing deployments on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-load-balancing - -toc::[] - -include::modules/installation-osp-balancing-external-loads.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/installing-openstack-nfv-preparing.adoc b/installing/installing_openstack/installing-openstack-nfv-preparing.adoc deleted file mode 100644 index 25179b2de16a..000000000000 --- a/installing/installing_openstack/installing-openstack-nfv-preparing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-nfv-preparing"] -= Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-nfv-preparing - -toc::[] - -Before you install a {product-title} cluster that uses single-root I/O virtualization (SR-IOV) or Open vSwitch with the Data Plane Development Kit (OVS-DPDK) on {rh-openstack-first}, you must understand the requirements for each technology and then perform preparatory tasks. - -include::modules/installation-openstack-nfv-requirements.adoc[leveloffset=+1] -include::modules/installation-openstack-sr-iov-requirements.adoc[leveloffset=+2] -include::modules/installation-openstack-ovs-dpdk-requirements.adoc[leveloffset=+2] - -[id="installing-openstack-nfv-preparing-tasks-sr-iov"] -== Preparing to install a cluster that uses SR-IOV - -You must configure {rh-openstack} before you install a cluster that uses SR-IOV on it. - -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+2] - -[id="installing-openstack-nfv-preparing-tasks-ovs-dpdk"] -== Preparing to install a cluster that uses OVS-DPDK - -You must configure {rh-openstack} before you install a cluster that uses OVS-DPDK on it. - -* Complete link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure#p-ovs-dpdk-flavor-deploy-instance[Creating a flavor and deploying an instance for OVS-DPDK] before you install a cluster on {rh-openstack}. - -After you perform pre-installation tasks, install your cluster by following the most relevant {product-title} on {rh-openstack} installation instructions. Then, perform the tasks under "Next steps" on this page.
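For orientation only, creating a flavor for OVS-DPDK instances typically requests dedicated CPUs and large memory pages. The flavor name and sizing below are illustrative assumptions; follow the linked {rh-openstack} guide for the supported procedure:

[source,terminal]
----
$ openstack flavor create --ram 16384 --disk 25 --vcpus 8 <dpdk_flavor>
$ openstack flavor set <dpdk_flavor> \
    --property hw:cpu_policy=dedicated \
    --property hw:mem_page_size=large
----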
- -[id="next-steps_installing-openstack-nfv-preparing"] -== Next steps - -* For either type of deployment: -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure the Node Tuning Operator with huge pages support]. -* To complete SR-IOV configuration after you deploy your cluster: -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -** xref:../../networking/hardware_networks/configuring-sriov-device.adoc#nw-sriov-networknodepolicy-object_configuring-sriov-device[Configure your SR-IOV network device]. -** xref:../../machine_management/creating_machinesets/creating-machineset-osp.adoc#machineset-yaml-osp-sr-iov_creating-machineset-osp[Create SR-IOV compute machines]. -* Consult the following references after you deploy your cluster to improve its performance: -** xref:../../networking/hardware_networks/using-dpdk-and-rdma.adoc#nw-openstack-ovs-dpdk-testpmd-pod_using-dpdk-and-rdma[A test pod template for clusters that use OVS-DPDK on OpenStack]. -** xref:../../networking/hardware_networks/add-pod.adoc#nw-openstack-sr-iov-testpmd-pod_add-pod[A test pod template for clusters that use SR-IOV on OpenStack]. -** xref:../../scalability_and_performance/cnf-create-performance-profiles.adoc#installation-openstack-ovs-dpdk-performance-profile_cnf-create-performance-profiles[A performance profile template for clusters that use OVS-DPDK on OpenStack]. diff --git a/installing/installing_openstack/installing-openstack-troubleshooting.adoc b/installing/installing_openstack/installing-openstack-troubleshooting.adoc deleted file mode 100644 index ed463008ca06..000000000000 --- a/installing/installing_openstack/installing-openstack-troubleshooting.adoc +++ /dev/null @@ -1,53 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-troubleshooting - -toc::[] - -//Very much a WIP. Chop up sections into mod docs as they're finalized. - -If an installation of {product-title} on OpenStack fails, you can recover by understanding the likely failure modes and then troubleshooting the problem. - -== View OpenStack instance logs - -.Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. In a terminal window, run `openstack console log show <instance_name>`. - -The console logs appear. - -== SSH access to an instance - -.Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. Get the IP address of the node on the private network: -+ -[source,terminal] ----- -$ openstack server list | grep master ----- -+ -.Example output -[source,terminal] ----- -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | ----- - -. 
Connect to the instance by using the control plane machine that holds the API VIP (and API FIP) as a jump host: -+ -[source,terminal] ----- -$ ssh -J core@${FIP} core@<private_IP> ----- diff --git a/installing/installing_openstack/installing-openstack-user-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-kuryr.adoc deleted file mode 100644 index 00838b18c266..000000000000 --- a/installing/installing_openstack/installing-openstack-user-kuryr.adoc +++ /dev/null @@ -1,93 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-kuryr"] -= Installing a cluster on OpenStack with Kuryr on your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-kuryr - -toc::[] - -:FeatureName: Kuryr -include::modules/deprecated-feature.adoc[leveloffset=+1] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster].
-* On the machine from which you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] -include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. 
-==== - -include::modules/installation-osp-kuryr-port-pools.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-settings-installing.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc deleted file mode 100644 index 5659175b96cf..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov-kuryr"] -= Installing a cluster on OpenStack with Kuryr on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov-kuryr - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses SR-IOV networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. 
The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* Your network configuration does not rely on a provider network. Provider networks are not supported. -* You have a {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] 
-include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] - -To finish configuring SR-IOV for your cluster, complete the SR-IOV-related "Next steps" that follow the installation process. - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../post_installation_configuration/network-configuration.html#networking-osp-preparing-for-sr-iov_post-install-network-configuration[Prepare the cluster for SR-IOV]. -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.html#what-huge-pages-do_huge-pages[Install the performance operator with huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.html#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. 
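As a follow-up to the SR-IOV items in the preceding "Next steps" list, you can verify that the SR-IOV Network Operator is running after you install it. The namespace shown is the Operator's default installation namespace, and this check is illustrative only:

[source,terminal]
----
$ oc get pods -n openshift-sriov-network-operator
----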
diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov.adoc deleted file mode 100644 index af03deba0932..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc +++ /dev/null @@ -1,101 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov"] -= Installing a cluster on OpenStack on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses single-root input/output virtualization (SR-IOV) networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* Your network configuration does not rely on a provider network. Provider networks are not supported. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. 
-* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -Optionally, you can use the `inventory.yaml` file that you created to customize your installation. For example, you can deploy a cluster that uses bare metal machines. - -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -The cluster is operational. Before you can configure it for SR-IOV networks though, you must perform additional tasks. - -include::modules/networking-osp-preparing-for-sr-iov.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+2] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+2] - -[NOTE] -==== -After you apply the machine config to the machine pool, you can xref:../../post_installation_configuration/machine-configuration-tasks.html#checking-mco-status_post-install-machine-configuration-tasks[watch the machine config pool status] to see when the machines are available. 
-==== - -// TODO: If bullet one of Next steps is truly required for this flow, these topics (in full or in part) could be added here rather than linked to. -// This document is quite long, however, and operator installation and configuration should arguably remain in their their own assemblies. - -The cluster is installed and prepared for SR-IOV configuration. You must now perform the SR-IOV configuration tasks in "Next steps". - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cluster-telemetry"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-user-sr-iov"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.html#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-user-sr-iov"] -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.html#what-huge-pages-do_huge-pages[Configure huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.html#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -** xref:../../networking/hardware_networks/configuring-sriov-device.html#nw-sriov-networknodepolicy-object_configuring-sriov-device[Configure your SR-IOV network device]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. diff --git a/installing/installing_openstack/installing-openstack-user.adoc b/installing/installing_openstack/installing-openstack-user.adoc deleted file mode 100644 index 141336fa1b3f..000000000000 --- a/installing/installing_openstack/installing-openstack-user.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-openstack-user"] -= Installing a cluster on OpenStack on your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. 
The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine from which you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-failure-domains-config.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-provider-networks.adoc[leveloffset=+2] -include::modules/installation-osp-provider-network-preparation.adoc[leveloffset=+3] 
-include::modules/installation-osp-deploying-provider-networks-installer.adoc[leveloffset=+3] - -[TIP] -==== -You can add additional networks, including provider networks, to the `platform.openstack.additionalNetworkIDs` list. - -After you deploy your cluster, you can attach pods to additional networks. For more information, see xref:../../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[Understanding multiple networks]. -==== - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] - -Optionally, you can use the `inventory.yaml` file that you created to customize your installation. For example, you can deploy a cluster that uses bare metal machines. - -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* If you need to enable external access to node ports, xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[configure ingress cluster traffic by using a node port]. -* If you did not configure {rh-openstack} to accept application traffic over floating IP addresses, xref:../../post_installation_configuration/network-configuration.adoc#installation-osp-configuring-api-floating-ip_post-install-network-configuration[configure {rh-openstack} access with floating IP addresses]. 
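If you follow the floating IP item in the preceding "Next steps" list, the work usually starts with creating floating IP addresses on the external network. The network and DNS names below are placeholders, and the linked procedure remains the authoritative reference:

[source,terminal]
----
$ openstack floating ip create --description "API <cluster_name>.<base_domain>" <external_network>
$ openstack floating ip create --description "Ingress *.apps.<cluster_name>.<base_domain>" <external_network>
----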
diff --git a/installing/installing_openstack/modules b/installing/installing_openstack/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_openstack/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_openstack/preparing-to-install-on-openstack.adoc b/installing/installing_openstack/preparing-to-install-on-openstack.adoc deleted file mode 100644 index 8ecf10b5b272..000000000000 --- a/installing/installing_openstack/preparing-to-install-on-openstack.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-openstack"] -= Preparing to install on OpenStack -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-openstack - -toc::[] - -You can install {product-title} on {rh-openstack-first}. -ifdef::openshift-origin[{product-title} version {product-version} supports OpenStack Train.] - -[id="preparing-to-install-on-openstack-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-openstack"] -== Choosing a method to install {product-title} on OpenStack - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-openstack-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on {rh-openstack-first} infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_openstack/installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations]**: You can install a customized cluster on {rh-openstack}. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -* **xref:../../installing/installing_openstack/installing-openstack-installer-kuryr.adoc#installing-openstack-installer-kuryr[Installing a cluster on OpenStack with Kuryr]**: You can install a customized {product-title} cluster on {rh-openstack} that uses Kuryr SDN. Kuryr and {product-title} integration is primarily designed for {product-title} clusters running on {rh-openstack} VMs. Kuryr improves the network performance by plugging {product-title} pods into {rh-openstack} SDN. 
In addition, it provides interconnectivity between pods and {rh-openstack} virtual instances. - -* **xref:../../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[Installing a cluster on OpenStack in a restricted network]**: You can install {product-title} on {rh-openstack} in a restricted or disconnected network by creating an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. - -[id="choosing-an-method-to-install-ocp-on-openstack-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on {rh-openstack} infrastructure that you provision, by using one of the following methods: - -* **xref:../../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[Installing a cluster on OpenStack on your own infrastructure]**: You can install {product-title} on user-provisioned {rh-openstack} infrastructure. By using this installation method, you can integrate your cluster with existing infrastructure and modifications. For installations on user-provisioned infrastructure, you must create all {rh-openstack} resources, like Nova servers, Neutron ports, and security groups. You can use the provided Ansible playbooks to assist with the deployment process. - -* **xref:../../installing/installing_openstack/installing-openstack-user-kuryr.adoc#installing-openstack-user-kuryr[Installing a cluster on OpenStack with Kuryr on your own infrastructure]**: You can install {product-title} on user-provisioned {rh-openstack} infrastructure that uses Kuryr SDN. - -include::modules/security-osp-validating-certificates.adoc[leveloffset=+1] - -include::modules/security-osp-validating-certificates-manually.adoc[leveloffset=+2] - diff --git a/installing/installing_openstack/snippets b/installing/installing_openstack/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_openstack/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_openstack/uninstalling-cluster-openstack.adoc b/installing/installing_openstack/uninstalling-cluster-openstack.adoc deleted file mode 100644 index 05b70252f927..000000000000 --- a/installing/installing_openstack/uninstalling-cluster-openstack.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-openstack"] -= Uninstalling a cluster on OpenStack -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-openstack - -toc::[] - -You can remove a cluster that you deployed to {rh-openstack-first}. 
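The module that follows describes the complete procedure. At its core, removing an installer-provisioned cluster is driven by the installation program, along these lines, where the directory is a placeholder for the assets directory that you used during installation:

[source,terminal]
----
$ ./openshift-install destroy cluster --dir <installation_directory> --log-level info
----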
- -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/uninstalling-openstack-user.adoc b/installing/installing_openstack/uninstalling-openstack-user.adoc deleted file mode 100644 index e018cc6b1bf3..000000000000 --- a/installing/installing_openstack/uninstalling-openstack-user.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-openstack-user"] -= Uninstalling a cluster on {rh-openstack} from your own infrastructure -include::_attributes/common-attributes.adoc[] -:context: uninstalling-openstack-user - -toc::[] - -You can remove a cluster that you deployed to {rh-openstack-first} on user-provisioned infrastructure. - -// include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-uninstall-infra.adoc[leveloffset=+1] diff --git a/installing/installing_platform_agnostic/_attributes b/installing/installing_platform_agnostic/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_platform_agnostic/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/images b/installing/installing_platform_agnostic/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_platform_agnostic/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc b/installing/installing_platform_agnostic/installing-platform-agnostic.adoc deleted file mode 100644 index 1a2386a03a63..000000000000 --- a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc +++ /dev/null @@ -1,119 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-platform-agnostic"] -= Installing a cluster on any platform -include::_attributes/common-attributes.adoc[] -:context: installing-platform-agnostic - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -any infrastructure that you provision, including virtualization and cloud environments. - -[IMPORTANT] -==== -Review the information in the link:https://access.redhat.com/articles/4207611[guidelines for deploying {product-title} on non-tested platforms] before you attempt to install an {product-title} cluster in virtualized or cloud environments. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
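Among these requirements, user-provisioned DNS is the one most often validated by hand before installation begins. The following is a rough sketch of such a check, with all names as placeholders; the DNS validation module later in this assembly provides the complete steps:

[source,terminal]
----
$ dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain>
$ dig +noall +answer @<nameserver_ip> -x <api_ip_address>
----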
- -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/creating-machines-bare-metal.adoc[leveloffset=+1] - -include::modules/installation-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-advanced.adoc[leveloffset=+2] - -include::modules/installation-user-infra-machines-static-network.adoc[leveloffset=+3] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+2] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-baremetal.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc[leveloffset=+3] - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
-* xref:../../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Set up your registry and configure registry storage]. diff --git a/installing/installing_platform_agnostic/modules b/installing/installing_platform_agnostic/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_platform_agnostic/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_platform_agnostic/snippets b/installing/installing_platform_agnostic/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_platform_agnostic/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_rhv/_attributes b/installing/installing_rhv/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_rhv/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_rhv/images b/installing/installing_rhv/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_rhv/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_rhv/installing-rhv-customizations.adoc b/installing/installing_rhv/installing-rhv-customizations.adoc deleted file mode 100644 index ef10d50a20b1..000000000000 --- a/installing/installing_rhv/installing-rhv-customizations.adoc +++ /dev/null @@ -1,106 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-rhv-customizations"] -= Installing a cluster on {rh-virtualization} with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-rhv-customizations - -toc::[] - -You can customize and install an {product-title} cluster on {rh-virtualization-first}, similar to the one shown in the following diagram. - -ifndef::openshift-origin[] -image::92_OpenShift_Cluster_Install_RHV_0520.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] -ifdef::openshift-origin[] -image::193_OpenShift_Cluster_Install_updates_1021_oVirt.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] - -The installation program uses installer-provisioned infrastructure to automate creating and deploying the cluster. - -To install a customized cluster, you prepare the environment and perform the following steps: - -. Create an installation configuration file, the `install-config.yaml` file, by running the installation program and answering its prompts. -. Inspect and modify parameters in the `install-config.yaml` file. -. Make a working copy of the `install-config.yaml` file. -. Run the installation program with a copy of the `install-config.yaml` file. - -Then, the installation program creates the {product-title} cluster. - -For an alternative to installing a customized cluster, see xref:../../installing/installing_rhv/installing-rhv-default.adoc#installing-rhv-default[Installing a default cluster]. - -[NOTE] -==== -This installation program is available for Linux and macOS only. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You have a supported combination of versions in the link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization-first}]. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installing-rhv-requirements.adoc[leveloffset=+1] - -include::modules/installing-rhv-verifying-rhv-environment.adoc[leveloffset=+1] - -include::modules/installing-rhv-preparing-network-environment.adoc[leveloffset=+1] - -include::modules/installing-rhv-insecure-mode.adoc[leveloffset=+1] - -// include::modules/installing-rhv-setting-up-ca-certificate.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -include::modules/installing-rhv-example-install-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -[IMPORTANT] -==== -You have completed the steps required to install the cluster. The remaining steps show you how to verify the cluster and troubleshoot the installation. -==== - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -To learn more, see xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]. - -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] - -.Troubleshooting -If the installation fails, the installation program times out and displays an error message. To learn more, see -xref:../../installing/installing-troubleshooting.adoc#installing-troubleshooting[Troubleshooting installation issues]. - -include::modules/installing-rhv-accessing-ocp-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/installation-common-issues.adoc[leveloffset=+1] - -== Post-installation tasks - -After the {product-title} cluster initializes, you can perform the following tasks. - -* Optional: After deployment, add or replace SSH keys using the Machine Config Operator (MCO) in {product-title}. -* Optional: Remove the `kubeadmin` user. Instead, use the authentication provider to create a user with cluster-admin privileges. - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. 
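The customization flow described at the top of this assembly (create the `install-config.yaml` file, modify it, keep a working copy, then run the installer) can be sketched as follows. The directory name `<installation_directory>` and the log level are assumptions, not values mandated by the assembly:

[source,terminal]
----
# Generate the initial configuration file by answering the prompts.
$ ./openshift-install create install-config --dir <installation_directory>

# Edit install-config.yaml as needed, then keep a copy, because the
# installer consumes the file when it creates the cluster.
$ cp <installation_directory>/install-config.yaml install-config.yaml.backup

# Create the cluster from the customized configuration.
$ ./openshift-install create cluster --dir <installation_directory> --log-level=info
----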
diff --git a/installing/installing_rhv/installing-rhv-default.adoc b/installing/installing_rhv/installing-rhv-default.adoc deleted file mode 100644 index 1e76882bfc4a..000000000000 --- a/installing/installing_rhv/installing-rhv-default.adoc +++ /dev/null @@ -1,96 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-rhv-default"] -= Installing a cluster quickly on {rh-virtualization} -include::_attributes/common-attributes.adoc[] -:context: installing-rhv-default - -toc::[] - -You can quickly install a default, non-customized, {product-title} cluster on a {rh-virtualization-first} cluster, similar to the one shown in the following diagram. - -ifndef::openshift-origin[] -image::92_OpenShift_Cluster_Install_RHV_0520.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] -ifdef::openshift-origin[] -image::193_OpenShift_Cluster_Install_updates_1021_oVirt.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] - -The installation program uses installer-provisioned infrastructure to automate creating and deploying the cluster. - -To install a default cluster, you prepare the environment, run the installation program and answer its prompts. Then, the installation program creates the {product-title} cluster. - -For an alternative to installing a default cluster, see xref:../../installing/installing_rhv/installing-rhv-customizations.adoc#installing-rhv-customizations[Installing a cluster with customizations]. - -[NOTE] -==== -This installation program is available for Linux and macOS only. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have a supported combination of versions in the link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization-first}]. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installing-rhv-requirements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_rhv/installing-rhv-customizations.adoc#installing-rhv-example-install-config-yaml_installing-rhv-customizations[Example: Removing all affinity groups for a non-production lab setup]. - -include::modules/installing-rhv-verifying-rhv-environment.adoc[leveloffset=+1] - -include::modules/installing-rhv-preparing-network-environment.adoc[leveloffset=+1] - -include::modules/installing-rhv-insecure-mode.adoc[leveloffset=+1] - -// include::modules/installing-rhv-setting-up-ca-certificate.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -[IMPORTANT] -==== -You have completed the steps required to install the cluster. The remaining steps show you how to verify the cluster and troubleshoot the installation. 
-==== - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -To learn more, see xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#getting-started-cli[Getting started with the OpenShift CLI]. - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] - -.Troubleshooting -If the installation fails, the installation program times out and displays an error message. To learn more, see -xref:../../installing/installing-troubleshooting.adoc#installing-troubleshooting[Troubleshooting installation issues]. - -include::modules/installing-rhv-accessing-ocp-web-console.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/installation-common-issues.adoc[leveloffset=+1] - -== Post-installation tasks -After the {product-title} cluster initializes, you can perform the following tasks. - -* Optional: After deployment, add or replace SSH keys using the Machine Config Operator (MCO) in {product-title}. -* Optional: Remove the `kubeadmin` user. Instead, use the authentication provider to create a user with cluster-admin privileges. diff --git a/installing/installing_rhv/installing-rhv-restricted-network.adoc b/installing/installing_rhv/installing-rhv-restricted-network.adoc deleted file mode 100644 index 1ba8012fbb5f..000000000000 --- a/installing/installing_rhv/installing-rhv-restricted-network.adoc +++ /dev/null @@ -1,93 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-rhv-restricted-network"] -= Installing a cluster on {rh-virtualization} in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-rhv-restricted-network - -toc::[] - -In {product-title} version {product-version}, you can install a -customized {product-title} cluster on {rh-virtualization-first} in a restricted network by creating an internal mirror of the installation release content. - -== Prerequisites - -The following items are required to install an {product-title} cluster on a {rh-virtualization} environment. - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You have a supported combination of versions in the link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization}]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. 
-==== -+ -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide ReadWriteMany access modes. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installing-rhv-requirements.adoc[leveloffset=+1] - -include::modules/installing-rhv-verifying-rhv-environment.adoc[leveloffset=+1] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+1] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installing-rhv-setting-up-installation-machine.adoc[leveloffset=+1] - -include::modules/installing-rhv-setting-up-ca-certificate.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-rhv-downloading-ansible-playbooks.adoc[leveloffset=+1] - -include::modules/installation-rhv-about-inventory-yml.adoc[leveloffset=+1] - -include::modules/installation-rhv-specifying-rhcos-image-settings.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-install-config-file.adoc[leveloffset=+1] - -include::modules/installation-bare-metal-config-yaml.adoc[leveloffset=+1] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/installation-rhv-customizing-install-config-yaml.adoc[leveloffset=+1] - -include::modules/installation-rhv-editing-manifests.adoc[leveloffset=+1] - -include::modules/installation-rhv-making-control-plane-nodes-non-schedulable.adoc[leveloffset=+1] - -include::modules/installation-rhv-building-ignition-files.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-templates-virtual-machines.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-bootstrap-machine.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-control-plane-nodes.adoc[leveloffset=+1] - -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] - -include::modules/installation-rhv-removing-bootstrap-machine.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-worker-nodes-completing-installation.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] diff --git a/installing/installing_rhv/installing-rhv-user-infra.adoc b/installing/installing_rhv/installing-rhv-user-infra.adoc deleted file mode 100644 index 19df54e4f3a8..000000000000 --- a/installing/installing_rhv/installing-rhv-user-infra.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-rhv-user-infra"] -= Installing a cluster on {rh-virtualization} with user-provisioned infrastructure 
-include::_attributes/common-attributes.adoc[] -:context: installing-rhv-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a -customized {product-title} cluster on {rh-virtualization-first} and other infrastructure that you provide. The {product-title} documentation uses the term _user-provisioned infrastructure_ to refer to this infrastructure type. - -The following diagram shows an example of a potential {product-title} cluster running on a {rh-virtualization} cluster. - -ifndef::openshift-origin[] -image::92_OpenShift_Cluster_Install_RHV_0520.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] -ifdef::openshift-origin[] -image::193_OpenShift_Cluster_Install_updates_1021_oVirt.png[Diagram of an {product-title} cluster on a {rh-virtualization} cluster] -endif::openshift-origin[] - -The {rh-virtualization} hosts run virtual machines that contain both control plane and compute pods. One of the hosts also runs a {rh-virtualization-engine-name} virtual machine and a bootstrap virtual machine that contains a temporary control plane pod. - -== Prerequisites - -The following items are required to install an {product-title} cluster on a {rh-virtualization} environment. - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You have a supported combination of versions in the link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization-first}].
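This assembly includes the standard SSH agent module. As a minimal sketch, where the key type, the file name, and the empty passphrase are illustrative choices rather than requirements, preparing a key for access to the cluster nodes looks like this:

[source,terminal]
----
# Create a key pair for SSH access to the cluster nodes.
$ ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519

# Start the agent and add the key; the public key is later supplied
# as the sshKey value in install-config.yaml.
$ eval "$(ssh-agent -s)"
$ ssh-add ~/.ssh/id_ed25519
----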
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installing-rhv-requirements.adoc[leveloffset=+1] - -include::modules/installing-rhv-verifying-rhv-environment.adoc[leveloffset=+1] - -//include::modules/installing-rhv-network-infrastructure-configuration-upi.adoc[leveloffset=+1] - -include::modules/installation-network-user-infra.adoc[leveloffset=+1] - -include::modules/installing-rhv-setting-up-installation-machine.adoc[leveloffset=+1] - -include::modules/installing-rhv-insecure-mode.adoc[leveloffset=+1] - -// include::modules/installing-rhv-setting-up-ca-certificate.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-rhv-downloading-ansible-playbooks.adoc[leveloffset=+1] - -include::modules/installation-rhv-about-inventory-yml.adoc[leveloffset=+1] - -include::modules/installation-rhv-specifying-rhcos-image-settings.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-install-config-file.adoc[leveloffset=+1] - -include::modules/installation-rhv-customizing-install-config-yaml.adoc[leveloffset=+1] - -include::modules/installation-rhv-editing-manifests.adoc[leveloffset=+1] - -include::modules/installation-rhv-making-control-plane-nodes-non-schedulable.adoc[leveloffset=+1] - -include::modules/installation-rhv-building-ignition-files.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-templates-virtual-machines.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-bootstrap-machine.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-control-plane-nodes.adoc[leveloffset=+1] - -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] - -include::modules/installation-rhv-removing-bootstrap-machine.adoc[leveloffset=+1] - -include::modules/installation-rhv-creating-worker-nodes-completing-installation.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service diff --git a/installing/installing_rhv/modules b/installing/installing_rhv/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_rhv/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_rhv/preparing-to-install-on-rhv.adoc b/installing/installing_rhv/preparing-to-install-on-rhv.adoc deleted file mode 100644 index ab511c7c3809..000000000000 --- a/installing/installing_rhv/preparing-to-install-on-rhv.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-rhv"] -= Preparing to install on {rh-virtualization-first} -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-rhv - -toc::[] - -[id="preparing-to-install-on-rhv-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have a supported combination of versions in the link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization-first}]. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -[id="choosing-an-method-to-install-ocp-on-rhv"] -== Choosing a method to install {product-title} on {rh-virtualization} - -You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See xref:../../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[id="choosing-an-method-to-install-ocp-on-rhv-installer-provisioned"] -=== Installing a cluster on installer-provisioned infrastructure - -You can install a cluster on {rh-virtualization-first} virtual machines that are provisioned by the {product-title} installation program, by using one of the following methods: - -* **xref:../../installing/installing_rhv/installing-rhv-default.adoc#installing-rhv-default[Installing a cluster quickly on {rh-virtualization}]**: You can quickly install {product-title} on {rh-virtualization} virtual machines that the {product-title} installation program provisions. - -* **xref:../../installing/installing_rhv/installing-rhv-customizations.adoc#installing-rhv-customizations[Installing a cluster on {rh-virtualization} with customizations]**: You can install a customized {product-title} cluster on installer-provisioned guests on {rh-virtualization}. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. - -[id="choosing-an-method-to-install-ocp-on-rhv-user-provisioned"] -=== Installing a cluster on user-provisioned infrastructure - -You can install a cluster on {rh-virtualization} virtual machines that you provision, by using one of the following methods: - -* **xref:../../installing/installing_rhv/installing-rhv-user-infra.adoc#installing-rhv-user-infra[Installing a cluster on {rh-virtualization} with user-provisioned infrastructure]**: You can install {product-title} on {rh-virtualization} virtual machines that you provision. You can use the provided Ansible playbooks to assist with the installation. - -* **xref:../../installing/installing_rhv/installing-rhv-restricted-network.adoc#installing-rhv-restricted-network[Installing a cluster on {rh-virtualization} in a restricted network]**: You can install {product-title} on {rh-virtualization} in a restricted or disconnected network by creating an internal mirror of the installation release content. You can use this method to install a user-provisioned cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. 
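The restricted network option above depends on an internal mirror of the installation release content. A hedged sketch of that mirroring step follows; the registry host, repository name, release version, and pull secret path are all placeholders, and `oc adm release mirror` is only one supported way to populate the mirror:

[source,terminal]
----
# Mirror the release payload into a registry that the disconnected
# cluster can reach (all values below are placeholders).
$ oc adm release mirror -a <pull_secret.json> \
    --from=quay.io/openshift-release-dev/ocp-release:<version>-x86_64 \
    --to=<mirror_registry>/<repository> \
    --to-release-image=<mirror_registry>/<repository>:<version>

# The command output includes imageContentSources entries to add to
# install-config.yaml.
----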
diff --git a/installing/installing_rhv/snippets b/installing/installing_rhv/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_rhv/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_rhv/uninstalling-cluster-rhv.adoc b/installing/installing_rhv/uninstalling-cluster-rhv.adoc deleted file mode 100644 index f1d2e0d29023..000000000000 --- a/installing/installing_rhv/uninstalling-cluster-rhv.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-rhv"] -= Uninstalling a cluster on {rh-virtualization} -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-rhv - -toc::[] - -You can remove an {product-title} cluster from {rh-virtualization-first}. - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] - -include::modules/installation-rhv-removing-cluster-upi.adoc[leveloffset=+1] diff --git a/installing/installing_sno/_attributes b/installing/installing_sno/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_sno/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_sno/images b/installing/installing_sno/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_sno/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_sno/install-sno-installing-sno.adoc b/installing/installing_sno/install-sno-installing-sno.adoc deleted file mode 100644 index 8ba9c17c304c..000000000000 --- a/installing/installing_sno/install-sno-installing-sno.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="install-sno-installing-sno"] -= Installing OpenShift on a single node -:context: install-sno-installing-sno-with-the-assisted-installer -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can install {sno} using the web-based Assisted Installer and a discovery ISO that you generate using the Assisted Installer. You can also install {sno} by using `coreos-installer` to generate the installation ISO. - -ifndef::openshift-origin[] - -== Installing {sno} using the Assisted Installer - -To install {product-title} on a single node, use the web-based Assisted Installer wizard to guide you through the process and manage the installation. 
- -include::modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#persistent-storage-using-lvms_logical-volume-manager-storage[Persistent storage using logical volume manager storage] -* xref:../../virt/about_virt/about-virt.adoc#virt-what-you-can-do-with-virt_about-virt[What you can do with OpenShift Virtualization] - -include::modules/install-sno-installing-with-the-assisted-installer.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#installing-with-usb-media_install-sno-installing-sno-with-the-assisted-installer[Creating a bootable ISO image on a USB drive] - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-booting-from-an-iso-over-http-redfish_install-sno-installing-sno-with-the-assisted-installer[Booting from an HTTP-hosted ISO image using the Redfish API] - -* xref:../../nodes/nodes/nodes-sno-worker-nodes.adoc#nodes-sno-worker-nodes[Adding worker nodes to {sno} clusters] - -endif::openshift-origin[] - -[id="install-sno-installing-sno-manually"] -== Installing {sno} manually - -To install {product-title} on a single node, first generate the installation ISO, and then boot the server from the ISO. You can monitor the installation using the `openshift-install` installation program. - -include::modules/install-sno-generating-the-install-iso-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../post_installation_configuration/enabling-cluster-capabilities.adoc[Enabling cluster capabilities] for more information about enabling cluster capabilities that were disabled prior to installation. -* See xref:../../installing/cluster-capabilities.html#explanation_of_capabilities_cluster-capabilities[Optional cluster capabilities in OpenShift Container Platform {product-title} {product-version}] for more information about the features provided by each capability. 
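For the manual {sno} flow above, a minimal sketch of producing the bootable installation ISO follows. The working directory `ocp`, the downloaded `rhcos-live.iso` file name, the renamed Ignition file, and the assumption that the `coreos-installer` utility is installed locally are all illustrative choices, not exact requirements:

[source,terminal]
----
# Generate the single-node (bootstrap-in-place) Ignition configuration.
$ ./openshift-install --dir=ocp create single-node-ignition-config

# Embed the generated Ignition config into a downloaded RHCOS live ISO.
$ cp ocp/bootstrap-in-place-for-live-iso.ign iso.ign
$ coreos-installer iso ignition embed -fi iso.ign rhcos-live.iso
----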
- -include::modules/install-sno-monitoring-the-installation-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#installing-with-usb-media_install-sno-installing-sno-with-the-assisted-installer[Creating a bootable ISO image on a USB drive] - -* xref:../../installing/installing_sno/install-sno-installing-sno.adoc#install-booting-from-an-iso-over-http-redfish_install-sno-installing-sno-with-the-assisted-installer[Booting from an HTTP-hosted ISO image using the Redfish API] - -* xref:../../nodes/nodes/nodes-sno-worker-nodes.adoc#nodes-sno-worker-nodes[Adding worker nodes to {sno} clusters] - -== Installing {sno} on AWS - -include::modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc[leveloffset=+2] - -include::modules/installation-aws_con_installing-sno-on-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_aws/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] - -include::modules/install-sno-installing-with-usb-media.adoc[leveloffset=+1] - -include::modules/install-booting-from-an-iso-over-http-redfish.adoc[leveloffset=+1] - -include::modules/creating-custom-live-rhcos-iso.adoc[leveloffset=+1] diff --git a/installing/installing_sno/install-sno-preparing-to-install-sno.adoc b/installing/installing_sno/install-sno-preparing-to-install-sno.adoc deleted file mode 100644 index e878cc67f6a7..000000000000 --- a/installing/installing_sno/install-sno-preparing-to-install-sno.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-sno"] -= Preparing to install on a single node -:context: install-sno-preparing -include::_attributes/common-attributes.adoc[] - -toc::[] - -[id="preparing-to-install-sno_{context}"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You have read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
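Several of the xrefs above point at creating a bootable ISO image on a USB drive. As a hedged sketch, where `/dev/sdX` is a placeholder that must be replaced with the actual, unmounted USB device, that step typically reduces to:

[source,terminal]
----
# Identify the USB device first; writing to the wrong device destroys its data.
$ lsblk

# Write the live ISO to the USB device (placeholder device name).
$ sudo dd if=rhcos-live.iso of=/dev/sdX bs=4M status=progress conv=fsync
----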
- -include::modules/install-sno-about-installing-on-a-single-node.adoc[leveloffset=+1] - -include::modules/install-sno-requirements-for-installing-on-a-single-node.adoc[leveloffset=+1] diff --git a/installing/installing_sno/modules b/installing/installing_sno/modules deleted file mode 120000 index 8b0e8540076d..000000000000 --- a/installing/installing_sno/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules \ No newline at end of file diff --git a/installing/installing_sno/snippets b/installing/installing_sno/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_sno/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc b/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc deleted file mode 100644 index 3e0c90912326..000000000000 --- a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc +++ /dev/null @@ -1,159 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-vmc-user-infra"] -= Installing a cluster on VMC in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-vmc-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision in a restricted network by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtain the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer -to complete all installation steps. -==== -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-restricted-networks-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-restricted-networks-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
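The commented-out lines above allude to extracting the installation program from the mirrored release content. As a hedged sketch, where the registry, repository, release tag, and pull secret path are placeholders and the tag must match what was actually mirrored, that extraction typically looks like:

[source,terminal]
----
# Pull the openshift-install binary out of the mirrored release image.
$ oc adm release extract -a <pull_secret.json> \
    --command=openshift-install \
    <mirror_registry>/<repository>:<version>-x86_64
----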
- -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring registry storage for VMware vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams] for the Cluster Samples Operator and the `must-gather` tool. -* Learn how to xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[use Operator Lifecycle Manager (OLM) on restricted networks]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. 
-* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc b/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc deleted file mode 100644 index f707a1b2453d..000000000000 --- a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc +++ /dev/null @@ -1,142 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vmc-network-customizations-user-infra"] -= Installing a cluster on VMC with user-provisioned infrastructure and network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vmc-network-customizations-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your VMware vSphere instance using infrastructure you provision with customized network configuration options by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing VXLAN configurations. You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. 
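Because this assembly is specifically about network customizations that must be set before installation, a hedged sketch of supplying an advanced network configuration through a manifest follows. The MTU value and the choice of OVN-Kubernetes are illustrative only, and the file name `cluster-network-03-config.yml` follows the convention used elsewhere in the {product-title} installation documentation:

[source,terminal]
----
# Create the manifests, then add a Cluster Network Operator manifest
# with the advanced settings (values below are illustrative only).
$ ./openshift-install create manifests --dir <installation_directory>
$ cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      mtu: 1400
EOF
----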
- -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-network-customizations-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-network-customizations-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// Network Operator specific configuration - -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - 
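One of the modules included above extracts the infrastructure ID, which is used as a prefix for machine names and other cluster resources in the UPI flow. A minimal sketch, assuming `jq` is installed and the Ignition config files have already been generated in `<installation_directory>`:

[source,terminal]
----
# The infrastructure ID is recorded in metadata.json after the
# Ignition config files are generated.
$ jq -r .infraID <installation_directory>/metadata.json
----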
-include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vmc/installing-vmc-user-infra.adoc b/installing/installing_vmc/installing-vmc-user-infra.adoc deleted file mode 100644 index 39e12390e130..000000000000 --- a/installing/installing_vmc/installing-vmc-user-infra.adoc +++ /dev/null @@ -1,149 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vmc-user-infra"] -= Installing a cluster on VMC with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-vmc-user-infra -:platform: VMC - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. 
The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. 
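The requirements section above leads into modules on certificate signing request management. Here is a hedged sketch of approving pending node CSRs after the machines boot; the one-liner form is optional, and the usual caveat applies that you should approve only CSRs for machines you recognize:

[source,terminal]
----
# List certificate signing requests; new nodes show up as Pending.
$ oc get csr

# Approve a single request by name, or approve all pending requests.
$ oc adm certificate approve <csr_name>
$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve
----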
- -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. 
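For quick reference, the following is a minimal sketch of pointing the Image Registry Operator at an existing persistent volume claim. It assumes a claim named `image-registry-storage` already exists in the `openshift-image-registry` namespace; the claim name is a placeholder, and the xref above remains the authoritative procedure.

[source,terminal]
----
# Reference an existing PVC from the image registry configuration (claim name is an example).
$ oc patch configs.imageregistry.operator.openshift.io cluster \
    --type merge \
    --patch '{"spec":{"storage":{"pvc":{"claim":"image-registry-storage"}}}}'
----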
- -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/_attributes b/installing/installing_vsphere/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_vsphere/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_vsphere/images b/installing/installing_vsphere/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_vsphere/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_vsphere/installation-config-parameters-vsphere.adoc b/installing/installing_vsphere/installation-config-parameters-vsphere.adoc deleted file mode 100644 index 349eaddb50bd..000000000000 --- a/installing/installing_vsphere/installation-config-parameters-vsphere.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="installation-config-parameters-vsphere"] -= Installation configuration parameters for vSphere -include::_attributes/common-attributes.adoc[] -:context: installation-config-parameters-vsphere -:platform: vSphere - -toc::[] - -Before you deploy an {product-title} cluster on vSphere, you provide parameters to customize your cluster and the platform that hosts it. When you create the `install-config.yaml` file, you provide values for the required parameters through the command line. You can then modify the `install-config.yaml` file to customize your cluster further. 
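As a rough sketch of the workflow that this paragraph describes, you first generate the file interactively and then edit it; the directory name is a placeholder.

[source,terminal]
----
# Answer the prompts (platform, vCenter details, pull secret), then edit the resulting install-config.yaml.
$ ./openshift-install create install-config --dir <installation_directory>
----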
- -include::modules/installation-configuration-parameters.adoc[leveloffset=+1] diff --git a/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc b/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc deleted file mode 100644 index 3a65dc5ecb5d..000000000000 --- a/installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc +++ /dev/null @@ -1,116 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-installer-provisioned-vsphere"] -= Installing a cluster on vSphere in a restricted network -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-installer-provisioned-vsphere - -toc::[] - -In {product-title} {product-version}, you can install a cluster on VMware vSphere infrastructure in a restricted network by creating an internal mirror of the installation release content. - -include::snippets/vcenter-support.adoc[] - -[id="prerequisites_installing-restricted-networks-installer-provisioned-vsphere"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide the ReadWriteMany access mode. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -If you are configuring a proxy, be sure to also review this site list. -==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-creating-image-restricted.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+1] - -[id="installing-vsphere-restricted-networks-installer-provisioned-customizations-registry"] -== Creating registry storage - -After you install the cluster, you must create storage for the Registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -[id="next-steps_installing-restricted-networks-installer-provisioned-vsphere"] -== Next steps - -* xref:../../installing/install_config/installing-customizing.adoc#installing-customizing[Customize your cluster]. 
-* If necessary, you can xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. diff --git a/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc b/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc deleted file mode 100644 index a12d4c7c77a6..000000000000 --- a/installing/installing_vsphere/installing-restricted-networks-vsphere.adoc +++ /dev/null @@ -1,181 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-networks-vsphere"] -= Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-vsphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision in a restricted network. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtained the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer to complete all installation steps. -==== -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
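The `ssh-agent-using` module included above covers key generation and agent setup in full; the following is a minimal sketch that assumes the default key path and no passphrase.

[source,terminal]
----
# Create an ed25519 key, start the agent, and load the key so the installer can embed the public key.
$ ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_ed25519
$ eval "$(ssh-agent -s)"
$ ssh-add ~/.ssh/id_ed25519
----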
- -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-special-config-chrony.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. 
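New compute machines request node certificates that you must approve, as the `installation-approve-csrs` module included above describes. A sketch of the typical commands follows; `<csr_name>` is a placeholder.

[source,terminal]
----
# List certificate signing requests and approve one by name.
$ oc get csr
$ oc adm certificate approve <csr_name>

# Alternatively, approve every CSR that is still pending.
$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve
----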
- -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If the mirror registry that you used to install your cluster has a trusted CA, add it to the cluster by xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[configuring additional trust stores]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc deleted file mode 100644 index bc9ad19e0fac..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc +++ /dev/null @@ -1,113 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned-customizations"] -= Installing a cluster on vSphere with customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned-customizations -:platform: vSphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide `ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. 
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-customizations-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. 
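The `registry-removed` module included below explains that the Image Registry Operator starts in the `Removed` state on platforms such as vSphere that do not provide default shareable storage. As a hedged sketch, you can check the state and, only after storage is configured, switch it to `Managed`:

[source,terminal]
----
# Check the current management state of the image registry.
$ oc get configs.imageregistry.operator.openshift.io cluster -o jsonpath='{.spec.managementState}'

# After registry storage is configured, set the registry to Managed.
$ oc patch configs.imageregistry.operator.openshift.io cluster \
    --type merge --patch '{"spec":{"managementState":"Managed"}}'
----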
- -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc deleted file mode 100644 index 73dcae9de0b7..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc +++ /dev/null @@ -1,124 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned-network-customizations"] -= Installing a cluster on vSphere with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure with customized network configuration options. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. - -You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
-* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, confirm with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. 
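This assembly also pulls in the vSphere regions and zones modules (see the includes that follow). For orientation only, a minimal sketch of the failure domain layout that they configure in `install-config.yaml` is shown below; it assumes the 4.13-era `vcenters`/`failureDomains` schema, and every server name, object path, and credential is a placeholder, so verify the field names against the configuration parameters reference linked above.

[source,yaml]
----
platform:
  vsphere:
    vcenters:
    - server: vcenter.example.com          # placeholder vCenter server
      user: administrator@vsphere.local
      password: <password>
      datacenters:
      - us-east-dc
    failureDomains:
    - name: us-east-1
      region: us-east
      zone: us-east-1a
      server: vcenter.example.com
      topology:
        datacenter: us-east-dc
        computeCluster: "/us-east-dc/host/zone-1-cluster"
        networks:
        - VM_Network
        datastore: "/us-east-dc/datastore/datastore-1"
----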
- -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-installer-provisioned-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// begin network customization -include::modules/nw-network-config.adoc[leveloffset=+1] -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] -// end network customization - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-network-customizations-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. 
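The network customization modules included above (`nw-network-config`, `nw-modifying-operator-install-config`, and `nw-operator-cr`) describe supplying a Cluster Network Operator manifest before you start the installation. The following is a minimal sketch that assumes you have already run `./openshift-install create manifests --dir <installation_directory>` and that an OVN-Kubernetes MTU override is the only customization you want; the MTU value and file path are placeholders.

[source,yaml]
----
# <installation_directory>/manifests/cluster-network-03-config.yml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      mtu: 1400        # example override; omit to keep the detected default
----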
- -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] -include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. diff --git a/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc b/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc deleted file mode 100644 index f8f677432d59..000000000000 --- a/installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-installer-provisioned"] -= Installing a cluster on vSphere -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-installer-provisioned - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on your -VMware vSphere instance by using installer-provisioned infrastructure. - -include::snippets/vcenter-support.adoc[] - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide -`ReadWriteMany` access modes. -* The {product-title} installer requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/installation-vsphere-installer-network-requirements.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-adding-vcenter-root-certificates.adoc[leveloffset=+1] - -include::modules/installation-launching-installer.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[id="installing-vsphere-installer-provisioned-registry"] -== Creating registry storage -After you install the cluster, you must create storage for the registry Operator. - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -include::modules/nw-osp-configuring-external-load-balancer.adoc[leveloffset=+1] - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. 
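The `installation-launching-installer` module included in the assembly above covers deploying the cluster. As a rough sketch, the core command looks like the following; the directory name is a placeholder and must contain your `install-config.yaml`.

[source,terminal]
----
# Deploy the cluster and stream informational logs.
$ ./openshift-install create cluster --dir <installation_directory> --log-level=info
----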
diff --git a/installing/installing_vsphere/installing-vsphere-network-customizations.adoc b/installing/installing_vsphere/installing-vsphere-network-customizations.adoc deleted file mode 100644 index cb6384439845..000000000000 --- a/installing/installing_vsphere/installing-vsphere-network-customizations.adoc +++ /dev/null @@ -1,168 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-network-customizations"] -= Installing a cluster on vSphere with network customizations -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-network-customizations - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision with customized network -configuration options. By customizing your network configuration, your cluster -can coexist with existing IP address allocations in your environment and -integrate with existing MTU and VXLAN configurations. - -You must set most of the network configuration parameters during installation, -and you can modify only `kubeProxy` configuration parameters in a running -cluster. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. Verify that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. 
- -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// Network Operator specific configuration -include::modules/nw-network-config.adoc[leveloffset=+1] 
-include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+2] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. 
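The `installation-extracting-infraid` module included in the assembly above identifies the infrastructure ID that prefixes the cluster's vSphere resources. A quick sketch of the usual approach, with the directory name as a placeholder:

[source,terminal]
----
# Print the infrastructure ID from the installation metadata.
$ jq -r .infraID <installation_directory>/metadata.json
----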
diff --git a/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc b/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc deleted file mode 100644 index 4a2dc0822686..000000000000 --- a/installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-post-installation-configuration"] -= Configuring the vSphere connection settings after an installation -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-post-installation-configuration - - -After installing an {product-title} cluster on vSphere with the platform integration feature enabled, you might need to update the vSphere connection settings manually, depending on the installation method. - -For installations using the Assisted Installer, you must update the connection settings. This is because the Assisted Installer adds default connection settings to the *vSphere connection configuration* wizard as placeholders during the installation. - -For installer-provisioned or user-provisioned infrastructure installations, you should have entered valid connection settings during the installation. You can use the *vSphere connection configuration* wizard at any time to validate or modify the connection settings, but this is not mandatory for completing the installation. - -toc::[] - -include::modules/configuring-vsphere-connection-settings.adoc[leveloffset=+1] - -include::modules/configuring-vsphere-verifying-configuration.adoc[leveloffset=+1] - -For instructions on creating storage objects, see xref:../../storage/dynamic-provisioning.adoc#dynamic-provisioning[Dynamic provisioning]. diff --git a/installing/installing_vsphere/installing-vsphere-three-node.adoc b/installing/installing_vsphere/installing-vsphere-three-node.adoc deleted file mode 100644 index 67378f814fff..000000000000 --- a/installing/installing_vsphere/installing-vsphere-three-node.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere-three-node"] -= Installing a three-node cluster on vSphere -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere-three-node - -toc::[] - -In {product-title} version {product-version}, you can install a three-node cluster on VMware vSphere. A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient cluster for cluster administrators and developers to use for testing, development, and production. - -You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure.
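For orientation, a minimal sketch of the `install-config.yaml` settings that distinguish a three-node cluster follows; it assumes the usual pattern of zero compute replicas, and all other required fields are omitted.

[source,yaml]
----
apiVersion: v1
compute:
- name: worker
  platform: {}
  replicas: 0          # no dedicated compute machines
controlPlane:
  name: master
  platform: {}
  replicas: 3          # the control plane machines also run workloads
----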
- -include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] - -== Next steps -* xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[Installing a cluster on vSphere with customizations] -* xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[Installing a cluster on vSphere with user-provisioned infrastructure] diff --git a/installing/installing_vsphere/installing-vsphere.adoc b/installing/installing_vsphere/installing-vsphere.adoc deleted file mode 100644 index a3809c51185f..000000000000 --- a/installing/installing_vsphere/installing-vsphere.adoc +++ /dev/null @@ -1,171 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-vsphere"] -= Installing a cluster on vSphere with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-vsphere -:platform: vSphere - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -VMware vSphere infrastructure that you provision. - -include::snippets/vcenter-support.adoc[] - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. -==== - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You provisioned xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] for your cluster. To deploy a private image registry, your storage must provide `ReadWriteMany` access modes. -* Completing the installation requires that you upload the {op-system-first} OVA on vSphere hosts. The machine from which you complete this process requires access to port 443 on the vCenter and ESXi hosts. You verified that port 443 is accessible. -* If you use a firewall, you confirmed with the administrator that port 443 is accessible. Control plane nodes must be able to reach vCenter and ESXi hosts on port 443 for the installation to succeed. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/installation-vsphere-encrypted-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[Creating an encrypted storage class] - -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/install_config/installing-customizing.adoc#installation-special-config-chrony_installing-customizing[Configuring chrony time service] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-configuration-parameters-additional-vsphere_installation-config-parameters-vsphere[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#deprecated-parameters-vsphere_installation-config-parameters-vsphere[Deprecated VMware vSphere configuration parameters] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration-sc-vsphere_persistent-storage-csi-migration[vSphere automatic migration] - -* xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.html#persistent-storage-csi-vsphere-top-aware_persistent-storage-csi-vsphere[VMware vSphere CSI Driver Operator] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_vsphere/installation-config-parameters-vsphere.adoc#installation-config-parameters-vsphere[Installation configuration parameters] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - 
-include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/architecture-rhcos-updating-bootloader.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/vsphere-anti-affinity.adoc[leveloffset=+1] - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* If necessary, you can -xref:../../support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc#opting-out-remote-health-reporting_opting-out-remote-health-reporting[opt out of remote health reporting]. -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]. -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. -* Optional: if you created encrypted virtual machines, xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#vsphere-pv-encryption[create an encrypted storage class]. 
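Several of the modules included above (logging in as `kubeadmin`, approving CSRs) rely on standard `oc` commands. As a quick reference, approving the pending certificate signing requests for new machines typically looks like the following, where `<csr_name>` is a placeholder:

[source,terminal]
----
$ oc get csr                                              # list pending and approved CSRs
$ oc adm certificate approve <csr_name>                   # approve one CSR
$ oc get csr -o name | xargs oc adm certificate approve   # approve all pending CSRs at once
----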
diff --git a/installing/installing_vsphere/modules b/installing/installing_vsphere/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_vsphere/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc b/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc deleted file mode 100644 index 2a90758bfed1..000000000000 --- a/installing/installing_vsphere/preparing-to-install-on-vsphere.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-on-vsphere"] -= Preparing to install on vSphere -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-on-vsphere - -toc::[] - - -[id="preparing-to-install-on-vsphere-prerequisites"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. - -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -* If you use a firewall and plan to use Telemetry, you -xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] required by your cluster. - -* You reviewed your VMware platform licenses. Red Hat does not place any restrictions on your VMware licenses, but some VMware infrastructure components require licensing. - -[id="choosing-a-method-to-install-ocp-on-vsphere"] -== Choosing a method to install {product-title} on vSphere - -You can install {product-title} with the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. This method requires no setup for the installer, and is ideal for connected environments like vSphere. Installing with the {ai-full} also provides integration with vSphere, enabling autoscaling. See xref:../../installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc#installing-on-prem-assisted[Installing an on-premise cluster using the {ai-full}] for additional details. - -You can also install {product-title} on vSphere by using installer-provisioned or user-provisioned infrastructure. Installer-provisioned infrastructure is ideal for installing in environments with air-gapped/restricted networks, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provide. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. - -See the xref:../../architecture/architecture-installation.html#installation-process_architecture-installation[Installation process] for more information about installer-provisioned and user-provisioned installation processes. - -[IMPORTANT] -==== -The steps for performing a user-provisioned infrastructure installation are provided as an example only. Installing a cluster with infrastructure you provide requires knowledge of the vSphere platform and the installation process of {product-title}. Use the user-provisioned infrastructure installation instructions as a guide; you are free to create the required resources through other methods. 
-==== - - -=== Installer-provisioned infrastructure installation of {product-title} on vSphere - -Installer-provisioned infrastructure allows the installation program to pre-configure and automate the provisioning of resources required by {product-title}. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[Installing a cluster on vSphere]**: You can install {product-title} on vSphere by using installer-provisioned infrastructure installation with no customization. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[Installing a cluster on vSphere with customizations]**: You can install {product-title} on vSphere by using installer-provisioned infrastructure installation with the default customization options. - -* **xref:../../installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc#installing-vsphere-installer-provisioned-network-customizations[Installing a cluster on vSphere with network customizations]**: You can install {product-title} on installer-provisioned vSphere infrastructure, with network customizations. You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. - -* **xref:../../installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[Installing a cluster on vSphere in a restricted network]**: You can install a cluster on VMware vSphere infrastructure in a restricted network by creating an internal mirror of the installation release content. - You can use this method to deploy {product-title} on an internal network that is not visible to the internet. - -=== User-provisioned infrastructure installation of {product-title} on vSphere - -User-provisioned infrastructure requires the user to provision all resources required by {product-title}. - -* **xref:../../installing/installing_vsphere/installing-vsphere.adoc#[Installing a cluster on vSphere with user-provisioned infrastructure]**: You can install {product-title} on VMware vSphere infrastructure that you provision. - -* **xref:../../installing/installing_vsphere/installing-vsphere-network-customizations.adoc#installing-vsphere-network-customizations[Installing a cluster on vSphere with network customizations with user-provisioned infrastructure]**: You can install {product-title} on VMware vSphere infrastructure that you provision with customized network configuration options. - -* **xref:../../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure]**: {product-title} can be installed on VMware vSphere infrastructure that you provision in a restricted network. - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party vSphere CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. 
- -== Configuring the vSphere connection settings - -* **xref:../../installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc#installing-vsphere-post-installation-configuration[Updating the vSphere connection settings following an installation]**: For installations on vSphere using the Assisted Installer, you must manually update the vSphere connection settings to complete the installation. For installer-provisioned or user-provisioned infrastructure installations on vSphere, you can optionally validate or modify the vSphere connection settings at any time. - -== Uninstalling an installer-provisioned infrastructure installation of {product-title} on vSphere - -* **xref:../../installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc#uninstalling-cluster-vsphere-installer-provisioned[Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure]**: You can remove a cluster that you deployed on VMware vSphere infrastructure that used installer-provisioned infrastructure. diff --git a/installing/installing_vsphere/snippets b/installing/installing_vsphere/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_vsphere/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc b/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc deleted file mode 100644 index 92ef5ed520cf..000000000000 --- a/installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="uninstalling-cluster-vsphere-installer-provisioned"] -= Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: uninstalling-cluster-vsphere-installer-provisioned - -toc::[] - -You can remove a cluster that you deployed in your VMware vSphere instance by using installer-provisioned infrastructure. - -[NOTE] -==== -When you run the `openshift-install destroy cluster` command to uninstall {product-title}, vSphere volumes are not automatically deleted. The cluster administrator must manually find the vSphere volumes and delete them. 
-==== - -include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc b/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc deleted file mode 100644 index 2d89bc05a81f..000000000000 --- a/installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-vsphere-problem-detector-operator"] -= Using the vSphere Problem Detector Operator -include::_attributes/common-attributes.adoc[] -:context: vsphere-problem-detector - -toc::[] - -// About the operator -include::modules/vsphere-problem-detector-about.adoc[leveloffset=+1] - -// Run the checks -include::modules/vsphere-problem-detector-running.adoc[leveloffset=+1] - -// View the events -include::modules/vsphere-problem-detector-viewing-events.adoc[leveloffset=+1] - -// View the logs -include::modules/vsphere-problem-detector-viewing-logs.adoc[leveloffset=+1] - -// Reference: Problem detector checks -include::modules/vsphere-problem-detector-config-checks.adoc[leveloffset=+1] - -// Concept: Storage class config check -include::modules/vsphere-problem-detector-storage-class-config-check.adoc[leveloffset=+1] - -// Metrics -include::modules/vsphere-problem-detector-metrics.adoc[leveloffset=+1] - -[role="_additional-resources"] -== Additional resources - -* xref:../../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] diff --git a/installing/installing_with_agent_based_installer/_attributes b/installing/installing_with_agent_based_installer/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/installing/installing_with_agent_based_installer/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/images b/installing/installing_with_agent_based_installer/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/installing/installing_with_agent_based_installer/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc b/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc deleted file mode 100644 index d7cbe43b7e40..000000000000 --- a/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-with-agent-based-installer"] -= Installing a {product-title} cluster with the Agent-based Installer -include::_attributes/common-attributes.adoc[] -:context: installing-with-agent-based-installer - -toc::[] - -Use the following procedures to install an {product-title} cluster using the Agent-based Installer. - -[id="prerequisites_installing-with-agent-based-installer"] -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. - -// This anchor ID is extracted/replicated from the former installing-ocp-agent.adoc module to preserve links. 
-[id="installing-ocp-agent_installing-with-agent-based-installer"] -== Installing {product-title} with the Agent-based Installer - -The following procedures deploy a single-node {product-title} in a disconnected environment. You can use these procedures as a basis and modify according to your requirements. - -include::modules/installing-ocp-agent-download.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-boot.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-tui.adoc[leveloffset=+2] - -include::modules/installing-ocp-agent-verify.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#modifying-install-config-for-dual-stack-network_ipi-install-installation-workflow[Deploying with dual-stack networking]. -* See xref:../../installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc#configuring-the-install-config-file_ipi-install-installation-workflow[Configuring the install-config yaml file]. -* See xref:../../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installation-three-node-cluster_installing-restricted-networks-bare-metal[Configuring a three-node cluster] to deploy three-node clusters in bare metal environments. -* See xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#root-device-hints_preparing-to-install-with-agent-based-installer[About root device hints]. -* See link:https://nmstate.io/examples.html[NMState state examples]. - -include::modules/sample-ztp-custom-resources.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#ztp-deploying-far-edge-clusters-at-scale[Challenges of the network far edge] to learn more about {ztp-first}. diff --git a/installing/installing_with_agent_based_installer/modules b/installing/installing_with_agent_based_installer/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/installing/installing_with_agent_based_installer/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc b/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc deleted file mode 100644 index b41a0cda5e35..000000000000 --- a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-an-agent-based-installed-cluster-for-mce"] -= Preparing an Agent-based installed cluster for the multicluster engine for Kubernetes Operator -include::_attributes/common-attributes.adoc[] -:context: preparing-an-agent-based-installed-cluster-for-mce - -toc::[] - -You can install the multicluster engine for Kubernetes Operator and deploy a hub cluster with the Agent-based {product-title} Installer. -The following procedure is partially automated and requires manual steps after the initial cluster is deployed. - -== Prerequisites -* You have read the following documentation: -** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview[Cluster lifecycle with multicluster engine operator overview]. 
-** xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. -** xref:../../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#about-ztp_ztp-deploying-far-edge-clusters-at-scale[Using ZTP to provision clusters at the network far edge]. -** xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Preparing to install with the Agent-based Installer]. -** xref:../../installing/disconnected_install/index.adoc#installing-mirroring-disconnected-about[About disconnected installation mirroring]. -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* If you are installing in a disconnected environment, you must have a configured local mirror registry for disconnected installation mirroring. - -include::modules/preparing-an-inital-cluster-deployment-for-mce-disconnected.adoc[leveloffset=+1] - -include::modules/preparing-an-inital-cluster-deployment-for-mce-connected.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[The Local Storage Operator] diff --git a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc b/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc deleted file mode 100644 index 3b9160d331a6..000000000000 --- a/installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc +++ /dev/null @@ -1,55 +0,0 @@ -:_content-type: ASSEMBLY -[id="preparing-to-install-with-agent-based-installer"] -= Preparing to install with the Agent-based installer -include::_attributes/common-attributes.adoc[] -:context: preparing-to-install-with-agent-based-installer - -toc::[] - -[id="about-the-agent-based-installer"] -== About the Agent-based Installer - -The Agent-based installation method provides the flexibility to boot your on-premises servers in any way that you choose. It combines the ease of use of the Assisted Installation service with the ability to run offline, including in air-gapped environments. -Agent-based installation is a subcommand of the {product-title} installer. -It generates a bootable ISO image containing all of the information required to deploy an {product-title} cluster, with an available release image. - -The configuration is in the same format as for the installer-provisioned infrastructure and user-provisioned infrastructure installation methods. -The Agent-based Installer can also optionally generate or accept Zero Touch Provisioning (ZTP) custom resources. ZTP allows you to provision new edge sites with declarative configurations of bare-metal equipment. 
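As a sketch of the workflow this assembly introduces, the bootable ISO is generated with the `agent` subcommand of the installer, assuming that `install-config.yaml` and `agent-config.yaml` already exist in the assets directory (`<assets_directory>` is a placeholder):

[source,terminal]
----
$ openshift-install agent create image --dir <assets_directory> --log-level=info
$ openshift-install agent wait-for install-complete --dir <assets_directory>   # run after booting the hosts from the ISO
----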
- -include::modules/understanding-agent-install.adoc[leveloffset=+1] - -include::modules/agent-installer-fips-compliance.adoc[leveloffset=+1] - -include::modules/agent-installer-configuring-fips-compliance.adoc[leveloffset=+1] - -[discrete] -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/articles/5059881[OpenShift Security Guide Book] - -//// -* xref:../../installing/installing-fips.adoc#installing-fips[Support for FIPS cryptography] -//// - -include::modules/agent-install-networking.adoc[leveloffset=+1] - -include::modules/agent-install-sample-config-bonds-vlans.adoc[leveloffset=+1] - -include::modules/agent-install-sample-config-bond-sriov.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_networking/configuring-network-bonding_configuring-and-managing-networking[Configuring network bonding] - -include::modules/installation-bare-metal-agent-installer-config-yaml.adoc[leveloffset=+1] - -include::modules/validations-before-agent-iso-creation.adoc[leveloffset=+1] - -include::modules/agent-install-ipi-install-root-device-hints.adoc[leveloffset=+1] - -[id="agent-based-installation-next-steps"] -== Next steps - -* xref:../../installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc#installing-with-agent-based-installer[Installing a cluster with the Agent-based Installer] diff --git a/installing/installing_with_agent_based_installer/snippets b/installing/installing_with_agent_based_installer/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/installing/installing_with_agent_based_installer/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc b/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc deleted file mode 100644 index 83ab77c427f8..000000000000 --- a/installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-disconnected-installation-mirroring"] -= Understanding disconnected installation mirroring -include::_attributes/common-attributes.adoc[] -:context: understanding-disconnected-installation-mirroring - -toc::[] -// Reusing applicable content from Disconnected installation mirroring assembly - -You can use a mirror registry for disconnected installations and to ensure that your clusters only use container images that satisfy your organization's controls on external content. Before you install a cluster on infrastructure that you provision in a disconnected environment, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. 
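As an illustration of the oc-mirror path referenced in this assembly, a minimal `ImageSetConfiguration` and invocation might look like the following sketch; the registry host, repository path, and release channel are assumptions, not values from these modules:

[source,yaml]
----
apiVersion: mirror.openshift.io/v1alpha2
kind: ImageSetConfiguration
storageConfig:
  registry:
    imageURL: mirror.example.com:5000/mirror/oc-mirror-metadata
mirror:
  platform:
    channels:
    - name: stable-4.13
      type: ocp
----

[source,terminal]
----
$ oc mirror --config=./imageset-config.yaml docker://mirror.example.com:5000
----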
- -[id="agent-install-mirroring-images-disconnected"] -== Mirroring images for a disconnected installation through the Agent-based Installer - -You can use one of the following procedures to mirror your {product-title} image repository to your mirror registry: - -* xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[Mirroring images for a disconnected installation] -* xref:../../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin] - -include::modules/agent-install-about-mirroring-for-disconnected-registry.adoc[leveloffset=+1] - -include::modules/agent-install-configuring-for-disconnected-registry.adoc[leveloffset=+2] \ No newline at end of file diff --git a/installing/modules b/installing/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/installing/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/installing/snippets b/installing/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/installing/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/installing/validating-an-installation.adoc b/installing/validating-an-installation.adoc deleted file mode 100644 index 63a0d6b575ec..000000000000 --- a/installing/validating-an-installation.adoc +++ /dev/null @@ -1,71 +0,0 @@ -:_content-type: ASSEMBLY -[id="validating-an-installation"] -= Validating an installation -include::_attributes/common-attributes.adoc[] -:context: validating-an-installation - -toc::[] - -You can check the status of an {product-title} cluster after an installation by following the procedures in this document. - -//Reviewing the installation log -include::modules/reviewing-the-installation-log.adoc[leveloffset=+1] - -//Viewing the image pull source -include::modules/viewing-the-image-pull-source.adoc[leveloffset=+1] - -//Getting cluster version, status, and update details -include::modules/getting-cluster-version-status-and-update-details.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/troubleshooting/troubleshooting-installations.adoc#querying-operator-status-after-installation_troubleshooting-installations[Querying Operator status after installation] for more information about querying Operator status if your installation is still progressing. - -* See xref:../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-operator-issues[Troubleshooting Operator issues] for information about investigating issues with Operators. - -* See xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating a cluster using the web console] for more information on updating your cluster. - -* See xref:../updating/understanding-upgrade-channels-release.adoc#understanding-upgrade-channels-releases[Understanding update channels and releases] for an overview about update release channels. - -//Querying the status of the cluster nodes by using the CLI -include::modules/querying-the-status-of-cluster-nodes-using-the-cli.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[Verifying node health] for more details about reviewing node health and investigating node issues. 
- -//Reviewing the cluster status from the OpenShift Container Platform web console -include::modules/reviewing-cluster-status-from-the-openshift-web-console.adoc[leveloffset=+1] - -//Reviewing the cluster status from {cluster-manager} -include::modules/reviewing-cluster-status-from-the-openshift-cluster-manager.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc#using-insights-to-identify-issues-with-your-cluster[Using Insights to identify issues with your cluster] for more information about reviewing potential issues with your cluster. - -//Checking cluster resource availability and utilization -include::modules/checking-cluster-resource-availability-and-utilization.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/monitoring-overview.adoc#monitoring-overview[Monitoring overview] for more information about the {product-title} monitoring stack. - -//Listing alerts that are firing -include::modules/listing-alerts-that-are-firing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/managing-alerts.adoc#managing-alerts[Managing alerts] for further details about alerting in {product-title}. - -[id="validating-an-installation-next-steps"] -== Next steps - -* See xref:../support/troubleshooting/troubleshooting-installations.adoc#troubleshooting-installations[Troubleshooting installations] if you experience issues when installing your cluster. - -* After installing {product-title}, you can xref:../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[further expand and customize your cluster]. diff --git a/logging/.meta-logging-quickref.adoc b/logging/.meta-logging-quickref.adoc deleted file mode 100644 index 1f902314ffd8..000000000000 --- a/logging/.meta-logging-quickref.adoc +++ /dev/null @@ -1,46 +0,0 @@ -= Logging Meta Reference for Writers - -This hidden file contains meta content for writers and is not for inclusion in published docs. For Logging 5.5+ documentation has shifted to a per version of Logging approach. Files created for logging after this change follow the naming convention 'logging-description', while files created prior to this change use 'cluster-logging-description'. - -== Logging Files -Files referenced only apply to versions 5.5+ of logging. 
- -* Assemblies -** logging-5-5-administration.adoc -** logging-5-5-architecture.adoc -** logging-5-5-configuration.adoc -** logging-5-5-reference.adoc -** logging-5-5-release-notes.adoc -** logging-5-6-administration.adoc -** logging-5-6-architecture.adoc -** logging-5-6-configuration.adoc -** logging-5-6-reference.adoc -** logging-5-6-release-notes.adoc - -.Include syntax: ----- -\include::target[leveloffset=offset,lines=ranges] -\include::modules/logging-module-name.adoc[leveloffset=+1,lines=5..10] -\include::snippets/ ----- - -* Modules -** logging-rn-5.5.5.adoc -** logging-rn-5.5.4.adoc -** logging-rn-5.5.3.adoc -** logging-rn-5.5.2.adoc -** logging-rn-5.5.1.adoc -** logging-rn-5.5.0.adoc -** logging-loki-retention.adoc - - -* Snippets -** logging-stable-updates-snip.adoc[] -** logging-log-types-snip.adoc[] -** logging-compatibility-snip.adoc[] -** logging-loki-vs-lokistack-snip.adoc[] -** logging-create-secret-snip.adoc[] -** logging-supported-config-snip.adoc[] -** logging-approval-strategy-snip.adoc[] -** logging-subscription-object-snip.adoc[ -** logging-create-apply-cr-snip.adoc[] diff --git a/logging/_attributes b/logging/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/logging/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/logging/cluster-logging-dashboards.adoc b/logging/cluster-logging-dashboards.adoc deleted file mode 100644 index 72a885c2f3d4..000000000000 --- a/logging/cluster-logging-dashboards.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging-dashboards"] -= Viewing cluster dashboards -:context: cluster-logging-dashboards - -toc::[] - -The *Logging/Elasticsearch Nodes* and *Openshift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -contain in-depth details about your Elasticsearch instance and the individual Elasticsearch nodes that you can use to prevent and diagnose problems. - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster level, including cluster resources, garbage collection, shards in the cluster, and Fluentd statistics. - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node level, including details on indexing, shards, resources, and so forth. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-dashboards-access.adoc[leveloffset=+1] - -For information on the dashboard charts, see xref:../logging/cluster-logging-dashboards.html#cluster-logging-dashboards-logging_cluster-logging-dashboards[About the OpenShift Logging dashboard] and xref:../logging/cluster-logging-dashboards.html#cluster-logging-dashboards-es_cluster-logging-dashboards[About the Logging/Elastisearch Nodes dashboard]. 
- -include::modules/cluster-logging-dashboards-logging.adoc[leveloffset=+1] -include::modules/cluster-logging-dashboards-es.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-deploying.adoc b/logging/cluster-logging-deploying.adoc deleted file mode 100644 index d0826a689af3..000000000000 --- a/logging/cluster-logging-deploying.adoc +++ /dev/null @@ -1,78 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-deploying -[id="cluster-logging-deploying"] -= Installing the {logging-title} -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -You can install the {logging-title} by deploying the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. The OpenShift Elasticsearch Operator creates and manages the Elasticsearch cluster used by OpenShift Logging. The {logging} Operator creates and manages the components of the logging stack. - -The process for deploying the {logging} to {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::[] - involves: - -* Reviewing the xref:../logging/config/cluster-logging-storage-considerations#cluster-logging-storage[{logging-uc} storage considerations]. - -* Installing the logging subsystem for {product-title} using xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-console_cluster-logging-deploying[the web console] or xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploy-cli_cluster-logging-deploying[the CLI]. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-deploy-console.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Installing Operators from the OperatorHub] -* xref:../logging/config/cluster-logging-collector.adoc#cluster-logging-removing-unused-components-if-no-elasticsearch_cluster-logging-collector[Removing unused components if you do not use the default Elasticsearch log store] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-adding-operators-to-cluster.html[Installing Operators from OperatorHub] -* link:https://docs.openshift.com/container-platform/latest/logging/config/cluster-logging-collector.html#cluster-logging-removing-unused-components-if-no-elasticsearch_cluster-logging-collector[Removing unused components if you do not use the default Elasticsearch log store] -endif::[] - -== Post-installation tasks - -If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. - -If your network plugin enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. 
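The CLI installation path that follows drives everything from a `ClusterLogging` custom resource. A minimal sketch, assuming the default Elasticsearch log store, Kibana visualization, and Fluentd collector, with sizing and the storage class (`<storage_class_name>`) left as placeholders to adjust for your environment:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance
  namespace: openshift-logging
spec:
  managementState: Managed
  logStore:
    type: elasticsearch
    elasticsearch:
      nodeCount: 3
      storage:
        storageClassName: <storage_class_name>
        size: 200G
      redundancyPolicy: SingleRedundancy
    retentionPolicy:
      application:
        maxAge: 7d
  visualization:
    type: kibana
    kibana:
      replicas: 1
  collection:
    logs:
      type: fluentd
      fluentd: {}
----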
- - -include::modules/cluster-logging-deploy-cli.adoc[leveloffset=+1] - -== Post-installation tasks - -If you plan to use Kibana, you must xref:#cluster-logging-visualizer-indices_cluster-logging-deploying[manually create your Kibana index patterns and visualizations] to explore and visualize data in Kibana. - -If your network plugin enforces network isolation, xref:#cluster-logging-deploy-multitenant_cluster-logging-deploying[allow network traffic between the projects that contain the {logging} Operators]. - -include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+2] - -include::modules/cluster-logging-deploy-multitenant.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../networking/network_policy/about-network-policy.adoc[About network policy] -* xref:../networking/openshift_sdn/about-openshift-sdn.adoc[About the OpenShift SDN default CNI network provider] -* xref:../networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/network_policy/about-network-policy.html[About network policy] -* link:https://docs.openshift.com/container-platform/latest/networking/openshift_sdn/about-openshift-sdn.html[About the OpenShift SDN default CNI network provider] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.html[About the OVN-Kubernetes default Container Network Interface (CNI) network provider] -endif::[] - -// include::modules/cluster-logging-deploy-memory.adoc[leveloffset=+1] - -// include::modules/cluster-logging-deploy-certificates.adoc[leveloffset=+1] - -// include::modules/cluster-logging-deploy-label.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-enabling-json-logging.adoc b/logging/cluster-logging-enabling-json-logging.adoc deleted file mode 100644 index daca7c1d0c28..000000000000 --- a/logging/cluster-logging-enabling-json-logging.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-enabling-json-logging -[id="cluster-logging-enabling-json-logging"] -= Enabling JSON logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can configure the Log Forwarding API to parse JSON strings into a structured object. - -include::modules/cluster-logging-json-log-forwarding.adoc[leveloffset=+1] -include::modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc[leveloffset=+1] -include::modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems] diff --git a/logging/cluster-logging-eventrouter.adoc b/logging/cluster-logging-eventrouter.adoc deleted file mode 100644 index c0397eee4000..000000000000 --- a/logging/cluster-logging-eventrouter.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-eventrouter -[id="cluster-logging-eventrouter"] -= Collecting and storing Kubernetes events -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by the {logging}. 
You must manually deploy the Event Router. - -The Event Router collects events from all projects and writes them to `STDOUT`. The collector then forwards those events to the store defined in the `ClusterLogForwarder` custom resource (CR). - -[IMPORTANT] -==== -The Event Router adds additional load to Fluentd and can impact the number of other log messages that can be processed. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-eventrouter-deploy.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-exported-fields.adoc b/logging/cluster-logging-exported-fields.adoc deleted file mode 100644 index 1a92d4622819..000000000000 --- a/logging/cluster-logging-exported-fields.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-exported-fields"] -= Log Record Fields -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: cluster-logging-exported-fields - -toc::[] - -The following fields can be present in log records exported by the {logging}. Although log records are typically formatted as JSON objects, the same data model can be applied to other encodings. - -To search these fields from Elasticsearch and Kibana, use the full dotted field name when searching. For example, with an Elasticsearch */_search URL*, to look for a Kubernetes pod name, use `/_search/q=kubernetes.pod_name:name-of-my-pod`. - -// The logging system can parse JSON-formatted log entries to external systems. These log entries are formatted as a fluentd message with extra fields such as `kubernetes`. The fields exported by the logging system and available for searching from Elasticsearch and Kibana are documented at the end of this document. - -include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0] - -include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0] - -// add modules/cluster-logging-exported-fields-openshift when available diff --git a/logging/cluster-logging-external.adoc b/logging/cluster-logging-external.adoc deleted file mode 100644 index 465c610fa00d..000000000000 --- a/logging/cluster-logging-external.adoc +++ /dev/null @@ -1,211 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-external -[id="cluster-logging-external"] -= Forwarding logs to external third-party logging systems -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -By default, the {logging} sends container and infrastructure logs to the default internal log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, you do not need to configure the Cluster Log Forwarder. - -To send logs to other log aggregators, you use the {product-title} Cluster Log Forwarder. This API enables you to send container, infrastructure, and audit logs to specific endpoints within or outside your cluster. In addition, you can send different types of logs to various systems so that various individuals can access each type. You can also enable Transport Layer Security (TLS) support to send logs securely, as required by your organization. 
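As a shape reference for the API described above, a minimal `ClusterLogForwarder` that sends only application logs to an external Elasticsearch instance might look like the following sketch; the output URL and secret name are illustrative:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: external-es
    type: elasticsearch
    url: https://elasticsearch.example.com:9200
    secret:
      name: es-secret
  pipelines:
  - name: application-logs
    inputRefs:
    - application
    outputRefs:
    - external-es
----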
- -[NOTE] -==== -To send audit logs to the default internal Elasticsearch log store, use the Cluster Log Forwarder as described in xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Forward audit logs to the log store]. -==== - -When you forward logs externally, the {logging} creates or modifies a Fluentd config map to send logs using your desired protocols. You are responsible for configuring the protocol on the external log aggregator. - -[IMPORTANT] -==== -You cannot use the config map methods and the Cluster Log Forwarder in the same cluster. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-collector-log-forwarding-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-separate-indices.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-es.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-fluentd.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-syslog.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-cloudwatch.adoc[leveloffset=+1] - -[id="cluster-logging-collector-log-forward-sts-cloudwatch_{context}"] -=== Forwarding logs to Amazon CloudWatch from STS enabled clusters - -For clusters with AWS Security Token Service (STS) enabled, you can create an AWS service account manually or create a credentials request by using the -ifdef::openshift-enterprise,openshift-origin[] -xref:../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc[Cloud Credential Operator(CCO)] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.html[Cloud Credential Operator(CCO)] -endif::[] - utility `ccoctl`. - -[NOTE] -==== -This feature is not supported by the vector collector. -==== - -.Prerequisites - -* {logging-title-uc}: 5.5 and later - - -.Procedure -. 
Create a `CredentialsRequest` custom resource YAML by using the template below: -+ -.CloudWatch credentials request template -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: <your_role_name>-credrequest - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - logs:PutLogEvents - - logs:CreateLogGroup - - logs:PutRetentionPolicy - - logs:CreateLogStream - - logs:DescribeLogGroups - - logs:DescribeLogStreams - effect: Allow - resource: arn:aws:logs:*:*:* - secretRef: - name: <your_role_name> - namespace: openshift-logging - serviceAccountNames: - - logcollector ----- -+ -. Use the `ccoctl` command to create a role for AWS using your `CredentialsRequest` CR. With the `CredentialsRequest` object, this `ccoctl` command creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy that grants permissions to perform operations on CloudWatch resources. This command also creates a YAML configuration file in `<output_dir>/manifests/openshift-logging-<your_role_name>-credentials.yaml`. This secret file contains the `role_arn` key/value used during authentication with the AWS IAM identity provider. -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ ---name=<name> \ ---region=<aws_region> \ ---credentials-requests-dir=<path_to_credrequests_dir>/credrequests \ ---identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com <1> ----- -<1> <name> is the name used to tag your cloud resources and should match the name used during your STS cluster installation. -+ -. Apply the secret created: -+ -[source,terminal] ----- -$ oc apply -f output/manifests/openshift-logging-<your_role_name>-credentials.yaml ----- -+ -. Create or edit a `ClusterLogForwarder` custom resource: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: cw <3> - type: cloudwatch <4> - cloudwatch: - groupBy: logType <5> - groupPrefix: <group_prefix> <6> - region: us-east-2 <7> - secret: - name: <your_secret_name> <8> - pipelines: - - name: to-cloudwatch <9> - inputRefs: <10> - - infrastructure - - audit - - application - outputRefs: - - cw <11> ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `cloudwatch` type. -<5> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type. -* `namespaceName` creates a log group for each application namespace. Infrastructure and audit logs are unaffected, remaining grouped by `logType`. -* `namespaceUUID` creates a new log group for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<6> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<7> Specify the AWS region. -<8> Specify the name of the secret that contains your AWS credentials. -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline.
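After the forwarder is applied, one way to confirm that log groups are being created is the AWS CLI; the prefix to query depends on your cluster's `infrastructureName` or the `groupPrefix` you set (`<prefix>` is a placeholder):

[source,terminal]
----
$ aws logs describe-log-groups --log-group-name-prefix <prefix> --query 'logGroups[].logGroupName'
----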
- -[role="_additional-resources"] -.Additional resources -* link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS STS API Reference] - -include::modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc[leveloffset=+2] - -include::modules/cluster-logging-collector-log-forward-loki.adoc[leveloffset=+1] - -include::modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields-kubernetes_cluster-logging-exported-fields[Log Record Fields]. - -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki server] - -ifndef::openshift-rosa[] -include::modules/cluster-logging-collector-log-forward-gcp.adoc[leveloffset=+1] -endif::openshift-rosa[] - -include::modules/logging-forward-splunk.adoc[leveloffset=+1] - -include::modules/logging-http-forward.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-project.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../networking/ovn_kubernetes_network_provider/logging-network-policy.adoc#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/networking/ovn_kubernetes_network_provider/logging-network-policy.html#logging-network-policy[Logging for egress firewall and network policy rules] -endif::[] - -include::modules/cluster-logging-troubleshooting-log-forwarding.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-loki.adoc b/logging/cluster-logging-loki.adoc deleted file mode 100644 index 7fc2561b7074..000000000000 --- a/logging/cluster-logging-loki.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-loki -[id="cluster-logging-loki"] -= Loki -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/cluster-logging-loki-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-loki-deploy.adoc[leveloffset=+1] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-forwarding-lokistack.adoc[leveloffset=+1] - -include::modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc[leveloffset=+2] - -== Additional Resources -* link:https://grafana.com/docs/loki/latest/logql/[Loki Query Language (LogQL) Documentation] -* link:https://loki-operator.dev/docs/howto_connect_grafana.md/[Grafana Dashboard Documentation] -* link:https://loki-operator.dev/docs/object_storage.md/[Loki Object Storage Documentation] -* link:https://grafana.com/docs/loki/latest/operations/storage/schema/#changing-the-schema[Loki Storage Schema Documentation] diff --git a/logging/cluster-logging-release-notes.adoc b/logging/cluster-logging-release-notes.adoc deleted file mode 100644 index edcebf2448c5..000000000000 --- a/logging/cluster-logging-release-notes.adoc +++ /dev/null @@ -1,1082 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-release-notes"] -include::_attributes/common-attributes.adoc[] -= Release notes for Logging - -:context: cluster-logging-release-notes-v5x - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - 
-include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.7.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.1.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.7.0.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.2.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.1.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.8.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.7.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.5.2.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-5-1"] -== Logging 5.5.1 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6344[OpenShift Logging Bug Fix Release 5.5.1]. - -[id="openshift-logging-5-5-1-enhancements_{context}"] -=== Enhancements -* This enhancement adds an *Aggregated Logs* tab to the *Pod Details* page of the {product-title} web console when the Logging Console Plugin is in use. This enhancement is only available on {product-title} 4.10 and later. (link:https://issues.redhat.com/browse/LOG-2647[LOG-2647]) - -* This enhancement adds Google Cloud Logging as an output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-1482[LOG-1482]) -//xref:cluster-logging-collector-log-forward-gcp.adoc - -[id="openshift-logging-5-5-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2745[LOG-2745]) - -* Before this update, Fluentd would sometimes not recognize that the Kubernetes platform rotated the log file and would no longer read log messages. This update corrects that by setting the configuration parameter suggested by the upstream development team. (link:https://issues.redhat.com/browse/LOG-2995[LOG-2995]) - -* Before this update, the addition of multi-line error detection caused internal routing to change and forward records to the wrong destination. With this update, the internal routing is correct. (link:https://issues.redhat.com/browse/LOG-2801[LOG-2801]) - -* Before this update, changing the {product-title} web console's refresh interval created an error when the *Query* field was empty. With this update, changing the interval is not an available option when the *Query* field is empty. 
(link:https://issues.redhat.com/browse/LOG-2917[LOG-2917]) - -[id="openshift-logging-5-5-1-cves_{context}"] -=== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1705[CVE-2022-1705] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32148[CVE-2022-32148] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] - -include::modules/cluster-logging-rn-5.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.14.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.13.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.12.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.11.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.8.adoc[leveloffset=+1] - -//include::modules/cluster-logging-rn-5.4.7.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.6.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.5.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.4.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.3.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.4.2.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-4-1_{context}"] -== Logging 5.4.1 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2216[RHSA-2022:2216-OpenShift Logging Bug Fix Release 5.4.1]. - -[id="openshift-logging-5-4-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the log file metric exporter only reported logs created while the exporter was running, which resulted in inaccurate log growth data. This update resolves this issue by monitoring `/var/log/pods`. (https://issues.redhat.com/browse/LOG-2442[LOG-2442]) - -* Before this update, the collector would be blocked because it continually tried to use a stale connection when forwarding logs to fluentd forward receivers. With this release, the `keepalive_timeout` value has been set to 30 seconds (`30s`) so that the collector recycles the connection and re-attempts to send failed messages within a reasonable amount of time. (https://issues.redhat.com/browse/LOG-2534[LOG-2534]) - -* Before this update, an error in the gateway component enforcing tenancy for reading logs limited access to logs with a Kubernetes namespace causing "audit" and some "infrastructure" logs to be unreadable. With this update, the proxy correctly detects users with admin access and allows access to logs without a namespace. (https://issues.redhat.com/browse/LOG-2448[LOG-2448]) - -* Before this update, the `system:serviceaccount:openshift-monitoring:prometheus-k8s` service account had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the service account` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2437[LOG-2437]) - -* Before this update, Linux audit log time parsing relied on an ordinal position of a key/value pair. This update changes the parsing to use a regular expression to find the time entry. 
(https://issues.redhat.com/browse/LOG-2321[LOG-2321]) - - -[id="openshift-logging-5-4-1-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - - -[id="cluster-logging-release-notes-5-4-0_{context}"] -== Logging 5.4 -The following advisories are available for logging 5.4: -link:https://access.redhat.com/errata/RHSA-2022:1461[{logging-title-uc} Release 5.4] - -[id="openshift-logging-5-4-0-tech-prev_{context}"] -=== Technology Previews - -include::modules/cluster-logging-vector-tech-preview.adoc[leveloffset=+2] -include::modules/cluster-logging-loki-tech-preview.adoc[leveloffset=+2] - -[id="openshift-logging-5-4-0-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, the `cluster-logging-operator` used cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were created when deploying the Operator using the console interface but were missing when deploying from the command line. This update fixes the issue by making the roles and bindings namespace-scoped. (link:https://issues.redhat.com/browse/LOG-2286[LOG-2286]) - -* Before this update, a prior change to fix dashboard reconciliation introduced a `ownerReferences` field to the resource across namespaces. As a result, both the config map and dashboard were not created in the namespace. With this update, the removal of the `ownerReferences` field resolves the issue, and the OpenShift Logging dashboard is available in the console. (link:https://issues.redhat.com/browse/LOG-2163[LOG-2163]) - -* Before this update, changes to the metrics dashboards did not deploy because the `cluster-logging-operator` did not correctly compare existing and modified config maps that contain the dashboard. With this update, the addition of a unique hash value to object labels resolves the issue. (link:https://issues.redhat.com/browse/LOG-2071[LOG-2071]) - -* Before this update, the OpenShift Logging dashboard did not correctly display the pods and namespaces in the table, which displays the top producing containers collected over the last 24 hours. With this update, the pods and namespaces are displayed correctly. 
(link:https://issues.redhat.com/browse/LOG-2069[LOG-2069]) - -* Before this update, when the `ClusterLogForwarder` was set up with `Elasticsearch OutputDefault` and Elasticsearch outputs did not have structured keys, the generated configuration contained the incorrect values for authentication. This update corrects the secret and certificates used. (link:https://issues.redhat.com/browse/LOG-2056[LOG-2056]) - -* Before this update, the OpenShift Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the correct data point has been selected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2026[LOG-2026]) - -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image.(link:https://issues.redhat.com/browse/LOG-1927[LOG-1927]) - -* Before this update, a name change of the deployed collector in the 5.3 release caused the logging collector to generate the `FluentdNodeDown` alert. This update resolves the issue by fixing the job name for the Prometheus alert. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918]) - -* Before this update, the log collector was collecting its own logs due to a refactoring of the component name change. This lead to a potential feedback loop of the collector processing its own log that might result in memory and log message size issues. This update resolves the issue by excluding the collector logs from the collection. (link:https://issues.redhat.com/browse/LOG-1774[LOG-1774]) - -* Before this update, Elasticsearch generated the error `Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota.` if the PVC already existed. With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2131[LOG-2131]) - -* Before this update, Elasticsearch was unable to return to the ready state when the `elasticsearch-signing` secret was removed. With this update, Elasticsearch is able to go back to the ready state after that secret is removed. (link:https://issues.redhat.com/browse/LOG-2171[LOG-2171]) - -* Before this update, the change of the path from which the collector reads container logs caused the collector to forward some records to the wrong indices. With this update, the collector now uses the correct configuration to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2160[LOG-2160]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1899[LOG-1899]) - -* Before this update, the *{product-title} Logging* dashboard showed the number of shards 'x' times larger than the actual value when Elasticsearch had 'x' nodes. This issue occurred because it was printing all primary shards for each Elasticsearch pod and calculating a sum on it, although the output was always for the whole Elasticsearch cluster. With this update, the number of shards is now correctly calculated. (link:https://issues.redhat.com/browse/LOG-2156[LOG-2156]) - -* Before this update, the secrets `kibana` and `kibana-proxy` were not recreated if they were deleted manually. With this update, the `elasticsearch-operator` will watch the resources and automatically recreate them if deleted. 
(link:https://issues.redhat.com/browse/LOG-2250[LOG-2250]) - -* Before this update, tuning the buffer chunk size could cause the collector to generate a warning about the chunk size exceeding the byte limit for the event stream. With this update, you can also tune the read line limit, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2379[LOG-2379]) - -* Before this update, the logging console link in OpenShift web console was not removed with the ClusterLogging CR. With this update, deleting the CR or uninstalling the Cluster Logging Operator removes the link. (link:https://issues.redhat.com/browse/LOG-2373[LOG-2373]) - -* Before this update, a change to the container logs path caused the collection metric to always be zero with older releases configured with the original path. With this update, the plugin which exposes metrics about collected logs supports reading from either path to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2462[LOG-2462]) - -=== CVEs -[id="openshift-logging-5-4-0-CVEs_{context}"] -* link:https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2058404[BZ-2058404] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2045880[BZ-2045880] - -include::modules/cluster-logging-rn-5.3.14.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.13.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.12.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.11.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.10.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.9.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.3.8.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-3-7_{context}"] -== OpenShift Logging 5.3.7 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2217[RHSA-2022:2217 OpenShift Logging Bug Fix Release 5.3.7] - -[id="openshift-logging-5-3-7-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Linux audit log time parsing relied on an ordinal position of key/value pair. This update changes the parsing to utilize a regex to find the time entry. (https://issues.redhat.com/browse/LOG-2322[LOG-2322]) - -* Before this update, some log forwarder outputs could re-order logs with the same time-stamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps. (https://issues.redhat.com/browse/LOG-2334[LOG-2334]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2450[LOG-2450]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. 
(https://issues.redhat.com/browse/LOG-2481[LOG-2481)]) - -=== CVEs -[id="openshift-logging-5-3-7-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-3-6_{context}"] -== OpenShift Logging 5.3.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:1377[RHBA-2022:1377 OpenShift Logging Bug Fix Release 5.3.6] - -[id="openshift-logging-5-3-6-bug-fixes_{context}"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2126[LOG-2126]) - -* Before this change, it was possible for the collector to generate a warning where the chunk byte limit was exceeding an emitted event. With this change, you can tune the readline limit to resolve the issue as advised by the upstream documentation. (link:https://issues.redhat.com/browse/LOG-2380[LOG-2380]) - -[id="cluster-logging-release-notes-5-3-5_{context}"] -== OpenShift Logging 5.3.5 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHSA-2022:0721[RHSA-2022:0721 OpenShift Logging Bug Fix Release 5.3.5] - -[id="openshift-logging-5-3-5-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. 
(link:https://issues.redhat.com/browse/LOG-2182[LOG-2182])
-
-=== CVEs
-[id="openshift-logging-5-3-5-CVEs_{context}"]
-.Click to expand CVEs
-[%collapsible]
-====
-* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491]
-* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521]
-* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872]
-* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984]
-* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019]
-* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122]
-* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192]
-* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193]
-* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552]
-====
-
-[id="cluster-logging-release-notes-5-3-4_{context}"]
-== OpenShift Logging 5.3.4
-[role="_abstract"]
-This release includes link:https://access.redhat.com/errata/RHBA-2022:0411[RHBA-2022:0411 OpenShift Logging Bug Fix Release 5.3.4]
-
-[id="openshift-logging-5-3-4-bug-fixes_{context}"]
-=== Bug fixes
-* Before this update, changes to the metrics dashboards had not yet been deployed because the `cluster-logging-operator` did not correctly compare existing and desired config maps that contained the dashboard. This update fixes the logic by adding a unique hash value to the object labels. (link:https://issues.redhat.com/browse/LOG-2066[LOG-2066])
-
-* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-1974[LOG-1974])
-
-* Before this update, Elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, Elasticsearch checks for existing PVCs, resolving the issue.
(link:https://issues.redhat.com/browse/LOG-2127[LOG-2127]) - -=== CVEs -[id="openshift-logging-5-3-4-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -* link:https://access.redhat.com/security/cve/CVE-2022-21248[CVE-2022-21248] -* link:https://access.redhat.com/security/cve/CVE-2022-21277[CVE-2022-21277] -* link:https://access.redhat.com/security/cve/CVE-2022-21282[CVE-2022-21282] -* link:https://access.redhat.com/security/cve/CVE-2022-21283[CVE-2022-21283] -* link:https://access.redhat.com/security/cve/CVE-2022-21291[CVE-2022-21291] -* link:https://access.redhat.com/security/cve/CVE-2022-21293[CVE-2022-21293] -* link:https://access.redhat.com/security/cve/CVE-2022-21294[CVE-2022-21294] -* link:https://access.redhat.com/security/cve/CVE-2022-21296[CVE-2022-21296] -* link:https://access.redhat.com/security/cve/CVE-2022-21299[CVE-2022-21299] -* link:https://access.redhat.com/security/cve/CVE-2022-21305[CVE-2022-21305] -* link:https://access.redhat.com/security/cve/CVE-2022-21340[CVE-2022-21340] -* link:https://access.redhat.com/security/cve/CVE-2022-21341[CVE-2022-21341] -* link:https://access.redhat.com/security/cve/CVE-2022-21360[CVE-2022-21360] -* link:https://access.redhat.com/security/cve/CVE-2022-21365[CVE-2022-21365] -* link:https://access.redhat.com/security/cve/CVE-2022-21366[CVE-2022-21366] -==== - -[id="cluster-logging-release-notes-5-3-3_{context}"] -== OpenShift Logging 5.3.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0227[RHSA-2022:0227 OpenShift Logging Bug Fix Release 5.3.3] - -[id="openshift-logging-5-3-3-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the cluster-logging-operator did not correctly compare existing and desired configmaps containing the dashboard. 
This update fixes the logic by adding a dashboard unique hash value to the object labels.(link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2102[LOG-2102]) - -=== CVEs -[id="openshift-logging-5-3-3-CVEs_{context}"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-3-2_{context}"] -== OpenShift Logging 5.3.2 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0044[RHSA-2022:0044 OpenShift Logging Bug Fix Release 5.3.2] - -[id="openshift-logging-5-3-2-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - -* Before this update, the OpenShift Logging Dashboard displayed the wrong pod namespace in the table that displays top producing and collected containers over the last 24 hours. With this update, the OpenShift Logging Dashboard displays the correct pod namespace. (link:https://issues.redhat.com/browse/LOG-2051[LOG-2051]) - -* Before this update, if `outputDefaults.elasticsearch.structuredTypeKey` in the `ClusterLogForwarder` custom resource (CR) instance did not have a structured key, the CR replaced the output secret with the default secret used to communicate to the default log store. With this update, the defined output secret is correctly used. (link:https://issues.redhat.com/browse/LOG-2046[LOG-2046]) - -[id="openshift-logging-5-3-2-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2020-36327[CVE-2020-36327] -** https://bugzilla.redhat.com/show_bug.cgi?id=1958999[BZ-1958999] -* https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -** https://bugzilla.redhat.com/show_bug.cgi?id=2034067[BZ-2034067] -* https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -==== - -[id="cluster-logging-release-notes-5-3-1_{context}"] -== OpenShift Logging 5.3.1 -This release includes link:https://access.redhat.com/errata/RHSA-2021:5129[RHSA-2021:5129 OpenShift Logging Bug Fix Release 5.3.1] - -[id="openshift-logging-5-3-1-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. 
(link:https://issues.redhat.com/browse/LOG-1998[LOG-1998]) - -* Before this update, the Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the Logging dashboard displays CPU graphs correctly. (link:https://issues.redhat.com/browse/LOG-1925[LOG-1925]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1897[LOG-1897]) - - -[id="openshift-logging-5-3-1-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1944888[BZ-1944888] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004133[BZ-2004133] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004135[BZ-2004135] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2030932[BZ-2030932] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - - -[id="cluster-logging-release-notes-5-3-0_{context}"] -== OpenShift Logging 5.3.0 -This release includes link:https://access.redhat.com/errata/RHSA-2021:4627[RHSA-2021:4627 OpenShift Logging Bug Fix Release 5.3.0] - -[id="openshift-logging-5-3-0-new-features-and-enhancements_{context}"] -=== New features and enhancements -* With this update, authorization options for Log Forwarding have been expanded. Outputs may now be configured with SASL, username/password, or TLS. - -[id="openshift-logging-5-3-0-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you forwarded logs using the syslog protocol, serializing a ruby hash encoded key/value pairs to contain a '=>' character and replaced tabs with "#11". This update fixes the issue so that log messages are correctly serialized as valid JSON. 
(link:https://issues.redhat.com/browse/LOG-1494[LOG-1494])
-
-* Before this update, application logs were not correctly configured to forward to the proper CloudWatch stream with multi-line error detection enabled. (link:https://issues.redhat.com/browse/LOG-1939[LOG-1939])
-
-* Before this update, a name change of the deployed collector in the 5.3 release caused the logging collector to generate the `FluentdNodeDown` alert. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918])
-
-* Before this update, a regression introduced in a prior release caused the collector to flush its buffered messages before shutdown, creating a delay in the termination and restart of collector pods. With this update, Fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1735[LOG-1735])
-
-* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry "level" based on the "level" field in the parsed JSON message, or by using a regular expression to extract a match from the message field. (link:https://issues.redhat.com/browse/LOG-1199[LOG-1199])
-
-* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the two values, `totalLimitSize` or the default, to the Fluentd `total_limit_size` field, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1776[LOG-1776])
-
-[id="openshift-logging-5-3-0-known-issues_{context}"]
-=== Known issues
-* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to the external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652])
-+
-As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering:
-+
-[source,terminal]
-----
-$ oc delete pod -l component=collector
-----
-
-[id="openshift-logging-5-3-0-deprecated-removed-features_{context}"]
-=== Deprecated and removed features
-Some features available in previous releases have been deprecated or removed.
-
-Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments.
-
-[id="openshift-logging-5-3-0-legacy-forwarding_{context}"]
-==== Forwarding logs using the legacy Fluentd and legacy syslog methods have been removed
-
-In OpenShift Logging 5.3, the legacy methods of forwarding logs to syslog and Fluentd are removed. Bug fixes and support are provided through the end of the OpenShift Logging 5.2 life cycle, after which no new feature enhancements are made.
- -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] - -[id="openshift-logging-5-3-0-legacy-forwarding-config_{context}"] -==== Configuration mechanisms for legacy forwarding methods have been removed - -In OpenShift Logging 5.3, the legacy configuration mechanism for log forwarding is removed: You cannot forward logs using the legacy Fluentd method and legacy Syslog method. Use the standard log forwarding methods instead. - -[id="openshift-logging-5-3-0-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-20673.html[CVE-2018-20673] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-14615.html[CVE-2019-14615] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-0427.html[CVE-2020-0427] -* link:https://www.redhat.com/security/data/cve/CVE-2020-10001.html[CVE-2020-10001] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24502.html[CVE-2020-24502] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24503.html[CVE-2020-24503] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24504.html[CVE-2020-24504] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24586.html[CVE-2020-24586] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24587.html[CVE-2020-24587] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24588.html[CVE-2020-24588] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26139.html[CVE-2020-26139] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-26140.html[CVE-2020-26140] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26141.html[CVE-2020-26141] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26143.html[CVE-2020-26143] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26144.html[CVE-2020-26144] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26145.html[CVE-2020-26145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26146.html[CVE-2020-26146] -* link:https://www.redhat.com/security/data/cve/CVE-2020-26147.html[CVE-2020-26147] -* link:https://www.redhat.com/security/data/cve/CVE-2020-27777.html[CVE-2020-27777] -* link:https://www.redhat.com/security/data/cve/CVE-2020-29368.html[CVE-2020-29368] -* link:https://www.redhat.com/security/data/cve/CVE-2020-29660.html[CVE-2020-29660] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35448.html[CVE-2020-35448] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36158.html[CVE-2020-36158] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36312.html[CVE-2020-36312] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36386.html[CVE-2020-36386] -* link:https://www.redhat.com/security/data/cve/CVE-2021-0129.html[CVE-2021-0129] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3348.html[CVE-2021-3348] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3487.html[CVE-2021-3487] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3489.html[CVE-2021-3489] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3564.html[CVE-2021-3564] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3573.html[CVE-2021-3573] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3600.html[CVE-2021-3600] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3635.html[CVE-2021-3635] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3659.html[CVE-2021-3659] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3679.html[CVE-2021-3679] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3732.html[CVE-2021-3732] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3778.html[CVE-2021-3778] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3796.html[CVE-2021-3796] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20194.html[CVE-2021-20194] -* 
link:https://www.redhat.com/security/data/cve/CVE-2021-20197.html[CVE-2021-20197] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20239.html[CVE-2021-20239] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20284.html[CVE-2021-20284] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-23133.html[CVE-2021-23133] -* link:https://www.redhat.com/security/data/cve/CVE-2021-23840.html[CVE-2021-23840] -* link:https://www.redhat.com/security/data/cve/CVE-2021-23841.html[CVE-2021-23841] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28950.html[CVE-2021-28950] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28971.html[CVE-2021-28971] -* link:https://www.redhat.com/security/data/cve/CVE-2021-29155.html[CVE-2021-29155] -* link:https://www.redhat.com/security/data/cve/CVE-2021-29646.htm[lCVE-2021-29646] -* link:https://www.redhat.com/security/data/cve/CVE-2021-29650.html[CVE-2021-29650] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31440.html[CVE-2021-31440] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31829.html[CVE-2021-31829] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31916.html[CVE-2021-31916] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33033.html[CVE-2021-33033] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33194.html[CVE-2021-33194] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33200.html[CVE-2021-33200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -==== - -include::modules/cluster-logging-rn-5.2.13.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.2.12.adoc[leveloffset=+1] - -include::modules/cluster-logging-rn-5.2.11.adoc[leveloffset=+1] - -[id="cluster-logging-release-notes-5-2-10_{context}"] -== OpenShift Logging 5.2.10 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/[ OpenShift Logging Bug Fix Release 5.2.10]] - -[id="openshift-logging-5-2-10-bug-fixes_{context}"] -=== Bug fixes -* Before this update some log forwarder outputs could re-order logs with the same time-stamp. 
With this update, a sequence number has been added to the log record to order entries that have matching timestamps.(https://issues.redhat.com/browse/LOG-2335[LOG-2335]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2475[LOG-2475]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2480[LOG-2480]) - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were only created when deploying the Operator using the console interface and were missing when the Operator was deployed from the command line. This fixes the issue by making this role and binding namespace scoped. (https://issues.redhat.com/browse/LOG-1972[LOG-1972]) - -[id="openshift-logging-5-2-10-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* link:https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* link:https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* link:https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* link:https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* link:https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* link:https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* link:https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* link:https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* link:https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-2-9_{context}"] -== OpenShift Logging 5.2.9 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:1375[RHBA-2022:1375 OpenShift Logging Bug Fix Release 5.2.9]] - -[id="openshift-logging-5-2-9-bug-fixes_{context}"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. 
(link:https://issues.redhat.com/browse/LOG-2304[LOG-2304]) - -[id="cluster-logging-release-notes-5-2-8_{context}"] -== OpenShift Logging 5.2.8 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0728[RHSA-2022:0728 OpenShift Logging Bug Fix Release 5.2.8] - -[id="openshift-logging-5-2-8-bug-fixes_{context}"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. (link:https://issues.redhat.com/browse/LOG-2180[LOG-2180]) - -[id="openshift-logging-5-2-8-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1930423[BZ-1930423] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2052539[BG-2052539] -==== - -[id="cluster-logging-release-notes-5-2-7_{context}"] -== OpenShift Logging 5.2.7 - -This release includes link:https://access.redhat.com/errata/RHBA-2022:0478[RHBA-2022:0478 OpenShift Logging Bug Fix Release 5.2.7] - -[id="openshift-logging-5-2-7-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch pods with FIPS enabled failed to start after updating. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-2000[LOG-2000]) - -* Before this update, if a persistent volume claim (PVC) already existed, Elasticsearch generated an error, "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2118[LOG-2118]) - -[id="openshift-logging-5-2-7-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -==== - -[id="cluster-logging-release-notes-5-2-6_{context}"] -== OpenShift Logging 5.2.6 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0230[RHSA-2022:0230 OpenShift Logging Bug Fix Release 5.2.6] - -[id="openshift-logging-5-2-6-bug-fixes_{context}"] -=== Bug fixes -* Before this update, the release did not include a filter change which caused Fluentd to crash. With this update, the missing filter has been corrected. 
(link:https://issues.redhat.com/browse/LOG-2104[LOG-2104]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832]. (link:https://issues.redhat.com/browse/LOG-2101[LOG-2101]) - -[id="openshift-logging-5-2-6-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-2-5_{context}"] -== OpenShift Logging 5.2.5 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0043[RHSA-2022:0043 OpenShift Logging Bug Fix Release 5.2.5] - -[id="openshift-logging-5-2-5-bug-fixes_{context}"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - - -[id="openshift-logging-5-2-5-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* link:https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* link:https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -* link:https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -==== - -[id="cluster-logging-release-notes-5-2-4_{context}"] -== OpenShift Logging 5.2.4 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:5127[RHSA-2021:5127 OpenShift Logging Bug Fix Release 5.2.4] - -[id="openshift-logging-5-2-4-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, records shipped via syslog serialized a Ruby hash to encode key/value pairs, so the output contained '=>' characters and tabs were replaced with "#11". This update serializes the message correctly as proper JSON. (link:https://issues.redhat.com/browse/LOG-1775[LOG-1775]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1970[LOG-1970]) - -* Before this update, Elasticsearch sometimes rejected messages when Log Forwarding was configured with multiple outputs. This happened because configuring one of the outputs modified message content to be a single message. With this update, Log Forwarding duplicates the messages for each output so that output-specific processing does not affect the other outputs.
(link:https://issues.redhat.com/browse/LOG-1824[LOG-1824]) - - -[id="openshift-logging-5-2-4-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -* 
link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - -[id="cluster-logging-release-notes-5-2-3_{context}"] -== OpenShift Logging 5.2.3 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:4032[RHSA-2021:4032 OpenShift Logging Bug Fix Release 5.2.3] - -[id="openshift-logging-5-2-3-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, some alerts did not include a namespace label. This omission does not comply with the OpenShift Monitoring Team's guidelines for writing alerting rules in {product-title}. With this update, all the alerts in Elasticsearch Operator include a namespace label and follow all the guidelines for writing alerting rules in {product-title}. (link:https://issues.redhat.com/browse/LOG-1857[LOG-1857]) - -* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry `level` based on the `level` field in parsed JSON message or by using regex to extract a match from a message field. 
(link:https://issues.redhat.com/browse/LOG-1759[LOG-1759]) - -[id="openshift-logging-5-2-3-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-23369[CVE-2021-23369] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1948761[BZ-1948761] -* link:https://access.redhat.com/security/cve/CVE-2021-23383[CVE-2021-23383] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1956688[BZ-1956688] -* link:https://access.redhat.com/security/cve/CVE-2018-20673[CVE-2018-20673] -* link:https://access.redhat.com/security/cve/CVE-2019-5827[CVE-2019-5827] -* link:https://access.redhat.com/security/cve/CVE-2019-13750[CVE-2019-13750] -* link:https://access.redhat.com/security/cve/CVE-2019-13751[CVE-2019-13751] -* link:https://access.redhat.com/security/cve/CVE-2019-17594[CVE-2019-17594] -* link:https://access.redhat.com/security/cve/CVE-2019-17595[CVE-2019-17595] -* link:https://access.redhat.com/security/cve/CVE-2019-18218[CVE-2019-18218] -* link:https://access.redhat.com/security/cve/CVE-2019-19603[CVE-2019-19603] -* link:https://access.redhat.com/security/cve/CVE-2019-20838[CVE-2019-20838] -* link:https://access.redhat.com/security/cve/CVE-2020-12762[CVE-2020-12762] -* link:https://access.redhat.com/security/cve/CVE-2020-13435[CVE-2020-13435] -* link:https://access.redhat.com/security/cve/CVE-2020-14155[CVE-2020-14155] -* link:https://access.redhat.com/security/cve/CVE-2020-16135[CVE-2020-16135] -* link:https://access.redhat.com/security/cve/CVE-2020-24370[CVE-2020-24370] -* link:https://access.redhat.com/security/cve/CVE-2021-3200[CVE-2021-3200] -* link:https://access.redhat.com/security/cve/CVE-2021-3426[CVE-2021-3426] -* link:https://access.redhat.com/security/cve/CVE-2021-3445[CVE-2021-3445] -* link:https://access.redhat.com/security/cve/CVE-2021-3572[CVE-2021-3572] -* link:https://access.redhat.com/security/cve/CVE-2021-3580[CVE-2021-3580] -* link:https://access.redhat.com/security/cve/CVE-2021-3778[CVE-2021-3778] -* link:https://access.redhat.com/security/cve/CVE-2021-3796[CVE-2021-3796] -* link:https://access.redhat.com/security/cve/CVE-2021-3800[CVE-2021-3800] -* link:https://access.redhat.com/security/cve/CVE-2021-20231[CVE-2021-20231] -* link:https://access.redhat.com/security/cve/CVE-2021-20232[CVE-2021-20232] -* link:https://access.redhat.com/security/cve/CVE-2021-20266[CVE-2021-20266] -* link:https://access.redhat.com/security/cve/CVE-2021-22876[CVE-2021-22876] -* link:https://access.redhat.com/security/cve/CVE-2021-22898[CVE-2021-22898] -* link:https://access.redhat.com/security/cve/CVE-2021-22925[CVE-2021-22925] -* link:https://access.redhat.com/security/cve/CVE-2021-23840[CVE-2021-23840] -* link:https://access.redhat.com/security/cve/CVE-2021-23841[CVE-2021-23841] -* link:https://access.redhat.com/security/cve/CVE-2021-27645[CVE-2021-27645] -* link:https://access.redhat.com/security/cve/CVE-2021-28153[CVE-2021-28153] -* link:https://access.redhat.com/security/cve/CVE-2021-33560[CVE-2021-33560] -* link:https://access.redhat.com/security/cve/CVE-2021-33574[CVE-2021-33574] -* link:https://access.redhat.com/security/cve/CVE-2021-35942[CVE-2021-35942] -* link:https://access.redhat.com/security/cve/CVE-2021-36084[CVE-2021-36084] -* link:https://access.redhat.com/security/cve/CVE-2021-36085[CVE-2021-36085] -* link:https://access.redhat.com/security/cve/CVE-2021-36086[CVE-2021-36086] -* link:https://access.redhat.com/security/cve/CVE-2021-36087[CVE-2021-36087] -==== - -[id="cluster-logging-release-notes-5-2-2_{context}"] -== 
OpenShift Logging 5.2.2 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3747[RHBA-2021:3747 OpenShift Logging Bug Fix Release 5.2.2] - -[id="openshift-logging-5-2-2-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the `totalLimitSize` value or the default value to the Fluentd `total_limit_size` field, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1738[LOG-1738]) - -* Before this update, a regression introduced in a prior release's configuration caused the collector to flush its buffered messages before shutdown, delaying the termination and restart of collector pods. With this update, Fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1739[LOG-1739]) - -* Before this update, an issue in the bundle manifests prevented installation of the Elasticsearch Operator through OLM on {product-title} 4.9. With this update, a correction to bundle manifests re-enables installation and upgrade in 4.9. (link:https://issues.redhat.com/browse/LOG-1780[LOG-1780]) - -[id="openshift-logging-5-2-2-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2020-25648.html[CVE-2020-25648] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37576.html[CVE-2021-37576] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -* link:https://www.redhat.com/security/data/cve/CVE-2021-38201.html[CVE-2021-38201] -==== - -[id="cluster-logging-release-notes-5-2-1_{context}"] -== OpenShift Logging 5.2.1 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] - -[id="openshift-logging-5-2-1-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, due to an issue in the release pipeline scripts, the value of the `olm.skipRange` field remained unchanged at `5.2.0` instead of reflecting the current release number. This update fixes the pipeline scripts to update the value of this field when the release numbers change. (link:https://issues.redhat.com/browse/LOG-1743[LOG-1743]) - -[id="openshift-logging-5-2-1-CVEs_{context}"] -=== CVEs - -(None) - - -[id="cluster-logging-release-notes-5-2-0_{context}"] -== OpenShift Logging 5.2.0 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3393[RHBA-2021:3393 OpenShift Logging Bug Fix Release 5.2.0] - -[id="openshift-logging-5-2-0-new-features-and-enhancements_{context}"] -=== New features and enhancements - -* With this update, you can forward log data to Amazon CloudWatch, which provides application and infrastructure monitoring. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch].
(link:https://issues.redhat.com/browse/LOG-1173[LOG-1173]) - -* With this update, you can forward log data to Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-loki_cluster-logging-external[Forwarding logs to Loki]. (link:https://issues.redhat.com/browse/LOG-684[LOG-684]) - -* With this update, if you use the Fluentd forward protocol to forward log data over a TLS-encrypted connection, now you can use a password-encrypted private key file and specify the passphrase in the Cluster Log Forwarder configuration. For more information, see xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol]. (link:https://issues.redhat.com/browse/LOG-1525[LOG-1525]) - -* This enhancement enables you to use a username and password to authenticate a log forwarding connection to an external Elasticsearch instance. For example, if you cannot use mutual TLS (mTLS) because a third-party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. For more information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-es_cluster-logging-external[Forwarding logs to an external Elasticsearch instance]. (link:https://issues.redhat.com/browse/LOG-1022[LOG-1022]) - -* With this update, you can collect OVN network policy audit logs for forwarding to a logging server. (link:https://issues.redhat.com/browse/LOG-1526[LOG-1526]) - -* By default, the data model introduced in {product-title} 4.5 gave logs from different namespaces a single index in common. This change made it harder to see which namespaces produced the most logs. -+ -The current release adds namespace metrics to the *Logging* dashboard in the {product-title} console. With these metrics, you can see which namespaces produce logs and how many logs each namespace produces for a given timestamp. -+ -To see these metrics, open the *Administrator* perspective in the {product-title} web console, and navigate to *Observe* -> *Dashboards* -> *Logging/Elasticsearch*. (link:https://issues.redhat.com/browse/LOG-1680[LOG-1680]) - -* The current release, OpenShift Logging 5.2, enables two new metrics: For a given timestamp or duration, you can see the total logs produced or logged by individual containers, and the total logs collected by the collector. These metrics are labeled by namespace, pod, and container name so that you can see how many logs each namespace and pod collects and produces. (link:https://issues.redhat.com/browse/LOG-1213[LOG-1213]) - -[id="openshift-logging-5-2-0-bug-fixes_{context}"] -=== Bug fixes - -* Before this update, when the OpenShift Elasticsearch Operator created index management cronjobs, it added the `POLICY_MAPPING` environment variable twice, which caused the apiserver to report the duplication. This update fixes the issue so that the `POLICY_MAPPING` environment variable is set only once per cronjob, and there is no duplication for the apiserver to report. (link:https://issues.redhat.com/browse/LOG-1130[LOG-1130]) - -* Before this update, suspending an Elasticsearch cluster to zero nodes did not suspend the index-management cronjobs, which put these cronjobs into maximum backoff. 
Then, after unsuspending the Elasticsearch cluster, these cronjobs stayed halted due to maximum backoff reached. This update resolves the issue by suspending the cronjobs and the cluster. (link:https://issues.redhat.com/browse/LOG-1268[LOG-1268]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers was missing the "chart namespace" label and provided the incorrect metric name, `fluentd_input_status_total_bytes_logged`. With this update, the chart shows the namespace label and the correct metric name, `log_logged_bytes_total`. (link:https://issues.redhat.com/browse/LOG-1271[LOG-1271]) - -* Before this update, if an index management cronjob terminated with an error, it did not report the error exit code: instead, its job status was "complete." This update resolves the issue by reporting the error exit codes of index management cronjobs that terminate with errors. (link:https://issues.redhat.com/browse/LOG-1273[LOG-1273]) - -* The `priorityclasses.v1beta1.scheduling.k8s.io` was removed in 1.22 and replaced by `priorityclasses.v1.scheduling.k8s.io` (`v1beta1` was replaced by `v1`). Before this update, `APIRemovedInNextReleaseInUse` alerts were generated for `priorityclasses` because `v1beta1` was still present . This update resolves the issue by replacing `v1beta1` with `v1`. The alert is no longer generated. (link:https://issues.redhat.com/browse/LOG-1385[LOG-1385]) - -* Previously, the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator did not have the annotation that was required for them to appear in the {product-title} web console list of Operators that can run in a disconnected environment. This update adds the `operators.openshift.io/infrastructure-features: '["Disconnected"]'` annotation to these two Operators so that they appear in the list of Operators that run in disconnected environments. (link:https://issues.redhat.com/browse/LOG-1420[LOG-1420]) - -* Before this update, Red Hat OpenShift Logging Operator pods were scheduled on CPU cores that were reserved for customer workloads on performance-optimized single-node clusters. With this update, cluster logging Operator pods are scheduled on the correct CPU cores. (link:https://issues.redhat.com/browse/LOG-1440[LOG-1440]) - -* Before this update, some log entries had unrecognized UTF-8 bytes, which caused Elasticsearch to reject the messages and block the entire buffered payload. With this update, rejected payloads drop the invalid log entries and resubmit the remaining entries to resolve the issue. (link:https://issues.redhat.com/browse/LOG-1499[LOG-1499]) - -* Before this update, the `kibana-proxy` pod sometimes entered the `CrashLoopBackoff` state and logged the following message `Invalid configuration: cookie_secret must be 16, 24, or 32 bytes to create an AES cipher when pass_access_token == true or cookie_refresh != 0, but is 29 bytes.` The exact actual number of bytes could vary. With this update, the generation of the Kibana session secret has been corrected, and the kibana-proxy pod no longer enters a `CrashLoopBackoff` state due to this error. (link:https://issues.redhat.com/browse/LOG-1446[LOG-1446]) - -* Before this update, the AWS CloudWatch Fluentd plugin logged its AWS API calls to the Fluentd log at all log levels, consuming additional {product-title} node resources. With this update, the AWS CloudWatch Fluentd plugin logs AWS API calls only at the "debug" and "trace" log levels. 
This way, at the default "warn" log level, Fluentd does not consume extra node resources. (link:https://issues.redhat.com/browse/LOG-1071[LOG-1071]) - -* Before this update, the Elasticsearch OpenDistro security plugin caused user index migrations to fail. This update resolves the issue by providing a newer version of the plugin. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1276[LOG-1276]) - -* Before this update, in the *Logging* dashboard in the {product-title} console, the list of top 10 log-producing containers lacked data points. This update resolves the issue, and the dashboard displays all data points. (link:https://issues.redhat.com/browse/LOG-1353[LOG-1353]) - -* Before this update, if you were tuning the performance of the Fluentd log forwarder by adjusting the `chunkLimitSize` and `totalLimitSize` values, the `Setting queued_chunks_limit_size for each buffer to` message reported values that were too low. The current update fixes this issue so that this message reports the correct values. (link:https://issues.redhat.com/browse/LOG-1411[LOG-1411]) - -* Before this update, the Kibana OpenDistro security plugin caused user index migrations to fail. This update resolves the issue by providing a newer version of the plugin. Now, index migrations proceed without errors. (link:https://issues.redhat.com/browse/LOG-1558[LOG-1558]) - -* Before this update, using a namespace input filter prevented logs in that namespace from appearing in other inputs. With this update, logs are sent to all inputs that can accept them. (link:https://issues.redhat.com/browse/LOG-1570[LOG-1570]) - -* Before this update, a missing license file for the `viaq/logerr` dependency caused license scanners to abort without success. With this update, the `viaq/logerr` dependency is licensed under Apache 2.0 and the license scanners run successfully. (link:https://issues.redhat.com/browse/LOG-1590[LOG-1590]) - -* Before this update, an incorrect brew tag for `curator5` within the `elasticsearch-operator-bundle` build pipeline caused the pull of an image pinned to a dummy SHA1. With this update, the build pipeline uses the `logging-curator5-rhel8` reference for `curator5`, enabling index management cronjobs to pull the correct image from `registry.redhat.io`. (link:https://issues.redhat.com/browse/LOG-1624[LOG-1624]) - -* Before this update, an issue with the `ServiceAccount` permissions caused errors such as `no permissions for [indices:admin/aliases/get]`. With this update, a permission fix resolves the issue. (link:https://issues.redhat.com/browse/LOG-1657[LOG-1657]) - -* Before this update, the Custom Resource Definition (CRD) for the Red Hat OpenShift Logging Operator was missing the Loki output type, which caused the admission controller to reject the `ClusterLogForwarder` custom resource object. With this update, the CRD includes Loki as an output type so that administrators can configure `ClusterLogForwarder` to send logs to a Loki server. (link:https://issues.redhat.com/browse/LOG-1683[LOG-1683]) - -* Before this update, OpenShift Elasticsearch Operator reconciliation of the `ServiceAccounts` overwrote third-party-owned fields that contained secrets. This issue caused memory and CPU spikes due to frequent recreation of secrets. This update resolves the issue. Now, the OpenShift Elasticsearch Operator does not overwrite third-party-owned fields. 
(link:https://issues.redhat.com/browse/LOG-1714[LOG-1714]) - -* Before this update, in the `ClusterLogging` custom resource (CR) definition, if you specified a `flush_interval` value but did not set `flush_mode` to `interval`, the Red Hat OpenShift Logging Operator generated a Fluentd configuration. However, the Fluentd collector generated an error at runtime. With this update, the Red Hat OpenShift Logging Operator validates the `ClusterLogging` CR definition and only generates the Fluentd configuration if both fields are specified. (link:https://issues.redhat.com/browse/LOG-1723[LOG-1723]) - -[id="openshift-logging-5-2-0-known-issues_{context}"] -=== Known issues - -* If you forward logs to an external Elasticsearch server and then change a configured value in the pipeline secret, such as the username and password, the Fluentd forwarder loads the new secret but uses the old value to connect to an external Elasticsearch server. This issue happens because the Red Hat OpenShift Logging Operator does not currently monitor secrets for content changes. (link:https://issues.redhat.com/browse/LOG-1652[LOG-1652]) -+ -As a workaround, if you change the secret, you can force the Fluentd pods to redeploy by entering: -+ -[source,terminal] ----- -$ oc delete pod -l component=collector ----- - -[id="openshift-logging-5-2-0-deprecated-removed-features_{context}"] -=== Deprecated and removed features - -Some features available in previous releases have been deprecated or removed. - -Deprecated functionality is still included in OpenShift Logging and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -[id="openshift-logging-5-2-0-legacy-forwarding_{context}"] -=== Forwarding logs using the legacy Fluentd and legacy syslog methods have been deprecated - -From {product-title} 4.6 to the present, forwarding logs by using the following legacy methods have been deprecated and will be removed in a future release: - -* Forwarding logs using the legacy Fluentd method -* Forwarding logs using the legacy syslog method - -Instead, use the following non-legacy methods: - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-fluentd_cluster-logging-external[Forwarding logs using the Fluentd forward protocol] - -* xref:../logging/cluster-logging-external.adoc#cluster-logging-collector-log-forward-syslog_cluster-logging-external[Forwarding logs using the syslog protocol] - -[id="openshift-logging-5-2-0-CVEs_{context}"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-32740.html[CVE-2021-32740] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -==== diff --git a/logging/cluster-logging-uninstall.adoc b/logging/cluster-logging-uninstall.adoc deleted file mode 100644 index 531a1ccff50f..000000000000 --- a/logging/cluster-logging-uninstall.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-uninstall -[id="cluster-logging-uninstall"] -= Uninstalling OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can 
remove the {logging} from your {product-title} cluster. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-uninstall.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -ifdef::openshift-enterprise,openshift-origin[] -* xref:../storage/understanding-persistent-storage.adoc#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#reclaim-manual_understanding-persistent-storage[Reclaiming a persistent volume manually] -endif::[] \ No newline at end of file diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc deleted file mode 100644 index 9b6bee0ea1cc..000000000000 --- a/logging/cluster-logging-upgrading.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-upgrading -[id="cluster-logging-upgrading"] -= Updating OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -[id="cluster-logging-supported-versions"] -== Supported Versions -For version compatibility and support information, see link:https://access.redhat.com/support/policy/updates/openshift#logging[Red Hat OpenShift Container Platform Life Cycle Policy] - -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: - -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x - -To upgrade from a previous version of OpenShift Logging to the current version, you update OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator to their current versions. - -include::modules/cluster-logging-updating-logging-to-current.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-visualizer.adoc b/logging/cluster-logging-visualizer.adoc deleted file mode 100644 index 6ded5d71ff00..000000000000 --- a/logging/cluster-logging-visualizer.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-visualizer -[id="cluster-logging-visualizer-using"] -= Viewing cluster logs by using Kibana -include::_attributes/common-attributes.adoc[] - -toc::[] - -The {logging} includes a web console for visualizing collected log data. Currently, {product-title} deploys the Kibana console for visualization. - -Using the log visualizer, you can do the following with your data: - -* search and browse the data using the *Discover* tab. -* chart and map the data using the *Visualize* tab. -* create and view custom dashboards using the *Dashboard* tab. - -Use and configuration of the Kibana interface is beyond the scope of this documentation. For more information, -on using the interface, see the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Kibana documentation]. - -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. 
To view the audit logs in Kibana, you must use the xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Log Forwarding API] to configure a pipeline that uses the `default` output for audit logs. -==== - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-visualizer-indices.adoc[leveloffset=+1] -include::modules/cluster-logging-visualizer-kibana.adoc[leveloffset=+1] diff --git a/logging/cluster-logging.adoc b/logging/cluster-logging.adoc deleted file mode 100644 index 3ee017345a73..000000000000 --- a/logging/cluster-logging.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -[id="cluster-logging"] -= Understanding the {logging-title} -:context: cluster-logging - -toc::[] - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -As a cluster administrator, you can deploy the {logging} to aggregate all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs. The {logging} aggregates these logs from throughout your cluster and stores them in a default log store. You can xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer[use the Kibana web console to visualize log data]. - -The {logging} aggregates the following types of logs: - -* `application` - Container logs generated by user applications running in the cluster, except infrastructure container applications. -* `infrastructure` - Logs generated by infrastructure components running in the cluster and {product-title} nodes, such as journal logs. Infrastructure components are pods that run in the `openshift*`, `kube*`, or `default` projects. -* `audit` - Logs generated by auditd, the node audit system, which are stored in the */var/log/audit/audit.log* file, and the audit logs from the Kubernetes apiserver and the OpenShift apiserver. - -[NOTE] -==== -Because the internal {product-title} Elasticsearch log store does not provide secure storage for audit logs, audit logs are not stored in the internal Elasticsearch instance by default. If you want to send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API as described in xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-audit_cluster-logging-store[Forward audit logs to the log store]. -==== -endif::[] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -ifdef::openshift-rosa,openshift-dedicated[] -include::modules/cluster-logging-cloudwatch.adoc[leveloffset=+1] -.Next steps -* See xref:../logging/cluster-logging-external.html#cluster-logging-collector-log-forward-cloudwatch_cluster-logging-external[Forwarding logs to Amazon CloudWatch] for instructions. 
-endif::[] - -include::modules/logging-common-terms.adoc[leveloffset=+1] -include::modules/cluster-logging-about.adoc[leveloffset=+1] - -For information, see xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploying[Installing the {logging-title}]. - -include::modules/cluster-logging-json-logging-about.adoc[leveloffset=+2] - -include::modules/cluster-logging-collecting-storing-kubernetes-events.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[About collecting and storing Kubernetes events]. - -include::modules/cluster-logging-update-logging.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-upgrading.adoc#cluster-logging-upgrading[Updating OpenShift Logging]. - -include::modules/cluster-logging-view-cluster-dashboards.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards[About viewing the cluster dashboard]. - -include::modules/cluster-logging-troubleshoot-logging.adoc[leveloffset=+2] - -include::modules/cluster-logging-Uninstall-logging.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-uninstall.adoc#cluster-logging-uninstall_cluster-logging-uninstall[Uninstalling OpenShift Logging]. - -include::modules/cluster-logging-export-fields.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-exported-fields.adoc#cluster-logging-exported-fields[About exporting fields]. - -include::modules/cluster-logging-about-components.adoc[leveloffset=+2] - -include::modules/cluster-logging-about-collector.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-collector.adoc#cluster-logging-collector[Configuring the logging collector]. - -include::modules/cluster-logging-about-logstore.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-log-store.adoc#cluster-logging-store[Configuring the log store]. - -include::modules/cluster-logging-about-visualizer.adoc[leveloffset=+2] - -For information, see xref:../logging/config/cluster-logging-visualizer.adoc#cluster-logging-visualizer[Configuring the log visualizer]. - -include::modules/cluster-logging-eventrouter-about.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-eventrouter.adoc#cluster-logging-eventrouter[Collecting and storing Kubernetes events]. - -include::modules/cluster-logging-forwarding-about.adoc[leveloffset=+2] - -For information, see xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems]. 
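To make the log-forwarding capabilities referenced above (for example, the 5.2.0 enhancements for CloudWatch, Loki, and external Elasticsearch outputs) more concrete, the following is a minimal, hypothetical `ClusterLogForwarder` sketch. The output name, URL, and secret name are placeholders for illustration and do not come from the deleted files:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: external-es                             # hypothetical output name
    type: elasticsearch
    url: https://elasticsearch.example.com:9200   # placeholder URL
    secret:
      name: es-credentials                        # hypothetical secret holding the username/password keys
  pipelines:
  - name: forward-app-logs
    inputRefs:
    - application                                 # infrastructure and audit inputs can be listed here as well
    outputRefs:
    - external-es
----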
- -include::modules/cluster-logging-feature-reference.adoc[leveloffset=+1] diff --git a/logging/config/_attributes b/logging/config/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/config/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/config/cluster-logging-collector.adoc b/logging/config/cluster-logging-collector.adoc deleted file mode 100644 index d19b0ce8a72a..000000000000 --- a/logging/config/cluster-logging-collector.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-collector -[id="cluster-logging-collector"] -= Configuring the logging collector -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -{logging-title-uc} collects operations and application logs from your cluster and enriches the data with Kubernetes pod and project metadata. - -You can configure the CPU and memory limits for the log collector and xref:../../logging/config/cluster-logging-moving-nodes.adoc#cluster-logging-moving[move the log collector pods to specific nodes]. All supported modifications to the log collector can be performed though the `spec.collection.log.fluentd` stanza in the `ClusterLogging` custom resource (CR). - - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-maintenance-support-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-pod-location.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-limits.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tuning.adoc[leveloffset=+1] - -include::modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems] diff --git a/logging/config/cluster-logging-configuring-cr.adoc b/logging/config/cluster-logging-configuring-cr.adoc deleted file mode 100644 index 4cb8c3f1d142..000000000000 --- a/logging/config/cluster-logging-configuring-cr.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-configuring-cr -[id="cluster-logging-configuring-cr"] -= About the Cluster Logging custom resource -include::_attributes/common-attributes.adoc[] - -toc::[] - -To configure {logging-title} you customize the `ClusterLogging` custom resource (CR). - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. 
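As a hedged illustration of the kind of `ClusterLogging` CR customization described above, the following excerpt shows how the collector CPU and memory limits might be set under the `spec.collection.logs.fluentd` stanza; the resource values are illustrative only and are not taken from the deleted content:

[source,yaml]
----
spec:
  collection:
    logs:
      type: "fluentd"
      fluentd:
        resources:            # collector CPU and memory requests and limits
          limits:
            memory: 736Mi     # illustrative value
          requests:
            cpu: 100m
            memory: 736Mi
----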
- -include::modules/cluster-logging-about-crd.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-configuring.adoc b/logging/config/cluster-logging-configuring.adoc deleted file mode 100644 index 251f7e03d361..000000000000 --- a/logging/config/cluster-logging-configuring.adoc +++ /dev/null @@ -1,81 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-configuring -[id="cluster-logging-configuring"] -= Configuring OpenShift Logging -include::_attributes/common-attributes.adoc[] - -toc::[] - -{logging-title-uc} is configurable using a `ClusterLogging` custom resource (CR) deployed -in the `openshift-logging` project. - -The {logging} operator watches for changes to `ClusterLogging` CR, -creates any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource definition (CRD), which defines a complete {logging} environment and includes all the components of the logging stack to collect, store and visualize logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - creationTimestamp: '2019-03-20T18:07:02Z' - generation: 1 - name: instance - namespace: openshift-logging -spec: - collection: - logs: - fluentd: - resources: null - type: fluentd - logStore: - elasticsearch: - nodeCount: 3 - redundancyPolicy: SingleRedundancy - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - storage: {} - type: elasticsearch - managementState: Managed - visualization: - kibana: - proxy: - resources: null - replicas: 1 - resources: null - type: kibana ----- -You can configure the following for the {logging}: - -* You can overwrite the image for each {logging} component by modifying the appropriate -environment variable in the `cluster-logging-operator` Deployment. - -* You can specify specific nodes for the logging components using node selectors. - -//// -* You can specify the Log collectors to deploy to each node in a cluster, either Fluentd or Rsyslog. - -[IMPORTANT] -==== -The Rsyslog log collector is currently a Technology Preview feature. -==== -//// - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -// modules/cluster-logging-configuring-image-about.adoc[leveloffset=+1] - -[IMPORTANT] -==== -The logging routes are managed by the {logging-title} Operator and cannot be modified by the user. -==== diff --git a/logging/config/cluster-logging-log-store.adoc b/logging/config/cluster-logging-log-store.adoc deleted file mode 100644 index 0292b0ffe999..000000000000 --- a/logging/config/cluster-logging-log-store.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-store"] -= Configuring the log store -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: cluster-logging-store - -toc::[] - -{logging-title-uc} uses Elasticsearch 6 (ES) to store and organize the log data. 
- -You can make modifications to your log store, including: - -* storage for your Elasticsearch cluster -* shard replication across data nodes in the cluster, from full replication to no replication -* external access to Elasticsearch data - -//Following paragraph also in modules/cluster-logging-deploy-storage-considerations.adoc - -Elasticsearch is a memory-intensive application. Each Elasticsearch node needs at least 16G of memory for both memory requests and limits, unless you specify otherwise in the `ClusterLogging` custom resource. The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended -or higher memory, up to a maximum of 64G for each Elasticsearch node. - -Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. - - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-elasticsearch-audit.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For more information on the Log Forwarding API, see xref:../../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs using the Log Forwarding API]. - -include::modules/cluster-logging-elasticsearch-retention.adoc[leveloffset=+1] - -include::modules/cluster-logging-logstore-limits.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-ha.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-scaledown.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-storage.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc[leveloffset=+1] - -include::modules/cluster-logging-manual-rollout-rolling.adoc[leveloffset=+1] - -include::modules/cluster-logging-elasticsearch-exposing.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-maintenance-support.adoc b/logging/config/cluster-logging-maintenance-support.adoc deleted file mode 100644 index fba711d9d4be..000000000000 --- a/logging/config/cluster-logging-maintenance-support.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-unsupported -[id="cluster-logging-maintenance-and-support"] -= Maintenance and support -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/cluster-logging-maintenance-support-about.adoc[leveloffset=+1] - -include::modules/cluster-logging-maintenance-support-list.adoc[leveloffset=+1] - -include::modules/unmanaged-operators.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-memory.adoc b/logging/config/cluster-logging-memory.adoc deleted file mode 100644 index 154fd68985c4..000000000000 --- a/logging/config/cluster-logging-memory.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-memory -[id="cluster-logging-memory"] -= Configuring CPU and memory limits for {logging} components -include::_attributes/common-attributes.adoc[] - -toc::[] - - -You can configure both the CPU and memory limits for each of the {logging} components as needed. - - -// The following include statements pull in the module files that comprise -// the assembly. 
Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-moving-nodes.adoc b/logging/config/cluster-logging-moving-nodes.adoc deleted file mode 100644 index 89ca4a5032be..000000000000 --- a/logging/config/cluster-logging-moving-nodes.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-moving -[id="cluster-logging-moving"] -= Moving {logging} resources with node selectors -include::_attributes/common-attributes.adoc[] - -toc::[] - - - - - -You can use node selectors to deploy the Elasticsearch and Kibana pods to different nodes. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/infrastructure-moving-logging.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-storage-considerations.adoc b/logging/config/cluster-logging-storage-considerations.adoc deleted file mode 100644 index 83d432692e54..000000000000 --- a/logging/config/cluster-logging-storage-considerations.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-storage -[id="cluster-logging-storage"] -= Configuring {logging} storage -include::_attributes/common-attributes.adoc[] - -toc::[] - - -Elasticsearch is a memory-intensive application. The default {logging} installation deploys 16G of memory for both memory requests and memory limits. -The initial set of {product-title} nodes might not be large enough to support the Elasticsearch cluster. You must add additional nodes to the {product-title} cluster to run with the recommended or higher memory. Each Elasticsearch node can operate with a lower memory setting, though this is not recommended for production environments. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-deploy-storage-considerations.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="cluster-logging-storage-considerations-addtl-resources"] -== Additional resources - -* xref:../../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-storage_cluster-logging-store[Configuring persistent storage for the log store] diff --git a/logging/config/cluster-logging-systemd.adoc b/logging/config/cluster-logging-systemd.adoc deleted file mode 100644 index aa6fb1228fc8..000000000000 --- a/logging/config/cluster-logging-systemd.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-systemd -[id="cluster-logging-systemd"] -= Configuring systemd-journald and Fluentd -include::_attributes/common-attributes.adoc[] - -toc::[] - -Because Fluentd reads from the journal, and the journal default settings are very low, journal entries can be lost because the journal cannot keep up with the logging rate from system services. - -We recommend setting `RateLimitIntervalSec=30s` and `RateLimitBurst=10000` (or even higher if necessary) to prevent the journal from losing entries. 
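The journald settings recommended above can be sketched as a `journald.conf` excerpt. On {product-title} nodes these values are normally rolled out through a machine config rather than edited in place, so treat this as an illustration of the recommended values only:

[source,text]
----
# /etc/systemd/journald.conf (excerpt, illustrative)
[Journal]
RateLimitIntervalSec=30s
RateLimitBurst=10000
----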
- -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-systemd-scaling.adoc[leveloffset=+1] diff --git a/logging/config/cluster-logging-tolerations.adoc b/logging/config/cluster-logging-tolerations.adoc deleted file mode 100644 index aeb9ce64e70b..000000000000 --- a/logging/config/cluster-logging-tolerations.adoc +++ /dev/null @@ -1,104 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-tolerations -[id="cluster-logging-tolerations"] -= Using tolerations to control OpenShift Logging pod placement -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can use taints and tolerations to ensure that {logging} pods run -on specific nodes and that no other workload can run on those nodes. - -Taints and tolerations are simple `key:value` pair. A taint on a node -instructs the node to repel all pods that do not tolerate the taint. - -The `key` is any string, up to 253 characters and the `value` is any string up to 63 characters. -The string must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. - -.Sample {logging} CR with tolerations -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - tolerations: <1> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: {} - redundancyPolicy: "ZeroRedundancy" - visualization: - type: "kibana" - kibana: - tolerations: <2> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi - replicas: 1 - collection: - logs: - type: "fluentd" - fluentd: - tolerations: <3> - - key: "logging" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 6000 - resources: - limits: - memory: 2Gi - requests: - cpu: 100m - memory: 1Gi ----- - -<1> This toleration is added to the Elasticsearch pods. -<2> This toleration is added to the Kibana pod. -<3> This toleration is added to the logging collector pods. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-elasticsearch-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-kibana-tolerations.adoc[leveloffset=+1] - -include::modules/cluster-logging-collector-tolerations.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="cluster-logging-tolerations-addtl-resources"] -== Additional resources - -ifdef::openshift-enterprise,openshift-origin[] -* xref:../../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -* link:https://docs.openshift.com/container-platform/latest/nodes/scheduling/nodes-scheduler-taints-tolerations.html#nodes-scheduler-taints-tolerations[Controlling pod placement using node taints]. 
-endif::[] \ No newline at end of file diff --git a/logging/config/cluster-logging-visualizer.adoc b/logging/config/cluster-logging-visualizer.adoc deleted file mode 100644 index f1d0670fe26f..000000000000 --- a/logging/config/cluster-logging-visualizer.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-visualizer -[id="cluster-logging-visualizer"] -= Configuring the log visualizer -include::_attributes/common-attributes.adoc[] - -toc::[] - -{product-title} uses Kibana to display the log data collected by the {logging}. - -You can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - -include::modules/cluster-logging-cpu-memory.adoc[leveloffset=+1] - -include::modules/cluster-logging-kibana-scaling.adoc[leveloffset=+1] diff --git a/logging/config/images b/logging/config/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/config/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/config/modules b/logging/config/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/logging/config/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/logging/config/snippets b/logging/config/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/config/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/dedicated-cluster-deploying.adoc b/logging/dedicated-cluster-deploying.adoc deleted file mode 100644 index 3c618d182d34..000000000000 --- a/logging/dedicated-cluster-deploying.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_content-type: ASSEMBLY -:context: dedicated-cluster-deploying -[id="dedicated-cluster-deploying"] -= Installing the Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator -include::_attributes/common-attributes.adoc[] - -toc::[] - -include::modules/dedicated-cluster-install-deploy.adoc[leveloffset=+1] diff --git a/logging/dedicated-cluster-logging.adoc b/logging/dedicated-cluster-logging.adoc deleted file mode 100644 index 9da78a1e6091..000000000000 --- a/logging/dedicated-cluster-logging.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -:context: dedicated-cluster-logging -[id="dedicated-cluster-logging"] -= Configuring the {logging-title} -include::_attributes/common-attributes.adoc[] - -toc::[] - -As a cluster administrator, you can deploy the {logging} to aggregate logs for a range of services. - -{product-title} clusters can perform logging tasks using the OpenShift Elasticsearch Operator. - -The {logging} is configurable using a `ClusterLogging` custom resource (CR) -deployed in the `openshift-logging` project namespace. - -The Red Hat OpenShift Logging Operator watches for changes to `ClusterLogging` CR, creates -any missing logging components, and adjusts the logging environment accordingly. - -The `ClusterLogging` CR is based on the `ClusterLogging` custom resource -definition (CRD), which defines a complete OpenShift Logging environment and -includes all the components of the logging stack to collect, store, and visualize -logs. 
- -The `retentionPolicy` parameter in the `ClusterLogging` custom resource (CR) defines how long the internal Elasticsearch log store retains logs. - -.Sample `ClusterLogging` custom resource (CR) -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200Gi" - redundancyPolicy: "SingleRedundancy" - nodeSelector: - node-role.kubernetes.io/worker: "" - resources: - limits: - memory: 16G - requests: - memory: 16G - visualization: - type: "kibana" - kibana: - replicas: 1 - nodeSelector: - node-role.kubernetes.io/worker: "" - collection: - logs: - type: "fluentd" - fluentd: {} - nodeSelector: - node-role.kubernetes.io/worker: "" ----- diff --git a/logging/images b/logging/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/logging/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/logging/modules b/logging/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/logging/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/logging/rosa-viewing-logs.adoc b/logging/rosa-viewing-logs.adoc deleted file mode 100644 index 17f65bafe2ae..000000000000 --- a/logging/rosa-viewing-logs.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_content-type: ASSEMBLY -[id="rosa-viewing-logs"] -= Viewing cluster logs in the AWS Console -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: rosa-viewing-logs -toc::[] - -You can view forwarded cluster logs in the AWS console. - -include::modules/rosa-view-cloudwatch-logs.adoc[leveloffset=+1] diff --git a/logging/sd-accessing-the-service-logs.adoc b/logging/sd-accessing-the-service-logs.adoc deleted file mode 100644 index f1ac7ad2e886..000000000000 --- a/logging/sd-accessing-the-service-logs.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: ASSEMBLY -[id="sd-accessing-the-service-logs"] -= Accessing the service logs for {product-title} clusters -include::_attributes/attributes-openshift-dedicated.adoc[] -:context: sd-accessing-the-service-logs - -toc::[] - -[role="_abstract"] -You can view the service logs for your {product-title} -ifdef::openshift-rosa[] - (ROSA) -endif::[] - clusters by using the {cluster-manager-first}. The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//You can view the service logs for your {product-title} (ROSA) clusters by using {cluster-manager-first} or the {cluster-manager} CLI (`ocm`). The service logs detail cluster events such as load balancer quota updates and scheduled maintenance upgrades. The logs also show cluster resource changes such as the addition or deletion of users, groups, and identity providers. - -Additionally, you can add notification contacts for -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] - cluster. Subscribed users receive emails about cluster events that require customer action, known cluster incidents, upgrade maintenance, and other topics.
- -// Commented out while the OpenShift Cluster Manager CLI is in Developer Preview: -//include::modules/viewing-the-service-logs.adoc[leveloffset=+1] -//include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+2] -//include::modules/viewing-the-service-logs-cli.adoc[leveloffset=+2] -include::modules/viewing-the-service-logs-ocm.adoc[leveloffset=+1] -include::modules/adding-cluster-notification-contacts.adoc[leveloffset=+1] diff --git a/logging/snippets b/logging/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/logging/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/logging/troubleshooting/_attributes b/logging/troubleshooting/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/troubleshooting/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/troubleshooting/cluster-logging-alerts.adoc b/logging/troubleshooting/cluster-logging-alerts.adoc deleted file mode 100644 index 63079a60893e..000000000000 --- a/logging/troubleshooting/cluster-logging-alerts.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-alerts -[id="cluster-logging-alerts"] -= Understanding {logging} alerts -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -All of the logging collector alerts are listed on the Alerting UI of the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}. -endif::[] - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-collector-alerts-viewing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For more information on the Alerting UI, see -ifdef::openshift-enterprise,openshift-origin[] -xref:../../monitoring/managing-alerts.adoc#managing-alerts[Managing alerts]. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/monitoring/managing-alerts.html#managing-alerts[Managing alerts]. -endif::[] - -include::modules/cluster-logging-collector-alerts.adoc[leveloffset=+1] -include::modules/cluster-logging-elasticsearch-rules.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-cluster-status.adoc b/logging/troubleshooting/cluster-logging-cluster-status.adoc deleted file mode 100644 index 22b4c854a6a3..000000000000 --- a/logging/troubleshooting/cluster-logging-cluster-status.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-cluster-status -[id="cluster-logging-cluster-status"] -= Viewing OpenShift Logging status -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the Red Hat OpenShift Logging Operator and for a number of {logging} components. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. 
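At a glance, the Operator and component status can also be checked directly from the CLI. This is a supplementary sketch, not part of the included modules; it assumes the default `ClusterLogging` CR name `instance` used in the samples elsewhere in this document:

[source,terminal]
----
oc -n openshift-logging get deployment cluster-logging-operator
oc -n openshift-logging get clusterlogging instance -o yaml
oc -n openshift-logging get pods -l component=elasticsearch
----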
- - -include::modules/cluster-logging-clo-status.adoc[leveloffset=+1] - -include::modules/cluster-logging-clo-status-comp.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-log-store-status.adoc b/logging/troubleshooting/cluster-logging-log-store-status.adoc deleted file mode 100644 index 49644546fbd8..000000000000 --- a/logging/troubleshooting/cluster-logging-log-store-status.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-elasticsearch -[id="cluster-logging-log-store-status"] -= Viewing the status of the Elasticsearch log store -include::_attributes/common-attributes.adoc[] - -toc::[] - -You can view the status of the OpenShift Elasticsearch Operator and for a number of Elasticsearch components. - -// The following include statements pull in the module files that comprise -// the assembly. Include any combination of concept, procedure, or reference -// modules required to cover the user story. You can also include other -// assemblies. - - -include::modules/cluster-logging-log-store-status-viewing.adoc[leveloffset=+1] - -include::modules/cluster-logging-log-store-status-comp.adoc[leveloffset=+1] - -include::modules/ref_cluster-logging-elasticsearch-cluster-status.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-must-gather.adoc b/logging/troubleshooting/cluster-logging-must-gather.adoc deleted file mode 100644 index 57f44d87f1b9..000000000000 --- a/logging/troubleshooting/cluster-logging-must-gather.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -:context: cluster-logging-must-gather -[id="cluster-logging-must-gather"] -= Collecting logging data for Red Hat Support -include::_attributes/common-attributes.adoc[] -include::_attributes/attributes-openshift-dedicated.adoc[] - -toc::[] - -When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. - -The -ifdef::openshift-enterprise,openshift-origin[] -xref:../../support/gathering-cluster-data.adoc#gathering-cluster-data[`must-gather` tool] -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -link:https://docs.openshift.com/container-platform/latest/support/gathering-cluster-data.html#gathering-cluster-data[`must-gather` tool] -endif::[] -enables you to collect diagnostic information for project-level resources, cluster-level resources, and each of the {logging} components. - -For prompt support, supply diagnostic information for both {product-title} and OpenShift Logging. - -[NOTE] -==== -Do not use the `hack/logging-dump.sh` script. The script is no longer supported and does not collect data. -==== - -include::modules/cluster-logging-must-gather-about.adoc[leveloffset=+1] - -[id="cluster-logging-must-gather-prereqs"] -== Prerequisites - -* The {logging} and Elasticsearch must be installed. 
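The included module documents the authoritative collection procedure. As a quick, hedged sketch of what that collection typically looks like, the logging-specific image can be passed to `oc adm must-gather`; the jsonpath expression below only resolves the Red Hat OpenShift Logging Operator image and is illustrative:

[source,terminal]
----
oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator \
  -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}')
----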
- -include::modules/cluster-logging-must-gather-collecting.adoc[leveloffset=+1] diff --git a/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc b/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc deleted file mode 100644 index f58cef1f008c..000000000000 --- a/logging/troubleshooting/cluster-logging-troubleshooting-for-critical-alerts.adoc +++ /dev/null @@ -1,554 +0,0 @@ -:_content-type: ASSEMBLY -[id="cluster-logging-troubleshooting-for-critical-alerts"] -= Troubleshooting for Critical Alerts -include::_attributes/common-attributes.adoc[] - -toc::[] -:toclevels: 2 - -// WARNING - DO NOT ALTER THE URL PATH OF THIS CONTENT, OR YOU WILL BREAK LINKS FROM ALERT MESSAGES THAT LINK TO THIS CONTENT. -// However, if you must make such changes, consult with the logging team beforehand. - - -[id="elasticsearch-cluster-health-is-red"] -== Elasticsearch Cluster Health is Red - -At least one primary shard and its replicas are not allocated to a node. - -.Troubleshooting - -. Check the Elasticsearch cluster health and verify that the cluster `status` is red. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- health ----- - -. List the nodes that have joined the cluster. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/nodes?v ----- - -. List the Elasticsearch pods and compare them with the nodes in the command output from the previous step. -+ -[source,terminal] ----- -oc -n openshift-logging get pods -l component=elasticsearch ----- - -. If some of the Elasticsearch nodes have not joined the cluster, perform the following steps. - -.. Confirm that Elasticsearch has an elected control plane node. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/master?v ----- - -.. Review the pod logs of the elected control plane node for issues. -+ -[source,terminal] ----- -oc logs -c elasticsearch -n openshift-logging ----- - -.. Review the logs of nodes that have not joined the cluster for issues. -+ -[source,terminal] ----- -oc logs -c elasticsearch -n openshift-logging ----- - -. If all the nodes have joined the cluster, check if the cluster is in the process of recovering. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/recovery?active_only=true ----- -+ -If there is no command output, the recovery process might be delayed or stalled by pending tasks. - -. Check if there are pending tasks. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- health | grep number_of_pending_tasks ----- - -. If there are pending tasks, monitor their status. -+ -If their status changes and indicates that the cluster is recovering, continue waiting. The recovery time varies according to the size of the cluster and other factors. -+ -Otherwise, if the status of the pending tasks does not change, this indicates that the recovery has stalled. - -. If it seems like the recovery has stalled, check if `cluster.routing.allocation.enable` is set to `none`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/settings?pretty ----- - -. If `cluster.routing.allocation.enable` is set to `none`, set it to `all`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/settings?pretty -X PUT -d '{"persistent": {"cluster.routing.allocation.enable":"all"}}' ----- - -. 
Check which indices are still red. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/indices?v ----- - -. If any indices are still red, try to clear them by performing the following steps. - -.. Clear the cache. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_cache/clear?pretty ----- - -.. Increase the max allocation retries. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_settings?pretty -X PUT -d '{"index.allocation.max_retries":10}' ----- - -.. Delete all the scroll items. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_search/scroll/_all -X DELETE ----- - -.. Increase the timeout. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=/_settings?pretty -X PUT -d '{"index.unassigned.node_left.delayed_timeout":"10m"}' ----- - -. If the preceding steps do not clear the red indices, delete the indices individually. - -.. Identify the red index name. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cat/indices?v ----- - -.. Delete the red index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -. If there are no red indices and the cluster status is red, check for a continuous heavy processing load on a data node. - -.. Check if the Elasticsearch JVM Heap usage is high. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_nodes/stats?pretty ----- -+ -In the command output, review the `node_name.jvm.mem.heap_used_percent` field to determine the JVM Heap usage. - -.. Check for high CPU utilization. - -[role="_additional-resources"] -.Additional resources - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - -[id="elasticsearch-cluster-health-is-yellow"] -== Elasticsearch Cluster Health is Yellow - -Replica shards for at least one primary shard are not allocated to nodes. - -.Troubleshooting - -. Increase the node count by adjusting `nodeCount` in the `ClusterLogging` CR. - -[role="_additional-resources"] -.Additional resources - -//* Search for "Elasticsearch Disk Usage" in xref:../../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[OpenShift Logging dashboards]. -* xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] -* xref:../../logging/config/cluster-logging-log-store.adoc#cluster-logging-elasticsearch-storage_cluster-logging-store[Configuring persistent storage for the log store] - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - - -// [id="elasticsearch-write-requests-rejection-jumps"] -// == Elasticsearch Write Requests Rejection Jumps -// -// .Troubleshooting -// TBD -// Note for writer: This is a warning alert and we haven't documented troubleshooting steps for warning alerts yet. I guess you can skip this in current release. 
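For the yellow-status remediation above, increasing `nodeCount` can be done by editing or patching the `ClusterLogging` CR. The following is a minimal sketch that assumes the default CR name `instance` and uses four Elasticsearch nodes as an example target:

[source,terminal]
----
oc -n openshift-logging patch clusterlogging/instance --type merge \
  -p '{"spec":{"logStore":{"elasticsearch":{"nodeCount":4}}}}'
----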
- -[id="elasticsearch-node-disk-low-watermark-reached"] -== Elasticsearch Node Disk Low Watermark Reached - -Elasticsearch does not allocate shards to nodes that https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[reach the low watermark]. - -.Troubleshooting - -. Identify the node on which Elasticsearch is deployed. -+ -[source,terminal] ----- -oc -n openshift-logging get po -o wide ----- - -. Check if there are `unassigned shards`. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/health?pretty | grep unassigned_shards ----- - -. If there are unassigned shards, check the disk space on each node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. Check the `nodes.node_name.fs` field to determine the free disk space on that node. -+ -If the used disk percentage is above 85%, the node has exceeded the low watermark, and shards can no longer be allocated to this node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-node-disk-high-watermark-reached"] -== Elasticsearch Node Disk High Watermark Reached - -Elasticsearch attempts to relocate shards away from a node link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[that has reached the high watermark]. - -.Troubleshooting - -. Identify the node on which Elasticsearch is deployed. -+ -[source,terminal] ----- -oc -n openshift-logging get po -o wide ----- - -. Check the disk space on each node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. Check if the cluster is rebalancing. 
-+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_cluster/health?pretty | grep relocating_shards ----- -+ -If the command output shows relocating shards, the High Watermark has been exceeded. The default value of the High Watermark is 90%. -+ -The shards relocate to a node with low disk usage that has not crossed any watermark threshold limits. - -. To allocate shards to a particular node, free up some space. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-node-disk-flood-watermark-reached"] -== Elasticsearch Node Disk Flood Watermark Reached - -Elasticsearch enforces a read-only index block on every index that has both of these conditions: - -* One or more shards are allocated to the node. -* One or more disks exceed the https://www.elastic.co/guide/en/elasticsearch/reference/6.8/disk-allocator.html[flood stage]. - -.Troubleshooting - -. Check the disk space of the Elasticsearch node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- -+ -Check the `nodes.node_name.fs` field to determine the free disk space on that node. - -. If the used disk percentage is above 95%, it signifies that the node has crossed the flood watermark. Writing is blocked for shards allocated on this particular node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. 
If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - - . Continue freeing up and monitoring the disk space until the used disk space drops below 90%. Then, unblock write to this particular node. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query=_all/_settings?pretty -X PUT -d '{"index.blocks.read_only_allow_delete": null}' ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - - -[id="elasticsearch-jvm-heap-use-is-high"] -== Elasticsearch JVM Heap Use is High - -The Elasticsearch node JVM Heap memory used is above 75%. - -.Troubleshooting - -Consider https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings[increasing the heap size]. - -[id="aggregated-logging-system-cpu-is-high"] -== Aggregated Logging System CPU is High - -System CPU usage on the node is high. - -.Troubleshooting - -Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -[id="elasticsearch-process-cpu-is-high"] -== Elasticsearch Process CPU is High - -Elasticsearch process CPU usage on the node is high. - -.Troubleshooting - -Check the CPU of the cluster node. Consider allocating more CPU resources to the node. - -[id="elasticsearch-disk-space-is-running-low"] -== Elasticsearch Disk Space is Running Low - -The Elasticsearch Cluster is predicted to be out of disk space within the next 6 hours based on current disk usage. - -.Troubleshooting - -. Get the disk space of the Elasticsearch node. -+ -[source,terminal] ----- -for pod in `oc -n openshift-logging get po -l component=elasticsearch -o jsonpath='{.items[*].metadata.name}'`; do echo $pod; oc -n openshift-logging exec -c elasticsearch $pod -- df -h /elasticsearch/persistent; done ----- - -. In the command output, check the `nodes.node_name.fs` field to determine the free disk space on that node. - -. Try to increase the disk space on all nodes. - -. If increasing the disk space is not possible, try adding a new data node to the cluster. - -. If adding a new data node is problematic, decrease the total cluster redundancy policy. - -.. Check the current `redundancyPolicy`. -+ -[source,terminal] ----- -oc -n openshift-logging get es elasticsearch -o jsonpath='{.spec.redundancyPolicy}' ----- -+ -[NOTE] -==== -If you are using a `ClusterLogging` CR, enter: - -[source,terminal] ----- -oc -n openshift-logging get cl -o jsonpath='{.items[*].spec.logStore.elasticsearch.redundancyPolicy}' ----- -==== - -.. If the cluster `redundancyPolicy` is higher than `SingleRedundancy`, set it to `SingleRedundancy` and save this change. - -. If the preceding steps do not fix the issue, delete the old indices. - -.. Check the status of all indices on Elasticsearch. 
-+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- indices ----- - -.. Identify an old index that can be deleted. - -.. Delete the index. -+ -[source,terminal] ----- -oc exec -n openshift-logging -c elasticsearch -- es_util --query= -X DELETE ----- - -[role="_additional-resources"] -.Additional resources - -* Search for "redundancyPolicy" in the "Sample `ClusterLogging` custom resource (CR)" in xref:../../logging/config/cluster-logging-configuring-cr.adoc#cluster-logging-configuring-crd_cluster-logging-configuring-cr[About the Cluster Logging custom resource] - -* Search for "ElasticsearchDiskSpaceRunningLow" in xref:../../logging/troubleshooting/cluster-logging-alerts.adoc#cluster-logging-elasticsearch-rules_cluster-logging-alerts[About Elasticsearch alerting rules]. - -* Search for "Free up or increase disk space" in the Elasticsearch topic, link:https://www.elastic.co/guide/en/elasticsearch/reference/7.13/fix-common-cluster-issues.html#fix-red-yellow-cluster-status[Fix a red or yellow cluster status]. - - - -[id="elasticsearch-filedescriptor-usage-is-high"] -== Elasticsearch FileDescriptor Usage is high - -Based on current usage trends, the predicted number of file descriptors on the node is insufficient. - -.Troubleshooting - -Check and, if needed, configure the value of `max_file_descriptors` for each node, as described in the Elasticsearch link:https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html[File descriptors] topic. - -[role="_additional-resources"] -.Additional resources - -* Search for "ElasticsearchHighFileDescriptorUsage" in xref:../../logging/troubleshooting/cluster-logging-alerts.adoc#cluster-logging-elasticsearch-rules_cluster-logging-alerts[About Elasticsearch alerting rules]. -* Search for "File Descriptors In Use" in xref:../../logging/cluster-logging-dashboards.adoc#cluster-logging-dashboards-logging_cluster-logging-dashboards[OpenShift Logging dashboards]. - - - -// Follow up items: - -// `oc edit es elasticsearch` is not documented anywhere outside this topic. 
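As a supplementary sketch for the file descriptor section above (not part of the original assembly), the current and maximum descriptor counts can be read from the Elasticsearch node stats API through the same `es_util` wrapper used throughout this topic; substitute a real Elasticsearch pod name:

[source,terminal]
----
oc exec -n openshift-logging -c elasticsearch <elasticsearch_pod_name> -- \
  es_util --query=_nodes/stats/process?pretty
----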
diff --git a/logging/troubleshooting/images b/logging/troubleshooting/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/troubleshooting/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/troubleshooting/modules b/logging/troubleshooting/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/troubleshooting/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/troubleshooting/snippets b/logging/troubleshooting/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/troubleshooting/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_5/_attributes b/logging/v5_5/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_5/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_5/images b/logging/v5_5/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_5/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_5/logging-5-5-administration.adoc b/logging/v5_5/logging-5-5-administration.adoc deleted file mode 100644 index 338243b3df90..000000000000 --- a/logging/v5_5/logging-5-5-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-5"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole -include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-architecture.adoc b/logging/v5_5/logging-5-5-architecture.adoc deleted file mode 100644 index d6ea69fcb56d..000000000000 --- a/logging/v5_5/logging-5-5-architecture.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-5"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-architecture - -toc::[] - -:context: logging-5-5-architecture -include::modules/logging-architecture-overview.adoc[leveloffset=+1,lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-configuration.adoc b/logging/v5_5/logging-5-5-configuration.adoc deleted file mode 100644 index e78d97a6c6ed..000000000000 --- a/logging/v5_5/logging-5-5-configuration.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-5"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - 
-include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git a/logging/v5_5/logging-5-5-getting-started.adoc b/logging/v5_5/logging-5-5-getting-started.adoc deleted file mode 100644 index bc2831906ad7..000000000000 --- a/logging/v5_5/logging-5-5-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-5-5-getting-started"] -= Getting started with logging 5.5 - -:context: logging-5-5-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_5/logging-5-5-reference.adoc b/logging/v5_5/logging-5-5-reference.adoc deleted file mode 100644 index a67d4f9013c4..000000000000 --- a/logging/v5_5/logging-5-5-reference.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-5"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-reference - -toc::[] diff --git a/logging/v5_5/logging-5-5-release-notes.adoc b/logging/v5_5/logging-5-5-release-notes.adoc deleted file mode 100644 index 284b8b5f8cc9..000000000000 --- a/logging/v5_5/logging-5-5-release-notes.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-5"] -= Logging 5.5 Release Notes -include::_attributes/common-attributes.adoc[] -:context: logging-5.5-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::modules/logging-rn-5.5.10.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.9.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.8.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.7.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.6.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.5.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.4.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.5.0.adoc[leveloffset=+1] diff --git a/logging/v5_5/modules b/logging/v5_5/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_5/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_5/snippets b/logging/v5_5/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_5/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_6/_attributes b/logging/v5_6/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_6/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_6/images b/logging/v5_6/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_6/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_6/logging-5-6-administration.adoc b/logging/v5_6/logging-5-6-administration.adoc deleted file mode 100644 index 14b7b31be3e3..000000000000 --- a/logging/v5_6/logging-5-6-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-6"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole 
-include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-architecture.adoc b/logging/v5_6/logging-5-6-architecture.adoc deleted file mode 100644 index 08c96effd8bc..000000000000 --- a/logging/v5_6/logging-5-6-architecture.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-6"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-architecture - -toc::[] - - -include::modules/logging-architecture-overview.adoc[leveloffset=+1,lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-configuration.adoc b/logging/v5_6/logging-5-6-configuration.adoc deleted file mode 100644 index 35b9787a8d47..000000000000 --- a/logging/v5_6/logging-5-6-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-6"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git a/logging/v5_6/logging-5-6-getting-started.adoc b/logging/v5_6/logging-5-6-getting-started.adoc deleted file mode 100644 index c4cb0a606a71..000000000000 --- a/logging/v5_6/logging-5-6-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-getting-started-5-6"] -= Getting started with logging 5.6 - -:context: logging-5-6-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_6/logging-5-6-reference.adoc b/logging/v5_6/logging-5-6-reference.adoc deleted file mode 100644 index 704514dbc3ce..000000000000 --- a/logging/v5_6/logging-5-6-reference.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-6"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-reference - -toc::[] - -:leveloffset: +1 - -include::modules/logging-feature-reference-5.6.adoc[] - -include::modules/logging-5.6-api-ref.adoc[] - -:leveloffset: -1 diff --git a/logging/v5_6/logging-5-6-release-notes.adoc b/logging/v5_6/logging-5-6-release-notes.adoc deleted file mode 100644 index cb94df782bb0..000000000000 --- a/logging/v5_6/logging-5-6-release-notes.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-6"] -= Logging 5.6 Release Notes -include::_attributes/common-attributes.adoc[] -:context: logging-5.6-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.6.5.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.4.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.3.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.2.adoc[leveloffset=+1] - 
-include::modules/logging-rn-5.6.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.6.0.adoc[leveloffset=+1] diff --git a/logging/v5_6/modules b/logging/v5_6/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_6/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_6/snippets b/logging/v5_6/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_6/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/v5_7/_attributes b/logging/v5_7/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/logging/v5_7/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/logging/v5_7/images b/logging/v5_7/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/logging/v5_7/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/logging/v5_7/logging-5-7-administration.adoc b/logging/v5_7/logging-5-7-administration.adoc deleted file mode 100644 index 1f54801bdf44..000000000000 --- a/logging/v5_7/logging-5-7-administration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-administration-5-7"] -= Administering your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-administration - -toc::[] - -//Installing the Red Hat OpenShift Logging Operator via webconsole -include::modules/logging-deploy-RHOL-console.adoc[leveloffset=+1] - -//Installing the Loki Operator via webconsole -include::modules/logging-deploy-loki-console.adoc[leveloffset=+1] - -//Generic installing operators from operator hub using CLI -include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+1] - -//Generic deleting operators from cluster using web console -include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1] - -//Generic deleting operators from a cluster using CLI -include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1] diff --git a/logging/v5_7/logging-5-7-architecture.adoc b/logging/v5_7/logging-5-7-architecture.adoc deleted file mode 100644 index 86b1c12c5a04..000000000000 --- a/logging/v5_7/logging-5-7-architecture.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-architecture-5-7"] -= Understanding logging architecture -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-architecture - -toc::[] - -:context: logging-5-7-architecture-overview -include::modules/logging-architecture-overview.adoc[lines=9..31] - -include::modules/logging-support-considerations.adoc[leveloffset=+1] - -include::snippets/logging-5.7-outputs-snip.adoc[] diff --git a/logging/v5_7/logging-5-7-configuration.adoc b/logging/v5_7/logging-5-7-configuration.adoc deleted file mode 100644 index beeaef734e50..000000000000 --- a/logging/v5_7/logging-5-7-configuration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-configuration-5-7"] -= Configuring your logging deployment -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-configuration - -toc::[] - -include::snippets/logging-crs-by-operator-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - -include::modules/logging-loki-retention.adoc[leveloffset=+1] - -include::modules/logging-multiline-except.adoc[leveloffset=+1] diff --git a/logging/v5_7/logging-5-7-getting-started.adoc 
b/logging/v5_7/logging-5-7-getting-started.adoc deleted file mode 100644 index 50fa6948a13d..000000000000 --- a/logging/v5_7/logging-5-7-getting-started.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY - -[id="logging-getting-started-5-7"] -= Getting started with logging 5.7 - -:context: logging-5-7-getting-started -include::modules/logging-getting-started.adoc[lines=5..38] diff --git a/logging/v5_7/logging-5-7-reference.adoc b/logging/v5_7/logging-5-7-reference.adoc deleted file mode 100644 index 0d9019e2fb4f..000000000000 --- a/logging/v5_7/logging-5-7-reference.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-reference-5-7"] -= Logging References -include::_attributes/common-attributes.adoc[] -:context: logging-5.7-reference - -toc::[] diff --git a/logging/v5_7/logging-5-7-release-notes.adoc b/logging/v5_7/logging-5-7-release-notes.adoc deleted file mode 100644 index b713014f2c61..000000000000 --- a/logging/v5_7/logging-5-7-release-notes.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="logging-release-notes-5-7"] -= Logging 5.7 Release Notes -:context: logging-5.7-release-notes - -toc::[] - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-stable-updates-snip.adoc[] - -include::modules/logging-rn-5.7.2.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.1.adoc[leveloffset=+1] - -include::modules/logging-rn-5.7.0.adoc[leveloffset=+1] diff --git a/logging/v5_7/modules b/logging/v5_7/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/logging/v5_7/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/logging/v5_7/snippets b/logging/v5_7/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/logging/v5_7/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/logging/viewing-resource-logs.adoc b/logging/viewing-resource-logs.adoc deleted file mode 100644 index d0d19b48b600..000000000000 --- a/logging/viewing-resource-logs.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="vewing-resource-logs"] -= Viewing logs for a resource -include::_attributes/common-attributes.adoc[] -:context: viewing-resource-logs - -toc::[] - -You can view the logs for various resources, such as builds, deployments, and pods by using the OpenShift CLI (oc) and the web console. - -[NOTE] -==== -Resource logs are a default feature that provides limited log viewing capability. To enhance your log retrieving and viewing experience, it is recommended that you install xref:../logging/cluster-logging.adoc#cluster-logging[OpenShift Logging]. The {logging} aggregates all the logs from your {product-title} cluster, such as node system audit logs, application container logs, and infrastructure logs, into a dedicated log store. You can then query, discover, and visualize your log data through the xref:../logging/cluster-logging-visualizer.adoc#cluster-logging-visualizer-using[Kibana interface]. Resource logs do not access the {logging} log store. 
-==== - -include::modules/viewing-resource-logs-cli-console.adoc[leveloffset=+1] diff --git a/machine_management/_attributes b/machine_management/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/machine_management/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/machine_management/adding-rhel-compute.adoc b/machine_management/adding-rhel-compute.adoc deleted file mode 100644 index 0f0d484458e1..000000000000 --- a/machine_management/adding-rhel-compute.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-rhel-compute"] -= Adding RHEL compute machines to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: adding-rhel-compute - -toc::[] - -In {product-title}, you can add {op-system-base-full} compute machines to a user-provisioned infrastructure cluster or an installation-provisioned infrastructure cluster on the `x86_64` architecture. You can use {op-system-base} as the operating system only on compute machines. - -include::modules/rhel-compute-overview.adoc[leveloffset=+1] - -include::modules/rhel-compute-requirements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-deleting_nodes-nodes-working[Deleting nodes] - - -include::modules/csr-management.adoc[leveloffset=+2] - -[id="adding-rhel-compute-preparing-image-cloud"] -== Preparing an image for your cloud - -Amazon Machine Images (AMI) are required because various image formats cannot be used directly by AWS. You may use the AMIs that Red Hat has provided, or you can manually import your own images. The AMI must exist before the EC2 instance can be provisioned. You will need a valid AMI ID so that the correct {op-system-base} version needed for the compute machines is selected. - -include::modules/rhel-images-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* You may also manually link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/image_builder_guide/sect-documentation-image_builder-chapter5-section_2[import {op-system-base} images to AWS]. - -include::modules/rhel-preparing-playbook-machine.adoc[leveloffset=+1] - -include::modules/rhel-preparing-node.adoc[leveloffset=+1] - -include::modules/rhel-attaching-instance-aws.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* See xref:../installing/installing_aws/installing-aws-account.adoc#installation-aws-permissions-iam-roles_installing-aws-account[Required AWS permissions for IAM roles]. 
- -include::modules/rhel-worker-tag.adoc[leveloffset=+1] - -include::modules/rhel-adding-node.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/rhel-ansible-parameters.adoc[leveloffset=+1] - -include::modules/rhel-removing-rhcos.adoc[leveloffset=+2] diff --git a/machine_management/applying-autoscaling.adoc b/machine_management/applying-autoscaling.adoc deleted file mode 100644 index afcb53b8b76e..000000000000 --- a/machine_management/applying-autoscaling.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: ASSEMBLY -[id="applying-autoscaling"] -= Applying autoscaling to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: applying-autoscaling - -toc::[] - -Applying autoscaling to an {product-title} cluster involves deploying a cluster autoscaler and then deploying machine autoscalers for each machine type in your cluster. - -[IMPORTANT] -==== -You can configure the cluster autoscaler only in clusters where the machine API is operational. -==== - -include::modules/cluster-autoscaler-about.adoc[leveloffset=+1] - -[id="configuring-clusterautoscaler"] -== Configuring the cluster autoscaler - -First, deploy the cluster autoscaler to manage automatic resource scaling in your {product-title} cluster. - -[NOTE] -==== -Because the cluster autoscaler is scoped to the entire cluster, you can make only one cluster autoscaler for the cluster. -==== - -include::modules/cluster-autoscaler-cr.adoc[leveloffset=+2] - -:FeatureName: cluster autoscaler -:FeatureResourceName: ClusterAutoscaler -include::modules/deploying-resource.adoc[leveloffset=+2] - -== Next steps - -* After you configure the cluster autoscaler, you must configure at least one machine autoscaler. - -include::modules/machine-autoscaler-about.adoc[leveloffset=+1] - -[id="configuring-machineautoscaler"] -== Configuring the machine autoscalers - -After you deploy the cluster autoscaler, deploy `MachineAutoscaler` resources that reference the compute machine sets that are used to scale the cluster. - -[IMPORTANT] -==== -You must deploy at least one `MachineAutoscaler` resource after you deploy the `ClusterAutoscaler` resource. -==== - -[NOTE] -==== -You must configure separate resources for each compute machine set. Remember that compute machine sets are different in each region, so consider whether you want to enable machine scaling in multiple regions. The compute machine set that you scale must have at least one machine in it. -==== - -include::modules/machine-autoscaler-cr.adoc[leveloffset=+2] - -:FeatureName: machine autoscaler -:FeatureResourceName: MachineAutoscaler -include::modules/deploying-resource.adoc[leveloffset=+2] - -[role="_additional-resources"] -== Additional resources - -* For more information about pod priority, see xref:../nodes/pods/nodes-pods-priority.adoc#nodes-pods-priority[Including pod priority in pod scheduling decisions in {product-title}]. 
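To make the machine autoscaler configuration above concrete, the following is an illustrative sketch only; the authoritative example lives in the included `machine-autoscaler-cr` module, and the machine set name here is a placeholder. A minimal `MachineAutoscaler` resource that scales a single compute machine set between one and six replicas might look like this:

[source,yaml]
----
apiVersion: "autoscaling.openshift.io/v1beta1"
kind: "MachineAutoscaler"
metadata:
  name: "worker-machine-autoscaler"
  namespace: "openshift-machine-api"
spec:
  minReplicas: 1
  maxReplicas: 6
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: <compute_machine_set_name>
----

You would apply it with `oc apply -f <filename>.yaml` and create one such resource for each compute machine set that you want the cluster autoscaler to scale.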
diff --git a/machine_management/capi-machine-management.adoc b/machine_management/capi-machine-management.adoc deleted file mode 100644 index 828aa46563d7..000000000000 --- a/machine_management/capi-machine-management.adoc +++ /dev/null @@ -1,103 +0,0 @@ -:_content-type: ASSEMBLY -[id="capi-machine-management"] -= Managing machines with the Cluster API -include::_attributes/common-attributes.adoc[] -:context: capi-machine-management - -toc::[] - -:FeatureName: Managing machines with the Cluster API -include::snippets/technology-preview.adoc[] - -The link:https://cluster-api.sigs.k8s.io/[Cluster API] is an upstream project that is integrated into {product-title} as a Technology Preview for Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure clusters. You can use the Cluster API to create and manage compute machine sets and compute machines in your {product-title} cluster. This capability is in addition or an alternative to managing machines with the Machine API. - -For {product-title} {product-version} clusters, you can use the Cluster API to perform node host provisioning management actions after the cluster installation finishes. This system enables an elastic, dynamic provisioning method on top of public or private cloud infrastructure. - -With the Cluster API Technology Preview, you can create compute machines and compute machine sets on {product-title} clusters for supported providers. You can also explore the features that are enabled by this implementation that might not be available with the Machine API. - -[discrete] -[id="cluster-api-benefits_{context}"] -== Benefits - -By using the Cluster API, {product-title} users and developers are able to realize the following advantages: - -* The option to use upstream community Cluster API infrastructure providers which might not be supported by the Machine API. - -* The opportunity to collaborate with third parties who maintain machine controllers for infrastructure providers. - -* The ability to use the same set of Kubernetes tools for infrastructure management in {product-title}. - -* The ability to create compute machine sets by using the Cluster API that support features that are not available with the Machine API. - -[discrete] -[id="capi-tech-preview-limitations"] -== Limitations - -Using the Cluster API to manage machines is a Technology Preview feature and has the following limitations: - -* Only AWS, GCP, and Azure clusters are supported. - -* To use this feature, you must enable the `TechPreviewNoUpgrade` xref:../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling-features-about_nodes-cluster-enabling[feature set]. Enabling this feature set cannot be undone and prevents minor version updates. - -* You must create the primary resources that the Cluster API requires manually. - -* You cannot manage control plane machines by using the Cluster API. - -* Migration of existing compute machine sets created by the Machine API to Cluster API compute machine sets is not supported. - -* Full feature parity with the Machine API is not available. - -//Cluster API architecture -include::modules/cluster-api-architecture.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../operators/operator-reference.adoc#cluster-capi-operator_cluster-operators-ref[Cluster CAPI Operator] - -[id="capi-sample-yaml-files"] -== Sample YAML files - -For the Cluster API Technology Preview, you must create the primary resources that the Cluster API requires manually. 
The following example YAML files show how to make these resources work together and configure settings for the machines that they create that are appropriate for your environment. - -//Sample YAML for a CAPI cluster resource -include::modules/capi-yaml-cluster.adoc[leveloffset=+2] - -The remaining Cluster API resources are provider-specific. Refer to the example YAML files for your cluster: - -* xref:../machine_management/capi-machine-management.adoc#capi-sample-yaml-files-aws[Sample YAML files for configuring Amazon Web Services clusters] - -* xref:../machine_management/capi-machine-management.adoc#capi-sample-yaml-files-gcp[Sample YAML files for configuring Google Cloud Platform clusters] - -[id="capi-sample-yaml-files-aws"] -=== Sample YAML files for configuring Amazon Web Services clusters - -Some Cluster API resources are provider-specific. The following example YAML files show configurations for an Amazon Web Services (AWS) cluster. - -//Sample YAML for a CAPI AWS provider resource -include::modules/capi-yaml-infrastructure-aws.adoc[leveloffset=+3] - -//Sample YAML for CAPI AWS machine template resource -include::modules/capi-yaml-machine-template-aws.adoc[leveloffset=+3] - -//Sample YAML for a CAPI AWS compute machine set resource -include::modules/capi-yaml-machine-set-aws.adoc[leveloffset=+3] - -[id="capi-sample-yaml-files-gcp"] -=== Sample YAML files for configuring Google Cloud Platform clusters - -Some Cluster API resources are provider-specific. The following example YAML files show configurations for a Google Cloud Platform (GCP) cluster. - -//Sample YAML for a CAPI GCP provider resource -include::modules/capi-yaml-infrastructure-gcp.adoc[leveloffset=+3] - -//Sample YAML for CAPI GCP machine template resource -include::modules/capi-yaml-machine-template-gcp.adoc[leveloffset=+3] - -//Sample YAML for a CAPI GCP compute machine set resource -include::modules/capi-yaml-machine-set-gcp.adoc[leveloffset=+3] - -//Creating a CAPI compute machine set -include::modules/capi-machine-set-creating.adoc[leveloffset=+1] - -//Troubleshooting clusters that use the Cluster API -include::modules/capi-troubleshooting.adoc[leveloffset=+1] diff --git a/machine_management/control_plane_machine_management/_attributes b/machine_management/control_plane_machine_management/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/control_plane_machine_management/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-about.adoc b/machine_management/control_plane_machine_management/cpmso-about.adoc deleted file mode 100644 index cd2368cff6ac..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-about.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-about"] -= About control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-about - -toc::[] - -With control plane machine sets, you can automate management of the control plane machine resources within your {product-title} cluster. - -[IMPORTANT] -==== -Control plane machine sets cannot manage compute machines, and compute machine sets cannot manage control plane machines. -==== - -Control plane machine sets provide for control plane machines similar management capabilities as compute machine sets provide for compute machines. 
However, these two types of machine sets are separate custom resources defined within the Machine API and have several fundamental differences in their architecture and functionality. - -//Control Plane Machine Set Operator overview -include::modules/cpmso-overview.adoc[leveloffset=+1] - -[id="cpmso-limitations_{context}"] -== Limitations - -The Control Plane Machine Set Operator has the following limitations: - -* The Operator requires the Machine API Operator to be operational and is therefore not supported on clusters with manually provisioned machines. When installing a {product-title} cluster with manually provisioned machines for a platform that creates an active generated `ControlPlaneMachineSet` custom resource (CR), you must remove the Kubernetes manifest files that define the control plane machine set as instructed in the installation process. - -* Only Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere clusters are supported. - -* Only clusters with three control plane machines are supported. - -* Horizontal scaling of the control plane is not supported. - -* Deploying Azure control plane machines on Ephemeral OS disks increases risk for data loss and is not supported. - -* Deploying control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs is not supported. -+ -[IMPORTANT] -==== -Attempting to deploy control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs might cause the cluster to lose etcd quorum. A cluster that loses all control plane machines simultaneously is unrecoverable. -==== - -* Making changes to the control plane machine set during or prior to installation is not supported. You must make any changes to the control plane machine set only after installation. - -[role="_additional-resources"] -[id="additional-resources_cpmso-about"] -== Additional resources - -* xref:../../operators/operator-reference.adoc#control-plane-machine-set-operator_cluster-operators-ref[Control Plane Machine Set Operator reference] -* xref:../../rest_api/machine_apis/controlplanemachineset-machine-openshift-io-v1.adoc#controlplanemachineset-machine-openshift-io-v1[`ControlPlaneMachineSet` custom resource] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-configuration.adoc b/machine_management/control_plane_machine_management/cpmso-configuration.adoc deleted file mode 100644 index 1030ebec5bc1..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-configuration.adoc +++ /dev/null @@ -1,96 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-configuration"] -= Control plane machine set configuration -include::_attributes/common-attributes.adoc[] -:context: cpmso-configuration - -toc::[] - -These example YAML snippets show the base structure for a control plane machine set custom resource (CR) and platform-specific samples for provider specification and failure domain configurations. 
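As rough orientation for the base structure that the included `cpmso-yaml-sample-cr` module documents, a control plane machine set CR can be sketched as follows. The field values are illustrative assumptions based on the general `machine.openshift.io/v1` schema, not content from the deleted module.

[source,yaml]
----
# Minimal sketch of a ControlPlaneMachineSet CR; the resource is
# expected to be named "cluster" in the openshift-machine-api
# namespace, and all provider-specific details are elided.
apiVersion: machine.openshift.io/v1
kind: ControlPlaneMachineSet
metadata:
  name: cluster
  namespace: openshift-machine-api
spec:
  replicas: 3                    # only three control plane machines are supported
  state: Active                  # Active or Inactive
  strategy:
    type: RollingUpdate          # or OnDelete
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machine-role: master
      machine.openshift.io/cluster-api-machine-type: master
  template:
    machineType: machines_v1beta1_machine_openshift_io
    machines_v1beta1_machine_openshift_io:
      metadata:
        labels:
          machine.openshift.io/cluster-api-machine-role: master
          machine.openshift.io/cluster-api-machine-type: master
          machine.openshift.io/cluster-api-cluster: <cluster_id>   # infrastructure ID placeholder
      failureDomains:
        platform: ""             # provider-specific; see the platform samples that follow
      spec:
        providerSpec:
          value: {}              # provider-specific; see the platform samples that follow
----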
- -//Sample YAML for a control plane machine set custom resource -include::modules/cpmso-yaml-sample-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-getting-started[Getting started with control plane machine sets] - -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Updating the control plane configuration] - -[discrete] -[id="cpmso-sample-yaml-provider-specific_{context}"] -=== Provider-specific configuration - -The provider specification and failure domain sections of the control plane machine set resources are provider-specific. Refer to the example YAML for your cluster: - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[Sample YAML snippets for configuring Amazon Web Services clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[Sample YAML snippets for configuring Google Cloud Platform clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Sample YAML snippets for configuring Microsoft Azure clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-nutanix_cpmso-configuration[Sample YAML snippets for configuring Nutanix clusters] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[Sample YAML snippets for configuring VMware vSphere clusters] - -[id="cpmso-sample-yaml-aws_{context}"] -== Sample YAML for configuring Amazon Web Services clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippets show provider specification and failure domain configurations for an Amazon Web Services (AWS) cluster. - -//Sample AWS provider specification -include::modules/cpmso-yaml-provider-spec-aws.adoc[leveloffset=+2] - -//Sample AWS failure domain configuration -include::modules/cpmso-yaml-failure-domain-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-aws_cpmso-using[Enabling Amazon Web Services features for control plane machines] - -[id="cpmso-sample-yaml-gcp_{context}"] -== Sample YAML for configuring Google Cloud Platform clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippets show provider specification and failure domain configurations for a Google Cloud Platform (GCP) cluster. - -//Sample GCP provider specification -include::modules/cpmso-yaml-provider-spec-gcp.adoc[leveloffset=+2] - -//Sample GCP failure domain configuration -include::modules/cpmso-yaml-failure-domain-gcp.adoc[leveloffset=+2] -//// -//To be added in a later PR -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-gcp_cpmso-using[Enabling Google Cloud Platform features for control plane machines] -//// -[id="cpmso-sample-yaml-azure_{context}"] -== Sample YAML for configuring Microsoft Azure clusters - -Some sections of the control plane machine set CR are provider-specific.
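As a hedged sketch ahead of the included Azure sample modules, the failure domain section for an Azure cluster might look like the following; the zone values are illustrative and depend on the region.

[source,yaml]
----
# Rough sketch of an Azure failure domain section for a control plane
# machine set; zone values are illustrative only.
failureDomains:
  platform: Azure
  azure:
  - zone: "1"
  - zone: "2"
  - zone: "3"
----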
The following example YAML snippets show provider specification and failure domain configurations for an Azure cluster. - -//Sample Azure provider specification -include::modules/cpmso-yaml-provider-spec-azure.adoc[leveloffset=+2] - -//Sample Azure failure domain configuration -include::modules/cpmso-yaml-failure-domain-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-supported-features-azure_cpmso-using[Enabling Microsoft Azure features for control plane machines] - -[id="cpmso-sample-yaml-nutanix_{context}"] -== Sample YAML for configuring Nutanix clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippet shows a provider specification configuration for a Nutanix cluster. - -//Sample Nutanix provider specification -include::modules/cpmso-yaml-provider-spec-nutanix.adoc[leveloffset=+2] - -[id="cpmso-sample-yaml-vsphere_{context}"] -== Sample YAML for configuring VMware vSphere clusters - -Some sections of the control plane machine set CR are provider-specific. The following example YAML snippet shows a provider specification configuration for a VMware vSphere cluster. - -//Sample VMware vSphere provider specification -include::modules/cpmso-yaml-provider-spec-vsphere.adoc[leveloffset=+2] diff --git a/machine_management/control_plane_machine_management/cpmso-disabling.adoc b/machine_management/control_plane_machine_management/cpmso-disabling.adoc deleted file mode 100644 index 0dfdf337985d..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-disabling.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-disabling"] -= Disabling the control plane machine set -include::_attributes/common-attributes.adoc[] -:context: cpmso-disabling - -toc::[] - -The `.spec.state` field in an activated `ControlPlaneMachineSet` custom resource (CR) cannot be changed from `Active` to `Inactive`. To disable the control plane machine set, you must delete the CR so that it is removed from the cluster. - -When you delete the CR, the Control Plane Machine Set Operator performs cleanup operations and disables the control plane machine set. The Operator then removes the CR from the cluster and creates an inactive control plane machine set with default settings. - -//Deleting the control plane machine set -include::modules/cpmso-deleting.adoc[leveloffset=+1] - -//Checking the control plane machine set custom resource status -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -[id="cpmso-reenabling_{context}"] -== Re-enabling the control plane machine set - -To re-enable the control plane machine set, you must ensure that the configuration in the CR is correct for your cluster and activate it. 
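To illustrate the state handling described above, the relevant field in the CR is sketched below; the surrounding configuration is omitted and the values are illustrative.

[source,yaml]
----
# Sketch of the state field in a ControlPlaneMachineSet CR.
# An Active CR cannot be switched back to Inactive; to disable the
# control plane machine set, delete the CR instead.
apiVersion: machine.openshift.io/v1
kind: ControlPlaneMachineSet
metadata:
  name: cluster
  namespace: openshift-machine-api
spec:
  state: Active   # set to Active only after you verify the configuration
----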
- -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[Activating the control plane machine set custom resource] diff --git a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc b/machine_management/control_plane_machine_management/cpmso-getting-started.adoc deleted file mode 100644 index 7e52420a13d3..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-getting-started"] -= Getting started with control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-getting-started - -toc::[] - -The process for getting started with control plane machine sets depends on the state of the `ControlPlaneMachineSet` custom resource (CR) in your cluster. - -Clusters with an active generated CR:: Clusters that have a generated CR with an active state use the control plane machine set by default. No administrator action is required. - -Clusters with an inactive generated CR:: For clusters that include an inactive generated CR, you must review the CR configuration and xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[activate the CR]. - -Clusters without a generated CR:: For clusters that do not include a generated CR, you must xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-creating-cr_cpmso-getting-started[create and activate a CR] with the appropriate configuration for your cluster. - -If you are uncertain about the state of the `ControlPlaneMachineSet` CR in your cluster, you can xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-checking-status_cpmso-getting-started[verify the CR status]. - -[id="cpmso-platform-matrix_{context}"] -== Supported cloud providers - -In {product-title} {product-version}, the control plane machine set is supported for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere clusters. - -The status of the control plane machine set after installation depends on your cloud provider and the version of {product-title} that you installed on your cluster. - -.Control plane machine set implementation for {product-title} {product-version} -[cols="<.^5,^.^4,^.^4,^.^4"] -|==== -|Cloud provider |Active by default |Generated CR |Manual CR required - -|Amazon Web Services (AWS) -|X ^[1]^ -|X -| - -|Google Cloud Platform (GCP) -|X ^[2]^ -|X -| - -|Microsoft Azure -|X ^[2]^ -|X -| - -|Nutanix -|X -|X -| - -|VMware vSphere -| -| -|X -|==== -[.small] --- -1. AWS clusters that are upgraded from version 4.11 or earlier require xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[CR activation]. -2. GCP and Azure clusters that are upgraded from version 4.12 or earlier require xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[CR activation]. 
--- - -//Checking the control plane machine set custom resource state -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -//Activating the control plane machine set custom resource -include::modules/cpmso-activating.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-configuration[Control plane machine set configuration] - -//Creating a control plane machine set custom resource -include::modules/cpmso-creating-cr.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Updating the control plane configuration] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-configuration[Control plane machine set configuration] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[Sample YAML for configuring Amazon Web Services clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[Sample YAML for configuring Google Cloud Platform clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Sample YAML for configuring Microsoft Azure clusters] -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[Sample YAML for configuring VMware vSphere clusters] diff --git a/machine_management/control_plane_machine_management/cpmso-resiliency.adoc b/machine_management/control_plane_machine_management/cpmso-resiliency.adoc deleted file mode 100644 index edf92eb33e7d..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-resiliency.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-resiliency"] -= Control plane resiliency and recovery -include::_attributes/common-attributes.adoc[] -:context: cpmso-resiliency - -toc::[] - -You can use the control plane machine set to improve the resiliency of the control plane for your {product-title} cluster. - -[id="cpmso-failure-domains_{context}"] -== High availability and fault tolerance with failure domains - -When possible, the control plane machine set spreads the control plane machines across multiple failure domains. This configuration provides high availability and fault tolerance within the control plane. This strategy can help protect the control plane when issues arise within the infrastructure provider. 
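As a rough illustration of this spreading behavior, an AWS failure domain section that distributes control plane machines across availability zones might be sketched as follows; the zone names and subnet filters are assumptions, not values from the deleted modules.

[source,yaml]
----
# Sketch of an AWS failure domain spread: one entry per availability
# zone so that control plane machines are distributed across zones.
failureDomains:
  platform: AWS
  aws:
  - placement:
      availabilityZone: us-east-1a
    subnet:
      type: Filters
      filters:
      - name: tag:Name
        values:
        - <cluster_id>-private-us-east-1a
  - placement:
      availabilityZone: us-east-1b
    subnet:
      type: Filters
      filters:
      - name: tag:Name
        values:
        - <cluster_id>-private-us-east-1b
  # add one entry per remaining zone in the same pattern
----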
- -//Failure domain platform support and configuration -include::modules/cpmso-failure-domains-provider.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-aws_cpmso-configuration[Sample Amazon Web Services failure domain configuration] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-gcp_cpmso-configuration[Sample Google Cloud Platform failure domain configuration] - -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-failure-domain-azure_cpmso-configuration[Sample Microsoft Azure failure domain configuration] - -//Balancing control plane machines -include::modules/cpmso-failure-domains-balancing.adoc[leveloffset=+2] - -//Recovery of the failed control plane machines -include::modules/cpmso-control-plane-recovery.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[Deploying machine health checks] - -//Quorum protection with machine lifecycle hooks -include::modules/machine-lifecycle-hook-deletion-etcd.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc b/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc deleted file mode 100644 index 0a04b3553105..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-troubleshooting.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-troubleshooting"] -= Troubleshooting the control plane machine set -include::_attributes/common-attributes.adoc[] -:context: cpmso-troubleshooting - -toc::[] - -Use the information in this section to understand and recover from issues you might encounter. 
- -//Checking the control plane machine set custom resource status -include::modules/cpmso-checking-status.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-activating_cpmso-getting-started[Activating the control plane machine set custom resource] -* xref:../../machine_management/control_plane_machine_management/cpmso-getting-started.adoc#cpmso-creating-cr_cpmso-getting-started[Creating a control plane machine set custom resource] - -//Adding a missing Azure internal load balancer -include::modules/cpmso-ts-ilb-missing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-yaml-provider-spec-azure_cpmso-configuration[Sample Azure provider specification] - -//Recovering a degraded etcd Operator after a machine health check operation -include::modules/cpmso-ts-mhc-etcd-degraded.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/cpmso-using.adoc b/machine_management/control_plane_machine_management/cpmso-using.adoc deleted file mode 100644 index 52128df5138a..000000000000 --- a/machine_management/control_plane_machine_management/cpmso-using.adoc +++ /dev/null @@ -1,114 +0,0 @@ -:_content-type: ASSEMBLY -[id="cpmso-using"] -= Managing control plane machines with control plane machine sets -include::_attributes/common-attributes.adoc[] -:context: cpmso-using - -toc::[] - -Control plane machine sets automate several essential aspects of control plane management. - -//Vertical resizing of the control plane -//include::modules/cpmso-feat-vertical-resize.adoc[leveloffset=+1] - -//Updating the control plane configuration -include::modules/cpmso-feat-config-update.adoc[leveloffset=+1] - -//Automatically updating the control plane configuration -include::modules/cpmso-feat-auto-update.adoc[leveloffset=+2] - -//Testing changes to the control plane configuration -include::modules/cpmso-feat-test-changes.adoc[leveloffset=+2] - -[id="cpmso-supported-features-aws_{context}"] -== Enabling Amazon Web Services features for control plane machines - -You can enable Amazon Web Services (AWS) features on control plane machines by changing the configuration of your control plane machine set. When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. 
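The update strategy mentioned here is configured on the CR itself. A minimal sketch, assuming the documented `RollingUpdate` and `OnDelete` options:

[source,yaml]
----
# Sketch of the update strategy stanza in a ControlPlaneMachineSet CR.
# RollingUpdate applies configuration changes automatically; OnDelete
# waits until you delete a control plane machine before replacing it.
spec:
  strategy:
    type: RollingUpdate   # or OnDelete
----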
- -:context: cpmso-using-aws -//Restricting the API server to private (AWS control plane machine set version) -include::modules/private-clusters-setting-api-private.adoc[leveloffset=+2] -:context: cpmso-using - -//Selecting a larger Amazon Web Services instance type for control plane machines -include::modules/cpms-changing-aws-instance-type.adoc[leveloffset=+2] - -//Machine sets that enable the Amazon EC2 Instance Metadata Service -include::modules/machineset-imds-options.adoc[leveloffset=+2] - -//Creating machines that use the Amazon EC2 Instance Metadata Service -include::modules/machineset-creating-imds-options.adoc[leveloffset=+3] - -//Machine sets that deploy machines as Dedicated Instances -include::modules/machineset-dedicated-instances.adoc[leveloffset=+2] - -//Creating Dedicated Instances by using machine sets -include::modules/machineset-creating-dedicated-instances.adoc[leveloffset=+3] - -[id="cpmso-supported-features-azure_{context}"] -== Enabling Microsoft Azure features for control plane machines - -You can enable Microsoft Azure features on control plane machines by changing the configuration of your control plane machine set. When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. - -:context: cpmso-using-azure -//Restricting the API server to private (Azure control plane machine set version) -include::modules/private-clusters-setting-api-private.adoc[leveloffset=+2] -:context: cpmso-using - -//Selecting an Azure Marketplace image -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+2] - -//Enabling Azure boot diagnostics -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+2] - -//Machine sets that deploy machines on ultra disks as data disks -include::modules/machineset-azure-ultra-disk.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disks[Microsoft Azure ultra disks documentation] - -//Creating machines on ultra disks by using machine sets -include::modules/machineset-creating-azure-ultra-disk.adoc[leveloffset=+3] - -//Troubleshooting resources for machine sets that enable ultra disks -include::modules/machineset-troubleshooting-azure-ultra-disk.adoc[leveloffset=+3] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+2] - -// Accelerated Networking for Microsoft Azure VMs -include::modules/machineset-azure-accelerated-networking.adoc[leveloffset=+2] - -//Not applicable for 4.12, possibly 4.13? -//[role="_additional-resources"] -//.Additional resources -//* xref:../../installing/installing_azure/installing-azure-customizations.adoc#machineset-azure-enabling-accelerated-networking-new-install_installing-azure-customizations[Enabling Accelerated Networking during installation] - -// Enabling Accelerated Networking on an existing Microsoft Azure cluster -include::modules/machineset-azure-enabling-accelerated-networking-existing.adoc[leveloffset=+3] - -[id="cpmso-supported-features-gcp_{context}"] -== Enabling Google Cloud Platform features for control plane machines - -You can enable Google Cloud Platform (GCP) features on control plane machines by changing the configuration of your control plane machine set. 
When you save an update to the control plane machine set, the Control Plane Machine Set Operator updates the control plane machines according to your configured update strategy. - -//Note: GCP GPU features should be compatible with CPMS, but dev cannot think of a use case. Leaving them out to keep things less cluttered. If a customer use case emerges, we can just add the necessary modules in here. - -//Configuring persistent disk types by using machine sets -include::modules/machineset-gcp-pd-disk-types.adoc[leveloffset=+2] - -//Configuring Confidential VM by using machine sets -include::modules/machineset-gcp-confidential-vm.adoc[leveloffset=+2] - -//Configuring Shielded VM options by using machine sets -include::modules/machineset-gcp-shielded-vms.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[What is Shielded VM?] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot[Secure Boot] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm[Virtual Trusted Platform Module (vTPM)] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#integrity-monitoring[Integrity monitoring] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-gcp-enabling-customer-managed-encryption.adoc[leveloffset=+2] \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/images b/machine_management/control_plane_machine_management/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/control_plane_machine_management/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/modules b/machine_management/control_plane_machine_management/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/control_plane_machine_management/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/machine_management/control_plane_machine_management/snippets b/machine_management/control_plane_machine_management/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/control_plane_machine_management/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/machine_management/creating-infrastructure-machinesets.adoc b/machine_management/creating-infrastructure-machinesets.adoc deleted file mode 100644 index 06e9b71f5ca4..000000000000 --- a/machine_management/creating-infrastructure-machinesets.adoc +++ /dev/null @@ -1,139 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-infrastructure-machinesets"] -= Creating infrastructure machine sets -include::_attributes/common-attributes.adoc[] -:context: creating-infrastructure-machinesets - -toc::[] - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - - -You can use infrastructure machine sets to create machines that host only infrastructure components, such as the default router, the integrated container image registry, and the components for cluster metrics and monitoring. These infrastructure machines are not counted toward the total number of subscriptions that are required to run the environment. - -In a production deployment, it is recommended that you deploy at least three machine sets to hold infrastructure components. 
Both OpenShift Logging and {SMProductName} deploy Elasticsearch, which requires three instances to be installed on different nodes. Each of these nodes can be deployed to different availability zones for high availability. This configuration requires three different machine sets, one for each availability zone. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. - -include::modules/infrastructure-components.adoc[leveloffset=+1] - -For information about infrastructure nodes and which components can run on infrastructure nodes, see the "Red Hat OpenShift control plane and infrastructure nodes" section in the link:https://www.redhat.com/en/resources/openshift-subscription-sizing-guide[OpenShift sizing and subscription guide for enterprise Kubernetes] document. - -To create an infrastructure node, you can xref:../machine_management/creating-infrastructure-machinesets.adoc#machineset-creating_creating-infrastructure-machinesets[use a machine set], xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-an-infra-node_creating-infrastructure-machinesets[label the node], or xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-infra-machines_creating-infrastructure-machinesets[use a machine config pool]. - -[id="creating-infrastructure-machinesets-production"] -== Creating infrastructure machine sets for production environments - -In a production deployment, it is recommended that you deploy at least three compute machine sets to hold infrastructure components. Both OpenShift Logging and {SMProductName} deploy Elasticsearch, which requires three instances to be installed on different nodes. Each of these nodes can be deployed to different availability zones for high availability. A configuration like this requires three different compute machine sets, one for each availability zone. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. - -[id="creating-infrastructure-machinesets-clouds"] -=== Creating infrastructure machine sets for different clouds - -Use the sample compute machine set for your cloud. - -include::modules/machineset-yaml-alibaba.adoc[leveloffset=+3] - -//Machine set parameters for Alibaba Cloud usage statistics -[discrete] -include::modules/machineset-yaml-alibaba-usage-stats.adoc[leveloffset=+4] - -include::modules/machineset-yaml-aws.adoc[leveloffset=+3] - -Machine sets running on AWS support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machineset-non-guaranteed-instance_creating-machineset-aws[Spot Instances]. You can save on costs by using Spot Instances at a lower price compared to -On-Demand Instances on AWS. xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-aws[Configure Spot Instances] by adding `spotMarketOptions` to the `MachineSet` YAML file. - -include::modules/machineset-yaml-azure.adoc[leveloffset=+3] - -Machine sets running on Azure support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-non-guaranteed-instance_creating-machineset-azure[Spot VMs]. You can save on costs by using Spot VMs at a lower price compared to standard VMs on Azure. 
You can xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-azure[configure Spot VMs] by adding `spotVMOptions` to the `MachineSet` YAML file. - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#installation-azure-marketplace-subscribe_creating-machineset-azure[Selecting an Azure Marketplace image] - -include::modules/machineset-yaml-azure-stack-hub.adoc[leveloffset=+3] - -[NOTE] -==== -Machine sets running on Azure Stack Hub do not support non-guaranteed Spot VMs. -==== - -include::modules/machineset-yaml-ibm-cloud.adoc[leveloffset=+3] - -include::modules/machineset-yaml-gcp.adoc[leveloffset=+3] - -Machine sets running on GCP support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-non-guaranteed-instance_creating-machineset-gcp[preemptible VM instances]. You can save on costs by using preemptible VM instances at a lower price -compared to normal instances on GCP. You can xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-gcp[configure preemptible VM instances] by adding `preemptible` to the `MachineSet` YAML file. - -include::modules/machineset-yaml-nutanix.adoc[leveloffset=+3] - -include::modules/machineset-yaml-osp.adoc[leveloffset=+3] - -include::modules/machineset-yaml-rhv.adoc[leveloffset=+3] - -include::modules/machineset-yaml-vsphere.adoc[leveloffset=+3] - -include::modules/machineset-creating.adoc[leveloffset=+2] - -include::modules/creating-an-infra-node.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:moving-resources-to-infrastructure-machinesets[Moving resources to infrastructure machine sets] - -include::modules/creating-infra-machines.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../architecture/control-plane.adoc#architecture-machine-config-pools_control-plane[Node configuration management with machine config pools] for more information on grouping infra machines in a custom pool. - -[id="assigning-machineset-resources-to-infra-nodes"] -== Assigning machine set resources to infrastructure nodes - -After creating an infrastructure machine set, the `worker` and `infra` roles are applied to new infra nodes. Nodes with the `infra` role applied are not counted toward the total number of subscriptions that are required to run the environment, even when the `worker` role is also applied. - -However, with an infra node being assigned as a worker, there is a chance user workloads could get inadvertently assigned to an infra node. To avoid this, you can apply a taint to the infra node and tolerations for the pods you want to control. - -include::modules/binding-infra-node-workloads-using-taints-tolerations.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../nodes/scheduling/nodes-scheduler-about.adoc#nodes-scheduler-about[Controlling pod placement using the scheduler] for general information on scheduling a pod to a node. -* See xref:moving-resources-to-infrastructure-machinesets[Moving resources to infrastructure machine sets] for instructions on scheduling pods to infra nodes. 
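As a small illustration of the taint side of this pattern, an infra node might carry a taint such as the following sketch; the `reserved` value is a convention that matches the toleration example in the next section, and the node name is hypothetical.

[source,yaml]
----
# Sketch of a taint on an infra node that keeps user workloads off the
# node unless they carry a matching toleration.
apiVersion: v1
kind: Node
metadata:
  name: example-infra-node   # hypothetical node name
spec:
  taints:
  - key: node-role.kubernetes.io/infra
    value: reserved
    effect: NoSchedule
----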
- -[id="moving-resources-to-infrastructure-machinesets"] -== Moving resources to infrastructure machine sets - -Some of the infrastructure resources are deployed in your cluster by default. You can move them to the infrastructure machine sets that you created by adding the infrastructure node selector, as shown: - -[source,yaml] ----- -spec: - nodePlacement: <1> - nodeSelector: - matchLabels: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `: ` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -Applying a specific node selector to all infrastructure components causes {product-title} to xref:../machine_management/creating-infrastructure-machinesets.adoc#moving-resources-to-infrastructure-machinesets[schedule those workloads on nodes with that label]. - -include::modules/infrastructure-moving-router.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-registry.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-monitoring.adoc[leveloffset=+2] - -include::modules/infrastructure-moving-logging.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../monitoring/configuring-the-monitoring-stack.adoc#moving-monitoring-components-to-different-nodes_configuring-the-monitoring-stack[the monitoring documentation] for the general instructions on moving {product-title} components. diff --git a/machine_management/creating_machinesets/_attributes b/machine_management/creating_machinesets/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/creating_machinesets/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/creating_machinesets/creating-machineset-alibaba.adoc b/machine_management/creating_machinesets/creating-machineset-alibaba.adoc deleted file mode 100644 index 391471881e89..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-alibaba.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-alibaba"] -= Creating a compute machine set on Alibaba Cloud -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-alibaba - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Alibaba Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Alibaba Cloud -include::modules/machineset-yaml-alibaba.adoc[leveloffset=+1] - -//Machine set parameters for Alibaba Cloud usage statistics -include::modules/machineset-yaml-alibaba-usage-stats.adoc[leveloffset=+2] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-aws.adoc b/machine_management/creating_machinesets/creating-machineset-aws.adoc deleted file mode 100644 index 912e18199775..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-aws.adoc +++ /dev/null @@ -1,42 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-aws"] -= Creating a compute machine set on AWS -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-aws - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Amazon Web Services (AWS). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on AWS -include::modules/machineset-yaml-aws.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Machine sets that enable the Amazon EC2 Instance Metadata Service -include::modules/machineset-imds-options.adoc[leveloffset=+1] - -//Creating machines that use the Amazon EC2 Instance Metadata Service -include::modules/machineset-creating-imds-options.adoc[leveloffset=+2] - -//Machine sets that deploy machines as Dedicated Instances -include::modules/machineset-dedicated-instances.adoc[leveloffset=+1] - -//Creating Dedicated Instances by using machine sets -include::modules/machineset-creating-dedicated-instances.adoc[leveloffset=+2] - -//Machine sets that deploy machines as Spot Instances -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating Spot Instances by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-aws-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc b/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc deleted file mode 100644 index bd14868eb8bf..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-azure-stack-hub"] -= Creating a compute machine set on Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-azure-stack-hub - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Microsoft Azure Stack Hub. 
For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Azure Stack Hub -include::modules/machineset-yaml-azure-stack-hub.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Enabling Azure boot diagnostics on compute machines -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+1] - -//Enabling customer-managed encryption keys for a compute machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-azure.adoc b/machine_management/creating_machinesets/creating-machineset-azure.adoc deleted file mode 100644 index e3f30f1f0ad3..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-azure.adoc +++ /dev/null @@ -1,75 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-azure"] -= Creating a compute machine set on Azure -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-azure - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Microsoft Azure. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Azure -include::modules/machineset-yaml-azure.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Selecting an Azure Marketplace image -include::modules/installation-azure-marketplace-subscribe.adoc[leveloffset=+1] - -//Enabling Azure boot diagnostics -include::modules/machineset-azure-boot-diagnostics.adoc[leveloffset=+1] - -//Machine sets that deploy machines as Spot VMs -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating Spot VMs by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Machine sets that deploy machines on Ephemeral OS disks -include::modules/machineset-azure-ephemeral-os.adoc[leveloffset=+1] - -//Creating machines on Ephemeral OS disks by using compute machine sets -include::modules/machineset-creating-azure-ephemeral-os.adoc[leveloffset=+2] - -//Machine sets that deploy machines on ultra disks as data disks -include::modules/machineset-azure-ultra-disk.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* link:https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#ultra-disks[Microsoft Azure ultra disks documentation] -* xref:../../storage/container_storage_interface/persistent-storage-csi-azure.adoc#machineset-azure-ultra-disk_persistent-storage-csi-azure[Machine sets that deploy machines on ultra disks using CSI PVCs] -* xref:../../storage/persistent_storage/persistent-storage-azure.adoc#machineset-azure-ultra-disk_persistent-storage-azure[Machine sets that deploy machines on ultra disks using in-tree PVCs] - -//Creating machines on ultra disks by using machine sets -include::modules/machineset-creating-azure-ultra-disk.adoc[leveloffset=+2] - 
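Very roughly, and without claiming the exact field set that the deleted modules use, enabling Azure ultra disks as data disks in a compute machine set provider specification might be sketched as follows; the field names are assumptions based on the Azure machine provider schema.

[source,yaml]
----
# Rough sketch of enabling Azure ultra disks as data disks in a compute
# machine set provider specification; field names and sizes are
# assumptions, not values taken from this repository.
providerSpec:
  value:
    ultraSSDCapability: Enabled
    dataDisks:
    - nameSuffix: ultrassd
      lun: 0
      diskSizeGB: 4
      deletionPolicy: Delete
      cachingType: None
      managedDisk:
        storageAccountType: UltraSSD_LRS
----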
-//Troubleshooting resources for machine sets that enable ultra disks -include::modules/machineset-troubleshooting-azure-ultra-disk.adoc[leveloffset=+2] - -//Enabling customer-managed encryption keys for a machine set -include::modules/machineset-customer-managed-encryption-azure.adoc[leveloffset=+1] - -// Accelerated Networking for Microsoft Azure VMs -include::modules/machineset-azure-accelerated-networking.adoc[leveloffset=+1] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-azure-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_azure/installing-azure-customizations.adoc#machineset-azure-enabling-accelerated-networking-new-install_installing-azure-customizations[Enabling Accelerated Networking during installation] - -// Enabling Accelerated Networking on an existing Microsoft Azure cluster -include::modules/machineset-azure-enabling-accelerated-networking-existing.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scaling a compute machine set] diff --git a/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc b/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc deleted file mode 100644 index cca9c7f268d1..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-bare-metal.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-bare-metal"] -= Creating a compute machine set on bare metal -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-bare-metal - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on bare metal. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-yaml-baremetal.adoc[leveloffset=+1] - -include::modules/machineset-creating.adoc[leveloffset=+1] - -// Mothballed - re-add when available -// include::modules/machineset-osp-adding-bare-metal.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-gcp.adoc b/machine_management/creating_machinesets/creating-machineset-gcp.adoc deleted file mode 100644 index 9857240d32be..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-gcp.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-gcp"] -= Creating a compute machine set on GCP -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-gcp - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Google Cloud Platform (GCP). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
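One of the GCP options covered by the included modules, preemptible VM instances, is enabled with a single field under the provider specification. A minimal sketch, with everything else elided:

[source,yaml]
----
# Sketch of enabling preemptible VM instances in a GCP compute machine
# set; only the relevant field is shown.
providerSpec:
  value:
    preemptible: true
----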
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on GCP -include::modules/machineset-yaml-gcp.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] - -//Configuring persistent disk types by using compute machine sets -include::modules/machineset-gcp-pd-disk-types.adoc[leveloffset=+1] - -//Configuring Confidential VM by using machine sets -include::modules/machineset-gcp-confidential-vm.adoc[leveloffset=+1] - -//Machine sets that deploy machines as preemptible VM instances -include::modules/machineset-non-guaranteed-instance.adoc[leveloffset=+1] - -//Creating preemptible VM instances by using compute machine sets -include::modules/machineset-creating-non-guaranteed-instances.adoc[leveloffset=+2] - -//Configuring Shielded VM options by using machine sets -include::modules/machineset-gcp-shielded-vms.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[What is Shielded VM?] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot[Secure Boot] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#vtpm[Virtual Trusted Platform Module (vTPM)] -** link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#integrity-monitoring[Integrity monitoring] - -//Enabling customer-managed encryption keys for a compute machine set -include::modules/machineset-gcp-enabling-customer-managed-encryption.adoc[leveloffset=+1] - -//Enabling GPU support for a compute machine set -include::modules/machineset-gcp-enabling-gpu-support.adoc[leveloffset=+1] - -//Adding a GPU node to a machine set (stesmith) -include::modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc[leveloffset=+1] - -//Deploying the Node Feature Discovery Operator (stesmith) -include::modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc b/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc deleted file mode 100644 index fc58ff72de4f..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-ibm-cloud"] -= Creating a compute machine set on IBM Cloud -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-ibm-cloud - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on IBM Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a machine set custom resource on IBM Cloud -include::modules/machineset-yaml-ibm-cloud.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc b/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc deleted file mode 100644 index 0ee790376372..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-ibm-power-vs"] -= Creating a compute machine set on {ibmpowerProductName} Virtual Server -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-ibm-power-vs - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {ibmpowerProductName} Virtual Server. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a machine set custom resource on IBM Cloud -include::modules/machineset-yaml-ibm-power-vs.adoc[leveloffset=+1] - -//Creating a machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-nutanix.adoc b/machine_management/creating_machinesets/creating-machineset-nutanix.adoc deleted file mode 100644 index 41e16fc9f03b..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-nutanix.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-nutanix"] -= Creating a compute machine set on Nutanix -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-nutanix - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Nutanix. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -//[IMPORTANT] admonition for UPI -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on Nutanix -include::modules/machineset-yaml-nutanix.adoc[leveloffset=+1] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-osp.adoc b/machine_management/creating_machinesets/creating-machineset-osp.adoc deleted file mode 100644 index 57e30a54a9ee..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-osp.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-osp"] -= Creating a compute machine set on OpenStack -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-osp - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {rh-openstack-first}. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-yaml-osp.adoc[leveloffset=+1] - -include::modules/machineset-yaml-osp-sr-iov.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_openstack/installing-openstack-nfv-preparing.adoc#installing-openstack-nfv-preparing[Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack] - -include::modules/machineset-yaml-osp-sr-iov-port-security.adoc[leveloffset=+1] - -include::modules/machineset-creating.adoc[leveloffset=+1] - -// Mothballed - re-add when available -// include::modules/machineset-osp-adding-bare-metal.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-rhv.adoc b/machine_management/creating_machinesets/creating-machineset-rhv.adoc deleted file mode 100644 index a98622b2a337..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-rhv.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-rhv"] -= Creating a compute machine set on {rh-virtualization} -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-rhv - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {rh-virtualization-first}. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-yaml-rhv.adoc[leveloffset=+1] - -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc b/machine_management/creating_machinesets/creating-machineset-vsphere.adoc deleted file mode 100644 index 9965cbd3ba96..000000000000 --- a/machine_management/creating_machinesets/creating-machineset-vsphere.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_content-type: ASSEMBLY -[id="creating-machineset-vsphere"] -= Creating a compute machine set on vSphere -include::_attributes/common-attributes.adoc[] -:context: creating-machineset-vsphere - -toc::[] - -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on VMware vSphere. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
- -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -//Sample YAML for a compute machine set custom resource on vSphere -include::modules/machineset-yaml-vsphere.adoc[leveloffset=+1] - -//Minimum required vCenter privileges for compute machine set management -include::modules/machineset-vsphere-required-permissions.adoc[leveloffset=+1] - -//Requirements for clusters with user-provisioned infrastructure to use compute machine sets -include::modules/compute-machineset-upi-reqs.adoc[leveloffset=+1] - -//Obtaining the infrastructure ID -[discrete] -include::modules/machineset-upi-reqs-infra-id.adoc[leveloffset=+2] - -//Satisfying vSphere credentials requirements -[discrete] -include::modules/machineset-upi-reqs-vsphere-creds.adoc[leveloffset=+2] - -//Satisfying ignition configuration requirements -[discrete] -include::modules/machineset-upi-reqs-ignition-config.adoc[leveloffset=+2] -[role="_additional-resources"] -.Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Understanding the Machine Config Operator] -* xref:../../installing/installing_vsphere/installing-vsphere.adoc#installation-vsphere-machines_installing-vsphere[Installing {op-system} and starting the {product-title} bootstrap process] - -//Creating a compute machine set -include::modules/machineset-creating.adoc[leveloffset=+1] diff --git a/machine_management/creating_machinesets/images b/machine_management/creating_machinesets/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/creating_machinesets/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/creating_machinesets/modules b/machine_management/creating_machinesets/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/creating_machinesets/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/machine_management/creating_machinesets/snippets b/machine_management/creating_machinesets/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/creating_machinesets/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/machine_management/deleting-machine.adoc b/machine_management/deleting-machine.adoc deleted file mode 100644 index 9f7d9dd1c3c7..000000000000 --- a/machine_management/deleting-machine.adoc +++ /dev/null @@ -1,32 +0,0 @@ -:_content-type: ASSEMBLY -[id="deleting-machine"] -= Deleting a machine -include::_attributes/common-attributes.adoc[] -:context: deleting-machine - -toc::[] - -You can delete a specific machine. 
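The deletion lifecycle hooks covered by the included modules are declared on the machine itself. A minimal sketch, assuming the `machine.openshift.io/v1beta1` format; the hook names and owners are hypothetical placeholders:

[source,yaml]
----
# Sketch of deletion-phase lifecycle hooks on a Machine resource; a
# hook blocks the corresponding phase until its owner removes it.
apiVersion: machine.openshift.io/v1beta1
kind: Machine
metadata:
  name: example-machine
  namespace: openshift-machine-api
spec:
  lifecycleHooks:
    preDrain:
    - name: ExampleDrainHook        # hypothetical hook name
      owner: example-owner          # hypothetical controller that clears the hook
    preTerminate:
    - name: ExampleTerminateHook
      owner: example-owner
----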
- -//Deleting a specific machine -include::modules/machine-delete.adoc[leveloffset=+1] - -//Lifecycle hooks for the machine deletion phase -include::modules/machine-lifecycle-hook-deletion.adoc[leveloffset=+1] - -//Deletion lifecycle hook configuration -include::modules/machine-lifecycle-hook-deletion-format.adoc[leveloffset=+2] - -//Machine deletion lifecycle hook examples for Operator developers -include::modules/machine-lifecycle-hook-deletion-uses.adoc[leveloffset=+2] - -//Quorum protection with machine lifecycle hooks -include::modules/machine-lifecycle-hook-deletion-etcd.adoc[leveloffset=+2] - - -[role="_additional-resources"] -[id="additional-resources_unhealthy-etcd-member"] -== Additional resources - -* xref:../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member] -* xref:../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-using[Managing control plane machines with control plane machine sets] \ No newline at end of file diff --git a/machine_management/deploying-machine-health-checks.adoc b/machine_management/deploying-machine-health-checks.adoc deleted file mode 100644 index 6f7bcb0feb70..000000000000 --- a/machine_management/deploying-machine-health-checks.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="deploying-machine-health-checks"] -= Deploying machine health checks -include::_attributes/common-attributes.adoc[] -:context: deploying-machine-health-checks - -toc::[] - -You can configure and deploy a machine health check to automatically repair damaged machines in a machine pool. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machine-health-checks-about.adoc[leveloffset=+1] -[role="_additional-resources"] -.Additional resources -* xref:../nodes/nodes/nodes-nodes-viewing.adoc#nodes-nodes-viewing-listing_nodes-nodes-viewing[About listing all the nodes in a cluster] -* xref:../machine_management/deploying-machine-health-checks.adoc#machine-health-checks-short-circuiting_deploying-machine-health-checks[Short-circuiting machine health check remediation] -* xref:../machine_management/control_plane_machine_management/cpmso-about.adoc#cpmso-about[About the Control Plane Machine Set Operator] - -include::modules/machine-health-checks-resource.adoc[leveloffset=+1] - -include::modules/machine-health-checks-creating.adoc[leveloffset=+1] - -You can configure and deploy a machine health check to detect and repair unhealthy bare metal nodes. - -include::modules/mgmt-power-remediation-baremetal-about.adoc[leveloffset=+1] diff --git a/machine_management/images b/machine_management/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/machine_management/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/machine_management/index.adoc b/machine_management/index.adoc deleted file mode 100644 index ec56da0b86c7..000000000000 --- a/machine_management/index.adoc +++ /dev/null @@ -1,97 +0,0 @@ -:_content-type: ASSEMBLY -[id="overview-of-machine-management"] -= Overview of machine management -include::_attributes/common-attributes.adoc[] -:context: overview-of-machine-management - -toc::[] - -You can use machine management to flexibly work with underlying infrastructure such as Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere to manage the {product-title} cluster. 
-You can control the cluster and perform auto-scaling, such as scaling up and down the cluster based on specific workload policies. - -It is important to have a cluster that adapts to changing workloads. The {product-title} cluster can horizontally scale up and down when the load increases or decreases. - -Machine management is implemented as a xref:../operators/understanding/crds/crd-extending-api-with-crds.adoc#crd-extending-api-with-crds[custom resource definition] (CRD). -A CRD object defines a new unique object `Kind` in the cluster and enables the Kubernetes API server to handle the object's entire lifecycle. - -The Machine API Operator provisions the following resources: - -* `MachineSet` -* `Machine` -* `ClusterAutoscaler` -* `MachineAutoscaler` -* `MachineHealthCheck` - -include::modules/machine-api-overview.adoc[leveloffset=+1] - -[id="machine-mgmt-intro-managing-compute_{context}"] -== Managing compute machines - -As a cluster administrator, you can perform the following actions: - -* Create a compute machine set for the following cloud providers: - -** xref:../machine_management/creating_machinesets/creating-machineset-aws.adoc#creating-machineset-aws[AWS] - -** xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc#creating-machineset-azure[Azure] - -** xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#creating-machineset-gcp[GCP] - -** xref:../machine_management/creating_machinesets/creating-machineset-osp.adoc#creating-machineset-osp[{rh-openstack}] - -** xref:../machine_management/creating_machinesets/creating-machineset-rhv.adoc#creating-machineset-rhv[{rh-virtualization}] - -** xref:../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[vSphere] - -* Create a machine set for a bare metal deployment: xref:../machine_management/creating_machinesets/creating-machineset-bare-metal.adoc#creating-machineset-bare-metal[Creating a compute machine set on bare metal] - -* xref:../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scale a compute machine set] by adding or removing a machine from the compute machine set. - -* xref:../machine_management/modifying-machineset.adoc#modifying-machineset[Modify a compute machine set] through the `MachineSet` YAML configuration file. - -* xref:../machine_management/deleting-machine.adoc#deleting-machine[Delete] a machine. - -* xref:../machine_management/creating-infrastructure-machinesets.adoc#creating-infrastructure-machinesets[Create infrastructure compute machine sets]. - -* Configure and deploy a xref:../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[machine health check] to automatically fix damaged machines in a machine pool. 
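Editor's note: as a rough, non-authoritative sketch of the resource that the machine health check documentation above refers to, a `MachineHealthCheck` for compute machines has approximately the following shape. The selector label, timeouts, and thresholds are illustrative placeholders, not recommended values.

[source,yaml]
----
apiVersion: machine.openshift.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: example-machine-health-check   # placeholder name
  namespace: openshift-machine-api
spec:
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machineset: <machineset_name> # placeholder machine set label
  unhealthyConditions:
  - type: Ready
    status: "False"
    timeout: "300s"
  - type: Ready
    status: "Unknown"
    timeout: "300s"
  maxUnhealthy: "40%"        # illustrative threshold
  nodeStartupTimeout: "10m"  # illustrative startup timeout
----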
- -[id="machine-mgmt-intro-managing-control-plane_{context}"] -== Managing control plane machines - -As a cluster administrator, you can perform the following actions: - -* xref:../machine_management/control_plane_machine_management/cpmso-using.adoc#cpmso-feat-config-update_cpmso-using[Update your control plane configuration] with a control plane machine set for the following cloud providers: - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-aws_cpmso-configuration[AWS] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-gcp_cpmso-configuration[GCP] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-azure_cpmso-configuration[Azure] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-nutanix_cpmso-configuration[Nutanix] - -** xref:../machine_management/control_plane_machine_management/cpmso-configuration.adoc#cpmso-sample-yaml-vsphere_cpmso-configuration[vSphere] - -* Configure and deploy a xref:../machine_management/deploying-machine-health-checks.adoc#deploying-machine-health-checks[machine health check] to automatically recover unhealthy control plane machines. - -[id="machine-mgmt-intro-autoscaling_{context}"] -== Applying autoscaling to an {product-title} cluster - -You can automatically scale your {product-title} cluster to ensure flexibility for changing workloads. To xref:../machine_management/applying-autoscaling.adoc#applying-autoscaling[autoscale] your cluster, you must first deploy a cluster autoscaler, and then deploy a machine autoscaler for each compute machine set. - -* The xref:../machine_management/applying-autoscaling.adoc#cluster-autoscaler-about_applying-autoscaling[_cluster autoscaler_] increases and decreases the size of the cluster based on deployment needs. - -* The xref:../machine_management/applying-autoscaling.adoc#machine-autoscaler-about_applying-autoscaling[_machine autoscaler_] adjusts the number of machines in the compute machine sets that you deploy in your {product-title} cluster. - -[id="machine-mgmt-intro-add-for-upi_{context}"] -== Adding compute machines on user-provisioned infrastructure -User-provisioned infrastructure is an environment where you can deploy infrastructure such as compute, network, and storage resources that host the {product-title}. You can xref:../machine_management//user_infra/adding-compute-user-infra-general.adoc#adding-compute-user-infra-general[add compute machines] to a cluster on user-provisioned infrastructure during or after the installation process. - -[id="machine-mgmt-intro-add-rhel_{context}"] -== Adding RHEL compute machines to your cluster - -As a cluster administrator, you can perform the following actions: - -** xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Add Red Hat Enterprise Linux (RHEL) compute machines], also known as worker machines, to a user-provisioned infrastructure cluster or an installation-provisioned infrastructure cluster. - -** xref:../machine_management/more-rhel-compute.adoc#more-rhel-compute[Add more Red Hat Enterprise Linux (RHEL) compute machines] to an existing cluster. 
diff --git a/machine_management/manually-scaling-machineset.adoc b/machine_management/manually-scaling-machineset.adoc deleted file mode 100644 index 08994f3dfcc6..000000000000 --- a/machine_management/manually-scaling-machineset.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="manually-scaling-machineset"] -= Manually scaling a compute machine set -include::_attributes/common-attributes.adoc[] -:context: manually-scaling-machineset - -toc::[] - -You can add or remove an instance of a machine in a compute machine set. - -[NOTE] -==== -If you need to modify aspects of a compute machine set outside of scaling, see xref:../machine_management/modifying-machineset.adoc#modifying-machineset[Modifying a compute machine set]. -==== - -== Prerequisites - -* If you enabled the cluster-wide proxy and scale up compute machines not included in `networking.machineNetwork[].cidr` from the installation configuration, you must xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[add the compute machines to the Proxy object's `noProxy` field] to prevent connection issues. - -include::modules/machine-user-provisioned-limitations.adoc[leveloffset=+1] - -include::modules/machineset-manually-scaling.adoc[leveloffset=+1] - -include::modules/machineset-delete-policy.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_manually-scaling-machineset"] -== Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] \ No newline at end of file diff --git a/machine_management/modifying-machineset.adoc b/machine_management/modifying-machineset.adoc deleted file mode 100644 index 32dc7de23b7b..000000000000 --- a/machine_management/modifying-machineset.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="modifying-machineset"] -= Modifying a compute machine set -include::_attributes/common-attributes.adoc[] -:context: modifying-machineset - -toc::[] - -You can modify a compute machine set, such as adding labels, changing the instance type, or changing block storage. - -On {rh-virtualization-first}, you can also change a compute machine set to provision new nodes on a different storage domain. - -[NOTE] -==== -If you need to scale a compute machine set without making other changes, see xref:../machine_management/manually-scaling-machineset.adoc#manually-scaling-machineset[Manually scaling a compute machine set]. -==== - -include::modules/machineset-modifying.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/deleting-machine.adoc#machine-lifecycle-hook-deletion_deleting-machine[Lifecycle hooks for the machine deletion phase] - -[id="migrating-nodes-to-a-different-storage-domain-rhv_{context}"] -== Migrating nodes to a different storage domain on {rh-virtualization} - -You can migrate the {product-title} control plane and compute nodes to a different storage domain in a {rh-virtualization-first} cluster. 
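Editor's note: for context on the manual scaling assembly above, and before the {rh-virtualization} migration modules that follow, manually scaling a compute machine set typically reduces to one command. The replica count and machine set name below are placeholders.

[source,terminal]
----
$ oc scale --replicas=2 machineset <machineset_name> -n openshift-machine-api
----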
- -include::modules/machineset-migrating-compute-nodes-to-diff-sd-rhv.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../machine_management/creating_machinesets/creating-machineset-rhv.adoc#machineset-creating_creating-machineset-rhv[Creating a compute machine set] -* xref:../machine_management/manually-scaling-machineset.adoc#machineset-manually-scaling_manually-scaling-machineset[Scaling a compute machine set manually] -* xref:../nodes/scheduling/nodes-scheduler-about.adoc#nodes-scheduler-about[Controlling pod placement using the scheduler] - -include::modules/machineset-migrating-control-plane-nodes-to-diff-sd-rhv.adoc[leveloffset=+2] diff --git a/machine_management/modules b/machine_management/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/machine_management/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/machine_management/more-rhel-compute.adoc b/machine_management/more-rhel-compute.adoc deleted file mode 100644 index b3860d5f32b8..000000000000 --- a/machine_management/more-rhel-compute.adoc +++ /dev/null @@ -1,47 +0,0 @@ -:_content-type: ASSEMBLY -[id="more-rhel-compute"] -= Adding more RHEL compute machines to an {product-title} cluster -include::_attributes/common-attributes.adoc[] -:context: more-rhel-compute - -toc::[] - -If your {product-title} cluster already includes Red Hat Enterprise Linux (RHEL) compute machines, which are also known as worker machines, you can add more RHEL compute machines to it. - -include::modules/rhel-compute-overview.adoc[leveloffset=+1] - -include::modules/rhel-compute-requirements.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../nodes/nodes/nodes-nodes-working.adoc#nodes-nodes-working-deleting_nodes-nodes-working[Deleting nodes] - -include::modules/csr-management.adoc[leveloffset=+2] - -[id="more-rhel-compute-preparing-image-cloud"] -== Preparing an image for your cloud - -Amazon Machine Images (AMI) are required since various image formats cannot be used directly by AWS. You may use the AMIs that Red Hat has provided, or you can manually import your own images. The AMI must exist before the EC2 instance can be provisioned. You must list the AMI IDs so that the correct {op-system-base} version needed for the compute machines is selected. - -include::modules/rhel-images-aws.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* You may also manually link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/image_builder_guide/sect-documentation-image_builder-chapter5-section_2[import {op-system-base} images to AWS]. - -include::modules/rhel-preparing-node.adoc[leveloffset=+1] - -include::modules/rhel-attaching-instance-aws.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* See xref:../installing/installing_aws/installing-aws-account.adoc#installation-aws-permissions-iam-roles_installing-aws-account[Required AWS permissions for IAM roles]. 
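Editor's note: relating to the image-preparation section above, one way to list candidate {op-system-base} AMIs is with the AWS CLI. This sketch is not from the original assembly: the owner account `309956199498` is the commonly documented Red Hat image owner, and the name filter and region are assumptions to adjust for your environment.

[source,terminal]
----
$ aws ec2 describe-images \
    --owners 309956199498 \
    --region us-east-2 \
    --filters "Name=name,Values=RHEL-8*" \
    --query 'sort_by(Images, &CreationDate)[*].[ImageId,Name]' \
    --output table
----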
- -include::modules/rhel-worker-tag.adoc[leveloffset=+1] - -include::modules/rhel-adding-more-nodes.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/rhel-ansible-parameters.adoc[leveloffset=+1] diff --git a/machine_management/snippets b/machine_management/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/machine_management/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/machine_management/user_infra/_attributes b/machine_management/user_infra/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/machine_management/user_infra/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/machine_management/user_infra/adding-aws-compute-user-infra.adoc b/machine_management/user_infra/adding-aws-compute-user-infra.adoc deleted file mode 100644 index 7f8b9c5a2838..000000000000 --- a/machine_management/user_infra/adding-aws-compute-user-infra.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-aws-compute-user-infra"] -= Adding compute machines to AWS by using CloudFormation templates -include::_attributes/common-attributes.adoc[] -:context: adding-aws-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on Amazon Web Services (AWS) that you created by using the sample CloudFormation templates. - -[id="prerequisites_adding-aws-compute-user-infra"] -== Prerequisites - -* You installed your cluster on AWS by using the provided xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS CloudFormation templates]. -* You have the JSON file and CloudFormation template that you used to create the compute machines during cluster installation. If you do not have these files, you must recreate them by following the instructions in the xref:../../installing/installing_aws/installing-aws-user-infra.adoc#installing-aws-user-infra[installation procedure]. - -include::modules/machine-adding-aws-compute-cloudformation.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc b/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc deleted file mode 100644 index f1fa52de60fe..000000000000 --- a/machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-bare-metal-compute-user-infra"] -= Adding compute machines to bare metal -include::_attributes/common-attributes.adoc[] -:context: adding-bare-metal-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on bare metal. - -== Prerequisites - -* You xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[installed a cluster on bare metal]. -* You have installation media and {op-system-first} images that you used to create your cluster. If you do not have these files, you must obtain them by following the instructions in the xref:../../installing/installing_bare_metal/installing-bare-metal.adoc#installing-bare-metal[installation procedure]. -* If a DHCP server is available for your user-provisioned infrastructure, you have added the details for the additional compute machines to your DHCP server configuration. 
This includes a persistent IP address, DNS server information, and a hostname for each machine. -* You have updated your DNS configuration to include the record name and IP address of each compute machine that you are adding. You have validated that DNS lookup and reverse DNS lookup resolve correctly. - -[IMPORTANT] -==== -If you do not have access to the {op-system-first} images that were used to create your cluster, you can add more compute machines to your {product-title} cluster with newer versions of {op-system-first} images. For instructions, see link:https://access.redhat.com/solutions/5514051[Adding new nodes to UPI cluster fails after upgrading to OpenShift 4.6+]. -==== - -[id="creating-rhcos-machines-bare-metal"] -== Creating {op-system-first} machines - -Before you add more compute machines to a cluster that you installed on bare metal infrastructure, you must create {op-system} machines for it to use. You can either use an ISO image or network PXE booting to create the machines. - -[NOTE] -==== -You must use the same ISO image that you used to install a cluster to deploy all new nodes in a cluster. It is recommended to use the same Ignition config file. The nodes automatically upgrade themselves on the first boot before running the workloads. You can add the nodes before or after the upgrade. -==== - -include::modules/machine-user-infra-machines-iso.adoc[leveloffset=+2] - -include::modules/machine-user-infra-machines-pxe.adoc[leveloffset=+2] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/adding-compute-user-infra-general.adoc b/machine_management/user_infra/adding-compute-user-infra-general.adoc deleted file mode 100644 index 7f782dac844d..000000000000 --- a/machine_management/user_infra/adding-compute-user-infra-general.adoc +++ /dev/null @@ -1,46 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-compute-user-infra-general"] -= Adding compute machines to clusters with user-provisioned infrastructure manually -include::_attributes/common-attributes.adoc[] -:context: adding-compute-user-infra-general - -toc::[] - -You can add compute machines to a cluster on user-provisioned infrastructure either as part of the installation process or after installation. The post-installation process requires some of the same configuration files and parameters that were used during installation. - -[id="upi-adding-compute-aws"] -== Adding compute machines to Amazon Web Services - -To add more compute machines to your {product-title} cluster on Amazon Web Services (AWS), see xref:../../machine_management/user_infra/adding-aws-compute-user-infra.adoc#adding-aws-compute-user-infra[Adding compute machines to AWS by using CloudFormation templates]. - -[id="upi-adding-compute-azure"] -== Adding compute machines to Microsoft Azure - -To add more compute machines to your {product-title} cluster on Microsoft Azure, see xref:../../installing/installing_azure/installing-azure-user-infra.adoc#installation-creating-azure-worker_installing-azure-user-infra[Creating additional worker machines in Azure]. - -[id="upi-adding-compute-ash"] -== Adding compute machines to Azure Stack Hub - -To add more compute machines to your {product-title} cluster on Azure Stack Hub, see xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installation-creating-azure-worker_installing-azure-stack-hub-user-infra[Creating additional worker machines in Azure Stack Hub]. 
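Editor's note: regardless of platform, machines added to a user-provisioned cluster must have their certificate signing requests (CSRs) approved before they join the cluster, which is what the `installation-approve-csrs` module included by the assemblies above covers. A minimal sketch, with a placeholder CSR name:

[source,terminal]
----
$ oc get csr
----

[source,terminal]
----
$ oc adm certificate approve <csr_name>
----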
- -[id="upi-adding-compute-gcp"] -== Adding compute machines to Google Cloud Platform - -To add more compute machines to your {product-title} cluster on Google Cloud Platform (GCP), see xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installation-creating-gcp-worker_installing-restricted-networks-gcp[Creating additional worker machines in GCP]. - -[id="upi-adding-compute-vsphere"] -== Adding compute machines to vSphere - -You can xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[use compute machine sets] to automate the creation of additional compute machines for your {product-title} cluster on vSphere. - -To manually add more compute machines to your cluster, see xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere manually]. - -[id="upi-adding-compute-rhv"] -== Adding compute machines to {rh-virtualization} - -To add more compute machines to your {product-title} cluster on {rh-virtualization}, see xref:../../machine_management/user_infra/adding-rhv-compute-user-infra.adoc#adding-rhv-compute-user-infra[Adding compute machines to {rh-virtualization}]. - -[id="upi-adding-compute-bare-metal"] -== Adding compute machines to bare metal - -To add more compute machines to your {product-title} cluster on bare metal, see xref:../../machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc#adding-bare-metal-compute-user-infra[Adding compute machines to bare metal]. diff --git a/machine_management/user_infra/adding-rhv-compute-user-infra.adoc b/machine_management/user_infra/adding-rhv-compute-user-infra.adoc deleted file mode 100644 index fa5ca34fcc7f..000000000000 --- a/machine_management/user_infra/adding-rhv-compute-user-infra.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Assembly included in the following assemblies: -// * machine_management/user_infra/adding-compute-user-infra-general.adoc - -:_content-type: ASSEMBLY -[id="adding-rhv-compute-user-infra"] -= Adding compute machines to a cluster on {rh-virtualization} -include::_attributes/common-attributes.adoc[] -:context: adding-rhv-compute-user-infra - -toc::[] - -In {product-title} version {product-version}, you can add more compute machines to a user-provisioned {product-title} cluster on {rh-virtualization}. - -.Prerequisites - -* You installed a cluster on {rh-virtualization} with user-provisioned infrastructure. - -include::modules/machine-user-provisioned-rhv.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc b/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc deleted file mode 100644 index ab5b6dd86d57..000000000000 --- a/machine_management/user_infra/adding-vsphere-compute-user-infra.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: ASSEMBLY -[id="adding-vsphere-compute-user-infra"] -= Adding compute machines to vSphere manually -include::_attributes/common-attributes.adoc[] -:context: adding-vsphere-compute-user-infra - -toc::[] - -You can add more compute machines to your {product-title} cluster on VMware vSphere manually. - -[NOTE] -==== -You can also xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere[use compute machine sets] to automate the creation of additional VMware vSphere compute machines for your cluster. 
-==== - -== Prerequisites - -* You xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[installed a cluster on vSphere]. - -* You have installation media and {op-system-first} images that you used to create your cluster. If you do not have these files, you must obtain them by following the instructions in the xref:../../installing/installing_vsphere/installing-vsphere.adoc#installing-vsphere[installation procedure]. - -[IMPORTANT] -==== -If you do not have access to the {op-system-first} images that were used to create your cluster, you can add more compute machines to your {product-title} cluster with newer versions of {op-system-first} images. For instructions, see link:https://access.redhat.com/solutions/5514051[Adding new nodes to UPI cluster fails after upgrading to OpenShift 4.6+]. -==== - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] diff --git a/machine_management/user_infra/images b/machine_management/user_infra/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/machine_management/user_infra/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/machine_management/user_infra/modules b/machine_management/user_infra/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/machine_management/user_infra/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/machine_management/user_infra/snippets b/machine_management/user_infra/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/machine_management/user_infra/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/masters/images b/masters/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/masters/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/masters/modules b/masters/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/masters/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/metering/_attributes b/metering/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/metering/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/metering/configuring_metering/_attributes b/metering/configuring_metering/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/metering/configuring_metering/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/metering/configuring_metering/images b/metering/configuring_metering/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/metering/configuring_metering/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/metering/configuring_metering/metering-about-configuring.adoc b/metering/configuring_metering/metering-about-configuring.adoc deleted file mode 100644 index e13b9bbe28f3..000000000000 --- a/metering/configuring_metering/metering-about-configuring.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-about-configuring"] -= About configuring metering -include::_attributes/common-attributes.adoc[] -:context: metering-about-configuring - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -The `MeteringConfig` custom resource specifies all the configuration 
details for your metering installation. When you first install the metering stack, a default `MeteringConfig` custom resource is generated. Use the examples in the documentation to modify this default file. Keep in mind the following key points: - -* At a minimum, you need to xref:../../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore]. - -* Most default configuration settings work, but larger deployments or highly customized deployments should review all configuration options carefully. - -* Some configuration options can not be modified after installation. - -For configuration options that can be modified after installation, make the changes in your `MeteringConfig` custom resource and reapply the file. diff --git a/metering/configuring_metering/metering-common-config-options.adoc b/metering/configuring_metering/metering-common-config-options.adoc deleted file mode 100644 index d526bc0fc855..000000000000 --- a/metering/configuring_metering/metering-common-config-options.adoc +++ /dev/null @@ -1,175 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-common-config-options"] -= Common configuration options -include::_attributes/common-attributes.adoc[] -:context: metering-common-config-options - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -== Resource requests and limits -You can adjust the CPU, memory, or storage resource requests and/or limits for pods and volumes. The `default-resource-limits.yaml` below provides an example of setting resource request and limits for each component. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - resources: - limits: - cpu: 1 - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi - presto: - spec: - coordinator: - resources: - limits: - cpu: 4 - memory: 4Gi - requests: - cpu: 2 - memory: 2Gi - - worker: - replicas: 0 - resources: - limits: - cpu: 8 - memory: 8Gi - requests: - cpu: 4 - memory: 2Gi - - hive: - spec: - metastore: - resources: - limits: - cpu: 4 - memory: 2Gi - requests: - cpu: 500m - memory: 650Mi - storage: - class: null - create: true - size: 5Gi - server: - resources: - limits: - cpu: 1 - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi ----- - -== Node selectors -You can run the metering components on specific sets of nodes. Set the `nodeSelector` on a metering component to control where the component is scheduled. The `node-selectors.yaml` file below provides an example of setting node selectors for each component. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. Specify `""` as the annotation value. 
-==== - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - - presto: - spec: - coordinator: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - worker: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - hive: - spec: - metastore: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - server: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use key-value pairs, based on the value specified for the node. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. When the `openshift.io/node-selector` annotation is set on the project, the value is used in preference to the value of the `spec.defaultNodeSelector` field in the cluster-wide `Scheduler` object. -==== - -.Verification - -You can verify the metering node selectors by performing any of the following checks: - -* Verify that all pods for metering are correctly scheduled on the IP of the node that is configured in the `MeteringConfig` custom resource: -+ --- -. Check all pods in the `openshift-metering` namespace: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering get pods -o wide ----- -+ -The output shows the `NODE` and corresponding `IP` for each pod running in the `openshift-metering` namespace. -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -hive-metastore-0 1/2 Running 0 4m33s 10.129.2.26 ip-10-0-210-167.us-east-2.compute.internal -hive-server-0 2/3 Running 0 4m21s 10.128.2.26 ip-10-0-150-175.us-east-2.compute.internal -metering-operator-964b4fb55-4p699 2/2 Running 0 7h30m 10.131.0.33 ip-10-0-189-6.us-east-2.compute.internal -nfs-server 1/1 Running 0 7h30m 10.129.2.24 ip-10-0-210-167.us-east-2.compute.internal -presto-coordinator-0 2/2 Running 0 4m8s 10.131.0.35 ip-10-0-189-6.us-east-2.compute.internal -reporting-operator-869b854c78-8g2x5 1/2 Running 0 7h27m 10.128.2.25 ip-10-0-150-175.us-east-2.compute.internal ----- -+ -. Compare the nodes in the `openshift-metering` namespace to each node `NAME` in your cluster: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-147-106.us-east-2.compute.internal Ready master 14h v1.26.0 -ip-10-0-150-175.us-east-2.compute.internal Ready worker 14h v1.26.0 -ip-10-0-175-23.us-east-2.compute.internal Ready master 14h v1.26.0 -ip-10-0-189-6.us-east-2.compute.internal Ready worker 14h v1.26.0 -ip-10-0-205-158.us-east-2.compute.internal Ready master 14h v1.26.0 -ip-10-0-210-167.us-east-2.compute.internal Ready worker 14h v1.26.0 ----- --- - -* Verify that the node selector configuration in the `MeteringConfig` custom resource does not interfere with the cluster-wide node selector configuration such that no metering operand pods are scheduled. 
- -** Check the cluster-wide `Scheduler` object for the `spec.defaultNodeSelector` field, which shows where pods are scheduled by default: -+ -[source,terminal] ----- -$ oc get schedulers.config.openshift.io cluster -o yaml ----- diff --git a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc b/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc deleted file mode 100644 index db22749f27e3..000000000000 --- a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc +++ /dev/null @@ -1,116 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-aws-billing-correlation"] -= Configure AWS billing correlation -include::_attributes/common-attributes.adoc[] -:context: metering-configure-aws-billing-correlation - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Metering can correlate cluster usage information with https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-costusage.html[AWS detailed billing information], attaching a dollar amount to resource usage. For clusters running in EC2, you can enable this by modifying the example `aws-billing.yaml` file below. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - openshift-reporting: - spec: - awsBillingReportDataSource: - enabled: true - # Replace these with where your AWS billing reports are - # stored in S3. - bucket: "" <1> - prefix: "" - region: "" - - reporting-operator: - spec: - config: - aws: - secretName: "" <2> - - presto: - spec: - config: - aws: - secretName: "" <2> - - hive: - spec: - config: - aws: - secretName: "" <2> ----- -To enable AWS billing correlation, first ensure the AWS Cost and Usage Reports are enabled. For more information, see https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-gettingstarted-turnonreports.html[Turning on the AWS Cost and Usage Report] in the AWS documentation. - -<1> Update the bucket, prefix, and region to the location of your AWS Detailed billing report. -<2> All `secretName` fields should be set to the name of a secret in the metering namespace containing AWS credentials in the `data.aws-access-key-id` and `data.aws-secret-access-key` fields. See the example secret file below for more details. - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: -data: - aws-access-key-id: "dGVzdAo=" - aws-secret-access-key: "c2VjcmV0Cg==" ----- - -To store data in S3, the `aws-access-key-id` and `aws-secret-access-key` credentials must have read and write access to the bucket. For an example of an IAM policy granting the required permissions, see the `aws/read-write.json` file below. 
-
-[source,json]
-----
-{
-    "Version": "2012-10-17",
-    "Statement": [
-        {
-            "Sid": "1",
-            "Effect": "Allow",
-            "Action": [
-                "s3:AbortMultipartUpload",
-                "s3:DeleteObject",
-                "s3:GetObject",
-                "s3:HeadBucket",
-                "s3:ListBucket",
-                "s3:ListMultipartUploadParts",
-                "s3:PutObject"
-            ],
-            "Resource": [
-                "arn:aws:s3:::operator-metering-data/*", <1>
-                "arn:aws:s3:::operator-metering-data" <1>
-            ]
-        }
-    ]
-}
-----
-<1> Replace `operator-metering-data` with the name of your bucket.
-
-You can enable AWS billing correlation either before or after installation. Disabling AWS billing correlation after installation can cause errors in the Reporting Operator.
diff --git a/metering/configuring_metering/metering-configure-hive-metastore.adoc b/metering/configuring_metering/metering-configure-hive-metastore.adoc
deleted file mode 100644
index b9aba58f910a..000000000000
--- a/metering/configuring_metering/metering-configure-hive-metastore.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:_content-type: ASSEMBLY
-[id="metering-configure-hive-metastore"]
-= Configuring the Hive metastore
-include::_attributes/common-attributes.adoc[]
-:context: metering-configure-hive-metastore
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-Hive metastore is responsible for storing all the metadata about the database tables created in Presto and Hive. By default, the metastore stores this information in a local embedded Derby database in a persistent volume attached to the pod.
-
-Generally, the default configuration of the Hive metastore works for small clusters, but you might want to improve performance or move storage requirements out of the cluster by using a dedicated SQL database for storing the Hive metastore data.
-
-include::modules/metering-configure-persistentvolumes.adoc[leveloffset=+1]
-
-include::modules/metering-use-mysql-or-postgresql-for-hive.adoc[leveloffset=+1]
diff --git a/metering/configuring_metering/metering-configure-persistent-storage.adoc b/metering/configuring_metering/metering-configure-persistent-storage.adoc
deleted file mode 100644
index 6e9930cd4e45..000000000000
--- a/metering/configuring_metering/metering-configure-persistent-storage.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-:_content-type: ASSEMBLY
-[id="metering-configure-persistent-storage"]
-= Configuring persistent storage
-include::_attributes/common-attributes.adoc[]
-:context: metering-configure-persistent-storage
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-Metering requires persistent storage to persist data collected by the Metering Operator and to store the results of reports. A number of different storage providers and storage formats are supported. Select your storage provider and modify the example configuration files to configure persistent storage for your metering installation.
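Editor's note: as a non-authoritative sketch of the kind of configuration that the storage modules below document, a `MeteringConfig` that stores data in Amazon S3 has roughly the following shape. The bucket path, region, and secret name are placeholders, and the exact field names should be confirmed against the `metering-store-data-in-s3` module.

[source,yaml]
----
apiVersion: metering.openshift.io/v1
kind: MeteringConfig
metadata:
  name: "operator-metering"
spec:
  storage:
    type: "hive"
    hive:
      type: "s3"
      s3:
        bucket: "<bucket_name>/<path>"         # placeholder bucket and optional prefix
        region: "<aws_region>"                 # placeholder region
        secretName: "<aws_credentials_secret>" # secret with AWS credentials in the metering namespace
        createBucket: false
----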
- -include::modules/metering-store-data-in-s3.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-s3-compatible.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-azure.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-gcp.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-shared-volumes.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/metering-configure-reporting-operator.adoc b/metering/configuring_metering/metering-configure-reporting-operator.adoc deleted file mode 100644 index dc6b53385854..000000000000 --- a/metering/configuring_metering/metering-configure-reporting-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-configure-reporting-operator"] -= Configuring the Reporting Operator -include::_attributes/common-attributes.adoc[] -:context: metering-configure-reporting-operator - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -The Reporting Operator is responsible for collecting data from Prometheus, storing the metrics in Presto, running report queries against Presto, and exposing their results via an HTTP API. Configuring the Reporting Operator is primarily done in your `MeteringConfig` custom resource. - -include::modules/metering-prometheus-connection.adoc[leveloffset=+1] - -include::modules/metering-exposing-the-reporting-api.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/modules b/metering/configuring_metering/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metering/configuring_metering/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metering/configuring_metering/snippets b/metering/configuring_metering/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/metering/configuring_metering/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/metering/images b/metering/images deleted file mode 120000 index e4c5bd02a10a..000000000000 --- a/metering/images +++ /dev/null @@ -1 +0,0 @@ -../images/ \ No newline at end of file diff --git a/metering/metering-about-metering.adoc b/metering/metering-about-metering.adoc deleted file mode 100644 index 84814aedd33d..000000000000 --- a/metering/metering-about-metering.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-metering"] -= About Metering -include::_attributes/common-attributes.adoc[] -:context: about-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -include::modules/metering-overview.adoc[leveloffset=+1] diff --git a/metering/metering-installing-metering.adoc b/metering/metering-installing-metering.adoc deleted file mode 100644 index 566797b48389..000000000000 --- a/metering/metering-installing-metering.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-metering"] -= Installing metering -include::_attributes/common-attributes.adoc[] -:context: installing-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Review the following sections before installing metering into your cluster. - -To get started installing metering, first install the Metering Operator from OperatorHub. Next, configure your instance of metering by creating a `MeteringConfig` custom resource (CR). 
Installing the Metering Operator creates a default `MeteringConfig` resource that you can modify using the examples in the documentation. After creating your `MeteringConfig` resource, install the metering stack. Last, verify your installation. - -include::modules/metering-install-prerequisites.adoc[leveloffset=+1] - -include::modules/metering-install-operator.adoc[leveloffset=+1] - -// Including this content directly in the assembly because the workflow requires linking off to the config docs, and we don't current link -// inside of modules - klamenzo 2019-09-23 -[id="metering-install-metering-stack_{context}"] -== Installing the metering stack - -After adding the Metering Operator to your cluster you can install the components of metering by installing the metering stack. - -== Prerequisites - -* Review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuration options] -* Create a `MeteringConfig` resource. You can begin the following process to generate a default `MeteringConfig` resource, then use the examples in the documentation to modify this default file for your specific installation. Review the following topics to create your `MeteringConfig` resource: -** For configuration options, review xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering]. -** At a minimum, you need to xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore]. - -[IMPORTANT] -==== -There can only be one `MeteringConfig` resource in the `openshift-metering` namespace. Any other configuration is not supported. -==== - -.Procedure - -. From the web console, ensure you are on the *Operator Details* page for the Metering Operator in the `openshift-metering` project. You can navigate to this page by clicking *Operators* -> *Installed Operators*, then selecting the Metering Operator. - -. Under *Provided APIs*, click *Create Instance* on the Metering Configuration card. This opens a YAML editor with the default `MeteringConfig` resource file where you can define your configuration. -+ -[NOTE] -==== -For example configuration files and all supported configuration options, review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuring metering documentation]. -==== - -. Enter your `MeteringConfig` resource into the YAML editor and click *Create*. - -The `MeteringConfig` resource begins to create the necessary resources for your metering stack. You can now move on to verifying your installation. - -include::modules/metering-install-verify.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="metering-install-additional-resources_{context}"] -== Additional resources - -* For more information on configuration steps and available storage platforms, see xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[Configuring persistent storage]. - -* For the steps to configure Hive, see xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[Configuring the Hive metastore]. 
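Editor's note: as a brief, hedged illustration of the verification step referenced above, you can check that the metering stack components come up and that data sources begin importing; both commands also appear elsewhere in these assemblies.

[source,terminal]
----
$ oc -n openshift-metering get pods
----

[source,terminal]
----
$ oc -n openshift-metering get reportdatasources
----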
diff --git a/metering/metering-troubleshooting-debugging.adoc b/metering/metering-troubleshooting-debugging.adoc deleted file mode 100644 index 53333e6f390d..000000000000 --- a/metering/metering-troubleshooting-debugging.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-troubleshooting-debugging"] -= Troubleshooting and debugging metering -include::_attributes/common-attributes.adoc[] -:context: metering-troubleshooting-debugging - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Use the following sections to help troubleshoot and debug specific issues with metering. - -In addition to the information in this section, be sure to review the following topics: - -* xref:../metering/metering-installing-metering.adoc#metering-install-prerequisites_installing-metering[Prerequisites for installing metering]. -* xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering] - -include::modules/metering-troubleshooting.adoc[leveloffset=+1] - -include::modules/metering-debugging.adoc[leveloffset=+1] diff --git a/metering/metering-uninstall.adoc b/metering/metering-uninstall.adoc deleted file mode 100644 index 256c69f27b39..000000000000 --- a/metering/metering-uninstall.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -:context: metering-uninstall -[id="metering-uninstall"] -= Uninstalling metering -include::_attributes/common-attributes.adoc[] - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -You can remove metering from your {product-title} cluster. - -[NOTE] -==== -Metering does not manage or delete Amazon S3 bucket data. After uninstalling metering, you must manually clean up S3 buckets that were used to store metering data. -==== - -[id="metering-remove"] -== Removing the Metering Operator from your cluster - -Remove the Metering Operator from your cluster by following the documentation on xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster]. - -[NOTE] -==== -Removing the Metering Operator from your cluster does not remove its custom resource definitions or managed resources. See the following sections on xref:../metering/metering-uninstall.adoc#metering-uninstall_metering-uninstall[Uninstalling a metering namespace] and xref:../metering/metering-uninstall.adoc#metering-uninstall-crds_metering-uninstall[Uninstalling metering custom resource definitions] for steps to remove any remaining metering components. -==== - -include::modules/metering-uninstall.adoc[leveloffset=+1] - -include::modules/metering-uninstall-crds.adoc[leveloffset=+1] diff --git a/metering/metering-upgrading-metering.adoc b/metering/metering-upgrading-metering.adoc deleted file mode 100644 index b74273887552..000000000000 --- a/metering/metering-upgrading-metering.adoc +++ /dev/null @@ -1,148 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-metering"] -= Upgrading metering -include::_attributes/common-attributes.adoc[] -:context: upgrading-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -You can upgrade metering to {product-version} by updating the Metering Operator subscription. - -== Prerequisites - -* The cluster is updated to {product-version}. 
-* The xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Metering Operator] is installed from OperatorHub. -+ -[NOTE] -==== -You must upgrade the Metering Operator to {product-version} manually. Metering does not upgrade automatically if you selected the "Automatic" *Approval Strategy* in a previous installation. -==== -* The xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[MeteringConfig custom resource] is configured. -* The xref:../metering/metering-installing-metering.adoc#metering-install-metering-stack_installing-metering[metering stack] is installed. -* Ensure that metering status is healthy by checking that all pods are ready. - -[IMPORTANT] -==== -Potential data loss can occur if you modify your metering storage configuration after installing or upgrading metering. -==== - -.Procedure - -. Click *Operators* -> *Installed Operators* from the web console. - -. Select the `openshift-metering` project. - -. Click *Metering Operator*. - -. Click *Subscription* -> *Channel*. - -. In the *Change Subscription Update Channel* window, select *{product-version}* and click *Save*. -+ -[NOTE] -==== -Wait several seconds to allow the subscription to update before proceeding to the next step. -==== -. Click *Operators* -> *Installed Operators*. -+ -The Metering Operator is shown as 4.9. For example: -+ ----- -Metering -4.9.0-202107012112.p0 provided by Red Hat, Inc ----- - -.Verification -You can verify the metering upgrade by performing any of the following checks: - -* Check the Metering Operator cluster service version (CSV) for the new metering version. This can be done through either the web console or CLI. -+ --- -.Procedure (UI) - . Navigate to *Operators* -> *Installed Operators* in the metering namespace. - . Click *Metering Operator*. - . Click *Subscription* for *Subscription Details*. - . Check the *Installed Version* for the upgraded metering version. The *Starting Version* shows the metering version prior to upgrading. - -.Procedure (CLI) -* Check the Metering Operator CSV: -+ -[source,terminal] ----- -$ oc get csv | grep metering ----- -+ -.Example output for metering upgrade from 4.8 to 4.9 -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -metering-operator.4.9.0-202107012112.p0 Metering 4.9.0-202107012112.p0 metering-operator.4.8.0-202007012112.p0 Succeeded ----- --- - -* Check that all required pods in the `openshift-metering` namespace are created. This can be done through either the web console or CLI. -+ --- -[NOTE] -==== -Many pods rely on other components to function before they themselves can be considered ready. Some pods may restart if other pods take too long to start. This is to be expected during the Metering Operator upgrade. -==== - -.Procedure (UI) -* Navigate to *Workloads* -> *Pods* in the metering namespace and verify that pods are being created. This can take several minutes after upgrading the metering stack. 
- -.Procedure (CLI) -* Check that all required pods in the `openshift-metering` namespace are created: -+ -[source,terminal] ----- -$ oc -n openshift-metering get pods ----- -.Example output -[source,terminal] -+ ----- -NAME READY STATUS RESTARTS AGE -hive-metastore-0 2/2 Running 0 3m28s -hive-server-0 3/3 Running 0 3m28s -metering-operator-68dd64cfb6-2k7d9 2/2 Running 0 5m17s -presto-coordinator-0 2/2 Running 0 3m9s -reporting-operator-5588964bf8-x2tkn 2/2 Running 0 2m40s ----- --- - -* Verify that the `ReportDataSource` resources are importing new data, indicated by a valid timestamp in the `NEWEST METRIC` column. This might take several minutes. Filter out the "-raw" `ReportDataSource` resources, which do not import data: -+ -[source,terminal] ----- -$ oc get reportdatasources -n openshift-metering | grep -v raw ----- -+ -Timestamps in the `NEWEST METRIC` column indicate that `ReportDataSource` resources are beginning to import new data. -+ -.Example output -[source,terminal] ----- -NAME EARLIEST METRIC NEWEST METRIC IMPORT START IMPORT END LAST IMPORT TIME AGE -node-allocatable-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:44Z 23h -node-allocatable-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:07Z 23h -node-capacity-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:52Z 23h -node-capacity-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:03Z 23h -persistentvolumeclaim-capacity-bytes 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:56:46Z 23h -persistentvolumeclaim-phase 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:36Z 23h -persistentvolumeclaim-request-bytes 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:03Z 23h -persistentvolumeclaim-usage-bytes 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:02Z 23h -pod-limit-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:57:00Z 2021-07-01T19:10:00Z 2021-07-02T19:57:00Z 2021-07-02T19:57:02Z 23h -pod-limit-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:58:00Z 2021-07-01T19:11:00Z 2021-07-02T19:58:00Z 2021-07-02T19:59:06Z 23h -pod-persistentvolumeclaim-request-info 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:52:07Z 23h -pod-request-cpu-cores 2021-07-01T21:10:00Z 2021-07-02T19:58:00Z 2021-07-01T19:11:00Z 2021-07-02T19:58:00Z 2021-07-02T19:58:57Z 23h -pod-request-memory-bytes 2021-07-01T21:10:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:55:32Z 23h -pod-usage-cpu-cores 2021-07-01T21:09:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:54:55Z 23h -pod-usage-memory-bytes 2021-07-01T21:08:00Z 2021-07-02T19:52:00Z 2021-07-01T19:11:00Z 2021-07-02T19:52:00Z 2021-07-02T19:55:00Z 23h -report-ns-pvc-usage 5h36m -report-ns-pvc-usage-hourly ----- - -After all pods are ready and you have verified that new data is being imported, metering continues to collect data and report on your cluster. 
Review a previously xref:../metering/reports/metering-about-reports.adoc#metering-example-report-with-schedule_metering-about-reports[scheduled report] or create a xref:../metering/reports/metering-about-reports.adoc#metering-example-report-without-schedule_metering-about-reports[run-once metering report] to confirm the metering upgrade. diff --git a/metering/metering-usage-examples.adoc b/metering/metering-usage-examples.adoc deleted file mode 100644 index c522b516c96e..000000000000 --- a/metering/metering-usage-examples.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-usage-examples"] -= Examples of using metering -include::_attributes/common-attributes.adoc[] -:context: metering-usage-examples - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Use the following example reports to get started measuring capacity, usage, and utilization in your cluster. These examples showcase the various types of reports metering offers, along with a selection of the predefined queries. - -== Prerequisites -* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install metering] -* Review the details about xref:../metering/metering-using-metering.adoc#using-metering[writing and viewing reports]. - -include::modules/metering-cluster-capacity-examples.adoc[leveloffset=+1] - -include::modules/metering-cluster-usage-examples.adoc[leveloffset=+1] - -include::modules/metering-cluster-utilization-examples.adoc[leveloffset=+1] diff --git a/metering/metering-using-metering.adoc b/metering/metering-using-metering.adoc deleted file mode 100644 index 163b41c09a11..000000000000 --- a/metering/metering-using-metering.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: ASSEMBLY -[id="using-metering"] -= Using Metering -include::_attributes/common-attributes.adoc[] -:context: using-metering - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -== Prerequisites - -* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install Metering] -* Review the details about the available options that can be configured for a xref:../metering/reports/metering-about-reports.adoc#metering-about-reports[report] and how they function.
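Before the writing and viewing modules that follow, the snippet below is a minimal, illustrative run-once `Report` resource rather than an example taken from those modules. It assumes the predefined `namespace-cpu-request` query that ships with metering; the name and reporting window are placeholders.

[source,yaml]
----
apiVersion: metering.openshift.io/v1
kind: Report
metadata:
  name: namespace-cpu-request-example <1>
  namespace: openshift-metering
spec:
  query: namespace-cpu-request <2>
  reportingStart: "2021-07-01T00:00:00Z"
  reportingEnd: "2021-07-31T00:00:00Z"
  runImmediately: true <3>
----
<1> A placeholder name for the report.
<2> One of the predefined `ReportQuery` resources installed with metering.
<3> Runs the report once over the given period instead of on a schedule.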
- -include::modules/metering-writing-reports.adoc[leveloffset=+1] - -include::modules/metering-viewing-report-results.adoc[leveloffset=+1] diff --git a/metering/modules b/metering/modules deleted file mode 120000 index 43aab75b53c9..000000000000 --- a/metering/modules +++ /dev/null @@ -1 +0,0 @@ -../modules/ \ No newline at end of file diff --git a/metering/reports/_attributes b/metering/reports/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/metering/reports/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/metering/reports/images b/metering/reports/images deleted file mode 120000 index 847b03ed0541..000000000000 --- a/metering/reports/images +++ /dev/null @@ -1 +0,0 @@ -../../images/ \ No newline at end of file diff --git a/metering/reports/metering-about-reports.adoc b/metering/reports/metering-about-reports.adoc deleted file mode 100644 index c909f6fd0ee4..000000000000 --- a/metering/reports/metering-about-reports.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-about-reports"] -= About Reports -include::_attributes/common-attributes.adoc[] -:context: metering-about-reports - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `Report` custom resource provides a method to manage periodic Extract Transform and Load (ETL) jobs using SQL queries. Reports are composed from other metering resources, such as `ReportQuery` resources that provide the actual SQL query to run, and `ReportDataSource` resources that define the data available to the `ReportQuery` and `Report` resources. - -Many use cases are addressed by the predefined `ReportQuery` and `ReportDataSource` resources that come installed with metering. Therefore, you do not need to define your own unless you have a use case that is not covered by these predefined resources. - -include::modules/metering-reports.adoc[leveloffset=+1] diff --git a/metering/reports/metering-storage-locations.adoc b/metering/reports/metering-storage-locations.adoc deleted file mode 100644 index ab06b989cfa3..000000000000 --- a/metering/reports/metering-storage-locations.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: ASSEMBLY -[id="metering-storage-locations"] -= Storage locations -include::_attributes/common-attributes.adoc[] -:context: metering-storage-locations - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `StorageLocation` custom resource configures where data will be stored by the Reporting Operator. This includes the data collected from Prometheus, and the results produced by generating a `Report` custom resource. - -You only need to configure a `StorageLocation` custom resource if you want to store data in multiple locations, like multiple S3 buckets or both S3 and HDFS, or if you wish to access a database in Hive and Presto that was not created by metering. For most users this is not a requirement, and the xref:../../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[documentation on configuring metering] is sufficient to configure all necessary storage components. - -== Storage location examples - -The following example shows the built-in local storage option, and is configured to use Hive. By default, data is stored wherever Hive is configured to use storage, such as HDFS, S3, or a `ReadWriteMany` persistent volume claim (PVC). 
- -.Local storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: hive - labels: - operator-metering: "true" -spec: - hive: <1> - databaseName: metering <2> - unmanagedDatabase: false <3> ----- - -<1> If the `hive` section is present, then the `StorageLocation` resource will be configured to store data in Presto by creating the table using the Hive server. Only `databaseName` and `unmanagedDatabase` are required fields. -<2> The name of the database within hive. -<3> If `true`, the `StorageLocation` resource will not be actively managed, and the `databaseName` is expected to already exist in Hive. If `false`, the Reporting Operator will create the database in Hive. - -The following example uses an AWS S3 bucket for storage. The prefix is appended to the bucket name when constructing the path to use. - -.Remote storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" <1> ----- -<1> Optional: The filesystem URL for Presto and Hive to use for the database. This can be an `hdfs://` or `s3a://` filesystem URL. - -There are additional optional fields that can be specified in the `hive` section: - -* `defaultTableProperties`: Contains configuration options for creating tables using Hive. -* `fileFormat`: The file format used for storing files in the filesystem. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-StorageFormatsStorageFormatsRowFormat,StorageFormat,andSerDe[Hive Documentation on File Storage Format] for a list of options and more details. -* `rowFormat`: Controls the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[ Hive row format]. This controls how Hive serializes and deserializes rows. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[Hive Documentation on Row Formats and SerDe] for more details. - -== Default storage location -If an annotation `storagelocation.metering.openshift.io/is-default` exists and is set to `true` on a `StorageLocation` resource, then that resource becomes the default storage resource. Any components with a storage configuration option where the storage location is not specified will use the default storage resource. There can be only one default storage resource. If more than one resource with the annotation exists, an error is logged because the Reporting Operator cannot determine the default. 
- -.Default storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" - annotations: - storagelocation.metering.openshift.io/is-default: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" ----- diff --git a/metering/reports/modules b/metering/reports/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metering/reports/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metering/reports/snippets b/metering/reports/snippets deleted file mode 120000 index 5a3f5add140e..000000000000 --- a/metering/reports/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets/ \ No newline at end of file diff --git a/metering/snippets b/metering/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/metering/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/metrics/PLACEHOLDER b/metrics/PLACEHOLDER deleted file mode 100644 index 4020393e57eb..000000000000 --- a/metrics/PLACEHOLDER +++ /dev/null @@ -1,2 +0,0 @@ -Please delete this file once you have assemblies here. - diff --git a/metrics/_attributes b/metrics/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/metrics/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/metrics/images b/metrics/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/metrics/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/metrics/modules b/metrics/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/metrics/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/metrics/snippets b/metrics/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/metrics/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/microshift_cli_ref/_attributes b/microshift_cli_ref/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_cli_ref/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_cli_ref/images b/microshift_cli_ref/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_cli_ref/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_cli_ref/microshift-cli-tools-introduction.adoc b/microshift_cli_ref/microshift-cli-tools-introduction.adoc deleted file mode 100644 index f2cbdf7c6100..000000000000 --- a/microshift_cli_ref/microshift-cli-tools-introduction.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-cli-tools"] -= {product-title} CLI tools introduction -include::_attributes/attributes-microshift.adoc[] -:context: microshift-cli-tools-introduction - -toc::[] - -You can use different command-line interface (CLI) tools to build, deploy, and manage {product-title} clusters and workloads. With CLI tools, you can complete various administration and development operations from the terminal to manage deployments and interact with each component of the system. 
- -CLI tools available for use with {product-title} are the following: - -* Built-in `microshift` command types -* Linux CLI tools -* Kubernetes CLI (`kubectl`) -* The {oc-first} tool with an enabled subset of commands - -[NOTE] -==== -Commands for multi-node deployments, projects, and developer tooling are not supported by {product-title}. -==== - -[role="_additional-resources"] -[id="additional-resources_microshift-cli-tools"] -.Additional resources - -* xref:..//microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool for MicroShift]. - -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/cli_tools/openshift-cli-oc[Detailed description of the OpenShift CLI (oc)]. - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9[Red Hat Enterprise Linux (RHEL) documentation for specific use cases]. \ No newline at end of file diff --git a/microshift_cli_ref/microshift-cli-using-oc.adoc b/microshift_cli_ref/microshift-cli-using-oc.adoc deleted file mode 100644 index 68b8a08ae508..000000000000 --- a/microshift_cli_ref/microshift-cli-using-oc.adoc +++ /dev/null @@ -1,84 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-cli-using-oc"] -= Using the oc tool -include::_attributes/attributes-microshift.adoc[] -:context: microshift-using-oc - -toc::[] - -The optional OpenShift CLI (`oc`) tool provides a subset of `oc` commands for {product-title} deployments. Using `oc` is convenient if you are familiar with {OCP} and Kubernetes. - -include::modules/microshift-cli-oc-about.adoc[leveloffset=+1] - -[id="cli-using-cli_{context}"] -== Using the OpenShift CLI in {product-title} - -Review the following sections to learn how to complete common tasks in {product-title} using the `oc` CLI. - -[id="viewing-pods_{context}"] -=== Viewing pods - -Use the `oc get pods` command to view the pods for the current project. - -[NOTE] -==== -When you run `oc` inside a pod and do not specify a namespace, the namespace of the pod is used by default. -==== - -[source,terminal] ----- -$ oc get pods -o wide ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE -cakephp-ex-1-build 0/1 Completed 0 5m45s 10.131.0.10 ip-10-0-141-74.ec2.internal -cakephp-ex-1-deploy 0/1 Completed 0 3m44s 10.129.2.9 ip-10-0-147-65.ec2.internal -cakephp-ex-1-ktz97 1/1 Running 0 3m33s 10.128.2.11 ip-10-0-168-105.ec2.internal ----- - -[id="viewing-pod-logs_{context}"] -=== Viewing pod logs - -Use the `oc logs` command to view logs for a particular pod. - -[source,terminal] ----- -$ oc logs cakephp-ex-1-deploy ----- - -.Example output -[source,terminal] ----- ---> Scaling cakephp-ex-1 to 1 ---> Success ----- - -[id="listing-supported-apis_{context}"] -=== Listing supported API resources - -Use the `oc api-resources` command to view the list of supported API resources -on the server. - -[source,terminal] ----- -$ oc api-resources ----- - -.Example output -[source,terminal] ----- -NAME SHORTNAMES APIGROUP NAMESPACED KIND -bindings true Binding -componentstatuses cs false ComponentStatus -configmaps cm true ConfigMap -... 
----- - -// Getting help -include::modules/microshift-cli-oc-get-help.adoc[leveloffset=+1] - -//Errors when using oc commands not enabled in MicroShift -include::modules/microshift-oc-apis-errors.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc b/microshift_cli_ref/microshift-oc-cli-commands-list.adoc deleted file mode 100644 index 65b735b52e61..000000000000 --- a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-oc-cli-commands"] -= OpenShift CLI command reference -include::_attributes/attributes-microshift.adoc[] -:context: microshift-oc-cli-commands - -toc::[] - -Descriptions and example commands for OpenShift CLI (`oc`) commands are included in this reference document. You must have `cluster-admin` or equivalent permissions to use these commands. To list administrator commands and information about them, use the following commands: - -* Enter the `oc adm -h` command to list all administrator commands: -+ -.Command syntax -+ -[source,terminal] ----- -$ oc adm -h ----- - -* Enter the `oc --help` command to get additional details for a specific command: -+ -.Command syntax -+ -[source,terminal] ----- -$ oc --help ----- - -[IMPORTANT] -==== -Using `oc --help` lists details for any `oc` command. Not all `oc` commands apply to using {product-title}. -==== - -// The OCP files are auto-generated from the openshift/oc repository; use the MicroShift-specific flags to generate MicroShift command files from the same repo -include::modules/microshift-oc-by-example-content.adoc[leveloffset=+1] - -include::modules/microshift-oc-adm-by-example-content.adoc[leveloffset=+1] diff --git a/microshift_cli_ref/microshift-oc-cli-install.adoc b/microshift_cli_ref/microshift-oc-cli-install.adoc deleted file mode 100644 index 51047052db69..000000000000 --- a/microshift_cli_ref/microshift-oc-cli-install.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-oc-cli-install"] -= Getting started with the OpenShift CLI -include::_attributes/attributes-microshift.adoc[] -:context: cli-oc-installing - -toc::[] - -To use the OpenShift CLI (`oc`) tool, you must download and install it separately from your {product-title} installation. - -[id="installing-the-openshift-cli"] -== Installing the OpenShift CLI - -You can install the OpenShift CLI (`oc`) either by downloading the binary or by using Homebrew. - -// Installing the CLI by downloading the binary -include::modules/cli-installing-cli.adoc[leveloffset=+2] - -// Installing the CLI by using Homebrew -include::modules/cli-installing-cli-brew.adoc[leveloffset=+2] - -// Installing the CLI using RPM -include::modules/cli-installing-cli-rpm.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_cli_ref/microshift-oc-config.adoc b/microshift_cli_ref/microshift-oc-config.adoc deleted file mode 100644 index 166c16641d11..000000000000 --- a/microshift_cli_ref/microshift-oc-config.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="cli-configuring-cli"] -= Configuring the OpenShift CLI -include::_attributes/attributes-microshift.adoc[] -:context: cli-configuring-cli - -toc::[] - -Configure `oc` based on your preferences for working with it. - -[id="cli-enabling-tab-completion"] -== Enabling tab completion - -You can enable tab completion for the Bash or Zsh shells. 
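The modules that follow cover Bash and Zsh in detail. As a quick sketch, generating and installing the Bash completion script with the `oc completion` subcommand typically looks like the following; the output file name is arbitrary, and copying to `/etc/bash_completion.d/` assumes the `bash-completion` package is installed.

[source,terminal]
----
$ oc completion bash > oc_bash_completion
$ sudo cp oc_bash_completion /etc/bash_completion.d/
----

Open a new shell session for the completions to take effect.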
- -// Enabling tab completion for Bash -include::modules/cli-configuring-completion.adoc[leveloffset=+2] - -// Enabling tab completion for Zsh -include::modules/cli-configuring-completion-zsh.adoc[leveloffset=+2] diff --git a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc b/microshift_cli_ref/microshift-usage-oc-kubectl.adoc deleted file mode 100644 index a1016ff3ab97..000000000000 --- a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-usage-oc-kubectl"] -= Using oc and kubectl commands -include::_attributes/attributes-microshift.adoc[] -:context: usage-oc-kubectl - -toc::[] - -The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` CLI tool that ships with {product-title}, or you can gain extended functionality by using the `oc` CLI tool. - -[id="microshift-kubectl-binary_{context}"] -== The kubectl CLI tool - -You can use the `kubectl` CLI tool to interact with Kubernetes primitives on your {product-title} cluster. You can also use existing `kubectl` workflows and scripts for new {product-title} users coming from another Kubernetes environment, or for those who prefer to use the `kubectl` CLI. - -The `kubectl` CLI tool is included in the archive if you download the `oc` CLI tool. - -For more information, read the link:https://kubernetes.io/docs/reference/kubectl/overview/[Kubernetes CLI tool documentation]. - -[id="microshift-oc-binary_{context}"] -== The oc CLI tool - -The `oc` CLI tool offers the same capabilities as the `kubectl` CLI tool, but it extends to natively support additional {product-title} features, including: - -* **Route resource** -+ -The `Route` resource object is specific to {product-title} distributions, and builds upon standard Kubernetes primitives. -+ -* **Additional commands** -+ -The additional command `oc new-app`, for example, makes it easier to get new applications started using existing source code or pre-built images. - -[IMPORTANT] -==== -If you installed an earlier version of the `oc` CLI tool, you cannot use it to complete all of the commands in {product-title} {ocp-version}. If you want the latest features, you must download and install the latest version of the `oc` CLI tool corresponding to your {product-title} version. -==== - -Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server. - -.Compatibility Matrix - -[cols="1,1,1"] -|=== - -| -|*X.Y* (`oc` Client) -|*X.Y+N* footnote:versionpolicyn[Where *N* is a number greater than or equal to 1.] (`oc` Client) - -|*X.Y* (Server) -|image:redcircle-1.png[] -|image:redcircle-3.png[] - -|*X.Y+N* footnote:versionpolicyn[] (Server) -|image:redcircle-2.png[] -|image:redcircle-1.png[] - -|=== -image:redcircle-1.png[] Fully compatible. - -image:redcircle-2.png[] `oc` client might not be able to access server features. - -image:redcircle-3.png[] `oc` client might provide options and features that might not be compatible with the accessed server. 
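A quick way to see where a given client and server pair sits in this matrix is to compare the versions reported by `oc version`, which prints the client version and the Kubernetes version of the server it is connected to.

[source,terminal]
----
$ oc version
----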
diff --git a/microshift_cli_ref/modules b/microshift_cli_ref/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_cli_ref/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_cli_ref/snippets b/microshift_cli_ref/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_cli_ref/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_configuring/_attributes b/microshift_configuring/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_configuring/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_configuring/images b/microshift_configuring/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_configuring/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_configuring/microshift-cluster-access-kubeconfig.adoc b/microshift_configuring/microshift-cluster-access-kubeconfig.adoc deleted file mode 100644 index 78b503705d15..000000000000 --- a/microshift_configuring/microshift-cluster-access-kubeconfig.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-kubeconfig"] -= Cluster access with kubeconfig -include::_attributes/attributes-microshift.adoc[] -:context: microshift-kubeconfig - -toc::[] - -Learn about how `kubeconfig` files are used with {product-title} deployments. CLI tools use `kubeconfig` files to communicate with the API server of a cluster. These files provide cluster details, IP addresses, and other information needed for authentication. - -include::modules/microshift-kubeconfig-overview.adoc[leveloffset=+1] - -include::modules/microshift-kubeconfig-local-access.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - -include::modules/microshift-kubeconfig-remote-con.adoc[leveloffset=+1] - -include::modules/microshift-kubeconfig-generating-remote-kcfiles.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_configuring/microshift-using-config-tools.adoc b/microshift_configuring/microshift-using-config-tools.adoc deleted file mode 100644 index c8f77a24a629..000000000000 --- a/microshift_configuring/microshift-using-config-tools.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-using-config-tools"] -= How configuration tools work -include::_attributes/attributes-microshift.adoc[] -:context: microshift-configuring - -toc::[] - -A YAML file customizes {product-title} instances with your preferences, settings, and parameters. 
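The modules that follow document the supported fields and their defaults. Purely to illustrate the shape of the file, a customization might override a few settings as shown below; the keys reflect commonly used options, the values are placeholders, and `/etc/microshift/config.yaml` is the usual location for user-provided configuration.

.Illustrative configuration snippet
[source,yaml]
----
dns:
  baseDomain: microshift.example.com <1>
node:
  hostnameOverride: microshift-node-1 <2>
network:
  serviceNodePortRange: 30000-32767 <3>
----
<1> Placeholder base domain for the deployment.
<2> Placeholder node name override.
<3> The default NodePort range; see the NodePort limits module for how to change it.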
- -include::modules/microshift-config-yaml.adoc[leveloffset=+1] - -include::modules/microshift-config-nodeport-limits.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_configuring/modules b/microshift_configuring/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_configuring/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_configuring/snippets b/microshift_configuring/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_configuring/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_getting_started/_attributes b/microshift_getting_started/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_getting_started/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_getting_started/images b/microshift_getting_started/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_getting_started/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_getting_started/microshift-architecture.adoc b/microshift_getting_started/microshift-architecture.adoc deleted file mode 100644 index 038212cde83f..000000000000 --- a/microshift_getting_started/microshift-architecture.adoc +++ /dev/null @@ -1,77 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-architecture"] -= Architecture -include::_attributes/attributes-microshift.adoc[] -include::_attributes/common-attributes.adoc[] -:context: microshift-architecture - -toc::[] - -Learn the specifics of {product-title} architecture including design intent, how it differs from {oke}, and API compatibility. - -[id="microshift-architectural-design_{context}"] -== Architectural design -{product-title} is a single-node container orchestration runtime designed to extend the benefits of using containers for running applications to low-resource edge environments. Because {product-title} is primarily a platform for deploying applications, only the APIs and features essential to operating in edge and small form factor computing environments are included. - -For example, {product-title} contains only the following Kubernetes cluster capabilities: - -* Networking -* Ingress -* Storage -* Helm - -{product-title} also provides the following Kubernetes functions: - -* Orchestration -* Security - -To optimize your deployments, use {product-title} with a compatible operating system, such as {op-system-ostree-first}. Using {product-title} and {op-system-ostree-first} together forms {op-system-bundle}. Virtual machines are handled by the operating system in {product-title} deployments. - -.{product-title} as part of {op-system-bundle}. 
-image::311_RHDevice_Edge_Overview_0223_1.png[<{product-title} is tasked with only the Kubernetes cluster services networking, ingress, storage, helm, with additional Kubernetes functions of orchestration and security, as the following diagram illustrates.>] - -The following operational differences from {oke} can help you understand where {product-title} can be deployed: - -[id="microshift-differences-oke_{context}"] -== Key differences from {oke} - -* Devices with {product-title} installed are self-managing -* Compatible with RPM-OStree-based systems -* Uses only the APIs needed for essential functions, such as security and runtime controls -* Enables a subset of commands from the OpenShift CLI (`oc`) tool -* Does not support workload high availability (HA) or horizontal scalability with the addition of worker nodes - -.{product-title} differences from {oke}. -image::311_RHDevice_Edge_Overview_0223_2.png[<{product-title} is tasked with only the Kubernetes cluster capabilities of networking, ingress, storage, helm, with the additional Kubernetes functions of orchestration and security, as the following diagram illustrates.>] - -Figure 2 shows that {oke} has the same cluster capabilities as {product-title}, and adds the following: - -* Install -* Over-the-air updates -* Cluster Operators -* Operator Lifecycle Manager -* Monitoring -* Logging -* Registry -* Authorization -* Console -* Cloud Integration -* Virtual Machines (VMs) through {VirtProductName} - -In {oke} and other {OCP} deployments, all of the components from the operating system through the cluster capabilities work as one comprehensive unit, with full cluster services for a multi-node Kubernetes workload. With {product-title}, functions such as over-the-air-updates, monitoring, and logging, are performed by the operating system. - -[id="microshift-openshift-apis_{context}"] -== {product-title} OpenShift APIs - -In addition to standard Kubernetes APIs, {product-title} includes a small subset of the APIs supported by {OCP}. - -[cols="1,1",options="header"] -|=== -^| API ^| API group -| xref:../microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[Route] -| route.openshift.io/v1 -| xref:../microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[SecurityContextConstraints] -| security.openshift.io/v1 -|=== - -include::modules/microshift-k8s-apis.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_getting_started/microshift-understanding.adoc b/microshift_getting_started/microshift-understanding.adoc deleted file mode 100644 index 1cd5f1b01264..000000000000 --- a/microshift_getting_started/microshift-understanding.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: ASSEMBLY -[id="con-microshift-understanding"] -= Understanding {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: microshift-understanding - -toc::[] - -Get an overview of what you can do with {product-title}, a Kubernetes distribution derived from {OCP} that is designed for optimizing small form factor devices and edge computing. 
- -include::modules/microshift-about.adoc[leveloffset=+1] diff --git a/microshift_getting_started/modules b/microshift_getting_started/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_getting_started/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_getting_started/snippets b/microshift_getting_started/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_getting_started/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_install/_attributes b/microshift_install/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_install/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_install/images b/microshift_install/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_install/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_install/microshift-embed-in-rpm-ostree.adoc b/microshift_install/microshift-embed-in-rpm-ostree.adoc deleted file mode 100644 index 202e38dc9bd3..000000000000 --- a/microshift_install/microshift-embed-in-rpm-ostree.adoc +++ /dev/null @@ -1,59 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-embed-in-rpm-ostree"] -= Embedding {product-title} in a {op-system-ostree} image -include::_attributes/attributes-microshift.adoc[] -:context: microshift-embed-in-rpm-ostree - -toc::[] - -You can embed {product-title} into a {op-system-ostree-first} {op-system-version} image. Use this guide to build a {op-system} image containing {product-title}. - -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-preparing-for-image-building.adoc[leveloffset=+1] - -include::modules/microshift-adding-repos-to-image-builder.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-image-builder-system-requirements_setting-up-image-builder[Image Builder system requirements] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-installing-image-builder_setting-up-image-builder[Installing Image Builder] - - -include::modules/microshift-adding-service-to-blueprint.adoc[leveloffset=+1] - -include::modules/microshift-creating-ostree-iso.adoc[leveloffset=+1] - -include::modules/microshift-add-blueprint-build-iso.adoc[leveloffset=+1] - -include::modules/microshift-download-iso-prep-for-use.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/composing-a-rhel-for-edge-image-using-image-builder-command-line_composing-installing-managing-rhel-for-edge-images[Creating a RHEL for Edge Container blueprint using image builder CLI] -* 
link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/composing-a-rhel-for-edge-image-using-image-builder-command-line_composing-installing-managing-rhel-for-edge-images#image-customizations_composing-a-rhel-for-edge-image-using-image-builder-command-line[Supported image customizations] -* link:https://www.osbuild.org/guides/image-builder-on-premises/building-ostree-images.html#building-ostree-image[Building ostree images] -* link:https://www.osbuild.org/guides/image-builder-on-premises/blueprint-reference.html[Blueprint reference] -* link:https://podman.io/docs/installation[Installing podman] - -include::modules/microshift-provisioning-ostree.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/composing_installing_and_managing_rhel_for_edge_images/index[{op-system-ostree} documentation] -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-system-requirements_microshift-install-rpm[System requirements for installing MicroShift] -* link:https://console.redhat.com/openshift/install/pull-secret[Red Hat Hybrid Cloud Console pull secret] -* xref:../microshift_networking/microshift-firewall.adoc#microshift-firewall-req-settings_microshift-firewall[Required firewall settings] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/performing_an_advanced_rhel_9_installation/creating-kickstart-files_installing-rhel-as-an-experienced-user[Creating a Kickstart file] -* link:https://access.redhat.com/solutions/60959[How to embed a Kickstart file into an ISO image] - -include::modules/microshift-accessing.adoc[leveloffset=+1] - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] diff --git a/microshift_install/microshift-greenboot.adoc b/microshift_install/microshift-greenboot.adoc deleted file mode 100644 index 58070df51102..000000000000 --- a/microshift_install/microshift-greenboot.adoc +++ /dev/null @@ -1,44 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-greenboot"] -= The greenboot health check -include::_attributes/attributes-microshift.adoc[] -:context: microshift-greenboot - -toc::[] - -Greenboot is the generic health check framework for the `systemd` service on RPM-OSTree-based systems. The `microshift-greenboot` RPM and `greenboot-default-health-check` are optional RPM packages you can install. Greenboot is used to assess system health and automate a rollback to the last healthy state in the event of software trouble. - -This health check framework is especially useful when you need to check for software problems and perform system rollbacks on edge devices where direct serviceability is either limited or non-existent. When health check scripts are installed and configured, health checks run every time the system starts. - -Using greenboot can reduce your risk of being locked out of edge devices during updates and prevent a significant interruption of service if an update fails. When a failure is detected, the system boots into the last known working configuration using the `rpm-ostree` rollback capability. - -A {product-title} health check script is included in the `microshift-greenboot` RPM. 
The `greenboot-default-health-check` RPM includes health check scripts verifying that DNS and `ostree` services are accessible. You can also create your own health check scripts based on the workloads you are running. You can write one that verifies that an application has started, for example. - -[NOTE] -==== -Rollback is not possible in the case of an update failure on a system not using OSTree. This is true even though health checks might run. -==== - -include::modules/microshift-greenboot-dir-structure.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-microshift-health-script.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-systemd-journal-data.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_running_apps/microshift-applications.adoc#microshift-manifests-example_applications-microshift[Auto applying manifests] - -include::modules/microshift-greenboot-updates-workloads.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-workloads-validation.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-health-check-log.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-prerollback-log.adoc[leveloffset=+1] - -include::modules/microshift-greenboot-check-update.adoc[leveloffset=+1] - -//[role="_additional-resources_microshift-greenboot"] -//.Additional resources -//once the greenboot application health check is merged, an assembly-level xref can go here \ No newline at end of file diff --git a/microshift_install/microshift-install-rpm.adoc b/microshift_install/microshift-install-rpm.adoc deleted file mode 100644 index abd5e837642f..000000000000 --- a/microshift_install/microshift-install-rpm.adoc +++ /dev/null @@ -1,50 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-install-rpm"] -= Installing {product-title} from an RPM package -include::_attributes/attributes-microshift.adoc[] -:context: microshift-install-rpm - -toc::[] - -You can install {product-title} from an RPM package on a machine with {op-system-base-full} {op-system-version}. - -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-install-system-requirements.adoc[leveloffset=+1] - -include::modules/microshift-install-rpm-before.adoc[leveloffset=+1] - -include::modules/microshift-install-rpm-preparing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* Download the link:https://console.redhat.com/openshift/install/pull-secret[pull secret] from the Red Hat Hybrid Cloud Console. -* xref:../microshift_configuring/microshift-using-config-tools.adoc#microshift-using-config-tools[Configuring MicroShift]. -* For more options on partition configuration, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/performing_a_standard_rhel_9_installation/index#manual-partitioning_graphical-installation[Configuring Manual Partitioning]. -* For more information about resizing your existing LVs to free up capacity in your VGs, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/configuring_and_managing_logical_volumes/index#managing-lvm-volume-groups_configuring-and-managing-logical-volumes[Managing LVM Volume Groups]. 
-* For more information about creating VGs and PVs, read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_logical_volumes/overview-of-logical-volume-management_configuring-and-managing-logical-volumes[Overview of logical volume management]. - -include::modules/microshift-install-rpms.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-system-requirements_microshift-install-rpm[System requirements for installing MicroShift]. -* xref:../microshift_install/microshift-install-rpm.adoc#microshift-install-rpm-preparing_microshift-install-rpm[Preparing to install MicroShift from an RPM package]. - -include::modules/microshift-service-starting.adoc[leveloffset=+1] - -include::modules/microshift-service-stopping.adoc[leveloffset=+1] - -include::modules/microshift-accessing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool]. - -include::modules/microshift-accessing-cluster-locally.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-open-firewall.adoc[leveloffset=+2] - -include::modules/microshift-accessing-cluster-remotely.adoc[leveloffset=+2] - -//note: additional resources are deliberately set without ID and context so that they trail modules; these are not intended to appear as assembly-level additional resources \ No newline at end of file diff --git a/microshift_install/modules b/microshift_install/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_install/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_install/snippets b/microshift_install/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_install/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_networking/_attributes b/microshift_networking/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_networking/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_networking/images b/microshift_networking/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_networking/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_networking/microshift-firewall.adoc b/microshift_networking/microshift-firewall.adoc deleted file mode 100644 index ccd6d7bba098..000000000000 --- a/microshift_networking/microshift-firewall.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-using-a-firewall"] -= Using a firewall -include::_attributes/attributes-microshift.adoc[] -:context: microshift-firewall - -toc::[] - -Firewalls are not required in {product-title}, but using a firewall can prevent undesired access to the {product-title} API. 
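The modules included below describe the required and optional settings in full. As a rough sketch of what this configuration looks like with `firewalld`, trusting the pod network and opening the API server port typically involves commands along these lines; the CIDR and port shown are the common defaults, so confirm them against the required firewall settings module.

[source,terminal]
----
$ sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16
$ sudo firewall-cmd --permanent --zone=public --add-port=6443/tcp
$ sudo firewall-cmd --reload
----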
- -include::modules/microshift-firewall-about.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources"] -.Additional resources - -* xref:../microshift_networking/microshift-firewall.adoc#microshift-firewall-req-settings_microshift-firewall[Required firewall settings] -* xref:..//microshift_networking/microshift-firewall.adoc#microshift-firewall-allow-traffic_microshift-firewall[Allowing network traffic through the firewall] - -include::modules/microshift-firewalld-install.adoc[leveloffset=+1] -include::modules/microshift-firewall-req-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-opt-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-allow-traffic.adoc[leveloffset=+1] -include::modules/microshift-firewall-apply-settings.adoc[leveloffset=+1] -include::modules/microshift-firewall-verify-settings.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-using-a-firewall"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_firewalls_and_packet_filters/using-and-configuring-firewalld_firewall-packet-filters[RHEL: Using and configuring firewalld] - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_firewalls_and_packet_filters/using-and-configuring-firewalld_firewall-packet-filters#viewing-the-current-status-and-settings-of-firewalld_using-and-configuring-firewalld[RHEL: Viewing the current status of firewalld] - -include::modules/microshift-firewall-known-issue.adoc[leveloffset=+1] diff --git a/microshift_networking/microshift-networking.adoc b/microshift_networking/microshift-networking.adoc deleted file mode 100644 index ba1bdbd99cbd..000000000000 --- a/microshift_networking/microshift-networking.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-applying-networking-settings"] -= Understanding networking settings -include::_attributes/attributes-microshift.adoc[] -:context: microshift-networking - -toc::[] - -Learn how to apply networking customization and default settings to {product-title} deployments. Each node is contained to a single machine and single {product-title}, so each deployment requires individual configuration, pods, and settings. - -Cluster Administrators have several options for exposing applications that run inside a cluster to external traffic and securing network connections: - -* A service such as NodePort - -* API resources, such as `Ingress` and `Route` - -By default, Kubernetes allocates each pod an internal IP address for applications running within the pod. Pods and their containers can have traffic between them, but clients outside the cluster do not have direct network access to pods except when exposed with a service such as NodePort. - -[NOTE] -==== -To troubleshoot connection problems with the NodePort service, read about the known issue in the Release Notes. 
-==== - -include::modules/microshift-cni.adoc[leveloffset=+1] - -include::modules/microshift-configuring-ovn.adoc[leveloffset=+1] - -include::modules/microshift-restart-ovnkube-master.adoc[leveloffset=+1] - -include::modules/microshift-http-proxy.adoc[leveloffset=+1] - -include::modules/microshift-rpm-ostree-https.adoc[leveloffset=+1] - -include::modules/microshift-cri-o-container-runtime.adoc[leveloffset=+1] - -include::modules/microshift-ovs-snapshot.adoc[leveloffset=+1] - -include::modules/microshift-deploying-a-load-balancer.adoc[leveloffset=+1] - -include::modules/microshift-blocking-nodeport-access.adoc[leveloffset=+1] - -include::modules/microshift-mDNS.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-understanding-networking-settings"] -.Additional resources - -* xref:../microshift_release_notes/microshift-4-14-release-notes.adoc#microshift-4-14-known-issues[{product-title} {product-version} release notes --> Known issues] diff --git a/microshift_networking/modules b/microshift_networking/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_networking/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_networking/snippets b/microshift_networking/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_networking/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_release_notes/_attributes b/microshift_release_notes/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_release_notes/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_release_notes/images b/microshift_release_notes/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_release_notes/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_release_notes/microshift-4-12-release-notes.adoc b/microshift_release_notes/microshift-4-12-release-notes.adoc deleted file mode 100644 index dae99bd25c8a..000000000000 --- a/microshift_release_notes/microshift-4-12-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-12-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. diff --git a/microshift_release_notes/microshift-4-13-release-notes.adoc b/microshift_release_notes/microshift-4-13-release-notes.adoc deleted file mode 100644 index 1a1acab21ad5..000000000000 --- a/microshift_release_notes/microshift-4-13-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-13-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. 
diff --git a/microshift_release_notes/microshift-4-14-release-notes.adoc b/microshift_release_notes/microshift-4-14-release-notes.adoc deleted file mode 100644 index 0cdebd29ac07..000000000000 --- a/microshift_release_notes/microshift-4-14-release-notes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="microshift-4-14-release-notes"] -= {product-title} {product-version} release notes -include::_attributes/attributes-microshift.adoc[] -:context: release-notes - -Do not add or edit release notes here. Edit release notes directly in the branch -that they are relevant for. - -Release note changes should be added/edited in their own PR. - -This file is here to allow builds to work. diff --git a/microshift_release_notes/modules b/microshift_release_notes/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_release_notes/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_release_notes/snippets b/microshift_release_notes/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_release_notes/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_rest_api/modules b/microshift_rest_api/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_rest_api/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc b/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc deleted file mode 100644 index a3165a17636c..000000000000 --- a/microshift_rest_api/network_apis/route-route-openshift-io-v1.adoc +++ /dev/null @@ -1,1204 +0,0 @@ -// Automatically generated by 'openshift-apidocs-gen'. Do not edit. -:_content-type: ASSEMBLY -[id="route-route-openshift-io-v1"] -= Route [route.openshift.io/v1] -:toc: macro -:toc-title: - -toc::[] - - -Description:: -+ --- -A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. - -Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. - -Routers are subject to additional customization and may support additional controls via the annotations field. - -Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. - -To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. 
- -Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). --- - -Type:: - `object` - -Required:: - - `spec` - - -== Specification - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `apiVersion` -| `string` -| APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - -| `kind` -| `string` -| Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -| `metadata` -| `ObjectMeta_v2` -| - -| `spec` -| `object` -| RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response. - -The `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate. - -| `status` -| `object` -| RouteStatus provides relevant info about the status of a route, including which routers acknowledge it. - -|=== -=== .spec -Description:: -+ --- -RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response. - -The `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate. 
--- - -Type:: - `object` - -Required:: - - `to` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `alternateBackends` -| `array` -| alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. - -| `alternateBackends[]` -| `object` -| RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. - -| `host` -| `string` -| host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. - -| `path` -| `string` -| path that the router watches for, to route traffic for to the service. Optional - -| `port` -| `object` -| RoutePort defines a port mapping from a router to an endpoint in the service endpoints. - -| `subdomain` -| `string` -| subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. - -Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. - -| `tls` -| `object` -| TLSConfig defines config used to secure a route and provide termination - -| `to` -| `object` -| RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. - -| `wildcardPolicy` -| `string` -| Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. - -|=== -=== .spec.alternateBackends -Description:: -+ --- -alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. --- - -Type:: - `array` - - - - -=== .spec.alternateBackends[] -Description:: -+ --- -RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. --- - -Type:: - `object` - -Required:: - - `kind` - - `name` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `kind` -| `string` -| The kind of target that the route is referring to. Currently, only 'Service' is allowed - -| `name` -| `string` -| name of the service/target that is being referred to. e.g. name of the service - -| `weight` -| `integer` -| weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. - -|=== -=== .spec.port -Description:: -+ --- -RoutePort defines a port mapping from a router to an endpoint in the service endpoints. 
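To make the `spec` fields above concrete, a minimal Route manifest might look like the following sketch (the route name, namespace, Service names, and port number are hypothetical; the host reuses the `apps.mycluster.com` example domain from the `subdomain` description):

[source,yaml]
----
apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: frontend                  # hypothetical route name
  namespace: demo                 # hypothetical namespace
spec:
  host: frontend.apps.mycluster.com
  to:                             # primary backend; only the Service kind is allowed
    kind: Service
    name: frontend-v1
    weight: 100
  alternateBackends:              # up to 3 additional weighted backends
  - kind: Service
    name: frontend-v2
    weight: 50
  port:
    targetPort: 8080              # port number or named port on the pods behind the Service
  tls:
    termination: edge             # TLS is terminated at the router
    insecureEdgeTerminationPolicy: Redirect
----

With these weights, roundrobin scheduling sends roughly 100/150 (about two thirds) of requests to `frontend-v1` and the remainder to `frontend-v2`.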
--- - -Type:: - `object` - -Required:: - - `targetPort` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `targetPort` -| `IntOrString` -| The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required - -|=== -=== .spec.tls -Description:: -+ --- -TLSConfig defines config used to secure a route and provide termination --- - -Type:: - `object` - -Required:: - - `termination` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCertificate` -| `string` -| caCertificate provides the cert authority certificate contents - -| `certificate` -| `string` -| certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. - -| `destinationCACertificate` -| `string` -| destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. - -| `insecureEdgeTerminationPolicy` -| `string` -| insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. - -* Allow - traffic is sent to the server on the insecure port (default) * Disable - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port. - -| `key` -| `string` -| key provides key file contents - -| `termination` -| `string` -| termination indicates termination type. - -* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend - -|=== -=== .spec.to -Description:: -+ --- -RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. --- - -Type:: - `object` - -Required:: - - `kind` - - `name` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `kind` -| `string` -| The kind of target that the route is referring to. Currently, only 'Service' is allowed - -| `name` -| `string` -| name of the service/target that is being referred to. e.g. name of the service - -| `weight` -| `integer` -| weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. - -|=== -=== .status -Description:: -+ --- -RouteStatus provides relevant info about the status of a route, including which routers acknowledge it. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `ingress` -| `array` -| ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. 
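As a sketch of the `status.ingress` shape described in this section (all values are illustrative assumptions, not output from a real cluster):

[source,yaml]
----
status:
  ingress:
  - host: frontend.apps.mycluster.com
    routerName: default
    routerCanonicalHostname: router-default.apps.mycluster.com
    wildcardPolicy: None
    conditions:
    - type: Admitted                          # currently the only condition type
      status: "True"
      lastTransitionTime: "2024-01-01T00:00:00Z"
----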
Routes are considered live once they are `Ready` - -| `ingress[]` -| `object` -| RouteIngress holds information about the places where a route is exposed. - -|=== -=== .status.ingress -Description:: -+ --- -ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` --- - -Type:: - `array` - - - - -=== .status.ingress[] -Description:: -+ --- -RouteIngress holds information about the places where a route is exposed. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `conditions` -| `array` -| Conditions is the state of the route, may be empty. - -| `conditions[]` -| `object` -| RouteIngressCondition contains details for the current condition of this route on a particular router. - -| `host` -| `string` -| Host is the host string under which the route is exposed; this value is required - -| `routerCanonicalHostname` -| `string` -| CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. - -| `routerName` -| `string` -| Name is a name chosen by the router to identify itself; this value is required - -| `wildcardPolicy` -| `string` -| Wildcard policy is the wildcard policy that was allowed where this route is exposed. - -|=== -=== .status.ingress[].conditions -Description:: -+ --- -Conditions is the state of the route, may be empty. --- - -Type:: - `array` - - - - -=== .status.ingress[].conditions[] -Description:: -+ --- -RouteIngressCondition contains details for the current condition of this route on a particular router. --- - -Type:: - `object` - -Required:: - - `type` - - `status` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `lastTransitionTime` -| `Time` -| RFC 3339 date and time when this condition last transitioned - -| `message` -| `string` -| Human readable message indicating details about last transition. - -| `reason` -| `string` -| (brief) reason for the condition's last transition, and is usually a machine and human readable constant - -| `status` -| `string` -| Status is the status of the condition. Can be True, False, Unknown. - -| `type` -| `string` -| Type is the type of the condition. Currently only Admitted. - -|=== - -== API endpoints - -The following API endpoints are available: - -* `/apis/route.openshift.io/v1/routes` -- `GET`: list or watch objects of kind Route -* `/apis/route.openshift.io/v1/watch/routes` -- `GET`: watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes` -- `DELETE`: delete collection of Route -- `GET`: list or watch objects of kind Route -- `POST`: create a Route -* `/apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes` -- `GET`: watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}` -- `DELETE`: delete a Route -- `GET`: read the specified Route -- `PATCH`: partially update the specified Route -- `PUT`: replace the specified Route -* `/apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes/{name}` -- `GET`: watch changes to an object of kind Route. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. -* `/apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}/status` -- `GET`: read status of the specified Route -- `PATCH`: partially update status of the specified Route -- `PUT`: replace status of the specified Route - - -=== /apis/route.openshift.io/v1/routes - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - list or watch objects of kind Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `RouteList` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/routes - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. 
-| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete collection of Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - list or watch objects of kind Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `RouteList` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `POST` - -Description:: - create a Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 202 - Accepted -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of Route. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete a Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. 
-| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 202 - Accepted -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - read the specified Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). -| `force` -| `boolean` -| Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/watch/namespaces/{namespace}/routes/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch changes to an object of kind Route. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/route.openshift.io/v1/namespaces/{namespace}/routes/{name}/status - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the Route -| `namespace` -| `string` -| object name and auth scope, such as for teams and projects -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. 
-|=== - -HTTP method:: - `GET` - -Description:: - read status of the specified Route - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update status of the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). -| `force` -| `boolean` -| Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace status of the specified Route - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 201 - Created -| xref:../network_apis/route-route-openshift-io-v1.adoc#route-route-openshift-io-v1[`Route`] schema -| 401 - Unauthorized -| Empty -|=== - - diff --git a/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc b/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc deleted file mode 100644 index 3b17cbb183a4..000000000000 --- a/microshift_rest_api/security_apis/securitycontextconstraints-security-openshift-io-v1.adoc +++ /dev/null @@ -1,660 +0,0 @@ -// Automatically generated by 'openshift-apidocs-gen'. Do not edit. -:_content-type: ASSEMBLY -[id="securitycontextconstraints-security-openshift-io-v1"] -= SecurityContextConstraints [security.openshift.io/v1] -:toc: macro -:toc-title: - -toc::[] - - -Description:: -+ --- -SecurityContextConstraints (SCC) governs the ability to make requests that affect the SecurityContext that applies to a container. Use the security.openshift.io group to manage SecurityContextConstraints. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). --- - -Type:: - `object` - -Required:: - - `allowHostDirVolumePlugin` - - `allowHostIPC` - - `allowHostNetwork` - - `allowHostPID` - - `allowHostPorts` - - `allowPrivilegedContainer` - - `readOnlyRootFilesystem` - - -== Specification - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `allowHostDirVolumePlugin` -| `boolean` -| AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin - -| `allowHostIPC` -| `boolean` -| AllowHostIPC determines if the policy allows host ipc in the containers. - -| `allowHostNetwork` -| `boolean` -| AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - -| `allowHostPID` -| `boolean` -| AllowHostPID determines if the policy allows host pid in the containers. - -| `allowHostPorts` -| `boolean` -| AllowHostPorts determines if the policy allows host ports in the containers. - -| `allowPrivilegeEscalation` -| `` -| AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. - -| `allowPrivilegedContainer` -| `boolean` -| AllowPrivilegedContainer determines if a container can request to be run as privileged. - -| `allowedCapabilities` -| `` -| AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'. - -| `allowedFlexVolumes` -| `` -| AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field. - -| `allowedUnsafeSysctls` -| `` -| AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. 
Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - Examples: e.g. "foo/*" allows "foo/bar", "foo/baz", etc. e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - -| `apiVersion` -| `string` -| APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - -| `defaultAddCapabilities` -| `` -| DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities. - -| `defaultAllowPrivilegeEscalation` -| `` -| DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. - -| `forbiddenSysctls` -| `` -| ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - Examples: e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - -| `fsGroup` -| `` -| FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - -| `groups` -| `` -| The groups that have permission to use this security context constraints - -| `kind` -| `string` -| Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -| `metadata` -| `ObjectMeta` -| Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -| `priority` -| `` -| Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name. - -| `readOnlyRootFilesystem` -| `boolean` -| ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. - -| `requiredDropCapabilities` -| `` -| RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. - -| `runAsUser` -| `` -| RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. - -| `seLinuxContext` -| `` -| SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. 
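To give the field list above some shape, a minimal SecurityContextConstraints manifest might look like the following sketch (the name, priority, capability list, and group grant are hypothetical; several optional fields described below are omitted):

[source,yaml]
----
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
  name: example-scc               # hypothetical name
priority: 10                      # higher values are evaluated first
allowHostDirVolumePlugin: false
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegedContainer: false
readOnlyRootFilesystem: false
defaultAddCapabilities: []
requiredDropCapabilities:
- KILL                            # hypothetical capability to drop
runAsUser:
  type: MustRunAsRange            # strategy for RunAsUser in the SecurityContext
seLinuxContext:
  type: MustRunAs                 # strategy for SELinux labels
fsGroup:
  type: MustRunAs
groups:
- system:authenticated            # hypothetical group grant
----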
- -| `seccompProfiles` -| `` -| SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default. - -| `supplementalGroups` -| `` -| SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - -| `users` -| `` -| The users who have permissions to use this security context constraints - -| `volumes` -| `` -| Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". To allow no volumes, set to ["none"]. - -|=== - -== API endpoints - -The following API endpoints are available: - -* `/apis/security.openshift.io/v1/securitycontextconstraints` -- `DELETE`: delete collection of SecurityContextConstraints -- `GET`: list objects of kind SecurityContextConstraints -- `POST`: create SecurityContextConstraints -* `/apis/security.openshift.io/v1/watch/securitycontextconstraints` -- `GET`: watch individual changes to a list of SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead. -* `/apis/security.openshift.io/v1/securitycontextconstraints/{name}` -- `DELETE`: delete SecurityContextConstraints -- `GET`: read the specified SecurityContextConstraints -- `PATCH`: partially update the specified SecurityContextConstraints -- `PUT`: replace the specified SecurityContextConstraints -* `/apis/security.openshift.io/v1/watch/securitycontextconstraints/{name}` -- `GET`: watch changes to an object of kind SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -=== /apis/security.openshift.io/v1/securitycontextconstraints - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete collection of SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. 
-|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - list objects of kind SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `SecurityContextConstraintsList` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `POST` - -Description:: - create SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 201 - Created -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 202 - Accepted -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/watch/securitycontextconstraints - - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch individual changes to a list of SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/securitycontextconstraints/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the SecurityContextConstraints -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -|=== - -HTTP method:: - `DELETE` - -Description:: - delete SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `gracePeriodSeconds` -| `integer` -| The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. -| `orphanDependents` -| `boolean` -| Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. -| `propagationPolicy` -| `string` -| Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `DeleteOptions` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `Status` schema -| 202 - Accepted -| `Status` schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `GET` - -Description:: - read the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -|=== - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PATCH` - -Description:: - partially update the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. 
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. -|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| `Patch` schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - -HTTP method:: - `PUT` - -Description:: - replace the specified SecurityContextConstraints - - -.Query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `dryRun` -| `string` -| When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed -| `fieldManager` -| `string` -| fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. -| `fieldValidation` -| `string` -| fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. 
-|=== - -.Body parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `body` -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| -|=== - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 201 - Created -| xref:../security_apis/securitycontextconstraints-security-openshift-io-v1.adoc#securitycontextconstraints-security-openshift-io-v1[`SecurityContextConstraints`] schema -| 401 - Unauthorized -| Empty -|=== - - -=== /apis/security.openshift.io/v1/watch/securitycontextconstraints/{name} - -.Global path parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `name` -| `string` -| name of the SecurityContextConstraints -|=== - -.Global query parameters -[cols="1,1,2",options="header"] -|=== -| Parameter | Type | Description -| `allowWatchBookmarks` -| `boolean` -| allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. -| `continue` -| `string` -| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. -| `fieldSelector` -| `string` -| A selector to restrict the list of returned objects by their fields. Defaults to everything. -| `labelSelector` -| `string` -| A selector to restrict the list of returned objects by their labels. Defaults to everything. -| `limit` -| `integer` -| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. -| `pretty` -| `string` -| If 'true', then the output is pretty printed. -| `resourceVersion` -| `string` -| resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `resourceVersionMatch` -| `string` -| resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset -| `timeoutSeconds` -| `integer` -| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. -| `watch` -| `boolean` -| Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. -|=== - -HTTP method:: - `GET` - -Description:: - watch changes to an object of kind SecurityContextConstraints. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter. - - -.HTTP responses -[cols="1,1",options="header"] -|=== -| HTTP code | Reponse body -| 200 - OK -| `WatchEvent` schema -| 401 - Unauthorized -| Empty -|=== - - diff --git a/microshift_rest_api/understanding-api-support-tiers.adoc b/microshift_rest_api/understanding-api-support-tiers.adoc deleted file mode 100644 index 73a078930fbe..000000000000 --- a/microshift_rest_api/understanding-api-support-tiers.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-api-support-tiers"] -= Understanding API tiers -include::_attributes/common-attributes.adoc[] -:context: understanding-api-tiers - -toc::[] - -[IMPORTANT] -==== -This guidance does not cover layered {product-title} offerings. -==== - -Red Hat requests that application developers validate that any behavior they depend on is explicitly defined in the formal API documentation to prevent introducing dependencies on unspecified implementation-specific behavior or dependencies on bugs in a particular implementation of an API. For example, new releases of an ingress router may not be compatible with older releases if an application uses an undocumented API or relies on undefined behavior. 
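The `SecurityContextConstraints` reference deleted above lists seven required boolean fields plus the strategy, user, and group settings. Purely for orientation, and not as content recovered from that file, a minimal SCC object shaped to that schema could look like the following sketch; the name, priority, and allowed volume types are illustrative choices:

[source,yaml]
----
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
  name: example-restricted          # illustrative name
priority: 10                        # higher values are evaluated first
allowHostDirVolumePlugin: false     # the seven required boolean fields
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegedContainer: false
readOnlyRootFilesystem: false
runAsUser:
  type: MustRunAsRange              # strategy fields described in the specification table
seLinuxContext:
  type: MustRunAs
fsGroup:
  type: MustRunAs
supplementalGroups:
  type: RunAsAny
volumes:                            # allowlist of volume plugins; "*" would allow all
  - configMap
  - emptyDir
  - persistentVolumeClaim
  - secret
users: []                           # access is usually granted through RBAC instead
groups: []
----

The `POST`, `PUT`, and `PATCH` endpoints documented in the deleted reference accept exactly this object shape as the request body.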
- -include::modules/api-support-tiers.adoc[leveloffset=+1] - -include::modules/api-support-tiers-mapping.adoc[leveloffset=+1] - -include::modules/api-support-deprecation-policy.adoc[leveloffset=+1] diff --git a/microshift_rest_api/understanding-compatibility-guidelines.adoc b/microshift_rest_api/understanding-compatibility-guidelines.adoc deleted file mode 100644 index b2153251d9e2..000000000000 --- a/microshift_rest_api/understanding-compatibility-guidelines.adoc +++ /dev/null @@ -1,18 +0,0 @@ -:_content-type: ASSEMBLY -[id="compatibility-guidelines"] -= Understanding API compatibility guidelines -include::_attributes/common-attributes.adoc[] -:context: compatibility-guidelines - -toc::[] - -[IMPORTANT] -==== -This guidance does not cover layered {product-title} offerings. -==== - -include::modules/api-compatibility-guidelines.adoc[leveloffset=+1] - -include::modules/api-compatibility-exceptions.adoc[leveloffset=+1] - -include::modules/api-compatibility-common-terminology.adoc[leveloffset=+1] diff --git a/microshift_running_apps/_attributes b/microshift_running_apps/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_running_apps/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_running_apps/images b/microshift_running_apps/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_running_apps/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_running_apps/microshift-applications.adoc b/microshift_running_apps/microshift-applications.adoc deleted file mode 100644 index aa25d0c15308..000000000000 --- a/microshift_running_apps/microshift-applications.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_content-type: ASSEMBLY -[id="applications-with-microshift"] -= Application deployment with {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: applications-microshift - -toc::[] - -You can use the `kustomize` configuration management tool to deploy applications. Read through the following procedure for an example of how this tool works in {product-title}. - -include::modules/microshift-manifests-overview.adoc[leveloffset=+1] -include::modules/microshift-applying-manifests-example.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_running_apps/microshift-operators.adoc b/microshift_running_apps/microshift-operators.adoc deleted file mode 100644 index bfee1f5a18c4..000000000000 --- a/microshift_running_apps/microshift-operators.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="operators-with-microshift"] -= How Operators work with {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: operators-microshift - -toc::[] - -You can use Operators with {product-title} to create applications that monitor the running services in your cluster. Operators can manage applications and their resources, such as deploying a database or message bus. As customized software running inside your cluster, Operators can be used to implement and automate common operations. - -Operators offer a more localized configuration experience and integrate with Kubernetes APIs and CLI tools such as `kubectl` and `oc`. Operators are designed specifically for your applications. Operators enable you to configure components instead of modifying a global configuration file. - -{product-title} applications are generally expected to be deployed in static environments. 
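Both the application-deployment assembly and this Operator assembly include the same `microshift-manifests-overview` and `microshift-applying-manifests-example` modules, which cover driving deployments through `kustomize` manifests that {product-title} applies for you. The following is only a rough sketch of that manifest layout; the directory path and resource names are assumptions, not values taken from those modules:

[source,yaml]
----
# /etc/microshift/manifests/kustomization.yaml   (path is an assumption)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: example-app            # hypothetical target namespace
resources:
  - namespace.yaml                # referenced files sit in the same directory
  - deployment.yaml
  - service.yaml
----

Installing an Operator the same way would mean listing its CRDs, RBAC objects, and Deployment under `resources` instead of relying on OLM.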
However, Operators are available if helpful in your use case. To determine an Operator's compatibility with {product-title}, check the Operator's documentation. - -[id="how-to-install-operators_{context}"] -== How to install Operators in {product-title} - -To minimize the footprint of {product-title}, Operators are installed directly with manifests instead of using the Operator Lifecycle Manager (OLM). The following examples provide instructions on how you can use the `kustomize` configuration management tool with {product-title} to deploy an application. Use the same steps to install Operators with manifests. - -include::modules/microshift-manifests-overview.adoc[leveloffset=+2] - -include::modules/microshift-applying-manifests-example.adoc[leveloffset=+2] \ No newline at end of file diff --git a/microshift_running_apps/modules b/microshift_running_apps/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_running_apps/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_running_apps/snippets b/microshift_running_apps/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_running_apps/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_storage/_attributes b/microshift_storage/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_storage/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/_attributes b/microshift_storage/container_storage_interface_microshift/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/microshift_storage/container_storage_interface_microshift/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/images b/microshift_storage/container_storage_interface_microshift/images deleted file mode 120000 index 5fa6987088da..000000000000 --- a/microshift_storage/container_storage_interface_microshift/images +++ /dev/null @@ -1 +0,0 @@ -../../images \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc b/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc deleted file mode 100644 index a30265734821..000000000000 --- a/microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="persistent-storage-csi-microshift"] -= Configuring CSI volumes for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: persistent-storage-csi-microshift - -toc::[] - -The Container Storage Interface (CSI) allows {product-title} to consume -storage from storage back ends that implement the -link:https://github.com/container-storage-interface/spec[CSI interface] -as persistent storage. - -[NOTE] -==== -{product-title} {product-version} supports version 1.5.0 of the link:https://github.com/container-storage-interface/spec[CSI specification]. 
-==== - -include::modules/persistent-storage-csi-dynamic-provisioning.adoc[leveloffset=+1] -include::modules/persistent-storage-csi-mysql-example.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/storage/using-container-storage-interface-csi#persistent-storage-csi[{ocp} CSI Overview] \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/modules b/microshift_storage/container_storage_interface_microshift/modules deleted file mode 120000 index 36719b9de743..000000000000 --- a/microshift_storage/container_storage_interface_microshift/modules +++ /dev/null @@ -1 +0,0 @@ -../../modules/ \ No newline at end of file diff --git a/microshift_storage/container_storage_interface_microshift/snippets b/microshift_storage/container_storage_interface_microshift/snippets deleted file mode 120000 index 7bf6da9a51d0..000000000000 --- a/microshift_storage/container_storage_interface_microshift/snippets +++ /dev/null @@ -1 +0,0 @@ -../../snippets \ No newline at end of file diff --git a/microshift_storage/dynamic-provisioning-microshift.adoc b/microshift_storage/dynamic-provisioning-microshift.adoc deleted file mode 100644 index 62b80e93cf2a..000000000000 --- a/microshift_storage/dynamic-provisioning-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="dynamic-provisioning-microshift"] -= Dynamic provisioning for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: dynamic-provisioning-microshift - -toc::[] - -include::modules/dynamic-provisioning-about.adoc[leveloffset=+1] - -include::modules/dynamic-provisioning-defining-storage-class.adoc[leveloffset=+1] - -include::modules/dynamic-provisioning-storage-class-definition.adoc[leveloffset=+2] - -include::modules/dynamic-provisioning-annotations.adoc[leveloffset=+2] - -include::modules/dynamic-provisioning-change-default-class.adoc[leveloffset=+1] diff --git a/microshift_storage/expanding-persistent-volumes-microshift.adoc b/microshift_storage/expanding-persistent-volumes-microshift.adoc deleted file mode 100644 index e583b0ab31d3..000000000000 --- a/microshift_storage/expanding-persistent-volumes-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="expanding-persistent-volumes-microshift"] -= Expanding persistent volumes for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: expanding-persistent-volumes-microshift - -toc::[] - -Learn how to expand persistent volumes in {product-title}. 
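Only module includes follow this introduction, so as a hedged illustration of where those procedures end up: expansion generally amounts to raising `spec.resources.requests.storage` on an existing claim whose storage class sets `allowVolumeExpansion: true`. The claim name, class name, and sizes below are hypothetical:

[source,yaml]
----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-pvc                         # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: example-expandable   # class must allow volume expansion
  resources:
    requests:
      storage: 2Gi                       # raised from an original 1Gi request
----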
- -include::modules/storage-expanding-csi-volumes.adoc[leveloffset=+1] - -include::modules/storage-expanding-local-volumes.adoc[leveloffset=+1] - -include::modules/storage-expanding-filesystem-pvc.adoc[leveloffset=+1] - -include::modules/storage-expanding-recovering-failure.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_storage/generic-ephemeral-volumes-microshift.adoc b/microshift_storage/generic-ephemeral-volumes-microshift.adoc deleted file mode 100644 index c28401c95259..000000000000 --- a/microshift_storage/generic-ephemeral-volumes-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="generic-ephemeral-volumes-microshift"] -= Generic ephemeral volumes for {product-title} -include::_attributes/common-attributes.adoc[] -:context: generic-ephemeral-volumes-microshift - -toc::[] - -include::modules/storage-ephemeral-vols-overview.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-lifecycle.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-security.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-pvc-naming.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-vols-procedure.adoc[leveloffset=+1] diff --git a/microshift_storage/images b/microshift_storage/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_storage/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_storage/index.adoc b/microshift_storage/index.adoc deleted file mode 100644 index 15eb5282f21e..000000000000 --- a/microshift_storage/index.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: ASSEMBLY -[id="storage-overview-microshift"] -= {product-title} storage overview -include::_attributes/attributes-microshift.adoc[] -:context: storage-overview-microshift - -toc::[] - -{product-title} supports multiple types of storage, both for on-premise and cloud providers. You can manage container storage for persistent and non-persistent data in a {product-title} cluster. - -[id="microshift-storage-types"] -== Storage types - -{product-title} storage is broadly classified into two categories, namely ephemeral storage and persistent storage. - -[id="microshift-ephemeral-storage"] -=== Ephemeral storage - -Pods and containers are ephemeral or transient in nature and designed for stateless applications. Ephemeral storage allows administrators and developers to better manage the local storage for some of their operations. To read details about ephemeral storage, click xref:../microshift_storage/understanding-ephemeral-storage-microshift.adoc#understanding-ephemeral-storage-microshift[Understanding ephemeral storage]. - -[id="microshift-persistent-storage"] -=== Persistent storage - -Stateful applications deployed in containers require persistent storage. {product-title} uses a pre-provisioned storage framework called persistent volumes (PV) to allow cluster administrators to provision persistent storage. The data inside these volumes can exist beyond the lifecycle of an individual pod. Developers can use persistent volume claims (PVCs) to request storage requirements. For persistent storage details, read xref:../microshift_storage/understanding-persistent-storage-microshift.adoc#understanding-persistent-storage-microshift[Understanding persistent storage]. - -[id="microshift-dynamic-provisioning-overview"] -=== Dynamic storage provisioning - -Using dynamic provisioning allows you to create storage volumes on-demand, eliminating the need for pre-provisioned storage. 
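Concretely, dynamic provisioning pairs a storage class with claims that reference it, as sketched below; the class name and the `topolvm.io` provisioner string are assumptions chosen to line up with the LVMS plugin covered later in this section:

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-lvms                                       # hypothetical class name
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"    # marks the default class
provisioner: topolvm.io                                    # assumed LVMS/TopoLVM provisioner
volumeBindingMode: WaitForFirstConsumer                    # provision only for scheduled pods
allowVolumeExpansion: true
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: example-lvms          # omit to fall back to the default class
----

Claims that omit `storageClassName` fall back to whichever class carries the default-class annotation.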
For more information about how dynamic provisioning works in {product-title}, read xref:../microshift_storage/microshift-storage-plugin-overview.adoc#microshift-storage-plugin-overview[Dynamic provisioning]. - -//[id="microshift-container-storage-interface"] -//== Container Storage Interface (CSI) - -//CSI is an API specification for the management of container storage across different container orchestration (CO) systems. You can manage the storage volumes within the container native environments, without having specific knowledge of the underlying storage infrastructure. With the CSI, storage works uniformly across different container orchestration systems, regardless of the storage vendors you are using. For more information about CSI, read ../microshift_storage/container_storage_interface_microshift/microshift-persistent-storage-csi.adoc#persistent-storage-csi-microshift[Using Container Storage Interface (CSI) for MicroShift]. \ No newline at end of file diff --git a/microshift_storage/microshift-storage-plugin-overview.adoc b/microshift_storage/microshift-storage-plugin-overview.adoc deleted file mode 100644 index 40cf9b624d2c..000000000000 --- a/microshift_storage/microshift-storage-plugin-overview.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-storage-plugin-overview"] -= Dynamic storage using the LVMS plugin -include::_attributes/attributes-microshift.adoc[] -:context: microshift-storage-plugin-overview - -toc::[] - -{product-title} enables dynamic storage provisioning that is ready for immediate use with the logical volume manager storage (LVMS) Container Storage Interface (CSI) provider. The LVMS plugin is the Red Hat downstream version of TopoLVM, a CSI plugin for managing LVM volumes for Kubernetes. - -LVMS provisions new logical volume management (LVM) logical volumes (LVs) for container workloads with appropriately configured persistent volume claims (PVC). Each PVC references a storage class that represents an LVM Volume Group (VG) on the host node. LVs are only provisioned for scheduled pods. - -include::modules/microshift-lvms-system-requirements.adoc[leveloffset=+1] -include::modules/microshift-lvms-deployment.adoc[leveloffset=+1] -include::modules/microshift-lvmd-yaml-creating.adoc[leveloffset=+1] -include::modules/microshift-lvms-config-example-basic.adoc[leveloffset=+1] -include::modules/microshift-lvms-using.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_storage/modules b/microshift_storage/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_storage/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_storage/snippets b/microshift_storage/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_storage/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_storage/understanding-ephemeral-storage-microshift.adoc b/microshift_storage/understanding-ephemeral-storage-microshift.adoc deleted file mode 100644 index 562e2c31ff70..000000000000 --- a/microshift_storage/understanding-ephemeral-storage-microshift.adoc +++ /dev/null @@ -1,17 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-ephemeral-storage-microshift"] -= Understanding ephemeral storage for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: understanding-ephemeral-storage-microshift - -toc::[] - -Ephemeral storage is unstructured and temporary. 
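In pod terms, and only as a sketch rather than anything quoted from the included modules, ephemeral storage usually appears as an `emptyDir` volume plus `ephemeral-storage` requests and limits on the container; the pod name and image are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: scratch-example                         # hypothetical pod name
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest    # placeholder image
      resources:
        requests:
          ephemeral-storage: 1Gi                # accounted against node-local storage
        limits:
          ephemeral-storage: 2Gi                # exceeding the limit triggers eviction
      volumeMounts:
        - name: scratch
          mountPath: /tmp/scratch
  volumes:
    - name: scratch
      emptyDir:
        sizeLimit: 1Gi                          # optional cap on the emptyDir itself
----

Data in such volumes goes away with the pod, which is exactly what makes ephemeral storage a fit for stateless workloads.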
It is often used with immutable applications. This guide discusses how ephemeral storage works for {product-title}. - -include::modules/storage-ephemeral-storage-overview.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-types.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-manage.adoc[leveloffset=+1] - -include::modules/storage-ephemeral-storage-monitoring.adoc[leveloffset=+1] diff --git a/microshift_storage/understanding-persistent-storage-microshift.adoc b/microshift_storage/understanding-persistent-storage-microshift.adoc deleted file mode 100644 index 136e25ba85f5..000000000000 --- a/microshift_storage/understanding-persistent-storage-microshift.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: ASSEMBLY -[id="understanding-persistent-storage-microshift"] -= Understanding persistent storage for {product-title} -include::_attributes/attributes-microshift.adoc[] -:context: understanding-persistent-storage-microshift - -toc::[] - -Managing storage is a distinct problem from managing compute resources. {product-title} uses the Kubernetes persistent volume (PV) framework to allow cluster administrators to provision persistent storage for a cluster. Developers can use persistent volume claims (PVCs) to request PV resources without having specific knowledge of the underlying storage infrastructure. - -include::modules/storage-persistent-storage-overview.adoc[leveloffset=+1] - -[id="additional-resources_understanding-persistent-storage-microshift"] -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/storage/understanding-persistent-storage#pv-access-modes_understanding-persistent-storage[Access modes for persistent storage] - -include::modules/storage-persistent-storage-lifecycle.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-reclaim-manual.adoc[leveloffset=+2] - -include::modules/storage-persistent-storage-reclaim.adoc[leveloffset=+2] - -include::modules/storage-persistent-storage-pv.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-pvc.adoc[leveloffset=+1] - -include::modules/storage-persistent-storage-fsGroup.adoc[leveloffset=+1] diff --git a/microshift_support/_attributes b/microshift_support/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_support/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_support/images b/microshift_support/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_support/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_support/microshift-etcd.adoc b/microshift_support/microshift-etcd.adoc deleted file mode 100644 index f8edbb6b6956..000000000000 --- a/microshift_support/microshift-etcd.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-etcd"] -= MicroShift etcd -include::_attributes/attributes-microshift.adoc[] -:context: microshift-etcd - -toc::[] - -[role="_abstract"] -{product-title} etcd is delivered as part of the {product-title} RPM. The etcd service is run as a separate process and the lifecycle is managed automatically by {product-title}. 
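The `microshift-config-etcd` module included below is not reproduced in this diff, so the exact tunables are not visible here. As a loosely hedged sketch only, constraining the etcd process would typically go through the {product-title} configuration file, along these assumed lines:

[source,yaml]
----
# /etc/microshift/config.yaml   (path and key names are assumptions, not confirmed here)
etcd:
  memoryLimitMB: 256   # hypothetical cap on the memory available to the etcd process
----

Restarting the `microshift` service would normally be required for a change like this to take effect.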
- -:FeatureName: MicroShift -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-observe-debug-etcd-server.adoc[leveloffset=+1] -include::modules/microshift-config-etcd.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_support/microshift-sos-report.adoc b/microshift_support/microshift-sos-report.adoc deleted file mode 100644 index 003f451db34b..000000000000 --- a/microshift_support/microshift-sos-report.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-sos-report"] -= MicroShift sos report -include::_attributes/attributes-microshift.adoc[] -:context: microshift-sos-report - -toc::[] - -[role="_abstract"] -`sos` is a tool you can use to collect troubleshooting information about a host. An `sos report` will generate a detailed report with all the enabled plugins and data from the different components and applications in a system. - -:FeatureName: MicroShift -include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] - -include::modules/microshift-about-sos-reports.adoc[leveloffset=+1] -include::modules/microshift-gathering-sos-report.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_microshift-sos-report"] -== Additional resources -* link:https://access.redhat.com/solutions/2112[How to provide files to Red Hat Support (vmcore, rhev logcollector, sosreports, heap dumps, log files, etc.] -* link:https://access.redhat.com/solutions/3592[What is an sos report and how to create one in {op-system-base-full}?] \ No newline at end of file diff --git a/microshift_support/modules b/microshift_support/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_support/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_support/snippets b/microshift_support/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_support/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_troubleshooting/_attributes b/microshift_troubleshooting/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_troubleshooting/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_troubleshooting/images b/microshift_troubleshooting/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_troubleshooting/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_troubleshooting/microshift-things-to-know.adoc b/microshift_troubleshooting/microshift-things-to-know.adoc deleted file mode 100644 index bcc13a898716..000000000000 --- a/microshift_troubleshooting/microshift-things-to-know.adoc +++ /dev/null @@ -1,20 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-things-to-know"] -= Responsive restarts and security certificates -include::_attributes/attributes-microshift.adoc[] -:context: microshift-configuring - -toc::[] - -{product-title} responds to system configuration changes and restarts after alterations are detected, including IP address changes, clock adjustments, and security certificate age. - -[id="microshift-ip-address-clock-changes_{context}"] -== IP address changes or clock adjustments - -{product-title} depends on device IP addresses and system-wide clock settings to remain consistent during its runtime. 
However, these settings may occasionally change on edge devices, such as DHCP or Network Time Protocol (NTP) updates. - -When such changes occur, some {product-title} components may stop functioning properly. To mitigate this situation, {product-title} monitors the IP address and system time and restarts if either setting change is detected. - -The threshold for clock changes is a time adjustment of greater than 10 seconds in either direction. Smaller drifts on regular time adjustments performed by the Network Time Protocol (NTP) service do not cause a restart. - -include::modules/microshift-certificate-lifetime.adoc[leveloffset=+1] \ No newline at end of file diff --git a/microshift_troubleshooting/microshift-version.adoc b/microshift_troubleshooting/microshift-version.adoc deleted file mode 100644 index 9d3e8d83ce99..000000000000 --- a/microshift_troubleshooting/microshift-version.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-version"] -= Checking which version you have installed -include::_attributes/attributes-microshift.adoc[] -:context: microshift-version - -toc::[] - -To begin troubleshooting, determine which version of {product-title} you have installed. - -include::modules/microshift-version-cli.adoc[leveloffset=+1] - -include::modules/microshift-version-api.adoc[leveloffset=+1] diff --git a/microshift_troubleshooting/modules b/microshift_troubleshooting/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_troubleshooting/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_troubleshooting/snippets b/microshift_troubleshooting/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_troubleshooting/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/microshift_updating/_attributes b/microshift_updating/_attributes deleted file mode 120000 index 93957f02273f..000000000000 --- a/microshift_updating/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes \ No newline at end of file diff --git a/microshift_updating/images b/microshift_updating/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/microshift_updating/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/microshift_updating/microshift-about-updates.adoc b/microshift_updating/microshift-about-updates.adoc deleted file mode 100644 index 8f8c152235e4..000000000000 --- a/microshift_updating/microshift-about-updates.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-about-updates"] -= About {product-title} updates -include::_attributes/attributes-microshift.adoc[] -:context: microshift-about-updates - -toc::[] - -You can update a {product-title} cluster by using the OpenShift CLI (`oc`). -// This PR is for the book build. Note that the OCP structure consists of a landing page of xrefs to other major sections within the book. MicroShift likely does not require that depth of structure, so starting simply with one page. - -[id="microshift-about-updates-understanding-microshift-updates"] -== Understanding {product-title} updates -{product-title} updates are available as either RPMs or by embedding the {product-title} image in an RPM OSTree blueprint. -You can update an {product-title} cluster by using the OpenShift CLI (`oc`). -//Platform administrators can view new update options by looking at the output of the `oc adm upgrade` command. -//An update begins when... 
- -[NOTE] -==== -Operators previously installed must be reinstalled using manifests. -==== - -[id="microshift-about-updates-rpm-updates"] -=== RPM updates -Using the RPM update method replaces your existing version. No rollback is possible with this update type. -//we can call a module here or xref out; not sure the best method for our use case until we have the content - -[id="microshift-about-updates-rpm-ostree-updates"] -=== RPM OSTree updates -Using the RPM OSTree update path allows for system rollback. -//we can call a module here or xref out; not sure the best method for our use case until we have the content - -[id="microshift-about-updates-checking-version-update-compatibility"] -== Checking version update compatibility -Before attempting an update, determine which version of {product-title} you have installed. Only the following update paths are supported: - -* Version 4.13 to 4.14 -//replace with matrix including RHEL versions? -//place xref here to version-check assembly - -[id="microshift-about-updates-update-disconnected-environment"] -== Updating a cluster in a disconnected environment -//sample topic only - -[id="microshift-about-updates-troubleshooting-updates"] -== Troubleshooting updates -//sample topic only \ No newline at end of file diff --git a/microshift_updating/modules b/microshift_updating/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/microshift_updating/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/microshift_updating/snippets b/microshift_updating/snippets deleted file mode 120000 index 9d58b92e5058..000000000000 --- a/microshift_updating/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets/ \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/_attributes b/migrating_from_ocp_3_to_4/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/migrating_from_ocp_3_to_4/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc b/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc deleted file mode 100644 index 3114c50dd278..000000000000 --- a/migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-migrating-from-3-to-4"] -= About migrating from {product-title} 3 to 4 -include::_attributes/common-attributes.adoc[] -:context: about-migrating-from-3-to-4 - -toc::[] - -{product-title} 4 contains new technologies and functionality that result in a cluster that is self-managing, flexible, and automated. {product-title} 4 clusters are deployed and managed very differently from {product-title} 3. - -The most effective way to migrate from {product-title} 3 to 4 is by using a CI/CD pipeline to automate deployments in an link:https://www.redhat.com/en/topics/devops/what-is-application-lifecycle-management-alm[application lifecycle management] framework. - -If you do not have a CI/CD pipeline or if you are migrating stateful applications, you can use the {mtc-full} ({mtc-short}) to migrate your application workloads. - -You can use Red Hat Advanced Cluster Management for Kubernetes to help you import and manage your {product-title} 3 clusters easily, enforce policies, and redeploy your applications. Take advantage of the link:https://www.redhat.com/en/engage/free-access-redhat-e-202202170127[free subscription] to use Red Hat Advanced Cluster Management to simplify your migration process. 
- -To successfully transition to {product-title} 4, review the following information: - -xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[Differences between {product-title} 3 and 4]:: -* Architecture -* Installation and upgrade -* Storage, network, logging, security, and monitoring considerations - -xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#about-mtc-3-4[About the {mtc-full}]:: -* Workflow -* File system and snapshot copy methods for persistent volumes (PVs) -* Direct volume migration -* Direct image migration - -xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#advanced-migration-options-3-4[Advanced migration options]:: -* Automating your migration with migration hooks -* Using the {mtc-short} API -* Excluding resources from a migration plan -* Configuring the `MigrationController` custom resource for large-scale migrations -* Enabling automatic PV resizing for direct volume migration -* Enabling cached Kubernetes clients for improved performance - -For new features and enhancements, technical changes, and known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. diff --git a/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc b/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc deleted file mode 100644 index 26bce76859f7..000000000000 --- a/migrating_from_ocp_3_to_4/about-mtc-3-4.adoc +++ /dev/null @@ -1,27 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-mtc-3-4"] -= About the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: about-mtc-3-4 - -toc::[] - -The {mtc-full} ({mtc-short}) enables you to migrate stateful application workloads from {product-title} 3 to {product-version} at the granularity of a namespace. - -[IMPORTANT] -==== -Before you begin your migration, be sure to review the xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4]. -==== - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -The {mtc-short} console is installed on the target cluster by default. You can configure the {mtc-full} Operator to install the console on an link:https://access.redhat.com/articles/5064151[{product-title} 3 source cluster or on a remote cluster]. - -{mtc-short} supports the file system and snapshot data copy methods for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -The service catalog is deprecated in {product-title} 4. You can migrate workload resources provisioned with the service catalog from {product-title} 3 to 4 but you cannot perform service catalog actions such as `provision`, `deprovision`, or `update` on these workloads after migration. The {mtc-short} console displays a message if the service catalog resources cannot be migrated. 
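As a rough illustration of the custom-resource-based API described above, you can list the main {mtc-short} resources from the CLI. This sketch assumes {mtc-short} is installed in its default `openshift-migration` namespace:

[source,terminal]
----
$ oc get migcluster,migstorage,migplan,migmigration -n openshift-migration
----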
- -include::modules/migration-terminology.adoc[leveloffset=+1] -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] -include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1] -include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1] diff --git a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc deleted file mode 100644 index e7962ab751c8..000000000000 --- a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-migration-options-3-4"] -= Advanced migration options -include::_attributes/common-attributes.adoc[] -:context: advanced-migration-options-3-4 -:advanced-migration-options-3-4: - -toc::[] - -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. - -include::modules/migration-terminology.adoc[leveloffset=+1] - -include::modules/migration-migrating-on-prem-to-cloud.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* For information about creating a MigCluster CR manifest for each remote cluster, see xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-migrating-applications-api_advanced-migration-options-3-4[Migrating an application by using the {mtc-short} API]. -* For information about adding a cluster using the web console, see xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migrating-applications-mtc-web-console_migrating-applications-3-4[Migrating your applications by using the {mtc-short} web console] - -[id="migrating-applications-cli_{context}"] -== Migrating applications by using the command line - -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. - -include::modules/migration-prerequisites.adoc[leveloffset=+2] -include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+2] -include::modules/migration-configuring-proxies.adoc[leveloffset=+3] -include::modules/migration-migrating-applications-api.adoc[leveloffset=+2] -include::modules/migration-state-migration-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-state-migration_{context}"] -[discrete] -=== Additional resources - -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-pvcs_advanced-migration-options-3-4[Excluding PVCs from migration] to select PVCs for state migration. -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-mapping-pvcs_advanced-migration-options-3-4[Mapping PVCs] to migrate source PV data to provisioned PVCs on the destination cluster. -* See xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-kubernetes-objects_advanced-migration-options-3-4[Migrating Kubernetes objects] to migrate the Kubernetes objects that constitute an application's state. - -include::modules/migration-hooks.adoc[leveloffset=+1] -include::modules/migration-writing-ansible-playbook-hook.adoc[leveloffset=+2] - -[id="migration-plan-options_{context}"] -== Migration plan options - -You can exclude, edit, and map components in the `MigPlan` custom resource (CR). 
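For example, a minimal sketch of opening the plan for editing before applying the options described in the modules that follow; `<migplan_name>` is a placeholder and the default `openshift-migration` namespace is assumed:

[source,terminal]
----
$ oc edit migplan <migplan_name> -n openshift-migration
----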
- -include::modules/migration-excluding-resources.adoc[leveloffset=+2] -include::modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc[leveloffset=+2] -include::modules/migration-excluding-pvcs.adoc[leveloffset=+2] -include::modules/migration-mapping-pvcs.adoc[leveloffset=+2] -include::modules/migration-editing-pvs-in-migplan.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-editing-pv-attributes_{context}"] -[discrete] -==== Additional resources - -* For details about the `move` and `copy` actions, see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[MTC workflow]. -* For details about the `skip` action, see xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-pvcs_advanced-migration-options-3-4[Excluding PVCs from migration]. -* For details about the file system and snapshot copy methods, see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods]. - -include::modules/migration-kubernetes-objects.adoc[leveloffset=+2] - -[id="migration-controller-options_{context}"] -== Migration controller options - -You can edit migration plan limits, enable persistent volume resizing, or enable cached Kubernetes clients in the `MigrationController` custom resource (CR) for large migrations and improved performance. - -include::modules/migration-changing-migration-plan-limits.adoc[leveloffset=+2] -include::modules/migration-enabling-pv-resizing-dvm.adoc[leveloffset=+2] -include::modules/migration-enabling-cached-kubernetes-clients.adoc[leveloffset=+2] - -:advanced-migration-options-3-4!: diff --git a/migrating_from_ocp_3_to_4/images b/migrating_from_ocp_3_to_4/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/migrating_from_ocp_3_to_4/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/index.adoc b/migrating_from_ocp_3_to_4/index.adoc deleted file mode 100644 index 349b4d9779eb..000000000000 --- a/migrating_from_ocp_3_to_4/index.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: ASSEMBLY -[id="migration-from-version-3-to-4-overview"] -= Migration from OpenShift Container Platform 3 to 4 overview -include::_attributes/common-attributes.adoc[] -:context: migration-from-version-3-to-4-overview - -toc::[] - -{product-title} 4 clusters are different from {product-title} 3 clusters. {product-title} 4 clusters contain new technologies and functionality that result in a cluster that is self-managing, flexible, and automated. To learn more about migrating from {product-title} 3 to 4 see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from OpenShift Container Platform 3 to 4]. - -[id="mtc-3-to-4-overview-differences-mtc"] -== Differences between {product-title} 3 and 4 -Before migrating from {product-title} 3 to 4, you can check xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4]. 
Review the following information: - -* xref:../architecture/architecture.adoc#architecture[Architecture] -* xref:../architecture/architecture-installation.adoc#architecture-installation[Installation and update] -* xref:../storage/index.adoc#index[Storage], xref:../networking/understanding-networking.adoc#understanding-networking[network], xref:../logging/cluster-logging.adoc#cluster-logging[logging], xref:../security/index.adoc#index[security], and xref:../monitoring/monitoring-overview.adoc#monitoring-overview[monitoring considerations] - -[id="mtc-3-to-4-overview-planning-network-considerations-mtc"] -== Planning network considerations -Before migrating from {product-title} 3 to 4, review the xref:../migrating_from_ocp_3_to_4/planning-migration-3-4.adoc#planning-migration-3-4[differences between {product-title} 3 and 4] for information about the following areas: - -* xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#dns-considerations_planning-considerations-3-4[DNS considerations] -** xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#migration-isolating-dns-domain-of-target-cluster-from-clients_planning-considerations-3-4[Isolating the DNS domain of the target cluster from the clients]. -** xref:../migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc#migration-setting-up-target-cluster-to-accept-source-dns-domain_planning-considerations-3-4[Setting up the target cluster to accept the source DNS domain]. - -You can migrate stateful application workloads from {product-title} 3 to 4 at the granularity of a namespace. To learn more about MTC see xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#about-mtc-3-4[Understanding MTC]. - -[NOTE] -==== -If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -[id="mtc-overview-install-mtc"] -== Installing MTC -Review the following tasks to install the MTC: - -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-mtc-on-ocp-4_installing-3-4[Install the {mtc-full} Operator on target cluster by using Operator Lifecycle Manager (OLM)]. -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Install the legacy {mtc-full} Operator on the source cluster manually]. -. xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#configuring-replication-repository_installing-3-4[Configure object storage to use as a replication repository]. - -[id="mtc-overview-upgrade-mtc"] -== Upgrading MTC -You xref:../migrating_from_ocp_3_to_4/upgrading-3-4.adoc#upgrading-3-4[upgrade the {mtc-full} ({mtc-short})] on {product-title} {product-version} by using OLM. You upgrade {mtc-short} on {product-title} 3 by reinstalling the legacy {mtc-full} Operator. - -[id="mtc-overview-mtc-checklists"] -== Reviewing premigration checklists -Before you migrate your application workloads with the Migration Toolkit for Containers (MTC), review the xref:../migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc#premigration-checklists-3-4[premigration checklists]. 
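Before upgrading or migrating, one quick way to confirm which {mtc-full} Operator version OLM has installed on the target cluster is to list the installed cluster service versions. This is only a sketch and assumes the default `openshift-migration` namespace:

[source,terminal]
----
$ oc get csv -n openshift-migration
----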
- -[id="mtc-overview-migrate-mtc-applications"] -== Migrating applications -You can migrate your applications by using the MTC xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migrating-applications-mtc-web-console_migrating-applications-3-4[web console] or xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migrating-applications-cli_advanced-migration-options-3-4[the command line]. - -[id="mtc-overview-advanced-migration-options"] -== Advanced migration options -You can automate your migrations and modify MTC custom resources to improve the performance of large-scale migrations by using the following options: - -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-state-migration-cli_advanced-migration-options-3-4[Running a state migration] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-hooks_advanced-migration-options-3-4[Creating migration hooks] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-plan-options_advanced-migration-options-3-4[Editing, excluding, and mapping migrated resources] -* xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-controller-options_advanced-migration-options-3-4[Configuring the migration controller for large migrations] - -[id="mtc-overview-troubleshooting-mtc"] -== Troubleshooting migrations -You can perform the following troubleshooting tasks: - -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-viewing-migration-plan-resources_troubleshooting-3-4[Viewing migration plan resources by using the MTC web console] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-viewing-migration-plan-log_troubleshooting-3-4[Viewing the migration plan aggregated log file] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-mig-log-reader_troubleshooting-3-4[Using the migration log reader] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-accessing-performance-metrics_troubleshooting-3-4[Accessing performance metrics] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-must-gather_troubleshooting-3-4[Using the `must-gather` tool] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-debugging-velero-resources_troubleshooting-3-4[Using the Velero CLI to debug `Backup` and `Restore` CRs] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-using-mtc-crs-for-troubleshooting_troubleshooting-3-4[Using MTC custom resources for troubleshooting] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#common-issues-and-concerns_troubleshooting-3-4[Checking common issues and concerns] - -[id="mtc-overview-roll-back-mtc"] -== Rolling back a migration -You can xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#rolling-back-migration_troubleshooting-3-4[roll back a migration] by using the MTC web console, by using the CLI, or manually. - -[id="mtc-overview-uninstall-mtc"] -== Uninstalling MTC and deleting resources -You can xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-3-4[uninstall the MTC and delete its resources] to clean up the cluster. 
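For the troubleshooting tasks listed above, data collection typically starts with `must-gather`. The following sketch uses the image that the troubleshooting assembly sets in its `:must-gather:` attribute; replace `<mtc_version>` with your installed {mtc-short} version:

[source,terminal]
----
$ oc adm must-gather --image=registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v<mtc_version>
----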
diff --git a/migrating_from_ocp_3_to_4/installing-3-4.adoc b/migrating_from_ocp_3_to_4/installing-3-4.adoc deleted file mode 100644 index f22b2e7ce4d7..000000000000 --- a/migrating_from_ocp_3_to_4/installing-3-4.adoc +++ /dev/null @@ -1,63 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-3-4"] -= Installing the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: installing-3-4 -:installing-3-4: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 3 and 4. - -After you install the {mtc-full} Operator on {product-title} {product-version} by using the Operator Lifecycle Manager, you manually install the legacy {mtc-full} Operator on {product-title} 3. - -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[source cluster or on a remote cluster]. - -After you have installed {mtc-short}, you must configure an object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-3-4[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -You must configure an object storage to use as a replication repository. The {mtc-full} ({mtc-short}) copies data from the source cluster to the replication repository, and then from the replication repository to the target cluster. - -{mtc-short} supports the xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -The following storage providers are supported: - -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-mcg_installing-3-4[Multicloud Object Gateway] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-aws-s3_installing-3-4[Amazon Web Services S3] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-gcp_installing-3-4[Google Cloud Platform] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-azure_installing-3-4[Microsoft Azure Blob] -* Generic S3 object storage, for example, Minio or Ceph S3 - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. 
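As a minimal sanity check for the network-access prerequisite above, you can probe the repository endpoint from a host or pod on each cluster. The endpoint `https://s3.example.com` is hypothetical, so substitute your own:

[source,terminal]
----
$ curl -I https://s3.example.com
----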
- -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+2] -include::modules/migration-configuring-gcp.adoc[leveloffset=+2] -include::modules/migration-configuring-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[{mtc-short} workflow] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods] -* xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-adding-replication-repository-to-cam_migrating-applications-3-4[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] -:installing-3-4!: diff --git a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc b/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc deleted file mode 100644 index 2f26cd542668..000000000000 --- a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-restricted-3-4"] -= Installing the Migration Toolkit for Containers in a restricted network environment -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-3-4 -:installing-restricted-3-4: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 3 and 4 in a restricted network environment by performing the following procedures: - -. Create a xref:../operators/admin/olm-restricted-networks.adoc#olm-mirror-catalog_olm-restricted-networks[mirrored Operator catalog]. -+ -This process creates a `mapping.txt` file, which contains the mapping between the `registry.redhat.io` image and your mirror registry image. The `mapping.txt` file is required for installing the Operator on the source cluster. -. Install the {mtc-full} Operator on the {product-title} {product-version} target cluster by using Operator Lifecycle Manager. -+ -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[source cluster or on a remote cluster]. - -. Install the _legacy_ {mtc-full} Operator on the {product-title} 3 source cluster from the command line interface. -. Configure object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-restricted-3-4[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. 
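To review the current cluster-wide proxy settings referenced above before adjusting them for {mtc-short}, a quick sketch follows; the `cluster` Proxy object exists even when no proxy is configured, in which case its `spec` is largely empty:

[source,terminal]
----
$ oc get proxy/cluster -o yaml
----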
- -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -The Multicloud Object Gateway is the only supported option for a restricted network environment. - -{mtc-short} supports the xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. - -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9/html/planning_your_deployment/disconnected-environment_rhodf[Disconnected environment] in the {rh-storage-first} documentation. -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-mtc-workflow_about-mtc-3-4[{mtc-short} workflow] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#migration-understanding-data-copy-methods_about-mtc-3-4[About data copy methods] -* xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-adding-replication-repository-to-cam_migrating-applications-3-4[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] -:installing-restricted-3-4!: diff --git a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc b/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc deleted file mode 100644 index 613fe5bb3b6f..000000000000 --- a/migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc +++ /dev/null @@ -1,62 +0,0 @@ -:_content-type: ASSEMBLY -[id="migrating-applications-3-4"] -= Migrating your applications -include::_attributes/common-attributes.adoc[] -:context: migrating-applications-3-4 -:migrating-applications-3-4: - -toc::[] - -You can migrate your applications by using the {mtc-full} ({mtc-short}) web console or from the xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migrating-applications-cli_advanced-migration-options-3-4[command line]. - -You can use stage migration and cutover migration to migrate an application between clusters: - -* Stage migration copies data from the source cluster to the target cluster without stopping the application. You can run a stage migration multiple times to reduce the duration of the cutover migration. -* Cutover migration stops the transactions on the source cluster and moves the resources to the target cluster. - -You can use state migration to migrate an application's state: - -* State migration copies selected persistent volume claims (PVCs). -* You can use state migration to migrate a namespace within the same cluster. - -Most cluster-scoped resources are not yet handled by {mtc-short}. If your applications require cluster-scoped resources, you might have to create them manually on the target cluster. 
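For example, one way to follow the progress of the stage, cutover, or state migrations described above from the CLI, sketched here with the default `openshift-migration` namespace assumed:

[source,terminal]
----
$ oc get migmigration -n openshift-migration -w
----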
- -During migration, {mtc-short} preserves the following namespace annotations: - -* `openshift.io/sa.scc.mcs` -* `openshift.io/sa.scc.supplemental-groups` -* `openshift.io/sa.scc.uid-range` - -These annotations preserve the UID range, ensuring that the containers retain their file system permissions on the target cluster. There is a risk that the migrated UIDs could duplicate UIDs within an existing or future namespace on the target cluster. - -include::modules/migration-prerequisites.adoc[leveloffset=+1] - -[role="_additional-resources"] -[discrete] -[id="additional-resources-for-migration-prerequisites_{context}"] -=== Additional resources for migration prerequisites - -* link:https://docs.openshift.com/container-platform/3.11/install_config/registry/securing_and_exposing_registry.html#exposing-the-registry[Manually exposing a secure registry for {product-title} 3] -* xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-updating-deprecated-internal-images_troubleshooting-3-4[Updating deprecated internal images] - -[id="migrating-applications-mtc-web-console_{context}"] -== Migrating your applications by using the {mtc-short} web console - -You can configure clusters and a replication repository by using the {mtc-short} web console. Then, you can create and run a migration plan. - -include::modules/migration-launching-cam.adoc[leveloffset=+2] -include::modules/migration-adding-cluster-to-cam.adoc[leveloffset=+2] -include::modules/migration-adding-replication-repository-to-cam.adoc[leveloffset=+2] -include::modules/migration-creating-migration-plan-cam.adoc[leveloffset=+2] - -[role="_additional-resources"] -[discrete] -[id="additional-resources-for-persistent-volume-copy-methods_{context}"] -=== Additional resources - -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#file-system-copy-method_about-mtc-3-4[{mtc-short} file system copy method] -* xref:../migrating_from_ocp_3_to_4/about-mtc-3-4.adoc#snapshot-copy-method_about-mtc-3-4[{mtc-short} snapshot copy method] - -include::modules/migration-running-migration-plan-cam.adoc[leveloffset=+2] - -:migrating-applications-3-4!: diff --git a/migrating_from_ocp_3_to_4/modules b/migrating_from_ocp_3_to_4/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/migrating_from_ocp_3_to_4/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc b/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc deleted file mode 100644 index 869067e5371f..000000000000 --- a/migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="planning-considerations-3-4"] -= Network considerations -include::_attributes/common-attributes.adoc[] -:context: planning-considerations-3-4 - -toc::[] - -Review the strategies for redirecting your application network traffic after migration. - -[id="dns-considerations_{context}"] -== DNS considerations - -The DNS domain of the target cluster is different from the domain of the source cluster. By default, applications get FQDNs of the target cluster after migration. - -To preserve the source DNS domain of migrated applications, select one of the two options described below. 
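To see the default domain that migrated applications receive on the target cluster, a quick sketch follows; the two preservation options are described in the modules below:

[source,terminal]
----
$ oc get ingresses.config.openshift.io cluster -o jsonpath='{.spec.domain}'
----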
- -include::modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc[leveloffset=+2] -include::modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../security/certificates/replacing-default-ingress-certificate.adoc#replacing-default-ingress[Replacing the default ingress certificate] for more information. - -include::modules/migration-network-traffic-redirection-strategies.adoc[leveloffset=+1] diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc deleted file mode 100644 index eb8636c56569..000000000000 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ /dev/null @@ -1,239 +0,0 @@ -:_content-type: ASSEMBLY -[id="planning-migration-3-4"] -= Differences between {product-title} 3 and 4 -include::_attributes/common-attributes.adoc[] -:context: planning-migration-3-4 - -toc::[] - -{product-title} {product-version} introduces architectural changes and enhancements. The procedures that you used to manage your {product-title} 3 cluster might not apply to {product-title} 4. - -ifndef::openshift-origin[] -For information about configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information about new features and other notable technical changes, review the xref:../release_notes/ocp-4-14-release-notes.adoc#ocp-4-14-release-notes[OpenShift Container Platform 4.14 release notes]. -endif::[] - -It is not possible to upgrade your existing {product-title} 3 cluster to {product-title} 4. You must start with a new {product-title} 4 installation. Tools are available to assist in migrating your control plane settings and application workloads. - -[id="migration-differences-architecture"] -== Architecture - -With {product-title} 3, administrators individually deployed {op-system-base-full} hosts, and then installed {product-title} on top of these hosts to form a cluster. Administrators were responsible for properly configuring these hosts and performing updates. - -{product-title} 4 represents a significant change in the way that {product-title} clusters are deployed and managed. {product-title} 4 includes new technologies and functionality, such as Operators, machine sets, and {op-system-first}, which are core to the operation of the cluster. This technology shift enables clusters to self-manage some functions previously performed by administrators. This also ensures platform stability and consistency, and simplifies installation and scaling. - -Beginning with {product-title} 4.13, {op-system} now uses {op-system-base-full} 9.2 packages. This enhancement enables the latest fixes and features as well as the latest hardware support and driver updates. For more information about how this upgrade to RHEL 9.2 might affect your options configuration and services as well as driver and container support, see the link:https://docs.openshift.com/container-platform/4.13/release_notes/ocp-4-13-release-notes.html#ocp-4-13-rhel-9-considerations[RHCOS now uses RHEL 9.2] in the _OpenShift Container Platform 4.13 release notes_. - -For more information, see xref:../architecture/architecture.adoc#architecture[OpenShift Container Platform architecture]. 
- -[discrete] -=== Immutable infrastructure - -{product-title} 4 uses {op-system-first}, which is designed to run containerized applications, and provides efficient installation, Operator-based management, and simplified upgrades. {op-system} is an immutable container host, rather than a customizable operating system like {op-system-base}. {op-system} enables {product-title} 4 to manage and automate the deployment of the underlying container host. {op-system} is a part of {product-title}, which means that everything runs inside a container and is deployed using {product-title}. - -In {product-title} 4, control plane nodes must run {op-system}, ensuring that full-stack automation is maintained for the control plane. This makes rolling out updates and upgrades a much easier process than in {product-title} 3. - -For more information, see xref:../architecture/architecture-rhcos.adoc#architecture-rhcos[{op-system-first}]. - -[discrete] -=== Operators - -Operators are a method of packaging, deploying, and managing a Kubernetes application. Operators ease the operational complexity of running another piece of software. They watch over your environment and use the current state to make decisions in real time. Advanced Operators are designed to upgrade and react to failures automatically. - -For more information, see xref:../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Understanding Operators]. - -[id="migration-differences-install"] -== Installation and upgrade - -[discrete] -=== Installation process - -To install {product-title} 3.11, you prepared your {op-system-base-full} hosts, set all of the configuration values your cluster needed, and then ran an Ansible playbook to install and set up your cluster. - -In {product-title} {product-version}, you use the OpenShift installation program to create a minimum set of resources required for a cluster. After the cluster is running, you use Operators to further configure your cluster and to install new services. After first boot, {op-system-first} systems are managed by the Machine Config Operator (MCO) that runs in the {product-title} cluster. - -For more information, see xref:../architecture/architecture-installation.adoc#installation-process_architecture-installation[Installation process]. - -ifndef::openshift-origin[] -If you want to add {op-system-base-full} worker machines to your {product-title} {product-version} cluster, you use an Ansible playbook to join the {op-system-base} worker machines after the cluster is running. For more information, see xref:../machine_management/adding-rhel-compute.adoc#adding-rhel-compute[Adding {op-system-base} compute machines to an {product-title} cluster]. -endif::[] - -[discrete] -=== Infrastructure options - -In {product-title} 3.11, you installed your cluster on infrastructure that you prepared and maintained. In addition to providing your own infrastructure, {product-title} 4 offers an option to deploy a cluster on infrastructure that the {product-title} installation program provisions and the cluster maintains. - -For more information, see xref:../architecture/architecture-installation.adoc#installation-overview_architecture-installation[OpenShift Container Platform installation overview]. - -[discrete] -=== Upgrading your cluster - -In {product-title} 3.11, you upgraded your cluster by running Ansible playbooks. In {product-title} {product-version}, the cluster manages its own updates, including updates to {op-system-first} on cluster nodes. 
You can easily upgrade your cluster by using the web console or by using the `oc adm upgrade` command from the OpenShift CLI and the Operators will automatically upgrade themselves. If your {product-title} {product-version} cluster has {op-system-base} worker machines, then you will still need to run an Ansible playbook to upgrade those worker machines. - -For more information, see xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating clusters]. - -[id="migration-considerations"] -== Migration considerations - -Review the changes and other considerations that might affect your transition from {product-title} 3.11 to {product-title} 4. - -[id="migration-preparing-storage"] -=== Storage considerations - -Review the following storage changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Local volume persistent storage - -Local storage is only supported by using the Local Storage Operator in {product-title} {product-version}. It is not supported to use the local provisioner method from {product-title} 3.11. - -For more information, see xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. - -[discrete] -==== FlexVolume persistent storage - -The FlexVolume plugin location changed from {product-title} 3.11. The new location in {product-title} {product-version} is `/etc/kubernetes/kubelet-plugins/volume/exec`. Attachable FlexVolume plugins are no longer supported. - -For more information, see xref:../storage/persistent_storage/persistent-storage-flexvolume.adoc#persistent-storage-using-flexvolume[Persistent storage using FlexVolume]. - -[discrete] -==== Container Storage Interface (CSI) persistent storage - -Persistent storage using the Container Storage Interface (CSI) was link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] in {product-title} 3.11. {product-title} {product-version} ships with xref:../storage/container_storage_interface/persistent-storage-csi.adoc#csi-drivers-supported_persistent-storage-csi[several CSI drivers]. You can also install your own driver. - -For more information, see xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[Persistent storage using the Container Storage Interface (CSI)]. - -[discrete] -==== Red Hat OpenShift Data Foundation - -OpenShift Container Storage 3, which is available for use with {product-title} 3.11, uses Red Hat Gluster Storage as the backing storage. - -{rh-storage-first} 4, which is available for use with {product-title} 4, uses Red Hat Ceph Storage as the backing storage. - -For more information, see xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Persistent storage using Red Hat OpenShift Data Foundation] and the link:https://access.redhat.com/articles/4731161[interoperability matrix] article. - -[discrete] -==== Unsupported persistent storage options - -Support for the following persistent storage options from {product-title} 3.11 has changed in {product-title} {product-version}: - -* GlusterFS is no longer supported. -* CephFS as a standalone product is no longer supported. -* Ceph RBD as a standalone product is no longer supported. 
- -If you used one of these in {product-title} 3.11, you must choose a different persistent storage option for full support in {product-title} {product-version}. - -For more information, see xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. - -[discrete] -==== Migration of in-tree volumes to CSI drivers - -{product-title} 4 is migrating in-tree volume plugins to their Container Storage Interface (CSI) counterparts. In {product-title} {product-version}, CSI drivers are the new default for the following in-tree volume types: - -* Amazon Web Services (AWS) Elastic Block Storage (EBS) -* Azure Disk -* Azure File -* Google Cloud Platform Persistent Disk (GCP PD) -* OpenStack Cinder -* VMware vSphere -+ -[NOTE] -==== -As of {product-title} 4.13, VMware vSphere is not available by default. However, you can opt into VMware vSphere. -==== - -All aspects of volume lifecycle, such as creation, deletion, mounting, and unmounting, are handled by the CSI driver. - -For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-migration.adoc#persistent-storage-csi-migration[CSI automatic migration]. - -[id="migration-preparing-networking"] -=== Networking considerations - -Review the following networking changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Network isolation mode - -The default network isolation mode for {product-title} 3.11 was `ovs-subnet`, though users frequently switched to use `ovs-multitenant`. The default network isolation mode for {product-title} {product-version} is controlled by a network policy. - -If your {product-title} 3.11 cluster used the `ovs-subnet` or `ovs-multitenant` mode, it is recommended to switch to a network policy for your {product-title} {product-version} cluster. Network policies are supported upstream, are more flexible, and they provide the functionality that `ovs-multitenant` does. If you want to maintain the `ovs-multitenant` behavior while using a network policy in {product-title} {product-version}, follow the steps to xref:../networking/network_policy/multitenant-network-policy.adoc#multitenant-network-policy[configure multitenant isolation using network policy]. - -For more information, see xref:../networking/network_policy/about-network-policy.adoc#about-network-policy[About network policy]. - -[discrete] -==== OVN-Kubernetes as the default networking plugin in Red Hat OpenShift Networking - -In {product-title} 3.11, OpenShift SDN was the default networking plugin in Red Hat OpenShift Networking. In {product-title} {product-version}, OVN-Kubernetes is now the default networking plugin. - -For information on migrating to OVN-Kubernetes from OpenShift SDN, see xref:../networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc#migrate-from-openshift-sdn[Migrating from the OpenShift SDN network plugin]. - -[id="migration-preparing-logging"] -=== Logging considerations - -Review the following logging changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Deploying OpenShift Logging - -{product-title} 4 provides a simple deployment mechanism for OpenShift Logging by using a Cluster Logging custom resource. - -For more information, see xref:../logging/cluster-logging-deploying.adoc#cluster-logging-deploying_cluster-logging-deploying[Installing OpenShift Logging].
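As a rough check of the Cluster Logging custom resource mentioned above, you can list the instances on the cluster. This is only a sketch and assumes OpenShift Logging is installed in its usual `openshift-logging` namespace:

[source,terminal]
----
$ oc get clusterlogging -n openshift-logging
----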
- -[discrete] -==== Aggregated logging data - -You cannot transition your aggregate logging data from {product-title} 3.11 into your new {product-title} 4 cluster. - -For more information, see xref:../logging/cluster-logging.adoc#cluster-logging-about_cluster-logging[About OpenShift Logging]. - -[discrete] -==== Unsupported logging configurations - -Some logging configurations that were available in {product-title} 3.11 are no longer supported in {product-title} {product-version}. - -For more information on the explicitly unsupported logging cases, see xref:../logging/config/cluster-logging-maintenance-support.adoc#cluster-logging-maintenance-and-support[Maintenance and support]. - -[id="migration-preparing-security"] -=== Security considerations - -Review the following security changes to consider when transitioning from {product-title} 3.11 to {product-title} {product-version}. - -[discrete] -==== Unauthenticated access to discovery endpoints - -In {product-title} 3.11, an unauthenticated user could access the discovery endpoints (for example, [x-]`/api/*` and [x-]`/apis/*`). For security reasons, unauthenticated access to the discovery endpoints is no longer allowed in {product-title} {product-version}. If you do need to allow unauthenticated access, you can configure the RBAC settings as necessary; however, be sure to consider the security implications as this can expose internal cluster components to the external network. - -// TODO: Anything to xref to, or additional details? - -[discrete] -==== Identity providers - -Configuration for identity providers has changed for {product-title} 4, including the following notable changes: - -* The request header identity provider in {product-title} {product-version} requires mutual TLS, whereas in {product-title} 3.11 it did not. -* The configuration of the OpenID Connect identity provider was simplified in {product-title} {product-version}. It now obtains data, which previously had to be specified in {product-title} 3.11, from the provider's `/.well-known/openid-configuration` endpoint. - -For more information, see xref:../authentication/understanding-identity-provider.adoc#understanding-identity-provider[Understanding identity provider configuration]. - -[discrete] -==== OAuth token storage format - -Newly created OAuth HTTP bearer tokens no longer match the names of their OAuth access token objects. The object names are now a hash of the bearer token and are no longer sensitive. This reduces the risk of leaking sensitive information. - -[discrete] -==== Default security context constraints - -The `restricted` security context constraints (SCC) in {product-title} 4 can no longer be accessed by any authenticated user, as the `restricted` SCC could be in {product-title} 3.11. The broad authenticated access is now granted to the `restricted-v2` SCC, which is more restrictive than the old `restricted` SCC. The `restricted` SCC still exists; users that want to use it must be specifically given permissions to do so. - -For more information, see xref:../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing security context constraints]. - -[id="migration-preparing-monitoring"] -=== Monitoring considerations - -Review the following monitoring changes when transitioning from {product-title} 3.11 to {product-title} {product-version}. You cannot migrate Hawkular configurations and metrics to Prometheus.
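To see the Prometheus-based monitoring stack that replaces the {product-title} 3.11 Hawkular-based metrics, a brief sketch follows; the stack components run in the `openshift-monitoring` namespace:

[source,terminal]
----
$ oc get pods -n openshift-monitoring
----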
- -[discrete] -==== Alert for monitoring infrastructure availability - -The default alert that triggers to ensure the availability of the monitoring structure was called `DeadMansSwitch` in {product-title} 3.11. This was renamed to `Watchdog` in {product-title} 4. If you had PagerDuty integration set up with this alert in {product-title} 3.11, you must set up the PagerDuty integration for the `Watchdog` alert in {product-title} 4. - -For more information, see xref:../monitoring/managing-alerts.adoc#applying-custom-alertmanager-configuration_managing-alerts[Applying custom Alertmanager configuration]. diff --git a/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc b/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc deleted file mode 100644 index 8db3285d6cc1..000000000000 --- a/migrating_from_ocp_3_to_4/premigration-checklists-3-4.adoc +++ /dev/null @@ -1,113 +0,0 @@ -:_content-type: ASSEMBLY -[id="premigration-checklists-3-4"] -= Premigration checklists -include::_attributes/common-attributes.adoc[] -:context: premigration-checklists-3-4 - -toc::[] - -Before you migrate your application workloads with the {mtc-full} ({mtc-short}), review the following checklists. - -[id="resources_{context}"] -== Resources - -* [ ] If your application uses an internal service network or an external route for communicating with services, the relevant route exists. -* [ ] If your application uses cluster-level resources, you have re-created them on the target cluster. -* [ ] You have xref:../migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc#migration-excluding-resources_advanced-migration-options-3-4[excluded] persistent volumes (PVs), image streams, and other resources that you do not want to migrate. -* [ ] PV data has been backed up in case an application displays unexpected behavior after migration and corrupts the data. - -[id="source-cluster_{context}"] -== Source cluster - -* [ ] The cluster meets the link:https://docs.openshift.com/container-platform/3.11/install/prerequisites.html#hardware[minimum hardware requirements]. -* [ ] You have installed the correct legacy {mtc-full} Operator version: -** `operator-3.7.yml` on {product-title} version 3.7. -** `operator.yml` on {product-title} versions 3.9 to 4.5. -* [ ] All nodes have an active {product-title} subscription. -* [ ] You have performed all the link:https://docs.openshift.com/container-platform/3.11/day_two_guide/run_once_tasks.html#day-two-guide-default-storage-class[run-once tasks]. -* [ ] You have performed all the link:https://docs.openshift.com/container-platform/3.11/day_two_guide/environment_health_checks.html[environment health checks]. -* [ ] You have checked for PVs with abnormal configurations stuck in a *Terminating* state by running the following command: -+ -[source,terminal] ----- -$ oc get pv ----- - -* [ ] You have checked for pods whose status is other than *Running* or *Completed* by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces | egrep -v 'Running | Completed' ----- - -* [ ] You have checked for pods with a high restart count by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces --field-selector=status.phase=Running \ - -o json | jq '.items[]|select(any( .status.containerStatuses[]; \ - .restartCount > 3))|.metadata.name' ----- -+ -Even if the pods are in a *Running* state, a high restart count might indicate underlying problems. 
- -* [ ] You have removed old builds, deployments, and images from each namespace to be migrated by xref:../applications/pruning-objects.adoc#pruning-objects[pruning]. -* [ ] The {product-registry} uses a link:https://docs.openshift.com/container-platform/3.11/scaling_performance/optimizing_storage.html#registry[supported storage type]. -* [ ] Direct image migration only: The {product-registry} is link:https://docs.openshift.com/container-platform/3.11/install_config/registry/securing_and_exposing_registry.html#exposing-the-registry[exposed] to external traffic. -* [ ] You can read and write images to the registry. -* [ ] The link:https://access.redhat.com/articles/3093761[etcd cluster] is healthy. -* [ ] The link:https://docs.openshift.com/container-platform/3.11/install_config/master_node_configuration.html#master-node-configuration-node-qps-burst[average API server response time] on the source cluster is less than 50 ms. -* [ ] The cluster certificates are link:https://docs.openshift.com/container-platform/3.11/install_config/redeploying_certificates.html#install-config-cert-expiry[valid] for the duration of the migration process. -* [ ] You have checked for pending certificate-signing requests by running the following command: -+ -[source,terminal] ----- -$ oc get csr -A | grep pending -i ----- - -* [ ] The link:https://docs.openshift.com/container-platform/3.11/install_config/configuring_authentication.html#overview[identity provider] is working. -* [ ] You have set the value of the `openshift.io/host.generated` annotation parameter to `true` for each {product-title} route, which updates the host name of the route for the target cluster. Otherwise, the migrated routes retain the source cluster host name. - -[id="target-cluster_{context}"] -== Target cluster - -* [ ] You have installed {mtc-full} Operator version 1.5.1. -* [ ] All xref:../migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc#migration-prerequisites_migrating-applications-3-4[{mtc-short} prerequisites] are met. -* [ ] The cluster meets the minimum hardware requirements for the specific platform and installation method, for example, on xref:../installing/installing_bare_metal/installing-bare-metal.adoc#minimum-resource-requirements_installing-bare-metal[bare metal]. -* [ ] The cluster has xref:../storage/dynamic-provisioning.adoc#defining-storage-classes_dynamic-provisioning[storage classes] defined for the storage types used by the source cluster, for example, block volume, file system, or object storage. -+ -[NOTE] -==== -NFS does not require a defined storage class. -==== - -* [ ] The cluster has the correct network configuration and permissions to access external services, for example, databases, source code repositories, container image registries, and CI/CD tools. -* [ ] External applications and services that use services provided by the cluster have the correct network configuration and permissions to access the cluster. -* [ ] Internal container image dependencies are met. -+ -If an application uses an internal image in the `openshift` namespace that is not supported by {product-title} {product-version}, you can manually update the xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-updating-deprecated-internal-images_troubleshooting-3-4[{product-title} 3 image stream tag] with `podman`. -* [ ] The target cluster and the replication repository have sufficient storage space. 
-* [ ] The xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[identity provider] is working. -* [ ] DNS records for your application exist on the target cluster. -* [ ] Certificates that your application uses exist on the target cluster. -* [ ] You have configured appropriate firewall rules on the target cluster. -* [ ] You have correctly configured load balancing on the target cluster. -* [ ] If you migrate objects to an existing namespace on the target cluster that has the same name as the namespace being migrated from the source, the target namespace contains no objects of the same name and type as the objects being migrated. -+ -[NOTE] -==== -Do not create namespaces for your application on the target cluster before migration because this might cause quotas to change. -==== - -[id="performance_{context}"] -== Performance - -* [ ] The migration network has a minimum throughput of 10 Gbps. -* [ ] The clusters have sufficient resources for migration. -+ -[NOTE] -==== -Clusters require additional memory, CPUs, and storage in order to run a migration on top of normal workloads. Actual resource requirements depend on the number of Kubernetes resources being migrated in a single migration plan. You must test migrations in a non-production environment in order to estimate the resource requirements. -==== -* [ ] The xref:../support/troubleshooting/verifying-node-health.adoc#reviewing-node-status-use-and-configuration_verifying-node-health[memory and CPU usage] of the nodes are healthy. -* [ ] The link:https://access.redhat.com/solutions/4885641[etcd disk performance] of the clusters has been checked with `fio`. diff --git a/migrating_from_ocp_3_to_4/snippets b/migrating_from_ocp_3_to_4/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/migrating_from_ocp_3_to_4/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc b/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc deleted file mode 100644 index 951cf515fd72..000000000000 --- a/migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc +++ /dev/null @@ -1,67 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-3-4"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-3-4 -:troubleshooting-3-4: -:namespace: openshift-migration -:local-product: {mtc-short} -:must-gather: registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v{mtc-version} - -toc::[] - -This section describes resources for troubleshooting the {mtc-full} ({mtc-short}). - -For known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. - -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] - -[discrete] -include::modules/migration-about-mtc-custom-resources.adoc[leveloffset=+2] - -include::modules/migration-mtc-cr-manifests.adoc[leveloffset=+1] - -[id="logs-and-debugging-tools_{context}"] -== Logs and debugging tools - -This section describes logs and debugging tools that you can use for troubleshooting. 
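Before the individual modules, the following minimal sketch shows one way to inspect {mtc-short} resources from the command line. It assumes the `openshift-migration` namespace declared in this assembly's attributes; the plan name is a placeholder, and the exact resources present depend on your installation.

[source,terminal]
----
# List the controller and UI pods and the MTC custom resources in the MTC namespace
$ oc get pods -n openshift-migration
$ oc get migplan,migmigration -n openshift-migration

# Inspect the status conditions of one migration plan (plan name is a placeholder)
$ oc get migplan <migplan_name> -n openshift-migration -o yaml
----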
- -include::modules/migration-viewing-migration-plan-resources.adoc[leveloffset=+2] -include::modules/migration-viewing-migration-plan-log.adoc[leveloffset=+2] -include::modules/migration-using-mig-log-reader.adoc[leveloffset=+2] -include::modules/migration-accessing-performance-metrics.adoc[leveloffset=+2] -include::modules/migration-provided-metrics.adoc[leveloffset=+3] -include::modules/migration-using-must-gather.adoc[leveloffset=+2] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+2] -include::modules/migration-partial-failure-velero.adoc[leveloffset=+2] -include::modules/migration-using-mtc-crs-for-troubleshooting.adoc[leveloffset=+2] - -[id="common-issues-and-concerns_{context}"] -== Common issues and concerns - -This section describes common issues and concerns that can cause issues during migration. - -include::modules/migration-updating-deprecated-internal-images.adoc[leveloffset=+2] -include::modules/migration-dvm-error-node-selectors.adoc[leveloffset=+2] -include::modules/migration-error-messages.adoc[leveloffset=+2] -include::modules/migration-known-issues.adoc[leveloffset=+2] - -[id="rolling-back-migration_{context}"] -== Rolling back a migration - -You can roll back a migration by using the {mtc-short} web console or the CLI. - -You can also xref:../migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc#migration-rolling-back-migration-manually_troubleshooting-3-4[roll back a migration manually]. - -include::modules/migration-rolling-back-migration-web-console.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-cli.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-uninstalling_{context}"] -[discrete] -=== Additional resources - -* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-cluster[Deleting Operators from a cluster using the web console] - -:troubleshooting-3-4!: diff --git a/migrating_from_ocp_3_to_4/upgrading-3-4.adoc b/migrating_from_ocp_3_to_4/upgrading-3-4.adoc deleted file mode 100644 index 2b5567c7de14..000000000000 --- a/migrating_from_ocp_3_to_4/upgrading-3-4.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-3-4"] -= Upgrading the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: upgrading-3-4 -:upgrading-3-4: - -toc::[] - -You can upgrade the {mtc-full} ({mtc-short}) on {product-title} {product-version} by using Operator Lifecycle Manager. - -You can upgrade {mtc-short} on {product-title} 3 by reinstalling the legacy {mtc-full} Operator. - -[IMPORTANT] -==== -If you are upgrading from {mtc-short} version 1.3, you must perform an additional procedure to update the `MigPlan` custom resource (CR). 
-==== - -include::modules/migration-upgrading-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-upgrading-mtc-with-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-upgrading-from-mtc-1-3.adoc[leveloffset=+1] -:upgrading-3-4!: diff --git a/migration_toolkit_for_containers/_attributes b/migration_toolkit_for_containers/_attributes deleted file mode 120000 index f27fd275ea6b..000000000000 --- a/migration_toolkit_for_containers/_attributes +++ /dev/null @@ -1 +0,0 @@ -../_attributes/ \ No newline at end of file diff --git a/migration_toolkit_for_containers/about-mtc.adoc b/migration_toolkit_for_containers/about-mtc.adoc deleted file mode 100644 index ada85b70b63e..000000000000 --- a/migration_toolkit_for_containers/about-mtc.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_content-type: ASSEMBLY -[id="about-mtc"] -= About the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: about-mtc - -toc::[] - -The {mtc-full} ({mtc-short}) enables you to migrate stateful application workloads between {product-title} 4 clusters at the granularity of a namespace. - -[NOTE] -==== -If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[About migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -You can migrate applications within the same cluster or between clusters by using state migration. - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -The {mtc-short} console is installed on the target cluster by default. You can configure the {mtc-full} Operator to install the console on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#advanced-migration-options-mtc[Advanced migration options] for information about the following topics: - -* Automating your migration with migration hooks and the {mtc-short} API. -* Configuring your migration plan to exclude resources, support large-scale migrations, and enable automatic PV resizing for direct volume migration. - -include::modules/migration-terminology.adoc[leveloffset=+1] -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] -include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1] -include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc b/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc deleted file mode 100644 index 65b150a366ba..000000000000 --- a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc +++ /dev/null @@ -1,69 +0,0 @@ -:_content-type: ASSEMBLY -[id="advanced-migration-options-mtc"] -= Advanced migration options -include::_attributes/common-attributes.adoc[] -:context: advanced-migration-options-mtc - -toc::[] - -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. 
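For orientation, the following is a minimal sketch of a `MigPlan` custom resource as it might be created through the API. The field names reflect the `migration.openshift.io/v1alpha1` API as commonly documented for {mtc-short}; all names and namespaces are placeholders, and you should verify the exact schema against the CR manifests for your {mtc-short} version.

[source,yaml]
----
apiVersion: migration.openshift.io/v1alpha1
kind: MigPlan
metadata:
  name: <migplan_name>
  namespace: openshift-migration
spec:
  srcMigClusterRef:        # reference to the registered source cluster
    name: <source_cluster>
    namespace: openshift-migration
  destMigClusterRef:       # reference to the target (host) cluster
    name: <host_cluster>
    namespace: openshift-migration
  migStorageRef:           # reference to the replication repository
    name: <migstorage_name>
    namespace: openshift-migration
  namespaces:              # namespaces to migrate
    - <namespace_to_migrate>
----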
- -include::modules/migration-terminology.adoc[leveloffset=+1] - -[id="migrating-applications-cli_{context}"] -== Migrating applications by using the command line - -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. - -include::modules/migration-prerequisites.adoc[leveloffset=+2] -include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+2] -include::modules/migration-configuring-proxies.adoc[leveloffset=+3] -include::modules/migration-migrating-applications-api.adoc[leveloffset=+2] -include::modules/migration-state-migration-cli.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-state-migration_{context}"] -[discrete] -=== Additional resources - -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-excluding-pvcs_advanced-migration-options-mtc[Excluding PVCs from migration] to select PVCs for state migration. -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-mapping-pvcs_advanced-migration-options-mtc[Mapping PVCs] to migrate source PV data to provisioned PVCs on the destination cluster. -* See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-kubernetes-objects_advanced-migration-options-mtc[Migrating Kubernetes objects] to migrate the Kubernetes objects that constitute an application's state. - -include::modules/migration-hooks.adoc[leveloffset=+1] -include::modules/migration-writing-ansible-playbook-hook.adoc[leveloffset=+2] - -[id="migration-plan-options_{context}"] -== Migration plan options - -You can exclude, edit, and map components in the `MigPlan` custom resource (CR). - -include::modules/migration-excluding-resources.adoc[leveloffset=+2] -include::modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc[leveloffset=+2] -include::modules/migration-excluding-pvcs.adoc[leveloffset=+2] -include::modules/migration-mapping-pvcs.adoc[leveloffset=+2] -include::modules/migration-editing-pvs-in-migplan.adoc[leveloffset=+2] -include::modules/migration-converting-storage-classes.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-for-editing-pv-attributes_{context}"] -[discrete] -==== Additional resources - -* For details about the `move` and `copy` actions, see xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[MTC workflow]. -* For details about the `skip` action, see xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-excluding-pvcs_advanced-migration-options-mtc[Excluding PVCs from migration]. -* For details about the file system and snapshot copy methods, see xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods]. - -include::modules/migration-kubernetes-objects.adoc[leveloffset=+2] - -[id="migration-controller-options_{context}"] -== Migration controller options - -You can edit migration plan limits, enable persistent volume resizing, or enable cached Kubernetes clients in the `MigrationController` custom resource (CR) for large migrations and improved performance. 
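As a hedged example, persistent volume resizing for direct volume migration is typically switched on by patching the `MigrationController` CR, as in the sketch below. The CR name `migration-controller`, the `openshift-migration` namespace, and the `enable_dvm_pv_resizing` parameter are assumptions based on a default installation; confirm the parameter names in the modules that follow before applying them.

[source,terminal]
----
$ oc patch migrationcontroller migration-controller -n openshift-migration \
    --type=merge -p '{"spec":{"enable_dvm_pv_resizing":true}}'
----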
- -include::modules/migration-changing-migration-plan-limits.adoc[leveloffset=+2] -include::modules/migration-enabling-pv-resizing-dvm.adoc[leveloffset=+2] -include::modules/migration-enabling-cached-kubernetes-clients.adoc[leveloffset=+2] - -:advanced-migration-options-mtc!: diff --git a/migration_toolkit_for_containers/images b/migration_toolkit_for_containers/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/migration_toolkit_for_containers/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/migration_toolkit_for_containers/index.adoc b/migration_toolkit_for_containers/index.adoc deleted file mode 100644 index 3b1383756e43..000000000000 --- a/migration_toolkit_for_containers/index.adoc +++ /dev/null @@ -1,68 +0,0 @@ -[id="migration-toolkit-for-containers-overview"] -= Migration toolkit for containers overview -include::_attributes/common-attributes.adoc[] -:context: migration-toolkit-for-containers-overview - -toc::[] - -You can migrate stateful application workloads between {product-title} 4 clusters at the granularity of a namespace by using the Migration Toolkit for Containers (MTC). To learn more about MTC see xref:../migration_toolkit_for_containers/about-mtc.adoc#about-mtc[understanding MTC]. - -[NOTE] -==== -If you are migrating from {product-title} 3, see xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc#about-migrating-from-3-to-4[about migrating from {product-title} 3 to 4] and xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -[id="mtc-overview-install-mtc"] -== Installing MTC -You must install the Migration Toolkit for Containers Operator that is compatible for your {product-title} version: - -* {product-title} 4.6 and later versions: xref:../migration_toolkit_for_containers/installing-mtc.adoc#installing-mtc[Install the Migration Toolkit for Containers Operator by using Operator Lifecycle Manager (OLM)]. -* {product-title} 4.5 and earlier versions: xref:../migration_toolkit_for_containers/installing-mtc.adoc#configuring-replication-repository_installing-mtc[Manually install the legacy Migration Toolkit for Containers Operator]. - -Then you xref:../migration_toolkit_for_containers/installing-mtc.adoc#configuring-replication-repository_installing-mtc[configure object storage to use as a replication repository]. - -[id="mtc-overview-upgrade-mtc"] -== Upgrading MTC -You can xref:../migration_toolkit_for_containers/upgrading-mtc.adoc#upgrading-mtc[upgrade the MTC] by using OLM. - -[id="mtc-overview-mtc-checklists"] -== Reviewing MTC checklists -Before you migrate your application workloads with the Migration Toolkit for Containers (MTC), review the xref:../migration_toolkit_for_containers/premigration-checklists-mtc.adoc#premigration-checklists-mtc[premigration checklists]. - -[id="mtc-overview-migrate-mtc-applications"] -== Migrating applications -You can migrate your applications by using the MTC xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migrating-applications-mtc-web-console_migrating-applications-with-mtc[web console] or xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migrating-applications-cli_advanced-migration-options-mtc[the command line]. 
- -[id="mtc-overview-advanced-migration-options"] -== Advanced migration options -You can automate your migrations and modify the `MigPlan` and `MigrationController` custom resources in order to perform large-scale migrations and to improve performance. You can check the following items: - -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-creating-registry-route-for-dim_advanced-migration-options-mtc[Create a registry route for direct image migration] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-configuring-proxies_advanced-migration-options-mtc[Configuring proxies] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-migrating-applications-api_advanced-migration-options-mtc[Migrating an application by using the MTC API] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-state-migration-cli_advanced-migration-options-mtc[Running a state migration] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-hooks_advanced-migration-options-mtc[Creating migration hooks] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-plan-options_advanced-migration-options-mtc[Editing, excluding, and mapping migrated resources] -* xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migration-controller-options_advanced-migration-options-mtc[Configuring the migration controller for large migrations] - -[id="mtc-overview-troubleshooting-mtc"] -== Troubleshooting migrations -You can perform the following troubleshooting tasks: - -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-viewing-migration-plan-resources_troubleshooting-mtc[Viewing plan resources] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-viewing-migration-plan-log_troubleshooting-mtc[Viewing the migration plan aggregated log file] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-mig-log-reader_troubleshooting-mtc[Using the migration log reader] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-accessing-performance-metrics_troubleshooting-mtc[Accessing performance metrics] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-must-gather_troubleshooting-mtc[Using the `must-gather` tool] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-debugging-velero-resources_troubleshooting-mtc[Using the Velero CLI to debug Backup and Restore CRs] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-partial-failure-velero_troubleshooting-mtc[Debugging a partial migration failure] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-using-mtc-crs-for-troubleshooting_troubleshooting-mtc[Using MTC custom resources for troubleshooting] -* xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#common-issues-and-concerns_troubleshooting-mtc[Checking common issues and concerns] - -[id="mtc-overview-roll-back-mtc"] -== Rolling back a migration -You can xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#rolling-back-migration_troubleshooting-mtc[roll back a migration] by using the MTC web console, the CLI or manually. 
- -[id="mtc-overview-uninstall-mtc"] -== Uninstalling MTC and deleting resources -You can xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-uninstalling-mtc-clean-up_installing-mtc[uninstall the MTC and delete its resources] to clean up the cluster. diff --git a/migration_toolkit_for_containers/installing-mtc-restricted.adoc b/migration_toolkit_for_containers/installing-mtc-restricted.adoc deleted file mode 100644 index 219633728254..000000000000 --- a/migration_toolkit_for_containers/installing-mtc-restricted.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mtc-restricted"] -= Installing the Migration Toolkit for Containers in a restricted network environment -include::_attributes/common-attributes.adoc[] -:context: installing-mtc-restricted -:installing-mtc-restricted: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 4 in a restricted network environment by performing the following procedures: - -. Create a xref:../operators/admin/olm-restricted-networks.adoc#olm-mirror-catalog_olm-restricted-networks[mirrored Operator catalog]. -+ -This process creates a `mapping.txt` file, which contains the mapping between the `registry.redhat.io` image and your mirror registry image. The `mapping.txt` file is required for installing the _legacy_ {mtc-full} Operator on an {product-title} 4.2 to 4.5 source cluster. -. Install the {mtc-full} Operator on the {product-title} {product-version} target cluster by using Operator Lifecycle Manager. -+ -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -. Install the {mtc-full} Operator on the source cluster: - -* {product-title} 4.6 or later: Install the {mtc-full} Operator by using Operator Lifecycle Manager. -* {product-title} 4.2 to 4.5: Install the legacy {mtc-full} Operator from the command line interface. - -. Configure object storage to use as a replication repository. - -[NOTE] -==== -To install {mtc-short} on {product-title} 3, see xref:../migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc#migration-installing-legacy-operator_installing-restricted-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== -To uninstall {mtc-short}, see xref:../migration_toolkit_for_containers/installing-mtc-restricted.adoc#migration-uninstalling-mtc-clean-up_installing-mtc-restricted[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="migration-rsync-root-non-root_{context}"] -== Running Rsync as either root or non-root - -[IMPORTANT] -==== -This section applies only when you are working with the OpenShift API, not the web console. -==== - -OpenShift environments have the `PodSecurityAdmission` controller enabled by default. 
This controller requires cluster administrators to enforce Pod Security Standards by means of namespace labels. All workloads in the cluster are expected to run one of the following Pod Security Standard levels: `Privileged`, `Baseline` or `Restricted`. Every cluster has its own default policy set. - -To guarantee successful data transfer in all environments, {mtc-full} ({mtc-short}) 1.7.5 introduced changes in Rsync pods, including running Rsync pods as non-root user by default. This ensures that data transfer is possible even for workloads that do not necessarily require higher privileges. This change was made because it is best to run workloads with the lowest level of privileges possible. - -[discrete] -[id="migration-rsync-override-data-transfer_{context}"] -=== Manually overriding default non-root operation for data transfer - -Although running Rsync pods as non-root user works in most cases, data transfer might fail when you run workloads as root user on the source side. {mtc-short} provides two ways to manually override default non-root operation for data transfer: - -* Configure all migrations to run an Rsync pod as root on the destination cluster for all migrations. -* Run an Rsync pod as root on the destination cluster per migration. - -In both cases, you must set the following labels on the source side of any namespaces that are running workloads with higher privileges prior to migration: `enforce`, `audit`, and `warn.` - -To learn more about Pod Security Admission and setting values for labels, see xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-opting_understanding-and-managing-pod-security-admission[Controlling pod security admission synchronization]. - -include::modules/migration-rsync-migration-controller-root-non-root.adoc[leveloffset=+1] - -include::modules/migration-rsync-mig-migration-root-non-root.adoc[leveloffset=+1] - -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -The Multicloud Object Gateway is the only supported option for a restricted network environment. - -{mtc-short} supports the xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository. - -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9/html/planning_your_deployment/disconnected-environment_rhodf[Disconnected environment] in the {rh-storage-first} documentation. 
-* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[{mtc-short} workflow] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods] -* xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-adding-replication-repository-to-cam_migrating-applications-with-mtc[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] - -:installing-mtc-restricted!: diff --git a/migration_toolkit_for_containers/installing-mtc.adoc b/migration_toolkit_for_containers/installing-mtc.adoc deleted file mode 100644 index b9d233b2a2b2..000000000000 --- a/migration_toolkit_for_containers/installing-mtc.adoc +++ /dev/null @@ -1,94 +0,0 @@ -:_content-type: ASSEMBLY -[id="installing-mtc"] -= Installing the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: installing-mtc -:installing-mtc: - -toc::[] - -You can install the {mtc-full} ({mtc-short}) on {product-title} 4. - -[NOTE] -==== -To install {mtc-short} on {product-title} 3, see xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-installing-legacy-operator_installing-3-4[Installing the legacy {mtc-full} Operator on {product-title} 3]. -==== - -By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[remote cluster]. - -After you have installed {mtc-short}, you must configure an object storage to use as a replication repository. - -To uninstall {mtc-short}, see xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-uninstalling-mtc-clean-up_installing-mtc[Uninstalling {mtc-short} and deleting resources]. - -include::modules/migration-compatibility-guidelines.adoc[leveloffset=+1] -include::modules/migration-installing-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-installing-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-about-configuring-proxies.adoc[leveloffset=+1] -include::modules/migration-configuring-proxies.adoc[leveloffset=+2] - -For more information, see xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object_config-cluster-wide-proxy[Configuring the cluster-wide proxy]. - -[id="migration-rsync-root-non-root_{context}"] -=== Running Rsync as either root or non-root - -[IMPORTANT] -==== -This section applies only when you are working with the OpenShift API, not the web console. -==== - -OpenShift environments have the `PodSecurityAdmission` controller enabled by default. This controller requires cluster administrators to enforce Pod Security Standards by means of namespace labels. All workloads in the cluster are expected to run one of the following Pod Security Standard levels: `Privileged`, `Baseline` or `Restricted`. Every cluster has its own default policy set. - -To guarantee successful data transfer in all environments, {mtc-full} ({mtc-short}) 1.7.5 introduced changes in Rsync pods, including running Rsync pods as non-root user by default. This ensures that data transfer is possible even for workloads that do not necessarily require higher privileges. This change was made because it is best to run workloads with the lowest level of privileges possible. 
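If you do override the default non-root behavior, as described in the next section, the `enforce`, `audit`, and `warn` labels that you must set on the source namespaces are the standard Pod Security Admission labels. A minimal sketch, assuming the `privileged` level and a placeholder namespace name, looks like this:

[source,terminal]
----
$ oc label namespace <source_namespace> \
    pod-security.kubernetes.io/enforce=privileged \
    pod-security.kubernetes.io/audit=privileged \
    pod-security.kubernetes.io/warn=privileged \
    --overwrite
----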
- -==== Manually overriding default non-root operation for data transfer - -Although running Rsync pods as a non-root user works in most cases, data transfer might fail when you run workloads as the root user on the source side. {mtc-short} provides two ways to manually override default non-root operation for data transfer: - -* Configure all migrations to run an Rsync pod as root on the destination cluster. -* Run an Rsync pod as root on the destination cluster per migration. - -In both cases, you must set the following labels on the source side of any namespaces that are running workloads with higher privileges prior to migration: `enforce`, `audit`, and `warn`. - -To learn more about Pod Security Admission and setting values for labels, see xref:../authentication/understanding-and-managing-pod-security-admission.adoc#security-context-constraints-psa-opting_understanding-and-managing-pod-security-admission[Controlling pod security admission synchronization]. - -include::modules/migration-rsync-migration-controller-root-non-root.adoc[leveloffset=+2] - -include::modules/migration-rsync-mig-migration-root-non-root.adoc[leveloffset=+2] - -[id="configuring-replication-repository_{context}"] -== Configuring a replication repository - -You must configure object storage to use as a replication repository. The {mtc-full} ({mtc-short}) copies data from the source cluster to the replication repository, and then from the replication repository to the target cluster. - -{mtc-short} supports the xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[file system and snapshot data copy methods] for migrating data from the source cluster to the target cluster. Select a method that is suited for your environment and is supported by your storage provider. - -{mtc-short} supports the following storage providers: - -* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-mcg_installing-mtc[Multicloud Object Gateway] -* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-aws-s3_installing-mtc[Amazon Web Services S3] -* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-gcp_installing-mtc[Google Cloud Platform] -* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-azure_installing-mtc[Microsoft Azure Blob] -* Generic S3 object storage, for example, Minio or Ceph S3 - -[id="replication-repository-prerequisites_{context}"] -=== Prerequisites - -* All clusters must have uninterrupted network access to the replication repository. -* If you use a proxy server with an internally hosted replication repository, you must ensure that the proxy allows access to the replication repository.
- -include::modules/migration-configuring-mcg.adoc[leveloffset=+2] -include::modules/migration-configuring-aws-s3.adoc[leveloffset=+2] -include::modules/migration-configuring-gcp.adoc[leveloffset=+2] -include::modules/migration-configuring-azure.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="{context}_configuring-replication-repository-additional-resources"] -=== Additional resources - -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-mtc-workflow_about-mtc[{mtc-short} workflow] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#migration-understanding-data-copy-methods_about-mtc[About data copy methods] -* xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-adding-replication-repository-to-cam_migrating-applications-with-mtc[Adding a replication repository to the {mtc-short} web console] - -include::modules/migration-uninstalling-mtc-clean-up.adoc[leveloffset=+1] - -:installing-mtc!: diff --git a/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc b/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc deleted file mode 100644 index 27bb37cfbe5d..000000000000 --- a/migration_toolkit_for_containers/migrating-applications-with-mtc.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_content-type: ASSEMBLY -[id="migrating-applications-with-mtc"] -= Migrating your applications -include::_attributes/common-attributes.adoc[] -:context: migrating-applications-with-mtc - -toc::[] - -You can migrate your applications by using the {mtc-full} ({mtc-short}) web console or the xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc#migrating-applications-cli_advanced-migration-options-mtc[command line]. - -Most cluster-scoped resources are not yet handled by {mtc-short}. If your applications require cluster-scoped resources, you might have to create them manually on the target cluster. - -You can use stage migration and cutover migration to migrate an application between clusters: - -* Stage migration copies data from the source cluster to the target cluster without stopping the application. You can run a stage migration multiple times to reduce the duration of the cutover migration. -* Cutover migration stops the transactions on the source cluster and moves the resources to the target cluster. - -You can use state migration to migrate an application's state: - -* State migration copies selected persistent volume claims (PVCs). -* You can use state migration to migrate a namespace within the same cluster. - -During migration, the {mtc-full} ({mtc-short}) preserves the following namespace annotations: - -* `openshift.io/sa.scc.mcs` -* `openshift.io/sa.scc.supplemental-groups` -* `openshift.io/sa.scc.uid-range` -+ -These annotations preserve the UID range, ensuring that the containers retain their file system permissions on the target cluster. There is a risk that the migrated UIDs could duplicate UIDs within an existing or future namespace on the target cluster. - -include::modules/migration-prerequisites.adoc[leveloffset=+1] - -[id="migrating-applications-mtc-web-console_{context}"] -== Migrating your applications by using the {mtc-short} web console - -You can configure clusters and a replication repository by using the {mtc-short} web console. Then, you can create and run a migration plan. 
- -include::modules/migration-launching-cam.adoc[leveloffset=+2] -include::modules/migration-adding-cluster-to-cam.adoc[leveloffset=+2] -include::modules/migration-adding-replication-repository-to-cam.adoc[leveloffset=+2] -include::modules/migration-creating-migration-plan-cam.adoc[leveloffset=+2] - -[discrete] -[id="additional-resources-for-persistent-volume-copy-methods_{context}"] -[role="_additional-resources"] -=== Additional resources for persistent volume copy methods - -* xref:../migration_toolkit_for_containers/about-mtc.adoc#file-system-copy-method_about-mtc[{mtc-short} file system copy method] -* xref:../migration_toolkit_for_containers/about-mtc.adoc#snapshot-copy-method_about-mtc[{mtc-short} snapshot copy method] - -include::modules/migration-running-migration-plan-cam.adoc[leveloffset=+2] diff --git a/migration_toolkit_for_containers/modules b/migration_toolkit_for_containers/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/migration_toolkit_for_containers/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/migration_toolkit_for_containers/mtc-release-notes.adoc b/migration_toolkit_for_containers/mtc-release-notes.adoc deleted file mode 100644 index 27843cd26105..000000000000 --- a/migration_toolkit_for_containers/mtc-release-notes.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="mtc-release-notes"] -= Migration Toolkit for Containers release notes -include::_attributes/common-attributes.adoc[] -:context: mtc-release-notes - -toc::[] - -The release notes for {mtc-full} ({mtc-short}) describe new features and enhancements, deprecated features, and known issues. - -The {mtc-short} enables you to migrate application workloads between {product-title} clusters at the granularity of a namespace. - -You can migrate from xref:../migrating_from_ocp_3_to_4/about-migrating-from-3-to-4.adoc[{product-title} 3 to {product-version}] and between {product-title} 4 clusters. - -{mtc-short} provides a web console and an API, based on Kubernetes custom resources, to help you control the migration and minimize application downtime. - -For information on the support policy for {mtc-short}, see link:https://access.redhat.com/support/policy/updates/openshift#app_migration[OpenShift Application and Cluster Migration Solutions], part of the _Red Hat {product-title} Life Cycle Policy_. - -include::modules/migration-mtc-release-notes-1-7-10.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-7.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-6.adoc[leveloffset=+1] -include::modules/migration-mtc-release-notes-1-5.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/network-considerations-mtc.adoc b/migration_toolkit_for_containers/network-considerations-mtc.adoc deleted file mode 100644 index 14dc958da62b..000000000000 --- a/migration_toolkit_for_containers/network-considerations-mtc.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: ASSEMBLY -[id="network-considerations-mtc"] -= Network considerations -include::_attributes/common-attributes.adoc[] -:context: network-considerations-mtc - -toc::[] - -Review the strategies for redirecting your application network traffic after migration. - -[id="dns-considerations_{context}"] -== DNS considerations - -The DNS domain of the target cluster is different from the domain of the source cluster. By default, applications get FQDNs of the target cluster after migration. 
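To review the FQDNs that migrated applications received, you can list the route hosts on the target cluster. This is a minimal sketch; the namespace name is a placeholder.

[source,terminal]
----
$ oc get routes -n <application_namespace> \
    -o custom-columns=NAME:.metadata.name,HOST:.spec.host
----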
- -To preserve the source DNS domain of migrated applications, select one of the two options described below. - -include::modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc[leveloffset=+2] -include::modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* See xref:../security/certificates/replacing-default-ingress-certificate.adoc#replacing-default-ingress[Replacing the default ingress certificate] for more information. - -include::modules/migration-network-traffic-redirection-strategies.adoc[leveloffset=+1] diff --git a/migration_toolkit_for_containers/premigration-checklists-mtc.adoc b/migration_toolkit_for_containers/premigration-checklists-mtc.adoc deleted file mode 100644 index f56524d9775c..000000000000 --- a/migration_toolkit_for_containers/premigration-checklists-mtc.adoc +++ /dev/null @@ -1,76 +0,0 @@ -:_content-type: ASSEMBLY -[id="premigration-checklists-mtc"] -= Premigration checklists -include::_attributes/common-attributes.adoc[] -:context: premigration-checklists-mtc - -toc::[] - -Before you migrate your application workloads with the {mtc-full} ({mtc-short}), review the following checklists. - -[id="cluster-health-checklist_{context}"] -== Cluster health checklist - -* [ ] The clusters meet the minimum hardware requirements for the specific platform and installation method, for example, on xref:../installing/installing_bare_metal/installing-bare-metal.adoc#minimum-resource-requirements_installing-bare-metal[bare metal]. -* [ ] All xref:../migration_toolkit_for_containers/migrating-applications-with-mtc.adoc#migration-prerequisites_migrating-applications-with-mtc[{mtc-short} prerequisites] are met. -* [ ] All nodes have an active {product-title} subscription. -* [ ] You have xref:../support/troubleshooting/verifying-node-health.adoc#verifying-node-health[verified node health]. -* [ ] The xref:../authentication/understanding-identity-provider.adoc#supported-identity-providers[identity provider] is working. -* [ ] The migration network has a minimum throughput of 10 Gbps. -* [ ] The clusters have sufficient resources for migration. -+ -[NOTE] -==== -Clusters require additional memory, CPUs, and storage in order to run a migration on top of normal workloads. Actual resource requirements depend on the number of Kubernetes resources being migrated in a single migration plan. You must test migrations in a non-production environment in order to estimate the resource requirements. -==== - -* [ ] The link:https://access.redhat.com/solutions/4885641[etcd disk performance] of the clusters has been checked with `fio`. 
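For the last item in this checklist, one commonly cited `fio` invocation for measuring etcd `fsync` latency is shown below. The target directory and exact parameters are assumptions; follow the linked Red Hat solution for the recommended procedure and thresholds, and run the test on the disk that backs etcd.

[source,terminal]
----
# Write small blocks with an fsync after each write, similar to etcd WAL behavior.
# The directory path is an assumption; create it on the etcd disk before running the test.
$ fio --rw=write --ioengine=sync --fdatasync=1 \
    --directory=/var/lib/etcd/fio-test --size=22m --bs=2300 --name=etcd-perf
----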
- -[id="source-cluster-checklist_{context}"] -== Source cluster checklist - -* [ ] You have checked for persistent volumes (PVs) with abnormal configurations stuck in a *Terminating* state by running the following command: -+ -[source,terminal] ----- -$ oc get pv ----- - -* [ ] You have checked for pods whose status is other than *Running* or *Completed* by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces | egrep -v 'Running | Completed' ----- - -* [ ] You have checked for pods with a high restart count by running the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces --field-selector=status.phase=Running \ - -o json | jq '.items[]|select(any( .status.containerStatuses[]; \ - .restartCount > 3))|.metadata.name' ----- -+ -Even if the pods are in a *Running* state, a high restart count might indicate underlying problems. - -* [ ] The cluster certificates are valid for the duration of the migration process. -* [ ] You have checked for pending certificate-signing requests by running the following command: -+ -[source,terminal] ----- -$ oc get csr -A | grep pending -i ----- - -* [ ] The registry uses a xref:../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[recommended storage type]. -* [ ] You can read and write images to the registry. -* [ ] The link:https://access.redhat.com/articles/3093761[etcd cluster] is healthy. -* [ ] The xref:../post_installation_configuration/node-tasks.adoc#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-node-tasks[average API server response time] on the source cluster is less than 50 ms. - -[id="target-cluster-checklist_{context}"] -== Target cluster checklist - -* [ ] The cluster has the correct network configuration and permissions to access external services, for example, databases, source code repositories, container image registries, and CI/CD tools. -* [ ] External applications and services that use services provided by the cluster have the correct network configuration and permissions to access the cluster. -* [ ] Internal container image dependencies are met. -* [ ] The target cluster and the replication repository have sufficient storage space. diff --git a/migration_toolkit_for_containers/snippets b/migration_toolkit_for_containers/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/migration_toolkit_for_containers/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/migration_toolkit_for_containers/troubleshooting-mtc.adoc b/migration_toolkit_for_containers/troubleshooting-mtc.adoc deleted file mode 100644 index 611c367a20f1..000000000000 --- a/migration_toolkit_for_containers/troubleshooting-mtc.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: ASSEMBLY -[id="troubleshooting-mtc"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-mtc -:troubleshooting-mtc: -:namespace: openshift-migration -:local-product: {mtc-short} -:must-gather: registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v{mtc-version} - -toc::[] - -This section describes resources for troubleshooting the {mtc-full} ({mtc-short}). - -For known issues, see the xref:../migration_toolkit_for_containers/mtc-release-notes.adoc#mtc-release-notes[{mtc-short} release notes]. 
- -include::modules/migration-mtc-workflow.adoc[leveloffset=+1] - -[discrete] -include::modules/migration-about-mtc-custom-resources.adoc[leveloffset=+2] - -include::modules/migration-mtc-cr-manifests.adoc[leveloffset=+1] - -[id="logs-and-debugging-tools_{context}"] -== Logs and debugging tools - -This section describes logs and debugging tools that you can use for troubleshooting. - -include::modules/migration-viewing-migration-plan-resources.adoc[leveloffset=+2] -include::modules/migration-viewing-migration-plan-log.adoc[leveloffset=+2] -include::modules/migration-using-mig-log-reader.adoc[leveloffset=+2] -include::modules/migration-accessing-performance-metrics.adoc[leveloffset=+2] -include::modules/migration-provided-metrics.adoc[leveloffset=+3] -include::modules/migration-using-must-gather.adoc[leveloffset=+2] -include::modules/migration-debugging-velero-resources.adoc[leveloffset=+2] -include::modules/migration-partial-failure-velero.adoc[leveloffset=+2] -include::modules/migration-using-mtc-crs-for-troubleshooting.adoc[leveloffset=+2] - -[id="common-issues-and-concerns_{context}"] -== Common issues and concerns - -This section describes common issues and concerns that can cause issues during migration. - -include::modules/migration-dvm-error-node-selectors.adoc[leveloffset=+2] -include::modules/migration-error-messages.adoc[leveloffset=+2] - -[id="rolling-back-migration_{context}"] -== Rolling back a migration - -You can roll back a migration by using the {mtc-short} web console or the CLI. - -You can also xref:../migration_toolkit_for_containers/troubleshooting-mtc.adoc#migration-rolling-back-migration-manually_troubleshooting-mtc[roll back a migration manually]. - -include::modules/migration-rolling-back-migration-web-console.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-cli.adoc[leveloffset=+2] -include::modules/migration-rolling-back-migration-manually.adoc[leveloffset=+2] - -[role="_additional-resources"] -[id="additional-resources-uninstalling_{context}"] -[discrete] -=== Additional resources - -* xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-cluster[Deleting Operators from a cluster using the web console] - -:troubleshooting-mtc!: diff --git a/migration_toolkit_for_containers/upgrading-mtc.adoc b/migration_toolkit_for_containers/upgrading-mtc.adoc deleted file mode 100644 index 3d95d8f4981b..000000000000 --- a/migration_toolkit_for_containers/upgrading-mtc.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: ASSEMBLY -[id="upgrading-mtc"] -= Upgrading the Migration Toolkit for Containers -include::_attributes/common-attributes.adoc[] -:context: upgrading-mtc -:upgrading-mtc: - -toc::[] - -You can upgrade the {mtc-full} ({mtc-short}) on {product-title} {product-version} by using Operator Lifecycle Manager. - -You can upgrade {mtc-short} on {product-title} 4.5, and earlier versions, by reinstalling the legacy {mtc-full} Operator. - -[IMPORTANT] -==== -If you are upgrading from {mtc-short} version 1.3, you must perform an additional procedure to update the `MigPlan` custom resource (CR). 
-==== - -include::modules/migration-upgrading-mtc-on-ocp-4.adoc[leveloffset=+1] -include::modules/migration-upgrading-mtc-with-legacy-operator.adoc[leveloffset=+1] -include::modules/migration-upgrading-from-mtc-1-3.adoc[leveloffset=+1] -:upgrading-mtc!: diff --git a/mod_docs_guide/_attributes b/mod_docs_guide/_attributes deleted file mode 120000 index 20cc1dcb77bf..000000000000 --- a/mod_docs_guide/_attributes +++ /dev/null @@ -1 +0,0 @@ -../../_attributes/ \ No newline at end of file diff --git a/mod_docs_guide/getting-started-modular-docs-ocp.adoc b/mod_docs_guide/getting-started-modular-docs-ocp.adoc deleted file mode 100644 index e68883c24bac..000000000000 --- a/mod_docs_guide/getting-started-modular-docs-ocp.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Base the file name and the ID on the assembly title. For example: -// * file name: my-assembly-a.adoc -// * ID: [id="my-assembly-a"] -// * Title: = My assembly A - -// Choose a context that is not too long and encapsulates what this assembly or -// module is about. Context MUST be unique. - -// Not sure if this guide is correct with the current guidelines anymore. Need to validate. - -[id="getting-started-modular-docs-ocp"] -= Getting started with modular docs on OpenShift -include::_attributes/common-attributes.adoc[] -:context: assembly-gsg - -toc::[] - -This is the modular docs getting started guide for the OpenShift documentation -team and anyone who might be contributing content to it. - -This guide has been written using the format of the modular docs -initiative. - -== Prerequisites - -* You have read through and familiarized yourself with the -link:https://redhat-documentation.github.io/modular-docs[Red Hat CCS modular docs guide]. -* You have reviewed -xref:../mod_docs_guide/mod-docs-conventions-ocp.adoc#mod-docs-ocp-references[the Modular Docs OpenShift Conventions]. -* [Optional] You have received the modular docs training. -* You know how to use Git. - -include::modules/creating-your-first-content.adoc[leveloffset=+1] - -// include::modules/mod-docs-ocp-conventions.adoc[leveloffset+=1] diff --git a/mod_docs_guide/images b/mod_docs_guide/images deleted file mode 120000 index 5e67573196d8..000000000000 --- a/mod_docs_guide/images +++ /dev/null @@ -1 +0,0 @@ -../images \ No newline at end of file diff --git a/mod_docs_guide/mod-docs-conventions-ocp.adoc b/mod_docs_guide/mod-docs-conventions-ocp.adoc deleted file mode 100644 index 6fb7008ef877..000000000000 --- a/mod_docs_guide/mod-docs-conventions-ocp.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -// Base the file name and the ID on the assembly title. For example: -// * file name: my-assembly-a.adoc -// * ID: [id="my-assembly-a"] -// * Title: = My assembly A - -// Choose a context that is not too long and encapsulates what this assembly or -// module is about. Context MUST be unique across the docs set. - -[id="mod-docs-ocp-references"] -= Modular docs OpenShift conventions -include::_attributes/common-attributes.adoc[] -:context: assembly-ocp-conventions - -toc::[] - -Before you contribute to the OpenShift docs repo, review the following modular docs conventions. 
- -include::modules/mod-docs-ocp-conventions.adoc[leveloffset=+1] diff --git a/mod_docs_guide/modules b/mod_docs_guide/modules deleted file mode 120000 index 464b823aca16..000000000000 --- a/mod_docs_guide/modules +++ /dev/null @@ -1 +0,0 @@ -../modules \ No newline at end of file diff --git a/mod_docs_guide/snippets b/mod_docs_guide/snippets deleted file mode 120000 index 9f5bc7e4dde0..000000000000 --- a/mod_docs_guide/snippets +++ /dev/null @@ -1 +0,0 @@ -../snippets \ No newline at end of file diff --git a/modules/about-administrator-perspective.adoc b/modules/about-administrator-perspective.adoc deleted file mode 100644 index f2a4c30e6501..000000000000 --- a/modules/about-administrator-perspective.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web-console-overview.adoc - -:_content-type: CONCEPT -[id="about-administrator-perspective_{context}"] -= About the Administrator perspective in the web console - -The *Administrator* perspective enables you to view the cluster inventory, capacity, general and specific utilization information, and the stream of important events, all of which help you to simplify planning and troubleshooting tasks. Both project administrators and cluster administrators can view the *Administrator* perspective. - -Cluster administrators can also open an embedded command line terminal instance with the web terminal Operator in {product-title} 4.7 and later. - -[NOTE] -==== -The default web console perspective that is shown depends on the role of the user. The *Administrator* perspective is displayed by default if the user is recognized as an administrator. -==== - -The *Administrator* perspective provides workflows specific to administrator use cases, such as the ability to: - -* Manage workload, storage, networking, and cluster settings. -* Install and manage Operators using the Operator Hub. -* Add identity providers that allow users to log in and manage user access through roles and role bindings. -* View and manage a variety of advanced settings such as cluster updates, partial cluster updates, cluster Operators, custom resource definitions (CRDs), role bindings, and resource quotas. -* Access and manage monitoring features such as metrics, alerts, and monitoring dashboards. -* View and manage logging, metrics, and high-status information about the cluster. -* Visually interact with applications, components, and services associated with the *Administrator* perspective in {product-title}. diff --git a/modules/about-bare-metal-hosts-and-nodes.adoc b/modules/about-bare-metal-hosts-and-nodes.adoc deleted file mode 100644 index abfa0610db47..000000000000 --- a/modules/about-bare-metal-hosts-and-nodes.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: CONCEPT -[id="about-bare-metal-hosts-and-nodes_{context}"] -= About bare metal hosts and nodes - -To provision a {op-system-first} bare metal host as a node in your cluster, first create a `MachineSet` custom resource (CR) object that corresponds to the bare metal host hardware. Bare metal host compute machine sets describe infrastructure components specific to your configuration. You apply specific Kubernetes labels to these compute machine sets and then update the infrastructure components to run on only those machines. 
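The next paragraph refers to the `metal3.io/autoscale-to-hosts` annotation. As a hedged sketch, the annotation is typically applied to the compute machine set with a command such as the following; the machine set name is a placeholder, and `openshift-machine-api` is assumed to be the machine API namespace.

[source,terminal]
----
$ oc annotate machineset <machine_set_name> -n openshift-machine-api \
    'metal3.io/autoscale-to-hosts=<any_value>'
----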
- -`Machine` CR's are created automatically when you scale up the relevant `MachineSet` containing a `metal3.io/autoscale-to-hosts` annotation. {product-title} uses `Machine` CR's to provision the bare metal node that corresponds to the host as specified in the `MachineSet` CR. diff --git a/modules/about-cli-profiles-switch.adoc b/modules/about-cli-profiles-switch.adoc deleted file mode 100644 index 9dad337a6cf1..000000000000 --- a/modules/about-cli-profiles-switch.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/managing-cli-profiles.adoc - -:_content-type: CONCEPT -[id="about-switches-between-cli-profiles_{context}"] -= About switches between CLI profiles - -Contexts allow you to easily switch between multiple users across multiple {product-title} servers, or clusters, when using CLI operations. Nicknames make managing CLI configurations easier by providing short-hand references to contexts, user credentials, and cluster details. -After logging in with the CLI for the first time, {product-title} creates a `~/.kube/config` file if one does not already exist. As more authentication and connection details are provided to the CLI, either automatically during an `oc login` operation or by manually configuring CLI profiles, the updated information is stored in the configuration file: - -.CLI config file - -[source,yaml] ----- -apiVersion: v1 -clusters: <1> -- cluster: - insecure-skip-tls-verify: true - server: https://openshift1.example.com:8443 - name: openshift1.example.com:8443 -- cluster: - insecure-skip-tls-verify: true - server: https://openshift2.example.com:8443 - name: openshift2.example.com:8443 -contexts: <2> -- context: - cluster: openshift1.example.com:8443 - namespace: alice-project - user: alice/openshift1.example.com:8443 - name: alice-project/openshift1.example.com:8443/alice -- context: - cluster: openshift1.example.com:8443 - namespace: joe-project - user: alice/openshift1.example.com:8443 - name: joe-project/openshift1/alice -current-context: joe-project/openshift1.example.com:8443/alice <3> -kind: Config -preferences: {} -users: <4> -- name: alice/openshift1.example.com:8443 - user: - token: xZHd2piv5_9vQrg-SKXRJ2Dsl9SceNJdhNTljEKTb8k ----- - -<1> The `clusters` section defines connection details for {product-title} clusters, including the address for their master server. In this example, one cluster is nicknamed `openshift1.example.com:8443` and another is nicknamed `openshift2.example.com:8443`. -<2> This `contexts` section defines two contexts: one nicknamed `alice-project/openshift1.example.com:8443/alice`, using the `alice-project` project, `openshift1.example.com:8443` cluster, and `alice` user, and another nicknamed `joe-project/openshift1.example.com:8443/alice`, using the `joe-project` project, `openshift1.example.com:8443` cluster and `alice` user. -<3> The `current-context` parameter shows that the `joe-project/openshift1.example.com:8443/alice` context is currently in use, allowing the `alice` user to work in the `joe-project` project on the `openshift1.example.com:8443` cluster. -<4> The `users` section defines user credentials. In this example, the user nickname `alice/openshift1.example.com:8443` uses an access token. - -The CLI can support multiple configuration files which are loaded at runtime and merged together along with any override options specified from the command line. 
After you are logged in, you can use the `oc status` or `oc project` command to verify your current working environment: - -.Verify the current working environment - -[source,terminal,options="nowrap"] ----- -$ oc status ----- - -.Example output -[source,terminal] ----- -oc status -In project Joe's Project (joe-project) - -service database (172.30.43.12:5434 -> 3306) - database deploys docker.io/openshift/mysql-55-centos7:latest - #1 deployed 25 minutes ago - 1 pod - -service frontend (172.30.159.137:5432 -> 8080) - frontend deploys origin-ruby-sample:latest <- - builds https://github.com/openshift/ruby-hello-world with joe-project/ruby-20-centos7:latest - #1 deployed 22 minutes ago - 2 pods - -To see more information about a service or deployment, use 'oc describe service ' or 'oc describe dc '. -You can use 'oc get all' to see lists of each of the types described in this example. ----- - -.List the current project -[source,terminal,options="nowrap"] ----- -$ oc project ----- - -.Example output -[source,terminal] ----- -Using project "joe-project" from context named "joe-project/openshift1.example.com:8443/alice" on server "https://openshift1.example.com:8443". ----- - -You can run the `oc login` command again and supply the required information during the interactive process, to log in using any other combination of user credentials and cluster details. A context is constructed based on the supplied information if one does not already exist. If you are already logged in and want to switch to another project the current user already has access to, use the `oc project` command and enter the name of the project: - -[source,terminal,options="nowrap"] ----- -$ oc project alice-project ----- - -.Example output -[source,terminal] ----- -Now using project "alice-project" on server "https://openshift1.example.com:8443". ----- - -At any time, you can use the `oc config view` command to view your current CLI configuration, as seen in the output. Additional CLI configuration commands are also available for more advanced usage. - -[NOTE] -==== -If you have access to administrator credentials but are no longer logged in as the default system user `system:admin`, you can log back in as this user at any time as long as the credentials are still present in your CLI config file. The following command logs in and switches to the default project: - -[source,terminal] ----- -$ oc login -u system:admin -n default ----- -==== diff --git a/modules/about-crio.adoc b/modules/about-crio.adoc deleted file mode 100644 index 221569317b1c..000000000000 --- a/modules/about-crio.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-crio-issues.adoc - -:_content-type: CONCEPT -[id="about-crio_{context}"] -= About CRI-O container runtime engine - -include::snippets/about-crio-snippet.adoc[] - -When container runtime issues occur, verify the status of the `crio` systemd service on each node. Gather CRI-O journald unit logs from nodes that have container runtime issues. 
diff --git a/modules/about-developer-perspective.adoc b/modules/about-developer-perspective.adoc deleted file mode 100644 index 6566e4d23db1..000000000000 --- a/modules/about-developer-perspective.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web-console-overview.adoc - -:_content-type: CONCEPT -[id="about-developer-perspective_{context}"] -= About the Developer perspective in the web console - -The *Developer* perspective offers several built-in ways to deploy applications, services, and databases. In the *Developer* perspective, you can: - -* View real-time visualization of rolling and recreating rollouts on the component. -* View the application status, resource utilization, project event streaming, and quota consumption. -* Share your project with others. -* Troubleshoot problems with your applications by running Prometheus Query Language (PromQL) queries on your project and examining the metrics visualized on a plot. The metrics provide information about the state of a cluster and any user-defined workloads that you are monitoring. - -Cluster administrators can also open an embedded command line terminal instance in the web console in {product-title} 4.7 and later. - -[NOTE] -==== -The default web console perspective that is shown depends on the role of the user. The *Developer* perspective is displayed by default if the user is recognised as a developer. -==== - -The *Developer* perspective provides workflows specific to developer use cases, such as the ability to: - -* Create and deploy applications on {product-title} by importing existing codebases, images, and container files. -* Visually interact with applications, components, and services associated with them within a project and monitor their deployment and build status. -* Group components within an application and connect the components within and across applications. -* Integrate serverless capabilities (Technology Preview). -* Create workspaces to edit your application code using Eclipse Che. - -You can use the *Topology* view to display applications, components, and workloads of your project. If you have no workloads in the project, the *Topology* view will show some links to create or import them. You can also use the *Quick Search* to import components directly. - -.Additional Resources -See link:https://docs.openshift.com/container-platform/4.13/applications/odc-viewing-application-composition-using-topology-view.html[Viewing application composition using the Topology] view for more information on using the *Topology* view in *Developer* perspective. diff --git a/modules/about-etcd-encryption.adoc b/modules/about-etcd-encryption.adoc deleted file mode 100644 index bd7cd292fb4e..000000000000 --- a/modules/about-etcd-encryption.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="about-etcd_{context}"] -= About etcd encryption - -By default, etcd data is not encrypted in {product-title}. You can enable etcd encryption for your cluster to provide an additional layer of data security. For example, it can help protect the loss of sensitive data if an etcd backup is exposed to the incorrect parties. 
- -When you enable etcd encryption, the following OpenShift API server and Kubernetes API server resources are encrypted: - -* Secrets -* Config maps -* Routes -* OAuth access tokens -* OAuth authorize tokens - -When you enable etcd encryption, encryption keys are created. You must have these keys to restore from an etcd backup. - -[NOTE] -==== -Etcd encryption only encrypts values, not keys. Resource types, namespaces, and object names are unencrypted. - -If etcd encryption is enabled during a backup, the `__static_kuberesources_.tar.gz__` file contains the encryption keys for the etcd snapshot. For security reasons, store this file separately from the etcd snapshot. However, this file is required to restore a previous state of etcd from the respective etcd snapshot. -==== diff --git a/modules/about-installing-oadp-on-multiple-namespaces.adoc b/modules/about-installing-oadp-on-multiple-namespaces.adoc deleted file mode 100644 index a45f74871bd9..000000000000 --- a/modules/about-installing-oadp-on-multiple-namespaces.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/installing/about-installing-oadp.adoc - - -:_content-type: CONCEPT -[id="about-installing-oadp-on-multiple-namespaces_{context}"] -= Installation of OADP on multiple namespaces - -You can install OADP into multiple namespaces on the same cluster so that multiple project owners can manage their own OADP instance. This use case has been validated with Restic and CSI. - -You install each instance of OADP as specified by the per-platform procedures contained in this document with the following additional requirements: - -* All deployments of OADP on the same cluster must be the same version, for example, 1.1.4. Installing different versions of OADP on the same cluster is *not* supported. -* Each individual deployment of OADP must have a unique set of credentials and a unique `BackupStorageLocation` configuration. -* By default, each OADP deployment has cluster-level access across namespaces. {product-title} administrators need to review security and RBAC settings carefully and make any necessary changes to them to ensure that each OADP instance has the correct permissions. - - - - diff --git a/modules/about-manually-maintained-credentials-upgrade.adoc b/modules/about-manually-maintained-credentials-upgrade.adoc deleted file mode 100644 index 3abd90f3a528..000000000000 --- a/modules/about-manually-maintained-credentials-upgrade.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: CONCEPT - -[id="about-manually-maintained-credentials-upgrade_{context}"] -= Update requirements for clusters with manually maintained credentials - -Before you update a cluster that uses manually maintained credentials with the Cloud Credential Operator (CCO), you must update the cloud provider resources for the new release. - -If the cloud credential management for your cluster was configured using the CCO utility (`ccoctl`), use the `ccoctl` utility to update the resources. Clusters that were configured to use manual mode without the `ccoctl` utility require manual updates for the resources. - -After updating the cloud provider resources, you must update the `upgradeable-to` annotation for the cluster to indicate that it is ready to update. 
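As a minimal sketch, assuming a hypothetical target release, the `upgradeable-to` annotation is set on the `CloudCredential` cluster resource along the following lines:

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: CloudCredential
metadata:
  name: cluster
  annotations:
    cloudcredential.openshift.io/upgradeable-to: "4.14" # hypothetical target version for the update
spec:
  credentialsMode: Manual # clusters covered by this section use manual mode
----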
- -[NOTE] -==== -The process to update the cloud provider resources and the `upgradeable-to` annotation can only be completed by using command line tools. -==== - -[id="cco-platform-options_{context}"] -== Cloud credential configuration options and update requirements by platform type - -Some platforms only support using the CCO in one mode. For clusters that are installed on those platforms, the platform type determines the credentials update requirements. - -For platforms that support using the CCO in multiple modes, you must determine which mode the cluster is configured to use and take the required actions for that configuration. - -.Credentials update requirements by platform type -image::334_OpenShift_cluster_updating_and_CCO_workflows_0523_4.11_B.png[Decision tree showing the possible update paths for your cluster depending on the configured CCO credentials mode.] - -{rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere:: -These platforms do not support using the CCO in manual mode. Clusters on these platforms handle changes in cloud provider resources automatically and do not require an update to the `upgradeable-to` annotation. -+ -Administrators of clusters on these platforms should skip the manually maintained credentials section of the update process. - -{alibaba}, IBM Cloud, and Nutanix:: -Clusters installed on these platforms are configured using the `ccoctl` utility. -+ -Administrators of clusters on these platforms must take the following actions: -+ -. Configure the `ccoctl` utility for the new release. -. Use the `ccoctl` utility to update the cloud provider resources. -. Indicate that the cluster is ready to update with the `upgradeable-to` annotation. - -Microsoft Azure Stack Hub:: -These clusters use manual mode with long-lived credentials and do not use the `ccoctl` utility. -+ -Administrators of clusters on these platforms must take the following actions: -+ -. Manually update the cloud provider resources for the new release. -. Indicate that the cluster is ready to update with the `upgradeable-to` annotation. - -Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP):: -Clusters installed on these platforms support multiple CCO modes. -+ -The required update process depends on the mode that the cluster is configured to use. If you are not sure what mode the CCO is configured to use on your cluster, you can use the web console or the CLI to determine this information. diff --git a/modules/about-oadp-update-channels.adoc b/modules/about-oadp-update-channels.adoc deleted file mode 100644 index 1a46ba12cfe3..000000000000 --- a/modules/about-oadp-update-channels.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/installing/about-installing-oadp.adoc - - -:_content-type: CONCEPT -[id="about-oadp-update-channels_{context}"] -= About OADP update channels - -When you install an OADP Operator, you choose an _update channel_. This channel determines which upgrades to the OADP Operator and to Velero you receive. You can switch channels at any time. - -The following update channels are available: - -* The *stable* channel is now deprecated. The *stable* channel contains the patches (z-stream updates) of OADP `ClusterServiceVersion` for `oadp.v1.1.z` and older versions from `oadp.v1.0.z`. - -* The *stable-1.0* channel contains `oadp.v1.0._z_`, the most recent OADP 1.0 `ClusterServiceVersion`. 
- -* The *stable-1.1* channel contains `oadp.v1.1._z_`, the most recent OADP 1.1 `ClusterServiceVersion`. - -* The *stable-1.2* channel contains `oadp.v1.2._z_`, the most recent OADP 1.2 `ClusterServiceVersion`. - -*Which update channel is right for you?* - -* The *stable* channel is now deprecated. If you are already using the stable channel, you will continue to get updates from `oadp.v1.1._z_`. - -* Choose the *stable-1._y_* update channel to install OADP 1._y_ and to continue receiving patches for it. If you choose this channel, you will receive all z-stream patches for version 1._y_._z_. - -*When must you switch update channels?* - -* If you have OADP 1._y_ installed, and you want to receive patches only for that y-stream, you must switch from the *stable* update channel to the *stable-1._y_* update channel. You will then receive all z-stream patches for version 1._y_._z_. - -* If you have OADP 1.0 installed, want to upgrade to OADP 1.1, and then receive patches only for OADP 1.1, you must switch from the *stable-1.0* update channel to the *stable-1.1* update channel. You will then receive all z-stream patches for version 1.1._z_. - -* If you have OADP 1._y_ installed, with _y_ greater than 0, and want to switch to OADP 1.0, you must _uninstall_ your OADP Operator and then reinstall it using the *stable-1.0* update channel. You will then receive all z-stream patches for version 1.0._z_. - -[NOTE] -==== -You cannot switch from OADP 1._y_ to OADP 1.0 by switching update channels. You must uninstall the Operator and then reinstall it. -==== diff --git a/modules/about-project-creation.adoc b/modules/about-project-creation.adoc deleted file mode 100644 index f8fcbae54cc7..000000000000 --- a/modules/about-project-creation.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: CONCEPT -[id="about-project-creation_{context}"] -= About project creation - -The {product-title} API server automatically provisions new projects based on -the project template that is identified by the `projectRequestTemplate` -parameter in the cluster's project configuration resource. If the parameter is -not defined, the API server creates a default template that creates a project -with the requested name, and assigns the requesting user to the `admin` role for -that project. - -When a project request is submitted, the API substitutes the following -parameters into the template: - -.Default project template parameters -[cols="4,8",options="header"] -|=== -|Parameter |Description - -|`PROJECT_NAME` -|The name of the project. Required. - -|`PROJECT_DISPLAYNAME` -|The display name of the project. May be empty. - -|`PROJECT_DESCRIPTION` -|The description of the project. May be empty. - -|`PROJECT_ADMIN_USER` -|The user name of the administrating user. - -|`PROJECT_REQUESTING_USER` -|The user name of the requesting user. -|=== - -Access to the API is granted to developers with the `self-provisioner` role and -the `self-provisioners` cluster role binding. This role is available to all -authenticated developers by default. 
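To make the parameter substitution concrete, the following is a stripped-down, hypothetical project template fragment. A real template, such as one generated with `oc adm create-bootstrap-project-template`, typically contains additional objects such as role bindings:

[source,yaml]
----
apiVersion: template.openshift.io/v1
kind: Template
metadata:
  name: project-request
  namespace: openshift-config
objects:
- apiVersion: project.openshift.io/v1
  kind: Project
  metadata:
    name: ${PROJECT_NAME} # substituted with the requested project name
    annotations:
      openshift.io/display-name: ${PROJECT_DISPLAYNAME}
      openshift.io/description: ${PROJECT_DESCRIPTION}
      openshift.io/requester: ${PROJECT_REQUESTING_USER}
parameters:
- name: PROJECT_NAME
- name: PROJECT_DISPLAYNAME
- name: PROJECT_DESCRIPTION
- name: PROJECT_ADMIN_USER
- name: PROJECT_REQUESTING_USER
----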
diff --git a/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc b/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc deleted file mode 100644 index b69932c9f83c..000000000000 --- a/modules/about-scaling-a-user-provisioned-installation-with-the-bare-metal-operator.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc - -:_content-type: CONCEPT - -[id="about-scaling-a-user-provisioned-cluster-with-the-bare-metal-operator_{context}"] -= About scaling a user-provisioned cluster with the Bare Metal Operator - -You can scale user-provisioned infrastructure clusters by using the Bare Metal Operator (BMO) and other metal3 components. User-provisioned infrastructure installations do not feature the Machine API Operator. The Machine API Operator typically manages the lifecycle of bare-metal hosts in a cluster. However, it is possible to use the BMO and other metal3 components to scale nodes in user-provisioned clusters without requiring the Machine API Operator. diff --git a/modules/about-sosreport.adoc b/modules/about-sosreport.adoc deleted file mode 100644 index 6d819a84a840..000000000000 --- a/modules/about-sosreport.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - -:_content-type: CONCEPT -[id="about-sosreport_{context}"] -= About sosreport - -`sosreport` is a tool that collects configuration details, system information, and diagnostic data from {op-system-base-full} and {op-system-first} systems. `sosreport` provides a standardized way to collect diagnostic information relating to a node, which can then be provided to Red Hat Support for issue diagnosis. - -In some support interactions, Red Hat Support may ask you to collect a `sosreport` archive for a specific {product-title} node. For example, it might sometimes be necessary to review system logs or other node-specific data that is not included within the output of `oc adm must-gather`. diff --git a/modules/about-toolbox.adoc b/modules/about-toolbox.adoc deleted file mode 100644 index 486ab0e6950f..000000000000 --- a/modules/about-toolbox.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - -:_content-type: CONCEPT -[id="about-toolbox_{context}"] -= About `toolbox` - -ifndef::openshift-origin[] -`toolbox` is a tool that starts a container on a {op-system-first} system. The tool is primarily used to start a container that includes the required binaries and plugins that are needed to run commands such as `sosreport` and `redhat-support-tool`. - -The primary purpose for a `toolbox` container is to gather diagnostic information and to provide it to Red Hat Support. However, if additional diagnostic tools are required, you can add RPM packages or run an image that is an alternative to the standard support tools image. -endif::openshift-origin[] - -ifdef::openshift-origin[] -`toolbox` is a tool that starts a container on a {op-system-first} system. The tool is primarily used to start a container that includes the required binaries and plugins that are needed to run your favorite debugging or admin tools. 
-endif::openshift-origin[] diff --git a/modules/about-using-gpu-operator.adoc b/modules/about-using-gpu-operator.adoc deleted file mode 100644 index e9879d71049a..000000000000 --- a/modules/about-using-gpu-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/virtual_machines/advanced_vm_management/virt-configuring-mediated-devices.adoc - -:_content-type: CONCEPT -[id="about-using-nvidia-gpu_{context}"] -= About using the NVIDIA GPU Operator - -The NVIDIA GPU Operator manages NVIDIA GPU resources in an {product-title} cluster and automates tasks related to bootstrapping GPU nodes. -Since the GPU is a special resource in the cluster, you must install some components before deploying application workloads onto the GPU. -These components include the NVIDIA drivers which enables compute unified device architecture (CUDA), Kubernetes device plugin, container runtime and others such as automatic node labelling, monitoring and more. -[NOTE] -==== -The NVIDIA GPU Operator is supported only by NVIDIA. For more information about obtaining support from NVIDIA, see link:https://access.redhat.com/solutions/5174941[Obtaining Support from NVIDIA]. -==== - -There are two ways to enable GPUs with {product-title} {VirtProductName}: the {product-title}-native way described here and by using the NVIDIA GPU Operator. - -The NVIDIA GPU Operator is a Kubernetes Operator that enables {product-title} {VirtProductName} to expose GPUs to virtualized workloads running on {product-title}. -It allows users to easily provision and manage GPU-enabled virtual machines, providing them with the ability to run complex artificial intelligence/machine learning (AI/ML) workloads on the same platform as their other workloads. -It also provides an easy way to scale the GPU capacity of their infrastructure, allowing for rapid growth of GPU-based workloads. - -For more information about using the NVIDIA GPU Operator to provision worker nodes for running GPU-accelerated VMs, see link:https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/openshift/openshift-virtualization.html[NVIDIA GPU Operator with OpenShift Virtualization]. diff --git a/modules/about-ztp.adoc b/modules/about-ztp.adoc deleted file mode 100644 index f618e11dd7b3..000000000000 --- a/modules/about-ztp.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc - -:_content-type: CONCEPT -[id="about-ztp_{context}"] -= Using {ztp} to provision clusters at the network far edge - -{rh-rhacm-first} manages clusters in a hub-and-spoke architecture, where a single hub cluster manages many spoke clusters. Hub clusters running {rh-rhacm} provision and deploy the managed clusters by using {ztp-first} and the assisted service that is deployed when you install {rh-rhacm}. - -The assisted service handles provisioning of {product-title} on single node clusters, three-node clusters, or standard clusters running on bare metal. - -A high-level overview of using {ztp} to provision and maintain bare-metal hosts with {product-title} is as follows: - -* A hub cluster running {rh-rhacm} manages an {product-registry} that mirrors the {product-title} release images. {rh-rhacm} uses the {product-registry} to provision the managed clusters. - -* You manage the bare-metal hosts in a YAML format inventory file, versioned in a Git repository. 
- -* You make the hosts ready for provisioning as managed clusters, and use {rh-rhacm} and the assisted service to install the bare-metal hosts on site. - -Installing and deploying the clusters is a two-stage process, involving an initial installation phase, and a subsequent configuration phase. The following diagram illustrates this workflow: - -image::217_OpenShift_Zero_Touch_Provisioning_updates_1022_2.png[Using GitOps and {ztp} to install and deploy managed clusters] diff --git a/modules/access-cluster.adoc b/modules/access-cluster.adoc deleted file mode 100644 index 233466d4b87a..000000000000 --- a/modules/access-cluster.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="access-cluster_{context}"] -= Accessing your cluster - -After you have configured your identity providers, users can access the cluster from {cluster-manager-first}. - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to access. - -. Click *Open Console*. - -. Click on your identity provider and provide your credentials to log into the cluster. - -. Click *Open console* to open the web console for your cluster. - -. Click on your identity provider and provide your credentials to log in to the cluster. Complete any authorization requests that are presented by your provider. diff --git a/modules/access-service.adoc b/modules/access-service.adoc deleted file mode 100644 index a48382e04b52..000000000000 --- a/modules/access-service.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="access-service_{context}"] - -= Accessing installed add-on services on your cluster - -After you successfully install an add-on service on your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster, you can access the service by using the OpenShift web console. - -.Prerequisites - -* You have successfully installed a service on your {product-title} cluster. - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Select the cluster with an installed service you want to access. - -. Navigate to the *Add-ons* tab, and locate the installed service that you want to access. - -. Click *View on console* from the service option to open the OpenShift web console. - -. Enter your credentials to log in to the OpenShift web console. - -. Click the *Red Hat Applications* menu by clicking the three-by-three matrix icon in the upper right corner of the main screen. - -. Select the service you want to open from the drop-down menu. A new browser tab opens and you are required to authenticate through Red Hat Single Sign-On. - -You have now accessed your service and can begin using it. 
diff --git a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc b/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc deleted file mode 100644 index 6952da48f917..000000000000 --- a/modules/accessing-an-example-cluster-node-tuning-operator-specification.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="accessing-an-example-node-tuning-operator-specification_{context}"] -= Accessing an example Node Tuning Operator specification - -Use this process to access an example Node Tuning Operator specification. - -.Procedure - - * Run the following command to access an example Node Tuning Operator specification: -+ -[source,terminal] ----- -$ oc get Tuned/default -o yaml -n openshift-cluster-node-tuning-operator ----- - -The default CR is meant for delivering standard node-level tuning for the {product-title} platform and it can only be modified to set the Operator Management state. Any other custom changes to the default CR will be overwritten by the Operator. For custom tuning, create your own Tuned CRs. Newly created CRs will be combined with the default CR and custom tuning applied to {product-title} nodes based on node or pod labels and profile priorities. - -[WARNING] -==== -While in certain situations the support for pod labels can be a convenient way of automatically delivering required tuning, this practice is discouraged and strongly advised against, especially in large-scale clusters. The default Tuned CR ships without pod label matching. If a custom profile is created with pod label matching, then the functionality will be enabled at that time. The pod label functionality will be deprecated in future versions of the Node Tuning Operator. -==== diff --git a/modules/accessing-hosts-on-aws.adoc b/modules/accessing-hosts-on-aws.adoc deleted file mode 100644 index bf9a54c876f2..000000000000 --- a/modules/accessing-hosts-on-aws.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/accessing-hosts.adoc - -:_content-type: PROCEDURE -[id="accessing-hosts-on-aws_{context}"] -= Accessing hosts on Amazon Web Services in an installer-provisioned infrastructure cluster - -The {product-title} installer does not create any public IP addresses for any of -the Amazon Elastic Compute Cloud (Amazon EC2) instances that it provisions for -your {product-title} cluster. To be able to SSH to your {product-title} -hosts, you must follow this procedure. - -.Procedure - -. Create a security group that allows SSH access into the virtual private cloud -(VPC) created by the `openshift-install` command. - -. Create an Amazon EC2 instance on one of the public subnets the installer -created. - -. Associate a public IP address with the Amazon EC2 instance that you created. -+ -Unlike with the {product-title} installation, you should associate the Amazon -EC2 instance you created with an SSH keypair. It does not matter what operating -system you choose for this instance, as it will simply serve as an SSH bastion -to bridge the internet into your {product-title} cluster's VPC. The Amazon -Machine Image (AMI) you use does matter. With {op-system-first}, -for example, you can provide keys via Ignition, like the installer does. - -. 
After you provisioned your Amazon EC2 instance and can SSH into it, you must add -the SSH key that you associated with your {product-title} installation. This key -can be different from the key for the bastion instance, but does not have to be. -+ -[NOTE] -==== -Direct SSH access is only recommended for disaster recovery. When the Kubernetes -API is responsive, run privileged pods instead. -==== - -. Run `oc get nodes`, inspect the output, and choose one of the nodes that is a -master. The hostname looks similar to `ip-10-0-1-163.ec2.internal`. - -. From the bastion SSH host you manually deployed into Amazon EC2, SSH into that -control plane host. Ensure that you use the same SSH key you specified during the -installation: -+ -[source,terminal] ----- -$ ssh -i core@ ----- diff --git a/modules/accessing-metrics-outside-cluster.adoc b/modules/accessing-metrics-outside-cluster.adoc deleted file mode 100644 index ba88f496261b..000000000000 --- a/modules/accessing-metrics-outside-cluster.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="accessing-metrics-from-outside-cluster_{context}"] -= Accessing metrics from outside the cluster for custom applications - -Learn how to query Prometheus statistics from the command line when monitoring your own services. You can access monitoring data from outside the cluster with the `thanos-querier` route. - -.Prerequisites - -* You deployed your own service, following the _Enabling monitoring for user-defined projects_ procedure. - -.Procedure - -. Extract a token to connect to Prometheus: -+ -[source,terminal] ----- -$ SECRET=`oc get secret -n openshift-user-workload-monitoring | grep prometheus-user-workload-token | head -n 1 | awk '{print $1 }'` ----- -+ -[source,terminal] ----- -$ TOKEN=`echo $(oc get secret $SECRET -n openshift-user-workload-monitoring -o json | jq -r '.data.token') | base64 -d` ----- - -. Extract your route host: -+ -[source,terminal] ----- -$ THANOS_QUERIER_HOST=`oc get route thanos-querier -n openshift-monitoring -o json | jq -r '.spec.host'` ----- - -. Query the metrics of your own services in the command line. For example: -+ -[source,terminal] ----- -$ NAMESPACE=ns1 ----- -+ -[source,terminal] ----- -$ curl -X GET -kG "https://$THANOS_QUERIER_HOST/api/v1/query?" --data-urlencode "query=up{namespace='$NAMESPACE'}" -H "Authorization: Bearer $TOKEN" ----- -+ -The output will show you the duration that your application pods have been up. -+ -.Example output -[source,terminal] ----- -{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"up","endpoint":"web","instance":"10.129.0.46:8080","job":"prometheus-example-app","namespace":"ns1","pod":"prometheus-example-app-68d47c4fb6-jztp2","service":"prometheus-example-app"},"value":[1591881154.748,"1"]}]}} ----- diff --git a/modules/accessing-running-pods.adoc b/modules/accessing-running-pods.adoc deleted file mode 100644 index 5513024f44b0..000000000000 --- a/modules/accessing-running-pods.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-running-pods_{context}"] -= Accessing running pods - -You can review running pods dynamically by opening a shell inside a pod or by gaining network access through port forwarding. 
- -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Switch into the project that contains the pod you would like to access. This is necessary because the `oc rsh` command does not accept the `-n` namespace option: -+ -[source,terminal] ----- -$ oc project ----- - -. Start a remote shell into a pod: -+ -[source,terminal] ----- -$ oc rsh <1> ----- -<1> If a pod has multiple containers, `oc rsh` defaults to the first container unless `-c ` is specified. - -. Start a remote shell into a specific container within a pod: -+ -[source,terminal] ----- -$ oc rsh -c pod/ ----- - -. Create a port forwarding session to a port on a pod: -+ -[source,terminal] ----- -$ oc port-forward : <1> ----- -<1> Enter `Ctrl+C` to cancel the port forwarding session. diff --git a/modules/accessing-windows-node-using-rdp.adoc b/modules/accessing-windows-node-using-rdp.adoc deleted file mode 100644 index 136d9cb95ee3..000000000000 --- a/modules/accessing-windows-node-using-rdp.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-windows-node-using-rdp_{context}"] -= Accessing a Windows node using RDP - -You can access a Windows node by using a Remote Desktop Protocol (RDP). - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. -* You have added the key used in the `cloud-private-key` secret and the key used when creating the cluster to the ssh-agent. For security reasons, remember to remove the keys from the ssh-agent after use. -* You have connected to the Windows node link:https://access.redhat.com/solutions/4073041[using an `ssh-bastion` pod]. - -.Procedure - -. Run the following command to set up an SSH tunnel: -+ -[source,terminal] ----- -$ ssh -L 2020::3389 \ <1> - core@$(oc get service --all-namespaces -l run=ssh-bastion -o go-template="{{ with (index (index .items 0).status.loadBalancer.ingress 0) }}{{ or .hostname .ip }}{{end}}") ----- -<1> Specify the internal IP address of the node, which can be discovered by running the following command: -+ -[source,terminal] ----- -$ oc get nodes -o jsonpath={.status.addresses[?\(@.type==\"InternalIP\"\)].address} ----- - -. From within the resulting shell, SSH into the Windows node and run the following command to create a password for the user: -+ -[source,terminal] ----- -C:\> net user * <1> ----- -<1> Specify the cloud provider user name, such as `Administrator` for AWS or `capi` for Azure. - -You can now remotely access the Windows node at `localhost:2020` using an RDP client. diff --git a/modules/accessing-windows-node-using-ssh.adoc b/modules/accessing-windows-node-using-ssh.adoc deleted file mode 100644 index 234e4fac65ff..000000000000 --- a/modules/accessing-windows-node-using-ssh.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="accessing-windows-node-using-ssh_{context}"] -= Accessing a Windows node using SSH - -You can access a Windows node by using a secure shell (SSH). 
- -.Prerequisites - -* You have installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. -* You have added the key used in the `cloud-private-key` secret and the key used when creating the cluster to the ssh-agent. For security reasons, remember to remove the keys from the ssh-agent after use. -* You have connected to the Windows node link:https://access.redhat.com/solutions/4073041[using an `ssh-bastion` pod]. - -.Procedure - -* Access the Windows node by running the following command: -+ -[source,terminal] ----- -$ ssh -t -o StrictHostKeyChecking=no -o ProxyCommand='ssh -A -o StrictHostKeyChecking=no \ - -o ServerAliveInterval=30 -W %h:%p core@$(oc get service --all-namespaces -l run=ssh-bastion \ - -o go-template="{{ with (index (index .items 0).status.loadBalancer.ingress 0) }}{{ or .hostname .ip }}{{end}}")' @ <1> <2> ----- -<1> Specify the cloud provider username, such as `Administrator` for Amazon Web Services (AWS) or `capi` for Microsoft Azure. -<2> Specify the internal IP address of the node, which can be discovered by running the following command: -+ -[source,terminal] ----- -$ oc get nodes -o jsonpath={.status.addresses[?\(@.type==\"InternalIP\"\)].address} ----- diff --git a/modules/add-user.adoc b/modules/add-user.adoc deleted file mode 100644 index ca7fba2406ed..000000000000 --- a/modules/add-user.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/quickstart-osd.adoc - -:_content-type: PROCEDURE -[id="add-user_{context}"] -= Adding a user - - -Administrator roles are managed using a `dedicated-admins` group on the cluster. You can add and remove users from {cluster-manager-first}. - -.Procedure - -. Navigate to the *Clusters* page and select the cluster you want to add users to. - -. Click the *Access control* tab. - -. Under the *Cluster administrative users* heading, click *Add User*. - -. Enter the user ID you want to add. - -. Click *Add user*. - -.Verification - -* You now see the user listed under the *Cluster administrative users* heading. diff --git a/modules/adding-a-custom-logo.adoc b/modules/adding-a-custom-logo.adoc deleted file mode 100644 index 72b86e611336..000000000000 --- a/modules/adding-a-custom-logo.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="adding-a-custom-logo_{context}"] -= Adding a custom logo and product name - -You can create custom branding by adding a custom logo or custom product name. You can set both or one without the other, as these settings are independent of each other. - -.Prerequisites - -* You must have administrator privileges. -* Create a file of the logo that you want to use. The logo can be a file in any common image format, including GIF, JPG, PNG, or SVG, and is constrained to a `max-height` of `60px`. - -.Procedure - -. Import your logo file into a config map in the `openshift-config` namespace: -+ -[source,terminal] ----- -$ oc create configmap console-custom-logo --from-file /path/to/console-custom-logo.png -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: console-custom-logo - namespace: openshift-config -binaryData: - console-custom-logo.png: ... <1> ----- -<1> Provide a valid base64-encoded logo. -==== - -. 
Edit the web console's Operator configuration to include `customLogoFile` and `customProductName`: -+ -[source,terminal] ----- -$ oc edit consoles.operator.openshift.io cluster ----- -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -spec: - customization: - customLogoFile: - key: console-custom-logo.png - name: console-custom-logo - customProductName: My Console ----- -+ -Once the Operator configuration is updated, it will sync the custom logo config map into the console namespace, mount it to the console pod, and redeploy. - -. Check for success. If there are any issues, the console cluster Operator will report a `Degraded` status, and the console Operator configuration will also report a `CustomLogoDegraded` status, but with reasons like `KeyOrFilenameInvalid` or `NoImageProvided`. -+ -To check the `clusteroperator`, run: -+ -[source,terminal] ----- -$ oc get clusteroperator console -o yaml ----- -+ -To check the console Operator configuration, run: -+ -[source,terminal] ----- -$ oc get consoles.operator.openshift.io -o yaml ----- diff --git a/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc b/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc deleted file mode 100644 index dea67e07e3e3..000000000000 --- a/modules/adding-bare-metal-host-to-cluster-using-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="adding-bare-metal-host-to-cluster-using-web-console_{context}"] -= Adding a bare metal host to the cluster using the web console - -You can add bare metal hosts to the cluster in the web console. - -.Prerequisites - -* Install an {op-system} cluster on bare metal. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the web console, navigate to *Compute* -> *Bare Metal Hosts*. -. Select *Add Host* -> *New with Dialog*. -. Specify a unique name for the new bare metal host. -. Set the *Boot MAC address*. -. Set the *Baseboard Management Console (BMC) Address*. -. Enter the user credentials for the host's baseboard management controller (BMC). -. Select to power on the host after creation, and select *Create*. -. Scale up the number of replicas to match the number of available bare metal hosts. Navigate to *Compute* -> *MachineSets*, and increase the number of machine replicas in the cluster by selecting *Edit Machine count* from the *Actions* drop-down menu. - -[NOTE] -==== -You can also manage the number of bare metal nodes using the `oc scale` command and the appropriate bare metal compute machine set. -==== diff --git a/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc b/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc deleted file mode 100644 index 50665bc96ce2..000000000000 --- a/modules/adding-bare-metal-host-to-cluster-using-yaml.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="adding-bare-metal-host-to-cluster-using-yaml_{context}"] -= Adding a bare metal host to the cluster using YAML in the web console - -You can add bare metal hosts to the cluster in the web console using a YAML file that describes the bare metal host. - -.Prerequisites - -* Install a {op-system} compute machine on bare metal infrastructure for use in the cluster. 
-* Log in as a user with `cluster-admin` privileges. -* Create a `Secret` CR for the bare metal host. - -.Procedure - -. In the web console, navigate to *Compute* -> *Bare Metal Hosts*. -. Select *Add Host* -> *New from YAML*. -. Copy and paste the below YAML, modifying the relevant fields with the details of your host: -+ -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - name: -spec: - online: true - bmc: - address: - credentialsName: <1> - disableCertificateVerification: True <2> - bootMACAddress: ----- -+ -<1> `credentialsName` must reference a valid `Secret` CR. The `baremetal-operator` cannot manage the bare metal host without a valid `Secret` referenced in the `credentialsName`. For more information about secrets and how to create them, see xref:../nodes/pods/nodes-pods-secrets.adoc#nodes-pods-secrets-about_nodes-pods-secrets[Understanding secrets]. -<2> Setting `disableCertificateVerification` to `true` disables TLS host validation between the cluster and the baseboard management controller (BMC). - -. Select *Create* to save the YAML and create the new bare metal host. -. Scale up the number of replicas to match the number of available bare metal hosts. Navigate to *Compute* -> *MachineSets*, and increase the number of machines in the cluster by selecting *Edit Machine count* from the *Actions* drop-down menu. -+ -[NOTE] -==== -You can also manage the number of bare metal nodes using the `oc scale` command and the appropriate bare metal compute machine set. -==== diff --git a/modules/adding-cluster-notification-contacts.adoc b/modules/adding-cluster-notification-contacts.adoc deleted file mode 100644 index 655864015282..000000000000 --- a/modules/adding-cluster-notification-contacts.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_logging/rosa-accessing-the-service-logs.adoc -// * osd_cluster_admin/osd_logging/osd-accessing-the-service-logs.adoc -// * logging/sd-accessing-the-service-logs.adoc - -:_content-type: PROCEDURE -[id="adding-cluster-notification-contacts_{context}"] -= Adding cluster notification contacts - -You can add notification contacts for your -ifdef::openshift-dedicated[] -{product-title} -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -{product-title} (ROSA) -endif::openshift-rosa[] -cluster. When an event occurs that triggers a cluster notification email, subscribed users are notified. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. On the *Support* tab, under the *Notification contacts* heading, click *Add notification contact*. - -. Enter the Red Hat username or email of the contact you want to add. -+ -[NOTE] -==== -The username or email address must relate to a user account in the Red Hat organization where the cluster is deployed. -==== - -. Click *Add contact*. - -.Verification - -* You see a confirmation message when you have successfully added the contact. The user appears under the *Notification contacts* heading on the *Support* tab. 
diff --git a/modules/adding-custom-notification-banners.adoc b/modules/adding-custom-notification-banners.adoc deleted file mode 100644 index 15ddd52527d7..000000000000 --- a/modules/adding-custom-notification-banners.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-notification-banners_{context}"] -= Creating custom notification banners - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleNotification*. -. Select *Instances* tab -. Click *Create Console Notification* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleNotification -metadata: - name: example -spec: - text: This is an example notification message with an optional link. - location: BannerTop <1> - link: - href: 'https://www.example.com' - text: Optional link text - color: '#fff' - backgroundColor: '#0088ce' ----- -<1> Valid location settings are `BannerTop`, `BannerBottom`, and `BannerTopBottom`. - -. Click *Create* to apply your changes. diff --git a/modules/adding-service-existing.adoc b/modules/adding-service-existing.adoc deleted file mode 100644 index 62e20985eca8..000000000000 --- a/modules/adding-service-existing.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="adding-service-existing_{context}"] - -= Adding an add-on service to a cluster - -You can add an add-on service to an existing {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -.Prerequisites - -* You have created and provisioned a cluster for {product-title}. -* Your cluster meets all of the prerequisites for the service that you want to add on to your cluster. -* For paid add-on services, note the following considerations: -** If the organization has sufficient quota, and if the service is compatible with the cluster, the service appears in {cluster-manager}. -** If the organization has never had quota, or if the cluster is not compatible, then the service does not display. -** If the organization had quota in the past, but the quota is currently `0`, the service is still visible but disabled in {cluster-manager} until you get more quota. - -// TODO: Could this just be one of the above prereqs instead of its own NOTE? -[NOTE] -==== -To add a service to a cluster, you must be the cluster owner. -==== - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Select the cluster you want to add a service to. - -. Click the *Add-ons* tab. - -. Click the service option you want to add, click *Install*. An installing icon appears, indicating that the service has begun installing. -+ -A green check mark appears in the service option when the installation is complete. You might have to refresh your browser to see the installation status. - -. When the service is *Installed*, click *View in console* to access the service. 
diff --git a/modules/adding-tab-pods-page.adoc b/modules/adding-tab-pods-page.adoc deleted file mode 100644 index a51172c1fd69..000000000000 --- a/modules/adding-tab-pods-page.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugin-example.adoc - -:_content-type: PROCEDURE -[id="adding-tab-to-pods-page_{context}"] -= Adding a tab to the pods page - -There are different customizations you can make to the {product-title} web console. The following procedure adds a tab to the *Pod details* page as an example extension to your plugin. - -[NOTE] -==== -The {product-title} web console runs in a container connected to the cluster you have logged into. See "Dynamic plugin development" for information to test the plugin before creating your own. -==== - -.Procedure - -. Visit the link:https://github.com/openshift/console-plugin-template[`console-plugin-template`] repository containing a template for creating plugins in a new tab. -+ -[IMPORTANT] -==== -Custom plugin code is not supported by Red Hat. Only link:https://access.redhat.com/solutions/5893251[Cooperative community support] is available for your plugin. -==== - -. Create a GitHub repository for the template by clicking *Use this template* -> *_Create new repository_*. - -. Rename the new repository with the name of your plugin. - -. Clone the new repository to your local machine so you can edit the code. - -. Edit the `package.json` file, adding your plugin's metadata to the `consolePlugin` declaration. For example: -+ -[source,json] - ----- -"consolePlugin": { - "name": "my-plugin", <1> - "version": "0.0.1", <2> - "displayName": "My Plugin", <3> - "description": "Enjoy this shiny, new console plugin!", <4> - "exposedModules": { - "ExamplePage": "./components/ExamplePage" - }, - "dependencies": { - "@console/pluginAPI": "/*" - } -} ----- -<1> Update the name of your plugin. -<2> Update the version. -<3> Update the display name for your plugin. -<4> Update the description with a synopsis about your plugin. - -. Add the following to the `console-extensions.json` file: -+ -[source,json] - ----- -{ - "type": "console.tab/horizontalNav", - "properties": { - "page": { - "name": "Example Tab", - "href": "example" - }, - "model": { - "group": "core", - "version": "v1", - "kind": "Pod" - }, - "component": { "$codeRef": "ExampleTab" } - } -} ----- - -. Edit the `package.json` file to include the following changes: -+ -[source,json] - ----- - "exposedModules": { - "ExamplePage": "./components/ExamplePage", - "ExampleTab": "./components/ExampleTab" - } ----- - -. Write a message to display on a new custom tab on the *Pods* page by creating a new file `src/components/ExampleTab.tsx` and adding the following script: -+ -[source,tsx] - ----- -import * as React from 'react'; - -export default function ExampleTab() { - return ( -

<p>This is a custom tab added to a resource using a dynamic plugin.</p>

- ); -} ----- - -. Install a Helm chart with the name of the plugin as the Helm release name into a new namespace or an existing namespace as specified by the `-n` command-line option to deploy your plugin on a cluster. Provide the location of the image within the `plugin.image` parameter by using the following command: - -+ -[source,terminal] ----- -$ helm upgrade -i my-plugin charts openshift-console-plugin -n my-plugin-namespace --create-namespace --set plugin image=my-plugin-image-location ----- -+ -[NOTE] -==== -For more information on deploying your plugin on a cluster, see "Deploy your plugin on a cluster". -==== - -.Verification -* Visit a *Pod* page to view the added tab. diff --git a/modules/adding-tls-termination.adoc b/modules/adding-tls-termination.adoc deleted file mode 100644 index 2192b5d6b3f9..000000000000 --- a/modules/adding-tls-termination.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/add-tls-termination.adoc - -:_content-type: PROCEDURE -[id="nw-adding-tls-termination_{context}"] -= Adding TLS termination on the AWS Load Balancer - -You can route the traffic for the domain to pods of a service and add TLS termination on the AWS Load Balancer. - -.Prerequisites - -* You have an access to the OpenShift CLI (`oc`). - -.Procedure - -. Install the Operator and create an instance of the `aws-load-balancer-controller` resource: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController -metadata: - name: cluster <1> -spec: - subnetTagging: auto - ingressClass: tls-termination <2> ----- -<1> Defines the `aws-load-balancer-controller` instance. -<2> Defines the name of an `ingressClass` resource reconciled by the AWS Load Balancer Controller. This `ingressClass` resource gets created if it is not present. You can add additional `ingressClass` values. The controller reconciles the `ingressClass` values if the `spec.controller` is set to `ingress.k8s.aws/alb`. - -. Create an `Ingress` resource: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: <1> - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing <2> - alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-west-2:xxxxx <3> -spec: - ingressClassName: tls-termination <4> - rules: - - host: <5> - http: - paths: - - path: / - pathType: Exact - backend: - service: - name: <6> - port: - number: 80 ----- -<1> Specifies the name of an ingress. -<2> The controller provisions the load balancer for this `Ingress` resource in a public subnet so that the load balancer is reachable over the internet. -<3> The Amazon Resource Name of the certificate that you attach to the load balancer. -<4> Defines the ingress class name. -<5> Defines the domain for traffic routing. -<6> Defines the service for traffic routing. diff --git a/modules/adding-to-a-project.adoc b/modules/adding-to-a-project.adoc deleted file mode 100644 index 5cc909680bb7..000000000000 --- a/modules/adding-to-a-project.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="adding-to-a-project_{context}"] -= Adding to a project - -.Procedure - -. Select *Developer* from the context selector at the top of the web console -navigation menu. - -. Click *+Add* - -. At the top of the page, select the name of the project that you want to add to. - -. 
Click a method for adding to your project, and then follow the workflow. - -[NOTE] -==== -You can also add components to the topology using quick search. -==== diff --git a/modules/adding-yaml-examples-to-kube-resources.adoc b/modules/adding-yaml-examples-to-kube-resources.adoc deleted file mode 100644 index d179f415b02e..000000000000 --- a/modules/adding-yaml-examples-to-kube-resources.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="adding-yaml-examples-to-kube-resources_{context}"] -= Adding YAML examples to Kubernetes resources - -You can dynamically add YAML examples to any Kubernetes resources at any time. - -.Prerequisites - -* You must have cluster administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on *ConsoleYAMLSample*. - -. Click *YAML* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleYAMLSample -metadata: - name: example -spec: - targetResource: - apiVersion: batch/v1 - kind: Job - title: Example Job - description: An example Job YAML sample - yaml: | - apiVersion: batch/v1 - kind: Job - metadata: - name: countdown - spec: - template: - metadata: - name: countdown - spec: - containers: - - name: counter - image: centos:7 - command: - - "bin/bash" - - "-c" - - "for i in 9 8 7 6 5 4 3 2 1 ; do echo $i ; done" - restartPolicy: Never ----- -Use `spec.snippet` to indicate that the YAML sample is not the full YAML resource -definition, but a fragment that can be inserted into the existing YAML document -at the user's cursor. - -. Click *Save*. diff --git a/modules/admin-credentials-root-secret-formats.adoc b/modules/admin-credentials-root-secret-formats.adoc deleted file mode 100644 index 03e566790073..000000000000 --- a/modules/admin-credentials-root-secret-formats.adoc +++ /dev/null @@ -1,149 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc - -ifeval::["{context}" == "cco-mode-mint"] -:mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:passthrough: -endif::[] - -:_content-type: REFERENCE -[id="admin-credentials-root-secret-formats_{context}"] -= Admin credentials root secret format - -Each cloud provider uses a credentials root secret in the `kube-system` -namespace by convention, which is then used to satisfy all credentials requests -and create their respective secrets. -This is done either by minting new credentials with _mint mode_, or by copying the credentials root secret with _passthrough mode_. - -The format for the secret varies by cloud, and is also used for each -`CredentialsRequest` secret. 
- -.Amazon Web Services (AWS) secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: aws-creds -stringData: - aws_access_key_id: - aws_secret_access_key: ----- - -ifdef::passthrough[] - -.Microsoft Azure secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: azure-credentials -stringData: - azure_subscription_id: - azure_client_id: - azure_client_secret: - azure_tenant_id: - azure_resource_prefix: - azure_resourcegroup: - azure_region: ----- - -On Microsoft Azure, the credentials secret format includes two properties that must contain the cluster's infrastructure ID, generated randomly for each cluster installation. This value can be found after running create manifests: - -[source,terminal] ----- -$ cat .openshift_install_state.json | jq '."*installconfig.ClusterID".InfraID' -r ----- - -.Example output -[source,terminal] ----- -mycluster-2mpcn ----- - -This value would be used in the secret data as follows: - -[source,yaml] ----- -azure_resource_prefix: mycluster-2mpcn -azure_resourcegroup: mycluster-2mpcn-rg ----- -endif::passthrough[] - -.Google Cloud Platform (GCP) secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: gcp-credentials -stringData: - service_account.json: ----- - -ifdef::passthrough[] - -.{rh-openstack-first} secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: openstack-credentials -data: - clouds.yaml: - clouds.conf: ----- - -.{rh-virtualization-first} secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: ovirt-credentials -data: - ovirt_url: - ovirt_username: - ovirt_password: - ovirt_insecure: - ovirt_ca_bundle: ----- - -.VMware vSphere secret format - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: kube-system - name: vsphere-creds -data: - vsphere.openshift.example.com.username: - vsphere.openshift.example.com.password: ----- - -endif::passthrough[] - -ifeval::["{context}" == "cco-mode-mint"] -:!mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:!passthrough: -endif::[] \ No newline at end of file diff --git a/modules/admission-plug-ins-about.adoc b/modules/admission-plug-ins-about.adoc deleted file mode 100644 index 0616ddd904c3..000000000000 --- a/modules/admission-plug-ins-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -:_content-type: CONCEPT -[id="admission-plug-ins-about_{context}"] -= About admission plugins - -Admission plugins are used to help regulate how {product-title} {product-version} functions. Admission plugins intercept requests to the master API to validate resource requests and ensure policies are adhered to, after the request is authenticated and authorized. For example, they are commonly used to enforce security policy, resource limitations or configuration requirements. - -Admission plugins run in sequence as an admission chain. If any admission plugin in the sequence rejects a request, the whole chain is aborted and an error is returned. - -{product-title} has a default set of admission plugins enabled for each resource type. These are required for proper functioning of the cluster. Admission plugins ignore resources that they are not responsible for. 
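The default plugins are compiled into the API server and are not represented as API objects. By contrast, dynamically registered webhook admission configurations are ordinary cluster resources, so one way to see what has been added to the admission chain is to list them:

[source,terminal]
----
$ oc get validatingwebhookconfigurations,mutatingwebhookconfigurations
----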
- -In addition to the defaults, the admission chain can be extended dynamically through webhook admission plugins that call out to custom webhook servers. There are two types of webhook admission plugins: a mutating admission plugin and a validating admission plugin. The mutating admission plugin runs first and can both modify resources and validate requests. The validating admission plugin validates requests and runs after the mutating admission plugin so that modifications triggered by the mutating admission plugin can also be validated. - -Calling webhook servers through a mutating admission plugin can produce side effects on resources related to the target object. In such situations, you must take steps to validate that the end result is as expected. - -[WARNING] -==== -Dynamic admission should be used cautiously because it impacts cluster control plane operations. When calling webhook servers through webhook admission plugins in {product-title} {product-version}, ensure that you have read the documentation fully and tested for side effects of mutations. Include steps to restore resources back to their original state prior to mutation, in the event that a request does not pass through the entire admission chain. -==== diff --git a/modules/admission-plug-ins-default.adoc b/modules/admission-plug-ins-default.adoc deleted file mode 100644 index 6ef96178cbc8..000000000000 --- a/modules/admission-plug-ins-default.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-plug-ins-default_{context}"] -= Default admission plugins - -//Future xref - A set of default admission plugins is enabled in {product-title} {product-version}. These default plugins contribute to fundamental control plane functionality, such as ingress policy, xref:../nodes/clusters/nodes-cluster-overcommit.adoc#nodes-cluster-resource-override_nodes-cluster-overcommit[cluster resource limit override] and quota policy. -Default validating and admission plugins are enabled in {product-title} {product-version}. These default plugins contribute to fundamental control plane functionality, such as ingress policy, cluster resource limit override and quota policy. 
The following lists contain the default admission plugins: - -.Validating admission plugins -[%collapsible] -==== -* `LimitRanger` -* `ServiceAccount` -* `PodNodeSelector` -* `Priority` -* `PodTolerationRestriction` -* `OwnerReferencesPermissionEnforcement` -* `PersistentVolumeClaimResize` -* `RuntimeClass` -* `CertificateApproval` -* `CertificateSigning` -* `CertificateSubjectRestriction` -* `autoscaling.openshift.io/ManagementCPUsOverride` -* `authorization.openshift.io/RestrictSubjectBindings` -* `scheduling.openshift.io/OriginPodNodeEnvironment` -* `network.openshift.io/ExternalIPRanger` -* `network.openshift.io/RestrictedEndpointsAdmission` -* `image.openshift.io/ImagePolicy` -* `security.openshift.io/SecurityContextConstraint` -* `security.openshift.io/SCCExecRestrictions` -* `route.openshift.io/IngressAdmission` -* `config.openshift.io/ValidateAPIServer` -* `config.openshift.io/ValidateAuthentication` -* `config.openshift.io/ValidateFeatureGate` -* `config.openshift.io/ValidateConsole` -* `operator.openshift.io/ValidateDNS` -* `config.openshift.io/ValidateImage` -* `config.openshift.io/ValidateOAuth` -* `config.openshift.io/ValidateProject` -* `config.openshift.io/DenyDeleteClusterConfiguration` -* `config.openshift.io/ValidateScheduler` -* `quota.openshift.io/ValidateClusterResourceQuota` -* `security.openshift.io/ValidateSecurityContextConstraints` -* `authorization.openshift.io/ValidateRoleBindingRestriction` -* `config.openshift.io/ValidateNetwork` -* `operator.openshift.io/ValidateKubeControllerManager` -* `ValidatingAdmissionWebhook` -* `ResourceQuota` -* `quota.openshift.io/ClusterResourceQuota` -==== - - -.Mutating admission plugins -[%collapsible] -==== -* `NamespaceLifecycle` -* `LimitRanger` -* `ServiceAccount` -* `NodeRestriction` -* `TaintNodesByCondition` -* `PodNodeSelector` -* `Priority` -* `DefaultTolerationSeconds` -* `PodTolerationRestriction` -* `DefaultStorageClass` -* `StorageObjectInUseProtection` -* `RuntimeClass` -* `DefaultIngressClass` -* `autoscaling.openshift.io/ManagementCPUsOverride` -* `scheduling.openshift.io/OriginPodNodeEnvironment` -* `image.openshift.io/ImagePolicy` -* `security.openshift.io/SecurityContextConstraint` -* `security.openshift.io/DefaultSecurityContextConstraints` -* `MutatingAdmissionWebhook` -==== diff --git a/modules/admission-webhook-types.adoc b/modules/admission-webhook-types.adoc deleted file mode 100644 index b00f98cb2794..000000000000 --- a/modules/admission-webhook-types.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-webhook-types_{context}"] -= Types of webhook admission plugins - -Cluster administrators can call out to webhook servers through the mutating admission plugin or the validating admission plugin in the API server admission chain. - -[id="mutating-admission-plug-in_{context}"] -== Mutating admission plugin - -The mutating admission plugin is invoked during the mutation phase of the admission process, which allows modification of resource content before it is persisted. One example webhook that can be called through the mutating admission plugin is the Pod Node Selector feature, which uses an annotation on a namespace to find a label selector and add it to the pod specification. 
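As a minimal sketch of the namespace annotation that the Pod Node Selector feature reads, the following namespace causes the plugin to merge the given selector into the specification of pods created in it. The namespace name and selector value are placeholders:

[source,yaml]
----
apiVersion: v1
kind: Namespace
metadata:
  name: example-namespace
  annotations:
    scheduler.alpha.kubernetes.io/node-selector: "region=east"
----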
- -[id="mutating-admission-plug-in-config_{context}"] -.Sample mutating admission plugin configuration - -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration <1> -metadata: - name: <2> -webhooks: -- name: <3> - clientConfig: <4> - service: - namespace: default <5> - name: kubernetes <6> - path: <7> - caBundle: <8> - rules: <9> - - operations: <10> - - - apiGroups: - - "" - apiVersions: - - "*" - resources: - - - failurePolicy: <11> - sideEffects: None ----- - -<1> Specifies a mutating admission plugin configuration. -<2> The name for the `MutatingWebhookConfiguration` object. Replace `` with the appropriate value. -<3> The name of the webhook to call. Replace `` with the appropriate value. -<4> Information about how to connect to, trust, and send data to the webhook server. -<5> The namespace where the front-end service is created. -<6> The name of the front-end service. -<7> The webhook URL used for admission requests. Replace `` with the appropriate value. -<8> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `` with the appropriate certificate in base64 format. -<9> Rules that define when the API server should use this webhook admission plugin. -<10> One or more operations that trigger the API server to call this webhook admission plugin. Possible values are `create`, `update`, `delete` or `connect`. Replace `` and `` with the appropriate values. -<11> Specifies how the policy should proceed if the webhook server is unavailable. -Replace `` with either `Ignore` (to unconditionally accept the request in the event of a failure) or `Fail` (to deny the failed request). Using `Ignore` can result in unpredictable behavior for all clients. - -[IMPORTANT] -==== -In {product-title} {product-version}, objects created by users or control loops through a mutating admission plugin might return unexpected results, especially if values set in an initial request are overwritten, which is not recommended. -==== - -[id="validating-admission-plug-in_{context}"] -== Validating admission plugin - -A validating admission plugin is invoked during the validation phase of the admission process. This phase allows the enforcement of invariants on particular API resources to ensure that the resource does not change again. The Pod Node Selector is also an example of a webhook which is called by the validating admission plugin, to ensure that all `nodeSelector` fields are constrained by the node selector restrictions on the namespace. - -[id="validating-admission-plug-in-config_{context}"] -//http://blog.kubernetes.io/2018/01/extensible-admission-is-beta.html -.Sample validating admission plugin configuration - -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration <1> -metadata: - name: <2> -webhooks: -- name: <3> - clientConfig: <4> - service: - namespace: default <5> - name: kubernetes <6> - path: <7> - caBundle: <8> - rules: <9> - - operations: <10> - - - apiGroups: - - "" - apiVersions: - - "*" - resources: - - - failurePolicy: <11> - sideEffects: Unknown ----- - -<1> Specifies a validating admission plugin configuration. -<2> The name for the `ValidatingWebhookConfiguration` object. Replace `` with the appropriate value. -<3> The name of the webhook to call. Replace `` with the appropriate value. -<4> Information about how to connect to, trust, and send data to the webhook server. -<5> The namespace where the front-end service is created. 
-<6> The name of the front-end service. -<7> The webhook URL used for admission requests. Replace `` with the appropriate value. -<8> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `` with the appropriate certificate in base64 format. -<9> Rules that define when the API server should use this webhook admission plugin. -<10> One or more operations that trigger the API server to call this webhook admission plugin. Possible values are `create`, `update`, `delete` or `connect`. Replace `` and `` with the appropriate values. -<11> Specifies how the policy should proceed if the webhook server is unavailable. -Replace `` with either `Ignore` (to unconditionally accept the request in the event of a failure) or `Fail` (to deny the failed request). Using `Ignore` can result in unpredictable behavior for all clients. diff --git a/modules/admission-webhooks-about.adoc b/modules/admission-webhooks-about.adoc deleted file mode 100644 index 6a8a92b02180..000000000000 --- a/modules/admission-webhooks-about.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -[id="admission-webhooks-about_{context}"] -= Webhook admission plugins - -In addition to {product-title} default admission plugins, dynamic admission can be implemented through webhook admission plugins that call webhook servers, to extend the functionality of the admission chain. Webhook servers are called over HTTP at defined endpoints. - -There are two types of webhook admission plugins in {product-title}: - -//Future xref - * During the admission process, xref:../architecture/admission-plug-ins.adoc#mutating-admission-plug-in[the mutating admission plugin] can perform tasks, such as injecting affinity labels. -* During the admission process, the _mutating admission plugin_ can perform tasks, such as injecting affinity labels. - -//Future xref - * At the end of the admission process, xref:../architecture/admission-plug-ins.adoc#validating-admission-plug-in[the validating admission plugin] makes sure an object is configured properly, for example ensuring affinity labels are as expected. If the validation passes, {product-title} schedules the object as configured. -* At the end of the admission process, the _validating admission plugin_ can be used to make sure an object is configured properly, for example ensuring affinity labels are as expected. If the validation passes, {product-title} schedules the object as configured. - -When an API request comes in, mutating or validating admission plugins use the list of external webhooks in the configuration and call them in parallel: - -* If all of the webhooks approve the request, the admission chain continues. - -* If any of the webhooks deny the request, the admission request is denied and the reason for doing so is based on the first denial. - -* If more than one webhook denies the admission request, only the first denial reason is returned to the user. - -* If an error is encountered when calling a webhook, the request is either denied or the webhook is ignored depending on the error policy set. If the error policy is set to `Ignore`, the request is unconditionally accepted in the event of a failure. If the policy is set to `Fail`, failed requests are denied. Using `Ignore` can result in unpredictable behavior for all clients. - -//Future xrefs - Communication between the webhook admission plugin and the webhook server must use TLS. 
Generate a certificate authority (CA) certificate and use the certificate to sign the server certificate that is used by your webhook server. The PEM-encoded CA certificate is supplied to the webhook admission plugin using a mechanism, such as xref:../security/certificates/service-serving-certificate.adoc#service-serving-certificate[service serving certificate secrets]. -Communication between the webhook admission plugin and the webhook server must use TLS. Generate a CA certificate and use the certificate to sign the server certificate that is used by your webhook admission server. The PEM-encoded CA certificate is supplied to the webhook admission plugin using a mechanism, such as service serving certificate secrets. - -The following diagram illustrates the sequential admission chain process within which multiple webhook servers are called. - -.API admission chain with mutating and validating admission plugins -image::api-admission-chain.png["API admission stage", align="center"] - -An example webhook admission plugin use case is where all pods must have a common set of labels. In this example, the mutating admission plugin can inject labels and the validating admission plugin can check that labels are as expected. {product-title} would subsequently schedule pods that include required labels and reject those that do not. - -Some common webhook admission plugin use cases include: - -//Future xref - * Namespace reservation. -* Namespace reservation. -//Future xrefs - * :../networking/hardware_networks/configuring-sriov-operator.adoc#configuring-sriov-operator[Limiting custom network resources managed by the SR-IOV network device plugin]. -* Limiting custom network resources managed by the SR-IOV network device plugin. -//Future xref - * xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Defining tolerations that enable taints to qualify which pods should be scheduled on a node]. -* Defining tolerations that enable taints to qualify which pods should be scheduled on a node. -//Future xref - * xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority class validation]. -* Pod priority class validation. - -[NOTE] -==== -The maximum default webhook timeout value in {product-title} is 13 seconds, and it cannot be changed. -==== diff --git a/modules/advanced-node-tuning-hosted-cluster.adoc b/modules/advanced-node-tuning-hosted-cluster.adoc deleted file mode 100644 index 750ff762b47d..000000000000 --- a/modules/advanced-node-tuning-hosted-cluster.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -:_content-type: PROCEDURE -[id="advanced-node-tuning-hosted-cluster_{context}"] -= Advanced node tuning for hosted clusters by setting kernel boot parameters - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -For more advanced tuning in hosted control planes, which requires setting kernel boot parameters, you can also use the Node Tuning Operator. The following example shows how you can create a node pool with huge pages reserved. - -.Procedure - -. Create a `ConfigMap` object that contains a `Tuned` object manifest for creating 10 huge pages that are 2 MB in size. 
Save this `ConfigMap` manifest in a file named `tuned-hugepages.yaml`: -+ -[source,yaml] ----- - apiVersion: v1 - kind: ConfigMap - metadata: - name: tuned-hugepages - namespace: clusters - data: - tuning: | - apiVersion: tuned.openshift.io/v1 - kind: Tuned - metadata: - name: hugepages - namespace: openshift-cluster-node-tuning-operator - spec: - profile: - - data: | - [main] - summary=Boot time configuration for hugepages - include=openshift-node - [bootloader] - cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50 - name: openshift-node-hugepages - recommend: - - priority: 20 - profile: openshift-node-hugepages ----- -+ -[NOTE] -==== -The `.spec.recommend.match` field is intentionally left blank. In this case, this `Tuned` object is applied to all nodes in the node pool where this `ConfigMap` object is referenced. Group nodes with the same hardware configuration into the same node pool. Otherwise, TuneD operands can calculate conflicting kernel parameters for two or more nodes that share the same node pool. -==== - -. Create the `ConfigMap` object in the management cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$MGMT_KUBECONFIG" create -f tuned-hugepages.yaml ----- - -. Create a `NodePool` manifest YAML file, customize the upgrade type of the `NodePool`, and reference the `ConfigMap` object that you created in the `spec.tuningConfig` section. Create the `NodePool` manifest and save it in a file named `hugepages-nodepool.yaml` by using the `hypershift` CLI: -+ -[source,yaml] ----- - NODEPOOL_NAME=hugepages-example - INSTANCE_TYPE=m5.2xlarge - NODEPOOL_REPLICAS=2 - - hypershift create nodepool aws \ - --cluster-name $CLUSTER_NAME \ - --name $NODEPOOL_NAME \ - --node-count $NODEPOOL_REPLICAS \ - --instance-type $INSTANCE_TYPE \ - --render > hugepages-nodepool.yaml ----- - -. In the `hugepages-nodepool.yaml` file, set `.spec.management.upgradeType` to `InPlace`, and set `.spec.tuningConfig` to reference the `tuned-hugepages` `ConfigMap` object that you created. -+ -[source,yaml] ----- - apiVersion: hypershift.openshift.io/v1alpha1 - kind: NodePool - metadata: - name: hugepages-nodepool - namespace: clusters - ... - spec: - management: - ... - upgradeType: InPlace - ... - tuningConfig: - - name: tuned-hugepages ----- -+ -[NOTE] -==== -To avoid the unnecessary re-creation of nodes when you apply the new `MachineConfig` objects, set `.spec.management.upgradeType` to `InPlace`. If you use the `Replace` upgrade type, nodes are fully deleted and new nodes can replace them when you apply the new kernel boot parameters that the TuneD operand calculated. -==== - -. Create the `NodePool` in the management cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$MGMT_KUBECONFIG" create -f hugepages-nodepool.yaml ----- - -.Verification - -After the nodes are available, the containerized TuneD daemon calculates the required kernel boot parameters based on the applied TuneD profile. After the nodes are ready and reboot once to apply the generated `MachineConfig` object, you can verify that the TuneD profile is applied and that the kernel boot parameters are set. - -. List the `Tuned` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Tuneds -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -default 123m -hugepages-8dfb1fed 1m23s -rendered 123m ----- - -. 
List the `Profile` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Profiles -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME TUNED APPLIED DEGRADED AGE -nodepool-1-worker-1 openshift-node True False 132m -nodepool-1-worker-2 openshift-node True False 131m -hugepages-nodepool-worker-1 openshift-node-hugepages True False 4m8s -hugepages-nodepool-worker-2 openshift-node-hugepages True False 3m57s ----- -+ -Both of the worker nodes in the new `NodePool` have the `openshift-node-hugepages` profile applied. - -. To confirm that the tuning was applied correctly, start a debug shell on a node and check `/proc/cmdline`. -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" debug node/nodepool-1-worker-1 -- chroot /host cat /proc/cmdline ----- -+ -.Example output -[source,terminal] ----- -BOOT_IMAGE=(hd0,gpt3)/ostree/rhcos-... hugepagesz=2M hugepages=50 ----- \ No newline at end of file diff --git a/modules/agent-install-about-mirroring-for-disconnected-registry.adoc b/modules/agent-install-about-mirroring-for-disconnected-registry.adoc deleted file mode 100644 index 2e041c2cbdc6..000000000000 --- a/modules/agent-install-about-mirroring-for-disconnected-registry.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// * installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc -// re-use of applicable content from disconnected install mirroring - -:_content-type: CONCEPT -[id="agent-install-about-mirroring-for-disconnected-registry_{context}"] -= About mirroring the {product-title} image repository for a disconnected registry - -To use mirror images for a disconnected installation with the Agent-based Installer, you must modify the `install-config.yaml` file. - -You can mirror the release image by using the output of either the `oc adm release mirror` or `oc mirror` command. -This is dependent on which command you used to set up the mirror registry. - -The following example shows the output of the `oc adm release mirror` command. - -[source,terminal] ----- -$ oc adm release mirror ----- - -.Example output - -[source,terminal] ----- -To use the new mirrored repository to install, add the following -section to the install-config.yaml: - -imageContentSources: - -mirrors: -virthost.ostest.test.metalkube.org:5000/localimages/local-release-image -source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -mirrors: -virthost.ostest.test.metalkube.org:5000/localimages/local-release-image -source: registry.ci.openshift.org/ocp/release ----- - -The following example shows part of the `imageContentSourcePolicy.yaml` file generated by the oc-mirror plugin. The file can be found in the results directory, for example `oc-mirror-workspace/results-1682697932/`. 
- -.Example `imageContentSourcePolicy.yaml` file - -[source,yaml] ----- -spec: - repositoryDigestMirrors: - - mirrors: - - virthost.ostest.test.metalkube.org:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev - - mirrors: - - virthost.ostest.test.metalkube.org:5000/openshift/release-images - source: quay.io/openshift-release-dev/ocp-release ----- - - diff --git a/modules/agent-install-configuring-for-disconnected-registry.adoc b/modules/agent-install-configuring-for-disconnected-registry.adoc deleted file mode 100644 index e1a8378c6594..000000000000 --- a/modules/agent-install-configuring-for-disconnected-registry.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_based_installer/understanding-disconnected-installation-mirroring.adoc - -:_content-type: Procedure[id="agent-install-configuring-for-disconnected-registry_{context}"] -= Configuring the Agent-based Installer to use mirrored images - -You must use the output of either the `oc adm release mirror` command or the oc-mirror plugin to configure the Agent-based Installer to use mirrored images. - -.Procedure - -. If you used the oc-mirror plugin to mirror your release images: - -.. Open the `imageContentSourcePolicy.yaml` located in the results directory, for example `oc-mirror-workspace/results-1682697932/`. - -.. Copy the text in the `repositoryDigestMirrors` section of the yaml file. - -. If you used the `oc adm release mirror` command to mirror your release images: - -* Copy the text in the `imageContentSources` section of the command output. - -. Paste the copied text into the `imageContentSources` field of the `install-config.yaml` file. - -. Add the certificate file used for the mirror registry to the `additionalTrustBundle` field of the yaml file. -+ -[IMPORTANT] -==== -The value must be the contents of the certificate file that you used for your mirror registry. -The certificate file can be an existing, trusted certificate authority, or the self-signed certificate that you generated for the mirror registry. -==== -+ -.Example `install-config.yaml` file - -[source,yaml] ----- - additionalTrustBundle: | - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- ----- - -. If you are using {ztp} manifests: add the `registries.conf` and `ca-bundle.crt` files to the `mirror` path to add the mirror configuration in the agent ISO image. -+ -[NOTE] -==== -You can create the `registries.conf` file from the output of either the `oc adm release mirror` command or the `oc mirror` plugin. The format of the `/etc/containers/registries.conf` file has changed. It is now version 2 and in TOML format. 
-==== -+ -.Example `registries.conf` file - -[source,toml] ----- -[[registry]] -location = "registry.ci.openshift.org/ocp/release" mirror-by-digest-only = true - -[[registry.mirror]] location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image" - -[[registry]] -location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev" mirror-by-digest-only = true - -[[registry.mirror]] location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image" ----- \ No newline at end of file diff --git a/modules/agent-install-ipi-install-root-device-hints.adoc b/modules/agent-install-ipi-install-root-device-hints.adoc deleted file mode 100644 index 15133769910e..000000000000 --- a/modules/agent-install-ipi-install-root-device-hints.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// This is included in the following assemblies: -// -// preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id='root-device-hints_{context}'] -= About root device hints - -The `rootDeviceHints` parameter enables the installer to provision the {op-system-first} image to a particular device. The installer examines the devices in the order it discovers them, and compares the discovered values with the hint values. The installer uses the first discovered device that matches the hint value. The configuration can combine multiple hints, but a device must match all hints for the installer to select it. - -.Subfields - -|=== -| Subfield | Description - -| `deviceName` | A string containing a Linux device name like `/dev/vda`. The hint must match the actual value exactly. - -| `hctl` | A string containing a SCSI bus address like `0:0:0:0`. The hint must match the actual value exactly. - -| `model` | A string containing a vendor-specific device identifier. The hint can be a substring of the actual value. - -| `vendor` | A string containing the name of the vendor or manufacturer of the device. The hint can be a sub-string of the actual value. - -| `serialNumber` | A string containing the device serial number. The hint must match the actual value exactly. - -| `minSizeGigabytes` | An integer representing the minimum size of the device in gigabytes. - -| `wwn` | A string containing the unique storage identifier. The hint must match the actual value exactly. - -| `rotational` | A boolean indicating whether the device should be a rotating disk (true) or not (false). - -|=== - -.Example usage - -[source,yaml] ----- - - name: master-0 - role: master - rootDeviceHints: - deviceName: "/dev/sda" ----- diff --git a/modules/agent-install-networking.adoc b/modules/agent-install-networking.adoc deleted file mode 100644 index 4797f35c5962..000000000000 --- a/modules/agent-install-networking.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: CONCEPT -[id="agent-install-networking_{context}"] -= About networking - -The *rendezvous IP* must be known at the time of generating the agent ISO, so that during the initial boot all the hosts can check in to the assisted service. -If the IP addresses are assigned using a Dynamic Host Configuration Protocol (DHCP) server, then the `rendezvousIP` field must be set to an IP address of one of the hosts that will become part of the deployed control plane. -In an environment without a DHCP server, you can define IP addresses statically. 
- -In addition to static IP addresses, you can apply any network configuration that is in NMState format. This includes VLANs and NIC bonds. - -== DHCP - -.Preferred method: `install-config.yaml` and `agent.config.yaml` - -You must specify the value for the `rendezvousIP` field. The `networkConfig` fields can be left blank: - -.Sample agent-config.yaml.file - -[source,yaml] ----- -apiVersion: v1alpha1 -kind: AgentConfig -metadata: - name: sno-cluster -rendezvousIP: 192.168.111.80 <1> ----- -<1> The IP address for the rendezvous host. - -== Static networking - -.. Preferred method: `install-config.yaml` and `agent.config.yaml` - -+ -.Sample agent-config.yaml.file -+ -[source,yaml] ----- - cat > agent-config.yaml << EOF - apiVersion: v1alpha1 - kind: AgentConfig - metadata: - name: sno-cluster - rendezvousIP: 192.168.111.80 <1> - hosts: - - hostname: master-0 - interfaces: - - name: eno1 - macAddress: 00:ef:44:21:e6:a5 <2> - networkConfig: - interfaces: - - name: eno1 - type: ethernet - state: up - mac-address: 00:ef:44:21:e6:a5 - ipv4: - enabled: true - address: - - ip: 192.168.111.80 <3> - prefix-length: 23 <4> - dhcp: false - dns-resolver: - config: - server: - - 192.168.111.1 <5> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 192.168.111.1 <6> - next-hop-interface: eth0 - table-id: 254 ----- -<1> If a value is not specified for the `rendezvousIP` field, one address will be chosen from the static IP addresses specified in the `networkConfig` fields. -<2> The MAC address of an interface on the host, used to determine which host to apply the configuration to. -<3> The static IP address of the target bare metal host. -<4> The static IP address’s subnet prefix for the target bare metal host. -<5> The DNS server for the target bare metal host. -<6> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. - -+ -.. Optional method: {ztp} manifests - -+ -The optional method of the {ztp} custom resources comprises 6 custom resources; you can configure static IPs in the `nmstateconfig.yaml` file. - -+ -[source,yaml] ----- -apiVersion: agent-install.openshift.io/v1beta1 -kind: NMStateConfig -metadata: - name: master-0 - namespace: openshift-machine-api - labels: - cluster0-nmstate-label-name: cluster0-nmstate-label-value -spec: - config: - interfaces: - - name: eth0 - type: ethernet - state: up - mac-address: 52:54:01:aa:aa:a1 - ipv4: - enabled: true - address: - - ip: 192.168.122.2 <1> - prefix-length: 23 <2> - dhcp: false - dns-resolver: - config: - server: - - 192.168.122.1 <3> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 192.168.122.1 <4> - next-hop-interface: eth0 - table-id: 254 - interfaces: - - name: eth0 - macAddress: 52:54:01:aa:aa:a1 <5> ----- -<1> The static IP address of the target bare metal host. -<2> The static IP address’s subnet prefix for the target bare metal host. -<3> The DNS server for the target bare metal host. -<4> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. -<5> The MAC address of an interface on the host, used to determine which host to apply the configuration to. - -The rendezvous IP is chosen from the static IP addresses specified in the `config` fields. 
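After the `install-config.yaml` and `agent-config.yaml` files described above are in place, the agent ISO that applies this network configuration can be generated with the Agent-based Installer. The directory name is a placeholder for the directory that contains both files:

[source,terminal]
----
$ openshift-install agent create image --dir <installation_directory>
----

The generated agent ISO is written to the same directory.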
diff --git a/modules/agent-install-sample-config-bond-sriov.adoc b/modules/agent-install-sample-config-bond-sriov.adoc deleted file mode 100644 index 1cdbcb2cbc25..000000000000 --- a/modules/agent-install-sample-config-bond-sriov.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id="agent-install-sample-config-bond-sriov_{context}"] -= Example: Bonds and SR-IOV dual-nic node network configuration - -:FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices -include::snippets/technology-preview.adoc[leveloffset=+1] - -The following `agent-config.yaml` file is an example of a manifest for dual port NIC with a bond and SR-IOV interfaces: - -[source,yaml] ----- -apiVersion: v1alpha1 -kind: AgentConfig -rendezvousIP: 10.10.10.14 -hosts: - - hostname: worker-1 - interfaces: - - name: eno1 - macAddress: 0c:42:a1:55:f3:06 - - name: eno2 - macAddress: 0c:42:a1:55:f3:07 - networkConfig: <1> - interfaces: <2> - - name: eno1 <3> - type: ethernet <4> - state: up - mac-address: 0c:42:a1:55:f3:06 - ipv4: - enabled: true - dhcp: false <5> - ethernet: - sr-iov: - total-vfs: 2 <6> - ipv6: - enabled: false - - name: sriov:eno1:0 - type: ethernet - state: up <7> - ipv4: - enabled: false <8> - ipv6: - enabled: false - dhcp: false - - name: sriov:eno1:1 - type: ethernet - state: down - - name: eno2 - type: ethernet - state: up - mac-address: 0c:42:a1:55:f3:07 - ipv4: - enabled: true - ethernet: - sr-iov: - total-vfs: 2 - ipv6: - enabled: false - - name: sriov:eno2:0 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: false - - name: sriov:eno2:1 - type: ethernet - state: down - - name: bond0 - type: bond - state: up - min-tx-rate: 100 <9> - max-tx-rate: 200 <10> - link-aggregation: - mode: active-backup <11> - options: - primary: sriov:eno1:0 <12> - port: - - sriov:eno1:0 - - sriov:eno2:0 - ipv4: - address: - - ip: 10.19.16.57 <13> - prefix-length: 23 - dhcp: false - enabled: true - ipv6: - enabled: false - dns-resolver: - config: - server: - - 10.11.5.160 - - 10.2.70.215 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 10.19.17.254 - next-hop-interface: bond0 <14> - table-id: 254 ----- -<1> The `networkConfig` field contains information about the network configuration of the host, with subfields including `interfaces`,`dns-resolver`, and `routes`. -<2> The `interfaces` field is an array of network interfaces defined for the host. -<3> The name of the interface. -<4> The type of interface. This example creates an ethernet interface. -<5> Set this to `false` to disable DHCP for the physical function (PF) if it is not strictly required. -<6> Set this to the number of SR-IOV virtual functions (VFs) to instantiate. -<7> Set this to `up`. -<8> Set this to `false` to disable IPv4 addressing for the VF attached to the bond. -<9> Sets a minimum transmission rate, in Mbps, for the VF. This sample value sets a rate of 100 Mbps. - * This value must be less than or equal to the maximum transmission rate. - * Intel NICs do not support the `min-tx-rate` parameter. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1772847[*BZ#1772847*]. -<10> Sets a maximum transmission rate, in Mbps, for the VF. This sample value sets a rate of 200 Mbps. -<11> Sets the desired bond mode. -<12> Sets the preferred port of the bonding interface. 
The primary device is the first of the bonding interfaces to be used and is not abandoned unless it fails. This setting is particularly useful when one NIC in the bonding interface is faster and, therefore, able to handle a bigger load. This setting is only valid when the bonding interface is in `active-backup` mode (mode 1) and `balance-tlb` (mode 5). -<13> Sets a static IP address for the bond interface. This is the node IP address. -<14> Sets `bond0` as the gateway for the default route. \ No newline at end of file diff --git a/modules/agent-install-sample-config-bonds-vlans.adoc b/modules/agent-install-sample-config-bonds-vlans.adoc deleted file mode 100644 index 8c1a13ea404e..000000000000 --- a/modules/agent-install-sample-config-bonds-vlans.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-with-agent-based-installer/preparing-to-install-with-agent-based-installer.adoc - -:_content-type: REFERENCE -[id="agent-install-sample-config-bonds-vlans_{context}"] -= Example: Bonds and VLAN interface node network configuration - -The following `agent-config.yaml` file is an example of a manifest for bond and VLAN interfaces. - -[source,yaml] ----- - apiVersion: v1alpha1 - kind: AgentConfig - rendezvousIP: 10.10.10.14 - hosts: - - hostname: master0 - role: master - interfaces: - - name: enp0s4 - macAddress: 00:21:50:90:c0:10 - - name: enp0s5 - macAddress: 00:21:50:90:c0:20 - networkConfig: - interfaces: - - name: bond0.300 <1> - type: vlan <2> - state: up - vlan: - base-iface: bond0 - id: 300 - ipv4: - enabled: true - address: - - ip: 10.10.10.14 - prefix-length: 24 - dhcp: false - - name: bond0 <1> - type: bond <3> - state: up - mac-address: 00:21:50:90:c0:10 <4> - ipv4: - enabled: false - ipv6: - enabled: false - link-aggregation: - mode: active-backup <5> - options: - miimon: "150" <6> - port: - - enp0s4 - - enp0s5 - dns-resolver: <7> - config: - server: - - 10.10.10.11 - - 10.10.10.12 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 10.10.10.10 <8> - next-hop-interface: bond0.300 <9> - table-id: 254 ----- -<1> Name of the interface. -<2> The type of interface. This example creates a VLAN. -<3> The type of interface. This example creates a bond. -<4> The mac address of the interface. -<5> The `mode` attribute specifies the bonding mode. -<6> Specifies the MII link monitoring frequency in milliseconds. This example inspects the bond link every 150 milliseconds. -<7> Optional: Specifies the search and server settings for the DNS server. -<8> Next hop address for the node traffic. This must be in the same subnet as the IP address set for the specified interface. -<9> Next hop interface for the node traffic. diff --git a/modules/agent-installer-configuring-fips-compliance.adoc b/modules/agent-installer-configuring-fips-compliance.adoc deleted file mode 100644 index 7e988324cbd8..000000000000 --- a/modules/agent-installer-configuring-fips-compliance.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_bases_installer/preparing-to-install-with-agent-based-installer.adoc - - -:_content-type: PROCEDURE -[id="agent-installer-configuring-fips-compliance_{context}"] - -= Configuring FIPS through the Agent-based Installer - -During a cluster deployment, the Federal Information Processing Standards (FIPS) change is applied when the Red Hat Enterprise Linux CoreOS (RHCOS) machines are deployed in your cluster. 
For Red Hat Enterprise Linux (RHEL) machines, you must enable FIPS mode when you install the operating system on the machines that you plan to use as worker machines. - -You can enable FIPS mode through the preferred method of `install-config.yaml` and `agent.config.yaml`: - -. You must set value of the `fips` field to `True` in the `install-config.yaml` file: -+ -.Sample install-config.yaml.file - -[source,yaml] ----- -apiVersion: v1 -baseDomain: test.example.com -metadata: - name: sno-cluster -fips: True ----- - -. Optional: If you are using the {ztp} manifests, you must set the value of `fips` as `True` in the `Agent-install.openshift.io/install-config-overrides` field in the `agent-cluster-install.yaml` file: - -+ -.Sample agent-cluster-install.yaml file -[source,yaml] ----- -apiVersion: extensions.hive.openshift.io/v1beta1 -kind: AgentClusterInstall -metadata: - annotations: - agent-install.openshift.io/install-config-overrides: '{"fips": True}' - name: sno-cluster - namespace: sno-cluster-test ----- diff --git a/modules/agent-installer-fips-compliance.adoc b/modules/agent-installer-fips-compliance.adoc deleted file mode 100644 index fef47f8db3f8..000000000000 --- a/modules/agent-installer-fips-compliance.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_with_agent_bases_installer/preparing-to-install-with-agent-based-installer.adoc - - -:_content-type: CONCEPT -[id="agent-installer-fips-compliance_{context}"] -= About FIPS compliance - -For many {product-title} customers, regulatory readiness, or compliance, on some level is required before any systems can be put into production. That regulatory readiness can be imposed by national standards, industry standards or the organization's corporate governance framework. -Federal Information Processing Standards (FIPS) compliance is one of the most critical components required in highly secure environments to ensure that only supported cryptographic technologies are allowed on nodes. - -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== diff --git a/modules/ai-adding-worker-nodes-to-cluster.adoc b/modules/ai-adding-worker-nodes-to-cluster.adoc deleted file mode 100644 index e195cb619082..000000000000 --- a/modules/ai-adding-worker-nodes-to-cluster.adoc +++ /dev/null @@ -1,320 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="ai-adding-worker-nodes-to-cluster_{context}"] -= Adding worker nodes using the Assisted Installer REST API - -You can add worker nodes to clusters using the Assisted Installer REST API. - -.Prerequisites - -* Install the OpenShift Cluster Manager CLI (`ocm`). - -* Log in to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager}] as a user with cluster creation privileges. - -* Install `jq`. - -* Ensure that all the required DNS records exist for the cluster that you are adding the worker node to. - -.Procedure - -. Authenticate against the Assisted Installer REST API and generate a JSON web token (JWT) for your session. The generated JWT token is valid for 15 minutes only. - -. Set the `$API_URL` variable by running the following command: -+ -[source,terminal] ----- -$ export API_URL= <1> ----- -<1> Replace `` with the Assisted Installer API URL, for example, `https://api.openshift.com` - -. 
Import the {sno} cluster by running the following commands: - -.. Set the `$OPENSHIFT_CLUSTER_ID` variable. Log in to the cluster and run the following command: -+ -[source,terminal] ----- -$ export OPENSHIFT_CLUSTER_ID=$(oc get clusterversion -o jsonpath='{.items[].spec.clusterID}') ----- - -.. Set the `$CLUSTER_REQUEST` variable that is used to import the cluster: -+ -[source,terminal] ----- -$ export CLUSTER_REQUEST=$(jq --null-input --arg openshift_cluster_id "$OPENSHIFT_CLUSTER_ID" '{ - "api_vip_dnsname": "", <1> - "openshift_cluster_id": $openshift_cluster_id, - "name": "" <2> -}') ----- -<1> Replace `` with the hostname for the cluster's API server. This can be the DNS domain for the API server or the IP address of the single node which the worker node can reach. For example, `api.compute-1.example.com`. -<2> Replace `` with the plain text name for the cluster. The cluster name should match the cluster name that was set during the Day 1 cluster installation. - -.. Import the cluster and set the `$CLUSTER_ID` variable. Run the following command: -+ -[source,terminal] ----- -$ CLUSTER_ID=$(curl "$API_URL/api/assisted-install/v2/clusters/import" -H "Authorization: Bearer ${JWT_TOKEN}" -H 'accept: application/json' -H 'Content-Type: application/json' \ - -d "$CLUSTER_REQUEST" | tee /dev/stderr | jq -r '.id') ----- - -. Generate the `InfraEnv` resource for the cluster and set the `$INFRA_ENV_ID` variable by running the following commands: - -.. Download the pull secret file from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com]. - -.. Set the `$INFRA_ENV_REQUEST` variable: -+ -[source,terminal] ----- -export INFRA_ENV_REQUEST=$(jq --null-input \ - --slurpfile pull_secret \//<1> - --arg ssh_pub_key "$(cat )" \//<2> - --arg cluster_id "$CLUSTER_ID" '{ - "name": "", <3> - "pull_secret": $pull_secret[0] | tojson, - "cluster_id": $cluster_id, - "ssh_authorized_key": $ssh_pub_key, - "image_type": "" <4> -}') ----- -<1> Replace `` with the path to the local file containing the downloaded pull secret from Red Hat OpenShift Cluster Manager at link:console.redhat.com/openshift/install/pull-secret[console.redhat.com]. -<2> Replace `` with the path to the public SSH key required to access the host. If you do not set this value, you cannot access the host while in discovery mode. -<3> Replace `` with the plain text name for the `InfraEnv` resource. -<4> Replace `` with the ISO image type, either `full-iso` or `minimal-iso`. - -.. Post the `$INFRA_ENV_REQUEST` to the link:https://api.openshift.com/?urls.primaryName=assisted-service%20service#/installer/RegisterInfraEnv[/v2/infra-envs] API and set the `$INFRA_ENV_ID` variable: -+ -[source,terminal] ----- -$ INFRA_ENV_ID=$(curl "$API_URL/api/assisted-install/v2/infra-envs" -H "Authorization: Bearer ${JWT_TOKEN}" -H 'accept: application/json' -H 'Content-Type: application/json' -d "$INFRA_ENV_REQUEST" | tee /dev/stderr | jq -r '.id') ----- - -. 
Get the URL of the discovery ISO for the cluster worker node by running the following command: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/infra-envs/$INFRA_ENV_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -r '.download_url' ----- -+ -.Example output -[source,terminal] ----- -https://api.openshift.com/api/assisted-images/images/41b91e72-c33e-42ee-b80f-b5c5bbf6431a?arch=x86_64&image_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTYwMjYzNzEsInN1YiI6IjQxYjkxZTcyLWMzM2UtNDJlZS1iODBmLWI1YzViYmY2NDMxYSJ9.1EX_VGaMNejMhrAvVRBS7PDPIQtbOOc8LtG8OukE1a4&type=minimal-iso&version=4.13 ----- - -. Download the ISO: -+ -[source,terminal] ----- -$ curl -L -s '' --output rhcos-live-minimal.iso <1> ----- -<1> Replace `` with the URL for the ISO from the previous step. - -. Boot the new worker host from the downloaded `rhcos-live-minimal.iso`. - -. Get the list of hosts in the cluster that are _not_ installed. Keep running the following command until the new host shows up: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -r '.hosts[] | select(.status != "installed").id' ----- -+ -.Example output -[source,terminal] ----- -2294ba03-c264-4f11-ac08-2f1bb2f8c296 ----- - -. Set the `$HOST_ID` variable for the new worker node, for example: -+ -[source,terminal] ----- -$ HOST_ID= <1> ----- -<1> Replace `` with the host ID from the previous step. - -. Check that the host is ready to install by running the following command: -+ -[NOTE] -==== -Ensure that you copy the entire command including the complete `jq` expression. -==== -+ -[source,terminal] ----- -$ curl -s $API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID -H "Authorization: Bearer ${JWT_TOKEN}" | jq ' -def host_name($host): - if (.suggested_hostname // "") == "" then - if (.inventory // "") == "" then - "Unknown hostname, please wait" - else - .inventory | fromjson | .hostname - end - else - .suggested_hostname - end; - -def is_notable($validation): - ["failure", "pending", "error"] | any(. == $validation.status); - -def notable_validations($validations_info): - [ - $validations_info // "{}" - | fromjson - | to_entries[].value[] - | select(is_notable(.)) - ]; - -{ - "Hosts validations": { - "Hosts": [ - .hosts[] - | select(.status != "installed") - | { - "id": .id, - "name": host_name(.), - "status": .status, - "notable_validations": notable_validations(.validations_info) - } - ] - }, - "Cluster validations info": { - "notable_validations": notable_validations(.validations_info) - } -} -' -r ----- -+ -.Example output -[source,terminal] ----- -{ - "Hosts validations": { - "Hosts": [ - { - "id": "97ec378c-3568-460c-bc22-df54534ff08f", - "name": "localhost.localdomain", - "status": "insufficient", - "notable_validations": [ - { - "id": "ntp-synced", - "status": "failure", - "message": "Host couldn't synchronize with any NTP server" - }, - { - "id": "api-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - }, - { - "id": "api-int-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - }, - { - "id": "apps-domain-name-resolved-correctly", - "status": "error", - "message": "Parse error for domain name resolutions result" - } - ] - } - ] - }, - "Cluster validations info": { - "notable_validations": [] - } -} ----- - -. 
When the previous command shows that the host is ready, start the installation using the link:https://api.openshift.com/?urls.primaryName=assisted-service%20service#/installer/v2InstallHost[/v2/infra-envs/{infra_env_id}/hosts/{host_id}/actions/install] API by running the following command: -+ -[source,terminal] ----- -$ curl -X POST -s "$API_URL/api/assisted-install/v2/infra-envs/$INFRA_ENV_ID/hosts/$HOST_ID/actions/install" -H "Authorization: Bearer ${JWT_TOKEN}" ----- - -. As the installation proceeds, the installation generates pending certificate signing requests (CSRs) for the worker node. -+ -[IMPORTANT] -==== -You must approve the CSRs to complete the installation. -==== -+ -Keep running the following API call to monitor the cluster installation: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/clusters/$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq '{ - "Cluster day-2 hosts": - [ - .hosts[] - | select(.status != "installed") - | {id, requested_hostname, status, status_info, progress, status_updated_at, updated_at, infra_env_id, cluster_id, created_at} - ] -}' ----- -+ -.Example output -[source,terminal] ----- -{ - "Cluster day-2 hosts": [ - { - "id": "a1c52dde-3432-4f59-b2ae-0a530c851480", - "requested_hostname": "control-plane-1", - "status": "added-to-existing-cluster", - "status_info": "Host has rebooted and no further updates will be posted. Please check console for progress and to possibly approve pending CSRs", - "progress": { - "current_stage": "Done", - "installation_percentage": 100, - "stage_started_at": "2022-07-08T10:56:20.476Z", - "stage_updated_at": "2022-07-08T10:56:20.476Z" - }, - "status_updated_at": "2022-07-08T10:56:20.476Z", - "updated_at": "2022-07-08T10:57:15.306369Z", - "infra_env_id": "b74ec0c3-d5b5-4717-a866-5b6854791bd3", - "cluster_id": "8f721322-419d-4eed-aa5b-61b50ea586ae", - "created_at": "2022-07-06T22:54:57.161614Z" - } - ] -} ----- - -. Optional: Run the following command to see all the events for the cluster: -+ -[source,terminal] ----- -$ curl -s "$API_URL/api/assisted-install/v2/events?cluster_id=$CLUSTER_ID" -H "Authorization: Bearer ${JWT_TOKEN}" | jq -c '.[] | {severity, message, event_time, host_id}' ----- -+ -.Example output -[source,terminal] ----- -{"severity":"info","message":"Host compute-0: updated status from insufficient to known (Host is ready to be installed)","event_time":"2022-07-08T11:21:46.346Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from known to installing (Installation is in progress)","event_time":"2022-07-08T11:28:28.647Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from installing to installing-in-progress (Starting installation)","event_time":"2022-07-08T11:28:52.068Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Uploaded logs for host compute-0 cluster 8f721322-419d-4eed-aa5b-61b50ea586ae","event_time":"2022-07-08T11:29:47.802Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host compute-0: updated status from installing-in-progress to added-to-existing-cluster (Host has rebooted and no further updates will be posted. 
Please check console for progress and to possibly approve pending CSRs)","event_time":"2022-07-08T11:29:48.259Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} -{"severity":"info","message":"Host: compute-0, reached installation stage Rebooting","event_time":"2022-07-08T11:29:48.261Z","host_id":"9d7b3b44-1125-4ad0-9b14-76550087b445"} ----- - -. Log in to the cluster and approve the pending CSRs to complete the installation. - -.Verification - -* Check that the new worker node was successfully added to the cluster with a status of `Ready`: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -control-plane-1.example.com Ready master,worker 56m v1.26.0 -compute-1.example.com Ready worker 11m v1.26.0 ----- diff --git a/modules/ai-authenticating-against-ai-rest-api.adoc b/modules/ai-authenticating-against-ai-rest-api.adoc deleted file mode 100644 index 020e34fa64c0..000000000000 --- a/modules/ai-authenticating-against-ai-rest-api.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="ai-authenticating-against-ai-rest-api_{context}"] -= Authenticating against the Assisted Installer REST API - -Before you can use the Assisted Installer REST API, you must authenticate against the API using a JSON web token (JWT) that you generate. - -.Prerequisites - -* Log in to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager}] as a user with cluster creation privileges. - -* Install `jq`. - -.Procedure - -. Log in to link:https://console.redhat.com/openshift/token/show[{cluster-manager}] and copy your API token. - -. Set the `$OFFLINE_TOKEN` variable using the copied API token by running the following command: -+ -[source,terminal] ----- -$ export OFFLINE_TOKEN= ----- - -. Set the `$JWT_TOKEN` variable using the previously set `$OFFLINE_TOKEN` variable: -+ -[source,terminal] ----- -$ export JWT_TOKEN=$( - curl \ - --silent \ - --header "Accept: application/json" \ - --header "Content-Type: application/x-www-form-urlencoded" \ - --data-urlencode "grant_type=refresh_token" \ - --data-urlencode "client_id=cloud-services" \ - --data-urlencode "refresh_token=${OFFLINE_TOKEN}" \ - "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token" \ - | jq --raw-output ".access_token" -) ----- -+ -[NOTE] -==== -The JWT token is valid for 15 minutes only. 
-==== - -.Verification - -* Optional: Check that you can access the API by running the following command: -+ -[source,terminal] ----- -$ curl -s https://api.openshift.com/api/assisted-install/v2/component-versions -H "Authorization: Bearer ${JWT_TOKEN}" | jq ----- -+ -.Example output -[source,yaml] ----- -{ - "release_tag": "v2.5.1", - "versions": - { - "assisted-installer": "registry.redhat.io/rhai-tech-preview/assisted-installer-rhel8:v1.0.0-175", - "assisted-installer-controller": "registry.redhat.io/rhai-tech-preview/assisted-installer-reporter-rhel8:v1.0.0-223", - "assisted-installer-service": "quay.io/app-sre/assisted-service:ac87f93", - "discovery-agent": "registry.redhat.io/rhai-tech-preview/assisted-installer-agent-rhel8:v1.0.0-156" - } -} ----- diff --git a/modules/ai-sno-requirements-for-installing-worker-nodes.adoc b/modules/ai-sno-requirements-for-installing-worker-nodes.adoc deleted file mode 100644 index b0e32c1e235d..000000000000 --- a/modules/ai-sno-requirements-for-installing-worker-nodes.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// This is included in the following assemblies: -// -// * nodes/nodes/nodes-sno-worker-nodes.adoc - -[id="ai-sno-requirements-for-installing-worker-nodes_{context}"] -= Requirements for installing {sno} worker nodes - -To install a {sno} worker node, you must address the following requirements: - -* *Administration host:* You must have a computer to prepare the ISO and to monitor the installation. - -* *Production-grade server:* Installing {sno} worker nodes requires a server with sufficient resources to run {product-title} services and a production workload. -+ -.Minimum resource requirements -[options="header"] -|==== - -|Profile|vCPU|Memory|Storage - -|Minimum|2 vCPU cores|8GB of RAM| 100GB - -|==== -+ -[NOTE] -==== -One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: - -(threads per core × cores) × sockets = vCPUs -==== -+ -The server must have a Baseboard Management Controller (BMC) when booting with virtual media. - -* *Networking:* The worker node server must have access to the internet or access to a local registry if it is not connected to a routable network. The worker node server must have a DHCP reservation or a static IP address and be able to access the {sno} cluster Kubernetes API, ingress route, and cluster node domain names. You must configure the DNS to resolve the IP address to each of the following fully qualified domain names (FQDN) for the {sno} cluster: -+ -.Required DNS records -[options="header"] -|==== - -|Usage|FQDN|Description - -|Kubernetes API|`api..`| Add a DNS A/AAAA or CNAME record. This record must be resolvable by clients external to the cluster. - -|Internal API|`api-int..`| Add a DNS A/AAAA or CNAME record when creating the ISO manually. This record must be resolvable by nodes within the cluster. - -|Ingress route|`*.apps..`| Add a wildcard DNS A/AAAA or CNAME record that targets the node. This record must be resolvable by clients external to the cluster. - -|==== -+ -Without persistent IP addresses, communications between the `apiserver` and `etcd` might fail. 
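The DNS records in the preceding table can be checked from the administration host before you boot the worker node. The following is a minimal sketch, assuming a hypothetical cluster name `sno` and base domain `example.com`; substitute the fully qualified domain names for your environment and confirm that each name resolves to the expected address:

[source,terminal]
----
$ for fqdn in api.sno.example.com api-int.sno.example.com test.apps.sno.example.com; do
    echo -n "${fqdn}: "; dig +short "${fqdn}"
  done
----

The wildcard ingress record is exercised by querying an arbitrary name under `apps`, such as `test.apps.sno.example.com` in this sketch.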
diff --git a/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc b/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc deleted file mode 100644 index 74d9b70fbee6..000000000000 --- a/modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc -// * installing/installing_azure/manually-creating-iam-azure.adoc -// * installing/installing_gcp/manually-creating-iam-gcp.adoc -// * installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc - -ifeval::["{context}" == "manually-creating-iam-aws"] -:aws: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:azure: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:ibm-cloud: -endif::[] - -[id="alternatives-to-storing-admin-secrets-in-kube-system_{context}"] -= Alternatives to storing administrator-level secrets in the kube-system project - -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). You can configure the CCO to suit the security requirements of your organization by setting different values for the `credentialsMode` parameter in the `install-config.yaml` file. - -ifdef::aws,google-cloud-platform[] -If you prefer not to store an administrator-level credential secret in the cluster `kube-system` project, you can choose one of the following options when installing {product-title}: - -endif::aws,google-cloud-platform[] - -ifdef::aws[] -* *Use the Amazon Web Services Security Token Service*: -+ -You can use the CCO utility (`ccoctl`) to configure the cluster to use the Amazon Web Services Security Token Service (AWS STS). When the CCO utility is used to configure the cluster for STS, it assigns IAM roles that provide short-term, limited-privilege security credentials to components. -+ -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -endif::aws[] - -ifdef::google-cloud-platform[] -* *Use manual mode with GCP Workload Identity*: -+ -You can use the CCO utility (`ccoctl`) to configure the cluster to use manual mode with GCP Workload Identity. When the CCO utility is used to configure the cluster for GCP Workload Identity, it signs service account tokens that provide short-term, limited-privilege security credentials to components. -+ -[NOTE] -==== -This credentials strategy is supported for only new {product-title} clusters and must be configured during installation. You cannot reconfigure an existing cluster that uses a different credentials strategy to use this feature. -==== - -endif::google-cloud-platform[] - -ifdef::aws,google-cloud-platform[] -* *Manage cloud credentials manually*: -+ -You can set the `credentialsMode` parameter for the CCO to `Manual` to manage cloud credentials manually. Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. 
You must also manually supply credentials for every component that requests them. - -* *Remove the administrator-level credential secret after installing {product-title} with mint mode*: -+ -If you are using the CCO with the `credentialsMode` parameter set to `Mint`, you can remove or rotate the administrator-level credential after installing {product-title}. Mint mode is the default configuration for the CCO. This option requires the presence of the administrator-level credential during an installation. The administrator-level credential is used during the installation to mint other credentials with some permissions granted. The original credential secret is not stored in the cluster permanently. - -[NOTE] -==== -Prior to a non z-stream upgrade, you must reinstate the credential secret with the administrator-level credential. If the credential is not present, the upgrade might be blocked. -==== - -endif::aws,google-cloud-platform[] - -ifdef::azure[] -If you prefer not to store an administrator-level credential secret in the cluster `kube-system` project, you can set the `credentialsMode` parameter for the CCO to `Manual` when installing {product-title} and manage your cloud credentials manually. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. You must also manually supply credentials for every component that requests them. -endif::azure[] - -ifdef::ibm-cloud[] -Storing an administrator-level credential secret in the cluster `kube-system` project is not supported for IBM Cloud; therefore, you must set the `credentialsMode` parameter for the CCO to `Manual` when installing {product-title} and manage your cloud credentials manually. - -Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. You must also manually supply credentials for every component that requests them. -endif::ibm-cloud[] - -ifeval::["{context}" == "manually-creating-iam-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:!azure: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:!ibm-cloud: -endif::[] diff --git a/modules/annotating-a-route-with-a-cookie-name.adoc b/modules/annotating-a-route-with-a-cookie-name.adoc deleted file mode 100644 index f2bef5522ad0..000000000000 --- a/modules/annotating-a-route-with-a-cookie-name.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// *using-cookies-to-keep-route-statefulness - -[id="annotating-a-route-with-a-cookie_{context}"] -= Annotating a route with a cookie - -You can set a cookie name to overwrite the default, auto-generated one for the -route. This allows the application receiving route traffic to know the cookie -name. By deleting the cookie it can force the next request to re-choose an -endpoint. 
As a result, if a server is overloaded, it can try to remove the requests from the -client and redistribute them to other endpoints. - -.Procedure - -. Annotate the route with the desired cookie name: -+ -[source,terminal] ----- -$ oc annotate route router.openshift.io/="-" ----- -+ -For example, to annotate the route `my_route` with -the cookie name `my_cookie` and the annotation value `-my_cookie_anno`: -+ -[source,terminal] ----- -$ oc annotate route my_route router.openshift.io/my_cookie="-my_cookie_anno" ----- - -. Save the cookie, and access the route: -+ -[source,terminal] ----- -$ curl $my_route -k -c /tmp/my_cookie ----- diff --git a/modules/api-compatibility-common-terminology.adoc b/modules/api-compatibility-common-terminology.adoc deleted file mode 100644 index 9862ede14c06..000000000000 --- a/modules/api-compatibility-common-terminology.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-common-terminology_{context}"] -= API compatibility common terminology - -[id="api-compatibility-common-terminology-api_{context}"] -== Application Programming Interface (API) - -An API is a public interface implemented by a software program that enables it to interact with other software. In {product-title}, the API is served from a centralized API server and is used as the hub for all system interaction. - -[id="api-compatibility-common-terminology-aoe_{context}"] -== Application Operating Environment (AOE) - -An AOE is the integrated environment that executes the end-user application program. The AOE is a containerized environment that provides isolation from the host operating system (OS). At a minimum, the AOE allows the application to run in an isolated manner from the host OS libraries and binaries, but still share the same OS kernel as all other containers on the host. The AOE is enforced at runtime and it describes the interface between an application and its operating environment. It includes intersection points between the platform, operating system and environment, with the user application including projection of downward API, DNS, resource accounting, device access, platform workload identity, isolation among containers, isolation between containers and host OS. - -The AOE does not include components that might vary by installation, such as Container Network Interface (CNI) plugin selection or extensions to the product such as admission hooks. Components that integrate with the cluster at a level below the container environment might be subject to additional variation between versions. - -[id="api-compatibility-common-terminology-virtualized_{context}"] -== Compatibility in a virtualized environment - -Virtual environments emulate bare-metal environments such that unprivileged applications that run on bare-metal environments will run, unmodified, in corresponding virtual environments. Virtual environments present simplified abstracted views of physical resources, so some differences might exist. - -[id="api-compatibility-common-terminology-cloud_{context}"] -== Compatibility in a cloud environment - -{product-title} might choose to offer integration points with a hosting cloud environment via cloud provider specific integrations. The compatibility of these integration points is specific to the guarantee provided by the native cloud vendor and its intersection with the {product-title} compatibility window.
Where {product-title} provides an integration with a cloud environment natively as part of the default installation, Red Hat develops against stable cloud API endpoints to provide commercially reasonable support with forward looking compatibility that includes stable deprecation policies. Example areas of integration between the cloud provider and {product-title} include, but are not limited to, dynamic volume provisioning, service load balancer integration, pod workload identity, dynamic management of compute, and infrastructure provisioned as part of initial installation. - -[id="api-compatibility-common-terminology-releases_{context}"] -== Major, minor, and z-stream releases - -A Red Hat major release represents a significant step in the development of a product. Minor releases appear more frequently within the scope of a major release and represent deprecation boundaries that might impact future application compatibility. A z-stream release is an update to a minor release which provides a stream of continuous fixes to an associated minor release. API and AOE compatibility is never broken in a z-stream release except when this policy is explicitly overridden in order to respond to an unforeseen security impact. - -For example, in the release 4.3.2: - -* 4 is the major release version -* 3 is the minor release version -* 2 is the z-stream release version - -[id="api-compatibility-common-terminology-eus_{context}"] -== Extended user support (EUS) - -A minor release in an {product-title} major release that has an extended support window for critical bug fixes. Users are able to migrate between EUS releases by incrementally adopting minor versions between EUS releases. It is important to note that the deprecation policy is defined across minor releases and not EUS releases. As a result, an EUS user might have to respond to a deprecation when migrating to a future EUS while sequentially upgrading through each minor release. - -[id="api-compatibility-common-terminology-dev-preview_{context}"] -== Developer Preview - -An optional product capability that is not officially supported by Red Hat, but is intended to provide a mechanism to explore early phase technology. By default, Developer Preview functionality is opt-in, and subject to removal at any time. Enabling a Developer Preview feature might render a cluster unsupportable dependent upon the scope of the feature. - -[id="api-compatibility-common-terminology-tech-preview_{context}"] -== Technology Preview - -An optional product capability that provides early access to upcoming product innovations to test functionality and provide feedback during the development process. The feature is not fully supported, might not be functionally complete, and is not intended for production use. Usage of a Technology Preview function requires explicit opt-in. Learn more about the link:https://access.redhat.com/support/offerings/techpreview[Technology Preview Features Support Scope]. 
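As a small illustration of the release numbering described above, the following sketch splits a release string into its components by using standard shell word splitting; the `4.3.2` value is the example release from the text and the variable names are arbitrary:

[source,terminal]
----
$ IFS=. read -r major minor zstream <<< "4.3.2"
$ echo "major=${major} minor=${minor} z-stream=${zstream}"
----

The second command prints `major=4 minor=3 z-stream=2`, matching the breakdown given in the list above.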
diff --git a/modules/api-compatibility-exceptions.adoc b/modules/api-compatibility-exceptions.adoc deleted file mode 100644 index 720e5a597039..000000000000 --- a/modules/api-compatibility-exceptions.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc -// * microshift_rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-exceptions_{context}"] -= API compatibility exceptions - -The following are exceptions to compatibility in {product-title}: - -ifndef::microshift[] -[discrete] -[id="OS-file-system-modifications-not-made_{context}"] -== RHEL CoreOS file system modifications not made with a supported Operator - -No assurances are made at this time that a modification made to the host operating file system is preserved across minor releases except for where that modification is made through the public interface exposed via a supported Operator, such as the Machine Config Operator or Node Tuning Operator. - -[discrete] -[id="modifications-to-cluster-infrastructure-in-cloud_{context}"] -== Modifications to cluster infrastructure in cloud or virtualized environments - -No assurances are made at this time that a modification to the cloud hosting environment that supports the cluster is preserved except for where that modification is made through a public interface exposed in the product or is documented as a supported configuration. Cluster infrastructure providers are responsible for preserving their cloud or virtualized infrastructure except for where they delegate that authority to the product through an API. -endif::microshift[] - -[discrete] -[id="Functional-defaults-between-upgraded-cluster-new-installation_{context}"] -== Functional defaults between an upgraded cluster and a new installation - -No assurances are made at this time that a new installation of a product minor release will have the same functional defaults as a version of the product that was installed with a prior minor release and upgraded to the equivalent version. For example, future versions of the product may provision cloud infrastructure with different defaults than prior minor versions. In addition, different default security choices may be made in future versions of the product than those made in past versions of the product. Past versions of the product will forward upgrade, but preserve legacy choices where appropriate specifically to maintain backwards compatibility. - -[discrete] -[id="API-fields-that-have-the-prefix-unsupported-annotations_{context}"] -== Usage of API fields that have the prefix "unsupported” or undocumented annotations - -Select APIs in the product expose fields with the prefix `unsupported`. No assurances are made at this time that usage of this field is supported across releases or within a release. Product support can request a customer to specify a value in this field when debugging specific problems, but its usage is not supported outside of that interaction. Usage of annotations on objects that are not explicitly documented are not assured support across minor releases. - -[discrete] -[id="API-availability-per-product-installation-topology_{context}"] -== API availability per product installation topology -The OpenShift distribution will continue to evolve its supported installation topology, and not all APIs in one install topology will necessarily be included in another. 
For example, certain topologies may restrict read/write access to particular APIs if they are in conflict with the product installation topology or not include a particular API at all if not pertinent to that topology. APIs that exist in a given topology will be supported in accordance with the compatibility tiers defined above. diff --git a/modules/api-compatibility-guidelines.adoc b/modules/api-compatibility-guidelines.adoc deleted file mode 100644 index b165dd9bea87..000000000000 --- a/modules/api-compatibility-guidelines.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-compatibility-guidelines.adoc - -[id="api-compatibility-guidelines_{context}"] -= API compatibility guidelines - -Red Hat recommends that application developers adopt the following principles in order to improve compatibility with {product-title}: - -* Use APIs and components with support tiers that match the application's need. -* Build applications using the published client libraries where possible. -* Applications are only guaranteed to run correctly if they execute in an environment that is as new as the environment it was built to execute against. An application that was built for {product-title} 4.7 is not guaranteed to function properly on {product-title} 4.6. -* Do not design applications that rely on configuration files provided by system packages or other components. These files can change between versions unless the upstream community is explicitly committed to preserving them. Where appropriate, depend on any Red Hat provided interface abstraction over those configuration files in order to maintain forward compatibility. Direct file system modification of configuration files is discouraged, and users are strongly encouraged to integrate with an Operator provided API where available to avoid dual-writer conflicts. -* Do not depend on API fields prefixed with `unsupported` or annotations that are not explicitly mentioned in product documentation. -* Do not depend on components with shorter compatibility guarantees than your application. -* Do not perform direct storage operations on the etcd server. All etcd access must be performed via the api-server or through documented backup and restore procedures. - -Red Hat recommends that application developers follow the link:https://access.redhat.com/articles/rhel8-abi-compatibility#Guidelines[compatibility guidelines] defined by {op-system-base-full}. {product-title} strongly recommends the following guidelines when building an application or hosting an application on the platform: - -* Do not depend on a specific Linux kernel or {product-title} version. -* Avoid reading from `proc`, `sys`, and `debug` file systems, or any other pseudo file system. -* Avoid using `ioctls` to directly interact with hardware. -* Avoid direct interaction with `cgroups` in order to not conflict with {product-title} host-agents that provide the container execution environment. - -[NOTE] -==== -During the lifecycle of a release, Red Hat makes commercially reasonable efforts to maintain API and application operating environment (AOE) compatibility across all minor releases and z-stream releases. If necessary, Red Hat might make exceptions to this compatibility goal for critical impact security or other significant issues. 
-==== diff --git a/modules/api-support-deprecation-policy.adoc b/modules/api-support-deprecation-policy.adoc deleted file mode 100644 index c17a088bece5..000000000000 --- a/modules/api-support-deprecation-policy.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-deprecation-policy_{context}"] -= API deprecation policy - -{product-title} is composed of many components sourced from many upstream communities. It is anticipated that the set of components, the associated API interfaces, and correlated features will evolve over time and might require formal deprecation in order to remove the capability. - -[id="deprecating-parts-of-the-api_{context}"] -== Deprecating parts of the API - -{product-title} is a distributed system where multiple components interact with a shared state managed by the cluster control plane through a set of structured APIs. Per Kubernetes conventions, each API presented by {product-title} is associated with a group identifier and each API group is independently versioned. Each API group is managed in a distinct upstream community including Kubernetes, Metal3, Multus, Operator Framework, Open Cluster Management, OpenShift itself, and more. - -While each upstream community might define their own unique deprecation policy for a given API group and version, Red Hat normalizes the community specific policy to one of the compatibility levels defined prior based on our integration in and awareness of each upstream community to simplify end-user consumption and support. - -The deprecation policy and schedule for APIs vary by compatibility level. - -The deprecation policy covers all elements of the API including: - -* REST resources, also known as API objects -* Fields of REST resources -* Annotations on REST resources, excluding version-specific qualifiers -* Enumerated or constant values - -Other than the most recent API version in each group, older API versions must be supported after their announced deprecation for a duration of no less than: - -[cols="2",options="header"] -|=== -|API tier -|Duration - -|Tier 1 -|Stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -|Tier 2 -|9 months or 3 releases from the announcement of deprecation, whichever is longer. - -|Tier 3 -|See the component-specific schedule. - -|Tier 4 -|None. No compatibility is guaranteed. - -|=== - -The following rules apply to all tier 1 APIs: - -* API elements can only be removed by incrementing the version of the group. -* API objects must be able to round-trip between API versions without information loss, with the exception of whole REST resources that do not exist in some versions. In cases where equivalent fields do not exist between versions, data will be preserved in the form of annotations during conversion. -* API versions in a given group can not deprecate until a new API version at least as stable is released, except in cases where the entire API object is being removed. - -[id="deprecating-cli-elements_{context}"] -== Deprecating CLI elements - -Client-facing CLI commands are not versioned in the same way as the API, but are user-facing component systems. The two major ways a user interacts with a CLI are through a command or flag, which is referred to in this context as CLI elements. - -All CLI elements default to API tier 1 unless otherwise noted or the CLI depends on a lower tier API. 
- -[cols="3",options="header"] -|=== - -| -|Element -|API tier - -|Generally available (GA) -|Flags and commands -|Tier 1 - -|Technology Preview -|Flags and commands -|Tier 3 - -|Developer Preview -|Flags and commands -|Tier 4 - -|=== - -[id="deprecating-entire-component_{context}"] -== Deprecating an entire component - -The duration and schedule for deprecating an entire component maps directly to the duration associated with the highest API tier of an API exposed by that component. For example, a component that surfaced APIs with tier 1 and 2 could not be removed until the tier 1 deprecation schedule was met. - -[cols="2",options="header"] -|=== -|API tier -|Duration - -|Tier 1 -|Stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -|Tier 2 -|9 months or 3 releases from the announcement of deprecation, whichever is longer. - -|Tier 3 -|See the component-specific schedule. - -|Tier 4 -|None. No compatibility is guaranteed. - -|=== diff --git a/modules/api-support-tiers-mapping.adoc b/modules/api-support-tiers-mapping.adoc deleted file mode 100644 index 43d98272d256..000000000000 --- a/modules/api-support-tiers-mapping.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-support-tiers-mapping_{context}"] -= Mapping API tiers to API groups - -For each API tier defined by Red Hat, we provide a mapping table for specific API groups where the upstream communities are committed to maintain forward compatibility. Any API group that does not specify an explicit compatibility level and is not specifically discussed below is assigned API tier 3 by default except for `v1alpha1` APIs which are assigned tier 4 by default. - -[id="mapping-support-tiers-to-kubernetes-api-groups_{context}"] -== Support for Kubernetes API groups - -API groups that end with the suffix `*.k8s.io` or have the form `version.` with no suffix are governed by the Kubernetes deprecation policy and follow a general mapping between API version exposed and corresponding support tier unless otherwise specified. - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`v1` -|Tier 1 - -|`v1beta1` -|Tier 2 - -|`v1alpha1` -|Tier 4 - -|=== - -ifndef::microshift[] -[id="mapping-support-tiers-to-openshift-api-groups_{context}"] -== Support for OpenShift API groups - -API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified. 
- -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`apps.openshift.io/v1` -|Tier 1 - -|`authorization.openshift.io/v1` -|Tier 1, some tier 1 deprecated - -|`build.openshift.io/v1` -|Tier 1, some tier 1 deprecated - -|`config.openshift.io/v1` -|Tier 1 - -|`image.openshift.io/v1` -|Tier 1 - -|`network.openshift.io/v1` -|Tier 1 - -|`network.operator.openshift.io/v1` -|Tier 1 - -|`oauth.openshift.io/v1` -|Tier 1 - -|`imagecontentsourcepolicy.operator.openshift.io/v1alpha1` -|Tier 1 - -|`project.openshift.io/v1` -|Tier 1 - -|`quota.openshift.io/v1` -|Tier 1 - -|`route.openshift.io/v1` -|Tier 1 - -|`quota.openshift.io/v1` -|Tier 1 - -|`security.openshift.io/v1` -|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2) - -|`template.openshift.io/v1` -|Tier 1 - -|`console.openshift.io/v1` -|Tier 2 - -|=== -endif::microshift[] - -ifdef::microshift[] -[id="microshift-mapping-support-tiers-to-openshift-api-groups_{context}"] -== Support for OpenShift API groups -API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified. - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`route.openshift.io/v1` -|Tier 1 - -|`security.openshift.io/v1` -|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2) - -|=== -endif::microshift[] - -ifndef::microshift[] -[id="mapping-support-tiers-to-monitoring-api-groups_{context}"] -== Support for Monitoring API groups - -API groups that end with the suffix `monitoring.coreos.com` have the following mapping: - -[cols="2",options="header"] -|=== -|API version example -|API tier - -|`v1` -|Tier 1 - -|=== -endif::microshift[] \ No newline at end of file diff --git a/modules/api-support-tiers.adoc b/modules/api-support-tiers.adoc deleted file mode 100644 index 7b83cf1e81f7..000000000000 --- a/modules/api-support-tiers.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * rest_api/understanding-api-support-tiers.adoc - -[id="api-tiers_{context}"] -= API tiers - -All commercially supported APIs, components, and features are associated under one of the following support levels: - -[discrete] -[id="api-tier-1_{context}"] -== API tier 1 -APIs and application operating environments (AOEs) are stable within a major release. They may be deprecated within a major release, but they will not be removed until a subsequent major release. - -[discrete] -[id="api-tier-2_{context}"] -== API tier 2 -APIs and AOEs are stable within a major release for a minimum of 9 months or 3 minor releases from the announcement of deprecation, whichever is longer. - -[discrete] -[id="api-tier-3_{context}"] -== API tier 3 -This level applies to languages, tools, applications, and optional Operators included with {product-title} through Operator Hub. Each component will specify a lifetime during which the API and AOE will be supported. Newer versions of language runtime specific components will attempt to be as API and AOE compatible from minor version to minor version as possible. Minor version to minor version compatibility is not guaranteed, however. - -Components and developer tools that receive continuous updates through the Operator Hub, referred to as Operators and operands, should be considered API tier 3. Developers should use caution and understand how these components may change with each minor release. 
Users are encouraged to consult the compatibility guidelines documented by the component. - -[discrete] -[id="api-tier-4_{context}"] -== API tier 4 -No compatibility is provided. API and AOE can change at any point. These capabilities should not be used by applications needing long-term support. - -It is common practice for Operators to use custom resource definitions (CRDs) internally to accomplish a task. These objects are not meant for use by actors external to the Operator and are intended to be hidden. If any CRD is not meant for use by actors external to the Operator, the `operators.operatorframework.io/internal-objects` annotation in the Operators `ClusterServiceVersion` (CSV) should be specified to signal that the corresponding resource is internal use only and the CRD may be explicitly labeled as tier 4. diff --git a/modules/apiserversource-kn.adoc b/modules/apiserversource-kn.adoc deleted file mode 100644 index 53c684393991..000000000000 --- a/modules/apiserversource-kn.adoc +++ /dev/null @@ -1,156 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/event-sources/serverless-apiserversource.adoc -// * serverless/reference/kn-eventing-ref.adoc - -:_content-type: PROCEDURE -[id="apiserversource-kn_{context}"] -= Creating an API server source by using the Knative CLI - -You can use the `kn source apiserver create` command to create an API server source by using the `kn` CLI. Using the `kn` CLI to create an API server source provides a more streamlined and intuitive user interface than modifying YAML files directly. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on the cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* You have installed the OpenShift CLI (`oc`). -* You have installed the Knative (`kn`) CLI. - -.Procedure - -include::snippets/serverless-service-account-apiserversource.adoc[] - -. Create an API server source that has an event sink. In the following example, the sink is a broker: -+ -[source,terminal] ----- -$ kn source apiserver create --sink broker: --resource "event:v1" --service-account --mode Resource ----- -// need to revisit these docs and give better tutorial examples with different sinks; out of scope for the current PR - -. To check that the API server source is set up correctly, create a Knative service that dumps incoming messages to its log: -+ -[source,terminal] ----- -$ kn service create --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. If you used a broker as an event sink, create a trigger to filter events from the `default` broker to the service: -+ -[source,terminal] ----- -$ kn trigger create --sink ksvc: ----- - -. Create events by launching a pod in the default namespace: -+ -[source,terminal] ----- -$ oc create deployment hello-node --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. 
Check that the controller is mapped correctly by inspecting the output generated by the following command: -+ -[source,terminal] ----- -$ kn source apiserver describe ----- -+ -.Example output -[source,terminal] ----- -Name: mysource -Namespace: default -Annotations: sources.knative.dev/creator=developer, sources.knative.dev/lastModifier=developer -Age: 3m -ServiceAccountName: events-sa -Mode: Resource -Sink: - Name: default - Namespace: default - Kind: Broker (eventing.knative.dev/v1) -Resources: - Kind: event (v1) - Controller: false -Conditions: - OK TYPE AGE REASON - ++ Ready 3m - ++ Deployed 3m - ++ SinkProvided 3m - ++ SufficientPermissions 3m - ++ EventTypesProvided 3m ----- - -.Verification - -You can verify that the Kubernetes events were sent to Knative by looking at the message dumper function logs. - -. Get the pods: -+ -[source,terminal] ----- -$ oc get pods ----- - -. View the message dumper function logs for the pods: -+ -[source,terminal] ----- -$ oc logs $(oc get pod -o name | grep event-display) -c user-container ----- -+ -.Example output -[source,terminal] ----- -☁️ cloudevents.Event -Validation: valid -Context Attributes, - specversion: 1.0 - type: dev.knative.apiserver.resource.update - datacontenttype: application/json - ... -Data, - { - "apiVersion": "v1", - "involvedObject": { - "apiVersion": "v1", - "fieldPath": "spec.containers{hello-node}", - "kind": "Pod", - "name": "hello-node", - "namespace": "default", - ..... - }, - "kind": "Event", - "message": "Started container", - "metadata": { - "name": "hello-node.159d7608e3a3572c", - "namespace": "default", - .... - }, - "reason": "Started", - ... - } ----- - -.Deleting the API server source - -. Delete the trigger: -+ -[source,terminal] ----- -$ kn trigger delete ----- - -. Delete the event source: -+ -[source,terminal] ----- -$ kn source apiserver delete ----- - -. Delete the service account, cluster role, and cluster binding: -+ -[source,terminal] ----- -$ oc delete -f authentication.yaml ----- diff --git a/modules/apiserversource-yaml.adoc b/modules/apiserversource-yaml.adoc deleted file mode 100644 index 6cacbe9cc46f..000000000000 --- a/modules/apiserversource-yaml.adoc +++ /dev/null @@ -1,217 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/event-sources/serverless-apiserversource.adoc - -:_content-type: PROCEDURE -[id="apiserversource-yaml_context"] -= Creating an API server source by using YAML files - -Creating Knative resources by using YAML files uses a declarative API, which enables you to describe event sources declaratively and in a reproducible manner. To create an API server source by using YAML, you must create a YAML file that defines an `ApiServerSource` object, then apply it by using the `oc apply` command. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on the cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* You have created the `default` broker in the same namespace as the one defined in the API server source YAML file. -* Install the OpenShift CLI (`oc`). - -.Procedure - -include::snippets/serverless-service-account-apiserversource.adoc[] - -. 
Create an API server source as a YAML file: -+ -[source,yaml] ----- -apiVersion: sources.knative.dev/v1alpha1 -kind: ApiServerSource -metadata: - name: testevents -spec: - serviceAccountName: events-sa - mode: Resource - resources: - - apiVersion: v1 - kind: Event - sink: - ref: - apiVersion: eventing.knative.dev/v1 - kind: Broker - name: default ----- - -. Apply the `ApiServerSource` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. To check that the API server source is set up correctly, create a Knative service as a YAML file that dumps incoming messages to its log: -+ -[source,yaml] ----- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: event-display - namespace: default -spec: - template: - spec: - containers: - - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- - -. Apply the `Service` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. Create a `Trigger` object as a YAML file that filters events from the `default` broker to the service created in the previous step: -+ -[source,yaml] ----- -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: event-display-trigger - namespace: default -spec: - broker: default - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: event-display ----- - -. Apply the `Trigger` YAML file: -+ -[source,terminal] ----- -$ oc apply -f ----- - -. Create events by launching a pod in the default namespace: -+ -[source,terminal] ----- -$ oc create deployment hello-node --image=quay.io/openshift-knative/knative-eventing-sources-event-display ----- - -. Check that the controller is mapped correctly, by entering the following command and inspecting the output: -+ -[source,terminal] ----- -$ oc get apiserversource.sources.knative.dev testevents -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: sources.knative.dev/v1alpha1 -kind: ApiServerSource -metadata: - annotations: - creationTimestamp: "2020-04-07T17:24:54Z" - generation: 1 - name: testevents - namespace: default - resourceVersion: "62868" - selfLink: /apis/sources.knative.dev/v1alpha1/namespaces/default/apiserversources/testevents2 - uid: 1603d863-bb06-4d1c-b371-f580b4db99fa -spec: - mode: Resource - resources: - - apiVersion: v1 - controller: false - controllerSelector: - apiVersion: "" - kind: "" - name: "" - uid: "" - kind: Event - labelSelector: {} - serviceAccountName: events-sa - sink: - ref: - apiVersion: eventing.knative.dev/v1 - kind: Broker - name: default ----- - -.Verification - -To verify that the Kubernetes events were sent to Knative, you can look at the message dumper function logs. - -. Get the pods by entering the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -. View the message dumper function logs for the pods by entering the following command: -+ -[source,terminal] ----- -$ oc logs $(oc get pod -o name | grep event-display) -c user-container ----- -+ -.Example output -[source,terminal] ----- -☁️ cloudevents.Event -Validation: valid -Context Attributes, - specversion: 1.0 - type: dev.knative.apiserver.resource.update - datacontenttype: application/json - ... -Data, - { - "apiVersion": "v1", - "involvedObject": { - "apiVersion": "v1", - "fieldPath": "spec.containers{hello-node}", - "kind": "Pod", - "name": "hello-node", - "namespace": "default", - ..... - }, - "kind": "Event", - "message": "Started container", - "metadata": { - "name": "hello-node.159d7608e3a3572c", - "namespace": "default", - .... 
- }, - "reason": "Started", - ... - } ----- - -.Deleting the API server source - -. Delete the trigger: -+ -[source,terminal] ----- -$ oc delete -f trigger.yaml ----- - -. Delete the event source: -+ -[source,terminal] ----- -$ oc delete -f k8s-events.yaml ----- - -. Delete the service account, cluster role, and cluster binding: -+ -[source,terminal] ----- -$ oc delete -f authentication.yaml ----- diff --git a/modules/application-health-about.adoc b/modules/application-health-about.adoc deleted file mode 100644 index 2124f13ba458..000000000000 --- a/modules/application-health-about.adoc +++ /dev/null @@ -1,210 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/application-health.adoc - -:_content-type: CONCEPT -[id="application-health-about_{context}"] -= Understanding health checks - -A health check periodically performs diagnostics on a -running container using any combination of the readiness, liveness, and startup health checks. - -You can include one or more probes in the specification for the pod that contains the container which you want to perform the health checks. - -[NOTE] -==== -If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. -==== - -Readiness probe:: -A _readiness probe_ determines if a container is ready to accept service requests. If -the readiness probe fails for a container, the kubelet removes the pod from the list of available service endpoints. -+ -After a failure, the probe continues to examine the pod. If the pod becomes available, the kubelet adds the pod to the list of available service endpoints. - -Liveness health check:: -A _liveness probe_ determines if a container is still -running. If the liveness probe fails due to a condition such as a deadlock, the kubelet kills the container. The pod then -responds based on its restart policy. -+ -For example, a liveness probe on a pod with a `restartPolicy` of `Always` or `OnFailure` -kills and restarts the container. - -Startup probe:: -A _startup probe_ indicates whether the application within a container is started. All other probes are disabled until the startup succeeds. If the startup probe does not succeed within a specified time period, the kubelet kills the container, and the container is subject to the pod `restartPolicy`. -+ -Some applications can require additional startup time on their first initialization. You can use a startup probe with a liveness or readiness probe to delay that probe long enough to handle lengthy start-up time using the `failureThreshold` and `periodSeconds` parameters. -+ -For example, you can add a startup probe, with a `failureThreshold` of 30 failures and a `periodSeconds` of 10 seconds (30 * 10s = 300s) for a maximum of 5 minutes, to a liveness probe. After the startup probe succeeds the first time, the liveness probe takes over. - -You can configure liveness, readiness, and startup probes with any of the following types of tests: - -* HTTP `GET`: When using an HTTP `GET` test, the test determines the healthiness of the container by using a web hook. The test is successful if the HTTP response code is between `200` and `399`. -+ -You can use an HTTP `GET` test with applications that return HTTP status codes when completely initialized. - -* Container Command: When using a container command test, the probe executes a command inside the container. 
The probe is successful if the test exits with a `0` status. - -* TCP socket: When using a TCP socket test, the probe attempts to open a socket to the container. The container is only -considered healthy if the probe can establish a connection. You can use a TCP socket test with applications that do not start listening until -initialization is complete. - -You can configure several fields to control the behavior of a probe: - -* `initialDelaySeconds`: The time, in seconds, after the container starts before the probe can be scheduled. The default is 0. -* `periodSeconds`: The delay, in seconds, between performing probes. The default is `10`. This value must be greater than `timeoutSeconds`. -* `timeoutSeconds`: The number of seconds of inactivity after which the probe times out and the container is assumed to have failed. The default is `1`. This value must be lower than `periodSeconds`. -* `successThreshold`: The number of times that the probe must report success after a failure to reset the container status to successful. The value must be `1` for a liveness probe. The default is `1`. -* `failureThreshold`: The number of times that the probe is allowed to fail. The default is 3. After the specified attempts: -** for a liveness probe, the container is restarted -** for a readiness probe, the pod is marked `Unready` -** for a startup probe, the container is killed and is subject to the pod's `restartPolicy` - -[discrete] -[id="application-health-examples"] -== Example probes - -The following are samples of different probes as they would appear in an object specification. - -.Sample readiness probe with a container command readiness probe in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... -spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - readinessProbe: <3> - exec: <4> - command: <5> - - cat - - /tmp/healthy -... ----- - -<1> The container name. -<2> The container image to deploy. -<3> A readiness probe. -<4> A container command test. -<5> The commands to execute on the container. - -.Sample container command startup probe and liveness probe with container command tests in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... -spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - httpGet: <4> - scheme: HTTPS <5> - path: /healthz - port: 8080 <6> - httpHeaders: - - name: X-Custom-Header - value: Awesome - startupProbe: <7> - httpGet: <8> - path: /healthz - port: 8080 <9> - failureThreshold: 30 <10> - periodSeconds: 10 <11> -... ----- - -<1> The container name. -<2> Specify the container image to deploy. -<3> A liveness probe. -<4> An HTTP `GET` test. -<5> The internet scheme: `HTTP` or `HTTPS`. The default value is `HTTP`. -<6> The port on which the container is listening. -<7> A startup probe. -<8> An HTTP `GET` test. -<9> The port on which the container is listening. -<10> The number of times to try the probe after a failure. -<11> The number of seconds to perform the probe. - -.Sample liveness probe with a container command test that uses a timeout in a pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -... 
-spec: - containers: - - name: goproxy-app <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - exec: <4> - command: <5> - - /bin/bash - - '-c' - - timeout 60 /opt/eap/bin/livenessProbe.sh - periodSeconds: 10 <6> - successThreshold: 1 <7> - failureThreshold: 3 <8> -... ----- - -<1> The container name. -<2> Specify the container image to deploy. -<3> The liveness probe. -<4> The type of probe, here a container command probe. -<5> The command line to execute inside the container. -<6> How often in seconds to perform the probe. -<7> The number of consecutive successes needed to show success after a failure. -<8> The number of times to try the probe after a failure. - -.Sample readiness probe and liveness probe with a TCP socket test in a deployment -[source,yaml] ----- -kind: Deployment -apiVersion: apps/v1 -... -spec: -... - template: - spec: - containers: - - resources: {} - readinessProbe: <1> - tcpSocket: - port: 8080 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - terminationMessagePath: /dev/termination-log - name: ruby-ex - livenessProbe: <2> - tcpSocket: - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 -... ----- -<1> The readiness probe. -<2> The liveness probe. diff --git a/modules/application-health-configuring.adoc b/modules/application-health-configuring.adoc deleted file mode 100644 index fe308a5dc9d8..000000000000 --- a/modules/application-health-configuring.adoc +++ /dev/null @@ -1,136 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/application-health.adoc - -:_content-type: PROCEDURE -[id="application-health-configuring_{context}"] -= Configuring health checks using the CLI - -To configure readiness, liveness, and startup probes, add one or more probes to the specification for the pod that contains the container which you want to perform the health checks - -[NOTE] -==== -If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. -==== - -.Procedure - -To add probes for a container: - -. Create a `Pod` object to add one or more probes: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - labels: - test: health-check - name: my-application -spec: - containers: - - name: my-container <1> - args: - image: registry.k8s.io/goproxy:0.1 <2> - livenessProbe: <3> - tcpSocket: <4> - port: 8080 <5> - initialDelaySeconds: 15 <6> - periodSeconds: 20 <7> - timeoutSeconds: 10 <8> - readinessProbe: <9> - httpGet: <10> - host: my-host <11> - scheme: HTTPS <12> - path: /healthz - port: 8080 <13> - startupProbe: <14> - exec: <15> - command: <16> - - cat - - /tmp/healthy - failureThreshold: 30 <17> - periodSeconds: 20 <18> - timeoutSeconds: 10 <19> ----- -<1> Specify the container name. -<2> Specify the container image to deploy. -<3> Optional: Create a Liveness probe. -<4> Specify a test to perform, here a TCP Socket test. -<5> Specify the port on which the container is listening. -<6> Specify the time, in seconds, after the container starts before the probe can be scheduled. -<7> Specify the number of seconds to perform the probe. The default is `10`. This value must be greater than `timeoutSeconds`. -<8> Specify the number of seconds of inactivity after which the probe is assumed to have failed. The default is `1`. 
This value must be lower than `periodSeconds`. -<9> Optional: Create a Readiness probe. -<10> Specify the type of test to perform, here an HTTP test. -<11> Specify a host IP address. When `host` is not defined, the `PodIP` is used. -<12> Specify `HTTP` or `HTTPS`. When `scheme` is not defined, the `HTTP` scheme is used. -<13> Specify the port on which the container is listening. -<14> Optional: Create a Startup probe. -<15> Specify the type of test to perform, here an Container Execution probe. -<16> Specify the commands to execute on the container. -<17> Specify the number of times to try the probe after a failure. -<18> Specify the number of seconds to perform the probe. The default is `10`. This value must be greater than `timeoutSeconds`. -<19> Specify the number of seconds of inactivity after which the probe is assumed to have failed. The default is `1`. This value must be lower than `periodSeconds`. -+ -[NOTE] -==== -If the `initialDelaySeconds` value is lower than the `periodSeconds` value, the first Readiness probe occurs at some point between the two periods due to an issue with timers. - -The `timeoutSeconds` value must be lower than the `periodSeconds` value. -==== - -. Create the `Pod` object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -. Verify the state of the health check pod: -+ -[source,terminal] ----- -$ oc describe pod health-check ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 9s default-scheduler Successfully assigned openshift-logging/liveness-exec to ip-10-0-143-40.ec2.internal - Normal Pulling 2s kubelet, ip-10-0-143-40.ec2.internal pulling image "registry.k8s.io/liveness" - Normal Pulled 1s kubelet, ip-10-0-143-40.ec2.internal Successfully pulled image "registry.k8s.io/liveness" - Normal Created 1s kubelet, ip-10-0-143-40.ec2.internal Created container - Normal Started 1s kubelet, ip-10-0-143-40.ec2.internal Started container ----- -+ -The following is the output of a failed probe that restarted a container: -+ -.Sample Liveness check output with unhealthy container -[source,terminal] ----- -$ oc describe pod pod1 ----- -+ -.Example output -[source,terminal] ----- -.... 
- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled Successfully assigned aaa/liveness-http to ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj - Normal AddedInterface 47s multus Add eth0 [10.129.2.11/23] - Normal Pulled 46s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 773.406244ms - Normal Pulled 28s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 233.328564ms - Normal Created 10s (x3 over 46s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Created container liveness - Normal Started 10s (x3 over 46s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Started container liveness - Warning Unhealthy 10s (x6 over 34s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Liveness probe failed: HTTP probe failed with statuscode: 500 - Normal Killing 10s (x2 over 28s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Container liveness failed liveness probe, will be restarted - Normal Pulling 10s (x3 over 47s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Pulling image "registry.k8s.io/liveness" - Normal Pulled 10s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "registry.k8s.io/liveness" in 244.116568ms ----- diff --git a/modules/applications-create-using-cli-image.adoc b/modules/applications-create-using-cli-image.adoc deleted file mode 100644 index 09570b85efda..000000000000 --- a/modules/applications-create-using-cli-image.adoc +++ /dev/null @@ -1,38 +0,0 @@ -[id="applications-create-using-cli-image_{context}"] -= Creating an application from an image - -You can deploy an application from an existing image. Images can come from image streams in the {product-title} server, images in a specific registry, or images in the local Docker server. - -The `new-app` command attempts to determine the type of image specified in the arguments passed to it. However, you can explicitly tell `new-app` whether the image is a container image using the `--docker-image` argument or an image stream using the `-i|--image-stream` argument. - -[NOTE] -==== -If you specify an image from your local Docker repository, you must ensure that the same image is available to the {product-title} cluster nodes. -==== - -== Docker Hub MySQL image - -Create an application from the Docker Hub MySQL image, for example: - -[source,terminal] ----- -$ oc new-app mysql ----- - -== Image in a private registry - -Create an application using an image in a private registry, specify the full container image specification: - -[source,terminal] ----- -$ oc new-app myregistry:5000/example/myimage ----- - -== Existing image stream and optional image stream tag - -Create an application from an existing image stream and optional image stream tag: - -[source,terminal] ----- -$ oc new-app my-stream:v1 ----- diff --git a/modules/applications-create-using-cli-modify.adoc b/modules/applications-create-using-cli-modify.adoc deleted file mode 100644 index f5975d0617c4..000000000000 --- a/modules/applications-create-using-cli-modify.adoc +++ /dev/null @@ -1,225 +0,0 @@ -[id="applications-create-using-cli-modify_{context}"] -= Modifying application creation - -The `new-app` command generates {product-title} objects that build, deploy, and run the application that is created. Normally, these objects are created in the current project and assigned names that are derived from the input source repositories or the input images. 
However, with `new-app` you can modify this behavior. - -.`new-app` output objects -[cols="2,8",options="header"] -|=== - -|Object |Description - -|`BuildConfig` -|A `BuildConfig` object is created for each source repository that is specified in the command line. The `BuildConfig` object specifies the strategy to use, the source location, and the build output location. - -|`ImageStreams` -|For the `BuildConfig` object, two image streams are usually created. One represents the input image. With source builds, this is the builder image. -ifndef::openshift-online[] -With `Docker` builds, this is the *FROM* image. -endif::[] -The second one represents the output image. If a container image was specified as input to `new-app`, then an image stream is created for that image as well. - -|`DeploymentConfig` -|A `DeploymentConfig` object is created either to deploy the output of a build, or a specified image. The `new-app` command creates `emptyDir` volumes for all Docker volumes that are specified in containers included in the resulting `DeploymentConfig` object . - -|`Service` -|The `new-app` command attempts to detect exposed ports in input images. It uses the lowest numeric exposed port to generate a service that exposes that port. To expose a different port, after `new-app` has completed, simply use the `oc expose` command to generate additional services. - -|Other -|Other objects can be generated when instantiating templates, according to the template. - -|=== - -[id="specifying-environment-variables"] -== Specifying environment variables - -When generating applications from a template, source, or an image, you can use the `-e|--env` argument to pass environment variables to the application container at run time: - -[source,terminal] ----- -$ oc new-app openshift/postgresql-92-centos7 \ - -e POSTGRESQL_USER=user \ - -e POSTGRESQL_DATABASE=db \ - -e POSTGRESQL_PASSWORD=password ----- - -The variables can also be read from file using the `--env-file` argument. The following is an example file called `postgresql.env`: - -[source,terminal] ----- -POSTGRESQL_USER=user -POSTGRESQL_DATABASE=db -POSTGRESQL_PASSWORD=password ----- - -Read the variables from the file: - -[source,terminal] ----- -$ oc new-app openshift/postgresql-92-centos7 --env-file=postgresql.env ----- - -Additionally, environment variables can be given on standard input by using `--env-file=-`: - -[source,terminal] ----- -$ cat postgresql.env | oc new-app openshift/postgresql-92-centos7 --env-file=- ----- - -[NOTE] -==== -Any `BuildConfig` objects created as part of `new-app` processing are not updated with environment variables passed with the `-e|--env` or `--env-file` argument. -==== - -[id="specifying-build-environment-variables"] -== Specifying build environment variables - -When generating applications from a template, source, or an image, you can use the `--build-env` argument to pass environment variables to the build container at run time: - -[source,terminal] ----- -$ oc new-app openshift/ruby-23-centos7 \ - --build-env HTTP_PROXY=http://myproxy.net:1337/ \ - --build-env GEM_HOME=~/.gem ----- - -The variables can also be read from a file using the `--build-env-file` argument. 
The following is an example file called `ruby.env`: - -[source,terminal] ----- -HTTP_PROXY=http://myproxy.net:1337/ -GEM_HOME=~/.gem ----- - -Read the variables from the file: - -[source,terminal] ----- -$ oc new-app openshift/ruby-23-centos7 --build-env-file=ruby.env ----- - -Additionally, environment variables can be given on standard input by using `--build-env-file=-`: - -[source,terminal] ----- -$ cat ruby.env | oc new-app openshift/ruby-23-centos7 --build-env-file=- ----- - -[id="specifying-labels"] -== Specifying labels - -When generating applications from source, images, or templates, you can use the `-l|--label` argument to add labels to the created objects. Labels make it easy to collectively select, configure, and delete objects associated with the application. - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world -l name=hello-world ----- - -[id="viewing-output-without-creation"] -== Viewing the output without creation - -To see a dry-run of running the `new-app` command, you can use the `-o|--output` argument with a `yaml` or `json` value. You can then use the output to preview the objects that are created or redirect it to a file that you can edit. After you are satisfied, you can use `oc create` to create the {product-title} objects. - -To output `new-app` artifacts to a file, run the following: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world \ - -o yaml > myapp.yaml ----- - -Edit the file: - -[source,terminal] ----- -$ vi myapp.yaml ----- - -Create a new application by referencing the file: - -[source,terminal] ----- -$ oc create -f myapp.yaml ----- - -[id="creating-objects-different-names"] -== Creating objects with different names - -Objects created by `new-app` are normally named after the source repository, or the image used to generate them. You can set the name of the objects produced by adding a `--name` flag to the command: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world --name=myapp ----- - -[id="creating-objects-different-project"] -== Creating objects in a different project - -Normally, `new-app` creates objects in the current project. However, you can create objects in a different project by using the `-n|--namespace` argument: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world -n myproject ----- - -[id="creating-multiple-objects"] -== Creating multiple objects - -The `new-app` command allows creating multiple applications specifying multiple parameters to `new-app`. Labels specified in the command line apply to all objects created by the single command. Environment variables apply to all components created from source or images. - -To create an application from a source repository and a Docker Hub image: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world mysql ----- - -[NOTE] -==== -If a source code repository and a builder image are specified as separate arguments, `new-app` uses the builder image as the builder for the source code repository. If this is not the intent, specify the required builder image for the source using the `~` separator. -==== - -[id="grouping-images-source-single-pod"] -== Grouping images and source in a single pod - -The `new-app` command allows deploying multiple images together in a single pod. To specify which images to group together, use the `+` separator. 
The `--group` command line argument can also be used to specify the images that should be grouped together. To group the image built from a source repository with other images, specify its builder image in the group: - -[source,terminal] ----- -$ oc new-app ruby+mysql ----- - -To deploy an image built from source and an external image together: - -[source,terminal] ----- -$ oc new-app \ - ruby~https://github.com/openshift/ruby-hello-world \ - mysql \ - --group=ruby+mysql ----- - -[id="searching-for-images-templates-other-inputs"] -== Searching for images, templates, and other inputs - -To search for images, templates, and other inputs for the `oc new-app` command, add the `--search` and `--list` flags. For example, to find all of the images or templates that include PHP: - -[source,terminal] ----- -$ oc new-app --search php ----- - -[id="setting-the-import-mode"] -== Setting the import mode - -To set the import mode when using `oc new-app`, add the `--import-mode` flag. This flag can be appended with `Legacy` or `PreserveOriginal`, which provides users the option to create image streams using a single sub-manifest, or all manifests, respectively. - -[souce,terminal] ----- -$ oc new-app --image=registry.redhat.io/ubi8/httpd-24:latest --import-mode=Legacy --name=test ----- - -[source,terminal] ----- -$ oc new-app --image=registry.redhat.io/ubi8/httpd-24:latest --import-mode=PreserveOriginal --name=test ----- diff --git a/modules/applications-create-using-cli-source-code.adoc b/modules/applications-create-using-cli-source-code.adoc deleted file mode 100644 index fd7b10132c08..000000000000 --- a/modules/applications-create-using-cli-source-code.adoc +++ /dev/null @@ -1,153 +0,0 @@ -[id="applications-create-using-cli-source-code_{context}"] -= Creating an application from source code - -With the `new-app` command you can create applications from source code in a local or remote Git repository. - -The `new-app` command creates a build configuration, which itself creates a new application image from your source code. The `new-app` command typically also creates a `Deployment` object to deploy the new image, and a service to provide load-balanced access to the deployment running your image. - -{product-title} automatically detects whether the pipeline, source, or docker build strategy should be used, and in the case of source build, detects an appropriate language builder image. - -[id="local_{context}"] -== Local - -To create an application from a Git repository in a local directory: - -[source,terminal] ----- -$ oc new-app / ----- - -[NOTE] -==== -If you use a local Git repository, the repository must have a remote named `origin` that points to a URL that is accessible by the {product-title} cluster. If there is no recognized remote, running the `new-app` command will create a binary build. -==== - -[id="remote_{context}"] -== Remote - -To create an application from a remote Git repository: - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/cakephp-ex ----- - -To create an application from a private remote Git repository: - -[source,terminal] ----- -$ oc new-app https://github.com/youruser/yourprivaterepo --source-secret=yoursecret ----- - -[NOTE] -==== -If you use a private remote Git repository, you can use the `--source-secret` flag to specify an existing source clone secret that will get injected into your build config to access the repository. -==== - -You can use a subdirectory of your source code repository by specifying a `--context-dir` flag. 
To create an application from a remote Git repository and a context subdirectory: - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/s2i-ruby-container.git \ - --context-dir=2.0/test/puma-test-app ----- - -Also, when specifying a remote URL, you can specify a Git branch to use by appending `#` to the end of the URL: - -[source,terminal] ----- -$ oc new-app https://github.com/openshift/ruby-hello-world.git#beta4 ----- - -[id="build-strategy-detection_{context}"] -== Build strategy detection - -{product-title} automatically determines which build strategy to use by detecting certain files: - -* If a Jenkins file exists in the root or specified context directory of the source repository when creating a new application, {product-title} generates a pipeline build strategy. -+ -[NOTE] -==== -The `pipeline` build strategy is deprecated; consider using {pipelines-title} instead. -==== -* If a Dockerfile exists in the root or specified context directory of the source repository when creating a new application, {product-title} generates a docker build strategy. -* If neither a Jenkins file nor a Dockerfile is detected, {product-title} generates a source build strategy. - -Override the automatically detected build strategy by setting the `--strategy` flag to `docker`, `pipeline`, or `source`. - -[source,terminal] ----- -$ oc new-app /home/user/code/myapp --strategy=docker ----- - -[NOTE] -==== -The `oc` command requires that files containing build sources are available in a remote Git repository. For all source builds, you must use `git remote -v`. -==== - -[id="language-detection_{context}"] -== Language detection - -If you use the source build strategy, `new-app` attempts to determine the language builder to use by the presence of certain files in the root or specified context directory of the repository: - -.Languages detected by `new-app` -[cols="4,8",options="header"] -|=== - -|Language |Files -ifdef::openshift-enterprise,openshift-webscale,openshift-aro,openshift-online[] -|`dotnet` -|`project.json`, `pass:[*.csproj]` -endif::[] -|`jee` -|`pom.xml` - -|`nodejs` -|`app.json`, `package.json` - -|`perl` -|`cpanfile`, `index.pl` - -|`php` -|`composer.json`, `index.php` - -|`python` -|`requirements.txt`, `setup.py` - -|`ruby` -|`Gemfile`, `Rakefile`, `config.ru` - -|`scala` -|`build.sbt` - -|`golang` -|`Godeps`, `main.go` -|=== - -After a language is detected, `new-app` searches the {product-title} server for image stream tags that have a `supports` annotation matching the detected language, or an image stream that matches the name of the detected language. If a match is not found, `new-app` searches the link:https://registry.hub.docker.com[Docker Hub registry] for an image that matches the detected language based on name. - -You can override the image the builder uses for a particular source repository by specifying the image, either an image stream or container -specification, and the repository with a `~` as a separator. Note that if this is done, build strategy detection and language detection are not carried out. 
- -For example, to use the `myproject/my-ruby` imagestream with the source in a remote repository: - -[source,terminal] ----- -$ oc new-app myproject/my-ruby~https://github.com/openshift/ruby-hello-world.git ----- - -To use the `openshift/ruby-20-centos7:latest` container image stream with the source in a local repository: - -[source,terminal] ----- -$ oc new-app openshift/ruby-20-centos7:latest~/home/user/code/my-ruby-app ----- - -[NOTE] -==== -Language detection requires the Git client to be locally installed so that your repository can be cloned and inspected. If Git is not available, you can avoid the language detection step by specifying the builder image to use with your repository with the `~` syntax. - -The `-i ` invocation requires that `new-app` attempt to clone `repository` to determine what type of artifact it is, so this will fail if Git is not available. - -The `-i --code ` invocation requires `new-app` clone `repository` to determine whether `image` should be used as a builder for the source code, or deployed separately, as in the case of a database image. -==== diff --git a/modules/applications-create-using-cli-template.adoc b/modules/applications-create-using-cli-template.adoc deleted file mode 100644 index d50a7837da31..000000000000 --- a/modules/applications-create-using-cli-template.adoc +++ /dev/null @@ -1,53 +0,0 @@ -[id="applications-create-using-cli-template_{context}"] -= Creating an application from a template - -You can create an application from a previously stored template or from a -template file, by specifying the name of the template as an argument. For -example, you can store a sample application template and use it to create an -application. - -Upload an application template to your current project's template library. The following example uploads an application template from a file called `examples/sample-app/application-template-stibuild.json`: - -[source,terminal] ----- -$ oc create -f examples/sample-app/application-template-stibuild.json ----- - -Then create a new application by referencing the application template. In this example, the template name is `ruby-helloworld-sample`: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample ----- - -To create a new application by referencing a template file in your local file system, without first storing it in {product-title}, use the `-f|--file` argument. For example: - -[source,terminal] ----- -$ oc new-app -f examples/sample-app/application-template-stibuild.json ----- - -== Template parameters - -When creating an application based on a template, use the `-p|--param` argument to set parameter values that are defined by the template: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample \ - -p ADMIN_USERNAME=admin -p ADMIN_PASSWORD=mypassword ----- - -You can store your parameters in a file, then use that file with `--param-file` when instantiating a template. If you want to read the parameters from standard input, use `--param-file=-`. 
The following is an example file called `helloworld.params`: - -[source,terminal] ----- -ADMIN_USERNAME=admin -ADMIN_PASSWORD=mypassword ----- - -Reference the parameters in the file when instantiating a template: - -[source,terminal] ----- -$ oc new-app ruby-helloworld-sample --param-file=helloworld.params ----- diff --git a/modules/applying-custom-seccomp-profile.adoc b/modules/applying-custom-seccomp-profile.adoc deleted file mode 100644 index 32c17f98f639..000000000000 --- a/modules/applying-custom-seccomp-profile.adoc +++ /dev/null @@ -1,23 +0,0 @@ -:_content-type: PROCEDURE -[id="applying-custom-seccomp-profile_{context}"] -= Applying the custom seccomp profile to the workload - -.Prerequisite -* The cluster administrator has set up the custom seccomp profile. For more details, see "Setting up the custom seccomp profile". - -.Procedure -* Apply the seccomp profile to the workload by setting the `securityContext.seccompProfile.type` field as following: -+ -.Example -+ -[source, yaml] ----- -spec: - securityContext: - seccompProfile: - type: Localhost - localhostProfile: .json <1> ----- -<1> Provide the name of your custom seccomp profile. -+ -Alternatively, you can use the pod annotations `seccomp.security.alpha.kubernetes.io/pod: localhost/.json`. However, this method is deprecated in {product-title} {product-version}. diff --git a/modules/arch-cluster-operators.adoc b/modules/arch-cluster-operators.adoc deleted file mode 100644 index 3097bfc1aebb..000000000000 --- a/modules/arch-cluster-operators.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="cluster-operators_{context}"] -= Cluster Operators - -In {product-title}, all cluster functions are divided into a series of default _cluster Operators_. Cluster Operators manage a particular area of cluster functionality, such as cluster-wide application logging, management of the Kubernetes control plane, or the machine provisioning system. - -Cluster Operators are represented by a `ClusterOperator` object, which cluster administrators can view in the {product-title} web console from the *Administration* -> *Cluster Settings* page. Each cluster Operator provides a simple API for determining cluster functionality. The Operator hides the details of managing the lifecycle of that component. Operators can manage a single component or tens of components, but the end goal is always to reduce operational burden by automating common actions. diff --git a/modules/arch-olm-operators.adoc b/modules/arch-olm-operators.adoc deleted file mode 100644 index 6681f67f1c91..000000000000 --- a/modules/arch-olm-operators.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="olm-operators_{context}"] -= Add-on Operators - -Operator Lifecycle Manager (OLM) and OperatorHub are default components in {product-title} that help manage Kubernetes-native applications as Operators. Together they provide the system for discovering, installing, and managing the optional add-on Operators available on the cluster. - -Using OperatorHub in the {product-title} web console, cluster administrators and authorized users can select Operators to install from catalogs of Operators. After installing an Operator from OperatorHub, it can be made available globally or in specific namespaces to run in user applications. 
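For example, an Operator can be installed declaratively by creating a `Subscription` object that references a catalog source; the package name, channel, and namespace shown here are illustrative placeholders rather than a specific product:

.Example `Subscription` object (illustrative)
[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: example-operator <1>
  namespace: openshift-operators <2>
spec:
  channel: stable <3>
  name: example-operator <1>
  source: redhat-operators <4>
  sourceNamespace: openshift-marketplace
  installPlanApproval: Automatic
----
<1> Illustrative package name; use the name that the catalog entry publishes.
<2> Installing into `openshift-operators` typically makes the Operator available globally; to scope it to specific namespaces, use a namespace that has a suitable Operator group.
<3> Update channel published by the Operator.
<4> One of the default catalog sources.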
- -Default catalog sources are available that include Red Hat Operators, certified Operators, and community Operators. Cluster administrators can also add their own custom catalog sources, which can contain a custom set of Operators. - -Developers can use the Operator SDK to help author custom Operators that take advantage of OLM features, as well. Their Operator can then be bundled and added to a custom catalog source, which can be added to a cluster and made available to users. - -[NOTE] -==== -OLM does not manage the cluster Operators that comprise the {product-title} architecture. -==== diff --git a/modules/arch-platform-operators.adoc b/modules/arch-platform-operators.adoc deleted file mode 100644 index 48b96b5c07eb..000000000000 --- a/modules/arch-platform-operators.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * operators/admin/olm-managing-po.adoc - -:_content-type: CONCEPT - -ifeval::["{context}" == "control-plane"] -[id="platform-operators_{context}"] -= Platform Operators (Technology Preview) - -:FeatureName: The platform Operator type -include::snippets/technology-preview.adoc[] -endif::[] - -ifeval::["{context}" == "olm-managing-po"] -[id="platform-operators_{context}"] -= About platform Operators -endif::[] - -Operator Lifecycle Manager (OLM) introduces a new type of Operator called _platform Operators_. A platform Operator is an OLM-based Operator that can be installed during or after an {product-title} cluster's Day 0 operations and participates in the cluster's lifecycle. As a cluster administrator, you can use platform Operators to further customize your {product-title} installation to meet your requirements and use cases. - -Using the existing cluster capabilities feature in {product-title}, cluster administrators can already disable a subset of Cluster Version Operator-based (CVO) components considered non-essential to the initial payload prior to cluster installation. Platform Operators iterate on this model by providing additional customization options. Through the platform Operator mechanism, which relies on resources from the RukPak component, OLM-based Operators can now be installed at cluster installation time and can block cluster rollout if the Operator fails to install successfully. - -In {product-title} 4.12, this Technology Preview release focuses on the basic platform Operator mechanism and builds a foundation for expanding the concept in upcoming releases. You can use the cluster-wide `PlatformOperator` API to configure Operators before or after cluster creation on clusters that have enabled the `TechPreviewNoUpgrades` feature set. \ No newline at end of file diff --git a/modules/architecture-container-application-benefits.adoc b/modules/architecture-container-application-benefits.adoc deleted file mode 100644 index 5328cb429c8a..000000000000 --- a/modules/architecture-container-application-benefits.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -[id="architecture-container-application-benefits_{context}"] -= The benefits of containerized applications - -Using containerized applications offers many advantages over using traditional deployment methods. Where applications were once expected to be installed on operating systems that included all their dependencies, containers let an application carry their dependencies with them. Creating containerized applications offers many benefits. 
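For example, the same container image can supply a language runtime to any host that can run containers, regardless of what is installed on that host. The image and command below are illustrative:

[source,terminal]
----
$ podman run --rm registry.access.redhat.com/ubi9/python-311 python3 --version
----

Because the Python runtime and its libraries are part of the image, the command works even on a host that has no Python installed.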
- -[id="operating-system-benefits_{context}"] -== Operating system benefits - -Containers use small, dedicated Linux operating systems without a kernel. Their file system, networking, cgroups, process tables, and namespaces are separate from the host Linux system, but the containers can integrate with the hosts seamlessly when necessary. Being based on Linux allows containers to use all the advantages that come with the open source development model of rapid innovation. - -Because each container uses a dedicated operating system, you can deploy applications that require conflicting software dependencies on the same host. Each container carries its own dependent software and manages its own interfaces, such as networking and file systems, so applications never need to compete for those assets. - -[id="deployment-scaling-benefits_{context}"] -== Deployment and scaling benefits - -If you employ rolling upgrades between major releases of your application, you can continuously improve your applications without downtime and still maintain compatibility with the current release. - -You can also deploy and test a new version of an application alongside the existing version. If the container passes your tests, simply deploy more new containers and remove the old ones.  - -Since all the software dependencies for an application are resolved within the container itself, you can use a standardized operating system on each host in your data center. You do not need to configure a specific operating system for each application host. When your data center needs more capacity, you can deploy another generic host system. - -Similarly, scaling containerized applications is simple. {product-title} offers a simple, standard way of scaling any containerized service. For example, if you build applications as a set of microservices rather than large, monolithic applications, you can scale the individual microservices individually to meet demand. This capability allows you to scale only the required services instead of the entire application, which can allow you to meet application demands while using minimal resources. diff --git a/modules/architecture-kubernetes-introduction.adoc b/modules/architecture-kubernetes-introduction.adoc deleted file mode 100644 index 8fd1f576e11a..000000000000 --- a/modules/architecture-kubernetes-introduction.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -:_content-type: CONCEPT -[id="architecture-kubernetes-introduction_{context}"] -= About Kubernetes - -Although container images and the containers that run from them are the -primary building blocks for modern application development, to run them at scale -requires a reliable and flexible distribution system. Kubernetes is the -defacto standard for orchestrating containers. - -Kubernetes is an open source container orchestration engine for automating -deployment, scaling, and management of containerized applications. The general -concept of Kubernetes is fairly simple: - -* Start with one or more worker nodes to run the container workloads. -* Manage the deployment of those workloads from one or more control plane nodes. -* Wrap containers in a deployment unit called a pod. Using pods provides extra -metadata with the container and offers the ability to group several containers -in a single deployment entity. -* Create special kinds of assets. For example, services are represented by a -set of pods and a policy that defines how they are accessed. 
This policy -allows containers to connect to the services that they need even if they do not -have the specific IP addresses for the services. Replication controllers are -another special asset that indicates how many pod replicas are required to run -at a time. You can use this capability to automatically scale your application -to adapt to its current demand. - -In only a few years, Kubernetes has seen massive cloud and on-premise adoption. -The open source development model allows many people to extend Kubernetes -by implementing different technologies for components such as networking, -storage, and authentication. diff --git a/modules/architecture-machine-config-pools.adoc b/modules/architecture-machine-config-pools.adoc deleted file mode 100644 index 7c700689dd9f..000000000000 --- a/modules/architecture-machine-config-pools.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="architecture-machine-config-pools_{context}"] -= Node configuration management with machine config pools - -Machines that run control plane components or user workloads are divided into groups based on the types of resources they handle. These groups of machines are called machine config pools (MCP). Each MCP manages a set of nodes and its corresponding machine configs. The role of the node determines which MCP it belongs to; the MCP governs nodes based on its assigned node role label. Nodes in an MCP have the same configuration; this means nodes can be scaled up and torn down in response to increased or decreased workloads. - -By default, there are two MCPs created by the cluster when it is installed: `master` and `worker`. Each default MCP has a defined configuration applied by the Machine Config Operator (MCO), which is responsible for managing MCPs and facilitating MCP upgrades. You can create additional MCPs, or custom pools, to manage nodes that have custom use cases that extend outside of the default node types. - -Custom pools are pools that inherit their configurations from the worker pool. They use any machine config targeted for the worker pool, but add the ability to deploy changes only targeted at the custom pool. Since a custom pool inherits its configuration from the worker pool, any change to the worker pool is applied to the custom pool as well. Custom pools that do not inherit their configurations from the worker pool are not supported by the MCO. - -[NOTE] -==== -A node can only be included in one MCP. If a node has multiple labels that correspond to several MCPs, like `worker,infra`, it is managed by the infra custom pool, not the worker pool. Custom pools take priority on selecting nodes to manage based on node labels; nodes that do not belong to a custom pool are managed by the worker pool. -==== - -It is recommended to have a custom pool for every node role you want to manage in your cluster. For example, if you create infra nodes to handle infra workloads, it is recommended to create a custom infra MCP to group those nodes together. If you apply an `infra` role label to a worker node so it has the `worker,infra` dual label, but do not have a custom infra MCP, the MCO considers it a worker node. If you remove the `worker` label from a node and apply the `infra` label without grouping it in a custom pool, the node is not recognized by the MCO and is unmanaged by the cluster. 
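For example, a custom `infra` pool can be defined with a `MachineConfigPool` object similar to the following sketch; the selectors shown are illustrative:

.Example custom `infra` machine config pool (illustrative)
[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
  name: infra
spec:
  machineConfigSelector:
    matchExpressions:
      - key: machineconfiguration.openshift.io/role
        operator: In
        values: [worker, infra] <1>
  nodeSelector:
    matchLabels:
      node-role.kubernetes.io/infra: "" <2>
----
<1> The pool uses machine configs that target either the `worker` or the `infra` role, so it inherits the worker configuration in addition to any infra-only machine configs.
<2> Only nodes that carry the `infra` node role label are managed by this pool.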
- -[IMPORTANT] -==== -Any node labeled with the `infra` role that is only running infra workloads is not counted toward the total number of subscriptions. The MCP managing an infra node is mutually exclusive from how the cluster determines subscription charges; tagging a node with the appropriate `infra` role and using taints to prevent user workloads from being scheduled on that node are the only requirements for avoiding subscription charges for infra workloads. -==== - -The MCO applies updates for pools independently; for example, if there is an update that affects all pools, nodes from each pool update in parallel with each other. If you add a custom pool, nodes from that pool also attempt to update concurrently with the master and worker nodes. - -There might be situations where the configuration on a node does not fully match what the currently-applied machine config specifies. This state is called _configuration drift_. The Machine Config Daemon (MCD) regularly checks the nodes for configuration drift. If the MCD detects configuration drift, the MCO marks the node `degraded` until an administrator corrects the node configuration. A degraded node is online and operational, but, it cannot be updated. - diff --git a/modules/architecture-machine-roles.adoc b/modules/architecture-machine-roles.adoc deleted file mode 100644 index b7e497484b35..000000000000 --- a/modules/architecture-machine-roles.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - -[id="architecture-machine-roles_{context}"] -= Machine roles in {product-title} - -{product-title} assigns hosts different roles. These roles define the function of the machine within the cluster. The cluster contains definitions for the standard `master` and `worker` role types. - -[NOTE] -==== -The cluster also contains the definition for the `bootstrap` role. Because the bootstrap machine is used only during cluster installation, its function is explained in the cluster installation documentation. -==== - -== Control plane and node host compatibility - -The {product-title} version must match between control plane host and node host. For example, in a 4.13 cluster, all control plane hosts must be 4.13 and all nodes must be 4.13. - -Temporary mismatches during cluster upgrades are acceptable. For example, when upgrading from {product-title} 4.12 to 4.13, some nodes will upgrade to 4.13 before others. Prolonged skewing of control plane hosts and node hosts might expose older compute machines to bugs and missing features. Users should resolve skewed control plane hosts and node hosts as soon as possible. - -The `kubelet` service must not be newer than `kube-apiserver`, and can be up to two minor versions older depending on whether your {product-title} version is odd or even. The table below shows the appropriate version compatibility: - -[cols="2",options="header"] -|=== -| {product-title} version -| Supported `kubelet` skew - - -| Odd {product-title} minor versions ^[1]^ -| Up to one version older - -| Even {product-title} minor versions ^[2]^ -| Up to two versions older -|=== -[.small] --- -1. For example, {product-title} 4.11, 4.13. -2. For example, {product-title} 4.10, 4.12. --- - -[id="defining-workers_{context}"] -== Cluster workers - -In a Kubernetes cluster, the worker nodes are where the actual workloads requested by Kubernetes users run and are managed. 
The worker nodes advertise their capacity and the scheduler, which a control plane service, determines on which nodes to start pods and containers. Important services run on each worker node, including CRI-O, which is the container engine; Kubelet, which is the service that accepts and fulfills requests for running and stopping container workloads; a service proxy, which manages communication for pods across workers; and the runC or crun low-level container runtime, which creates and runs containers. - -[NOTE] -==== -For information about how to enable crun instead of the default runC, see the documentation for creating a `ContainerRuntimeConfig` CR. -==== - -In {product-title}, compute machine sets control the compute machines, which are assigned the `worker` machine role. Machines with the `worker` role drive compute workloads that are governed by a specific machine pool that autoscales them. Because {product-title} has the capacity to support multiple machine types, the machines with the `worker` role are classed as _compute_ machines. In this release, the terms _worker machine_ and _compute machine_ are used interchangeably because the only default type of compute machine is the worker machine. In future versions of {product-title}, different types of compute machines, such as infrastructure machines, might be used by default. - -[NOTE] -==== -Compute machine sets are groupings of compute machine resources under the `machine-api` namespace. Compute machine sets are configurations that are designed to start new compute machines on a specific cloud provider. Conversely, machine config pools (MCPs) are part of the Machine Config Operator (MCO) namespace. An MCP is used to group machines together so the MCO can manage their configurations and facilitate their upgrades. -==== - -[id="defining-masters_{context}"] -== Cluster control planes - -In a Kubernetes cluster, the _master_ nodes run services that are required to control the Kubernetes cluster. In {product-title}, the control plane is comprised of control plane machines that have a `master` machine role. They contain more than just the Kubernetes services for managing the {product-title} cluster. - -For most {product-title} clusters, control plane machines are defined by a series of standalone machine API resources. For supported cloud provider and {product-title} version combinations, control planes can be managed with control plane machine sets. Extra controls apply to control plane machines to prevent you from deleting all control plane machines and breaking your cluster. - -[NOTE] -==== -Exactly three control plane nodes must be used for all production deployments. -==== - -Services that fall under the Kubernetes category on the control plane include the Kubernetes API server, etcd, the Kubernetes controller manager, and the Kubernetes scheduler. - -.Kubernetes services that run on the control plane -[cols="1,2",options="header"] -|=== -|Component |Description -|Kubernetes API server -|The Kubernetes API server validates and configures the data for pods, services, -and replication controllers. It also provides a focal point for the shared state of the cluster. - -|etcd -|etcd stores the persistent control plane state while other components watch etcd for -changes to bring themselves into the specified state. -//etcd can be optionally configured for high availability, typically deployed with 2n+1 peer services. 
- -|Kubernetes controller manager -|The Kubernetes controller manager watches etcd for changes to objects such as -replication, namespace, and service account controller objects, and then uses the -API to enforce the specified state. Several such processes create a cluster with -one active leader at a time. - -|Kubernetes scheduler -|The Kubernetes scheduler watches for newly created pods without an assigned node and selects the best node to host the pod. -|=== - -There are also OpenShift services that run on the control plane, which include the OpenShift API server, OpenShift controller manager, OpenShift OAuth API server, and OpenShift OAuth server. - -.OpenShift services that run on the control plane -[cols="1,2",options="header"] -|=== -|Component |Description -|OpenShift API server -|The OpenShift API server validates and configures the data for OpenShift resources, such as projects, routes, and templates. - -The OpenShift API server is managed by the OpenShift API Server Operator. -|OpenShift controller manager -|The OpenShift controller manager watches etcd for changes to OpenShift objects, such as project, route, and template controller objects, and then uses the API to enforce the specified state. - -The OpenShift controller manager is managed by the OpenShift Controller Manager Operator. -|OpenShift OAuth API server -|The OpenShift OAuth API server validates and configures the data to authenticate to {product-title}, such as users, groups, and OAuth tokens. - -The OpenShift OAuth API server is managed by the Cluster Authentication Operator. -|OpenShift OAuth server -|Users request tokens from the OpenShift OAuth server to authenticate themselves to the API. - -The OpenShift OAuth server is managed by the Cluster Authentication Operator. -|=== - -Some of these services on the control plane machines run as systemd services, while others run as static pods. - -Systemd services are appropriate for services that you need to always come up on that particular system shortly after it starts. For control plane machines, those include sshd, which allows remote login. It also includes services such as: - -* The CRI-O container engine (crio), which runs and manages the containers. {product-title} {product-version} uses CRI-O instead of the Docker Container Engine. -* Kubelet (kubelet), which accepts requests for managing containers on the machine from control plane services. - -CRI-O and Kubelet must run directly on the host as systemd services because they need to be running before you can run other containers. - -The [x-]`installer-*` and [x-]`revision-pruner-*` control plane pods must run with root permissions because they write to the `/etc/kubernetes` directory, which is owned by the root user. These pods are in the following namespaces: - -* `openshift-etcd` -* `openshift-kube-apiserver` -* `openshift-kube-controller-manager` -* `openshift-kube-scheduler` diff --git a/modules/architecture-platform-benefits.adoc b/modules/architecture-platform-benefits.adoc deleted file mode 100644 index a8de25b3f252..000000000000 --- a/modules/architecture-platform-benefits.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture.adoc - -:_content-type: CONCEPT -[id="architecture-platform-benefits_{context}"] -= {product-title} overview - -//// -Red Hat was one of the early contributors of Kubernetes and quickly integrated -it as the centerpiece of its {product-title} product line. 
Today, Red Hat -continues as one of the largest contributors to Kubernetes across a wide range -of technology areas. -//// - -{product-title} provides enterprise-ready enhancements to Kubernetes, including the following enhancements: - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -* Hybrid cloud deployments. You can deploy {product-title} clusters to a variety of public cloud platforms or in your data center. -endif::[] -* Integrated Red Hat technology. Major components in {product-title} come from {op-system-base-full} and related Red Hat technologies. {product-title} benefits from the intense testing and certification initiatives for Red Hat's enterprise quality software. -* Open source development model. Development is completed in the open, and the source code is available from public software repositories. This open collaboration fosters rapid innovation and development. - -Although Kubernetes excels at managing your applications, it does not specify -or manage platform-level requirements or deployment processes. Powerful and -flexible platform management tools and processes are important benefits that -{product-title} {product-version} offers. The following sections describe some -unique features and benefits of {product-title}. - -[id="architecture-custom-os_{context}"] -== Custom operating system - -{product-title} uses {op-system-first}, a container-oriented operating system that is specifically designed for running containerized applications from {product-title} and works with new tools to provide fast installation, Operator-based management, and simplified upgrades. - -{op-system} includes: - -* Ignition, which {product-title} uses as a firstboot system configuration for initially bringing up and configuring machines. -* CRI-O, a Kubernetes native container runtime implementation that integrates closely with the operating system to deliver an efficient and optimized Kubernetes experience. CRI-O provides facilities for running, stopping, and restarting containers. It fully replaces the Docker Container Engine, which was used in {product-title} 3. -* Kubelet, the primary node agent for Kubernetes that is responsible for -launching and monitoring containers. - -In {product-title} {product-version}, you must use {op-system} for all control -plane machines, but you can use Red Hat Enterprise Linux (RHEL) as the operating -system for compute machines, which are also known as worker machines. If you choose to use RHEL workers, you -must perform more system maintenance than if you use {op-system} for all of the -cluster machines. - -[id="architecture-platform-management_{context}"] -== Simplified installation and update process - -With {product-title} {product-version}, if you have an account with the right -permissions, you can deploy a production cluster in supported clouds by running -a single command and providing a few values. You can also customize your cloud -installation or install your cluster in your data center if you use a supported -platform. - -For clusters that use {op-system} for all machines, updating, or -upgrading, {product-title} is a simple, highly-automated process. Because -{product-title} completely controls the systems and services that run on each -machine, including the operating system itself, from a central control plane, -upgrades are designed to become automatic events. 
If your cluster contains -RHEL worker machines, the control plane benefits from the streamlined update -process, but you must perform more tasks to upgrade the RHEL machines. - -[id="architecture-key-features_{context}"] -== Other key features - -Operators are both the fundamental unit of the {product-title} {product-version} -code base and a convenient way to deploy applications and software components -for your applications to use. In {product-title}, Operators serve as the platform foundation and remove the need for manual upgrades of operating systems and control plane applications. {product-title} Operators such as the -Cluster Version Operator and Machine Config Operator allow simplified, -cluster-wide management of those critical components. - -Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for -storing and distributing Operators to people developing and deploying applications. - -The Red Hat Quay Container Registry is a Quay.io container registry that serves -most of the container images and Operators to {product-title} clusters. -Quay.io is a public registry version of Red Hat Quay that stores millions of images -and tags. - -Other enhancements to Kubernetes in {product-title} include improvements in -software defined networking (SDN), authentication, log aggregation, monitoring, -and routing. {product-title} also offers a comprehensive web console and the -custom OpenShift CLI (`oc`) interface. - - -//// -{product-title} includes the following infrastructure components: - -* OpenShift API server -* Kubernetes API server -* Kubernetes controller manager -* Kubernetes nodes/kubelet -* CRI-O -* {op-system} -* Infrastructure Operators -* Networking (SDN/Router/DNS) -* Storage -* Monitoring -* Telemetry -* Security -* Authorization/Authentication/Oauth -* Logging - -It also offers the following user interfaces: -* Web Console -* OpenShift CLI (`oc`) -* Rest API -//// - - -[id="architecture-overview-image_{context}"] -== {product-title} lifecycle - -The following figure illustrates the basic {product-title} lifecycle: - -* Creating an {product-title} cluster -* Managing the cluster -* Developing and deploying applications -* Scaling up applications - -.High level {product-title} overview -image::product-workflow-overview.png[High-level {product-title} flow] diff --git a/modules/architecture-platform-introduction.adoc b/modules/architecture-platform-introduction.adoc deleted file mode 100644 index a31f748429c7..000000000000 --- a/modules/architecture-platform-introduction.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * architecture/architecture.adoc - -[id="architecture-platform-introduction_{context}"] -= Introduction to {product-title} - -{product-title} is a platform for developing and running containerized -applications. It is designed to allow applications and the data centers -that support them to expand from just a few machines and applications to -thousands of machines that serve millions of clients. - -With its foundation in Kubernetes, {product-title} incorporates the same -technology that serves as the engine for massive telecommunications, streaming -video, gaming, banking, and other applications. Its implementation in open -Red Hat technologies lets you extend your containerized applications beyond a -single cloud to on-premise and multi-cloud environments. - -image::oke-arch-ocp-stack.png[Red Hat {oke}] - -// The architecture presented here is meant to give you insights into how {product-title} works. 
It does this by stepping you through the process of installing an {product-title} cluster, managing the cluster, and developing and deploying applications on it. Along the way, this architecture describes: - -// * Major components of {product-title} -// * Ways of exploring different aspects of {product-title} yourself -// * Available frontdoors (and backdoors) to modify the installation and management of your {product-title} cluster -// * Different types of container application types diff --git a/modules/architecture-rhcos-updating-bootloader.adoc b/modules/architecture-rhcos-updating-bootloader.adoc deleted file mode 100644 index 694af67ecf58..000000000000 --- a/modules/architecture-rhcos-updating-bootloader.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc - -[id="architecture-rhcos-updating-bootloader.adoc_{context}"] -= Updating the bootloader using bootupd - -To update the bootloader by using `bootupd`, you must either install `bootupd` on {op-system} machines manually or provide a machine config with the enabled `systemd` unit. Unlike `grubby` or other bootloader tools, `bootupd` does not manage kernel space configuration such as passing kernel arguments. - -After you have installed `bootupd`, you can manage it remotely from the {product-title} cluster. - -[NOTE] -==== -It is recommended that you use `bootupd` only on bare metal or virtualized hypervisor installations, such as for protection against the BootHole vulnerability. -==== - -.Manual install method -You can manually install `bootupd` by using the `bootctl` command-line tool. - -. Inspect the system status: -+ -[source,terminal] ----- -# bootupctl status ----- -+ -.Example output for `x86_64` -[source,terminal] ----- -Component EFI - Installed: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 - Update: At latest version ----- -ifndef::openshift-origin[] -+ -.Example output for `aarch64` -[source, terminal] ----- -Component EFI - Installed: grub2-efi-aa64-1:2.02-99.el8_4.1.aarch64,shim-aa64-15.4-2.el8_1.aarch64 - Update: At latest version ----- -endif::openshift-origin[] - -[start=2] -. {op-system} images created without `bootupd` installed on them require an explicit adoption phase. -+ -If the system status is `Adoptable`, perform the adoption: -+ -[source,terminal] ----- -# bootupctl adopt-and-update ----- -+ -.Example output -[source,terminal] ----- -Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 ----- - -. If an update is available, apply the update so that the changes take effect on the next reboot: -+ -[source,terminal] ----- -# bootupctl update ----- -+ -.Example output -[source,terminal] ----- -Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 ----- - -.Machine config method -Another way to enable `bootupd` is by providing a machine config. 
- -* Provide a machine config file with the enabled `systemd` unit, as shown in the following example: -+ -.Example output -[source,yaml] ----- - variant: rhcos - version: 1.1.0 - systemd: - units: - - name: custom-bootupd-auto.service - enabled: true - contents: | - [Unit] - Description=Bootupd automatic update - - [Service] - ExecStart=/usr/bin/bootupctl update - RemainAfterExit=yes - - [Install] - WantedBy=multi-user.target ----- diff --git a/modules/assisted-installer-adding-hosts-to-the-cluster.adoc b/modules/assisted-installer-adding-hosts-to-the-cluster.adoc deleted file mode 100644 index 1942549311d3..000000000000 --- a/modules/assisted-installer-adding-hosts-to-the-cluster.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="adding-hosts-to-the-cluster_{context}"] -= Adding hosts to the cluster - -You must add one or more hosts to the cluster. Adding a host to the cluster involves generating a discovery ISO. The discovery ISO runs {op-system-first} in-memory with an agent. Perform the following procedure for each host on the cluster. - -.Procedure - -. Click the *Add hosts* button and select the installation media. - -.. Select *Minimal image file: Provision with virtual media* to download a smaller image that will fetch the data needed to boot. The nodes must have virtual media capability. This is the recommended method. - -.. Select *Full image file: Provision with physical media* to download the larger full image. - -. Add an SSH public key so that you can connect to the cluster nodes as the `core` user. Having a login to the cluster nodes can provide you with debugging information during the installation. - -. Optional: If the cluster hosts are behind a firewall that requires the use of a proxy, select *Configure cluster-wide proxy settings*. Enter the username, password, IP address and port for the HTTP and HTTPS URLs of the proxy server. - -. Click *Generate Discovery ISO*. - -. Download the discovery ISO. diff --git a/modules/assisted-installer-assisted-installer-prerequisites.adoc b/modules/assisted-installer-assisted-installer-prerequisites.adoc deleted file mode 100644 index 564af9a31f0c..000000000000 --- a/modules/assisted-installer-assisted-installer-prerequisites.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc -:_content-type: CONCEPT - -[id='assisted-installer-prerequisites_{context}'] -= Assisted Installer prerequisites - -The {ai-full} validates the following prerequisites to ensure successful installation. - -== Hardware - -For control plane nodes or the {sno} node, nodes must have at least the following resources: - -* 8 CPU cores -* 16.00 GiB RAM -* 100 GB storage -* 10ms write speed or less for etcd `wal_fsync_duration_seconds` - -For worker nodes, each node must have at least the following resources: - -* 4 CPU cores -* 16.00 GiB RAM -* 100 GB storage - -== Networking - -The network must meet the following requirements: - -* A DHCP server unless using static IP addressing. -* A base domain name. You must ensure that the following requirements are met: - - There is no wildcard, such as `*..`, or the installation will not proceed. - - A DNS A/AAAA record for `api..`. - - A DNS A/AAAA record with a wildcard for `*.apps..`. -* Port `6443` is open for the API URL if you intend to allow users outside the firewall to access the cluster via the `oc` CLI tool. 
-* Port `443` is open for the console if you intend to allow users outside the firewall to access the console. - -[IMPORTANT] -==== -DNS A/AAAA record settings at top-level domain registrars can take significant time to update. Ensure the A/AAAA record DNS settings are working before installation to prevent installation delays. -==== - -The {product-title} cluster's network must also meet the following requirements: - -* Connectivity between all cluster nodes -* Connectivity for each node to the internet -* Access to an NTP server for time synchronization between the cluster nodes - -== Preflight validations - -The {ai-full} ensures the cluster meets the prerequisites before installation, because it eliminates complex post-installation troubleshooting, thereby saving significant amounts of time and effort. Before installing software on the nodes, the {ai-full} conducts the following validations: - -* Ensures network connectivity -* Ensures sufficient network bandwidth -* Ensures connectivity to the registry -* Ensures time synchronization between cluster nodes -* Verifies that the cluster nodes meet the minimum hardware requirements -* Validates the installation configuration parameters - -If the {ai-full} does not successfully validate the foregoing requirements, installation will not proceed. diff --git a/modules/assisted-installer-booting-with-a-usb-drive.adoc b/modules/assisted-installer-booting-with-a-usb-drive.adoc deleted file mode 100644 index d0b0dfb8a6ab..000000000000 --- a/modules/assisted-installer-booting-with-a-usb-drive.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="booting-with-a-usb-drive_{context}"] -= Booting with a USB drive - -To register nodes with the {ai-full} using a bootable USB drive, use the following procedure. - -.Procedure - -. Attach the {op-system} discovery ISO to the target host. - -. Configure the boot drive order in the server BIOS settings to boot from the attached discovery ISO, and then reboot the server. - -. On the administration host, return to the browser. Wait for the host to appear in the list of discovered hosts. diff --git a/modules/assisted-installer-completing-the-installation.adoc b/modules/assisted-installer-completing-the-installation.adoc deleted file mode 100644 index ef22614e4013..000000000000 --- a/modules/assisted-installer-completing-the-installation.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="completing-the-installation_{context}"] -= Completing the installation - -After the cluster is installed and initialized, the {ai-full} indicates that the installation is finished. The {ai-full} provides the console URL, the `kubeadmin` username and password, and the `kubeconfig` file. Additionally, the {ai-full} provides cluster details including the {product-title} version, base domain, CPU architecture, API and Ingress IP addresses, and the cluster and service network IP addresses. - -.Prerequisites - -* You have installed the `oc` CLI tool. - - -.Procedure - -. Make a copy of the `kubeadmin` username and password. - -. 
Download the `kubeconfig` file and copy it to the `auth` directory under your working directory: -+ -[source,terminal] ----- -$ mkdir -p /auth ----- -+ -[source,terminal] ----- -$ cp kubeconfig /auth ----- -+ -[NOTE] -==== -The `kubeconfig` file is available for download for 24 hours after completing the installation. -==== - -. Add the `kubeconfig` file to your environment: -+ -[source,terminal] ----- -$ export KUBECONFIG=/auth/kubeconfig ----- - -. Log in with the `oc` CLI tool: -+ -[source,terminal] ----- -$ oc login -u kubeadmin -p ----- -+ -Replace `` with the password of the `kubeadmin` user. - -. Click on the web console URL or click *Launch OpenShift Console* to open the console. - -. Enter the `kubeadmin` username and password. Follow the instructions in the {product-title} console to configure an identity provider and configure alert receivers. - -. Add a bookmark for the {product-title} console. diff --git a/modules/assisted-installer-configuring-host-network-interfaces.adoc b/modules/assisted-installer-configuring-host-network-interfaces.adoc deleted file mode 100644 index daf69425239a..000000000000 --- a/modules/assisted-installer-configuring-host-network-interfaces.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-host-network-interfaces_{context}"] -= Optional: Configuring host network interfaces - -The {ai-full} supports IPv4 networking and dual stack networking. The {ai-full} also supports configuring host network interfaces with the NMState library, a declarative network manager API for hosts. You can use NMState to deploy hosts with static IP addressing, bonds, VLANs, and other advanced networking features. If you choose to configure host network interfaces, you must set network-wide configurations. Then, you must create a host-specific configuration for each host and generate the discovery ISO with the host-specific settings. - -.Procedure - -. Select the internet protocol version. Valid options are *IPv4* and *Dual stack*. - -. If the cluster hosts are on a shared VLAN, enter the VLAN ID. - -. Enter the network-wide IP addresses. If you selected *Dual stack* networking, you must enter both IPv4 and IPv6 addresses. - -.. Enter the cluster network's IP address range in CIDR notation. - -.. Enter the default gateway IP address. - -.. Enter the DNS server IP address. - -. Enter the host-specific configuration. - -.. If you are only setting a static IP address that uses a single network interface, use the form view to enter the IP address and the MAC address for the host. - -.. If you are using multiple interfaces, bonding, or other advanced networking features, use the YAML view and enter the desired network state for the host using NMState syntax. - -.. Add the MAC address and interface name for each interface used in your network configuration. diff --git a/modules/assisted-installer-configuring-hosts.adoc b/modules/assisted-installer-configuring-hosts.adoc deleted file mode 100644 index 730906f8bfcd..000000000000 --- a/modules/assisted-installer-configuring-hosts.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-hosts_{context}"] -= Configuring hosts - -After booting the hosts with the discovery ISO, the hosts will appear in the table at the bottom of the page. 
You can configure the hostname, role, and installation disk for each host. - -.Procedure - -. Select a host. - -. From the *Actions* list, select *Change hostname*. You must ensure each host has a valid and unique hostname. If necessary, enter a new name for the host and click *Change*. - -. For multi-host clusters, in the *Role* column next to the host name, you can click on the menu to change the role of the host. -+ -If you do not select a role, the {ai-full} will assign the role automatically. The minimum hardware requirements for control plane nodes exceed that of worker nodes. If you assign a role to a host, ensure that you assign the control plane role to hosts that meet the minimum hardware requirements. - -. To the left of the checkbox next to a host name, click to expand the host details. If you have multiple disk drives, you can select a different disk drive to act as the installation disk. - -. Repeat this procedure for each host. - -Once all cluster hosts appear with a status of *Ready*, proceed to the next step. diff --git a/modules/assisted-installer-configuring-networking.adoc b/modules/assisted-installer-configuring-networking.adoc deleted file mode 100644 index 02abe638d69d..000000000000 --- a/modules/assisted-installer-configuring-networking.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="configuring-networking_{context}"] -= Configuring networking - -Before installing {product-title}, you must configure the cluster network. - -.Procedure - -. In the *Networking* page, select one of the following if it is not already selected for you: -+ -** *Cluster-Managed Networking:* Selecting cluster-managed networking means that the {ai-full} will configure a standard network topology, including `keepalived` and Virtual Router Redundancy Protocol (VRRP) for managing the API and Ingress VIP addresses. -+ -** *User-Managed Networking*: Selecting user-managed networking allows you to deploy {product-title} with a non-standard network topology. For example, if you want to deploy with an external load balancer instead of `keepalived` and VRRP, or if you intend to deploy the cluster nodes across many distinct L2 network segments. - -. For cluster-managed networking, configure the following settings: - -.. Define the *Machine network*. You can use the default network or select a subnet. - -.. Define an *API virtual IP*. An API virtual IP provides an endpoint for all users to interact with, and configure the platform. - -.. Define an *Ingress virtual IP*. An Ingress virtual IP provides an endpoint for application traffic flowing from outside the cluster. - -. For user-managed networking, configure the following settings: - -.. Select your *Networking stack type*: -+ -** *IPv4*: Select this type when your hosts are only using IPv4. -+ -** *Dual-stack*: You can select dual-stack when your hosts are using IPv4 together with IPv6. - -.. Define the *Machine network*. You can use the default network or select a subnet. - -.. Define an *API virtual IP*. An API virtual IP provides an endpoint for all users to interact with, and configure the platform. - -.. Define an *Ingress virtual IP*. An Ingress virtual IP provides an endpoint for application traffic flowing from outside the cluster. - -.. Optional: You can select *Allocate IPs via DHCP server* to automatically allocate the *API IP* and *Ingress IP* using the DHCP server. - -. 
Optional: Select *Use advanced networking* to configure the following advanced networking properties: - -** *Cluster network CIDR*: Define an IP address block from which Pod IP addresses are allocated. - -** *Cluster network host prefix*: Define a subnet prefix length to assign to each node. - -** *Service network CIDR*: Define an IP address to use for service IP addresses. - -** *Network type*: Select either *Software-Defined Networking (SDN)* for standard networking or *Open Virtual Networking (OVN)* for telco features. diff --git a/modules/assisted-installer-installing-the-cluster.adoc b/modules/assisted-installer-installing-the-cluster.adoc deleted file mode 100644 index 88dde8da1bbd..000000000000 --- a/modules/assisted-installer-installing-the-cluster.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// This is included in the following assemblies: -// -// assisted-installer-installing.adoc - -:_content-type: PROCEDURE -[id="installing-the-cluster_{context}"] -= Installing the cluster - -After you have completed the configuration and all the nodes are *Ready*, you can begin installation. The installation process takes a considerable amount of time, and you can monitor the installation from the {ai-full} web console. Nodes will reboot during the installation, and they will initialize after installation. - -.Procedure - -* Press *Begin installation*. - -. Click on the link in the *Status* column of the *Host Inventory* list to see the installation status of a particular host. diff --git a/modules/assisted-installer-pre-installation-considerations.adoc b/modules/assisted-installer-pre-installation-considerations.adoc deleted file mode 100644 index ce46b6db4d63..000000000000 --- a/modules/assisted-installer-pre-installation-considerations.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc - -:_content-type: CONCEPT -[id='pre-installation-considerations_{context}'] -= Pre-installation considerations - -Before installing {product-title} with the {ai-full}, you must consider the following configuration choices: - -* Which base domain to use -* Which {product-title} product version to install -* Whether to install a full cluster or {sno} -* Whether to use a DHCP server or a static network configuration -* Whether to use IPv4 or dual-stack networking -* Whether to install {VirtProductName} -* Whether to install {rh-storage-first} -* Whether to integrate with vSphere when installing on vSphere diff --git a/modules/assisted-installer-release-notes.adoc b/modules/assisted-installer-release-notes.adoc deleted file mode 100644 index dc5bc02db2ac..000000000000 --- a/modules/assisted-installer-release-notes.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// This is included in the following assemblies: -// -//installing_bare_metal_assisted/installing-bare-metal-assisted.adoc - -:_content-type: REFERENCE -[id="assisted-installer-release-notes_{context}"] -= {ai-full} {ai-version} release notes - -[id="ai-release-notes-about-this-release_{context}"] -== About this release - -These release notes track the development of {ai-full} {ai-version}. - -This product was previously released as a Technology Preview product and is now generally available and enabled by default in the {cluster-manager-first}. - -[id="ai-release-notes-bug-fixes_{context}"] -== Bug fixes - -* Previously, users could define `localhost` as a valid host name for all of their hosts. As a result, host names were not unique, and {ai-full} could not install the cluster. 
With this release, users cannot complete the cluster installation if any of the hosts are named `localhost`. An error appears and users must rename the hosts. -//(link:https://issues.redhat.com/browse/MGMT-8088[MGMT-8088]) - -* Previously, in the *OpenShift Web Console troubleshooting* window, the *Optional* field remained blank when undefined instead of displaying an IP address. With this release, the *Optional* field was removed. -//(link:https://issues.redhat.com/browse/MGMT-9283[MGMT-9283]) - -* Previously, when installing a cluster on vSphere, the {ai-full} created machines and `MachineSet` objects for every virtual machine. With this release, {ai-full} no longer creates machines or `MachineSet` objects for user-provisioned VMs. -//(link:https://issues.redhat.com/browse/MGMT-9559[MGMT-9559]) - -* Previously, if Operators failed to install during an installation with {ai-full}, users received an error message and were directed to reset the cluster installation. With this release, if Operators fail to install, the cluster is automatically degraded. - -* Previously, after installing an Operator using {ai-full}, the Operator appeared as *available* in the cluster *Status* area in the *Installation progress* page. However, users had to check the Operator availability in the {product-title} web console. With this release, the Operator appears as *installed* in the *Status* area. - -[id="ai-release-notes-known-issues_{context}"] -== Known issues - -* The minimum disk size required for installing on bare metal using {ai-full} is specified as 120 GB. The actual required minimum disk size is 100 GB. -//(link:https://issues.redhat.com/browse/MGMT-9682[MGMT-9682]) diff --git a/modules/assisted-installer-setting-the-cluster-details.adoc b/modules/assisted-installer-setting-the-cluster-details.adoc deleted file mode 100644 index e1718b286592..000000000000 --- a/modules/assisted-installer-setting-the-cluster-details.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc - -:_content-type: PROCEDURE -[id='setting-the-cluster-details_{context}'] -= Setting the cluster details - -To create a cluster with the {ai-full} web user interface, use the following procedure. - -.Procedure - -. Log in to the link:https://console.redhat.com[Red Hat Hybrid Cloud Console]. - -. In the menu, click *OpenShift*. - -. Click *Create cluster*. - -. Click the *Datacenter* tab. - -. Under the *{ai-full}* section, select *Create cluster*. - -. Enter a name for the cluster in the *Cluster name* field. - -. Enter a base domain for the cluster in the *Base domain* field. All subdomains for the cluster will use this base domain. -+ -[NOTE] -==== -The base domain must be a valid DNS name. You must not have a wildcard domain set up for the base domain. -==== - -. Select the version of {product-title} to install. - -. Optional: Select *Install single node OpenShift (SNO)* if you want to install {product-title} on a single node. - -. Optional: The {ai-full} already has the pull secret associated with your account. If you want to use a different pull secret, select *Edit pull secret*. - -. Optional: {ai-full} defaults to using the x86_64 CPU architecture. If you are installing {product-title} on 64-bit ARM CPUs, select *Use arm64 CPU architecture*. Keep in mind that some features are not available with 64-bit ARM CPU architecture. - -. 
Optional: If you are using a static IP configuration for the cluster nodes instead of DHCP reservations, select *Static network configuration*. - -. Optional: If you want to enable encryption of the installation disks, select *Enable encryption of installation disks*. For multi-node clusters, you can choose to encrypt the control plane and worker node installation disks separately. - -[IMPORTANT] -==== -You cannot change the base domain, the SNO checkbox, the CPU architecture, the host's network configuration, or the disk-encryption after installation begins. -==== diff --git a/modules/assisted-installer-using-the-assisted-installer.adoc b/modules/assisted-installer-using-the-assisted-installer.adoc deleted file mode 100644 index e4df53f5e835..000000000000 --- a/modules/assisted-installer-using-the-assisted-installer.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// This is included in the following assemblies: -// -// installing-on-prem-assisted.adoc -:_content-type: CONCEPT - -[id="using-the-assisted-installer_{context}"] -= Using the Assisted Installer - -The {product-title} link:https://console.redhat.com/openshift/assisted-installer/clusters/~new[{ai-full}] is a user-friendly installation solution offered on the link:http://console.redhat.com[Red Hat Hybrid Cloud Console]. The {ai-full} supports the various deployment platforms with a focus on bare metal, Nutanix, and vSphere infrastructures. - -The {ai-full} provides installation functionality as a service. This software-as-a-service (SaaS) approach has the following advantages: - -* *Web user interface:* The web user interface performs cluster installation without the user having to create the installation configuration files manually. -* *No bootstrap node:* A bootstrap node is not required when installing with the {ai-full}. The bootstrapping process executes on a node within the cluster. -* *Hosting:* The {ai-full} hosts: - - Ignition files - - The installation configuration - - A discovery ISO - - The installer -* *Streamlined installation workflow:* Deployment does not require in-depth knowledge of {product-title}. The {ai-full} provides reasonable defaults and provides the installer as a service, which: - - Eliminates the need to install and run the {product-title} installer locally. - - Ensures the latest version of the installer up to the latest tested z-stream releases. Older versions remain available, if needed. - - Enables building automation by using the API without the need to run the {product-title} installer locally. -* *Advanced networking:* The {ai-full} supports IPv4 networking with SDN and OVN, IPv6 and dual stack networking with OVN only, NMState-based static IP addressing, and an HTTP/S proxy. OVN is the default Container Network Interface (CNI) for OpenShift Container Platform 4.12 and later releases, but you can use SDN. - -* *Pre-installation validation:* The {ai-full} validates the configuration before installation to ensure a high probability of success. Validation includes: - - Ensuring network connectivity - - Ensuring sufficient network bandwidth - - Ensuring connectivity to the registry - - Ensuring time synchronization between cluster nodes - - Verifying that the cluster nodes meet the minimum hardware requirements - - Validating the installation configuration parameters -* *REST API:* The {ai-full} has a REST API, enabling automation. - -The {ai-full} supports installing {product-title} on premises in a connected environment, including with an optional HTTP/S proxy. 
It can install the following: - -* Highly available {product-title} or Single Node OpenShift (SNO) -+ -[NOTE] -==== -SNO is not supported on {ibmzProductName} and {ibmpowerProductName}. -==== -+ -* {product-title} on bare metal, Nutanix, or vSphere with full platform integration, or other virtualization platforms without integration -* Optionally {VirtProductName} and {rh-storage} (formerly OpenShift Container Storage) - -The user interface provides an intuitive interactive workflow where automation does not exist or is not required. Users may also automate installations using the REST API. - -See link:https://console.redhat.com/openshift/assisted-installer/clusters/~new[Install OpenShift with the Assisted Installer] to create an {product-title} cluster with the {ai-full}. See the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[Assisted Installer for OpenShift Container Platform] documentation for details on using the {ai-full}. diff --git a/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc b/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc deleted file mode 100644 index 62e76c2bac51..000000000000 --- a/modules/assuming-an-aws-iam-role-in-your-own-pods.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="assuming-an-aws-iam-role-in-your-own-pods_{context}"] -= Assuming an AWS IAM role in your own pods - -Follow the procedures in this section to enable a service account to assume an AWS Identity and Access Management (IAM) role in a pod deployed in a user-defined project. - -You can create the required resources, including an AWS IAM role, a service account, a container image that includes an AWS SDK, and a pod deployed by using the image. In the example, the AWS Boto3 SDK for Python is used. You can also verify that the pod identity webhook mutates the AWS environment variables, the volume mount, and the token volume into your pod. Additionally, you can check that the service account assumes the AWS IAM role in your pod and can successfully run AWS SDK operations. diff --git a/modules/auth-allowing-javascript-access-api-server.adoc b/modules/auth-allowing-javascript-access-api-server.adoc deleted file mode 100644 index 8330c0f70297..000000000000 --- a/modules/auth-allowing-javascript-access-api-server.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/allowing-javascript-access-api-server.adoc - -:_content-type: PROCEDURE -[id="auth-allowing-javascript-access-api-server_{context}"] -= Allowing JavaScript-based access to the API server from additional hosts - -The default {product-title} configuration only allows the web console to send requests to the API server. - -If you need to access the API server or OAuth server from a JavaScript -application using a different hostname, you can configure additional hostnames -to allow. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `APIServer` resource: -+ -[source,terminal] ----- -$ oc edit apiserver.config.openshift.io cluster ----- -+ -. 
Add the `additionalCORSAllowedOrigins` field under the `spec` section and -specify one or more additional hostnames: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-07-11T17:35:37Z" - generation: 1 - name: cluster - resourceVersion: "907" - selfLink: /apis/config.openshift.io/v1/apiservers/cluster - uid: 4b45a8dd-a402-11e9-91ec-0219944e0696 -spec: - additionalCORSAllowedOrigins: - - (?i)//my\.subdomain\.domain\.com(:|\z) <1> ----- -<1> The hostname is specified as a link:https://github.com/google/re2/wiki/Syntax[Golang regular expression] that matches -against CORS headers from HTTP requests against the API server and OAuth server. -+ -[NOTE] -==== -This example uses the following syntax: - -* The `(?i)` makes it case-insensitive. -* The `//` pins to the beginning of the domain and matches the double slash -following `http:` or `https:`. -* The `\.` escapes dots in the domain name. -* The `(:|\z)` matches the end of the domain name `(\z)` or a port separator -`(:)`. -==== - -. Save the file to apply the changes. diff --git a/modules/authentication-api-impersonation.adoc b/modules/authentication-api-impersonation.adoc deleted file mode 100644 index 63e6de2d9f40..000000000000 --- a/modules/authentication-api-impersonation.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc -// * applications/projects/creating-project-other-user.adoc -// * users_and_roles/impersonating-system-admin.adoc - -[id="authentication-api-impersonation_{context}"] -= API impersonation - -You can configure a request to the {product-title} API to act as though it originated from another user. For more information, see link:https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation[User impersonation] in the Kubernetes documentation. diff --git a/modules/authentication-authorization-common-terms.adoc b/modules/authentication-authorization-common-terms.adoc deleted file mode 100644 index e0e5cb6c239a..000000000000 --- a/modules/authentication-authorization-common-terms.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/index.adoc - -:_content-type: REFERENCE -[id="openshift-auth-common-terms_{context}"] -= Glossary of common terms for {product-title} authentication and authorization - -This glossary defines common terms that are used in {product-title} authentication and authorization. - -authentication:: -An authentication determines access to an {product-title} cluster and ensures only authenticated users access the {product-title} cluster. - -authorization:: -Authorization determines whether the identified user has permissions to perform the requested action. - -bearer token:: -Bearer token is used to authenticate to API with the header `Authorization: Bearer `. - -Cloud Credential Operator:: -The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). - -config map:: -A config map provides a way to inject configuration data into the pods. You can reference the data stored in a config map in a volume of type `ConfigMap`. Applications running in a pod can use this data. - -containers:: -Lightweight and executable images that consist software and all its dependencies. 
Because containers virtualize the operating system, you can run containers in a data center, public or private cloud, or your local host. - -Custom Resource (CR):: -A CR is an extension of the Kubernetes API. - -group:: -A group is a set of users. A group is useful for granting permissions to multiple users at one time. - -HTPasswd:: -HTPasswd updates the files that store usernames and passwords for authentication of HTTP users. - -Keystone:: -Keystone is an {rh-openstack-first} project that provides identity, token, catalog, and policy services. - -Lightweight Directory Access Protocol (LDAP):: -LDAP is a protocol that queries user information. - -manual mode:: -In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). - -mint mode:: -Mint mode is the default and recommended best practice setting for the Cloud Credential Operator (CCO) to use on the platforms for which it is supported. In this mode, the CCO uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required. - -namespace:: -A namespace isolates specific system resources that are visible to all processes. Inside a namespace, only processes that are members of that namespace can see those resources. - -node:: -A node is a worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -OAuth client:: -An OAuth client is used to get a bearer token. - -OAuth server:: -The {product-title} control plane includes a built-in OAuth server that determines the user’s identity from the configured identity provider and creates an access token. - -OpenID Connect:: -OpenID Connect is a protocol that authenticates users so they can use single sign-on (SSO) to access sites that use OpenID Providers. - -passthrough mode:: -In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. - -pod:: -A pod is the smallest logical unit in Kubernetes. A pod is composed of one or more containers that run on a worker node. - -regular users:: -Users that are created automatically in the cluster upon first login or via the API. - -request header:: -A request header is an HTTP header that is used to provide information about HTTP request context, so that the server can track the response of the request. - -role-based access control (RBAC):: -A key security control to ensure that cluster users and workloads have access to only the resources required to execute their roles. - -service accounts:: -Service accounts are used by the cluster components or applications. - -system users:: -Users that are created automatically when the cluster is installed. - -users:: -A user is an entity that can make requests to the API. diff --git a/modules/authentication-kubeadmin.adoc b/modules/authentication-kubeadmin.adoc deleted file mode 100644 index 1d07236299b3..000000000000 --- a/modules/authentication-kubeadmin.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/removing-kubeadmin.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="understanding-kubeadmin_{context}"] -= The kubeadmin user - -{product-title} creates a cluster administrator, `kubeadmin`, after the -installation process completes. - -This user has the `cluster-admin` role automatically applied and is treated -as the root user for the cluster. 
The password is dynamically generated -and unique to your {product-title} environment. After installation -completes the password is provided in the installation program's output. -For example: - -[source,terminal] ----- -INFO Install complete! -INFO Run 'export KUBECONFIG=/auth/kubeconfig' to manage the cluster with 'oc', the OpenShift CLI. -INFO The cluster is ready when 'oc login -u kubeadmin -p ' succeeds (wait a few minutes). -INFO Access the OpenShift web-console here: https://console-openshift-console.apps.demo1.openshift4-beta-abcorp.com -INFO Login to the console with user: kubeadmin, password: ----- diff --git a/modules/authentication-prometheus-system-metrics.adoc b/modules/authentication-prometheus-system-metrics.adoc deleted file mode 100644 index e8a10be6a179..000000000000 --- a/modules/authentication-prometheus-system-metrics.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc - -[id="authentication-prometheus-system-metrics_{context}"] -= Authentication metrics for Prometheus - -{product-title} captures the following Prometheus system metrics during authentication attempts: - -* `openshift_auth_basic_password_count` counts the number of `oc login` user name and password attempts. -* `openshift_auth_basic_password_count_result` counts the number of `oc login` user name and password attempts by result, `success` or `error`. -* `openshift_auth_form_password_count` counts the number of web console login attempts. -* `openshift_auth_form_password_count_result` counts the number of web console login attempts by result, `success` or `error`. -* `openshift_auth_password_total` counts the total number of `oc login` and web console login attempts. diff --git a/modules/authentication-remove-kubeadmin.adoc b/modules/authentication-remove-kubeadmin.adoc deleted file mode 100644 index bbe29a4ce955..000000000000 --- a/modules/authentication-remove-kubeadmin.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="removing-kubeadmin_{context}"] -= Removing the kubeadmin user - -After you define an identity provider and create a new `cluster-admin` -user, you can remove the `kubeadmin` to improve cluster security. - -[WARNING] -==== -If you follow this procedure before another user is a `cluster-admin`, -then {product-title} must be reinstalled. It is not possible to undo -this command. -==== - -.Prerequisites - -* You must have configured at least one identity provider. -* You must have added the `cluster-admin` role to a user. -* You must be logged in as an administrator. 
- -.Procedure - -* Remove the `kubeadmin` secrets: -+ -[source,terminal] ----- -$ oc delete secrets kubeadmin -n kube-system ----- diff --git a/modules/automatic-network-verification-bypassing.adoc b/modules/automatic-network-verification-bypassing.adoc deleted file mode 100644 index 1902455a418a..000000000000 --- a/modules/automatic-network-verification-bypassing.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network-verification.adoc - -:_content-type: CONCEPT -[id="automatic-network-verification-bypassing_{context}"] -= Automatic network verification bypassing - -You can bypass the automatic network verification if you want to deploy -ifdef::openshift-dedicated[] -an {product-title} -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -a {product-title} (ROSA) -endif::openshift-rosa[] -cluster with known network configuration issues into an existing Virtual Private Cloud (VPC). - -If you bypass the network verification when you create a cluster, the cluster has a limited support status. After installation, you can resolve the issues and then manually run the network verification. The limited support status is removed after the verification succeeds. - -ifdef::openshift-rosa[] -.Bypassing automatic network verification by using {cluster-manager} - -endif::openshift-rosa[] -When you install a cluster into an existing VPC by using {cluster-manager-first}, you can bypass the automatic verification by selecting *Bypass network verification* on the *Virtual Private Cloud (VPC) subnet settings* page. - -ifdef::openshift-rosa[] -.Bypassing automatic network verification by using the ROSA CLI (`rosa`) - -When you install a cluster into an existing VPC by using the `rosa create cluster` command, you can bypass the automatic verification by including the `--bypass-network-verify --force` arguments. The following example bypasses the network verification before creating a cluster: - -[source,terminal] ----- -$ rosa create cluster --cluster-name mycluster \ - --subnet-ids subnet-03146b9b52b6024cb,subnet-03146b9b52b2034cc \ - --bypass-network-verify --force ----- - -[NOTE] -==== -Alternatively, you can specify the `--interactive` argument and select the option in the interactive prompts to bypass the network verification checks. -==== -endif::openshift-rosa[] diff --git a/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc b/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc deleted file mode 100644 index fa805eadf566..000000000000 --- a/modules/automatically-scaling-machines-to-available-bare-metal-hosts.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -:_content-type: PROCEDURE -[id="automatically-scaling-machines-to-available-bare-metal-hosts_{context}"] -= Automatically scaling machines to the number of available bare metal hosts - -To automatically create the number of `Machine` objects that matches the number of available `BareMetalHost` objects, add a `metal3.io/autoscale-to-hosts` annotation to the `MachineSet` object. - -.Prerequisites - -* Install {op-system} bare metal compute machines for use in the cluster, and create corresponding `BareMetalHost` objects. -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Annotate the compute machine set that you want to configure for automatic scaling by adding the `metal3.io/autoscale-to-hosts` annotation. Replace `` with the name of the compute machine set. -+ -[source,terminal] ----- -$ oc annotate machineset -n openshift-machine-api 'metal3.io/autoscale-to-hosts=' ----- -+ -Wait for the new scaled machines to start. - -[NOTE] -==== -When you use a `BareMetalHost` object to create a machine in the cluster and labels or selectors are subsequently changed on the `BareMetalHost`, the `BareMetalHost` object continues to be counted against the `MachineSet` that the `Machine` object was created from. -==== diff --git a/modules/available-persistent-storage-options.adoc b/modules/available-persistent-storage-options.adoc deleted file mode 100644 index 718d2d073858..000000000000 --- a/modules/available-persistent-storage-options.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/optimizing-storage.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="available-persistent-storage-options_{context}"] -= Available persistent storage options - -Understand your persistent storage options so that you can optimize your -{product-title} environment. - -.Available storage options -[cols="1,4,3",options="header"] -|=== -| Storage type | Description | Examples - -|Block -a|* Presented to the operating system (OS) as a block device -* Suitable for applications that need full control of storage and operate at a low level on files, -bypassing the file system -* Also referred to as a Storage Area Network (SAN) -* Non-shareable, which means that only one client at a time can mount an endpoint of this type -| AWS EBS and VMware vSphere support dynamic persistent volume (PV) provisioning natively in {product-title}. -// Ceph RBD, OpenStack Cinder, Azure Disk, GCE persistent disk - -|File -a| * Presented to the OS as a file system export to be mounted -* Also referred to as Network Attached Storage (NAS) -* Concurrency, latency, file locking mechanisms, and other capabilities vary widely between protocols, implementations, vendors, and scales. -|RHEL NFS, NetApp NFS ^[1]^, and Vendor NFS -// Azure File, AWS EFS - -| Object -a| * Accessible through a REST API endpoint -* Configurable for use in the {product-registry} -* Applications must build their drivers into the application and/or container. -| AWS S3 -// Aliyun OSS, Ceph Object Storage (RADOS Gateway) -// Google Cloud Storage, Azure Blob Storage, OpenStack Swift -|=== -[.small] --- -1. NetApp NFS supports dynamic PV provisioning when using the Trident plugin. --- - -[IMPORTANT] -==== -Currently, CNS is not supported in {product-title} {product-version}. -==== diff --git a/modules/aws-cloudwatch.adoc b/modules/aws-cloudwatch.adoc deleted file mode 100644 index e675ca103e80..000000000000 --- a/modules/aws-cloudwatch.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * adding_service_cluster/rosa-available-services.adoc -:_content-type: CONCEPT -[id="aws-cloudwatch_{context}"] - -= Amazon CloudWatch - -Amazon CloudWatch forwards logs from {product-title} (ROSA) to the AWS console for viewing. You must first install the ROSA `cluster-logging-operator` using the ROSA CLI (`rosa`) before installing the Amazon CloudWatch service through the {cluster-manager-first} console. 
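- -A minimal sketch of that prerequisite step with the ROSA CLI follows. The add-on ID `cluster-logging-operator` and the cluster name `mycluster` are assumptions for this example; verify the available add-on IDs and flags for your `rosa` version before running the command: - -[source,terminal] ----- -$ rosa install addon cluster-logging-operator --cluster=mycluster ----- 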
diff --git a/modules/aws-console-changing-aws-instance-type.adoc b/modules/aws-console-changing-aws-instance-type.adoc deleted file mode 100644 index 74608149b371..000000000000 --- a/modules/aws-console-changing-aws-instance-type.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc - -:_content-type: PROCEDURE -[id="aws-console-changing-aws-instance-type_{context}"] -= Changing the Amazon Web Services instance type by using the AWS console - -You can change the Amazon Web Services (AWS) instance type that your control plane machines use by updating the instance type in the AWS console. - -.Prerequisites - -* You have access to the AWS console with the permissions required to modify the EC2 Instance for your cluster. -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Open the AWS console and fetch the instances for the control plane machines. - -. Choose one control plane machine instance. - -.. For the selected control plane machine, back up the etcd data by creating an etcd snapshot. For more information, see "Backing up etcd". - -.. In the AWS console, stop the control plane machine instance. - -.. Select the stopped instance, and click *Actions* -> *Instance Settings* -> *Change instance type*. - -.. Change the instance to a larger type, ensuring that the type is the same base as the previous selection, and apply changes. For example, you can change `m6i.xlarge` to `m6i.2xlarge` or `m6i.4xlarge`. - -.. Start the instance. - -.. If your {product-title} cluster has a corresponding `Machine` object for the instance, update the instance type of the object to match the instance type set in the AWS console. - -. Repeat this process for each control plane machine. diff --git a/modules/aws-direct-connect.adoc b/modules/aws-direct-connect.adoc deleted file mode 100644 index 45b8635ba9b7..000000000000 --- a/modules/aws-direct-connect.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-direct-connect_{context}"] -= Configuring AWS Direct Connect - - -{AWS} Direct Connect requires a hosted Virtual Interface (VIF) connected to a Direct Connect Gateway (DXGateway), which is in turn associated to a Virtual Gateway (VGW) or a Transit Gateway in order to access a remote Virtual Private Cloud (VPC) in the same or another account. - -If you do not have an existing DXGateway, the typical process involves creating the hosted VIF, with the DXGateway and VGW being created in your AWS account. - -If you have an existing DXGateway connected to one or more existing VGWs, the process involves your AWS account sending an Association Proposal to the DXGateway owner. The DXGateway owner must ensure that the proposed CIDR will not conflict with any other VGWs they have associated. - -.Prerequisites - -* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. -* Gather the following information: -** The Direct Connect Gateway ID. -** The AWS Account ID associated with the virtual interface. -** The BGP ASN assigned for the DXGateway. Optional: the Amazon default ASN may also be used. - -.Procedure - -. 
link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/create-vif.html[Create a VIF] or link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/viewvifdetails.html[view your existing VIFs] to determine the type of direct connection you need to create. - -. Create your gateway. -.. If the Direct Connect VIF type is *Private*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/virtualgateways.html#create-virtual-private-gateway[create a virtual private gateway]. -.. If the Direct Connect VIF is *Public*, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html#create-direct-connect-gateway[create a Direct Connect gateway]. - -. If you have an existing gateway you want to use, link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[create an association proposal] and send the proposal to the DXGateway owner for approval. -+ -[WARNING] -==== -When connecting to an existing DXGateway, you are responsible for the link:https://aws.amazon.com/directconnect/pricing/[costs]. -==== - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html[AWS Direct Connect] guide. diff --git a/modules/aws-limits.adoc b/modules/aws-limits.adoc deleted file mode 100644 index 34338992529f..000000000000 --- a/modules/aws-limits.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="aws-limits_{context}"] -= AWS account limits - - -The {product-title} cluster uses a number of Amazon Web Services (AWS) components, and the default link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[service limits] affect your ability to install {product-title} clusters. If you use certain cluster configurations, deploy your cluster in certain AWS regions, or run multiple clusters from your account, you might need to request additional resources for your AWS account. - -The following table summarizes the AWS components whose limits can impact your ability to install and run {product-title} clusters. - - -[cols="3a,3a,3a,8a",options="header"] -|=== -|Component |Number of clusters available by default| Default AWS limit |Description - -|Instance Limits -|Varies -|Varies -|At a minimum, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane nodes -* Two infrastructure nodes for a single availability zone; three infrastructure nodes for multi-availability zones -* Two worker nodes for a single availability zone; three worker nodes for multi-availability zones - -These instance type counts are within a new account's default limit. To deploy more worker nodes, deploy large workloads, or use a different instance type, review your account limits to ensure that your cluster can deploy the machines that you need. - -In most regions, the bootstrap and worker machines use `m4.large` instances and the control plane machines use `m4.xlarge` instances. In some regions, including all regions that do not support these instance types, `m5.large` and `m5.xlarge` instances are used instead. 
- -|Elastic IPs (EIPs) -|0 to 1 -|5 EIPs per account -|To provision the cluster in a highly available configuration, the installation program creates a public and private subnet for each link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zone within a region]. Each private subnet requires a link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT Gateway], and each NAT gateway requires a separate -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ipaddresses-eip.html[elastic IP]. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. To take advantage of the default high availability, install the cluster in a region with at least three availability zones. To install a cluster in a region with more than five availability zones, you must increase the EIP limit. - -// TODO: The above elastic IP link is redirected. Find new link. Is it https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html ? - -[IMPORTANT] -==== -To use the `us-east-1` region, you must increase the EIP limit for your account. -==== - -|Virtual Private Clouds (VPCs) -|5 -|5 VPCs per region -|Each cluster creates its own VPC. - -|Elastic Load Balancing (ELB/NLB) -|3 -|20 per region -|By default, each cluster creates internal and external network load balancers for the primary API server and a single classic elastic load balancer for the router. Deploying more Kubernetes LoadBalancer Service objects will create additional link:https://aws.amazon.com/elasticloadbalancing/[load balancers]. - - -|NAT Gateways -|5 -|5 per availability zone -|The cluster deploys one NAT gateway in each availability zone. - -|Elastic Network Interfaces (ENIs) -|At least 12 -|350 per region -|The default installation creates 21 ENIs and an ENI for each availability zone in your region. For example, the `us-east-1` region contains six availability zones, so a cluster that is deployed in that zone uses 27 ENIs. Review the link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to determine how many availability zones are in each region. - -Additional ENIs are created for additional machines and elastic load balancers that are created by cluster usage and deployed workloads. - -|VPC Gateway -|20 -|20 per account -|Each cluster creates a single VPC Gateway for S3 access. - - -|S3 buckets -|99 -|100 buckets per account -|Because the installation process creates a temporary bucket and the registry component in each cluster creates a bucket, you can create only 99 {product-title} clusters per AWS account. - -|Security Groups -|250 -|2,500 per account -|Each cluster creates 10 distinct security groups. - | Fail, optionally surfacing response body to the user -|=== - -// TODO: what is this random text/cell on line 82^? diff --git a/modules/aws-vpc.adoc b/modules/aws-vpc.adoc deleted file mode 100644 index 4572be7b66f4..000000000000 --- a/modules/aws-vpc.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-vpc_{context}"] -= Configuring AWS VPC peering - - -A Virtual Private Cloud (VPC) peering connection is a networking connection between two VPCs that enables you to route traffic between them using private IPv4 addresses or IPv6 addresses. 
You can configure an {AWS} VPC containing an {product-title} cluster to peer with another AWS VPC network. - -[WARNING] -==== -Private clusters cannot be fully deleted by {cluster-manager-first} if the VPC the cluster is installed in is peered. - -AWS supports inter-region VPC peering between all commercial regions link:https://aws.amazon.com/vpc/faqs/#Peering_Connections[excluding China]. -==== - -.Prerequisites - -* Gather the following information about the Customer VPC that is required to initiate the peering request: -** Customer AWS account number -** Customer VPC ID -** Customer VPC Region -** Customer VPC CIDR -* Check the CIDR block used by the {product-title} Cluster VPC. If it overlaps or matches the CIDR block for the Customer VPC, then peering between these two VPCs is not possible; see the Amazon VPC link:https://docs.aws.amazon.com/vpc/latest/peering/invalid-peering-configurations.html[Unsupported VPC peering configurations] documentation for details. If the CIDR blocks do not overlap, you can proceed with the procedure. - -.Procedure - -. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#create-vpc-peering-connection-local[Initiate the VPC peering request]. - -. link:https://docs.aws.amazon.com/vpc/latest/peering/create-vpc-peering-connection.html#accept-vpc-peering-connection[Accept the VPC peering request]. - -. link:https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-routing.html[Update your Route tables for the VPC peering connection]. - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html[AWS VPC] guide. diff --git a/modules/aws-vpn.adoc b/modules/aws-vpn.adoc deleted file mode 100644 index 8f1c5afb1f4f..000000000000 --- a/modules/aws-vpn.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="aws-vpn_{context}"] -= Configuring an AWS VPN - - -You can configure an {AWS} {product-title} cluster to use a customer’s on-site hardware Virtual Private Network (VPN) device. By default, instances that you launch into an AWS Virtual Private Cloud (VPC) cannot communicate with your own (remote) network. You can enable access to your remote network from your VPC by creating an AWS Site-to-Site VPN connection, and configuring routing to pass traffic through the connection. - -[NOTE] -==== -AWS VPN does not currently provide a managed option to apply NAT to VPN traffic. See the link:https://aws.amazon.com/premiumsupport/knowledge-center/configure-nat-for-vpn-traffic/[AWS Knowledge Center] for more details. - -Routing all traffic, for example `0.0.0.0/0`, through a private connection is not supported. This requires deleting the internet gateway, which disables SRE management traffic. -==== - -.Prerequisites - -* Hardware VPN gateway device model and software version, for example Cisco ASA running version 8.3. See the link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#DevicesTested[AWS documentation] to confirm whether your gateway device is supported by AWS. -* Public, static IP address for the VPN gateway device. -* BGP or static routing: if BGP, the ASN is required. If static routing, you must -configure at least one static route. -* Optional: IP and port/protocol of a reachable service to test the VPN connection. - -.Procedure - -. 
link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-cgw[Create a customer gateway] to configure the VPN connection. - -. If you do not already have a Virtual Private Gateway attached to the intended VPC, link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-target-gateway[create and attach] a Virtual Private Gateway. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-route-tables[Configure routing and enable VPN route propagation]. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-configure-security-groups[Update your security group]. - -. link:https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html#vpn-create-vpn-connection[Establish the Site-to-Site VPN connection]. -+ -[NOTE] -==== -Note the VPC subnet information, which you must add to your configuration as the remote network. -==== - -[role="_additional-resources"] -.Additional resources - -* For more information and troubleshooting help, see the link:https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html[AWS VPN] guide. diff --git a/modules/azure-stack-hub-internal-ca.adoc b/modules/azure-stack-hub-internal-ca.adoc deleted file mode 100644 index d882b1ea1c75..000000000000 --- a/modules/azure-stack-hub-internal-ca.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc - -:_content-type: PROCEDURE -[id="internal-certificate-authority_{context}"] -= Configuring the cluster to use an internal CA - -If the Azure Stack Hub environment is using an internal Certificate Authority (CA), update the `cluster-proxy-01-config.yaml file` to configure the cluster to use the internal CA. - -.Prerequisites - -* Create the `install-config.yaml` file and specify the certificate trust bundle in `.pem` format. -* Create the cluster manifests. - -.Procedure - -. From the directory in which the installation program creates files, go to the `manifests` directory. -. Add `user-ca-bundle` to the `spec.trustedCA.name` field. -+ -.Example `cluster-proxy-01-config.yaml` file -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -metadata: - creationTimestamp: null - name: cluster -spec: - trustedCA: - name: user-ca-bundle -status: {} ----- -. Optional: Back up the `manifests/ cluster-proxy-01-config.yaml` file. The installation program consumes the `manifests/` directory when you deploy the cluster. diff --git a/modules/backup-etcd-hosted-cluster.adoc b/modules/backup-etcd-hosted-cluster.adoc deleted file mode 100644 index 455fc4e25f9f..000000000000 --- a/modules/backup-etcd-hosted-cluster.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="backup-etcd-hosted-cluster_{context}"] -= Taking a snapshot of etcd on a hosted cluster - -As part of the process to back up etcd for a hosted cluster, you take a snapshot of etcd. After you take the snapshot, you can restore it, for example, as part of a disaster recovery operation. - -[IMPORTANT] -==== -This procedure requires API downtime. -==== - -.Procedure - -. Pause reconciliation of the hosted cluster by entering this command: -+ -[source,terminal] ----- -$ oc patch -n clusters hostedclusters/${CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge ----- - -. 
Stop all etcd-writer deployments by entering this command: -+ -[source,terminal] ----- -$ oc scale deployment -n ${HOSTED_CLUSTER_NAMESPACE} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver ----- - -. Take an etcd snapshot by using the `exec` command in each etcd container: -+ -[source,terminal] ----- -$ oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- env ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/client/etcd-client-ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 snapshot save /var/lib/data/snapshot.db -$ oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- env ETCDCTL_API=3 /usr/bin/etcdctl -w table snapshot status /var/lib/data/snapshot.db ----- - -. Copy the snapshot data to a location where you can retrieve it later, such as an S3 bucket, as shown in the following example. -+ -[NOTE] -==== -The following example uses signature version 2. If you are in a region that supports signature version 4, such as the us-east-2 region, use signature version 4. Otherwise, if you use signature version 2 to copy the snapshot to an S3 bucket, the upload fails and signature version 2 is deprecated. -==== -+ -.Example -[source,terminal] ----- -BUCKET_NAME=somebucket -FILEPATH="/${BUCKET_NAME}/${CLUSTER_NAME}-snapshot.db" -CONTENT_TYPE="application/x-compressed-tar" -DATE_VALUE=`date -R` -SIGNATURE_STRING="PUT\n\n${CONTENT_TYPE}\n${DATE_VALUE}\n${FILEPATH}" -ACCESS_KEY=accesskey -SECRET_KEY=secret -SIGNATURE_HASH=`echo -en ${SIGNATURE_STRING} | openssl sha1 -hmac ${SECRET_KEY} -binary | base64` - -oc exec -it etcd-0 -n ${HOSTED_CLUSTER_NAMESPACE} -- curl -X PUT -T "/var/lib/data/snapshot.db" \ - -H "Host: ${BUCKET_NAME}.s3.amazonaws.com" \ - -H "Date: ${DATE_VALUE}" \ - -H "Content-Type: ${CONTENT_TYPE}" \ - -H "Authorization: AWS ${ACCESS_KEY}:${SIGNATURE_HASH}" \ - https://${BUCKET_NAME}.s3.amazonaws.com/${CLUSTER_NAME}-snapshot.db ----- - -. If you want to be able to restore the snapshot on a new cluster later, save the encryption secret that the hosted cluster references, as shown in this example: -+ -.Example -[source,terminal] ----- -oc get hostedcluster $CLUSTER_NAME -o=jsonpath='{.spec.secretEncryption.aescbc}' -{"activeKey":{"name":"CLUSTER_NAME-etcd-encryption-key"}} - -# Save this secret, or the key it contains so the etcd data can later be decrypted -oc get secret ${CLUSTER_NAME}-etcd-encryption-key -o=jsonpath='{.data.key}' ----- - -.Next steps - -Restore the etcd snapshot. diff --git a/modules/backup-etcd.adoc b/modules/backup-etcd.adoc deleted file mode 100644 index ab3985bf46ea..000000000000 --- a/modules/backup-etcd.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/backing-up-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="backing-up-etcd-data_{context}"] -= Backing up etcd data - -Follow these steps to back up etcd data by creating an etcd snapshot and backing up the resources for the static pods. This backup can be saved and used at a later time if you need to restore etcd. - -[IMPORTANT] -==== -Only save a backup from a single control plane host. Do not take a backup from each control plane host in the cluster. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have checked whether the cluster-wide proxy is enabled. 
-+ -[TIP] -==== -You can check whether the proxy is enabled by reviewing the output of `oc get proxy cluster -o yaml`. The proxy is enabled if the `httpProxy`, `httpsProxy`, and `noProxy` fields have values set. -==== - -.Procedure - -. Start a debug session for a control plane node: -+ -[source,terminal] ----- -$ oc debug node/ ----- - -. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -. If the cluster-wide proxy is enabled, be sure that you have exported the `NO_PROXY`, `HTTP_PROXY`, and `HTTPS_PROXY` environment variables. - -. Run the `cluster-backup.sh` script and pass in the location to save the backup to. -+ -[TIP] -==== -The `cluster-backup.sh` script is maintained as a component of the etcd Cluster Operator and is a wrapper around the `etcdctl snapshot save` command. -==== -+ -[source,terminal] ----- -sh-4.4# /usr/local/bin/cluster-backup.sh /home/core/assets/backup ----- -+ -.Example script output -[source,terminal] ----- -found latest kube-apiserver: /etc/kubernetes/static-pod-resources/kube-apiserver-pod-6 -found latest kube-controller-manager: /etc/kubernetes/static-pod-resources/kube-controller-manager-pod-7 -found latest kube-scheduler: /etc/kubernetes/static-pod-resources/kube-scheduler-pod-6 -found latest etcd: /etc/kubernetes/static-pod-resources/etcd-pod-3 -ede95fe6b88b87ba86a03c15e669fb4aa5bf0991c180d3c6895ce72eaade54a1 -etcdctl version: 3.4.14 -API version: 3.4 -{"level":"info","ts":1624647639.0188997,"caller":"snapshot/v3_snapshot.go:119","msg":"created temporary db file","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db.part"} -{"level":"info","ts":"2021-06-25T19:00:39.030Z","caller":"clientv3/maintenance.go:200","msg":"opened snapshot stream; downloading"} -{"level":"info","ts":1624647639.0301006,"caller":"snapshot/v3_snapshot.go:127","msg":"fetching snapshot","endpoint":"https://10.0.0.5:2379"} -{"level":"info","ts":"2021-06-25T19:00:40.215Z","caller":"clientv3/maintenance.go:208","msg":"completed snapshot read; closing"} -{"level":"info","ts":1624647640.6032252,"caller":"snapshot/v3_snapshot.go:142","msg":"fetched snapshot","endpoint":"https://10.0.0.5:2379","size":"114 MB","took":1.584090459} -{"level":"info","ts":1624647640.6047094,"caller":"snapshot/v3_snapshot.go:152","msg":"saved","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db"} -Snapshot saved at /home/core/assets/backup/snapshot_2021-06-25_190035.db -{"hash":3866667823,"revision":31407,"totalKey":12828,"totalSize":114446336} -snapshot db and kube resources are successfully saved to /home/core/assets/backup ----- -+ -In this example, two files are created in the `/home/core/assets/backup/` directory on the control plane host: - -* `snapshot_.db`: This file is the etcd snapshot. The `cluster-backup.sh` script confirms its validity. -* `static_kuberesources_.tar.gz`: This file contains the resources for the static pods. If etcd encryption is enabled, it also contains the encryption keys for the etcd snapshot. -+ -[NOTE] -==== -If etcd encryption is enabled, it is recommended to store this second file separately from the etcd snapshot for security reasons. However, this file is required to restore from the etcd snapshot. - -Keep in mind that etcd encryption only encrypts values, not keys. This means that resource types, namespaces, and object names are unencrypted. 
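For reference, a quick way to check whether etcd encryption is enabled is to query the `APIServer` cluster resource; this is only a sketch, and an empty result means encryption is not configured:

[source,terminal]
----
$ oc get apiserver cluster -o jsonpath='{.spec.encryption.type}{"\n"}'
----

An output value such as `aescbc` indicates that etcd encryption is enabled.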
-==== diff --git a/modules/baremetal-event-relay.adoc b/modules/baremetal-event-relay.adoc deleted file mode 100644 index 9cf801e6b3b3..000000000000 --- a/modules/baremetal-event-relay.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -[id="baremetal-event-relay_{context}"] -= {redfish-operator} - -[discrete] -== Purpose -The OpenShift {redfish-operator} manages the life-cycle of the Bare Metal Event Relay. The Bare Metal Event Relay enables you to configure the types of cluster event that are monitored using Redfish hardware events. - -[discrete] -== Configuration objects -You can use this command to edit the configuration after installation: for example, the webhook port. -You can edit configuration objects with: - -[source,terminal] ----- -$ oc -n [namespace] edit cm hw-event-proxy-operator-manager-config ----- - -[source,terminal] ----- -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 -kind: ControllerManagerConfig -health: - healthProbeBindAddress: :8081 -metrics: - bindAddress: 127.0.0.1:8080 -webhook: - port: 9443 -leaderElection: - leaderElect: true - resourceName: 6e7a703c.redhat-cne.org ----- - -[discrete] -== Project -link:https://github.com/redhat-cne/hw-event-proxy-operator[hw-event-proxy-operator] - -[discrete] -== CRD -The proxy enables applications running on bare-metal clusters to respond quickly to Redfish hardware changes and failures such as breaches of temperature thresholds, fan failure, disk loss, power outages, and memory failure, reported using the HardwareEvent CR. - -`hardwareevents.event.redhat-cne.org`: - -* Scope: Namespaced -* CR: HardwareEvent -* Validation: Yes diff --git a/modules/baseline-router-performance.adoc b/modules/baseline-router-performance.adoc deleted file mode 100644 index 9ec6a14c9d19..000000000000 --- a/modules/baseline-router-performance.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -[id="baseline-router-performance_{context}"] -= Baseline Ingress Controller (router) performance - -The {product-title} Ingress Controller, or router, is the ingress point for ingress traffic for applications and services that are configured using routes and ingresses. - -When evaluating a single HAProxy router performance in terms of HTTP requests handled per second, the performance varies depending on many factors. In particular: - -* HTTP keep-alive/close mode - -* Route type - -* TLS session resumption client support - -* Number of concurrent connections per target route - -* Number of target routes - -* Back end server page size - -* Underlying infrastructure (network/SDN solution, CPU, and so on) - -While performance in your specific environment will vary, Red Hat lab tests on a public cloud instance of size 4 vCPU/16GB RAM. A single HAProxy router handling 100 routes terminated by backends serving 1kB static pages is able to handle the following number of transactions per second. 
- -In HTTP keep-alive mode scenarios: - -[cols="3",options="header"] -|=== -|*Encryption* |*LoadBalancerService*|*HostNetwork* -|none |21515|29622 -|edge |16743|22913 -|passthrough |36786|53295 -|re-encrypt |21583|25198 -|=== - -In HTTP close (no keep-alive) scenarios: - -[cols="3",options="header"] -|=== -|*Encryption* |*LoadBalancerService*|*HostNetwork* -|none |5719|8273 -|edge |2729|4069 -|passthrough |4121|5344 -|re-encrypt |2320|2941 -|=== - -The default Ingress Controller configuration was used with the `spec.tuningOptions.threadCount` field set to `4`. Two different endpoint publishing strategies were tested: Load Balancer Service and Host Network. TLS session resumption was used for encrypted routes. With HTTP keep-alive, a single HAProxy router is capable of saturating a 1 Gbit NIC at page sizes as small as 8 kB. - -When running on bare metal with modern processors, you can expect roughly twice the performance of the public cloud instance above. This overhead is introduced by the virtualization layer in place on public clouds and holds mostly true for private cloud-based virtualization as well. The following table is a guide to how many applications to use behind the router: - -[cols="2,4",options="header"] -|=== -|*Number of applications* |*Application type* -|5-10 |static file/web server or caching proxy -|100-1000 |applications generating dynamic content - -|=== - -In general, HAProxy can support routes for up to 1000 applications, depending on the technology in use. Ingress Controller performance might be limited by the -capabilities and performance of the applications behind it, such as language or static versus dynamic content. - -Ingress, or router, sharding should be used to serve more routes towards applications and help horizontally scale the routing tier. diff --git a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc b/modules/binding-infra-node-workloads-using-taints-tolerations.adoc deleted file mode 100644 index 618adb487fac..000000000000 --- a/modules/binding-infra-node-workloads-using-taints-tolerations.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="binding-infra-node-workloads-using-taints-tolerations_{context}"] -= Binding infrastructure node workloads using taints and tolerations - -If you have an infra node that has the `infra` and `worker` roles assigned, you must configure the node so that user workloads are not assigned to it. - -[IMPORTANT] -==== -It is recommended that you preserve the dual `infra,worker` label that is created for infra nodes and use taints and tolerations to manage nodes that user workloads are scheduled on. If you remove the `worker` label from the node, you must create a custom pool to manage it. A node with a label other than `master` or `worker` is not recognized by the MCO without a custom pool. Maintaining the `worker` label allows the node to be managed by the default worker machine config pool, if no custom pools that select the custom label exists. The `infra` label communicates to the cluster that it does not count toward the total number of subscriptions. -==== - -.Prerequisites - -* Configure additional `MachineSet` objects in your {product-title} cluster. - -.Procedure - -. Add a taint to the infra node to prevent scheduling user workloads on it: - -.. 
Determine if the node has the taint: -+ -[source,terminal] ----- -$ oc describe nodes ----- -+ -.Sample output -[source,text] ----- -oc describe node ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Name: ci-ln-iyhx092-f76d1-nvdfm-worker-b-wln2l -Roles: worker - ... -Taints: node-role.kubernetes.io/infra:NoSchedule - ... ----- -+ -This example shows that the node has a taint. You can proceed with adding a toleration to your pod in the next step. - -.. If you have not configured a taint to prevent scheduling user workloads on it: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 node-role.kubernetes.io/infra=reserved:NoExecute ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: - labels: - ... -spec: - taints: - - key: node-role.kubernetes.io/infra - effect: NoExecute - value: reserved - ... ----- -==== -+ -This example places a taint on `node1` that has key `node-role.kubernetes.io/infra` and taint effect `NoSchedule`. Nodes with the `NoSchedule` effect schedule only pods that tolerate the taint, but allow existing pods to remain scheduled on the node. -+ -[NOTE] -==== -If a descheduler is used, pods violating node taints could be evicted from the cluster. -==== - -. Add tolerations for the pod configurations you want to schedule on the infra node, like router, registry, and monitoring workloads. Add the following code to the `Pod` object specification: -+ -[source,yaml] ----- -tolerations: - - effect: NoExecute <1> - key: node-role.kubernetes.io/infra <2> - operator: Exists <3> - value: reserved <4> ----- -<1> Specify the effect that you added to the node. -<2> Specify the key that you added to the node. -<3> Specify the `Exists` Operator to require a taint with the key `node-role.kubernetes.io/infra` to be present on the node. -<4> Specify the value of the key-value pair taint that you added to the node. -+ -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration can be scheduled onto the infra node. -+ -[NOTE] -==== -Moving pods for an Operator installed via OLM to an infra node is not always possible. The capability to move Operator pods depends on the configuration of each Operator. -==== - -. Schedule the pod to the infra node using a scheduler. See the documentation for _Controlling pod placement onto nodes_ for details. diff --git a/modules/bmo-about-the-bare-metal-operator.adoc b/modules/bmo-about-the-bare-metal-operator.adoc deleted file mode 100644 index c0d7a33fd257..000000000000 --- a/modules/bmo-about-the-bare-metal-operator.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_module-type: CONCEPT -[id="bmo-about-the-bare-metal-operator_{context}"] -= About the Bare Metal Operator - -Use the Bare Metal Operator (BMO) to provision, manage, and inspect bare-metal hosts in your cluster. - -The BMO uses three resources to complete these tasks: - -* `BareMetalHost` -* `HostFirmwareSettings` -* `FirmwareSchema` - -The BMO maintains an inventory of the physical hosts in the cluster by mapping each bare-metal host to an instance of the `BareMetalHost` custom resource definition. Each `BareMetalHost` resource features hardware, software, and firmware details. 
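For illustration only, a minimal `BareMetalHost` manifest might look like the following sketch. The host name, MAC address, BMC address, and secret name are placeholder values; the spec and status tables later in this section describe the full set of available fields.

[source,yaml]
----
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
metadata:
  name: worker-example                 # placeholder host name
  namespace: openshift-machine-api
spec:
  online: true                         # power the host on
  bootMACAddress: 00:00:00:00:00:00    # MAC address of the provisioning NIC (placeholder)
  bmc:
    address: redfish://192.0.2.1:443/redfish/v1/Systems/1  # placeholder BMC URL
    credentialsName: worker-example-bmc-secret             # secret holding the BMC username and password
    disableCertificateVerification: true
----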
The BMO continually inspects the bare-metal hosts in the cluster to ensure each `BareMetalHost` resource accurately details the components of the corresponding host. - -The BMO also uses the `HostFirmwareSettings` resource and the `FirmwareSchema` resource to detail firmware specifications for the bare-metal host. - -The BMO interfaces with bare-metal hosts in the cluster by using the Ironic API service. The Ironic service uses the Baseboard Management Controller (BMC) on the host to interface with the machine. - -Some common tasks you can complete by using the BMO include the following: - -* Provision bare-metal hosts to the cluster with a specific image -* Format a host's disk contents before provisioning or after deprovisioning -* Turn on or off a host -* Change firmware settings -* View the host's hardware details \ No newline at end of file diff --git a/modules/bmo-about-the-baremetalhost-resource.adoc b/modules/bmo-about-the-baremetalhost-resource.adoc deleted file mode 100644 index 86168debc9b5..000000000000 --- a/modules/bmo-about-the-baremetalhost-resource.adoc +++ /dev/null @@ -1,323 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-baremetalhost-resource_{context}"] -= About the BareMetalHost resource - -Metal^3^ introduces the concept of the `BareMetalHost` resource, which defines a physical host and its properties. The `BareMetalHost` resource contains two sections: - -. The `BareMetalHost` spec -. The `BareMetalHost` status - -== The BareMetalHost spec - -The `spec` section of the `BareMetalHost` resource defines the desired state of the host. - -.BareMetalHost spec -[options="header"] -|==== -|Parameters |Description - -| `automatedCleaningMode` -| An interface to enable or disable automated cleaning during provisioning and de-provisioning. When set to `disabled`, it skips automated cleaning. When set to `metadata`, automated cleaning is enabled. The default setting is `metadata`. - -a| ----- -bmc: - address: - credentialsName: - disableCertificateVerification: ----- -a| The `bmc` configuration setting contains the connection information for the baseboard management controller (BMC) on the host. The fields are: - -* `address`: The URL for communicating with the host's BMC controller. - -* `credentialsName`: A reference to a secret containing the username and password for the BMC. - -* `disableCertificateVerification`: A boolean to skip certificate validation when set to `true`. - -| `bootMACAddress` -| The MAC address of the NIC used for provisioning the host. - -| `bootMode` -| The boot mode of the host. It defaults to `UEFI`, but it can also be set to `legacy` for BIOS boot, or `UEFISecureBoot`. - -| `consumerRef` -| A reference to another resource that is using the host. It could be empty if another resource is not currently using the host. For example, a `Machine` resource might use the host when the `machine-api` is using the host. - -| `description` -| A human-provided string to help identify the host. - -| `externallyProvisioned` -a| A boolean indicating whether the host provisioning and deprovisioning are managed externally. When set: - -* Power status can still be managed using the online field. -* Hardware inventory will be monitored, but no provisioning or deprovisioning operations are performed on the host. - -| `firmware` -a| Contains information about the BIOS configuration of bare metal hosts. 
Currently, `firmware` is only supported by iRMC, iDRAC, iLO4 and iLO5 BMCs. The sub fields are: - -** `simultaneousMultithreadingEnabled`: Allows a single physical processor core to appear as several logical processors. Valid settings are `true` or `false`. -** `sriovEnabled`: SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. Valid settings are `true` or `false`. -** `virtualizationEnabled`: Supports the virtualization of platform hardware. Valid settings are `true` or `false`. - -a| ----- -image: - url: - checksum: - checksumType: - format: ----- -a| The `image` configuration setting holds the details for the image to be deployed on the host. Ironic requires the image fields. However, when the `externallyProvisioned` configuration setting is set to `true` and the external management doesn't require power control, the fields can be empty. The fields are: - -* `url`: The URL of an image to deploy to the host. -* `checksum`: The actual checksum or a URL to a file containing the checksum for the image at `image.url`. -* `checksumType`: You can specify checksum algorithms. Currently `image.checksumType` only supports `md5`, `sha256`, and `sha512`. The default checksum type is `md5`. -* `format`: This is the disk format of the image. It can be one of `raw`, `qcow2`, `vdi`, `vmdk`, `live-iso` or be left unset. Setting it to `raw` enables raw image streaming in the Ironic agent for that image. Setting it to `live-iso` enables iso images to live boot without deploying to disk, and it ignores the `checksum` fields. - -| `networkData` -| A reference to the secret containing the network configuration data and its namespace, so that it can be attached to the host before the host boots to set up the network. - -| `online` -| A boolean indicating whether the host should be powered on (`true`) or off (`false`). Changing this value will trigger a change in the power state of the physical host. - -a| ----- -raid: - hardwareRAIDVolumes: - softwareRAIDVolumes: ----- -a| (Optional) Contains the information about the RAID configuration for bare metal hosts. If not specified, it retains the current configuration. - -[NOTE] -==== -{product-title} {product-version} supports hardware RAID for BMCs using the iRMC protocol only. {product-title} {product-version} does not support software RAID. -==== - -See the following configuration settings: - -* `hardwareRAIDVolumes`: Contains the list of logical drives for hardware RAID, and defines the desired volume configuration in the hardware RAID. If you don't specify `rootDeviceHints`, the first volume is the root volume. The sub-fields are: - -** `level`: The RAID level for the logical drive. The following levels are supported: `0`,`1`,`2`,`5`,`6`,`1+0`,`5+0`,`6+0`. -** `name`: The name of the volume as a string. It should be unique within the server. If not specified, the volume name will be auto-generated. -** `numberOfPhysicalDisks`: The number of physical drives as an integer to use for the logical drove. Defaults to the minimum number of disk drives required for the particular RAID level. -** `physicalDisks`: The list of names of physical disk drives as a string. This is an optional field. If specified, the controller field must be specified too. -** `controller`: (Optional) The name of the RAID controller as a string to use in the hardware RAID volume. -** `rotational`: If set to `true`, it will only select rotational disk drives. 
If set to `false`, it will only select solid-state and NVMe drives. If not set, it selects any drive types, which is the default behavior. -** `sizeGibibytes`: The size of the logical drive as an integer to create in GiB. If unspecified or set to `0`, it will use the maximum capacity of physical drive for the logical drive. - -* `softwareRAIDVolumes`: {product-title} {product-version} does not support software RAID. The following information is for reference only. This configuration contains the list of logical disks for software RAID. If you don't specify `rootDeviceHints`, the first volume is the root volume. If you set `HardwareRAIDVolumes`, this item will be invalid. Software RAIDs will always be deleted. The number of created software RAID devices must be `1` or `2`. If there is only one software RAID device, it must be `RAID-1`. If there are two RAID devices, the first device must be `RAID-1`, while the RAID level for the second device can be `0`, `1`, or `1+0`. The first RAID device will be the deployment device. Therefore, enforcing `RAID-1` reduces the risk of a non-booting node in case of a device failure. The `softwareRAIDVolume` field defines the desired configuration of the volume in the software RAID. The sub-fields are: - -** `level`: The RAID level for the logical drive. The following levels are supported: `0`,`1`,`1+0`. -** `physicalDisks`: A list of device hints. The number of items should be greater than or equal to `2`. -** `sizeGibibytes`: The size of the logical disk drive as an integer to be created in GiB. If unspecified or set to `0`, it will use the maximum capacity of physical drive for logical drive. - -You can set the `hardwareRAIDVolume` as an empty slice to clear the hardware RAID configuration. For example: - ----- -spec: - raid: - hardwareRAIDVolume: [] ----- - -If you receive an error message indicating that the driver does not support RAID, set the `raid`, `hardwareRAIDVolumes` or `softwareRAIDVolumes` to nil. You might need to ensure the host has a RAID controller. - -a| ----- -rootDeviceHints: - deviceName: - hctl: - model: - vendor: - serialNumber: - minSizeGigabytes: - wwn: - wwnWithExtension: - wwnVendorExtension: - rotational: ----- -a| The `rootDeviceHints` parameter enables provisioning of the {op-system} image to a particular device. It examines the devices in the order it discovers them, and compares the discovered values with the hint values. It uses the first discovered device that matches the hint value. The configuration can combine multiple hints, but a device must match all hints to get selected. The fields are: - -* `deviceName`: A string containing a Linux device name like `/dev/vda`. The hint must match the actual value exactly. - -* `hctl`: A string containing a SCSI bus address like `0:0:0:0`. The hint must match the actual value exactly. - -* `model`: A string containing a vendor-specific device identifier. The hint can be a substring of the actual value. - -* `vendor`: A string containing the name of the vendor or manufacturer of the device. The hint can be a sub-string of the actual value. - -* `serialNumber`: A string containing the device serial number. The hint must match the actual value exactly. - -* `minSizeGigabytes`: An integer representing the minimum size of the device in gigabytes. - -* `wwn`: A string containing the unique storage identifier. The hint must match the actual value exactly. - -* `wwnWithExtension`: A string containing the unique storage identifier with the vendor extension appended. 
The hint must match the actual value exactly. - -* `wwnVendorExtension`: A string containing the unique vendor storage identifier. The hint must match the actual value exactly. - -* `rotational`: A boolean indicating whether the device should be a rotating disk (true) or not (false). - -|==== - -== The BareMetalHost status - -The `BareMetalHost` status represents the host's current state, and includes tested credentials, current hardware details, and other information. - - -.BareMetalHost status -[options="header"] -|==== -|Parameters |Description - -| `goodCredentials` -| A reference to the secret and its namespace holding the last set of baseboard management controller (BMC) credentials the system was able to validate as working. - -| `errorMessage` -| Details of the last error reported by the provisioning backend, if any. - -| `errorType` -a| Indicates the class of problem that has caused the host to enter an error state. The error types are: - -* `provisioned registration error`: Occurs when the controller is unable to re-register an already provisioned host. -* `registration error`: Occurs when the controller is unable to connect to the host's baseboard management controller. -* `inspection error`: Occurs when an attempt to obtain hardware details from the host fails. -* `preparation error`: Occurs when cleaning fails. -* `provisioning error`: Occurs when the controller fails to provision or deprovision the host. -* `power management error`: Occurs when the controller is unable to modify the power state of the host. -* `detach error`: Occurs when the controller is unable to detatch the host from the provisioner. - -a| ----- -hardware: - cpu - arch: - model: - clockMegahertz: - flags: - count: ----- -a| The `hardware.cpu` field details of the CPU(s) in the system. The fields include: - -* `arch`: The architecture of the CPU. -* `model`: The CPU model as a string. -* `clockMegahertz`: The speed in MHz of the CPU. -* `flags`: The list of CPU flags. For example, `'mmx','sse','sse2','vmx'` etc. -* `count`: The number of CPUs available in the system. - -a| ----- -hardware: - firmware: ----- -| Contains BIOS firmware information. For example, the hardware vendor and version. - -a| ----- -hardware: - nics: - - ip: - name: - mac: - speedGbps: - vlans: - vlanId: - pxe: ----- -a| The `hardware.nics` field contains a list of network interfaces for the host. The fields include: - -* `ip`: The IP address of the NIC, if one was assigned when the discovery agent ran. -* `name`: A string identifying the network device. For example, `nic-1`. -* `mac`: The MAC address of the NIC. -* `speedGbps`: The speed of the device in Gbps. -* `vlans`: A list holding all the VLANs available for this NIC. -* `vlanId`: The untagged VLAN ID. -* `pxe`: Whether the NIC is able to boot using PXE. - -a| ----- -hardware: - ramMebibytes: ----- -| The host's amount of memory in Mebibytes (MiB). - -a| ----- -hardware: - storage: - - name: - rotational: - sizeBytes: - serialNumber: ----- -a| The `hardware.storage` field contains a list of storage devices available to the host. The fields include: - -* `name`: A string identifying the storage device. For example, `disk 1 (boot)`. -* `rotational`: Indicates whether the disk is rotational, and returns either `true` or `false`. -* `sizeBytes`: The size of the storage device. -* `serialNumber`: The device's serial number. 
- -a| ----- -hardware: - systemVendor: - manufacturer: - productName: - serialNumber: ----- -| Contains information about the host's `manufacturer`, the `productName`, and the `serialNumber`. - - -| `lastUpdated` -| The timestamp of the last time the status of the host was updated. - -| `operationalStatus` -a| The status of the server. The status is one of the following: - -* `OK`: Indicates all the details for the host are known, correctly configured, working, and manageable. -* `discovered`: Implies some of the host's details are either not working correctly or missing. For example, the BMC address is known but the login credentials are not. -* `error`: Indicates the system found some sort of irrecoverable error. Refer to the `errorMessage` field in the status section for more details. -* `delayed`: Indicates that provisioning is delayed to limit simultaneous provisioning of multiple hosts. -* `detached`: Indicates the host is marked `unmanaged`. - -| `poweredOn` -| Boolean indicating whether the host is powered on. - -a| ----- -provisioning: - state: - id: - image: - raid: - firmware: - rootDeviceHints: ----- -a| The `provisioning` field contains values related to deploying an image to the host. The sub-fields include: - -* `state`: The current state of any ongoing provisioning operation. The states include: -** ``: There is no provisioning happening at the moment. -** `unmanaged`: There is insufficient information available to register the host. -** `registering`: The agent is checking the host's BMC details. -** `match profile`: The agent is comparing the discovered hardware details on the host against known profiles. -** `available`: The host is available for provisioning. This state was previously known as `ready`. -** `preparing`: The existing configuration will be removed, and the new configuration will be set on the host. -** `provisioning`: The provisioner is writing an image to the host's storage. -** `provisioned`: The provisioner wrote an image to the host's storage. -** `externally provisioned`: Metal^3^ does not manage the image on the host. -** `deprovisioning`: The provisioner is wiping the image from the host's storage. -** `inspecting`: The agent is collecting hardware details for the host. -** `deleting`: The agent is deleting the from the cluster. -* `id`: The unique identifier for the service in the underlying provisioning tool. -* `image`: The image most recently provisioned to the host. -* `raid`: The list of hardware or software RAID volumes recently set. -* `firmware`: The BIOS configuration for the bare metal server. -* `rootDeviceHints`: The root device selection instructions used for the most recent provisioning operation. - -| `triedCredentials` -| A reference to the secret and its namespace holding the last set of BMC credentials that were sent to the provisioning backend. - -|==== diff --git a/modules/bmo-about-the-firmwareschema-resource.adoc b/modules/bmo-about-the-firmwareschema-resource.adoc deleted file mode 100644 index 96228e915345..000000000000 --- a/modules/bmo-about-the-firmwareschema-resource.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-firmwareschema-resource_{context}"] -= About the FirmwareSchema resource - -BIOS settings vary among hardware vendors and host models. 
A `FirmwareSchema` resource is a read-only resource that contains the types and limits for each BIOS setting on each host model. The data comes directly from the BMC through Ironic. The `FirmwareSchema` enables you to identify valid values you can specify in the `spec` field of the `HostFirmwareSettings` resource. The `FirmwareSchema` resource has a unique identifier derived from its settings and limits. Identical host models use the same `FirmwareSchema` identifier. It is likely that multiple instances of `HostFirmwareSettings` use the same `FirmwareSchema`. - -.FirmwareSchema specification -[options="header"] -|==== -|Parameters|Description - -a| ----- - - attribute_type: - allowable_values: - lower_bound: - upper_bound: - min_length: - max_length: - read_only: - unique: ----- - -a| The `spec` is a simple map consisting of the BIOS setting name and the limits of the setting. The fields include: - -* `attribute_type`: The type of setting. The supported types are: -** `Enumeration` -** `Integer` -** `String` -** `Boolean` -* `allowable_values`: A list of allowable values when the `attribute_type` is `Enumeration`. -* `lower_bound`: The lowest allowed value when `attribute_type` is `Integer`. -* `upper_bound`: The highest allowed value when `attribute_type` is `Integer`. -* `min_length`: The shortest string length that the value can have when `attribute_type` is `String`. -* `max_length`: The longest string length that the value can have when `attribute_type` is `String`. -* `read_only`: The setting is read only and cannot be modified. -* `unique`: The setting is specific to this host. - -|==== diff --git a/modules/bmo-about-the-hostfirmwaresettings-resource.adoc b/modules/bmo-about-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index e10a31dacab7..000000000000 --- a/modules/bmo-about-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: REFERENCE -[id="about-the-hostfirmwaresettings-resource_{context}"] -= About the HostFirmwareSettings resource - -You can use the `HostFirmwareSettings` resource to retrieve and manage the BIOS settings for a host. When a host moves to the `Available` state, Ironic reads the host's BIOS settings and creates the `HostFirmwareSettings` resource. The resource contains the complete BIOS configuration returned from the baseboard management controller (BMC). Whereas, the `firmware` field in the `BareMetalHost` resource returns three vendor-independent fields, the `HostFirmwareSettings` resource typically comprises many BIOS settings of vendor-specific fields per host. - -The `HostFirmwareSettings` resource contains two sections: - -. The `HostFirmwareSettings` spec. -. The `HostFirmwareSettings` status. - -== The `HostFirmwareSettings` spec - -The `spec` section of the `HostFirmwareSettings` resource defines the desired state of the host's BIOS, and it is empty by default. Ironic uses the settings in the `spec.settings` section to update the baseboard management controller (BMC) when the host is in the `Preparing` state. Use the `FirmwareSchema` resource to ensure that you do not send invalid name/value pairs to hosts. See "About the FirmwareSchema resource" for additional details. 
- -.Example -[source,terminal] ----- -spec: - settings: - ProcTurboMode: Disabled<1> ----- -<1> In the foregoing example, the `spec.settings` section contains a name/value pair that will set the `ProcTurboMode` BIOS setting to `Disabled`. - -[NOTE] -==== -Integer parameters listed in the `status` section appear as strings. For example, `"1"`. When setting integers in the `spec.settings` section, the values should be set as integers without quotes. For example, `1`. -==== - -== The `HostFirmwareSettings` status - -The `status` represents the current state of the host's BIOS. - -.HostFirmwareSettings -[options="header"] -|==== -|Parameters|Description -a| ----- -status: - conditions: - - lastTransitionTime: - message: - observedGeneration: - reason: - status: - type: ----- -a| The `conditions` field contains a list of state changes. The sub-fields include: - -* `lastTransitionTime`: The last time the state changed. -* `message`: A description of the state change. -* `observedGeneration`: The current generation of the `status`. If `metadata.generation` and this field are not the same, the `status.conditions` might be out of date. -* `reason`: The reason for the state change. -* `status`: The status of the state change. The status can be `True`, `False` or `Unknown`. -* `type`: The type of state change. The types are `Valid` and `ChangeDetected`. - -a| ----- -status: - schema: - name: - namespace: - lastUpdated: ----- -a| The `FirmwareSchema` for the firmware settings. The fields include: - -* `name`: The name or unique identifier referencing the schema. -* `namespace`: The namespace where the schema is stored. -* `lastUpdated`: The last time the resource was updated. - -a| ----- -status: - settings: ----- -| The `settings` field contains a list of name/value pairs of a host's current BIOS settings. - -|==== diff --git a/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc b/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index 3ff058b6689a..000000000000 --- a/modules/bmo-editing-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="editing-the-hostfirmwaresettings-resource_{context}"] -= Editing the HostFirmwareSettings resource - -You can edit the `HostFirmwareSettings` of provisioned hosts. - -[IMPORTANT] -==== -You can only edit hosts when they are in the `provisioned` state, excluding read-only values. You cannot edit hosts in the `externally provisioned` state. - -==== - -.Procedure - -. Get the list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Edit a host's `HostFirmwareSettings` resource: -+ -[source,terminal] ----- -$ oc edit hfs -n openshift-machine-api ----- -+ -Where `` is the name of a provisioned host. The `HostFirmwareSettings` resource will open in the default editor for your terminal. - -. Add name/value pairs to the `spec.settings` section: -+ -.Example -[source,terminal] ----- -spec: - settings: - name: value <1> ----- -<1> Use the `FirmwareSchema` resource to identify the available settings for the host. You cannot set values that are read-only. - -. Save the changes and exit the editor. - -. Get the host's machine name: -+ -[source,terminal] ----- - $ oc get bmh -n openshift-machine name ----- -+ -Where `` is the name of the host. The machine name appears under the `CONSUMER` field. - -. 
Annotate the machine to delete it from the machineset: -+ -[source,terminal] ----- -$ oc annotate machine <machine_name> machine.openshift.io/delete-machine=true -n openshift-machine-api ----- -+ -Where `<machine_name>` is the name of the machine to delete. - -. Get a list of nodes and count the number of worker nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. Get the machineset: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- - -. Scale the machineset: -+ -[source,terminal] ----- -$ oc scale machineset <machineset_name> -n openshift-machine-api --replicas=<n-1> ----- -+ -Where `<machineset_name>` is the name of the machineset and `<n-1>` is the decremented number of worker nodes. - -. When the host enters the `Available` state, scale up the machineset to make the `HostFirmwareSettings` resource changes take effect: -+ -[source,terminal] ----- -$ oc scale machineset <machineset_name> -n openshift-machine-api --replicas=<n> ----- -+ -Where `<machineset_name>` is the name of the machineset and `<n>` is the number of worker nodes. diff --git a/modules/bmo-getting-the-baremetalhost-resource.adoc b/modules/bmo-getting-the-baremetalhost-resource.adoc deleted file mode 100644 index 689e3137e5ff..000000000000 --- a/modules/bmo-getting-the-baremetalhost-resource.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc -:_content-type: PROCEDURE -[id="getting-the-baremetalhost-resource_{context}"] -= Getting the BareMetalHost resource - -The `BareMetalHost` resource contains the properties of a physical host. You must get the `BareMetalHost` resource for a physical host to review its properties. - -.Procedure - -. Get the list of `BareMetalHost` resources: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api -o yaml ----- -+ -[NOTE] -==== -You can use `baremetalhost` as the long form of `bmh` with the `oc get` command. -==== - -. Get the list of hosts: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api ----- - -. Get the `BareMetalHost` resource for a specific host: -+ -[source,terminal] ----- -$ oc get bmh <host_name> -n openshift-machine-api -o yaml ----- -+ -Where `<host_name>` is the name of the host.
-+ -.Example output -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - creationTimestamp: "2022-06-16T10:48:33Z" - finalizers: - - baremetalhost.metal3.io - generation: 2 - name: openshift-worker-0 - namespace: openshift-machine-api - resourceVersion: "30099" - uid: 1513ae9b-e092-409d-be1b-ad08edeb1271 -spec: - automatedCleaningMode: metadata - bmc: - address: redfish://10.46.61.19:443/redfish/v1/Systems/1 - credentialsName: openshift-worker-0-bmc-secret - disableCertificateVerification: true - bootMACAddress: 48:df:37:c7:f7:b0 - bootMode: UEFI - consumerRef: - apiVersion: machine.openshift.io/v1beta1 - kind: Machine - name: ocp-edge-958fk-worker-0-nrfcg - namespace: openshift-machine-api - customDeploy: - method: install_coreos - hardwareProfile: unknown - online: true - rootDeviceHints: - deviceName: /dev/sda - userData: - name: worker-user-data-managed - namespace: openshift-machine-api -status: - errorCount: 0 - errorMessage: "" - goodCredentials: - credentials: - name: openshift-worker-0-bmc-secret - namespace: openshift-machine-api - credentialsVersion: "16120" - hardware: - cpu: - arch: x86_64 - clockMegahertz: 2300 - count: 64 - flags: - - 3dnowprefetch - - abm - - acpi - - adx - - aes - model: Intel(R) Xeon(R) Gold 5218 CPU @ 2.30GHz - firmware: - bios: - date: 10/26/2020 - vendor: HPE - version: U30 - hostname: openshift-worker-0 - nics: - - mac: 48:df:37:c7:f7:b3 - model: 0x8086 0x1572 - name: ens1f3 - ramMebibytes: 262144 - storage: - - hctl: "0:0:0:0" - model: VK000960GWTTB - name: /dev/sda - sizeBytes: 960197124096 - type: SSD - vendor: ATA - systemVendor: - manufacturer: HPE - productName: ProLiant DL380 Gen10 (868703-B21) - serialNumber: CZ200606M3 - hardwareProfile: unknown - lastUpdated: "2022-06-16T11:41:42Z" - operationalStatus: OK - poweredOn: true - provisioning: - ID: 217baa14-cfcf-4196-b764-744e184a3413 - bootMode: UEFI - customDeploy: - method: install_coreos - image: - url: "" - raid: - hardwareRAIDVolumes: null - softwareRAIDVolumes: [] - rootDeviceHints: - deviceName: /dev/sda - state: provisioned - triedCredentials: - credentials: - name: openshift-worker-0-bmc-secret - namespace: openshift-machine-api - credentialsVersion: "16120" - ----- diff --git a/modules/bmo-getting-the-firmwareschema-resource.adoc b/modules/bmo-getting-the-firmwareschema-resource.adoc deleted file mode 100644 index 4c43701107d6..000000000000 --- a/modules/bmo-getting-the-firmwareschema-resource.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="getting-the-firmwareschema-resource_{context}"] -= Getting the FirmwareSchema resource - -Each host model from each vendor has different BIOS settings. When editing the `HostFirmwareSettings` resource's `spec` section, the name/value pairs you set must conform to that host's firmware schema. To ensure you are setting valid name/value pairs, get the `FirmwareSchema` for the host and review it. - -.Procedure - -. To get a list of `FirmwareSchema` resource instances, execute the following: -+ -[source,terminal] ----- -$ oc get firmwareschema -n openshift-machine-api ----- - -. To get a particular `FirmwareSchema` instance, execute: -+ -[source,terminal] ----- -$ oc get firmwareschema -n openshift-machine-api -o yaml ----- -+ -Where `` is the name of the schema instance stated in the `HostFirmwareSettings` resource (see Table 3). 
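To connect this back to the schema fields described in "About the FirmwareSchema resource", the following trimmed sketch shows the kind of entry the retrieved schema data can contain. The setting name, type, and allowable values shown here are placeholders; the real list is vendor-specific and typically much longer.

[source,yaml]
----
# Trimmed sketch only: real schemas list many vendor-specific settings.
ProcTurboMode:                 # example BIOS setting name
  attribute_type: Enumeration  # one of Enumeration, Integer, String, Boolean
  allowable_values:
  - Enabled
  - Disabled
  read_only: false             # the setting can be changed through HostFirmwareSettings
----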
diff --git a/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc b/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc deleted file mode 100644 index 6a6c3e2b56b2..000000000000 --- a/modules/bmo-getting-the-hostfirmwaresettings-resource.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -[id="getting-the-hostfirmwaresettings-resource_{context}"] -= Getting the HostFirmwareSettings resource - -The `HostFirmwareSettings` resource contains the vendor-specific BIOS properties of a physical host. You must get the `HostFirmwareSettings` resource for a physical host to review its BIOS properties. - -.Procedure - -. Get the detailed list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api -o yaml ----- -+ -[NOTE] -==== -You can use `hostfirmwaresettings` as the long form of `hfs` with the `oc get` command. -==== - -. Get the list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Get the `HostFirmwareSettings` resource for a particular host: -+ -[source,terminal] ----- -$ oc get hfs <host_name> -n openshift-machine-api -o yaml ----- -+ -Where `<host_name>` is the name of the host. diff --git a/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc b/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc deleted file mode 100644 index a358314edcca..000000000000 --- a/modules/bmo-verifying-the-hostfirmware-settings-resource-is-valid.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc - -:_content-type: PROCEDURE -[id="verifying-the-hostfirmware-settings-resource-is-valid_{context}"] -= Verifying the HostFirmwareSettings resource is valid - -When the user edits the `spec.settings` section to make a change to the `HostFirmwareSettings` (HFS) resource, the Bare Metal Operator (BMO) validates the change against the `FirmwareSchema` resource, which is a read-only resource. If the setting is invalid, the BMO will set the `Type` value of the `status.Condition` setting to `False` and also generate an event and store it in the HFS resource. Use the following procedure to verify that the resource is valid. - -.Procedure - -. Get a list of `HostFirmwareSettings` resources: -+ -[source,terminal] ----- -$ oc get hfs -n openshift-machine-api ----- - -. Verify that the `HostFirmwareSettings` resource for a particular host is valid: -+ -[source,terminal] ----- -$ oc describe hfs <host_name> -n openshift-machine-api ----- -+ -Where `<host_name>` is the name of the host. -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ValidationFailed 2m49s metal3-hostfirmwaresettings-controller Invalid BIOS setting: Setting ProcTurboMode is invalid, unknown enumeration value - Foo ----- -+ -[IMPORTANT] -==== -If the response returns `ValidationFailed`, there is an error in the resource configuration and you must update the values to conform to the `FirmwareSchema` resource.
-==== diff --git a/modules/bootstrap-aws-load-balancer-operator.adoc b/modules/bootstrap-aws-load-balancer-operator.adoc deleted file mode 100644 index 328f354d15b5..000000000000 --- a/modules/bootstrap-aws-load-balancer-operator.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-bootstra-albo-on-sts-cluster_{context}"] -= Bootstrapping AWS Load Balancer Operator on Security Token Service cluster - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. Create the `aws-load-balancer-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc create namespace aws-load-balancer-operator ----- - -. Download the `CredentialsRequest` custom resource (CR) of the AWS Load Balancer Operator, and create a directory to store it by running the following command: -+ -[source,terminal] ----- -$ curl --create-dirs -o /cr.yaml https://raw.githubusercontent.com/openshift/aws-load-balancer-operator/main/hack/operator-credentials-request.yaml ----- - -. Use the `ccoctl` tool to process `CredentialsRequest` objects of the AWS Load Balancer Operator, by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name --region= \ - --credentials-requests-dir= \ - --identity-provider-arn ----- - -. Apply the secrets generated in the manifests directory of your cluster by running the following command: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Verify that the credentials secret of the AWS Load Balancer Operator is created by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get secret aws-load-balancer-operator --template='{{index .data "credentials"}}' | base64 -d ----- -+ -.Example output -[source,terminal] ----- -[default] -sts_regional_endpoints = regional -role_arn = arn:aws:iam::999999999999:role/aws-load-balancer-operator-aws-load-balancer-operator -web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token ----- diff --git a/modules/bound-sa-tokens-about.adoc b/modules/bound-sa-tokens-about.adoc deleted file mode 100644 index 03114f2b9277..000000000000 --- a/modules/bound-sa-tokens-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: CONCEPT -[id="bound-sa-tokens-about_{context}"] -= About bound service account tokens - -You can use bound service account tokens to limit the scope of permissions for a given service account token. These tokens are audience and time-bound. This facilitates the authentication of a service account to an IAM role and the generation of temporary credentials mounted to a pod. You can request bound service account tokens by using volume projection and the TokenRequest API. 
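As a quick illustration before the detailed procedures that follow, a bound token with an explicit audience and lifetime can be requested with a command along the following lines; the service account name `build-robot` and the audience `vault` are example values:

[source,terminal]
----
$ oc create token build-robot --audience=vault --duration=3600s
----

The printed JWT carries the requested audience in its `aud` claim and expires after the requested duration.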
diff --git a/modules/bound-sa-tokens-configuring-externally.adoc b/modules/bound-sa-tokens-configuring-externally.adoc deleted file mode 100644 index 5b26a8f1c045..000000000000 --- a/modules/bound-sa-tokens-configuring-externally.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: PROCEDURE -[id="bound-sa-tokens-configuring-externally_{context}"] -= Creating bound service account tokens outside the pod - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created a service account. This procedure assumes that the service account is named `build-robot`. - -.Procedure - -* Create the bound service account token outside the pod by running the following command: -+ -[source,terminal] ----- -$ oc create token build-robot ----- -+ -.Example output -[source,terminal] ----- -eyJhbGciOiJSUzI1NiIsImtpZCI6IkY2M1N4MHRvc2xFNnFSQlA4eG9GYzVPdnN3NkhIV0tRWmFrUDRNcWx4S0kifQ.eyJhdWQiOlsiaHR0cHM6Ly9pc3N1ZXIyLnRlc3QuY29tIiwiaHR0cHM6Ly9pc3N1ZXIxLnRlc3QuY29tIiwiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTY3OTU0MzgzMCwiaWF0IjoxNjc5NTQwMjMwLCJpc3MiOiJodHRwczovL2lzc3VlcjIudGVzdC5jb20iLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoidGVzdC1zYSIsInVpZCI6ImM3ZjA4MjkwLWIzOTUtNGM4NC04NjI4LTMzMTM1NTVhNWY1OSJ9fSwibmJmIjoxNjc5NTQwMjMwLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDp0ZXN0LXNhIn0.WyAOPvh1BFMUl3LNhBCrQeaB5wSynbnCfojWuNNPSilT4YvFnKibxwREwmzHpV4LO1xOFZHSi6bXBOmG_o-m0XNDYL3FrGHd65mymiFyluztxa2lgHVxjw5reIV5ZLgNSol3Y8bJqQqmNg3rtQQWRML2kpJBXdDHNww0E5XOypmffYkfkadli8lN5QQD-MhsCbiAF8waCYs8bj6V6Y7uUKTcxee8sCjiRMVtXKjQtooERKm-CH_p57wxCljIBeM89VdaR51NJGued4hVV5lxvVrYZFu89lBEAq4oyQN_d6N1vBWGXQMyoihnt_fQjn-NfnlJWk-3NSZDIluDJAv7e-MTEk3geDrHVQKNEzDei2-Un64hSzb-n1g1M0Vn0885wQBQAePC9UlZm8YZlMNk1tq6wIUKQTMv3HPfi5HtBRqVc2eVs0EfMX4-x-PHhPCasJ6qLJWyj6DvyQ08dP4DW_TWZVGvKlmId0hzwpg59TTcLR0iCklSEJgAVEEd13Aa_M0-faD11L3MhUGxw0qxgOsPczdXUsolSISbefs7OKymzFSIkTAn9sDQ8PHMOsuyxsK8vzfrR-E0z7MAeguZ2kaIY7cZqbN6WFy0caWgx46hrKem9vCKALefElRYbCg3hcBmowBcRTOqaFHLNnHghhU1LaRpoFzH7OUarqX9SGQ ----- diff --git a/modules/bound-sa-tokens-configuring.adoc b/modules/bound-sa-tokens-configuring.adoc deleted file mode 100644 index 7c544dcab394..000000000000 --- a/modules/bound-sa-tokens-configuring.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/bound-service-account-tokens.adoc - -:_content-type: PROCEDURE -[id="bound-sa-tokens-configuring_{context}"] -= Configuring bound service account tokens using volume projection - -You can configure pods to request bound service account tokens by using volume projection. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created a service account. This procedure assumes that the service account is named `build-robot`. - -.Procedure - -. Optional: Set the service account issuer. -+ -This step is typically not required if the bound tokens are used only within the cluster. -+ -[IMPORTANT] -==== -If you change the service account issuer to a custom one, the previous service account issuer is still trusted for the next 24 hours. - -You can force all holders to request a new bound token either by manually restarting all pods in the cluster or by performing a rolling node restart. 
Before performing either action, wait for a new revision of the Kubernetes API server pods to roll out with your service account issuer changes. -==== - -.. Edit the `cluster` `Authentication` object: -+ -[source,terminal] ----- -$ oc edit authentications cluster ----- - -.. Set the `spec.serviceAccountIssuer` field to the desired service account issuer value: -+ -[source,yaml] ----- -spec: - serviceAccountIssuer: https://test.default.svc <1> ----- -<1> This value should be a URL from which the recipient of a bound token can source the public keys necessary to verify the signature of the token. The default is [x-]`https://kubernetes.default.svc`. - -.. Save the file to apply the changes. - -.. Wait for a new revision of the Kubernetes API server pods to roll out. It can take several minutes for all nodes to update to the new revision. Run the following command: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for the Kubernetes API server to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 12 <1> ----- -<1> In this example, the latest revision number is `12`. -+ -If the output shows a message similar to one of the following messages, the update is still in progress. Wait a few minutes and try again. - -** `3 nodes are at revision 11; 0 nodes have achieved new revision 12` -** `2 nodes are at revision 11; 1 nodes are at revision 12` - -.. Optional: Force the holder to request a new bound token either by performing a rolling node restart or by manually restarting all pods in the cluster. - -*** Perform a rolling node restart: -+ -[WARNING] -==== -It is not recommended to perform a rolling node restart if you have custom workloads running on your cluster, because it can cause a service interruption. Instead, manually restart all pods in the cluster. -==== -+ -Restart nodes sequentially. Wait for the node to become fully available before restarting the next node. See _Rebooting a node gracefully_ for instructions on how to drain, restart, and mark a node as schedulable again. - -*** Manually restart all pods in the cluster: -+ -[WARNING] -==== -Be aware that running this command causes a service interruption, because it deletes every running pod in every namespace. These pods will automatically restart after they are deleted. -==== -+ -Run the following command: -+ -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- - -. Configure a pod to use a bound service account token by using volume projection. - -.. Create a file called `pod-projected-svc-token.yaml` with the following contents: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nginx -spec: - containers: - - image: nginx - name: nginx - volumeMounts: - - mountPath: /var/run/secrets/tokens - name: vault-token - serviceAccountName: build-robot <1> - volumes: - - name: vault-token - projected: - sources: - - serviceAccountToken: - path: vault-token <2> - expirationSeconds: 7200 <3> - audience: vault <4> ----- -<1> A reference to an existing service account. -<2> The path relative to the mount point of the file to project the token into. 
-<3> Optionally set the expiration of the service account token, in seconds. The default is 3600 seconds (1 hour) and must be at least 600 seconds (10 minutes). The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. -<4> Optionally set the intended audience of the token. The recipient of a token should verify that the recipient identity matches the audience claim of the token, and should otherwise reject the token. The audience defaults to the identifier of the API server. - -.. Create the pod: -+ -[source,terminal] ----- -$ oc create -f pod-projected-svc-token.yaml ----- -+ -The kubelet requests and stores the token on behalf of the pod, makes the token available to the pod at a configurable file path, and refreshes the token as it approaches expiration. - -. The application that uses the bound token must handle reloading the token when it rotates. -+ -The kubelet rotates the token if it is older than 80 percent of its time to live, or if the token is older than 24 hours. diff --git a/modules/build-image-docker.adoc b/modules/build-image-docker.adoc deleted file mode 100644 index 9c6a6cabd400..000000000000 --- a/modules/build-image-docker.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="build-image-with-docker_{context}"] -= Build an image with Docker - -To deploy your plugin on a cluster, you need to build an image and push it to an image registry. - -.Procedure - -. Build the image with the following command: -+ -[source,terminal] ----- -$ docker build -t quay.io/my-repositroy/my-plugin:latest . ----- - -. Optional: If you want to test your image, run the following command: -+ -[source,terminal] ----- -$ docker run -it --rm -d -p 9001:80 quay.io/my-repository/my-plugin:latest ----- - -. Push the image by running the following command: -+ -[source,terminal] ----- -$ docker push quay.io/my-repository/my-plugin:latest ----- diff --git a/modules/building-memcached-operator-using-osdk.adoc b/modules/building-memcached-operator-using-osdk.adoc deleted file mode 100644 index 5406c77dec65..000000000000 --- a/modules/building-memcached-operator-using-osdk.adoc +++ /dev/null @@ -1,443 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-getting-started.adoc - -[id="building-memcached-operator-using-osdk_{context}"] -= Building a Go-based Operator using the Operator SDK - -This procedure walks through an example of building a simple Memcached Operator using tools and libraries provided by the SDK. - -.Prerequisites - -- Operator SDK CLI installed on the development workstation -- Operator Lifecycle Manager (OLM) installed on a Kubernetes-based cluster (v1.8 -or above to support the `apps/v1beta2` API group), for example {product-title} {product-version} -- Access to the cluster using an account with `cluster-admin` permissions -- OpenShift CLI (`oc`) v{product-version}+ installed - -.Procedure - -. *Create a new project.* -+ -Use the CLI to create a new `memcached-operator` project: -+ -[source,terminal] ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ cd $GOPATH/src/github.com/example-inc/ ----- -+ -[source,terminal] ----- -$ operator-sdk new memcached-operator ----- -+ -[source,terminal] ----- -$ cd memcached-operator ----- - -. *Add a new custom resource definition (CRD).* - -.. 
Use the CLI to add a new CRD API called `Memcached`, with `APIVersion` set to `cache.example.com/v1alpha1` and `Kind` set to `Memcached`: -+ -[source,terminal] ----- -$ operator-sdk add api \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds the Memcached resource API under `pkg/apis/cache/v1alpha1/`. - -.. Modify the spec and status of the `Memcached` custom resource (CR) at the `pkg/apis/cache/v1alpha1/memcached_types.go` file: -+ -[source,go] ----- -type MemcachedSpec struct { - // Size is the size of the memcached deployment - Size int32 `json:"size"` -} -type MemcachedStatus struct { - // Nodes are the names of the memcached pods - Nodes []string `json:"nodes"` -} ----- - -.. After modifying the `*_types.go` file, always run the following command to update the generated code for that resource type: -+ -[source,terminal] ----- -$ operator-sdk generate k8s ----- - -. *Optional: Add custom validation to your CRD.* -+ -OpenAPI v3.0 schemas are added to CRD manifests in the `spec.validation` block when the manifests are generated. This validation block allows Kubernetes to validate the properties in a Memcached CR when it is created or updated. -+ -Additionally, a `pkg/apis///zz_generated.openapi.go` file is generated. This file contains the Go representation of this validation block if the `+k8s:openapi-gen=true` annotation is present above the `Kind` type declaration, which is present by default. This auto-generated code is the OpenAPI model of your Go `Kind` type, from which you can create a full OpenAPI Specification and generate a client. -+ -As an Operator author, you can use Kubebuilder markers (annotations) to configure custom validations for your API. These markers must always have a `+kubebuilder:validation` prefix. For example, adding an enum-type specification can be done by adding the following marker: -+ -[source,go] ----- -// +kubebuilder:validation:Enum=Lion;Wolf;Dragon -type Alias string ----- -+ -Usage of markers in API code is discussed in the Kubebuilder link:https://book.kubebuilder.io/reference/generating-crd.html[Generating CRDs] and link:https://book.kubebuilder.io/reference/markers.html[Markers for Config/Code Generation] documentation. A full list of OpenAPIv3 validation markers is also available in the Kubebuilder link:https://book.kubebuilder.io/reference/markers/crd-validation.html[CRD Validation] documentation. -+ -If you add any custom validations, run the following command to update the OpenAPI validation section in the `deploy/crds/cache.example.com_memcacheds_crd.yaml` file for the CRD: -+ -[source,terminal] ----- -$ operator-sdk generate crds ----- -+ -.Example generated YAML -[source,yaml] ----- -spec: - validation: - openAPIV3Schema: - properties: - spec: - properties: - size: - format: int32 - type: integer ----- - -. *Add a new controller.* - -.. Add a new controller to the project to watch and reconcile the `Memcached` resource: -+ -[source,terminal] ----- -$ operator-sdk add controller \ - --api-version=cache.example.com/v1alpha1 \ - --kind=Memcached ----- -+ -This scaffolds a new controller implementation under `pkg/controller/memcached/`. - -.. For this example, replace the generated controller file `pkg/controller/memcached/memcached_controller.go` with the link:https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl[example implementation].
-+ -The example controller executes the following reconciliation logic for each `Memcached` resource: -+ --- -* Create a Memcached deployment if it does not exist. -* Ensure that the Deployment size is the same as specified by the `Memcached` CR spec. -* Update the `Memcached` resource status with the names of the Memcached pods. --- -+ -The next two sub-steps inspect how the controller watches resources and how the reconcile loop is triggered. You can skip these steps to go directly to building and running the Operator. - -.. Inspect the controller implementation at the `pkg/controller/memcached/memcached_controller.go` file to see how the controller watches resources. -+ -The first watch is for the `Memcached` type as the primary resource. For each add, update, or delete event, the reconcile loop is sent a reconcile `Request` (a `:` key) for that `Memcached` object: -+ -[source,go] ----- -err := c.Watch( - &source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) ----- -+ -The next watch is for `Deployment` objects, but the event handler maps each event to a reconcile `Request` for the owner of the deployment. In this case, this is the `Memcached` object for which the deployment was created. This allows the controller to watch deployments as a secondary resource: -+ -[source,go] ----- -err := c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) ----- - -.. Every controller has a `Reconciler` object with a `Reconcile()` method that implements the reconcile loop. The reconcile loop is passed the `Request` argument which is a `:` key used to lookup the primary resource object, `Memcached`, from the cache: -+ -[source,go] ----- -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Lookup the Memcached instance for this reconcile request - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - ... -} ----- -+ -Based on the return value of the `Reconcile()` function, the reconcile `Request` might be requeued, and the loop might be triggered again: -+ -[source,go] ----- -// Reconcile successful - don't requeue -return reconcile.Result{}, nil -// Reconcile failed due to error - requeue -return reconcile.Result{}, err -// Requeue for any reason other than error -return reconcile.Result{Requeue: true}, nil ----- -[id="building-memcached-operator-using-osdk-build-and-run_{context}"] - -. *Build and run the Operator.* - -.. Before running the Operator, the CRD must be registered with the Kubernetes API server: -+ -[source,terminal] ----- -$ oc create \ - -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- - -.. After registering the CRD, there are two options for running the Operator: -+ --- -* As a Deployment inside a Kubernetes cluster -* As Go program outside a cluster --- -+ -Choose one of the following methods. - -... _Option A:_ Running as a deployment inside the cluster. - -.... Build the `memcached-operator` image and push it to a registry: -+ -[source,terminal] ----- -$ operator-sdk build quay.io/example/memcached-operator:v0.0.1 ----- - -.... The deployment manifest is generated at `deploy/operator.yaml`. Update the deployment image as follows since the default is just a placeholder: -+ -[source,terminal] ----- -$ sed -i 's|REPLACE_IMAGE|quay.io/example/memcached-operator:v0.0.1|g' deploy/operator.yaml ----- - -.... 
Ensure you have an account on link:https://quay.io[Quay.io] for the next step, or substitute your preferred container registry. On the registry, link:https://quay.io/new/[create a new public image] repository named `memcached-operator`. - -.... Push the image to the registry: -+ -[source,terminal] ----- -$ podman push quay.io/example/memcached-operator:v0.0.1 ----- - -.... Set up RBAC and create the `memcached-operator` manifests: -+ -[source,terminal] ----- -$ oc create -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/service_account.yaml ----- -+ -[source,terminal] ----- -$ oc create -f deploy/operator.yaml ----- - -.... Verify that the `memcached-operator` deploy is up and running: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 1m ----- - -... _Option B:_ Running locally outside the cluster. -+ -This method is preferred during development cycle to deploy and test faster. -+ -Run the Operator locally with the default Kubernetes configuration file present at `$HOME/.kube/config`: -+ -[source,terminal] ----- -$ operator-sdk run --local --namespace=default ----- -+ -You can use a specific `kubeconfig` using the flag `--kubeconfig=`. - -. *Verify that the Operator can deploy a Memcached application* by creating a `Memcached` CR. - -.. Create the example `Memcached` CR that was generated at `deploy/crds/cache_v1alpha1_memcached_cr.yaml`. - -.. View the file: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 3 ----- - -.. Create the object: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. Ensure that `memcached-operator` creates the deployment for the CR: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 2m -example-memcached 3 3 3 3 1m ----- - -.. Check the pods and CR to confirm the CR status is updated with the pod names: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -example-memcached-6fd7c98d8-7dqdr 1/1 Running 0 1m -example-memcached-6fd7c98d8-g5k7v 1/1 Running 0 1m -example-memcached-6fd7c98d8-m7vn7 1/1 Running 0 1m -memcached-operator-7cc7cfdf86-vvjqk 1/1 Running 0 2m ----- -+ -[source,terminal] ----- -$ oc get memcached/example-memcached -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: cache.example.com/v1alpha1 -kind: Memcached -metadata: - clusterName: "" - creationTimestamp: 2018-03-31T22:51:08Z - generation: 0 - name: example-memcached - namespace: default - resourceVersion: "245453" - selfLink: /apis/cache.example.com/v1alpha1/namespaces/default/memcacheds/example-memcached - uid: 0026cc97-3536-11e8-bd83-0800274106a1 -spec: - size: 3 -status: - nodes: - - example-memcached-6fd7c98d8-7dqdr - - example-memcached-6fd7c98d8-g5k7v - - example-memcached-6fd7c98d8-m7vn7 ----- - -. *Verify that the Operator can manage a deployed Memcached application* by updating the size of the deployment. - -.. 
Change the `spec.size` field in the `memcached` CR from `3` to `4`: -+ -[source,terminal] ----- -$ cat deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 4 ----- - -.. Apply the change: -+ -[source,terminal] ----- -$ oc apply -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- - -.. Confirm that the Operator changes the deployment size: -+ -[source,terminal] ----- -$ oc get deployment ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-memcached 4 4 4 4 5m ----- - -. *Clean up the resources:* -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_cr.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/crds/cache_v1alpha1_memcached_crd.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/operator.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/role_binding.yaml ----- -+ -[source,terminal] ----- -$ oc delete -f deploy/service_account.yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information about OpenAPI v3.0 validation schemas in CRDs, refer to the link:https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema[Kubernetes documentation]. diff --git a/modules/builds-about.adoc b/modules/builds-about.adoc deleted file mode 100644 index 71dbee2838f5..000000000000 --- a/modules/builds-about.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -//*builds/understanding-image-builds - - -[id="builds-about_{context}"] -= Builds - -A build is the process of transforming input parameters into a resulting object. Most often, the process is used to transform input parameters or source code into a runnable image. A `BuildConfig` object is the definition of the entire build process. - -{product-title} uses Kubernetes by creating containers from build images and pushing them to a container image registry. - -Build objects share common characteristics including inputs for a build, the requirement to complete a build process, logging the build process, publishing resources from successful builds, and publishing the final status of the build. Builds take advantage of resource restrictions, specifying limitations on resources such as CPU usage, memory usage, and build or pod execution time. - -ifdef::openshift-origin,openshift-enterprise[] -The {product-title} build system provides extensible support for build strategies that are based on selectable types specified in the build API. There are three primary build strategies available: - -* Docker build -* Source-to-image (S2I) build -* Custom build - -By default, docker builds and S2I builds are supported. -endif::[] - -The resulting object of a build depends on the builder used to create it. For docker and S2I builds, the resulting objects are runnable images. For custom builds, the resulting objects are whatever the builder image author has specified. 
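To illustrate the resource restrictions mentioned above, the following fragment is a minimal sketch (the name `sample-build` and the specific values are placeholders): it caps the CPU and memory available to builds created from this configuration, and it bounds total build execution time through the `completionDeadlineSeconds` field of the `BuildConfig` spec:

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: sample-build
spec:
  completionDeadlineSeconds: 1800
  resources:
    limits:
      cpu: "500m"
      memory: 1Gi
----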
- -Additionally, the pipeline build strategy can be used to implement sophisticated -workflows: - -* Continuous integration -* Continuous deployment diff --git a/modules/builds-adding-input-secrets-configmaps.adoc b/modules/builds-adding-input-secrets-configmaps.adoc deleted file mode 100644 index 1b52c52f7643..000000000000 --- a/modules/builds-adding-input-secrets-configmaps.adoc +++ /dev/null @@ -1,123 +0,0 @@ -:_content-type: PROCEDURE -[id="builds-adding-input-secrets-configmaps_{context}"] -= Adding input secrets and config maps - -To provide credentials and other configuration data to a build without placing them in source control, you can define input secrets and input config maps. - -In some scenarios, build operations require credentials or other configuration data to access dependent resources. To make that information available without placing it in source control, you can define input secrets and input config maps. - -.Procedure - -To add an input secret, config maps, or both to an existing `BuildConfig` object: - -. Create the `ConfigMap` object, if it does not exist: -+ -[source,terminal] ----- -$ oc create configmap settings-mvn \ - --from-file=settings.xml= ----- -+ -This creates a new config map named `settings-mvn`, which contains the plain text content of the `settings.xml` file. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: -[source,yaml] ----- -apiVersion: core/v1 -kind: ConfigMap -metadata: - name: settings-mvn -data: - settings.xml: | - - … # Insert maven settings here - ----- -==== - - -. Create the `Secret` object, if it does not exist: -+ -[source,terminal] ----- -$ oc create secret generic secret-mvn \ - --from-file=ssh-privatekey= - --type=kubernetes.io/ssh-auth ----- -+ -This creates a new secret named `secret-mvn`, which contains the base64 encoded content of the `id_rsa` private key. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the input secret: -[source,yaml] ----- -apiVersion: core/v1 -kind: Secret -metadata: - name: secret-mvn -type: kubernetes.io/ssh-auth -data: - ssh-privatekey: | - # Insert ssh private key, base64 encoded ----- -==== - -. Add the config map and secret to the `source` section in the existing -`BuildConfig` object: -+ -[source,yaml] ----- -source: - git: - uri: https://github.com/wildfly/quickstart.git - contextDir: helloworld - configMaps: - - configMap: - name: settings-mvn - secrets: - - secret: - name: secret-mvn ----- - -To include the secret and config map in a new `BuildConfig` object, run the following command: - -[source,terminal] ----- -$ oc new-build \ - openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git \ - --context-dir helloworld --build-secret “secret-mvn” \ - --build-config-map "settings-mvn" ----- - -During the build, the `settings.xml` and `id_rsa` files are copied into the directory where the source code is located. In {product-title} S2I builder images, this is the image working directory, which is set using the `WORKDIR` instruction in the `Dockerfile`. 
If you want to specify another directory, add a `destinationDir` to the definition: - -[source,yaml] ----- -source: - git: - uri: https://github.com/wildfly/quickstart.git - contextDir: helloworld - configMaps: - - configMap: - name: settings-mvn - destinationDir: ".m2" - secrets: - - secret: - name: secret-mvn - destinationDir: ".ssh" ----- - -You can also specify the destination directory when creating a new `BuildConfig` object: - -[source,terminal] ----- -$ oc new-build \ - openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git \ - --context-dir helloworld --build-secret “secret-mvn:.ssh” \ - --build-config-map "settings-mvn:.m2" ----- - -In both cases, the `settings.xml` file is added to the `./.m2` directory of the build environment, and the `id_rsa` key is added to the `./.ssh` directory. diff --git a/modules/builds-adding-source-clone-secrets.adoc b/modules/builds-adding-source-clone-secrets.adoc deleted file mode 100644 index 8102276ddf00..000000000000 --- a/modules/builds-adding-source-clone-secrets.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-adding-source-clone-secrets_{context}"] -= Source Clone Secrets - -Builder pods require access to any Git repositories defined as source for a build. Source clone secrets are used to provide the builder pod with access it would not normally have access to, such as private repositories or repositories with self-signed or untrusted SSL certificates. - -The following source clone secret configurations are supported: - -* .gitconfig File -* Basic Authentication -* SSH Key Authentication -* Trusted Certificate Authorities - -[NOTE] -==== -You can also use combinations of these configurations to meet your specific needs. -==== diff --git a/modules/builds-assigning-builds-to-nodes.adoc b/modules/builds-assigning-builds-to-nodes.adoc deleted file mode 100644 index bfdc08422f83..000000000000 --- a/modules/builds-assigning-builds-to-nodes.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-assigning-builds-to-nodes_{context}"] -= Assigning builds to specific nodes - -Builds can be targeted to run on specific nodes by specifying labels in the `nodeSelector` field of a build configuration. The `nodeSelector` value is a set of key-value pairs that are matched to `Node` labels when scheduling the build pod. - -The `nodeSelector` value can also be controlled by cluster-wide default and override values. Defaults will only be applied if the build configuration does not define any key-value pairs for the `nodeSelector` and also does not define an explicitly empty map value of `nodeSelector:{}`. Override values will replace values in the build configuration on a key by key basis. - -//See Configuring Global Build Defaults and Overrides for more information. - -[NOTE] -==== -If the specified `NodeSelector` cannot be matched to a node with those labels, the build still stay in the `Pending` state indefinitely. 
-==== - -.Procedure - -* Assign builds to run on specific nodes by assigning labels in the `nodeSelector` field of the `BuildConfig`, for example: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - nodeSelector:<1> - key1: value1 - key2: value2 ----- -<1> Builds associated with this build configuration will run only on nodes with the `key1=value1` and `key2=value2` labels. diff --git a/modules/builds-automatically-add-source-clone-secrets.adoc deleted file mode 100644 index 9be83d18d409..000000000000 --- a/modules/builds-automatically-add-source-clone-secrets.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-automatically-add-source-clone-secrets_{context}"] -= Automatically adding a source clone secret to a build configuration - -When a `BuildConfig` is created, {product-title} can automatically populate its source clone secret reference. This behavior allows the resulting builds to automatically use the credentials stored in the referenced secret to authenticate to a remote Git repository, without requiring further configuration. - -To use this functionality, a secret containing the Git repository credentials must exist in the namespace in which the `BuildConfig` is later created. This secret must include one or more annotations prefixed with `build.openshift.io/source-secret-match-uri-`. The value of each of these annotations is a Uniform Resource Identifier (URI) pattern, which is defined as follows. When a `BuildConfig` is created without a source clone secret reference and its Git source URI matches a URI pattern in a secret annotation, {product-title} automatically inserts a reference to that secret in the `BuildConfig`. - -.Prerequisites - -A URI pattern must consist of: - -* A valid scheme: `*://`, `git://`, `http://`, `https://` or `ssh://` -* A host: `\*` or a valid hostname or IP address optionally preceded by `*.` -* A path: `/\*` or `/` followed by any characters optionally including `*` characters - -In all of the above, a `*` character is interpreted as a wildcard. - -[IMPORTANT] -==== -URI patterns must match Git source URIs which are conformant to link:https://www.ietf.org/rfc/rfc3986.txt[RFC3986]. Do not include a username (or password) component in a URI pattern. - -For example, if you use `ssh://git@bitbucket.atlassian.com:7999/ATLASSIAN jira.git` for a git repository URL, the source secret must be specified as `pass:c[ssh://bitbucket.atlassian.com:7999/*]` (and not `pass:c[ssh://git@bitbucket.atlassian.com:7999/*]`). - -[source,terminal] ----- -$ oc annotate secret mysecret \ - 'build.openshift.io/source-secret-match-uri-1=ssh://bitbucket.atlassian.com:7999/*' ----- - -==== - -.Procedure - -If multiple secrets match the Git URI of a particular `BuildConfig`, {product-title} selects the secret with the longest match. This allows for basic overriding, as in the following example. - -The following fragment shows two partial source clone secrets, the first matching any server in the domain `mycorp.com` accessed by HTTPS, and the second overriding access to servers `mydev1.mycorp.com` and `mydev2.mycorp.com`: - -[source,yaml] ----- -kind: Secret -apiVersion: v1 -metadata: - name: matches-all-corporate-servers-https-only - annotations: - build.openshift.io/source-secret-match-uri-1: https://*.mycorp.com/* -data: - ...
---- -kind: Secret -apiVersion: v1 -metadata: - name: override-for-my-dev-servers-https-only - annotations: - build.openshift.io/source-secret-match-uri-1: https://mydev1.mycorp.com/* - build.openshift.io/source-secret-match-uri-2: https://mydev2.mycorp.com/* -data: - ... ----- - -* Add a `build.openshift.io/source-secret-match-uri-` annotation to a pre-existing secret using: -+ -[source,terminal] ----- -$ oc annotate secret mysecret \ - 'build.openshift.io/source-secret-match-uri-1=https://*.mycorp.com/*' ----- diff --git a/modules/builds-basic-access-build-logs.adoc b/modules/builds-basic-access-build-logs.adoc deleted file mode 100644 index 2d5c743cb2b6..000000000000 --- a/modules/builds-basic-access-build-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-build-logs_{context}"] -= Accessing build logs - -You can access build logs using the web console or the CLI. - -.Procedure - -* To stream the logs using the build directly, enter the following command: -+ -[source,terminal] ----- -$ oc describe build ----- diff --git a/modules/builds-basic-access-build-verbosity.adoc b/modules/builds-basic-access-build-verbosity.adoc deleted file mode 100644 index c969ea8da1c0..000000000000 --- a/modules/builds-basic-access-build-verbosity.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-build-verbosity_{context}"] -= Enabling log verbosity - -You can enable a more verbose output by passing the `BUILD_LOGLEVEL` environment variable as part of the `sourceStrategy` -ifdef::openshift-origin,openshift-enterprise[] -or `dockerStrategy` -endif::[] -in a `BuildConfig`. - -[NOTE] -==== -An administrator can set the default build verbosity for the entire {product-title} instance by configuring `env/BUILD_LOGLEVEL`. This default can be overridden by specifying `BUILD_LOGLEVEL` in a given `BuildConfig`. You can specify a higher priority override on the command line for non-binary builds by passing `--build-loglevel` to `oc start-build`. -==== - -Available log levels for source builds are as follows: - -[horizontal] -Level 0:: Produces output from containers running the `assemble` script and all encountered errors. This is the default. -Level 1:: Produces basic information about the executed process. -Level 2:: Produces very detailed information about the executed process. -Level 3:: Produces very detailed information about the executed process, and a listing of the archive contents. -Level 4:: Currently produces the same information as level 3. -Level 5:: Produces everything mentioned on previous levels and additionally provides docker push messages. - -.Procedure - -* To enable more verbose output, pass the `BUILD_LOGLEVEL` environment variable as part of the `sourceStrategy` -ifndef::openshift-online[] -or `dockerStrategy` -endif::[] -in a `BuildConfig`: -+ -[source,yaml] ----- -sourceStrategy: -... - env: - - name: "BUILD_LOGLEVEL" - value: "2" <1> ----- -<1> Adjust this value to the desired log level. 
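For reference, the command-line override mentioned in the note above can look like the following; this is a minimal sketch, and the build configuration name `sample-build` is a placeholder for your own non-binary build:

[source,terminal]
----
$ oc start-build sample-build --build-loglevel=5
----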
diff --git a/modules/builds-basic-access-buildconfig-logs.adoc b/modules/builds-basic-access-buildconfig-logs.adoc deleted file mode 100644 index c5eaa2386c74..000000000000 --- a/modules/builds-basic-access-buildconfig-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-buildconfig-logs_{context}"] -= Accessing BuildConfig logs - -You can access `BuildConfig` logs using the web console or the CLI. - -.Procedure - -* To stream the logs of the latest build for a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc logs -f bc/ ----- diff --git a/modules/builds-basic-access-buildconfig-version-logs.adoc b/modules/builds-basic-access-buildconfig-version-logs.adoc deleted file mode 100644 index 4dfe03f3ce88..000000000000 --- a/modules/builds-basic-access-buildconfig-version-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-access-buildconfig-version-logs_{context}"] -= Accessing BuildConfig logs for a given version build - -You can access logs for a given version build for a `BuildConfig` using the web console or the CLI. - -.Procedure - -* To stream the logs for a given version build for a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc logs --version= bc/ ----- diff --git a/modules/builds-basic-cancel-all-state.adoc b/modules/builds-basic-cancel-all-state.adoc deleted file mode 100644 index 9a6cac4782b2..000000000000 --- a/modules/builds-basic-cancel-all-state.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-all-state_{context}"] -= Canceling all builds in a given state - -You can cancel all builds in a given state, such as `new` or `pending`, while ignoring the builds in other states. - -.Procedure - -* To cancel all in a given state, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build bc/ ----- diff --git a/modules/builds-basic-cancel-all.adoc b/modules/builds-basic-cancel-all.adoc deleted file mode 100644 index 5fbfd37fdc1d..000000000000 --- a/modules/builds-basic-cancel-all.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-all_{context}"] -= Canceling all builds - -You can cancel all builds from the build configuration with the following CLI command. - -.Procedure - -* To cancel all builds, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build bc/ ----- diff --git a/modules/builds-basic-cancel-build.adoc b/modules/builds-basic-cancel-build.adoc deleted file mode 100644 index 3f34748c789c..000000000000 --- a/modules/builds-basic-cancel-build.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-build_{context}"] -= Canceling a build - -You can cancel a build using the web console, or with the following CLI command. 
- -.Procedure - -* To manually cancel a build, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build ----- diff --git a/modules/builds-basic-cancel-multiple.adoc b/modules/builds-basic-cancel-multiple.adoc deleted file mode 100644 index f116b33d4b15..000000000000 --- a/modules/builds-basic-cancel-multiple.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-cancel-multiple_{context}"] -= Canceling multiple builds - -You can cancel multiple builds with the following CLI command. - -.Procedure - -* To manually cancel multiple builds, enter the following command: -+ -[source,terminal] ----- -$ oc cancel-build ----- diff --git a/modules/builds-basic-delete-buildconfig.adoc b/modules/builds-basic-delete-buildconfig.adoc deleted file mode 100644 index c0297e357dca..000000000000 --- a/modules/builds-basic-delete-buildconfig.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-delete-buildconfig_{context}"] -= Deleting a BuildConfig - -You can delete a `BuildConfig` using the following command. - -.Procedure - -* To delete a `BuildConfig`, enter the following command: -+ -[source,terminal] ----- -$ oc delete bc ----- -+ -This also deletes all builds that were instantiated from this `BuildConfig`. - -* To delete a `BuildConfig` and keep the builds instatiated from the `BuildConfig`, specify the `--cascade=false` flag when you enter the following command: -+ -[source,terminal] ----- -$ oc delete --cascade=false bc ----- diff --git a/modules/builds-basic-edit-buildconfig.adoc b/modules/builds-basic-edit-buildconfig.adoc deleted file mode 100644 index 698f4139f838..000000000000 --- a/modules/builds-basic-edit-buildconfig.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-edit-buildconfig_{context}"] -= Editing a BuildConfig - -To edit your build configurations, you use the *Edit BuildConfig* option in the *Builds* view of the *Developer* perspective. - -You can use either of the following views to edit a `BuildConfig`: - -* The *Form view* enables you to edit your `BuildConfig` using the standard form fields and checkboxes. -* The *YAML view* enables you to edit your `BuildConfig` with full control over the operations. - -You can switch between the *Form view* and *YAML view* without losing any data. The data in the *Form view* is transferred to the *YAML view* and vice versa. - -.Procedure - -. In the *Builds* view of the *Developer* perspective, click the menu {kebab} to see the *Edit BuildConfig* option. -. Click *Edit BuildConfig* to see the *Form view* option. -. In the *Git* section, enter the Git repository URL for the codebase you want to use to create an application. The URL is then validated. -* Optional: Click *Show Advanced Git Options* to add details such as: -** *Git Reference* to specify a branch, tag, or commit that contains code you want to use to build the application. -** *Context Dir* to specify the subdirectory that contains code you want to use to build the application. -** *Source Secret* to create a *Secret Name* with credentials for pulling your source code from a private repository. -. In the *Build from* section, select the option that you would like to build from. 
You can use the following options: -** *Image Stream tag* references an image for a given image stream and tag. Enter the project, image stream, and tag of the location you would like to build from and push to. -** *Image Stream image* references an image for a given image stream and image name. Enter the image stream image you would like to build from. Also enter the project, image stream, and tag to push to. -** *Docker image*: The Docker image is referenced through a Docker image repository. You will also need to enter the project, image stream, and tag to refer to where you would like to push to. -. Optional: In the *Environment Variables* section, add the environment variables associated with the project by using the *Name* and *Value* fields. To add more environment variables, use *Add Value*, or *Add from ConfigMap* and *Secret* . -. Optional: To further customize your application, use the following advanced options: -Trigger:: -Triggers a new image build when the builder image changes. Add more triggers by clicking *Add Trigger* and selecting the *Type* and *Secret*. - -Secrets:: -Adds secrets for your application. Add more secrets by clicking *Add secret* and selecting the *Secret* and *Mount point*. - -Policy:: -Click *Run policy* to select the build run policy. The selected policy determines the order in which builds created from the build configuration must run. - -Hooks:: -Select *Run build hooks after image is built* to run commands at the end of the build and verify the image. Add *Hook type*, *Command*, and *Arguments* to append to the command. - -. Click *Save* to save the `BuildConfig`. diff --git a/modules/builds-basic-start-build.adoc b/modules/builds-basic-start-build.adoc deleted file mode 100644 index 15f093b5c24b..000000000000 --- a/modules/builds-basic-start-build.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-build_{context}"] -= Starting a build - -You can manually start a new build from an existing build configuration in your current project. - -.Procedure - -To manually start a build, enter the following command: - -[source,terminal] ----- -$ oc start-build ----- diff --git a/modules/builds-basic-start-environment-variable.adoc b/modules/builds-basic-start-environment-variable.adoc deleted file mode 100644 index f09305f58976..000000000000 --- a/modules/builds-basic-start-environment-variable.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-environment-variable_{context}"] -= Setting environment variables when starting a build - -You can specify the `--env` flag to set any desired environment variable for the build. - -.Procedure - -* To specify a desired environment variable, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --env== ----- diff --git a/modules/builds-basic-start-logs.adoc b/modules/builds-basic-start-logs.adoc deleted file mode 100644 index 4c4ec349f430..000000000000 --- a/modules/builds-basic-start-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-logs_{context}"] -= Streaming build logs - -You can specify the `--follow` flag to stream the build's logs in `stdout`. 
- -.Procedure - -* To manually stream a build's logs in `stdout`, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --follow ----- diff --git a/modules/builds-basic-start-re-run.adoc b/modules/builds-basic-start-re-run.adoc deleted file mode 100644 index 3a7690c1964b..000000000000 --- a/modules/builds-basic-start-re-run.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-re-run_{context}"] -= Re-running a build - -You can manually re-run a build using the `--from-build` flag. - -.Procedure - -* To manually re-run a build, enter the following command: -+ -[source,terminal] ----- -$ oc start-build --from-build= ----- diff --git a/modules/builds-basic-start-source.adoc b/modules/builds-basic-start-source.adoc deleted file mode 100644 index 598a7b46399b..000000000000 --- a/modules/builds-basic-start-source.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-start-source_{context}"] -= Starting a build with source - -Rather than relying on a Git source pull -ifdef::openshift-origin,openshift-enterprise[] -or a Dockerfile -endif::[] -for a build, you can also start a build by directly pushing your source, which could be the contents of a Git or SVN working directory, a set of pre-built binary artifacts you want to deploy, or a single file. This can be done by specifying one of the following options for the `start-build` command: - -[cols="1,2",options="header"] -|=== -|Option |Description - -|`--from-dir=` -|Specifies a directory that will be archived and used as a binary input for the build. - -|`--from-file=` -|Specifies a single file that will be the only file in the build source. The file is placed in the root of an empty directory with the same file name as the original file provided. - -|`--from-repo=` -|Specifies a path to a local repository to use as the binary input for a build. Add the `--commit` option to control which branch, tag, or commit is used for the build. -|=== - -When passing any of these options directly to the build, the contents are streamed to the build and override the current build source settings. - -[NOTE] -==== -Builds triggered from binary input will not preserve the source on the server, so rebuilds triggered by base image changes will use the source specified in the build configuration. -==== - -.Procedure - -* Start a build from a source using the following command to send the contents of a local Git repository as an archive from the tag `v2`: -+ -[source,terminal] ----- -$ oc start-build hello-world --from-repo=../hello-world --commit=v2 ----- diff --git a/modules/builds-basic-view-build-details.adoc b/modules/builds-basic-view-build-details.adoc deleted file mode 100644 index 8d32c5b7f846..000000000000 --- a/modules/builds-basic-view-build-details.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/basic-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-basic-view-build-details_{context}"] -= Viewing build details - -You can view build details with the web console or by using the `oc describe` CLI command. - -This displays information including: - -* The build source. -* The build strategy. -* The output destination. -* Digest of the image in the destination registry. -* How the build was created. 
- -If the build uses the -ifdef::openshift-origin,openshift-enterprise[] -`Docker` or -endif::[] -`Source` strategy, the `oc describe` output also includes information about the source revision used for the build, including the commit ID, author, committer, and message. - -.Procedure - -* To view build details, enter the following command: -+ -[source,terminal] ----- -$ oc describe build ----- diff --git a/modules/builds-binary-source.adoc b/modules/builds-binary-source.adoc deleted file mode 100644 index 25af1615c63b..000000000000 --- a/modules/builds-binary-source.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-binary-source_{context}"] -= Binary (local) source - -Streaming content from a local file system to the builder is called a `Binary` type build. The corresponding value of `BuildConfig.spec.source.type` is `Binary` for these builds. - -This source type is unique in that it is leveraged solely based on your use of the `oc start-build`. - -[NOTE] -==== -Binary type builds require content to be streamed from the local file system, so automatically triggering a binary type build, like an image change trigger, is not possible. This is because the binary files cannot be provided. Similarly, you cannot launch binary type builds from the web console. -==== - -To utilize binary builds, invoke `oc start-build` with one of these options: - -* `--from-file`: The contents of the file you specify are sent as a binary stream to the builder. You can also specify a URL to a file. Then, the builder stores the data in a file with the same name at the top of the build context. - -* `--from-dir` and `--from-repo`: The contents are archived and sent as a binary stream to the builder. Then, the builder extracts the contents of the archive within the build context directory. With `--from-dir`, you can also specify a URL to an archive, which is extracted. - -* `--from-archive`: The archive you specify is sent to the builder, where it is extracted within the build context directory. This option behaves the same as `--from-dir`; an archive is created on your host first, whenever the argument to these options is a directory. - -In each of the previously listed cases: - -* If your `BuildConfig` already has a `Binary` source type defined, it is effectively ignored and replaced by what the client sends. - -* If your `BuildConfig` has a `Git` source type defined, it is dynamically disabled, since `Binary` and `Git` are mutually exclusive, and the data in the binary stream provided to the builder takes precedence. - -Instead of a file name, you can pass a URL with HTTP or HTTPS schema to `--from-file` and `--from-archive`. When using `--from-file` with a URL, the name of the file in the builder image is determined by the `Content-Disposition` header sent by the web server, or the last component of the URL path if the header is not present. No form of authentication is supported and it is not possible to use custom TLS certificate or disable certificate validation. - -When using `oc new-build --binary=true`, the command ensures that the restrictions associated with binary builds are enforced. The resulting `BuildConfig` has a source type of `Binary`, meaning that the only valid way to run a build for this `BuildConfig` is to use `oc start-build` with one of the `--from` options to provide the requisite binary data. 
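To make the `--from` options above concrete, the following commands are a minimal sketch; the build configuration name `my-binary-app` and the local paths are placeholders for your own values. The first command archives and streams a local directory as the build input, and the second streams a single file, which becomes the only file in the build source:

[source,terminal]
----
$ oc start-build my-binary-app --from-dir=./build-output --follow
----

[source,terminal]
----
$ oc start-build my-binary-app --from-file=./app.jar
----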
- -ifndef::openshift-online[] -The Dockerfile and `contextDir` source options have special meaning with binary builds. - -Dockerfile can be used with any binary build source. If Dockerfile is used and the binary stream is an archive, its contents serve as a replacement Dockerfile to any Dockerfile in the archive. If Dockerfile is used with the `--from-file` argument, and the file argument is named Dockerfile, the value from Dockerfile replaces the value from the binary stream. -endif::[] - -In the case of the binary stream encapsulating extracted archive content, the value of the `contextDir` field is interpreted as a subdirectory within the archive, and, if valid, the builder changes into that subdirectory before executing the build. diff --git a/modules/builds-build-custom-builder-image.adoc b/modules/builds-build-custom-builder-image.adoc deleted file mode 100644 index 811a96a47180..000000000000 --- a/modules/builds-build-custom-builder-image.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-build-custom-builder-image_{context}"] -= Build custom builder image - -You can use {product-title} to build and push custom builder images to use in a custom strategy. - -.Prerequisites - -* Define all the inputs that will go into creating your new custom builder image. - -.Procedure - -. Define a `BuildConfig` object that will build your custom builder image: -+ -[source,terminal] ----- -$ oc new-build --binary --strategy=docker --name custom-builder-image ----- - -. From the directory in which you created your custom build image, run the build: -+ -[source,terminal] ----- -$ oc start-build custom-builder-image --from-dir . -F ----- -+ -After the build completes, your new custom builder image is available in your project in an image stream tag that is named `custom-builder-image:latest`. diff --git a/modules/builds-build-environment.adoc b/modules/builds-build-environment.adoc deleted file mode 100644 index 9477f016a7c4..000000000000 --- a/modules/builds-build-environment.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-build-environment_{context}"] -= Build environments - -As with pod environment variables, build environment variables can be defined in terms of references to other resources or variables using the Downward API. There are some exceptions, which are noted. - -You can also manage environment variables defined in the `BuildConfig` with the `oc set env` command. - -[NOTE] -==== -Referencing container resources using `valueFrom` in build environment variables is not supported as the references are resolved before the container is created. -==== diff --git a/modules/builds-build-hooks.adoc b/modules/builds-build-hooks.adoc deleted file mode 100644 index f2aa289b7be6..000000000000 --- a/modules/builds-build-hooks.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-build-hooks_{context}"] -= Build hooks - -Build hooks allow behavior to be injected into the build process. - -The `postCommit` field of a `BuildConfig` object runs commands inside a temporary container that is running the build output image. The hook is run immediately after the last layer of the image has been committed and before the image is pushed to a registry. 
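For example, a `postCommit` hook that runs a test suite can be declared directly in the `BuildConfig` spec. The following fragment is a minimal sketch, and `bundle exec rake test` stands in for whatever test runner your builder image provides:

[source,yaml]
----
postCommit:
  script: "bundle exec rake test"
----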
- -The current working directory is set to the image's `WORKDIR`, which is the default working directory of the container image. For most images, this is where the source code is located. - -The hook fails if the script or command returns a non-zero exit code or if starting the temporary container fails. When the hook fails it marks the build as failed and the image is not pushed to a registry. The reason for failing can be inspected by looking at the build logs. - -Build hooks can be used to run unit tests to verify the image before the build is marked complete and the image is made available in a registry. If all tests pass and the test runner returns with exit code `0`, the build is marked successful. In case of any test failure, the build is marked as failed. In all cases, the build log contains the output of the test runner, which can be used to identify failed tests. - -The `postCommit` hook is not only limited to running tests, but can be used for other commands as well. Since it runs in a temporary container, changes made by the hook do not persist, meaning that running the hook cannot affect the final image. This behavior allows for, among other uses, the installation and usage of test dependencies that are automatically discarded and are not present in the final image. diff --git a/modules/builds-build-pruning.adoc b/modules/builds-build-pruning.adoc deleted file mode 100644 index ceba6c9191a5..000000000000 --- a/modules/builds-build-pruning.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-build-pruning_{context}"] -= Pruning builds - -By default, builds that have completed their lifecycle are persisted indefinitely. You can limit the number of previous builds that are retained. - -.Procedure - -. Limit the number of previous builds that are retained by supplying a positive integer value for `successfulBuildsHistoryLimit` or `failedBuildsHistoryLimit` in your `BuildConfig`, for example: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - successfulBuildsHistoryLimit: 2 <1> - failedBuildsHistoryLimit: 2 <2> ----- -<1> `successfulBuildsHistoryLimit` will retain up to two builds with a status of `completed`. -<2> `failedBuildsHistoryLimit` will retain up to two builds with a status of `failed`, `canceled`, or `error`. - -. Trigger build pruning by one of the following actions: -+ -* Updating a build configuration. -* Waiting for a build to complete its lifecycle. - -Builds are sorted by their creation timestamp with the oldest builds being pruned first. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[NOTE] -==== -Administrators can manually prune builds using the 'oc adm' object pruning command. -==== -endif::[] diff --git a/modules/builds-build-run-policy.adoc b/modules/builds-build-run-policy.adoc deleted file mode 100644 index 216ce745586b..000000000000 --- a/modules/builds-build-run-policy.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -[id="builds-build-run-policy_{context}"] -= Build run policy - -The build run policy describes the order in which the builds created from the build configuration should run. This can be done by changing the value of the `runPolicy` field in the `spec` section of the `Build` specification. 
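As a short sketch of where the field lives, the following fragment sets the run policy on a build configuration named `sample-build` (a placeholder); the other supported values, `Parallel` and `SerialLatestOnly`, are discussed in the scenarios below:

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: sample-build
spec:
  runPolicy: "Serial"
----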
- -It is also possible to change the `runPolicy` value for existing build configurations, by: - -* Changing `Parallel` to `Serial` or `SerialLatestOnly` and triggering a new build from this configuration causes the new build to wait until all parallel builds complete as the serial build can only run alone. -* Changing `Serial` to `SerialLatestOnly` and triggering a new build causes cancellation of all existing builds in queue, except the currently running build and the most recently created build. The newest build runs next. diff --git a/modules/builds-buildconfig.adoc b/modules/builds-buildconfig.adoc deleted file mode 100644 index 86101dfb34d7..000000000000 --- a/modules/builds-buildconfig.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// * builds/understanding-builds.adoc - -:_content-type: REFERENCE -[id="builds-buildconfig_{context}"] -= BuildConfigs - -A build configuration describes a single build definition and a set of triggers for when a new build is created. Build configurations are defined by a `BuildConfig`, which is a REST object that can be used in a POST to the API server to create a new instance. - -A build configuration, or `BuildConfig`, is characterized by a build strategy -and one or more sources. The strategy determines the process, while the sources provide its input. - -Depending on how you choose to create your application using {product-title}, a `BuildConfig` is typically generated automatically for you if you use the web console or CLI, and it can be edited at any time. Understanding the parts that make up a `BuildConfig` and their available options can help if you choose to manually change your configuration later. - -The following example `BuildConfig` results in a new build every time a container image tag or the source code changes: - -.`BuildConfig` object definition -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: "ruby-sample-build" <1> -spec: - runPolicy: "Serial" <2> - triggers: <3> - - - type: "GitHub" - github: - secret: "secret101" - - type: "Generic" - generic: - secret: "secret101" - - - type: "ImageChange" - source: <4> - git: - uri: "https://github.com/openshift/ruby-hello-world" - strategy: <5> - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "ruby-20-centos7:latest" - output: <6> - to: - kind: "ImageStreamTag" - name: "origin-ruby-sample:latest" - postCommit: <7> - script: "bundle exec rake test" ----- -<1> This specification creates a new `BuildConfig` named `ruby-sample-build`. -<2> The `runPolicy` field controls whether builds created from this build configuration can be run simultaneously. The default value is `Serial`, which means new builds run sequentially, not simultaneously. -<3> You can specify a list of triggers, which cause a new build to be created. -<4> The `source` section defines the source of the build. The source type determines the primary source of input, and can be either `Git`, to point to a code repository location, -ifndef::openshift-online[] -`Dockerfile`, to build from an inline Dockerfile, -endif::[] -or `Binary`, to accept binary payloads. It is possible to have multiple sources at once. For more information about each source type, see "Creating build inputs". -<5> The `strategy` section describes the build strategy used to execute the build. You can specify a `Source` -ifndef::openshift-online[] -, `Docker`, or `Custom` -endif::[] -strategy here. 
This example uses the `ruby-20-centos7` container image that Source-to-image (S2I) uses for the application build. -<6> After the container image is successfully built, it is pushed into the repository described in the `output` section. -<7> The `postCommit` section defines an optional build hook. diff --git a/modules/builds-chaining-builds.adoc b/modules/builds-chaining-builds.adoc deleted file mode 100644 index b8275716fd8f..000000000000 --- a/modules/builds-chaining-builds.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -[id="builds-chaining-builds_{context}"] -= Chained builds - -For compiled languages such as Go, C, C++, and Java, including the dependencies necessary for compilation in the application image might increase the size of the image or introduce vulnerabilities that can be exploited. - -To avoid these problems, two builds can be chained together. One build that produces the compiled artifact, and a second build that places that artifact in a separate image that runs the artifact. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -In the following example, a source-to-image (S2I) build is combined with a docker build to compile an artifact that is then placed in a separate runtime image. - -[NOTE] -==== -Although this example chains a S2I build and a docker build, the first build can use any strategy that produces an image containing the desired artifacts, and the second build can use any strategy that can consume input content from an image. -==== - -//image::chained-build.png[Chained Build] *Needs update* - -The first build takes the application source and produces an image containing a `WAR` file. The image is pushed to the `artifact-image` image stream. The path of the output artifact depends on the `assemble` script of the S2I builder used. In this case, it is output to `/wildfly/standalone/deployments/ROOT.war`. - -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: artifact-build -spec: - output: - to: - kind: ImageStreamTag - name: artifact-image:latest - source: - git: - uri: https://github.com/openshift/openshift-jee-sample.git - ref: "master" - strategy: - sourceStrategy: - from: - kind: ImageStreamTag - name: wildfly:10.1 - namespace: openshift ----- - -The second build uses image source with a path to the WAR file inside the output image from the first build. An inline `dockerfile` copies that `WAR` file into a runtime image. - -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: image-build -spec: - output: - to: - kind: ImageStreamTag - name: image-build:latest - source: - dockerfile: |- - FROM jee-runtime:latest - COPY ROOT.war /deployments/ROOT.war - images: - - from: <1> - kind: ImageStreamTag - name: artifact-image:latest - paths: <2> - - sourcePath: /wildfly/standalone/deployments/ROOT.war - destinationDir: "." - strategy: - dockerStrategy: - from: <3> - kind: ImageStreamTag - name: jee-runtime:latest - triggers: - - imageChange: {} - type: ImageChange ----- -<1> `from` specifies that the docker build should include the output of the image from the `artifact-image` image stream, which was the target of the previous build. -<2> `paths` specifies which paths from the target image to include in the current docker build. -<3> The runtime image is used as the source image for the docker build. 
- -The result of this setup is that the output image of the second build does not have to contain any of the build tools that are needed to create the `WAR` file. Also, because the second build contains an image change trigger, whenever the first build is run and produces a new image with the binary artifact, the second build is automatically triggered to produce a runtime image that contains that artifact. Therefore, both builds behave as a single build with two stages. -endif::[] diff --git a/modules/builds-configuration-change-triggers.adoc b/modules/builds-configuration-change-triggers.adoc deleted file mode 100644 index d9c548c19d66..000000000000 --- a/modules/builds-configuration-change-triggers.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-configuration-change-triggers_{context}"] -= Configuration change triggers - -A configuration change trigger allows a build to be automatically invoked as soon as a new `BuildConfig` is created. - -The following is an example trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- - type: "ConfigChange" ----- - -[NOTE] -==== -Configuration change triggers currently only work when creating a new `BuildConfig`. In a future release, configuration change triggers will also be able to launch a build whenever a `BuildConfig` is updated. -==== diff --git a/modules/builds-configuration-file.adoc b/modules/builds-configuration-file.adoc deleted file mode 100644 index 1ebe97f28a56..000000000000 --- a/modules/builds-configuration-file.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-configuration.adoc - -:_content-type: PROCEDURE -[id="builds-configuration-file_{context}"] -= Configuring build settings - -You can configure build settings by editing the `build.config.openshift.io/cluster` resource. - -.Procedure - -* Edit the `build.config.openshift.io/cluster` resource: -+ -[source,terminal] ----- -$ oc edit build.config.openshift.io/cluster ----- -+ -The following is an example `build.config.openshift.io/cluster` resource: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Build<1> -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 2 - name: cluster - resourceVersion: "107233" - selfLink: /apis/config.openshift.io/v1/builds/cluster - uid: e2e9cc14-78a9-11e9-b92b-06d6c7da38dc -spec: - buildDefaults:<2> - defaultProxy:<3> - httpProxy: http://proxy.com - httpsProxy: https://proxy.com - noProxy: internal.com - env:<4> - - name: envkey - value: envvalue - gitProxy:<5> - httpProxy: http://gitproxy.com - httpsProxy: https://gitproxy.com - noProxy: internalgit.com - imageLabels:<6> - - name: labelkey - value: labelvalue - resources:<7> - limits: - cpu: 100m - memory: 50Mi - requests: - cpu: 10m - memory: 10Mi - buildOverrides:<8> - imageLabels:<9> - - name: labelkey - value: labelvalue - nodeSelector:<10> - selectorkey: selectorvalue - tolerations:<11> - - effect: NoSchedule - key: node-role.kubernetes.io/builds -operator: Exists ----- -<1> `Build`: Holds cluster-wide information on how to handle builds. The canonical, and only valid name is `cluster`. -<2> `buildDefaults`: Controls the default information for builds. -<3> `defaultProxy`: Contains the default proxy settings for all build operations, including image pull or push and source download. 
-<4> `env`: A set of default environment variables that are applied to the build if the specified variables do not exist on the build. -<5> `gitProxy`: Contains the proxy settings for Git operations only. If set, this overrides any Proxy settings for all Git commands, such as `git clone`. -<6> `imageLabels`: A list of labels that are applied to the resulting image. -You can override a default label by providing a label with the same name in the `BuildConfig`. -<7> `resources`: Defines resource requirements to execute the build. -<8> `buildOverrides`: Controls override settings for builds. -<9> `imageLabels`: A list of labels that are applied to the resulting image. -If you provided a label in the `BuildConfig` with the same name as one in this table, your label will be overwritten. -<10> `nodeSelector`: A selector which must be true for the build pod to fit on a node. -<11> `tolerations`: A list of tolerations that overrides any existing tolerations set on a build pod. diff --git a/modules/builds-configuration-parameters.adoc b/modules/builds-configuration-parameters.adoc deleted file mode 100644 index 30581f31fff1..000000000000 --- a/modules/builds-configuration-parameters.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-configuration.adoc - -[id="builds-configuration-parameters_{context}"] -= Build controller configuration parameters - -The `build.config.openshift.io/cluster` resource offers the following configuration parameters. - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`Build` -|Holds cluster-wide information on how to handle builds. The canonical, and only valid name is `cluster`. - -`spec`: Holds user-settable values for the build controller configuration. - -|`buildDefaults` -|Controls the default information for builds. - -`defaultProxy`: Contains the default proxy settings for all build operations, including image pull or push and source download. - -You can override values by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the `BuildConfig` strategy. - -`gitProxy`: Contains the proxy settings for Git operations only. If set, this overrides any proxy settings for all Git commands, such as `git clone`. - -Values that are not set here are inherited from DefaultProxy. - -`env`: A set of default environment variables that are applied to the build if the specified variables do not exist on the build. - -`imageLabels`: A list of labels that are applied to the resulting image. You can override a default label by providing a label with the same name in the `BuildConfig`. - -`resources`: Defines resource requirements to execute the build. - -|`ImageLabel` -|`name`: Defines the name of the label. It must have non-zero length. - -|`buildOverrides` -|Controls override settings for builds. - -`imageLabels`: A list of labels that are applied to the resulting image. If you provided a label in the `BuildConfig` with the same name as one in this table, your label will be overwritten. - -`nodeSelector`: A selector which must be true for the build pod to fit on a node. - -`tolerations`: A list of tolerations that overrides any existing tolerations set on a build pod. - -|`BuildList` -|`items`: Standard object's metadata. 
- -|=== diff --git a/modules/builds-configuring-post-commit-build-hooks.adoc b/modules/builds-configuring-post-commit-build-hooks.adoc deleted file mode 100644 index 4084f62e322b..000000000000 --- a/modules/builds-configuring-post-commit-build-hooks.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-configuring-post-commit-build-hooks_{context}"] -= Configuring post commit build hooks - -There are different ways to configure the post build hook. All forms in the following examples are equivalent and run `bundle exec rake test --verbose`. - -.Procedure - -* Shell script: -+ -[source,yaml] ----- -postCommit: - script: "bundle exec rake test --verbose" ----- -+ -The `script` value is a shell script to be run with `/bin/sh -ic`. Use this when a shell script is appropriate to execute the build hook. For example, for running unit tests as above. To control the image entry point, or if the image does not have `/bin/sh`, use `command` and/or `args`. -+ -[NOTE] -==== -The additional `-i` flag was introduced to improve the experience working with CentOS and RHEL images, and may be removed in a future release. -==== - -* Command as the image entry point: -+ -[source,yaml] ----- -postCommit: - command: ["/bin/bash", "-c", "bundle exec rake test --verbose"] ----- -+ -In this form, `command` is the command to run, which overrides the image -entry point in the exec form, as documented in the link:https://docs.docker.com/engine/reference/builder/#entrypoint[Dockerfile reference]. This is needed if the image does not have `/bin/sh`, or if you do not want to use a shell. In all other cases, using `script` might be more convenient. - -* Command with arguments: -+ -[source,yaml] ----- -postCommit: - command: ["bundle", "exec", "rake", "test"] - args: ["--verbose"] ----- -+ -This form is equivalent to appending the arguments to `command`. - -[NOTE] -==== -Providing both `script` and `command` simultaneously creates an invalid build hook. -==== diff --git a/modules/builds-create-custom-build-artifacts.adoc b/modules/builds-create-custom-build-artifacts.adoc deleted file mode 100644 index 8c3a4ccd74d1..000000000000 --- a/modules/builds-create-custom-build-artifacts.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-create-custom-build-artifacts_{context}"] -= Creating custom build artifacts - -You must create the image you want to use as your custom build image. - -.Procedure - -. Starting with an empty directory, create a file named `Dockerfile` with the following content: -+ -[source,terminal] ----- -FROM registry.redhat.io/rhel8/buildah -# In this example, `/tmp/build` contains the inputs that build when this -# custom builder image is run. Normally the custom builder image fetches -# this content from some location at build time, by using git clone as an example. -ADD dockerfile.sample /tmp/input/Dockerfile -ADD build.sh /usr/bin -RUN chmod a+x /usr/bin/build.sh -# /usr/bin/build.sh contains the actual custom build logic that will be run when -# this custom builder image is run. -ENTRYPOINT ["/usr/bin/build.sh"] ----- - -. In the same directory, create a file named `dockerfile.sample`. 
This file is included in the custom build image and defines the image that is produced by the custom build: -+ -[source,terminal] ----- -FROM registry.access.redhat.com/ubi9/ubi -RUN touch /tmp/build ----- - -. In the same directory, create a file named `build.sh`. This file contains the logic that is run when the custom build runs: -+ -[source,terminal] ----- -#!/bin/sh -# Note that in this case the build inputs are part of the custom builder image, but normally this -# is retrieved from an external source. -cd /tmp/input -# OUTPUT_REGISTRY and OUTPUT_IMAGE are env variables provided by the custom -# build framework -TAG="${OUTPUT_REGISTRY}/${OUTPUT_IMAGE}" - - -# performs the build of the new image defined by dockerfile.sample -buildah --storage-driver vfs bud --isolation chroot -t ${TAG} . - - -# buildah requires a slight modification to the push secret provided by the service -# account to use it for pushing the image -cp /var/run/secrets/openshift.io/push/.dockercfg /tmp -(echo "{ \"auths\": " ; cat /var/run/secrets/openshift.io/push/.dockercfg ; echo "}") > /tmp/.dockercfg - - -# push the new image to the target for the build -buildah --storage-driver vfs push --tls-verify=false --authfile /tmp/.dockercfg ${TAG} ----- diff --git a/modules/builds-create-imagestreamtag.adoc b/modules/builds-create-imagestreamtag.adoc deleted file mode 100644 index c424f221a576..000000000000 --- a/modules/builds-create-imagestreamtag.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-create-imagestreamtag_{context}"] -= Creating an image stream tag for the Red Hat Universal Base Image - -To use Red Hat subscriptions within a build, you create an image stream tag to reference the Universal Base Image (UBI). - -To make the UBI available *in every project* in the cluster, you add the image stream tag to the `openshift` namespace. Otherwise, to make it available *in a specific project*, you add the image stream tag to that project. - -The benefit of using image stream tags this way is that doing so grants access to the UBI based on the `registry.redhat.io` credentials in the install pull secret without exposing the pull secret to other users. This is more convenient than requiring each developer to install pull secrets with `registry.redhat.io` credentials in each project. 
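After the tag exists, a build can consume it like any other image stream tag. The following is a minimal sketch, assuming the `ubi:latest` tag is created in the `openshift` namespace as described in the following procedure; the `BuildConfig` name, Git URL, and output image name are placeholders:

[source,yaml]
----
apiVersion: build.openshift.io/v1
kind: BuildConfig
metadata:
  name: entitled-build
spec:
  source:
    git:
      uri: https://github.com/example/app.git
  strategy:
    dockerStrategy:
      from:
        kind: ImageStreamTag
        name: ubi:latest
        namespace: openshift
  output:
    to:
      kind: ImageStreamTag
      name: entitled-app:latest
----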
- -.Procedure - -* To create an `ImageStreamTag` in the `openshift` namespace, so it is available to developers in all projects, enter: -+ -[source,terminal] ----- -$ oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest -n openshift ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create an `ImageStreamTag` in the `openshift` namespace: -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - name: ubi - namespace: openshift -spec: - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi9/ubi:latest - name: latest - referencePolicy: - type: Source ----- -==== - -* To create an `ImageStreamTag` in a single project, enter: -+ -[source,terminal] ----- -$ oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create an `ImageStreamTag` in a single project: -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - name: ubi -spec: - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi9/ubi:latest - name: latest - referencePolicy: - type: Source ----- -==== diff --git a/modules/builds-creating-secrets.adoc b/modules/builds-creating-secrets.adoc deleted file mode 100644 index e5b1c4de60f3..000000000000 --- a/modules/builds-creating-secrets.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-creating-secrets_{context}"] -= Creating secrets - -You must create a secret before creating the pods that depend on that secret. - -When creating secrets: - -* Create a secret object with secret data. -* Update the pod service account to allow the reference to the secret. -* Create a pod, which consumes the secret as an environment variable or as a file using a `secret` volume. - -.Procedure - -* Use the create command to create a secret object from a JSON or YAML file: -+ -[source,terminal] ----- -$ oc create -f ----- -+ -For example, you can create a secret from your local `.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic dockerhub \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- -+ -This command generates a JSON specification of the secret named `dockerhub` and creates the object. -+ -.YAML Opaque Secret Object Definition -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque <1> -data: - username: dXNlci1uYW1l - password: cGFzc3dvcmQ= ----- -+ -<1> Specifies an _opaque_ secret. -+ -.Docker Configuration JSON File Secret Object Definition -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: aregistrykey - namespace: myapps -type: kubernetes.io/dockerconfigjson <1> -data: - .dockerconfigjson:bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== <2> ----- -+ -<1> Specifies that the secret is using a docker configuration JSON file. 
-<2> The base64-encoded content of the docker configuration JSON file. diff --git a/modules/builds-custom-strategy.adoc b/modules/builds-custom-strategy.adoc deleted file mode 100644 index 76d6c22656d9..000000000000 --- a/modules/builds-custom-strategy.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-custom-strategy_{context}"] -= Custom strategy - -When using a Custom strategy, all the defined input secrets and config maps are available in the builder container in the `/var/run/secrets/openshift.io/build` directory. The custom build image must use these secrets and config maps appropriately. With the Custom strategy, you can define secrets as described in Custom strategy options. - -There is no technical difference between existing strategy secrets and the input secrets. However, your builder image can distinguish between them and use them differently, based on your build use case. - -The input secrets are always mounted into the `/var/run/secrets/openshift.io/build` directory, or your builder can parse the `$BUILD` environment variable, which includes the full build object. - -[IMPORTANT] -==== -If a pull secret for the registry exists in both the namespace and the node, builds default to using the pull secret in the namespace. -==== diff --git a/modules/builds-define-build-inputs.adoc b/modules/builds-define-build-inputs.adoc deleted file mode 100644 index 3177260cb4e6..000000000000 --- a/modules/builds-define-build-inputs.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-define-build-inputs_{context}"] -= Build inputs - -A build input provides source content for builds to operate on. You can use the following build inputs to provide sources in {product-title}, listed in order of precedence: - -ifndef::openshift-online[] -* Inline Dockerfile definitions -endif::[] -* Content extracted from existing images -* Git repositories -* Binary (local) inputs -* Input secrets -* External artifacts - -ifdef::openshift-online[] -[IMPORTANT] -==== -The docker build strategy is not supported in {product-title}. Therefore, inline Dockerfile definitions are not accepted. -==== -endif::[] - -You can combine multiple inputs in a single build. -ifndef::openshift-online[] -However, as the inline Dockerfile takes precedence, it can overwrite any other file named Dockerfile provided by another input. -endif::[] -Binary (local) input and Git repositories are mutually exclusive inputs. - -You can use input secrets when you do not want certain resources or credentials used during a build to be available in the final application image produced by the build, or want to consume a value that is defined in a secret resource. External artifacts can be used to pull in additional files that are not available as one of the other build input types. - -When you run a build: - -. A working directory is constructed and all input content is placed in the working directory. For example, the input Git repository is cloned into the working directory, and files specified from input images are copied into the working directory using the target path. - -. The build process changes directories into the `contextDir`, if one is defined. - -ifndef::openshift-online[] -. The inline Dockerfile, if any, is written to the current directory. -endif::[] - -. 
The content from the current directory is provided to the build process -for reference by the -ifndef::openshift-online[] -Dockerfile, custom builder logic, or -endif::[] -`assemble` script. This means any input content that resides outside the `contextDir` is ignored by the build. - -The following example of a source definition includes multiple input types and an explanation of how they are combined. For more details on how each input type is defined, see the specific sections for each input type. - -[source,yaml] ----- -source: - git: - uri: https://github.com/openshift/ruby-hello-world.git <1> - ref: "master" - images: - - from: - kind: ImageStreamTag - name: myinputimage:latest - namespace: mynamespace - paths: - - destinationDir: app/dir/injected/dir <2> - sourcePath: /usr/lib/somefile.jar - contextDir: "app/dir" <3> -ifndef::openshift-online[] - dockerfile: "FROM centos:7\nRUN yum install -y httpd" <4> -endif::[] ----- -<1> The repository to be cloned into the working directory for the build. -<2> `/usr/lib/somefile.jar` from `myinputimage` is stored in `/app/dir/injected/dir`. -<3> The working directory for the build becomes `/app/dir`. -ifndef::openshift-online[] -<4> A Dockerfile with this content is created in `/app/dir`, overwriting any existing file with that name. -endif::[] diff --git a/modules/builds-disabling-build-strategy-globally.adoc b/modules/builds-disabling-build-strategy-globally.adoc deleted file mode 100644 index 1e680725bd2e..000000000000 --- a/modules/builds-disabling-build-strategy-globally.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-disabling-build-strategy-globally_{context}"] -= Disabling access to a build strategy globally - -To prevent access to a particular build strategy globally, log in as a user with cluster administrator privileges, remove the corresponding role from the `system:authenticated` group, and apply the annotation `rbac.authorization.kubernetes.io/autoupdate: "false"` to protect them from changes between the API restarts. The following example shows disabling the docker build strategy. - -.Procedure - -. Apply the `rbac.authorization.kubernetes.io/autoupdate` annotation: -+ -[source,terminal] ----- -$ oc edit clusterrolebinding system:build-strategy-docker-binding ----- -+ -.Example output -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "false" <1> - creationTimestamp: 2018-08-10T01:24:14Z - name: system:build-strategy-docker-binding - resourceVersion: "225" - selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/system%3Abuild-strategy-docker-binding - uid: 17b1f3d4-9c3c-11e8-be62-0800277d20bf -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:build-strategy-docker -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated ----- -<1> Change the `rbac.authorization.kubernetes.io/autoupdate` annotation's value to `"false"`. - -. Remove the role: -+ -[source,terminal] ----- -$ oc adm policy remove-cluster-role-from-group system:build-strategy-docker system:authenticated ----- - -. Ensure the build strategy subresources are also removed from these roles: -+ -[source,terminal] ----- -$ oc edit clusterrole admin ----- -+ -[source,terminal] ----- -$ oc edit clusterrole edit ----- - -. 
For each role, specify the subresources that correspond to the resource of the strategy to disable. - -.. Disable the docker Build Strategy for *admin*: -+ -[source,yaml] ----- -kind: ClusterRole -metadata: - name: admin -... -- apiGroups: - - "" - - build.openshift.io - resources: - - buildconfigs - - buildconfigs/webhooks - - builds/custom <1> - - builds/source - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -... ----- -<1> Add `builds/custom` and `builds/source` to disable docker builds globally for users with the *admin* role. diff --git a/modules/builds-displaying-webhook-urls.adoc b/modules/builds-displaying-webhook-urls.adoc deleted file mode 100644 index 0243bfd84089..000000000000 --- a/modules/builds-displaying-webhook-urls.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-displaying-webhook-urls_{context}"] -= Displaying webhook URLs - -You can use the following command to display webhook URLs associated with a build configuration. If the command does not display any webhook URLs, then no webhook trigger is defined for that build configuration. - -.Procedure - -* To display any webhook URLs associated with a `BuildConfig`, run: - -[source,terminal] ----- -$ oc describe bc ----- diff --git a/modules/builds-docker-credentials-private-registries.adoc b/modules/builds-docker-credentials-private-registries.adoc deleted file mode 100644 index 8e7fc9a5b216..000000000000 --- a/modules/builds-docker-credentials-private-registries.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-docker-credentials-private-registries_{context}"] -= Using docker credentials for private registries - -You can supply builds with a .`docker/config.json` file with valid credentials for private container registries. This allows you to push the output image into a private container image registry or pull a builder image from the private container image registry that requires authentication. - -You can supply credentials for multiple repositories within the same registry, each with credentials specific to that registry path. - -[NOTE] -==== -For the {product-title} container image registry, this is not required because secrets are generated automatically for you by {product-title}. -==== - -The `.docker/config.json` file is found in your home directory by default and -has the following format: - -[source,yaml] ----- -auths: - index.docker.io/v1/: <1> - auth: "YWRfbGzhcGU6R2labnRib21ifTE=" <2> - email: "user@example.com" <3> - docker.io/my-namespace/my-user/my-image: <4> - auth: "GzhYWRGU6R2fbclabnRgbkSp="" - email: "user@example.com" - docker.io/my-namespace: <5> - auth: "GzhYWRGU6R2deesfrRgbkSp="" - email: "user@example.com" ----- -<1> URL of the registry. -<2> Encrypted password. -<3> Email address for the login. -<4> URL and credentials for a specific image in a namespace. -<5> URL and credentials for a registry namespace. - -You can define multiple container image registries or define multiple repositories in the same registry. Alternatively, you can also add authentication entries to this file by running the `docker login` command. The file will be created if it does not exist. - -Kubernetes provides `Secret` objects, which can be used to store configuration and passwords. 
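If you prefer not to manage a local `.docker/config.json` file, the CLI can assemble the same kind of secret for you. The following is a minimal sketch; the secret name, registry, and credentials are placeholders:

[source,terminal]
----
$ oc create secret docker-registry my-registry-secret \
    --docker-server=docker.io \
    --docker-username=<user_name> \
    --docker-password=<password> \
    --docker-email=user@example.com
----

The resulting secret has the `kubernetes.io/dockerconfigjson` type and can be referenced from the `pushSecret` and `pullSecret` fields described in the following procedure.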
- -.Prerequisites - -* You must have a `.docker/config.json` file. - -.Procedure - -. Create the secret from your local `.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic dockerhub \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- -+ -This generates a JSON specification of the secret named `dockerhub` and creates the object. -+ -. Add a `pushSecret` field into the `output` section of the `BuildConfig` and set it to the name of the `secret` that you created, which in the previous example is `dockerhub`: -+ -[source,yaml] ----- -spec: - output: - to: - kind: "DockerImage" - name: "private.registry.com/org/private-image:latest" - pushSecret: - name: "dockerhub" ----- -+ -You can use the `oc set build-secret` command to set the push secret on the build configuration: -+ -[source,terminal] ----- -$ oc set build-secret --push bc/sample-build dockerhub ----- -+ -You can also link the push secret to the service account used by the build instead of specifying the `pushSecret` field. By default, builds use the `builder` service account. The push secret is automatically added to the build if the secret contains a credential that matches the repository hosting the build's output image. -+ -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- -+ -. Pull the builder container image from a private container image registry by specifying the `pullSecret` field, which is part of the build strategy definition: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "DockerImage" - name: "docker.io/user/private_repository" - pullSecret: - name: "dockerhub" ----- -+ -You can use the `oc set build-secret` command to set the pull secret on the build configuration: -+ -[source,terminal] ----- -$ oc set build-secret --pull bc/sample-build dockerhub ----- -+ -ifndef::openshift-online[] -[NOTE] -==== -This example uses `pullSecret` in a Source build, but it is also applicable in Docker and Custom builds. -==== -endif::[] -+ -You can also link the pull secret to the service account used by the build instead of specifying the `pullSecret` field. By default, builds use the `builder` service account. The pull secret is automatically added to the build if the secret contains a credential that matches the repository hosting the build's input image. To link the pull secret to the service account used by the build instead of specifying the `pullSecret` field, run: -+ -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- -+ -[NOTE] -==== -You must specify a `from` image in the `BuildConfig` spec to take advantage of this feature. Docker strategy builds generated by `oc new-build` or `oc new-app` may not do this in some situations. -==== diff --git a/modules/builds-docker-source-build-output.adoc b/modules/builds-docker-source-build-output.adoc deleted file mode 100644 index b6bc7851cbf1..000000000000 --- a/modules/builds-docker-source-build-output.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-docker-source-build-output_{context}"] -= Build output - -Builds that use the -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker or -endif::[] -source-to-image (S2I) strategy result in the creation of a new container image. The image is then pushed to the container image registry specified in the `output` section of the `Build` specification. 
- -If the output kind is `ImageStreamTag`, then the image will be pushed to the integrated {product-registry} and tagged in the specified imagestream. If the output is of type `DockerImage`, then the name of the output reference will be used as a docker push specification. The specification may contain a registry or will default to DockerHub if no registry is specified. If the output section of the build specification is empty, then the image will not be pushed at the end of the build. - -.Output to an ImageStreamTag -[source,yaml] ----- -spec: - output: - to: - kind: "ImageStreamTag" - name: "sample-image:latest" ----- - -.Output to a docker Push Specification -[source,yaml] ----- -spec: - output: - to: - kind: "DockerImage" - name: "my-registry.mycompany.com:5000/myimages/myimage:tag" ----- diff --git a/modules/builds-docker-strategy.adoc b/modules/builds-docker-strategy.adoc deleted file mode 100644 index 8d70207eb909..000000000000 --- a/modules/builds-docker-strategy.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-docker-strategy_{context}"] -= Docker strategy - -When using a docker strategy, you can add all defined input secrets into your container image using the link:https://docs.docker.com/engine/reference/builder/#add[`ADD`] and link:https://docs.docker.com/engine/reference/builder/#copy[`COPY` instructions] in your Dockerfile. - -If you do not specify the `destinationDir` for a secret, then the files are copied into the same directory in which the Dockerfile is located. If you specify a relative path as `destinationDir`, then the secrets are copied into that directory, relative to your Dockerfile location. This makes the secret files available to the Docker build operation as part of the context directory used during the build. - -.Example of a Dockerfile referencing secret and config map data ----- -FROM centos/ruby-22-centos7 - -USER root -COPY ./secret-dir /secrets -COPY ./config / - -# Create a shell script that will output secrets and ConfigMaps when the image is run -RUN echo '#!/bin/sh' > /input_report.sh -RUN echo '(test -f /secrets/secret1 && echo -n "secret1=" && cat /secrets/secret1)' >> /input_report.sh -RUN echo '(test -f /config && echo -n "relative-configMap=" && cat /config)' >> /input_report.sh -RUN chmod 755 /input_report.sh - -CMD ["/bin/sh", "-c", "/input_report.sh"] ----- - -[IMPORTANT] -==== -Users normally remove their input secrets from the final application image so that the secrets are not present in the container running from that image. However, the secrets still exist in the image itself in the layer where they were added. This removal is part of the Dockerfile itself. - -To prevent the contents of input secrets and config maps from appearing in the build output container images and avoid this removal process altogether, xref:../../cicd/builds/build-strategies.html#builds-using-build-volumes_build-strategies-docker[use build volumes] in your Docker build strategy instead. 
-==== diff --git a/modules/builds-dockerfile-source.adoc b/modules/builds-dockerfile-source.adoc deleted file mode 100644 index 74e58d40d5a7..000000000000 --- a/modules/builds-dockerfile-source.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-dockerfile-source_{context}"] -= Dockerfile source - -When you supply a `dockerfile` value, the content of this field is written to disk as a file named `dockerfile`. This is done after other input sources are processed, so if the input source repository contains a Dockerfile in the root directory, it is overwritten with this content. - -The source definition is part of the `spec` section in the `BuildConfig`: - -[source,yaml] ----- -source: - dockerfile: "FROM centos:7\nRUN yum install -y httpd" <1> ----- -<1> The `dockerfile` field contains an inline Dockerfile that is built. - -[role="_additional-resources"] -.Additional resources - -* The typical use for this field is to provide a Dockerfile to a docker strategy build. diff --git a/modules/builds-gitconfig-file-secured-git.adoc b/modules/builds-gitconfig-file-secured-git.adoc deleted file mode 100644 index 0bb18b8ad29c..000000000000 --- a/modules/builds-gitconfig-file-secured-git.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-gitconfig-file-secured-git_{context}"] -= Creating a secret from a .gitconfig file for secured Git - -If your Git server is secured with two-way SSL and user name with password, you must add the certificate files to your source build and add references to the certificate files in the `.gitconfig` file. - -.Prerequisites - -* You must have Git credentials. - -.Procedure - -Add the certificate files to your source build and add references to the certificate files in the `.gitconfig` file. - -. Add the `client.crt`, `cacert.crt`, and `client.key` files to the `/var/run/secrets/openshift.io/source/` folder in the application source code. - -. In the `.gitconfig` file for the server, add the `[http]` section shown in the following example: -+ -[source,terminal] ----- -# cat .gitconfig ----- -+ -.Example output -[source,terminal] ----- -[user] - name = - email = -[http] - sslVerify = false - sslCert = /var/run/secrets/openshift.io/source/client.crt - sslKey = /var/run/secrets/openshift.io/source/client.key - sslCaInfo = /var/run/secrets/openshift.io/source/cacert.crt ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret generic \ ---from-literal=username= \ <1> ---from-literal=password= \ <2> ---from-file=.gitconfig=.gitconfig \ ---from-file=client.crt=/var/run/secrets/openshift.io/source/client.crt \ ---from-file=cacert.crt=/var/run/secrets/openshift.io/source/cacert.crt \ ---from-file=client.key=/var/run/secrets/openshift.io/source/client.key ----- -<1> The user's Git user name. -<2> The password for this user. - -[IMPORTANT] -==== -To avoid having to enter your password again, be sure to specify the source-to-image (S2I) image in your builds. However, if you cannot clone the repository, you must still specify your user name and password to promote the build. -==== - -[role="_additional-resources"] -.Additional resources - -* `/var/run/secrets/openshift.io/source/` folder in the application source code. 
diff --git a/modules/builds-gitconfig-file.adoc b/modules/builds-gitconfig-file.adoc deleted file mode 100644 index 111e5e11fe42..000000000000 --- a/modules/builds-gitconfig-file.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-gitconfig-file_{context}"] -= Creating a secret from a .gitconfig file - -If the cloning of your application is dependent on a `.gitconfig` file, then you can create a secret that contains it. Add it to the builder service account and then your `BuildConfig`. - -.Procedure - -* To create a secret from a `.gitconfig` file: - -[source,terminal] ----- -$ oc create secret generic --from-file= ----- - -[NOTE] -==== -SSL verification can be turned off if `sslVerify=false` is set for the `http` -section in your `.gitconfig` file: - -[source,text] ----- -[http] - sslVerify=false ----- -==== diff --git a/modules/builds-identifying-image-change-triggers.adoc b/modules/builds-identifying-image-change-triggers.adoc deleted file mode 100644 index 460a160b0a4d..000000000000 --- a/modules/builds-identifying-image-change-triggers.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-image-change-trigger-identification_{context}"] -= Identifying the image change trigger of a build - -As a developer, if you have image change triggers, you can identify which image change initiated the last build. This can be useful for debugging or troubleshooting builds. - -.Example `BuildConfig` -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: bc-ict-example - namespace: bc-ict-example-namespace -spec: - -# ... - - triggers: - - imageChange: - from: - kind: ImageStreamTag - name: input:latest - namespace: bc-ict-example-namespace - - imageChange: - from: - kind: ImageStreamTag - name: input2:latest - namespace: bc-ict-example-namespace - type: ImageChange -status: - imageChangeTriggers: - - from: - name: input:latest - namespace: bc-ict-example-namespace - lastTriggerTime: "2021-06-30T13:47:53Z" - lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input@sha256:0f88ffbeb9d25525720bfa3524cb1bf0908b7f791057cf1acfae917b11266a69 - - from: - name: input2:latest - namespace: bc-ict-example-namespace - lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input2@sha256:0f88ffbeb9d25525720bfa3524cb2ce0908b7f791057cf1acfae917b11266a69 - - lastVersion: 1 ----- - -[NOTE] -==== -This example omits elements that are not related to image change triggers. -==== - -.Prerequisites - -* You have configured multiple image change triggers. These triggers have triggered one or more builds. - -.Procedure - -. In `buildConfig.status.imageChangeTriggers` to identify the `lastTriggerTime` that has the latest timestamp. -+ -This `ImageChangeTriggerStatus` - - - Then you use the `name` and `namespace` from that build to find the corresponding image change trigger in `buildConfig.spec.triggers`. - -. Under `imageChangeTriggers`, compare timestamps to identify the latest - -.Image change triggers - -In your build configuration, `buildConfig.spec.triggers` is an array of build trigger policies, `BuildTriggerPolicy`. - -Each `BuildTriggerPolicy` has a `type` field and set of pointers fields. 
Each pointer field corresponds to one of the allowed values for the `type` field. As such, you can only set `BuildTriggerPolicy` to only one pointer field. - -For image change triggers, the value of `type` is `ImageChange`. Then, the `imageChange` field is the pointer to an `ImageChangeTrigger` object, which has the following fields: - -* `lastTriggeredImageID`: This field, which is not shown in the example, is deprecated in {product-title} 4.8 and will be ignored in a future release. It contains the resolved image reference for the `ImageStreamTag` when the last build was triggered from this `BuildConfig`. -* `paused`: You can use this field, which is not shown in the example, to temporarily disable this particular image change trigger. -* `from`: You use this field to reference the `ImageStreamTag` that drives this image change trigger. Its type is the core Kubernetes type, `OwnerReference`. - -The `from` field has the following fields of note: -** `kind`: For image change triggers, the only supported value is `ImageStreamTag`. -** `namespace`: You use this field to specify the namespace of the `ImageStreamTag`. -** `name`: You use this field to specify the `ImageStreamTag`. - -.Image change trigger status - -In your build configuration, `buildConfig.status.imageChangeTriggers` is an array of `ImageChangeTriggerStatus` elements. Each `ImageChangeTriggerStatus` element includes the `from`, `lastTriggeredImageID`, and `lastTriggerTime` elements shown in the preceding example. - -The `ImageChangeTriggerStatus` that has the most recent `lastTriggerTime` triggered the most recent build. You use its `name` and `namespace` to identify the image change trigger in `buildConfig.spec.triggers` that triggered the build. - -The `lastTriggerTime` with the most recent timestamp signifies the `ImageChangeTriggerStatus` of the last build. This `ImageChangeTriggerStatus` has the same `name` and `namespace` as the image change trigger in `buildConfig.spec.triggers` that triggered the build. - -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-image-change-trigger-identification.adoc b/modules/builds-image-change-trigger-identification.adoc deleted file mode 100644 index b0fd3f449be2..000000000000 --- a/modules/builds-image-change-trigger-identification.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-image-change-trigger-identification_{context}"] -= Image change trigger identification - -As a developer, if you have configured image change triggers, you can identify which image change initiated the last build. - -To accomplish this, you must identify elements in your build configuration's specification and status that are related to image change triggers. - -This way, you can use the timestamp in `buildConfig.status.imageChangeTriggers` to identify the most recent build. Then you can use the name and namespace of the image stream that triggered this build to find the corresponding image change trigger in the `buildConfig.spec.triggers`. - - -== Image change trigger elements in the specification - -In your build configuration specification, `buildConfig.spec.triggers` is an array of build trigger policies, `BuildTriggerPolicy`. 
- -Each `BuildTriggerPolicy` has a `type` field and set of pointers fields, where each pointer field corresponds to one of the allowed values for the `type` field. As such, only one pointer field can be set for a given `BuildTriggerPolicy`. - -So for image change triggers, the value of `type` is `ImageChange`. - -Then, the `imageChange` field is the pointer to an `ImageChangeTrigger` object. So this will be set. It has the following fields: - -* `lastTriggeredImageID`: This field is deprecated in {product-title} 4.8, but is still being set. It will be ignored in a future release. It contains the resolved image reference for the `ImageStreamTag` when the last build was triggered from this `BuildConfig`. -* `paused`: This field is used to temporarily disable this particular image change trigger. -* `from`: This field is used to reference the `ImageStreamTag` that drives this image change trigger. Its type is the core Kubernetes type, `OwnerReference`. The `from` field has the following fields of note: - * `kind`: In this case, the only supported value is `ImageStreamTag`. - * `namespace`: The namespace where the `ImageStreamTag` lives. - * `name`: The name of the `ImageStreamTag`. - -The following example shows the relative location of the elements mentioned in the preceding list and omits unrelated elements, such as `name`, `source`, and `strategy`. - -.Example `BuildConfig.spec` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -spec: - triggers: - - imageChange: - from: - kind: ImageStreamTag - name: <1> - namespace: <2> - type: ImageChange ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. - -== Image change trigger elements in the status - -In your build configuration status, `buildConfig.status.imageChangeTriggers` is an array of `ImageChangeTriggerStatus` elements. Each `ImageChangeTriggerStatus` element includes the `from`, `lastTriggeredImageID`, and `lastTriggerTime` elements shown in the following example. This example omits elements that are not related to image change triggers. - -.Example `BuildConfig.status` -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -status: - imageChangeTriggers: - - from: - name: <1> - namespace: <2> - lastTriggeredImageID: <3> - lastTriggerTime: <4> ----- -<1> The name of an image stream, such as `input:latest`. -<2> A namespace, such as `my-namespace`. -<3> The SHA or ID of the `ImageStreamTag` when a build started. Its value is updated each time a build is started, even if this `ImageStreamTag` is not the reason the build started. -<4> The last time this particular `ImageStreamTag` triggered a build to start. Its value is only updated when this trigger specifically started a Build. - -== Identification of image change triggers - -The `ImageChangeTriggerStatus` that has the most recent `lastTriggerTime` triggered the most recent build. You can use its `name` and `namespace` to correlate it with the `ImageStreamTag` of one of the image change triggers you defined in the `buildConfig.spec.triggers`. 
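A quick way to read these fields on a live cluster is with a JSONPath query. The following is a minimal sketch, assuming the `bc-ict-example` build configuration from the earlier example:

[source,terminal]
----
$ oc get bc bc-ict-example -n bc-ict-example-namespace \
    -o jsonpath='{range .status.imageChangeTriggers[*]}{.from.namespace}/{.from.name} {.lastTriggerTime}{"\n"}{end}'
----

The entry with the most recent `lastTriggerTime` names the image stream tag whose change started the last build.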
- -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-image-source.adoc b/modules/builds-image-source.adoc deleted file mode 100644 index 8d7c16c0ed97..000000000000 --- a/modules/builds-image-source.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: CONCEPT - -[id="builds-image-source_{context}"] -= Image source - -You can add additional files to the build process with images. Input images are referenced in the same way the `From` and `To` image targets are defined. This means both container images and image stream tags can be referenced. In conjunction with the image, you must provide one or more path pairs to indicate the path of the files or directories to copy the image and the destination to place them in the build context. - -The source path can be any absolute path within the image specified. The destination must be a relative directory path. At build time, the image is loaded and the indicated files and directories are copied into the context directory of the build process. This is the same directory into which the source repository content is cloned. If the source path ends in `/.` then the content of the directory is copied, but the directory itself is not created at the destination. - -Image inputs are specified in the `source` definition of the `BuildConfig`: - -[source,yaml] ----- -source: - git: - uri: https://github.com/openshift/ruby-hello-world.git - ref: "master" - images: <1> - - from: <2> - kind: ImageStreamTag - name: myinputimage:latest - namespace: mynamespace - paths: <3> - - destinationDir: injected/dir <4> - sourcePath: /usr/lib/somefile.jar <5> - - from: - kind: ImageStreamTag - name: myotherinputimage:latest - namespace: myothernamespace - pullSecret: mysecret <6> - paths: - - destinationDir: injected/dir - sourcePath: /usr/lib/somefile.jar ----- -<1> An array of one or more input images and files. -<2> A reference to the image containing the files to be copied. -<3> An array of source/destination paths. -<4> The directory relative to the build root where the build process can access the file. -<5> The location of the file to be copied out of the referenced image. -<6> An optional secret provided if credentials are needed to access the input image. -+ -[NOTE] -==== -If your cluster uses an `ImageDigestMirrorSet` or `ImageTagMirrorSet` object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. -==== - -.Images that require pull secrets - -When using an input image that requires a pull secret, you can link the pull secret to the service account used by the build. By default, builds use the `builder` service account. The pull secret is automatically added to the build if the secret contains a credential that matches the repository hosting the input image. To link a pull secret to the service account used by the build, run: - -[source,terminal] ----- -$ oc secrets link builder dockerhub ----- - -ifndef::openshift-online[] -[NOTE] -==== -This feature is not supported for builds using the custom strategy. 
-==== -endif::[] - -.Images on mirrored registries that require pull secrets - -When using an input image from a mirrored registry, if you get a `build error: failed to pull image` message, you can resolve the error by using either of the following methods: - -* Create an input secret that contains the authentication credentials for the builder image’s repository and all known mirrors. In this case, create a pull secret for credentials to the image registry and its mirrors. -* Use the input secret as the pull secret on the `BuildConfig` object. diff --git a/modules/builds-input-secrets-configmaps.adoc b/modules/builds-input-secrets-configmaps.adoc deleted file mode 100644 index b79521842418..000000000000 --- a/modules/builds-input-secrets-configmaps.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-input-secrets-configmaps_{context}"] -= Input secrets and config maps - -[IMPORTANT] -==== -To prevent the contents of input secrets and config maps from appearing in build output container images, use build volumes in your xref:../../cicd/builds/build-strategies.html#builds-using-build-volumes_build-strategies-docker[Docker build] and xref:../../cicd/builds/build-strategies.html#builds-using-build-volumes_build-strategies-s2i[source-to-image build] strategies. -==== - -In some scenarios, build operations require credentials or other configuration data to access dependent resources, but it is undesirable for that information to be placed in source control. You can define input secrets and input config maps for this purpose. - -For example, when building a Java application with Maven, you can set up a private mirror of Maven Central or JCenter that is accessed by private keys. To download libraries from that private mirror, you have to supply the -following: - -. A `settings.xml` file configured with the mirror's URL and connection settings. -. A private key referenced in the settings file, such as `~/.ssh/id_rsa`. - -For security reasons, you do not want to expose your credentials in the application image. - -This example describes a Java application, but you can use the same approach for adding SSL certificates into the `/etc/ssl/certs` directory, API keys or tokens, license files, and more. diff --git a/modules/builds-manually-add-source-clone-secrets.adoc b/modules/builds-manually-add-source-clone-secrets.adoc deleted file mode 100644 index 8284e05ce320..000000000000 --- a/modules/builds-manually-add-source-clone-secrets.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-manually-add-source-clone-secrets_{context}"] -= Manually adding a source clone secret - -Source clone secrets can be added manually to a build configuration by adding a `sourceSecret` field to the `source` section inside the `BuildConfig` and setting it to the name of the secret that you created. In this example, it is the `basicsecret`. 
- -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - output: - to: - kind: "ImageStreamTag" - name: "sample-image:latest" - source: - git: - uri: "https://github.com/user/app.git" - sourceSecret: - name: "basicsecret" - strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "python-33-centos7:latest" ----- - -.Procedure - -You can also use the `oc set build-secret` command to set the source clone secret on an existing build configuration. - -* To set the source clone secret on an existing build configuration, enter the following command: -+ -[source,terminal] ----- -$ oc set build-secret --source bc/sample-build basicsecret ----- diff --git a/modules/builds-output-image-environment-variables.adoc b/modules/builds-output-image-environment-variables.adoc deleted file mode 100644 index 74c9ab57ab41..000000000000 --- a/modules/builds-output-image-environment-variables.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-output-image-environment-variables_{context}"] -= Output image environment variables - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker and -endif::[] -source-to-image (S2I) strategy builds set the following environment variables on output images: - -[options="header"] -|=== - -|Variable |Description - -|`OPENSHIFT_BUILD_NAME` -|Name of the build - -|`OPENSHIFT_BUILD_NAMESPACE` -|Namespace of the build - -|`OPENSHIFT_BUILD_SOURCE` -|The source URL of the build - -|`OPENSHIFT_BUILD_REFERENCE` -|The Git reference used in the build - -|`OPENSHIFT_BUILD_COMMIT` -|Source commit used in the build -|=== - -Additionally, any user-defined environment variable, for example those configured with -S2I] -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -or docker -endif::[] -strategy options, will also be part of the output image environment variable list. diff --git a/modules/builds-output-image-labels.adoc b/modules/builds-output-image-labels.adoc deleted file mode 100644 index 779c2380129e..000000000000 --- a/modules/builds-output-image-labels.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/managing-build-output.adoc - -[id="builds-output-image-labels_{context}"] -= Output image labels - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -docker and -endif::[] -source-to-image (S2I)` builds set the following labels on output images: - -[options="header"] -|=== - -|Label |Description - -|`io.openshift.build.commit.author` -|Author of the source commit used in the build - -|`io.openshift.build.commit.date` -|Date of the source commit used in the build - -|`io.openshift.build.commit.id` -|Hash of the source commit used in the build - -|`io.openshift.build.commit.message` -|Message of the source commit used in the build - -|`io.openshift.build.commit.ref` -|Branch or reference specified in the source - -|`io.openshift.build.source-location` -|Source URL for the build -|=== - -You can also use the `BuildConfig.spec.output.imageLabels` field to specify a list of custom labels that will be applied to each image built from the build configuration. 
- -.Custom Labels to be Applied to Built Images -[source,yaml] ----- -spec: - output: - to: - kind: "ImageStreamTag" - name: "my-image:latest" - imageLabels: - - name: "vendor" - value: "MyCompany" - - name: "authoritative-source-url" - value: "registry.mycompany.com" ----- diff --git a/modules/builds-restricting-build-strategy-globally.adoc b/modules/builds-restricting-build-strategy-globally.adoc deleted file mode 100644 index d069e219dce6..000000000000 --- a/modules/builds-restricting-build-strategy-globally.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-restricting-build-strategy-globally_{context}"] -= Restricting build strategies to users globally - -You can allow a set of specific users to create builds with a particular strategy. - -.Prerequisites - -* Disable global access to the build strategy. - -.Procedure - -* Assign the role that corresponds to the build strategy to a specific user. For -example, to add the `system:build-strategy-docker` cluster role to the user -`devuser`: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user system:build-strategy-docker devuser ----- -+ -[WARNING] -==== -Granting a user access at the cluster level to the `builds/docker` subresource means that the user can create builds with the docker strategy in any project in which they can create builds. -==== diff --git a/modules/builds-restricting-build-strategy-to-user.adoc b/modules/builds-restricting-build-strategy-to-user.adoc deleted file mode 100644 index e61500218bba..000000000000 --- a/modules/builds-restricting-build-strategy-to-user.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/securing-builds-by-strategy.adoc - - -:_content-type: PROCEDURE -[id="builds-restricting-build-strategy-to-user_{context}"] -= Restricting build strategies to a user within a project - -Similar to granting the build strategy role to a user globally, you can allow a set of specific users within a project to create builds with a particular strategy. - -.Prerequisites - -* Disable global access to the build strategy. - -.Procedure - -* Assign the role that corresponds to the build strategy to a specific user within a project. For example, to add the `system:build-strategy-docker` role within the project `devproject` to the user `devuser`: -+ -[source,terminal] ----- -$ oc adm policy add-role-to-user system:build-strategy-docker devuser -n devproject ----- diff --git a/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc b/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc deleted file mode 100644 index 09e696674da1..000000000000 --- a/modules/builds-running-entitled-builds-with-sharedsecret-objects.adoc +++ /dev/null @@ -1,202 +0,0 @@ -:_content-type: PROCEDURE -[id="builds-running-entitled-builds-with-sharedsecret-objects_{context}"] -= Running entitled builds using SharedSecret objects - -You can configure and perform a build in one namespace that securely uses RHEL entitlements from a `Secret` object in another namespace. - -You can still access RHEL entitlements from OpenShift Builds by creating a `Secret` object with your subscription credentials in the same namespace as your `Build` object. However, now, in {product-title} 4.10 and later, you can access your credentials and certificates from a `Secret` object in one of the {product-title} system namespaces. 
You run entitled builds with a CSI volume mount of a `SharedSecret` custom resource (CR) instance that references the `Secret` object. - -This procedure relies on the newly introduced Shared Resources CSI Driver feature, which you can use to declare CSI Volume mounts in {product-title} Builds. It also relies on the {product-title} Insights Operator. - -[IMPORTANT] -==== -[subs="attributes+"] -The Shared Resources CSI Driver and The Build CSI Volumes are both Technology Preview features, which are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. - -The Shared Resources CSI Driver and the Build CSI Volumes features also belong to the `TechPreviewNoUpgrade` feature set, which is a subset of the current Technology Preview features. You can enable the `TechPreviewNoUpgrade` feature set on test clusters, where you can fully test them while leaving the features disabled on production clusters. Enabling this feature set cannot be undone and prevents minor version updates. This feature set is not recommended on production clusters. See "Enabling Technology Preview features using feature gates" in the following "Additional resources" section. -==== - -.Prerequisites - -* You have enabled the `TechPreviewNoUpgrade` feature set by using the feature gates. -* You have a `SharedSecret` custom resource (CR) instance that references the `Secret` object where the Insights Operator stores the subscription credentials. -* You must have permission to perform the following actions: -** Create build configs and start builds. -** Discover which `SharedSecret` CR instances are available by entering the `oc get sharedsecrets` command and getting a non-empty list back. -** Determine if the `builder` service account available to you in your namespace is allowed to use the given `SharedSecret` CR instance. In other words, you can run `oc adm policy who-can use ` to see if the `builder` service account in your namespace is listed. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, establish, or ask someone to establish, the necessary role-based access control (RBAC) so that you can discover `SharedSecret` CR instances and enable service accounts to use `SharedSecret` CR instances. -==== - -.Procedure - -. Grant the `builder` service account RBAC permissions to use the `SharedSecret` CR instance by using `oc apply` with YAML content: -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming `SharedSecret` CR instances. -==== -+ -.Example `oc apply -f` command with YAML `Role` object definition -[source,terminal] ----- -$ oc apply -f - < 1ef7c6d8c1a -STEP 3/9: RUN rm /etc/rhsm-host -time="2022-02-03T20:28:33Z" level=warning msg="Adding metacopy option, configured globally" ---> b1c61f88b39 -STEP 4/9: RUN yum repolist --disablerepo=* -Updating Subscription Management repositories. - - -... 
- ---> b067f1d63eb -STEP 5/9: RUN subscription-manager repos --enable rhocp-4.9-for-rhel-8-x86_64-rpms -Repository 'rhocp-4.9-for-rhel-8-x86_64-rpms' is enabled for this system. -time="2022-02-03T20:28:40Z" level=warning msg="Adding metacopy option, configured globally" ---> 03927607ebd -STEP 6/9: RUN yum -y update -Updating Subscription Management repositories. - -... - -Upgraded: - systemd-239-51.el8_5.3.x86_64 systemd-libs-239-51.el8_5.3.x86_64 - systemd-pam-239-51.el8_5.3.x86_64 -Installed: - diffutils-3.6-6.el8.x86_64 libxkbcommon-0.9.1-1.el8.x86_64 - xkeyboard-config-2.28-1.el8.noarch - -Complete! -time="2022-02-03T20:29:05Z" level=warning msg="Adding metacopy option, configured globally" ---> db57e92ff63 -STEP 7/9: RUN yum install -y openshift-clients.x86_64 -Updating Subscription Management repositories. - -... - -Installed: - bash-completion-1:2.7-5.el8.noarch - libpkgconf-1.4.2-1.el8.x86_64 - openshift-clients-4.9.0-202201211735.p0.g3f16530.assembly.stream.el8.x86_64 - pkgconf-1.4.2-1.el8.x86_64 - pkgconf-m4-1.4.2-1.el8.noarch - pkgconf-pkg-config-1.4.2-1.el8.x86_64 - -Complete! -time="2022-02-03T20:29:19Z" level=warning msg="Adding metacopy option, configured globally" ---> 609507b059e -STEP 8/9: ENV "OPENSHIFT_BUILD_NAME"="my-csi-bc-1" "OPENSHIFT_BUILD_NAMESPACE"="my-csi-app-namespace" ---> cab2da3efc4 -STEP 9/9: LABEL "io.openshift.build.name"="my-csi-bc-1" "io.openshift.build.namespace"="my-csi-app-namespace" -COMMIT temp.builder.openshift.io/my-csi-app-namespace/my-csi-bc-1:edfe12ca ---> 821b582320b -Successfully tagged temp.builder.openshift.io/my-csi-app-namespace/my-csi-bc-1:edfe12ca -821b582320b41f1d7bab4001395133f86fa9cc99cc0b2b64c5a53f2b6750db91 -Build complete, no image push requested ----- -==== diff --git a/modules/builds-secrets-overview.adoc b/modules/builds-secrets-overview.adoc deleted file mode 100644 index 41795716e3f9..000000000000 --- a/modules/builds-secrets-overview.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -[id="builds-secrets-overview_{context}"] -= What is a secret? - -The `Secret` object type provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, `dockercfg` files, private source repository credentials, and so on. Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin or the system can use secrets to perform actions on behalf of a pod. - -.YAML Secret Object Definition - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret - namespace: my-namespace -type: Opaque <1> -data: <2> - username: dmFsdWUtMQ0K <3> - password: dmFsdWUtMg0KDQo= -stringData: <4> - hostname: myapp.mydomain.com <5> ----- -<1> Indicates the structure of the secret's key names and values. -<2> The allowable format for the keys in the `data` field must meet the guidelines in the `DNS_SUBDOMAIN` value in the Kubernetes identifiers glossary. -<3> The value associated with keys in the `data` map must be base64 encoded. -<4> Entries in the `stringData` map are converted to base64 and the entry are then moved to the `data` map automatically. This field is write-only. The value is only be returned by the `data` field. -<5> The value associated with keys in the `stringData` map is made up of plain text strings. 
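As an aside grounded in callout <4> above: the same secret can be written with only the `stringData` field, in which case the API server base64-encodes the entries and stores them under `data` when the object is created. The following is a minimal sketch; the user name, password, and hostname values are placeholders.

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: test-secret
  namespace: my-namespace
type: Opaque
stringData:                        # plain-text entries; stored base64 encoded under 'data' on creation
  username: my-user                # placeholder value
  password: my-password            # placeholder value
  hostname: myapp.mydomain.com
----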
- -[id="builds-secrets-overview-properties_{context}"] -== Properties of secrets - -Key properties include: - -* Secret data can be referenced independently from its definition. -* Secret data volumes are backed by temporary file-storage facilities (tmpfs) and never come to rest on a node. -* Secret data can be shared within a namespace. - -[id="builds-secrets-overview-types_{context}"] -== Types of Secrets - -The value in the `type` field indicates the structure of the secret's key names and values. The type can be used to enforce the presence of user names and keys in the secret object. If you do not want validation, use the `opaque` type, which is the default. - -Specify one of the following types to trigger minimal server-side validation to ensure the presence of specific key names in the secret data: - -* `kubernetes.io/service-account-token`. Uses a service account token. -* `kubernetes.io/dockercfg`. Uses the `.dockercfg` file for required Docker credentials. -* `kubernetes.io/dockerconfigjson`. Uses the `.docker/config.json` file for required Docker credentials. -* `kubernetes.io/basic-auth`. Use with basic authentication. -* `kubernetes.io/ssh-auth`. Use with SSH key authentication. -* `kubernetes.io/tls`. Use with TLS certificate authorities. - -Specify `type= Opaque` if you do not want validation, which means the secret does not claim to conform to any convention for key names or values. An `opaque` secret, allows for unstructured `key:value` pairs that can contain arbitrary values. - -[NOTE] -==== -You can specify other arbitrary types, such as `example.com/my-secret-type`. These types are not enforced server-side, but indicate that the creator of the -secret intended to conform to the key/value requirements of that type. -==== - -[id="builds-secrets-overview-updates_{context}"] -== Updates to secrets - -When you modify the value of a secret, the value used by an already running pod does not dynamically change. To change a secret, you must delete the original pod and create a new pod, in some cases with an identical `PodSpec`. - -Updating a secret follows the same workflow as deploying a new container image. You can use the `kubectl rolling-update` command. - -The `resourceVersion` value in a secret is not specified when it is referenced. Therefore, if a secret is updated at the same time as pods are starting, the version of the secret that is used for the pod is not defined. - -[NOTE] -==== -Currently, it is not possible to check the resource version of a secret object that was used when a pod was created. It is planned that pods report this information, so that a controller could restart ones using an old `resourceVersion`. In the interim, do not update the data of existing secrets, but create new ones with distinct names. -==== diff --git a/modules/builds-secrets-restrictions.adoc b/modules/builds-secrets-restrictions.adoc deleted file mode 100644 index c26236140854..000000000000 --- a/modules/builds-secrets-restrictions.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -[id="builds-secrets-restrictions_{context}"] -= Secrets restrictions - -To use a secret, a pod needs to reference the secret. A secret can be used with a pod in three ways: - -* To populate environment variables for containers. -* As files in a volume mounted on one or more of its containers. -* By kubelet when pulling images for the pod. - -Volume type secrets write data into the container as a file using the volume mechanism. 
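To make the first two options concrete, the following minimal pod sketch consumes the `test-secret` object shown earlier both as an environment variable and as files in a mounted volume. The pod name, container name, and image are illustrative placeholders, not values from the original module.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: secret-example-pod
spec:
  containers:
  - name: app
    image: registry.example.com/my-app:latest   # placeholder image
    env:
    - name: APP_USERNAME
      valueFrom:
        secretKeyRef:              # populates an environment variable from one key of the secret
          name: test-secret
          key: username
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret-volume
      readOnly: true
  volumes:
  - name: secret-volume
    secret:                        # mounts each key of the secret as a file under the mount path
      secretName: test-secret
----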
`imagePullSecrets` use service accounts for the automatic injection of the secret into all pods in a namespaces. - -When a template contains a secret definition, the only way for the template to use the provided secret is to ensure that the secret volume sources are validated and that the specified object reference actually points to an object of type `Secret`. Therefore, a secret needs to be created before any pods that depend on it. The most effective way to ensure this is to have it get injected automatically through the use of a service account. - -Secret API objects reside in a namespace. They can only be referenced by pods in that same namespace. - -Individual secrets are limited to 1MB in size. This is to discourage the creation of large secrets that would exhaust apiserver and kubelet memory. However, creation of a number of smaller secrets could also exhaust memory. diff --git a/modules/builds-service-serving-certificate-secrets.adoc b/modules/builds-service-serving-certificate-secrets.adoc deleted file mode 100644 index d1c3ca70611d..000000000000 --- a/modules/builds-service-serving-certificate-secrets.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-service-serving-certificate-secrets_{context}"] -= Service serving certificate secrets - -Service serving certificate secrets are intended to support complex middleware applications that need out-of-the-box certificates. It has the same settings as the server certificates generated by the administrator tooling for nodes and masters. - -.Procedure - -To secure communication to your service, have the cluster generate a signed serving certificate/key pair into a secret in your namespace. - -* Set the `service.beta.openshift.io/serving-cert-secret-name` annotation on your service with the value set to the name you want to use for your secret. -+ -Then, your `PodSpec` can mount that secret. When it is available, your pod runs. The certificate is good for the internal service DNS name, `..svc`. -+ -The certificate and key are in PEM format, stored in `tls.crt` and `tls.key` respectively. The certificate/key pair is automatically replaced when it gets close to expiration. View the expiration date in the `service.beta.openshift.io/expiry` annotation on the secret, which is in RFC3339 format. - -[NOTE] -==== -In most cases, the service DNS name `..svc` is not externally routable. The primary use of `..svc` is for intracluster or intraservice communication, and with re-encrypt routes. -==== - -Other pods can trust cluster-created certificates, which are only signed for -internal DNS names, by using the certificate authority (CA) bundle in the `/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt` file that is automatically mounted in their pod. - -The signature algorithm for this feature is `x509.SHA256WithRSA`. To manually rotate, delete the generated secret. A new certificate is created. diff --git a/modules/builds-setting-build-resources.adoc b/modules/builds-setting-build-resources.adoc deleted file mode 100644 index 65cb9157f799..000000000000 --- a/modules/builds-setting-build-resources.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-setting-build-resources_{context}"] -= Setting build resources - -By default, builds are completed by pods using unbound resources, such as memory and CPU. 
These resources can be limited. - -.Procedure - -You can limit resource use in two ways: - -* Limit resource use by specifying resource limits in the default container limits of a project. -* Limit resource use by specifying resource limits as part of the build configuration. ** In the following example, each of the `resources`, `cpu`, and `memory` parameters are optional: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "BuildConfig" -metadata: - name: "sample-build" -spec: - resources: - limits: - cpu: "100m" <1> - memory: "256Mi" <2> ----- -<1> `cpu` is in CPU units: `100m` represents 0.1 CPU units (100 * 1e-3). -<2> `memory` is in bytes: `256Mi` represents 268435456 bytes (256 * 2 ^ 20). -+ -However, if a quota has been defined for your project, one of the following two items is required: -+ -*** A `resources` section set with an explicit `requests`: -+ -[source,yaml] ----- -resources: - requests: <1> - cpu: "100m" - memory: "256Mi" ----- -<1> The `requests` object contains the list of resources that correspond to the list of resources in the quota. -+ -*** A limit range defined in your project, where the defaults from the `LimitRange` object apply to pods created during the build process. -+ -Otherwise, build pod creation will fail, citing a failure to satisfy quota. diff --git a/modules/builds-setting-maximum-duration.adoc b/modules/builds-setting-maximum-duration.adoc deleted file mode 100644 index 7bf9472791d3..000000000000 --- a/modules/builds-setting-maximum-duration.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/advanced-build-operations.adoc - -:_content-type: PROCEDURE -[id="builds-setting-maximum-duration_{context}"] -= Setting maximum duration - -When defining a `BuildConfig` object, you can define its maximum duration by setting the `completionDeadlineSeconds` field. It is specified in seconds and is not set by default. When not set, there is no maximum duration enforced. - -The maximum duration is counted from the time when a build pod gets scheduled in the system, and defines how long it can be active, including the time needed to pull the builder image. After reaching the specified timeout, the build is terminated by {product-title}. - -.Procedure - -* To set maximum duration, specify `completionDeadlineSeconds` in your `BuildConfig`. The following example shows the part of a `BuildConfig` specifying `completionDeadlineSeconds` field for 30 minutes: -+ -[source,yaml] ----- -spec: - completionDeadlineSeconds: 1800 ----- - -[NOTE] -==== -This setting is not supported with the Pipeline Strategy option. -==== diff --git a/modules/builds-setting-triggers-manually.adoc b/modules/builds-setting-triggers-manually.adoc deleted file mode 100644 index 39037726d40b..000000000000 --- a/modules/builds-setting-triggers-manually.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-setting-triggers-manually_{context}"] -= Setting triggers manually - -Triggers can be added to and removed from build configurations with `oc set triggers`. 
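For orientation only: the `oc set triggers` commands in the following procedure edit the `spec.triggers` array of the build configuration. A rough sketch of a GitHub webhook trigger and an image change trigger in YAML form follows; the webhook secret value is a placeholder.

[source,yaml]
----
spec:
  triggers:
  - type: GitHub
    github:
      secret: secret101            # placeholder webhook secret
  - type: ImageChange
    imageChange: {}                # rebuilds when the strategy's builder image stream tag updates
----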
- -.Procedure - -* To set a GitHub webhook trigger on a build configuration, use: -+ -[source,terminal] ----- -$ oc set triggers bc --from-github ----- - -* To set an imagechange trigger, use: -+ -[source,terminal] ----- -$ oc set triggers bc --from-image='' ----- - -* To remove a trigger, add `--remove`: -+ -[source,terminal] ----- -$ oc set triggers bc --from-bitbucket --remove ----- - -[NOTE] -==== -When a webhook trigger already exists, adding it again regenerates the webhook secret. -==== - -For more information, consult the help documentation with by running: - -[source,terminal] ----- -$ oc set triggers --help ----- diff --git a/modules/builds-source-code.adoc b/modules/builds-source-code.adoc deleted file mode 100644 index 41ec0a214aae..000000000000 --- a/modules/builds-source-code.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -//* builds/creating-build-inputs.adoc - -[id="builds-source-code_{context}"] -= Git source - -When specified, source code is fetched from the supplied location. - -ifndef::openshift-online[] -If you supply an inline Dockerfile, it overwrites the Dockerfile in the `contextDir` of the Git repository. -endif::[] - -The source definition is part of the `spec` section in the `BuildConfig`: - -[source,yaml] ----- -source: - git: <1> - uri: "https://github.com/openshift/ruby-hello-world" - ref: "master" - contextDir: "app/dir" <2> -ifndef::openshift-online[] - dockerfile: "FROM openshift/ruby-22-centos7\nUSER example" <3> -endif::[] ----- -<1> The `git` field contains the URI to the remote Git repository of the source code. Optionally, specify the `ref` field to check out a specific Git reference. A valid `ref` can be a SHA1 tag or a branch name. -<2> The `contextDir` field allows you to override the default location inside the source code repository where the build looks for the application source code. If your application exists inside a sub-directory, you can override the default location (the root folder) using this field. -ifndef::openshift-online[] -<3> If the optional `dockerfile` field is provided, it should be a string containing a Dockerfile that overwrites any Dockerfile that may exist in the source repository. -endif::[] - -If the `ref` field denotes a pull request, the system uses a `git fetch` operation and then checkout `FETCH_HEAD`. - -When no `ref` value is provided, {product-title} performs a shallow clone (`--depth=1`). In this case, only the files associated with the most recent commit on the default branch (typically `master`) are downloaded. This results in repositories downloading faster, but without the full commit history. To perform a full `git clone` of the default branch of a specified repository, set `ref` to the name of the default branch (for example `master`). - - -[WARNING] -==== -Git clone operations that go through a proxy that is performing man in the middle (MITM) TLS hijacking or reencrypting of the proxied connection do not work. 
-==== diff --git a/modules/builds-source-input-satellite-config.adoc b/modules/builds-source-input-satellite-config.adoc deleted file mode 100644 index f386639992e6..000000000000 --- a/modules/builds-source-input-satellite-config.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-source-input-satellite-config_{context}"] -= Adding Red Hat Satellite configurations to builds - -Builds that use Red Hat Satellite to install content must provide appropriate configurations to obtain content from Satellite repositories. - -.Prerequisites - -* You must provide or create a `yum`-compatible repository configuration file that downloads content from your Satellite instance. -+ -.Sample repository configuration -+ -[source,terminal] ----- -[test-] -name=test- -baseurl = https://satellite.../content/dist/rhel/server/7/7Server/x86_64/os -enabled=1 -gpgcheck=0 -sslverify=0 -sslclientkey = /etc/pki/entitlement/...-key.pem -sslclientcert = /etc/pki/entitlement/....pem ----- - -.Procedure - -. Create a `ConfigMap` containing the Satellite repository configuration file: -+ -[source,terminal] ----- -$ oc create configmap yum-repos-d --from-file /path/to/satellite.repo ----- - -. Add the Satellite repository configuration and entitlement key as a build volumes: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ubi:latest - volumes: - - name: yum-repos-d - mounts: - - destinationPath: /etc/yum.repos.d - source: - type: ConfigMap - configMap: - name: yum-repos-d - - name: etc-pki-entitlement - mounts: - - destinationPath: /etc/pki/entitlement - source: - type: Secret - secret: - secretName: etc-pki-entitlement ----- diff --git a/modules/builds-source-input-subman-config.adoc b/modules/builds-source-input-subman-config.adoc deleted file mode 100644 index 5f04670bd739..000000000000 --- a/modules/builds-source-input-subman-config.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -[id="builds-source-input-subman-config_{context}"] -= Adding Subscription Manager configurations to builds - -Builds that use the Subscription Manager to install content must provide appropriate configuration files and certificate authorities for subscribed repositories. - -.Prerequisites - -You must have access to the Subscription Manager's configuration and certificate authority files. - -.Procedure - -. Create a `ConfigMap` for the Subscription Manager configuration: -+ -[source,terminal] ----- -$ oc create configmap rhsm-conf --from-file /path/to/rhsm/rhsm.conf ----- - -. Create a `ConfigMap` for the certificate authority: -+ -[source,terminal] ----- -$ oc create configmap rhsm-ca --from-file /path/to/rhsm/ca/redhat-uep.pem ----- - -. 
Add the Subscription Manager configuration and certificate authority to the -`BuildConfig`: -+ -[source,yaml] ----- -source: - configMaps: - - configMap: - name: rhsm-conf - destinationDir: rhsm-conf - - configMap: - name: rhsm-ca - destinationDir: rhsm-ca ----- diff --git a/modules/builds-source-secret-basic-auth.adoc b/modules/builds-source-secret-basic-auth.adoc deleted file mode 100644 index 19d84dc40173..000000000000 --- a/modules/builds-source-secret-basic-auth.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-basic-auth_{context}"] -= Creating a secret from source code basic authentication - -Basic authentication requires either a combination of `--username` and `--password`, or a token to authenticate against the software configuration management (SCM) server. - -.Prerequisites - -* User name and password to access the private repository. - -.Procedure - -. Create the secret first before using the `--username` and `--password` to access the private repository: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --type=kubernetes.io/basic-auth ----- -+ -. Create a basic authentication secret with a token: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=password= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-ca.adoc b/modules/builds-source-secret-combinations-basic-auth-ca.adoc deleted file mode 100644 index 6c4e755705ab..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-ca.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-ca_{context}"] -= Creating a basic authentication secret with a CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication and certificate authority (CA) certificate. - -.Prerequisites - -* Basic authentication credentials -* CA certificate - -.Procedure - -* Create a basic authentication secret with a CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file=ca-cert= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc b/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc deleted file mode 100644 index a7236a9a7dab..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-gitconfig-ca.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-gitconfig-ca_{context}"] -= Creating a basic authentication secret with a .gitconfig file and CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication, `.gitconfig` file, and certificate authority (CA) certificate. 
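As a point of reference, and not part of the original procedure: the secrets produced by these `oc create secret generic` commands are ordinary `kubernetes.io/basic-auth` secrets and can also be declared in YAML. The sketch below reuses the `basicsecret` name from the earlier `sourceSecret` example; the credential values are placeholders.

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: basicsecret
type: kubernetes.io/basic-auth
stringData:
  username: my-user                # placeholder user name
  password: my-password            # placeholder password or token
----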
- -.Prerequisites - -* Basic authentication credentials -* `.gitconfig` file -* CA certificate - -.Procedure - -* To create a basic authentication secret with a `.gitconfig` file and CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file= \ - --from-file=ca-cert= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc b/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc deleted file mode 100644 index 8df61ec39b53..000000000000 --- a/modules/builds-source-secret-combinations-basic-auth-gitconfig.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-basic-auth-gitconfig_{context}"] -= Creating a basic authentication secret with a .gitconfig file - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a basic authentication and `.gitconfig` file. - -.Prerequisites - -* Basic authentication credentials -* `.gitconfig` file - -.Procedure - -* To create a basic authentication secret with a `.gitconfig` file, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-literal=username= \ - --from-literal=password= \ - --from-file= \ - --type=kubernetes.io/basic-auth ----- diff --git a/modules/builds-source-secret-combinations-gitconfig-ca.adoc b/modules/builds-source-secret-combinations-gitconfig-ca.adoc deleted file mode 100644 index debed8750cf9..000000000000 --- a/modules/builds-source-secret-combinations-gitconfig-ca.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-gitconfig-ca_{context}"] -= Creating a secret that combines a .gitconfig file and CA certificate - -You can combine the different methods for creating source clone secrets for your specific needs, such as a secret that combines a `.gitconfig` file and certificate authority (CA) certificate. - -.Prerequisites - -* .gitconfig file -* CA certificate - -.Procedure - -* To create a secret that combines a `.gitconfig` file and CA certificate, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ca.crt= \ - --from-file= ----- diff --git a/modules/builds-source-secret-combinations-ssh-gitconfig.adoc b/modules/builds-source-secret-combinations-ssh-gitconfig.adoc deleted file mode 100644 index 8c82cc2f5c19..000000000000 --- a/modules/builds-source-secret-combinations-ssh-gitconfig.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-combinations-ssh-gitconfig_{context}"] -= Creating a SSH-based authentication secret with a `.gitconfig` file - -You can combine the different methods for creating source clone secrets for your specific needs, such as a SSH-based authentication secret with a `.gitconfig` file. 
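Similarly, and only as a hedged sketch: an SSH-based source clone secret uses the `kubernetes.io/ssh-auth` type with the private key stored under the `ssh-privatekey` key, optionally alongside a `known_hosts` file. The key material below is a placeholder, not a real key.

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: sshsecret
type: kubernetes.io/ssh-auth
stringData:
  ssh-privatekey: |                # placeholder; paste the private key contents here
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...
  known_hosts: |                   # optional; enables strict host key checking
    github.com ssh-ed25519 AAAA...
----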
- -.Prerequisites - -* SSH authentication -* .gitconfig file - -.Procedure - -* To create a SSH-based authentication secret with a `.gitconfig` file, run: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ssh-privatekey= \ - --from-file= \ - --type=kubernetes.io/ssh-auth ----- diff --git a/modules/builds-source-secret-combinations.adoc b/modules/builds-source-secret-combinations.adoc deleted file mode 100644 index 128dc9356a8a..000000000000 --- a/modules/builds-source-secret-combinations.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-source-secret-combinations_{context}"] -= Source secret combinations - -You can combine the different methods for creating source clone secrets for your specific needs. diff --git a/modules/builds-source-secret-ssh-key-auth.adoc b/modules/builds-source-secret-ssh-key-auth.adoc deleted file mode 100644 index f85a5927b7c5..000000000000 --- a/modules/builds-source-secret-ssh-key-auth.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-ssh-key-auth_{context}"] -= Creating a secret from source code SSH key authentication - -SSH key based authentication requires a private SSH key. - -The repository keys are usually located in the `$HOME/.ssh/` directory, and are named `id_dsa.pub`, `id_ecdsa.pub`, `id_ed25519.pub`, or `id_rsa.pub` by default. - -.Procedure - -. Generate SSH key credentials: -+ -[source,terminal] ----- -$ ssh-keygen -t ed25519 -C "your_email@example.com" ----- -+ -[NOTE] -==== -Creating a passphrase for the SSH key prevents {product-title} from building. When prompted for a passphrase, leave it blank. -==== -+ -Two files are created: the public key and a corresponding private key (one of `id_dsa`, `id_ecdsa`, `id_ed25519`, or `id_rsa`). With both of these in place, consult your source control management (SCM) system's manual on how to upload -the public key. The private key is used to access your private repository. -+ -. Before using the SSH key to access the private repository, create the secret: -+ -[source,terminal] ----- -$ oc create secret generic \ - --from-file=ssh-privatekey= \ - --from-file= \ <1> - --type=kubernetes.io/ssh-auth ----- -<1> Optional: Adding this field enables strict server host key check. -+ -[WARNING] -==== -Skipping the `known_hosts` file while creating the secret makes the build vulnerable to a potential man-in-the-middle (MITM) attack. -==== -+ -[NOTE] -==== -Ensure that the `known_hosts` file includes an entry for the host of your source code. -==== diff --git a/modules/builds-source-secret-trusted-ca.adoc b/modules/builds-source-secret-trusted-ca.adoc deleted file mode 100644 index d4ab011b1739..000000000000 --- a/modules/builds-source-secret-trusted-ca.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-source-secret-trusted-ca_{context}"] -= Creating a secret from source code trusted certificate authorities - -The set of Transport Layer Security (TLS) certificate authorities (CA) that are trusted during a Git clone operation are built into the {product-title} infrastructure images. 
If your Git server uses a self-signed certificate or one signed by an authority not trusted by the image, you can create a secret that contains the certificate or disable TLS verification. - -If you create a secret for the CA certificate, {product-title} uses it to access your Git server during the Git clone operation. Using this method is significantly more secure than disabling Git SSL verification, which accepts any TLS certificate that is presented. - -.Procedure - -Create a secret with a CA certificate file. - -. If your CA uses Intermediate Certificate Authorities, combine the certificates for all CAs in a `ca.crt` file. Enter the following command: -+ -[source,terminal] ----- -$ cat intermediateCA.crt intermediateCA.crt rootCA.crt > ca.crt ----- - -.. Create the secret: -+ -[source,terminal] ----- -$ oc create secret generic mycert --from-file=ca.crt= <1> ----- -<1> You must use the key name `ca.crt`. diff --git a/modules/builds-source-secrets-entitlements.adoc b/modules/builds-source-secrets-entitlements.adoc deleted file mode 100644 index d96bbdaa2285..000000000000 --- a/modules/builds-source-secrets-entitlements.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-source-secrets-entitlements_{context}"] -= Adding subscription entitlements as a build secret - -Builds that use Red Hat subscriptions to install content must include the entitlement keys as a build secret. - -.Prerequisites - -You must have access to Red Hat entitlements through your subscription. The entitlement secret is automatically created by the Insights Operator. - - -[TIP] -==== -When you perform an Entitlement Build using {op-system-base-full} 7, you must have the following instructions in your Dockerfile before you run any `yum` commands: - -[source,terminal] ----- -RUN rm /etc/rhsm-host ----- -==== - -.Procedure - -. Add the etc-pki-entitlement secret as a build volume in the build configuration’s Docker strategy: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ubi:latest - volumes: - - name: etc-pki-entitlement - mounts: - - destinationPath: /etc/pki/entitlement - source: - type: Secret - secret: - secretName: etc-pki-entitlement ----- diff --git a/modules/builds-source-to-image.adoc b/modules/builds-source-to-image.adoc deleted file mode 100644 index fa824fd6a69b..000000000000 --- a/modules/builds-source-to-image.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-source-to-image_{context}"] -= Source-to-image strategy - -When using a `Source` strategy, all defined input secrets are copied to their respective `destinationDir`. If you left `destinationDir` empty, then the secrets are placed in the working directory of the builder image. - -The same rule is used when a `destinationDir` is a relative path. The secrets are placed in the paths that are relative to the working directory of the image. The final directory in the `destinationDir` path is created if it does not exist in the builder image. All preceding directories in the `destinationDir` must exist, or an error will occur. - -[NOTE] -==== -Input secrets are added as world-writable, have `0666` permissions, and are truncated to size zero after executing the `assemble` script. This means that the secret files exist in the resulting image, but they are empty for security reasons. 
- -Input config maps are not truncated after the `assemble` script completes. -==== diff --git a/modules/builds-strategy-custom-build.adoc b/modules/builds-strategy-custom-build.adoc deleted file mode 100644 index e21f003af6d3..000000000000 --- a/modules/builds-strategy-custom-build.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-strategies.adoc - -[id="builds-strategy-custom-build_{context}"] -= Custom build - -The custom build strategy allows developers to define a specific builder image responsible for the entire build process. Using your own builder image allows you to customize your build process. - -A custom builder image is a plain container image embedded with build process logic, for example for building RPMs or base images. - -Custom builds run with a high level of privilege and are not available to users by default. Only users who can be trusted with cluster administration permissions should be granted access to run custom builds. diff --git a/modules/builds-strategy-custom-environment-variables.adoc b/modules/builds-strategy-custom-environment-variables.adoc deleted file mode 100644 index 0f34f2d3a76d..000000000000 --- a/modules/builds-strategy-custom-environment-variables.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-environment-variables_{context}"] -= Using environment variables for custom builds - -To make environment variables available to the custom build process, you can add environment variables to the `customStrategy` definition of the build configuration. - -The environment variables defined there are passed to the pod that runs the -custom build. - -.Procedure - -. Define a custom HTTP proxy to be used during build: -+ -[source,yaml] ----- -customStrategy: -... - env: - - name: "HTTP_PROXY" - value: "http://myproxy.net:5187/" ----- -+ -. To manage environment variables defined in the build configuration, enter the following command: -+ -[source,terminal] ----- -$ oc set env ----- diff --git a/modules/builds-strategy-custom-from-image.adoc b/modules/builds-strategy-custom-from-image.adoc deleted file mode 100644 index 944868031aa8..000000000000 --- a/modules/builds-strategy-custom-from-image.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-from-image_{context}"] -= Using FROM image for custom builds - -You can use the `customStrategy.from` section to indicate the image to use for the custom build - -.Procedure - -* Set the `customStrategy.from` section: -+ -[source,yaml] ----- -strategy: - customStrategy: - from: - kind: "DockerImage" - name: "openshift/sti-image-builder" ----- diff --git a/modules/builds-strategy-custom-secrets.adoc b/modules/builds-strategy-custom-secrets.adoc deleted file mode 100644 index 5689e522dcad..000000000000 --- a/modules/builds-strategy-custom-secrets.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-custom-secrets_{context}"] -= Using secrets in custom builds - -In addition to secrets for source and images that can be added to all build types, custom strategies allow adding an arbitrary list of secrets to the builder pod. 
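Pulling the two preceding snippets together, a `customStrategy` stanza that sets both the builder image and a build-time environment variable might look like the following sketch; the image name and proxy URL are simply the illustrative values used above.

[source,yaml]
----
strategy:
  customStrategy:
    from:
      kind: "DockerImage"
      name: "openshift/sti-image-builder"    # builder image from the FROM example above
    env:
      - name: "HTTP_PROXY"
        value: "http://myproxy.net:5187/"    # proxy value from the environment variable example above
----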
- -.Procedure - -* To mount each secret at a specific location, edit the `secretSource` and `mountPath` fields of the `strategy` YAML file: -+ -[source,yaml] ----- -strategy: - customStrategy: - secrets: - - secretSource: <1> - name: "secret1" - mountPath: "/tmp/secret1" <2> - - secretSource: - name: "secret2" - mountPath: "/tmp/secret2" ----- -<1> `secretSource` is a reference to a secret in the same namespace as the build. -<2> `mountPath` is the path inside the custom builder where the secret should be mounted. diff --git a/modules/builds-strategy-docker-build-arguments.adoc b/modules/builds-strategy-docker-build-arguments.adoc deleted file mode 100644 index b43dbe77cee7..000000000000 --- a/modules/builds-strategy-docker-build-arguments.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-build-arguments_{context}"] -= Adding docker build arguments - -You can set link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[docker build arguments] using the `buildArgs` array. The build arguments are passed to docker when a build is started. - -[TIP] -==== -See link:https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact[Understand how ARG and FROM interact] in the Dockerfile reference documentation. -==== - -.Procedure - -To set docker build arguments, add entries to the `buildArgs` array, which is located in the `dockerStrategy` definition of the `BuildConfig` object. For example: - -[source,yaml] ----- -dockerStrategy: -... - buildArgs: - - name: "foo" - value: "bar" ----- - -[NOTE] -==== -Only the `name` and `value` fields are supported. Any settings on the `valueFrom` field are ignored. -==== diff --git a/modules/builds-strategy-docker-build.adoc b/modules/builds-strategy-docker-build.adoc deleted file mode 100644 index 89be74d57726..000000000000 --- a/modules/builds-strategy-docker-build.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -//*builds/build-strategies.adoc -//*builds/understanding-image-builds - -[id="builds-strategy-docker-build_{context}"] -= Docker build - -{product-title} uses Buildah to build a container image from a Dockerfile. For more information on building container images with Dockerfiles, see link:https://docs.docker.com/engine/reference/builder/[the Dockerfile reference documentation]. - -[TIP] -==== -If you set Docker build arguments by using the `buildArgs` array, see link:https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact[Understand how ARG and FROM interact] in the Dockerfile reference documentation. -==== diff --git a/modules/builds-strategy-docker-entitled-satellite.adoc b/modules/builds-strategy-docker-entitled-satellite.adoc deleted file mode 100644 index 845c1c9b4861..000000000000 --- a/modules/builds-strategy-docker-entitled-satellite.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-entitled-satellite_{context}"] -= Docker builds using Red Hat Satellite subscriptions - -Docker strategy builds can use Red Hat Satellite repositories to install subscription content. - -.Prerequisites - -* You have added the entitlement keys and Satellite repository configurations as build volumes. 
- -.Procedure - -Use the following as an example Dockerfile to install content with Satellite: - -[source,terminal] ----- -FROM registry.redhat.io/ubi9/ubi:latest -RUN dnf search kernel-devel --showduplicates && \ - dnf install -y kernel-devel ----- diff --git a/modules/builds-strategy-docker-entitled-subman.adoc b/modules/builds-strategy-docker-entitled-subman.adoc deleted file mode 100644 index 3a52967a0c7f..000000000000 --- a/modules/builds-strategy-docker-entitled-subman.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-entitled-subman_{context}"] -= Docker builds using Subscription Manager - -Docker strategy builds can use the Subscription Manager to install subscription content. - -.Prerequisites - -The entitlement keys must be added as build strategy volumes. - -.Procedure - -Use the following as an example Dockerfile to install content with the Subscription Manager: - -[source,terminal] ----- -FROM registry.redhat.io/ubi9/ubi:latest -RUN dnf search kernel-devel --showduplicates && \ - dnf install -y kernel-devel ----- diff --git a/modules/builds-strategy-docker-environment-variables.adoc b/modules/builds-strategy-docker-environment-variables.adoc deleted file mode 100644 index d5d023f8b213..000000000000 --- a/modules/builds-strategy-docker-environment-variables.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-environment-variables_{context}"] -= Using docker environment variables - -To make environment variables available to the docker build process and resulting image, you can add environment variables to the `dockerStrategy` definition of the build configuration. - -The environment variables defined there are inserted as a single `ENV` Dockerfile instruction right after the `FROM` instruction, so that it can be referenced later on within the Dockerfile. - -.Procedure - -The variables are defined during build and stay in the output image, therefore they will be present in any container that runs that image as well. - -For example, defining a custom HTTP proxy to be used during build and runtime: - -[source,yaml] ----- -dockerStrategy: -... - env: - - name: "HTTP_PROXY" - value: "http://myproxy.net:5187/" ----- - -You can also manage environment variables defined in the build configuration with the `oc set env` command. diff --git a/modules/builds-strategy-docker-force-pull-example.adoc b/modules/builds-strategy-docker-force-pull-example.adoc deleted file mode 100644 index a6d63f81a576..000000000000 --- a/modules/builds-strategy-docker-force-pull-example.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-docker-force-pull-example_{context}"] -= Docker force pull flag example - -Set the following to use the `forcePull` flag with Docker: - -[source,yaml] ----- -strategy: - dockerStrategy: - forcePull: true <1> ----- -<1> This flag causes the local builder image to be ignored, and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. 
diff --git a/modules/builds-strategy-docker-from-image.adoc b/modules/builds-strategy-docker-from-image.adoc deleted file mode 100644 index 5a20f963a70b..000000000000 --- a/modules/builds-strategy-docker-from-image.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-from-image_{context}"] -= Replacing Dockerfile FROM image - -You can replace the `FROM` instruction of the Dockerfile with the `from` of the `BuildConfig` object. If the Dockerfile uses multi-stage builds, the image in the last `FROM` instruction will be replaced. - -.Procedure - -To replace the `FROM` instruction of the Dockerfile with the `from` of the `BuildConfig`. - -[source,yaml] ----- -strategy: - dockerStrategy: - from: - kind: "ImageStreamTag" - name: "debian:latest" ----- diff --git a/modules/builds-strategy-docker-squash-layers.adoc b/modules/builds-strategy-docker-squash-layers.adoc deleted file mode 100644 index f4f5eeab55ae..000000000000 --- a/modules/builds-strategy-docker-squash-layers.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-docker-squash-layers_{context}"] -= Squashing layers with docker builds - -Docker builds normally create a layer representing each instruction in a Dockerfile. Setting the `imageOptimizationPolicy` to `SkipLayers` merges all instructions into a single layer on top of the base image. - -.Procedure - -* Set the `imageOptimizationPolicy` to `SkipLayers`: -+ -[source,yaml] ----- -strategy: - dockerStrategy: - imageOptimizationPolicy: SkipLayers ----- diff --git a/modules/builds-strategy-dockerfile-path.adoc b/modules/builds-strategy-dockerfile-path.adoc deleted file mode 100644 index 588e23dc482b..000000000000 --- a/modules/builds-strategy-dockerfile-path.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-dockerfile-path_{context}"] -= Using Dockerfile path - -By default, docker builds use a Dockerfile located at the root of the context specified in the `BuildConfig.spec.source.contextDir` field. - -The `dockerfilePath` field allows the build to use a different path to locate your Dockerfile, relative to the `BuildConfig.spec.source.contextDir` field. It can be a different file name than the default Dockerfile, such as `MyDockerfile`, or a path to a Dockerfile in a subdirectory, such as `dockerfiles/app1/Dockerfile`. - -.Procedure - -To use the `dockerfilePath` field for the build to use a different path to locate your Dockerfile, set: - -[source,yaml] ----- -strategy: - dockerStrategy: - dockerfilePath: dockerfiles/app1/Dockerfile ----- diff --git a/modules/builds-strategy-enable-pulling-pushing.adoc b/modules/builds-strategy-enable-pulling-pushing.adoc deleted file mode 100644 index 93e9d9b706d7..000000000000 --- a/modules/builds-strategy-enable-pulling-pushing.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/running-entitled-builds.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-enable-pulling-pushing_{context}"] -= Enabling pulling and pushing - -You can enable pulling to a private registry by setting the pull secret and pushing by setting the push secret in the build configuration. 
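Because this module stops short of a concrete example, here is a hedged sketch only: in a `BuildConfig`, the push secret is referenced under `spec.output.pushSecret` and the pull secret under the strategy definition, for example `dockerStrategy.pullSecret`. The registry hosts and secret names below are placeholders.

[source,yaml]
----
spec:
  output:
    to:
      kind: "DockerImage"
      name: "registry.example.com/my-app:latest"      # placeholder push target
    pushSecret:
      name: "push-secret"                              # placeholder secret holding push credentials
  strategy:
    dockerStrategy:
      from:
        kind: "DockerImage"
        name: "registry.example.com/builder:latest"    # placeholder private builder image
      pullSecret:
        name: "pull-secret"                            # placeholder secret holding pull credentials
----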
- -.Procedure - -To enable pulling to a private registry: - -* Set the pull secret in the build configuration. - -To enable pushing: - - * Set the push secret in the build configuration. - -//// -[NOTE] -==== -This module needs specific instructions and examples. And needs to be used for Docker and S2I. -==== -//// diff --git a/modules/builds-strategy-force-pull-procedure.adoc b/modules/builds-strategy-force-pull-procedure.adoc deleted file mode 100644 index bfc8efea84a1..000000000000 --- a/modules/builds-strategy-force-pull-procedure.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -[id="builds-strategy-force-pull-procedure_{context}"] -= Using the force pull flag - -By default, if the builder image specified in the build configuration is available locally on the node, that image will be used. However, you can use the `forcepull` flag to override the local image and refresh it from the registry. - -.Procedure - -To override the local image and refresh it from the registry to which the image stream points, create a `BuildConfig` with the `forcePull` flag set to `true`. diff --git a/modules/builds-strategy-pipeline-build.adoc b/modules/builds-strategy-pipeline-build.adoc deleted file mode 100644 index a1caef9af190..000000000000 --- a/modules/builds-strategy-pipeline-build.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -//*builds/build-strategies.adoc -//*builds/understanding-image-builds - -[id="builds-strategy-pipeline-build_{context}"] -= Pipeline build - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -The Pipeline build strategy allows developers to define a Jenkins pipeline for use by the Jenkins pipeline plugin. The build can be started, monitored, and managed by {product-title} in the same way as any other build type. - -Pipeline workflows are defined in a `jenkinsfile`, either embedded directly in the build configuration, or supplied in a Git repository and referenced by the build configuration. - -//The first time a project defines a build configuration using a Pipeline -//strategy, {product-title} instantiates a Jenkins server to execute the -//pipeline. Subsequent Pipeline build configurations in the project share this -//Jenkins server. - -//[role="_additional-resources"] -//.Additional resources - -//* Pipeline build configurations require a Jenkins server to manage the -//pipeline execution. diff --git a/modules/builds-strategy-pipeline-environment-variables.adoc b/modules/builds-strategy-pipeline-environment-variables.adoc deleted file mode 100644 index b66b2f868efe..000000000000 --- a/modules/builds-strategy-pipeline-environment-variables.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-pipeline-environment-variables_{context}"] -= Using environment variables for pipeline builds - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. 
- -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -To make environment variables available to the Pipeline build process, you can add environment variables to the `jenkinsPipelineStrategy` definition of the build configuration. - -Once defined, the environment variables will be set as parameters for any Jenkins job associated with the build configuration. - -.Procedure - -* To define environment variables to be used during build, edit the YAML file: -+ -[source,yaml] ----- -jenkinsPipelineStrategy: -... - env: - - name: "FOO" - value: "BAR" ----- - -You can also manage environment variables defined in the build configuration with the `oc set env` command. - - - -//[NOTE] -//==== -// This module needs specific instructions and examples. -// This is similar between Docker, S2I, and Custom. -//==== diff --git a/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc b/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc deleted file mode 100644 index 93912c1694d9..000000000000 --- a/modules/builds-strategy-pipeline-mapping-buildconfig-jenkins.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-pipeline-mapping-buildconfig-jenkins_{context}"] -= Mapping between BuildConfig environment variables and Jenkins job parameters - -When a Jenkins job is created or updated based on changes to a Pipeline strategy build configuration, any environment variables in the build configuration are mapped to Jenkins job parameters definitions, where the default values for the Jenkins job parameters definitions are the current values of the associated environment variables. - -After the Jenkins job's initial creation, you can still add additional parameters to the job from the Jenkins console. The parameter names differ from the names of the environment variables in the build configuration. The parameters are honored when builds are started for those Jenkins jobs. - -How you start builds for the Jenkins job dictates how the parameters are set. - -* If you start with `oc start-build`, the values of the environment variables in the build configuration are the parameters set for the corresponding job instance. Any changes you make to the parameters' default values from the Jenkins console are ignored. The build configuration values take precedence. - -* If you start with `oc start-build -e`, the values for the environment variables specified in the `-e` option take precedence. -** If you specify an environment variable not listed in the build configuration, they will be added as a Jenkins job parameter definitions. -** Any changes you make from the Jenkins console to the parameters corresponding to the environment variables are ignored. The build configuration and what you specify with `oc start-build -e` takes precedence. - -* If you start the Jenkins job with the Jenkins console, then you can control the setting of the parameters with the Jenkins console as part of starting a build for the job. - -[NOTE] -==== -It is recommended that you specify in the build configuration all possible environment variables to be associated with job parameters. Doing so reduces disk I/O and improves performance during Jenkins processing. 
-==== diff --git a/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc b/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc deleted file mode 100644 index e1dd011463bb..000000000000 --- a/modules/builds-strategy-pipeline-providing-jenkinsfile.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-pipeline-providing-jenkinsfile_{context}"] -= Providing the Jenkins file for pipeline builds - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -The `jenkinsfile` uses the standard groovy language syntax to allow fine grained control over the configuration, build, and deployment of your application. - -You can supply the `jenkinsfile` in one of the following ways: - -* A file located within your source code repository. -* Embedded as part of your build configuration using the `jenkinsfile` field. - -When using the first option, the `jenkinsfile` must be included in your applications source code repository at one of the following locations: - -* A file named `jenkinsfile` at the root of your repository. -* A file named `jenkinsfile` at the root of the source `contextDir` of your repository. -* A file name specified via the `jenkinsfilePath` field of the `JenkinsPipelineStrategy` section of your BuildConfig, which is relative to the source `contextDir` if supplied, otherwise it defaults to the root of the repository. - -The `jenkinsfile` is run on the Jenkins agent pod, which must have the -{product-title} client binaries available if you intend to use the {product-title} DSL. - -.Procedure - -To provide the Jenkins file, you can either: - -* Embed the Jenkins file in the build configuration. -* Include in the build configuration a reference to the Git repository that contains the Jenkins file. - -.Embedded Definition -[source,yaml] ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "sample-pipeline" -spec: - strategy: - jenkinsPipelineStrategy: - jenkinsfile: |- - node('agent') { - stage 'build' - openshiftBuild(buildConfig: 'ruby-sample-build', showBuildLogs: 'true') - stage 'deploy' - openshiftDeploy(deploymentConfig: 'frontend') - } ----- - -.Reference to Git Repository -[source,yaml] ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "sample-pipeline" -spec: - source: - git: - uri: "https://github.com/openshift/ruby-hello-world" - strategy: - jenkinsPipelineStrategy: - jenkinsfilePath: some/repo/dir/filename <1> ----- -<1> The optional `jenkinsfilePath` field specifies the name of the file to use, relative to the source `contextDir`. If `contextDir` is omitted, it defaults to the root of the repository. If `jenkinsfilePath` is omitted, it defaults to `jenkinsfile`. 
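For example, if your repository keeps its pipeline definition in a subdirectory, you can combine `contextDir` with `jenkinsfilePath`. The following is a minimal sketch; the `subproject` directory and the `ci/my.jenkinsfile` path are illustrative and not part of the original example:

[source,yaml]
----
kind: "BuildConfig"
apiVersion: "v1"
metadata:
  name: "sample-pipeline"
spec:
  source:
    git:
      uri: "https://github.com/openshift/ruby-hello-world"
    contextDir: "subproject"                # hypothetical subdirectory of the repository
  strategy:
    jenkinsPipelineStrategy:
      jenkinsfilePath: "ci/my.jenkinsfile"  # resolved relative to contextDir: subproject/ci/my.jenkinsfile
----

With this configuration, the build reads the pipeline definition from `subproject/ci/my.jenkinsfile` in the repository.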
diff --git a/modules/builds-strategy-s2i-build.adoc b/modules/builds-strategy-s2i-build.adoc deleted file mode 100644 index e8d51fde8f34..000000000000 --- a/modules/builds-strategy-s2i-build.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -//* builds/understanding-image-builds.adoc - -[id="builds-strategy-s2i-build_{context}"] -= Source-to-image build - -Source-to-image (S2I) is a tool for building reproducible container images. It produces ready-to-run images by injecting application source into a container image and assembling a new image. The new image incorporates the base image, the builder, and built source and is ready to use with the `buildah run` command. S2I supports incremental builds, which re-use previously downloaded dependencies, previously built artifacts, and so on. - - -//// -The advantages of S2I include the following: - -[horizontal] -Image flexibility:: S2I scripts can be written to inject application code into almost any existing Docker-formatted container image, taking advantage of the existing ecosystem. Note that, currently, S2I relies on `tar` to inject application source, so the image needs to be able to process tarred content. - -Speed:: With S2I, the assemble process can perform a large number of complex operations without creating a new layer at each step, resulting in a fast process. In addition, S2I scripts can be written to re-use artifacts stored in a previous version of the application image, rather than having to download or build them each time the build is run. - -Patchability:: S2I allows you to rebuild the application consistently if an underlying image needs a patch due to a security issue. - -Operational efficiency:: By restricting build operations instead of allowing arbitrary actions, as a Dockerfile would allow, the PaaS operator can avoid accidental or intentional abuses of the build system. - -Operational security:: Building an arbitrary Dockerfile exposes the host system to root privilege escalation. This can be exploited by a malicious user because the entire Docker build process is run as a user with Docker privileges. S2I restricts the operations performed as a root user and can run the scripts as a non-root user. - -User efficiency:: S2I prevents developers from performing arbitrary `yum install` type operations, which could slow down development iteration, during their application build. - -Ecosystem:: S2I encourages a shared ecosystem of images where you can leverage best practices for your applications. - -Reproducibility:: Produced images can include all inputs including specific versions of build tools and dependencies. This ensures that the image can be reproduced precisely. -//// diff --git a/modules/builds-strategy-s2i-buildconfig-environment.adoc b/modules/builds-strategy-s2i-buildconfig-environment.adoc deleted file mode 100644 index ac7626313953..000000000000 --- a/modules/builds-strategy-s2i-buildconfig-environment.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -//* * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-buildconfig-environment_{context}"] -= Using source-to-image build configuration environment - -You can add environment variables to the `sourceStrategy` definition of the build configuration. 
The environment variables defined there are visible during the `assemble` script execution and will be defined in the output image, making them also available to the `run` script and application code. - -.Procedure - -* For example, to disable assets compilation for your Rails application: -+ -[source,yaml] ----- -sourceStrategy: -... - env: - - name: "DISABLE_ASSET_COMPILATION" - value: "true" ----- - -[role="_additional-resources"] -.Additional resources - -* The build environment section provides more advanced instructions. -* You can also manage environment variables defined in the build configuration with the `oc set env` command. diff --git a/modules/builds-strategy-s2i-environment-files.adoc b/modules/builds-strategy-s2i-environment-files.adoc deleted file mode 100644 index e93d9ad7bc91..000000000000 --- a/modules/builds-strategy-s2i-environment-files.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-environment-files_{context}"] -= Using source-to-image environment files - -Source build enables you to set environment values, one per line, inside your application, by specifying them in a `.s2i/environment` file in the source repository. The environment variables specified in this file are present during the build process and in the output image. - -If you provide a `.s2i/environment` file in your source repository, source-to-image (S2I) reads this file during the build. This allows customization of the build behavior as the `assemble` script may use these variables. - -.Procedure - -For example, to disable assets compilation for your Rails application during the build: - -* Add `DISABLE_ASSET_COMPILATION=true` in the `.s2i/environment` file. - -In addition to builds, the specified environment variables are also available in the running application itself. For example, to cause the Rails application to start in `development` mode instead of `production`: - -* Add `RAILS_ENV=development` to the `.s2i/environment` file. - - -The complete list of supported environment variables is available in the using images section for each image. diff --git a/modules/builds-strategy-s2i-environment-variables.adoc b/modules/builds-strategy-s2i-environment-variables.adoc deleted file mode 100644 index 027e0fd17889..000000000000 --- a/modules/builds-strategy-s2i-environment-variables.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-environment-variables_{context}"] -= Source-to-image environment variables - -There are two ways to make environment variables available to the source build process and resulting image. Environment files and BuildConfig environment values. Variables provided will be present during the build process and in the output image. 
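As a minimal sketch of the second method, the `RAILS_ENV=development` setting from the `.s2i/environment` example above could equally be supplied as a `BuildConfig` environment value in the `sourceStrategy` definition:

[source,yaml]
----
strategy:
  sourceStrategy:
    env:
      - name: "RAILS_ENV"       # same effect as RAILS_ENV=development in .s2i/environment
        value: "development"
----

As with the environment file, the variable is present during the build process and in the output image.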
diff --git a/modules/builds-strategy-s2i-force-pull-example.adoc b/modules/builds-strategy-s2i-force-pull-example.adoc deleted file mode 100644 index 85f659bdd1a2..000000000000 --- a/modules/builds-strategy-s2i-force-pull-example.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-force-pull-example_{context}"] -= Source-to-Image (S2I) force pull flag example - -Set the following to use the `forcePull` flag with S2I: - -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "builder-image:latest" <1> - forcePull: true <2> ----- -<1> The builder image being used, where the local version on the node may not be up to date with the version in the registry to which the imagestream points. -<2> This flag causes the local builder image to be ignored and a fresh version to be pulled from the registry to which the imagestream points. Setting `forcePull` to `false` results in the default behavior of honoring the image stored locally. diff --git a/modules/builds-strategy-s2i-ignore-source-files.adoc b/modules/builds-strategy-s2i-ignore-source-files.adoc deleted file mode 100644 index c7d3d76d7d48..000000000000 --- a/modules/builds-strategy-s2i-ignore-source-files.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -[id="builds-strategy-s2i-ignore-source-files_{context}"] -= Ignoring source-to-image source files - -Source-to-image (S2I) supports a `.s2iignore` file, which contains a list of file patterns that should be ignored. Files in the build working directory, as provided by the various input sources, that match a pattern found in the `.s2iignore` file will not be made available to the `assemble` script. - -//For more details on the format of the `.s2iignore` file, see the S2I documentation. diff --git a/modules/builds-strategy-s2i-incremental-builds.adoc b/modules/builds-strategy-s2i-incremental-builds.adoc deleted file mode 100644 index 0cb82d974fec..000000000000 --- a/modules/builds-strategy-s2i-incremental-builds.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-incremental-builds_{context}"] -= Performing source-to-image incremental builds - -Source-to-image (S2I) can perform incremental builds, which means it reuses artifacts from previously-built images. - -.Procedure - -* To create an incremental build, apply the following modification to the strategy definition: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "incremental-image:latest" <1> - incremental: true <2> ----- -<1> Specify an image that supports incremental builds. Consult the documentation of the builder image to determine if it supports this behavior. -<2> This flag controls whether an incremental build is attempted. If the builder image does not support incremental builds, the build will still succeed, but you will get a log message stating the incremental build was not successful because of a missing `save-artifacts` script. - -[role="_additional-resources"] -.Additional resources - -* See S2I Requirements for information on how to create a builder image supporting incremental builds. 
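The `forcePull` flag shown earlier for the source strategy can also be set in the `dockerStrategy` definition for docker strategy builds. The following is a minimal sketch; the builder image name is illustrative:

[source,yaml]
----
strategy:
  dockerStrategy:
    from:
      kind: "ImageStreamTag"
      name: "builder-image:latest"  # illustrative builder image
    forcePull: true                 # ignore any locally cached builder image and pull a fresh copy from the registry
----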
diff --git a/modules/builds-strategy-s2i-override-builder-image-scripts.adoc b/modules/builds-strategy-s2i-override-builder-image-scripts.adoc deleted file mode 100644 index f0a81b4c49e2..000000000000 --- a/modules/builds-strategy-s2i-override-builder-image-scripts.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-s2i-override-builder-image-scripts_{context}"] -= Overriding source-to-image builder image scripts - -You can override the `assemble`, `run`, and `save-artifacts` source-to-image (S2I) scripts provided by the builder image. - -.Procedure - -To override the `assemble`, `run`, and `save-artifacts` S2I scripts provided by the builder image, either: - -* Provide an `assemble`, `run`, or `save-artifacts` script in the `.s2i/bin` directory of your application source repository. -* Provide a URL of a directory containing the scripts as part of the strategy definition. For example: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "builder-image:latest" - scripts: "http://somehost.com/scripts_directory" <1> ----- -<1> This path will have `run`, `assemble`, and `save-artifacts` appended to it. If any or all scripts are found they will be used in place of the same named scripts provided in the image. - -[NOTE] -==== -Files located at the `scripts` URL take precedence over files located in `.s2i/bin` of the source repository. -==== diff --git a/modules/builds-strategy-secrets-web-console.adoc b/modules/builds-strategy-secrets-web-console.adoc deleted file mode 100644 index f1af2f37776e..000000000000 --- a/modules/builds-strategy-secrets-web-console.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-strategy-secrets-web-console_{context}"] -= Adding secrets with web console - -You can add a secret to your build configuration so that it can access a private repository. - -.Procedure - -To add a secret to your build configuration so that it can access a private -repository from the {product-title} web console: - -. Create a new {product-title} project. - -. Create a secret that contains credentials for accessing a private source code -repository. - -. Create a build configuration. - -. On the build configuration editor page or in the `create app from builder image` page of the web console, set the *Source Secret*. - -. Click *Save*. - - -//[NOTE] -//==== -// This module needs specific instructions and examples. -// This is applicable for Docker, S2I, and Custom. -//==== diff --git a/modules/builds-triggers.adoc b/modules/builds-triggers.adoc deleted file mode 100644 index a2ec77d47ca7..000000000000 --- a/modules/builds-triggers.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-triggers_{context}"] -= Build triggers - -When defining a `BuildConfig`, you can define triggers to control the circumstances in which the `BuildConfig` should be run. 
The following build triggers are available: - -* Webhook -* Image change -* Configuration change diff --git a/modules/builds-troubleshooting-access-resources.adoc b/modules/builds-troubleshooting-access-resources.adoc deleted file mode 100644 index 678a0656c2ad..000000000000 --- a/modules/builds-troubleshooting-access-resources.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/troubleshooting-builds.adoc - -[id="builds-troubleshooting-access-resources_{context}"] -= Resolving denial for access to resources - -If your request for access to resources is denied: - -Issue:: -A build fails with: - -[source,terminal] ----- -requested access to the resource is denied ----- - -Resolution:: -You have exceeded one of the image quotas set on your project. Check your current quota and verify the limits applied and storage in use: - -[source,terminal] ----- -$ oc describe quota ----- diff --git a/modules/builds-troubleshooting-service-certificate-generation.adoc b/modules/builds-troubleshooting-service-certificate-generation.adoc deleted file mode 100644 index 42f3a772cb49..000000000000 --- a/modules/builds-troubleshooting-service-certificate-generation.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// *builds/troubleshooting-builds.adoc - -[id="builds-troubleshooting-service-certificate-generation_{context}"] -= Service certificate generation failure - -If service certificate generation fails: - -Issue:: -The service's `service.beta.openshift.io/serving-cert-generation-error` annotation contains an error similar to the following: - -.Example output -[source,terminal] ----- -secret/ssl-key references serviceUID 62ad25ca-d703-11e6-9d6f-0e9c0057b608, which does not match 77b6dd80-d716-11e6-9d6f-0e9c0057b60 ----- - -Resolution:: -The service that generated the certificate no longer exists, or has a different `serviceUID`. You must force certificate regeneration by removing the old secret and clearing the following annotations on the service: `service.beta.openshift.io/serving-cert-generation-error` and `service.beta.openshift.io/serving-cert-generation-error-num`: - -[source,terminal] ----- -$ oc delete secret ----- - -[source,terminal] ----- -$ oc annotate service service.beta.openshift.io/serving-cert-generation-error- ----- - -[source,terminal] ----- -$ oc annotate service service.beta.openshift.io/serving-cert-generation-error-num- ----- - -[NOTE] -==== -The command that removes an annotation has a `-` after the annotation name to be -removed. -==== diff --git a/modules/builds-tutorial-pipeline.adoc b/modules/builds-tutorial-pipeline.adoc deleted file mode 100644 index a47415c5ae5f..000000000000 --- a/modules/builds-tutorial-pipeline.adoc +++ /dev/null @@ -1,224 +0,0 @@ -// Module included in the following assemblies: -// * builds/build-strategies.adoc - -:_content-type: PROCEDURE -[id="builds-tutorial-pipeline_{context}"] -= Pipeline build tutorial - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. - -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system.
-==== - -This example demonstrates how to create an {product-title} Pipeline that will build, deploy, and verify a `Node.js/MongoDB` application using the `nodejs-mongodb.json` template. - -.Procedure - -. Create the Jenkins master: -+ -[source,terminal] ----- - $ oc project ----- -+ -Select the project that you want to use or create a new project with `oc new-project `. -+ -[source,terminal] ----- - $ oc new-app jenkins-ephemeral <2> ----- -+ -If you want to use persistent storage, use `jenkins-persistent` instead. -+ -. Create a file named `nodejs-sample-pipeline.yaml` with the following content: -+ -[NOTE] -==== -This creates a `BuildConfig` object that employs the Jenkins pipeline strategy to build, deploy, and scale the `Node.js/MongoDB` example application. -==== -+ -[source,yaml] -+ ----- -kind: "BuildConfig" -apiVersion: "v1" -metadata: - name: "nodejs-sample-pipeline" -spec: - strategy: - jenkinsPipelineStrategy: - jenkinsfile: - type: JenkinsPipeline ----- -+ -. After you create a `BuildConfig` object with a `jenkinsPipelineStrategy`, tell the -pipeline what to do by using an inline `jenkinsfile`: -+ -[NOTE] -==== -This example does not set up a Git repository for the application. - -The following `jenkinsfile` content is written in Groovy using the {product-title} DSL. For this example, include inline content in the `BuildConfig` object using the YAML Literal Style, though including a `jenkinsfile` in your source repository is the preferred method. -==== -+ -[source,groovy] ----- -def templatePath = 'https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json' <1> -def templateName = 'nodejs-mongodb-example' <2> -pipeline { - agent { - node { - label 'nodejs' <3> - } - } - options { - timeout(time: 20, unit: 'MINUTES') <4> - } - stages { - stage('preamble') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - echo "Using project: ${openshift.project()}" - } - } - } - } - } - stage('cleanup') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.selector("all", [ template : templateName ]).delete() <5> - if (openshift.selector("secrets", templateName).exists()) { <6> - openshift.selector("secrets", templateName).delete() - } - } - } - } - } - } - stage('create') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.newApp(templatePath) <7> - } - } - } - } - } - stage('build') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - def builds = openshift.selector("bc", templateName).related('builds') - timeout(5) { <8> - builds.untilEach(1) { - return (it.object().status.phase == "Complete") - } - } - } - } - } - } - } - stage('deploy') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - def rm = openshift.selector("dc", templateName).rollout() - timeout(5) { <9> - openshift.selector("dc", templateName).related('pods').untilEach(1) { - return (it.object().status.phase == "Running") - } - } - } - } - } - } - } - stage('tag') { - steps { - script { - openshift.withCluster() { - openshift.withProject() { - openshift.tag("${templateName}:latest", "${templateName}-staging:latest") <10> - } - } - } - } - } - } -} ----- -<1> Path of the template to use. -<2> Name of the template that will be created. -<3> Spin up a `node.js` agent pod on which to run this build. -<4> Set a timeout of 20 minutes for this pipeline. -<5> Delete everything with this template label. 
-<6> Delete any secrets with this template label. -<7> Create a new application from the `templatePath`. -<8> Wait up to five minutes for the build to complete. -<9> Wait up to five minutes for the deployment to complete. -<10> If everything else succeeded, tag the `$ {templateName}:latest` image as -`$ {templateName}-staging:latest`. A pipeline build configuration for the staging -environment can watch for the `$ {templateName}-staging:latest` image to change -and then deploy it to the staging environment. -+ -[NOTE] -==== -The previous example was written using the declarative pipeline style, but the older scripted pipeline style is also supported. -==== -+ -. Create the Pipeline `BuildConfig` in your {product-title} cluster: -+ -[source,terminal] ----- -$ oc create -f nodejs-sample-pipeline.yaml ----- -+ -.. If you do not want to create your own file, you can use the sample from the Origin repository by running: -+ -[source,terminal] ----- -$ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/nodejs-sample-pipeline.yaml ----- -+ -. Start the Pipeline: -+ -[source,terminal] ----- -$ oc start-build nodejs-sample-pipeline ----- -+ -[NOTE] -==== -Alternatively, you can start your pipeline with the {product-title} web console by navigating to the Builds -> Pipeline section and clicking *Start Pipeline*, or by visiting the Jenkins Console, navigating to the Pipeline that you created, and clicking *Build Now*. -==== -+ -Once the pipeline is started, you should see the following actions performed within your project: -+ -* A job instance is created on the Jenkins server. -* An agent pod is launched, if your pipeline requires one. -* The pipeline runs on the agent pod, or the master if no agent is required. -** Any previously created resources with the `template=nodejs-mongodb-example` label will be deleted. -** A new application, and all of its associated resources, will be created from the `nodejs-mongodb-example` template. -** A build will be started using the `nodejs-mongodb-example` `BuildConfig`. -*** The pipeline will wait until the build has completed to trigger the next stage. -** A deployment will be started using the `nodejs-mongodb-example` deployment configuration. -*** The pipeline will wait until the deployment has completed to trigger the next stage. -** If the build and deploy are successful, the `nodejs-mongodb-example:latest` image will be tagged as `nodejs-mongodb-example:stage`. -* The agent pod is deleted, if one was required for the pipeline. -+ -[NOTE] -==== -The best way to visualize the pipeline execution is by viewing it in the {product-title} web console. You can view your pipelines by logging in to the web console and navigating to Builds -> Pipelines. -==== diff --git a/modules/builds-understanding-openshift-pipeline.adoc b/modules/builds-understanding-openshift-pipeline.adoc deleted file mode 100644 index af71e4a98ff0..000000000000 --- a/modules/builds-understanding-openshift-pipeline.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -//* builds/build-strategies.adoc - -:_content-type: CONCEPT -[id="builds-understanding-openshift-pipeline_{context}"] -= Understanding {product-title} pipelines - -[IMPORTANT] -==== -The Pipeline build strategy is deprecated in {product-title} 4. Equivalent and improved functionality is present in the {product-title} Pipelines based on Tekton. 
- -Jenkins images on {product-title} are fully supported and users should follow Jenkins user documentation for defining their `jenkinsfile` in a job or store it in a Source Control Management system. -==== - -Pipelines give you control over building, deploying, and promoting your applications on {product-title}. Using a combination of the Jenkins Pipeline build strategy, `jenkinsfiles`, and the {product-title} Domain Specific Language (DSL) provided by the Jenkins Client Plugin, you can create advanced build, test, deploy, and promote pipelines for any scenario. - -*{product-title} Jenkins Sync Plugin* - -The {product-title} Jenkins Sync Plugin keeps the build configuration and build objects in sync with Jenkins jobs and builds, and provides the following: - - * Dynamic job and run creation in Jenkins. - * Dynamic creation of agent pod templates from image streams, image stream tags, or config maps. - * Injection of environment variables. - * Pipeline visualization in the {product-title} web console. - * Integration with the Jenkins Git plugin, which passes commit information from {product-title} builds to the Jenkins Git plugin. - * Synchronization of secrets into Jenkins credential entries. - -*{product-title} Jenkins Client Plugin* - -The {product-title} Jenkins Client Plugin is a Jenkins plugin that aims to provide a readable, concise, comprehensive, and fluent Jenkins Pipeline syntax for rich interactions with an {product-title} API Server. The plugin uses the {product-title} command line tool, `oc`, which must be available on the nodes executing the script. - -The Jenkins Client Plugin must be installed on your Jenkins master so that the {product-title} DSL is available to use within the `jenkinsfile` for your application. This plugin is installed and enabled by default when using the {product-title} Jenkins image. - -For {product-title} Pipelines within your project, you must use the Jenkins Pipeline Build Strategy. This strategy defaults to using a `jenkinsfile` at the root of your source repository, but also provides the following configuration options: - -* An inline `jenkinsfile` field within your build configuration. -* A `jenkinsfilePath` field within your build configuration that references the location of the `jenkinsfile` to use relative to the source `contextDir`. - -[NOTE] -==== -The optional `jenkinsfilePath` field specifies the name of the file to use, relative to the source `contextDir`. If `contextDir` is omitted, it defaults to the root of the repository. If `jenkinsfilePath` is omitted, it defaults to `jenkinsfile`. -==== diff --git a/modules/builds-use-custom-builder-image.adoc b/modules/builds-use-custom-builder-image.adoc deleted file mode 100644 index 06e7f899f50d..000000000000 --- a/modules/builds-use-custom-builder-image.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/custom-builds-buildah.adoc - - -:_content-type: PROCEDURE -[id="builds-use-custom-builder-image_{context}"] -= Use custom builder image - -You can define a `BuildConfig` object that uses the custom strategy in conjunction with your custom builder image to execute your custom build logic. - -.Prerequisites - -* Define all the required inputs for the new custom builder image. -* Build your custom builder image. - -.Procedure - -. Create a file named `buildconfig.yaml`.
This file defines the `BuildConfig` object that is created in your project and executed: -+ -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: sample-custom-build - labels: - name: sample-custom-build - annotations: - template.alpha.openshift.io/wait-for-ready: 'true' -spec: - strategy: - type: Custom - customStrategy: - forcePull: true - from: - kind: ImageStreamTag - name: custom-builder-image:latest - namespace: <1> - output: - to: - kind: ImageStreamTag - name: sample-custom:latest ----- -<1> Specify your project name. - -. Create the `BuildConfig`: -+ -[source,terminal] ----- -$ oc create -f buildconfig.yaml ----- - -. Create a file named `imagestream.yaml`. This file defines the image stream to which the build will push the image: -+ -[source,yaml] ----- -kind: ImageStream -apiVersion: image.openshift.io/v1 -metadata: - name: sample-custom -spec: {} ----- - -. Create the imagestream: -+ -[source,terminal] ----- -$ oc create -f imagestream.yaml ----- - -. Run your custom build: -+ -[source,terminal] ----- -$ oc start-build sample-custom-build -F ----- -+ -When the build runs, it launches a pod running the custom builder image that was built earlier. The pod runs the `build.sh` logic that is defined as the entrypoint for the custom builder image. The `build.sh` logic invokes Buildah to build the `dockerfile.sample` that was embedded in the custom builder image, and then uses Buildah to push the new image to the `sample-custom image stream`. diff --git a/modules/builds-using-bitbucket-webhooks.adoc b/modules/builds-using-bitbucket-webhooks.adoc deleted file mode 100644 index b10cac521aba..000000000000 --- a/modules/builds-using-bitbucket-webhooks.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-bitbucket-webhooks_{context}"] -= Using Bitbucket webhooks - -link:https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html[Bitbucket webhooks] handle the call made by Bitbucket when a repository is updated. Similar to the previous triggers, you must specify a secret. The following example is a trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "Bitbucket" -bitbucket: - secretReference: - name: "mysecret" ----- - -The payload URL is returned as the Bitbucket Webhook URL by the `oc describe` command, and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//bitbucket ----- - -.Procedure - -. To configure a Bitbucket Webhook: - -.. Describe the 'BuildConfig' to get the webhook URL: -+ -[source,terminal] ----- -$ oc describe bc ----- - -.. Copy the webhook URL, replacing `` with your secret value. - -.. Follow the link:https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html[Bitbucket setup instructions] to paste the webhook URL into your Bitbucket repository settings. - -. Given a file containing a valid JSON payload, such as `payload.json`, you can -manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-Event-Key: repo:push" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//bitbucket ----- -+ -The `-k` argument is only necessary if your API server does not have a properly signed certificate. 
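As with the other webhook types, the Bitbucket trigger definition shown above typically appears as an entry in the `triggers` list of the `BuildConfig` spec. The following is a minimal sketch; the build configuration name is illustrative:

[source,yaml]
----
kind: "BuildConfig"
apiVersion: "v1"
metadata:
  name: "sample-build"          # illustrative name
spec:
  triggers:
    - type: "Bitbucket"
      bitbucket:
        secretReference:
          name: "mysecret"
----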
diff --git a/modules/builds-using-build-fields-as-environment-variables.adoc b/modules/builds-using-build-fields-as-environment-variables.adoc deleted file mode 100644 index a5f5637df94e..000000000000 --- a/modules/builds-using-build-fields-as-environment-variables.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-using-build-fields-as-environment-variables_{context}"] -= Using build fields as environment variables - -You can inject information about the build object by setting the `fieldPath` environment variable source to the `JsonPath` of the field from which you are interested in obtaining the value. - -[NOTE] -==== -Jenkins Pipeline strategy does not support `valueFrom` syntax for environment variables. -==== - -.Procedure - -* Set the `fieldPath` environment variable source to the `JsonPath` of the field from which you are interested in obtaining the value: -+ -[source,yaml] ----- -env: - - name: FIELDREF_ENV - valueFrom: - fieldRef: - fieldPath: metadata.name ----- diff --git a/modules/builds-using-build-volumes.adoc b/modules/builds-using-build-volumes.adoc deleted file mode 100644 index 90a499f783b8..000000000000 --- a/modules/builds-using-build-volumes.adoc +++ /dev/null @@ -1,127 +0,0 @@ -ifeval::["{context}" == "build-strategies-docker"] -:dockerstrategy: -endif::[] -ifeval::["{context}" == "build-strategies-s2i"] -:sourcestrategy: -endif::[] - -:_content-type: PROCEDURE -[id="builds-using-build-volumes_{context}"] -= Using build volumes - -You can mount build volumes to give running builds access to information that you don't want to persist in the output container image. - -Build volumes provide sensitive information, such as repository credentials, that the build environment or configuration only needs at build time. Build volumes are different from xref:../../cicd/builds/creating-build-inputs.adoc#builds-define-build-inputs_creating-build-inputs[build inputs], whose data can persist in the output container image. - -The mount points of build volumes, from which the running build reads data, are functionally similar to link:https://kubernetes.io/docs/concepts/storage/volumes/[pod volume mounts]. - -.Prerequisites -* You have xref:../../cicd/builds/creating-build-inputs.adoc#builds-input-secrets-configmaps_creating-build-inputs[added an input secret, config map, or both to a BuildConfig object]. - -.Procedure - -ifdef::dockerstrategy[] - -* In the `dockerStrategy` definition of the `BuildConfig` object, add any build volumes to the `volumes` array. For example: -+ -[source,yaml] ----- -spec: - dockerStrategy: - volumes: - - name: secret-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.ssh <2> - source: - type: Secret <3> - secret: - secretName: my-secret <4> - - name: settings-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.m2 <2> - source: - type: ConfigMap <3> - configMap: - name: my-config <4> - - name: my-csi-volume <1> - mounts: - - destinationPath: /opt/app-root/src/some_path <2> - source: - type: CSI <3> - csi: - driver: csi.sharedresource.openshift.io <5> - readOnly: true <6> - volumeAttributes: <7> - attribute: value ----- -<1> Required. A unique name. -<2> Required. The absolute path of the mount point. It must not contain `..` or `:` and doesn't collide with the destination path generated by the builder. The `/opt/app-root/src` is the default home directory for many Red Hat S2I-enabled images. -<3> Required. 
The type of source, `ConfigMap`, `Secret`, or `CSI`. -<4> Required. The name of the source. -<5> Required. The driver that provides the ephemeral CSI volume. -<6> Required. This value must be set to `true`. Provides a read-only volume. -<7> Optional. The volume attributes of the ephemeral CSI volume. Consult the CSI driver's documentation for supported attribute keys and values. - -[NOTE] -==== -The Shared Resource CSI Driver is supported as a Technology Preview feature. -==== - -endif::dockerstrategy[] - -ifdef::sourcestrategy[] - -* In the `sourceStrategy` definition of the `BuildConfig` object, add any build volumes to the `volumes` array. For example: -+ -[source,yaml] ----- -spec: - sourceStrategy: - volumes: - - name: secret-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.ssh <2> - source: - type: Secret <3> - secret: - secretName: my-secret <4> - - name: settings-mvn <1> - mounts: - - destinationPath: /opt/app-root/src/.m2 <2> - source: - type: ConfigMap <3> - configMap: - name: my-config <4> - - name: my-csi-volume <1> - mounts: - - destinationPath: /opt/app-root/src/some_path <2> - source: - type: CSI <3> - csi: - driver: csi.sharedresource.openshift.io <5> - readOnly: true <6> - volumeAttributes: <7> - attribute: value ----- - -<1> Required. A unique name. -<2> Required. The absolute path of the mount point. It must not contain `..` or `:` and doesn't collide with the destination path generated by the builder. The `/opt/app-root/src` is the default home directory for many Red Hat S2I-enabled images. -<3> Required. The type of source, `ConfigMap`, `Secret`, or `CSI`. -<4> Required. The name of the source. -<5> Required. The driver that provides the ephemeral CSI volume. -<6> Required. This value must be set to `true`. Provides a read-only volume. -<7> Optional. The volume attributes of the ephemeral CSI volume. Consult the CSI driver's documentation for supported attribute keys and values. - -[NOTE] -==== -The Shared Resource CSI Driver is supported as a Technology Preview feature. -==== - -endif::sourcestrategy[] - -ifeval::["{context}" == "build-strategies-docker"] -:!dockerstrategy: -endif::[] -ifeval::["{context}" == "build-strategies-s2i"] -:!sourcestrategy: -endif::[] diff --git a/modules/builds-using-cli-post-commit-build-hooks.adoc b/modules/builds-using-cli-post-commit-build-hooks.adoc deleted file mode 100644 index 75fdddda29c8..000000000000 --- a/modules/builds-using-cli-post-commit-build-hooks.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-cli-post-commit-build-hooks_{context}"] -= Using the CLI to set post commit build hooks - -The `oc set build-hook` command can be used to set the build hook for a build configuration. - -.Procedure - -. To set a command as the post-commit build hook: -+ -[source,terminal] ----- -$ oc set build-hook bc/mybc \ - --post-commit \ - --command \ - -- bundle exec rake test --verbose ----- -+ -. 
To set a script as the post-commit build hook: -+ -[source,terminal] ----- -$ oc set build-hook bc/mybc --post-commit --script="bundle exec rake test --verbose" ----- diff --git a/modules/builds-using-external-artifacts.adoc b/modules/builds-using-external-artifacts.adoc deleted file mode 100644 index 60a39fea15fa..000000000000 --- a/modules/builds-using-external-artifacts.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/creating-build-inputs.adoc - -[id="builds-using-external-artifacts_{context}"] -= External artifacts - -It is not recommended to store binary files in a source repository. Therefore, you must define a build which pulls additional files, such as Java `.jar` dependencies, during the build process. How this is done depends on the build strategy you are using. - -For a Source build strategy, you must put appropriate shell commands into the `assemble` script: - -.`.s2i/bin/assemble` File -[source,terminal] ----- -#!/bin/sh -APP_VERSION=1.0 -wget http://repository.example.com/app/app-$APP_VERSION.jar -O app.jar ----- - -.`.s2i/bin/run` File -[source,terminal] ----- -#!/bin/sh -exec java -jar app.jar ----- - -ifndef::openshift-online[] -For a Docker build strategy, you must modify the Dockerfile and invoke -shell commands with the link:https://docs.docker.com/engine/reference/builder/#run[`RUN` instruction]: - -.Excerpt of Dockerfile -[source,terminal] ----- -FROM jboss/base-jdk:8 - -ENV APP_VERSION 1.0 -RUN wget http://repository.example.com/app/app-$APP_VERSION.jar -O app.jar - -EXPOSE 8080 -CMD [ "java", "-jar", "app.jar" ] ----- -endif::[] - -In practice, you may want to use an environment variable for the file location so that the specific file to be downloaded can be customized using an environment variable defined on the `BuildConfig`, rather than updating the -ifndef::openshift-online[] -Dockerfile or -endif::[] -`assemble` script. - -You can choose between different methods of defining environment variables: - -* Using the `.s2i/environment` file] (only for a Source build strategy) -* Setting in `BuildConfig` -* Providing explicitly using `oc start-build --env` (only for builds that are triggered manually) - -//[role="_additional-resources"] -//.Additional resources -//* For more information on how to control which *_assemble_* and *_run_* script is -//used by a Source build, see Overriding builder image scripts. diff --git a/modules/builds-using-generic-webhooks.adoc b/modules/builds-using-generic-webhooks.adoc deleted file mode 100644 index 22b4ed0b0bed..000000000000 --- a/modules/builds-using-generic-webhooks.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-generic-webhooks_{context}"] -= Using generic webhooks - -Generic webhooks are invoked from any system capable of making a web request. As with the other webhooks, you must specify a secret, which is part of the URL that the caller must use to trigger the build. The secret ensures the uniqueness of the URL, preventing others from triggering the build. The following is an example trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "Generic" -generic: - secretReference: - name: "mysecret" - allowEnv: true <1> ----- -<1> Set to `true` to allow a generic webhook to pass in environment variables. - -.Procedure - -. 
To set up the caller, supply the calling system with the URL of the generic -webhook endpoint for your build: -+ -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The caller must invoke the webhook as a `POST` operation. - -. To invoke the webhook manually you can use `curl`: -+ -[source,terminal] ----- -$ curl -X POST -k https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The HTTP verb must be set to `POST`. The insecure `-k` flag is specified to ignore certificate validation. This second flag is not necessary if your cluster has properly signed certificates. -+ -The endpoint can accept an optional payload with the following format: -+ -[source,yaml] ----- -git: - uri: "" - ref: "" - commit: "" - author: - name: "" - email: "" - committer: - name: "" - email: "" - message: "" -env: <1> - - name: "" - value: "" ----- -<1> Similar to the `BuildConfig` environment variables, the environment variables defined here are made available to your build. If these variables collide with the `BuildConfig` environment variables, these variables take precedence. By default, environment variables passed by webhook are ignored. Set the `allowEnv` field to `true` on the webhook definition to enable this behavior. - -. To pass this payload using `curl`, define it in a file named `payload_file.yaml` and run: -+ -[source,terminal] ----- -$ curl -H "Content-Type: application/yaml" --data-binary @payload_file.yaml -X POST -k https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//generic ----- -+ -The arguments are the same as the previous example with the addition of a header and a payload. The `-H` argument sets the `Content-Type` header to `application/yaml` or `application/json` depending on your payload format. The `--data-binary` argument is used to send a binary payload with newlines intact with the `POST` request. - -[NOTE] -==== -{product-title} permits builds to be triggered by the generic webhook even if an invalid request payload is presented, for example, invalid content type, unparsable or invalid content, and so on. This behavior is maintained for backwards compatibility. If an invalid request payload is presented, {product-title} returns a warning in JSON format as part of its `HTTP 200 OK` response. -==== diff --git a/modules/builds-using-github-webhooks.adoc b/modules/builds-using-github-webhooks.adoc deleted file mode 100644 index 104cd1fd173a..000000000000 --- a/modules/builds-using-github-webhooks.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-github-webhooks_{context}"] -= Using GitHub webhooks - -GitHub webhooks handle the call made by GitHub when a repository is updated. When defining the trigger, you must specify a secret, which is part of the URL you supply to GitHub when configuring the webhook. - -Example GitHub webhook definition: - -[source,yaml] ----- -type: "GitHub" -github: - secretReference: - name: "mysecret" ----- - -[NOTE] -==== -The secret used in the webhook trigger configuration is not the same as `secret` field you encounter when configuring webhook in GitHub UI. The former is to make the webhook URL unique and hard to predict, the latter is an optional string field used to create HMAC hex digest of the body, which is sent as an `X-Hub-Signature` header. 
-==== - -The payload URL is returned as the GitHub Webhook URL by the `oc describe` -command (see Displaying Webhook URLs), and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//github ----- - -.Prerequisites - -* Create a `BuildConfig` from a GitHub repository. - -.Procedure - -. To configure a GitHub Webhook: - -.. After creating a `BuildConfig` from a GitHub repository, run: -+ -[source,terminal] ----- -$ oc describe bc/ ----- -+ -This generates a webhook GitHub URL that looks like: -+ -.Example output -[source,terminal] ----- -/buildconfigs//webhooks//github ----- - -.. Cut and paste this URL into GitHub, from the GitHub web console. - -.. In your GitHub repository, select *Add Webhook* from *Settings -> Webhooks*. - -.. Paste the URL output into the *Payload URL* field. - -.. Change the *Content Type* from GitHub's default `application/x-www-form-urlencoded` to `application/json`. - -.. Click *Add webhook*. -+ -You should see a message from GitHub stating that your webhook was successfully configured. -+ -Now, when you push a change to your GitHub repository, a new build automatically starts, and upon a successful build a new deployment starts. -+ -[NOTE] -==== -link:https://gogs.io[Gogs] supports the same webhook payload format as GitHub. Therefore, if you are using a Gogs server, you can define a GitHub webhook trigger on your `BuildConfig` and trigger it by your Gogs server as well. -==== - -. Given a file containing a valid JSON payload, such as `payload.json`, you can manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-GitHub-Event: push" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//github ----- -+ -The `-k` argument is only necessary if your API server does not have a properly -signed certificate. - -[role="_additional-resources"] -.Additional resources - -//* link:https://developer.github.com/webhooks/[GitHub] -* link:https://gogs.io[Gogs] diff --git a/modules/builds-using-gitlab-webhooks.adoc b/modules/builds-using-gitlab-webhooks.adoc deleted file mode 100644 index 6629fa6b2f2f..000000000000 --- a/modules/builds-using-gitlab-webhooks.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-gitlab-webhooks_{context}"] -= Using GitLab webhooks - -GitLab webhooks handle the call made by GitLab when a repository is updated. As with the GitHub triggers, you must specify a secret. The following example is a trigger definition YAML within the `BuildConfig`: - -[source,yaml] ----- -type: "GitLab" -gitlab: - secretReference: - name: "mysecret" ----- - -The payload URL is returned as the GitLab Webhook URL by the `oc describe` command, and is structured as follows: - -.Example output -[source,terminal] ----- -https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//gitlab ----- - -.Procedure - -. To configure a GitLab Webhook: - -.. Describe the `BuildConfig` to get the webhook URL: -+ -[source,terminal] ----- -$ oc describe bc ----- - -.. Copy the webhook URL, replacing `` with your secret value. - -.. Follow the link:https://docs.gitlab.com/ce/user/project/integrations/webhooks.html#webhooks[GitLab setup instructions] -to paste the webhook URL into your GitLab repository settings. - -. 
Given a file containing a valid JSON payload, such as `payload.json`, you can -manually trigger the webhook with `curl`: -+ -[source,terminal] ----- -$ curl -H "X-GitLab-Event: Push Hook" -H "Content-Type: application/json" -k -X POST --data-binary @payload.json https:///apis/build.openshift.io/v1/namespaces//buildconfigs//webhooks//gitlab ----- -+ -The `-k` argument is only necessary if your API server does not have a properly -signed certificate. - -//// -[role="_additional-resources"] -.Additional resources -//// -//* link:https://docs.gitlab.com/ce/user/project/integrations/webhooks.html[GitLab] diff --git a/modules/builds-using-image-change-triggers.adoc b/modules/builds-using-image-change-triggers.adoc deleted file mode 100644 index d4af4c231e65..000000000000 --- a/modules/builds-using-image-change-triggers.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -:_content-type: PROCEDURE -[id="builds-using-image-change-triggers_{context}"] -= Using image change triggers - -As a developer, you can configure your build to run automatically every time a base image changes. - -You can use image change triggers to automatically invoke your build when a new version of an upstream image is available. For example, if a build is based on a RHEL image, you can trigger that build to run any time the RHEL image changes. As a result, the application image is always running on the latest RHEL base image. - -[NOTE] -==== -Image streams that point to container images in link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] only trigger a build once when the image stream tag becomes available and not on subsequent image updates. This is due to the lack of uniquely identifiable images in v1 container registries. -==== - -.Procedure - -. Define an `ImageStream` that points to the upstream image you want to use as a trigger: -+ -[source,yaml] ----- -kind: "ImageStream" -apiVersion: "v1" -metadata: - name: "ruby-20-centos7" ----- -+ -This defines the image stream that is tied to a container image repository located at `__/__/ruby-20-centos7`. The `` is defined as a service with the name `docker-registry` running in {product-title}. - -. If an image stream is the base image for the build, set the `from` field in the build strategy to point to the `ImageStream`: -+ -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "ImageStreamTag" - name: "ruby-20-centos7:latest" ----- -+ -In this case, the `sourceStrategy` definition is consuming the `latest` tag of the image stream named `ruby-20-centos7` located within this namespace. - -. Define a build with one or more triggers that point to `ImageStreams`: -+ -[source,yaml] ----- -type: "ImageChange" <1> -imageChange: {} -type: "ImageChange" <2> -imageChange: - from: - kind: "ImageStreamTag" - name: "custom-image:latest" ----- -<1> An image change trigger that monitors the `ImageStream` and `Tag` as defined by the build strategy's `from` field. The `imageChange` object here must be empty. -<2> An image change trigger that monitors an arbitrary image stream. The `imageChange` part, in this case, must include a `from` field that references the `ImageStreamTag` to monitor. - -When using an image change trigger for the strategy image stream, the generated build is supplied with an immutable docker tag that points to the latest image corresponding to that tag. 
This new image reference is used by the strategy when it executes for the build. - -For other image change triggers that do not reference the strategy image stream, a new build is started, but the build strategy is not updated with a unique image reference. - -Since this example has an image change trigger for the strategy, the resulting build is: - -[source,yaml] ----- -strategy: - sourceStrategy: - from: - kind: "DockerImage" - name: "172.30.17.3:5001/mynamespace/ruby-20-centos7:" ----- - -This ensures that the triggered build uses the new image that was just pushed to the repository, and the build can be re-run any time with the same inputs. - -You can pause an image change trigger to allow multiple changes on the referenced image stream before a build is started. You can also set the `paused` attribute to true when initially adding an `ImageChangeTrigger` to a `BuildConfig` to prevent a build from being immediately triggered. - -[source,yaml] ----- -type: "ImageChange" -imageChange: - from: - kind: "ImageStreamTag" - name: "custom-image:latest" - paused: true ----- - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -In addition to setting the image field for all `Strategy` types, for custom builds, the `OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE` environment variable is checked. -If it does not exist, then it is created with the immutable image reference. If it does exist, then it is updated with the immutable image reference. -endif::[] - -If a build is triggered due to a webhook trigger or manual request, the build that is created uses the `` resolved from the `ImageStream` referenced by the `Strategy`. This ensures that builds are performed using consistent image tags for ease of reproduction. - -[role="_additional-resources"] -.Additional resources - -* link:http://docs.docker.com/v1.7/reference/api/hub_registry_spec/#docker-registry-1-0[v1 container registries] diff --git a/modules/builds-using-proxy-git-cloning.adoc b/modules/builds-using-proxy-git-cloning.adoc deleted file mode 100644 index 56c89d8be2da..000000000000 --- a/modules/builds-using-proxy-git-cloning.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -[id="builds-using-proxy-git-cloning_{context}"] -= Using a proxy - -If your Git repository can only be accessed using a proxy, you can define the proxy to use in the `source` section of the build configuration. You can configure both an HTTP and HTTPS proxy to use. Both fields are optional. Domains for which no proxying should be performed can also be specified in the `NoProxy` field. - -[NOTE] -==== -Your source URI must use the HTTP or HTTPS protocol for this to work. -==== - -[source,yaml] ----- -source: - git: - uri: "https://github.com/openshift/ruby-hello-world" - ref: "master" - httpProxy: http://proxy.example.com - httpsProxy: https://proxy.example.com - noProxy: somedomain.com, otherdomain.com ----- - -[NOTE] -==== -For Pipeline strategy builds, given the current restrictions with the Git plugin for Jenkins, any Git operations through the Git plugin do not leverage the HTTP or HTTPS proxy defined in the `BuildConfig`. The Git plugin only uses the proxy configured in the Jenkins UI at the Plugin Manager panel. This proxy is then used for all git interactions within Jenkins, across all jobs. 
-==== - -[role="_additional-resources"] -.Additional resources - -* You can find instructions on how to configure proxies through the Jenkins UI at link:https://wiki.jenkins-ci.org/display/JENKINS/JenkinsBehindProxy[JenkinsBehindProxy]. diff --git a/modules/builds-using-secrets-as-environment-variables.adoc b/modules/builds-using-secrets-as-environment-variables.adoc deleted file mode 100644 index 9ffb37083e5d..000000000000 --- a/modules/builds-using-secrets-as-environment-variables.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/creating-build-inputs.adoc - -:_content-type: PROCEDURE -[id="builds-using-secrets-as-environment-variables_{context}"] -= Using secrets as environment variables - -You can make key values from secrets available as environment variables using the `valueFrom` syntax. - -[IMPORTANT] -==== -This method shows the secrets as plain text in the output of the build pod console. To avoid this, use input secrets and config maps instead. -==== - -.Procedure - -* To use a secret as an environment variable, set the `valueFrom` syntax: -+ -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: secret-example-bc -spec: - strategy: - sourceStrategy: - env: - - name: MYVAL - valueFrom: - secretKeyRef: - key: myval - name: mysecret ----- diff --git a/modules/builds-using-secrets.adoc b/modules/builds-using-secrets.adoc deleted file mode 100644 index a8046e128bee..000000000000 --- a/modules/builds-using-secrets.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// * builds/creating-build-inputs.adoc - - -:_content-type: PROCEDURE -[id="builds-using-secrets_{context}"] -= Using secrets - -After creating secrets, you can create a pod to reference your secret, get logs, and delete the pod. - -.Procedure - -. Create the pod to reference your secret: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -. Get the logs: -+ -[source,terminal] ----- -$ oc logs secret-example-pod ----- - -. Delete the pod: -+ -[source,terminal] ----- -$ oc delete pod secret-example-pod ----- - -[role="_additional-resources"] -.Additional resources - -* Example YAML files with secret data: -+ -.YAML Secret That Will Create Four Files -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret -data: - username: dmFsdWUtMQ0K <1> - password: dmFsdWUtMQ0KDQo= <2> -stringData: - hostname: myapp.mydomain.com <3> - secret.properties: |- <4> - property1=valueA - property2=valueB ----- -<1> File contains decoded values. -<2> File contains decoded values. -<3> File contains the provided string. -<4> File contains the provided data. 
-+ -.YAML of a pod populating files in a volume with secret data -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "cat /etc/secret-volume/*" ] - volumeMounts: - # name must match the volume name below - - name: secret-volume - mountPath: /etc/secret-volume - readOnly: true - volumes: - - name: secret-volume - secret: - secretName: test-secret - restartPolicy: Never ----- -+ -.YAML of a pod populating environment variables with secret data -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "export" ] - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: - name: test-secret - key: username - restartPolicy: Never ----- -+ -.YAML of a Build Config Populating Environment Variables with Secret Data -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: secret-example-bc -spec: - strategy: - sourceStrategy: - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: - name: test-secret - key: username ----- diff --git a/modules/builds-webhook-triggers.adoc b/modules/builds-webhook-triggers.adoc deleted file mode 100644 index 38c80b0d3c5d..000000000000 --- a/modules/builds-webhook-triggers.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/triggering-builds-build-hooks.adoc - -[id="builds-webhook-triggers_{context}"] -= Webhook triggers - -Webhook triggers allow you to trigger a new build by sending a request to the {product-title} API endpoint. You can define these triggers using GitHub, GitLab, Bitbucket, or Generic webhooks. - -Currently, {product-title} webhooks only support the analogous versions of the push event for each of the Git-based Source Code Management (SCM) systems. All other event types are ignored. - -When the push events are processed, the {product-title} control plane host confirms if the branch reference inside the event matches the branch reference in the corresponding `BuildConfig`. If so, it then checks out the exact commit reference noted in the webhook event on the {product-title} build. If they do not match, no build is triggered. - -[NOTE] -==== -`oc new-app` and `oc new-build` create GitHub and Generic webhook triggers automatically, but any other needed webhook triggers must be added manually. You can manually add triggers by setting triggers. -==== - -For all webhooks, you must define a secret with a key named `WebHookSecretKey` and the value being the value to be supplied when invoking the webhook. The webhook definition must then reference the secret. The secret ensures the uniqueness of the URL, preventing others from triggering the build. The value of the key is compared to the secret provided during the webhook invocation. - -For example here is a GitHub webhook with a reference to a secret named `mysecret`: - -[source,yaml] ----- -type: "GitHub" -github: - secretReference: - name: "mysecret" ----- - -The secret is then defined as follows. Note that the value of the secret is base64 encoded as is required for any `data` field of a `Secret` object. 
- -[source,yaml] ----- -- kind: Secret - apiVersion: v1 - metadata: - name: mysecret - creationTimestamp: - data: - WebHookSecretKey: c2VjcmV0dmFsdWUx ----- diff --git a/modules/byoh-configuring.adoc b/modules/byoh-configuring.adoc deleted file mode 100644 index 0a80416ac66e..000000000000 --- a/modules/byoh-configuring.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/byoh-windows-instance.adoc - -:_content-type: PROCEDURE -[id="configuring-byoh-windows-instance"] -= Configuring a BYOH Windows instance - -Creating a BYOH Windows instance requires creating a config map in the Windows Machine Config Operator (WMCO) namespace. - -.Prerequisites -Any Windows instances that are to be attached to the cluster as a node must fulfill the following requirements: - -* The instance must be on the same network as the Linux worker nodes in the cluster. -* Port 22 must be open and running an SSH server. -* The default shell for the SSH server must be the link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows[Windows Command shell], or `cmd.exe`. -* Port 10250 must be open for log collection. -* An administrator user is present with the private key used in the secret set as an authorized SSH key. -* If you are creating a BYOH Windows instance for an installer-provisioned infrastructure (IPI) AWS cluster, you must add a tag to the AWS instance that matches the `spec.template.spec.value.tag` value in the compute machine set for your worker nodes. For example, `kubernetes.io/cluster/: owned` or `kubernetes.io/cluster/: shared`. -* If you are creating a BYOH Windows instance on vSphere, communication with the internal API server must be enabled. -* The hostname of the instance must follow the link:https://datatracker.ietf.org/doc/html/rfc1123[RFC 1123] DNS label requirements, which include the following standards: -** Contains only lowercase alphanumeric characters or '-'. -** Starts with an alphanumeric character. -** Ends with an alphanumeric character. - -[NOTE] -==== -Windows instances deployed by the WMCO are configured with the containerd container runtime. Because the WMCO installs and manages the runtime, it is recommended that you not manually install containerd on nodes. -==== - -.Procedure -. Create a ConfigMap named `windows-instances` in the WMCO namespace that describes the Windows instances to be added. -+ -[NOTE] -==== -Format each entry in the config map's data section by using the address as the key while formatting the value as `username=`. -==== -+ -.Example config map -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: windows-instances - namespace: openshift-windows-machine-config-operator -data: - 10.1.42.1: |- <1> - username=Administrator <2> - instance.example.com: |- - username=core ----- -<1> The address that the WMCO uses to reach the instance over SSH, either a DNS name or an IPv4 address. A DNS PTR record must exist for this address. It is recommended that you use a DNS name with your BYOH instance if your organization uses DHCP to assign IP addresses. If not, you need to update the `windows-instances` ConfigMap whenever the instance is assigned a new IP address. -<2> The name of the administrator user created in the prerequisites. 
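The config map above is shown as a YAML manifest. As a minimal, non-authoritative sketch, the same `windows-instances` object can also be created imperatively with `oc create configmap --from-literal`, reusing the example address and user names from the manifest; `oc` splits each `--from-literal` argument at the first `=`, so the rest of the string (`username=...`) is stored as the value:

[source,terminal]
----
# imperative alternative to the windows-instances manifest above (sketch only)
$ oc create configmap windows-instances \
    -n openshift-windows-machine-config-operator \
    --from-literal=10.1.42.1='username=Administrator' \
    --from-literal=instance.example.com='username=core'
----

The YAML manifest remains the documented approach. If you use the imperative form, confirm the result with `oc get configmap windows-instances -n openshift-windows-machine-config-operator -o yaml` before expecting the WMCO to configure the instances.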
- diff --git a/modules/byoh-removal.adoc b/modules/byoh-removal.adoc deleted file mode 100644 index aa36b2487712..000000000000 --- a/modules/byoh-removal.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/byoh-windows-instance.adoc - -[id="removing-byoh-windows-instance"] -= Removing BYOH Windows instances -You can remove BYOH instances attached to the cluster by deleting the instance's entry in the config map. Deleting an instance reverts that instance back to its state prior to adding to the cluster. Any logs and container runtime artifacts are not added to these instances. - -For an instance to be cleanly removed, it must be accessible with the current private key provided to WMCO. For example, to remove the `10.1.42.1` instance from the previous example, the config map would be changed to the following: - -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: windows-instances - namespace: openshift-windows-machine-config-operator -data: - instance.example.com: |- - username=core ----- - -Deleting `windows-instances` is viewed as a request to deconstruct all Windows instances added as nodes. diff --git a/modules/ca-bundle-replacing.adoc b/modules/ca-bundle-replacing.adoc deleted file mode 100644 index 1b96d175e8b9..000000000000 --- a/modules/ca-bundle-replacing.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/updating-ca-bundle.adoc - -:_content-type: PROCEDURE -[id="ca-bundle-replacing_{context}"] -= Replacing the CA Bundle certificate - -.Procedure - -. Create a config map that includes the root CA certificate used to sign the wildcard certificate: -+ -[source,terminal] ----- -$ oc create configmap custom-ca \ - --from-file=ca-bundle.crt= \//<1> - -n openshift-config ----- -<1> `` is the path to the CA certificate bundle on your local file system. - -. Update the cluster-wide proxy configuration with the newly created config map: -+ -[source,terminal] ----- -$ oc patch proxy/cluster \ - --type=merge \ - --patch='{"spec":{"trustedCA":{"name":"custom-ca"}}}' ----- diff --git a/modules/ca-bundle-understanding.adoc b/modules/ca-bundle-understanding.adoc deleted file mode 100644 index 949b7f2c768d..000000000000 --- a/modules/ca-bundle-understanding.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/updating-ca-bundle.adoc - -:_content-type: SNIPPET -[id="ca-bundle-understanding_{context}"] -= Understanding the CA Bundle certificate - -Proxy certificates allow users to specify one or more custom certificate authority (CA) used by platform components when making egress connections. - -The `trustedCA` field of the Proxy object is a reference to a config map that contains a user-provided trusted certificate authority (CA) bundle. This bundle is merged with the {op-system-first} trust bundle and injected into the trust store of platform components that make egress HTTPS calls. For example, `image-registry-operator` calls an external image registry to download images. If `trustedCA` is not specified, only the {op-system} trust bundle is used for proxied HTTPS connections. Provide custom CA certificates to the {op-system} trust bundle if you want to use your own certificate infrastructure. - -The `trustedCA` field should only be consumed by a proxy validator. 
The validator is responsible for reading the certificate bundle from required key `ca-bundle.crt` and copying it to a config map named `trusted-ca-bundle` in the `openshift-config-managed` namespace. The namespace for the config map referenced by `trustedCA` is `openshift-config`: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-ca-bundle - namespace: openshift-config -data: - ca-bundle.crt: | - -----BEGIN CERTIFICATE----- - Custom CA certificate bundle. - -----END CERTIFICATE----- ----- diff --git a/modules/capi-machine-set-creating.adoc b/modules/capi-machine-set-creating.adoc deleted file mode 100644 index 240c070f444a..000000000000 --- a/modules/capi-machine-set-creating.adoc +++ /dev/null @@ -1,206 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: PROCEDURE -[id="capi-machine-set-creating_{context}"] -= Creating a Cluster API compute machine set - -You can create compute machine sets that use the Cluster API to dynamically manage the machine compute resources for specific workloads of your choice. - -.Prerequisites - -* Deploy an {product-title} cluster. -* Enable the use of the Cluster API. -* Install the OpenShift CLI (`oc`). -* Log in to `oc` as a user with `cluster-admin` permission. - -.Procedure - -. Create a YAML file that contains the cluster custom resource (CR) and is named `.yaml`. -+ -If you are not sure which value to set for the `` parameter, you can check the value for an existing Machine API compute machine set in your cluster. - -.. To list the Machine API compute machine sets, run the following command: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api <1> ----- -<1> Specify the `openshift-machine-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1d 0 0 55m -agl030519-vplxk-worker-us-east-1e 0 0 55m -agl030519-vplxk-worker-us-east-1f 0 0 55m ----- - -.. To display the contents of a specific compute machine set CR, run the following command: -+ -[source,terminal] ----- -$ oc get machineset \ --n openshift-machine-api \ --o yaml ----- -+ -.Example output -[source,yaml] ----- -... -template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: agl030519-vplxk <1> - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: agl030519-vplxk-worker-us-east-1a -... ----- -<1> The cluster ID, which you use for the `` parameter. - -. Create the cluster CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the cluster CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get cluster ----- -+ -.Example output -[source,terminal] ----- -NAME PHASE AGE VERSION - Provisioning 4h6m ----- - -. Create a YAML file that contains the infrastructure CR and is named `.yaml`. - -. Create the infrastructure CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the infrastructure CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get ----- -+ -where `` is the value that corresponds to your platform. 
-+ -.Example output -[source,terminal] ----- -NAME CLUSTER READY VPC BASTION IP - true ----- - -. Create a YAML file that contains the machine template CR and is named `.yaml`. - -. Create the machine template CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the machine template CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get ----- -+ -where `` is the value that corresponds to your platform. -+ -.Example output -[source,terminal] ----- -NAME AGE - 77m ----- - -. Create a YAML file that contains the compute machine set CR and is named `.yaml`. - -. Create the compute machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Verification -+ -To confirm that the compute machine set CR is created, run the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-cluster-api <1> ----- -<1> Specify the `openshift-cluster-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME CLUSTER REPLICAS READY AVAILABLE AGE VERSION - 1 1 1 17m ----- -+ -When the new compute machine set is available, the `REPLICAS` and `AVAILABLE` values match. If the compute machine set is not available, wait a few minutes and run the command again. - -.Verification - -* To verify that the compute machine set is creating machines according to your desired configuration, you can review the lists of machines and nodes in the cluster. - -** To view the list of Cluster API machines, run the following command: -+ -[source,terminal] ----- -$ oc get machine -n openshift-cluster-api <1> ----- -<1> Specify the `openshift-cluster-api` namespace. -+ -.Example output -[source,terminal] ----- -NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION -- ..compute.internal Running 8m23s ----- - -** To view the list of nodes, run the following command: -+ -[source,terminal] ----- -$ oc get node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -..compute.internal Ready worker 5h14m v1.26.0 -..compute.internal Ready master 5h19m v1.26.0 -..compute.internal Ready worker 7m v1.26.0 ----- diff --git a/modules/capi-troubleshooting.adoc b/modules/capi-troubleshooting.adoc deleted file mode 100644 index d81354af80b0..000000000000 --- a/modules/capi-troubleshooting.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-troubleshooting_{context}"] -= Troubleshooting clusters that use the Cluster API - -Use the information in this section to understand and recover from issues you might encounter. Generally, troubleshooting steps for problems with the Cluster API are similar to those steps for problems with the Machine API. - -The Cluster CAPI Operator and its operands are provisioned in the `openshift-cluster-api` namespace, whereas the Machine API uses the `openshift-machine-api` namespace. When using `oc` commands that reference a namespace, be sure to reference the correct one. - -[id="ts-capi-cli_{context}"] -== CLI commands return Cluster API machines - -For clusters that use the Cluster API, `oc` commands such as `oc get machine` return results for Cluster API machines. Because the letter `c` precedes the letter `m` alphabetically, Cluster API machines appear in the return before Machine API machines do. 
- -* To list only Machine API machines, use the fully qualified name `machines.machine.openshift.io` when running the `oc get machine` command: -+ -[source,terminal] ----- -$ oc get machines.machine.openshift.io ----- - -* To list only Cluster API machines, use the fully qualified name `machines.cluster.x-k8s.io` when running the `oc get machine` command: -+ -[source,terminal] ----- -$ oc get machines.cluster.x-k8s.io ----- diff --git a/modules/capi-yaml-cluster.adoc b/modules/capi-yaml-cluster.adoc deleted file mode 100644 index 6ae11078445e..000000000000 --- a/modules/capi-yaml-cluster.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-cluster_{context}"] -= Sample YAML for a Cluster API cluster resource - -The cluster resource defines the name and infrastructure provider for the cluster and is managed by the Cluster API. This resource has the same structure for all providers. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: <2> - name: <1> - namespace: openshift-cluster-api ----- -<1> Specify the name of the cluster. -<2> Specify the infrastructure kind for the cluster. Valid values are: -+ --- -* `AWSCluster`: The cluster is running on Amazon Web Services (AWS). -* `GCPCluster`: The cluster is running on Google Cloud Platform (GCP). --- \ No newline at end of file diff --git a/modules/capi-yaml-infrastructure-aws.adoc b/modules/capi-yaml-infrastructure-aws.adoc deleted file mode 100644 index b835fa3c3437..000000000000 --- a/modules/capi-yaml-infrastructure-aws.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-infrastructure-aws_{context}"] -= Sample YAML for a Cluster API infrastructure resource on Amazon Web Services - -The infrastructure resource is provider-specific and defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. The compute machine set references this resource when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AWSCluster <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - region: <3> ----- -<1> Specify the infrastructure kind for the cluster. This value must match the value for your platform. -<2> Specify the name of the cluster. -<3> Specify the AWS region. \ No newline at end of file diff --git a/modules/capi-yaml-infrastructure-gcp.adoc b/modules/capi-yaml-infrastructure-gcp.adoc deleted file mode 100644 index bffc4d600b28..000000000000 --- a/modules/capi-yaml-infrastructure-gcp.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-infrastructure-gcp_{context}"] -= Sample YAML for a Cluster API infrastructure resource on Google Cloud Platform - -The infrastructure resource is provider-specific and defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. The compute machine set references this resource when creating machines. 
- -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: GCPCluster <1> -metadata: - name: <2> -spec: - network: - name: -network <2> - project: <3> - region: <4> ----- -<1> Specify the infrastructure kind for the cluster. This value must match the value for your platform. -<2> Specify the name of the cluster. -<3> Specify the GCP project name. -<4> Specify the GCP region. \ No newline at end of file diff --git a/modules/capi-yaml-machine-set-aws.adoc b/modules/capi-yaml-machine-set-aws.adoc deleted file mode 100644 index a6bc4e18599a..000000000000 --- a/modules/capi-yaml-machine-set-aws.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-set-aws_{context}"] -= Sample YAML for a Cluster API compute machine set resource on Amazon Web Services - -The compute machine set resource defines additional properties of the machines that it creates. The compute machine set also references the infrastructure resource and machine template when creating machines. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1alpha4 -kind: MachineSet -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - clusterName: <2> - replicas: 1 - selector: - matchLabels: - test: example - template: - metadata: - labels: - test: example - spec: - bootstrap: - dataSecretName: worker-user-data <3> - clusterName: <2> - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 - kind: AWSMachineTemplate <4> - name: <2> ----- -<1> Specify a name for the compute machine set. -<2> Specify the name of the cluster. -<3> For the Cluster API Technology Preview, the Operator can use the worker user data secret from `openshift-machine-api` namespace. -<4> Specify the machine template kind. This value must match the value for your platform. \ No newline at end of file diff --git a/modules/capi-yaml-machine-set-gcp.adoc b/modules/capi-yaml-machine-set-gcp.adoc deleted file mode 100644 index fa1b5076a334..000000000000 --- a/modules/capi-yaml-machine-set-gcp.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-set-gcp_{context}"] -= Sample YAML for a Cluster API compute machine set resource on Google Cloud Platform - -The compute machine set resource defines additional properties of the machines that it creates. The compute machine set also references the infrastructure resource and machine template when creating machines. - -[source,yaml] ----- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineSet -metadata: - name: <1> - namespace: openshift-cluster-api -spec: - clusterName: <2> - replicas: 1 - selector: - matchLabels: - test: test - template: - metadata: - labels: - test: test - spec: - bootstrap: - dataSecretName: worker-user-data <3> - clusterName: <2> - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: GCPMachineTemplate <4> - name: <1> - failureDomain: <5> ----- -<1> Specify a name for the compute machine set. -<2> Specify the name of the cluster. -<3> For the Cluster API Technology Preview, the Operator can use the worker user data secret from `openshift-machine-api` namespace. -<4> Specify the machine template kind. This value must match the value for your platform. -<5> Specify the failure domain within the GCP region. 
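As with the other Cluster API resources described in the creation procedure above, this sample manifest is applied with `oc create -f` and verified in the `openshift-cluster-api` namespace. A brief usage sketch, assuming the YAML above is saved to a hypothetical file named `capi-machineset-gcp.yaml`:

[source,terminal]
----
# hypothetical file name for the sample manifest above
$ oc create -f capi-machineset-gcp.yaml
$ oc get machineset -n openshift-cluster-api
$ oc get machine -n openshift-cluster-api
----

When the compute machine set is available, the `REPLICAS` and `AVAILABLE` values in the `oc get machineset` output match, and the resulting machines appear in the `oc get machine` output, as described in the creation procedure.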
\ No newline at end of file diff --git a/modules/capi-yaml-machine-template-aws.adoc b/modules/capi-yaml-machine-template-aws.adoc deleted file mode 100644 index 2e40623bc553..000000000000 --- a/modules/capi-yaml-machine-template-aws.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-template-aws_{context}"] -= Sample YAML for a Cluster API machine template resource on Amazon Web Services - -The machine template resource is provider-specific and defines the basic properties of the machines that a compute machine set creates. The compute machine set references this template when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 -kind: AWSMachineTemplate <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - template: - spec: <3> - uncompressedUserData: true - iamInstanceProfile: .... - instanceType: m5.large - cloudInit: - insecureSkipSecretsManager: true - ami: - id: .... - subnet: - filters: - - name: tag:Name - values: - - ... - additionalSecurityGroups: - - filters: - - name: tag:Name - values: - - ... ----- -<1> Specify the machine template kind. This value must match the value for your platform. -<2> Specify a name for the machine template. -<3> Specify the details for your environment. The values here are examples. \ No newline at end of file diff --git a/modules/capi-yaml-machine-template-gcp.adoc b/modules/capi-yaml-machine-template-gcp.adoc deleted file mode 100644 index 5a43a909cf51..000000000000 --- a/modules/capi-yaml-machine-template-gcp.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: REFERENCE -[id="capi-yaml-machine-template-gcp_{context}"] -= Sample YAML for a Cluster API machine template resource on Google Cloud Platform - -The machine template resource is provider-specific and defines the basic properties of the machines that a compute machine set creates. The compute machine set references this template when creating machines. - -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: GCPMachineTemplate <1> -metadata: - name: <2> - namespace: openshift-cluster-api -spec: - template: - spec: <3> - rootDeviceType: pd-ssd - rootDeviceSize: 128 - instanceType: n1-standard-4 - image: projects/rhcos-cloud/global/images/rhcos-411-85-202203181601-0-gcp-x86-64 - subnet: -worker-subnet - serviceAccounts: - email: - scopes: - - https://www.googleapis.com/auth/cloud-platform - additionalLabels: - kubernetes-io-cluster-: owned - additionalNetworkTags: - - -worker - ipForwarding: Disabled ----- -<1> Specify the machine template kind. This value must match the value for your platform. -<2> Specify a name for the machine template. -<3> Specify the details for your environment. The values here are examples. 
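The machine template is created and checked in the same way as the other Cluster API resources. A short sketch, assuming the sample above is saved to a hypothetical file named `capi-machinetemplate-gcp.yaml`; the resource name passed to `oc get` is assumed here to be the lowercased kind, which corresponds to the platform-specific `oc get` verification step in the creation procedure:

[source,terminal]
----
# hypothetical file name; resource name assumed to be the lowercased kind
$ oc create -f capi-machinetemplate-gcp.yaml
$ oc get gcpmachinetemplate -n openshift-cluster-api
----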
diff --git a/modules/cco-ccoctl-configuring.adoc b/modules/cco-ccoctl-configuring.adoc deleted file mode 100644 index 512bcc332de3..000000000000 --- a/modules/cco-ccoctl-configuring.adoc +++ /dev/null @@ -1,232 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc -// * installing/installing_ibm_cloud_public/configuring-iam-ibm-cloud.adoc -// * installing/installing_ibm_powervs/preparing-to-install-on-ibm-power-vs.doc -// * installing/installing_alibaba/manually-creating-alibaba-ram.adoc -// * installing/installing_nutanix/preparing-to-install-on-nutanix.adoc -// * updating/preparing-manual-creds-update.adoc - -ifeval::["{context}" == "cco-mode-sts"] -:aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "manually-creating-alibaba-ram"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-nutanix"] -:nutanix: -endif::[] -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-ibm-power-vs"] -:ibm-power-vs: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-configuring_{context}"] -ifndef::update[= Configuring the Cloud Credential Operator utility] -ifdef::update[= Configuring the Cloud Credential Operator utility for a cluster update] - -//This applies only to Alibaba Cloud. -ifdef::alibabacloud[] -To assign RAM users and policies that provide long-lived RAM AccessKeys (AKs) for each in-cluster component, extract and prepare the Cloud Credential Operator (CCO) utility (`ccoctl`) binary. -endif::alibabacloud[] - -//Nutanix-only intro because it needs context in its install procedure. -ifdef::nutanix[] -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). To install a cluster on Nutanix, you must set the CCO to `manual` mode as part of the installation process. -endif::nutanix[] -ifdef::ibm-power-vs[] -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). To install a cluster on {ibmpowerProductName} Virtual Server, you must set the CCO to `manual` mode as part of the installation process. -endif::ibm-power-vs[] - -//Alibaba Cloud uses ccoctl, but creates different kinds of resources than other clouds, so this applies to everyone else. The upgrade procs also have a different intro, so they are excluded here. -ifndef::alibabacloud,update[] -To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility (`ccoctl`) binary. -endif::alibabacloud,update[] - -//Intro for the upgrade procs. -ifdef::update[] -To upgrade a cluster that uses the Cloud Credential Operator (CCO) in manual mode to create and manage cloud credentials from outside of the cluster, extract and prepare the CCO utility (`ccoctl`) binary. -endif::update[] - -[NOTE] -==== -The `ccoctl` utility is a Linux binary that must run in a Linux environment. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator access. -* You have installed the OpenShift CLI (`oc`). 
- -//Upgrade prereqs -ifdef::update[] -* Your cluster was configured using the `ccoctl` utility to create and manage cloud credentials from outside of the cluster. -endif::update[] - -//AWS permissions needed when running ccoctl during install (I think we can omit from upgrade, since they already have an appropriate AWS account if they are upgrading). -ifdef::aws-sts[] -* You have created an AWS account for the `ccoctl` utility to use with the following permissions: -+ -.Required AWS permissions -[cols="a,a"] -|==== -|Permission type |Required permissions - -|`iam` permissions -|* `iam:CreateOpenIDConnectProvider` -* `iam:CreateRole` -* `iam:DeleteOpenIDConnectProvider` -* `iam:DeleteRole` -* `iam:DeleteRolePolicy` -* `iam:GetOpenIDConnectProvider` -* `iam:GetRole` -* `iam:GetUser` -* `iam:ListOpenIDConnectProviders` -* `iam:ListRolePolicies` -* `iam:ListRoles` -* `iam:PutRolePolicy` -* `iam:TagOpenIDConnectProvider` -* `iam:TagRole` - -|`s3` permissions -|* `s3:CreateBucket` -* `s3:DeleteBucket` -* `s3:DeleteObject` -* `s3:GetBucketAcl` -* `s3:GetBucketTagging` -* `s3:GetObject` -* `s3:GetObjectAcl` -* `s3:GetObjectTagging` -* `s3:ListBucket` -* `s3:PutBucketAcl` -* `s3:PutBucketPolicy` -* `s3:PutBucketPublicAccessBlock` -* `s3:PutBucketTagging` -* `s3:PutObject` -* `s3:PutObjectAcl` -* `s3:PutObjectTagging` - -|`cloudfront` permissions -|* `cloudfront:ListCloudFrontOriginAccessIdentities` -* `cloudfront:ListDistributions` -* `cloudfront:ListTagsForResource` - -|==== -+ -If you plan to store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL, the AWS account that runs the `ccoctl` utility requires the following additional permissions: -+ --- -* `cloudfront:CreateCloudFrontOriginAccessIdentity` -* `cloudfront:CreateDistribution` -* `cloudfront:DeleteCloudFrontOriginAccessIdentity` -* `cloudfront:DeleteDistribution` -* `cloudfront:GetCloudFrontOriginAccessIdentity` -* `cloudfront:GetCloudFrontOriginAccessIdentityConfig` -* `cloudfront:GetDistribution` -* `cloudfront:TagResource` -* `cloudfront:UpdateDistribution` --- -+ -[NOTE] -==== -These additional permissions support the use of the `--create-private-s3-bucket` option when processing credentials requests with the `ccoctl aws create-all` command. -==== -endif::aws-sts[] - -.Procedure - -. Obtain the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- - -. Obtain the CCO container image from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ CCO_IMAGE=$(oc adm release info --image-for='cloud-credential-operator' $RELEASE_IMAGE -a ~/.pull-secret) ----- -+ -[NOTE] -==== -Ensure that the architecture of the `$RELEASE_IMAGE` matches the architecture of the environment in which you will use the `ccoctl` tool. -==== - -. Extract the `ccoctl` binary from the CCO container image within the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc image extract $CCO_IMAGE --file="/usr/bin/ccoctl" -a ~/.pull-secret ----- - -. 
Change the permissions to make `ccoctl` executable by running the following command: -+ -[source,terminal] ----- -$ chmod 775 ccoctl ----- - -.Verification - -* To verify that `ccoctl` is ready to use, display the help file by running the following command: -+ -[source,terminal] ----- -$ ccoctl --help ----- -+ -.Output of `ccoctl --help` -[source,terminal] ----- -OpenShift credentials provisioning tool - -Usage: - ccoctl [command] - -Available Commands: - alibabacloud Manage credentials objects for alibaba cloud - aws Manage credentials objects for AWS cloud - gcp Manage credentials objects for Google cloud - help Help about any command - ibmcloud Manage credentials objects for IBM Cloud - nutanix Manage credentials objects for Nutanix - -Flags: - -h, --help help for ccoctl - -Use "ccoctl [command] --help" for more information about a command. ----- - -ifeval::["{context}" == "cco-mode-sts"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "configuring-iam-ibm-cloud"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "manually-creating-alibaba-ram"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-nutanix"] -:!nutanix: -endif::[] -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "preparing-to-install-on-ibm-power-vs"] -:!ibm-power-vs: -endif::[] diff --git a/modules/cco-ccoctl-creating-at-once.adoc b/modules/cco-ccoctl-creating-at-once.adoc deleted file mode 100644 index 757d474040a1..000000000000 --- a/modules/cco-ccoctl-creating-at-once.adoc +++ /dev/null @@ -1,351 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.adoc -// * installing/installing_alibaba/manually-creating-alibaba-ram.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc - -ifeval::["{context}" == "cco-mode-sts"] -:aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:google-cloud-platform: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:alibabacloud-customizations: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:alibabacloud-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-creating-at-once_{context}"] -ifdef::aws-sts[] -= Creating AWS resources with a single command - -If you do not need to review the JSON files that the `ccoctl` tool creates before modifying AWS resources, and if the process the `ccoctl` tool uses to create AWS resources automatically meets the requirements of your organization, you can use the `ccoctl aws create-all` command to automate the creation of AWS resources. - -Otherwise, you can create the AWS resources individually. - -//to-do if possible: xref to modules/cco-ccoctl-creating-individually.adoc for `create the AWS resources individually` -endif::aws-sts[] -ifdef::google-cloud-platform[] -= Creating GCP resources with the Cloud Credential Operator utility - -You can use the `ccoctl gcp create-all` command to automate the creation of GCP resources. 
-endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -[id="cco-ccoctl-creating-at-once_{context}"] -= Creating credentials for {product-title} components with the ccoctl tool - -You can use the {product-title} Cloud Credential Operator (CCO) utility to automate the creation of Alibaba Cloud RAM users and policies for each in-cluster component. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. -==== - -.Prerequisites - -You must have: - -* Extracted and prepared the `ccoctl` binary. -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -* Created a RAM user with sufficient permission to create the {product-title} cluster. -* Added the AccessKeyID (`access_key_id`) and AccessKeySecret (`access_key_secret`) of that RAM user into the link:https://www.alibabacloud.com/help/en/doc-detail/311667.htm#h2-sls-mfm-3p3[`~/.alibabacloud/credentials` file] on your local computer. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -.Procedure - -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. Set the `$RELEASE_IMAGE` variable by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -. Extract the list of `CredentialsRequest` objects from the {product-title} release image by running the following command: -+ -[source,terminal] -ifdef::aws-sts[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=aws \ ---to=/credrequests \ <1> ---from=quay.io//ocp-release: ----- -endif::aws-sts[] -ifdef::google-cloud-platform[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=gcp \ ---to=/credrequests \ <1> -quay.io//ocp-release: ----- -endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] ----- -$ oc adm release extract \ ---credentials-requests \ ---cloud=alibabacloud \ ---to=/credrequests \ <1> -$RELEASE_IMAGE ----- -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. -+ -[NOTE] -==== -This command can take a few moments to run. -==== - -ifdef::aws-sts[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on AWS -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cloud-credential-operator_05-iam-ro-credentialsrequest.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <5> -0000_50_cluster-storage-operator_03_credentials_request_aws.yaml <6> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Cloud Credential Operator CR is required. -<3> The Image Registry Operator CR is required. 
-<4> The Ingress Operator CR is required. -<5> The Network Operator CR is required. -<6> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::aws-sts[] -ifdef::google-cloud-platform[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on GCP -+ -[source,terminal] ----- -0000_26_cloud-controller-manager-operator_16_credentialsrequest-gcp.yaml <1> -0000_30_machine-api-operator_00_credentials-request.yaml <2> -0000_50_cloud-credential-operator_05-gcp-ro-credentialsrequest.yaml <3> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-gcs.yaml <4> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <5> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <6> -0000_50_cluster-storage-operator_03_credentials_request_gcp.yaml <7> ----- -+ -<1> The Cloud Controller Manager Operator CR is required. -<2> The Machine API Operator CR is required. -<3> The Cloud Credential Operator CR is required. -<4> The Image Registry Operator CR is required. -<5> The Ingress Operator CR is required. -<6> The Network Operator CR is required. -<7> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::google-cloud-platform[] -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on Alibaba Cloud -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-alibaba.yaml <2> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <3> -0000_50_cluster-storage-operator_03_credentials_request_alibaba.yaml <4> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Image Registry Operator CR is required. -<3> The Ingress Operator CR is required. -<4> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -ifdef::aws-sts,google-cloud-platform[] -. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: -+ -endif::aws-sts,google-cloud-platform[] -ifdef::aws-sts[] -[source,terminal] ----- -$ ccoctl aws create-all \ - --name= \// <1> - --region= \// <2> - --credentials-requests-dir=/credrequests \// <3> - --output-dir= \// <4> - --create-private-s3-bucket <5> ----- -<1> Specify the name used to tag any cloud resources that are created for tracking. -<2> Specify the AWS region in which cloud resources will be created. -<3> Specify the directory containing the files for the component `CredentialsRequest` objects. -<4> Optional: Specify the directory in which you want the `ccoctl` utility to create objects. By default, the utility creates objects in the directory in which the commands are run. -<5> Optional: By default, the `ccoctl` utility stores the OpenID Connect (OIDC) configuration files in a public S3 bucket and uses the S3 URL as the public OIDC endpoint. 
To store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL instead, use the `--create-private-s3-bucket` parameter. -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -endif::aws-sts[] -ifdef::google-cloud-platform[] -[source,terminal] ----- -$ ccoctl gcp create-all \ ---name= \ ---region= \ ---project= \ ---credentials-requests-dir=/credrequests ----- -+ -where: -+ --- -** `` is the user-defined name for all created GCP resources used for tracking. -** `` is the GCP region in which cloud resources will be created. -** `` is the GCP project ID in which cloud resources will be created. -** `/credrequests` is the directory containing the files of `CredentialsRequest` manifests to create GCP service accounts. --- -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -endif::google-cloud-platform[] - -ifdef::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] -. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: - -.. Run the following command to use the tool: -+ -[source,terminal] ----- -$ ccoctl alibabacloud create-ram-users \ ---name \ ---region= \ ---credentials-requests-dir=/credrequests \ ---output-dir= ----- -+ -where: -+ --- -** `` is the name used to tag any cloud resources that are created for tracking. -** `` is the Alibaba Cloud region in which cloud resources will be created. -** `/credrequests` is the directory containing the files for the component `CredentialsRequest` objects. -** `` is the directory where the generated component credentials secrets will be placed. --- -+ -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -+ -.Example output -+ -[source,terminal] ----- -2022/02/11 16:18:26 Created RAM User: user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:27 Ready for creating new ram policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy -2022/02/11 16:18:27 RAM policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy has created -2022/02/11 16:18:28 Policy user1-alicloud-openshift-machine-api-alibabacloud-credentials-policy-policy has attached on user user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:29 Created access keys for RAM User: user1-alicloud-openshift-machine-api-alibabacloud-credentials -2022/02/11 16:18:29 Saved credentials configuration to: user1-alicloud/manifests/openshift-machine-api-alibabacloud-credentials-credentials.yaml -... ----- -+ -[NOTE] -==== -A RAM user can have up to two AccessKeys at the same time. If you run `ccoctl alibabacloud create-ram-users` more than twice, the previous generated manifests secret becomes stale and you must reapply the newly generated secrets. -==== -// Above output was in AWS area but I believe belongs here. - -.. 
Verify that the {product-title} secrets are created: -+ -[source,terminal] ----- -$ ls /manifests ----- -+ -.Example output: -+ -[source,terminal] ----- -openshift-cluster-csi-drivers-alibaba-disk-credentials-credentials.yaml -openshift-image-registry-installer-cloud-credentials-credentials.yaml -openshift-ingress-operator-cloud-credentials-credentials.yaml -openshift-machine-api-alibabacloud-credentials-credentials.yaml ----- -+ -You can verify that the RAM users and policies are created by querying Alibaba Cloud. For more information, refer to Alibaba Cloud documentation on listing RAM users and policies. - -. Copy the generated credential files to the target manifests directory: -+ -[source,terminal] ----- -$ cp .//manifests/*credentials.yaml ./dir>/manifests/ ----- -+ -where: - -``:: Specifies the directory created by the `ccoctl alibabacloud create-ram-users` command. -``:: Specifies the directory in which the installation program creates files. -endif::alibabacloud-default,alibabacloud-customizations,alibabacloud-vpc[] - -ifdef::aws-sts,google-cloud-platform[] -.Verification - -* To verify that the {product-title} secrets are created, list the files in the `/manifests` directory: -+ -[source,terminal] ----- -$ ls /manifests ----- -endif::aws-sts,google-cloud-platform[] -ifdef::aws-sts[] -+ -.Example output: -+ -[source,terminal] ----- -cluster-authentication-02-config.yaml -openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml -openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml -openshift-image-registry-installer-cloud-credentials-credentials.yaml -openshift-ingress-operator-cloud-credentials-credentials.yaml -openshift-machine-api-aws-cloud-credentials-credentials.yaml ----- -//Would love a GCP version of the above output. - -You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. -endif::aws-sts[] -ifdef::google-cloud-platform[] -You can verify that the IAM service accounts are created by querying GCP. For more information, refer to GCP documentation on listing IAM service accounts. -endif::google-cloud-platform[] - -ifeval::["{context}" == "cco-mode-sts"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "cco-mode-gcp-workload-identity"] -:!google-cloud-platform: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:!alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:!alibabacloud-customizations: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!alibabacloud-vpc: -endif::[] diff --git a/modules/cco-ccoctl-creating-individually.adoc b/modules/cco-ccoctl-creating-individually.adoc deleted file mode 100644 index d2404785a72d..000000000000 --- a/modules/cco-ccoctl-creating-individually.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc - -:_content-type: PROCEDURE -[id="cco-ccoctl-creating-individually_{context}"] -= Creating AWS resources individually - -If you need to review the JSON files that the `ccoctl` tool creates before modifying AWS resources, or if the process the `ccoctl` tool uses to create AWS resources automatically does not meet the requirements of your organization, you can create the AWS resources individually. 
For example, this option might be useful for an organization that shares the responsibility for creating these resources among different users or departments. - -Otherwise, you can use the `ccoctl aws create-all` command to create the AWS resources automatically. - -//to-do if possible: xref to modules/cco-ccoctl-creating-at-once.adoc for `create the AWS resources automatically` - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. - -Some `ccoctl` commands make AWS API calls to create or modify AWS resources. You can use the `--dry-run` flag to avoid making API calls. Using this flag creates JSON files on the local file system instead. You can review and modify the JSON files and then apply them with the AWS CLI tool using the `--cli-input-json` parameters. -==== - -.Prerequisites - -* Extract and prepare the `ccoctl` binary. - -.Procedure - -. Generate the public and private RSA key files that are used to set up the OpenID Connect provider for the cluster: -+ -[source,terminal] ----- -$ ccoctl aws create-key-pair ----- -+ -.Example output: -+ -[source,terminal] ----- -2021/04/13 11:01:02 Generating RSA keypair -2021/04/13 11:01:03 Writing private key to //serviceaccount-signer.private -2021/04/13 11:01:03 Writing public key to //serviceaccount-signer.public -2021/04/13 11:01:03 Copying signing key for use by installer ----- -+ -where `serviceaccount-signer.private` and `serviceaccount-signer.public` are the generated key files. -+ -This command also creates a private key that the cluster requires during installation in `//tls/bound-service-account-signing-key.key`. - -. Create an OpenID Connect identity provider and S3 bucket on AWS: -+ -[source,terminal] ----- -$ ccoctl aws create-identity-provider \ ---name= \ ---region= \ ---public-key-file=/serviceaccount-signer.public ----- -+ -where: -+ --- -** `` is the name used to tag any cloud resources that are created for tracking. -** `` is the AWS region in which cloud resources will be created. -** `` is the path to the public key file that the `ccoctl aws create-key-pair` command generated. --- -+ -.Example output: -+ -[source,terminal] ----- -2021/04/13 11:16:09 Bucket -oidc created -2021/04/13 11:16:10 OpenID Connect discovery document in the S3 bucket -oidc at .well-known/openid-configuration updated -2021/04/13 11:16:10 Reading public key -2021/04/13 11:16:10 JSON web key set (JWKS) in the S3 bucket -oidc at keys.json updated -2021/04/13 11:16:18 Identity Provider created with ARN: arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com ----- -+ -where `openid-configuration` is a discovery document and `keys.json` is a JSON web key set file. -+ -This command also creates a YAML configuration file in `//manifests/cluster-authentication-02-config.yaml`. This file sets the issuer URL field for the service account tokens that the cluster generates, so that the AWS IAM identity provider trusts the tokens. - -. Create IAM roles for each component in the cluster. - -.. Extract the list of `CredentialsRequest` objects from the {product-title} release image: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ ---cloud=aws \ ---to=/credrequests <1> ---from=quay.io//ocp-release: ----- -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. - -.. 
If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on AWS -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cloud-credential-operator_05-iam-ro-credentialsrequest.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <5> -0000_50_cluster-storage-operator_03_credentials_request_aws.yaml <6> ----- -+ -<1> The Machine API Operator CR is required. -<2> The Cloud Credential Operator CR is required. -<3> The Image Registry Operator CR is required. -<4> The Ingress Operator CR is required. -<5> The Network Operator CR is required. -<6> The Storage Operator CR is an optional component and might be disabled in your cluster. - -.. Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ ---name= \ ---region= \ ---credentials-requests-dir=/credrequests \ ---identity-provider-arn=arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com ----- -+ -[NOTE] -==== -For AWS environments that use alternative IAM API endpoints, such as GovCloud, you must also specify your region with the `--region` parameter. - -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== -+ -For each `CredentialsRequest` object, `ccoctl` creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy as defined in each `CredentialsRequest` object from the {product-title} release image. - -.Verification - -* To verify that the {product-title} secrets are created, list the files in the `/manifests` directory: -+ -[source,terminal] ----- -$ ll /manifests ----- -+ -.Example output: -+ -[source,terminal] ----- -total 24 --rw-------. 1 161 Apr 13 11:42 cluster-authentication-02-config.yaml --rw-------. 1 379 Apr 13 11:59 openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml --rw-------. 1 353 Apr 13 11:59 openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml --rw-------. 1 355 Apr 13 11:59 openshift-image-registry-installer-cloud-credentials-credentials.yaml --rw-------. 1 339 Apr 13 11:59 openshift-ingress-operator-cloud-credentials-credentials.yaml --rw-------. 1 337 Apr 13 11:59 openshift-machine-api-aws-cloud-credentials-credentials.yaml ----- - -You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. 
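For example, the following AWS CLI query is one hedged way to list the roles that the `ccoctl` tool created. This sketch is not part of the original module: it assumes that the role names share the `<name>` prefix that was passed to `ccoctl aws create-iam-roles`, and `<name>` is a placeholder that you must replace with your own value.

[source,terminal]
----
$ aws iam list-roles \
  --query "Roles[?starts_with(RoleName, '<name>')].RoleName" \
  --output text
----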
diff --git a/modules/cco-ccoctl-deleting-sts-resources.adoc b/modules/cco-ccoctl-deleting-sts-resources.adoc deleted file mode 100644 index 3c3ceb12b8c8..000000000000 --- a/modules/cco-ccoctl-deleting-sts-resources.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/uninstalling-cluster-aws.adoc -// * installing/installing_gcp/uninstalling-cluster-gcp.adoc - -ifeval::["{context}" == "uninstall-cluster-aws"] -:aws-sts: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:google-cloud-platform: -endif::[] - -:_content-type: PROCEDURE -[id="cco-ccoctl-deleting-sts-resources_{context}"] -ifdef::aws-sts[] -= Deleting AWS resources with the Cloud Credential Operator utility - -To clean up resources after uninstalling an {product-title} cluster with the Cloud Credential Operator (CCO) in manual mode with STS, you can use the CCO utility (`ccoctl`) to remove the AWS resources that `ccoctl` created during installation. -endif::aws-sts[] - -ifdef::google-cloud-platform[] -= Deleting GCP resources with the Cloud Credential Operator utility - -To clean up resources after uninstalling an {product-title} cluster with the Cloud Credential Operator (CCO) in manual mode with GCP Workload Identity, you can use the CCO utility (`ccoctl`) to remove the GCP resources that `ccoctl` created during installation. -endif::google-cloud-platform[] - -.Prerequisites - -* Extract and prepare the `ccoctl` binary. -ifdef::aws-sts[] -* Install an {product-title} cluster with the CCO in manual mode with STS. -endif::aws-sts[] -ifdef::google-cloud-platform[] -* Install an {product-title} cluster with the CCO in manual mode with GCP Workload Identity. -endif::google-cloud-platform[] - -.Procedure - -ifdef::aws-sts[] -* Delete the AWS resources that `ccoctl` created: -+ -[source,terminal] ----- -$ ccoctl aws delete \ - --name= \ <1> - --region= <2> ----- -+ -<1> `` matches the name that was originally used to create and tag the cloud resources. -<2> `` is the AWS region in which to delete cloud resources. 
-+ -.Example output: -+ -[source,terminal] ----- -2021/04/08 17:50:41 Identity Provider object .well-known/openid-configuration deleted from the bucket -oidc -2021/04/08 17:50:42 Identity Provider object keys.json deleted from the bucket -oidc -2021/04/08 17:50:43 Identity Provider bucket -oidc deleted -2021/04/08 17:51:05 Policy -openshift-cloud-credential-operator-cloud-credential-o associated with IAM Role -openshift-cloud-credential-operator-cloud-credential-o deleted -2021/04/08 17:51:05 IAM Role -openshift-cloud-credential-operator-cloud-credential-o deleted -2021/04/08 17:51:07 Policy -openshift-cluster-csi-drivers-ebs-cloud-credentials associated with IAM Role -openshift-cluster-csi-drivers-ebs-cloud-credentials deleted -2021/04/08 17:51:07 IAM Role -openshift-cluster-csi-drivers-ebs-cloud-credentials deleted -2021/04/08 17:51:08 Policy -openshift-image-registry-installer-cloud-credentials associated with IAM Role -openshift-image-registry-installer-cloud-credentials deleted -2021/04/08 17:51:08 IAM Role -openshift-image-registry-installer-cloud-credentials deleted -2021/04/08 17:51:09 Policy -openshift-ingress-operator-cloud-credentials associated with IAM Role -openshift-ingress-operator-cloud-credentials deleted -2021/04/08 17:51:10 IAM Role -openshift-ingress-operator-cloud-credentials deleted -2021/04/08 17:51:11 Policy -openshift-machine-api-aws-cloud-credentials associated with IAM Role -openshift-machine-api-aws-cloud-credentials deleted -2021/04/08 17:51:11 IAM Role -openshift-machine-api-aws-cloud-credentials deleted -2021/04/08 17:51:39 Identity Provider with ARN arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com deleted ----- -//Would love a GCP version of the above output. -endif::aws-sts[] -ifdef::google-cloud-platform[] -. Obtain the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- - -. Extract the list of `CredentialsRequest` custom resources (CRs) from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ - --cloud=gcp \ - --to=/credrequests \ <1> - $RELEASE_IMAGE ----- -+ -<1> `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. - -. Delete the GCP resources that `ccoctl` created: -+ -[source,terminal] ----- -$ ccoctl gcp delete \ - --name= \ <1> - --project= \ <2> - --credentials-requests-dir=/credrequests ----- -+ -<1> `` matches the name that was originally used to create and tag the cloud resources. -<2> `` is the GCP project ID in which to delete cloud resources. -endif::google-cloud-platform[] - -.Verification - -ifdef::aws-sts[] -* To verify that the resources are deleted, query AWS. For more information, refer to AWS documentation. -endif::aws-sts[] - -ifdef::google-cloud-platform[] -* To verify that the resources are deleted, query GCP. For more information, refer to GCP documentation. 
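+
As an illustrative, hedged sketch only (the `--filter` expression and the `<name>` prefix are assumptions based on how `ccoctl` names the resources it creates, not values from this document), you might confirm that no matching service accounts remain:
+
[source,terminal]
----
$ gcloud iam service-accounts list --filter="displayName:<name>"
----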
-endif::google-cloud-platform[] - -ifeval::["{context}" == "uninstall-cluster-aws"] -:!aws-sts: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:!google-cloud-platform: -endif::[] diff --git a/modules/cco-ccoctl-upgrading.adoc b/modules/cco-ccoctl-upgrading.adoc deleted file mode 100644 index e2ef3daff1bf..000000000000 --- a/modules/cco-ccoctl-upgrading.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing-manual-creds-update.adoc - - -:_content-type: PROCEDURE -[id="cco-ccoctl-upgrading_{context}"] -= Updating cloud provider resources with the Cloud Credential Operator utility - -The process for upgrading an {product-title} cluster that was configured using the CCO utility (`ccoctl`) is similar to creating the cloud provider resources during installation. - -[NOTE] -==== -By default, `ccoctl` creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. This procedure uses `` to refer to this directory. - -On AWS clusters, some `ccoctl` commands make AWS API calls to create or modify AWS resources. You can use the `--dry-run` flag to avoid making API calls. Using this flag creates JSON files on the local file system instead. You can review and modify the JSON files and then apply them with the AWS CLI tool using the `--cli-input-json` parameters. -==== - -.Prerequisites - -* Obtain the {product-title} release image for the version that you are upgrading to. - -* Extract and prepare the `ccoctl` binary from the release image. - -.Procedure - -. Extract the list of `CredentialsRequest` custom resources (CRs) from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests \ - --cloud= \ - --to=/credrequests \ - quay.io//ocp-release: ----- -+ -where: -+ --- -* `` is the value for your cloud provider. Valid values are `alibabacloud`, `aws`, `gcp`, `ibmcloud`, and `nutanix`. -* `credrequests` is the directory where the list of `CredentialsRequest` objects is stored. This command creates the directory if it does not exist. --- - -. For each `CredentialsRequest` CR in the release image, ensure that a namespace that matches the text in the `spec.secretRef.namespace` field exists in the cluster. This field is where the generated secrets that hold the credentials configuration are stored. -+ -.Sample AWS `CredentialsRequest` object -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cloud-credential-operator-iam-ro - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - effect: Allow - action: - - iam:GetUser - - iam:GetUserPolicy - - iam:ListAccessKeys - resource: "*" - secretRef: - name: cloud-credential-operator-iam-ro-creds - namespace: openshift-cloud-credential-operator <1> ----- -<1> This field indicates the namespace which needs to exist to hold the generated secret. -+ -The `CredentialsRequest` CRs for other platforms have a similar format with different platform-specific values. - -. For any `CredentialsRequest` CR for which the cluster does not already have a namespace with the name specified in `spec.secretRef.namespace`, create the namespace by running the following command: -+ -[source,terminal] ----- -$ oc create namespace ----- - -. 
Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory by running the command for your cloud provider. The following commands process `CredentialsRequest` objects: -+ --- -* {alibaba}: `ccoctl alibabacloud create-ram-users` -* Amazon Web Services (AWS): `ccoctl aws create-iam-roles` -* Google Cloud Platform (GCP): `ccoctl gcp create-all` -* IBM Cloud: `ccoctl ibmcloud create-service-id` -* Nutanix: `ccoctl nutanix create-shared-secrets` --- -+ -[IMPORTANT] -==== -Refer to the `ccoctl` utility instructions in the installation content for your cloud provider for important platform-specific details about the required arguments and special considerations. -==== -+ -For each `CredentialsRequest` object, `ccoctl` creates the required provider resources and a permissions policy as defined in each `CredentialsRequest` object from the {product-title} release image. - -. Apply the secrets to your cluster by running the following command: -+ -[source,terminal] ----- -$ ls /manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -.Verification - -You can verify that the required provider resources and permissions policies are created by querying the cloud provider. For more information, refer to your cloud provider documentation on listing roles or service accounts. - -.Next steps - -* Update the `upgradeable-to` annotation to indicate that the cluster is ready to upgrade. diff --git a/modules/cco-determine-mode-cli.adoc b/modules/cco-determine-mode-cli.adoc deleted file mode 100644 index 9017ccf1b381..000000000000 --- a/modules/cco-determine-mode-cli.adoc +++ /dev/null @@ -1,140 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: PROCEDURE - -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:about-cco: -endif::[] - -[id="cco-determine-mode-cli_{context}"] -= Determining the Cloud Credential Operator mode by using the CLI - -You can determine what mode the Cloud Credential Operator (CCO) is configured to use by using the CLI. - -[NOTE] -==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator permissions. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Log in to `oc` on the cluster as a user with the `cluster-admin` role. - -. To determine the mode that the CCO is configured to use, enter the following command: -+ -[source,terminal] ----- -$ oc get cloudcredentials cluster \ - -o=jsonpath={.spec.credentialsMode} ----- -+ -The following output values are possible, though not all are supported on all platforms: -+ --- -* `''`: The CCO is operating in the default mode. In this configuration, the CCO operates in mint or passthrough mode, depending on the credentials provided during installation. -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. -* `Manual`: The CCO is operating in manual mode. --- -+ -[IMPORTANT] -==== -To determine the specific configuration of an AWS or GCP cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. 
- -AWS and GCP clusters support using mint mode with the root secret deleted. -ifdef::update[] -If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. -endif::update[] - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. -==== - -ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n kube-system \ - -o jsonpath \ - --template '{ .metadata.annotations }' ----- -+ -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. -+ -This command displays the value of the `.metadata.annotations` parameter in the cluster root secret object. The following output values are possible: -+ --- -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. --- -+ -If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. -endif::about-cco[] - -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n=kube-system ----- -+ -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. -+ -If the root secret is present, the output of this command returns information about the secret. An error indicates that the root secret is not present on the cluster. - -. AWS or GCP clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, run the following command: -+ -[source,terminal] ----- -$ oc get authentication cluster \ - -o jsonpath \ - --template='{ .spec.serviceAccountIssuer }' ----- -+ -This command displays the value of the `.spec.serviceAccountIssuer` parameter in the cluster `Authentication` object. -+ --- -* An output of a URL that is associated with your cloud provider indicates that the CCO is using manual mode with AWS STS or GCP Workload Identity to create and manage cloud credentials from outside of the cluster. These clusters are configured using the `ccoctl` utility. - -* An empty output indicates that the cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility. --- - -ifdef::update[] -.Next steps - -* If you are updating a cluster that has the CCO operating in mint or passthrough mode and the root secret is present, you do not need to update any cloud provider resources and can continue to the next part of the update process. - -* If your cluster is using the CCO in mint mode with the root secret removed, you must reinstate the credential secret with the administrator-level credential before continuing to the next part of the update process. - -* If your cluster was configured using the CCO utility (`ccoctl`), you must take the following actions: - -.. Configure the `ccoctl` utility for the new release and use it to update the cloud provider resources. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. 
- -* If your cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility, you must take the following actions: - -.. Manually update the cloud provider resources for the new release. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. -endif::update[] - -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:!about-cco: -endif::[] diff --git a/modules/cco-determine-mode-gui.adoc b/modules/cco-determine-mode-gui.adoc deleted file mode 100644 index 8f38ae7f9931..000000000000 --- a/modules/cco-determine-mode-gui.adoc +++ /dev/null @@ -1,163 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/preparing-manual-creds-update.adoc -// * authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc - -:_content-type: PROCEDURE - -ifeval::["{context}" == "preparing-manual-creds-update"] -:update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:about-cco: -endif::[] - -[id="cco-determine-mode-gui_{context}"] -= Determining the Cloud Credential Operator mode by using the web console - -You can determine what mode the Cloud Credential Operator (CCO) is configured to use by using the web console. - -[NOTE] -==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. -==== - -.Prerequisites - -* You have access to an {product-title} account with cluster administrator permissions. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Navigate to *Administration* -> *Cluster Settings*. - -. On the *Cluster Settings* page, select the *Configuration* tab. - -. Under *Configuration resource*, select *CloudCredential*. - -. On the *CloudCredential details* page, select the *YAML* tab. - -. In the YAML block, check the value of `spec.credentialsMode`. The following values are possible, though not all are supported on all platforms: -+ --- -* `''`: The CCO is operating in the default mode. In this configuration, the CCO operates in mint or passthrough mode, depending on the credentials provided during installation. -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. -* `Manual`: The CCO is operating in manual mode. --- -+ -[IMPORTANT] -==== -To determine the specific configuration of an AWS or GCP cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. - -AWS and GCP clusters support using mint mode with the root secret deleted. -ifdef::update[] -If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. -endif::update[] - -An AWS or GCP cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster using the AWS Security Token Service (STS) or GCP Workload Identity. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. -==== - -ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, inspect the annotations on the cluster root secret: - -.. Navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. 
-+ -[NOTE] -==== -Ensure that the *Project* dropdown is set to *All Projects*. -==== -+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -|GCP -|`gcp-credentials` - -|=== - -.. To view the CCO mode that the cluster is using, click `1 annotation` under *Annotations*, and check the value field. The following values are possible: -+ --- -* `Mint`: The CCO is operating in mint mode. -* `Passthrough`: The CCO is operating in passthrough mode. --- -+ -If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. -endif::about-cco[] - -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. -+ -[NOTE] -==== -Ensure that the *Project* dropdown is set to *All Projects*. -==== -+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -|GCP -|`gcp-credentials` - -|=== -+ --- -* If you see one of these values, your cluster is using mint or passthrough mode with the root secret present. -* If you do not see these values, your cluster is using the CCO in mint mode with the root secret removed. --- - -. AWS or GCP clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, you must check the cluster `Authentication` object YAML values. - -.. Navigate to *Administration* -> *Cluster Settings*. - -.. On the *Cluster Settings* page, select the *Configuration* tab. - -.. Under *Configuration resource*, select *Authentication*. - -.. On the *Authentication details* page, select the *YAML* tab. - -.. In the YAML block, check the value of the `.spec.serviceAccountIssuer` parameter. -+ --- -* A value that contains a URL that is associated with your cloud provider indicates that the CCO is using manual mode with AWS STS or GCP Workload Identity to create and manage cloud credentials from outside of the cluster. These clusters are configured using the `ccoctl` utility. - -* An empty value (`''`) indicates that the cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility. --- - -ifdef::update[] -.Next steps - -* If you are updating a cluster that has the CCO operating in mint or passthrough mode and the root secret is present, you do not need to update any cloud provider resources and can continue to the next part of the update process. - -* If your cluster is using the CCO in mint mode with the root secret removed, you must reinstate the credential secret with the administrator-level credential before continuing to the next part of the update process. - -* If your cluster was configured using the CCO utility (`ccoctl`), you must take the following actions: - -.. Configure the `ccoctl` utility for the new release and use it to update the cloud provider resources. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. - -* If your cluster is using the CCO in manual mode but was not configured using the `ccoctl` utility, you must take the following actions: - -.. Manually update the cloud provider resources for the new release. - -.. Update the `upgradeable-to` annotation to indicate that the cluster is ready to update. 
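If you also have the OpenShift CLI (`oc`) available, one hedged way to follow the effect of the `upgradeable-to` annotation is to watch the `Upgradeable` condition on the `cloud-credential` cluster Operator. This sketch is an addition to the web console procedure, and the `jsonpath` expression is an assumption rather than a documented step:

[source,terminal]
----
$ oc get clusteroperator cloud-credential \
  -o jsonpath='{.status.conditions[?(@.type=="Upgradeable")].status}'
----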
-endif::update[] - -ifeval::["{context}" == "preparing-manual-creds-update"] -:!update: -endif::[] -ifeval::["{context}" == "about-cloud-credential-operator"] -:!about-cco: -endif::[] \ No newline at end of file diff --git a/modules/cco-manual-upgrade-annotation.adoc b/modules/cco-manual-upgrade-annotation.adoc deleted file mode 100644 index de02caaf5e44..000000000000 --- a/modules/cco-manual-upgrade-annotation.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc -// * updating/preparing-manual-creds-update.adoc - -:_content-type: PROCEDURE - -[id="cco-manual-upgrade-annotation_{context}"] -= Indicating that the cluster is ready to upgrade - -The Cloud Credential Operator (CCO) `Upgradable` status for a cluster with manually maintained credentials is `False` by default. - -.Prerequisites - -* For the release image that you are upgrading to, you have processed any new credentials manually or by using the Cloud Credential Operator utility (`ccoctl`). -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Log in to `oc` on the cluster as a user with the `cluster-admin` role. - -. Edit the `CloudCredential` resource to add an `upgradeable-to` annotation within the `metadata` field by running the following command: -+ -[source,terminal] ----- -$ oc edit cloudcredential cluster ----- -+ -.Text to add -+ -[source,yaml] ----- -... - metadata: - annotations: - cloudcredential.openshift.io/upgradeable-to: -... ----- -+ -Where `` is the version that you are upgrading to, in the format `x.y.z`. For example, use `4.12.2` for {product-title} 4.12.2. -+ -It may take several minutes after adding the annotation for the upgradeable status to change. - -.Verification - -//Would like to add CLI steps for same -. In the *Administrator* perspective of the web console, navigate to *Administration* -> *Cluster Settings*. - -. To view the CCO status details, click *cloud-credential* in the *Cluster Operators* list. -+ --- -* If the *Upgradeable* status in the *Conditions* section is *False*, verify that the `upgradeable-to` annotation is free of typographical errors. --- - -. When the *Upgradeable* status in the *Conditions* section is *True*, begin the {product-title} upgrade. \ No newline at end of file diff --git a/modules/ccs-aws-customer-procedure.adoc b/modules/ccs-aws-customer-procedure.adoc deleted file mode 100644 index d2e64d20d051..000000000000 --- a/modules/ccs-aws-customer-procedure.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -:_content-type: PROCEDURE -[id="ccs-aws-customer-procedure_{context}"] -= Required customer procedure -// TODO: Better procedure heading that tells you what this is doing - -The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer’s Amazon Web Services (AWS) account. Red Hat requires several prerequisites in order to provide these services. - -.Procedure - -. If the customer is using AWS Organizations, you must either use an AWS account within your organization or link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new[create a new one]. - -. To ensure that Red Hat can perform necessary actions, you must either create a service control policy (SCP) or ensure that none is applied to the AWS account. - -. 
link:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html[Attach] the SCP to the AWS account. - -. Within the AWS account, you must link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html[create] an `osdCcsAdmin` IAM user with the following requirements: -** This user needs at least *Programmatic access* enabled. -** This user must have the `AdministratorAccess` policy attached to it. - -. Provide the IAM user credentials to Red Hat. -** You must provide the *access key ID* and *secret access key* in {cluster-manager-url}. diff --git a/modules/ccs-aws-customer-requirements.adoc b/modules/ccs-aws-customer-requirements.adoc deleted file mode 100644 index 16f2ed31ab09..000000000000 --- a/modules/ccs-aws-customer-requirements.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-customer-requirements_{context}"] -= Customer requirements - - -{product-title} clusters using a Customer Cloud Subscription (CCS) model on Amazon Web Services (AWS) must meet several prerequisites before they can be deployed. - -[id="ccs-requirements-account_{context}"] -== Account - -* The customer ensures that link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[AWS limits] are sufficient to support {product-title} provisioned within the customer-provided AWS account. - -* The customer-provided AWS account should be in the customer's AWS Organization with the applicable service control policy (SCP) applied. -+ -[NOTE] -==== -It is not a requirement that the customer-provided account be within an AWS Organization or for the SCP to be applied, however Red Hat must be able to perform all the actions listed in the SCP without restriction. -==== - -* The customer-provided AWS account must not be transferable to Red Hat. - -* The customer may not impose AWS usage restrictions on Red Hat activities. Imposing restrictions severely hinders Red Hat's ability to respond to incidents. - -* Red Hat deploys monitoring into AWS to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided AWS account. - -* The customer can deploy native AWS services within the same customer-provided AWS account. -+ -[NOTE] -==== -Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. -==== - -[id="ccs-requirements-access_{context}"] -== Access requirements - -* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. -+ -[NOTE] -==== -This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided AWS account. -==== - -* Red Hat must have AWS console access to the customer-provided AWS account. This access is protected and managed by Red Hat. - -* The customer must not utilize the AWS account to elevate their permissions within the {product-title} cluster. - -* Actions available in {cluster-manager-url} must not be directly performed in the customer-provided AWS account. - -[id="ccs-requirements-support_{context}"] -== Support requirements - -* Red Hat recommends that the customer have at least link:https://aws.amazon.com/premiumsupport/plans/[Business Support] from AWS. - -* Red Hat has authority from the customer to request AWS support on their behalf. 
- -* Red Hat has authority from the customer to request AWS resource limit increases on the customer-provided account. - -* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. - -[id="ccs-requirements-security_{context}"] -== Security requirements - -* The customer-provided IAM credentials must be unique to the customer-provided AWS account and must not be stored anywhere in the customer-provided AWS account. - -* Volume snapshots will remain within the customer-provided AWS account and customer-specified region. - -* Red Hat must have ingress access to EC2 hosts and the API server through white-listed Red Hat machines. - -* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-aws-iam.adoc b/modules/ccs-aws-iam.adoc deleted file mode 100644 index 09699c1f5372..000000000000 --- a/modules/ccs-aws-iam.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-iam_{context}"] -= Red Hat managed IAM references for AWS - -Red Hat is responsible for creating and managing the following Amazon Web Services (AWS) resources: IAM policies, IAM users, and IAM roles. - -[id="aws-policy-iam-policies_{context}"] -== IAM policies - -[NOTE] -==== -IAM policies are subject to modification as the capabilities of {product-title} change. -==== - -* The `AdministratorAccess` policy is used by the administration role. This policy provides Red Hat the access necessary to administer the {product-title} cluster in the customer-provided AWS account. -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "*", - "Resource": "*", - "Effect": "Allow" - } - ] -} ----- - -* The `CustomerAdministratorAccess` role provides the customer access to administer a subset of services within the AWS account. At this time, the following are allowed: - -** VPC Peering -** VPN Setup -** Direct Connect (only available if granted through the service control policy) -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:AttachVpnGateway", - "ec2:DescribeVpnConnections", - "ec2:AcceptVpcPeeringConnection", - "ec2:DeleteVpcPeeringConnection", - "ec2:DescribeVpcPeeringConnections", - "ec2:CreateVpnConnectionRoute", - "ec2:RejectVpcPeeringConnection", - "ec2:DetachVpnGateway", - "ec2:DeleteVpnConnectionRoute", - "ec2:DeleteVpnGateway", - "ec2:DescribeVpcs", - "ec2:CreateVpnGateway", - "ec2:ModifyVpcPeeringConnectionOptions", - "ec2:DeleteVpnConnection", - "ec2:CreateVpcPeeringConnection", - "ec2:DescribeVpnGateways", - "ec2:CreateVpnConnection", - "ec2:DescribeRouteTables", - "ec2:CreateTags", - "ec2:CreateRoute", - "directconnect:*" - ], - "Resource": "*" - } - ] -} ----- - - -* If enabled, the `BillingReadOnlyAccess` role provides read-only access to view billing and usage information for the account. -+ -Billing and usage access is only granted if the root account in the AWS Organization has it enabled. This is an optional step the customer must perform to enable read-only billing and usage access and does not impact the creation of this profile and the role that uses it. If this role is not enabled, users will not see billing and usage information. 
See this tutorial on link:https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_billing.html#tutorial-billing-step1[how to enable access to billing data]. -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "aws-portal:ViewAccount", - "aws-portal:ViewBilling" - ], - "Resource": "*" - } - ] -} ----- - -[id="aws-policy-iam-users_{context}"] -== IAM users - -The `osdManagedAdmin` user is created immediately after taking control of the customer-provided AWS account. This is the user that will perform the {product-title} cluster installation. - -[id="aws-policy-iam-roles_{context}"] -== IAM roles - -* The `network-mgmt` role provides customer-federated administrative access to the AWS account through a separate AWS account. It also has the same access as a read-only role. The `network-mgmt` role only applies to non-Customer Cloud Subscription (CCS) clusters. The following policies are attached to the role: - -** AmazonEC2ReadOnlyAccess -** CustomerAdministratorAccess - -* The `read-only` role provides customer-federated read-only access to the AWS account through a separate AWS account. The following policies are attached to the role: - -** AWSAccountUsageReportAccess -** AmazonEC2ReadOnlyAccess -** AmazonS3ReadOnlyAccess -** IAMReadOnlyAccess -** BillingReadOnlyAccess diff --git a/modules/ccs-aws-provisioned.adoc b/modules/ccs-aws-provisioned.adoc deleted file mode 100644 index a714090750c2..000000000000 --- a/modules/ccs-aws-provisioned.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-provisioned_{context}"] -= Provisioned AWS Infrastructure - - -This is an overview of the provisioned Amazon Web Services (AWS) components on a deployed {product-title} cluster. For a more detailed listing of all provisioned AWS components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. - -[id="aws-policy-ec2_{context}"] -== AWS Elastic Computing (EC2) instances - -AWS EC2 instances are required to deploy the control plane and data plane functions of {product-title} in the AWS public cloud. Instance types might vary for control plane and infrastructure nodes depending on worker node count. - -* Single availability zone -** 3 m5.2xlarge minimum (control plane nodes) -** 2 r5.xlarge minimum (infrastructure nodes) -** 2 m5.xlarge minimum but highly variable (worker nodes) - -* Multiple availability zones -** 3 m5.2xlarge minimum (control plane nodes) -** 3 r5.xlarge minimum (infrastructure nodes) -** 3 m5.xlarge minimum but highly variable (worker nodes) - -[id="aws-policy-ebs-storage_{context}"] -== AWS Elastic Block Store (EBS) storage - -Amazon EBS block storage is used for both local node storage and persistent volume storage. - -Volume requirements for each EC2 instance: - -- Control plane volumes -* Size: 350 GB -* Type: io1 -* Input/output operations per second: 1000 - -- Infrastructure volumes -* Size: 300 GB -* Type: gp2 -* Input/output operations per second: 900 - -- Worker volumes -* Size: 300 GB -* Type: gp2 -* Input/output operations per second: 900 - -[id="aws-policy-elastic-load-balancers_{context}"] -== Elastic load balancers - -Up to two Network Load Balancers (NLBs) for API and up to two Classic Load Balancers (CLBs) for application router. 
For more information, see the link:https://aws.amazon.com/elasticloadbalancing/features/#Details_for_Elastic_Load_Balancing_Products[ELB documentation for AWS]. - -[id="aws-policy-s3-storage_{context}"] -== S3 storage -The image registry and Elastic Block Store (EBS) volume snapshots are backed by AWS S3 storage. Pruning of resources is performed regularly to optimize S3 usage and cluster performance. - -[NOTE] -==== -Two buckets are required with a typical size of 2 TB each. -==== - -[id="aws-policy-vpc_{context}"] -== VPC -Customers should expect to see one VPC per cluster. Additionally, the VPC needs the following configurations: - -* *Subnets*: Two subnets for a cluster with a single availability zone, or six subnets for a cluster with multiple availability zones. -+ -[NOTE] -==== -A *public subnet* connects directly to the internet through an internet gateway. A *private subnet* connects to the internet through a network address translation (NAT) gateway. -==== -+ -* *Route tables*: One route table per private subnet, and one additional table per cluster. - -* *Internet gateways*: One Internet Gateway per cluster. - -* *NAT gateways*: One NAT Gateway per public subnet. - -=== Sample VPC Architecture - -image::VPC-Diagram.png[VPC Reference Architecture] - -[id="aws-policy-security-groups_{context}"] -== Security groups - -AWS security groups provide security at the protocol and port-access level; they are associated with EC2 instances and Elastic Load Balancing. Each security group contains a set of rules that filter traffic coming in and out of an EC2 instance. You must ensure the ports required for the link:https://docs.openshift.com/container-platform/4.7/installing/installing_aws/installing-aws-user-infra.html#installation-aws-user-infra-other-infrastructure_installing-aws-user-infra[{OCP} installation] are open on your network and configured to allow access between hosts. diff --git a/modules/ccs-aws-scp.adoc b/modules/ccs-aws-scp.adoc deleted file mode 100644 index 677e6ad70ace..000000000000 --- a/modules/ccs-aws-scp.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -[id="ccs-aws-scp_{context}"] -= Minimum required service control policy (SCP) - - -Service control policy (SCP) management is the responsibility of the customer. These policies are maintained in the AWS Organization and control what services are available within the attached AWS accounts. 
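As a hedged illustration of what maintaining these policies in the AWS Organization can involve (the policy name, file name, and IDs below are placeholders invented for this sketch, not values from this document), a customer might create and attach an SCP with the AWS CLI:

[source,terminal]
----
$ aws organizations create-policy \
  --name openshift-minimum-scp \
  --description "Minimum SCP for managed OpenShift accounts" \
  --type SERVICE_CONTROL_POLICY \
  --content file://scp.json

$ aws organizations attach-policy \
  --policy-id <policy_id> \
  --target-id <organizational_unit_id>
----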
- -[cols="2a,2a,2a,2a",options="header"] - -|=== -| Required/optional -| Service -| Actions -| Effect - -.15+| Required -|Amazon EC2 | All |Allow -|Amazon EC2 Auto Scaling | All |Allow -|Amazon S3| All |Allow -|Identity And Access Management | All |Allow -|Elastic Load Balancing | All |Allow -|Elastic Load Balancing V2| All |Allow -|Amazon CloudWatch | All |Allow -|Amazon CloudWatch Events | All |Allow -|Amazon CloudWatch Logs | All |Allow -|AWS Support | All |Allow -|AWS Key Management Service | All |Allow -|AWS Security Token Service | All |Allow -|AWS Resource Tagging | All |Allow -|AWS Route53 DNS | All |Allow -|AWS Service Quotas | ListServices - -GetRequestedServiceQuotaChange - -GetServiceQuota - -RequestServiceQuotaIncrease - -ListServiceQuotas -| Allow - - -.3+|Optional - -| AWS Billing -| ViewAccount - -Viewbilling - -ViewUsage -| Allow - -|AWS Cost and Usage Report -|All -|Allow - -|AWS Cost Explorer Services -|All -|Allow - - -|=== - -// TODO: Need some sort of intro into whatever this is -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "iam:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "elasticloadbalancing:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "cloudwatch:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "events:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "logs:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "support:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "kms:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "tag:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "servicequotas:ListServices", - "servicequotas:GetRequestedServiceQuotaChange", - "servicequotas:GetServiceQuota", - "servicequotas:RequestServiceQuotaIncrease", - "servicequotas:ListServiceQuotas" - ], - "Resource": [ - "*" - ] - } - ] -} ----- diff --git a/modules/ccs-aws-understand.adoc b/modules/ccs-aws-understand.adoc deleted file mode 100644 index b5c6ae6218bc..000000000000 --- a/modules/ccs-aws-understand.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/aws-ccs.adoc - -:_content-type: CONCEPT -[id="ccs-aws-understand_{context}"] -= Understanding Customer Cloud Subscriptions on AWS - - -To deploy {product-title} into your existing Amazon Web Services (AWS) account using the Customer Cloud Subscription (CCS) model, Red Hat requires several prerequisites be met. - -Red Hat recommends the usage of an AWS Organization to manage multiple AWS accounts. The AWS Organization, managed by the customer, hosts multiple AWS accounts. There is a root account in the organization that all accounts will refer to in the account hierarchy. - -It is recommended for the {product-title} cluster using a CCS model to be hosted in an AWS account within an AWS Organizational Unit. 
A service control policy (SCP) is created and applied to the AWS Organizational Unit that manages what services the AWS sub-accounts are permitted to access. The SCP applies only to available permissions within a single AWS account for all AWS sub-accounts within the Organizational Unit. It is also possible to apply a SCP to a single AWS account. All other accounts in the customer’s AWS Organization are managed in whatever manner the customer requires. Red Hat Site Reliability Engineers (SRE) will not have any control over SCPs within the AWS Organization. diff --git a/modules/ccs-gcp-customer-procedure.adoc b/modules/ccs-gcp-customer-procedure.adoc deleted file mode 100644 index 7a22e6e38685..000000000000 --- a/modules/ccs-gcp-customer-procedure.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc -:_content-type: PROCEDURE -[id="ccs-gcp-customer-procedure_{context}"] - -= Required customer procedure -// TODO: Same as other module - Better procedure heading that tells you what this is doing - - -The Customer Cloud Subscription (CCS) model allows Red Hat to deploy and manage {product-title} into a customer's Google Cloud Platform (GCP) project. Red Hat requires several prerequisites to provide these services. - -[WARNING] -==== -To use {product-title} in your GCP project, the following GCP organizational policy constraints cannot be in place: - -* `constraints/iam.allowedPolicyMemberDomains` -* `constraints/compute.restrictLoadBalancerCreationForTypes` -* `constraints/compute.requireShieldedVm` -* `constraints/compute.vmExternalIpAccess` (This policy constraint is unsupported only during installation. You can re-enable the policy constraint after installation.) -==== - -.Procedure - -. link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Create a Google Cloud project] to host the {product-title} cluster. -+ -[NOTE] -==== -The project name must be 10 characters or less. -==== - -. 
link:https://cloud.google.com/service-usage/docs/enable-disable#enabling[Enable] the following required APIs in the project that hosts your {product-title} cluster: -+ -.Required API services -[cols="2a,3a",options="header"] -|=== -|API service |Console service name - - -|link:https://console.cloud.google.com/apis/library/deploymentmanager.googleapis.com?pli=1&project=openshift-gce-devel&folder=&organizationId=[Cloud Deployment Manager V2 API] -|`deploymentmanager.googleapis.com` - - -|link:https://console.cloud.google.com/apis/library/compute.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Compute Engine API] -|`compute.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/cloudapis.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud APIs] -|`cloudapis.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Resource Manager API] -|`cloudresourcemanager.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/dns.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google DNS API] -|`dns.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/networksecurity.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Network Security API] -|`networksecurity.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/iamcredentials.googleapis.com[IAM Service Account Credentials API] -|`iamcredentials.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/iam.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Identity and Access Management (IAM) API] -|`iam.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/servicemanagement.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Management API] -|`servicemanagement.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/serviceusage.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Service Usage API] -|`serviceusage.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/storage-api.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Google Cloud Storage JSON API] -|`storage-api.googleapis.com` - -|link:https://console.cloud.google.com/apis/library/storage-component.googleapis.com?project=openshift-gce-devel&folder=&organizationId=[Cloud Storage] -|`storage-component.googleapis.com` - -|=== - -. To ensure that Red Hat can perform necessary actions, you must create an `osd-ccs-admin` IAM link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[service account] user within the GCP project. -+ -The following roles must be link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource[granted to the service account]: -+ -.Required roles -[cols="2a,3a",options="header"] - -|=== - -|Role|Console role name - -|Compute Admin -|`roles/compute.admin` - -|DNS Admin -|`roles/dns.admin` - -|Organizational Policy Viewer -|`roles/orgpolicy.policyViewer` - -|Owner -|`roles/owner` - -|Project IAM Admin -|`roles/resourcemanager.projectIamAdmin` - -|Service Management Administrator -|`roles/servicemanagement.admin` - -|Service Usage Admin -|`roles/serviceusage.serviceUsageAdmin` - -|Storage Admin -|`roles/storage.admin` - -|=== - -. 
link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Create the service account key] for the `osd-ccs-admin` IAM service account. Export the key to a file named `osServiceAccount.json`; this JSON file will be uploaded in {cluster-manager-first} when you create your cluster. diff --git a/modules/ccs-gcp-customer-requirements.adoc b/modules/ccs-gcp-customer-requirements.adoc deleted file mode 100644 index de3f6ce96bf9..000000000000 --- a/modules/ccs-gcp-customer-requirements.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="ccs-gcp-customer-requirements_{context}"] -= Customer requirements - - -{product-title} clusters using a Customer Cloud Subscription (CCS) model on Google Cloud Platform (GCP) must meet several prerequisites before they can be deployed. - -[id="ccs-gcp-requirements-account_{context}"] -== Account - -* The customer ensures that link:https://cloud.google.com/storage/quotas[Google Cloud limits] are sufficient to support {product-title} provisioned within the customer-provided GCP account. - -* The customer-provided GCP account should be in the customer's Google Cloud Organization with the applicable Service Account applied. - -* The customer-provided GCP account must not be transferable to Red Hat. - -* The customer may not impose GCP usage restrictions on Red Hat activities. Imposing restrictions severely hinders Red Hat's ability to respond to incidents. - -* Red Hat deploys monitoring into GCP to alert Red Hat when a highly privileged account, such as a root account, logs into the customer-provided GCP account. - -* The customer can deploy native GCP services within the same customer-provided GCP account. -+ -[NOTE] -==== -Customers are encouraged, but not mandated, to deploy resources in a Virtual Private Cloud (VPC) separate from the VPC hosting {product-title} and other Red Hat supported services. -==== - -[id="ccs-gcp-requirements-access_{context}"] -== Access requirements - -* To appropriately manage the {product-title} service, Red Hat must have the `AdministratorAccess` policy applied to the administrator role at all times. -+ -[NOTE] -==== -This policy only provides Red Hat with permissions and capabilities to change resources in the customer-provided GCP account. -==== - -* Red Hat must have GCP console access to the customer-provided GCP account. This access is protected and managed by Red Hat. - -* The customer must not utilize the GCP account to elevate their permissions within the {product-title} cluster. - -* Actions available in the {cluster-manager-url} must not be directly performed in the customer-provided GCP account. - -[id="ccs-gcp-requirements-support_{context}"] -== Support requirements - -* Red Hat recommends that the customer have at least link:https://cloud.google.com/support[Production Support] from GCP. - -* Red Hat has authority from the customer to request GCP support on their behalf. - -* Red Hat has authority from the customer to request GCP resource limit increases on the customer-provided account. - -* Red Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. 
- -[id="ccs-gcp-requirements-security_{context}"] -== Security requirements - -* The customer-provided IAM credentials must be unique to the customer-provided GCP account and must not be stored anywhere in the customer-provided GCP account. - -* Volume snapshots will remain within the customer-provided GCP account and customer-specified region. - -* Red Hat must have ingress access to the API server through white-listed Red Hat machines. - -* Red Hat must have egress allowed to forward system and audit logs to a Red Hat managed central logging stack. diff --git a/modules/ccs-gcp-iam.adoc b/modules/ccs-gcp-iam.adoc deleted file mode 100644 index 7bdee7a0dece..000000000000 --- a/modules/ccs-gcp-iam.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc -[id="ccs-gcp-iam_{context}"] - -= Red Hat managed Google Cloud resources - - -Red Hat is responsible for creating and managing the following IAM Google Cloud Platform (GCP) resources. - -[id="ccs-gcp-iam-service-account-roles_{context}"] -== IAM service account and roles - -The `osd-managed-admin` IAM service account is created immediately after taking control of the customer-provided GCP account. This is the user that will perform the {product-title} cluster installation. - -The following roles are attached to the service account: - -.IAM roles for osd-managed-admin -[cols="2a,3a,2a",options="header"] - -|=== - -|Role |Console role name |Description - -|Compute Admin -|`roles/compute.admin` -|Provides full control of all Compute Engine resources. - -|DNS Administrator -|`roles/dns.admin` -|Provides read-write access to all Cloud DNS resources. - -|Security Admin -|`roles/iam.securityAdmin` -|Security admin role, with permissions to get and set any IAM policy. - -|Storage Admin -|`roles/storage.admin` -|Grants full control of objects and buckets. - -When applied to an individual *bucket*, control applies only to the specified bucket and objects within the bucket. - -|Service Account Admin -|`roles/iam.serviceAccountAdmin` -|Create and manage service accounts. - -|Service Account Key Admin -|`roles/iam.serviceAccountKeyAdmin` -|Create and manage (and rotate) service account keys. - -|Service Account User -|`roles/iam.serviceAccountUser` -|Run operations as the service account. - -|=== - -[id="ccs-gcp-iam-group-roles_{context}"] -== IAM group and roles - -The `sd-sre-platform-gcp-access` Google group is granted access to the GCP project to allow Red Hat Site Reliability Engineering (SRE) access to the console for emergency troubleshooting purposes. - -The following roles are attached to the group: - -.IAM roles for sd-sre-platform-gcp-access -[cols="2a,3a,2a",options="header"] - -|=== - -|Role |Console role name |Description - -|Compute Admin -|`roles/compute.admin` -|Provides full control of all Compute Engine resources. - -|Editor -|`roles/editor` -|Provides all viewer permissions, plus permissions for actions that modify state. - -|Organization Policy Viewer -|`roles/orgpolicy.policyViewer` -|Provides access to view Organization Policies on resources. - -|Project IAM Admin -|`roles/resourcemanager.projectIamAdmin` -|Provides permissions to administer IAM policies on projects. - -|Quota Administrator -|`roles/servicemanagement.quotaAdmin` -|Provides access to administer service quotas. - -|Role Administrator -|`roles/iam.roleAdmin` -|Provides access to all custom roles in the project. 
- -|Service Account Admin -|`roles/iam.serviceAccountAdmin` -|Create and manage service accounts. - - -|Service Usage Admin -|`roles/serviceusage.serviceUsageAdmin` -|Ability to enable, disable, and inspect service states, inspect operations, and consume quota and billing for a consumer project. - -|Tech Support Editor -|`roles/cloudsupport.techSupportEditor` -|Provides full read-write access to technical support cases. - -|=== diff --git a/modules/ccs-gcp-provisioned.adoc b/modules/ccs-gcp-provisioned.adoc deleted file mode 100644 index 9988921dbb25..000000000000 --- a/modules/ccs-gcp-provisioned.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="ccs-gcp-provisioned_{context}"] -= Provisioned GCP Infrastructure - -This is an overview of the provisioned Google Cloud Platform (GCP) components on a deployed {product-title} cluster. For a more detailed listing of all provisioned GCP components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. - -[id="gcp-policy-instances_{context}"] -== Compute instances - -GCP compute instances are required to deploy the control plane and data plane functions of {product-title} in GCP. Instance types might vary for control plane and infrastructure nodes depending on worker node count. - -* Single availability zone -** 2 infra nodes (custom machine type: 4 vCPU and 32 GB RAM) -** 3 control plane nodes (custom machine type: 8 vCPU and 32 GB RAM) -** 2 worker nodes (custom machine type: 4 vCPU and 16 GB RAM) -* Multiple availability zones -** 3 infra nodes (custom machine type: 4 vCPU and 32 GB RAM) -** 3 control plane nodes (custom machine type: 8 vCPU and 32 GB RAM) -** 3 worker nodes (custom machine type: 4 vCPU and 16 GB RAM) - - -[id="gcp-policy-storage_{context}"] -== Storage - -* Infrastructure volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) -** 110 GB Standard persistent disk (kept on instance deletion) -* Worker volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) -* Control plane volumes: -** 128 GB SSD persistent disk (deleted on instance deletion) - -[id="gcp-policy-vpc_{context}"] -== VPC - -* **Subnets:** One master subnet for the control plane workloads and one worker subnet for all others. -* **Router tables:** One global route table per VPC. -* **Internet gateways:** One internet gateway per cluster. -* **NAT gateways:** One master NAT gateway and one worker NAT gateway per cluster. 
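Not part of the original module: to cross-check the provisioned topology described above against a live deployment, a few read-only `gcloud` commands are usually sufficient. This sketch assumes the `gcloud` CLI targets the customer-provided project, with `<project_id>` as a placeholder:

[source,terminal]
----
$ gcloud compute instances list --project <project_id>
$ gcloud compute disks list --project <project_id>
$ gcloud compute networks subnets list --project <project_id>
----

The output should roughly correspond to the node counts, disk sizes, and subnet layout listed in the preceding sections.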
- -[id="gcp-policy-services_{context}"] -== Services - -The following services must be enabled on a GCP CCS cluster: - -* `Deploymentmanager` -* `Compute` -* `Cloudapis` -* `Cloudresourcemanager` -* `DNS` -* `Iamcredentials` -* `IAM` -* `Servicemanagement` -* `Serviceusage` -* `Storage-api` -* `Storage-component` - -[id="gcp-policy-permissions_{context}"] -== Permissions - -The following roles must be added to the support service account: - -* `Compute.admin` -* `Dns.admin` -* `orgpolicy.policyViewer` -* `Owner` -* `resourcemanager.projectIamAdmin` -* `Servicemanagement.admin` -* `serviceusage.serviceUsageAdmin` -* `storage.admin` diff --git a/modules/ccs-gcp-understand.adoc b/modules/ccs-gcp-understand.adoc deleted file mode 100644 index 51e1db886b03..000000000000 --- a/modules/ccs-gcp-understand.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -:_content-type: CONCEPT -[id="ccs-gcp-understand_{context}"] -= Understanding Customer Cloud Subscriptions on GCP - - -Red Hat {product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage {product-title} into a customer's existing {GCP} account. Red Hat requires several prerequisites be met in order to provide this service. - -Red Hat recommends the usage of GCP project, managed by the customer, to organize all of your GCP resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. - -It is recommended for the {product-title} cluster using a CCS model to be hosted in a GCP project within a GCP organization. The Organization resource is the root node of the GCP resource hierarchy and all resources that belong to an organization are grouped under the organization node. An IAM service account with certain roles granted is created and applied to the GCP project. When you make calls to the API, you typically provide service account keys for authentication. Each service account is owned by a specific project, but service accounts can be provided roles to access resources for other projects. diff --git a/modules/cert-manager-about.adoc b/modules/cert-manager-about.adoc deleted file mode 100644 index 983d08b7eec5..000000000000 --- a/modules/cert-manager-about.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-about_{context}"] -= About the {cert-manager-operator} - -The link:https://cert-manager.io/[cert-manager] project introduces certificate authorities and certificates as resource types in the Kubernetes API, which makes it possible to provide certificates on demand to developers working within your cluster. The {cert-manager-operator} provides a supported way to integrate cert-manager into your {product-title} cluster. - -The {cert-manager-operator} provides the following features: - -* Support for integrating with external certificate authorities -* Tools to manage certificates -* Ability for developers to self-serve certificates -* Automatic certificate renewal - -[IMPORTANT] -==== -Do not attempt to use more than one cert-manager Operator in your cluster. If you have a community cert-manager Operator installed in your cluster, you must uninstall it before installing the {cert-manager-operator}. 
-==== diff --git a/modules/cert-manager-acme-about.adoc b/modules/cert-manager-acme-about.adoc deleted file mode 100644 index 5e1ee75c13ba..000000000000 --- a/modules/cert-manager-acme-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-about_{context}"] -= About ACME issuers - -The ACME issuer type for the {cert-manager-operator} represents an Automated Certificate Management Environment (ACME) certificate authority (CA) server. ACME CA servers rely on a _challenge_ to verify that a client owns the domain names that the certificate is being requested for. If the challenge is successful, the {cert-manager-operator} can issue the certificate. If the challenge fails, the {cert-manager-operator} does not issue the certificate. diff --git a/modules/cert-manager-acme-challenges-types.adoc b/modules/cert-manager-acme-challenges-types.adoc deleted file mode 100644 index 55877b3e1958..000000000000 --- a/modules/cert-manager-acme-challenges-types.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-challenges-types_{context}"] -= Supported ACME challenges types - -The {cert-manager-operator} supports the following challenge types for ACME issuers: - -HTTP-01:: With the HTTP-01 challenge type, you provide a computed key at an HTTP URL endpoint in your domain. If the ACME CA server can get the key from the URL, it can validate you as the owner of the domain. -+ -For more information, see link:https://cert-manager.io/docs/configuration/acme/http01/[HTTP01] in the upstream cert-manager documentation. - -DNS-01:: With the DNS-01 challenge type, you provide a computed key at a DNS TXT record. If the ACME CA server can get the key by DNS lookup, it can validate you as the owner of the domain. -+ -For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/[DNS01] in the upstream cert-manager documentation. diff --git a/modules/cert-manager-acme-dns-providers.adoc b/modules/cert-manager-acme-dns-providers.adoc deleted file mode 100644 index 2237e77502dc..000000000000 --- a/modules/cert-manager-acme-dns-providers.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: CONCEPT -[id="cert-manager-acme-dns-providers_{context}"] -= Supported DNS-01 providers - -The {cert-manager-operator} supports the following DNS-01 providers for ACME issuers: - -* Amazon Route 53 -* Azure DNS -+ -[NOTE] -==== -The {cert-manager-operator} does not support using Azure Active Directory (Azure AD) pod identities to assign a managed identity to a pod. -==== -* Google Cloud DNS -+ -[NOTE] -==== -The {cert-manager-operator} does not support using Google workload identity federation. 
-==== diff --git a/modules/cert-manager-acme-dns01-aws.adoc b/modules/cert-manager-acme-dns01-aws.adoc deleted file mode 100644 index 50ba6e3b2df5..000000000000 --- a/modules/cert-manager-acme-dns01-aws.adoc +++ /dev/null @@ -1,170 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-dns01-aws_{context}"] -= Configuring an ACME issuer to solve DNS-01 challenges - -You can use {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Amazon Route 53. - -[NOTE] -==== -Private DNS zones are not supported. -==== - -.Prerequisites - -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. -* You have set up an IAM role for Amazon Route 53. For more information, see link:https://cert-manager.io/docs/configuration/acme/dns01/route53/[Route53] in the upstream cert-manager documentation. -+ -[NOTE] -==== -If your cluster is _not_ configured to use the AWS Security Token Service (STS), you must provide explicit `accessKeyID` and `secretAccessKey` credentials. If you cluster uses AWS STS, you can use implicit ambient credentials. -==== - -.Procedure - -. Optional: Override the nameserver settings for the DNS-01 self check: -+ -This step is required only when the target public-hosted zone overlaps with the cluster's default private-hosted zone. - -.. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -.. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: <1> - overrideArgs: - - '--dns01-recursive-nameservers=1.1.1.1:53' <2> - - '--dns01-recursive-nameservers-only' <3> ----- -<1> Add the `spec.controllerConfig` section. -<2> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. -<3> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. - -.. Save the file to apply the changes. - -. Optional: Create a namespace for the issuer. - -.. Create a YAML file that defines a `Namespace` object: -+ -.Example `namespace.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: my-issuer-namespace <1> ----- -<1> Specify the namespace for the issuer. - -.. Create the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f namespace.yaml ----- - -. Create a secret to store your AWS credentials in by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic aws-secret --from-literal=awsSecretAccessKey= \ <1> - -n my-issuer-namespace ----- -<1> Replace `` with your AWS secret access key. - -. Create an issuer. - -.. 
Create a YAML file that defines the `Issuer` object: -+ -.Example `issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: letsencrypt-staging <1> - namespace: my-issuer-namespace <2> -spec: - acme: - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - email: "" <4> - privateKeySecretRef: - name: <5> - solvers: - - dns01: - route53: - accessKeyID: <6> - hostedZoneID: <7> - region: us-east-1 - secretAccessKeySecretRef: - name: "aws-secret" <8> - key: "awsSecretAccessKey" <9> ----- -<1> Provide a name for the issuer. -<2> Specify the namespace that you created for the issuer. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Replace `` with your email address. -<5> Replace `` with the name of the secret to store the ACME account private key in. -<6> Replace `` with your AWS key ID. -<7> Replace `` with your hosted zone ID. -<8> Specify the name of the secret you created. -<9> Specify the key in the secret you created that stores your AWS secret access key. - -.. Create the `Issuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f issuer.yaml ----- - -. Create a certificate. - -.. Create a YAML file that defines the `Certificate` object: -+ -.Example `certificate.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: my-tls-cert <1> - namespace: my-issuer-namespace <2> -spec: - isCA: false - commonName: '' <3> - secretName: my-tls-cert <4> - dnsNames: - - '' <5> - issuerRef: - name: letsencrypt-staging <6> - kind: Issuer ----- -<1> Provide a name for the certificate. -<2> Specify the namespace that you created for the issuer. -<3> Replace `` with your common name (CN). -<4> Specify the name of the secret to create that will contain the certificate. -<5> Replace `` with your domain name. -<6> Specify the name of the issuer that you created. - -.. Create the `Certificate` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f certificate.yaml ----- diff --git a/modules/cert-manager-acme-http01.adoc b/modules/cert-manager-acme-http01.adoc deleted file mode 100644 index 40fde7e20a8c..000000000000 --- a/modules/cert-manager-acme-http01.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc - -:_content-type: PROCEDURE -[id="cert-manager-acme-http01_{context}"] -= Configuring an ACME issuer to solve HTTP-01 challenges - -You can use {cert-manager-operator} to set up an ACME issuer to solve HTTP-01 challenges. This procedure uses Let's Encrypt as the ACME CA server. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have a service that you want to expose. In this procedure, the service is named `sample-workload`. - -.Procedure - -. Create an ACME cluster issuer. - -.. Create a YAML file that defines the `ClusterIssuer` object: -+ -.Example `acme-cluster-issuer.yaml` file -[source,yaml] ----- -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-staging <1> -spec: - acme: - preferredChain: "" - privateKeySecretRef: - name: <2> - server: https://acme-staging-v02.api.letsencrypt.org/directory <3> - solvers: - - http01: - ingress: - class: openshift-default <4> ----- -<1> Provide a name for the cluster issuer. 
-<2> Replace `` with the name of the secret to store the ACME account private key in. -<3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the Let's Encrypt staging environment. -<4> Specify the Ingress class. - -.. Create the `ClusterIssuer` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f acme-cluster-issuer.yaml ----- - -. Create an Ingress to expose the service of the user workload. - -.. Create a YAML file that defines a `Namespace` object: -+ -.Example `namespace.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: my-ingress-namespace <1> ----- -<1> Specify the namespace for the Ingress. - -.. Create the `Namespace` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f namespace.yaml ----- - -.. Create a YAML file that defines the `Ingress` object: -+ -.Example `ingress.yaml` file -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: sample-ingress <1> - namespace: my-ingress-namespace <2> - annotations: - cert-manager.io/cluster-issuer: letsencrypt-staging <3> - acme.cert-manager.io/http01-ingress-class: openshift-default <4> -spec: - ingressClassName: openshift-default <5> - tls: - - hosts: - - <6> - secretName: sample-tls <7> - rules: - - host: <8> - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: sample-workload <9> - port: - number: 80 ----- -<1> Specify the name of the Ingress. -<2> Specify the namespace that you created for the Ingress. -<3> Specify the cluster issuer that you created. -<4> Specify the Ingress class. -<5> Specify the Ingress class. -<6> Replace `` with the Subject Alternative Name to be associated with the certificate. This name is used to add DNS names to the certificate. -<7> Specify the secret to store the created certificate in. -<8> Replace `` with the hostname. You can use the `.` syntax to take advantage of the `*.` wildcard DNS record and serving certificate for the cluster. For example, you might use `apps.`. Otherwise, you must ensure that a DNS record exists for the chosen hostname. -<9> Specify the name of the service to expose. This example uses a service named `sample-workload`. - -.. Create the `Ingress` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f ingress.yaml ----- diff --git a/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc b/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc deleted file mode 100644 index 91efdaee0e2c..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-aws-non-sts.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-aws.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-aws-non-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} on AWS - -To configure the cloud credentials for the {cert-manager-operator} on an AWS cluster, you must create a `CredentialsRequest` object and allow the Cloud Credential Operator to generate the cloud credentials secret. - -.Prerequisites - -* You have installed the {cert-manager-operator} 1.11.1 or later. -* You have configured the Cloud Credential Operator to operate in _mint_ or _passthrough_ mode. - -.Procedure - -. 
Create a `CredentialsRequest` resource YAML file, for example, `sample-credential-request.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - "route53:GetChange" - effect: Allow - resource: "arn:aws:route53:::change/*" - - action: - - "route53:ChangeResourceRecordSets" - - "route53:ListResourceRecordSets" - effect: Allow - resource: "arn:aws:route53:::hostedzone/*" - - action: - - "route53:ListHostedZonesByName" - effect: Allow - resource: "*" - secretRef: - name: aws-creds - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- - -. Create a `CredentialsRequest` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-credential-request.yaml ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"aws-creds"}]}}}' ----- - -.Verification - -. Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. Verify that the cert-manager controller pod is updated with AWS credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -spec: - containers: - - args: - ... - - mountPath: /.aws - name: cloud-credentials - ... - volumes: - ... - - name: cloud-credentials - secret: - ... - secretName: aws-creds ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc b/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc deleted file mode 100644 index 8365a4fdd72c..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-aws-sts.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-aws.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-aws-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} for the AWS Security Token Service cluster - -To configure cloud credentials for the {cert-manager-operator} on a cluster that uses the AWS Security Token Service (STS), you must generate the cloud credentials manually and apply them to the cluster by using the `ccoctl` binary. - -.Prerequisites - -* You have extracted and prepared the `ccoctl` binary. -* You have configured an {product-title} cluster with AWS STS by using the Cloud Credential Operator in manual mode. - -.Procedure - -. Create a directory to store a `CredentialsRequest` resource YAML file by running the following command: -+ -[source,terminal] ----- -$ mkdir credentials-request ----- - -. 
Create a `CredentialsRequest` resource YAML file under the `credentials-request` directory, such as, `sample-credential-request.yaml`, by applying the following yaml: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AWSProviderSpec - statementEntries: - - action: - - "route53:GetChange" - effect: Allow - resource: "arn:aws:route53:::change/*" - - action: - - "route53:ChangeResourceRecordSets" - - "route53:ListResourceRecordSets" - effect: Allow - resource: "arn:aws:route53:::hostedzone/*" - - action: - - "route53:ListHostedZonesByName" - effect: Allow - resource: "*" - secretRef: - name: aws-creds - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- - -. Use the `ccoctl` tool to process `CredentialsRequest` objects by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name --region= \ - --credentials-requests-dir= \ - --identity-provider-arn --output-dir= ----- -+ -.Example output -[source,terminal] ----- -2023/05/15 18:10:34 Role arn:aws:iam::XXXXXXXXXXXX:role/-cert-manager-aws-creds created -2023/05/15 18:10:34 Saved credentials configuration to: /manifests/cert-manager-aws-creds-credentials.yaml -2023/05/15 18:10:35 Updated Role policy for Role -cert-manager-aws-creds ----- -+ -Copy the `` from the output to use in the next step. For example, `"arn:aws:iam::XXXXXXXXXXXX:role/-cert-manager-aws-creds"` - -. Add the `eks.amazonaws.com/role-arn=""` annotation to the service account by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager annotate serviceaccount cert-manager eks.amazonaws.com/role-arn="" ----- - -. To create a new pod, delete the existing cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc delete pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -The AWS credentials are applied to a new cert-manager controller pod within a minute. - -.Verification - -. Get the name of the updated cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 39s ----- - -. 
Verify that AWS credentials are updated by running the following command: -+ -[source,terminal] ----- -$ oc set env -n cert-manager po/ --list ----- -+ -.Example output -[source,terminal] ----- -# pods/cert-manager-57f9555c54-vbcpg, container cert-manager-controller -# POD_NAMESPACE from field path metadata.namespace -AWS_ROLE_ARN=XXXXXXXXXXXX -AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc deleted file mode 100644 index 7b03f29456e5..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-non-sts-gcp.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-gcp-non-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} on GCP - -To configure the cloud credentials for the {cert-manager-operator} on a GCP cluster you must create a `CredentialsRequest` object, and allow the Cloud Credential Operator to generate the cloud credentials secret. - -.Prerequisites - -* You have installed the {cert-manager-operator} 1.11.1 or later. -* You have configured the Cloud Credential Operator to operate in _mint_ or _passthrough_ mode. - -.Procedure - -. Create a `CredentialsRequest` resource YAML file, such as, `sample-credential-request.yaml` by applying the following yaml: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: GCPProviderSpec - predefinedRoles: - - roles/dns.admin - secretRef: - name: gcp-credentials - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- -+ -[NOTE] -==== -The `dns.admin` role provides admin privileges to the service account for managing Google Cloud DNS resources. To ensure that the cert-manager runs with the service account that has the least privilege, you can create a custom role with the following permissions: - -* `dns.resourceRecordSets.*` -* `dns.changes.*` -* `dns.managedZones.list` -==== - -. Create a `CredentialsRequest` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-credential-request.yaml ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"gcp-credentials"}]}}}' ----- - -.Verification - -. Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. 
Verify that the cert-manager controller pod is updated with GCP credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -spec: - containers: - - args: - ... - volumeMounts: - ... - - mountPath: /.config/gcloud - name: cloud-credentials - .... - volumes: - ... - - name: cloud-credentials - secret: - ... - items: - - key: service_account.json - path: application_default_credentials.json - secretName: gcp-credentials ----- \ No newline at end of file diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc deleted file mode 100644 index c155d3c261d0..000000000000 --- a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-authenticate-gcp.adoc - -:_content-type: PROCEDURE -[id="cert-manager-configure-cloud-credentials-gcp-sts_{context}"] -= Configuring cloud credentials for the {cert-manager-operator} with GCP Workload Identity - -Generate the cloud credentials for the {cert-manager-operator} by using the `ccoctl` binary. Then, apply them to the GCP Workload Identity cluster. - -.Prerequisites - -* You extracted and prepared the `ccoctl` binary. -* The {cert-manager-operator} 1.11.1 or later is installed. -* You have configured an {product-title} cluster with GCP Workload Identity by using the Cloud Credential Operator in a manual mode. - -.Procedure - -. Create a directory to store a `CredentialsRequest` resource YAML file by running the following command: -+ -[source,terminal] ----- -$ mkdir credentials-request ----- - -. In the `credentials-request` directory, create a YAML file that contains the following `CredentialsRequest` manifest: -+ -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: cert-manager - namespace: openshift-cloud-credential-operator -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: GCPProviderSpec - predefinedRoles: - - roles/dns.admin - secretRef: - name: gcp-credentials - namespace: cert-manager - serviceAccountNames: - - cert-manager ----- -+ -[NOTE] -==== -The `dns.admin` role provides admin privileges to the service account for managing Google Cloud DNS resources. To ensure that the cert-manager runs with the service account that has the least privilege, you can create a custom role with the following permissions: - -* `dns.resourceRecordSets.*` -* `dns.changes.*` -* `dns.managedZones.list` -==== - -. Use the `ccoctl` tool to process `CredentialsRequest` objects by running the following command: -+ -[source,terminal] ----- -$ ccoctl gcp create-service-accounts \ - --name --output-dir= \ - --credentials-requests-dir= \ - --workload-identity-pool \ - --workload-identity-provider \ - --project ----- -+ -.Example command -[source,terminal] ----- -$ ccoctl gcp create-service-accounts \ - --name abcde-20230525-4bac2781 --output-dir=/home/outputdir \ - --credentials-requests-dir=/home/credentials-requests \ - --workload-identity-pool abcde-20230525-4bac2781 \ - --workload-identity-provider abcde-20230525-4bac2781 \ - --project openshift-gcp-devel ----- - -. 
Apply the secrets generated in the manifests directory of your cluster by running the following command: -+ -[source,terminal] ----- -$ ls /manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Update the subscription object for {cert-manager-operator} by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type=merge -p '{"spec":{"config":{"env":[{"name":"CLOUD_CREDENTIALS_SECRET_NAME","value":"gcp-credentials"}]}}}' ----- - -.Verification - -. Get the name of the redeployed cert-manager controller pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ----- - -. Verify that the cert-manager controller pod is updated with GCP workload identity credential volumes that are mounted under the path specified in `mountPath` by running the following command: -+ -[source,terminal] ----- -$ oc get -n cert-manager pod/ -o yaml ----- -+ -.Example output -[source,terminal] ----- -spec: - containers: - - args: - ... - volumeMounts: - - mountPath: /var/run/secrets/openshift/serviceaccount - name: bound-sa-token - ... - - mountPath: /.config/gcloud - name: cloud-credentials - ... - volumes: - - name: bound-sa-token - projected: - ... - sources: - - serviceAccountToken: - audience: openshift - ... - path: token - - name: cloud-credentials - secret: - ... - items: - - key: service_account.json - path: application_default_credentials.json - secretName: gcp-credentials ----- \ No newline at end of file diff --git a/modules/cert-manager-enable-metrics.adoc b/modules/cert-manager-enable-metrics.adoc deleted file mode 100644 index 4329a3d67426..000000000000 --- a/modules/cert-manager-enable-metrics.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-monitoring.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-metrics_{context}"] -= Enabling monitoring by using a service monitor for the {cert-manager-operator} - -You can enable monitoring and metrics collection for the {cert-manager-operator} by using a service monitor to perform the custom metrics scraping. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* The {cert-manager-operator} is installed. - -.Procedure - -. Add the label to enable cluster monitoring by running the following command: -+ -[source,terminal] ----- -$ oc label namespace cert-manager openshift.io/cluster-monitoring=true ----- - -. Enable monitoring for user-defined projects. See _Enabling monitoring for user-defined projects_ for instructions. - -. Create a service monitor: - -.. Create a YAML file that defines the `ServiceMonitor` object: -+ -.Example `service-monitor.yaml` file -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app: cert-manager - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager - name: cert-manager - namespace: cert-manager -spec: - endpoints: - - interval: 30s - port: tcp-prometheus-servicemonitor - scheme: http - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/name: cert-manager ----- - -.. 
Create the `ServiceMonitor` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f service-monitor.yaml ----- \ No newline at end of file diff --git a/modules/cert-manager-enable-operand-log-level.adoc b/modules/cert-manager-enable-operand-log-level.adoc deleted file mode 100644 index eb4676bf2332..000000000000 --- a/modules/cert-manager-enable-operand-log-level.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-log-levels.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-operand-log-level_{context}"] -= Setting a log level for cert-manager - -You can set a log level for cert-manager to determine the verbosity of log messages. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have installed the {cert-manager-operator} 1.11.1 or later. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager.operator cluster ----- - -. Set the log level value by editing the `spec.logLevel` section: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -... -spec: - logLevel: Normal <1> ----- -<1> The default `logLevel` is `Normal`. Replace `Normal` with the desired log level value. The valid log level values for the `CertManager` resource are `Normal`, `Debug`, `Trace`, and `TraceAll`. To audit logs and perform common operations when everything is fine, set `logLevel` to `Normal` . To troubleshoot a minor issue by viewing verbose logs, set `logLevel` to `Debug` . To troubleshoot a major issue by viewing more verbose logs, you can set `logLevel` to `Trace`. To troubleshoot serious issues, set `logLevel` to `TraceAll`. -+ -[NOTE] -==== -`TraceAll` generates huge amount of logs. After setting `logLevel` to `TraceAll`, you might experience performance issues. -==== - -. Save your changes and quit the text editor to apply your changes. -+ -After applying the changes, the verbosity level for the cert-manager components controller, CA injector, and webhook is updated. \ No newline at end of file diff --git a/modules/cert-manager-enable-operator-log-level.adoc b/modules/cert-manager-enable-operator-log-level.adoc deleted file mode 100644 index a2263ef1f2dd..000000000000 --- a/modules/cert-manager-enable-operator-log-level.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-log-levels.adoc - -:_content-type: PROCEDURE -[id="cert-manager-enable-operator-log-level_{context}"] -= Setting a log level for the {cert-manager-operator} - -You can set a log level for the {cert-manager-operator} to determine the verbosity of the operator log messages. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have installed the {cert-manager-operator} 1.11.1 or later. - -.Procedure - -* Update the subscription object for {cert-manager-operator} to provide the verbosity level for the operator logs by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"OPERATOR_LOG_LEVEL","value":"v"}]}}}' <1> ----- -<1> Replace `v` with the desired log level number. The valid values for `v` can range from `1`to `10`. The default value is `2`. - -.Verification - -. 
The cert-manager Operator pod is redeployed. Verify that the log level of the {cert-manager-operator} is updated by running the following command: -+ -[source,terminal] ----- -$ oc set env deploy/cert-manager-operator-controller-manager -n cert-manager-operator --list | grep -e OPERATOR_LOG_LEVEL -e container ----- -+ -.Example output -[source,terminal] ----- -# deployments/cert-manager-operator-controller-manager, container kube-rbac-proxy -OPERATOR_LOG_LEVEL=9 -# deployments/cert-manager-operator-controller-manager, container cert-manager-operator -OPERATOR_LOG_LEVEL=9 ----- - -. Verify that the log level of the {cert-manager-operator} is updated by running the `oc logs` command: -+ -[source,terminal] ----- -$ oc logs deploy/cert-manager-operator-controller-manager -n cert-manager-operator ----- \ No newline at end of file diff --git a/modules/cert-manager-install-console.adoc b/modules/cert-manager-install-console.adoc deleted file mode 100644 index 4267edae6d3d..000000000000 --- a/modules/cert-manager-install-console.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-install.adoc - -:_content-type: PROCEDURE -[id="cert-manager-install-console_{context}"] -= Installing the {cert-manager-operator} using the web console - -You can use the web console to install the {cert-manager-operator}. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Navigate to *Operators* -> *OperatorHub*. - -. Enter *{cert-manager-operator}* into the filter box. - -. Select the *{cert-manager-operator}* and click *Install*. - -. On the *Install Operator* page: -.. Update the *Update channel*, if necessary. The channel defaults to *stable-v1*, which installs the latest stable release of the {cert-manager-operator}. -.. Choose the *Installed Namespace* for the Operator. The default Operator namespace is `cert-manager-operator`. -+ -If the `cert-manager-operator` namespace does not exist, it is created for you. - -.. Select an *Update approval* strategy. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.Verification - -. Navigate to *Operators* -> *Installed Operators*. -. Verify that *{cert-manager-operator}* is listed with a *Status* of *Succeeded* in the `cert-manager-operator` namespace. -. Verify that cert-manager pods are up and running by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 3m39s -cert-manager-cainjector-56cc5f9868-7g9z7 1/1 Running 0 4m5s -cert-manager-webhook-d4f79d7f7-9dg9w 1/1 Running 0 4m9s ----- -+ -You can use the {cert-manager-operator} only after cert-manager pods are up and running. 
\ No newline at end of file diff --git a/modules/cert-manager-issuer-types.adoc b/modules/cert-manager-issuer-types.adoc deleted file mode 100644 index 5736a227e8a0..000000000000 --- a/modules/cert-manager-issuer-types.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-issuer-types_{context}"] -= Supported issuer types - -The {cert-manager-operator} supports the following issuer types: - -* Automated Certificate Management Environment (ACME) -* Certificate authority (CA) -* Self-signed -* link:https://cert-manager.io/docs/configuration/vault/[Vault] diff --git a/modules/cert-manager-override-arguments.adoc b/modules/cert-manager-override-arguments.adoc deleted file mode 100644 index 6b528d674af7..000000000000 --- a/modules/cert-manager-override-arguments.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-customizing-api-fields.adoc - -:_content-type: PROCEDURE -[id="cert-manager-override-arguments_{context}"] -= Customizing cert-manager by overriding arguments from the cert-manager Operator API - -You can override the supported arguments for the {cert-manager-operator} by adding a `spec.controllerConfig` section in the `CertManager` resource. - -.Prerequisites - -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: - overrideArgs: - - '--dns01-recursive-nameservers=:' <1> - - '--dns01-recursive-nameservers-only' <2> - - '--acme-http01-solver-nameservers=:' <3> - - '--v=' <4> - - '--metrics-listen-address=:' <5> - webhookConfig: - overrideArgs: - - '--v=4' <4> - cainjectorConfig: - overrideArgs: - - '--v=2' <4> ----- -<1> Provide a comma-separated list of `:` nameservers to query for the DNS-01 self check. For example, `--dns01-recursive-nameservers=1.1.1.1:53`. -<2> Specify to only use recursive nameservers instead of checking the authoritative nameservers associated with that domain. -<3> Provide a comma-separated list of `:` nameservers to query for the ACME HTTP01 self check. For example, `--acme-http01-solver-nameservers=1.1.1.1:53`. -<4> Specify to set the log level verbosity to determine the verbosity of log messages. -<5> Specify the host and port for the metrics endpoint. The default value is `--metrics-listen-address=0.0.0.0:9402`. - -. Save your changes and quit the text editor to apply your changes. - -.Verification - -* Verify that arguments are updated for cert-manager pods by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n cert-manager -o yaml ----- -+ -.Example output -[source,yaml] ----- -... - metadata: - name: cert-manager-6d4b5d4c97-kldwl - namespace: cert-manager -... - spec: - containers: - - args: - - --acme-http01-solver-nameservers=1.1.1.1:53 - - --cluster-resource-namespace=$(POD_NAMESPACE) - - --dns01-recursive-nameservers=1.1.1.1:53 - - --dns01-recursive-nameservers-only - - --leader-election-namespace=kube-system - - --max-concurrent-challenges=60 - - --metrics-listen-address=0.0.0.0:9042 - - --v=6 -... 
- metadata: - name: cert-manager-cainjector-866c4fd758-ltxxj - namespace: cert-manager -... - spec: - containers: - - args: - - --leader-election-namespace=kube-system - - --v=2 -... - metadata: - name: cert-manager-webhook-6d48f88495-c88gd - namespace: cert-manager -... - spec: - containers: - - args: - ... - - --v=4 ----- \ No newline at end of file diff --git a/modules/cert-manager-override-environment-variables.adoc b/modules/cert-manager-override-environment-variables.adoc deleted file mode 100644 index a54fd7195e5e..000000000000 --- a/modules/cert-manager-override-environment-variables.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-customizing-api-fields.adoc - -:_content-type: PROCEDURE -[id="cert-manager-override-environment-variables_{context}"] -= Customizing cert-manager by overriding environment variables from the cert-manager Operator API - -You can override the supported environment variables for the {cert-manager-operator} by adding a `spec.controllerConfig` section in the `CertManager` resource. - -.Prerequisites - -* You have access to the {product-title} cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `CertManager` resource by running the following command: -+ -[source,terminal] ----- -$ oc edit certmanager cluster ----- - -. Add a `spec.controllerConfig` section with the following override arguments: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: CertManager -metadata: - name: cluster - ... -spec: - ... - controllerConfig: - overrideEnv: - - name: HTTP_PROXY - value: http:// <1> - - name: HTTPS_PROXY - value: https:// <1> - - name: NO_PROXY - value: <2> ----- -<1> Replace `` with the proxy server URL. -<2> Replace `` with a comma separated list of domains. These domains are ignored by the proxy server. - -. Save your changes and quit the text editor to apply your changes. - -.Verification - -. Verify that the cert-manager controller pod is redeployed by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app.kubernetes.io/name=cert-manager -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 39s ----- - -. Verify that environment variables are updated for the cert-manager pod by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n cert-manager -o yaml ----- -+ -.Example output -[source,yaml] ----- - env: - ... - - name: HTTP_PROXY - value: http:// - - name: HTTPS_PROXY - value: https:// - - name: NO_PROXY - value: ----- \ No newline at end of file diff --git a/modules/cert-manager-proxy-support.adoc b/modules/cert-manager-proxy-support.adoc deleted file mode 100644 index cfbd5d29f7d1..000000000000 --- a/modules/cert-manager-proxy-support.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-proxy.adoc - -:_content-type: PROCEDURE -[id="cert-manager-proxy-support_{context}"] -= Injecting a custom CA certificate for the {cert-manager-operator} - -If your {product-title} cluster has the cluster-wide proxy enabled, you can inject any CA certificates that are required for proxying HTTPS connections into the {cert-manager-operator}. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have enabled the cluster-wide proxy for {product-title}. 
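Before starting the following procedure, you can quickly confirm the cluster-wide proxy prerequisite by inspecting the `Proxy` object. This check is not part of the original module and assumes the default cluster-scoped object named `cluster`:

[source,terminal]
----
$ oc get proxy/cluster -o jsonpath='{.spec.httpProxy}{"\n"}{.spec.httpsProxy}{"\n"}'
----

If the cluster-wide proxy is enabled, at least one of the two output lines is non-empty.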
- -.Procedure - -. Create a config map in the `cert-manager` namespace by running the following command: -+ -[source,terminal] ----- -$ oc create configmap trusted-ca -n cert-manager ----- - -. Inject the CA bundle that is trusted by {product-title} into the config map by running the following command: -+ -[source,terminal] ----- -$ oc label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true -n cert-manager ----- - -. Update the deployment for the {cert-manager-operator} to use the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n cert-manager-operator patch subscription openshift-cert-manager-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}]}}}' ----- - -.Verification - -. Verify that the deployments have finished rolling out by running the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment/cert-manager-operator-controller-manager -n cert-manager-operator && \ -oc rollout status deployment/cert-manager -n cert-manager && \ -oc rollout status deployment/cert-manager-webhook -n cert-manager && \ -oc rollout status deployment/cert-manager-cainjector -n cert-manager ----- -+ -.Example output -[source,terminal] ----- -deployment "cert-manager-operator-controller-manager" successfully rolled out -deployment "cert-manager" successfully rolled out -deployment "cert-manager-webhook" successfully rolled out -deployment "cert-manager-cainjector" successfully rolled out ----- - -. Verify that the CA bundle was mounted as a volume by running the following command: -+ -[source,terminal] ----- -$ oc get deployment cert-manager -n cert-manager -o=jsonpath={.spec.template.spec.'containers[0].volumeMounts'} ----- -+ -.Example output -[source,terminal] ----- -[{"mountPath":"/etc/pki/tls/certs/cert-manager-tls-ca-bundle.crt","name":"trusted-ca","subPath":"ca-bundle.crt"}] ----- - -. Verify that the source of the CA bundle is the `trusted-ca` config map by running the following command: -+ -[source,terminal] ----- -$ oc get deployment cert-manager -n cert-manager -o=jsonpath={.spec.template.spec.volumes} ----- -+ -.Example output -[source,terminal] ----- -[{"configMap":{"defaultMode":420,"name":"trusted-ca"},"name":"trusted-ca"}] ----- diff --git a/modules/cert-manager-query-metrics.adoc b/modules/cert-manager-query-metrics.adoc deleted file mode 100644 index 0cb178d36a84..000000000000 --- a/modules/cert-manager-query-metrics.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-monitoring.adoc - -:_content-type: PROCEDURE -[id="cert-manager-query-metrics_{context}"] -= Querying metrics for the {cert-manager-operator} - -After you have enabled monitoring for the {cert-manager-operator}, you can query its metrics by using the {product-title} web console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the {cert-manager-operator}. -* You have enabled monitoring and metrics collection for the {cert-manager-operator}. - -.Procedure - -. From the {product-title} web console, navigate to *Observe* -> *Metrics*. - -. Add a query by using one of the following formats: - -** Specify the endpoints: -+ -[source,promql] ----- -{instance=""} <1> ----- -<1> Replace `` with the value of the endpoint for the `cert-manager` service. 
You can find the endpoint value by running the following command: `oc describe service cert-manager -n cert-manager`. - -** Specify the `tcp-prometheus-servicemonitor` port: -+ -[source,promql] ----- -{endpoint="tcp-prometheus-servicemonitor"} ----- \ No newline at end of file diff --git a/modules/cert-manager-remove-resources-console.adoc b/modules/cert-manager-remove-resources-console.adoc deleted file mode 100644 index eb0d751fe968..000000000000 --- a/modules/cert-manager-remove-resources-console.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert-manager-operator-uninstall.adoc - -:_content-type: PROCEDURE -[id="cert-manager-remove-resources-console_{context}"] -= Removing {cert-manager-operator} resources - -Once you have uninstalled the {cert-manager-operator}, you have the option to eliminate its associated resources from your cluster. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Remove the deployments of the cert-manager components, such as `cert-manager`, `cainjector`, and `webhook`, present in the `cert-manager` namespace. - -.. Click the *Project* drop-down menu to see a list of all available projects, and select the *cert-manager* project. - -.. Navigate to *Workloads* -> *Deployments*. - -.. Select the deployment that you want to delete. - -.. Click the *Actions* drop-down menu, and select *Delete Deployment* to see a confirmation dialog box. - -.. Click *Delete* to delete the deployment. - -.. Alternatively, delete deployments of the cert-manager components such as `cert-manager`, `cainjector` and `webhook` present in the `cert-manager` namespace by using the command-line interface (CLI). -+ -[source,terminal] ----- -$ oc delete deployment -n cert-manager -l app.kubernetes.io/instance=cert-manager ----- - -. Optional: Remove the custom resource definitions (CRDs) that were installed by the {cert-manager-operator}: - -.. Navigate to *Administration* -> *CustomResourceDefinitions*. - -.. Enter `certmanager` in the *Name* field to filter the CRDs. - -.. Click the Options menu {kebab} next to each of the following CRDs, and select *Delete Custom Resource Definition*: - -*** `Certificate` -*** `CertificateRequest` -*** `CertManager` (`operator.openshift.io`) -*** `Challenge` -*** `ClusterIssuer` -*** `Issuer` -*** `Order` - -. Optional: Remove the `cert-manager-operator` namespace. -.. Navigate to *Administration* -> *Namespaces*. -.. Click the Options menu {kebab} next to the *cert-manager-operator* and select *Delete Namespace*. -.. In the confirmation dialog, enter `cert-manager-operator` in the field and click *Delete*. 
\ No newline at end of file diff --git a/modules/cert-manager-request-methods.adoc b/modules/cert-manager-request-methods.adoc deleted file mode 100644 index 4d972a7c64a5..000000000000 --- a/modules/cert-manager-request-methods.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/index.adoc - -:_content-type: CONCEPT -[id="cert-manager-request-methods_{context}"] -= Certificate request methods - -There are two ways to request a certificate using the {cert-manager-operator}: - -Using the `cert-manager.io/CertificateRequest` object:: With this method a service developer creates a `CertificateRequest` object with a valid `issuerRef` pointing to a configured issuer (configured by a service infrastructure administrator). A service infrastructure administrator then accepts or denies the certificate request. Only accepted certificate requests create a corresponding certificate. - -Using the `cert-manager.io/Certificate` object:: With this method, a service developer creates a `Certificate` object with a valid `issuerRef` and obtains a certificate from a secret that they pointed to the `Certificate` object. diff --git a/modules/cert-manager-uninstall-console.adoc b/modules/cert-manager-uninstall-console.adoc deleted file mode 100644 index 32b069abc629..000000000000 --- a/modules/cert-manager-uninstall-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * security/cert_manager_operator/cert-manager-operator-uninstall.adoc - -:_content-type: PROCEDURE -[id="cert-manager-uninstall-console_{context}"] -= Uninstalling the {cert-manager-operator} - -You can uninstall the {cert-manager-operator} by using the web console. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. -* The {cert-manager-operator} is installed. -// TODO: Any other prereqs, like removing anything that is using it? - -.Procedure - -. Log in to the {product-title} web console. -. Uninstall the {cert-manager-operator} Operator. -.. Navigate to *Operators* -> *Installed Operators*. -.. Click the Options menu {kebab} next to the *{cert-manager-operator}* entry and click *Uninstall Operator*. -.. In the confirmation dialog, click *Uninstall*. diff --git a/modules/certificate-injection-using-operators.adoc b/modules/certificate-injection-using-operators.adoc deleted file mode 100644 index 17e526a88a87..000000000000 --- a/modules/certificate-injection-using-operators.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-a-custom-pki.adoc - -[id="certificate-injection-using-operators_{context}"] -= Certificate injection using Operators - -Once your custom CA certificate is added to the cluster via ConfigMap, the -Cluster Network Operator merges the user-provided and system CA certificates -into a single bundle and injects the merged bundle into the Operator requesting -the trust bundle injection. - -[IMPORTANT] -==== -After adding a `config.openshift.io/inject-trusted-cabundle="true"` label to the config map, existing data in it is deleted. The Cluster Network Operator takes ownership of a config map and only accepts `ca-bundle` as data. -You must use a separate config map to store `service-ca.crt` by using the `service.beta.openshift.io/inject-cabundle=true` annotation or a similar configuration. 
Adding a `config.openshift.io/inject-trusted-cabundle="true"` label and `service.beta.openshift.io/inject-cabundle=true` annotation on the same config map can cause issues. -==== - -Operators request this injection by creating an empty ConfigMap with the -following label: - -[source,yaml] ----- -config.openshift.io/inject-trusted-cabundle="true" ----- - -An example of the empty ConfigMap: -[source,yaml] ----- -apiVersion: v1 -data: {} -kind: ConfigMap -metadata: - labels: - config.openshift.io/inject-trusted-cabundle: "true" - name: ca-inject <1> - namespace: apache ----- -<1> Specifies the empty ConfigMap name. - -The Operator mounts this ConfigMap into the container's local trust store. - -[NOTE] -==== -Adding a trusted CA certificate is only needed if the certificate is not -included in the {op-system-first} trust bundle. -==== - -Certificate injection is not limited to Operators. The Cluster Network Operator -injects certificates across any namespace when an empty ConfigMap is created with the -`config.openshift.io/inject-trusted-cabundle=true` label. - -The ConfigMap can reside in any namespace, but the ConfigMap must be mounted as -a volume to each container within a pod that requires a custom CA. For example: - -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-example-custom-ca-deployment - namespace: my-example-custom-ca-ns -spec: - ... - spec: - ... - containers: - - name: my-container-that-needs-custom-ca - volumeMounts: - - name: trusted-ca - mountPath: /etc/pki/ca-trust/extracted/pem - readOnly: true - volumes: - - name: trusted-ca - configMap: - name: trusted-ca - items: - - key: ca-bundle.crt <1> - path: tls-ca-bundle.pem <2> ----- -<1> `ca-bundle.crt` is required as the ConfigMap key. -<2> `tls-ca-bundle.pem` is required as the ConfigMap path. diff --git a/modules/checking-cluster-resource-availability-and-utilization.adoc b/modules/checking-cluster-resource-availability-and-utilization.adoc deleted file mode 100644 index 8ace6a550f11..000000000000 --- a/modules/checking-cluster-resource-availability-and-utilization.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -:_content-type: PROCEDURE -[id="checking-cluster-resource-availability-and-utilization_{context}"] -= Checking cluster resource availability and utilization - -{product-title} provides a comprehensive set of monitoring dashboards that help you understand the state of cluster components. - -In the *Administrator* perspective, you can access dashboards for core {product-title} components, including: - -* etcd - -* Kubernetes compute resources - -* Kubernetes network resources - -* Prometheus - -* Dashboards relating to cluster and node performance - -.Example compute resources dashboard -image::monitoring-dashboard-compute-resources.png[] - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. In the *Administrator* perspective in the {product-title} web console, navigate to *Observe* -> *Dashboards*. - -. Choose a dashboard in the *Dashboard* list. Some dashboards, such as the *etcd* dashboard, produce additional sub-menus when selected. - -. Optional: Select a time range for the graphs in the *Time Range* list. -+ -** Select a pre-defined time period. -+ -** Set a custom time range by selecting *Custom time range* in the *Time Range* list. -+ -.. Input or select the *From* and *To* dates and times. -+ -.. 
Click *Save* to save the custom time range. - -. Optional: Select a *Refresh Interval*. - -. Hover over each of the graphs within a dashboard to display detailed information about specific items. diff --git a/modules/checking-file-intergrity-cr-status.adoc b/modules/checking-file-intergrity-cr-status.adoc deleted file mode 100644 index 15dcf51c86b7..000000000000 --- a/modules/checking-file-intergrity-cr-status.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: PROCEDURE -[id="checking-the-file-integrity-CR-status_{context}"] -= Checking the FileIntegrity custom resource status - -The `FileIntegrity` custom resource (CR) reports its status through the .`status.phase` subresource. - -.Procedure - -* To query the `FileIntegrity` CR status, run: -+ -[source,terminal] ----- -$ oc get fileintegrities/worker-fileintegrity -o jsonpath="{ .status.phase }" ----- -+ -.Example output -[source,terminal] ----- -Active ----- diff --git a/modules/checking-load-balancer-configuration.adoc b/modules/checking-load-balancer-configuration.adoc deleted file mode 100644 index c60a997a5807..000000000000 --- a/modules/checking-load-balancer-configuration.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="checking-load-balancer-configuration_{context}"] -= Checking a load balancer configuration before {product-title} installation - -Check your load balancer configuration prior to starting an {product-title} installation. - -.Prerequisites - -* You have configured an external load balancer of your choosing, in preparation for an {product-title} installation. The following example is based on a {op-system-base-full} host using HAProxy to provide load balancing services to a cluster. -* You have configured DNS in preparation for an {product-title} installation. -* You have SSH access to your load balancer. - -.Procedure - -. Check that the `haproxy` systemd service is active: -+ -[source,terminal] ----- -$ ssh @ systemctl status haproxy ----- - -. Verify that the load balancer is listening on the required ports. The following example references ports `80`, `443`, `6443`, and `22623`. -+ -* For HAProxy instances running on {op-system-base-full} 6, verify port status by using the `netstat` command: -+ -[source,terminal] ----- -$ ssh @ netstat -nltupe | grep -E ':80|:443|:6443|:22623' ----- -+ -* For HAProxy instances running on {op-system-base-full} 7 or 8, verify port status by using the `ss` command: -+ -[source,terminal] ----- -$ ssh @ ss -nltupe | grep -E ':80|:443|:6443|:22623' ----- -+ -[NOTE] -==== -Red Hat recommends the `ss` command instead of `netstat` in {op-system-base-full} 7 or later. `ss` is provided by the iproute package. For more information on the `ss` command, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/performance_tuning_guide/sect-red_hat_enterprise_linux-performance_tuning_guide-tool_reference-ss[{op-system-base-full} 7 Performance Tuning Guide]. -==== -+ -. 
Check that the wildcard DNS record resolves to the load balancer: -+ -[source,terminal] ----- -$ dig @ ----- diff --git a/modules/checking-mco-status.adoc b/modules/checking-mco-status.adoc deleted file mode 100644 index a0693741c0ba..000000000000 --- a/modules/checking-mco-status.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="checking-mco-status_{context}"] -= Checking machine config pool status - -To see the status of the Machine Config Operator (MCO), its sub-components, and the resources it manages, use the following `oc` commands: - -.Procedure -. To see the number of MCO-managed nodes available on your cluster for each machine config pool (MCP), run the following command: -+ -[source,terminal] ----- -$ oc get machineconfigpool ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-06c9c4… True False False 3 3 3 0 4h42m -worker rendered-worker-f4b64… False True False 3 2 2 0 4h42m ----- -+ --- -where: - -UPDATED:: The `True` status indicates that the MCO has applied the current machine config to the nodes in that MCP. The current machine config is specified in the `STATUS` field in the `oc get mcp` output. The `False` status indicates a node in the MCP is updating. -UPDATING:: The `True` status indicates that the MCO is applying the desired machine config, as specified in the `MachineConfigPool` custom resource, to at least one of the nodes in that MCP. The desired machine config is the new, edited machine config. Nodes that are updating might not be available for scheduling. The `False` status indicates that all nodes in the MCP are updated. -DEGRADED:: A `True` status indicates the MCO is blocked from applying the current or desired machine config to at least one of the nodes in that MCP, or the configuration is failing. Nodes that are degraded might not be available for scheduling. A `False` status indicates that all nodes in the MCP are ready. -MACHINECOUNT:: Indicates the total number of machines in that MCP. -READYMACHINECOUNT:: Indicates the total number of machines in that MCP that are ready for scheduling. -UPDATEDMACHINECOUNT:: Indicates the total number of machines in that MCP that have the current machine config. -DEGRADEDMACHINECOUNT:: Indicates the total number of machines in that MCP that are marked as degraded or unreconcilable. --- -+ -In the previous output, there are three control plane (master) nodes and three worker nodes. The control plane MCP and the associated nodes are updated to the current machine config. The nodes in the worker MCP are being updated to the desired machine config. Two of the nodes in the worker MCP are updated and one is still updating, as indicated by the `UPDATEDMACHINECOUNT` being `2`. There are no issues, as indicated by the `DEGRADEDMACHINECOUNT` being `0` and `DEGRADED` being `False`. -+ -While the nodes in the MCP are updating, the machine config listed under `CONFIG` is the current machine config, which the MCP is being updated from. When the update is complete, the listed machine config is the desired machine config, which the MCP was updated to. -+ -[NOTE] -==== -If a node is being cordoned, that node is not included in the `READYMACHINECOUNT`, but is included in the `MACHINECOUNT`. Also, the MCP status is set to `UPDATING`. 
Because the node has the current machine config, it is counted in the `UPDATEDMACHINECOUNT` total: - -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-06c9c4… True False False 3 3 3 0 4h42m -worker rendered-worker-c1b41a… False True False 3 2 3 0 4h42m ----- -==== - -. To check the status of the nodes in an MCP by examining the `MachineConfigPool` custom resource, run the following command: -: -+ -[source,terminal] ----- -$ oc describe mcp worker ----- -+ -.Example output -[source,terminal] ----- -... - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 2 - Ready Machine Count: 3 - Unavailable Machine Count: 0 - Updated Machine Count: 3 -Events: ----- -+ -[NOTE] -==== -If a node is being cordoned, the node is not included in the `Ready Machine Count`. It is included in the `Unavailable Machine Count`: - -.Example output -[source,terminal] ----- -... - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 2 - Ready Machine Count: 2 - Unavailable Machine Count: 1 - Updated Machine Count: 3 ----- -==== - -. To see each existing `MachineConfig` object, run the following command: -+ -[source,terminal] ----- -$ oc get machineconfigs ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -00-worker 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -01-master-container-runtime 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -01-master-kubelet 2c9371fbb673b97a6fe8b1c52… 3.2.0 5h18m -... -rendered-master-dde... 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m -rendered-worker-fde... 2c9371fbb673b97a6fe8b1c52... 3.2.0 5h18m ----- -+ -Note that the `MachineConfig` objects listed as `rendered` are not meant to be changed or deleted. - -. To view the contents of a particular machine config (in this case, `01-master-kubelet`), run the following command: -+ -[source,terminal] ----- -$ oc describe machineconfigs 01-master-kubelet ----- -+ -The output from the command shows that this `MachineConfig` object contains both configuration files (`cloud.conf` and `kubelet.conf`) and a systemd service (Kubernetes Kubelet): -+ -.Example output -[source,terminal] ----- -Name: 01-master-kubelet -... -Spec: - Config: - Ignition: - Version: 3.2.0 - Storage: - Files: - Contents: - Source: data:, - Mode: 420 - Overwrite: true - Path: /etc/kubernetes/cloud.conf - Contents: - Source: data:,kind%3A%20KubeletConfiguration%0AapiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fkubelet-ca.crt%0A%20%20anonymous... - Mode: 420 - Overwrite: true - Path: /etc/kubernetes/kubelet.conf - Systemd: - Units: - Contents: [Unit] -Description=Kubernetes Kubelet -Wants=rpc-statd.service network-online.target crio.service -After=network-online.target crio.service - -ExecStart=/usr/bin/hyperkube \ - kubelet \ - --config=/etc/kubernetes/kubelet.conf \ ... ----- - -If something goes wrong with a machine config that you apply, you can always back out that change. For example, if you had run `oc create -f ./myconfig.yaml` to apply a machine config, you could remove that machine config by running the following command: - -[source,terminal] ----- -$ oc delete -f ./myconfig.yaml ----- - -If that was the only problem, the nodes in the affected pool should return to a non-degraded state. 
This actually causes the rendered configuration to roll back to its previously rendered state. - -If you add your own machine configs to your cluster, you can use the commands shown in the previous example to check their status and the related status of the pool to which they are applied. diff --git a/modules/checking-project-status-using-the-CLI.adoc b/modules/checking-project-status-using-the-CLI.adoc deleted file mode 100644 index 0849609763c7..000000000000 --- a/modules/checking-project-status-using-the-CLI.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="checking-project-status-using-the-CLI_{context}"] -= Checking project status using the CLI - -.Procedure - -. Run: -+ -[source,terminal] ----- -$ oc status ----- -+ -This command provides a high-level overview of the current project, with its -components and their relationships. diff --git a/modules/checking-project-status-using-the-web-console.adoc b/modules/checking-project-status-using-the-web-console.adoc deleted file mode 100644 index fb0d6cd6f95c..000000000000 --- a/modules/checking-project-status-using-the-web-console.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="checking-project-status-using-the-web-console_{context}"] -= Checking project status using the web console - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Select a project to see its status. diff --git a/modules/cleaning-crio-storage.adoc b/modules/cleaning-crio-storage.adoc deleted file mode 100644 index 335e35edcfba..000000000000 --- a/modules/cleaning-crio-storage.adoc +++ /dev/null @@ -1,137 +0,0 @@ -:_content-type: PROCEDURE -[id="cleaning-crio-storage_{context}"] - -= Cleaning CRI-O storage - -You can manually clear the CRI-O ephemeral storage if you experience the following issues: - -* A node cannot run on any pods and this error appears: -[source, terminal] -+ ----- -Failed to create pod sandbox: rpc error: code = Unknown desc = failed to mount container XXX: error recreating the missing symlinks: error reading name of symlink for XXX: open /var/lib/containers/storage/overlay/XXX/link: no such file or directory ----- -+ -* You cannot create a new container on a working node and the “can’t stat lower layer” error appears: -[source, terminal] -+ ----- -can't stat lower layer ... because it does not exist. Going through storage to recreate the missing symlinks. ----- -+ -* Your node is in the `NotReady` state after a cluster upgrade or if you attempt to reboot it. - -* The container runtime implementation (`crio`) is not working properly. - -* You are unable to start a debug shell on the node using `oc debug node/` because the container runtime instance (`crio`) is not working. - -Follow this process to completely wipe the CRI-O storage and resolve the errors. - -.Prerequisites: - - * You have access to the cluster as a user with the `cluster-admin` role. - * You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Use `cordon` on the node. This is to avoid any workload getting scheduled if the node gets into the `Ready` status. You will know that scheduling is disabled when `SchedulingDisabled` is in your Status section: -[source, terminal] -+ ----- -$ oc adm cordon ----- -+ -. 
Drain the node as the cluster-admin user: -[source, terminal] -+ ----- -$ oc adm drain --ignore-daemonsets --delete-emptydir-data ----- -+ -[NOTE] -==== -The `terminationGracePeriodSeconds` attribute of a pod or pod template controls the graceful termination period. This attribute defaults at 30 seconds, but can be customized per application as necessary. If set to more than 90 seconds, the pod might be marked as `SIGKILLed` and fail to terminate successfully. -==== - -. When the node returns, connect back to the node via SSH or Console. Then connect to the root user: -[source, terminal] -+ ----- -$ ssh core@node1.example.com -$ sudo -i ----- -+ -. Manually stop the kubelet: -[source, terminal] -+ ----- -# systemctl stop kubelet ----- -+ -. Stop the containers and pods: - -.. Use the following command to stop the pods that are not in the `HostNetwork`. They must be removed first because their removal relies on the networking plugin pods, which are in the `HostNetwork`. -[source, terminal] -+ ----- -.. for pod in $(crictl pods -q); do if [[ "$(crictl inspectp $pod | jq -r .status.linux.namespaces.options.network)" != "NODE" ]]; then crictl rmp -f $pod; fi; done ----- - -.. Stop all other pods: -[source, terminal] -+ ----- -# crictl rmp -fa ----- -+ -. Manually stop the crio services: -[source, terminal] -+ ----- -# systemctl stop crio ----- -+ -. After you run those commands, you can completely wipe the ephemeral storage: -[source, terminal] -+ ----- -# crio wipe -f ----- -+ -. Start the crio and kubelet service: -[source, terminal] -+ ----- -# systemctl start crio -# systemctl start kubelet ----- -+ -. You will know if the clean up worked if the crio and kubelet services are started, and the node is in the `Ready` status: -[source, terminal] -+ ----- -$ oc get nodes ----- -+ -.Example output -[source, terminal] -+ ----- -NAME STATUS ROLES AGE VERSION -ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready, SchedulingDisabled master 133m v1.26.0 ----- -+ -. Mark the node schedulable. You will know that the scheduling is enabled when `SchedulingDisabled` is no longer in status: -[source, terminal] -+ ----- -$ oc adm uncordon ----- -+ -.Example output -[source, terminal] -+ ----- -NAME STATUS ROLES AGE VERSION -ci-ln-tkbxyft-f76d1-nvwhr-master-1 Ready master 133m v1.26.0 ----- diff --git a/modules/cli-about-cli.adoc b/modules/cli-about-cli.adoc deleted file mode 100644 index 4f25aaa91f68..000000000000 --- a/modules/cli-about-cli.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -:_content-type: CONCEPT -[id="cli-about-cli_{context}"] -= About the OpenShift CLI - -With the OpenShift command-line interface (CLI), the `oc` command, you can create applications and manage {product-title} projects from a terminal. 
The OpenShift CLI is ideal in the following situations: - -* Working directly with project source code -* Scripting {product-title} operations -ifndef::microshift[] -* Managing projects while restricted by bandwidth resources and the web console is unavailable -endif::microshift[] -ifdef::microshift[] -* Managing projects while restricted by bandwidth resources -endif::microshift[] diff --git a/modules/cli-configuring-completion-zsh.adoc b/modules/cli-configuring-completion-zsh.adoc deleted file mode 100644 index 5fb7754406c6..000000000000 --- a/modules/cli-configuring-completion-zsh.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/configuring-cli.adoc - -:_content-type: PROCEDURE -[id="cli-enabling-tab-completion-zsh_{context}"] -= Enabling tab completion for Zsh - -After you install the OpenShift CLI (`oc`), you can enable tab completion to automatically complete `oc` commands or suggest options when you press Tab. The following procedure enables tab completion for the Zsh shell. - -.Prerequisites - -* You must have the OpenShift CLI (`oc`) installed. - -.Procedure - -* To add tab completion for `oc` to your `.zshrc` file, run the following command: -+ -[source,terminal] ----- -$ cat >>~/.zshrc< oc_bash_completion ----- - -. Copy the file to `/etc/bash_completion.d/`: -+ -[source,terminal] ----- -$ sudo cp oc_bash_completion /etc/bash_completion.d/ ----- -+ -You can also save the file to a local directory and source it from your `.bashrc` file instead. - -Tab completion is enabled when you open a new terminal. diff --git a/modules/cli-extending-plugins-installing.adoc b/modules/cli-extending-plugins-installing.adoc deleted file mode 100644 index 33e91a5e3764..000000000000 --- a/modules/cli-extending-plugins-installing.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/extending-cli-plugins.adoc - -:_content-type: PROCEDURE -[id="cli-installing-plugins_{context}"] -= Installing and using CLI plugins - -After you write a custom plugin for the {product-title} CLI, you must install -it to use the functionality that it provides. - -.Prerequisites - -* You must have the `oc` CLI tool installed. -* You must have a CLI plugin file that begins with `oc-` or `kubectl-`. - -.Procedure - -. If necessary, update the plugin file to be executable. -+ -[source,terminal] ----- -$ chmod +x ----- -. Place the file anywhere in your `PATH`, such as `/usr/local/bin/`. -+ -[source,terminal] ----- -$ sudo mv /usr/local/bin/. ----- -. Run `oc plugin list` to make sure that the plugin is listed. -+ -[source,terminal] ----- -$ oc plugin list ----- -+ -.Example output -[source,terminal] ----- -The following compatible plugins are available: - -/usr/local/bin/ ----- -+ -If your plugin is not listed here, verify that the file begins with `oc-` -or `kubectl-`, is executable, and is on your `PATH`. -. Invoke the new command or option introduced by the plugin. -+ -For example, if you built and installed the `kubectl-ns` plugin from the - link:https://github.com/kubernetes/sample-cli-plugin[Sample plugin repository], - you can use the following command to view the current namespace. -+ -[source,terminal] ----- -$ oc ns ----- -+ -Note that the command to invoke the plugin depends on the plugin file name. -For example, a plugin with the file name of `oc-foo-bar` is invoked by the `oc foo bar` -command. 
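The naming, installation, and invocation rules described above can be combined into a short end-to-end sketch. The plugin name `oc-hello` and its output are placeholders used only for illustration and are not part of the product:

[source,terminal]
----
$ cat <<'EOF' > oc-hello
#!/bin/bash
echo "hello from an oc plugin"
EOF

$ chmod +x oc-hello

$ sudo mv oc-hello /usr/local/bin/

$ oc plugin list

$ oc hello
----

Because the file is named `oc-hello`, the plugin is invoked as `oc hello`; removing the file from `/usr/local/bin/` uninstalls it.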
diff --git a/modules/cli-extending-plugins-writing.adoc b/modules/cli-extending-plugins-writing.adoc deleted file mode 100644 index a71a89e167a3..000000000000 --- a/modules/cli-extending-plugins-writing.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/extending-cli-plugins.adoc - -:_content-type: PROCEDURE -[id="cli-writing-plugins_{context}"] -= Writing CLI plugins - -You can write a plugin for the {product-title} CLI in any programming language -or script that allows you to write command-line commands. Note that you can not -use a plugin to overwrite an existing `oc` command. - -.Procedure - -This procedure creates a simple Bash plugin that prints a message to the -terminal when the `oc foo` command is issued. - -. Create a file called `oc-foo`. -+ -When naming your plugin file, keep the following in mind: - -* The file must begin with `oc-` or `kubectl-` to be recognized as a -plugin. -* The file name determines the command that invokes the plugin. For example, a -plugin with the file name `oc-foo-bar` can be invoked by a command of -`oc foo bar`. You can also use underscores if you want the command to contain -dashes. For example, a plugin with the file name `oc-foo_bar` can be invoked -by a command of `oc foo-bar`. - -. Add the following contents to the file. -+ -[source,bash] ----- -#!/bin/bash - -# optional argument handling -if [[ "$1" == "version" ]] -then - echo "1.0.0" - exit 0 -fi - -# optional argument handling -if [[ "$1" == "config" ]] -then - echo $KUBECONFIG - exit 0 -fi - -echo "I am a plugin named kubectl-foo" ----- - -After you install this plugin for the {product-title} CLI, it can be invoked -using the `oc foo` command. - -[role="_additional-resources"] -.Additional resources - -* Review the link:https://github.com/kubernetes/sample-cli-plugin[Sample plugin repository] -for an example of a plugin written in Go. -* Review the link:https://github.com/kubernetes/cli-runtime/[CLI runtime repository] for a set of utilities to assist in writing plugins in Go. diff --git a/modules/cli-getting-help.adoc b/modules/cli-getting-help.adoc deleted file mode 100644 index 0bbcbf6ce47d..000000000000 --- a/modules/cli-getting-help.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-getting-help_{context}"] -= Getting help - -You can get help with CLI commands and {product-title} resources in the -following ways. - -* Use `oc help` to get a list and description of all available CLI commands: -+ -.Example: Get general help for the CLI -[source,terminal] ----- -$ oc help ----- -+ -.Example output -[source,terminal] ----- -OpenShift Client - -This client helps you develop, build, deploy, and run your applications on any OpenShift or Kubernetes compatible -platform. It also includes the administrative commands for managing a cluster under the 'adm' subcommand. - -Usage: - oc [flags] - -Basic Commands: - login Log in to a server - new-project Request a new project - new-app Create a new application - -... ----- - -* Use the `--help` flag to get help about a specific CLI command: -+ -.Example: Get help for the `oc create` command -[source,terminal] ----- -$ oc create --help ----- -+ -.Example output -[source,terminal] ----- -Create a resource by filename or stdin - -JSON and YAML formats are accepted. - -Usage: - oc create -f FILENAME [flags] - -... 
----- - -* Use the `oc explain` command to view the description and fields for a -particular resource: -+ -.Example: View documentation for the `Pod` resource -[source,terminal] ----- -$ oc explain pods ----- -+ -.Example output -[source,terminal] ----- -KIND: Pod -VERSION: v1 - -DESCRIPTION: - Pod is a collection of containers that can run on a host. This resource is - created by clients and scheduled onto hosts. - -FIELDS: - apiVersion - APIVersion defines the versioned schema of this representation of an - object. Servers should convert recognized schemas to the latest internal - value, and may reject unrecognized values. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#resources - -... ----- diff --git a/modules/cli-installing-cli-brew.adoc b/modules/cli-installing-cli-brew.adoc deleted file mode 100644 index 8315eeef2c46..000000000000 --- a/modules/cli-installing-cli-brew.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc -// * microshift_cli_ref/microshift_oc_cli_install.adoc - -:_content-type: PROCEDURE -[id="cli-installing-cli-brew_{context}"] -= Installing the OpenShift CLI by using Homebrew - -For macOS, you can install the OpenShift CLI (`oc`) by using the link:https://brew.sh[Homebrew] package manager. - -.Prerequisites - -* You must have Homebrew (`brew`) installed. - -.Procedure - -* Run the following command to install the link:https://formulae.brew.sh/formula/openshift-cli[openshift-cli] package: -+ -[source,terminal] ----- -$ brew install openshift-cli ----- diff --git a/modules/cli-installing-cli-rpm.adoc b/modules/cli-installing-cli-rpm.adoc deleted file mode 100644 index ecedb5b9bb84..000000000000 --- a/modules/cli-installing-cli-rpm.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc -// * microshift_cli_ref/microshift_oc_cli_install.adoc - -:_content-type: PROCEDURE -[id="cli-installing-cli-rpm_{context}"] -= Installing the OpenShift CLI by using an RPM - -For {op-system-base-full}, you can install the OpenShift CLI (`oc`) as an RPM if you have an active {product-title} subscription on your Red Hat account. - -.Prerequisites - -* Must have root or sudo privileges. - -.Procedure - -. Register with Red Hat Subscription Manager: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Pull the latest subscription data: -+ -[source,terminal] ----- -# subscription-manager refresh ----- - -. List the available subscriptions: -+ -[source,terminal] ----- -# subscription-manager list --available --matches '*OpenShift*' ----- - -. In the output for the previous command, find the pool ID for an {product-title} subscription and attach the subscription to the registered system: -+ -[source,terminal] ----- -# subscription-manager attach --pool= ----- - -. Enable the repositories required by {product-title} {product-version}. -+ -[source,terminal] ----- -# subscription-manager repos --enable="rhocp-4.13-for-rhel-8-x86_64-rpms" ----- -+ -[NOTE] -==== -It is not supported to install the OpenShift CLI (`oc`) as an RPM for {op-system-base-full} 9. You must install the OpenShift CLI for {op-system-base} 9 by downloading the binary. -==== - -. 
Install the `openshift-clients` package: -+ -[source,terminal] ----- -# yum install openshift-clients ----- - -After you install the CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- diff --git a/modules/cli-installing-cli-web-console-linux.adoc b/modules/cli-installing-cli-web-console-linux.adoc deleted file mode 100644 index d1490d480359..000000000000 --- a/modules/cli-installing-cli-web-console-linux.adoc +++ /dev/null @@ -1,45 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos-linux_{context}"] -= Installing the OpenShift CLI on Linux using the web console - -You can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure. - -.Procedure - -. From the web console, click *?*. -+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select appropriate `oc` binary for your Linux platform, and then click *Download oc for Linux*. -. Save the file. -. Unpack the archive. -+ -[source,terminal] ----- -$ tar xvf ----- -. Move the `oc` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[] diff --git a/modules/cli-installing-cli-web-console-macos.adoc b/modules/cli-installing-cli-web-console-macos.adoc deleted file mode 100644 index 02e13fc47b2d..000000000000 --- a/modules/cli-installing-cli-web-console-macos.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos_{context}"] -= Installing the OpenShift CLI on macOS using the web console -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -You can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure. - -.Procedure - -. From the web console, click *?*. -+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select the `oc` binary for macOS platform, and then click *Download oc for Mac for x86_64*. -+ -[NOTE] -==== -For macOS arm64, click *Download oc for Mac for ARM 64*. -==== - -. Save the file. -. Unpack and unzip the archive. -. Move the `oc` binary to a directory on your PATH. -+ -To check your `PATH`, open a terminal and execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[] diff --git a/modules/cli-installing-cli-web-console-windows.adoc b/modules/cli-installing-cli-web-console-windows.adoc deleted file mode 100644 index fa18b6e7f06c..000000000000 --- a/modules/cli-installing-cli-web-console-windows.adoc +++ /dev/null @@ -1,40 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli-web-console-macos-windows_{context}"] -= Installing the OpenShift CLI on Windows using the web console - -You can install the OpenShift CLI (`oc`) binary on Winndows by using the following procedure. - -.Procedure - -. From the web console, click *?*. 
-+ -image::click-question-mark.png[] -. Click *Command Line Tools*. -+ -image::CLI-list.png[] -. Select the `oc` binary for Windows platform, and then click *Download oc for Windows for x86_64*. -. Save the file. -. Unzip the archive with a ZIP program. -. Move the `oc` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -C:\> oc ----- - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[] diff --git a/modules/cli-installing-cli-web-console.adoc b/modules/cli-installing-cli-web-console.adoc deleted file mode 100644 index c991721ea6eb..000000000000 --- a/modules/cli-installing-cli-web-console.adoc +++ /dev/null @@ -1,22 +0,0 @@ -ifeval::["{context}" == "updating-restricted-network-cluster"] -:restricted: -endif::[] - -[id="cli-installing-cli-web-console_{context}"] -= Installing the OpenShift CLI by using the web console - -You can install the OpenShift CLI (`oc`) to interact with {product-title} from a web console. You can install `oc` on Linux, Windows, or macOS. - -[IMPORTANT] -==== -If you installed an earlier version of `oc`, you cannot use it to complete all -of the commands in {product-title} {product-version}. Download and -install the new version of `oc`. -ifdef::restricted[] -If you are upgrading a cluster in a restricted network, install the `oc` version that you plan to upgrade to. -endif::restricted[] -==== - -ifeval::["{context}" == "updating-restricted-network-cluster"] -:!restricted: -endif::[] diff --git a/modules/cli-installing-cli.adoc b/modules/cli-installing-cli.adoc deleted file mode 100644 index 130cafa81371..000000000000 --- a/modules/cli-installing-cli.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * cli_reference/openshift_cli/getting-started.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adocs -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * 
installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc -// * microshift_cli_ref/microshift-oc-cli-install.adoc -// * updating/updating-restricted-network-cluster.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// AMQ docs link to this; do not change anchor - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="cli-installing-cli_{context}"] -= Installing the OpenShift CLI by downloading the binary - -You can install the OpenShift CLI (`oc`) to interact with {product-title} from a -command-line interface. You can install `oc` on Linux, Windows, or macOS. - -[IMPORTANT] -==== -If you installed an earlier version of `oc`, you cannot use it to complete all of the commands in {product-title} {product-version}. Download and install the new version of `oc`. -ifdef::restricted[] -If you are updating a cluster in a disconnected environment, install the `oc` version that you plan to update to. -endif::restricted[] -==== - -[discrete] -== Installing the OpenShift CLI on Linux - -You can install the OpenShift CLI (`oc`) binary on Linux by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.tar.gz`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the architecture from the *Product Variant* drop-down list. 
-. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Linux Client* entry and save the file. -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. Select the architecture from the *Product Variant* drop-down list. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Linux Client* entry and save the file. -endif::[] -. Unpack the archive: -+ -[source,terminal] ----- -$ tar xvf ----- -. Place the `oc` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -[discrete] -== Installing the OpenShift CLI on Windows - -You can install the OpenShift CLI (`oc`) binary on Windows by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.zip`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Windows Client* entry and save the file. -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} Windows Client* entry and save the file. -endif::[] -. Unzip the archive with a ZIP program. -. Move the `oc` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -C:\> oc ----- - -[discrete] -== Installing the OpenShift CLI on macOS - -You can install the OpenShift CLI (`oc`) binary on macOS by using the following procedure. - -.Procedure - -ifdef::openshift-origin[] -. Navigate to link:https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/[https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/] and choose the folder for your operating system and architecture. -. Download `oc.tar.gz`. -endif::[] -ifndef::openshift-origin,microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{product-title} downloads page] on the Red Hat Customer Portal. -. Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} macOS Client* entry and save the file. -+ -[NOTE] -==== -For macOS arm64, choose the *OpenShift v{product-version} macOS arm64 Client* entry. -==== -endif::[] -ifdef::microshift[] -. Navigate to the link:https://access.redhat.com/downloads/content/290[{ocp} downloads page] on the Red Hat Customer Portal. -. 
Select the appropriate version from the *Version* drop-down list. -. Click *Download Now* next to the *OpenShift v{product-version} macOS Client* entry and save the file. -endif::[] -. Unpack and unzip the archive. -. Move the `oc` binary to a directory on your PATH. -+ -To check your `PATH`, open a terminal and execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -After you install the OpenShift CLI, it is available using the `oc` command: - -[source,terminal] ----- -$ oc ----- - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!restricted: -endif::[] diff --git a/modules/cli-krew-install-plugin.adoc b/modules/cli-krew-install-plugin.adoc deleted file mode 100644 index e35186d4b426..000000000000 --- a/modules/cli-krew-install-plugin.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-install-plugin_{context}"] -= Installing a CLI plugin with Krew - -You can install a plugin for the OpenShift CLI (`oc`) with Krew. - -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. - -.Procedure - -. To list all available plugins, run the following command: -+ -[source,terminal] ----- -$ oc krew search ----- - -. To get information about a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew info ----- - -. To install a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew install ----- - -. To list all plugins that were installed by Krew, run the following command: -+ -[source,terminal] ----- -$ oc krew list ----- diff --git a/modules/cli-krew-remove-plugin.adoc b/modules/cli-krew-remove-plugin.adoc deleted file mode 100644 index 5c43e2a6b427..000000000000 --- a/modules/cli-krew-remove-plugin.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-remove-plugin_{context}"] -= Uninstalling a CLI plugin with Krew - -You can uninstall a plugin that was installed for the OpenShift CLI (`oc`) with Krew. - -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. -* You have installed a plugin for the OpenShift CLI with Krew. - -.Procedure - -* To uninstall a plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew uninstall ----- diff --git a/modules/cli-krew-update-plugin.adoc b/modules/cli-krew-update-plugin.adoc deleted file mode 100644 index ddaf47993cf1..000000000000 --- a/modules/cli-krew-update-plugin.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/installing-cli-plugins-krew.adoc - -:_content-type: PROCEDURE -[id="cli-krew-update-plugin_{context}"] -= Updating a CLI plugin with Krew - -You can update a plugin that was installed for the OpenShift CLI (`oc`) with Krew. - -.Prerequisites - -* You have installed Krew by following the link:https://krew.sigs.k8s.io/docs/user-guide/setup/install/[installation procedure] in the Krew documentation. -* You have installed a plugin for the OpenShift CLI with Krew. 
- -.Procedure - -* To update a single plugin, run the following command: -+ -[source,terminal] ----- -$ oc krew upgrade ----- - -* To update all plugins that were installed by Krew, run the following command: -+ -[source,terminal] ----- -$ oc krew upgrade ----- diff --git a/modules/cli-logging-in-kubeadmin.adoc b/modules/cli-logging-in-kubeadmin.adoc deleted file mode 100644 index a2965982f9e0..000000000000 --- a/modules/cli-logging-in-kubeadmin.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp_user_infra/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * 
installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc - - -:_content-type: PROCEDURE -[id="cli-logging-in-kubeadmin_{context}"] -= Logging in to the cluster by using the CLI - -You can log in to your cluster as a default system user by exporting the cluster `kubeconfig` file. -The `kubeconfig` file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. -The file is specific to a cluster and is created during {product-title} installation. - -.Prerequisites - -* You deployed an {product-title} cluster. -* You installed the `oc` CLI. - -.Procedure - -. Export the `kubeadmin` credentials: -+ -[source,terminal] ----- -$ export KUBECONFIG=/auth/kubeconfig <1> ----- -<1> For ``, specify the path to the directory that you stored -the installation files in. - -. Verify you can run `oc` commands successfully using the exported configuration: -+ -[source,terminal] ----- -$ oc whoami ----- -+ -.Example output -[source,terminal] ----- -system:admin ----- diff --git a/modules/cli-logging-in.adoc b/modules/cli-logging-in.adoc deleted file mode 100644 index fa4bf9ab6a20..000000000000 --- a/modules/cli-logging-in.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -:_content-type: PROCEDURE -[id="cli-logging-in_{context}"] -= Logging in to the OpenShift CLI - -You can log in to the OpenShift CLI (`oc`) to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -[NOTE] -==== -To access a cluster that is accessible only over an HTTP proxy server, you can set the `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` variables. -These environment variables are respected by the `oc` CLI so that all communication with the cluster goes through the HTTP proxy. - -Authentication headers are sent only when using HTTPS transport. -==== - -.Procedure - -. Enter the `oc login` command and pass in a user name: -+ -[source,terminal] ----- -$ oc login -u user1 ----- - -. When prompted, enter the required information: -+ -.Example output -[source,terminal] ----- -Server [https://localhost:8443]: https://openshift.example.com:6443 <1> -The server uses a certificate signed by an unknown authority. -You can bypass the certificate check, but any data you send to the server could be intercepted by others. -Use insecure connections? (y/n): y <2> - -Authentication required for https://openshift.example.com:6443 (openshift) -Username: user1 -Password: <3> -Login successful. - -You don't have any projects. 
You can try to create a new project, by running - - oc new-project - -Welcome! See 'oc help' to get started. ----- -<1> Enter the {product-title} server URL. -<2> Enter whether to use insecure connections. -<3> Enter the user's password. - -[NOTE] -==== -If you are logged in to the web console, you can generate an `oc login` command that includes your token and server information. You can use the command to log in to the {product-title} CLI without the interactive prompts. To generate the command, select *Copy login command* from the username drop-down menu at the top right of the web console. -==== - -You can now create a project or issue other commands for managing your cluster. diff --git a/modules/cli-logging-out.adoc b/modules/cli-logging-out.adoc deleted file mode 100644 index 57be58cea225..000000000000 --- a/modules/cli-logging-out.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-logging-out_{context}"] -= Logging out of the OpenShift CLI - -You can log out the OpenShift CLI to end your current session. - -* Use the `oc logout` command. -+ -[source,terminal] ----- -$ oc logout ----- -+ -.Example output -[source,terminal] ----- -Logged "user1" out on "https://openshift.example.com" ----- - -This deletes the saved authentication token from the server and removes it from -your configuration file. diff --git a/modules/cli-using-cli.adoc b/modules/cli-using-cli.adoc deleted file mode 100644 index db6446f6a3f0..000000000000 --- a/modules/cli-using-cli.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/getting-started.adoc - -[id="cli-using-cli_{context}"] -= Using the OpenShift CLI - -Review the following sections to learn how to complete common tasks using the CLI. - -ifndef::microshift[] -== Creating a project - -Use the `oc new-project` command to create a new project. - -[source,terminal] ----- -$ oc new-project my-project ----- - -.Example output -[source,terminal] ----- -Now using project "my-project" on server "https://openshift.example.com:6443". ----- -endif::microshift[] - -ifndef::microshift[] -== Creating a new app - -Use the `oc new-app` command to create a new application. - -[source,terminal] ----- -$ oc new-app https://github.com/sclorg/cakephp-ex ----- - -.Example output -[source,terminal] ----- ---> Found image 40de956 (9 days old) in imagestream "openshift/php" under tag "7.2" for "php" - -... - - Run 'oc status' to view your app. ----- -endif::microshift[] - -== Viewing pods - -Use the `oc get pods` command to view the pods for the current project. - -[NOTE] -==== -When you run `oc` inside a pod and do not specify a namespace, the namespace of the pod is used by default. -==== - -[source,terminal] ----- -$ oc get pods -o wide ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE -cakephp-ex-1-build 0/1 Completed 0 5m45s 10.131.0.10 ip-10-0-141-74.ec2.internal -cakephp-ex-1-deploy 0/1 Completed 0 3m44s 10.129.2.9 ip-10-0-147-65.ec2.internal -cakephp-ex-1-ktz97 1/1 Running 0 3m33s 10.128.2.11 ip-10-0-168-105.ec2.internal ----- - -== Viewing pod logs - -Use the `oc logs` command to view logs for a particular pod. 
- -[source,terminal] ----- -$ oc logs cakephp-ex-1-deploy ----- - -.Example output -[source,terminal] ----- ---> Scaling cakephp-ex-1 to 1 ---> Success ----- - -ifndef::microshift[] -== Viewing the current project - -Use the `oc project` command to view the current project. - -[source,terminal] ----- -$ oc project ----- - -.Example output -[source,terminal] ----- -Using project "my-project" on server "https://openshift.example.com:6443". ----- - -== Viewing the status for the current project - -Use the `oc status` command to view information about the current project, such -as services, deployments, and build configs. - -[source,terminal] ----- -$ oc status ----- - -.Example output -[source,terminal] ----- -In project my-project on server https://openshift.example.com:6443 - -svc/cakephp-ex - 172.30.236.80 ports 8080, 8443 - dc/cakephp-ex deploys istag/cakephp-ex:latest <- - bc/cakephp-ex source builds https://github.com/sclorg/cakephp-ex on openshift/php:7.2 - deployment #1 deployed 2 minutes ago - 1 pod - -3 infos identified, use 'oc status --suggest' to see details. ----- -endif::microshift[] - -== Listing supported API resources - -Use the `oc api-resources` command to view the list of supported API resources -on the server. - -[source,terminal] ----- -$ oc api-resources ----- - -.Example output -[source,terminal] ----- -NAME SHORTNAMES APIGROUP NAMESPACED KIND -bindings true Binding -componentstatuses cs false ComponentStatus -configmaps cm true ConfigMap -... ----- diff --git a/modules/cloud-credential-operator.adoc b/modules/cloud-credential-operator.adoc deleted file mode 100644 index d3daf7ae2224..000000000000 --- a/modules/cloud-credential-operator.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cloud-credential-operator_{context}"] -= Cloud Credential Operator - -[discrete] -== Purpose - -The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run. - -By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode. - -[discrete] -== Project - -link:https://github.com/openshift/cloud-credential-operator[openshift-cloud-credential-operator] - -[discrete] -== CRDs - -* `credentialsrequests.cloudcredential.openshift.io` -** Scope: Namespaced -** CR: `CredentialsRequest` -** Validation: Yes - -[discrete] -== Configuration objects - -No configuration required. diff --git a/modules/cluster-api-architecture.adoc b/modules/cluster-api-architecture.adoc deleted file mode 100644 index 9a952b3e4dac..000000000000 --- a/modules/cluster-api-architecture.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/capi-machine-management.adoc - -:_content-type: CONCEPT -[id="cluster-api-architecture_{context}"] -= Cluster API architecture - -The {product-title} integration of the upstream Cluster API is implemented and managed by the Cluster CAPI Operator. 
The Cluster CAPI Operator and its operands are provisioned in the `openshift-cluster-api` namespace, in contrast to the Machine API, which uses the `openshift-machine-api` namespace. - -[id="capi-arch-operator"] -== The Cluster CAPI Operator - -The Cluster CAPI Operator is an {product-title} Operator that maintains the lifecycle of Cluster API resources. This Operator is responsible for all administrative tasks related to deploying the Cluster API project within an {product-title} cluster. - -If a cluster is configured correctly to allow the use of the Cluster API, the Cluster CAPI Operator installs the Cluster API Operator on the cluster. - -[NOTE] -==== -The Cluster CAPI Operator is distinct from the upstream Cluster API Operator. -==== - -For more information, see the entry for the Cluster CAPI Operator in the _Cluster Operators reference_ content. - -[id="capi-arch-resources"] -== Primary resources - -The Cluster API is comprised of the following primary resources. For the Technology Preview of this feature, you must create these resources manually in the `openshift-cluster-api` namespace. - -Cluster:: A fundamental unit that represents a cluster that is managed by the Cluster API. - -Infrastructure:: A provider-specific resource that defines properties that are shared by all the compute machine sets in the cluster, such as the region and subnets. - -Machine template:: A provider-specific template that defines the properties of the machines that a compute machine set creates. - -Machine set:: A group of machines. -+ -Compute machine sets are to machines as replica sets are to pods. If you need more machines or must scale them down, you change the `replicas` field on the compute machine set to meet your compute needs. -+ -With the Cluster API, a compute machine set references a `Cluster` object and a provider-specific machine template. - -Machine:: A fundamental unit that describes the host for a node. -+ -The Cluster API creates machines based on the configuration in the machine template. 
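
The following YAML is a minimal sketch of how a Cluster API compute machine set might reference a `Cluster` object and a provider-specific machine template on AWS. The resource names, labels, replica count, and bootstrap secret are illustrative assumptions rather than required values; verify the exact fields that your provider requires before creating these resources.

[source,yaml]
----
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineSet
metadata:
  name: <machine_set_name> <1>
  namespace: openshift-cluster-api
spec:
  clusterName: <cluster_name> <2>
  replicas: 1
  selector:
    matchLabels:
      example.com/machine-set: <machine_set_name>
  template:
    metadata:
      labels:
        example.com/machine-set: <machine_set_name>
    spec:
      clusterName: <cluster_name>
      bootstrap:
        dataSecretName: worker-user-data <3>
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: AWSMachineTemplate <4>
        name: <template_name>
----
<1> An illustrative name for the compute machine set.
<2> Must match the name of the `Cluster` object.
<3> An assumed bootstrap data secret name.
<4> The provider-specific machine template; this sketch assumes an AWS template.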
\ No newline at end of file diff --git a/modules/cluster-authentication-operator.adoc b/modules/cluster-authentication-operator.adoc deleted file mode 100644 index 0056de181e25..000000000000 --- a/modules/cluster-authentication-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-authentication-operator_{context}"] -= Cluster Authentication Operator - -[discrete] -== Purpose - -The Cluster Authentication Operator installs and maintains the `Authentication` custom resource in a cluster and can be viewed with: - -[source,terminal] ----- -$ oc get clusteroperator authentication -o yaml ----- - -[discrete] -== Project - -link:https://github.com/openshift/cluster-authentication-operator[cluster-authentication-operator] diff --git a/modules/cluster-autoscaler-about.adoc b/modules/cluster-autoscaler-about.adoc deleted file mode 100644 index a73d93c8a571..000000000000 --- a/modules/cluster-autoscaler-about.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-about-autoscaling-nodes.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * machine_management/applying-autoscaling.adoc -// * osd_cluster_admin/osd_nodes/osd-nodes-about-autoscaling-nodes.adoc - -:_content-type: CONCEPT -[id="cluster-autoscaler-about_{context}"] -= About the cluster autoscaler - -The cluster autoscaler adjusts the size of an {product-title} cluster to meet its current deployment needs. It uses declarative, Kubernetes-style arguments to provide infrastructure management that does not rely on objects of a specific cloud provider. The cluster autoscaler has a cluster scope, and is not associated with a particular namespace. - -The cluster autoscaler increases the size of the cluster when there are pods that fail to schedule on any of the current worker nodes due to insufficient resources or when another node is necessary to meet deployment needs. The cluster autoscaler does not increase the cluster resources beyond the limits that you specify. - -The cluster autoscaler computes the total -ifndef::openshift-dedicated,openshift-rosa[] -memory, CPU, and GPU -endif::[] -ifdef::openshift-dedicated,openshift-rosa[] -memory and CPU -endif::[] -on all nodes the cluster, even though it does not manage the control plane nodes. These values are not single-machine oriented. They are an aggregation of all the resources in the entire cluster. For example, if you set the maximum memory resource limit, the cluster autoscaler includes all the nodes in the cluster when calculating the current memory usage. That calculation is then used to determine if the cluster autoscaler has the capacity to add more worker resources. - -[IMPORTANT] -==== -Ensure that the `maxNodesTotal` value in the `ClusterAutoscaler` resource definition that you create is large enough to account for the total possible number of machines in your cluster. This value must encompass the number of control plane machines and the possible number of compute machines that you might scale to. -==== - -Every 10 seconds, the cluster autoscaler checks which nodes are unnecessary in the cluster and removes them. The cluster autoscaler considers a node for removal if the following conditions apply: - -* The node utilization is less than the _node utilization level_ threshold for the cluster. The node utilization level is the sum of the requested resources divided by the allocated resources for the node. 
If you do not specify a value in the `ClusterAutoscaler` custom resource, the cluster autoscaler uses a default value of `0.5`, which corresponds to 50% utilization. -* The cluster autoscaler can move all pods running on the node to the other nodes. The Kubernetes scheduler is responsible for scheduling pods on the nodes. -* The node does not have the scale-down disabled annotation. - -If the following types of pods are present on a node, the cluster autoscaler will not remove the node: - -* Pods with restrictive pod disruption budgets (PDBs). -* Kube-system pods that do not run on the node by default. -* Kube-system pods that do not have a PDB or have a PDB that is too restrictive. -* Pods that are not backed by a controller object such as a deployment, replica set, or stateful set. -* Pods with local storage. -* Pods that cannot be moved elsewhere because of a lack of resources, incompatible node selectors or affinity, matching anti-affinity, and so on. -* Unless they also have a `"cluster-autoscaler.kubernetes.io/safe-to-evict": "true"` annotation, pods that have a `"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"` annotation. - -For example, you set the maximum CPU limit to 64 cores and configure the cluster autoscaler to only create machines that have 8 cores each. If your cluster starts with 30 cores, the cluster autoscaler can add up to 4 more nodes with 32 cores, for a total of 62. - -If you configure the cluster autoscaler, additional usage restrictions apply: - -* Do not modify the nodes that are in autoscaled node groups directly. All nodes within the same node group have the same capacity and labels and run the same system pods. -* Specify requests for your pods. -* If you have to prevent pods from being deleted too quickly, configure appropriate PDBs. -* Confirm that your cloud provider quota is large enough to support the maximum node pools that you configure. -* Do not run additional node group autoscalers, especially the ones offered by your cloud provider. - -The horizontal pod autoscaler (HPA) and the cluster autoscaler modify cluster resources in different ways. The HPA changes the deployment's or replica set's number of replicas based on the current CPU load. If the load increases, the HPA creates new replicas, regardless of the amount of resources available to the cluster. If there are not enough resources, the cluster autoscaler adds resources so that the HPA-created pods can run. If the load decreases, the HPA stops some replicas. If this action causes some nodes to be underutilized or completely empty, the cluster autoscaler deletes the unnecessary nodes. - -The cluster autoscaler takes pod priorities into account. The Pod Priority and Preemption feature enables scheduling pods based on priorities if the cluster does not have enough resources, but the cluster autoscaler ensures that the cluster has resources to run all pods. To honor the intention of both features, the cluster autoscaler includes a priority cutoff function. You can use this cutoff to schedule "best-effort" pods, which do not cause the cluster autoscaler to increase resources but instead run only when spare resources are available. - -Pods with priority lower than the cutoff value do not cause the cluster to scale up or prevent the cluster from scaling down. No new nodes are added to run the pods, and nodes running these pods might be deleted to free resources. - -Cluster autoscaling is supported on platforms that have the Machine API available.
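
As a short illustration of the `cluster-autoscaler.kubernetes.io/safe-to-evict` annotation mentioned in the list above, the following sketch marks a pod as safe for the cluster autoscaler to evict during scale down. The pod name and image are placeholders, not values that the autoscaler requires.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-pod <1>
  annotations:
    cluster-autoscaler.kubernetes.io/safe-to-evict: "true" <2>
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest <3>
----
<1> An illustrative pod name.
<2> Allows the cluster autoscaler to evict this pod when it scales down a node.
<3> A placeholder image reference.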
- -//// -Default priority cutoff is 0. It can be changed using `--expendable-pods-priority-cutoff` flag, but we discourage it. cluster autoscaler also doesn't trigger scale-up if an unschedulable Pod is already waiting for a lower priority Pod preemption. -//// diff --git a/modules/cluster-autoscaler-cr.adoc b/modules/cluster-autoscaler-cr.adoc deleted file mode 100644 index 88e368c87f91..000000000000 --- a/modules/cluster-autoscaler-cr.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: REFERENCE -[id="cluster-autoscaler-cr_{context}"] -= ClusterAutoscaler resource definition - -This `ClusterAutoscaler` resource definition shows the parameters and sample values for the cluster autoscaler. - - -[source,yaml] ----- -apiVersion: "autoscaling.openshift.io/v1" -kind: "ClusterAutoscaler" -metadata: - name: "default" -spec: - podPriorityThreshold: -10 <1> - resourceLimits: - maxNodesTotal: 24 <2> - cores: - min: 8 <3> - max: 128 <4> - memory: - min: 4 <5> - max: 256 <6> - gpus: - - type: nvidia.com/gpu <7> - min: 0 <8> - max: 16 <9> - - type: amd.com/gpu - min: 0 - max: 4 - logVerbosity: 4 <10> - scaleDown: <11> - enabled: true <12> - delayAfterAdd: 10m <13> - delayAfterDelete: 5m <14> - delayAfterFailure: 30s <15> - unneededTime: 5m <16> - utilizationThreshold: "0.4" <17> ----- -<1> Specify the priority that a pod must exceed to cause the cluster autoscaler to deploy additional nodes. Enter a 32-bit integer value. The `podPriorityThreshold` value is compared to the value of the `PriorityClass` that you assign to each pod. -<2> Specify the maximum number of nodes to deploy. This value is the total number of machines that are deployed in your cluster, not just the ones that the autoscaler controls. Ensure that this value is large enough to account for all of your control plane and compute machines and the total number of replicas that you specify in your `MachineAutoscaler` resources. -<3> Specify the minimum number of cores to deploy in the cluster. -<4> Specify the maximum number of cores to deploy in the cluster. -<5> Specify the minimum amount of memory, in GiB, in the cluster. -<6> Specify the maximum amount of memory, in GiB, in the cluster. -<7> Optional: Specify the type of GPU node to deploy. Only `nvidia.com/gpu` and `amd.com/gpu` are valid types. -<8> Specify the minimum number of GPUs to deploy in the cluster. -<9> Specify the maximum number of GPUs to deploy in the cluster. -<10> Specify the logging verbosity level between `0` and `10`. The following log level thresholds are provided for guidance: -+ --- -* `1`: (Default) Basic information about changes. -* `4`: Debug-level verbosity for troubleshooting typical issues. -* `9`: Extensive, protocol-level debugging information. --- -+ -If you do not specify a value, the default value of `1` is used. -<11> In this section, you can specify the period to wait for each action by using any valid link:https://golang.org/pkg/time/#ParseDuration[ParseDuration] interval, including `ns`, `us`, `ms`, `s`, `m`, and `h`. -<12> Specify whether the cluster autoscaler can remove unnecessary nodes. -<13> Optional: Specify the period to wait before deleting a node after a node has recently been _added_. If you do not specify a value, the default value of `10m` is used. -<14> Optional: Specify the period to wait before deleting a node after a node has recently been _deleted_. 
If you do not specify a value, the default value of `0s` is used. -<15> Optional: Specify the period to wait before deleting a node after a scale down failure occurs. If you do not specify a value, the default value of `3m` is used. -<16> Optional: Specify the period before an unnecessary node is eligible for deletion. If you do not specify a value, the default value of `10m` is used. -<17> Optional: Specify the _node utilization level_ below which an unnecessary node is eligible for deletion. The node utilization level is the sum of the requested resources divided by the allocated resources for the node, and must be a value greater than `"0"` but less than `"1"`. If you do not specify a value, the cluster autoscaler uses a default value of `"0.5"`, which corresponds to 50% utilization. This value must be expressed as a string. -// Might be able to add a formula to show this visually, but need to look into asciidoc math formatting and what our tooling supports. - -[NOTE] -==== -When performing a scaling operation, the cluster autoscaler remains within the ranges set in the `ClusterAutoscaler` resource definition, such as the minimum and maximum number of cores to deploy or the amount of memory in the cluster. However, the cluster autoscaler does not correct the current values in your cluster to be within those ranges. - -The minimum and maximum CPUs, memory, and GPU values are determined by calculating those resources on all nodes in the cluster, even if the cluster autoscaler does not manage the nodes. For example, the control plane nodes are considered in the total memory in the cluster, even though the cluster autoscaler does not manage the control plane nodes. -==== diff --git a/modules/cluster-autoscaler-operator.adoc b/modules/cluster-autoscaler-operator.adoc deleted file mode 100644 index c37e338537ff..000000000000 --- a/modules/cluster-autoscaler-operator.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-autoscaler-operator_{context}"] -= Cluster Autoscaler Operator - -[discrete] -== Purpose - -The Cluster Autoscaler Operator manages deployments of the OpenShift Cluster Autoscaler using the `cluster-api` provider. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-autoscaler-operator[cluster-autoscaler-operator] - -[discrete] -== CRDs - -* `ClusterAutoscaler`: This is a singleton resource, which controls the configuration of the autoscaler instance for the cluster. The Operator only responds to the `ClusterAutoscaler` resource named `default` in the managed namespace, the value of the `WATCH_NAMESPACE` environment variable. -* `MachineAutoscaler`: This resource targets a node group and manages the annotations that enable and configure autoscaling for that group, such as the `min` and `max` size. Currently only `MachineSet` objects can be targeted.
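
The following YAML is a minimal sketch of a `MachineAutoscaler` resource that targets an existing compute machine set. The resource name and the machine set name are illustrative, and the namespace is assumed to be the managed `openshift-machine-api` namespace; the `min` and `max` sizes map to the `minReplicas` and `maxReplicas` fields.

[source,yaml]
----
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: worker-us-east-1a <1>
  namespace: openshift-machine-api
spec:
  minReplicas: 1 <2>
  maxReplicas: 12 <3>
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet <4>
    name: worker-us-east-1a <5>
----
<1> An illustrative resource name.
<2> The minimum size of the targeted node group.
<3> The maximum size of the targeted node group.
<4> Only `MachineSet` objects can be targeted.
<5> The name of an existing compute machine set.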
diff --git a/modules/cluster-bare-metal-operator.adoc b/modules/cluster-bare-metal-operator.adoc deleted file mode 100644 index cb5882fdfac2..000000000000 --- a/modules/cluster-bare-metal-operator.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-bare-metal-operator_{context}"] -ifdef::operator-ref[= Cluster Baremetal Operator] -ifdef::cluster-caps[= Bare-metal capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Baremetal Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster Baremetal Operator provides the features for the `baremetal` capability. - -endif::cluster-caps[] - -The Cluster Baremetal Operator (CBO) deploys all the components necessary to take a bare-metal server to a fully functioning worker node ready to run {product-title} compute nodes. The CBO ensures that the metal3 deployment, which consists of the Bare Metal Operator (BMO) and Ironic containers, runs on one of the control plane nodes within the {product-title} cluster. The CBO also listens for {product-title} updates to resources that it watches and takes appropriate action. - -ifdef::cluster-caps[] -The bare-metal capability is required for deployments using installer-provisioned infrastructure. Disabling the bare-metal capability can result in unexpected problems with these deployments. - -It is recommended that cluster administrators only disable the bare-metal capability during installations with user-provisioned infrastructure that do not have any `BareMetalHost` resources in the cluster. - -[IMPORTANT] -==== -If the bare-metal capability is disabled, the cluster cannot provision or manage bare-metal nodes. Only disable the capability if there are no `BareMetalHost` resources in your deployment. -==== -endif::cluster-caps[] - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-baremetal-operator[cluster-baremetal-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] diff --git a/modules/cluster-capi-operator.adoc b/modules/cluster-capi-operator.adoc deleted file mode 100644 index 9a3e1caeb48a..000000000000 --- a/modules/cluster-capi-operator.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-capi-operator_{context}"] -= Cluster CAPI Operator - -[NOTE] -==== -This Operator is available as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] for Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure clusters. -==== - -[discrete] -== Purpose - -The Cluster CAPI Operator maintains the lifecycle of Cluster API resources. This Operator is responsible for all administrative tasks related to deploying the Cluster API project within an {product-title} cluster. 
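
To confirm that the Operator and its operands are present, you can list the deployments in the `openshift-cluster-api` namespace, where the Cluster CAPI Operator provisions its resources. This is a general verification sketch rather than a documented requirement.

[source,terminal]
----
$ oc get deployments -n openshift-cluster-api
----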
- -[discrete] -== Project - -link:https://github.com/openshift/cluster-capi-operator[cluster-capi-operator] - -[discrete] -== CRDs - -* `awsmachines.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `awsmachine` -** Validation: No - -* `gcpmachines.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `gcpmachine` -** Validation: No - -* `awsmachinetemplates.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `awsmachinetemplate` -** Validation: No - -* `gcpmachinetemplates.infrastructure.cluster.x-k8s.io` -** Scope: Namespaced -** CR: `gcpmachinetemplate` -** Validation: No \ No newline at end of file diff --git a/modules/cluster-cloud-controller-config-osp.adoc b/modules/cluster-cloud-controller-config-osp.adoc deleted file mode 100644 index 2a10338e3833..000000000000 --- a/modules/cluster-cloud-controller-config-osp.adoc +++ /dev/null @@ -1,328 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-cloud-config-reference.adoc - -:_content-type: REFERENCE -[id="cluster-cloud-controller-config_{context}"] -= The OpenStack Cloud Controller Manager (CCM) config map - -An OpenStack CCM config map defines how your cluster interacts with your {rh-openstack} cloud. By default, this configuration is stored under the `cloud.conf` key in the `cloud-conf` config map in the `openshift-cloud-controller-manager` namespace. - -[IMPORTANT] -==== -The `cloud-conf` config map is generated from the `cloud-provider-config` config map in the `openshift-config` namespace. - -To change the settings that are described by the `cloud-conf` config map, modify the `cloud-provider-config` config map. - -As part of this synchronization, the CCM Operator overrides some options. For more information, see "The {rh-openstack} Cloud Controller Manager". -==== - -For example: - -.An example `cloud-conf` config map -[source,yaml] ----- -apiVersion: v1 -data: - cloud.conf: | - [Global] <1> - secret-name = openstack-credentials - secret-namespace = kube-system - region = regionOne - [LoadBalancer] - use-octavia = True -kind: ConfigMap -metadata: - creationTimestamp: "2022-12-20T17:01:08Z" - name: cloud-conf - namespace: openshift-cloud-controller-manager - resourceVersion: "2519" - uid: cbbeedaf-41ed-41c2-9f37-4885732d3677 ----- -<1> Set global options by using a `clouds.yaml` file rather than modifying the config map. - -The following options are present in the config map. Except when indicated otherwise, they are mandatory for clusters that run on {rh-openstack}. - -// [id="ccm-config-global-options"] -// == Global options - -// The following options are used for {rh-openstack} CCM authentication with the {rh-openstack} Identity service, also known as Keystone. They are similiar to the global options that you can set by using the `openstack` CLI. - -// |=== -// | Option | Description - -// | `ca-file` -// | Optional. The CA certificate bundle file for communication with the {rh-openstack} Identity service. If you use the HTTPS protocol with The Identity service URL, this option is required. - -// | `cert-file` -// | Optional. The client certificate path to use for client TLS authentication. - -// | `key-file` -// | Optional. The client private key path to use for client TLS authentication. - -// | `region` -// | The Identity service region name. - -// | `trust-id` -// | The Identity service trust ID. A trust represents the authorization of a user, or trustor, to delegate roles to another user, or trustee. 
Optionally, a trust authorizes the trustee to impersonate the trustor. You can find available trusts by querying the `/v3/OS-TRUST/trusts` endpoint of the Identity service API. - -// | `trustee-id` -// | The Identity service trustee user ID. - -// | `trustee-password` -// | The Identity service trustee user password. - -// | `application-credential-id` -// | The ID of an application credential to authenticate with. An `application-credential-secret` must be set along with this parameter. - -// | `application-credential-name` -// | The name of an application credential to authenticate with. If `application-credential-id` is not set, the user name and domain must be set. - -// | `application-credential-secret` -// | The secret of an application credential to authenticate with. - -// | `tls-insecure` -// | Whether or not to verify the server's TLS certificate. If set to `true`, the certificate is not verified. By default, the certificate is verified. -// |=== - - -// [id="ccm-config-networking-options"] -// == Networking options - -// |=== -// | Option | Description - -// | `ipv6-support-disabled` -// | Whether or not IPv6 is supported as indicated by a boolean value. By default, this option is `false`. - -// | `public-network-name` -// | The name of an {rh-openstack} Networking service, or Neutron, external network. The CCM uses this option when retrieving the external IP address of a Kubernetes node. This value can contain multiple names. Specified networks are bitwise ORed. The default value is `""`. - -// | `internal-network-name` -// | The name of a Networking service internal network. The CCM uses this option when retrieving the internal IP address of a Kubernetes node. This value can contain multiple names. Specified networks are bitwise ORed. The default value is `""`. - -// | `address-sort-order` -// | This configuration key affects how the provider reports node addresses to Kubernetes node resources. The default order depends on the hard-coded order in which the provider queries addresses and what the cloud returns. A specific order is not guaranteed. - -// To override this behavior, specify a comma-separated list of CIDR addresses. CCM sorts and groups all addresses that match the list in a prioritized manner, wherein the first retrieved item has a higher priority than the last. Addresses that do not match the list remain in their default order. The default value is `""`. - -// This option can be useful if you have multiple or dual-stack interfaces attached to a node that need a user-controlled, deterministic way of sorting addresses. -// |=== - -[id="ccm-config-lb-options"] -== Load balancer options - -CCM supports several load balancer options for deployments that use Octavia. - -[NOTE] -==== -Neutron-LBaaS support is deprecated. -==== - -|=== -| Option | Description - -| `enabled` -| Whether or not to enable the `LoadBalancer` type of services integration. The default value is `true`. - -// Always enforced. -// | `use-octavia` -// | Whether or not to use Octavia for the `LoadBalancer` type of service implementation rather than Neutron-LBaaS. The default value is `true`. - -| `floating-network-id` -| Optional. The external network used to create floating IP addresses for load balancer virtual IP addresses (VIPs). If there are multiple external networks in the cloud, this option must be set or the user must specify `loadbalancer.openstack.org/floating-network-id` in the service annotation. - -| `floating-subnet-id` -| Optional. 
The external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet-id`. - -| `floating-subnet` -| Optional. A name pattern (glob or regular expression if starting with `~`) for the external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet`. If multiple subnets match the pattern, the first one with available IP addresses is used. - -| `floating-subnet-tags` -| Optional. Tags for the external network subnet used to create floating IP addresses for the load balancer VIP. Can be overridden by the service annotation `loadbalancer.openstack.org/floating-subnet-tags`. If multiple subnets match these tags, the first one with available IP addresses is used. - -If the {rh-openstack} network is configured with sharing disabled, for example, with the `--no-share` flag used during creation, this option is unsupported. Set the network to share to use this option. - -| `lb-method` -| The load balancing algorithm used to create the load balancer pool. -For the Amphora provider the value can be `ROUND_ROBIN`, `LEAST_CONNECTIONS`, or `SOURCE_IP`. The default value is `ROUND_ROBIN`. - -For the OVN provider, only the `SOURCE_IP_PORT` algorithm is supported. - -For the Amphora provider, if using the `LEAST_CONNECTIONS` or `SOURCE_IP` methods, configure the `create-monitor` option as `true` in the `cloud-provider-config` config map on the `openshift-config` namespace and `ETP:Local` on the load-balancer type service to allow balancing algorithm enforcement in the client to service endpoint connections. - -| `lb-provider` -| Optional. Used to specify the provider of the load balancer, for example, `amphora` or `octavia`. Only the Amphora and Octavia providers are supported. - -| `lb-version` -| Optional. The load balancer API version. Only `"v2"` is supported. - -| `subnet-id` -| The ID of the Networking service subnet on which load balancer VIPs are created. - -// This ID is also used to create pool members if `member-subnet-id` is not set. - -// | `member-subnet-id` -// | ID of the Neutron network on which to create the members of the load balancer. The load balancer gets another network port on this subnet. Defaults to `subnet-id` if not set. - -| `network-id` -| The ID of the Networking service network on which load balancer VIPs are created. Unnecessary if `subnet-id` is set. - -// | `manage-security-groups` -// | If the Neutron security groups should be managed separately. Default: false - -| `create-monitor` -| Whether or not to create a health monitor for the service load balancer. A health monitor is required for services that declare `externalTrafficPolicy: Local`. The default value is `false`. - -This option is unsupported if you use {rh-openstack} earlier than version 17 with the `ovn` provider. - -| `monitor-delay` -| The interval in seconds by which probes are sent to members of the load balancer. The default value is `5`. - -| `monitor-max-retries` -| The number of successful checks that are required to change the operating status of a load balancer member to `ONLINE`. The valid range is `1` to `10`, and the default value is `1`. - -| `monitor-timeout` -| The time in seconds that a monitor waits to connect to the back end before it times out. The default value is `3`. - -| `internal-lb` -| Whether or not to create an internal load balancer without floating IP addresses. 
The default value is `false`. - -// | `cascade-delete` -// | Determines whether or not to perform cascade deletion of load balancers. Default: true. - -// | `flavor-id` -// | The id of the loadbalancer flavor to use. Uses octavia default if not set. - -// | `availability-zone` -// | The name of the loadbalancer availability zone to use. It is applicable if use-octavia is set to True and requires Octavia API version 2.14 or later (Ussuri release). The Octavia availability zone capabilities will not be used if it is not set. The parameter will be ignored if the Octavia version doesn't support availability zones yet. - -| `LoadBalancerClass "ClassName"` -a| This is a config section that comprises a set of options: - - * `floating-network-id` - * `floating-subnet-id` - * `floating-subnet` - * `floating-subnet-tags` - * `network-id` - * `subnet-id` - -// * `member-subnet-id` - -The behavior of these options is the same as that of the identically named options in the load balancer section of the CCM config file. - -You can set the `ClassName` value by specifying the service annotation `loadbalancer.openstack.org/class`. - -// | `enable-ingress-hostname` -// | Used with proxy protocol (set by annotation `loadbalancer.openstack.org/proxy-protocol: "true"`) by adding a dns suffix (nip.io) to the load balancer IP address. Default false. - -// This option is currently a workaround for the issue https://github.com/kubernetes/ingress-nginx/issues/3996, should be removed or refactored after the Kubernetes [KEP-1860](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding) is implemented. - -// | `ingress-hostname-suffix` -// | The dns suffix to the load balancer IP address when using proxy protocol. Default nip.io - -// This option is currently a workaround for the issue https://github.com/kubernetes/ingress-nginx/issues/3996, should be removed or refactored after the Kubernetes [KEP-1860](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding) is implemented. - -// | `default-tls-container-ref` -// | Reference to a tls container. This option works with Octavia, when this option is set then the cloud provider will create an Octavia Listener of type TERMINATED_HTTPS for a TLS Terminated loadbalancer. - -// Format for tls container ref: `https://{keymanager_host}/v1/containers/{uuid}` -// Check `container-store` parameter if you want to disable validation. - -// | `container-store` -// | Optional. Used to specify the store of the tls-container-ref, e.g. "barbican" or "external" - other store will cause a warning log. Default value - `barbican` - existence of tls container ref would always be performed. If set to `external` format for tls container ref will not be validated. - -| `max-shared-lb` -| The maximum number of services that can share a load balancer. The default value is `2`. -|=== - -// [id="ccm-config-metadata-options"] -// == Metadata options - -// |=== -// | Option | Description - -// | `search-order` -// | This configuration key affects the way that the provider retrieves metadata that relates to the instances in which it runs. The default value of `configDrive,metadataService` results in the provider retrieving metadata that relates to the instance from, if available, the config drive first,and then the metadata service. Alternative values are: -// * `configDrive`: Only retrieve instance metadata from the configuration drive. 
-// * `metadataService`: Only retrieve instance metadata from the metadata service. -// * `metadataService,configDrive`: Retrieve instance metadata from the metadata service first if available, and then retrieve instance metadata from the configuration drive. -// |=== - -// ### Multi region support (alpha) - -// | environment variable `OS_CCM_REGIONAL` is set to `true` - allow CCM to set ProviderID with region name `${ProviderName}://${REGION}/${instance-id}`. Default: false. - -[id="cluster-cloud-controller-config-overrides"] -== Options that the Operator overrides - -The CCM Operator overrides the following options, which you might recognize from configuring {rh-openstack}. Do not configure them yourself. They are included in this document for informational purposes only. - -|=== -| Option | Description - -| `auth-url` -| The {rh-openstack} Identity service URL. For example, `http://128.110.154.166/identity`. - -| `os-endpoint-type` -| The type of endpoint to use from the service catalog. - -// If unset, public endpoints are used. - -| `username` -| The Identity service user name. - -// Leave this option unset if you are using Identity service application credentials. - -| `password` -| The Identity service user password. - -// Leave this option unset if you are using Identity service application credentials. - -| `domain-id` -| The Identity service user domain ID. - -// Leave this option unset if you are using Identity service application credentials. - -| `domain-name` -| The Identity service user domain name. - -// This option is not required if you set `domain-id`. - -| `tenant-id` -| The Identity service project ID. Leave this option unset if you are using Identity service application credentials. - -In version 3 of the Identity API, which changed the identifier `tenant` to `project`, the value of `tenant-id` is automatically mapped to the project construct in the API. - -| `tenant-name` -| The Identity service project name. - -| `tenant-domain-id` -| The Identity service project domain ID. - -| `tenant-domain-name` -| The Identity service project domain name. - -| `user-domain-id` -| The Identity service user domain ID. - -| `user-domain-name` -| The Identity service user domain name. - -| `use-clouds` -a| Whether or not to fetch authorization credentials from a `clouds.yaml` file. Options set in this section are prioritized over values read from the `clouds.yaml` file. - -CCM searches for the file in the following places: - -. The value of the `clouds-file` option. -. A file path stored in the environment variable `OS_CLIENT_CONFIG_FILE`. -. The directory `pkg/openstack`. -. The directory `~/.config/openstack`. -. The directory `/etc/openstack`. - -| `clouds-file` -| The file path of a `clouds.yaml` file. It is used if the `use-clouds` option is set to `true`. - -| `cloud` -| The named cloud in the `clouds.yaml` file that you want to use. It is used if the `use-clouds` option is set to `true`. 
-|=== \ No newline at end of file diff --git a/modules/cluster-cloud-controller-manager-operator.adoc b/modules/cluster-cloud-controller-manager-operator.adoc deleted file mode 100644 index 15fcc72f076f..000000000000 --- a/modules/cluster-cloud-controller-manager-operator.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-cloud-controller-manager-operator_{context}"] -= Cluster Cloud Controller Manager Operator - -[discrete] -== Purpose - -[NOTE] -==== -This Operator is General Availability for Microsoft Azure Stack Hub, Nutanix, {rh-openstack-first}, and VMware vSphere. - -It is available as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] for Alibaba Cloud, Amazon Web Services (AWS), Google Cloud Platform (GCP), IBM Cloud, IBM Cloud Power VS, and Microsoft Azure. -==== - -The Cluster Cloud Controller Manager Operator manages and updates the cloud controller managers deployed on top of {product-title}. The Operator is based on the Kubebuilder framework and `controller-runtime` libraries. It is installed via the Cluster Version Operator (CVO). - -It contains the following components: - -* Operator -* Cloud configuration observer - -By default, the Operator exposes Prometheus metrics through the `metrics` service. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-cloud-controller-manager-operator[cluster-cloud-controller-manager-operator] diff --git a/modules/cluster-config-operator.adoc b/modules/cluster-config-operator.adoc deleted file mode 100644 index a2635eb7f37b..000000000000 --- a/modules/cluster-config-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-config-operator_{context}"] -= Cluster Config Operator - -[discrete] -== Purpose - -The Cluster Config Operator performs the following tasks related to `config.openshift.io`: - -* Creates CRDs. -* Renders the initial custom resources. -* Handles migrations. - - -[discrete] -== Project - -link:https://github.com/openshift/cluster-config-operator[cluster-config-operator] diff --git a/modules/cluster-csi-snapshot-controller-operator.adoc b/modules/cluster-csi-snapshot-controller-operator.adoc deleted file mode 100644 index 94231b46b00b..000000000000 --- a/modules/cluster-csi-snapshot-controller-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-csi-snapshot-controller-operator_{context}"] -ifdef::operator-ref[= Cluster CSI Snapshot Controller Operator] -ifdef::cluster-caps[= CSI snapshot controller capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster CSI Snapshot Controller Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster CSI Snapshot Controller Operator provides the features for the `CSISnapshot` capability. 
- -endif::cluster-caps[] - -The Cluster CSI Snapshot Controller Operator installs and maintains the CSI Snapshot Controller. The CSI Snapshot Controller is responsible for watching the `VolumeSnapshot` CRD objects and manages the creation and deletion lifecycle of volume snapshots. - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-csi-snapshot-controller-operator[cluster-csi-snapshot-controller-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-dns-operator.adoc b/modules/cluster-dns-operator.adoc deleted file mode 100644 index fa88268d5ff1..000000000000 --- a/modules/cluster-dns-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="dns-operator_{context}"] -= DNS Operator - -[discrete] -== Purpose - -The DNS Operator deploys and manages CoreDNS to provide a name resolution service to pods that enables DNS-based Kubernetes Service discovery in {product-title}. - -The Operator creates a working default deployment based on the cluster's configuration. - -* The default cluster domain is `cluster.local`. -* Configuration of the CoreDNS Corefile or Kubernetes plugin is not yet supported. - -The DNS Operator manages CoreDNS as a Kubernetes daemon set exposed as a service with a static IP. CoreDNS runs on all nodes in the cluster. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-dns-operator[cluster-dns-operator] diff --git a/modules/cluster-entitlements.adoc b/modules/cluster-entitlements.adoc deleted file mode 100644 index 0c38b7e01af7..000000000000 --- a/modules/cluster-entitlements.adoc +++ /dev/null @@ -1,185 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * 
installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-china-region.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * 
installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc -// * architecture/architecture.adoc -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:restricted: -endif::[] - -:_content-type: CONCEPT -[id="cluster-entitlements_{context}"] -ifndef::openshift-origin[] -= Internet access for {product-title} - -In {product-title} {product-version}, you require access to the internet to -ifndef::restricted[] -install -endif::restricted[] -ifdef::restricted[] -obtain the images that are necessary to install -endif::restricted[] -your cluster. - -You must have internet access to: - -* Access {cluster-manager-url} to download the installation program and perform subscription management. If the cluster has internet access and you do not disable Telemetry, that service automatically entitles your cluster. -* Access link:http://quay.io[Quay.io] to obtain the packages that are required to install your cluster. -* Obtain the packages that are required to perform cluster updates. -ifdef::openshift-enterprise,openshift-webscale[] - -ifndef::restricted[] -[IMPORTANT] -==== -If your cluster cannot have direct internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the required content and use it to populate a mirror registry with the installation packages. With some installation types, the environment that you install your cluster in will not require internet access. Before you update the cluster, you update the content of the mirror registry. 
-==== -endif::restricted[] - -endif::openshift-enterprise,openshift-webscale[] -endif::openshift-origin[] - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!restricted: -endif::[] diff --git a/modules/cluster-image-registry-operator.adoc b/modules/cluster-image-registry-operator.adoc deleted file mode 100644 index 545abba8f2fd..000000000000 --- a/modules/cluster-image-registry-operator.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-image-registry-operator_{context}"] -= Cluster Image Registry Operator - -[discrete] -== Purpose - -The Cluster Image Registry Operator manages a singleton instance of the {product-registry}. It manages all configuration of the registry, including creating storage. - -On initial start up, the Operator creates a default `image-registry` resource instance based on the configuration detected in the cluster. This indicates what cloud storage type to use based on the cloud provider. - -If insufficient information is available to define a complete `image-registry` resource, then an incomplete resource is defined and the Operator updates the resource status with information about what is missing. - -The Cluster Image Registry Operator runs in the `openshift-image-registry` namespace and it also manages the registry instance in that location. All configuration and workload resources for the registry reside in that namespace. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-image-registry-operator[cluster-image-registry-operator] diff --git a/modules/cluster-kube-scheduler-operator.adoc b/modules/cluster-kube-scheduler-operator.adoc deleted file mode 100644 index b5e8d14bfd93..000000000000 --- a/modules/cluster-kube-scheduler-operator.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-kube-scheduler-operator_{context}"] -= Kubernetes Scheduler Operator - -[discrete] -== Purpose - -The Kubernetes Scheduler Operator manages and updates the Kubernetes Scheduler deployed on top of {product-title}. The Operator is based on the {product-title} `library-go` framework and it is installed with the Cluster Version Operator (CVO). 
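
As with the other cluster Operators in this reference, you can inspect the Operator status through its `ClusterOperator` resource. The following command assumes that the Operator reports under the `kube-scheduler` name:

[source,terminal]
----
$ oc get clusteroperator kube-scheduler -o yaml
----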
- -The Kubernetes Scheduler Operator contains the following components: - -* Operator -* Bootstrap manifest renderer -* Installer based on static pods -* Configuration observer - -By default, the Operator exposes Prometheus metrics through the metrics service. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-scheduler-operator[cluster-kube-scheduler-operator] - -[discrete] -== Configuration - -The configuration for the Kubernetes Scheduler is the result of merging: - -* a default configuration. -* an observed configuration from the spec `schedulers.config.openshift.io`. - -All of these are sparse configurations, invalidated JSON snippets which are merged to form a valid configuration at the end. diff --git a/modules/cluster-kube-storage-version-migrator-operator.adoc b/modules/cluster-kube-storage-version-migrator-operator.adoc deleted file mode 100644 index a92f41701369..000000000000 --- a/modules/cluster-kube-storage-version-migrator-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-kube-storage-version-migrator-operator_{context}"] -= Kubernetes Storage Version Migrator Operator - -[discrete] -== Purpose - -The Kubernetes Storage Version Migrator Operator detects changes of the default storage version, creates migration requests for resource types when the storage version changes, and processes migration requests. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-storage-version-migrator-operator[cluster-kube-storage-version-migrator-operator] diff --git a/modules/cluster-limitations-local-zone.adoc b/modules/cluster-limitations-local-zone.adoc deleted file mode 100644 index c2e83896622f..000000000000 --- a/modules/cluster-limitations-local-zone.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-aws-localzone.adoc - -:_content-type: CONCEPT - -[id="cluster-limitations-local-zone_{context}"] -= Cluster limitations in AWS Local Zones - -Some limitations exist when you attempt to deploy a cluster with a default installation configuration in Amazon Web Services (AWS) Local Zones. - -[IMPORTANT] -==== -The following list details limitations when deploying a cluster in AWS Local Zones: - -- The Maximum Transmission Unit (MTU) between an Amazon EC2 instance in a Local Zone and an Amazon EC2 instance in the Region is `1300`. This causes the cluster-wide network MTU to change according to the network plugin that is used on the deployment. -- Network resources such as Network Load Balancer (NLB), Classic Load Balancer, and Network Address Translation (NAT) Gateways are not supported in AWS Local Zones. -- For an {product-title} cluster on AWS, the AWS Elastic Block Storage (EBS) `gp3` type volume is the default for node volumes and the default for the storage class. This volume type is not globally available on Local Zone locations. By default, the nodes running in Local Zones are deployed with the `gp2` EBS volume. The `gp2-csi` `StorageClass` must be set when creating workloads on Local Zone nodes. 
-==== diff --git a/modules/cluster-logging-Uninstall-logging.adoc b/modules/cluster-logging-Uninstall-logging.adoc deleted file mode 100644 index b7dfe0709216..000000000000 --- a/modules/cluster-logging-Uninstall-logging.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-uninstall-logging-about_{context}"] -= About uninstalling {product-title} Logging - -You can stop log aggregation by deleting the ClusterLogging custom resource (CR). After deleting the CR, there are other cluster logging components that remain, which you can optionally remove. diff --git a/modules/cluster-logging-about-collector.adoc b/modules/cluster-logging-about-collector.adoc deleted file mode 100644 index 4b6251b5841d..000000000000 --- a/modules/cluster-logging-about-collector.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-collector_{context}"] -= About the logging collector - -The {logging-title} collects container and node logs. - -By default, the log collector uses the following sources: - -* journald for all system logs -* `/var/log/containers/*.log` for all container logs - -If you configure the log collector to collect audit logs, it gets them from `/var/log/audit/audit.log`. - -The logging collector is a daemon set that deploys pods to each {product-title} node. System and infrastructure logs are generated by journald log messages from the operating system, the container runtime, and {product-title}. Application logs are generated by the CRI-O container engine. Fluentd collects the logs from these sources and forwards them internally or externally as you configure in {product-title}. - -The container runtimes provide minimal information to identify the source of log messages: project, pod name, and container ID. This information is not sufficient to uniquely identify the source of the logs. If a pod with a given name and project is deleted before the log collector begins processing its logs, information from the API server, such as labels and annotations, might not be available. There might not be a way to distinguish the log messages from a similarly named pod and project or trace the logs to their source. This limitation means that log collection and normalization are considered *best effort*. - -[IMPORTANT] -==== -The available container runtimes provide minimal information to identify the -source of log messages and do not guarantee unique individual log -messages or that these messages can be traced to their source. -==== diff --git a/modules/cluster-logging-about-components.adoc b/modules/cluster-logging-about-components.adoc deleted file mode 100644 index 14d79d9bfe43..000000000000 --- a/modules/cluster-logging-about-components.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-components_{context}"] -= About {logging} components - -The {logging} components include a collector deployed to each node in the {product-title} cluster -that collects all node and container logs and writes them to a log store. You can use a centralized web UI to create rich visualizations and dashboards with the aggregated data. 
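
To get a rough picture of these components on a running cluster, you can list the pods in the `openshift-logging` namespace. This is a sketch that assumes a default installation; the pod names and counts vary by cluster:

[source,terminal]
----
$ oc get pods -n openshift-logging
----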
- -The major components of the {logging} are: - -* collection - This is the component that collects logs from the cluster, formats them, and forwards them to the log store. The current implementation is Fluentd. -* log store - This is where the logs are stored. The default implementation is Elasticsearch. You can use the default Elasticsearch log store or forward logs to external log stores. The default log store is optimized and tested for short-term storage. -* visualization - This is the UI component you can use to view logs, graphs, charts, and so forth. The current implementation is Kibana. - diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc deleted file mode 100644 index 5c2a76f06048..000000000000 --- a/modules/cluster-logging-about-crd.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-configuring-crd_{context}"] -= About the ClusterLogging custom resource - -To make changes to your {logging} environment, create and modify the `ClusterLogging` custom resource (CR). - -Instructions for creating or modifying a CR are provided in this documentation as appropriate. - -The following example shows a typical custom resource for the {logging}. - -[id="efk-logging-configuring-about-sample_{context}"] -.Sample `ClusterLogging` custom resource (CR) -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" <2> -spec: - managementState: "Managed" <3> - logStore: - type: "elasticsearch" <4> - retentionPolicy: - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 - resources: - limits: - memory: 16Gi - requests: - cpu: "1" - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: <5> - type: "kibana" - kibana: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi - replicas: 1 - collection: <6> - logs: - type: "fluentd" - fluentd: - resources: - limits: - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi ----- -<1> The CR name must be `instance`. -<2> The CR must be installed to the `openshift-logging` namespace. -<3> The Red Hat OpenShift Logging Operator management state. When set to `unmanaged` the operator is in an unsupported state and will not get updates. -<4> Settings for the log store, including retention policy, the number of nodes, the resource requests and limits, and the storage class. -<5> Settings for the visualizer, including the resource requests and limits, and the number of pod replicas. -<6> Settings for the log collector, including the resource requests and limits. -endif::[] diff --git a/modules/cluster-logging-about-logstore.adoc b/modules/cluster-logging-about-logstore.adoc deleted file mode 100644 index d51ab1623e43..000000000000 --- a/modules/cluster-logging-about-logstore.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-logstore_{context}"] -= About the log store - -By default, {product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to store log data. 
Optionally you can use the Log Forwarder API to forward logs to an external store. Several types of store are supported, including fluentd, rsyslog, kafka and others. - -The {logging} Elasticsearch instance is optimized and tested for short term storage, approximately seven days. If you want to retain your logs over a longer term, it is recommended you move the data to a third-party storage system. - -Elasticsearch organizes the log data from Fluentd into datastores, or _indices_, then subdivides each index into multiple pieces called _shards_, which it spreads across a set of Elasticsearch nodes in an Elasticsearch cluster. You can configure Elasticsearch to make copies of the shards, called _replicas_, which Elasticsearch also spreads across the Elasticsearch nodes. The `ClusterLogging` custom resource (CR) allows you to specify how the shards are replicated to provide data redundancy and resilience to failure. You can also specify how long the different types of logs are retained using a retention policy in the `ClusterLogging` CR. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -The Red Hat OpenShift Logging Operator and companion OpenShift Elasticsearch Operator ensure that each Elasticsearch node is deployed using a unique deployment that includes its own storage volume. -You can use a `ClusterLogging` custom resource (CR) to increase the number of Elasticsearch nodes, as needed. -See the link:https://www.elastic.co/guide/en/elasticsearch/guide/current/hardware.html[Elasticsearch documentation] for considerations involved in configuring storage. - -[NOTE] -==== -A highly-available Elasticsearch environment requires at least three Elasticsearch nodes, each on a different host. -==== - -Role-based access control (RBAC) applied on the Elasticsearch indices enables the controlled access of the logs to the developers. Administrators can access all logs and developers can access only the logs in their projects. diff --git a/modules/cluster-logging-about-visualizer.adoc b/modules/cluster-logging-about-visualizer.adoc deleted file mode 100644 index 607f8a4d912d..000000000000 --- a/modules/cluster-logging-about-visualizer.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-about-visualizer_{context}"] -= About logging visualization - -{product-title} uses Kibana to display the log data collected by Fluentd and indexed by Elasticsearch. - -Kibana is a browser-based console interface to query, discover, and visualize your Elasticsearch data through -histograms, line graphs, pie charts, and other visualizations. - diff --git a/modules/cluster-logging-about.adoc b/modules/cluster-logging-about.adoc deleted file mode 100644 index 257ad90ea41c..000000000000 --- a/modules/cluster-logging-about.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-openshift-cluster-monitoring.adoc -// * logging/cluster-logging.adoc -// * serverless/monitor/cluster-logging-serverless.adoc - -// This module uses conditionalized paragraphs so that the module -// can be re-used in associated products. 
- -:_content-type: CONCEPT -[id="cluster-logging-about_{context}"] -= About deploying the {logging-title} - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -{product-title} cluster administrators can deploy the {logging} using the {product-title} web console or CLI to install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. When the Operators are installed, you create a `ClusterLogging` custom resource (CR) to schedule {logging} pods and other resources necessary to support the {logging}. The Operators are responsible for deploying, upgrading, and maintaining the {logging}. -endif::openshift-enterprise,openshift-webscale,openshift-origin[] - -The `ClusterLogging` CR defines a complete {logging} environment that includes all the components of the logging stack to collect, store and visualize logs. The Red Hat OpenShift Logging Operator watches the {logging} CR and adjusts the logging deployment accordingly. - -Administrators and application developers can view the logs of the projects for which they have view access. diff --git a/modules/cluster-logging-clo-status-comp.adoc b/modules/cluster-logging-clo-status-comp.adoc deleted file mode 100644 index ce35d99ce49f..000000000000 --- a/modules/cluster-logging-clo-status-comp.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-cluster-status.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-clo-status-example_{context}"] -= Viewing the status of {logging} components - -You can view the status for a number of {logging} components. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. View the status of the {logging-title} environment: -+ -[source,terminal] ----- -$ oc describe deployment cluster-logging-operator ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Available True MinimumReplicasAvailable - Progressing True NewReplicaSetAvailable - -.... - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal ScalingReplicaSet 62m deployment-controller Scaled up replica set cluster-logging-operator-574b8987df to 1---- ----- - -. View the status of the {logging} replica set: - -.. Get the name of a replica set: -+ -.Example output -[source,terminal] ----- -$ oc get replicaset ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AGE -cluster-logging-operator-574b8987df 1 1 1 159m -elasticsearch-cdm-uhr537yu-1-6869694fb 1 1 1 157m -elasticsearch-cdm-uhr537yu-2-857b6d676f 1 1 1 156m -elasticsearch-cdm-uhr537yu-3-5b6fdd8cfd 1 1 1 155m -kibana-5bd5544f87 1 1 1 157m ----- - -.. Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaset cluster-logging-operator-574b8987df ----- -+ -.Example output -[source,terminal] ----- -Name: cluster-logging-operator-574b8987df - -.... - -Replicas: 1 current / 1 desired -Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed - -.... 
- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulCreate 66m replicaset-controller Created pod: cluster-logging-operator-574b8987df-qjhqv---- ----- diff --git a/modules/cluster-logging-clo-status.adoc b/modules/cluster-logging-clo-status.adoc deleted file mode 100644 index 14b1b1d9edd4..000000000000 --- a/modules/cluster-logging-clo-status.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-cluster-status.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-clo-status_{context}"] -= Viewing the status of the Red Hat OpenShift Logging Operator - -You can view the status of your Red Hat OpenShift Logging Operator. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. To view the OpenShift Logging status: - -.. Get the OpenShift Logging status: -+ -[source,terminal] ----- -$ oc get clusterlogging instance -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -.... - -status: <1> - collection: - logs: - fluentdStatus: - daemonSet: fluentd <2> - nodes: - fluentd-2rhqp: ip-10-0-169-13.ec2.internal - fluentd-6fgjh: ip-10-0-165-244.ec2.internal - fluentd-6l2ff: ip-10-0-128-218.ec2.internal - fluentd-54nx5: ip-10-0-139-30.ec2.internal - fluentd-flpnn: ip-10-0-147-228.ec2.internal - fluentd-n2frh: ip-10-0-157-45.ec2.internal - pods: - failed: [] - notReady: [] - ready: - - fluentd-2rhqp - - fluentd-54nx5 - - fluentd-6fgjh - - fluentd-6l2ff - - fluentd-flpnn - - fluentd-n2frh - logstore: <3> - elasticsearchStatus: - - ShardAllocationEnabled: all - cluster: - activePrimaryShards: 5 - activeShards: 5 - initializingShards: 0 - numDataNodes: 1 - numNodes: 1 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterName: elasticsearch - nodeConditions: - elasticsearch-cdm-mkkdys93-1: - nodeCount: 1 - pods: - client: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - data: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c - master: - failed: - notReady: - ready: - - elasticsearch-cdm-mkkdys93-1-7f7c6-mjm7c -visualization: <4> - kibanaStatus: - - deployment: kibana - pods: - failed: [] - notReady: [] - ready: - - kibana-7fb4fd4cc9-f2nls - replicaSets: - - kibana-7fb4fd4cc9 - replicas: 1 ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> Information on the Fluentd pods. -<3> Information on the Elasticsearch pods, including Elasticsearch cluster health, `green`, `yellow`, or `red`. -<4> Information on the Kibana pods. - - -[id="cluster-logging-clo-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status.Nodes` section of the OpenShift Logging instance. - - -// https://github.com/openshift/elasticsearch-operator/pull/92 - -A status message similar to the following indicates a node has exceeded the configured low watermark and no shard will be allocated to this node: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. 
- reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-clientdatamaster-0-1 - upgradeStatus: {} ----- - -A status message similar to the following indicates a node has exceeded the configured high watermark and shards will be relocated to other nodes: - -.Example output -[source,yaml] ----- - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: cluster-logging-operator - upgradeStatus: {} ----- - -A status message similar to the following indicates the Elasticsearch node selector in the CR does not match any nodes in the cluster: - -.Example output -[source,text] ----- - Elasticsearch Status: - Shard Allocation Enabled: shard allocation unknown - Cluster: - Active Primary Shards: 0 - Active Shards: 0 - Initializing Shards: 0 - Num Data Nodes: 0 - Num Nodes: 0 - Pending Tasks: 0 - Relocating Shards: 0 - Status: cluster health unknown - Unassigned Shards: 0 - Cluster Name: elasticsearch - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: 0/5 nodes are available: 5 node(s) didn't match node selector. - Reason: Unschedulable - Status: True - Type: Unschedulable - elasticsearch-cdm-mkkdys93-2: - Node Count: 2 - Pods: - Client: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Data: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: - Master: - Failed: - Not Ready: - elasticsearch-cdm-mkkdys93-1-75dd69dccd-f7f49 - elasticsearch-cdm-mkkdys93-2-67c64f5f4c-n58vl - Ready: ----- - -A status message similar to the following indicates that the requested PVC could not bind to PV: - -.Example output -[source,text] ----- - Node Conditions: - elasticsearch-cdm-mkkdys93-1: - Last Transition Time: 2019-06-26T03:37:32Z - Message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - Reason: Unschedulable - Status: True - Type: Unschedulable ----- - -A status message similar to the following indicates that the Fluentd pods cannot be scheduled because the node selector did not match any nodes: - -.Example output -[source,yaml] ----- -Status: - Collection: - Logs: - Fluentd Status: - Daemon Set: fluentd - Nodes: - Pods: - Failed: - Not Ready: - Ready: ----- diff --git a/modules/cluster-logging-cloudwatch.adoc b/modules/cluster-logging-cloudwatch.adoc deleted file mode 100644 index 618bf932c5b8..000000000000 --- a/modules/cluster-logging-cloudwatch.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc -// -// This module uses conditionalized paragraphs so that the module -// can be re-used in associated products. - -:_content-type: CONCEPT -[id="cluster-logging-cloudwatch_{context}"] -= CloudWatch recommendation for {product-title} - -Red Hat recommends that you use the AWS CloudWatch solution for your logging needs. - -[id="cluster-logging-requirements-explained_{context}"] -== Logging requirements - -Hosting your own logging stack requires a large amount of compute resources and storage, which might be dependent on your cloud service quota. The compute resource requirements can start at 48 GB or more, while the storage requirement can be as large as 1600 GB or more. 
The logging stack runs on your worker nodes, which reduces your available workload resource. With these considerations, hosting your own logging stack increases your cluster operating costs. \ No newline at end of file diff --git a/modules/cluster-logging-collecting-storing-kubernetes-events.adoc b/modules/cluster-logging-collecting-storing-kubernetes-events.adoc deleted file mode 100644 index fb0a9dcb39b5..000000000000 --- a/modules/cluster-logging-collecting-storing-kubernetes-events.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-collecting-storing-kubernetes-events-about_{context}"] -= About collecting and storing Kubernetes events - -The {product-title} Event Router is a pod that watches Kubernetes events and logs them for collection by {product-title} Logging. You must manually deploy the Event Router. diff --git a/modules/cluster-logging-collector-alerts-viewing.adoc b/modules/cluster-logging-collector-alerts-viewing.adoc deleted file mode 100644 index 6d11df002d37..000000000000 --- a/modules/cluster-logging-collector-alerts-viewing.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-alerts-viewing_{context}"] -= Viewing logging collector alerts - -Alerts are shown in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console, -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}, -endif::[] -on the *Alerts* tab of the Alerting UI. Alerts are in one of the following states: - -* *Firing*. The alert condition is true for the duration of the timeout. Click the *Options* menu at the end of the firing alert to view more information or silence the alert. -* *Pending* The alert condition is currently true, but the timeout has not been reached. -* *Not Firing*. The alert is not currently triggered. - -.Procedure - -To view the {logging} and other {product-title} alerts: - -. In the {product-title} console, click *Observe* → *Alerting*. - -. Click the *Alerts* tab. The alerts are listed, based on the filters selected. diff --git a/modules/cluster-logging-collector-alerts.adoc b/modules/cluster-logging-collector-alerts.adoc deleted file mode 100644 index 453c2076b96c..000000000000 --- a/modules/cluster-logging-collector-alerts.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: CONCEPT -[id="cluster-logging-collector-alerts_{context}"] -= About logging collector alerts - -The following alerts are generated by the logging collector. You can view these alerts in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] -on the *Alerts* page of the Alerting UI. - -.Fluentd Prometheus alerts -[cols="2,2,2,1",options="header"] -|=== -|Alert |Message |Description |Severity - -|`FluentDHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is high, by default more than 10 in the previous 15 minutes. -|Warning - -|`FluentdNodeDown` -|`Prometheus could not scrape fluentd for more than 10m.` -|Fluentd is reporting that Prometheus could not scrape a specific Fluentd instance. 
-|Critical - -|`FluentdQueueLengthIncreasing` -|`In the last 12h, fluentd buffer queue length constantly increased more than 1. Current value is .` -|Fluentd is reporting that the queue size is increasing. -|Critical - -|`FluentDVeryHighErrorRate` -|` of records have resulted in an error by fluentd .` -|The number of FluentD output errors is very high, by default more than 25 in the previous 15 minutes. -|Critical - -|=== diff --git a/modules/cluster-logging-collector-collecting-ovn-logs.adoc b/modules/cluster-logging-collector-collecting-ovn-logs.adoc deleted file mode 100644 index 3558b5ee370a..000000000000 --- a/modules/cluster-logging-collector-collecting-ovn-logs.adoc +++ /dev/null @@ -1,89 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collecting-ovn-audit-logs_{context}"] -= Collecting OVN network policy audit logs - -You can collect the OVN network policy audit logs from the `/var/log/ovn/acl-audit-log.log` file on OVN-Kubernetes pods and forward them to logging servers. - -.Prerequisites - -* You are using {product-title} version 4.8 or later. -* You are using Cluster Logging 5.2 or later. -* You have already set up a `ClusterLogForwarder` custom resource (CR) object. -* The {product-title} cluster is configured for OVN-Kubernetes network policy audit logging. See the following "Additional resources" section. - -[NOTE] -==== -Often, logging servers that store audit data must meet organizational and governmental requirements for compliance and security. -==== - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object as described in other topics on forwarding logs to third-party systems. - -. In the YAML file, add the `audit` log type to the `inputRefs` element in a pipeline. For example: -+ -[source,yaml] ----- - pipelines: - - name: audit-logs - inputRefs: - - audit <1> - outputRefs: - - secure-logging-server <2> ----- -<1> Specify `audit` as one of the log types to input. -<2> Specify the output that connects to your logging server. - -. Recreate the updated CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Verification - -Verify that audit log entries from the nodes that you are monitoring are present among the log data gathered by the logging server. - -Find an original audit log entry in `/var/log/ovn/acl-audit-log.log` and compare it with the corresponding log entry on the logging server. - -For example, an original log entry in `/var/log/ovn/acl-audit-log.log` might look like this: - -[source,txt] ----- -2021-07-06T08:26:58.687Z|00004|acl_log(ovn_pinctrl0)|INFO|name="verify-audit- -logging_deny-all", verdict=drop, severity=alert: -icmp,vlan_tci=0x0000,dl_src=0a:58:0a:81:02:12,dl_dst=0a:58:0a:81:02:14,nw_src=10 -.129.2.18,nw_dst=10.129.2.20,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 ----- - -And the corresponding OVN audit log entry you find on the logging server might look like this: - -[source,json] ----- -{ - "@timestamp" : "2021-07-06T08:26:58..687000+00:00", - "hostname":"ip.abc.iternal", - "level":"info", - "message" : "2021-07-06T08:26:58.687Z|00004|acl_log(ovn_pinctrl0)|INFO|name=\"verify-audit-logging_deny-all\", verdict=drop, severity=alert: icmp,vlan_tci=0x0000,dl_src=0a:58:0a:81:02:12,dl_dst=0a:58:0a:81:02:14,nw_src=10.129.2.18,nw_dst=10.129.2.20,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0" -} ----- - -Where: - -* `@timestamp` is the timestamp of the log entry. -* `hostname` is the node from which the log originated. -* `level` is the log entry. 
-* `message` is the original audit log message. - -[NOTE] -==== -On an Elasticsearch server, look for log entries whose indices begin with `audit-00000`. -==== - -.Troubleshooting - -. Verify that your {product-title} cluster meets all the prerequisites. -. Verify that you have completed the procedure. -. Verify that the nodes generating OVN logs are enabled and have `/var/log/ovn/acl-audit-log.log` files. -. Check the Fluentd pod logs for issues. diff --git a/modules/cluster-logging-collector-limits.adoc b/modules/cluster-logging-collector-limits.adoc deleted file mode 100644 index 32de5dd04337..000000000000 --- a/modules/cluster-logging-collector-limits.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-limits_{context}"] -= Configure log collector CPU and memory limits - -The log collector allows for adjustments to both the CPU and memory limits. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - collection: - logs: - fluentd: - resources: - limits: <1> - memory: 736Mi - requests: - cpu: 100m - memory: 736Mi ----- -<1> Specify the CPU and memory limits and requests as needed. The values shown are the default values. diff --git a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-cloudwatch.adoc deleted file mode 100644 index 45e11be8b076..000000000000 --- a/modules/cluster-logging-collector-log-forward-cloudwatch.adoc +++ /dev/null @@ -1,290 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-cloudwatch_{context}"] -= Forwarding logs to Amazon CloudWatch - -You can forward logs to Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). You can forward logs to CloudWatch in addition to, or instead of, the default log store. - -To configure log forwarding to CloudWatch, you must create a `ClusterLogForwarder` custom resource (CR) with an output for CloudWatch, and a pipeline that uses the output. - -.Procedure - -. Create a `Secret` YAML file that uses the `aws_access_key_id` and `aws_secret_access_key` fields to specify your base64-encoded AWS credentials. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: cw-secret - namespace: openshift-logging -data: - aws_access_key_id: QUtJQUlPU0ZPRE5ON0VYQU1QTEUK - aws_secret_access_key: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQo= ----- - -. Create the secret. For example: -+ -[source,terminal] ----- -$ oc apply -f cw-secret.yaml ----- - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. In the file, specify the name of the secret. 
For example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: cw <3> - type: cloudwatch <4> - cloudwatch: - groupBy: logType <5> - groupPrefix: <6> - region: us-east-2 <7> - secret: - name: cw-secret <8> - pipelines: - - name: infra-logs <9> - inputRefs: <10> - - infrastructure - - audit - - application - outputRefs: - - cw <11> ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `cloudwatch` type. -<5> Optional: Specify how to group the logs: -+ -* `logType` creates log groups for each log type -* `namespaceName` creates a log group for each application name space. It also creates separate log groups for infrastructure and audit logs. -* `namespaceUUID` creates a new log groups for each application namespace UUID. It also creates separate log groups for infrastructure and audit logs. -<6> Optional: Specify a string to replace the default `infrastructureName` prefix in the names of the log groups. -<7> Specify the AWS region. -<8> Specify the name of the secret that contains your AWS credentials. -<9> Optional: Specify a name for the pipeline. -<10> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<11> Specify the name of the output to use when forwarding logs with this pipeline. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Example: Using ClusterLogForwarder with Amazon CloudWatch - -Here, you see an example `ClusterLogForwarder` custom resource (CR) and the log data that it outputs to Amazon CloudWatch. - -Suppose that you are running -ifndef::openshift-rosa[] -an {product-title} cluster -endif::[] -ifdef::openshift-rosa[] -a ROSA cluster -endif::[] -named `mycluster`. The following command returns the cluster's `infrastructureName`, which you will use to compose `aws` commands later on: - -[source,terminal] ----- -$ oc get Infrastructure/cluster -ojson | jq .status.infrastructureName -"mycluster-7977k" ----- - -To generate log data for this example, you run a `busybox` pod in a namespace called `app`. The `busybox` pod writes a message to stdout every three seconds: - -[source,terminal] ----- -$ oc run busybox --image=busybox -- sh -c 'while true; do echo "My life is my message"; sleep 3; done' -$ oc logs -f busybox -My life is my message -My life is my message -My life is my message -... ----- - -You can look up the UUID of the `app` namespace where the `busybox` pod runs: - -[source,terminal] ----- -$ oc get ns/app -ojson | jq .metadata.uid -"794e1e1a-b9f5-4958-a190-e76a9b53d7bf" ----- - -In your `ClusterLogForwarder` custom resource (CR), you configure the `infrastructure`, `audit`, and `application` log types as inputs to the `all-logs` pipeline. 
You also connect this pipeline to `cw` output, which forwards the logs to a CloudWatch instance in the `us-east-2` region: - -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: cw - type: cloudwatch - cloudwatch: - groupBy: logType - region: us-east-2 - secret: - name: cw-secret - pipelines: - - name: all-logs - inputRefs: - - infrastructure - - audit - - application - outputRefs: - - cw ----- - -Each region in CloudWatch contains three levels of objects: - -* log group -** log stream -*** log event - - -With `groupBy: logType` in the `ClusterLogForwarding` CR, the three log types in the `inputRefs` produce three log groups in Amazon Cloudwatch: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.application" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -Each of the log groups contains log streams: - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.application | jq .logStreams[].logStreamName -"kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log" ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.audit | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.k8s-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.linux-audit.log" -"ip-10-0-131-228.us-east-2.compute.internal.openshift-audit.log" -... ----- - -[source,terminal] ----- -$ aws --output json logs describe-log-streams --log-group-name mycluster-7977k.infrastructure | jq .logStreams[].logStreamName -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-69f9fd9b58-zqzw5_openshift-oauth-apiserver_oauth-apiserver-453c5c4ee026fe20a6139ba6b1cdd1bed25989c905bf5ac5ca211b7cbb5c3d7b.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-ce51532df7d4e4d5f21c4f4be05f6575b93196336be0027067fd7d93d70f66a4.log" -"ip-10-0-131-228.us-east-2.compute.internal.kubernetes.var.log.containers.apiserver-797774f7c5-lftrx_openshift-apiserver_openshift-apiserver-check-endpoints-82a9096b5931b5c3b1d6dc4b66113252da4a6472c9fff48623baee761911a9ef.log" -... ----- - -Each log stream contains log events. 
To see a log event from the `busybox` Pod, you specify its log stream from the `application` log group: - -[source,terminal] ----- -$ aws logs get-log-events --log-group-name mycluster-7977k.application --log-stream-name kubernetes.var.log.containers.busybox_app_busybox-da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76.log -{ - "events": [ - { - "timestamp": 1629422704178, - "message": "{\"docker\":{\"container_id\":\"da085893053e20beddd6747acdbaf98e77c37718f85a7f6a4facf09ca195ad76\"},\"kubernetes\":{\"container_name\":\"busybox\",\"namespace_name\":\"app\",\"pod_name\":\"busybox\",\"container_image\":\"docker.io/library/busybox:latest\",\"container_image_id\":\"docker.io/library/busybox@sha256:0f354ec1728d9ff32edcd7d1b8bbdfc798277ad36120dc3dc683be44524c8b60\",\"pod_id\":\"870be234-90a3-4258-b73f-4f4d6e2777c7\",\"host\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"labels\":{\"run\":\"busybox\"},\"master_url\":\"https://kubernetes.default.svc\",\"namespace_id\":\"794e1e1a-b9f5-4958-a190-e76a9b53d7bf\",\"namespace_labels\":{\"kubernetes_io/metadata_name\":\"app\"}},\"message\":\"My life is my message\",\"level\":\"unknown\",\"hostname\":\"ip-10-0-216-3.us-east-2.compute.internal\",\"pipeline_metadata\":{\"collector\":{\"ipaddr4\":\"10.0.216.3\",\"inputname\":\"fluent-plugin-systemd\",\"name\":\"fluentd\",\"received_at\":\"2021-08-20T01:25:08.085760+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-20T01:25:04.178986+00:00\",\"viaq_index_name\":\"app-write\",\"viaq_msg_id\":\"NWRjZmUyMWQtZjgzNC00MjI4LTk3MjMtNTk3NmY3ZjU4NDk1\",\"log_type\":\"application\",\"time\":\"2021-08-20T01:25:04+00:00\"}", - "ingestionTime": 1629422744016 - }, -... ----- - -.Example: Customizing the prefix in log group names - -In the log group names, you can replace the default `infrastructureName` prefix, `mycluster-7977k`, with an arbitrary string like `demo-group-prefix`. To make this change, you update the `groupPrefix` field in the `ClusterLogForwarding` CR: - -[source,yaml] ----- -cloudwatch: - groupBy: logType - groupPrefix: demo-group-prefix - region: us-east-2 ----- - -The value of `groupPrefix` replaces the default `infrastructureName` prefix: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"demo-group-prefix.application" -"demo-group-prefix.audit" -"demo-group-prefix.infrastructure" ----- - -.Example: Naming log groups after application namespace names - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the name of the application namespace. - -If you delete an application namespace object and create a new one that has the same name, CloudWatch continues using the same log group as before. - -If you consider successive application namespace objects that have the same name as equivalent to each other, use the approach described in this example. Otherwise, if you need to distinguish the resulting log groups from each other, see the following "Naming log groups for application namespace UUIDs" section instead. - -To create application log groups whose names are based on the names of the application namespaces, you set the value of the `groupBy` field to `namespaceName` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceName - region: us-east-2 ----- - -Setting `groupBy` to `namespaceName` affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. 
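
Pieced together with the earlier example, the complete output stanza might look like the following sketch, which reuses the `cw` output name and the `cw-secret` secret from that example:

[source,yaml]
----
spec:
  outputs:
   - name: cw
     type: cloudwatch
     cloudwatch:
       groupBy: namespaceName
       region: us-east-2
     secret:
       name: cw-secret
----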
- -In Amazon Cloudwatch, the namespace name appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.app` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.app" -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -If the cluster in this example had contained multiple application namespaces, the output would show multiple log groups, one for each namespace. - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. - -.Example: Naming log groups after application namespace UUIDs - -For each application namespace in your cluster, you can create a log group in CloudWatch whose name is based on the UUID of the application namespace. - -If you delete an application namespace object and create a new one, CloudWatch creates a new log group. - -If you consider successive application namespace objects with the same name as different from each other, use the approach described in this example. Otherwise, see the preceding "Example: Naming log groups for application namespace names" section instead. - -To name log groups after application namespace UUIDs, you set the value of the `groupBy` field to `namespaceUUID` in the `ClusterLogForwarder` CR: - -[source,terminal] ----- -cloudwatch: - groupBy: namespaceUUID - region: us-east-2 ----- - -In Amazon Cloudwatch, the namespace UUID appears at the end of each log group name. Because there is a single application namespace, "app", the following output shows a new `mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf` log group instead of `mycluster-7977k.application`: - -[source,terminal] ----- -$ aws --output json logs describe-log-groups | jq .logGroups[].logGroupName -"mycluster-7977k.794e1e1a-b9f5-4958-a190-e76a9b53d7bf" // uid of the "app" namespace -"mycluster-7977k.audit" -"mycluster-7977k.infrastructure" ----- - -The `groupBy` field affects the application log group only. It does not affect the `audit` and `infrastructure` log groups. \ No newline at end of file diff --git a/modules/cluster-logging-collector-log-forward-es.adoc b/modules/cluster-logging-collector-log-forward-es.adoc deleted file mode 100644 index 51ef169e9f85..000000000000 --- a/modules/cluster-logging-collector-log-forward-es.adoc +++ /dev/null @@ -1,139 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-es_{context}"] -= Forwarding logs to an external Elasticsearch instance - -You can optionally forward logs to an external Elasticsearch instance in addition to, or instead of, the internal {product-title} Elasticsearch instance. You are responsible for configuring the external log aggregator to receive log data from {product-title}. - -To configure log forwarding to an external Elasticsearch instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. The external Elasticsearch output can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -To forward logs to both an external and the internal Elasticsearch instance, create outputs and pipelines to the external instance and a pipeline that uses the `default` output to forward logs to the internal instance. You do not need to create a `default` output. 
If you do configure a `default` output, you receive an error message because the `default` output is reserved for the Red Hat OpenShift Logging Operator. - -[NOTE] -==== -If you want to forward logs to *only* the internal {product-title} Elasticsearch instance, you do not need to create a `ClusterLogForwarder` CR. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: elasticsearch-insecure <3> - type: "elasticsearch" <4> - url: http://elasticsearch.insecure.com:9200 <5> - - name: elasticsearch-secure - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 <6> - secret: - name: es-secret <7> - pipelines: - - name: application-logs <8> - inputRefs: <9> - - application - - audit - outputRefs: - - elasticsearch-secure <10> - - default <11> - parse: json <12> - labels: - myLabel: "myValue" <13> - - name: infrastructure-audit-logs <14> - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - labels: - logs: "audit-infra" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `elasticsearch` type. -<5> Specify the URL and port of the external Elasticsearch instance as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. -<6> For a secure connection, you can specify an `https` or `http` URL that you authenticate by specifying a `secret`. -<7> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. For more information, see the following "Example: Setting secret that contains a username and password." -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to send the logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. 
One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -.Example: Setting a secret that contains a username and password - -You can use a secret that contains a username and password to authenticate a secure connection to an external Elasticsearch instance. - -For example, if you cannot use mutual TLS (mTLS) keys because a third party operates the Elasticsearch instance, you can use HTTP or HTTPS and set a secret that contains the username and password. - -. Create a `Secret` YAML file similar to the following example. Use base64-encoded values for the `username` and `password` fields. The secret type is opaque by default. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: openshift-test-secret -data: - username: dGVzdHVzZXJuYW1lCg== - password: dGVzdHBhc3N3b3JkCg== ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret -n openshift-logging openshift-test-secret.yaml ----- - -. Specify the name of the secret in the `ClusterLogForwarder` CR: -+ -[source,yaml] ----- -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: openshift-test-secret ----- -+ -[NOTE] -==== -In the value of the `url` field, the prefix can be `http` or `https`. -==== - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-fluentd.adoc b/modules/cluster-logging-collector-log-forward-fluentd.adoc deleted file mode 100644 index cf1c1f6384b0..000000000000 --- a/modules/cluster-logging-collector-log-forward-fluentd.adoc +++ /dev/null @@ -1,98 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-fluentd_{context}"] -= Forwarding logs using the Fluentd forward protocol - -You can use the Fluentd *forward* protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator to receive the logs from {product-title}. - -To configure log forwarding using the *forward* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the Fluentd servers, and pipelines that use those outputs. The Fluentd output can use a TCP (insecure) or TLS (secure TCP) connection. - -[NOTE] -==== -Alternately, you can use a config map to forward logs using the *forward* protocols. However, this method is deprecated in {product-title} and will be removed in a future release. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - passphrase: phrase <7> - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - pipelines: - - name: forward-to-fluentd-secure <8> - inputRefs: <9> - - application - - audit - outputRefs: - - fluentd-server-secure <10> - - default <11> - parse: json <12> - labels: - clusterId: "C1234" <13> - - name: forward-to-fluentd-insecure <14> - inputRefs: - - infrastructure - outputRefs: - - fluentd-server-insecure - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `fluentdForward` type. -<5> Specify the URL and port of the external Fluentd instance as a valid absolute URL. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<7> Optional: Specify the password or passphrase that protects the private key file. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id="cluster-logging-collector-log-forward-nano-precision"] -== Enabling nanosecond precision for Logstash to ingest data from fluentd -For Logstash to ingest log data from fluentd, you must enable nanosecond precision in the Logstash configuration file. - -.Procedure -* In the Logstash configuration file, set `nanosecond_precision` to `true`. - -.Example Logstash configuration file -[source,terminal] -.... -input { tcp { codec => fluent { nanosecond_precision => true } port => 24114 } } -filter { } -output { stdout { codec => rubydebug } } -.... 
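
For comparison, the receiving Fluentd aggregator itself typically listens by using the *forward* input plugin. The following is a minimal sketch of a receiver configuration that assumes the default port `24224` and simply writes the received records to stdout; a production aggregator would add TLS settings and a real output:

[source,text]
----
# Accept records sent over the Fluentd forward protocol.
<source>
  @type forward
  port 24224
  bind 0.0.0.0
</source>

# Print every received record to stdout for verification.
<match **>
  @type stdout
</match>
----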
diff --git a/modules/cluster-logging-collector-log-forward-gcp.adoc b/modules/cluster-logging-collector-log-forward-gcp.adoc deleted file mode 100644 index 9dfe1b9a7689..000000000000 --- a/modules/cluster-logging-collector-log-forward-gcp.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-external.adoc -// - -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-gcp_{context}"] -= Forwarding logs to Google Cloud Platform (GCP) - -You can forward logs to link:https://cloud.google.com/logging/docs/basic-concepts[Google Cloud Logging] in addition to, or instead of, the internal default {product-title} log store. - -[NOTE] -==== -Using this feature with Fluentd is not supported. -==== - -.Prerequisites -* {logging-title-uc} Operator 5.5.1 and later - -.Procedure - -. Create a secret using your link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys[Google service account key]. -+ -[source,terminal,subs="+quotes"] ----- -$ oc -n openshift-logging create secret generic gcp-secret --from-file google-application-credentials.json=__ ----- -. Create a `ClusterLogForwarder` Custom Resource YAML using the template below: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - outputs: - - name: gcp-1 - type: googleCloudLogging - secret: - name: gcp-secret - googleCloudLogging: - projectId : "openshift-gce-devel" <1> - logId : "app-gcp" <2> - pipelines: - - name: test-app - inputRefs: <3> - - application - outputRefs: - - gcp-1 ----- -<1> Set either a `projectId`, `folderId`, `organizationId`, or `billingAccountId` field and its corresponding value, depending on where you want to store your logs in the link:https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy[GCP resource hierarchy]. -<2> Set the value to add to the `logName` field of the link:https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry[Log Entry]. -<3> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. - -[role="_additional-resources"] -.Additional resources -* link:https://cloud.google.com/billing/docs/concepts[Google Cloud Billing Documentation] -* link:https://cloud.google.com/logging/docs/view/logging-query-language[Google Cloud Logging Query Language Documentation] diff --git a/modules/cluster-logging-collector-log-forward-kafka.adoc b/modules/cluster-logging-collector-log-forward-kafka.adoc deleted file mode 100644 index a6a611cf2747..000000000000 --- a/modules/cluster-logging-collector-log-forward-kafka.adoc +++ /dev/null @@ -1,105 +0,0 @@ -[id="cluster-logging-collector-log-forward-kafka_{context}"] -= Forwarding logs to a Kafka broker - -You can forward logs to an external Kafka broker in addition to, or instead of, the default Elasticsearch log store. - -To configure log forwarding to an external Kafka instance, you must create a `ClusterLogForwarder` custom resource (CR) with an output to that instance, and a pipeline that uses the output. You can include a specific Kafka topic in the output or use the default. The Kafka output can use a TCP (insecure) or TLS (secure TCP) connection. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: app-logs <3> - type: kafka <4> - url: tls://kafka.example.devlab.com:9093/app-topic <5> - secret: - name: kafka-secret <6> - - name: infra-logs - type: kafka - url: tcp://kafka.devlab2.example.com:9093/infra-topic <7> - - name: audit-logs - type: kafka - url: tls://kafka.qelab.example.com:9093/audit-topic - secret: - name: kafka-secret-qe - pipelines: - - name: app-topic <8> - inputRefs: <9> - - application - outputRefs: <10> - - app-logs - parse: json <11> - labels: - logType: "application" <12> - - name: infra-topic <13> - inputRefs: - - infrastructure - outputRefs: - - infra-logs - labels: - logType: "infra" - - name: audit-topic - inputRefs: - - audit - outputRefs: - - audit-logs - - default <14> - labels: - logType: "audit" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `kafka` type. -<5> Specify the URL and port of the Kafka broker as a valid absolute URL, optionally with a specific topic. You can use the `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<7> Optional: To send an insecure output, use a `tcp` prefix in front of the URL. Also omit the `secret` key and its `name` from this output. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<12> Optional: String. One or more labels to add to the logs. -<13> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. -<14> Optional: Specify `default` to forward logs to the internal Elasticsearch instance. - -. Optional: To forward a single output to multiple Kafka brokers, specify an array of Kafka brokers as shown in this example: -+ -[source,yaml] ----- -... -spec: - outputs: - - name: app-logs - type: kafka - secret: - name: kafka-secret-dev - kafka: <1> - brokers: <2> - - tls://kafka-broker1.example.com:9093/ - - tls://kafka-broker2.example.com:9093/ - topic: app-topic <3> -... ----- -<1> Specify a `kafka` key that has a `brokers` and `topic` key. 
-<2> Use the `brokers` key to specify an array of one or more brokers. -<3> Use the `topic` key to specify the target topic that will receive the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc b/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc deleted file mode 100644 index d12fbfe96cd8..000000000000 --- a/modules/cluster-logging-collector-log-forward-logs-from-application-pods.adoc +++ /dev/null @@ -1,73 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-logs-from-application-pods_{context}"] -= Forwarding application logs from specific pods - -As a cluster administrator, you can use Kubernetes pod labels to gather log data from specific pods and forward it to a log collector. - -Suppose that you have an application composed of pods running alongside other pods in various namespaces. If those pods have labels that identify the application, you can gather and output their log data to a specific log collector. - -To specify the pod labels, you use one or more `matchLabels` key-value pairs. If you specify multiple key-value pairs, the pods must match all of them to be selected. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object. In the file, specify the pod labels using simple equality-based selectors under `inputs[].name.application.selector.matchLabels`, as shown in the following example. -+ -.Example `ClusterLogForwarder` CR YAML file -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - pipelines: - - inputRefs: [ myAppLogData ] <3> - outputRefs: [ default ] <4> - parse: json <5> - inputs: <6> - - name: myAppLogData - application: - selector: - matchLabels: <7> - environment: production - app: nginx - namespaces: <8> - - app1 - - app2 - outputs: <9> - - default - ... ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify one or more comma-separated values from `inputs[].name`. -<4> Specify one or more comma-separated values from `outputs[]`. -<5> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<6> Define a unique `inputs[].name` for each application that has a unique set of pod labels. -<7> Specify the key-value pairs of pod labels whose log data you want to gather. You must specify both a key and value, not just a key. To be selected, the pods must match all the key-value pairs. -<8> Optional: Specify one or more namespaces. -<9> Specify one or more outputs to forward your log data to. The optional `default` output shown here sends log data to the internal Elasticsearch instance. - -. Optional: To restrict the gathering of log data to specific namespaces, use `inputs[].name.application.namespaces`, as shown in the preceding example. - -. Optional: You can send log data from additional applications that have different pod labels to the same pipeline. -.. For each unique combination of pod labels, create an additional `inputs[].name` section similar to the one shown. -.. 
Update the `selectors` to match the pod labels of this application. -.. Add the new `inputs[].name` value to `inputRefs`. For example: -+ ----- -- inputRefs: [ myAppLogData, myOtherAppLogData ] ----- - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[role="_additional-resources"] -.Additional resources - -* For more information on `matchLabels` in Kubernetes, see link:https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements[Resources that support set-based requirements]. diff --git a/modules/cluster-logging-collector-log-forward-loki.adoc b/modules/cluster-logging-collector-log-forward-loki.adoc deleted file mode 100644 index 82845208ad83..000000000000 --- a/modules/cluster-logging-collector-log-forward-loki.adoc +++ /dev/null @@ -1,72 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-loki_{context}"] -= Forwarding logs to Loki - -You can forward logs to an external Loki logging system in addition to, or instead of, the internal default {product-title} Elasticsearch instance. - -To configure log forwarding to Loki, you must create a `ClusterLogForwarder` custom resource (CR) with an output to Loki, and a pipeline that uses the output. The output to Loki can use the HTTP (insecure) or HTTPS (secure HTTP) connection. - -.Prerequisites - -* You must have a Loki logging system running at the URL you specify with the `url` field in the CR. - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: ClusterLogForwarder - metadata: - name: instance <1> - namespace: openshift-logging <2> - spec: - outputs: - - name: loki-insecure <3> - type: "loki" <4> - url: http://loki.insecure.com:3100 <5> - loki: - tenantKey: kubernetes.namespace_name - labelKeys: kubernetes.labels.foo - - name: loki-secure <6> - type: "loki" - url: https://loki.secure.com:3100 - secret: - name: loki-secret <7> - loki: - tenantKey: kubernetes.namespace_name <8> - labelKeys: kubernetes.labels.foo <9> - pipelines: - - name: application-logs <10> - inputRefs: <11> - - application - - audit - outputRefs: <12> - - loki-secure ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the type as `"loki"`. -<5> Specify the URL and port of the Loki system as a valid absolute URL. You can use the `http` (insecure) or `https` (secure HTTP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP Address. Loki's default port for HTTP(S) communication is 3100. -<6> For a secure connection, you can specify an `https` or `http` URL that you authenticate by specifying a `secret`. -<7> For an `https` prefix, specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. Otherwise, for `http` and `https` prefixes, you can specify a secret that contains a username and password. For more information, see the following "Example: Setting secret that contains a username and password." -<8> Optional: Specify a meta-data key field to generate values for the `TenantID` field in Loki. 
For example, setting `tenantKey: kubernetes.namespace_name` uses the names of the Kubernetes namespaces as values for tenant IDs in Loki. To see which other log record fields you can specify, see the "Log Record Fields" link in the following "Additional resources" section. -<9> Optional: Specify a list of meta-data field keys to replace the default Loki labels. Loki label names must match the regular expression `[a-zA-Z_:][a-zA-Z0-9_:]*`. Illegal characters in meta-data keys are replaced with `_` to form the label name. For example, the `kubernetes.labels.foo` meta-data key becomes Loki label `kubernetes_labels_foo`. If you do not set `labelKeys`, the default value is: `[log_type, kubernetes.namespace_name, kubernetes.pod_name, kubernetes_host]`. Keep the set of labels small because Loki limits the size and number of labels allowed. See link:https://grafana.com/docs/loki/latest/configuration/#limits_config[Configuring Loki, limits_config]. You can still query based on any log record field using query filters. -<10> Optional: Specify a name for the pipeline. -<11> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<12> Specify the name of the output to use when forwarding logs with this pipeline. -+ -[NOTE] -==== -Because Loki requires log streams to be correctly ordered by timestamp, `labelKeys` always includes the `kubernetes_host` label set, even if you do not specify it. This inclusion ensures that each stream originates from a single host, which prevents timestamps from becoming disordered due to clock differences on different hosts. -==== - - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-project.adoc b/modules/cluster-logging-collector-log-forward-project.adoc deleted file mode 100644 index 19f8880edb5b..000000000000 --- a/modules/cluster-logging-collector-log-forward-project.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-project_{context}"] -= Forwarding application logs from specific projects - -You can use the Cluster Log Forwarder to send a copy of the application logs from specific projects to an external log aggregator. You can do this in addition to, or instead of, using the default Elasticsearch log store. You must also configure the external log aggregator to receive log data from {product-title}. - -To configure forwarding application logs from a project, you must create a `ClusterLogForwarder` custom resource (CR) with at least one input from a project, optional outputs for other log aggregators, and pipelines that use those inputs and outputs. - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: fluentd-server-secure <3> - type: fluentdForward <4> - url: 'tls://fluentdserver.security.example.com:24224' <5> - secret: <6> - name: fluentd-secret - - name: fluentd-server-insecure - type: fluentdForward - url: 'tcp://fluentdserver.home.example.com:24224' - inputs: <7> - - name: my-app-logs - application: - namespaces: - - my-project - pipelines: - - name: forward-to-fluentd-insecure <8> - inputRefs: <9> - - my-app-logs - outputRefs: <10> - - fluentd-server-insecure - parse: json <11> - labels: - project: "my-project" <12> - - name: forward-to-fluentd-secure <13> - inputRefs: - - application - - audit - - infrastructure - outputRefs: - - fluentd-server-secure - - default - labels: - clusterId: "C1234" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the output type: `elasticsearch`, `fluentdForward`, `syslog`, or `kafka`. -<5> Specify the URL and port of the external log aggregator as a valid absolute URL. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<6> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project and have *tls.crt*, *tls.key*, and *ca-bundle.crt* keys that each point to the certificates they represent. -<7> Configuration for an input to filter application logs from the specified projects. -<8> Configuration for a pipeline to use the input to send project application logs to an external Fluentd instance. -<9> The `my-app-logs` input. -<10> The name of the output to use. -<11> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<12> Optional: String. One or more labels to add to the logs. -<13> Configuration for a pipeline to send logs to other log aggregators. -** Optional: Specify a name for the pipeline. -** Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** Specify the name of the output to use when forwarding logs with this pipeline. -** Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -** Optional: String. One or more labels to add to the logs. - -. 
Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- diff --git a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc b/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc deleted file mode 100644 index fc8b131c687a..000000000000 --- a/modules/cluster-logging-collector-log-forward-secret-cloudwatch.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-external.adoc -// - -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-secret-cloudwatch_{context}"] -== Creating a secret for AWS CloudWatch with an existing AWS role -If you have an existing role for AWS, you can create a secret for AWS with STS using the `oc create secret --from-literal` command. - -.Procedure - -* In the CLI, enter the following to generate a secret for AWS: -+ -[source,terminal] ----- -$ oc create secret generic cw-sts-secret -n openshift-logging --from-literal=role_arn=arn:aws:iam::123456789012:role/my-role_with-permissions ----- -+ -.Example Secret -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: openshift-logging - name: my-secret-name -stringData: - role_arn: arn:aws:iam::123456789012:role/my-role_with-permissions ----- diff --git a/modules/cluster-logging-collector-log-forward-syslog.adoc b/modules/cluster-logging-collector-log-forward-syslog.adoc deleted file mode 100644 index d5a72e9c2ef6..000000000000 --- a/modules/cluster-logging-collector-log-forward-syslog.adoc +++ /dev/null @@ -1,194 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-collector-log-forward-syslog_{context}"] -= Forwarding logs using the syslog protocol - -You can use the *syslog* link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocol to send a copy of your logs to an external log aggregator that is configured to accept the protocol instead of, or in addition to, the default Elasticsearch log store. You are responsible for configuring the external log aggregator, such as a syslog server, to receive the logs from {product-title}. - -To configure log forwarding using the *syslog* protocol, you must create a `ClusterLogForwarder` custom resource (CR) with one or more outputs to the syslog servers, and pipelines that use those outputs. The syslog output can use a UDP, TCP, or TLS connection. - -//SME-Feedback-Req: Is the below note accurate? -[NOTE] -==== -Alternately, you can use a config map to forward logs using the *syslog* RFC3164 protocols. However, this method is deprecated in {product-title} and will be removed in a future release. -==== - -.Prerequisites - -* You must have a logging server that is configured to receive the logging data using the specified protocol or format. - -.Procedure - -. 
Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: rsyslog-east <3> - type: syslog <4> - syslog: <5> - facility: local0 - rfc: RFC3164 - payloadKey: message - severity: informational - url: 'tls://rsyslogserver.east.example.com:514' <6> - secret: <7> - name: syslog-secret - - name: rsyslog-west - type: syslog - syslog: - appName: myapp - facility: user - msgID: mymsg - procID: myproc - rfc: RFC5424 - severity: debug - url: 'udp://rsyslogserver.west.example.com:514' - pipelines: - - name: syslog-east <8> - inputRefs: <9> - - audit - - application - outputRefs: <10> - - rsyslog-east - - default <11> - parse: json <12> - labels: - secure: "true" <13> - syslog: "east" - - name: syslog-west <14> - inputRefs: - - infrastructure - outputRefs: - - rsyslog-west - - default - labels: - syslog: "west" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the `syslog` type. -<5> Optional: Specify the syslog parameters, listed below. -<6> Specify the URL and port of the external syslog instance. You can use the `udp` (insecure), `tcp` (insecure) or `tls` (secure TCP) protocol. If the cluster-wide proxy using the CIDR annotation is enabled, the output must be a server name or FQDN, not an IP address. -<7> If using a `tls` prefix, you must specify the name of the secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project, and must have keys of: *tls.crt*, *tls.key*, and *ca-bundle.crt* that point to the respective certificates that they represent. -<8> Optional: Specify a name for the pipeline. -<9> Specify which log types to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -<10> Specify the name of the output to use when forwarding logs with this pipeline. -<11> Optional: Specify the `default` output to forward logs to the internal Elasticsearch instance. -<12> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<13> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<14> Optional: Configure multiple outputs to forward logs to other external log aggregators of any supported type: -** A name to describe the pipeline. -** The `inputRefs` is the log type to forward by using the pipeline: `application,` `infrastructure`, or `audit`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-log-source] -== Adding log source information to message output - -You can add `namespace_name`, `pod_name`, and `container_name` elements to the `message` field of the record by adding the `AddLogSource` field to your `ClusterLogForwarder` custom resource (CR). 
- -[source,yaml] ----- - spec: - outputs: - - name: syslogout - syslog: - addLogSource: true - facility: user - payloadKey: message - rfc: RFC3164 - severity: debug - tag: mytag - type: syslog - url: tls://syslog-receiver.openshift-logging.svc:24224 - pipelines: - - inputRefs: - - application - name: test-app - outputRefs: - - syslogout ----- - -[NOTE] -==== -This configuration is compatible with both RFC3164 and RFC5424. -==== - -.Example syslog message output without `AddLogSource` -[source, text] ----- -<15>1 2020-11-15T17:06:14+00:00 fluentd-9hkb4 mytag - - - {"msgcontent"=>"Message Contents", "timestamp"=>"2020-11-15 17:06:09", "tag_key"=>"rec_tag", "index"=>56} ----- - -.Example syslog message output with `AddLogSource` - -[source, text] ----- -<15>1 2020-11-16T10:49:37+00:00 crc-j55b9-master-0 mytag - - - namespace_name=clo-test-6327,pod_name=log-generator-ff9746c49-qxm7l,container_name=log-generator,message={"msgcontent":"My life is my message", "timestamp":"2020-11-16 10:49:36", "tag_key":"rec_tag", "index":76} ----- - -[id=cluster-logging-collector-log-forward-examples-syslog-parms] -== Syslog parameters - -You can configure the following for the `syslog` outputs. For more information, see the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] RFC. - -* facility: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog facility]. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `kern` for kernel messages -** `1` or `user` for user-level messages, the default. -** `2` or `mail` for the mail system -** `3` or `daemon` for system daemons -** `4` or `auth` for security/authentication messages -** `5` or `syslog` for messages generated internally by syslogd -** `6` or `lpr` for the line printer subsystem -** `7` or `news` for the network news subsystem -** `8` or `uucp` for the UUCP subsystem -** `9` or `cron` for the clock daemon -** `10` or `authpriv` for security authentication messages -** `11` or `ftp` for the FTP daemon -** `12` or `ntp` for the NTP subsystem -** `13` or `security` for the syslog audit log -** `14` or `console` for the syslog alert log -** `15` or `solaris-cron` for the scheduling daemon -** `16`–`23` or `local0` – `local7` for locally used facilities -* Optional: `payloadKey`: The record field to use as payload for the syslog message. -+ -[NOTE] -==== -Configuring the `payloadKey` parameter prevents other parameters from being forwarded to the syslog. -==== -+ -* rfc: The RFC to be used for sending logs using syslog. The default is RFC5424. -* severity: The link:https://tools.ietf.org/html/rfc5424#section-6.2.1[syslog severity] to set on outgoing syslog records. The value can be a decimal integer or a case-insensitive keyword: -** `0` or `Emergency` for messages indicating the system is unusable -** `1` or `Alert` for messages indicating action must be taken immediately -** `2` or `Critical` for messages indicating critical conditions -** `3` or `Error` for messages indicating error conditions -** `4` or `Warning` for messages indicating warning conditions -** `5` or `Notice` for messages indicating normal but significant conditions -** `6` or `Informational` for messages indicating informational messages -** `7` or `Debug` for messages indicating debug-level messages, the default -* tag: Tag specifies a record field to use as a tag on the syslog message. -* trimPrefix: Remove the specified prefix from the tag. 
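As an illustration only, the following minimal sketch shows how several of the preceding parameters might be combined in a single syslog output and pipeline. It mirrors the structure of the `rsyslog-east` example earlier in this module; the output name, pipeline name, and server host are placeholders, and the keyword values for `facility` and `severity` could equally be written in their decimal forms (`16` and `6`).

[source,yaml]
----
spec:
  outputs:
  - name: rsyslog-example            # placeholder output name
    type: syslog
    syslog:
      facility: local0               # keyword form; decimal 16 is equivalent
      severity: informational        # keyword form; decimal 6 is equivalent
      rfc: RFC3164                   # RFC5424 is the default if omitted
    url: 'udp://rsyslogserver.example.com:514'
  pipelines:
  - name: syslog-example             # placeholder pipeline name
    inputRefs:
    - infrastructure
    outputRefs:
    - rsyslog-example
----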
- -[id=cluster-logging-collector-log-forward-examples-syslog-5424] -== Additional RFC5424 syslog parameters - -The following parameters apply to RFC5424: - -* appName: The APP-NAME is a free-text string that identifies the application that sent the log. Must be specified for `RFC5424`. -* msgID: The MSGID is a free-text string that identifies the type of message. Must be specified for `RFC5424`. -* procID: The PROCID is a free-text string. A change in the value indicates a discontinuity in syslog reporting. Must be specified for `RFC5424`. diff --git a/modules/cluster-logging-collector-log-forwarding-about.adoc b/modules/cluster-logging-collector-log-forwarding-about.adoc deleted file mode 100644 index 41d98d4df978..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-about.adoc +++ /dev/null @@ -1,197 +0,0 @@ -:_content-type: CONCEPT -[id="cluster-logging-collector-log-forwarding-about_{context}"] -= About forwarding logs to third-party systems - -To send logs to specific endpoints inside and outside your {product-title} cluster, you specify a combination of _outputs_ and _pipelines_ in a `ClusterLogForwarder` custom resource (CR). You can also use _inputs_ to forward the application logs associated with a specific project to an endpoint. Authentication is provided by a Kubernetes _Secret_ object. - -_output_:: The destination for log data that you define, or where you want the logs sent. An output can be one of the following types: -+ --- -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `fluentdForward`. An external log aggregation solution that supports Fluentd. This option uses the Fluentd *forward* protocols. The `fluentForward` output can use a TCP or TLS connection and supports shared-key authentication by providing a *shared_key* field in a secret. Shared-key authentication can be used with or without TLS. - -* `syslog`. An external log aggregation solution that supports the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocols. The `syslog` output can use a UDP, TCP, or TLS connection. - -* `cloudwatch`. Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). - -* `loki`. Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. - -* `kafka`. A Kafka broker. The `kafka` output can use a TCP or TLS connection. - -* `default`. The internal {product-title} Elasticsearch instance. You are not required to configure the default output. If you do configure a `default` output, you receive an error message because the `default` output is reserved for the Red Hat OpenShift Logging Operator. --- -+ -_pipeline_:: Defines simple routing from one log type to one or more outputs, or which logs you want to send. The log types are one of the following: -+ --- -* `application`. Container logs generated by user applications running in the cluster, except infrastructure container applications. - -* `infrastructure`. Container logs from pods that run in the `openshift*`, `kube*`, or `default` projects and journal logs sourced from node file system. - -* `audit`. Audit logs generated by the node audit system, `auditd`, Kubernetes API server, OpenShift API server, and OVN network. --- -+ -You can add labels to outbound log messages by using `key:value` pairs in the pipeline. For example, you might add a label to messages that are forwarded to other data centers or label the logs by type. 
Labels that are added to objects are also forwarded with the log message. - -_input_:: Forwards the application logs associated with a specific project to a pipeline. -+ --- -In the pipeline, you define which log types to forward using an `inputRef` parameter and where to forward the logs to using an `outputRef` parameter. --- -+ - -_Secret_:: A `key:value map` that contains confidential data such as user credentials. - -Note the following: - -* If a `ClusterLogForwarder` CR object exists, logs are not forwarded to the default Elasticsearch instance, unless there is a pipeline with the `default` output. - -* By default, the {logging} sends container and infrastructure logs to the default internal Elasticsearch log store defined in the `ClusterLogging` custom resource. However, it does not send audit logs to the internal store because it does not provide secure storage. If this default configuration meets your needs, do not configure the Log Forwarding API. - -* If you do not define a pipeline for a log type, the logs of the undefined types are dropped. For example, if you specify a pipeline for the `application` and `audit` types, but do not specify a pipeline for the `infrastructure` type, `infrastructure` logs are dropped. - -* You can use multiple types of outputs in the `ClusterLogForwarder` custom resource (CR) to send logs to servers that support different protocols. - -* The internal {product-title} Elasticsearch instance does not provide secure storage for audit logs. We recommend you ensure that the system to which you forward audit logs is compliant with your organizational and governmental regulations and is properly secured. The {logging} does not comply with those regulations. - -The following example forwards the audit logs to a secure external Elasticsearch instance, the infrastructure logs to an insecure external Elasticsearch instance, the application logs to a Kafka broker, and the application logs from the `my-apps-logs` project to the internal Elasticsearch instance. - -.Sample log forwarding outputs and pipelines -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance <1> - namespace: openshift-logging <2> -spec: - outputs: - - name: elasticsearch-secure <3> - type: "elasticsearch" - url: https://elasticsearch.secure.com:9200 - secret: - name: elasticsearch - - name: elasticsearch-insecure <4> - type: "elasticsearch" - url: http://elasticsearch.insecure.com:9200 - - name: kafka-app <5> - type: "kafka" - url: tls://kafka.secure.com:9093/app-topic - inputs: <6> - - name: my-app-logs - application: - namespaces: - - my-project - pipelines: - - name: audit-logs <7> - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default - parse: json <8> - labels: - secure: "true" <9> - datacenter: "east" - - name: infrastructure-logs <10> - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - labels: - datacenter: "west" - - name: my-app <11> - inputRefs: - - my-app-logs - outputRefs: - - default - - inputRefs: <12> - - application - outputRefs: - - kafka-app - labels: - datacenter: "south" ----- -<1> The name of the `ClusterLogForwarder` CR must be `instance`. -<2> The namespace for the `ClusterLogForwarder` CR must be `openshift-logging`. -<3> Configuration for an secure Elasticsearch output using a secret with a secure URL. -** A name to describe the output. -** The type of output: `elasticsearch`. 
-** The secure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix. -** The secret required by the endpoint for TLS communication. The secret must exist in the `openshift-logging` project. -<4> Configuration for an insecure Elasticsearch output: -** A name to describe the output. -** The type of output: `elasticsearch`. -** The insecure URL and port of the Elasticsearch instance as a valid absolute URL, including the prefix. -<5> Configuration for a Kafka output using a client-authenticated TLS communication over a secure URL -** A name to describe the output. -** The type of output: `kafka`. -** Specify the URL and port of the Kafka broker as a valid absolute URL, including the prefix. -<6> Configuration for an input to filter application logs from the `my-project` namespace. -<7> Configuration for a pipeline to send audit logs to the secure external Elasticsearch instance: -** A name to describe the pipeline. -** The `inputRefs` is the log type, in this example `audit`. -** The `outputRefs` is the name of the output to use, in this example `elasticsearch-secure` to forward to the secure Elasticsearch instance and `default` to forward to the internal Elasticsearch instance. -** Optional: Labels to add to the logs. -<8> Optional: Specify whether to forward structured JSON log entries as JSON objects in the `structured` field. The log entry must contain valid structured JSON; otherwise, OpenShift Logging removes the `structured` field and instead sends the log entry to the default index, `app-00000x`. -<9> Optional: String. One or more labels to add to the logs. Quote values like "true" so they are recognized as string values, not as a boolean. -<10> Configuration for a pipeline to send infrastructure logs to the insecure external Elasticsearch instance. -<11> Configuration for a pipeline to send logs from the `my-project` project to the internal Elasticsearch instance. -** A name to describe the pipeline. -** The `inputRefs` is a specific input: `my-app-logs`. -** The `outputRefs` is `default`. -** Optional: String. One or more labels to add to the logs. -<12> Configuration for a pipeline to send logs to the Kafka broker, with no pipeline name: -** The `inputRefs` is the log type, in this example `application`. -** The `outputRefs` is the name of the output to use. -** Optional: String. One or more labels to add to the logs. - -[discrete] -[id="cluster-logging-external-fluentd_{context}"] -== Fluentd log handling when the external log aggregator is unavailable - -If your external logging aggregator becomes unavailable and cannot receive logs, Fluentd continues to collect logs and stores them in a buffer. When the log aggregator becomes available, log forwarding resumes, including the buffered logs. If the buffer fills completely, Fluentd stops collecting logs. {product-title} rotates the logs and deletes them. You cannot adjust the buffer size or add a persistent volume claim (PVC) to the Fluentd daemon set or pods. - -[discrete] -== Supported Authorization Keys -Common key types are provided here. Some output types support additional specialized keys, documented with the output-specific configuration field. All secret keys are optional. Enable the security features you want by setting the relevant keys. You are responsible for creating and maintaining any additional configurations that external destinations might require, such as keys and secrets, service accounts, port openings, or global proxy configuration. 
OpenShift Logging will not attempt to verify a mismatch between authorization combinations. - -Transport Layer Security (TLS):: Using a TLS URL ('https://...' or 'ssl://...') without a Secret enables basic TLS server-side authentication. Additional TLS features are enabled by including a Secret and setting the following optional fields: - -* `tls.crt`: (string) File name containing a client certificate. Enables mutual authentication. Requires `tls.key`. - -* `tls.key`: (string) File name containing the private key to unlock the client certificate. Requires `tls.crt`. - -* `passphrase`: (string) Passphrase to decode an encoded TLS private key. Requires `tls.key`. - -* `ca-bundle.crt`: (string) File name of a customer CA for server authentication. - -Username and Password:: -* `username`: (string) Authentication user name. Requires `password`. -* `password`: (string) Authentication password. Requires `username`. - -Simple Authentication Security Layer (SASL):: -* `sasl.enable`: (boolean) Explicitly enable or disable SASL. -If missing, SASL is automatically enabled when any of the other `sasl.` keys are set. -* `sasl.mechanisms`: (array) List of allowed SASL mechanism names. -If missing or empty, the system defaults are used. -* `sasl.allow-insecure`: (boolean) Allow mechanisms that send clear-text passwords. Defaults to false. - -== Creating a Secret - -You can create a secret in the directory that contains your certificate and key files by using the following command: -[subs="+quotes"] ----- -$ oc create secret generic -n openshift-logging \ - --from-file=tls.key= - --from-file=tls.crt= - --from-file=ca-bundle.crt= - --from-literal=username= - --from-literal=password= ----- - -[NOTE] -==== -Generic or opaque secrets are recommended for best results. -==== diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc deleted file mode 100644 index 39c1385383fa..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-1.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-1_{context}"] - -= Supported log data output types in OpenShift Logging 5.1 - -Red Hat OpenShift Logging 5.1 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range of target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.1 - -Elasticsearch 6.8.4 - -Elasticsearch 7.12.2 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| kafka -| kafka 0.11 -a| kafka 2.4.1 - -kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note to tech writer, validate these items against the corresponding line of the test configuration file that Red Hat OpenShift Logging 5.0 uses: https://github.com/openshift/origin-aggregated-logging/blob/release-5.0/fluentd/Gemfile.lock -// This file is the authoritative source of information about which items and versions Red Hat tests and supports.
-// According to this link:https://github.com/zendesk/ruby-kafka#compatibility[Zendesk compatibility list for ruby-kafka], the fluent-plugin-kafka plugin supports Kafka version 0.11. -// Logstash support is according to https://github.com/openshift/cluster-logging-operator/blob/master/test/functional/outputs/forward_to_logstash_test.go#L37 - -[NOTE] -==== -Previously, the syslog output supported only RFC-3164. The current syslog output adds support for RFC-5424. -==== - -//ENG-Feedback: How can we reformat this to accurately reflect 5.4? diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc deleted file mode 100644 index aec5cc9350b6..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-2.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-2_{context}"] - -= Supported log data output types in OpenShift Logging 5.2 - -Red Hat OpenShift Logging 5.2 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.1 - -Elasticsearch 6.8.4 - -Elasticsearch 7.12.2 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.3.0 deployed on OCP and Grafana labs - -| kafka -| kafka 0.11 -a| kafka 2.4.1 - -kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note to tech writer, validate these items against the corresponding line of the test configuration file that Red Hat OpenShift Logging 5.0 uses: https://github.com/openshift/origin-aggregated-logging/blob/release-5.0/fluentd/Gemfile.lock -// This file is the authoritative source of information about which items and versions Red Hat tests and supports. -// According to this link:https://github.com/zendesk/ruby-kafka#compatibility[Zendesk compatibility list for ruby-kafka], the fluent-plugin-kafka plugin supports Kafka version 0.11. -// Logstash support is according to https://github.com/openshift/cluster-logging-operator/blob/master/test/functional/outputs/forward_to_logstash_test.go#L37 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc deleted file mode 100644 index 72ec71c764b7..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-3.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-3_{context}"] - -= Supported log data output types in OpenShift Logging 5.3 - -Red Hat OpenShift Logging 5.3 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. 
However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.7.4 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.2.1 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_elasticsearch_index_test.go#L17 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.3/Makefile#L23 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/helpers/loki/receiver.go#L25 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.3/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.3/test/framework/functional/output_syslog.go#L13 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc deleted file mode 100644 index c41053c98de8..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-4.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-4_{context}"] - -= Supported log data output types in OpenShift Logging 5.4 - -Red Hat OpenShift Logging 5.4 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. 
- -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.5 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.2.1 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_elasticsearch_index_test.go#L17 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.4/Makefile#L23 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/helpers/loki/receiver.go#L26 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.4/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.4/test/framework/functional/output_syslog.go#L13 diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc deleted file mode 100644 index 0ee234a1d603..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-5.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-5_{context}"] - -= Supported log data output types in OpenShift Logging 5.5 - -Red Hat OpenShift Logging 5.5 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. 
- -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 7.10.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.6 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.5.0 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/forward_to_cloudwatch_test.go#L18 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/elasticsearch/forward_to_elasticsearch_index_test.go#L24 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/framework/functional/output_elasticsearch7.go#L13 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.5/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.5/Makefile#L24 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/helpers/loki/receiver.go#L26 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.5/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.5/test/framework/functional/output_syslog.go#L14 \ No newline at end of file diff --git a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc b/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc deleted file mode 100644 index f93995221cb7..000000000000 --- a/modules/cluster-logging-collector-log-forwarding-supported-plugins-5-6.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// logging/cluster-logging-external.adoc - -[id="cluster-logging-collector-log-forwarding-supported-plugins-5-6_{context}"] - -= Supported log data output types in OpenShift Logging 5.6 - -Red Hat OpenShift Logging 5.6 provides the following output types and protocols for sending log data to target log collectors. - -Red Hat tests each of the combinations shown in the following table. However, you should be able to send log data to a wider range target log collectors that ingest these protocols. - -[options="header"] -|==== -| Output types | Protocols | Tested with - -| Amazon CloudWatch -| REST over HTTPS -| The current version of Amazon CloudWatch - - -| elasticsearch -| elasticsearch -a| Elasticsearch 6.8.23 - -Elasticsearch 7.10.1 - -Elasticsearch 8.6.1 - -| fluentdForward -| fluentd forward v1 -a| fluentd 1.14.6 - -logstash 7.10.1 - -| Loki -| REST over HTTP and HTTPS -| Loki 2.5.0 deployed on OCP - -| kafka -| kafka 0.11 -a| kafka 2.7.0 - -| syslog -| RFC-3164, RFC-5424 -| rsyslog-8.39.0 - -|==== - -[IMPORTANT] -==== -Fluentd doesn't support Elasticsearch 8 as of 5.6.2. -Vector doesn't support fluentd/logstash/rsyslog before 5.7.0. 
-==== - -// Note: validate these items against the corresponding line of the test configuration files that Red Hat OpenShift Logging uses: -// -// cloudwatch https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/functional/outputs/cloudwatch/forward_to_cloudwatch_test.go#L13 -// elasticsearch https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/framework/functional/output_elasticsearch.go#L16-L18 -// es fluentd https://github.com/ViaQ/logging-fluentd/blob/release-5.6/fluentd/Gemfile.lock#L55 -// fluentd https://github.com/openshift/cluster-logging-operator/blob/release-5.6/Makefile#L50 -// kafka https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/helpers/kafka/constants.go#L17 -// kafka fluentd https://github.com/zendesk/ruby-kafka/tree/v1.4.0#compatibility -// logstash https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/functional/outputs/forward_to_logstash_test.go#L30 -// loki https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/helpers/loki/receiver.go#L27 -// syslog protocols https://github.com/openshift/cluster-logging-operator/tree/release-5.6/test/functional/outputs/syslog -// syslog version https://github.com/openshift/cluster-logging-operator/blob/release-5.6/test/framework/functional/output_syslog.go#L14 diff --git a/modules/cluster-logging-collector-pod-location.adoc b/modules/cluster-logging-collector-pod-location.adoc deleted file mode 100644 index 47d350339f6d..000000000000 --- a/modules/cluster-logging-collector-pod-location.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-pod-location_{context}"] -= Viewing logging collector pods - -You can view the Fluentd logging collector pods and the corresponding nodes that they are running on. The Fluentd logging collector pods run only in the `openshift-logging` project. 
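If you only need the pod-to-node mapping, a JSONPath query is one option. The following command is a sketch that assumes the same `component=collector` label selector that is used in the procedure below:

[source,terminal]
----
$ oc get pods --selector component=collector -n openshift-logging \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
----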
- -.Procedure - -* Run the following command in the `openshift-logging` project to view the Fluentd logging collector pods and their details: - -[source,terminal] ----- -$ oc get pods --selector component=collector -o wide -n openshift-logging ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -fluentd-8d69v 1/1 Running 0 134m 10.130.2.30 master1.example.com -fluentd-bd225 1/1 Running 0 134m 10.131.1.11 master2.example.com -fluentd-cvrzs 1/1 Running 0 134m 10.130.0.21 master3.example.com -fluentd-gpqg2 1/1 Running 0 134m 10.128.2.27 worker1.example.com -fluentd-l9j7j 1/1 Running 0 134m 10.129.2.31 worker2.example.com ----- - -//// -[source,terminal] ----- -$ oc get pods -o wide | grep rsyslog ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -rsyslog-5gtfs 1/1 Running 0 3m6s 10.130.0.40 ip-10-0-148-139.ec2.internal -rsyslog-67rv6 1/1 Running 0 3m6s 10.128.2.13 ip-10-0-158-206.ec2.internal -rsyslog-bqgjn 1/1 Running 0 3m6s 10.131.0.11 ip-10-0-132-167.ec2.internal -rsyslog-cjmdp 1/1 Running 0 3m6s 10.129.2.16 ip-10-0-139-191.ec2.internal -rsyslog-kqlzh 1/1 Running 0 3m6s 10.129.0.37 ip-10-0-141-243.ec2.internal -rsyslog-nhshr 1/1 Running 0 3m6s 10.128.0.41 ip-10-0-143-38.ec2.internal ----- -//// diff --git a/modules/cluster-logging-collector-tolerations.adoc b/modules/cluster-logging-collector-tolerations.adoc deleted file mode 100644 index 1738a57067bb..000000000000 --- a/modules/cluster-logging-collector-tolerations.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-tolerations_{context}"] -= Using tolerations to control the log collector pod placement - -You can control which nodes the logging collector pods run on, and prevent -other workloads from using those nodes, by applying tolerations to the pods. - -You apply tolerations to logging collector pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. You can use taints and tolerations -to ensure that the pods are not evicted because of issues such as memory or CPU pressure. - -By default, the logging collector pods have the following toleration: - -[source,yaml] ----- -tolerations: -- key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoExecute" ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the logging collector pods: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 collector=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `collector`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and removes existing pods -that do not match. - -. Edit the `collection` stanza of the `ClusterLogging` custom resource (CR) to configure a toleration for the logging collector pods: -+ -[source,yaml] ----- - collection: - logs: - type: "fluentd" - fluentd: - tolerations: - - key: "collector" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node.
-<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. diff --git a/modules/cluster-logging-collector-tuning.adoc b/modules/cluster-logging-collector-tuning.adoc deleted file mode 100644 index a0217dd494aa..000000000000 --- a/modules/cluster-logging-collector-tuning.adoc +++ /dev/null @@ -1,173 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-collector-tuning_{context}"] -= Advanced configuration for the log forwarder - -The {logging-title} includes multiple Fluentd parameters that you can use for tuning the performance of the Fluentd log forwarder. With these parameters, you can change the following Fluentd behaviors: - -* Chunk and chunk buffer sizes -* Chunk flushing behavior -* Chunk forwarding retry behavior - -Fluentd collects log data in a single blob called a _chunk_. When Fluentd creates a chunk, the chunk is considered to be in the _stage_, where the chunk gets filled with data. When the chunk is full, Fluentd moves the chunk to the _queue_, where chunks are held before being flushed, or written out to their destination. Fluentd can fail to flush a chunk for a number of reasons, such as network issues or capacity issues at the destination. If a chunk cannot be flushed, Fluentd retries flushing as configured. - -By default in {product-title}, Fluentd uses the _exponential backoff_ method to retry flushing, where Fluentd doubles the time it waits between attempts to retry flushing again, which helps reduce connection requests to the destination. You can disable exponential backoff and use the _periodic_ retry method instead, which retries flushing the chunks at a specified interval. - -These parameters can help you determine the trade-offs between latency and throughput. - -* To optimize Fluentd for throughput, you could use these parameters to reduce network packet count by configuring larger buffers and queues, delaying flushes, and setting longer times between retries. Be aware that larger buffers require more space on the node file system. - -* To optimize for low latency, you could use the parameters to send data as soon as possible, avoid the build-up of batches, have shorter queues and buffers, and use more frequent flush and retries. - -You can configure the chunking and flushing behavior using the following parameters in the `ClusterLogging` custom resource (CR). The parameters are then automatically added to the Fluentd config map for use by Fluentd. - -[NOTE] -==== -These parameters are: - -* Not relevant to most users. The default settings should give good general performance. -* Only for advanced users with detailed knowledge of Fluentd configuration and performance. -* Only for performance tuning. They have no effect on functional aspects of logging. -==== - -.Advanced Fluentd Configuration Parameters -[options="header"] -|=== - -|Parameter |Description |Default - -|`chunkLimitSize` -|The maximum size of each chunk. Fluentd stops writing data to a chunk when it reaches this size. Then, Fluentd sends the chunk to the queue and opens a new chunk. 
-|`8m` - -|`totalLimitSize` -|The maximum size of the buffer, which is the total size of the stage and the queue. If the buffer size exceeds this value, Fluentd stops adding data to chunks and fails with an error. All data not in chunks is lost. -|`8G` - -|`flushInterval` -|The interval between chunk flushes. You can use `s` (seconds), `m` (minutes), `h` (hours), or `d` (days). -|`1s` - -|`flushMode` -a| The method to perform flushes: - -* `lazy`: Flush chunks based on the `timekey` parameter. You cannot modify the `timekey` parameter. -* `interval`: Flush chunks based on the `flushInterval` parameter. -* `immediate`: Flush chunks immediately after data is added to a chunk. -|`interval` - -|`flushThreadCount` -|The number of threads that perform chunk flushing. Increasing the number of threads improves the flush throughput, which hides network latency. -|`2` - -|`overflowAction` -a|The chunking behavior when the queue is full: - -* `throw_exception`: Raise an exception to show in the log. -* `block`: Stop data chunking until the full buffer issue is resolved. -* `drop_oldest_chunk`: Drop the oldest chunk to accept new incoming chunks. Older chunks have less value than newer chunks. -|`block` - -|`retryMaxInterval` -|The maximum time in seconds for the `exponential_backoff` retry method. -|`300s` - -|`retryType` -a|The retry method when flushing fails: - -* `exponential_backoff`: Increase the time between flush retries. Fluentd doubles the time it waits until the next retry until the `retry_max_interval` parameter is reached. -* `periodic`: Retries flushes periodically, based on the `retryWait` parameter. -|`exponential_backoff` - -|`retryTimeOut` -|The maximum time interval to attempt retries before the record is discarded. -|`60m` - -|`retryWait` -|The time in seconds before the next chunk flush. -|`1s` - -|=== - -For more information on the Fluentd chunk lifecycle, see link:https://docs.fluentd.org/buffer[Buffer Plugins] in the Fluentd documentation. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] -+ ----- -$ oc edit ClusterLogging instance ----- - -. Add or modify any of the following parameters: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - forwarder: - fluentd: - buffer: - chunkLimitSize: 8m <1> - flushInterval: 5s <2> - flushMode: interval <3> - flushThreadCount: 3 <4> - overflowAction: throw_exception <5> - retryMaxInterval: "300s" <6> - retryType: periodic <7> - retryWait: 1s <8> - totalLimitSize: 32m <9> -... ----- -<1> Specify the maximum size of each chunk before it is queued for flushing. -<2> Specify the interval between chunk flushes. -<3> Specify the method to perform chunk flushes: `lazy`, `interval`, or `immediate`. -<4> Specify the number of threads to use for chunk flushes. -<5> Specify the chunking behavior when the queue is full: `throw_exception`, `block`, or `drop_oldest_chunk`. -<6> Specify the maximum interval in seconds for the `exponential_backoff` chunk flushing method. -<7> Specify the retry type when chunk flushing fails: `exponential_backoff` or `periodic`. -<8> Specify the time in seconds before the next chunk flush. -<9> Specify the maximum size of the chunk buffer. - -. Verify that the Fluentd pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- - -. 
Check that the new values are in the `fluentd` config map: -+ -[source,terminal] ----- -$ oc extract configmap/fluentd --confirm ----- -+ -.Example fluentd.conf -[source,terminal] ----- - - @type file - path '/var/lib/fluentd/default' - flush_mode interval - flush_interval 5s - flush_thread_count 3 - retry_type periodic - retry_wait 1s - retry_max_interval 300s - retry_timeout 60m - queued_chunks_limit_size "#{ENV['BUFFER_QUEUE_LIMIT'] || '32'}" - total_limit_size 32m - chunk_limit_size 8m - overflow_action throw_exception - ----- diff --git a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc b/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc deleted file mode 100644 index 44ef382c5ed0..000000000000 --- a/modules/cluster-logging-configuration-of-json-log-data-for-default-elasticsearch.adoc +++ /dev/null @@ -1,109 +0,0 @@ -[id="cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_{context}"] -= Configuring JSON log data for Elasticsearch - -If your JSON logs follow more than one schema, storing them in a single index might cause type conflicts and cardinality problems. To avoid that, you must configure the `ClusterLogForwarder` custom resource (CR) to group each schema into a single output definition. This way, each schema is forwarded to a separate index. - -[IMPORTANT] -==== -If you forward JSON logs to the default Elasticsearch instance managed by OpenShift Logging, it generates new indices based on your configuration. To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Structure types - -You can use the following structure types in the `ClusterLogForwarder` CR to construct index names for the Elasticsearch log store: - -* `structuredTypeKey` (string, optional) is the name of a message field. The value of that field, if present, is used to construct the index name. -** `kubernetes.labels.` is the Kubernetes pod label whose value is used to construct the index name. -** `openshift.labels.` is the `pipeline.label.` element in the `ClusterLogForwarder` CR whose value is used to construct the index name. -** `kubernetes.container_name` uses the container name to construct the index name. -* `structuredTypeName`: (string, optional) If `structuredTypeKey` is not set or its key is not present, OpenShift Logging uses the value of `structuredTypeName` as the structured type. When you use both `structuredTypeKey` and `structuredTypeName` together, `structuredTypeName` provides a fallback index name if the key in `structuredTypeKey` is missing from the JSON log data. - -[NOTE] -==== -Although you can set the value of `structuredTypeKey` to any field shown in the "Log Record Fields" topic, the most useful fields are shown in the preceding list of structure types. -==== - -.A structuredTypeKey: kubernetes.labels. example - -Suppose the following: - -* Your cluster is running application pods that produce JSON logs in two different formats, "apache" and "google". -* The user labels these application pods with `logFormat=apache` and `logFormat=google`. -* You use the following snippet in your `ClusterLogForwarder` CR YAML file. 
- -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: kubernetes.labels.logFormat <1> - structuredTypeName: nologformat -pipelines: -- inputRefs: - outputRefs: default - parse: json <2> ----- -<1> Uses the value of the key-value pair that is formed by the Kubernetes `logFormat` label. -<2> Enables parsing JSON logs. - -In that case, the following structured log record goes to the `app-apache-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "apache", ...}} -} ----- - -And the following structured log record goes to the `app-google-write` index: - -[source] ----- -{ - "structured":{"name":"wilma","home":"bedrock"}, - "kubernetes":{"labels":{"logFormat": "google", ...}} -} ----- - -.A structuredTypeKey: openshift.labels. example - -Suppose that you use the following snippet in your `ClusterLogForwarder` CR YAML file. - -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: openshift.labels.myLabel <1> - structuredTypeName: nologformat -pipelines: - - name: application-logs - inputRefs: - - application - - audit - outputRefs: - - elasticsearch-secure - - default - parse: json - labels: - myLabel: myValue <2> ----- -<1> Uses the value of the key-value pair that is formed by the OpenShift `myLabel` label. -<2> The `myLabel` element gives its string value, `myValue`, to the structured log record. - -In that case, the following structured log record goes to the `app-myValue-write` index: - -[source] ----- -{ - "structured":{"name":"fred","home":"bedrock"}, - "openshift":{"labels":{"myLabel": "myValue", ...}} -} ----- - -.Additional considerations - -* The Elasticsearch _index_ for structured records is formed by prepending "app-" to the structured type and appending "-write". -* Unstructured records are not sent to the structured index. They are indexed as usual in the application, infrastructure, or audit indices. -* If there is no non-empty structured type, forward an _unstructured_ record with no `structured` field. - -It is important not to overload Elasticsearch with too many indices. Only use distinct structured types for distinct log _formats_, *not* for each application or namespace. For example, most Apache applications use the same JSON log format and structured type, such as `LogApache`. diff --git a/modules/cluster-logging-configuring-image-about.adoc b/modules/cluster-logging-configuring-image-about.adoc deleted file mode 100644 index f0b6522dba2c..000000000000 --- a/modules/cluster-logging-configuring-image-about.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-configuring.adoc - -[id="cluster-logging-configuring-image-about_{context}"] -= Understanding {logging} component images - -There are several components in the {logging-title}, each one implemented with one or more images. Each image is specified by an environment variable -defined in the *cluster-logging-operator* deployment in the *openshift-logging* project and should not be changed. 
- -You can view the images by running the following command: - -[source,terminal] ----- -$ oc -n openshift-logging set env deployment/cluster-logging-operator --list | grep _IMAGE ----- -// logging test command and update the example output - -.Example output -[source,terminal] ----- -FLUENTD_IMAGE=registry.redhat.io/openshift-logging/fluentd-rhel8:latest@sha256:ffdf79da7386871d2bc24cd937e02284b30f85a9979dc8c635fb73021cbca2f3 <1> ----- -<1> *FLUENTD_IMAGE* deploys Fluentd. - -[NOTE] -==== -Promtail is not officially supported at this time. -==== - -The values might be different depending on your environment. diff --git a/modules/cluster-logging-cpu-memory.adoc b/modules/cluster-logging-cpu-memory.adoc deleted file mode 100644 index 0971d084f6af..000000000000 --- a/modules/cluster-logging-cpu-memory.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-memory-limits_{context}"] -= Configuring CPU and memory limits - -The {logging} components allow for adjustments to both the CPU and memory limits. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: openshift-logging - -... - -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - resources: <1> - limits: - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" - kibana: - resources: <2> - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - proxy: - resources: <2> - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - replicas: 2 - collection: - logs: - type: "fluentd" - fluentd: - resources: <3> - limits: - memory: 736Mi - requests: - cpu: 200m - memory: 736Mi ----- -<1> Specify the CPU and memory limits and requests for the log store as needed. For Elasticsearch, you must adjust both the request value and the limit value. -<2> Specify the CPU and memory limits and requests for the log visualizer as needed. -<3> Specify the CPU and memory limits and requests for the log collector as needed. diff --git a/modules/cluster-logging-dashboards-access.adoc b/modules/cluster-logging-dashboards-access.adoc deleted file mode 100644 index 9f1b1effaca1..000000000000 --- a/modules/cluster-logging-dashboards-access.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// -// * logging/cluster-logging-dashboards.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-dashboards-access_{context}"] -= Accessing the Elasticsearch and OpenShift Logging dashboards - -You can view the *Logging/Elasticsearch Nodes* and *OpenShift Logging* dashboards in the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url}. -endif::[] - -.Procedure - -To launch the dashboards: - -ifndef::openshift-rosa,openshift-dedicated[] -. In the {product-title} web console, click *Observe* -> *Dashboards*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -. In the {product-title} {hybrid-console}, click *Observe* -> *Dashboards*. -endif::[] - -. 
On the *Dashboards* page, select *Logging/Elasticsearch Nodes* or *OpenShift Logging* from the *Dashboard* menu. -+ -For the *Logging/Elasticsearch Nodes* dashboard, you can select the Elasticsearch node you want to view and set the data resolution. -+ -The appropriate dashboard is displayed, showing multiple charts of data. - -. Optional: Select a different time range to display or refresh rate for the data from the *Time Range* and *Refresh Interval* menus. diff --git a/modules/cluster-logging-dashboards-es.adoc b/modules/cluster-logging-dashboards-es.adoc deleted file mode 100644 index 7db01c18985b..000000000000 --- a/modules/cluster-logging-dashboards-es.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-dashboards.adoc - -[id="cluster-logging-dashboards-es_{context}"] -= Charts on the Logging/Elasticsearch nodes dashboard - -The *Logging/Elasticsearch Nodes* dashboard contains charts that show details about your Elasticsearch instance, many at node-level, for further diagnostics. - -Elasticsearch status:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the status of your Elasticsearch instance. - -.Elasticsearch status fields -[options="header"] -|=== -|Metric|Description - -|Cluster status -a|The cluster health status during the selected time period, using the Elasticsearch green, yellow, and red statuses: - -* 0 - Indicates that the Elasticsearch instance is in green status, which means that all shards are allocated. -* 1 - Indicates that the Elasticsearch instance is in yellow status, which means that replica shards for at least one shard are not allocated. -* 2 - Indicates that the Elasticsearch instance is in red status, which means that at least one primary shard and its replicas are not allocated. - -|Cluster nodes -|The total number of Elasticsearch nodes in the cluster. - -|Cluster data nodes -|The number of Elasticsearch data nodes in the cluster. - -|Cluster pending tasks -|The number of cluster state changes that are not finished and are waiting in a cluster queue, for example, index creation, index deletion, or shard allocation. A growing trend indicates that the cluster is not able to keep up with changes. - -|=== - -Elasticsearch cluster index shard status:: - -Each Elasticsearch index is a logical group of one or more shards, which are basic units of persisted data. There are two types of index shards: primary shards, and replica shards. When a document is indexed into an index, it is stored in one of its primary shards and copied into every replica of that shard. The number of primary shards is specified when the index is created, and the number cannot change during index lifetime. You can change the number of replica shards at any time. - -The index shard can be in several states depending on its lifecycle phase or events occurring in the cluster. When the shard is able to perform search and indexing requests, the shard is active. If the shard cannot perform these requests, the shard is non–active. A shard might be non-active if the shard is initializing, reallocating, unassigned, and so forth. - -Index shards consist of a number of smaller internal blocks, called index segments, which are physical representations of the data. An index segment is a relatively small, immutable Lucene index that is created when Lucene commits newly-indexed data. 
Lucene, a search library used by Elasticsearch, merges index segments into larger segments in the background to keep the total number of segments low. If the process of merging segments is slower than the speed at which new segments are created, it could indicate a problem. - -When Lucene performs data operations, such as a search operation, Lucene performs the operation against the index segments in the relevant index. For that purpose, each segment contains specific data structures that are loaded in the memory and mapped. Index mapping can have a significant impact on the memory used by segment data structures. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch index shards. - -.Elasticsearch cluster shard status charts -[options="header"] - -|=== -|Metric|Description - -|Cluster active shards -|The number of active primary shards and the total number of shards, including replicas, in the cluster. If the number of shards grows higher, the cluster performance can start degrading. - -|Cluster initializing shards -|The number of non-active shards in the cluster. A non-active shard is one that is initializing, being reallocated to a different node, or is unassigned. A cluster typically has non–active shards for short periods. A growing number of non–active shards over longer periods could indicate a problem. - -|Cluster relocating shards -|The number of shards that Elasticsearch is relocating to a new node. Elasticsearch relocates nodes for multiple reasons, such as high memory use on a node or after a new node is added to the cluster. - -|Cluster unassigned shards -|The number of unassigned shards. Elasticsearch shards might be unassigned for reasons such as a new index being added or the failure of a node. - -|=== - -Elasticsearch node metrics:: - -Each Elasticsearch node has a finite amount of resources that can be used to process tasks. When all the resources are being used and Elasticsearch attempts to perform a new task, Elasticsearch put the tasks into a queue until some resources become available. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about resource usage for a selected node and the number of tasks waiting in the Elasticsearch queue. - -.Elasticsearch node metric charts -[options="header"] -|=== -|Metric|Description - -|ThreadPool tasks -|The number of waiting tasks in individual queues, shown by task type. A long–term accumulation of tasks in any queue could indicate node resource shortages or some other problem. - -|CPU usage -|The amount of CPU being used by the selected Elasticsearch node as a percentage of the total CPU allocated to the host container. - -|Memory usage -|The amount of memory being used by the selected Elasticsearch node. - -|Disk usage -|The total disk space being used for index data and metadata on the selected Elasticsearch node. - -|Documents indexing rate -|The rate that documents are indexed on the selected Elasticsearch node. - -|Indexing latency -|The time taken to index the documents on the selected Elasticsearch node. Indexing latency can be affected by many factors, such as JVM Heap memory and overall load. A growing latency indicates a resource capacity shortage in the instance. - -|Search rate -|The number of search requests run on the selected Elasticsearch node. - -|Search latency -|The time taken to complete search requests on the selected Elasticsearch node. Search latency can be affected by many factors. 
A growing latency indicates a resource capacity shortage in the instance. - -|Documents count (with replicas) -|The number of Elasticsearch documents stored on the selected Elasticsearch node, including documents stored in both the primary shards and replica shards that are allocated on the node. - -|Documents deleting rate -|The number of Elasticsearch documents being deleted from any of the index shards that are allocated to the selected Elasticsearch node. - -|Documents merging rate -|The number of Elasticsearch documents being merged in any of index shards that are allocated to the selected Elasticsearch node. - -|=== - -Elasticsearch node fielddata:: - -link:https://www.elastic.co/guide/en/elasticsearch/reference/6.8/fielddata.html[_Fielddata_] is an Elasticsearch data structure that holds lists of terms in an index and is kept in the JVM Heap. Because fielddata building is an expensive operation, Elasticsearch caches the fielddata structures. Elasticsearch can evict a fielddata cache when the underlying index segment is deleted or merged, or if there is not enough JVM HEAP memory for all the fielddata caches. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch fielddata. - -.Elasticsearch node fielddata charts -[options="header"] -|=== -|Metric|Description - -|Fielddata memory size -|The amount of JVM Heap used for the fielddata cache on the selected Elasticsearch node. - -|Fielddata evictions -|The number of fielddata structures that were deleted from the selected Elasticsearch node. - -|=== - -Elasticsearch node query cache:: - -If the data stored in the index does not change, search query results are cached in a node-level query cache for reuse by Elasticsearch. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about the Elasticsearch node query cache. - -.Elasticsearch node query charts -[options="header"] -|=== -|Metric|Description - -|Query cache size -|The total amount of memory used for the query cache for all the shards allocated to the selected Elasticsearch node. - -|Query cache evictions -|The number of query cache evictions on the selected Elasticsearch node. - -|Query cache hits -|The number of query cache hits on the selected Elasticsearch node. - -|Query cache misses -|The number of query cache misses on the selected Elasticsearch node. - -|=== - -Elasticsearch index throttling:: - -When indexing documents, Elasticsearch stores the documents in index segments, which are physical representations of the data. At the same time, Elasticsearch periodically merges smaller segments into a larger segment as a way to optimize resource use. If the indexing is faster then the ability to merge segments, the merge process does not complete quickly enough, which can lead to issues with searches and performance. To prevent this situation, Elasticsearch throttles indexing, typically by reducing the number of threads allocated to indexing down to a single thread. - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about Elasticsearch index throttling. - -.Index throttling charts -[options="header"] -|=== -|Metric|Description - -|Indexing throttling -|The amount of time that Elasticsearch has been throttling the indexing operations on the selected Elasticsearch node. - -|Merging throttling -|The amount of time that Elasticsearch has been throttling the segment merge operations on the selected Elasticsearch node. 
- -|=== - -Node JVM Heap statistics:: - -The *Logging/Elasticsearch Nodes* dashboard contains the following charts about JVM Heap operations. - -.JVM Heap statistic charts -[options="header"] -|=== -|Metric|Description - -|Heap used -|The amount of the total allocated JVM Heap space that is used on the selected Elasticsearch node. - -|GC count -|The number of garbage collection operations that have been run on the selected Elasticsearch node, by old and young garbage collection. - -|GC time -|The amount of time that the JVM spent running garbage collection operations on the selected Elasticsearch node, by old and young garbage collection. - -|=== diff --git a/modules/cluster-logging-dashboards-logging.adoc b/modules/cluster-logging-dashboards-logging.adoc deleted file mode 100644 index 214d1677c6dc..000000000000 --- a/modules/cluster-logging-dashboards-logging.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-dashboards.adoc - -:_content-type: CONCEPT -[id="cluster-logging-dashboards-logging_{context}"] -= About the OpenShift Logging dashboard - -The *OpenShift Logging* dashboard contains charts that show details about your Elasticsearch instance at a cluster-level that you can use to diagnose and anticipate problems. - -.OpenShift Logging charts -[options="header"] -|=== -|Metric|Description - -|Elastic Cluster Status -a|The current Elasticsearch status: - -* ONLINE - Indicates that the Elasticsearch instance is online. -* OFFLINE - Indicates that the Elasticsearch instance is offline. - -|Elastic Nodes -|The total number of Elasticsearch nodes in the Elasticsearch instance. - -|Elastic Shards -|The total number of Elasticsearch shards in the Elasticsearch instance. - -|Elastic Documents -|The total number of Elasticsearch documents in the Elasticsearch instance. - -|Total Index Size on Disk -|The total disk space that is being used for the Elasticsearch indices. - -|Elastic Pending Tasks -|The total number of Elasticsearch changes that have not been completed, such as index creation, index mapping, shard allocation, or shard failure. - -|Elastic JVM GC time -|The amount of time that the JVM spent executing Elasticsearch garbage collection operations in the cluster. - -|Elastic JVM GC Rate -|The total number of times that JVM executed garbage activities per second. - -|Elastic Query/Fetch Latency Sum -a|* Query latency: The average time each Elasticsearch search query takes to execute. -* Fetch latency: The average time each Elasticsearch search query spends fetching data. - -Fetch latency typically takes less time than query latency. If fetch latency is consistently increasing, it might indicate slow disks, data enrichment, or large requests with too many results. - -|Elastic Query Rate -|The total queries executed against the Elasticsearch instance per second for each Elasticsearch node. - -|CPU -|The amount of CPU used by Elasticsearch, Fluentd, and Kibana, shown for each component. - -|Elastic JVM Heap Used -|The amount of JVM memory used. In a healthy cluster, the graph shows regular drops as memory is freed by JVM garbage collection. - -|Elasticsearch Disk Usage -|The total disk space used by the Elasticsearch instance for each Elasticsearch node. - -|File Descriptors In Use -|The total number of file descriptors used by Elasticsearch, Fluentd, and Kibana. - -|FluentD emit count -|The total number of Fluentd messages per second for the Fluentd default output, and the retry count for the default output. 
- -|FluentD Buffer Availability -|The percent of the Fluentd buffer that is available for chunks. A full buffer might indicate that Fluentd is not able to process the number of logs received. - -|Elastic rx bytes -|The total number of bytes that Elasticsearch has received from FluentD, the Elasticsearch nodes, and other sources. - -|Elastic Index Failure Rate -|The total number of times per second that an Elasticsearch index fails. A high rate might indicate an issue with indexing. - -|FluentD Output Error Rate -|The total number of times per second that FluentD is not able to output logs. - -|=== diff --git a/modules/cluster-logging-deploy-certificates.adoc b/modules/cluster-logging-deploy-certificates.adoc deleted file mode 100644 index c6c25b166b50..000000000000 --- a/modules/cluster-logging-deploy-certificates.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-certificates_{context}"] -= Deploying custom certificates - -You can specify custom certificates using the following variables -instead of relying on those generated during the deployment process. These -certificates are used to encrypt and secure communication between a user's -browser and Kibana. The security-related files will be generated if they are not -supplied. - -[cols="3,7",options="header"] -|=== -|File Name -|Description - -|`openshift_logging_kibana_cert` -|A browser-facing certificate for the Kibana server. - -|`openshift_logging_kibana_key` -|A key to be used with the browser-facing Kibana certificate. - -|`openshift_logging_kibana_ca` -|The absolute path on the control node to the CA file to use -for the browser facing Kibana certs. - -|=== diff --git a/modules/cluster-logging-deploy-cli.adoc b/modules/cluster-logging-deploy-cli.adoc deleted file mode 100644 index 50fa7c12deee..000000000000 --- a/modules/cluster-logging-deploy-cli.adoc +++ /dev/null @@ -1,416 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-cli_{context}"] -= Installing the {logging-title} using the CLI - -You can use the {product-title} CLI to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. - -.Prerequisites - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== -+ -Elasticsearch is a memory-intensive application. By default, {product-title} installs three Elasticsearch nodes with memory requests and limits of 16 GB. This initial set of three {product-title} nodes might not have enough memory to run Elasticsearch within your cluster. If you experience memory issues that are related to Elasticsearch, add more Elasticsearch nodes to your cluster rather than increasing the memory on existing nodes. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in *Configuring {product-title} to use Red Hat Operators*. 
-endif::[] - -.Procedure - -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator using the CLI: - -. Create a namespace for the OpenShift Elasticsearch Operator. - -.. Create a namespace object YAML file (for example, `eo-namespace.yaml`) for the OpenShift Elasticsearch Operator: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-operators-redhat <1> - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" <2> ----- -<1> You must specify the `openshift-operators-redhat` namespace. To prevent possible conflicts with metrics, you should configure the Prometheus Cluster Monitoring stack to scrape metrics from the `openshift-operators-redhat` namespace and not the `openshift-operators` namespace. The `openshift-operators` namespace might contain community Operators, which are untrusted and could publish a metric with the same name as -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] -metric, which would cause conflicts. -<2> String. You must specify this label as shown to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Create the namespace: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-namespace.yaml ----- - -. Create a namespace for the Red Hat OpenShift Logging Operator: - -.. Create a namespace object YAML file (for example, `olo-namespace.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-logging - annotations: - openshift.io/node-selector: "" - labels: - openshift.io/cluster-monitoring: "true" ----- - -.. Create the namespace: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-namespace.yaml ----- - -. Install the OpenShift Elasticsearch Operator by creating the following objects: - -.. Create an Operator Group object YAML file (for example, `eo-og.yaml`) for the OpenShift Elasticsearch Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: openshift-operators-redhat - namespace: openshift-operators-redhat <1> -spec: {} ----- -<1> You must specify the `openshift-operators-redhat` namespace. - -.. Create an Operator Group object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-og.yaml ----- - -.. Create a Subscription object YAML file (for example, `eo-sub.yaml`) to -subscribe a namespace to the OpenShift Elasticsearch Operator. -+ -.Example Subscription -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: "elasticsearch-operator" - namespace: "openshift-operators-redhat" <1> -spec: - channel: "stable-5.5" <2> - installPlanApproval: "Automatic" <3> - source: "redhat-operators" <4> - sourceNamespace: "openshift-marketplace" - name: "elasticsearch-operator" ----- -<1> You must specify the `openshift-operators-redhat` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. See the following note. -<3> `Automatic` allows the Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. `Manual` requires a user with appropriate credentials to approve the Operator update. -<4> Specify `redhat-operators`. 
If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, -specify the name of the CatalogSource object created when you configured the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -Specifying `stable` installs the current version of the latest stable release. Using `stable` with `installPlanApproval: "Automatic"`, will automatically upgrade your operators to the latest stable major and minor release. - -Specifying `stable-5.` installs the current minor version of a specific major release. Using `stable-5.` with `installPlanApproval: "Automatic"`, will automatically upgrade your operators to the latest stable minor release within the major release you specify with `x`. -==== - - -.. Create the Subscription object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f eo-sub.yaml ----- -+ -The OpenShift Elasticsearch Operator is installed to the `openshift-operators-redhat` namespace and copied to each project in the cluster. - -.. Verify the Operator installation: -+ -[source,terminal] ----- -$ oc get csv --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME DISPLAY VERSION REPLACES PHASE -default elasticsearch-operator.5.1.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-node-lease elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-public elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -kube-system elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-apiserver-operator elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-apiserver elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-authentication-operator elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -openshift-authentication elasticsearch-operator.5.5.0-202007012112.p0 OpenShift Elasticsearch Operator 5.5.0-202007012112.p0 Succeeded -... ----- -+ -There should be an OpenShift Elasticsearch Operator in each namespace. The version number might be different than shown. - -. Install the Red Hat OpenShift Logging Operator by creating the following objects: - -.. Create an Operator Group object YAML file (for example, `olo-og.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: cluster-logging - namespace: openshift-logging <1> -spec: - targetNamespaces: - - openshift-logging <1> ----- -<1> You must specify the `openshift-logging` namespace. - -.. Create an Operator Group object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-og.yaml ----- - -.. Create a Subscription object YAML file (for example, `olo-sub.yaml`) to -subscribe a namespace to the Red Hat OpenShift Logging Operator. 
-+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: cluster-logging - namespace: openshift-logging <1> -spec: - channel: "stable" <2> - name: cluster-logging - source: redhat-operators <3> - sourceNamespace: openshift-marketplace ----- -<1> You must specify the `openshift-logging` namespace. -<2> Specify `stable`, or `stable-5.` as the channel. -<3> Specify `redhat-operators`. If your {product-title} cluster is installed on a restricted network, also known as a disconnected cluster, specify the name of the CatalogSource object you created when you configured the Operator Lifecycle Manager (OLM). -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-sub.yaml ----- -+ -The Red Hat OpenShift Logging Operator is installed to the `openshift-logging` namespace. - -.. Verify the Operator installation. -+ -There should be a Red Hat OpenShift Logging Operator in the `openshift-logging` namespace. The Version number might be different than shown. -+ -[source,terminal] ----- -$ oc get csv -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME DISPLAY VERSION REPLACES PHASE -... -openshift-logging clusterlogging.5.1.0-202007012112.p0 OpenShift Logging 5.1.0-202007012112.p0 Succeeded -... ----- - -. Create an OpenShift Logging instance: - -.. Create an instance object YAML file (for example, `olo-instance.yaml`) for the Red Hat OpenShift Logging Operator: -+ -[NOTE] -==== -This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information about modifications you can make to your OpenShift Logging cluster. -==== -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" -spec: - managementState: "Managed" <2> - logStore: - type: "elasticsearch" <3> - retentionPolicy: <4> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 <5> - storage: - storageClassName: "" <6> - size: 200G - resources: <7> - limits: - memory: "16Gi" - requests: - memory: "16Gi" - proxy: <8> - resources: - limits: - memory: 256Mi - requests: - memory: 256Mi - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" <9> - kibana: - replicas: 1 - collection: - logs: - type: "fluentd" <10> - fluentd: {} ----- -<1> The name must be `instance`. -<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. Placing a deployment back into a managed state might revert any modifications you made. -<3> Settings for configuring Elasticsearch. Using the custom resource (CR), you can configure shard replication policy and persistent storage. -<4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. -<5> Specify the number of Elasticsearch nodes. See the note that follows this list. 
-<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, {product-title} deploys OpenShift Logging with ephemeral storage only. -<7> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that are sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<8> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request. -<9> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana pods. For more information, see *Configuring the log visualizer*. -<10> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. -+ -[NOTE] -+ -==== -The maximum number of Elasticsearch control plane nodes is three. If you specify a `nodeCount` greater than `3`, {product-title} creates three Elasticsearch nodes that are Master-eligible nodes, with the master, client, and data roles. The additional Elasticsearch nodes are created as Data-only nodes, using client and data roles. Control plane nodes perform cluster-wide actions such as creating or deleting an index, shard allocation, and tracking nodes. Data nodes hold the shards and perform data-related operations such as CRUD, search, and aggregations. Data-related operations are I/O-, memory-, and CPU-intensive. It is important to monitor these resources and to add more Data nodes if the current nodes are overloaded. - -For example, if `nodeCount=4`, the following nodes are created: - -[source,terminal] ----- -$ oc get deployment ----- - -.Example output -[source,terminal] ----- -cluster-logging-operator 1/1 1 1 18h -elasticsearch-cd-x6kdekli-1 1/1 1 0 6m54s -elasticsearch-cdm-x6kdekli-1 1/1 1 1 18h -elasticsearch-cdm-x6kdekli-2 1/1 1 0 6m49s -elasticsearch-cdm-x6kdekli-3 1/1 1 0 6m44s ----- - -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -.. Create the instance: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f olo-instance.yaml ----- -+ -This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. - -. Verify the installation by listing the pods in the *openshift-logging* project. 
-+ -You should see several pods for components of the Logging subsystem, similar to the following list: -+ -[source,terminal] ----- -$ oc get pods -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-66f77ffccb-ppzbg 1/1 Running 0 7m -elasticsearch-cdm-ftuhduuw-1-ffc4b9566-q6bhp 2/2 Running 0 2m40s -elasticsearch-cdm-ftuhduuw-2-7b4994dbfc-rd2gc 2/2 Running 0 2m36s -elasticsearch-cdm-ftuhduuw-3-84b5ff7ff8-gqnm2 2/2 Running 0 2m4s -collector-587vb 1/1 Running 0 2m26s -collector-7mpb9 1/1 Running 0 2m30s -collector-flm6j 1/1 Running 0 2m33s -collector-gn4rn 1/1 Running 0 2m26s -collector-nlgb6 1/1 Running 0 2m30s -collector-snpkt 1/1 Running 0 2m28s -kibana-d6d5668c5-rppqm 2/2 Running 0 2m39s ----- diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc deleted file mode 100644 index c5af639bf08f..000000000000 --- a/modules/cluster-logging-deploy-console.adoc +++ /dev/null @@ -1,244 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-console_{context}"] -= Installing the {logging-title} using the web console - -ifndef::openshift-rosa,openshift-dedicated[] -You can use the {product-title} web console to install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -You can install the OpenShift Elasticsearch and Red Hat OpenShift Logging Operators by using the {product-title} {cluster-manager-url}. -endif::[] - -[NOTE] -==== -If you do not want to use the default Elasticsearch log store, you can remove the internal Elasticsearch `logStore` and Kibana `visualization` components from the `ClusterLogging` custom resource (CR). Removing these components is optional but saves resources. For more information, see the additional resources of this section. -==== - -.Prerequisites - -* Ensure that you have the necessary persistent storage for Elasticsearch. Note that each Elasticsearch node -requires its own storage volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== -+ -Elasticsearch is a memory-intensive application. By default, {product-title} installs three Elasticsearch nodes with memory requests and limits of 16 GB. This initial set of three {product-title} nodes might not have enough memory to run Elasticsearch within your cluster. If you experience memory issues that are related to Elasticsearch, add more Elasticsearch nodes to your cluster rather than increasing the memory on existing nodes. - -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. 
-endif::[] - -.Procedure - -ifndef::openshift-rosa,openshift-dedicated[] -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator by using the {product-title} web console: -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -To install the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator by using the {product-title} {cluster-manager-url}: -endif::[] - -. Install the OpenShift Elasticsearch Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *OperatorHub*. -endif::[] - -.. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. - -.. Ensure that the *All namespaces on the cluster* is selected under *Installation Mode*. - -.. Ensure that *openshift-operators-redhat* is selected under *Installed Namespace*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` namespace might contain Community Operators, which are untrusted and could publish a metric with the same name as -ifdef::openshift-rosa[] - a ROSA -endif::[] -ifdef::openshift-dedicated[] - an {product-title} -endif::[] -metric, which would cause conflicts. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Select *stable-5.x* as the *Update Channel*. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that the OpenShift Elasticsearch Operator installed by switching to the *Operators* → *Installed Operators* page. - -.. Ensure that *OpenShift Elasticsearch Operator* is listed in all projects with a *Status* of *Succeeded*. - -. Install the Red Hat OpenShift Logging Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. - -.. Ensure that the *A specific namespace on the cluster* is selected under *Installation Mode*. - -.. Ensure that *Operator recommended namespace* is *openshift-logging* under *Installed Namespace*. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-logging` namespace. - -.. Select *stable-5.x* as the *Update Channel*. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that the Red Hat OpenShift Logging Operator installed by switching to the *Operators* → *Installed Operators* page. - -.. 
Ensure that *Red Hat OpenShift Logging* is listed in the *openshift-logging* project with a *Status* of *Succeeded*. -+ -If the Operator does not appear as installed, to troubleshoot further: -+ -* Switch to the *Operators* → *Installed Operators* page and inspect the *Status* column for any errors or failures. -* Switch to the *Workloads* → *Pods* page and check the logs in any pods in the `openshift-logging` project that are reporting issues. - -. Create an OpenShift Logging instance: - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. On the *Custom Resource Definitions* page, click *ClusterLogging*. - -.. On the *Custom Resource Definition details* page, select *View Instances* from the *Actions* menu. - -.. On the *ClusterLoggings* page, click *Create ClusterLogging*. -+ -You might have to refresh the page to load the data. - -.. In the YAML field, replace the code with the following: -+ -[NOTE] -==== -This default OpenShift Logging configuration should support a wide array of environments. Review the topics on tuning and -configuring {logging} components for information on modifications you can make to your OpenShift Logging cluster. -==== -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" <1> - namespace: "openshift-logging" -spec: - managementState: "Managed" <2> - logStore: - type: "elasticsearch" <3> - retentionPolicy: <4> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 <5> - storage: - storageClassName: "" <6> - size: 200G - resources: <7> - limits: - memory: "16Gi" - requests: - memory: "16Gi" - proxy: <8> - resources: - limits: - memory: 256Mi - requests: - memory: 256Mi - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" <9> - kibana: - replicas: 1 - collection: - logs: - type: "fluentd" <10> - fluentd: {} ----- -<1> The name must be `instance`. -<2> The OpenShift Logging management state. In some cases, if you change the OpenShift Logging defaults, you must set this to `Unmanaged`. -However, an unmanaged deployment does not receive updates until OpenShift Logging is placed back into a managed state. -<3> Settings for configuring Elasticsearch. Using the CR, you can configure shard replication policy and persistent storage. -<4> Specify the length of time that Elasticsearch should retain each log source. Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `7d` for seven days. Logs older than the `maxAge` are deleted. You must specify a retention policy for each log source or the Elasticsearch indices will not be created for that source. -<5> Specify the number of Elasticsearch nodes. See the note that follows this list. -<6> Enter the name of an existing storage class for Elasticsearch storage. For best performance, specify a storage class that allocates block storage. If you do not specify a storage class, OpenShift Logging uses ephemeral storage. -<7> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request. -<8> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. 
The default values are `256Mi` for the memory request and `100m` for the CPU request. -<9> Settings for configuring Kibana. Using the CR, you can scale Kibana for redundancy and configure the CPU and memory for your Kibana nodes. For more information, see *Configuring the log visualizer*. -<10> Settings for configuring Fluentd. Using the CR, you can configure Fluentd CPU and memory limits. For more information, see *Configuring Fluentd*. -+ -[NOTE] -+ -==== -The maximum number of Elasticsearch control plane nodes is three. If you specify a `nodeCount` greater than `3`, {product-title} creates three Elasticsearch nodes that are Master-eligible nodes, with the master, client, and data roles. The additional Elasticsearch nodes are created as Data-only nodes, using client and data roles. Control plane nodes perform cluster-wide actions such as creating or deleting an index, shard allocation, and tracking nodes. Data nodes hold the shards and perform data-related operations such as CRUD, search, and aggregations. Data-related operations are I/O-, memory-, and CPU-intensive. It is important to monitor these resources and to add more Data nodes if the current nodes are overloaded. - -For example, if `nodeCount=4`, the following nodes are created: - -[source,terminal] ----- -$ oc get deployment ----- - -.Example output -[source,terminal] ----- -cluster-logging-operator 1/1 1 1 18h -elasticsearch-cd-x6kdekli-1 0/1 1 0 6m54s -elasticsearch-cdm-x6kdekli-1 1/1 1 1 18h -elasticsearch-cdm-x6kdekli-2 0/1 1 0 6m49s -elasticsearch-cdm-x6kdekli-3 0/1 1 0 6m44s ----- - -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. -==== - -.. Click *Create*. This creates the {logging} components, the `Elasticsearch` custom resource and components, and the Kibana interface. - -. Verify the installation: - -.. Switch to the *Workloads* -> *Pods* page. - -.. Select the *openshift-logging* project. -+ -You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: -+ -* cluster-logging-operator-cb795f8dc-xkckc -* collector-pb2f8 -* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz -* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv -* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g -* fluentd-2c7dg -* fluentd-9z7kk -* fluentd-br7r2 -* fluentd-fn2sb -* fluentd-zqgqx -* kibana-7fb4fd4cc9-bvt4p \ No newline at end of file diff --git a/modules/cluster-logging-deploy-label.adoc b/modules/cluster-logging-deploy-label.adoc deleted file mode 100644 index 38a3aa28704e..000000000000 --- a/modules/cluster-logging-deploy-label.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-label_{context}"] -= Labeling nodes - -At 100 nodes or more, pre-pull the logging images from the registry. After deploying the logging pods, such as Elasticsearch and Kibana, node labeling should be done in steps of 20 nodes at a time. For example: - -Using a simple loop: - -[source,terminal] ----- -$ while read node; do oc label nodes $node elasticsearch-fluentd=true; done < 20_fluentd.lst ----- - -The following also works: - -[source,terminal] ----- -$ oc label nodes 10.10.0.{100..119} elasticsearch-fluentd=true ----- - -Labeling nodes in groups paces the daemon sets used by the {logging}, helping to avoid contention on shared resources such as the image registry. - -[NOTE] -==== -Check for the occurrence of any "CrashLoopBackOff | ImagePullFailed | Error" issues. 
-`oc logs `, `oc describe pod ` and `oc get event` are helpful diagnostic commands. -==== diff --git a/modules/cluster-logging-deploy-memory.adoc b/modules/cluster-logging-deploy-memory.adoc deleted file mode 100644 index accf34ab0172..000000000000 --- a/modules/cluster-logging-deploy-memory.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-memory_{context}"] -= Configure memory for Elasticsearch instances - -By default, the amount of RAM allocated to each ES instance is 16GB. You can change this value as needed. - -Keep in mind that *half* of this value will be passed to the individual -Elasticsearch pods java processes -link:https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html#_give_half_your_memory_to_lucene[heap -size]. - -.Procedure - diff --git a/modules/cluster-logging-deploy-multitenant.adoc b/modules/cluster-logging-deploy-multitenant.adoc deleted file mode 100644 index 7a190bff3fc0..000000000000 --- a/modules/cluster-logging-deploy-multitenant.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-deploy-multitenant_{context}"] -= Allowing traffic between projects when network isolation is enabled - -Your cluster network plugin might enforce network isolation. If so, you must allow network traffic between the projects that contain the operators deployed by OpenShift Logging. - -Network isolation blocks network traffic between pods or services that are in different projects. The {logging} installs the _OpenShift Elasticsearch Operator_ in the `openshift-operators-redhat` project and the _Red Hat OpenShift Logging Operator_ in the `openshift-logging` project. Therefore, you must allow traffic between these two projects. - -{product-title} offers two supported choices for the network plugin, OpenShift SDN and OVN-Kubernetes. These two providers implement various network isolation policies. - -OpenShift SDN has three modes: - -network policy:: This is the default mode. If no policy is defined, it allows all traffic. However, if a user defines a policy, they typically start by denying all traffic and then adding exceptions. This process might break applications that are running in different projects. Therefore, explicitly configure the policy to allow traffic to egress from one logging-related project to the other. - -ifdef::openshift-enterprise,openshift-origin[] -multitenant:: This mode enforces network isolation. You must join the two logging-related projects to allow traffic between them. -endif::[] - -subnet:: This mode allows all traffic. It does not enforce network isolation. No action is needed. - -OVN-Kubernetes always uses a *network policy*. Therefore, as with OpenShift SDN, you must configure the policy to allow traffic to egress from one logging-related project to the other. - -.Procedure - -* If you are using OpenShift SDN in *multitenant* mode, join the two projects. For example: -+ -[source,terminal] ----- -$ oc adm pod-network join-projects --to=openshift-operators-redhat openshift-logging ----- - -* Otherwise, for OpenShift SDN in *network policy* mode and OVN-Kubernetes, perform the following actions: - -.. Set a label on the `openshift-operators-redhat` namespace. For example: -+ -[source,terminal] ----- -$ oc label namespace openshift-operators-redhat project=openshift-operators-redhat ----- - -.. 
Create a network policy object in the `openshift-logging` namespace that allows ingress from the `openshift-operators-redhat`, `openshift-monitoring` and `openshift-ingress` projects to the openshift-logging project. For example: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-openshift-monitoring-ingress-operators-redhat -spec: - ingress: - - from: - - podSelector: {} - - from: - - namespaceSelector: - matchLabels: - project: "openshift-operators-redhat" - - from: - - namespaceSelector: - matchLabels: - name: "openshift-monitoring" - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress ----- diff --git a/modules/cluster-logging-deploy-storage-considerations.adoc b/modules/cluster-logging-deploy-storage-considerations.adoc deleted file mode 100644 index 220e59386b62..000000000000 --- a/modules/cluster-logging-deploy-storage-considerations.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploy.adoc - -[id="cluster-logging-deploy-storage-considerations_{context}"] -= Storage considerations for the {logging-title} - -//// -An Elasticsearch index is a collection of primary shards and their corresponding replica shards. This is how Elasticsearch implements high availability internally, so there is little requirement to use hardware based mirroring RAID variants. RAID 0 can still be used to increase overall disk performance. -//// - -A persistent volume is required for each Elasticsearch deployment configuration. On {product-title} this is achieved using persistent volume claims. - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== - -The OpenShift Elasticsearch Operator names the PVCs using the Elasticsearch resource name. - -//// -Below are capacity planning guidelines for {product-title} aggregate logging. - -*Example scenario* - -Assumptions: - -. Which application: Apache -. Bytes per line: 256 -. Lines per second load on application: 1 -. Raw text data -> JSON - -Baseline (256 characters per minute -> 15KB/min) - -[cols="3,4",options="header"] -|=== -|Logging pods -|Storage Throughput - -|3 es -1 kibana -1 fluentd -| 6 pods total: 90000 x 86400 = 7,7 GB/day - -|3 es -1 kibana -11 fluentd -| 16 pods total: 225000 x 86400 = 24,0 GB/day - -|3 es -1 kibana -20 fluentd -|25 pods total: 225000 x 86400 = 32,4 GB/day -|=== - - -Calculating the total logging throughput and disk space required for your {product-title} cluster requires knowledge of your applications. For example, if one of your applications on average logs 10 lines-per-second, each 256 bytes-per-line, calculate per-application throughput and disk space as follows: - ----- - (bytes-per-line * (lines-per-second) = 2560 bytes per app per second - (2560) * (number-of-pods-per-node,100) = 256,000 bytes per second per node - 256k * (number-of-nodes) = total logging throughput per cluster per second ----- -//// - -Fluentd ships any logs from *systemd journal* and **/var/log/containers/*.log** to Elasticsearch. - -Elasticsearch requires sufficient memory to perform large merge operations. If it does not have enough memory, it becomes unresponsive. 
To avoid this problem, evaluate how much application log data you need, and allocate approximately double that amount of free storage capacity. - -By default, when storage capacity is 85% full, Elasticsearch stops allocating new data to the node. At 90%, Elasticsearch attempts to relocate existing shards from that node to other nodes if possible. But if no nodes have a free capacity below 85%, Elasticsearch effectively rejects creating new indices and becomes RED. - -[NOTE] -==== -These low and high watermark values are Elasticsearch defaults in the current release. You can modify these default values. Although the alerts use the same default values, you cannot change these values in the alerts. -==== diff --git a/modules/cluster-logging-deploying-about.adoc b/modules/cluster-logging-deploying-about.adoc deleted file mode 100644 index 1fb3db28544e..000000000000 --- a/modules/cluster-logging-deploying-about.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-deploying-about.adoc -// * serverless/monitor/cluster-logging-serverless.adoc - -:_content-type: CONCEPT -[id="cluster-logging-deploying-about_{context}"] -= About deploying and configuring the {logging-title} - -The {logging} is designed to be used with the default configuration, which is tuned for small to medium sized {product-title} clusters. - -The installation instructions that follow include a sample `ClusterLogging` custom resource (CR), which you can use to create a {logging} instance and configure your {logging} environment. - -If you want to use the default {logging} install, you can use the sample CR directly. - -If you want to customize your deployment, make changes to the sample CR as needed. The following describes the configurations you can make when installing your OpenShift Logging instance or modify after installation. See the Configuring sections for more information on working with each component, including modifications you can make outside of the `ClusterLogging` custom resource. - -[id="cluster-logging-deploy-about-config_{context}"] -== Configuring and Tuning the {logging} - -You can configure your {logging} by modifying the `ClusterLogging` custom resource deployed -in the `openshift-logging` project. - -You can modify any of the following components upon install or after install: - -Memory and CPU:: -You can adjust both the CPU and memory limits for each component by modifying the `resources` -block with valid memory and CPU values: - -[source,yaml] ----- -spec: - logStore: - elasticsearch: - resources: - limits: - cpu: - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - type: "elasticsearch" - collection: - logs: - fluentd: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: "fluentd" - visualization: - kibana: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: kibana ----- - -Elasticsearch storage:: -You can configure a persistent storage class and size for the Elasticsearch cluster using the `storageClass` `name` and `size` parameters. The Red Hat OpenShift Logging Operator creates a persistent volume claim (PVC) for each data node in the Elasticsearch cluster based on these parameters. - -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200G" ----- - -This example specifies each data node in the cluster will be bound to a PVC that -requests "200G" of "gp2" storage. 
Each primary shard will be backed by a single replica. - -[NOTE] -==== -Omitting the `storage` block results in a deployment that includes ephemeral storage only. - -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: {} ----- -==== - -Elasticsearch replication policy:: -You can set the policy that defines how Elasticsearch shards are replicated across data nodes in the cluster: - -* `FullRedundancy`. The shards for each index are fully replicated to every data node. -* `MultipleRedundancy`. The shards for each index are spread over half of the data nodes. -* `SingleRedundancy`. A single copy of each shard. Logs are always available and recoverable as long as at least two data nodes exist. -* `ZeroRedundancy`. No copies of any shards. Logs may be unavailable (or lost) in the event a node is down or fails. - -//// -Log collectors:: -You can select which log collector is deployed as a daemon set to each node in the {product-title} cluster, either: - -* Fluentd - The default log collector based on Fluentd. -* Rsyslog - Alternate log collector supported as **Tech Preview** only. - ----- - spec: - collection: - logs: - fluentd: - resources: - limits: - cpu: - memory: - requests: - cpu: - memory: - type: "fluentd" ----- -//// - -[id="cluster-logging-deploy-about-sample_{context}"] -== Sample modified ClusterLogging custom resource - -The following is an example of a `ClusterLogging` custom resource modified using the options previously described. - -.Sample modified `ClusterLogging` custom resource -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - retentionPolicy: - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 - resources: - limits: - cpu: 200m - memory: 16Gi - requests: - cpu: 200m - memory: 16Gi - storage: - storageClassName: "gp2" - size: "200G" - redundancyPolicy: "SingleRedundancy" - visualization: - type: "kibana" - kibana: - resources: - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - replicas: 1 - collection: - logs: - type: "fluentd" - fluentd: - resources: - limits: - memory: 1Gi - requests: - cpu: 200m - memory: 1Gi ----- diff --git a/modules/cluster-logging-elasticsearch-audit.adoc b/modules/cluster-logging-elasticsearch-audit.adoc deleted file mode 100644 index d08144f52214..000000000000 --- a/modules/cluster-logging-elasticsearch-audit.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-audit_{context}"] -= Forwarding audit logs to the log store - -By default, OpenShift Logging does not store audit logs in the internal {product-title} Elasticsearch log store. You can send audit logs to this log store so, for example, you can view them in Kibana. - -To send the audit logs to the default internal Elasticsearch log store, for example to view the audit logs in Kibana, you must use the Log Forwarding API. - -[IMPORTANT] -==== -The internal {product-title} Elasticsearch log store does not provide secure storage for audit logs. Verify that the system to which you forward audit logs complies with your organizational and governmental regulations and is properly secured. The {logging-title} does not comply with those regulations. 
-==== - -.Procedure - -To use the Log Forwarding API to forward audit logs to the internal Elasticsearch instance: - -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -* Create a CR to send all log types to the internal Elasticsearch instance. You can use the following example without making any changes: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: <1> - - name: all-to-default - inputRefs: - - infrastructure - - application - - audit - outputRefs: - - default ----- -<1> A pipeline defines the type of logs to forward using the specified output. The default output forwards logs to the internal Elasticsearch instance. -+ -[NOTE] -==== -You must specify all three types of logs in the pipeline: application, infrastructure, and audit. If you do not specify a log type, those logs are not stored and will be lost. -==== -+ -* If you have an existing `ClusterLogForwarder` CR, add a pipeline to the default output for the audit logs. You do not need to define the default output. For example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputs: - - name: elasticsearch-insecure - type: "elasticsearch" - url: http://elasticsearch-insecure.messaging.svc.cluster.local - insecure: true - - name: elasticsearch-secure - type: "elasticsearch" - url: https://elasticsearch-secure.messaging.svc.cluster.local - secret: - name: es-audit - - name: secureforward-offcluster - type: "fluentdForward" - url: https://secureforward.offcluster.com:24224 - secret: - name: secureforward - pipelines: - - name: container-logs - inputRefs: - - application - outputRefs: - - secureforward-offcluster - - name: infra-logs - inputRefs: - - infrastructure - outputRefs: - - elasticsearch-insecure - - name: audit-logs - inputRefs: - - audit - outputRefs: - - elasticsearch-secure - - default <1> ----- -<1> This pipeline sends the audit logs to the internal Elasticsearch instance in addition to an external instance. diff --git a/modules/cluster-logging-elasticsearch-exposing.adoc b/modules/cluster-logging-elasticsearch-exposing.adoc deleted file mode 100644 index d8df4bfaa19b..000000000000 --- a/modules/cluster-logging-elasticsearch-exposing.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-exposing_{context}"] -= Exposing the log store service as a route - -By default, the log store that is deployed with the {logging-title} is not accessible from outside the logging cluster. You can enable a route with re-encryption termination for external access to the log store service for those tools that access its data. - -Externally, you can access the log store by creating a reencrypt route, your {product-title} token and the installed log store CA certificate. Then, access a node that hosts the log store service with a cURL request that contains: - -* The `Authorization: Bearer ${token}` -* The Elasticsearch reencrypt route and an link:https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html[Elasticsearch API request]. 
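For example, the bearer token and the route host can be combined into a single request. The following is a minimal sketch, not part of the recorded procedure that follows; it assumes the reencrypt route is named `elasticsearch` in the `openshift-logging` project and that your user token is authorized to read the logs:

[source,terminal]
----
# Obtain the current session token and the exposed route host:
$ token=$(oc whoami -t)
$ routeES=$(oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host})

# Call the Elasticsearch health API through the reencrypt route:
$ curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://${routeES}/_cat/health?v"
----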
- -Internally, you can access the log store service using the log store cluster IP, -which you can get by using either of the following commands: - -[source,terminal] ----- -$ oc get service elasticsearch -o jsonpath={.spec.clusterIP} -n openshift-logging ----- - -.Example output -[source,terminal] ----- -172.30.183.229 ----- - -[source,terminal] ----- -$ oc get service elasticsearch -n openshift-logging ----- - -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -elasticsearch ClusterIP 172.30.183.229 9200/TCP 22h ----- - -You can check the cluster IP address with a command similar to the following: - -[source,terminal] ----- -$ oc exec elasticsearch-cdm-oplnhinv-1-5746475887-fj2f8 -n openshift-logging -- curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://172.30.183.229:9200/_cat/health" ----- - -.Example output -[source,terminal] ----- - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 29 100 29 0 0 108 0 --:--:-- --:--:-- --:--:-- 108 ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* You must have access to the project to be able to access to the logs. - -.Procedure - -To expose the log store externally: - -. Change to the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Extract the CA certificate from the log store and write to the *_admin-ca_* file: -+ -[source,terminal] ----- -$ oc extract secret/elasticsearch --to=. --keys=admin-ca ----- -+ -.Example output -[source,terminal] ----- -admin-ca ----- - -. Create the route for the log store service as a YAML file: -+ -.. Create a YAML file with the following: -+ -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: elasticsearch - namespace: openshift-logging -spec: - host: - to: - kind: Service - name: elasticsearch - tls: - termination: reencrypt - destinationCACertificate: | <1> ----- -<1> Add the log store CA certifcate or use the command in the next step. You do not have to set the `spec.tls.key`, `spec.tls.certificate`, and `spec.tls.caCertificate` parameters required by some reencrypt routes. - -.. Run the following command to add the log store CA certificate to the route YAML you created in the previous step: -+ -[source,terminal] ----- -$ cat ./admin-ca | sed -e "s/^/ /" >> .yaml ----- - -.. Create the route: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -.Example output -[source,terminal] ----- -route.route.openshift.io/elasticsearch created ----- -+ -//For an example reencrypt route object, see Re-encryption Termination. -//+ -//This line ^^ will be linked when the topic is available. - -. Check that the Elasticsearch service is exposed: - -.. Get the token of this service account to be used in the request: -+ -[source,terminal] ----- -$ token=$(oc whoami -t) ----- - -.. Set the *elasticsearch* route you created as an environment variable. -+ -[source,terminal] ----- -$ routeES=`oc get route elasticsearch -o jsonpath={.spec.host}` ----- - -.. 
To verify the route was successfully created, run the following command that accesses Elasticsearch through the exposed route: -+ -[source,terminal] ----- -curl -tlsv1.2 --insecure -H "Authorization: Bearer ${token}" "https://${routeES}" ----- -+ -The response appears similar to the following: -+ -.Example output -[source,json] ----- -{ - "name" : "elasticsearch-cdm-i40ktba0-1", - "cluster_name" : "elasticsearch", - "cluster_uuid" : "0eY-tJzcR3KOdpgeMJo-MQ", - "version" : { - "number" : "6.8.1", - "build_flavor" : "oss", - "build_type" : "zip", - "build_hash" : "Unknown", - "build_date" : "Unknown", - "build_snapshot" : true, - "lucene_version" : "7.7.0", - "minimum_wire_compatibility_version" : "5.6.0", - "minimum_index_compatibility_version" : "5.0.0" -}, - "" : "" -} ----- diff --git a/modules/cluster-logging-elasticsearch-ha.adoc b/modules/cluster-logging-elasticsearch-ha.adoc deleted file mode 100644 index 779ada7aaddb..000000000000 --- a/modules/cluster-logging-elasticsearch-ha.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-ha_{context}"] -= Configuring replication policy for the log store - -You can define how Elasticsearch shards are replicated across data nodes in the cluster. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit clusterlogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - logStore: - type: "elasticsearch" - elasticsearch: - redundancyPolicy: "SingleRedundancy" <1> ----- -<1> Specify a redundancy policy for the shards. The change is applied upon saving the changes. -+ -* *FullRedundancy*. Elasticsearch fully replicates the primary shards for each index -to every data node. This provides the highest safety, but at the cost of the highest amount of disk required and the poorest performance. -* *MultipleRedundancy*. Elasticsearch fully replicates the primary shards for each index to half of the data nodes. -This provides a good tradeoff between safety and performance. -* *SingleRedundancy*. Elasticsearch makes one copy of the primary shards for each index. -Logs are always available and recoverable as long as at least two data nodes exist. -Better performance than MultipleRedundancy, when using 5 or more nodes. You cannot -apply this policy on deployments of single Elasticsearch node. -* *ZeroRedundancy*. Elasticsearch does not make copies of the primary shards. -Logs might be unavailable or lost in the event a node is down or fails. -Use this mode when you are more concerned with performance than safety, or have -implemented your own disk/PVC backup/restore strategy. - -[NOTE] -==== -The number of primary shards for the index templates is equal to the number of Elasticsearch data nodes. 
-==== - diff --git a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc b/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc deleted file mode 100644 index 45744345a89c..000000000000 --- a/modules/cluster-logging-elasticsearch-persistent-storage-empty.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch-storage.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-persistent-storage-empty_{context}"] -= Configuring the log store for emptyDir storage - -You can use emptyDir with your log store, which creates an ephemeral -deployment in which all of a pod's data is lost upon restart. - -[NOTE] -==== -When using emptyDir, if log storage is restarted or redeployed, you will lose data. -==== - -.Prerequisites -//Find & replace the below according to SME feedback. -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` CR to specify emptyDir: -+ -[source,yaml] ----- - spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: {} ----- diff --git a/modules/cluster-logging-elasticsearch-retention.adoc b/modules/cluster-logging-elasticsearch-retention.adoc deleted file mode 100644 index 2673a9bfe041..000000000000 --- a/modules/cluster-logging-elasticsearch-retention.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-retention_{context}"] -= Configuring log retention time - -You can configure a _retention policy_ that specifies how long the default Elasticsearch log store keeps indices for each of the three log sources: infrastructure logs, application logs, and audit logs. - -To configure the retention policy, you set a `maxAge` parameter for each log source in the `ClusterLogging` custom resource (CR). The CR applies these values to the Elasticsearch rollover schedule, which determines when Elasticsearch deletes the rolled-over indices. - -Elasticsearch rolls over an index, moving the current index and creating a new index, when an index matches any of the following conditions: - -* The index is older than the `rollover.maxAge` value in the `Elasticsearch` CR. -* The index size is greater than 40 GB × the number of primary shards. -* The index doc count is greater than 40960 KB × the number of primary shards. - -Elasticsearch deletes the rolled-over indices based on the retention policy you configure. If you do not create a retention policy for any log sources, logs are deleted after seven days by default. - -.Prerequisites -//SME Feedback Req: There are a few instances of these for prereqs. Should OpenShift Logging here be the Red Hat OpenShift Logging Operator or the logging product name? -* The {logging-title} and the OpenShift Elasticsearch Operator must be installed. - -.Procedure - -To configure the log retention time: - -. Edit the `ClusterLogging` CR to add or modify the `retentionPolicy` parameter: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -... -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - retentionPolicy: <1> - application: - maxAge: 1d - infra: - maxAge: 7d - audit: - maxAge: 7d - elasticsearch: - nodeCount: 3 -... ----- -<1> Specify the time that Elasticsearch should retain each log source. 
Enter an integer and a time designation: weeks(w), hours(h/H), minutes(m) and seconds(s). For example, `1d` for one day. Logs older than the `maxAge` are deleted. By default, logs are retained for seven days. - -. You can verify the settings in the `Elasticsearch` custom resource (CR). -+ -For example, the Red Hat OpenShift Logging Operator updated the following `Elasticsearch` CR to configure a retention policy that includes settings to roll over active indices for the infrastructure logs every eight hours and the rolled-over indices are deleted seven days after rollover. {product-title} checks every 15 minutes to determine if the indices need to be rolled over. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "Elasticsearch" -metadata: - name: "elasticsearch" -spec: -... - indexManagement: - policies: <1> - - name: infra-policy - phases: - delete: - minAge: 7d <2> - hot: - actions: - rollover: - maxAge: 8h <3> - pollInterval: 15m <4> -... ----- -<1> For each log source, the retention policy indicates when to delete and roll over logs for that source. -<2> When {product-title} deletes the rolled-over indices. This setting is the `maxAge` you set in the `ClusterLogging` CR. -<3> The index age for {product-title} to consider when rolling over the indices. This value is determined from the `maxAge` you set in the `ClusterLogging` CR. -<4> When {product-title} checks if the indices should be rolled over. This setting is the default and cannot be changed. -+ -[NOTE] -==== -Modifying the `Elasticsearch` CR is not supported. All changes to the retention policies must be made in the `ClusterLogging` CR. -==== -+ -The OpenShift Elasticsearch Operator deploys a cron job to roll over indices for each mapping using the defined policy, scheduled using the `pollInterval`. -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -.Example output -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 4s -elasticsearch-im-audit */15 * * * * False 0 4s -elasticsearch-im-infra */15 * * * * False 0 4s ----- diff --git a/modules/cluster-logging-elasticsearch-rules.adoc b/modules/cluster-logging-elasticsearch-rules.adoc deleted file mode 100644 index 1e4ba49ad24e..000000000000 --- a/modules/cluster-logging-elasticsearch-rules.adoc +++ /dev/null @@ -1,64 +0,0 @@ -:_content-type: CONCEPT -[id="cluster-logging-elasticsearch-rules_{context}"] -= About Elasticsearch alerting rules - -You can view these alerting rules in Prometheus. - -.Alerting rules -[cols="3,6,1",options="header"] -|=== -|Alert -|Description -|Severity - - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been RED for at least 2 minutes. The cluster does not accept writes, shards may be missing, or the master - node hasn't been elected yet. -|Critical - -|`ElasticsearchClusterNotHealthy` -|The cluster health status has been YELLOW for at least 20 minutes. Some shard replicas are not allocated. -|Warning - -|`ElasticsearchDiskSpaceRunningLow` -|The cluster is expected to be out of disk space within the next 6 hours. -|Critical - -|`ElasticsearchHighFileDescriptorUsage` -|The cluster is predicted to be out of file descriptors within the next hour. -|Warning - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is high. -|Alert - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the low watermark due to low free disk space. Shards can not be allocated to this node anymore. 
You should consider adding more disk space to the node. -|Info - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the high watermark due to low free disk space. Some shards will be re-allocated to different -nodes if possible. Make sure more disk space is added to the node or drop old indices allocated to this node. -|Warning - -|`ElasticsearchNodeDiskWatermarkReached` -|The specified node has hit the flood watermark due to low free disk space. Every index that has a shard allocated on this node is enforced a read-only block. The index block must be manually released when the disk use falls below the high watermark. -|Critical - -|`ElasticsearchJVMHeapUseHigh` -|The JVM Heap usage on the specified node is too high. -|Alert - -|`ElasticsearchWriteRequestsRejectionJumps` -|Elasticsearch is experiencing an increase in write rejections on the specified node. This node might not be keeping up with the indexing speed. -|Warning - -|`AggregatedLoggingSystemCPUHigh` -|The CPU used by the system on the specified node is too high. -|Alert - -|`ElasticsearchProcessCPUHigh` -|The CPU used by Elasticsearch on the specified node is too high. -|Alert -|=== diff --git a/modules/cluster-logging-elasticsearch-scaledown.adoc b/modules/cluster-logging-elasticsearch-scaledown.adoc deleted file mode 100644 index dea23e5cc746..000000000000 --- a/modules/cluster-logging-elasticsearch-scaledown.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-log-store.adoc - -[id="cluster-logging-elasticsearch-scaledown_{context}"] -= Scaling down Elasticsearch pods - -Reducing the number of Elasticsearch pods in your cluster can result in data loss or Elasticsearch performance degradation. - -If you scale down, you should scale down by one pod at a time and allow the cluster to re-balance the shards and replicas. After the Elasticsearch health status returns to `green`, you can scale down by another pod. - -[NOTE] -==== -If your Elasticsearch cluster is set to `ZeroRedundancy`, you should not scale down your Elasticsearch pods. -==== diff --git a/modules/cluster-logging-elasticsearch-storage.adoc b/modules/cluster-logging-elasticsearch-storage.adoc deleted file mode 100644 index b850964b9441..000000000000 --- a/modules/cluster-logging-elasticsearch-storage.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-storage_{context}"] -= Configuring persistent storage for the log store - -Elasticsearch requires persistent storage. The faster the storage, the faster the Elasticsearch performance. - -[WARNING] -==== -Using NFS storage as a volume or a persistent volume (or via NAS such as -Gluster) is not supported for Elasticsearch storage, as Lucene relies on file -system behavior that NFS does not supply. Data corruption and other problems can -occur. -==== - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Edit the `ClusterLogging` CR to specify that each data node in the cluster is bound to a Persistent Volume Claim. -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" -# ... 
-spec: - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: "gp2" - size: "200G" ----- - -This example specifies each data node in the cluster is bound to a Persistent Volume Claim that requests "200G" of AWS General Purpose SSD (gp2) storage. - -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: block` in the `LocalVolume` object. Elasticsearch cannot use raw block volumes. -==== diff --git a/modules/cluster-logging-elasticsearch-tolerations.adoc b/modules/cluster-logging-elasticsearch-tolerations.adoc deleted file mode 100644 index e64a8339de5a..000000000000 --- a/modules/cluster-logging-elasticsearch-tolerations.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-elasticsearch-tolerations_{context}"] -= Using tolerations to control the log store pod placement - -You can control which nodes the log store pods runs on and prevent -other workloads from using those nodes by using tolerations on the pods. - -You apply tolerations to the log store pods through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the log store pods can run on that node. - -By default, the log store pods have the following toleration: - -[source,yaml] ----- -tolerations: -- effect: "NoExecute" - key: "node.kubernetes.io/disk-pressure" - operator: "Exists" ----- - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the OpenShift Logging pods: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 elasticsearch=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `elasticsearch`, value `node`, and taint effect `NoExecute`. -Nodes with the `NoExecute` effect schedule only pods that match the taint and remove existing pods -that do not match. - -. Edit the `logstore` section of the `ClusterLogging` CR to configure a toleration for the Elasticsearch pods: -+ -[source,yaml] ----- - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 1 - tolerations: - - key: "elasticsearch" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require a taint with the key `elasticsearch` to be present on the Node. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration could be scheduled onto `node1`. 
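After applying the taint and the matching toleration, you might want to confirm the resulting placement. The following is a minimal sketch, assuming the `node1` taint from the example above; the `grep` filters are only illustrative:

[source,terminal]
----
# Confirm the taint is present on the node (should show elasticsearch=node:NoExecute):
$ oc describe node node1 | grep Taints

# Check which nodes the log store pods were scheduled on:
$ oc get pods -n openshift-logging -o wide | grep elasticsearch
----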
diff --git a/modules/cluster-logging-eventrouter-about.adoc b/modules/cluster-logging-eventrouter-about.adoc deleted file mode 100644 index 690662b9f531..000000000000 --- a/modules/cluster-logging-eventrouter-about.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-eventrouter-about_{context}"] -= About event routing - -The Event Router is a pod that watches {product-title} events so they can be collected by the {logging-title}. -The Event Router collects events from all projects and writes them to `STDOUT`. Fluentd collects those events and forwards them into the {product-title} Elasticsearch instance. Elasticsearch indexes the events to the `infra` index. - -You must manually deploy the Event Router. diff --git a/modules/cluster-logging-eventrouter-deploy.adoc b/modules/cluster-logging-eventrouter-deploy.adoc deleted file mode 100644 index f9392832eac4..000000000000 --- a/modules/cluster-logging-eventrouter-deploy.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-eventrouter.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-eventrouter-deploy_{context}"] -= Deploying and configuring the Event Router - -Use the following steps to deploy the Event Router into your cluster. You should always deploy the Event Router to the `openshift-logging` project to ensure it collects events from across the cluster. - -The following Template object creates the service account, cluster role, and cluster role binding required for the Event Router. The template also configures and deploys the Event Router pod. You can use this template without making changes, or change the deployment object CPU and memory requests. - -.Prerequisites - -* You need proper permissions to create service accounts and update cluster role bindings. For example, you can run the following template with a user that has the *cluster-admin* role. - -* The {logging-title} must be installed. - -.Procedure - -. Create a template for the Event Router: -+ -[source,yaml] ----- -kind: Template -apiVersion: template.openshift.io/v1 -metadata: - name: eventrouter-template - annotations: - description: "A pod forwarding kubernetes events to OpenShift Logging stack." 
- tags: "events,EFK,logging,cluster-logging" -objects: - - kind: ServiceAccount <1> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - - kind: ClusterRole <2> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader - rules: - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "watch", "list"] - - kind: ClusterRoleBinding <3> - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: event-reader-binding - subjects: - - kind: ServiceAccount - name: eventrouter - namespace: ${NAMESPACE} - roleRef: - kind: ClusterRole - name: event-reader - - kind: ConfigMap <4> - apiVersion: v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - data: - config.json: |- - { - "sink": "stdout" - } - - kind: Deployment <5> - apiVersion: apps/v1 - metadata: - name: eventrouter - namespace: ${NAMESPACE} - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - spec: - selector: - matchLabels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - replicas: 1 - template: - metadata: - labels: - component: "eventrouter" - logging-infra: "eventrouter" - provider: "openshift" - name: eventrouter - spec: - serviceAccount: eventrouter - containers: - - name: kube-eventrouter - image: ${IMAGE} - imagePullPolicy: IfNotPresent - resources: - requests: - cpu: ${CPU} - memory: ${MEMORY} - volumeMounts: - - name: config-volume - mountPath: /etc/eventrouter - volumes: - - name: config-volume - configMap: - name: eventrouter -parameters: - - name: IMAGE <6> - displayName: Image - value: "registry.redhat.io/openshift-logging/eventrouter-rhel8:v0.4" - - name: CPU <7> - displayName: CPU - value: "100m" - - name: MEMORY <8> - displayName: Memory - value: "128Mi" - - name: NAMESPACE - displayName: Namespace - value: "openshift-logging" <9> ----- -<1> Creates a Service Account in the `openshift-logging` project for the Event Router. -<2> Creates a ClusterRole to monitor for events in the cluster. -<3> Creates a ClusterRoleBinding to bind the ClusterRole to the service account. -<4> Creates a config map in the `openshift-logging` project to generate the required `config.json` file. -<5> Creates a deployment in the `openshift-logging` project to generate and configure the Event Router pod. -<6> Specifies the image, identified by a tag such as `v0.4`. -<7> Specifies the minimum amount of CPU to allocate to the Event Router pod. Defaults to `100m`. -<8> Specifies the minimum amount of memory to allocate to the Event Router pod. Defaults to `128Mi`. -<9> Specifies the `openshift-logging` project to install objects in. - -. Use the following command to process and apply the template: -+ -[source,terminal] ----- -$ oc process -f | oc apply -n openshift-logging -f - ----- -+ -For example: -+ -[source,terminal] ----- -$ oc process -f eventrouter.yaml | oc apply -n openshift-logging -f - ----- -+ -.Example output -[source,terminal] ----- -serviceaccount/eventrouter created -clusterrole.authorization.openshift.io/event-reader created -clusterrolebinding.authorization.openshift.io/event-reader-binding created -configmap/eventrouter created -deployment.apps/eventrouter created ----- - -. Validate that the Event Router installed in the `openshift-logging` project: -+ -.. View the new Event Router pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=eventrouter -o name -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -pod/cluster-logging-eventrouter-d649f97c8-qvv8r ----- - -.. 
View the events collected by the Event Router: -+ -[source,terminal] ----- -$ oc logs -n openshift-logging ----- -+ -For example: -+ -[source,terminal] ----- -$ oc logs cluster-logging-eventrouter-d649f97c8-qvv8r -n openshift-logging ----- -+ -.Example output -[source,terminal] ----- -{"verb":"ADDED","event":{"metadata":{"name":"openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","namespace":"openshift-service-catalog-removed","selfLink":"/api/v1/namespaces/openshift-service-catalog-removed/events/openshift-service-catalog-controller-manager-remover.1632d931e88fcd8f","uid":"787d7b26-3d2f-4017-b0b0-420db4ae62c0","resourceVersion":"21399","creationTimestamp":"2020-09-08T15:40:26Z"},"involvedObject":{"kind":"Job","namespace":"openshift-service-catalog-removed","name":"openshift-service-catalog-controller-manager-remover","uid":"fac9f479-4ad5-4a57-8adc-cb25d3d9cf8f","apiVersion":"batch/v1","resourceVersion":"21280"},"reason":"Completed","message":"Job completed","source":{"component":"job-controller"},"firstTimestamp":"2020-09-08T15:40:26Z","lastTimestamp":"2020-09-08T15:40:26Z","count":1,"type":"Normal"}} ----- -+ -You can also use Kibana to view events by creating an index pattern using the Elasticsearch `infra` index. diff --git a/modules/cluster-logging-export-fields.adoc b/modules/cluster-logging-export-fields.adoc deleted file mode 100644 index aed1b489be72..000000000000 --- a/modules/cluster-logging-export-fields.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-export-fields-about_{context}"] -= About exporting fields - -The logging system exports fields. Exported fields are present in the log records and are available for searching from Elasticsearch and Kibana. diff --git a/modules/cluster-logging-exported-fields-kubernetes.adoc b/modules/cluster-logging-exported-fields-kubernetes.adoc deleted file mode 100644 index 691a0900e551..000000000000 --- a/modules/cluster-logging-exported-fields-kubernetes.adoc +++ /dev/null @@ -1,272 +0,0 @@ -[id="cluster-logging-exported-fields-kubernetes_{context}"] - -// Normally, the following title would be an H1 prefixed with an `=`. However, because the following content is auto-generated at https://github.com/ViaQ/documentation/blob/main/src/data_model/public/kubernetes.part.adoc and pasted here, it is more efficient to use it as-is with no modifications. Therefore, to "realign" the content, I am going to prefix the title with `==` and use `include::modules/cluster-logging-exported-fields-kubernetes.adoc[leveloffset=0]` in the assembly file. - -// DO NOT MODIFY THE FOLLOWING CONTENT. 
Instead, update https://github.com/ViaQ/documentation/blob/main/src/data_model/model/kubernetes.yaml and run `make` as instructed here: https://github.com/ViaQ/documentation - - -== kubernetes - -The namespace for Kubernetes-specific metadata - -[horizontal] -Data type:: group - -=== kubernetes.pod_name - -The name of the pod - -[horizontal] -Data type:: keyword - - -=== kubernetes.pod_id - -The Kubernetes ID of the pod - -[horizontal] -Data type:: keyword - - -=== kubernetes.namespace_name - -The name of the namespace in Kubernetes - -[horizontal] -Data type:: keyword - - -=== kubernetes.namespace_id - -The ID of the namespace in Kubernetes - -[horizontal] -Data type:: keyword - - -=== kubernetes.host - -The Kubernetes node name - -[horizontal] -Data type:: keyword - - - -=== kubernetes.container_name - -The name of the container in Kubernetes - -[horizontal] -Data type:: keyword - - - -=== kubernetes.annotations - -Annotations associated with the Kubernetes object - -[horizontal] -Data type:: group - - -=== kubernetes.labels - -Labels present on the original Kubernetes Pod - -[horizontal] -Data type:: group - - - - - - -=== kubernetes.event - -The Kubernetes event obtained from the Kubernetes master API. This event description loosely follows `type Event` in link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#event-v1-core[Event v1 core]. - -[horizontal] -Data type:: group - -==== kubernetes.event.verb - -The type of event, `ADDED`, `MODIFIED`, or `DELETED` - -[horizontal] -Data type:: keyword -Example value:: `ADDED` - - -==== kubernetes.event.metadata - -Information related to the location and time of the event creation - -[horizontal] -Data type:: group - -===== kubernetes.event.metadata.name - -The name of the object that triggered the event creation - -[horizontal] -Data type:: keyword -Example value:: `java-mainclass-1.14d888a4cfc24890` - - -===== kubernetes.event.metadata.namespace - -The name of the namespace where the event originally occurred. Note that it differs from `kubernetes.namespace_name`, which is the namespace where the `eventrouter` application is deployed. - -[horizontal] -Data type:: keyword -Example value:: `default` - - -===== kubernetes.event.metadata.selfLink - -A link to the event - -[horizontal] -Data type:: keyword -Example value:: `/api/v1/namespaces/javaj/events/java-mainclass-1.14d888a4cfc24890` - - -===== kubernetes.event.metadata.uid - -The unique ID of the event - -[horizontal] -Data type:: keyword -Example value:: `d828ac69-7b58-11e7-9cf5-5254002f560c` - - -===== kubernetes.event.metadata.resourceVersion - -A string that identifies the server's internal version of the event. Clients can use this string to determine when objects have changed. - -[horizontal] -Data type:: integer -Example value:: `311987` - - - -==== kubernetes.event.involvedObject - -The object that the event is about. - -[horizontal] -Data type:: group - -===== kubernetes.event.involvedObject.kind - -The type of object - -[horizontal] -Data type:: keyword -Example value:: `ReplicationController` - - -===== kubernetes.event.involvedObject.namespace - -The namespace name of the involved object. Note that it may differ from `kubernetes.namespace_name`, which is the namespace where the `eventrouter` application is deployed. 
- -[horizontal] -Data type:: keyword -Example value:: `default` - - -===== kubernetes.event.involvedObject.name - -The name of the object that triggered the event - -[horizontal] -Data type:: keyword -Example value:: `java-mainclass-1` - - -===== kubernetes.event.involvedObject.uid - -The unique ID of the object - -[horizontal] -Data type:: keyword -Example value:: `e6bff941-76a8-11e7-8193-5254002f560c` - - -===== kubernetes.event.involvedObject.apiVersion - -The version of kubernetes master API - -[horizontal] -Data type:: keyword -Example value:: `v1` - - -===== kubernetes.event.involvedObject.resourceVersion - -A string that identifies the server's internal version of the pod that triggered the event. Clients can use this string to determine when objects have changed. - -[horizontal] -Data type:: keyword -Example value:: `308882` - - - -==== kubernetes.event.reason - -A short machine-understandable string that gives the reason for generating this event - -[horizontal] -Data type:: keyword -Example value:: `SuccessfulCreate` - - -==== kubernetes.event.source_component - -The component that reported this event - -[horizontal] -Data type:: keyword -Example value:: `replication-controller` - - -==== kubernetes.event.firstTimestamp - -The time at which the event was first recorded - -[horizontal] -Data type:: date -Example value:: `2017-08-07 10:11:57.000000000 Z` - - -==== kubernetes.event.count - -The number of times this event has occurred - -[horizontal] -Data type:: integer -Example value:: `1` - - -==== kubernetes.event.type - -The type of event, `Normal` or `Warning`. New types could be added in the future. - -[horizontal] -Data type:: keyword -Example value:: `Normal` - -== OpenShift - -The namespace for openshift-logging specific metadata - -[horizontal] -Data type:: group - -=== openshift.labels - -Labels added by the Cluster Log Forwarder configuration - -[horizontal] -Data type:: group diff --git a/modules/cluster-logging-exported-fields-top-level-fields.adoc b/modules/cluster-logging-exported-fields-top-level-fields.adoc deleted file mode 100644 index 882c12ec5bfd..000000000000 --- a/modules/cluster-logging-exported-fields-top-level-fields.adoc +++ /dev/null @@ -1,116 +0,0 @@ -[id="cluster-logging-exported-fields-top-level-fields_{context}"] - -// Normally, the following title would be an H1 prefixed with an `=`. However, because the following content is auto-generated at https://github.com/ViaQ/documentation/blob/main/src/data_model/public/top-level.part.adoc and pasted here, it is more efficient to use it as-is with no modifications. Therefore, to "realign" the content, I am going to prefix the title with `==` and use `include::modules/cluster-logging-exported-fields-top-level-fields.adoc[leveloffset=0]` in the assembly file. - -// DO NOT MODIFY THE FOLLOWING CONTENT. Instead, update https://github.com/ViaQ/documentation/blob/main/src/data_model/model/top-level.yaml and run `make` as instructed here: https://github.com/ViaQ/documentation - -//The top-level fields can be present in every record. The descriptions for fields that are optional begin with "Optional:" - - -The top level fields may be present in every record. - -== message - -The original log entry text, UTF-8 encoded. This field may be absent or empty if a non-empty `structured` field is present. See the description of `structured` for more. - -[horizontal] -Data type:: text -Example value:: `HAPPY` - -== structured - -Original log entry as a structured object. 
This field may be present if the forwarder was configured to parse structured JSON logs. If the original log entry was a valid structured log, this field will contain an equivalent JSON structure. Otherwise this field will be empty or absent, and the `message` field will contain the original log message. The `structured` field can have any subfields that are included in the log message, there are no restrictions defined here. - -[horizontal] -Data type:: group -Example value:: map[message:starting fluentd worker pid=21631 ppid=21618 worker=0 pid:21631 ppid:21618 worker:0] - -== @timestamp - -A UTC value that marks when the log payload was created or, if the creation time is not known, when the log payload was first collected. The “@” prefix denotes a field that is reserved for a particular use. By default, most tools look for “@timestamp” with ElasticSearch. - -[horizontal] -Data type:: date -Example value:: `2015-01-24 14:06:05.071000000 Z` - -== hostname - -The name of the host where this log message originated. In a Kubernetes cluster, this is the same as `kubernetes.host`. - -[horizontal] -Data type:: keyword - -== ipaddr4 - -The IPv4 address of the source server. Can be an array. - -[horizontal] -Data type:: ip - -== ipaddr6 - -The IPv6 address of the source server, if available. Can be an array. - -[horizontal] -Data type:: ip - -== level - -The logging level from various sources, including `rsyslog(severitytext property)`, a Python logging module, and others. - -The following values come from link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`syslog.h`], and are preceded by their http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[numeric equivalents]: - -* `0` = `emerg`, system is unusable. -* `1` = `alert`, action must be taken immediately. -* `2` = `crit`, critical conditions. -* `3` = `err`, error conditions. -* `4` = `warn`, warning conditions. -* `5` = `notice`, normal but significant condition. -* `6` = `info`, informational. -* `7` = `debug`, debug-level messages. - -The two following values are not part of `syslog.h` but are widely used: - -* `8` = `trace`, trace-level messages, which are more verbose than `debug` messages. -* `9` = `unknown`, when the logging system gets a value it doesn't recognize. - -Map the log levels or priorities of other logging systems to their nearest match in the preceding list. For example, from link:https://docs.python.org/2.7/library/logging.html#logging-levels[python logging], you can match `CRITICAL` with `crit`, `ERROR` with `err`, and so on. - -[horizontal] -Data type:: keyword -Example value:: `info` - -== pid - -The process ID of the logging entity, if available. - -[horizontal] -Data type:: keyword - -== service - -The name of the service associated with the logging entity, if available. For example, syslog's `APP-NAME` and rsyslog's `programname` properties are mapped to the service field. - -[horizontal] -Data type:: keyword - -== tags - -Optional. An operator-defined list of tags placed on each log by the collector or normalizer. The payload can be a string with whitespace-delimited string tokens or a JSON list of string tokens. - -[horizontal] -Data type:: text - -== file - -The path to the log file from which the collector reads this log entry. Normally, this is a path in the `/var/log` file system of a cluster node. - -[horizontal] -Data type:: text - -== offset - -The offset value. 
Can represent bytes to the start of the log line in the file (zero- or one-based), or log line numbers (zero- or one-based), so long as the values are strictly monotonically increasing in the context of a single log file. The values are allowed to wrap, representing a new version of the log file (rotation). - -[horizontal] -Data type:: long diff --git a/modules/cluster-logging-feature-reference.adoc b/modules/cluster-logging-feature-reference.adoc deleted file mode 100644 index cce9af7d6638..000000000000 --- a/modules/cluster-logging-feature-reference.adoc +++ /dev/null @@ -1,170 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: REFERENCE -[id="cluster-logging-about-vector_{context}"] -= About Vector -Vector is a log collector offered as an alternative to Fluentd for the {logging}. - -The following outputs are supported: - -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `kafka`. A Kafka broker. The `kafka` output can use an unsecured or TLS connection. - -* `loki`. Loki, a horizontally scalable, highly available, multitenant log aggregation system. - - -[id="cluster-logging-vector-enable_{context}"] -== Enabling Vector -Vector is not enabled by default. Use the following steps to enable Vector on your {product-title} cluster. - -[IMPORTANT] -==== -Vector does not support FIPS Enabled Clusters. -==== - -.Prerequisites - -* {product-title}: 4.13 -* {logging-title-uc}: 5.4 -* FIPS disabled - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- - -. Add a `logging.openshift.io/preview-vector-collector: enabled` annotation to the `ClusterLogging` custom resource (CR). - -. Add `vector` as a collection type to the `ClusterLogging` custom resource (CR). 
- -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: "ClusterLogging" - metadata: - name: "instance" - namespace: "openshift-logging" - annotations: - logging.openshift.io/preview-vector-collector: enabled - spec: - collection: - logs: - type: "vector" - vector: {} ----- - -[role="_additional-resources"] -.Additional resources -* link:https://vector.dev/docs/about/what-is-vector/[Vector Documentation] - -== Collector features - -.Log Sources -[options="header"] -|=============================================================== -| Feature | Fluentd | Vector -| App container logs | ✓ | ✓ -| App-specific routing | ✓ | ✓ -| App-specific routing by namespace | ✓ | ✓ -| Infra container logs | ✓ | ✓ -| Infra journal logs | ✓ | ✓ -| Kube API audit logs | ✓ | ✓ -| OpenShift API audit logs | ✓ | ✓ -| Open Virtual Network (OVN) audit logs| ✓ | ✓ -|=============================================================== - -.Outputs -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Elasticsearch v5-v7 | ✓ | ✓ -| Fluent forward | ✓ | -| Syslog RFC3164 | ✓ | ✓ (Logging 5.7+) -| Syslog RFC5424 | ✓ | ✓ (Logging 5.7+) -| Kafka | ✓ | ✓ -| Cloudwatch | ✓ | ✓ -| Loki | ✓ | ✓ -| HTTP | ✓ | ✓ (Logging 5.7+) -|========================================================== - -.Authorization and Authentication -[options="header"] -|================================================================= -| Feature | Fluentd | Vector -| Elasticsearch certificates | ✓ | ✓ -| Elasticsearch username / password | ✓ | ✓ -| Cloudwatch keys | ✓ | ✓ -| Cloudwatch STS | ✓ | ✓ -| Kafka certificates | ✓ | ✓ -| Kafka username / password | ✓ | ✓ -| Kafka SASL | ✓ | ✓ -| Loki bearer token | ✓ | ✓ -|================================================================= - -.Normalizations and Transformations -[options="header"] -|============================================================================ -| Feature | Fluentd | Vector -| Viaq data model - app | ✓ | ✓ -| Viaq data model - infra | ✓ | ✓ -| Viaq data model - infra(journal) | ✓ | ✓ -| Viaq data model - Linux audit | ✓ | ✓ -| Viaq data model - kube-apiserver audit | ✓ | ✓ -| Viaq data model - OpenShift API audit | ✓ | ✓ -| Viaq data model - OVN | ✓ | ✓ -| Loglevel Normalization | ✓ | ✓ -| JSON parsing | ✓ | ✓ -| Structured Index | ✓ | ✓ -| Multiline error detection | ✓ | ✓ -| Multicontainer / split indices | ✓ | ✓ -| Flatten labels | ✓ | ✓ -| CLF static labels | ✓ | ✓ -|============================================================================ - -.Tuning -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Fluentd readlinelimit | ✓ | -| Fluentd buffer | ✓ | -| - chunklimitsize | ✓ | -| - totallimitsize | ✓ | -| - overflowaction | ✓ | -| - flushthreadcount | ✓ | -| - flushmode | ✓ | -| - flushinterval | ✓ | -| - retrywait | ✓ | -| - retrytype | ✓ | -| - retrymaxinterval | ✓ | -| - retrytimeout | ✓ | -|========================================================== - -.Visibility -[options="header"] -|===================================================== -| Feature | Fluentd | Vector -| Metrics | ✓ | ✓ -| Dashboard | ✓ | ✓ -| Alerts | ✓ | -|===================================================== - -.Miscellaneous -[options="header"] -|=========================================================== -| Feature | Fluentd | Vector -| Global proxy support | ✓ | ✓ -| x86 support | ✓ | ✓ -| ARM support | ✓ | ✓ -ifndef::openshift-rosa[] -| {ibmpowerProductName} support | ✓ 
| ✓ -| {ibmzProductName} support | ✓ | ✓ -endif::openshift-rosa[] -| IPv6 support | ✓ | ✓ -| Log event buffering | ✓ | -| Disconnected Cluster | ✓ | ✓ -|=========================================================== diff --git a/modules/cluster-logging-forwarding-about.adoc b/modules/cluster-logging-forwarding-about.adoc deleted file mode 100644 index efe67f586b8f..000000000000 --- a/modules/cluster-logging-forwarding-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-forwarding-about_{context}"] -= About log forwarding - -By default, the {logging-title} sends logs to the default internal Elasticsearch log store, defined in the `ClusterLogging` custom resource (CR). If you want to forward logs to other log aggregators, you can use the log forwarding features to send logs to specific endpoints within or outside your cluster. diff --git a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc b/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc deleted file mode 100644 index 6469db82413d..000000000000 --- a/modules/cluster-logging-forwarding-json-logs-to-the-default-elasticsearch.adoc +++ /dev/null @@ -1,56 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-json-logs-to-the-default-elasticsearch_{context}"] -= Forwarding JSON logs to the Elasticsearch log store - -For an Elasticsearch log store, if your JSON log entries _follow different schemas_, configure the `ClusterLogForwarder` custom resource (CR) to group each JSON schema into a single output definition. This way, Elasticsearch uses a separate index for each schema. - -[IMPORTANT] -==== -Because forwarding different schemas to the same index can cause type conflicts and cardinality problems, you must perform this configuration before you forward data to the Elasticsearch store. - -To avoid performance issues associated with having too many indices, consider keeping the number of possible schemas low by standardizing to common schemas. -==== - -.Procedure - -. Add the following snippet to your `ClusterLogForwarder` CR YAML file. -+ -[source,yaml] ----- -outputDefaults: - elasticsearch: - structuredTypeKey: - structuredTypeName: -pipelines: -- inputRefs: - - application - outputRefs: default - parse: json ----- - -. Optional: Use `structuredTypeKey` to specify one of the log record fields, as described in the preceding topic, xref:../logging/cluster-logging-enabling-json-logging.adoc#cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_cluster-logging-enabling-json-logging[Configuring JSON log data for Elasticsearch]. Otherwise, remove this line. - -. Optional: Use `structuredTypeName` to specify a ``, as described in the preceding topic, xref:../logging/cluster-logging-enabling-json-logging.adoc#cluster-logging-configuration-of-json-log-data-for-default-elasticsearch_cluster-logging-enabling-json-logging[Configuring JSON log data for Elasticsearch]. Otherwise, remove this line. -+ -[IMPORTANT] -==== -To parse JSON logs, you must set either `structuredTypeKey` or `structuredTypeName`, or both `structuredTypeKey` and `structuredTypeName`. -==== -+ -. For `inputRefs`, specify which log types to forward by using that pipeline, such as `application,` `infrastructure`, or `audit`. - -. Add the `parse: json` element to pipelines. - -. 
Create the CR object: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -The Red Hat OpenShift Logging Operator redeploys the Fluentd pods. However, if they do not redeploy, delete the Fluentd pods to force them to redeploy. -+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/cluster-logging-forwarding-lokistack.adoc b/modules/cluster-logging-forwarding-lokistack.adoc deleted file mode 100644 index 1d9455537eac..000000000000 --- a/modules/cluster-logging-forwarding-lokistack.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-lokistack_{context}"] -= Forwarding logs to LokiStack - -To configure log forwarding to the LokiStack gateway, you must create a ClusterLogging custom resource (CR). - -.Prerequisites - -* {logging-title-uc}: 5.5 and later -* `Loki Operator` Operator - -.Procedure - -. Create or edit a YAML file that defines the `ClusterLogging` custom resource (CR): - -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- diff --git a/modules/cluster-logging-forwarding-separate-indices.adoc b/modules/cluster-logging-forwarding-separate-indices.adoc deleted file mode 100644 index f4fae73a0eca..000000000000 --- a/modules/cluster-logging-forwarding-separate-indices.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-external -:_content-type: PROCEDURE -[id="cluster-logging-forwarding-separate-indices_{context}"] -= Forwarding JSON logs from containers in the same pod to separate indices - -You can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. Logs are written to indices with a prefix of `app-`. It is recommended that Elasticsearch be configured with aliases to accommodate this. - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -.Prerequisites - -* {logging-title-uc}: 5.5 - -.Procedure -. Create or edit a YAML file that defines the `ClusterLogForwarder` CR object: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - outputDefaults: - elasticsearch: - enableStructuredContainerLogs: true <1> - pipelines: - - inputRefs: - - application - name: application-logs - outputRefs: - - default - parse: json ----- -<1> Enables multi-container outputs. - -. 
Create or edit a YAML file that defines the `Pod` CR object:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  annotations:
-    containerType.logging.openshift.io/heavy: heavy <1>
-    containerType.logging.openshift.io/low: low
-spec:
-  containers:
-  - name: heavy <2>
-    image: heavyimage
-  - name: low
-    image: lowimage
-----
-<1> Format: `containerType.logging.openshift.io/<container-name>: <index>`
-<2> Annotation names must match container names.
-
-[WARNING]
-====
-This configuration might significantly increase the number of shards on the cluster.
-====
-
-.Additional resources
-* link:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/[Kubernetes Annotations]
diff --git a/modules/cluster-logging-json-log-forwarding.adoc b/modules/cluster-logging-json-log-forwarding.adoc
deleted file mode 100644
index a080fb425f25..000000000000
--- a/modules/cluster-logging-json-log-forwarding.adoc
+++ /dev/null
@@ -1,48 +0,0 @@
-[id="cluster-logging-json-log-forwarding_{context}"]
-= Parsing JSON logs
-
-Logs, including JSON logs, are usually represented as a string inside the `message` field, which makes it hard for users to query specific fields inside a JSON document. OpenShift Logging's Log Forwarding API enables you to parse JSON logs into a structured object and forward them to either OpenShift Logging-managed Elasticsearch or any other third-party system supported by the Log Forwarding API.
-
-To illustrate how this works, suppose that you have the following structured JSON log entry.
-
-.Example structured JSON log entry
-[source,yaml]
-----
-{"level":"info","name":"fred","home":"bedrock"}
-----
-
-Normally, the `ClusterLogForwarder` custom resource (CR) forwards that log entry in the `message` field. The `message` field contains the JSON-quoted string equivalent of the JSON log entry, as shown in the following example.
-
-.Example `message` field
-[source,yaml]
-----
-{"message":"{\"level\":\"info\",\"name\":\"fred\",\"home\":\"bedrock\"}",
- "more fields..."}
-----
-
-To enable parsing of JSON logs, you add `parse: json` to a pipeline in the `ClusterLogForwarder` CR, as shown in the following example.
-
-.Example snippet showing `parse: json`
-[source,yaml]
-----
-pipelines:
-- inputRefs: [ application ]
-  outputRefs: myFluentd
-  parse: json
-----
-
-When you enable parsing JSON logs by using `parse: json`, the CR copies the JSON-structured log entry into a `structured` field, as shown in the following example. This does not modify the original `message` field.
-
-.Example `structured` output containing the structured JSON log entry
-[source,yaml]
-----
-{"structured": { "level": "info", "name": "fred", "home": "bedrock" },
- "more fields..."}
-----
-
-[IMPORTANT]
-====
-If the log entry does not contain valid structured JSON, the `structured` field will be absent.
-====
-
-To enable parsing JSON logs for specific logging platforms, see xref:../logging/cluster-logging-external.adoc#cluster-logging-external[Forwarding logs to third-party systems].
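For context, the `parse: json` snippet above is only a fragment; it normally sits inside a complete `ClusterLogForwarder` CR together with at least one output. The following is a minimal sketch of such a CR, assuming the `instance` name and `openshift-logging` namespace used throughout these modules; the `fluentd-server` output name and its URL are hypothetical placeholders rather than values taken from this module.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance                # forwarder CR name used in these modules
  namespace: openshift-logging  # namespace used in these modules
spec:
  outputs:
  - name: fluentd-server        # hypothetical external Fluentd endpoint
    type: fluentdForward
    url: 'tls://fluentdserver.example.com:24224'
  pipelines:
  - name: parsed-app-logs
    inputRefs:
    - application
    outputRefs:
    - fluentd-server
    parse: json                 # copies valid JSON from the message field into structured
----

With a configuration like this, application log entries whose `message` field contains valid JSON are forwarded with the additional `structured` field described above, while entries that are not valid JSON keep only the original `message` field.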
diff --git a/modules/cluster-logging-json-logging-about.adoc b/modules/cluster-logging-json-logging-about.adoc deleted file mode 100644 index db6c394bdc8a..000000000000 --- a/modules/cluster-logging-json-logging-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-json-logging-about_{context}"] -= About JSON {product-title} Logging - -You can use JSON logging to configure the Log Forwarding API to parse JSON strings into a structured object. You can perform the following tasks: - -* Parse JSON logs -* Configure JSON log data for Elasticsearch -* Forward JSON logs to the Elasticsearch log store diff --git a/modules/cluster-logging-kibana-limits.adoc b/modules/cluster-logging-kibana-limits.adoc deleted file mode 100644 index aac3c3a66374..000000000000 --- a/modules/cluster-logging-kibana-limits.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-kibana-limits_{context}"] -= Configure the CPU and memory limits for the log visualizer - -You can adjust both the CPU and memory limits for the pod that hosts the log visualizer. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - visualization: - type: "kibana" - kibana: - replicas: - resources: <1> - limits: - memory: 1Gi - requests: - cpu: 500m - memory: 1Gi - proxy: <2> - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi ----- -<1> Specify the CPU and memory limits to allocate for each node. -<2> Specify the CPU and memory limits to allocate to the Kibana proxy. diff --git a/modules/cluster-logging-kibana-scaling.adoc b/modules/cluster-logging-kibana-scaling.adoc deleted file mode 100644 index 9f56f0f93f23..000000000000 --- a/modules/cluster-logging-kibana-scaling.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-kibana-scaling_{context}"] -= Scaling redundancy for the log visualizer nodes - -You can scale the pod that hosts the log visualizer for redundancy. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - -.... - -spec: - visualization: - type: "kibana" - kibana: - replicas: 1 <1> ----- -<1> Specify the number of Kibana nodes. diff --git a/modules/cluster-logging-kibana-tolerations.adoc b/modules/cluster-logging-kibana-tolerations.adoc deleted file mode 100644 index f74d68a9061b..000000000000 --- a/modules/cluster-logging-kibana-tolerations.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-kibana-tolerations_{context}"] -= Using tolerations to control the log visualizer pod placement - -You can control the node where the log visualizer pod runs and prevent -other workloads from using those nodes by using tolerations on the pods. 
- -You apply tolerations to the log visualizer pod through the `ClusterLogging` custom resource (CR) -and apply taints to a node through the node specification. A taint on a node is a `key:value pair` that -instructs the node to repel all pods that do not tolerate the taint. Using a specific `key:value` pair -that is not on other pods ensures only the Kibana pod can run on that node. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Use the following command to add a taint to a node where you want to schedule the log visualizer pod: -+ -[source,terminal] ----- -$ oc adm taint nodes =: ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 kibana=node:NoExecute ----- -+ -This example places a taint on `node1` that has key `kibana`, value `node`, and taint effect `NoExecute`. -You must use the `NoExecute` taint effect. `NoExecute` schedules only pods that match the taint and remove existing pods -that do not match. - -. Edit the `visualization` section of the `ClusterLogging` CR to configure a toleration for the Kibana pod: -+ -[source,yaml] ----- - visualization: - type: "kibana" - kibana: - tolerations: - - key: "kibana" <1> - operator: "Exists" <2> - effect: "NoExecute" <3> - tolerationSeconds: 6000 <4> ----- -<1> Specify the key that you added to the node. -<2> Specify the `Exists` operator to require the `key`/`value`/`effect` parameters to match. -<3> Specify the `NoExecute` effect. -<4> Optionally, specify the `tolerationSeconds` parameter to set how long a pod can remain bound to a node before being evicted. - - -This toleration matches the taint created by the `oc adm taint` command. A pod with this toleration would be able to schedule onto `node1`. diff --git a/modules/cluster-logging-log-store-status-comp.adoc b/modules/cluster-logging-log-store-status-comp.adoc deleted file mode 100644 index f9f96f74f824..000000000000 --- a/modules/cluster-logging-log-store-status-comp.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -[id="cluster-logging-elasticsearch-status-comp_{context}"] -= Viewing the status of the log store components - -You can view the status for a number of the log store components. - -Elasticsearch indices:: -You can view the status of the Elasticsearch indices. - -. Get the name of an Elasticsearch pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of the indices: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -- indices ----- -+ -.Example output -[source,terminal] ----- -Defaulting container name to elasticsearch. -Use 'oc describe pod/elasticsearch-cdm-4vjor49p-2-6d4d7db474-q2w7z -n openshift-logging' to see all of the containers in this pod. 
- -green open infra-000002 S4QANnf1QP6NgCegfnrnbQ 3 1 119926 0 157 78 -green open audit-000001 8_EQx77iQCSTzFOXtxRqFw 3 1 0 0 0 0 -green open .security iDjscH7aSUGhIdq0LheLBQ 1 1 5 0 0 0 -green open .kibana_-377444158_kubeadmin yBywZ9GfSrKebz5gWBZbjw 3 1 1 0 0 0 -green open infra-000001 z6Dpe__ORgiopEpW6Yl44A 3 1 871000 0 874 436 -green open app-000001 hIrazQCeSISewG3c2VIvsQ 3 1 2453 0 3 1 -green open .kibana_1 JCitcBMSQxKOvIq6iQW6wg 1 1 0 0 0 0 -green open .kibana_-1595131456_user1 gIYFIEGRRe-ka0W3okS-mQ 3 1 1 0 0 0 ----- - - -Log store pods:: -You can view the status of the pods that host the log store. - -. Get the name of a pod: -+ -[source,terminal] ----- -$ oc get pods --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -pod/elasticsearch-cdm-1godmszn-1-6f8495-vp4lw -pod/elasticsearch-cdm-1godmszn-2-5769cf-9ms2n -pod/elasticsearch-cdm-1godmszn-3-f66f7d-zqkz7 ----- - -. Get the status of a pod: -+ -[source,terminal] ----- -$ oc describe pod elasticsearch-cdm-1godmszn-1-6f8495-vp4lw ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... -Status: Running - -.... - -Containers: - elasticsearch: - Container ID: cri-o://b7d44e0a9ea486e27f47763f5bb4c39dfd2 - State: Running - Started: Mon, 08 Jun 2020 10:17:56 -0400 - Ready: True - Restart Count: 0 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - - proxy: - Container ID: cri-o://3f77032abaddbb1652c116278652908dc01860320b8a4e741d06894b2f8f9aa1 - State: Running - Started: Mon, 08 Jun 2020 10:18:38 -0400 - Ready: True - Restart Count: 0 - -.... - -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True - -.... - -Events: ----- - -Log storage pod deployment configuration:: -You can view the status of the log store deployment configuration. - -. Get the name of a deployment configuration: -+ -[source,terminal] ----- -$ oc get deployment --selector component=elasticsearch -o name ----- -+ -.Example output -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-1gon-1 -deployment.extensions/elasticsearch-cdm-1gon-2 -deployment.extensions/elasticsearch-cdm-1gon-3 ----- - -. Get the deployment configuration status: -+ -[source,terminal] ----- -$ oc describe deployment elasticsearch-cdm-1gon-1 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... - Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Conditions: - Type Status Reason - ---- ------ ------ - Progressing Unknown DeploymentPaused - Available True MinimumReplicasAvailable - -.... - -Events: ----- - -Log store replica set:: -You can view the status of the log store replica set. - -. Get the name of a replica set: -+ -[source,terminal] ----- -$ oc get replicaSet --selector component=elasticsearch -o name - -replicaset.extensions/elasticsearch-cdm-1gon-1-6f8495 -replicaset.extensions/elasticsearch-cdm-1gon-2-5769cf -replicaset.extensions/elasticsearch-cdm-1gon-3-f66f7d ----- - -. Get the status of the replica set: -+ -[source,terminal] ----- -$ oc describe replicaSet elasticsearch-cdm-1gon-1-6f8495 ----- -+ -The output includes the following status information: -+ -.Example output -[source,terminal] ----- -.... 
- Containers: - elasticsearch: - Image: registry.redhat.io/openshift-logging/elasticsearch6-rhel8@sha256:4265742c7cdd85359140e2d7d703e4311b6497eec7676957f455d6908e7b1c25 - Readiness: exec [/usr/share/elasticsearch/probe/readiness.sh] delay=10s timeout=30s period=5s #success=1 #failure=3 - -.... - -Events: ----- diff --git a/modules/cluster-logging-log-store-status-viewing.adoc b/modules/cluster-logging-log-store-status-viewing.adoc deleted file mode 100644 index bcc1ac8a83f6..000000000000 --- a/modules/cluster-logging-log-store-status-viewing.adoc +++ /dev/null @@ -1,247 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-log-store.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-log-store-comp-viewing_{context}"] -= Viewing the status of the log store - -You can view the status of your log store. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -. Change to the `openshift-logging` project. -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. To view the status: - -.. Get the name of the log store instance: -+ -[source,terminal] ----- -$ oc get Elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -elasticsearch 5h9m ----- - -.. Get the log store status: -+ -[source,terminal] ----- -$ oc get Elasticsearch -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get Elasticsearch elasticsearch -n openshift-logging -o yaml ----- -+ -The output includes information similar to the following: -+ -.Example output -[source,terminal] ----- -status: <1> - cluster: <2> - activePrimaryShards: 30 - activeShards: 60 - initializingShards: 0 - numDataNodes: 3 - numNodes: 3 - pendingTasks: 0 - relocatingShards: 0 - status: green - unassignedShards: 0 - clusterHealth: "" - conditions: [] <3> - nodes: <4> - - deploymentName: elasticsearch-cdm-zjf34ved-1 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-2 - upgradeStatus: {} - - deploymentName: elasticsearch-cdm-zjf34ved-3 - upgradeStatus: {} - pods: <5> - client: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - data: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - master: - failed: [] - notReady: [] - ready: - - elasticsearch-cdm-zjf34ved-1-6d7fbf844f-sn422 - - elasticsearch-cdm-zjf34ved-2-dfbd988bc-qkzjz - - elasticsearch-cdm-zjf34ved-3-c8f566f7c-t7zkt - shardAllocationEnabled: all ----- -<1> In the output, the cluster status fields appear in the `status` stanza. -<2> The status of the log store: -+ -* The number of active primary shards. -* The number of active shards. -* The number of shards that are initializing. -* The number of log store data nodes. -* The total number of log store nodes. -* The number of pending tasks. -* The log store status: `green`, `red`, `yellow`. -* The number of unassigned shards. -<3> Any status conditions, if present. The log store status indicates the reasons from the scheduler if a pod could not be placed. Any events related to the following conditions are shown: -* Container Waiting for both the log store and proxy containers. -* Container Terminated for both the log store and proxy containers. -* Pod unschedulable. 
-Also, a condition is shown for a number of issues; see *Example condition messages*. -<4> The log store nodes in the cluster, with `upgradeStatus`. -<5> The log store client, data, and master pods in the cluster, listed under 'failed`, `notReady`, or `ready` state. - -[id="cluster-logging-elasticsearch-status-message_{context}"] -== Example condition messages - -The following are examples of some condition messages from the `Status` section of the Elasticsearch instance. - -// https://github.com/openshift/elasticsearch-operator/pull/92 - -The following status message indicates that a node has exceeded the configured low watermark, and no shard will be allocated to this node. - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T15:57:22Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be not - be allocated on this node. - reason: Disk Watermark Low - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that a node has exceeded the configured high watermark, and shards will be relocated to other nodes. - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-03-15T16:04:45Z - message: Disk storage usage for node is 27.5gb (36.74%). Shards will be relocated - from this node. - reason: Disk Watermark High - status: "True" - type: NodeStorage - deploymentName: example-elasticsearch-cdm-0-1 - upgradeStatus: {} ----- - -The following status message indicates that the log store node selector in the CR does not match any nodes in the cluster: - -[source,yaml] ----- -status: - nodes: - - conditions: - - lastTransitionTime: 2019-04-10T02:26:24Z - message: '0/8 nodes are available: 8 node(s) didn''t match node selector.' - reason: Unschedulable - status: "True" - type: Unschedulable ----- - -The following status message indicates that the log store CR uses a non-existent persistent volume claim (PVC). - -[source,yaml] ----- -status: - nodes: - - conditions: - - last Transition Time: 2019-04-10T05:55:51Z - message: pod has unbound immediate PersistentVolumeClaims (repeated 5 times) - reason: Unschedulable - status: True - type: Unschedulable ----- - -The following status message indicates that your log store cluster does not have enough nodes to support the redundancy policy. - -[source,yaml] ----- -status: - clusterHealth: "" - conditions: - - lastTransitionTime: 2019-04-17T20:01:31Z - message: Wrong RedundancyPolicy selected. Choose different RedundancyPolicy or - add more nodes with data roles - reason: Invalid Settings - status: "True" - type: InvalidRedundancy ----- - -This status message indicates your cluster has too many control plane nodes: - -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: '2019-04-17T20:12:34Z' - message: >- - Invalid master nodes count. Please ensure there are no more than 3 total - nodes with master roles - reason: Invalid Settings - status: 'True' - type: InvalidMasters ----- - - -The following status message indicates that Elasticsearch storage does not support the change you tried to make. 
- -For example: -[source,yaml] ----- -status: - clusterHealth: green - conditions: - - lastTransitionTime: "2021-05-07T01:05:13Z" - message: Changing the storage structure for a custom resource is not supported - reason: StorageStructureChangeIgnored - status: 'True' - type: StorageStructureChangeIgnored ----- - -The `reason` and `type` fields specify the type of unsupported change: - -`StorageClassNameChangeIgnored`:: Unsupported change to the storage class name. -`StorageSizeChangeIgnored`:: Unsupported change the storage size. -`StorageStructureChangeIgnored`:: Unsupported change between ephemeral and persistent storage structures. -+ -[IMPORTANT] -==== -If you try to configure the `ClusterLogging` custom resource (CR) to switch from ephemeral to persistent storage, the OpenShift Elasticsearch Operator creates a persistent volume claim (PVC) but does not create a persistent volume (PV). To clear the `StorageStructureChangeIgnored` status, you must revert the change to the `ClusterLogging` CR and delete the PVC. -==== diff --git a/modules/cluster-logging-logcli-reference.adoc b/modules/cluster-logging-logcli-reference.adoc deleted file mode 100644 index 272aa5886ac7..000000000000 --- a/modules/cluster-logging-logcli-reference.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: REFERENCE -[id="logging-logcli-about_{context}"] - -= Querying Loki - -You can use Loki's command-line interface `logcli` to query logs. - -.Example Application Log Query -[source,terminal] ----- -$ oc extract cm/lokistack-sample-ca-bundle --to=lokistack --confirm -$ cat lokistack/*.crt >lokistack_ca.crt -$ logcli -o raw --bearer-token="${bearer_token}" --ca-cert="lokistack_ca.crt" --addr xxxxxx ----- - -.Example Infrastructure Log Query -[source,terminal] ----- -$ logcli --bearer-token="$(oc whoami -t)" --addr https://lokistack-dev-openshift-logging.apps.devcluster.openshift.com/api/logs/v1/infrastructure labels ----- - -.Example Audit log Query -[source,terminal] ----- -$ logcli --bearer-token="$(oc whoami -t)" --addr https://lokistack-dev-openshift-logging.apps.devcluster.openshift.com/api/logs/v1/audit labels ----- - -.Additional Resources -* link:https://grafana.com/docs/loki/latest/tools/logcli/[LogCLI Documentation] diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc deleted file mode 100644 index 5af44f66a99e..000000000000 --- a/modules/cluster-logging-logstore-limits.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-elasticsearch.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-logstore-limits_{context}"] -= Configuring CPU and memory requests for the log store - -Each component specification allows for adjustments to both the CPU and memory requests. -You should not have to manually adjust these values as the OpenShift Elasticsearch -Operator sets values sufficient for your environment. - -[NOTE] -==== -In large-scale clusters, the default memory limit for the Elasticsearch proxy container might not be sufficient, causing the proxy container to be OOMKilled. If you experience this issue, increase the memory requests and limits for the Elasticsearch proxy. -==== - -Each Elasticsearch node can operate with a lower memory setting though this is *not* recommended for production deployments. -For production use, you should have no less than the default 16Gi allocated to each pod. 
Preferably, you should allocate as much as possible, up to 64Gi per pod.
-
-.Prerequisites
-
-* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed.
-
-.Procedure
-
-. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project:
-+
-[source,terminal]
-----
-$ oc edit ClusterLogging instance
-----
-+
-[source,yaml]
-----
-apiVersion: "logging.openshift.io/v1"
-kind: "ClusterLogging"
-metadata:
-  name: "instance"
-....
-spec:
-  logStore:
-    type: "elasticsearch"
-    elasticsearch:<1>
-      resources:
-        limits: <2>
-          memory: "32Gi"
-        requests: <3>
-          cpu: "1"
-          memory: "16Gi"
-      proxy: <4>
-        resources:
-          limits:
-            memory: 100Mi
-          requests:
-            memory: 100Mi
-----
-<1> Specify the CPU and memory requests for Elasticsearch as needed. If you leave these values blank,
-the OpenShift Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request.
-<2> The maximum amount of resources a pod can use.
-<3> The minimum resources required to schedule a pod.
-<4> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the OpenShift Elasticsearch Operator sets default values that are sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request.
-
-When adjusting the amount of Elasticsearch memory, the same value should be used for both `requests` and `limits`.
-
-For example:
-
-[source,yaml]
-----
-  resources:
-    limits: <1>
-      memory: "32Gi"
-    requests: <2>
-      cpu: "8"
-      memory: "32Gi"
-----
-<1> The maximum amount of the resource.
-<2> The minimum amount required.
-
-Kubernetes generally adheres to the node configuration and does not guarantee that Elasticsearch can use the specified limits.
-Setting the same value for the `requests` and `limits` ensures that Elasticsearch can use the memory you want, assuming the node has the memory available.
diff --git a/modules/cluster-logging-loki-about.adoc b/modules/cluster-logging-loki-about.adoc
deleted file mode 100644
index da91f15ee1a9..000000000000
--- a/modules/cluster-logging-loki-about.adoc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Module is included in the following assemblies:
-//cluster-logging-loki.adoc
-:_content-type: CONCEPT
-[id="about-logging-loki_{context}"]
-= About the LokiStack
-
-In {logging} documentation, *LokiStack* refers to the {logging}-supported combination of Loki and a web proxy with {product-title} authentication integration. LokiStack's proxy uses {product-title} authentication to enforce multi-tenancy. *Loki* refers to the log store as either the individual component or an external store.
-
-Loki is a horizontally scalable, highly available, multi-tenant log aggregation system currently offered as an alternative to Elasticsearch as a log store for the {logging}. Elasticsearch indexes incoming log records completely during ingestion. Loki only indexes a few fixed labels during ingestion, and defers more complex parsing until after the logs have been stored. This means Loki can collect logs more quickly. As with Elasticsearch, you can query Loki link:https://grafana.com/docs/loki/latest/[using JSON paths or regular expressions].
-
-[id="deployment-sizing_{context}"]
-== Deployment Sizing
-Sizing for Loki follows the format of `<N>x.<size>`, where the value `<N>` is the number of instances and `<size>` specifies performance capabilities.
-
-[NOTE]
-====
-1x.extra-small is for demo purposes only, and is not supported.
-==== - -.Loki Sizing -[options="header"] -|======================================================================================== -| | 1x.extra-small | 1x.small | 1x.medium -| *Data transfer* | Demo use only. | 500GB/day | 2TB/day -| *Queries per second (QPS)* | Demo use only. | 25-50 QPS at 200ms | 25-75 QPS at 200ms -| *Replication factor* | None | 2 | 3 -| *Total CPU requests* | 5 vCPUs | 36 vCPUs | 54 vCPUs -| *Total Memory requests* | 7.5Gi | 63Gi | 139Gi -| *Total Disk requests* | 150Gi | 300Gi | 450Gi -|======================================================================================== - -[id="CRD-API-support_{context}"] -== Supported API Custom Resource Definitions -LokiStack development is ongoing, not all APIs are supported currently supported. - -[options="header"] -|===================================================================== -| CustomResourceDefinition (CRD)| ApiVersion | Support state -| LokiStack | lokistack.loki.grafana.com/v1 | Supported in 5.5 -| RulerConfig | rulerconfig.loki.grafana/v1beta1 | Technology Preview -| AlertingRule | alertingrule.loki.grafana/v1beta1 | Technology Preview -| RecordingRule | recordingrule.loki.grafana/v1beta1 | Technology Preview -|===================================================================== - -:FeatureName: Usage of `RulerConfig`, `AlertingRule` and `RecordingRule` custom resource definitions (CRDs). -include::snippets/technology-preview.adoc[] diff --git a/modules/cluster-logging-loki-deploy.adoc b/modules/cluster-logging-loki-deploy.adoc deleted file mode 100644 index beda913b63b3..000000000000 --- a/modules/cluster-logging-loki-deploy.adoc +++ /dev/null @@ -1,150 +0,0 @@ -// Module is included in the following assemblies: -//cluster-logging-loki.adoc -:_content-type: PROCEDURE -[id="logging-loki-deploy_{context}"] -= Deploying the LokiStack - -ifndef::openshift-rosa,openshift-dedicated[] -You can use the {product-title} web console to deploy the LokiStack. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -You can deploy the LokiStack by using the {product-title} {cluster-manager-url}. -endif::[] - -.Prerequisites - -* {logging-title-uc} Operator 5.5 and later -* Supported Log Store (AWS S3, Google Cloud Storage, Azure, Swift, Minio, OpenShift Data Foundation) - -.Procedure - -. Install the `Loki Operator` Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *OperatorHub*. -endif::[] - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -.. Under *Installation Mode*, select *All namespaces on the cluster*. - -.. Under *Installed Namespace*, select *openshift-operators-redhat*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` namespace might contain Community Operators, which are untrusted and might publish a metric with the same name as -ifndef::openshift-rosa[] -an {product-title} metric, which would cause conflicts. -endif::[] -ifdef::openshift-rosa[] -a {product-title} metric, which would cause conflicts. -endif::[] - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -.. Select an *Approval Strategy*. 
-+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that you installed the Loki Operator. Visit the *Operators* → *Installed Operators* page and look for *Loki Operator*. - -.. Ensure that *Loki Operator* is listed with *Status* as *Succeeded* in all the projects. -+ -. Create a `Secret` YAML file that uses the `access_key_id` and `access_key_secret` fields to specify your AWS credentials and `bucketnames`, `endpoint` and `region` to define the object storage location. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: logging-loki-s3 - namespace: openshift-logging -stringData: - access_key_id: AKIAIOSFODNN7EXAMPLE - access_key_secret: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - bucketnames: s3-bucket-name - endpoint: https://s3.eu-central-1.amazonaws.com - region: eu-central-1 ----- -+ -. Create the `LokiStack` custom resource: -+ -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - size: 1x.small - storage: - schemas: - - version: v12 - effectiveDate: "2022-06-01" - secret: - name: logging-loki-s3 - type: s3 - storageClassName: gp3-csi <1> - tenants: - mode: openshift-logging ----- -<1> Or `gp2-csi`. -+ -.. Apply the configuration: -+ -[source,terminal] ----- -oc apply -f logging-loki.yaml ----- -+ -. Create or edit a `ClusterLogging` CR: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging -metadata: - name: instance - namespace: openshift-logging -spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- -+ -.. Apply the configuration: -+ -[source,terminal] ----- -oc apply -f cr-lokistack.yaml ----- -+ -. Enable the RedHat OpenShift Logging Console Plugin: -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] -.. Select the *RedHat OpenShift Logging* Operator. -.. Under Console plugin, click *Disabled*. -.. Select *Enable* and then *Save*. This change will restart the 'openshift-console' pods. -.. After the pods restart, you will receive a notification that a web console update is available, prompting you to refresh. -.. After refreshing the web console, click *Observe* from the left main menu. A new option for *Logs* will be available to you. - -[NOTE] -==== -This plugin is only available on {product-title} 4.10 and later. 
-==== diff --git a/modules/cluster-logging-loki-tech-preview.adoc b/modules/cluster-logging-loki-tech-preview.adoc deleted file mode 100644 index 9ec3560ebf1d..000000000000 --- a/modules/cluster-logging-loki-tech-preview.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-loki-tech-preview_{context}"] -:FeatureName: Loki Operator -include::snippets/technology-preview.adoc[] - -[id="cluster-logging-about-loki"] -= About Loki - -Loki is a horizontally scalable, highly available, multi-tenant log aggregation system currently offered as an alternative to Elasticsearch as a log store for the {logging}. - -[role="_additional-resources"] -.Additional resources -* link:https://grafana.com/docs/loki/latest/[Loki Documentation] - -== Deploying the Lokistack -You can use the {product-title} web console to install the Loki Operator. - -.Prerequisites - -* {product-title}: 4.13 -* {logging-title-uc}: 5.4 - -To install the Loki Operator using the {product-title} web console: - -. Install the Loki Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -.. Under *Installation Mode*, select *All namespaces on the cluster*. - -.. Under *Installed Namespace*, select *openshift-operators-redhat*. -+ -You must specify the `openshift-operators-redhat` namespace. The `openshift-operators` -namespace might contain Community Operators, which are untrusted and could publish -a metric with the same name as an {product-title} metric, which would cause -conflicts. - -.. Select *Enable operator recommended cluster monitoring on this namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. -You must select this option to ensure that cluster monitoring -scrapes the `openshift-operators-redhat` namespace. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -.. Click *Install*. - -.. Verify that you installed the Loki Operator. Visit the *Operators* → *Installed Operators* page and look for "Loki Operator." - -.. Ensure that *Loki Operator* is listed in all the projects whose *Status* is *Succeeded*. diff --git a/modules/cluster-logging-maintenance-support-about.adoc b/modules/cluster-logging-maintenance-support-about.adoc deleted file mode 100644 index f71a4c5c6520..000000000000 --- a/modules/cluster-logging-maintenance-support-about.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-maintenance-support.adoc - -:_content-type: CONCEPT -[id="cluster-logging-maintenance-support-about_{context}"] -= About unsupported configurations - -The supported way of configuring the {logging-title} is by configuring it using the options described in this documentation. Do not use other configurations, as they are unsupported. Configuration paradigms might change across {product-title} releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. 
If you use configurations other than those described in this documentation, your changes will disappear because the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator reconcile any differences. The Operators reverse everything to the defined state by default and by design. - -[NOTE] -==== -If you _must_ perform configurations not described in the {product-title} documentation, you _must_ set your Red Hat OpenShift Logging Operator or OpenShift Elasticsearch Operator to *Unmanaged*. An unmanaged OpenShift Logging environment is _not supported_ and does not receive updates until you return OpenShift Logging to *Managed*. -==== diff --git a/modules/cluster-logging-maintenance-support-list.adoc b/modules/cluster-logging-maintenance-support-list.adoc deleted file mode 100644 index 7cfc808389f4..000000000000 --- a/modules/cluster-logging-maintenance-support-list.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-maintenance-support.adoc - -[id="cluster-logging-maintenance-support-list_{context}"] -= Unsupported configurations - -You must set the Red Hat OpenShift Logging Operator to the unmanaged state to modify the following components: - -* The `Elasticsearch` CR - -* The Kibana deployment - -* The `fluent.conf` file - -* The Fluentd daemon set - -You must set the OpenShift Elasticsearch Operator to the unmanaged state to modify the following component: - -* the Elasticsearch deployment files. - -Explicitly unsupported cases include: - -* *Configuring default log rotation*. You cannot modify the default log rotation configuration. - -* *Configuring the collected log location*. You cannot change the location of the log collector output file, which by default is `/var/log/fluentd/fluentd.log`. - -* *Throttling log collection*. You cannot throttle down the rate at which the logs are read in by the log collector. - -* *Configuring the logging collector using environment variables*. You cannot use environment variables to modify the log collector. - -* *Configuring how the log collector normalizes logs*. You cannot modify default log normalization. diff --git a/modules/cluster-logging-manual-rollout-rolling.adoc b/modules/cluster-logging-manual-rollout-rolling.adoc deleted file mode 100644 index 3befd3648696..000000000000 --- a/modules/cluster-logging-manual-rollout-rolling.adoc +++ /dev/null @@ -1,221 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-log-store.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-manual-rollout-rolling_{context}"] -= Performing an Elasticsearch rolling cluster restart - -Perform a rolling restart when you change the `elasticsearch` config map or any of the `elasticsearch-*` deployment configurations. - -Also, a rolling restart is recommended if the nodes on which an Elasticsearch pod runs requires a reboot. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -To perform a rolling cluster restart: - -. Change to the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- - -. Get the names of the Elasticsearch pods: -+ ----- -$ oc get pods -l component=elasticsearch- ----- - -. 
Scale down the collector pods so they stop sending new logs to Elasticsearch: -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "false"}}}}}' ----- - -. Perform a shard synced flush using the {product-title} link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool to ensure there are no pending operations waiting to be written to disk prior to shutting down: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_flush/synced" -XPOST ----- -+ -.Example output -[source,terminal] ----- -{"_shards":{"total":4,"successful":4,"failed":0},".security":{"total":2,"successful":2,"failed":0},".kibana_1":{"total":2,"successful":2,"failed":0}} ----- - -. Prevent shard balancing when purposely bringing down nodes using the {product-title} -link:https://github.com/openshift/origin-aggregated-logging/tree/master/elasticsearch#es_util[*es_util*] tool: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "primaries" } }' ----- -+ -.Example output -[source,terminal] ----- -{"acknowledged":true,"persistent":{"cluster":{"routing":{"allocation":{"enable":"primaries"}}}},"transient": ----- - -. After the command is complete, for each deployment you have for an ES cluster: - -.. By default, the {product-title} Elasticsearch cluster blocks rollouts to their nodes. Use the following command to allow rollouts -and allow the pod to pick up the changes: -+ -[source,terminal] ----- -$ oc rollout resume deployment/ ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rollout resume deployment/elasticsearch-cdm-0-1 ----- -+ -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-0-1 resumed ----- -+ -A new pod is deployed. After the pod has a ready container, you can -move on to the next deployment. -+ -[source,terminal] ----- -$ oc get pods -l component=elasticsearch- ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6k 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-2-f799564cb-l9mj7 2/2 Running 0 22h -elasticsearch-cdm-5ceex6ts-3-585968dc68-k7kjr 2/2 Running 0 22h ----- - -.. After the deployments are complete, reset the pod to disallow rollouts: -+ -[source,terminal] ----- -$ oc rollout pause deployment/ ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rollout pause deployment/elasticsearch-cdm-0-1 ----- -+ -[source,terminal] ----- -deployment.extensions/elasticsearch-cdm-0-1 paused ----- -+ -.. Check that the Elasticsearch cluster is in a `green` or `yellow` state: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ -[NOTE] -==== -If you performed a rollout on the Elasticsearch pod you used in the previous commands, the pod no longer exists and you need a new pod name here. 
-==== -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query=_cluster/health?pretty=true ----- -+ -.Example output -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "yellow", <1> - "timed_out" : false, - "number_of_nodes" : 3, - "number_of_data_nodes" : 3, - "active_primary_shards" : 8, - "active_shards" : 16, - "relocating_shards" : 0, - "initializing_shards" : 0, - "unassigned_shards" : 1, - "delayed_unassigned_shards" : 0, - "number_of_pending_tasks" : 0, - "number_of_in_flight_fetch" : 0, - "task_max_waiting_in_queue_millis" : 0, - "active_shards_percent_as_number" : 100.0 -} ----- -<1> Make sure this parameter value is `green` or `yellow` before proceeding. - -. If you changed the Elasticsearch configuration map, repeat these steps for each Elasticsearch pod. - -. After all the deployments for the cluster have been rolled out, re-enable shard balancing: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec elasticsearch-cdm-5ceex6ts-1-dcd6c4c7c-jpw6 -c elasticsearch -- es_util --query="_cluster/settings" -XPUT -d '{ "persistent": { "cluster.routing.allocation.enable" : "all" } }' ----- -+ -.Example output -[source,terminal] ----- -{ - "acknowledged" : true, - "persistent" : { }, - "transient" : { - "cluster" : { - "routing" : { - "allocation" : { - "enable" : "all" - } - } - } - } -} ----- - -. Scale up the collector pods so they send new logs to Elasticsearch. -+ -[source,terminal] ----- -$ oc -n openshift-logging patch daemonset/collector -p '{"spec":{"template":{"spec":{"nodeSelector":{"logging-infra-collector": "true"}}}}}' ----- diff --git a/modules/cluster-logging-must-gather-about.adoc b/modules/cluster-logging-must-gather-about.adoc deleted file mode 100644 index e8abfa298832..000000000000 --- a/modules/cluster-logging-must-gather-about.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/troubleshooting/cluster-logging-must-gather.adoc - -:_content-type: CONCEPT -[id="about-must-gather_{context}"] -= About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues. - -For your {logging}, `must-gather` collects the following information: - -* Project-level resources, including pods, configuration maps, service accounts, roles, role bindings, and events at the project level -* Cluster-level resources, including nodes, roles, and role bindings at the cluster level -* OpenShift Logging resources in the `openshift-logging` and `openshift-operators-redhat` namespaces, including health status for the log collector, the log store, and the log visualizer - -When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in the current working directory. 
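If you want a quick, illustrative check that the tool behaved as described, the following sketch (not part of the deleted module) lists the temporary gather pod while collection is running and, afterward, the resulting output directory; the exact pod, namespace, and directory names vary per run.

[source,terminal]
----
$ oc get pods --all-namespaces | grep must-gather
$ ls -d must-gather.local.*
----

The first command shows the short-lived pod that `oc adm must-gather` creates, and the second lists the `must-gather.local.<suffix>` directory created in the current working directory.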
diff --git a/modules/cluster-logging-must-gather-collecting.adoc b/modules/cluster-logging-must-gather-collecting.adoc deleted file mode 100644 index 1747f51ffa6b..000000000000 --- a/modules/cluster-logging-must-gather-collecting.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/troubleshooting/cluster-logging-must-gather.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-must-gather-collecting_{context}"] -= Collecting OpenShift Logging data - -You can use the `oc adm must-gather` CLI command to collect information about your {logging}. - -.Procedure - -To collect {logging} information with `must-gather`: - -. Navigate to the directory where you want to store the `must-gather` information. - -. Run the `oc adm must-gather` command against the OpenShift Logging image: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm must-gather --image=quay.io/openshift/origin-cluster-logging-operator ----- -endif::openshift-origin[] -+ -The `must-gather` tool creates a new directory that starts with `must-gather.local` within the current directory. For example: -`must-gather.local.4157245944708210408`. - -. Create a compressed file from the `must-gather` directory that was just created. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar -cvaf must-gather.tar.gz must-gather.local.4157245944708210408 ----- - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/modules/cluster-logging-release-notes-5.2.z.adoc b/modules/cluster-logging-release-notes-5.2.z.adoc deleted file mode 100644 index 64c5227fa241..000000000000 --- a/modules/cluster-logging-release-notes-5.2.z.adoc +++ /dev/null @@ -1,324 +0,0 @@ -[id="cluster-logging-release-notes-5-2-10"] -== OpenShift Logging 5.2.10 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/[ OpenShift Logging Bug Fix Release 5.2.10]] - -[id="openshift-logging-5-2-10-bug-fixes"] -=== Bug fixes -* Before this update some log forwarder outputs could re-order logs with the same time-stamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps.(https://issues.redhat.com/browse/LOG-2335[LOG-2335]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2475[LOG-2475]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2480[LOG-2480]) - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. 
These permissions were only created when deploying the Operator using the console interface, and were missing when the Operator was deployed from the command line. This update fixes the issue by making this role and binding namespace-scoped. (https://issues.redhat.com/browse/LOG-1972[LOG-1972]) - - -[id="openshift-logging-5-2-10-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* link:https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* link:https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* link:https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* link:https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* link:https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* link:https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* link:https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* link:https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* link:https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-2-9"] -== OpenShift Logging 5.2.9 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:1375[RHBA-2022:1375 OpenShift Logging Bug Fix Release 5.2.9] - -[id="openshift-logging-5-2-9-bug-fixes"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade. With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2304[LOG-2304]) - -[id="cluster-logging-release-notes-5-2-8"] -== OpenShift Logging 5.2.8 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0728[RHSA-2022:0728 OpenShift Logging Bug Fix Release 5.2.8] - -[id="openshift-logging-5-2-8-bug-fixes"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. (link:https://issues.redhat.com/browse/LOG-2180[LOG-2180]) - -[id="openshift-logging-5-2-8-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1930423[BZ-1930423] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2052539[BZ-2052539] -==== - -[id="cluster-logging-release-notes-5-2-7"] -== OpenShift Logging 5.2.7 - -This release includes link:https://access.redhat.com/errata/RHBA-2022:0478[RHBA-2022:0478 OpenShift Logging Bug Fix Release 5.2.7] - -[id="openshift-logging-5-2-7-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully.
(link:https://issues.redhat.com/browse/LOG-2000[LOG-2000]) - -* Before this update, if a persistent volume claim (PVC) already existed, Elasticsearch generated an error, "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2118[LOG-2118]) - -[id="openshift-logging-5-2-7-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -==== - -[id="cluster-logging-release-notes-5-2-6"] -== OpenShift Logging 5.2.6 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0230[RHSA-2022:0230 OpenShift Logging Bug Fix Release 5.2.6] - -[id="openshift-logging-5-2-6-bug-fixes"] -=== Bug fixes -* Before this update, the release did not include a filter change which caused fluentd to crash. With this update, the missing filter has been corrected. (link:https://issues.redhat.com/browse/LOG-2104[LOG-2104]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2101[LOG-2101]) - -[id="openshift-logging-5-2-6-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-2-5"] -== OpenShift Logging 5.2.5 - -This release includes link:https://access.redhat.com/errata/RHSA-2022:0043[RHSA-2022:0043 OpenShift Logging Bug Fix Release 5.2.5] - -[id="openshift-logging-5-2-5-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. 
link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - - -[id="openshift-logging-5-2-5-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* link:https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* link:https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -* link:https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -==== - -[id="cluster-logging-release-notes-5-2-4"] -== OpenShift Logging 5.2.4 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:5127[RHSA-2021:5127 OpenShift Logging Bug Fix Release 5.2.4] - -[id="openshift-logging-5-2-4-bug-fixes"] -=== Bug fixes - -* Before this update records shipped via syslog would serialize a ruby hash encoding key/value pairs to contain a '=>' character, as well as replace tabs with "#11". This update serializes the message correctly as proper JSON. (link:https://issues.redhat.com/browse/LOG-1775[LOG-1775]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1970[LOG-1970]) - -* Before this update, Elasticsearch sometimes rejected messages when Log Forwarding was configured with multiple outputs. This happened because configuring one of the outputs modified message content to be a single message. With this update, Log Forwarding duplicates the messages for each output so that output-specific processing does not affect the other outputs. (link:https://issues.redhat.com/browse/LOG-1824[LOG-1824]) - - -[id="openshift-logging-5-2-4-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== - -[id="cluster-logging-release-notes-5-2-3"] -== OpenShift Logging 5.2.3 - -This release includes link:https://access.redhat.com/errata/RHSA-2021:4032[RHSA-2021:4032 
OpenShift Logging Bug Fix Release 5.2.3] - -[id="openshift-logging-5-2-3-bug-fixes"] -=== Bug fixes - -* Before this update, some alerts did not include a namespace label. This omission doesn't comply with the OpenShift Monitoring Team's guidelines for writing alerting rules in OpenShift. With this update, all the alerts in Elasticsearch Operator include a namespace label and follow all the guidelines for writing alerting rules in OpenShift.(link:https://issues.redhat.com/browse/LOG-1857[LOG-1857]) - -* Before this update, a regression introduced in a prior release intentionally disabled JSON message parsing. This update re-enables JSON parsing. It also sets the log entry "level" based on the "level" field in parsed JSON message or by using regex to extract a match from a message field. (link:https://issues.redhat.com/browse/LOG-1759[LOG-1759]) - -[id="openshift-logging-5-2-3-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-23369[CVE-2021-23369] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1948761[BZ-1948761] -* link:https://access.redhat.com/security/cve/CVE-2021-23383[CVE-2021-23383] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1956688[BZ-1956688] -* link:https://access.redhat.com/security/cve/CVE-2018-20673[CVE-2018-20673] -* link:https://access.redhat.com/security/cve/CVE-2019-5827[CVE-2019-5827] -* link:https://access.redhat.com/security/cve/CVE-2019-13750[CVE-2019-13750] -* link:https://access.redhat.com/security/cve/CVE-2019-13751[CVE-2019-13751] -* link:https://access.redhat.com/security/cve/CVE-2019-17594[CVE-2019-17594] -* link:https://access.redhat.com/security/cve/CVE-2019-17595[CVE-2019-17595] -* link:https://access.redhat.com/security/cve/CVE-2019-18218[CVE-2019-18218] -* link:https://access.redhat.com/security/cve/CVE-2019-19603[CVE-2019-19603] -* link:https://access.redhat.com/security/cve/CVE-2019-20838[CVE-2019-20838] -* link:https://access.redhat.com/security/cve/CVE-2020-12762[CVE-2020-12762] -* link:https://access.redhat.com/security/cve/CVE-2020-13435[CVE-2020-13435] -* link:https://access.redhat.com/security/cve/CVE-2020-14155[CVE-2020-14155] -* link:https://access.redhat.com/security/cve/CVE-2020-16135[CVE-2020-16135] -* link:https://access.redhat.com/security/cve/CVE-2020-24370[CVE-2020-24370] -* link:https://access.redhat.com/security/cve/CVE-2021-3200[CVE-2021-3200] -* link:https://access.redhat.com/security/cve/CVE-2021-3426[CVE-2021-3426] -* link:https://access.redhat.com/security/cve/CVE-2021-3445[CVE-2021-3445] -* link:https://access.redhat.com/security/cve/CVE-2021-3572[CVE-2021-3572] -* link:https://access.redhat.com/security/cve/CVE-2021-3580[CVE-2021-3580] -* link:https://access.redhat.com/security/cve/CVE-2021-3778[CVE-2021-3778] -* link:https://access.redhat.com/security/cve/CVE-2021-3796[CVE-2021-3796] -* link:https://access.redhat.com/security/cve/CVE-2021-3800[CVE-2021-3800] -* link:https://access.redhat.com/security/cve/CVE-2021-20231[CVE-2021-20231] -* link:https://access.redhat.com/security/cve/CVE-2021-20232[CVE-2021-20232] -* link:https://access.redhat.com/security/cve/CVE-2021-20266[CVE-2021-20266] -* link:https://access.redhat.com/security/cve/CVE-2021-22876[CVE-2021-22876] -* link:https://access.redhat.com/security/cve/CVE-2021-22898[CVE-2021-22898] -* link:https://access.redhat.com/security/cve/CVE-2021-22925[CVE-2021-22925] -* link:https://access.redhat.com/security/cve/CVE-2021-23840[CVE-2021-23840] -* 
link:https://access.redhat.com/security/cve/CVE-2021-23841[CVE-2021-23841] -* link:https://access.redhat.com/security/cve/CVE-2021-27645[CVE-2021-27645] -* link:https://access.redhat.com/security/cve/CVE-2021-28153[CVE-2021-28153] -* link:https://access.redhat.com/security/cve/CVE-2021-33560[CVE-2021-33560] -* link:https://access.redhat.com/security/cve/CVE-2021-33574[CVE-2021-33574] -* link:https://access.redhat.com/security/cve/CVE-2021-35942[CVE-2021-35942] -* link:https://access.redhat.com/security/cve/CVE-2021-36084[CVE-2021-36084] -* link:https://access.redhat.com/security/cve/CVE-2021-36085[CVE-2021-36085] -* link:https://access.redhat.com/security/cve/CVE-2021-36086[CVE-2021-36086] -* link:https://access.redhat.com/security/cve/CVE-2021-36087[CVE-2021-36087] -==== - -[id="cluster-logging-release-notes-5-2-2"] -== OpenShift Logging 5.2.2 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3747[RHBA-2021:3747 OpenShift Logging Bug Fix Release 5.2.2] - -[id="openshift-logging-5-2-2-bug-fixes"] -=== Bug fixes - -* Before this update, the `ClusterLogging` custom resource (CR) applied the value of the `totalLimitSize` field to the Fluentd `total_limit_size` field, even if the required buffer space was not available. With this update, the CR applies the lesser of the two values, the `totalLimitSize` value or the default value, to the Fluentd `total_limit_size` field, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1738[LOG-1738]) - -* Before this update, a regression introduced in a prior release caused the collector configuration to flush its buffered messages before shutdown, creating a delay in the termination and restart of collector pods. With this update, fluentd no longer flushes buffers at shutdown, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1739[LOG-1739]) - -* Before this update, an issue in the bundle manifests prevented installation of the Elasticsearch operator through OLM on OpenShift 4.9. With this update, a correction to bundle manifests re-enables installs and upgrades in 4.9. (link:https://issues.redhat.com/browse/LOG-1780[LOG-1780]) - -[id="openshift-logging-5-2-2-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2020-25648.html[CVE-2020-25648] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22922.html[CVE-2021-22922] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22923.html[CVE-2021-22923] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22924.html[CVE-2021-22924] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36222.html[CVE-2021-36222] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37576.html[CVE-2021-37576] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37750.html[CVE-2021-37750] -* link:https://www.redhat.com/security/data/cve/CVE-2021-38201.html[CVE-2021-38201] -==== - -[id="cluster-logging-release-notes-5-2-1"] -== OpenShift Logging 5.2.1 - -This release includes link:https://access.redhat.com/errata/RHBA-2021:3550[RHBA-2021:3550 OpenShift Logging Bug Fix Release 5.2.1] - -[id="openshift-logging-5-2-1-bug-fixes"] -=== Bug fixes - -* Before this update, due to an issue in the release pipeline scripts, the value of the `olm.skipRange` field remained unchanged at `5.2.0` instead of reflecting the current release number. This update fixes the pipeline scripts to update the value of this field when the release numbers change.
(link:https://issues.redhat.com/browse/LOG-1743[LOG-1743]) - -[id="openshift-logging-5-2-1-CVEs"] -=== CVEs - -(None) diff --git a/modules/cluster-logging-release-notes-5.3.z.adoc b/modules/cluster-logging-release-notes-5.3.z.adoc deleted file mode 100644 index 01db62341b9a..000000000000 --- a/modules/cluster-logging-release-notes-5.3.z.adoc +++ /dev/null @@ -1,247 +0,0 @@ -//Z-stream Release Notes by Version -[id="cluster-logging-release-notes-5-3-7"] -== OpenShift Logging 5.3.7 -This release includes link:https://access.redhat.com/errata/RHSA-2022:2217[RHSA-2022:2217 OpenShift Logging Bug Fix Release 5.3.7] - -[id="openshift-logging-5-3-7-bug-fixes"] -=== Bug fixes -* Before this update, Linux audit log time parsing relied on the ordinal position of a key/value pair. This update changes the parsing to utilize a regex to find the time entry. (https://issues.redhat.com/browse/LOG-2322[LOG-2322]) - -* Before this update, some log forwarder outputs could re-order logs with the same timestamp. With this update, a sequence number has been added to the log record to order entries that have matching timestamps. (https://issues.redhat.com/browse/LOG-2334[LOG-2334]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (https://issues.redhat.com/browse/LOG-2450[LOG-2450]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2481[LOG-2481]) - -=== CVEs -[id="openshift-logging-5-3-7-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== - -[id="cluster-logging-release-notes-5-3-6"] -== OpenShift Logging 5.3.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:1377[RHBA-2022:1377 OpenShift Logging Bug Fix Release 5.3.6] - -[id="openshift-logging-5-3-6-bug-fixes"] -=== Bug fixes -* Before this update, defining a toleration with no key and the existing Operator caused the Operator to be unable to complete an upgrade.
With this update, this toleration no longer blocks the upgrade from completing. (link:https://issues.redhat.com/browse/LOG-2126[LOG-2126]) - -* Before this change, it was possible for the collector to generate a warning where the chunk byte limit was exceeding an emitted event. With this change, you can tune the readline limit to resolve the issue as advised by the upstream documentation. (link:https://issues.redhat.com/browse/LOG-2380[LOG-2380]) - -[id="cluster-logging-release-notes-5-3-5"] -== OpenShift Logging 5.3.5 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHSA-2022:0721[RHSA-2022:0721 OpenShift Logging Bug Fix Release 5.3.5] - -[id="openshift-logging-5-3-5-bug-fixes"] -=== Bug fixes -* Before this update, if you removed OpenShift Logging from {product-title}, the web console continued displaying a link to the *Logging* page. With this update, removing or uninstalling OpenShift Logging also removes that link. (link:https://issues.redhat.com/browse/LOG-2182[LOG-2182]) - -=== CVEs -[id="openshift-logging-5-3-5-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28491[CVE-2020-28491] -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0552[CVE-2022-0552] -==== - -[id="cluster-logging-release-notes-5-3-4"] -== OpenShift Logging 5.3.4 -[role="_abstract"] -This release includes link:https://access.redhat.com/errata/RHBA-2022:0411[RHBA-2022:0411 OpenShift Logging Bug Fix Release 5.3.4] - -[id="openshift-logging-5-3-4-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the `cluster-logging-operator` did not correctly compare existing and desired config maps that contained the dashboard. This update fixes the logic by adding a unique hash value to the object labels. (link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* Before this update, Elasticsearch pods failed to start after updating with FIPS enabled. With this update, Elasticsearch pods start successfully. (link:https://issues.redhat.com/browse/LOG-1974[LOG-1974]) - -* Before this update, elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, elasticsearch checks for existing PVCs, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2127[LOG-2127]) - -=== CVEs -[id="openshift-logging-5-3-4-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-3521[CVE-2021-3521] -* link:https://access.redhat.com/security/cve/CVE-2021-3872[CVE-2021-3872] -* link:https://access.redhat.com/security/cve/CVE-2021-3984[CVE-2021-3984] -* link:https://access.redhat.com/security/cve/CVE-2021-4019[CVE-2021-4019] -* link:https://access.redhat.com/security/cve/CVE-2021-4122[CVE-2021-4122] -* link:https://access.redhat.com/security/cve/CVE-2021-4155[CVE-2021-4155] -* link:https://access.redhat.com/security/cve/CVE-2021-4192[CVE-2021-4192] -* link:https://access.redhat.com/security/cve/CVE-2021-4193[CVE-2021-4193] -* link:https://access.redhat.com/security/cve/CVE-2022-0185[CVE-2022-0185] -* link:https://access.redhat.com/security/cve/CVE-2022-21248[CVE-2022-21248] -* link:https://access.redhat.com/security/cve/CVE-2022-21277[CVE-2022-21277] -* link:https://access.redhat.com/security/cve/CVE-2022-21282[CVE-2022-21282] -* link:https://access.redhat.com/security/cve/CVE-2022-21283[CVE-2022-21283] -* link:https://access.redhat.com/security/cve/CVE-2022-21291[CVE-2022-21291] -* link:https://access.redhat.com/security/cve/CVE-2022-21293[CVE-2022-21293] -* link:https://access.redhat.com/security/cve/CVE-2022-21294[CVE-2022-21294] -* link:https://access.redhat.com/security/cve/CVE-2022-21296[CVE-2022-21296] -* link:https://access.redhat.com/security/cve/CVE-2022-21299[CVE-2022-21299] -* link:https://access.redhat.com/security/cve/CVE-2022-21305[CVE-2022-21305] -* link:https://access.redhat.com/security/cve/CVE-2022-21340[CVE-2022-21340] -* link:https://access.redhat.com/security/cve/CVE-2022-21341[CVE-2022-21341] -* link:https://access.redhat.com/security/cve/CVE-2022-21360[CVE-2022-21360] -* link:https://access.redhat.com/security/cve/CVE-2022-21365[CVE-2022-21365] -* link:https://access.redhat.com/security/cve/CVE-2022-21366[CVE-2022-21366] -==== - -[id="cluster-logging-release-notes-5-3-3"] -== OpenShift Logging 5.3.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0227[RHSA-2022:0227 OpenShift Logging Bug Fix Release 5.3.3] - -[id="openshift-logging-5-3-3-bug-fixes"] -=== Bug fixes -* Before this update, changes to the metrics dashboards had not yet been deployed because the cluster-logging-operator did not correctly compare existing and desired configmaps containing the dashboard. 
This update fixes the logic by adding a dashboard unique hash value to the object labels.(link:https://issues.redhat.com/browse/LOG-2066[LOG-2066]) - -* This update changes the log4j dependency to 2.17.1 to resolve link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832].(link:https://issues.redhat.com/browse/LOG-2102[LOG-2102]) - -=== CVEs -[id="openshift-logging-5-3-3-CVEs"] -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-27292[CVE-2021-27292] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1940613[BZ-1940613] -* link:https://access.redhat.com/security/cve/CVE-2021-44832[CVE-2021-44832] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2035951[BZ-2035951] -==== - -[id="cluster-logging-release-notes-5-3-2"] -== OpenShift Logging 5.3.2 -This release includes link:https://access.redhat.com/errata/RHSA-2022:0044[RHSA-2022:0044 OpenShift Logging Bug Fix Release 5.3.2] - -[id="openshift-logging-5-3-2-bug-fixes"] -=== Bug fixes -* Before this update, Elasticsearch rejected logs from the Event Router due to a parsing error. This update changes the data model to resolve the parsing error. However, as a result, previous indices might cause warnings or errors within Kibana. The `kubernetes.event.metadata.resourceVersion` field causes errors until existing indices are removed or reindexed. If this field is not used in Kibana, you can ignore the error messages. If you have a retention policy that deletes old indices, the policy eventually removes the old indices and stops the error messages. Otherwise, manually reindex to stop the error messages. (link:https://issues.redhat.com/browse/LOG-2087[LOG-2087]) - -* Before this update, the OpenShift Logging Dashboard displayed the wrong pod namespace in the table that displays top producing and collected containers over the last 24 hours. With this update, the OpenShift Logging Dashboard displays the correct pod namespace. (link:https://issues.redhat.com/browse/LOG-2051[LOG-2051]) - -* Before this update, if `outputDefaults.elasticsearch.structuredTypeKey` in the `ClusterLogForwarder` custom resource (CR) instance did not have a structured key, the CR replaced the output secret with the default secret used to communicate to the default log store. With this update, the defined output secret is correctly used. (link:https://issues.redhat.com/browse/LOG-2046[LOG-2046]) - -[id="openshift-logging-5-3-2-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2020-36327[CVE-2020-36327] -** https://bugzilla.redhat.com/show_bug.cgi?id=1958999[BZ-1958999] -* https://access.redhat.com/security/cve/CVE-2021-45105[CVE-2021-45105] -** https://bugzilla.redhat.com/show_bug.cgi?id=2034067[BZ-2034067] -* https://access.redhat.com/security/cve/CVE-2021-3712[CVE-2021-3712] -* https://access.redhat.com/security/cve/CVE-2021-20321[CVE-2021-20321] -* https://access.redhat.com/security/cve/CVE-2021-42574[CVE-2021-42574] -==== - -[id="cluster-logging-release-notes-5-3-1"] -== OpenShift Logging 5.3.1 -This release includes link:https://access.redhat.com/errata/RHSA-2021:5129[RHSA-2021:5129 OpenShift Logging Bug Fix Release 5.3.1] - -[id="openshift-logging-5-3-1-bug-fixes"] -=== Bug fixes -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. 
(link:https://issues.redhat.com/browse/LOG-1998[LOG-1998]) - -* Before this update, the Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the Logging dashboard displays CPU graphs correctly. (link:https://issues.redhat.com/browse/LOG-1925[LOG-1925]) - -* Before this update, the Elasticsearch Prometheus exporter plugin compiled index-level metrics using a high-cost query that impacted the Elasticsearch node performance. This update implements a lower-cost query that improves performance. (link:https://issues.redhat.com/browse/LOG-1897[LOG-1897]) - - -[id="openshift-logging-5-3-1-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://www.redhat.com/security/data/cve/CVE-2021-21409.html[CVE-2021-21409] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=1944888[BZ-1944888] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37136.html[CVE-2021-37136] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004133[BZ-2004133] -* link:https://www.redhat.com/security/data/cve/CVE-2021-37137.html[CVE-2021-37137] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2004135[BZ-2004135] -* link:https://www.redhat.com/security/data/cve/CVE-2021-44228.html[CVE-2021-44228] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2030932[BZ-2030932] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25009.html[CVE-2018-25009] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25010.html[CVE-2018-25010] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25012.html[CVE-2018-25012] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25013.html[CVE-2018-25013] -* link:https://www.redhat.com/security/data/cve/CVE-2018-25014.html[CVE-2018-25014] -* link:https://www.redhat.com/security/data/cve/CVE-2019-5827.html[CVE-2019-5827] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13750.html[CVE-2019-13750] -* link:https://www.redhat.com/security/data/cve/CVE-2019-13751.html[CVE-2019-13751] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17594.html[CVE-2019-17594] -* link:https://www.redhat.com/security/data/cve/CVE-2019-17595.html[CVE-2019-17595] -* link:https://www.redhat.com/security/data/cve/CVE-2019-18218.html[CVE-2019-18218] -* link:https://www.redhat.com/security/data/cve/CVE-2019-19603.html[CVE-2019-19603] -* link:https://www.redhat.com/security/data/cve/CVE-2019-20838.html[CVE-2019-20838] -* link:https://www.redhat.com/security/data/cve/CVE-2020-12762.html[CVE-2020-12762] -* link:https://www.redhat.com/security/data/cve/CVE-2020-13435.html[CVE-2020-13435] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14145.html[CVE-2020-14145] -* link:https://www.redhat.com/security/data/cve/CVE-2020-14155.html[CVE-2020-14155] -* link:https://www.redhat.com/security/data/cve/CVE-2020-16135.html[CVE-2020-16135] -* link:https://www.redhat.com/security/data/cve/CVE-2020-17541.html[CVE-2020-17541] -* link:https://www.redhat.com/security/data/cve/CVE-2020-24370.html[CVE-2020-24370] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35521.html[CVE-2020-35521] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35522.html[CVE-2020-35522] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35523.html[CVE-2020-35523] -* link:https://www.redhat.com/security/data/cve/CVE-2020-35524.html[CVE-2020-35524] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36330.html[CVE-2020-36330] -* link:https://www.redhat.com/security/data/cve/CVE-2020-36331.html[CVE-2020-36331] -* 
link:https://www.redhat.com/security/data/cve/CVE-2020-36332.html[CVE-2020-36332] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3200.html[CVE-2021-3200] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3426.html[CVE-2021-3426] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3445.html[CVE-2021-3445] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3481.html[CVE-2021-3481] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3572.html[CVE-2021-3572] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3580.html[CVE-2021-3580] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3712.html[CVE-2021-3712] -* link:https://www.redhat.com/security/data/cve/CVE-2021-3800.html[CVE-2021-3800] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20231.html[CVE-2021-20231] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20232.html[CVE-2021-20232] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20266.html[CVE-2021-20266] -* link:https://www.redhat.com/security/data/cve/CVE-2021-20317.html[CVE-2021-20317] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22876.html[CVE-2021-22876] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22898.html[CVE-2021-22898] -* link:https://www.redhat.com/security/data/cve/CVE-2021-22925.html[CVE-2021-22925] -* link:https://www.redhat.com/security/data/cve/CVE-2021-27645.html[CVE-2021-27645] -* link:https://www.redhat.com/security/data/cve/CVE-2021-28153.html[CVE-2021-28153] -* link:https://www.redhat.com/security/data/cve/CVE-2021-31535.html[CVE-2021-31535] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33560.html[CVE-2021-33560] -* link:https://www.redhat.com/security/data/cve/CVE-2021-33574.html[CVE-2021-33574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-35942.html[CVE-2021-35942] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36084.html[CVE-2021-36084] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36085.html[CVE-2021-36085] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36086.html[CVE-2021-36086] -* link:https://www.redhat.com/security/data/cve/CVE-2021-36087.html[CVE-2021-36087] -* link:https://www.redhat.com/security/data/cve/CVE-2021-42574.html[CVE-2021-42574] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43267.html[CVE-2021-43267] -* link:https://www.redhat.com/security/data/cve/CVE-2021-43527.html[CVE-2021-43527] -* link:https://www.redhat.com/security/data/cve/CVE-2021-45046.html[CVE-2021-45046] -==== diff --git a/modules/cluster-logging-release-notes-5.4.0.adoc b/modules/cluster-logging-release-notes-5.4.0.adoc deleted file mode 100644 index 794d45e98e1d..000000000000 --- a/modules/cluster-logging-release-notes-5.4.0.adoc +++ /dev/null @@ -1,53 +0,0 @@ - -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc -[id="cluster-logging-release-notes-5-4-0"] -= Logging 5.4 -The following advisories are available for logging 5.4: -link:https://access.redhat.com/errata/RHSA-2022:1461[{logging-title-uc} Release 5.4] - -[id="openshift-logging-5-4-0-bug-fixes"] -== Bug fixes - -* Before this update, the `cluster-logging-operator` utilized cluster scoped roles and bindings to establish permissions for the Prometheus service account to scrape metrics. These permissions were only created when deploying the Operator using the console interface but were missing when deploying from the command line. This update fixes the issue by making the roles and bindings namespace-scoped. 
(link:https://issues.redhat.com/browse/LOG-2286[LOG-2286]) - -* Before this update, a prior change to fix dashboard reconciliation introduced an `ownerReferences` field to the resource across namespaces. As a result, the config map and dashboard were not created in the namespace. With this update, the removal of the `ownerReferences` field resolves the issue and the OpenShift Logging dashboard is available in the console. (link:https://issues.redhat.com/browse/LOG-2163[LOG-2163]) - -* Before this update, changes to the metrics dashboards did not deploy because the `cluster-logging-operator` did not correctly compare existing and desired configmaps containing the dashboard. With this update, the addition of a unique hash value to object labels resolves the issue. (link:https://issues.redhat.com/browse/LOG-2071[LOG-2071]) - -* Before this update, the OpenShift Logging dashboard did not correctly display the pods and namespaces in the table, which displays the top producing containers collected over the last 24 hours. With this update, the pods and namespaces are displayed correctly. (link:https://issues.redhat.com/browse/LOG-2069[LOG-2069]) - -* Before this update, when the `ClusterLogForwarder` was set up with `Elasticsearch OutputDefault` and Elasticsearch outputs did not have structured keys, the generated configuration contained the incorrect values for authentication. This update corrects the secret and certificates used. (link:https://issues.redhat.com/browse/LOG-2056[LOG-2056]) - -* Before this update, the OpenShift Logging dashboard displayed an empty CPU graph because of a reference to an invalid metric. With this update, the correct data point has been selected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2026[LOG-2026]) - -* Before this update, the Fluentd container image included builder tools that were unnecessary at run time. This update removes those tools from the image. (link:https://issues.redhat.com/browse/LOG-1927[LOG-1927]) - -* Before this update, a name change of the deployed collector in the 5.3 release caused the logging collector to generate the `FluentdNodeDown` alert. This update resolves the issue by fixing the job name for the Prometheus alert. (link:https://issues.redhat.com/browse/LOG-1918[LOG-1918]) - -* Before this update, the log collector was collecting its own logs due to a refactoring that renamed the component. This could lead to a potential feedback loop of the collector processing its own log that might result in memory and log message size issues. This update resolves the issue by excluding the collector logs from the collection. (link:https://issues.redhat.com/browse/LOG-1774[LOG-1774]) - -* Before this update, Elasticsearch generated the error "Unable to create PersistentVolumeClaim due to forbidden: exceeded quota: infra-storage-quota." if the PVC already existed. With this update, Elasticsearch checks for existing PVCs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2131[LOG-2131]) - -* Before this update, Elasticsearch was unable to return to the ready state when the `elasticsearch-signing` secret was removed. With this update, Elasticsearch is able to go back to the ready state after that secret is removed. (link:https://issues.redhat.com/browse/LOG-2171[LOG-2171]) - -* Before this update, the change of the path from which the collector reads container logs caused the collector to forward some records to the wrong indices.
With this update, the collector now uses the correct configuration to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2160[LOG-2160]) - -* Before this update, clusters with a large number of namespaces caused Elasticsearch to stop serving requests because the list of namespaces reached the maximum header size limit. With this update, headers only include a list of namespace names, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1899[LOG-1899]) - -* Before this update, the *{product-title} Logging* dashboard showed the number of shards 'x' times larger than the actual value when Elasticsearch had 'x' nodes. This issue occurred because it was printing all primary shards for each ES pod and calculating a sum on it, although the output was always for the whole ES cluster. With this update, the number of shards is now correctly calculated. (link:https://issues.redhat.com/browse/LOG-2156[LOG-2156]) - -* Before this update, the secrets "kibana" and "kibana-proxy" were not recreated if they were deleted manually. With this update, the `elasticsearch-operator` will watch the resources and automatically recreate them if deleted. (link:https://issues.redhat.com/browse/LOG-2250[LOG-2250]) - -* Before this update, tuning the buffer chunk size could cause the collector to generate a warning about the chunk size exceeding the byte limit for the event stream. With this update, you can also tune the read line limit, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2379[LOG-2379]) - -* Before this update, the logging console link in OpenShift WebConsole was not removed with the ClusterLogging CR. With this update, deleting the CR or uninstalling the Cluster Logging Operator removes the link. (link:https://issues.redhat.com/browse/LOG-2373[LOG-2373]) - -* Before this update, a change to the container logs path caused this metric to always be zero with older releases configured with the original path. With this update, the plugin which exposes metrics about collected logs supports reading from either path to resolve the issue. (link:https://issues.redhat.com/browse/LOG-2462[LOG-2462]) - -== CVEs -[id="openshift-logging-5-4-0-CVEs"] -* link:https://access.redhat.com/security/cve/CVE-2022-0759[CVE-2022-0759] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2058404[BZ-2058404] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -** link:https://bugzilla.redhat.com/show_bug.cgi?id=2045880[BZ-2045880] diff --git a/modules/cluster-logging-release-notes-5.4.z.adoc b/modules/cluster-logging-release-notes-5.4.z.adoc deleted file mode 100644 index 5c16c65d03f4..000000000000 --- a/modules/cluster-logging-release-notes-5.4.z.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//Z-stream Release Notes by Version -[id="cluster-logging-release-notes-5-4-1"] -== Logging 5.4.1 -This release includes https://access.redhat.com/errata/RHSA-2022:2216[RHSA-2022:2216-OpenShift Logging Bug Fix Release 5.4.1]. - -[id="openshift-logging-5-4-1-bug-fixes"] -=== Bug fixes -* Before this update, the log file metric exporter only reported logs created while the exporter was running, which resulted in inaccurate log growth data. This update resolves this issue by monitoring `/var/log/pods`. (https://issues.redhat.com/browse/LOG-2442[LOG-2442]) - -* Before this update, the collector would be blocked because it continually tried to use a stale connection when forwarding logs to fluentd forward receivers. 
With this release, the `keepalive_timeout` value has been set to 30 seconds (`30s`) so that the collector recycles the connection and re-attempts to send failed messages within a reasonable amount of time. (https://issues.redhat.com/browse/LOG-2534[LOG-2534]) - -* Before this update, an error in the gateway component enforcing tenancy for reading logs limited access to logs with a Kubernetes namespace causing "audit" and some "infrastructure" logs to be unreadable. With this update, the proxy correctly detects users with admin access and allows access to logs without a namespace. (https://issues.redhat.com/browse/LOG-2448[LOG-2448]) - -* Before this update, `system:serviceaccount:openshift-monitoring:prometheus-k8s` had cluster level privileges as a `clusterrole` and `clusterrolebinding`. This update restricts the `serviceaccount` to the `openshift-logging` namespace with a role and rolebinding. (https://issues.redhat.com/browse/LOG-2437[LOG-2437]) - -* Before this update, Linux audit log time parsing relied on an ordinal position of a key/value pair. This update changes the parsing to use a regular expression to find the time entry. (https://issues.redhat.com/browse/LOG-2321[LOG-2321]) - - -[id="openshift-logging-5-4-1-CVEs"] -=== CVEs -.Click to expand CVEs -[%collapsible] -==== -* https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* https://access.redhat.com/security/cve/CVE-2021-4028[CVE-2021-4028] -* https://access.redhat.com/security/cve/CVE-2021-37136[CVE-2021-37136] -* https://access.redhat.com/security/cve/CVE-2021-37137[CVE-2021-37137] -* https://access.redhat.com/security/cve/CVE-2021-43797[CVE-2021-43797] -* https://access.redhat.com/security/cve/CVE-2022-0778[CVE-2022-0778] -* https://access.redhat.com/security/cve/CVE-2022-1154[CVE-2022-1154] -* https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* https://access.redhat.com/security/cve/CVE-2022-21426[CVE-2022-21426] -* https://access.redhat.com/security/cve/CVE-2022-21434[CVE-2022-21434] -* https://access.redhat.com/security/cve/CVE-2022-21443[CVE-2022-21443] -* https://access.redhat.com/security/cve/CVE-2022-21476[CVE-2022-21476] -* https://access.redhat.com/security/cve/CVE-2022-21496[CVE-2022-21496] -* https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* https://access.redhat.com/security/cve/CVE-2022-25636[CVE-2022-25636] -==== diff --git a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc b/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc deleted file mode 100644 index aaeee905be20..000000000000 --- a/modules/cluster-logging-removing-unused-components-if-no-elasticsearch.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-collector.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-removing-unused-components-if-no-elasticsearch_{context}"] -= Removing unused components if you do not use the default Elasticsearch log store - -As an administrator, in the rare case that you forward logs to a third-party log store and do not use the default Elasticsearch log store, you can remove several unused components from your logging cluster. - -In other words, if you do not use the default Elasticsearch log store, you can remove the internal Elasticsearch `logStore` and Kibana `visualization` components from the `ClusterLogging` custom resource (CR). Removing these components is optional but saves resources. 
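Before the prerequisite check that follows, it can help to confirm programmatically that no pipeline still references the internal store. The following is only a sketch; it assumes the `ClusterLogForwarder` CR is named `instance` and lives in the `openshift-logging` namespace. If the output contains `default`, the internal Elasticsearch log store is still in use and the `logStore` stanza should not be removed.

[source,terminal]
----
$ oc -n openshift-logging get clusterlogforwarder instance \
    -o jsonpath='{range .spec.pipelines[*]}{.name}{": "}{.outputRefs}{"\n"}{end}'
----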
- -.Prerequisites - -* Verify that your log forwarder does not send log data to the default internal Elasticsearch cluster. Inspect the `ClusterLogForwarder` CR YAML file that you used to configure log forwarding. Verify that it _does not_ have an `outputRefs` element that specifies `default`. For example: -+ -[source,yaml] ----- -outputRefs: -- default ----- - -[WARNING] -==== -Suppose the `ClusterLogForwarder` CR forwards log data to the internal Elasticsearch cluster, and you remove the `logStore` component from the `ClusterLogging` CR. In that case, the internal Elasticsearch cluster will not be present to store the log data. This absence can cause data loss. -==== - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- - -. If they are present, remove the `logStore` and `visualization` stanzas from the `ClusterLogging` CR. - -. Preserve the `collection` stanza of the `ClusterLogging` CR. The result should look similar to the following example: -+ -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - collection: - logs: - type: "fluentd" - fluentd: {} ----- - -. Verify that the collector pods are redeployed: -+ -[source,terminal] ----- -$ oc get pods -l component=collector -n openshift-logging ----- diff --git a/modules/cluster-logging-rn-5.2.11.adoc b/modules/cluster-logging-rn-5.2.11.adoc deleted file mode 100644 index a3b84e3b1145..000000000000 --- a/modules/cluster-logging-rn-5.2.11.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-11"] -= Logging 5.2.11 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5012[RHBA-2022:5012-OpenShift Logging Bug Fix Release 5.2.11] - -[id="openshift-logging-5-2-11-bug-fixes"] -== Bug fixes -* Before this update, clusters configured to perform CloudWatch forwarding wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for CloudWatch has been disabled, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2635[LOG-2635]) - -[id="openshift-logging-5-2-11-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.2.12.adoc b/modules/cluster-logging-rn-5.2.12.adoc deleted file mode 100644 index 86c386347b62..000000000000 --- a/modules/cluster-logging-rn-5.2.12.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-12"] -= Logging 5.2.12 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5558[RHBA-2022:5558-OpenShift Logging Bug Fix Release 5.2.12]. - -[id="openshift-logging-5-2-12-bug-fixes"] -== Bug fixes -None. - -[id="openshift-logging-5-2-12-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.2.13.adoc b/modules/cluster-logging-rn-5.2.13.adoc deleted file mode 100644 index 468304e0e599..000000000000 --- a/modules/cluster-logging-rn-5.2.13.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-2-13"] -= Logging 5.2.13 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5909[RHSA-2022:5909-OpenShift Logging Bug Fix Release 5.2.13]. 
- -[id="openshift-logging-5-2-13-bug-fixes"] -== Bug fixes -* https://bugzilla.redhat.com/show_bug.cgi?id=2100495[BZ-2100495] - -[id="openshift-logging-5-2-13-cves"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] -==== diff --git a/modules/cluster-logging-rn-5.3.10.adoc b/modules/cluster-logging-rn-5.3.10.adoc deleted file mode 100644 index 03d1e82f5efd..000000000000 --- a/modules/cluster-logging-rn-5.3.10.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-10"] -= Logging 5.3.10 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5908[RHSA-2022:5908-OpenShift Logging Bug Fix Release 5.3.10]. 
- -[id="openshift-logging-5-3-10-bug-fixes"] -== Bug fixes -* https://bugzilla.redhat.com/show_bug.cgi?id=2100495[BZ-2100495] - -[id="openshift-logging-5-3-10-cves"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] -==== diff --git a/modules/cluster-logging-rn-5.3.11.adoc b/modules/cluster-logging-rn-5.3.11.adoc deleted file mode 100644 index 0102585c3311..000000000000 --- a/modules/cluster-logging-rn-5.3.11.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-11_{context}"] -= Logging 5.3.11 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6182[OpenShift Logging Bug Fix Release 5.3.11]. - -[id="openshift-logging-5-3-11-bug-fixes_{context}"] -== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2871[LOG-2871]) - -[id="openshift-logging-5-3-11-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] diff --git a/modules/cluster-logging-rn-5.3.12.adoc b/modules/cluster-logging-rn-5.3.12.adoc deleted file mode 100644 index 1053b7870d85..000000000000 --- a/modules/cluster-logging-rn-5.3.12.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-12_{context}"] -= Logging 5.3.12 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6560[OpenShift Logging Bug Fix Release 5.3.12]. - -[id="openshift-logging-5-3-12-bug-fixes_{context}"] -== Bug fixes -None. 
- -[id="openshift-logging-5-3-12-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.3.13.adoc b/modules/cluster-logging-rn-5.3.13.adoc deleted file mode 100644 index 64676f9cd3cf..000000000000 --- a/modules/cluster-logging-rn-5.3.13.adoc +++ /dev/null @@ -1,36 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-13_{context}"] -= Logging 5.3.13 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6882[RHSA-2022:68828-OpenShift Logging Bug Fix Release 5.3.13]. - -[id="openshift-logging-5-3-13-bug-fixes"] -== Bug fixes -None. - -[id="openshift-logging-5-3-13-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2022-0494[CVE-2022-0494] -* link:https://access.redhat.com/security/cve/CVE-2022-1353[CVE-2022-1353] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2588[CVE-2022-2588] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-23816[CVE-2022-23816] -* link:https://access.redhat.com/security/cve/CVE-2022-23825[CVE-2022-23825] -* link:https://access.redhat.com/security/cve/CVE-2022-29900[CVE-2022-29900] -* link:https://access.redhat.com/security/cve/CVE-2022-29901[CVE-2022-29901] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] -==== diff --git a/modules/cluster-logging-rn-5.3.14.adoc b/modules/cluster-logging-rn-5.3.14.adoc deleted file mode 100644 index ac7efd450212..000000000000 --- a/modules/cluster-logging-rn-5.3.14.adoc +++ /dev/null @@ -1,84 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-14_{context}"] -= Logging 5.3.14 -This release includes link:https://access.redhat.com/errata/RHSA-2022:8889[OpenShift Logging Bug Fix Release 5.3.14]. 
- -[id="openshift-logging-5-3-14-bug-fixes"] -== Bug fixes -* Before this update, the log file size map generated by the `log-file-metrics-exporter` component did not remove entries for deleted files, resulting in increased file size, and process memory. With this update, the log file size map does not contain entries for deleted files. (link:https://issues.redhat.com/browse/LOG-3293[LOG-3293]) - -[id="openshift-logging-5-3-14-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* 
link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] -* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] diff --git a/modules/cluster-logging-rn-5.3.8.adoc b/modules/cluster-logging-rn-5.3.8.adoc deleted file mode 100644 index d3f152160b2e..000000000000 --- a/modules/cluster-logging-rn-5.3.8.adoc +++ /dev/null @@ -1,61 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-8"] -= Logging 5.3.8 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5010[RHBA-2022:5010-OpenShift Logging Bug Fix Release 5.3.8] - -[id="openshift-logging-5-3-8-bug-fixes"] -== Bug fixes -(None.) 
- -[id="openshift-logging-5-3-8-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.3.9.adoc b/modules/cluster-logging-rn-5.3.9.adoc deleted file mode 100644 index 1e89c948a680..000000000000 --- a/modules/cluster-logging-rn-5.3.9.adoc +++ /dev/null @@ -1,32 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-3-9"] -= Logging 5.3.9 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5557[RHBA-2022:5557-OpenShift Logging Bug Fix Release 5.3.9]. - -[id="openshift-logging-5-3-9-bug-fixes"] -== Bug fixes - -* Before this update, the logging collector included a path as a label for the metrics it produced. This path changed frequently and contributed to significant storage changes for the Prometheus server. With this update, the label has been dropped to resolve the issue and reduce storage consumption. (link:https://issues.redhat.com/browse/LOG-2682[LOG-2682]) - - -[id="openshift-logging-5-3-9-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.4.10.adoc b/modules/cluster-logging-rn-5.4.10.adoc deleted file mode 100644 index 0bf8b088e108..000000000000 --- a/modules/cluster-logging-rn-5.4.10.adoc +++ /dev/null @@ -1,31 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-10_{context}"] -= Logging 5.4.10 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0385[OpenShift Logging Bug Fix Release 5.4.10]. - -[id="openshift-logging-5-4-10-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-10-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-2056[CVE-2022-2056] -* link:https://access.redhat.com/security/cve/CVE-2022-2057[CVE-2022-2057] -* link:https://access.redhat.com/security/cve/CVE-2022-2058[CVE-2022-2058] -* link:https://access.redhat.com/security/cve/CVE-2022-2519[CVE-2022-2519] -* link:https://access.redhat.com/security/cve/CVE-2022-2520[CVE-2022-2520] -* link:https://access.redhat.com/security/cve/CVE-2022-2521[CVE-2022-2521] -* link:https://access.redhat.com/security/cve/CVE-2022-2867[CVE-2022-2867] -* link:https://access.redhat.com/security/cve/CVE-2022-2868[CVE-2022-2868] -* link:https://access.redhat.com/security/cve/CVE-2022-2869[CVE-2022-2869] -* link:https://access.redhat.com/security/cve/CVE-2022-2953[CVE-2022-2953] -* link:https://access.redhat.com/security/cve/CVE-2022-2964[CVE-2022-2964] -* link:https://access.redhat.com/security/cve/CVE-2022-4139[CVE-2022-4139] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.4.11.adoc b/modules/cluster-logging-rn-5.4.11.adoc deleted file mode 100644 index f659fe685961..000000000000 --- a/modules/cluster-logging-rn-5.4.11.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-11_{context}"] -= Logging 5.4.11 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0632[OpenShift Logging Bug Fix Release 5.4.11]. - -[id="openshift-logging-5-4-11-bug-fixes_{context}"] -== Bug fixes -* link:https://bugzilla.redhat.com/show_bug.cgi?id=2099524[BZ 2099524] -* link:https://bugzilla.redhat.com/show_bug.cgi?id=2161274[BZ 2161274] - -[id="openshift-logging-5-4-11-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.4.12.adoc b/modules/cluster-logging-rn-5.4.12.adoc deleted file mode 100644 index 137dfe68b341..000000000000 --- a/modules/cluster-logging-rn-5.4.12.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-12_{context}"] -= Logging 5.4.12 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0931[OpenShift Logging Bug Fix Release 5.4.12]. - -[id="openshift-logging-5-4-12-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-12-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.4.13.adoc b/modules/cluster-logging-rn-5.4.13.adoc deleted file mode 100644 index cf5b6ccbd808..000000000000 --- a/modules/cluster-logging-rn-5.4.13.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-13_{context}"] -= Logging 5.4.13 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1312[OpenShift Logging Bug Fix Release 5.4.13]. - -[id="openshift-logging-5-4-13-bug-fixes"] -== Bug fixes -* Before this update, a problem with the Fluentd collector caused it to not capture OAuth login events stored in `/var/log/auth-server/audit.log`. This led to incomplete collection of login events from the OAuth service. With this update, the Fluentd collector now resolves this issue by capturing all login events from the OAuth service, including those stored in `/var/log/auth-server/audit.log`, as expected. (link:https://issues.redhat.com/browse/LOG-3731[LOG-3731]) - -[id="openshift-logging-5-4-13-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.4.14.adoc b/modules/cluster-logging-rn-5.4.14.adoc deleted file mode 100644 index 2bd231f72c23..000000000000 --- a/modules/cluster-logging-rn-5.4.14.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-14{context}"] -= Logging 5.4.14 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1843[OpenShift Logging Bug Fix Release 5.4.14]. - -[id="openshift-logging-5-4-14-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-14-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.4.2.adoc b/modules/cluster-logging-rn-5.4.2.adoc deleted file mode 100644 index ab88c1d7bf83..000000000000 --- a/modules/cluster-logging-rn-5.4.2.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-2"] -= Logging 5.4.2 -This release includes link:https://access.redhat.com/errata/RHBA-2022:4874[RHBA-2022:4874-OpenShift Logging Bug Fix Release 5.4.2] - -[id="openshift-logging-5-4-2-bug-fixes"] -== Bug fixes -* Before this update, editing the Collector configuration using `oc edit` was difficult because it had inconsistent use of white-space. This change introduces logic to normalize and format the configuration prior to any updates by the Operator so that it is easy to edit using `oc edit`. (link:https://issues.redhat.com/browse/LOG-2319[LOG-2319]) - -* Before this update, the `FluentdNodeDown` alert could not provide instance labels in the message section appropriately. This update resolves the issue by fixing the alert rule to provide instance labels in cases of partial instance failures. (link:https://issues.redhat.com/browse/LOG-2607[LOG-2607]) - -* Before this update, several log levels, such as`critical`, that were documented as supported by the product were not. This update fixes the discrepancy so the documented log levels are now supported by the product. 
(link:https://issues.redhat.com/browse/LOG-2033[LOG-2033]) - -[id="openshift-logging-5-4-2-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2018-25032[CVE-2018-25032] -* link:https://access.redhat.com/security/cve/CVE-2020-0404[CVE-2020-0404] -* link:https://access.redhat.com/security/cve/CVE-2020-4788[CVE-2020-4788] -* link:https://access.redhat.com/security/cve/CVE-2020-13974[CVE-2020-13974] -* link:https://access.redhat.com/security/cve/CVE-2020-19131[CVE-2020-19131] -* link:https://access.redhat.com/security/cve/CVE-2020-27820[CVE-2020-27820] -* link:https://access.redhat.com/security/cve/CVE-2021-0941[CVE-2021-0941] -* link:https://access.redhat.com/security/cve/CVE-2021-3612[CVE-2021-3612] -* link:https://access.redhat.com/security/cve/CVE-2021-3634[CVE-2021-3634] -* link:https://access.redhat.com/security/cve/CVE-2021-3669[CVE-2021-3669] -* link:https://access.redhat.com/security/cve/CVE-2021-3737[CVE-2021-3737] -* link:https://access.redhat.com/security/cve/CVE-2021-3743[CVE-2021-3743] -* link:https://access.redhat.com/security/cve/CVE-2021-3744[CVE-2021-3744] -* link:https://access.redhat.com/security/cve/CVE-2021-3752[CVE-2021-3752] -* link:https://access.redhat.com/security/cve/CVE-2021-3759[CVE-2021-3759] -* link:https://access.redhat.com/security/cve/CVE-2021-3764[CVE-2021-3764] -* link:https://access.redhat.com/security/cve/CVE-2021-3772[CVE-2021-3772] -* link:https://access.redhat.com/security/cve/CVE-2021-3773[CVE-2021-3773] -* link:https://access.redhat.com/security/cve/CVE-2021-4002[CVE-2021-4002] -* link:https://access.redhat.com/security/cve/CVE-2021-4037[CVE-2021-4037] -* link:https://access.redhat.com/security/cve/CVE-2021-4083[CVE-2021-4083] -* link:https://access.redhat.com/security/cve/CVE-2021-4157[CVE-2021-4157] -* link:https://access.redhat.com/security/cve/CVE-2021-4189[CVE-2021-4189] -* link:https://access.redhat.com/security/cve/CVE-2021-4197[CVE-2021-4197] -* link:https://access.redhat.com/security/cve/CVE-2021-4203[CVE-2021-4203] -* link:https://access.redhat.com/security/cve/CVE-2021-20322[CVE-2021-20322] -* link:https://access.redhat.com/security/cve/CVE-2021-21781[CVE-2021-21781] -* link:https://access.redhat.com/security/cve/CVE-2021-23222[CVE-2021-23222] -* link:https://access.redhat.com/security/cve/CVE-2021-26401[CVE-2021-26401] -* link:https://access.redhat.com/security/cve/CVE-2021-29154[CVE-2021-29154] -* link:https://access.redhat.com/security/cve/CVE-2021-37159[CVE-2021-37159] -* link:https://access.redhat.com/security/cve/CVE-2021-41617[CVE-2021-41617] -* link:https://access.redhat.com/security/cve/CVE-2021-41864[CVE-2021-41864] -* link:https://access.redhat.com/security/cve/CVE-2021-42739[CVE-2021-42739] -* link:https://access.redhat.com/security/cve/CVE-2021-43056[CVE-2021-43056] -* link:https://access.redhat.com/security/cve/CVE-2021-43389[CVE-2021-43389] -* link:https://access.redhat.com/security/cve/CVE-2021-43976[CVE-2021-43976] -* link:https://access.redhat.com/security/cve/CVE-2021-44733[CVE-2021-44733] -* link:https://access.redhat.com/security/cve/CVE-2021-45485[CVE-2021-45485] -* link:https://access.redhat.com/security/cve/CVE-2021-45486[CVE-2021-45486] -* link:https://access.redhat.com/security/cve/CVE-2022-0001[CVE-2022-0001] -* link:https://access.redhat.com/security/cve/CVE-2022-0002[CVE-2022-0002] -* link:https://access.redhat.com/security/cve/CVE-2022-0286[CVE-2022-0286] -* link:https://access.redhat.com/security/cve/CVE-2022-0322[CVE-2022-0322] -* 
link:https://access.redhat.com/security/cve/CVE-2022-1011[CVE-2022-1011] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -==== diff --git a/modules/cluster-logging-rn-5.4.3.adoc b/modules/cluster-logging-rn-5.4.3.adoc deleted file mode 100644 index efebbe265853..000000000000 --- a/modules/cluster-logging-rn-5.4.3.adoc +++ /dev/null @@ -1,44 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-3"] -= Logging 5.4.3 -This release includes link:https://access.redhat.com/errata/RHSA-2022:5556[RHSA-2022:5556-OpenShift Logging Bug Fix Release 5.4.3]. - -[id="openshift-logging-elasticsearch-dep"] -== Elasticsearch Operator deprecation notice -In {logging} 5.4.3, the Elasticsearch Operator is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to using the Elasticsearch Operator to manage the default log storage, you can use the Loki Operator. - -[id="openshift-logging-5-4-3-bug-fixes"] -== Bug fixes -* Before this update, the OpenShift Logging Dashboard showed the number of active primary shards instead of all active shards. With this update, the dashboard displays all active shards. (link:https://issues.redhat.com/browse/LOG-2781[LOG-2781]) - -* Before this update, a bug in a library used by `elasticsearch-operator` contained a denial of service attack vulnerability. With this update, the library has been updated to a version that does not contain this vulnerability. (link:https://issues.redhat.com/browse/LOG-2816[LOG-2816]) - -* Before this update, when configuring Vector to forward logs to Loki, it was not possible to set a custom bearer token or use the default token if Loki had TLS enabled. With this update, Vector can forward logs to Loki using tokens with TLS enabled. (link:https://issues.redhat.com/browse/LOG-2786[LOG-2786]) - -* Before this update, the Elasticsearch Operator omitted the `referencePolicy` property of the `ImageStream` custom resource when selecting an `oauth-proxy` image. This omission caused the Kibana deployment to fail in specific environments. With this update, using `referencePolicy` resolves the issue, and the Operator can deploy Kibana successfully. (link:https://issues.redhat.com/browse/LOG-2791[LOG-2791]) - -* Before this update, alerting rules for the `ClusterLogForwarder` custom resource did not take multiple forward outputs into account. This update resolves the issue. (link:https://issues.redhat.com/browse/LOG-2640[LOG-2640]) - -* Before this update, clusters configured to forward logs to Amazon CloudWatch wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for CloudWatch has been disabled, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-2768[LOG-2768]) - -[id="openshift-logging-5-4-3-CVEs"] -== CVEs -.Click to expand CVEs -[%collapsible] -==== -* link:https://access.redhat.com/security/cve/CVE-2020-28915[CVE-2020-28915] -* link:https://access.redhat.com/security/cve/CVE-2021-40528[CVE-2021-40528] -* link:https://access.redhat.com/security/cve/CVE-2022-1271[CVE-2022-1271] -* link:https://access.redhat.com/security/cve/CVE-2022-1621[CVE-2022-1621] -* link:https://access.redhat.com/security/cve/CVE-2022-1629[CVE-2022-1629] -* link:https://access.redhat.com/security/cve/CVE-2022-22576[CVE-2022-22576] -* link:https://access.redhat.com/security/cve/CVE-2022-25313[CVE-2022-25313] -* link:https://access.redhat.com/security/cve/CVE-2022-25314[CVE-2022-25314] -* link:https://access.redhat.com/security/cve/CVE-2022-26691[CVE-2022-26691] -* link:https://access.redhat.com/security/cve/CVE-2022-27666[CVE-2022-27666] -* link:https://access.redhat.com/security/cve/CVE-2022-27774[CVE-2022-27774] -* link:https://access.redhat.com/security/cve/CVE-2022-27776[CVE-2022-27776] -* link:https://access.redhat.com/security/cve/CVE-2022-27782[CVE-2022-27782] -* link:https://access.redhat.com/security/cve/CVE-2022-29824[CVE-2022-29824] -==== diff --git a/modules/cluster-logging-rn-5.4.4.adoc b/modules/cluster-logging-rn-5.4.4.adoc deleted file mode 100644 index 227a47277670..000000000000 --- a/modules/cluster-logging-rn-5.4.4.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-4"] -= Logging 5.4.4 -This release includes link:https://access.redhat.com/errata/RHBA-2022:5907[RHBA-2022:5907-OpenShift Logging Bug Fix Release 5.4.4]. - -[id="openshift-logging-5-4-4-bug-fixes"] -== Bug fixes - -* Before this update, non-Latin characters displayed incorrectly in Elasticsearch. With this update, Elasticsearch displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2794[LOG-2794]) - -* Before this update, non-Latin characters displayed incorrectly in Fluentd. With this update, Fluentd displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2657[LOG-2657]) - -* Before this update, the metrics server for the collector attempted to bind to the address using a value exposed by an environment variable. This change modifies the configuration to bind to any available interface. (link:https://issues.redhat.com/browse/LOG-2821[LOG-2821]) - -* Before this update, the `cluster-logging` Operator relied on the cluster to create a secret. This cluster behavior changed in {product-title} 4.11, which caused logging deployments to fail. With this update, the `cluster-logging` Operator resolves the issue by creating the secret if needed. 
(link:https://issues.redhat.com/browse/LOG-2840[LOG-2840]) - -[id="openshift-logging-5-4-4-cves"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-21540[CVE-2022-21540] -* link:https://access.redhat.com/security/cve/CVE-2022-21541[CVE-2022-21541] -* link:https://access.redhat.com/security/cve/CVE-2022-34169[CVE-2022-34169] diff --git a/modules/cluster-logging-rn-5.4.5.adoc b/modules/cluster-logging-rn-5.4.5.adoc deleted file mode 100644 index 678a4775ce8b..000000000000 --- a/modules/cluster-logging-rn-5.4.5.adoc +++ /dev/null @@ -1,26 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-5_{context}"] -= Logging 5.4.5 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6183[RHSA-2022:6183-OpenShift Logging Bug Fix Release 5.4.5]. - -[id="openshift-logging-5-4-5-bug-fixes_{context}"] -== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2881[LOG-2881]) - -* Before this update, the addition of multi-line error detection caused internal routing to change and forward records to the wrong destination. With this update, the internal routing is correct. (link:https://issues.redhat.com/browse/LOG-2946[LOG-2946]) - -* Before this update, the Operator could not decode index setting JSON responses with a quoted Boolean value and would result in an error. With this update, the Operator can properly decode this JSON response. (link:https://issues.redhat.com/browse/LOG-3009[LOG-3009]) - -* Before this update, Elasticsearch index templates defined the fields for labels with the wrong types. This change updates those templates to match the expected types forwarded by the log collector. (link:https://issues.redhat.com/browse/LOG-2972[LOG-2972]) - -[id="openshift-logging-5-4-5-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] diff --git a/modules/cluster-logging-rn-5.4.6.adoc b/modules/cluster-logging-rn-5.4.6.adoc deleted file mode 100644 index 88b38bf0a7aa..000000000000 --- a/modules/cluster-logging-rn-5.4.6.adoc +++ /dev/null @@ -1,25 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-6_{context}"] -= Logging 5.4.6 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6558[OpenShift Logging Bug Fix Release 5.4.6]. - -[id="openshift-logging-5-4-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, Fluentd would sometimes not recognize that the Kubernetes platform rotated the log file and would no longer read log messages. This update corrects that by setting the configuration parameter suggested by the upstream development team. 
(link:https://issues.redhat.com/browse/LOG-2792[LOG-2792]) - -* Before this update, each rollover job created empty indices when the `ClusterLogForwarder` custom resource had JSON parsing defined. With this update, new indices are not empty. (link:https://issues.redhat.com/browse/LOG-2823[LOG-2823]) - -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-3054[LOG-3054]) - -[id="openshift-logging-5-4-6-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.4.7.adoc b/modules/cluster-logging-rn-5.4.7.adoc deleted file mode 100644 index 9d65c5b935d5..000000000000 --- a/modules/cluster-logging-rn-5.4.7.adoc +++ /dev/null @@ -1,13 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-7_{context}"] -= Logging 5.4.7 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6857[OpenShift Logging Bug Fix Release 5.4.7]. - -[id="openshift-logging-5-4-7-bug-fixes_{context}"] -== Bug fixes -* (link:https://issues.redhat.com/browse/LOG-2464[LOG-2464]) - -[id="openshift-logging-5-4-7-cves_{context}"] -== CVEs -(None.) diff --git a/modules/cluster-logging-rn-5.4.8.adoc b/modules/cluster-logging-rn-5.4.8.adoc deleted file mode 100644 index d5dcadfe1fb4..000000000000 --- a/modules/cluster-logging-rn-5.4.8.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-8_{context}"] -= Logging 5.4.8 -This release includes link:https://access.redhat.com/errata/RHSA-2022:7435[RHSA-2022:7435-OpenShift Logging Bug Fix Release 5.4.8]. - -[id="openshift-logging-5-4-8-bug-fixes"] -== Bug fixes -None. 
- -[id="openshift-logging-5-4-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36518[CVE-2020-36518] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] -* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] -* link:https://access.redhat.com/security/cve/CVE-2022-42003[CVE-2022-42003] -* link:https://access.redhat.com/security/cve/CVE-2022-42004[CVE-2022-42004] diff --git a/modules/cluster-logging-rn-5.4.9.adoc b/modules/cluster-logging-rn-5.4.9.adoc deleted file mode 100644 index 89c6b1e57b5a..000000000000 --- a/modules/cluster-logging-rn-5.4.9.adoc +++ /dev/null @@ -1,85 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-4-9_{context}"] -= Logging 5.4.9 -This release includes link:https://access.redhat.com/errata/RHBA-2022:8780[OpenShift Logging Bug Fix Release 5.4.9]. - -[id="openshift-logging-5-4-9-bug-fixes"] -== Bug fixes -* Before this update, the Fluentd collector would warn of unused configuration parameters. This update removes those configuration parameters and their warning messages. (link:https://issues.redhat.com/browse/LOG-3074[LOG-3074]) - -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. 
(link:https://issues.redhat.com/browse/LOG-3306[LOG-3306]) - -[id="openshift-logging-5-4-9-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] 
-* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] diff --git a/modules/cluster-logging-rn-5.5.10.adoc b/modules/cluster-logging-rn-5.5.10.adoc deleted file mode 100644 index 0ec69d907639..000000000000 --- a/modules/cluster-logging-rn-5.5.10.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-10{context}"] -= Logging 5.5.10 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1827[OpenShift Logging Bug Fix Release 5.5.10]. - -[id="cluster-logging-5-5-10-bug-fixes"] -== Bug fixes -* Before this update, the logging view plugin of the OpenShift Web Console showed only an error text when the LokiStack was not reachable. After this update the plugin shows a proper error message with details on how to fix the unreachable LokiStack. 
(link:https://issues.redhat.com/browse/LOG-2874[LOG-2874]) - -[id="cluster-logging-5-5-10-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.5.2.adoc b/modules/cluster-logging-rn-5.5.2.adoc deleted file mode 100644 index 7cab2f497647..000000000000 --- a/modules/cluster-logging-rn-5.5.2.adoc +++ /dev/null @@ -1,45 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-2_{context}"] -= Logging 5.5.2 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6559[OpenShift Logging Bug Fix Release 5.5.2]. - -[id="openshift-logging-5-5-2-bug-fixes_{context}"] -== Bug fixes -* Before this update, alerting rules for the Fluentd collector did not adhere to the {product-title} monitoring style guidelines. This update modifies those alerts to include the namespace label, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1823[LOG-1823]) - -* Before this update, the index management rollover script failed to generate a new index name whenever there was more than one hyphen character in the name of the index. With this update, index names generate correctly. (link:https://issues.redhat.com/browse/LOG-2644[LOG-2644]) - -* Before this update, the Kibana route was setting a `caCertificate` value without a certificate present. With this update, no `caCertificate` value is set. (link:https://issues.redhat.com/browse/LOG-2661[LOG-2661]) - -* Before this update, a change in the collector dependencies caused it to issue a warning message for unused parameters. With this update, removing unused configuration parameters resolves the issue. (link:https://issues.redhat.com/browse/LOG-2859[LOG-2859]) - -* Before this update, pods created for deployments that Loki Operator created were mistakenly scheduled on nodes with non-Linux operating systems, if such nodes were available in the cluster the Operator was running in. With this update, the Operator attaches an additional node-selector to the pod definitions which only allows scheduling the pods on Linux-based nodes. (link:https://issues.redhat.com/browse/LOG-2895[LOG-2895]) - -* Before this update, the OpenShift Console Logs view did not filter logs by severity due to a LogQL parser issue in the LokiStack gateway. With this update, a parser fix resolves the issue and the OpenShift Console Logs view can filter by severity. (link:https://issues.redhat.com/browse/LOG-2908[LOG-2908]) - -* Before this update, a refactoring of the Fluentd collector plugins removed the timestamp field for events. This update restores the timestamp field, sourced from the event's received time. (link:https://issues.redhat.com/browse/LOG-2923[LOG-2923]) - -* Before this update, absence of a `level` field in audit logs caused an error in vector logs. With this update, the addition of a `level` field in the audit log record resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-2961[LOG-2961]) - -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-3053[LOG-3053]) - -* Before this update, each rollover job created empty indices when the `ClusterLogForwarder` custom resource had JSON parsing defined. With this update, new indices are not empty. (link:https://issues.redhat.com/browse/LOG-3063[LOG-3063]) - -* Before this update, when the user deleted the LokiStack after an update to Loki Operator 5.5 resources originally created by Loki Operator 5.4 remained. With this update, the resources' owner-references point to the 5.5 LokiStack. (link:https://issues.redhat.com/browse/LOG-2945[LOG-2945]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-2918[LOG-2918]) - -* Before this update, users with cluster-admin privileges were not able to properly view infrastructure and audit logs using the logging console. With this update, the authorization check has been extended to also recognize users in cluster-admin and dedicated-admin groups as admins. (link:https://issues.redhat.com/browse/LOG-2970[LOG-2970]) - -[id="openshift-logging-5-5-2-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.5.3.adoc b/modules/cluster-logging-rn-5.5.3.adoc deleted file mode 100644 index 59a9a158b01d..000000000000 --- a/modules/cluster-logging-rn-5.5.3.adoc +++ /dev/null @@ -1,36 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-3_{context}"] -= Logging 5.5.3 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6858[OpenShift Logging Bug Fix Release 5.5.3]. - -[id="openshift-logging-5-5-3-bug-fixes_{context}"] -== Bug fixes -* Before this update, log entries that had structured messages included the original message field, which made the entry larger. This update removes the message field for structured logs to reduce the increased size. (link:https://issues.redhat.com/browse/LOG-2759[LOG-2759]) - -* Before this update, the collector configuration excluded logs from `collector`, `default-log-store`, and `visualization` pods, but was unable to exclude logs archived in a `.gz` file. With this update, archived logs stored as `.gz` files of `collector`, `default-log-store`, and `visualization` pods are also excluded. 
(link:https://issues.redhat.com/browse/LOG-2844[LOG-2844]) - -* Before this update, when requests to an unavailable pod were sent through the gateway, no alert would warn of the disruption. With this update, individual alerts will generate if the gateway has issues completing a write or read request. (link:https://issues.redhat.com/browse/LOG-2884[LOG-2884]) - -* Before this update, pod metadata could be altered by fluent plugins because the values passed through the pipeline by reference. This update ensures each log message receives a copy of the pod metadata so each message processes independently. (link:https://issues.redhat.com/browse/LOG-3046[LOG-3046]) - -* Before this update, selecting *unknown* severity in the OpenShift Console Logs view excluded logs with a `level=unknown` value. With this update, logs without level and with `level=unknown` values are visible when filtering by *unknown* severity. (link:https://issues.redhat.com/browse/LOG-3062[LOG-3062]) - -* Before this update, log records sent to Elasticsearch had an extra field named `write-index` that contained the name of the index to which the logs needed to be sent. This field is not a part of the data model. After this update, this field is no longer sent. (link:https://issues.redhat.com/browse/LOG-3075[LOG-3075]) - -* With the introduction of the new built-in link:https://cloud.redhat.com/blog/pod-security-admission-in-openshift-4.11[Pod Security Admission Controller], Pods not configured in accordance with the enforced security standards defined globally or on the namespace level cannot run. With this update, the Operator and collectors allow privileged execution and run without security audit warnings or errors. (link:https://issues.redhat.com/browse/LOG-3077[LOG-3077]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3095[LOG-3095]) - -[id="openshift-logging-5-5-3-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/cluster-logging-rn-5.5.4.adoc b/modules/cluster-logging-rn-5.5.4.adoc deleted file mode 100644 index eee627dc3237..000000000000 --- a/modules/cluster-logging-rn-5.5.4.adoc +++ /dev/null @@ -1,41 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-4_{context}"] -= Logging 5.5.4 -This release includes link:https://access.redhat.com/errata/RHSA-2022:7434[RHSA-2022:7434-OpenShift Logging Bug Fix Release 5.5.4]. 
- -[id="openshift-logging-5-5-4-bug-fixes"] -== Bug fixes -* Before this update, an error in the query parser of the logging view plugin caused parts of the logs query to disappear if the query contained curly brackets `{}`. This made the queries invalid, leading to errors being returned for valid queries. With this update, the parser correctly handles these queries. (link:https://issues.redhat.com/browse/LOG-3042[LOG-3042]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. (link:https://issues.redhat.com/browse/LOG-3049[LOG-3049]) - -* Before this update, no alerts were implemented to support the collector implementation of Vector. This change adds Vector alerts and deploys separate alerts, depending upon the chosen collector implementation. (link:https://issues.redhat.com/browse/LOG-3127[LOG-3127]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3138[LOG-3138]) - -* Before this update, a prior refactoring of the logging `must-gather` scripts removed the expected location for the artifacts. This update reverts that change to write artifacts to the `/must-gather` folder. (link:https://issues.redhat.com/browse/LOG-3213[LOG-3213]) - -* Before this update, on certain clusters, the Prometheus exporter would bind on IPv4 instead of IPv6. After this update, Fluentd detects the IP version and binds to `0.0.0.0` for IPv4 or `[::]` for IPv6. (link:https://issues.redhat.com/browse/LOG-3162[LOG-3162]) - -[id="openshift-logging-5-5-4-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2022-0494[CVE-2022-0494] -* link:https://access.redhat.com/security/cve/CVE-2022-1353[CVE-2022-1353] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2588[CVE-2022-2588] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-23816[CVE-2022-23816] -* link:https://access.redhat.com/security/cve/CVE-2022-23825[CVE-2022-23825] -* link:https://access.redhat.com/security/cve/CVE-2022-29900[CVE-2022-29900] -* link:https://access.redhat.com/security/cve/CVE-2022-29901[CVE-2022-29901] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] diff --git a/modules/cluster-logging-rn-5.5.5.adoc b/modules/cluster-logging-rn-5.5.5.adoc deleted file mode 100644 index e7314fb41822..000000000000 --- a/modules/cluster-logging-rn-5.5.5.adoc +++ /dev/null @@ -1,93 +0,0 @@ -//module 
included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-5_{context}"] -= Logging 5.5.5 -This release includes link:https://access.redhat.com/errata/RHSA-2022:8781[OpenShift Logging Bug Fix Release 5.5.5]. - -[id="openshift-logging-5-5-5-bug-fixes"] -== Bug fixes -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3305[LOG-3305]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3284[LOG-3284]) - -* Before this update, the `FluentdQueueLengthIncreasing` alert could fail to fire when there was a cardinality issue with the set of labels returned from this alert expression. This update reduces labels to only include those required for the alert. (https://issues.redhat.com/browse/LOG-3226[LOG-3226]) - -* Before this update, Loki did not have support to reach an external storage in a disconnected cluster. With this update, proxy environment variables and proxy trusted CA bundles are included in the container image to support these connections. (link:https://issues.redhat.com/browse/LOG-2860[LOG-2860]) - -* Before this update, {product-title} web console users could not choose the `ConfigMap` object that includes the CA certificate for Loki, causing pods to operate without the CA. With this update, web console users can select the config map, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3310[LOG-3310]) - -* Before this update, the CA key was used as volume name for mounting the CA into Loki, causing error states when the CA Key included non-conforming characters (such as dots). With this update, the volume name is standardized to an internal string which resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-3332[LOG-3332]) - -[id="openshift-logging-5-5-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] 
-* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] diff --git a/modules/cluster-logging-rn-5.5.6.adoc b/modules/cluster-logging-rn-5.5.6.adoc deleted file mode 100644 index 9e904588c6a9..000000000000 --- a/modules/cluster-logging-rn-5.5.6.adoc +++ /dev/null @@ -1,49 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-6_{context}"] -= Logging 5.5.6 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0386[OpenShift Logging Bug Fix Release 5.5.6]. - -[id="openshift-logging-5-5-6-bug-fixes"] -== Bug fixes -* Before this update, the Pod Security admission controller added the label `podSecurityLabelSync = true` to the `openshift-logging` namespace. This resulted in our specified security labels being overwritten, and as a result Collector pods would not start. With this update, the label `podSecurityLabelSync = false` preserves security labels. Collector pods deploy as expected. (link:https://issues.redhat.com/browse/LOG-3340[LOG-3340]) - -* Before this update, the Operator installed the console view plugin, even when it was not enabled on the cluster. This caused the Operator to crash. With this update, if an account for a cluster does not have the console view enabled, the Operator functions normally and does not install the console view. 
(link:https://issues.redhat.com/browse/LOG-3407[LOG-3407]) - -* Before this update, a prior fix to support a regression where the status of the Elasticsearch deployment was not being updated caused the Operator to crash unless the `Red Hat Elasticsearch Operator` was deployed. With this update, that fix has been reverted so the Operator is now stable but re-introduces the previous issue related to the reported status. (link:https://issues.redhat.com/browse/LOG-3428[LOG-3428]) - -* Before this update, the Loki Operator only deployed one replica of the LokiStack gateway regardless of the chosen stack size. With this update, the number of replicas is correctly configured according to the selected size. (link:https://issues.redhat.com/browse/LOG-3478[LOG-3478]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3341[LOG-3341]) - -* Before this update, the logging view plugin contained an incompatible feature for certain versions of {product-title}. With this update, the correct release stream of the plugin resolves the issue. (link:https://issues.redhat.com/browse/LOG-3467[LOG-3467]) - -* Before this update, the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of one or more pipelines causing the collector pods to restart every 8-10 seconds. With this update, reconciliation of the `ClusterLogForwarder` custom resource processes correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3469[LOG-3469]) - -* Before this change the spec for the `outputDefaults` field of the ClusterLogForwarder custom resource would apply the settings to every declared Elasticsearch output type. This change corrects the behavior to match the enhancement specification where the setting specifically applies to the default managed Elasticsearch store. (link:https://issues.redhat.com/browse/LOG-3342[LOG-3342]) - -* Before this update, the OpenShift CLI (oc) `must-gather` script did not complete because the OpenShift CLI (oc) needs a folder with write permission to build its cache. With this update, the OpenShift CLI (oc) has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3472[LOG-3472]) - -* Before this update, the Loki Operator webhook server caused TLS errors. With this update, the Loki Operator webhook PKI is managed by the Operator Lifecycle Manager's dynamic webhook management resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3511[LOG-3511]) - -[id="openshift-logging-5-5-6-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-2056[CVE-2022-2056] -* link:https://access.redhat.com/security/cve/CVE-2022-2057[CVE-2022-2057] -* link:https://access.redhat.com/security/cve/CVE-2022-2058[CVE-2022-2058] -* link:https://access.redhat.com/security/cve/CVE-2022-2519[CVE-2022-2519] -* link:https://access.redhat.com/security/cve/CVE-2022-2520[CVE-2022-2520] -* link:https://access.redhat.com/security/cve/CVE-2022-2521[CVE-2022-2521] -* link:https://access.redhat.com/security/cve/CVE-2022-2867[CVE-2022-2867] -* link:https://access.redhat.com/security/cve/CVE-2022-2868[CVE-2022-2868] -* link:https://access.redhat.com/security/cve/CVE-2022-2869[CVE-2022-2869] -* link:https://access.redhat.com/security/cve/CVE-2022-2953[CVE-2022-2953] -* link:https://access.redhat.com/security/cve/CVE-2022-2964[CVE-2022-2964] -* link:https://access.redhat.com/security/cve/CVE-2022-4139[CVE-2022-4139] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.5.7.adoc b/modules/cluster-logging-rn-5.5.7.adoc deleted file mode 100644 index c8e7b84eba7c..000000000000 --- a/modules/cluster-logging-rn-5.5.7.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-7_{context}"] -= Logging 5.5.7 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0633[OpenShift Logging Bug Fix Release 5.5.7]. - -[id="openshift-logging-5-5-7-bug-fixes"] -== Bug fixes -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3534[LOG-3534]) - -* Before this update, the `ClusterLogForwarder` custom resource (CR) did not pass TLS credentials for syslog output to Fluentd, resulting in errors during forwarding. With this update, credentials pass correctly to Fluentd, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3533[LOG-3533]) - -[id="openshift-logging-5-5-7-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.5.8.adoc b/modules/cluster-logging-rn-5.5.8.adoc deleted file mode 100644 index b9062d4d0750..000000000000 --- a/modules/cluster-logging-rn-5.5.8.adoc +++ /dev/null @@ -1,23 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-8_{context}"] -= Logging 5.5.8 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0930[OpenShift Logging Bug Fix Release 5.5.8]. - -[id="openshift-logging-5-5-8-bug-fixes"] -== Bug fixes -* Before this update, the `priority` field was missing from `systemd` logs due to an error in how the collector set `level` fields. With this update, these fields are set correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3630[LOG-3630]) - -[id="openshift-logging-5-5-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-24999[CVE-2022-24999] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.5.9.adoc b/modules/cluster-logging-rn-5.5.9.adoc deleted file mode 100644 index 61c30f80324a..000000000000 --- a/modules/cluster-logging-rn-5.5.9.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-5-9_{context}"] -= Logging 5.5.9 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1310[OpenShift Logging Bug Fix Release 5.5.9]. - -[id="openshift-logging-5-5-9-bug-fixes"] -== Bug fixes -* Before this update, a problem with the Fluentd collector caused it to not capture OAuth login events stored in `/var/log/auth-server/audit.log`. This led to incomplete collection of login events from the OAuth service. 
With this update, the Fluentd collector now resolves this issue by capturing all login events from the OAuth service, including those stored in `/var/log/auth-server/audit.log`, as expected. (link:https://issues.redhat.com/browse/LOG-3730[LOG-3730]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not. With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received logs now have structured messages included, even when they are forwarded to multiple destinations. (link:https://issues.redhat.com/browse/LOG-3767[LOG-3767]) - -[id="openshift-logging-5-5-9-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.5.adoc b/modules/cluster-logging-rn-5.5.adoc deleted file mode 100644 index 47a81edfe948..000000000000 --- a/modules/cluster-logging-rn-5.5.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes -[id="cluster-logging-release-notes-5-5-0"] -= Logging 5.5 -The following advisories are available for Logging 5.5: link:https://access.redhat.com/errata/RHSA-2022:6051[Release 5.5] - -[id="openshift-logging-5-5-0-enhancements"] -== Enhancements -* With this update, you can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. (link:https://issues.redhat.com/browse/LOG-1296[LOG-1296]) - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -* With this update, you can filter logs with Elasticsearch outputs by using the Kubernetes common labels, `app.kubernetes.io/component`, `app.kubernetes.io/managed-by`, `app.kubernetes.io/part-of`, and `app.kubernetes.io/version`. Non-Elasticsearch output types can use all labels included in `kubernetes.labels`. (link:https://issues.redhat.com/browse/LOG-2388[LOG-2388]) - -* With this update, clusters with AWS Security Token Service (STS) enabled may use STS authentication to forward logs to Amazon CloudWatch, as outlined in the sketch at the end of this section. (link:https://issues.redhat.com/browse/LOG-1976[LOG-1976]) - -* With this update, the Loki Operator and the Vector collector move from Technical Preview to General Availability. Full feature parity with prior releases is pending, and some APIs remain Technical Previews. See the *Logging with the LokiStack* section for details. 
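The CloudWatch enhancement is configured through the `ClusterLogForwarder` custom resource. The following is a minimal, illustrative sketch of that kind of configuration, not the definitive procedure: the output name, secret name, and region are placeholder values, and on an STS-enabled cluster the referenced secret would typically hold a role ARN rather than static access keys.

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  outputs:
  - name: cw                  # placeholder output name
    type: cloudwatch
    cloudwatch:
      groupBy: logType        # group CloudWatch log groups by log type
      region: us-east-1       # placeholder AWS region
    secret:
      name: cw-secret         # placeholder secret holding AWS credentials or an STS role ARN
  pipelines:
  - name: to-cloudwatch
    inputRefs:
    - application
    - infrastructure
    outputRefs:
    - cw
----

The `groupBy` setting controls how CloudWatch log group names are derived; grouping by log type keeps application and infrastructure logs in separate groups.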
- -[id="openshift-logging-5-5-0-bug-fixes"] -== Bug fixes -* Before this update, clusters configured to forward logs to Amazon CloudWatch wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for all storage options has been disabled, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2746[LOG-2746]) - -* Before this update, the Operator was using versions of some APIs that are deprecated and planned for removal in future versions of {product-title}. This update moves dependencies to the supported API versions. (link:https://issues.redhat.com/browse/LOG-2656[LOG-2656]) - -* Before this update, multiple `ClusterLogForwarder` pipelines configured for multiline error detection caused the collector to go into a `crashloopbackoff` error state. This update fixes the issue where multiple configuration sections had the same unique ID. (link:https://issues.redhat.com/browse/LOG-2241[LOG-2241]) - -* Before this update, the collector could not save non-UTF-8 symbols to the Elasticsearch storage logs. With this update, the collector encodes non-UTF-8 symbols, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2203[LOG-2203]) - -* Before this update, non-Latin characters displayed incorrectly in Kibana. With this update, Kibana displays all valid UTF-8 symbols correctly. (link:https://issues.redhat.com/browse/LOG-2784[LOG-2784]) - -[id="openshift-logging-5-5-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2022-1012[CVE-2022-1012] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32250[CVE-2022-32250] diff --git a/modules/cluster-logging-rn-5.6.1.adoc b/modules/cluster-logging-rn-5.6.1.adoc deleted file mode 100644 index 40a28ff3b4f3..000000000000 --- a/modules/cluster-logging-rn-5.6.1.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-1_{context}"] -= Logging 5.6.1 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0634[OpenShift Logging Bug Fix Release 5.6.1]. - -[id="openshift-logging-5-6-1-bug-fixes"] -== Bug fixes -* Before this update, the compactor would report TLS certificate errors from communications with the querier when retention was active. With this update, the compactor and querier no longer communicate erroneously over HTTP. 
(link:https://issues.redhat.com/browse/LOG-3494[LOG-3494]) - -* Before this update, the Loki Operator would not retry setting the status of the `LokiStack` CR, which caused stale status information. With this update, the Operator retries status information updates on conflict. (link:https://issues.redhat.com/browse/LOG-3496[LOG-3496]) - -* Before this update, the Loki Operator Webhook server caused TLS errors when the `kube-apiserver-operator` Operator checked the webhook validity. With this update, the Loki Operator Webhook PKI is managed by the Operator Lifecycle Manager (OLM), resolving the issue. (link:https://issues.redhat.com/browse/LOG-3510[LOG-3510]) - -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3441[LOG-3441]), (link:https://issues.redhat.com/browse/LOG-3397[LOG-3397]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -* Before this update, the `Red Hat OpenShift Logging` Operator was not available for {product-title} 4.10 clusters because of an incompatibility between {product-title} console and the logging-view-plugin. With this update, the plugin is properly integrated with the {product-title} 4.10 admin console. (link:https://issues.redhat.com/browse/LOG-3447[LOG-3447]) - -* Before this update the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of pipelines that reference the default logstore. With this update, the pipeline validates properly.(link:https://issues.redhat.com/browse/LOG-3477[LOG-3477]) - - -[id="openshift-logging-5-6-1-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] -* link:https://access.redhat.com/security/cve/CVE-2021-35065[CVE-2021-35065] -* link:https://access.redhat.com/security/cve/CVE-2022-46175[CVE-2022-46175] diff --git a/modules/cluster-logging-rn-5.6.2.adoc b/modules/cluster-logging-rn-5.6.2.adoc deleted file mode 100644 index 541faf0a0b82..000000000000 --- a/modules/cluster-logging-rn-5.6.2.adoc +++ /dev/null @@ -1,29 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-2_{context}"] -= Logging 5.6.2 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0793[OpenShift Logging Bug Fix Release 5.6.2]. - -[id="openshift-logging-5-6-2-bug-fixes"] -== Bug fixes -* Before this update, the collector did not set `level` fields correctly based on priority for systemd logs. 
With this update, `level` fields are set correctly. (link:https://issues.redhat.com/browse/LOG-3429[LOG-3429]) - -* Before this update, the Operator incorrectly generated incompatibility warnings on {product-title} 4.12 or later. With this update, the Operator max {product-title} version value has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3584[LOG-3584]) - -* Before this update, creating a `ClusterLogForwarder` custom resource (CR) with an output value of `default` did not generate any errors. With this update, an error warning that this value is invalid generates appropriately. (link:https://issues.redhat.com/browse/LOG-3437[LOG-3437]) - -* Before this update, when the `ClusterLogForwarder` custom resource (CR) had multiple pipelines configured with one output set as `default`, the collector pods restarted. With this update, the logic for output validation has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3559[LOG-3559]) - -* Before this update, collector pods restarted after being created. With this update, the deployed collector does not restart on its own. (link:https://issues.redhat.com/browse/LOG-3608[LOG-3608]) - -* Before this update, patch releases removed previous versions of the Operators from the catalog. This made installing the old versions impossible. This update changes bundle configurations so that previous releases of the same minor version stay in the catalog. (link:https://issues.redhat.com/browse/LOG-3635[LOG-3635]) - -[id="openshift-logging-5-6-2-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-23521[CVE-2022-23521] -* link:https://access.redhat.com/security/cve/CVE-2022-40303[CVE-2022-40303] -* link:https://access.redhat.com/security/cve/CVE-2022-40304[CVE-2022-40304] -* link:https://access.redhat.com/security/cve/CVE-2022-41903[CVE-2022-41903] -* link:https://access.redhat.com/security/cve/CVE-2022-47629[CVE-2022-47629] -* link:https://access.redhat.com/security/cve/CVE-2023-21835[CVE-2023-21835] -* link:https://access.redhat.com/security/cve/CVE-2023-21843[CVE-2023-21843] diff --git a/modules/cluster-logging-rn-5.6.3.adoc b/modules/cluster-logging-rn-5.6.3.adoc deleted file mode 100644 index 226c1059e7b7..000000000000 --- a/modules/cluster-logging-rn-5.6.3.adoc +++ /dev/null @@ -1,23 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-3_{context}"] -= Logging 5.6.3 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0932[OpenShift Logging Bug Fix Release 5.6.3]. - -[id="openshift-logging-5-6-3-bug-fixes"] -== Bug fixes -* Before this update, the operator stored gateway tenant secret information in a config map. With this update, the operator stores this information in a secret. (link:https://issues.redhat.com/browse/LOG-3717[LOG-3717]) - -* Before this update, the Fluentd collector did not capture OAuth login events stored in `/var/log/auth-server/audit.log`. With this update, Fluentd captures these OAuth login events, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3729[LOG-3729]) - -[id="openshift-logging-5-6-3-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/cluster-logging-rn-5.6.4.adoc b/modules/cluster-logging-rn-5.6.4.adoc deleted file mode 100644 index 9e2cf339fa63..000000000000 --- a/modules/cluster-logging-rn-5.6.4.adoc +++ /dev/null @@ -1,34 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-4_{context}"] -= Logging 5.6.4 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1311[OpenShift Logging Bug Fix Release 5.6.4]. - -[id="openshift-logging-5-6-4-bug-fixes"] -== Bug fixes -* Before this update, when LokiStack was deployed as the log store, the logs generated by Loki pods were collected and sent to LokiStack. With this update, the logs generated by Loki are excluded from collection and will not be stored. (link:https://issues.redhat.com/browse/LOG-3280[LOG-3280]) - -* Before this update, when the query editor on the Logs page of the OpenShift Web Console was empty, the drop-down menus did not populate. With this update, if an empty query is attempted, an error message is displayed and the drop-down menus now populate as expected. (link:https://issues.redhat.com/browse/LOG-3454[LOG-3454]) - -* Before this update, when the `tls.insecureSkipVerify` option was set to `true`, the Cluster Logging Operator would generate incorrect configuration. As a result, the operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Cluster Logging Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3475[LOG-3475]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not. With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received messages now have structured messages included, even when they are forwarded to multiple destinations. (link:https://issues.redhat.com/browse/LOG-3640[LOG-3640]) - -* Before this update, if the `collection` field contained `{}` it could result in the Operator crashing. With this update, the Operator will ignore this value, allowing the operator to continue running smoothly without interruption. (link:https://issues.redhat.com/browse/LOG-3733[LOG-3733]) - -* Before this update, the `nodeSelector` attribute for the Gateway component of LokiStack did not have any effect. 
With this update, the `nodeSelector` attribute functions as expected. (link:https://issues.redhat.com/browse/LOG-3783[LOG-3783]) - -* Before this update, the static LokiStack memberlist configuration relied solely on private IP networks. As a result, when the {product-title} cluster pod network was configured with a public IP range, the LokiStack pods would crashloop. With this update, the LokiStack administrator now has the option to use the pod network for the memberlist configuration. This resolves the issue and prevents the LokiStack pods from entering a crashloop state when the {product-title} cluster pod network is configured with a public IP range. (link:https://issues.redhat.com/browse/LOG-3814[LOG-3814]) - -* Before this update, if the `tls.insecureSkipVerify` field was set to `true`, the Cluster Logging Operator would generate an incorrect configuration. As a result, the Operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3838[LOG-3838]) - -* Before this update, if the Cluster Logging Operator (CLO) was installed without the Elasticsearch Operator, the CLO pod would continuously display an error message related to the deletion of Elasticsearch. With this update, the CLO now performs additional checks before displaying any error messages. As a result, error messages related to Elasticsearch deletion are no longer displayed in the absence of the Elasticsearch Operator.(link:https://issues.redhat.com/browse/LOG-3763[LOG-3763]) - -[id="openshift-logging-5-6-4-CVEs"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/cluster-logging-rn-5.6.5.adoc b/modules/cluster-logging-rn-5.6.5.adoc deleted file mode 100644 index e8c52a5ed062..000000000000 --- a/modules/cluster-logging-rn-5.6.5.adoc +++ /dev/null @@ -1,27 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-6-5{context}"] -= Logging 5.6.5 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1953[OpenShift Logging Bug Fix Release 5.6.5]. - -[id="openshift-logging-5-6-5-bug-fixes"] -== Bug fixes -* Before this update, the template definitions prevented Elasticsearch from indexing some labels and namespace_labels, causing issues with data ingestion. With this update, the fix replaces dots and slashes in labels to ensure proper ingestion, effectively resolving the issue. (link:https://issues.redhat.com/browse/LOG-3419[LOG-3419]) - -* Before this update, if the Logs page of the OpenShift Web Console failed to connect to the LokiStack, a generic error message was displayed, providing no additional context or troubleshooting suggestions. With this update, the error message has been enhanced to include more specific details and recommendations for troubleshooting. 
(link:https://issues.redhat.com/browse/LOG-3750[LOG-3750]) - -* Before this update, time range formats were not validated, leading to errors selecting a custom date range. With this update, time formats are now validated, enabling users to select a valid range. If an invalid time range format is selected, an error message is displayed to the user. (link:https://issues.redhat.com/browse/LOG-3583[LOG-3583]) - -* Before this update, when searching logs in Loki, even if the length of an expression did not exceed 5120 characters, the query would fail in many cases. With this update, query authorization label matchers have been optimized, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3480[LOG-3480]) - -* Before this update, the Loki Operator failed to produce a memberlist configuration that was sufficient for locating all the components when using a memberlist for private IPs. With this update, the fix ensures that the generated configuration includes the advertised port, allowing for successful lookup of all components. (link:https://issues.redhat.com/browse/LOG-4008[LOG-4008]) - -[id="openshift-logging-5-6-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4269[CVE-2022-4269] -* link:https://access.redhat.com/security/cve/CVE-2022-4378[CVE-2022-4378] -* link:https://access.redhat.com/security/cve/CVE-2023-0266[CVE-2023-0266] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-0386[CVE-2023-0386] -* link:https://access.redhat.com/security/cve/CVE-2023-27539[CVE-2023-27539] -* link:https://access.redhat.com/security/cve/CVE-2023-28120[CVE-2023-28120] diff --git a/modules/cluster-logging-rn-5.6.adoc b/modules/cluster-logging-rn-5.6.adoc deleted file mode 100644 index 423d87804a18..000000000000 --- a/modules/cluster-logging-rn-5.6.adoc +++ /dev/null @@ -1,85 +0,0 @@ -//included in cluster-logging-release-notes.adoc -:_content-type: ASSEMBLY -[id="cluster-logging-release-notes-5-6_{context}"] -= Logging 5.6 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0264[OpenShift Logging Release 5.6]. - -[id="openshift-logging-5-6-dep-notice_{context}"] -== Deprecation notice -In Logging 5.6, Fluentd is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to Fluentd, you can use Vector. - -[id="openshift-logging-5-6-enhancements_{context}"] -== Enhancements -* With this update, Logging is compliant with {product-title} cluster-wide cryptographic policies. (link:https://issues.redhat.com/browse/LOG-895[LOG-895]) - -* With this update, you can declare per-tenant, per-stream, and global retention policies through the LokiStack custom resource, ordered by priority. (link:https://issues.redhat.com/browse/LOG-2695[LOG-2695]) - -* With this update, Splunk is an available output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-2913[LOG-2913]) - -* With this update, Vector replaces Fluentd as the default collector. (link:https://issues.redhat.com/browse/LOG-2222[LOG-2222]) - -* With this update, the *Developer* role can access the per-project workload logs they are assigned to within the Log Console Plugin on clusters running {product-title} 4.11 and higher. 
(link:https://issues.redhat.com/browse/LOG-3388[LOG-3388]) - -* With this update, logs from any source contain a field `openshift.cluster_id`, the unique identifier of the cluster in which the Operator is deployed. You can view the `clusterID` value with the command below. (link:https://issues.redhat.com/browse/LOG-2715[LOG-2715]) - -include::snippets/logging-get-clusterid-snip.adoc[lines=9..12] - -[id="openshift-logging-5-6-known-issues_{context}"] -== Known Issues -* Before this update, Elasticsearch would reject logs if multiple label keys had the same prefix and some keys included the `.` character. This fixes the limitation of Elasticsearch by replacing `.` in the label keys with `_`. As a workaround for this issue, remove the labels that cause errors, or add a namespace to the label. (link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -[id="openshift-logging-5-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-2993[LOG-2993]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-3072[LOG-3072]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3090[LOG-3090]) - -* Before this update, the CA key was used as the volume name for mounting the CA into Loki, causing error states when the CA Key included non-conforming characters, such as dots. With this update, the volume name is standardized to an internal string which resolves the issue. (link:https://issues.redhat.com/browse/LOG-3331[LOG-3331]) - -* Before this update, a default value set within the LokiStack Custom Resource Definition, caused an inability to create a LokiStack instance without a `ReplicationFactor` of `1`. With this update, the operator sets the actual value for the size used. (link:https://issues.redhat.com/browse/LOG-3296[LOG-3296]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3195[LOG-3195]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3161[LOG-3161]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-3157[LOG-3157]) - -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3129[LOG-3129]) - -* Before this update, the Operators' general pattern for reconciling resources was to try to create an object before attempting to get or update it, which led to constant HTTP 409 responses after creation. With this update, Operators first attempt to retrieve an object and only create or update it if it is either missing or not as specified. (link:https://issues.redhat.com/browse/LOG-2919[LOG-2919]) - -* Before this update, the `.level` and `.structure.level` fields in Fluentd could contain different values. With this update, the values are the same for each field. (link:https://issues.redhat.com/browse/LOG-2819[LOG-2819]) - -* Before this update, the Operator did not wait for the population of the trusted CA bundle and deployed the collector a second time once the bundle updated. With this update, the Operator waits briefly to see if the bundle has been populated before it continues the collector deployment. (link:https://issues.redhat.com/browse/LOG-2789[LOG-2789]) - -* Before this update, logging telemetry info appeared twice when reviewing metrics. With this update, logging telemetry info displays as expected. (link:https://issues.redhat.com/browse/LOG-2315[LOG-2315]) - -* Before this update, Fluentd pod logs contained a warning message after enabling the JSON parsing addition. With this update, that warning message does not appear. (link:https://issues.redhat.com/browse/LOG-1806[LOG-1806]) - -* Before this update, the `must-gather` script did not complete because `oc` needs a folder with write permission to build its cache. With this update, `oc` has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3446[LOG-3446]) - -* Before this update, the log collector SCC could be superseded by other SCCs on the cluster, rendering the collector unusable. This update sets the priority of the log collector SCC so that it takes precedence over the others. (link:https://issues.redhat.com/browse/LOG-3235[LOG-3235]) - -* Before this update, Vector was missing the field `sequence`, which was added to Fluentd as a way to deal with a lack of actual nanosecond precision. With this update, the field `openshift.sequence` has been added to the event logs. 
(link:https://issues.redhat.com/browse/LOG-3106[LOG-3106]) - -[id="openshift-logging-5-6-cves_{context}"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2020-36518[CVE-2020-36518] -* https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* https://access.redhat.com/security/cve/CVE-2022-2879[CVE-2022-2879] -* https://access.redhat.com/security/cve/CVE-2022-2880[CVE-2022-2880] -* https://access.redhat.com/security/cve/CVE-2022-27664[CVE-2022-27664] -* https://access.redhat.com/security/cve/CVE-2022-32190[CVE-2022-32190] -* https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* https://access.redhat.com/security/cve/CVE-2022-37601[CVE-2022-37601] -* https://access.redhat.com/security/cve/CVE-2022-41715[CVE-2022-41715] -* https://access.redhat.com/security/cve/CVE-2022-42003[CVE-2022-42003] -* https://access.redhat.com/security/cve/CVE-2022-42004[CVE-2022-42004] -* https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/cluster-logging-rn-5.7.0.adoc b/modules/cluster-logging-rn-5.7.0.adoc deleted file mode 100644 index b8dfbfda3dc4..000000000000 --- a/modules/cluster-logging-rn-5.7.0.adoc +++ /dev/null @@ -1,24 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-0{context}"] -= Logging 5.7.0 -This release includes link:https://access.redhat.com/errata/RHBA-2023:2133[OpenShift Logging Bug Fix Release 5.7.0]. - -[id="openshift-logging-5-7-enhancements"] -== Enhancements -With this update, you can enable logging to detect multi-line exceptions and reassemble them into a single log entry. - -To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field, with a value of `true`. - -[id="openshift-logging-5-7-known-issues"] -== Known Issues -None. - -[id="openshift-logging-5-7-0-bug-fixes"] -== Bug fixes -* Before this update, the `nodeSelector` attribute for the Gateway component of the LokiStack did not impact node scheduling. With this update, the `nodeSelector` attribute works as expected. (link:https://issues.redhat.com/browse/LOG-3713[LOG-3713]) - -[id="openshift-logging-5-7-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-1999[CVE-2023-1999] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/cluster-logging-systemd-scaling.adoc b/modules/cluster-logging-systemd-scaling.adoc deleted file mode 100644 index aeb747e0d51d..000000000000 --- a/modules/cluster-logging-systemd-scaling.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/config/cluster-logging-systemd - -:_content-type: PROCEDURE -[id="cluster-logging-systemd-scaling_{context}"] -= Configuring systemd-journald for OpenShift Logging - -As you scale up your project, the default logging environment might need some -adjustments. - -For example, if you are missing logs, you might have to increase the rate limits for journald. 
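Before raising the limits, it can help to confirm whether journald is actually dropping messages on the affected nodes. The following is a minimal sketch, not part of the original procedure: it assumes you can run a command on a node through a debug pod (`<node_name>` is a placeholder), and that rate-limited messages are reported by `systemd-journald` in its own journal, which is typical behavior but can vary by systemd version.

[source,terminal]
----
# Run journalctl on the node through a debug pod; <node_name> is a placeholder.
# The grep runs locally on the streamed output.
$ oc debug node/<node_name> -- chroot /host \
    journalctl -u systemd-journald --no-pager | grep -i 'suppressed'
----

If the output contains lines such as `Suppressed <N> messages`, journald is rate limiting, and increasing `RateLimitBurst` as described in the procedure below is likely to help.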
-You can adjust the number of messages to retain for a specified period of time to ensure that -OpenShift Logging does not use excessive resources without dropping logs. - -You can also determine if you want the logs compressed, how long to retain logs, how or if the logs are stored, -and other settings. - -.Procedure - -. Create a Butane config file, `40-worker-custom-journald.bu`, that includes an `/etc/systemd/journald.conf` file with the required settings. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 40-worker-custom-journald - labels: - machineconfiguration.openshift.io/role: "worker" -storage: - files: - - path: /etc/systemd/journald.conf - mode: 0644 <1> - overwrite: true - contents: - inline: | - Compress=yes <2> - ForwardToConsole=no <3> - ForwardToSyslog=no - MaxRetentionSec=1month <4> - RateLimitBurst=10000 <5> - RateLimitIntervalSec=30s - Storage=persistent <6> - SyncIntervalSec=1s <7> - SystemMaxUse=8G <8> - SystemKeepFree=20% <9> - SystemMaxFileSize=10M <10> ----- -+ -<1> Set the permissions for the `journal.conf` file. It is recommended to set `0644` permissions. -<2> Specify whether you want logs compressed before they are written to the file system. -Specify `yes` to compress the message or `no` to not compress. The default is `yes`. -<3> Configure whether to forward log messages. Defaults to `no` for each. Specify: -* `ForwardToConsole` to forward logs to the system console. -* `ForwardToKsmg` to forward logs to the kernel log buffer. -* `ForwardToSyslog` to forward to a syslog daemon. -* `ForwardToWall` to forward messages as wall messages to all logged-in users. -<4> Specify the maximum time to store journal entries. Enter a number to specify seconds. Or -include a unit: "year", "month", "week", "day", "h" or "m". Enter `0` to disable. The default is `1month`. -<5> Configure rate limiting. If more logs are received than what is specified in `RateLimitBurst` during the time interval defined by `RateLimitIntervalSec`, all further messages within the interval are dropped until the interval is over. It is recommended to set `RateLimitIntervalSec=30s` and `RateLimitBurst=10000`, which are the defaults. -<6> Specify how logs are stored. The default is `persistent`: -* `volatile` to store logs in memory in `/var/log/journal/`. -* `persistent` to store logs to disk in `/var/log/journal/`. systemd creates the directory if it does not exist. -* `auto` to store logs in `/var/log/journal/` if the directory exists. If it does not exist, systemd temporarily stores logs in `/run/systemd/journal`. -* `none` to not store logs. systemd drops all logs. -<7> Specify the timeout before synchronizing journal files to disk for *ERR*, *WARNING*, *NOTICE*, *INFO*, and *DEBUG* logs. -systemd immediately syncs after receiving a *CRIT*, *ALERT*, or *EMERG* log. The default is `1s`. -<8> Specify the maximum size the journal can use. The default is `8G`. -<9> Specify how much disk space systemd must leave free. The default is `20%`. -<10> Specify the maximum size for individual journal files stored persistently in `/var/log/journal`. The default is `10M`. -+ -[NOTE] -==== -If you are removing the rate limit, you might see increased CPU utilization on the -system logging daemons as it processes any messages that would have previously -been throttled. 
-==== -+ -For more information on systemd settings, see link:https://www.freedesktop.org/software/systemd/man/journald.conf.html[https://www.freedesktop.org/software/systemd/man/journald.conf.html]. The default settings listed on that page might not apply to {product-title}. -+ -// Defaults from https://github.com/openshift/openshift-ansible/pull/3753/files#diff-40b7a7231e77d95ca6009dc9bcc0f470R33-R34 - -. Use Butane to generate a `MachineConfig` object file, `40-worker-custom-journald.yaml`, containing the configuration to be delivered to the nodes: -+ -[source,terminal] ----- -$ butane 40-worker-custom-journald.bu -o 40-worker-custom-journald.yaml ----- - -. Apply the machine config. For example: -+ -[source,terminal] ----- -$ oc apply -f 40-worker-custom-journald.yaml ----- -+ -The controller detects the new `MachineConfig` object and generates a new `rendered-worker-` version. - -. Monitor the status of the rollout of the new rendered configuration to each node: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output -[source,terminal] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= -Annotations: -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool - -... - -Conditions: - Message: - Reason: All nodes are updating to rendered-worker-913514517bcea7c93bd446f4830bc64e ----- diff --git a/modules/cluster-logging-troubleshoot-logging.adoc b/modules/cluster-logging-troubleshoot-logging.adoc deleted file mode 100644 index da30383f4ebc..000000000000 --- a/modules/cluster-logging-troubleshoot-logging.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-troubleshoot-logging-about_{context}"] -= About troubleshooting {product-title} Logging - -You can troubleshoot the logging issues by performing the following tasks: - -* Viewing logging status -* Viewing the status of the log store -* Understanding logging alerts -* Collecting logging data for Red Hat Support -* Troubleshooting for critical alerts diff --git a/modules/cluster-logging-troubleshooting-log-forwarding.adoc b/modules/cluster-logging-troubleshooting-log-forwarding.adoc deleted file mode 100644 index 943b5cf35438..000000000000 --- a/modules/cluster-logging-troubleshooting-log-forwarding.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -:_content-type: PROCEDURE -[id="cluster-logging-troubleshooting-log-forwarding_{context}"] -= Troubleshooting log forwarding - -When you create a `ClusterLogForwarder` custom resource (CR), if the Red Hat OpenShift Logging Operator does not redeploy the Fluentd pods automatically, you can delete the Fluentd pods to force them to redeploy. - -.Prerequisites - -* You have created a `ClusterLogForwarder` custom resource (CR) object. - -.Procedure - -* Delete the Fluentd pods to force them to redeploy. 
-+ -[source,terminal] ----- -$ oc delete pod --selector logging-infra=collector ----- diff --git a/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc b/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc deleted file mode 100644 index acfe2f9376ab..000000000000 --- a/modules/cluster-logging-troubleshooting-loki-entry-out-of-order-errors.adoc +++ /dev/null @@ -1,137 +0,0 @@ -:_module-type: PROCEDURE - -:_content-type: PROCEDURE -[id="cluser-logging-troubleshooting-loki-entry-out-of-order-messages_{context}"] -= Troubleshooting Loki "entry out of order" errors - -If your Fluentd forwards a large block of messages to a Loki logging system that exceeds the rate limit, Loki to generates "entry out of order" errors. To fix this issue, you update some values in the Loki server configuration file, `loki.yaml`. - -[NOTE] -==== -`loki.yaml` is not available on Grafana-hosted Loki. This topic does not apply to Grafana-hosted Loki servers. -==== - -.Conditions - -* The `ClusterLogForwarder` custom resource is configured to forward logs to Loki. - -* Your system sends a block of messages that is larger than 2 MB to Loki, such as: -+ ----- -"values":[["1630410392689800468","{\"kind\":\"Event\",\"apiVersion\":\ -....... -...... -...... -...... -\"received_at\":\"2021-08-31T11:46:32.800278+00:00\",\"version\":\"1.7.4 1.6.0\"}},\"@timestamp\":\"2021-08-31T11:46:32.799692+00:00\",\"viaq_index_name\":\"audit-write\",\"viaq_msg_id\":\"MzFjYjJkZjItNjY0MC00YWU4LWIwMTEtNGNmM2E5ZmViMGU4\",\"log_type\":\"audit\"}"]]}]} ----- - -* When you enter `oc logs -c fluentd`, the Fluentd logs in your OpenShift Logging cluster show the following messages: -+ -[source,text] ----- -429 Too Many Requests Ingestion rate limit exceeded (limit: 8388608 bytes/sec) while attempting to ingest '2140' lines totaling '3285284' bytes - -429 Too Many Requests Ingestion rate limit exceeded' or '500 Internal Server Error rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5277702 vs. 4194304)' ----- - -* When you open the logs on the Loki server, they display `entry out of order` messages like these: -+ -[source,text] ----- -,\nentry with timestamp 2021-08-18 05:58:55.061936 +0000 UTC ignored, reason: 'entry out of order' for stream: - -{fluentd_thread=\"flush_thread_0\", log_type=\"audit\"},\nentry with timestamp 2021-08-18 06:01:18.290229 +0000 UTC ignored, reason: 'entry out of order' for stream: {fluentd_thread="flush_thread_0", log_type="audit"} ----- - -.Procedure - -. Update the following fields in the `loki.yaml` configuration file on the Loki server with the values shown here: -+ - * `grpc_server_max_recv_msg_size: 8388608` - * `chunk_target_size: 8388608` - * `ingestion_rate_mb: 8` - * `ingestion_burst_size_mb: 16` - -. Apply the changes in `loki.yaml` to the Loki server. 
- -.Example `loki.yaml` file -[source,yaml] ----- -auth_enabled: false - -server: - http_listen_port: 3100 - grpc_listen_port: 9096 - grpc_server_max_recv_msg_size: 8388608 - -ingester: - wal: - enabled: true - dir: /tmp/wal - lifecycler: - address: 127.0.0.1 - ring: - kvstore: - store: inmemory - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 1h # Any chunk not receiving new logs in this time will be flushed - chunk_target_size: 8388608 - max_chunk_age: 1h # All chunks will be flushed when they hit this age, default is 1h - chunk_retain_period: 30s # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m) - max_transfer_retries: 0 # Chunk transfers disabled - -schema_config: - configs: - - from: 2020-10-24 - store: boltdb-shipper - object_store: filesystem - schema: v11 - index: - prefix: index_ - period: 24h - -storage_config: - boltdb_shipper: - active_index_directory: /tmp/loki/boltdb-shipper-active - cache_location: /tmp/loki/boltdb-shipper-cache - cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space - shared_store: filesystem - filesystem: - directory: /tmp/loki/chunks - -compactor: - working_directory: /tmp/loki/boltdb-shipper-compactor - shared_store: filesystem - -limits_config: - reject_old_samples: true - reject_old_samples_max_age: 12h - ingestion_rate_mb: 8 - ingestion_burst_size_mb: 16 - -chunk_store_config: - max_look_back_period: 0s - -table_manager: - retention_deletes_enabled: false - retention_period: 0s - -ruler: - storage: - type: local - local: - directory: /tmp/loki/rules - rule_path: /tmp/loki/rules-temp - alertmanager_url: http://localhost:9093 - ring: - kvstore: - store: inmemory - enable_api: true ----- - -[role="_additional-resources"] -.Additional resources - -* link:https://grafana.com/docs/loki/latest/configuration/[Configuring Loki] diff --git a/modules/cluster-logging-troubleshooting-unknown.adoc b/modules/cluster-logging-troubleshooting-unknown.adoc deleted file mode 100644 index 92e173aa7ce8..000000000000 --- a/modules/cluster-logging-troubleshooting-unknown.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-troublehsooting.adoc - -[id="cluster-logging-troubleshooting-unknown_{context}"] -= Troubleshooting a Kubernetes unknown error while connecting to Elasticsearch - -If you are attempting to use a F-5 load balancer in front of Kibana with -`X-Forwarded-For` enabled, this can cause an issue in which the Elasticsearch -`Searchguard` plugin is unable to correctly accept connections from Kibana. - -.Example Kibana Error Message ----- -Kibana: Unknown error while connecting to Elasticsearch - -Error: Unknown error while connecting to Elasticsearch -Error: UnknownHostException[No trusted proxies] ----- - -.Procedure - -To configure Searchguard to ignore the extra header: - -. Scale down all Fluentd pods. - -. Scale down Elasticsearch after the Fluentd pods have terminated. - -. Add `searchguard.http.xforwardedfor.header: DUMMY` to the Elasticsearch -configuration section. -+ -[source,terminal] ----- -$ oc edit configmap/elasticsearch <1> ----- -<1> This approach requires that Elasticsearch configurations are within a config map. -+ - -. Scale Elasticsearch back up. - -. Scale up all Fluentd pods. 
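For clarity, the following is a rough sketch of how the `searchguard.http.xforwardedfor.header: DUMMY` setting might appear after you add it. The config map name `elasticsearch` comes from the procedure above, but the `elasticsearch.yml` key and the surrounding structure are assumptions that depend on how Elasticsearch is deployed in your cluster, so treat this as illustrative only:

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
data:
  elasticsearch.yml: |
    # Keep the existing Elasticsearch settings; only the following line is
    # added so that Searchguard ignores the extra X-Forwarded-For header.
    searchguard.http.xforwardedfor.header: DUMMY
----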
diff --git a/modules/cluster-logging-uninstall.adoc b/modules/cluster-logging-uninstall.adoc deleted file mode 100644 index 39b4b50f3cbd..000000000000 --- a/modules/cluster-logging-uninstall.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-uninstall.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-uninstall_{context}"] -= Uninstalling the {logging-title} - -You can stop log aggregation by deleting the `ClusterLogging` custom resource (CR). After deleting the CR, there are other {logging} components that remain, which you can optionally remove. - - -Deleting the `ClusterLogging` CR does not remove the persistent volume claims (PVCs). To preserve or delete the remaining PVCs, persistent volumes (PVs), and associated data, you must take further action. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -.Procedure - -To remove OpenShift Logging: - -. Use the -ifndef::openshift-rosa,openshift-dedicated[] -{product-title} web console -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -{cluster-manager-url} -endif::[] - to remove the `ClusterLogging` CR: - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. On the *Custom Resource Definitions* page, click *ClusterLogging*. - -.. On the *Custom Resource Definition Details* page, click *Instances*. - -.. Click the Options menu {kebab} next to the instance and select *Delete ClusterLogging*. - -. Optional: Delete the custom resource definitions (CRD): - -.. Switch to the *Administration* -> *Custom Resource Definitions* page. - -.. Click the Options menu {kebab} next to *ClusterLogForwarder* and select *Delete Custom Resource Definition*. - -.. Click the Options menu {kebab} next to *ClusterLogging* and select *Delete Custom Resource Definition*. - -.. Click the Options menu {kebab} next to *Elasticsearch* and select *Delete Custom Resource Definition*. - -. Optional: Remove the Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator: - -.. Switch to the *Operators* -> *Installed Operators* page. - -.. Click the Options menu {kebab} next to the Red Hat OpenShift Logging Operator and select *Uninstall Operator*. - -.. Click the Options menu {kebab} next to the OpenShift Elasticsearch Operator and select *Uninstall Operator*. - -. Optional: Remove the OpenShift Logging and Elasticsearch projects. - -.. Switch to the *Home* -> *Projects* page. - -.. Click the Options menu {kebab} next to the *openshift-logging* project and select *Delete Project*. - -.. Confirm the deletion by typing `openshift-logging` in the dialog box and click *Delete*. - -.. Click the Options menu {kebab} next to the *openshift-operators-redhat* project and select *Delete Project*. -+ -[IMPORTANT] -==== -Do not delete the `openshift-operators-redhat` project if other global operators are installed in this namespace. -==== - -.. Confirm the deletion by typing `openshift-operators-redhat` in the dialog box and click *Delete*. - -. To keep the PVCs for reuse with other pods, keep the labels or PVC names that you need to reclaim the PVCs. - -. Optional: If you do not want to keep the PVCs, you can delete them. -+ -[WARNING] -==== -Releasing or deleting PVCs can delete PVs and cause data loss. -==== - -.. Switch to the *Storage* -> *Persistent Volume Claims* page. - -.. Click the Options menu {kebab} next to each PVC and select *Delete Persistent Volume Claim*. - -.. 
If you want to recover storage space, you can delete the PVs. diff --git a/modules/cluster-logging-update-logging.adoc b/modules/cluster-logging-update-logging.adoc deleted file mode 100644 index 06724db46dc1..000000000000 --- a/modules/cluster-logging-update-logging.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-update-logging-about_{context}"] -= About updating {product-title} Logging - -{product-title} allows you to update {product-title} logging. You must update the following operators while updating {product-title} Logging: - -* Elasticsearch Operator -* Cluster Logging Operator diff --git a/modules/cluster-logging-updating-logging-to-5-0.adoc b/modules/cluster-logging-updating-logging-to-5-0.adoc deleted file mode 100644 index bdf2c3d5ff47..000000000000 --- a/modules/cluster-logging-updating-logging-to-5-0.adoc +++ /dev/null @@ -1,225 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to-5-0_{context}"] -= Updating from cluster logging in {product-title} 4.6 or earlier to OpenShift Logging 5.x - -{product-title} 4.7 made the following name changes: - -* The _cluster logging_ feature became the _Red Hat OpenShift Logging_ 5.x product. -* The _Cluster Logging_ Operator became the _Red Hat OpenShift Logging_ Operator. -* The _Elasticsearch_ Operator became _OpenShift Elasticsearch_ Operator. - -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7, 4.8, or 4.9. Then, you update the following operators: - -* From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x -* From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - -If you update the operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The OpenShift Logging status is healthy: -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your Elasticsearch and Kibana data is backed up. - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Update the Cluster Logging Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-logging` project. - -.. Click the *Cluster Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. 
Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the Red Hat OpenShift Logging Operator version is 5.0.x or 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.0 or 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. 
Verify that the log collector is updated to 5.0 or 5.x: -+ -[source,terminal] ----- -$ oc get ds fluentd -o json | grep fluentd-init ----- -+ -Verify that the output includes a `fluentd-init` container: -+ -[source,terminal] ----- -"containerName": "fluentd-init" ----- - -.. Verify that the log visualizer is updated to 5.0 or 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [] -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-updating-logging-to-5-1.adoc b/modules/cluster-logging-updating-logging-to-5-1.adoc deleted file mode 100644 index dbc194adc050..000000000000 --- a/modules/cluster-logging-updating-logging-to-5-1.adoc +++ /dev/null @@ -1,218 +0,0 @@ -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to-5-1_{context}"] -= Updating OpenShift Logging to the current version - -To update OpenShift Logging from 5.x to the current version, you change the subscriptions for the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - - -If you update the operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The OpenShift Logging status is healthy: -+ -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your Elasticsearch and Kibana data is backed up. - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. Update the Red Hat OpenShift Logging Operator: - -.. From the web console, click *Operators* -> *Installed Operators*. - -.. Select the `openshift-logging` project. - -.. Click the *Red Hat OpenShift Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -Verify that the Red Hat OpenShift Logging Operator version is 5.x.x. -+ -Wait for the *Status* field to report *Succeeded*. - -. 
Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. Verify that the log collector is updated to 5.3: -+ -[source,terminal] ----- -$ oc get ds collector -o json | grep collector ----- -+ -Verify that the output includes a `fluentd-init` container: -+ -[source,terminal] ----- -"containerName": "collector" ----- - -.. 
Verify that the log visualizer is updated to 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [] -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-updating-logging-to-current.adoc b/modules/cluster-logging-updating-logging-to-current.adoc deleted file mode 100644 index fd413f21d54a..000000000000 --- a/modules/cluster-logging-updating-logging-to-current.adoc +++ /dev/null @@ -1,230 +0,0 @@ -// Module file include in the following assemblies: -//cluster-logging-upgrading.adoc -:_content-type: PROCEDURE -[id="cluster-logging-updating-logging-to_current_{context}"] -= Updating Logging to the current version - -To update Logging to the current version, you change the subscriptions for the OpenShift Elasticsearch Operator and Red Hat OpenShift Logging Operator. - -[IMPORTANT] -==== -You must update the OpenShift Elasticsearch Operator _before_ you update the Red Hat OpenShift Logging Operator. You must also update _both_ Operators to the same version. -==== - - -If you update the Operators in the wrong order, Kibana does not update and the Kibana custom resource (CR) is not created. To work around this problem, you delete the Red Hat OpenShift Logging Operator pod. When the Red Hat OpenShift Logging Operator pod redeploys, it creates the Kibana CR and Kibana becomes available again. - -.Prerequisites - -* The {product-title} version is 4.7 or later. - -* The Logging status is healthy: -+ -** All pods are `ready`. -** The Elasticsearch cluster is healthy. - -* Your link:https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html[Elasticsearch and Kibana data is backed up.] - -.Procedure - -. Update the OpenShift Elasticsearch Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] - -.. Select the `openshift-Operators-redhat` project. - -.. Click the *OpenShift Elasticsearch Operator*. - -.. Click *Subscription* -> *Channel*. - -.. In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -.. Verify that the OpenShift Elasticsearch Operator version is 5.x.x. -+ -.. Wait for the *Status* field to report *Succeeded*. - -. Update the Red Hat OpenShift Logging Operator: - -ifndef::openshift-rosa,openshift-dedicated[] -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -.. In the {hybrid-console}, click *Operators* -> *Installed Operators*. -endif::[] - -.. Select the `openshift-logging` project. - -.. Click the *Red Hat OpenShift Logging Operator*. - -.. Click *Subscription* -> *Channel*. - -.. 
In the *Change Subscription Update Channel* window, select *stable-5.x* and click *Save*. - -.. Wait for a few seconds, then click *Operators* -> *Installed Operators*. -+ -.. Verify that the Red Hat OpenShift Logging Operator version is 5.y.z -+ -.. Wait for the *Status* field to report *Succeeded*. - -. Check the logging components: - -.. Ensure that all Elasticsearch pods are in the *Ready* status: -+ -[source,terminal] ----- -$ oc get pod -n openshift-logging --selector component=elasticsearch ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk 2/2 Running 0 31m -elasticsearch-cdm-1pbrl44l-2-5c6d87589f-gx5hk 2/2 Running 0 30m -elasticsearch-cdm-1pbrl44l-3-88df5d47-m45jc 2/2 Running 0 29m ----- -+ -.. Ensure that the Elasticsearch cluster is healthy: -+ -[source,terminal] ----- -$ oc exec -n openshift-logging -c elasticsearch elasticsearch-cdm-1pbrl44l-1-55b7546f4c-mshhk -- health ----- -+ -[source,json] ----- -{ - "cluster_name" : "elasticsearch", - "status" : "green", -} ----- - -.. Ensure that the Elasticsearch cron jobs are created: -+ -[source,terminal] ----- -$ oc project openshift-logging ----- -+ -[source,terminal] ----- -$ oc get cronjob ----- -+ -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE -elasticsearch-im-app */15 * * * * False 0 56s -elasticsearch-im-audit */15 * * * * False 0 56s -elasticsearch-im-infra */15 * * * * False 0 56s ----- - -.. Verify that the log store is updated to 5.x and the indices are `green`: -+ -[source,terminal] ----- -$ oc exec -c elasticsearch -- indices ----- -+ -.. Verify that the output includes the `app-00000x`, `infra-00000x`, `audit-00000x`, `.security` indices. -+ -.Sample output with indices in a green status -[%collapsible] -==== -[source,terminal] ----- -Tue Jun 30 14:30:54 UTC 2020 -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -green open infra-000008 bnBvUFEXTWi92z3zWAzieQ 3 1 222195 0 289 144 -green open infra-000004 rtDSzoqsSl6saisSK7Au1Q 3 1 226717 0 297 148 -green open infra-000012 RSf_kUwDSR2xEuKRZMPqZQ 3 1 227623 0 295 147 -green open .kibana_7 1SJdCqlZTPWlIAaOUd78yg 1 1 4 0 0 0 -green open infra-000010 iXwL3bnqTuGEABbUDa6OVw 3 1 248368 0 317 158 -green open infra-000009 YN9EsULWSNaxWeeNvOs0RA 3 1 258799 0 337 168 -green open infra-000014 YP0U6R7FQ_GVQVQZ6Yh9Ig 3 1 223788 0 292 146 -green open infra-000015 JRBbAbEmSMqK5X40df9HbQ 3 1 224371 0 291 145 -green open .orphaned.2020.06.30 n_xQC2dWQzConkvQqei3YA 3 1 9 0 0 0 -green open infra-000007 llkkAVSzSOmosWTSAJM_hg 3 1 228584 0 296 148 -green open infra-000005 d9BoGQdiQASsS3BBFm2iRA 3 1 227987 0 297 148 -green open infra-000003 1-goREK1QUKlQPAIVkWVaQ 3 1 226719 0 295 147 -green open .security zeT65uOuRTKZMjg_bbUc1g 1 1 5 0 0 0 -green open .kibana-377444158_kubeadmin wvMhDwJkR-mRZQO84K0gUQ 3 1 1 0 0 0 -green open infra-000006 5H-KBSXGQKiO7hdapDE23g 3 1 226676 0 295 147 -green open infra-000001 eH53BQ-bSxSWR5xYZB6lVg 3 1 341800 0 443 220 -green open .kibana-6 RVp7TemSSemGJcsSUmuf3A 1 1 4 0 0 0 -green open infra-000011 J7XWBauWSTe0jnzX02fU6A 3 1 226100 0 293 146 -green open app-000001 axSAFfONQDmKwatkjPXdtw 3 1 103186 0 126 57 -green open infra-000016 m9c1iRLtStWSF1GopaRyCg 3 1 13685 0 19 9 -green open infra-000002 Hz6WvINtTvKcQzw-ewmbYg 3 1 228994 0 296 148 -green open infra-000013 KR9mMFUpQl-jraYtanyIGw 3 1 228166 0 298 148 -green open audit-000001 eERqLdLmQOiQDFES1LBATQ 3 1 0 0 0 0 ----- -==== - -.. 
Verify that the log collector is updated: -+ -[source,terminal] ----- -$ oc get ds collector -o json | grep collector ----- -+ -.. Verify that the output includes a `collectort` container: -+ -[source,terminal] ----- -"containerName": "collector" ----- - -.. Verify that the log visualizer is updated to 5.x using the Kibana CRD: -+ -[source,terminal] ----- -$ oc get kibana kibana -o json ----- -+ -.. Verify that the output includes a Kibana pod with the `ready` status: -+ -.Sample output with a ready Kibana pod -[%collapsible] -==== -[source,json] ----- -[ -{ -"clusterCondition": { -"kibana-5fdd766ffd-nb2jj": [ -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -}, -{ -"lastTransitionTime": "2020-06-30T14:11:07Z", -"reason": "ContainerCreating", -"status": "True", -"type": "" -} -] -}, -"deployment": "kibana", -"pods": { -"failed": [], -"notReady": [] -"ready": [] -}, -"replicaSets": [ -"kibana-5fdd766ffd" -], -"replicas": 1 -} -] ----- -==== diff --git a/modules/cluster-logging-vector-tech-preview.adoc b/modules/cluster-logging-vector-tech-preview.adoc deleted file mode 100644 index 1e69a31af785..000000000000 --- a/modules/cluster-logging-vector-tech-preview.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -//cluster-logging-release-notes.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-vector-tech-preview_{context}"] -:FeatureName: Vector -include::snippets/technology-preview.adoc[] - -[id="cluster-logging-about-vector"] -= About Vector -Vector is a log collector offered as a tech-preview alternative to the current default collector for the {logging}. - -The following outputs are supported: - -* `elasticsearch`. An external Elasticsearch instance. The `elasticsearch` output can use a TLS connection. - -* `kafka`. A Kafka broker. The `kafka` output can use an unsecured or TLS connection. - -* `loki`. Loki, a horizontally scalable, highly available, multi-tenant log aggregation system. - - -[id="cluster-logging-enabling-vector"] -== Enabling Vector -Vector is not enabled by default. Use the following steps to enable Vector on your {product-title} cluster. - -[IMPORTANT] -==== -Vector does not support FIPS Enabled Clusters. -==== - -.Prerequisites - -* {product-title}: 4.13 -* {logging-title-uc}: 5.4 -* FIPS disabled - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc -n openshift-logging edit ClusterLogging instance ----- - -. Add a `logging.openshift.io/preview-vector-collector: enabled` annotation to the `ClusterLogging` custom resource (CR). - -. Add `vector` as a collection type to the `ClusterLogging` custom resource (CR). 
- -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: "ClusterLogging" - metadata: - name: "instance" - namespace: "openshift-logging" - annotations: - logging.openshift.io/preview-vector-collector: enabled - spec: - collection: - logs: - type: "vector" - vector: {} ----- - -[role="_additional-resources"] -.Additional resources -* link:https://vector.dev/docs/about/what-is-vector/[Vector Documentation] diff --git a/modules/cluster-logging-view-cluster-dashboards.adoc b/modules/cluster-logging-view-cluster-dashboards.adoc deleted file mode 100644 index 9205ecfca60f..000000000000 --- a/modules/cluster-logging-view-cluster-dashboards.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: CONCEPT -[id="cluster-logging-view-cluster-dashboards-about_{context}"] -= About viewing the cluster dashboard - -The {product-title} Logging dashboard contains charts that show details about your Elasticsearch instance at the cluster level. These charts help you diagnose and anticipate problems. diff --git a/modules/cluster-logging-visualizer-indices.adoc b/modules/cluster-logging-visualizer-indices.adoc deleted file mode 100644 index bd796c77befd..000000000000 --- a/modules/cluster-logging-visualizer-indices.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-visualizer-indices_{context}"] -= Defining Kibana index patterns - -An index pattern defines the Elasticsearch indices that you want to visualize. To explore and visualize data in Kibana, you must create an index pattern. - -.Prerequisites - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -* Elasticsearch documents must be indexed before you can create index patterns. This is done automatically, but it might take a few minutes in a new or updated cluster. - -.Procedure - -To define index patterns and create visualizations in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Create your Kibana index patterns by clicking *Management* -> *Index Patterns* -> *Create index pattern*: - -** Each user must manually create index patterns when logging into Kibana the first time to see logs for their projects. Users must create an index pattern named `app` and use the `@timestamp` time field to view their container logs. - -** Each admin user must create index patterns when logged into Kibana the first time for the `app`, `infra`, and `audit` indices using the `@timestamp` time field. - -. Create Kibana Visualizations from the new index patterns. 
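The note earlier in this module states that audit logs reach the internal log store only if a pipeline forwards them to the `default` output. As a reference point, a minimal `ClusterLogForwarder` pipeline for this is sketched below; the pipeline name `audit-to-default` is arbitrary, and you should confirm the exact fields against the Log Forwarding API documentation for your {logging} version:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  pipelines:
  - name: audit-to-default # arbitrary pipeline name
    inputRefs:
    - audit                # collect audit logs
    outputRefs:
    - default              # send them to the internal log store
----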
diff --git a/modules/cluster-logging-visualizer-kibana.adoc b/modules/cluster-logging-visualizer-kibana.adoc deleted file mode 100644 index 4c1451412763..000000000000 --- a/modules/cluster-logging-visualizer-kibana.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/viewing/cluster-logging-visualizer.adoc - -:_content-type: PROCEDURE -[id="cluster-logging-visualizer-kibana_{context}"] -= Viewing cluster logs in Kibana - -You view cluster logs in the Kibana web console. The methods for viewing and visualizing your data in Kibana that are beyond the scope of this documentation. For more information, refer to the link:https://www.elastic.co/guide/en/kibana/6.8/tutorial-sample-discover.html[Kibana documentation]. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. - -* Kibana index patterns must exist. - -* A user must have the `cluster-admin` role, the `cluster-reader` role, or both roles to view the *infra* and *audit* indices in Kibana. The default `kubeadmin` user has proper permissions to view these indices. -+ -If you can view the pods and logs in the `default`, `kube-` and `openshift-` projects, you should be able to access these indices. You can use the following command to check if the current user has appropriate permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -.Procedure - -To view logs in Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. - -. In Kibana, click *Discover*. - -. Select the index pattern you created from the drop-down menu in the top-left corner: *app*, *audit*, or *infra*. -+ -The log data displays as time-stamped documents. - -. Expand one of the time-stamped documents. - -. Click the *JSON* tab to display the log entry for that document. 
-+ -.Sample infrastructure log entry in Kibana -[%collapsible] -==== -[source,terminal] ----- -{ - "_index": "infra-000001", - "_type": "_doc", - "_id": "YmJmYTBlNDkZTRmLTliMGQtMjE3NmFiOGUyOWM3", - "_version": 1, - "_score": null, - "_source": { - "docker": { - "container_id": "f85fa55bbef7bb783f041066be1e7c267a6b88c4603dfce213e32c1" - }, - "kubernetes": { - "container_name": "registry-server", - "namespace_name": "openshift-marketplace", - "pod_name": "redhat-marketplace-n64gc", - "container_image": "registry.redhat.io/redhat/redhat-marketplace-index:v4.7", - "container_image_id": "registry.redhat.io/redhat/redhat-marketplace-index@sha256:65fc0c45aabb95809e376feb065771ecda9e5e59cc8b3024c4545c168f", - "pod_id": "8f594ea2-c866-4b5c-a1c8-a50756704b2a", - "host": "ip-10-0-182-28.us-east-2.compute.internal", - "master_url": "https://kubernetes.default.svc", - "namespace_id": "3abab127-7669-4eb3-b9ef-44c04ad68d38", - "namespace_labels": { - "openshift_io/cluster-monitoring": "true" - }, - "flat_labels": [ - "catalogsource_operators_coreos_com/update=redhat-marketplace" - ] - }, - "message": "time=\"2020-09-23T20:47:03Z\" level=info msg=\"serving registry\" database=/database/index.db port=50051", - "level": "unknown", - "hostname": "ip-10-0-182-28.internal", - "pipeline_metadata": { - "collector": { - "ipaddr4": "10.0.182.28", - "inputname": "fluent-plugin-systemd", - "name": "fluentd", - "received_at": "2020-09-23T20:47:15.007583+00:00", - "version": "1.7.4 1.6.0" - } - }, - "@timestamp": "2020-09-23T20:47:03.422465+00:00", - "viaq_msg_id": "YmJmYTBlNDktMDMGQtMjE3NmFiOGUyOWM3", - "openshift": { - "labels": { - "logging": "infra" - } - } - }, - "fields": { - "@timestamp": [ - "2020-09-23T20:47:03.422Z" - ], - "pipeline_metadata.collector.received_at": [ - "2020-09-23T20:47:15.007Z" - ] - }, - "sort": [ - 1600894023422 - ] -} ----- -==== diff --git a/modules/cluster-logging-visualizer-launch.adoc b/modules/cluster-logging-visualizer-launch.adoc deleted file mode 100644 index 268ea7a9c092..000000000000 --- a/modules/cluster-logging-visualizer-launch.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging-visualizer.adoc - -[id="cluster-logging-visualizer-launch_{context}"] -= Launching the log visualizer - -{product-title} uses Kibana as the log visualizer. Kibana is a browser-based console to query, discover, and visualize your logs through histograms, line graphs, -pie charts, heat maps, built-in geospatial support, and other visualizations. - -.Prerequisites - -* To list the *infra* and *audit* indices in Kibana, a user must have the `cluster-admin` role, the `cluster-reader` role, or both roles. The default `kubeadmin` user has proper permissions to list these indices. -+ -If you can view the pods and logs in the `default`, `kube-*` and `openshift-*` projects, you should be able to access these indices. You can use the following command to check if the current user has proper permissions: -+ -[source,terminal] ----- -$ oc auth can-i get pods/log -n ----- -+ -.Example output -[source,terminal] ----- -yes ----- -+ -[NOTE] -==== -The audit logs are not stored in the internal {product-title} Elasticsearch instance by default. To view the audit logs in Kibana, you must use the Log Forwarding API to configure a pipeline that uses the `default` output for audit logs. -==== - -.Procedure - -To launch Kibana: - -. In the {product-title} console, click the Application Launcher {launch} and select *Logging*. - -. 
Log in using the same credentials you use to log in to the {product-title} console. -+ -The Kibana interface launches. - -[NOTE] -==== -If you get a *security_exception* error in the Kibana console and cannot access your Kibana indices, you might have an expired OAuth token. If you see this error, log out of the Kibana console, and then log back in. This refreshes your OAuth tokens and you should be able to access your indices. -==== diff --git a/modules/cluster-machine-approver-operator.adoc b/modules/cluster-machine-approver-operator.adoc deleted file mode 100644 index e1a3953c68c0..000000000000 --- a/modules/cluster-machine-approver-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-machine-approver-operator_{context}"] -= Cluster Machine Approver Operator - -[discrete] -== Purpose - -The Cluster Machine Approver Operator automatically approves the CSRs requested for a new worker node after cluster installation. - -[NOTE] -==== -For the control plane node, the `approve-csr` service on the bootstrap node automatically approves all CSRs during the cluster bootstrapping phase. -==== - -[discrete] -== Project - -link:https://github.com/openshift/cluster-machine-approver[cluster-machine-approver-operator] diff --git a/modules/cluster-monitoring-operator.adoc b/modules/cluster-monitoring-operator.adoc deleted file mode 100644 index a9baf0310b3e..000000000000 --- a/modules/cluster-monitoring-operator.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-monitoring-operator_{context}"] -= Cluster Monitoring Operator - -[discrete] -== Purpose - -The Cluster Monitoring Operator manages and updates the Prometheus-based cluster monitoring stack deployed on top of {product-title}. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-monitoring-operator[openshift-monitoring] - -[discrete] -== CRDs - -* `alertmanagers.monitoring.coreos.com` -** Scope: Namespaced -** CR: `alertmanager` -** Validation: Yes -* `prometheuses.monitoring.coreos.com` -** Scope: Namespaced -** CR: `prometheus` -** Validation: Yes -* `prometheusrules.monitoring.coreos.com` -** Scope: Namespaced -** CR: `prometheusrule` -** Validation: Yes -* `servicemonitors.monitoring.coreos.com` -** Scope: Namespaced -** CR: `servicemonitor` -** Validation: Yes - -[discrete] -== Configuration objects - -[source,terminal] ----- -$ oc -n openshift-monitoring edit cm cluster-monitoring-config ----- diff --git a/modules/cluster-network-operator.adoc b/modules/cluster-network-operator.adoc deleted file mode 100644 index e5027d4ea013..000000000000 --- a/modules/cluster-network-operator.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-network-operator_{context}"] -= Cluster Network Operator - -[discrete] -== Purpose - -The Cluster Network Operator installs and upgrades the networking components on an {product-title} cluster. 
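For the Cluster Machine Approver Operator described earlier in this reference, the commands below show how to inspect the certificate signing requests (CSRs) that the Operator normally approves for you, and how a pending CSR can be approved manually if automatic approval does not occur. This is a short illustrative check; `<csr_name>` is a placeholder:

[source,terminal]
----
# List CSRs and their approval state; new worker nodes generate CSRs that
# the Cluster Machine Approver Operator approves automatically.
$ oc get csr

# Manually approve a specific pending CSR; <csr_name> is a placeholder.
$ oc adm certificate approve <csr_name>
----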
diff --git a/modules/cluster-node-tuning-operator-default-profiles-set.adoc b/modules/cluster-node-tuning-operator-default-profiles-set.adoc deleted file mode 100644 index 2bc11d2b3695..000000000000 --- a/modules/cluster-node-tuning-operator-default-profiles-set.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="custom-tuning-default-profiles-set_{context}"] -= Default profiles set on a cluster - -The following are the default profiles set on a cluster. - -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: default - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Optimize systems running OpenShift (provider specific parent profile) - include=-provider-${f:exec:cat:/var/lib/tuned/provider},openshift - name: openshift - recommend: - - profile: openshift-control-plane - priority: 30 - match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - - profile: openshift-node - priority: 40 ----- - -Starting with {product-title} 4.9, all OpenShift TuneD profiles are shipped with -the TuneD package. You can use the `oc exec` command to view the contents of these profiles: - -[source,terminal] ----- -$ oc exec $tuned_pod -n openshift-cluster-node-tuning-operator -- find /usr/lib/tuned/openshift{,-control-plane,-node} -name tuned.conf -exec grep -H ^ {} \; ----- diff --git a/modules/cluster-node-tuning-operator-verify-profiles.adoc b/modules/cluster-node-tuning-operator-verify-profiles.adoc deleted file mode 100644 index 3239b503ee99..000000000000 --- a/modules/cluster-node-tuning-operator-verify-profiles.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -[id="verifying-tuned-profiles-are-applied_{context}"] -= Verifying that the TuneD profiles are applied - -Verify the TuneD profiles that are applied to your cluster node. - -[source,terminal] ----- -$ oc get profile -n openshift-cluster-node-tuning-operator ----- - -.Example output -[source,terminal] ----- -NAME TUNED APPLIED DEGRADED AGE -master-0 openshift-control-plane True False 6h33m -master-1 openshift-control-plane True False 6h33m -master-2 openshift-control-plane True False 6h33m -worker-a openshift-node True False 6h28m -worker-b openshift-node True False 6h28m ----- - -* `NAME`: Name of the Profile object. There is one Profile object per node and their names match. -* `TUNED`: Name of the desired TuneD profile to apply. -* `APPLIED`: `True` if the TuneD daemon applied the desired profile. (`True/False/Unknown`). -* `DEGRADED`: `True` if any errors were reported during application of the TuneD profile (`True/False/Unknown`). -* `AGE`: Time elapsed since the creation of Profile object. - -The `ClusterOperator/node-tuning` object also contains useful information about the Operator and its node agents' health. For example, Operator misconfiguration is reported by `ClusterOperator/node-tuning` status messages. 
- -To get status information about the `ClusterOperator/node-tuning` object, run the following command: - -[source,terminal] ----- -$ oc get co/node-tuning -n openshift-cluster-node-tuning-operator ----- - -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE -node-tuning 4.13.1 True False True 60m 1/5 Profiles with bootcmdline conflict ----- - -If either the `ClusterOperator/node-tuning` or a profile object's status is `DEGRADED`, additional information is provided in the Operator or operand logs. diff --git a/modules/cluster-openshift-controller-manager-operators.adoc b/modules/cluster-openshift-controller-manager-operators.adoc deleted file mode 100644 index 52d18d574c48..000000000000 --- a/modules/cluster-openshift-controller-manager-operators.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-openshift-controller-manager-operator_{context}"] -= OpenShift Controller Manager Operator - -[discrete] -== Purpose - -The OpenShift Controller Manager Operator installs and maintains the `OpenShiftControllerManager` custom resource in a cluster and can be viewed with: - -[source,terminal] ----- -$ oc get clusteroperator openshift-controller-manager -o yaml ----- - -The custom resource definition (CRD) `openshiftcontrollermanagers.operator.openshift.io` can be viewed in a cluster with: - -[source,terminal] ----- -$ oc get crd openshiftcontrollermanagers.operator.openshift.io -o yaml ----- - -[discrete] -== Project - -link:https://github.com/openshift/cluster-openshift-controller-manager-operator[cluster-openshift-controller-manager-operator] diff --git a/modules/cluster-resources.adoc b/modules/cluster-resources.adoc deleted file mode 100644 index 1efb54bcb609..000000000000 --- a/modules/cluster-resources.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: PROCEDURE -[id="support-cluster-resources_{context}"] -= Interacting with your cluster resources - -You can interact with cluster resources by using the OpenShift CLI (`oc`) tool in {product-title}. The cluster resources that you see after running the `oc api-resources` command can be edited. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have access to the web console or you have installed the `oc` CLI tool. - -.Procedure - -. To see which configuration Operators have been applied, run the following command: -+ -[source,terminal] ----- -$ oc api-resources -o name | grep config.openshift.io ----- - -. To see what cluster resources you can configure, run the following command: -+ -[source,terminal] ----- -$ oc explain .config.openshift.io ----- - -. To see the configuration of custom resource definition (CRD) objects in the cluster, run the following command: -+ -[source,terminal] ----- -$ oc get .config -o yaml ----- - -. 
To edit the cluster resource configuration, run the following command: -+ -[source,terminal] ----- -$ oc edit .config -o yaml ----- diff --git a/modules/cluster-samples-operator.adoc b/modules/cluster-samples-operator.adoc deleted file mode 100644 index 26328f29cd5b..000000000000 --- a/modules/cluster-samples-operator.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -// operators/operator-reference.adoc -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -// installing/cluster-capabilities.adoc -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="cluster-samples-operator_{context}"] -ifdef::operator-ref[= Cluster Samples Operator] -ifdef::cluster-caps[= OpenShift samples capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Samples Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] -The Cluster Samples Operator provides the features for the `openshift-samples` capability. -endif::cluster-caps[] - -The Cluster Samples Operator manages the sample image streams and templates stored in the `openshift` namespace. - -On initial start up, the Operator creates the default samples configuration resource to initiate the creation of the image streams and templates. The configuration object is a cluster scoped object with the key `cluster` and type `configs.samples`. - -The image streams are the {op-system-first}-based {product-title} image streams pointing to images on `registry.redhat.io`. Similarly, the templates are those categorized as {product-title} templates. - -ifdef::cluster-caps[] -If you disable the samples capability, users cannot access the image streams, samples, and templates it provides. Depending on your deployment, you might want to disable this component if you do not need it. -endif::[] - -ifdef::operator-ref[] -The Cluster Samples Operator deployment is contained within the `openshift-cluster-samples-operator` namespace. On start up, the install pull secret is used by the image stream import logic in the {product-registry} and API server to authenticate with `registry.redhat.io`. An administrator can create any additional secrets in the `openshift` namespace if they change the registry used for the sample image streams. If created, those secrets contain the content of a `config.json` for `docker` needed to facilitate image import. - -The image for the Cluster Samples Operator contains image stream and template definitions for the associated {product-title} release. After the Cluster Samples Operator creates a sample, it adds an annotation that denotes the {product-title} version that it is compatible with. The Operator uses this annotation to ensure that each sample matches the compatible release version. Samples outside of its inventory are ignored, as are skipped samples. - -Modifications to any samples that are managed by the Operator are allowed as long as the version annotation is not modified or deleted. However, on an upgrade, as the version annotation will change, those modifications can get replaced as the sample will be updated with the newer version. 
The Jenkins images are part of the image payload from the installation and are tagged into the image streams directly. - -The samples resource includes a finalizer, which cleans up the following upon its deletion: - -* Operator-managed image streams -* Operator-managed templates -* Operator-generated configuration resources -* Cluster status resources - -Upon deletion of the samples resource, the Cluster Samples Operator recreates the resource using the default configuration. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-samples-operator[cluster-samples-operator] -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-storage-operator.adoc b/modules/cluster-storage-operator.adoc deleted file mode 100644 index 794f37e0d3d4..000000000000 --- a/modules/cluster-storage-operator.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -[id="cluster-storage-operator_{context}"] -ifdef::operator-ref[= Cluster Storage Operator] -ifdef::cluster-caps[= Cluster storage capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Cluster Storage Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Cluster Storage Operator provides the features for the `Storage` capability. - -endif::cluster-caps[] - -The Cluster Storage Operator sets {product-title} cluster-wide storage defaults. It ensures a default `storageclass` exists for {product-title} clusters. It also installs Container Storage Interface (CSI) drivers which enable your cluster to use various storage backends. - -ifdef::cluster-caps[] -[IMPORTANT] -==== -If the cluster storage capability is disabled, the cluster will not have a default `storageclass` or any CSI drivers. Users with administrator privileges can create a default `storageclass` and manually install CSI drivers if the cluster storage capability is disabled. -==== -endif::cluster-caps[] - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/cluster-storage-operator[cluster-storage-operator] - -[discrete] -== Configuration - -No configuration is required. - -endif::operator-ref[] - -[discrete] -== Notes - -* The storage class that the Operator creates can be made non-default by editing its annotation, but this storage class cannot be deleted as long as the Operator runs. 
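For illustration only, the default flag is controlled by the `storageclass.kubernetes.io/is-default-class` annotation, so a sketch of making the storage class non-default looks like the following, where `<storage_class_name>` is a placeholder:

[source,terminal]
----
$ oc patch storageclass <storage_class_name> -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
----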
- -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/cluster-telemetry.adoc b/modules/cluster-telemetry.adoc deleted file mode 100644 index e8e773f4392d..000000000000 --- a/modules/cluster-telemetry.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * 
installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: CONCEPT -[id="cluster-telemetry_{context}"] -ifndef::openshift-origin[] -= Telemetry access for {product-title} - -In {product-title} {product-version}, the Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, requires internet access. If your cluster is connected to the internet, Telemetry runs automatically, and your cluster is registered to {cluster-manager-url}. - -After you confirm that your {cluster-manager-url} inventory is correct, either maintained automatically by Telemetry or manually by using {cluster-manager}, link:https://access.redhat.com/documentation/en-us/subscription_central/2020-04/html/getting_started_with_subscription_watch/con-how-to-select-datacollection-tool_assembly-requirements-and-your-responsibilities-ctxt#red_hat_openshift[use subscription watch] to track your {product-title} subscriptions at the account or multi-cluster level. 
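As an optional, illustrative check that is not part of the original text, Telemetry reporting generally depends on the global pull secret containing a `cloud.openshift.com` entry. One way to inspect it, assuming `cluster-admin` access, is:

[source,terminal]
----
$ oc get secret pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}'
----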
- -endif::openshift-origin[] diff --git a/modules/cluster-version-operator.adoc b/modules/cluster-version-operator.adoc deleted file mode 100644 index 4fea6b5bdcf1..000000000000 --- a/modules/cluster-version-operator.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="cluster-version-operator_{context}"] -= Cluster Version Operator - -[discrete] -== Purpose - -Cluster Operators manage specific areas of cluster functionality. The Cluster Version Operator (CVO) manages the lifecycle of cluster Operators, many of which are installed in {product-title} by default. - -The CVO also checks with the OpenShift Update Service to see the valid updates and update paths based on current component versions and information in the graph. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-version-operator[cluster-version-operator] diff --git a/modules/cluster-wide-proxy-preqs.adoc b/modules/cluster-wide-proxy-preqs.adoc deleted file mode 100644 index f495c5dc44c1..000000000000 --- a/modules/cluster-wide-proxy-preqs.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: CONCEPT -[id="cluster-wide-proxy-prereqs_{context}"] -= Prerequisites for configuring a cluster-wide proxy - -To configure a cluster-wide proxy, you must meet the following requirements. These requirements are valid when you configure a proxy during installation or post-installation. - -[discrete] -[id="cluster-wide-proxy-general-prereqs_{context}"] -== General requirements - -* You are the cluster owner. -* Your account has sufficient privileges. -ifdef::openshift-rosa[] -* You have an existing Virtual Private Cloud (VPC) for your cluster. -endif::openshift-rosa[] -ifdef::openshift-dedicated[] -* You have an existing Virtual Private Cloud (VPC) for your cluster. -* You are using the Customer Cloud Subscription (CCS) model for your cluster. -endif::openshift-dedicated[] -* The proxy can access the VPC for the cluster and the private subnets of the VPC. The proxy is also accessible from the VPC for the cluster and from the private subnets of the VPC. -* You have added the `ec2..amazonaws.com`, `elasticloadbalancing..amazonaws.com`, and `s3..amazonaws.com` endpoints to your VPC endpoint. These endpoints are required to complete requests from the nodes to the AWS EC2 API. Because the proxy works at the container level and not at the node level, you must route these requests to the AWS EC2 API through the AWS private network. Adding the public IP address of the EC2 API to your allowlist in your proxy server is not enough. - -[discrete] -[id="cluster-wide-proxy-network-prereqs_{context}"] -== Network requirements - -* If your proxy re-encrypts egress traffic, you must create exclusions to the domain and port combinations. The following table offers guidance on these exclusions. -+ --- -** Add the following OpenShift URLs to your allowlist for re-encryption. -+ -[cols="6,1,6",options="header"] -|=== -|Address | Protocol/Port | Function -|`observatorium-mst.api.openshift.com` -|https/443 -|Required. Used for Managed OpenShift-specific telemetry. - -|`sso.redhat.com` -|https/443 -|The https://cloud.redhat.com/openshift site uses authentication from sso.redhat.com to download the cluster pull secret and use Red Hat SaaS solutions to facilitate monitoring of your subscriptions, cluster inventory, and chargeback reporting. 
-|=== -+ -** Add the following site reliability engineering (SRE) and management URLs to your allowlist for re-encryption. -+ -[cols="6,1,6",options="header"] -|=== -|Address | Protocol/Port | Function -|`*.osdsecuritylogs.splunkcloud.com` - -**OR** - -`inputs1.osdsecuritylogs.splunkcloud.com` -`inputs2.osdsecuritylogs.splunkcloud.com` -`inputs4.osdsecuritylogs.splunkcloud.com` -`inputs5.osdsecuritylogs.splunkcloud.com` -`inputs6.osdsecuritylogs.splunkcloud.com` -`inputs7.osdsecuritylogs.splunkcloud.com` -`inputs8.osdsecuritylogs.splunkcloud.com` -`inputs9.osdsecuritylogs.splunkcloud.com` -`inputs10.osdsecuritylogs.splunkcloud.com` -`inputs11.osdsecuritylogs.splunkcloud.com` -`inputs12.osdsecuritylogs.splunkcloud.com` -`inputs13.osdsecuritylogs.splunkcloud.com` -`inputs14.osdsecuritylogs.splunkcloud.com` -`inputs15.osdsecuritylogs.splunkcloud.com` -|tcp/9997 -|Used by the splunk-forwarder-operator as a log forwarding endpoint to be used by Red Hat SRE for log-based alerting. - -|`http-inputs-osdsecuritylogs.splunkcloud.com` -|https/443 -|Used by the splunk-forwarder-operator as a log forwarding endpoint to be used by Red Hat SRE for log-based alerting. -|=== --- -+ -[IMPORTANT] -==== -The use of a proxy server to perform TLS re-encryption is currently not supported if the server is acting as a transparent forward proxy where it is not configured on-cluster via the `--http-proxy` or `--https-proxy` arguments. - -A transparent forward proxy intercepts the cluster traffic, but it is not actually configured on the cluster itself. -==== diff --git a/modules/cnf-about-irq-affinity-setting.adoc b/modules/cnf-about-irq-affinity-setting.adoc deleted file mode 100644 index ec13c4e595d0..000000000000 --- a/modules/cnf-about-irq-affinity-setting.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="about_irq_affinity_setting_{context}"] -= About support of IRQ affinity setting - -Some IRQ controllers lack support for IRQ affinity setting and will always expose all online CPUs as the IRQ mask. These IRQ controllers effectively run on CPU 0. - -The following are examples of drivers and hardware that Red Hat are aware lack support for IRQ affinity setting. The list is, by no means, exhaustive: - -* Some RAID controller drivers, such as `megaraid_sas` -* Many non-volatile memory express (NVMe) drivers -* Some LAN on motherboard (LOM) network controllers -* The driver uses `managed_irqs` - -[NOTE] -==== -The reason they do not support IRQ affinity setting might be associated with factors such as the type of processor, the IRQ controller, or the circuitry connections in the motherboard. -==== - -If the effective affinity of any IRQ is set to an isolated CPU, it might be a sign of some hardware or driver not supporting IRQ affinity setting. 
To find the effective affinity, log in to the host and run the following command: - -[source,terminal] ----- -$ find /proc/irq/ -name effective_affinity -exec sh -c 'i="$1"; mask=$(cat $i); file=$(echo $i); echo $file: $mask' _ {} \; ----- - -.Example output - -[source,terminal] ----- -/proc/irq/0/effective_affinity: 1 -/proc/irq/1/effective_affinity: 8 -/proc/irq/2/effective_affinity: 0 -/proc/irq/3/effective_affinity: 1 -/proc/irq/4/effective_affinity: 2 -/proc/irq/5/effective_affinity: 1 -/proc/irq/6/effective_affinity: 1 -/proc/irq/7/effective_affinity: 1 -/proc/irq/8/effective_affinity: 1 -/proc/irq/9/effective_affinity: 2 -/proc/irq/10/effective_affinity: 1 -/proc/irq/11/effective_affinity: 1 -/proc/irq/12/effective_affinity: 4 -/proc/irq/13/effective_affinity: 1 -/proc/irq/14/effective_affinity: 1 -/proc/irq/15/effective_affinity: 1 -/proc/irq/24/effective_affinity: 2 -/proc/irq/25/effective_affinity: 4 -/proc/irq/26/effective_affinity: 2 -/proc/irq/27/effective_affinity: 1 -/proc/irq/28/effective_affinity: 8 -/proc/irq/29/effective_affinity: 4 -/proc/irq/30/effective_affinity: 4 -/proc/irq/31/effective_affinity: 8 -/proc/irq/32/effective_affinity: 8 -/proc/irq/33/effective_affinity: 1 -/proc/irq/34/effective_affinity: 2 ----- - -Some drivers use `managed_irqs`, whose affinity is managed internally by the kernel and userspace cannot change the affinity. In some cases, these IRQs might be assigned to isolated CPUs. For more information about `managed_irqs`, see link:https://access.redhat.com/solutions/4819541[Affinity of managed interrupts cannot be changed even if they target isolated CPU]. \ No newline at end of file diff --git a/modules/cnf-about-numa-aware-scheduling.adoc b/modules/cnf-about-numa-aware-scheduling.adoc deleted file mode 100644 index 7b711d8945f9..000000000000 --- a/modules/cnf-about-numa-aware-scheduling.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: CONCEPT -[id="cnf-about-numa-aware-scheduling_{context}"] -= About NUMA-aware scheduling - -Non-Uniform Memory Access (NUMA) is a compute platform architecture that allows different CPUs to access different regions of memory at different speeds. NUMA resource topology refers to the locations of CPUs, memory, and PCI devices relative to each other in the compute node. Co-located resources are said to be in the same _NUMA zone_. For high-performance applications, the cluster needs to process pod workloads in a single NUMA zone. - -NUMA architecture allows a CPU with multiple memory controllers to use any available memory across CPU complexes, regardless of where the memory is located. This allows for increased flexibility at the expense of performance. A CPU processing a workload using memory that is outside its NUMA zone is slower than a workload processed in a single NUMA zone. Also, for I/O-constrained workloads, the network interface on a distant NUMA zone slows down how quickly information can reach the application. High-performance workloads, such as telecommunications workloads, cannot operate to specification under these conditions. NUMA-aware scheduling aligns the requested cluster compute resources (CPUs, memory, devices) in the same NUMA zone to process latency-sensitive or high-performance workloads efficiently. NUMA-aware scheduling also improves pod density per compute node for greater resource efficiency. 
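To make this concrete, the following is a minimal sketch of the kind of guaranteed workload that benefits from single-NUMA-zone placement. The image, resource sizes, and the `schedulerName` value are assumptions for illustration and are not taken from this document:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: numa-aligned-workload
spec:
  schedulerName: topo-aware-scheduler # assumption: the name of the deployed NUMA-aware secondary scheduler
  containers:
  - name: app
    image: registry.example.com/telco-app:latest # hypothetical image
    resources:
      requests:
        cpu: "4"
        memory: 4Gi
      limits:
        cpu: "4"
        memory: 4Gi
----

Because the requests equal the limits and the CPU request is a whole number, the pod runs in the guaranteed QoS class, which is what the kubelet topology manager and a NUMA-aware scheduler can align to a single NUMA zone.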
- -By integrating the Node Tuning Operator's performance profile with NUMA-aware scheduling, you can further configure CPU affinity to optimize performance for latency-sensitive workloads. - -The default {product-title} pod scheduler scheduling logic considers the available resources of the entire compute node, not individual NUMA zones. If the most restrictive resource alignment is requested in the kubelet topology manager, error conditions can occur when admitting the pod to a node. Conversely, if the most restrictive resource alignment is not requested, the pod can be admitted to the node without proper resource alignment, leading to worse or unpredictable performance. For example, runaway pod creation with `Topology Affinity Error` statuses can occur when the pod scheduler makes suboptimal scheduling decisions for guaranteed pod workloads by not knowing if the pod's requested resources are available. Scheduling mismatch decisions can cause indefinite pod startup delays. Also, depending on the cluster state and resource allocation, poor pod scheduling decisions can cause extra load on the cluster because of failed startup attempts. - -The NUMA Resources Operator deploys a custom NUMA resources secondary scheduler and other resources to mitigate against the shortcomings of the default {product-title} pod scheduler. The following diagram provides a high-level overview of NUMA-aware pod scheduling. - -.NUMA-aware scheduling overview -image::216_OpenShift_Topology-aware_Scheduling_0222.png[Diagram of NUMA-aware scheduling that shows how the various components interact with each other in the cluster] - -NodeResourceTopology API:: The `NodeResourceTopology` API describes the available NUMA zone resources in each compute node. -NUMA-aware scheduler:: The NUMA-aware secondary scheduler receives information about the available NUMA zones from the `NodeResourceTopology` API and schedules high-performance workloads on a node where it can be optimally processed. -Node topology exporter:: The node topology exporter exposes the available NUMA zone resources for each compute node to the `NodeResourceTopology` API. The node topology exporter daemon tracks the resource allocation from the kubelet by using the `PodResources` API. -PodResources API:: The `PodResources` API is local to each node and exposes the resource topology and available resources to the kubelet. -+ -[NOTE] -==== -The `List` endpoint of the `PodResources` API exposes exclusive CPUs allocated to a particular container. The API does not expose CPUs that belong to a shared pool. - -The `GetAllocatableResources` endpoint exposes allocatable resources available on a node. -==== diff --git a/modules/cnf-about-ptp-and-clock-synchronization.adoc b/modules/cnf-about-ptp-and-clock-synchronization.adoc deleted file mode 100644 index c60985b76fcd..000000000000 --- a/modules/cnf-about-ptp-and-clock-synchronization.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: CONCEPT -[id="cnf-about-ptp-and-clock-synchronization_{context}"] -= About PTP and clock synchronization error events - -Loss of PTP synchronization is a critical error for a RAN network. If synchronization is lost on a node, the radio might be shut down and the network Over the Air (OTA) traffic might be shifted to another node in the wireless network. 
Fast event notifications mitigate against workload errors by allowing cluster nodes to communicate PTP clock sync status to the vRAN application running in the DU. - -Event notifications are available to vRAN applications running on the same DU node. A publish-subscribe REST API passes event notifications to the messaging bus. Publish-subscribe messaging, or pub-sub messaging, is an asynchronous service-to-service communication architecture where any message published to a topic is immediately received by all of the subscribers to the topic. - -The PTP Operator generates fast event notifications for every PTP-capable network interface. You can access the events by using a `cloud-event-proxy` sidecar container over an HTTP or Advanced Message Queuing Protocol (AMQP) message bus. - -[NOTE] -==== -PTP fast event notifications are available for network interfaces configured to use PTP ordinary clocks or PTP boundary clocks. -==== - -include::snippets/ptp-amq-interconnect-eol.adoc[] diff --git a/modules/cnf-about-ptp-fast-event-notifications-framework.adoc b/modules/cnf-about-ptp-fast-event-notifications-framework.adoc deleted file mode 100644 index c14f62f40fec..000000000000 --- a/modules/cnf-about-ptp-fast-event-notifications-framework.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: CONCEPT -[id="cnf-about-ptp-fast-event-notifications-framework_{context}"] -= About the PTP fast event notifications framework - -Use the Precision Time Protocol (PTP) fast event notifications framework to subscribe cluster applications to PTP events that the bare-metal cluster node generates. - -[NOTE] -==== -The fast event notifications framework uses a REST API for communication. The REST API is based on the _O-RAN O-Cloud Notification API Specification for Event Consumers 3.0_ that is available from link:https://orandownloadsweb.azurewebsites.net/specifications[O-RAN ALLIANCE Specifications]. -==== - -The framework consists of a publisher, subscriber, and an AMQ or HTTP messaging protocol to handle communications between the publisher and subscriber applications. -Applications run the `cloud-event-proxy` container in a sidecar pattern to subscribe to PTP events. -The `cloud-event-proxy` sidecar container can access the same resources as the primary application container without using any of the resources of the primary application and with no significant latency. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Overview of PTP fast events -image::319_OpenShift_PTP_bare-metal_OCP_nodes_0323_4.13.png[Overview of PTP fast events] - -image:darkcircle-1.png[20,20] Event is generated on the cluster host:: -`linuxptp-daemon` in the PTP Operator-managed pod runs as a Kubernetes `DaemonSet` and manages the various `linuxptp` processes (`ptp4l`, `phc2sys`, and optionally for grandmaster clocks, `ts2phc`). -The `linuxptp-daemon` passes the event to the UNIX domain socket. - -image:darkcircle-2.png[20,20] Event is passed to the cloud-event-proxy sidecar:: -The PTP plugin reads the event from the UNIX domain socket and passes it to the `cloud-event-proxy` sidecar in the PTP Operator-managed pod. -`cloud-event-proxy` delivers the event from the Kubernetes infrastructure to Cloud-Native Network Functions (CNFs) with low latency. - -image:darkcircle-3.png[20,20] Event is persisted:: -The `cloud-event-proxy` sidecar in the PTP Operator-managed pod processes the event and publishes the cloud-native event by using a REST API. 
-+ -[NOTE] -==== -When you use HTTP transport for events, you must persist the events subscription in the PTP Operator-managed pod by using a Persistent Volume (PV) resource or similar persistent storage mechanism. -==== - -image:darkcircle-4.png[20,20] Message is transported:: -The message transporter transports the event to the `cloud-event-proxy` sidecar in the application pod over HTTP or AMQP 1.0 QPID. - -image:darkcircle-5.png[20,20] Event is available from the REST API:: -The `cloud-event-proxy` sidecar in the Application pod processes the event and makes it available by using the REST API. - -image:darkcircle-6.png[20,20] Consumer application requests a subscription and receives the subscribed event:: -The consumer application sends an API request to the `cloud-event-proxy` sidecar in the application pod to create a PTP events subscription. -The `cloud-event-proxy` sidecar creates an AMQ or HTTP messaging listener protocol for the resource specified in the subscription. - -The `cloud-event-proxy` sidecar in the application pod receives the event from the PTP Operator-managed pod, unwraps the cloud events object to retrieve the data, and posts the event to the consumer application. -The consumer application listens to the address specified in the resource qualifier and receives and processes the PTP event. diff --git a/modules/cnf-about-the-profile-creator-tool.adoc b/modules/cnf-about-the-profile-creator-tool.adoc deleted file mode 100644 index 8ef33fa5d1cf..000000000000 --- a/modules/cnf-about-the-profile-creator-tool.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: CONCEPT -[id="cnf-about-the-profile-creator-tool_{context}"] -= About the Performance Profile Creator - -The Performance Profile Creator (PPC) is a command-line tool, delivered with the Node Tuning Operator, used to create the performance profile. -The tool consumes `must-gather` data from the cluster and several user-supplied profile arguments. The PPC generates a performance profile that is appropriate for your hardware and topology. - -The tool is run by one of the following methods: - -* Invoking `podman` - -* Calling a wrapper script diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc deleted file mode 100644 index af99ad355260..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-blocking-crs.adoc +++ /dev/null @@ -1,381 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="cnf-about-topology-aware-lifecycle-manager-blocking-crs_{context}"] -= Blocking ClusterGroupUpgrade CRs - -You can create multiple `ClusterGroupUpgrade` CRs and control their order of application. - -For example, if you create `ClusterGroupUpgrade` CR C that blocks the start of `ClusterGroupUpgrade` CR A, then `ClusterGroupUpgrade` CR A cannot start until the status of `ClusterGroupUpgrade` CR C becomes `UpgradeComplete`. - -One `ClusterGroupUpgrade` CR can have multiple blocking CRs. In this case, all the blocking CRs must complete before the upgrade for the current CR can start. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. 
-* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Save the content of the `ClusterGroupUpgrade` CRs in the `cgu-a.yaml`, `cgu-b.yaml`, and `cgu-c.yaml` files. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-a - namespace: default -spec: - blockingCRs: <1> - - name: cgu-c - namespace: default - clusters: - - spoke1 - - spoke2 - - spoke3 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - placementBindings: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - placementRules: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - remediationPlan: - - - spoke1 - - - spoke2 ----- -<1> Defines the blocking CRs. The `cgu-a` update cannot start until `cgu-c` is complete. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-b - namespace: default -spec: - blockingCRs: <1> - - name: cgu-a - namespace: default - clusters: - - spoke4 - - spoke5 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - placementRules: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke4 - - - spoke5 - status: {} ----- -<1> The `cgu-b` update cannot start until `cgu-a` is complete. 
-+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-c - namespace: default -spec: <1> - clusters: - - spoke6 - enable: false - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR is not enabled - reason: UpgradeNotStarted - status: "False" - type: Ready - copiedPolicies: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - managedPoliciesCompliantBeforeUpgrade: - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - placementRules: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke6 - status: {} ----- -<1> The `cgu-c` update does not have any blocking CRs. {cgu-operator} starts the `cgu-c` update when the `enable` field is set to `true`. - -. Create the `ClusterGroupUpgrade` CRs by running the following command for each relevant CR: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- - -. Start the update process by running the following command for each relevant CR: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/ \ ---type merge -p '{"spec":{"enable":true}}' ----- -+ -The following examples show `ClusterGroupUpgrade` CRs where the `enable` field is set to `true`: -+ -.Example for `cgu-a` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-a - namespace: default -spec: - blockingCRs: - - name: cgu-c - namespace: default - clusters: - - spoke1 - - spoke2 - - spoke3 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 -status: - conditions: - - message: 'The ClusterGroupUpgrade CR is blocked by other CRs that have not yet - completed: [cgu-c]' <1> - reason: UpgradeCannotStart - status: "False" - type: Ready - copiedPolicies: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - placementBindings: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - placementRules: - - cgu-a-policy1-common-cluster-version-policy - - cgu-a-policy2-common-pao-sub-policy - - cgu-a-policy3-common-ptp-sub-policy - remediationPlan: - - - spoke1 - - - spoke2 - status: {} ----- -<1> Shows the list of blocking CRs. 
-+ -.Example for `cgu-b` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-b - namespace: default -spec: - blockingCRs: - - name: cgu-a - namespace: default - clusters: - - spoke4 - - spoke5 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: 'The ClusterGroupUpgrade CR is blocked by other CRs that have not yet - completed: [cgu-a]' <1> - reason: UpgradeCannotStart - status: "False" - type: Ready - copiedPolicies: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - - name: policy3-common-ptp-sub-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - placementRules: - - cgu-b-policy1-common-cluster-version-policy - - cgu-b-policy2-common-pao-sub-policy - - cgu-b-policy3-common-ptp-sub-policy - - cgu-b-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke4 - - - spoke5 - status: {} ----- -<1> Shows the list of blocking CRs. -+ -.Example for `cgu-c` with blocking CRs -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-c - namespace: default -spec: - clusters: - - spoke6 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 -status: - conditions: - - message: The ClusterGroupUpgrade CR has upgrade policies that are still non compliant <1> - reason: UpgradeNotCompleted - status: "False" - type: Ready - copiedPolicies: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - managedPoliciesCompliantBeforeUpgrade: - - policy2-common-pao-sub-policy - - policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy4-common-sriov-sub-policy - namespace: default - placementBindings: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - placementRules: - - cgu-c-policy1-common-cluster-version-policy - - cgu-c-policy4-common-sriov-sub-policy - remediationPlan: - - - spoke6 - status: - currentBatch: 1 - remediationPlanForBatch: - spoke6: 0 ----- -<1> The `cgu-c` update does not have any blocking CRs. 
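As an optional check that is not part of the original procedure, the blocking relationships can be observed in the CR statuses. For example:

[source,terminal]
----
$ oc get clustergroupupgrade.ran.openshift.io -n default
----

While `cgu-c` is still remediating, the `Ready` condition of `cgu-a` continues to report the `UpgradeCannotStart` reason, as shown in the example statuses above.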
diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc deleted file mode 100644 index 36254476b6b2..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-config.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="cnf-about-topology-aware-lifecycle-manager-config_{context}"] -= About the {cgu-operator-full} configuration - -The {cgu-operator-first} manages the deployment of {rh-rhacm-first} policies for one or more {product-title} clusters. Using {cgu-operator} in a large network of clusters allows the phased rollout of policies to the clusters in limited batches. This helps to minimize possible service disruptions when updating. With {cgu-operator}, you can control the following actions: - -* The timing of the update -* The number of {rh-rhacm}-managed clusters -* The subset of managed clusters to apply the policies to -* The update order of the clusters -* The set of policies remediated to the cluster -* The order of policies remediated to the cluster -* The assignment of a canary cluster - -For {sno}, the {cgu-operator-first} offers the following features: - -* Create a backup of a deployment before an upgrade -* Pre-caching images for clusters with limited bandwidth - -{cgu-operator} supports the orchestration of the {product-title} y-stream and z-stream updates, and day-two operations on y-streams and z-streams. diff --git a/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc b/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc deleted file mode 100644 index 4fcbde5b0b33..000000000000 --- a/modules/cnf-about-topology-aware-lifecycle-manager-policies.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="cnf-about-topology-aware-lifecycle-manager-about-policies_{context}"] -= About managed policies used with {cgu-operator-full} - -The {cgu-operator-first} uses {rh-rhacm} policies for cluster updates. - -{cgu-operator} can be used to manage the rollout of any policy CR where the `remediationAction` field is set to `inform`. -Supported use cases include the following: - -* Manual user creation of policy CRs -* Automatically generated policies from the `PolicyGenTemplate` custom resource definition (CRD) - -For policies that update an Operator subscription with manual approval, {cgu-operator} provides additional functionality that approves the installation of the updated Operator. - -For more information about managed policies, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html-single/governance/index#policy-overview[Policy Overview] in the {rh-rhacm} documentation. - -For more information about the `PolicyGenTemplate` CRD, see the "About the PolicyGenTemplate CRD" section in "Configuring managed clusters with policies and PolicyGenTemplate resources". 
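As a minimal sketch only, with all names and the example object chosen for illustration rather than taken from this document, a manually created policy that {cgu-operator} can roll out keeps `remediationAction` set to `inform`:

[source,yaml]
----
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: example-inform-policy # hypothetical name
  namespace: ztp-common # hypothetical namespace
spec:
  disabled: false
  remediationAction: inform # TALM controls when remediation happens; the policy itself only reports compliance
  policy-templates:
  - objectDefinition:
      apiVersion: policy.open-cluster-management.io/v1
      kind: ConfigurationPolicy
      metadata:
        name: example-inform-policy-config
      spec:
        remediationAction: inform
        severity: low
        object-templates:
        - complianceType: musthave
          objectDefinition:
            apiVersion: v1
            kind: Namespace
            metadata:
              name: example-namespace # hypothetical object that the policy checks for
----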
diff --git a/modules/cnf-about-virtual-routing-and-forwarding.adoc b/modules/cnf-about-virtual-routing-and-forwarding.adoc deleted file mode 100644 index aec1274e411a..000000000000 --- a/modules/cnf-about-virtual-routing-and-forwarding.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// networking/multiple_networks/about-virtual-routing-and-forwarding.adoc - -:_content-type: CONCEPT -[id="cnf-about-virtual-routing-and-forwarding_{context}"] -= About virtual routing and forwarding - -Virtual routing and forwarding (VRF) devices combined with IP rules provide the ability to create virtual routing and forwarding domains. VRF reduces the number of permissions needed by CNF, and provides increased visibility of the network topology of secondary networks. VRF is used to provide multi-tenancy functionality, for example, where each tenant has its own unique routing tables and requires different default gateways. - -Processes can bind a socket to the VRF device. Packets through the bound socket use the routing table associated with the VRF device. An important feature of VRF is that it impacts only OSI model layer 3 traffic and above, so L2 tools, such as LLDP, are not affected. This allows higher priority IP rules such as policy-based routing to take precedence over the VRF device rules directing specific traffic. - -[id="cnf-benefits-secondary-networks-telecommunications-operators_{context}"] -== Benefits of secondary networks for pods for telecommunications operators - -In telecommunications use cases, each CNF can potentially be connected to multiple different networks sharing the same address space. These secondary networks can potentially conflict with the cluster's main network CIDR. Using the CNI VRF plugin, network functions can be connected to different customers' infrastructure using the same IP address, keeping different customers isolated. These IP addresses can overlap with the {product-title} IP space. The CNI VRF plugin also reduces the number of permissions needed by CNF and increases the visibility of network topologies of secondary networks. diff --git a/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc b/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc deleted file mode 100644 index 621e42793fd3..000000000000 --- a/modules/cnf-about_hyperthreading_for_low_latency_and_real_time_applications.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="about_hyperthreading_for_low_latency_and_real_time_applications_{context}"] -= About hyperthreading for low latency and real-time applications - -Hyperthreading is an Intel processor technology that allows a physical CPU processor core to function as two logical cores, executing two independent threads simultaneously. Hyperthreading allows for better system throughput for certain workload types where parallel processing is beneficial. The default {product-title} configuration expects hyperthreading to be enabled by default. - -For telecommunications applications, it is important to design your application infrastructure to minimize latency as much as possible. Hyperthreading can slow performance times and negatively affect throughput for compute intensive workloads that require low latency. Disabling hyperthreading ensures predictable performance and can decrease processing times for these workloads. 
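One common way to apply this on nodes that the Node Tuning Operator manages is to pass the `nosmt` kernel argument through a performance profile. The following is a sketch only; the profile name, CPU ranges, and node selector are placeholders, not values from this module:

[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: example-nosmt-profile # hypothetical name
spec:
  additionalKernelArgs:
  - nosmt # disables simultaneous multithreading (hyperthreading) at boot
  cpu:
    isolated: 2-23 # placeholder CPU ranges
    reserved: 0-1
  nodeSelector:
    node-role.kubernetes.io/worker-cnf: ""
----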
- -[NOTE] -==== -Hyperthreading implementation and configuration differs depending on the hardware you are running {product-title} on. Consult the relevant host hardware tuning information for more details of the hyperthreading implementation specific to that hardware. Disabling hyperthreading can increase the cost per core of the cluster. -==== diff --git a/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc b/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc deleted file mode 100644 index df17beef2cd8..000000000000 --- a/modules/cnf-adjusting-nic-queues-with-the-performance-profile.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="adjusting-nic-queues-with-the-performance-profile_{context}"] -= Adjusting the NIC queues with the performance profile - -The performance profile lets you adjust the queue count for each network device. - -Supported network devices: - -* Non-virtual network devices - -* Network devices that support multiple queues (channels) - -Unsupported network devices: - -* Pure software network interfaces - -* Block devices - -* Intel DPDK virtual functions - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Install the OpenShift CLI (`oc`). - -.Procedure - -. Log in to the {product-title} cluster running the Node Tuning Operator as a user with `cluster-admin` privileges. - -. Create and apply a performance profile appropriate for your hardware and topology. For guidance on creating a profile, see the "Creating a performance profile" section. - -. Edit this created performance profile: -+ -[source,terminal] ----- -$ oc edit -f .yaml ----- - -. Populate the `spec` field with the `net` object. The object list can contain two fields: - -* `userLevelNetworking` is a required field specified as a boolean flag. If `userLevelNetworking` is `true`, the queue count is set to the reserved CPU count for all supported devices. The default is `false`. -* `devices` is an optional field specifying a list of devices that will have the queues set to the reserved CPU count. If the device list is empty, the configuration applies to all network devices. The configuration is as follows: -** `interfaceName`: This field specifies the interface name, and it supports shell-style wildcards, which can be positive or negative. -*** Example wildcard syntax is as follows: ` .*` -*** Negative rules are prefixed with an exclamation mark. To apply the net queue changes to all devices other than the excluded list, use `!`, for example, `!eno1`. -** `vendorID`: The network device vendor ID represented as a 16-bit hexadecimal number with a `0x` prefix. -** `deviceID`: The network device ID (model) represented as a 16-bit hexadecimal number with a `0x` prefix. -+ -[NOTE] -==== -When a `deviceID` is specified, the `vendorID` must also be defined. A device that matches all of the device identifiers specified in a device entry `interfaceName`, `vendorID`, or a pair of `vendorID` plus `deviceID` qualifies as a network device. This network device then has its net queues count set to the reserved CPU count. - -When two or more devices are specified, the net queues count is set to any net device that matches one of them. -==== - -. 
Set the queue count to the reserved CPU count for all devices by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,54-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices matching any of the defined device identifiers by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,54-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: “eth0” - - interfaceName: “eth1” - - vendorID: “0x1af4” - - deviceID: “0x1000” - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices starting with the interface name `eth` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,54-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: “eth*” - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices with an interface named anything other than `eno1` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,54-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: “!eno1” - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Set the queue count to the reserved CPU count for all devices that have an interface name `eth0`, `vendorID` of `0x1af4`, and `deviceID` of `0x1000` by using this example performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - cpu: - isolated: 3-51,54-103 - reserved: 0-2,52-54 - net: - userLevelNetworking: true - devices: - - interfaceName: “eth0” - - vendorID: “0x1af4” - - deviceID: “0x1000” - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- - -. Apply the updated performance profile: -+ -[source,terminal] ----- -$ oc apply -f .yaml ----- diff --git a/modules/cnf-allocating-multiple-huge-page-sizes.adoc b/modules/cnf-allocating-multiple-huge-page-sizes.adoc deleted file mode 100644 index e79e3da910f2..000000000000 --- a/modules/cnf-allocating-multiple-huge-page-sizes.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// CNF-538 Promote Multiple Huge Pages Sizes for Pods and Containers to beta -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-allocating-multiple-huge-page-sizes_{context}"] -= Allocating multiple huge page sizes - -You can request huge pages with different sizes under the same container. This allows you to define more complicated pods consisting of containers with different huge page size needs. 
- -For example, you can define sizes `1G` and `2M` and the Node Tuning Operator will configure both sizes on the node, as shown here: - -[source,yaml] ----- -spec: - hugepages: - defaultHugepagesSize: 1G - pages: - - count: 1024 - node: 0 - size: 2M - - count: 4 - node: 1 - size: 1G ----- \ No newline at end of file diff --git a/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc b/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc deleted file mode 100644 index a76720346154..000000000000 --- a/modules/cnf-assigning-a-secondary-network-to-a-vrf.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// networking/multiple_networks/assigning-a-secondary-network-to-a-vrf.adoc - - -:_content-type: PROCEDURE -[id="cnf-assigning-a-secondary-network-to-a-vrf_{context}"] -= Assigning a secondary network to a VRF - -As a cluster administrator, you can configure an additional network for your VRF domain by using the CNI VRF plugin. The virtual network created by this plugin is associated with a physical interface that you specify. - -[NOTE] -==== -Applications that use VRFs need to bind to a specific device. The common usage is to use the `SO_BINDTODEVICE` option for a socket. `SO_BINDTODEVICE` binds the socket to a device that is specified in the passed interface name, for example, `eth1`. To use `SO_BINDTODEVICE`, the application must have `CAP_NET_RAW` capabilities. - -Using a VRF through the `ip vrf exec` command is not supported in {product-title} pods. To use VRF, bind applications directly to the VRF interface. -==== - -[id="cnf-creating-an-additional-network-attachment-with-the-cni-vrf-plug-in_{context}"] -== Creating an additional network attachment with the CNI VRF plugin - -The Cluster Network Operator (CNO) manages additional network definitions. When you specify an additional network to create, the CNO creates the `NetworkAttachmentDefinition` custom resource (CR) automatically. - -[NOTE] -==== -Do not edit the `NetworkAttachmentDefinition` CRs that the Cluster Network Operator manages. Doing so might disrupt network traffic on your additional network. -==== - -To create an additional network attachment with the CNI VRF plugin, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the OpenShift cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `Network` custom resource (CR) for the additional network attachment and insert the `rawCNIConfig` configuration for the additional network, as in the following example CR. Save the YAML as the file `additional-network-attachment.yaml`. -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster - spec: - additionalNetworks: - - name: test-network-1 - namespace: additional-network-1 - type: Raw - rawCNIConfig: '{ - "cniVersion": "0.3.1", - "name": "macvlan-vrf", - "plugins": [ <1> - { - "type": "macvlan", <2> - "master": "eth1", - "ipam": { - "type": "static", - "addresses": [ - { - "address": "191.168.1.23/24" - } - ] - } - }, - { - "type": "vrf", - "vrfname": "example-vrf-name", <3> - "table": 1001 <4> - }] - }' ----- -<1> `plugins` must be a list. The first item in the list must be the secondary network underpinning the VRF network. The second item in the list is the VRF plugin configuration. -<2> `type` must be set to `vrf`. -<3> `vrfname` is the name of the VRF that the interface is assigned to. If it does not exist in the pod, it is created. -<4> Optional. 
`table` is the routing table ID. By default, the `tableid` parameter is used. If it is not specified, the CNI assigns a free routing table ID to the VRF. -+ -[NOTE] -==== -VRF functions correctly only when the resource is of type `netdevice`. -==== - -. Create the `Network` resource: -+ -[source,terminal] ----- -$ oc create -f additional-network-attachment.yaml ----- - -. Confirm that the CNO created the `NetworkAttachmentDefinition` CR by running the following command. Replace `` with the namespace that you specified when configuring the network attachment, for example, `additional-network-1`. -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -additional-network-1 14m ----- -+ -[NOTE] -==== -There might be a delay before the CNO creates the CR. -==== - -.Verifying that the additional VRF network attachment is successful - -To verify that the VRF CNI is correctly configured and the additional network attachment is attached, do the following: - -. Create a network that uses the VRF CNI. -. Assign the network to a pod. -. Verify that the pod network attachment is connected to the VRF additional network. Remote shell into the pod and run the following command: -+ -[source,terminal] ----- -$ ip vrf show ----- -+ -.Example output -+ -[source,terminal] ----- -Name Table ------------------------ -red 10 ----- -. Confirm the VRF interface is master of the secondary interface: -+ -[source,terminal] ----- -$ ip link ----- -+ -.Example output -+ -[source,terminal] ----- -5: net1: mtu 1500 qdisc noqueue master red state UP mode ----- diff --git a/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc b/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc deleted file mode 100644 index 209f39f079bf..000000000000 --- a/modules/cnf-assigning-a-sriov-network-to-a-vrf.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -//networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: PROCEDURE -[id="cnf-assigning-a-sriov-network-to-a-vrf_{context}"] -= Assigning an SR-IOV network to a VRF - -As a cluster administrator, you can assign an SR-IOV network interface to your VRF domain by using the CNI VRF plugin. - -To do this, add the VRF configuration to the optional `metaPlugins` parameter of the `SriovNetwork` resource. - -[NOTE] -==== -Applications that use VRFs need to bind to a specific device. The common usage is to use the `SO_BINDTODEVICE` option for a socket. `SO_BINDTODEVICE` binds the socket to a device that is specified in the passed interface name, for example, `eth1`. To use `SO_BINDTODEVICE`, the application must have `CAP_NET_RAW` capabilities. - -Using a VRF through the `ip vrf exec` command is not supported in {product-title} pods. To use VRF, bind applications directly to the VRF interface. -==== - -[id="cnf-creating-an-additional-sriov-network-with-vrf-plug-in_{context}"] -== Creating an additional SR-IOV network attachment with the CNI VRF plugin - -The SR-IOV Network Operator manages additional network definitions. When you specify an additional SR-IOV network to create, the SR-IOV Network Operator creates the `NetworkAttachmentDefinition` custom resource (CR) automatically. - -[NOTE] -==== -Do not edit `NetworkAttachmentDefinition` custom resources that the SR-IOV Network Operator manages. Doing so might disrupt network traffic on your additional network. 
-==== - -To create an additional SR-IOV network attachment with the CNI VRF plugin, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the {product-title} cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `SriovNetwork` custom resource (CR) for the additional SR-IOV network attachment and insert the `metaPlugins` configuration, as in the following example CR. Save the YAML as the file `sriov-network-attachment.yaml`. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: example-network - namespace: additional-sriov-network-1 -spec: - ipam: | - { - "type": "host-local", - "subnet": "10.56.217.0/24", - "rangeStart": "10.56.217.171", - "rangeEnd": "10.56.217.181", - "routes": [{ - "dst": "0.0.0.0/0" - }], - "gateway": "10.56.217.1" - } - vlan: 0 - resourceName: intelnics - metaPlugins : | - { - "type": "vrf", <1> - "vrfname": "example-vrf-name" <2> - } ----- -<1> `type` must be set to `vrf`. -<2> `vrfname` is the name of the VRF that the interface is assigned to. If it does not exist in the pod, it is created. - -. Create the `SriovNetwork` resource: -+ -[source,terminal] ----- -$ oc create -f sriov-network-attachment.yaml ----- - -.Verifying that the `NetworkAttachmentDefinition` CR is successfully created - -* Confirm that the SR-IOV Network Operator created the `NetworkAttachmentDefinition` CR by running the following command. -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n <1> ----- -<1> Replace `` with the namespace that you specified when configuring the network attachment, for example, `additional-sriov-network-1`. -+ -.Example output -[source,terminal] ----- -NAME AGE -additional-sriov-network-1 14m ----- -+ -[NOTE] -==== -There might be a delay before the SR-IOV Network Operator creates the CR. -==== - -.Verifying that the additional SR-IOV network attachment is successful - -To verify that the VRF CNI is correctly configured and the additional SR-IOV network attachment is attached, do the following: - -. Create an SR-IOV network that uses the VRF CNI. -. Assign the network to a pod. -. Verify that the pod network attachment is connected to the SR-IOV additional network. Remote shell into the pod and run the following command: -+ -[source,terminal] ----- -$ ip vrf show ----- -+ -.Example output -[source,terminal] ----- -Name Table ------------------------ -red 10 ----- -. Confirm the VRF interface is master of the secondary interface: -+ -[source,terminal] ----- -$ ip link ----- -+ -.Example output -[source,terminal] ----- -... -5: net1: mtu 1500 qdisc noqueue master red state UP mode -... ----- diff --git a/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc b/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc deleted file mode 100644 index a68a4b76b1d4..000000000000 --- a/modules/cnf-associating-secondary-interfaces-metrics-to-network-attachments.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// CNF-43 Associate Secondary Interfaces Metrics to Network Attachments -// Module included in the following assemblies: -// -// *networking/associating-secondary-interfaces-metrics-to-network-attachments.adoc - -[id="cnf-associating-secondary-interfaces-metrics-to-network-attachments_{context}"] -= Extending secondary network metrics for monitoring - -Secondary devices, or interfaces, are used for different purposes. 
It is important to have a way to classify them to be able to aggregate the metrics for secondary devices with the same classification. - -Exposed metrics contain the interface but do not specify where the interface originates. This is workable when there are no additional interfaces. However, if secondary interfaces are added, it can be difficult to use the metrics since it is hard to identify interfaces using only interface names. - -When adding secondary interfaces, their names depend on the order in which they are added, and different secondary interfaces might belong to different networks and can be used for different purposes. - -With `pod_network_name_info` it is possible to extend the current metrics with additional information that identifies the interface type. In this way, it is possible to aggregate the metrics and to add specific alarms to specific interface types. - -The network type is generated using the name of the related `NetworkAttachmentDefinition`, that in turn is used to differentiate different classes of secondary networks. For example, different interfaces belonging to different networks or using different CNIs use different network attachment definition names. - -[id="cnf-associating-secondary-interfaces-metrics-to-network-attachments-network-metrics-daemon_{context}"] -== Network Metrics Daemon - -The Network Metrics Daemon is a daemon component that collects and publishes network related metrics. - -The kubelet is already publishing network related metrics you can observe. These metrics are: - -* `container_network_receive_bytes_total` -* `container_network_receive_errors_total` -* `container_network_receive_packets_total` -* `container_network_receive_packets_dropped_total` -* `container_network_transmit_bytes_total` -* `container_network_transmit_errors_total` -* `container_network_transmit_packets_total` -* `container_network_transmit_packets_dropped_total` - -The labels in these metrics contain, among others: - -* Pod name -* Pod namespace -* Interface name (such as `eth0`) - -These metrics work well until new interfaces are added to the pod, for example via https://github.com/intel/multus-cni[Multus], as it is not clear what the interface names refer to. - -The interface label refers to the interface name, but it is not clear what that interface is meant for. In case of many different interfaces, it would be impossible to understand what network the metrics you are monitoring refer to. - -This is addressed by introducing the new `pod_network_name_info` described in the following section. - -[id="cnf-associating-secondary-interfaces-metrics-with-network-name_{context}"] -== Metrics with network name - -This daemonset publishes a `pod_network_name_info` gauge metric, with a fixed value of `0`: - -[source,bash] ----- -pod_network_name_info{interface="net0",namespace="namespacename",network_name="nadnamespace/firstNAD",pod="podname"} 0 ----- - -The network name label is produced using the annotation added by Multus. It is the concatenation of the namespace the network attachment definition belongs to, plus the name of the network attachment definition. - -The new metric alone does not provide much value, but combined with the network related `container_network_*` metrics, it offers better support for monitoring secondary networks. 
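-
-The `network_name` label corresponds to the matching entry in the pod's `k8s.v1.cni.cncf.io/network-status` annotation that Multus writes. For reference, here is a trimmed, illustrative entry for the secondary interface used in the example above; the IP address shown is hypothetical:
-
-[source,json]
-----
-[
-  {
-    "name": "nadnamespace/firstNAD",
-    "interface": "net0",
-    "ips": ["192.0.2.10"],
-    "dns": {}
-  }
-]
-----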
- -Using a `promql` query like the following ones, it is possible to get a new metric containing the value and the network name retrieved from the `k8s.v1.cni.cncf.io/network-status` annotation: - -[source,bash] ----- -(container_network_receive_bytes_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_errors_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_packets_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_receive_packets_dropped_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_bytes_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_errors_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_packets_total) + on(namespace,pod,interface) group_left(network_name) ( pod_network_name_info ) -(container_network_transmit_packets_dropped_total) + on(namespace,pod,interface) group_left(network_name) ----- diff --git a/modules/cnf-checking-numa-aware-scheduler-logs.adoc b/modules/cnf-checking-numa-aware-scheduler-logs.adoc deleted file mode 100644 index 061922aad803..000000000000 --- a/modules/cnf-checking-numa-aware-scheduler-logs.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-checking-numa-aware-scheduler-logs_{context}"] -= Checking the NUMA-aware scheduler logs - -Troubleshoot problems with the NUMA-aware scheduler by reviewing the logs. If required, you can increase the scheduler log level by modifying the `spec.logLevel` field of the `NUMAResourcesScheduler` resource. Acceptable values are `Normal`, `Debug`, and `Trace`, with `Trace` being the most verbose option. - -[NOTE] -==== -To change the log level of the secondary scheduler, delete the running scheduler resource and re-deploy it with the changed log level. The scheduler is unavailable for scheduling new workloads during this downtime. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Delete the currently running `NUMAResourcesScheduler` resource: - -.. Get the active `NUMAResourcesScheduler` by running the following command: -+ -[source,terminal] ----- -$ oc get NUMAResourcesScheduler ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesscheduler 90m ----- - -.. Delete the secondary scheduler resource by running the following command: -+ -[source,terminal] ----- -$ oc delete NUMAResourcesScheduler numaresourcesscheduler ----- -+ -.Example output -[source,terminal] ----- -numaresourcesscheduler.nodetopology.openshift.io "numaresourcesscheduler" deleted ----- - -. Save the following YAML in the file `nro-scheduler-debug.yaml`. This example changes the log level to `Debug`: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - logLevel: Debug ----- - -. 
Create the updated `Debug` logging `NUMAResourcesScheduler` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler-debug.yaml ----- -+ -.Example output -[source,terminal] ----- -numaresourcesscheduler.nodetopology.openshift.io/numaresourcesscheduler created ----- - -.Verification steps - -. Check that the NUMA-aware scheduler was successfully deployed: - -.. Run the following command to check that the CRD is created succesfully: -+ -[source,terminal] ----- -$ oc get crd | grep numaresourcesschedulers ----- -+ -.Example output -[source,terminal] ----- -NAME CREATED AT -numaresourcesschedulers.nodetopology.openshift.io 2022-02-25T11:57:03Z ----- - -.. Check that the new custom scheduler is available by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesscheduler 3h26m ----- - -. Check that the logs for the scheduler shows the increased log level: - -.. Get the list of pods running in the `openshift-numaresources` namespace by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numaresources-controller-manager-d87d79587-76mrm 1/1 Running 0 46h -numaresourcesoperator-worker-5wm2k 2/2 Running 0 45h -numaresourcesoperator-worker-pb75c 2/2 Running 0 45h -secondary-scheduler-7976c4d466-qm4sc 1/1 Running 0 21m ----- - -.. Get the logs for the secondary scheduler pod by running the following command: -+ -[source,terminal] ----- -$ oc logs secondary-scheduler-7976c4d466-qm4sc -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -... -I0223 11:04:55.614788 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.Namespace total 11 items received -I0223 11:04:56.609114 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.ReplicationController total 10 items received -I0223 11:05:22.626818 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.StorageClass total 7 items received -I0223 11:05:31.610356 1 reflector.go:535] k8s.io/client-go/informers/factory.go:134: Watch close - *v1.PodDisruptionBudget total 7 items received -I0223 11:05:31.713032 1 eventhandlers.go:186] "Add event for scheduled pod" pod="openshift-marketplace/certified-operators-thtvq" -I0223 11:05:53.461016 1 eventhandlers.go:244] "Delete event for scheduled pod" pod="openshift-marketplace/certified-operators-thtvq" ----- diff --git a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc b/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc deleted file mode 100644 index 884d8455a647..000000000000 --- a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// CNF-643 Support and debugging tools for CNF -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support_{context}"] -= Collecting low latency tuning debugging data for Red Hat Support - -When opening a support case, it is helpful to provide debugging information about your cluster to Red Hat Support. 
- -The `must-gather` tool enables you to collect diagnostic information about your {product-title} cluster, including node tuning, NUMA topology, and other information needed to debug issues with low latency setup. - -For prompt support, supply diagnostic information for both {product-title} and low latency tuning. - -[id="cnf-about-must-gather_{context}"] -== About the must-gather tool - -The `oc adm must-gather` CLI command collects the information from your cluster that is most likely needed for debugging issues, such as: - -* Resource definitions -* Audit logs -* Service logs - -You can specify one or more images when you run the command by including the `--image` argument. When you specify an image, the tool collects data related to that feature or product. When you run `oc adm must-gather`, a new pod is created on the cluster. The data is collected on that pod and saved in a new directory that starts with `must-gather.local`. This directory is created in your current working directory. - -[id="cnf-about-collecting-low-latency-data_{context}"] -== About collecting low latency tuning data - -Use the `oc adm must-gather` CLI command to collect information about your cluster, including features and objects associated with low latency tuning, including: - -* The Node Tuning Operator namespaces and child objects. -* `MachineConfigPool` and associated `MachineConfig` objects. -* The Node Tuning Operator and associated Tuned objects. -* Linux Kernel command line options. -* CPU and NUMA topology -* Basic PCI device information and NUMA locality. - -To collect debugging information with `must-gather`, you must specify the Performance Addon Operator `must-gather` image: - -[source,terminal,subs="attributes+"] ----- ---image=registry.redhat.io/openshift4/performance-addon-operator-must-gather-rhel8:v{product-version}. ----- - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. However, you must still use the `performance-addon-operator-must-gather` image when running the `must-gather` command. -==== - -[id="cnf-about-gathering-data_{context}"] -== Gathering data about specific features - -You can gather debugging information about specific features by using the `oc adm must-gather` CLI command with the `--image` or `--image-stream` argument. The `must-gather` tool supports multiple images, so you can gather data about more than one feature by running a single command. - -[NOTE] -==== -To collect the default `must-gather` data in addition to specific feature data, add the `--image-stream=openshift/must-gather` argument. -==== - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11, these functions are part of the Node Tuning Operator. However, you must still use the `performance-addon-operator-must-gather` image when running the `must-gather` command. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The {product-title} CLI (oc) installed. - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. - -. Run the `oc adm must-gather` command with one or more `--image` or `--image-stream` arguments. 
For example, the following command gathers both the default cluster data and information specific to the Node Tuning Operator: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather \ - --image-stream=openshift/must-gather \ <1> - - --image=registry.redhat.io/openshift4/performance-addon-operator-must-gather-rhel8:v{product-version} <2> ----- -+ -<1> The default {product-title} `must-gather` image. -<2> The `must-gather` image for low latency tuning diagnostics. - -. Create a compressed file from the `must-gather` directory that was created in your working directory. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- - $ tar cvaf must-gather.tar.gz must-gather.local.5421342344627712289/ <1> ----- -+ -<1> Replace `must-gather-local.5421342344627712289/` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. diff --git a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc b/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc deleted file mode 100644 index 46538da7f3dd..000000000000 --- a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="configuring_for_irq_dynamic_load_balancing_{context}"] -= Configuring a node for IRQ dynamic load balancing - -Configure a cluster node for IRQ dynamic load balancing to control which cores can receive device interrupt requests (IRQ). - -.Prerequisites - -* For core isolation, all server hardware components must support IRQ affinity. To check if the hardware components of your server support IRQ affinity, view the server's hardware specifications or contact your hardware provider. - -.Procedure - -. Log in to the {product-title} cluster as a user with cluster-admin privileges. -. Set the performance profile `apiVersion` to use `performance.openshift.io/v2`. -. Remove the `globallyDisableIrqLoadBalancing` field or set it to `false`. -. Set the appropriate isolated and reserved CPUs. The following snippet illustrates a profile that reserves 2 CPUs. IRQ load-balancing is enabled for pods running on the `isolated` CPU set: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: dynamic-irq-profile -spec: - cpu: - isolated: 2-5 - reserved: 0-1 -... ----- -+ -[NOTE] -==== -When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. -==== - -. Create the pod that uses exclusive CPUs, and set `irq-load-balancing.crio.io` and `cpu-quota.crio.io` annotations to `disable`. For example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dynamic-irq-pod - annotations: - irq-load-balancing.crio.io: "disable" - cpu-quota.crio.io: "disable" -spec: - containers: - - name: dynamic-irq-pod - image: "registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version}" - command: ["sleep", "10h"] - resources: - requests: - cpu: 2 - memory: "200M" - limits: - cpu: 2 - memory: "200M" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - runtimeClassName: performance-dynamic-irq-profile -... ----- - -. 
Enter the pod `runtimeClassName` in the form performance-, where is the `name` from the `PerformanceProfile` YAML, in this example, `performance-dynamic-irq-profile`. -. Set the node selector to target a cnf-worker. -. Ensure the pod is running correctly. Status should be `running`, and the correct cnf-worker node should be set: -+ -[source,terminal] ----- -$ oc get pod -o wide ----- -+ -.Expected output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -dynamic-irq-pod 1/1 Running 0 5h33m ----- -. Get the CPUs that the pod configured for IRQ dynamic load balancing runs on: -+ -[source,terminal] ----- -$ oc exec -it dynamic-irq-pod -- /bin/bash -c "grep Cpus_allowed_list /proc/self/status | awk '{print $2}'" ----- -+ -.Expected output -+ -[source,terminal] ----- -Cpus_allowed_list: 2-3 ----- -. Ensure the node configuration is applied correctly. Log in to the node to verify the configuration. -+ -[source,terminal] ----- -$ oc debug node/ ----- -+ -.Expected output -+ -[source,terminal] ----- -Starting pod/-debug ... -To use host binaries, run `chroot /host` - -Pod IP: -If you don't see a command prompt, try pressing enter. - -sh-4.4# ----- - -. Verify that you can use the node file system: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- -+ -.Expected output -+ -[source,terminal] ----- -sh-4.4# ----- - -. Ensure the default system CPU affinity mask does not include the `dynamic-irq-pod` CPUs, for example, CPUs 2 and 3. -+ -[source,terminal] ----- -$ cat /proc/irq/default_smp_affinity ----- -+ -.Example output -+ -[source,terminal] ----- -33 ----- -. Ensure the system IRQs are not configured to run on the `dynamic-irq-pod` CPUs: -+ -[source,terminal] ----- -find /proc/irq/ -name smp_affinity_list -exec sh -c 'i="$1"; mask=$(cat $i); file=$(echo $i); echo $file: $mask' _ {} \; ----- -+ -.Example output -+ -[source,terminal] ----- -/proc/irq/0/smp_affinity_list: 0-5 -/proc/irq/1/smp_affinity_list: 5 -/proc/irq/2/smp_affinity_list: 0-5 -/proc/irq/3/smp_affinity_list: 0-5 -/proc/irq/4/smp_affinity_list: 0 -/proc/irq/5/smp_affinity_list: 0-5 -/proc/irq/6/smp_affinity_list: 0-5 -/proc/irq/7/smp_affinity_list: 0-5 -/proc/irq/8/smp_affinity_list: 4 -/proc/irq/9/smp_affinity_list: 4 -/proc/irq/10/smp_affinity_list: 0-5 -/proc/irq/11/smp_affinity_list: 0 -/proc/irq/12/smp_affinity_list: 1 -/proc/irq/13/smp_affinity_list: 0-5 -/proc/irq/14/smp_affinity_list: 1 -/proc/irq/15/smp_affinity_list: 0 -/proc/irq/24/smp_affinity_list: 1 -/proc/irq/25/smp_affinity_list: 1 -/proc/irq/26/smp_affinity_list: 1 -/proc/irq/27/smp_affinity_list: 5 -/proc/irq/28/smp_affinity_list: 1 -/proc/irq/29/smp_affinity_list: 0 -/proc/irq/30/smp_affinity_list: 0-5 ----- \ No newline at end of file diff --git a/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc b/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc deleted file mode 100644 index d401f9b4fd4d..000000000000 --- a/modules/cnf-configuring-fifo-priority-scheduling-for-ptp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-fifo-priority-scheduling-for-ptp_{context}"] -= Configuring FIFO priority scheduling for PTP hardware - -In telco or other deployment configurations that require low latency performance, PTP daemon threads run in a constrained CPU footprint alongside the rest of the infrastructure components. By default, PTP threads run with the `SCHED_OTHER` policy. 
Under high load, these threads might not get the scheduling latency they require for error-free operation. - -To mitigate against potential scheduling latency errors, you can configure the PTP Operator `linuxptp` services to allow threads to run with a `SCHED_FIFO` policy. If `SCHED_FIFO` is set for a `PtpConfig` CR, then `ptp4l` and `phc2sys` will run in the parent container under `chrt` with a priority set by the `ptpSchedulingPriority` field of the `PtpConfig` CR. - -[NOTE] -==== -Setting `ptpSchedulingPolicy` is optional, and is only required if you are experiencing latency errors. -==== - -.Procedure - -. Edit the `PtpConfig` CR profile: -+ -[source,terminal] ----- -$ oc edit PtpConfig -n openshift-ptp ----- - -. Change the `ptpSchedulingPolicy` and `ptpSchedulingPriority` fields: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: - namespace: openshift-ptp -... -spec: - profile: - - name: "profile1" -... - ptpSchedulingPolicy: SCHED_FIFO <1> - ptpSchedulingPriority: 10 <2> ----- -<1> Scheduling policy for `ptp4l` and `phc2sys` processes. Use `SCHED_FIFO` on systems that support FIFO scheduling. -<2> Required. Sets the integer value 1-65 used to configure FIFO priority for `ptp4l` and `phc2sys` processes. - -. Save and exit to apply the changes to the `PtpConfig` CR. - -.Verification - -. Get the name of the `linuxptp-daemon` pod and corresponding node where the `PtpConfig` CR has been applied: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-gmv2n 3/3 Running 0 1d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-lgm55 3/3 Running 0 1d17h 10.1.196.25 compute-1.example.com -ptp-operator-3r4dcvf7f4-zndk7 1/1 Running 0 1d7h 10.129.0.61 control-plane-1.example.com ----- - -. Check that the `ptp4l` process is running with the updated `chrt` FIFO priority: -+ -[source,terminal] ----- -$ oc -n openshift-ptp logs linuxptp-daemon-lgm55 -c linuxptp-daemon-container|grep chrt ----- -+ -.Example output -[source,terminal] ----- -I1216 19:24:57.091872 1600715 daemon.go:285] /bin/chrt -f 65 /usr/sbin/ptp4l -f /var/run/ptp4l.0.config -2 --summary_interval -4 -m ----- - - diff --git a/modules/cnf-configuring-huge-pages.adoc b/modules/cnf-configuring-huge-pages.adoc deleted file mode 100644 index 6b631d888f23..000000000000 --- a/modules/cnf-configuring-huge-pages.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -//CNF-78 (4.4) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-configuring-huge-pages_{context}"] -= Configuring huge pages - -Nodes must pre-allocate huge pages used in an {product-title} cluster. Use the Node Tuning Operator to allocate huge pages on a specific node. - -{product-title} provides a method for creating and allocating huge pages. Node Tuning Operator provides an easier method for doing this using the performance profile. - -For example, in the `hugepages` `pages` section of the performance profile, you can specify multiple blocks of `size`, `count`, and, optionally, `node`: - -[source,yaml] ----- -hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 4 - node: 0 <1> ----- - -<1> `node` is the NUMA node in which the huge pages are allocated. If you omit `node`, the pages are evenly spread across all NUMA nodes. - -[NOTE] -==== -Wait for the relevant machine config pool status that indicates the update is finished. 
-==== - -These are the only configuration steps you need to do to allocate huge pages. - - -.Verification - -* To verify the configuration, see the `/proc/meminfo` file on the node: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-141-105.ec2.internal ----- -+ -[source,terminal] ----- -# grep -i huge /proc/meminfo ----- -+ -.Example output -[source,terminal] ----- -AnonHugePages: ###### ## -ShmemHugePages: 0 kB -HugePages_Total: 2 -HugePages_Free: 2 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: #### ## -Hugetlb: #### ## ----- - -* Use `oc describe` to report the new size: -+ -[source,terminal] ----- -$ oc describe node worker-0.ocp4poc.example.com | grep -i huge ----- -+ -.Example output -[source,terminal] ----- - hugepages-1g=true - hugepages-###: ### - hugepages-###: ### ----- diff --git a/modules/cnf-configuring-log-filtering-for-linuxptp.adoc b/modules/cnf-configuring-log-filtering-for-linuxptp.adoc deleted file mode 100644 index 8d7de05a016b..000000000000 --- a/modules/cnf-configuring-log-filtering-for-linuxptp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-log-filtering-for-linuxptp_{context}"] -= Configuring log filtering for linuxptp services - -The `linuxptp` daemon generates logs that you can use for debugging purposes. In telco or other deployment configurations that feature a limited storage capacity, these logs can add to the storage demand. - -To reduce the number log messages, you can configure the `PtpConfig` custom resource (CR) to exclude log messages that report the `master offset` value. The `master offset` log message reports the difference between the current node's clock and the master clock in nanoseconds. - -.Prerequisites -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the PTP Operator. - -.Procedure - -. Edit the `PtpConfig` CR: -+ -[source,terminal] ----- -$ oc edit PtpConfig -n openshift-ptp ----- - -. In `spec.profile`, add the `ptpSettings.logReduce` specification and set the value to `true`: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: - namespace: openshift-ptp -... -spec: - profile: - - name: "profile1" -... - ptpSettings: - logReduce: "true" ----- -+ -[NOTE] -==== -For debugging purposes, you can revert this specification to `False` to include the master offset messages. -==== - -. Save and exit to apply the changes to the `PtpConfig` CR. - -.Verification - -. Get the name of the `linuxptp-daemon` pod and corresponding node where the `PtpConfig` CR has been applied: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-gmv2n 3/3 Running 0 1d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-lgm55 3/3 Running 0 1d17h 10.1.196.25 compute-1.example.com -ptp-operator-3r4dcvf7f4-zndk7 1/1 Running 0 1d7h 10.129.0.61 control-plane-1.example.com ----- - -. Verify that master offset messages are excluded from the logs by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ptp logs -c linuxptp-daemon-container | grep "master offset" <1> ----- -<1> is the name of the `linuxptp-daemon` pod, for example `linuxptp-daemon-gmv2n`. -+ -When you configure the `logReduce` specification, this command does not report any instances of `master offset` in the logs of the `linuxptp` daemon. 
\ No newline at end of file diff --git a/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc b/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc deleted file mode 100644 index b1c89e14c665..000000000000 --- a/modules/cnf-configuring-node-groups-for-the-numaresourcesoperator.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE - -[id="cnf-configuring-node-groups-for-the-numaresourcesoperator_{context}"] -= Optional: Configuring polling operations for NUMA resources updates - -The daemons controlled by the NUMA Resources Operator in their `nodeGroup` poll resources to retrieve updates about available NUMA resources. You can fine-tune polling operations for these daemons by configuring the `spec.nodeGroups` specification in the `NUMAResourcesOperator` custom resource (CR). This provides advanced control of polling operations. Configure these specifications to improve scheduling behaviour and troubleshoot suboptimal scheduling decisions. - -The configuration options are the following: - -* `infoRefreshMode`: Determines the trigger condition for polling the kubelet. The NUMA Resources Operator reports the resulting information to the API server. -* `infoRefreshPeriod`: Determines the duration between polling updates. -* `podsFingerprinting`: Determines if point-in-time information for the current set of pods running on a node is exposed in polling updates. -+ -[NOTE] -==== -`podsFingerprinting` is enabled by default. `podsFingerprinting` is a requirement for the `cacheResyncPeriod` specification in the `NUMAResourcesScheduler` CR. The `cacheResyncPeriod` specification helps to report more exact resource availability by monitoring pending resources on nodes. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -* Configure the `spec.nodeGroups` specification in your `NUMAResourcesOperator` CR: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - config: - infoRefreshMode: Periodic <1> - infoRefreshPeriod: 10s <2> - podsFingerprinting: Enabled <3> - name: worker ----- -<1> Valid values are `Periodic`, `Events`, `PeriodicAndEvents`. Use `Periodic` to poll the kubelet at intervals that you define in `infoRefreshPeriod`. Use `Events` to poll the kubelet at every pod lifecycle event. Use `PeriodicAndEvents` to enable both methods. -<2> Define the polling interval for `Periodic` or `PeriodicAndEvents` refresh modes. The field is ignored if the refresh mode is `Events`. -<3> Valid values are `Enabled` or `Disabled`. Setting to `Enabled` is a requirement for the `cacheResyncPeriod` specification in the `NUMAResourcesScheduler`. - -.Verification - -. After you deploy the NUMA Resources Operator, verify that the node group configurations were applied by running the following command: -+ -[source,terminal] ----- -$ oc get numaresop numaresourcesoperator -o json | jq '.status' ----- -+ -.Example output -[source,terminal] ----- - ... - - "config": { - "infoRefreshMode": "Periodic", - "infoRefreshPeriod": "10s", - "podsFingerprinting": "Enabled" - }, - "name": "worker" - - ... 
----- diff --git a/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc b/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc deleted file mode 100644 index 0eab42101b5f..000000000000 --- a/modules/cnf-configuring-the-ptp-fast-event-publisher.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-configuring-the-ptp-fast-event-publisher_{context}"] -= Configuring the PTP fast event notifications publisher - -To start using PTP fast event notifications for a network interface in your cluster, you must enable the fast event publisher in the PTP Operator `PtpOperatorConfig` custom resource (CR) and configure `ptpClockThreshold` values in a `PtpConfig` CR that you create. - -.Prerequisites - -* You have installed the {product-title} CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -* You have installed the PTP Operator. - -* When you use HTTP events transport, configure dynamic volume provisioning in the cluster or manually create `StorageClass`, `LocalVolume`, and `PersistentVolume` resources to persist the events subscription. -+ -[NOTE] -==== -When you enable dynamic volume provisioning in the cluster, a `PersistentVolume` resource is automatically created for the `PersistentVolumeClaim` that the PTP Operator deploys. - -For more information about manually creating persistent storage in the cluster, see "Persistent storage using local volumes". -==== - -.Procedure - -. Modify the default PTP Operator config to enable PTP fast events. - -.. Save the following YAML in the `ptp-operatorconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpOperatorConfig -metadata: - name: default - namespace: openshift-ptp -spec: - daemonNodeSelector: - node-role.kubernetes.io/worker: "" - ptpEventConfig: - enableEventPublisher: true <1> - storageType: "example-storage-class" <2> ----- -<1> Set `enableEventPublisher` to `true` to enable PTP fast event notifications. -<2> Use the value that you set for `storageType` to populate the `StorageClassName` field for the `PersistentVolumeClaim` (`PVC`) resource that the PTP Operator automatically deploys. -The `PVC` resource is used to persist consumer event subscriptions. -+ -[NOTE] -==== -In {product-title} 4.13 or later, you do not need to set the `spec.ptpEventConfig.transportHost` field in the `PtpOperatorConfig` resource when you use HTTP transport for PTP events. -Set `transportHost` only when you use AMQP transport for PTP events. - -The value that you set for `.spec.storageType` in the `PtpOperatorConfig` CR must match the `storageClassName` that is set in the `PersistentVolume` CR. -If `storageType` is not set and the `transportHost` uses HTTP, the PTP daemons are not deployed. -==== - -.. Update the `PtpOperatorConfig` CR: -+ -[source,terminal] ----- -$ oc apply -f ptp-operatorconfig.yaml ----- - -. Create a `PtpConfig` custom resource (CR) for the PTP enabled interface, and set the required values for `ptpClockThreshold` and `ptp4lOpts`. 
-The following YAML illustrates the required values that you must set in the `PtpConfig` CR: -+ -[source,yaml] ----- -spec: - profile: - - name: "profile1" - interface: "enp5s0f0" - ptp4lOpts: "-2 -s --summary_interval -4" <1> - phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" <2> - ptp4lConf: "" <3> - ptpClockThreshold: <4> - holdOverTimeout: 5 - maxOffsetThreshold: 100 - minOffsetThreshold: -100 ----- -<1> Append `--summary_interval -4` to use PTP fast events. -<2> Required `phc2sysOpts` values. `-m` prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. -<3> Specify a string that contains the configuration to replace the default `/etc/ptp4l.conf` file. To use the default configuration, leave the field empty. -<4> Optional. If the `ptpClockThreshold` stanza is not present, default values are used for the `ptpClockThreshold` fields. The stanza shows default `ptpClockThreshold` values. The `ptpClockThreshold` values configure how long after the PTP master clock is disconnected before PTP events are triggered. `holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. diff --git a/modules/cnf-configuring-workload-hints.adoc b/modules/cnf-configuring-workload-hints.adoc deleted file mode 100644 index 52a39eec5873..000000000000 --- a/modules/cnf-configuring-workload-hints.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="configuring-workload-hints_{context}"] -= Configuring workload hints manually - -.Procedure - -. Create a `PerformanceProfile` appropriate for the environment's hardware and topology as described in the table in "Understanding workload hints". Adjust the profile to match the expected workload. In this example, we tune for the lowest possible latency. - -. Add the `highPowerConsumption` and `realTime` workload hints. Both are set to `true` here. -+ -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: workload-hints - spec: - ... - workloadHints: - highPowerConsumption: true <1> - realTime: true <2> ----- -<1> If `highPowerConsumption` is `true`, the node is tuned for very low latency at the cost of increased power consumption. -<2> Disables some debugging and monitoring features that can affect system latency. diff --git a/modules/cnf-cpu-infra-container.adoc b/modules/cnf-cpu-infra-container.adoc deleted file mode 100644 index 819f524ae863..000000000000 --- a/modules/cnf-cpu-infra-container.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-cpu-infra-container_{context}"] -= Restricting CPUs for infra and application containers - -Generic housekeeping and workload tasks use CPUs in a way that may impact latency-sensitive processes. 
By default, the container runtime uses all online CPUs to run all containers together, which can result in context switches and spikes in latency. Partitioning the CPUs prevents noisy processes from interfering with latency-sensitive processes by separating them from each other. The following table describes how processes run on a CPU after you have tuned the node using the Node Tuning Operator: - -.Process' CPU assignments -[%header,cols=2*] -|=== -|Process type -|Details - -|`Burstable` and `BestEffort` pods -|Runs on any CPU except where low latency workload is running - -|Infrastructure pods -|Runs on any CPU except where low latency workload is running - -|Interrupts -|Redirects to reserved CPUs (optional in {product-title} 4.7 and later) - -|Kernel processes -|Pins to reserved CPUs - -|Latency-sensitive workload pods -|Pins to a specific set of exclusive CPUs from the isolated pool - -|OS processes/systemd services -|Pins to reserved CPUs -|=== - -The allocatable capacity of cores on a node for pods of all QoS process types, `Burstable`, `BestEffort`, or `Guaranteed`, is equal to the capacity of the isolated pool. The capacity of the reserved pool is removed from the node's total core capacity for use by the cluster and operating system housekeeping duties. - -.Example 1 -A node features a capacity of 100 cores. Using a performance profile, the cluster administrator allocates 50 cores to the isolated pool and 50 cores to the reserved pool. The cluster administrator assigns 25 cores to QoS `Guaranteed` pods and 25 cores for `BestEffort` or `Burstable` pods. This matches the capacity of the isolated pool. - -.Example 2 -A node features a capacity of 100 cores. Using a performance profile, the cluster administrator allocates 50 cores to the isolated pool and 50 cores to the reserved pool. The cluster administrator assigns 50 cores to QoS `Guaranteed` pods and one core for `BestEffort` or `Burstable` pods. This exceeds the capacity of the isolated pool by one core. Pod scheduling fails because of insufficient CPU capacity. - - -The exact partitioning pattern to use depends on many factors like hardware, workload characteristics and the expected system load. Some sample use cases are as follows: - -* If the latency-sensitive workload uses specific hardware, such as a network interface controller (NIC), ensure that the CPUs in the isolated pool are as close as possible to this hardware. At a minimum, you should place the workload in the same Non-Uniform Memory Access (NUMA) node. - -* The reserved pool is used for handling all interrupts. When depending on system networking, allocate a sufficiently-sized reserve pool to handle all the incoming packet interrupts. In {product-version} and later versions, workloads can optionally be labeled as sensitive. - -The decision regarding which specific CPUs should be used for reserved and isolated partitions requires detailed analysis and measurements. Factors like NUMA affinity of devices and memory play a role. The selection also depends on the workload architecture and the specific use case. - -[IMPORTANT] -==== -The reserved and isolated CPU pools must not overlap and together must span all available cores in the worker node. -==== - -To ensure that housekeeping tasks and workloads do not interfere with each other, specify two groups of CPUs in the `spec` section of the performance profile. - -* `isolated` - Specifies the CPUs for the application container workloads. These CPUs have the lowest latency. 
Processes in this group have no interruptions and can, for example, reach much higher DPDK zero packet loss bandwidth. - -* `reserved` - Specifies the CPUs for the cluster and operating system housekeeping duties. Threads in the `reserved` group are often busy. Do not run latency-sensitive applications in the `reserved` group. Latency-sensitive applications run in the `isolated` group. - -.Procedure - -. Create a performance profile appropriate for the environment's hardware and topology. - -. Add the `reserved` and `isolated` parameters with the CPUs you want reserved and isolated for the infra and application containers: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: infra-cpus -spec: - cpu: - reserved: "0-4,9" <1> - isolated: "5-8" <2> - nodeSelector: <3> - node-role.kubernetes.io/worker: "" ----- -<1> Specify which CPUs are for infra containers to perform cluster and operating system housekeeping duties. -<2> Specify which CPUs are for application containers to run workloads. -<3> Optional: Specify a node selector to apply the performance profile to specific nodes. diff --git a/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc b/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc deleted file mode 100644 index 6067f8f474f4..000000000000 --- a/modules/cnf-creating-nrop-cr-with-manual-performance-settings.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-creating-nrop-cr-with-manual-performance-settings_{context}"] -= Creating the NUMAResourcesOperator custom resource with manual performance settings - -When you have installed the NUMA Resources Operator, then create the `NUMAResourcesOperator` custom resource (CR) that instructs the NUMA Resources Operator to install all the cluster infrastructure needed to support the NUMA-aware scheduler, including daemon sets and APIs. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -. Optional: Create the `MachineConfigPool` custom resource that enables custom kubelet configurations for worker nodes: -+ -[NOTE] -==== -By default, {product-title} creates a `MachineConfigPool` resource for worker nodes in the cluster. You can create a custom `MachineConfigPool` resource if required. -==== - -.. Save the following YAML in the `nro-machineconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - labels: - cnf-worker-tuning: enabled - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" - name: worker -spec: - machineConfigSelector: - matchLabels: - machineconfiguration.openshift.io/role: worker - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" ----- - -.. Create the `MachineConfigPool` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-machineconfig.yaml ----- - -. Create the `NUMAResourcesOperator` custom resource: - -.. 
Save the following YAML in the `nrop.yaml` file: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> ----- -<1> Should match the label applied to worker nodes in the related `MachineConfigPool` CR. - -.. Create the `NUMAResourcesOperator` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nrop.yaml ----- - -.Verification - -* Verify that the NUMA Resources Operator deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesoperator 10m ----- diff --git a/modules/cnf-creating-nrop-cr.adoc b/modules/cnf-creating-nrop-cr.adoc deleted file mode 100644 index 140716e9175e..000000000000 --- a/modules/cnf-creating-nrop-cr.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-creating-nrop-cr_{context}"] -= Creating the NUMAResourcesOperator custom resource - -When you have installed the NUMA Resources Operator, then create the `NUMAResourcesOperator` custom resource (CR) that instructs the NUMA Resources Operator to install all the cluster infrastructure needed to support the NUMA-aware scheduler, including daemon sets and APIs. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the NUMA Resources Operator. - -.Procedure - -. Create the `NUMAResourcesOperator` custom resource: - -.. Save the following YAML in the `nrop.yaml` file: -+ -[source,yaml] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesOperator -metadata: - name: numaresourcesoperator -spec: - nodeGroups: - - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" ----- - -.. Create the `NUMAResourcesOperator` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nrop.yaml ----- - -.Verification - -* Verify that the NUMA Resources Operator deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -numaresourcesoperator 10m ----- diff --git a/modules/cnf-creating-the-performance-profile-object.adoc b/modules/cnf-creating-the-performance-profile-object.adoc deleted file mode 100644 index be2f3dd0f761..000000000000 --- a/modules/cnf-creating-the-performance-profile-object.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-78 -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-creating-the-performance-profile-object_{context}"] -= Creating the PerformanceProfile object - -Create the `PerformanceProfile` object using the object that is posted to the cluster. -After you have specified your settings, the `PerformanceProfile` object is compiled into multiple objects: - -* A `Machine.Config` file that manipulates the nodes. -* A `KubeletConfig` file that configures the Topology Manager, the CPU Manager, and the {product-title} nodes. -* The Tuned profile that configures the Node Tuning Operator. - -.Procedure - -. Prepare a cluster. - -. 
Create a `MachineConfigPool` resource. - -. Install the Performance Addon Operator. - -. Create a performance profile that is appropriate for your hardware and topology. -In the performance profile, you can specify whether to update the kernel to kernel-rt, the CPUs that -will be reserved for housekeeping, and the CPUs that will be used for running the workloads. -+ -This is a typical performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: -spec: - cpu: - isolated: "1-3" - reserved: "0" - hugepages: - defaultHugepagesSize: "1Gi" - pages: - - size: "1Gi" - count: 4 - node: 0 - realTimeKernel: - enabled: true - numa: - topologyPolicy: "best-effort" ----- - -. Specify two groups of CPUs in the `spec` section: -+ -`isolated` - Has the lowest latency. Processes in this group have no interruptions and can, for example, reach much higher DPDK zero packet loss bandwidth. -+ -`reserved` - The housekeeping CPUs. Threads in the reserved group tend to be very busy, so latency-sensitive -applications should be run in the isolated group. -See link:https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed[Create a pod that gets assigned a QoS class of `Guaranteed`]. - -For example, you can reserve cores (threads) from a single NUMA node and put your workloads on another NUMA node. -This is because the housekeeping CPUs might touch the caches in the CPU. -Keeping your workloads on a separate NUMA node prevents the workloads and the housekeeping tasks from interfering with each other. -Additionally, each NUMA node has its own memory bus that is not shared. diff --git a/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc b/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc deleted file mode 100644 index c4b7582cac5f..000000000000 --- a/modules/cnf-debugging-low-latency-cnf-tuning-status.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-303 (4.5) -// scalability_and_performance/cnf-low-latency-tuning.adoc -//CNF-303 Performance add-ons status CNF-372 -//Performance Addon Operator Detailed Status -//See: https://issues.redhat.com/browse/CNF-379 (Yanir Quinn) - -[id="cnf-debugging-low-latency-cnf-tuning-status_{context}"] -= Debugging low latency CNF tuning status - -The `PerformanceProfile` custom resource (CR) contains status fields for reporting tuning status and debugging latency degradation issues. These fields report on conditions that describe the state of the operator's reconciliation functionality. - -A typical issue can arise when machine config pools that are attached to the performance profile are in a degraded state, causing the `PerformanceProfile` status to degrade. In this case, the machine config pool issues a failure message.
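While debugging, you can also list the reported conditions directly. The following is a minimal hedged sketch that assumes a performance profile named `performance`; substitute your own profile name:

[source,terminal]
----
$ oc get performanceprofile performance -o jsonpath='{range .status.conditions[*]}{.type}{"\t"}{.status}{"\t"}{.message}{"\n"}{end}'
----

This prints one line per condition, which can be quicker to scan than the full `oc describe` output shown later in this module.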
- -The Node Tuning Operator contains the `performanceProfile.spec.status.Conditions` status field: - -[source,bash] ----- -Status: - Conditions: - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: True - Type: Available - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: True - Type: Upgradeable - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: False - Type: Progressing - Last Heartbeat Time: 2020-06-02T10:01:24Z - Last Transition Time: 2020-06-02T10:01:24Z - Status: False - Type: Degraded ----- - -The `Status` field contains `Conditions` that specify `Type` values that indicate the status of the performance profile: - -`Available`:: All machine configs and Tuned profiles have been created successfully and are available for the cluster components that are responsible for processing them (NTO, MCO, Kubelet). - -`Upgradeable`:: Indicates whether the resources maintained by the Operator are in a state that is safe to upgrade. - -`Progressing`:: Indicates that the deployment process for the performance profile has started. - -`Degraded`:: Indicates an error if: -+ -* Validation of the performance profile has failed. -* Creation of all relevant components did not complete successfully. - -Each of these types contains the following fields: - -`Status`:: The state for the specific type (`true` or `false`). -`Timestamp`:: The transaction timestamp. -`Reason string`:: The machine-readable reason. -`Message string`:: The human-readable reason describing the state and error details, if any. - -[id="cnf-debugging-low-latency-cnf-tuning-status-machineconfigpools_{context}"] -== Machine config pools - -A performance profile and its created products are applied to a node according to an associated machine config pool (MCP). The MCP holds valuable information about the progress of applying the machine configurations created by performance profiles, which encompass kernel args, kube config, huge pages allocation, and deployment of the rt-kernel. The Performance Profile controller monitors changes in the MCP and updates the performance profile status accordingly. - -The only condition that the MCP returns to the performance profile status is when the MCP is `Degraded`, which leads to `performanceProfile.status.condition.Degraded = true`. - -.Example - -The following example shows a performance profile with an associated machine config pool (`worker-cnf`) that was created for it: - -. The associated machine config pool is in a degraded state: -+ -[source,terminal] ----- -# oc get mcp ----- -+ -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-2ee57a93fa6c9181b546ca46e1571d2d True False False 3 3 3 0 2d21h -worker rendered-worker-d6b2bdc07d9f5a59a6b68950acf25e5f True False False 2 2 2 0 2d21h -worker-cnf rendered-worker-cnf-6c838641b8a08fff08dbd8b02fb63f7c False True True 2 1 1 1 2d20h ----- - -. The `describe` output of the MCP shows the reason: -+ -[source,terminal] ----- -# oc describe mcp worker-cnf ----- -+ -.Example output -+ -[source,terminal] ----- - Message: Node node-worker-cnf is reporting: "prepping update: - machineconfig.machineconfiguration.openshift.io \"rendered-worker-cnf-40b9996919c08e335f3ff230ce1d170\" not - found" - Reason: 1 nodes are reporting degraded status on sync ----- - -. 
The degraded state should also appear under the performance profile `status` field marked as `degraded = true`: -+ -[source,terminal] ----- -# oc describe performanceprofiles performance ----- -+ -.Example output -+ -[source,terminal] ----- -Message: Machine config pool worker-cnf Degraded Reason: 1 nodes are reporting degraded status on sync. -Machine config pool worker-cnf Degraded Message: Node yquinn-q8s5v-w-b-z5lqn.c.openshift-gce-devel.internal is -reporting: "prepping update: machineconfig.machineconfiguration.openshift.io -\"rendered-worker-cnf-40b9996919c08e335f3ff230ce1d170\" not found". Reason: MCPDegraded - Status: True - Type: Degraded ----- diff --git a/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc b/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc deleted file mode 100644 index 9f523817fb82..000000000000 --- a/modules/cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-deploying-the-numa-aware-scheduler-with-manual-performance-settings_{context}"] -= Deploying the NUMA-aware secondary pod scheduler with manual performance settings - -After you install the NUMA Resources Operator, do the following to deploy the NUMA-aware secondary pod scheduler: - -* Configure the pod admittance policy for the required machine profile - -* Create the required machine config pool - -* Deploy the NUMA-aware secondary scheduler - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator. - -.Procedure -. Create the `KubeletConfig` custom resource that configures the pod admittance policy for the machine profile: - -.. Save the following YAML in the `nro-kubeletconfig.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: cnf-worker-tuning -spec: - machineConfigPoolSelector: - matchLabels: - cnf-worker-tuning: enabled - kubeletConfig: - cpuManagerPolicy: "static" <1> - cpuManagerReconcilePeriod: "5s" - reservedSystemCPUs: "0,1" - memoryManagerPolicy: "Static" <2> - evictionHard: - memory.available: "100Mi" - kubeReserved: - memory: "512Mi" - reservedMemory: - - numaNode: 0 - limits: - memory: "1124Mi" - systemReserved: - memory: "512Mi" - topologyManagerPolicy: "single-numa-node" <3> - topologyManagerScope: "pod" ----- -<1> For `cpuManagerPolicy`, `static` must use a lowercase `s`. -<2> For `memoryManagerPolicy`, `Static` must use an uppercase `S`. -<3> `topologyManagerPolicy` must be set to `single-numa-node`. - -.. Create the `KubeletConfig` custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-kubeletconfig.yaml ----- - -. Create the `NUMAResourcesScheduler` custom resource that deploys the NUMA-aware custom pod scheduler: - -.. Save the following YAML in the `nro-scheduler.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - cacheResyncPeriod: "5s" <1> ----- -<1> Enter an interval value in seconds for synchronization of the scheduler cache. 
A value of `5s` is typical for most implementations. -+ -[NOTE] -==== -* Enable the `cacheResyncPeriod` specification to help the NUMA Resource Operator report more exact resource availability by monitoring pending resources on nodes and synchronizing this information in the scheduler cache at a defined interval. This also helps to minimize `Topology Affinity Error` errors because of sub-optimal scheduling decisions. The lower the interval the greater the network load. The `cacheResyncPeriod` specification is disabled by default. - -* Setting a value of `Enabled` for the `podsFingerprinting` specification in the `NUMAResourcesOperator` CR is a requirement for the implementation of the `cacheResyncPeriod` specification. -==== - -.. Create the `NUMAResourcesScheduler` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler.yaml ----- - -.Verification - -* Verify that the required resources deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get all -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 13m -pod/numaresourcesoperator-worker-dvj4n 2/2 Running 0 16m -pod/numaresourcesoperator-worker-lcg4t 2/2 Running 0 16m -pod/secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 16m -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/numaresourcesoperator-worker 2 2 2 2 2 node-role.kubernetes.io/worker= 16m -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/numaresources-controller-manager 1/1 1 1 13m -deployment.apps/secondary-scheduler 1/1 1 1 16m -NAME DESIRED CURRENT READY AGE -replicaset.apps/numaresources-controller-manager-7575848485 1 1 1 13m -replicaset.apps/secondary-scheduler-56994cf6cf 1 1 1 16m ----- diff --git a/modules/cnf-deploying-the-numa-aware-scheduler.adoc b/modules/cnf-deploying-the-numa-aware-scheduler.adoc deleted file mode 100644 index 4d30c1bc3da4..000000000000 --- a/modules/cnf-deploying-the-numa-aware-scheduler.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-deploying-the-numa-aware-scheduler_{context}"] -= Deploying the NUMA-aware secondary pod scheduler - -After you install the NUMA Resources Operator, do the following to deploy the NUMA-aware secondary pod scheduler: - -* Configure the performance profile. - -* Deploy the NUMA-aware secondary scheduler. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Create the required machine config pool. - -* Install the NUMA Resources Operator. - -.Procedure - -. Create the `PerformanceProfile` custom resource (CR): - -.. Save the following YAML in the `nro-perfprof.yaml` file: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: perfprof-nrop -spec: - cpu: <1> - isolated: "4-51,56-103" - reserved: "0,1,2,3,52,53,54,55" - nodeSelector: - node-role.kubernetes.io/worker: "" - numa: - topologyPolicy: single-numa-node ----- -<1> The `cpu.isolated` and `cpu.reserved` specifications define ranges for isolated and reserved CPUs. Enter valid values for your CPU configuration. See the _Additional resources_ section for more information about configuring a performance profile. - -.. 
Create the `PerformanceProfile` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-perfprof.yaml ----- -+ -.Example output -[source,terminal] ----- -performanceprofile.performance.openshift.io/perfprof-nrop created ----- - -. Create the `NUMAResourcesScheduler` custom resource that deploys the NUMA-aware custom pod scheduler: - -.. Save the following YAML in the `nro-scheduler.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: nodetopology.openshift.io/v1 -kind: NUMAResourcesScheduler -metadata: - name: numaresourcesscheduler -spec: - imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}" - cacheResyncPeriod: "5s" <1> ----- -<1> Enter an interval value in seconds for synchronization of the scheduler cache. A value of `5s` is typical for most implementations. -+ -[NOTE] -==== -* Enable the `cacheResyncPeriod` specification to help the NUMA Resource Operator report more exact resource availability by monitoring pending resources on nodes and synchronizing this information in the scheduler cache at a defined interval. This also helps to minimize `Topology Affinity Error` errors because of sub-optimal scheduling decisions. The lower the interval the greater the network load. The `cacheResyncPeriod` specification is disabled by default. - -* Setting a value of `Enabled` for the `podsFingerprinting` specification in the `NUMAResourcesOperator` CR is a requirement for the implementation of the `cacheResyncPeriod` specification. -==== - -.. Create the `NUMAResourcesScheduler` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-scheduler.yaml ----- - -.Verification - -. Verify that the performance profile was applied by running the following command: -+ -[source,terminal] ----- -$ oc describe performanceprofile ----- - -. Verify that the required resources deployed successfully by running the following command: -+ -[source,terminal] ----- -$ oc get all -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 13m -pod/numaresourcesoperator-worker-dvj4n 2/2 Running 0 16m -pod/numaresourcesoperator-worker-lcg4t 2/2 Running 0 16m -pod/secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 16m -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/numaresourcesoperator-worker 2 2 2 2 2 node-role.kubernetes.io/worker= 16m -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/numaresources-controller-manager 1/1 1 1 13m -deployment.apps/secondary-scheduler 1/1 1 1 16m -NAME DESIRED CURRENT READY AGE -replicaset.apps/numaresources-controller-manager-7575848485 1 1 1 13m -replicaset.apps/secondary-scheduler-56994cf6cf 1 1 1 16m ----- diff --git a/modules/cnf-disable-chronyd.adoc b/modules/cnf-disable-chronyd.adoc deleted file mode 100644 index 671a726fb8e2..000000000000 --- a/modules/cnf-disable-chronyd.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-disable-chronyd_{context}"] -= Disabling the chrony time service - -You can disable the chrony time service (`chronyd`) for nodes with a specific role by using a `MachineConfig` custom resource (CR). - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create the `MachineConfig` CR that disables `chronyd` for the specified node role. - -.. Save the following YAML in the `disable-chronyd.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: <1> - name: disable-chronyd -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - contents: | - [Unit] - Description=NTP client/server - Documentation=man:chronyd(8) man:chrony.conf(5) - After=ntpdate.service sntp.service ntpd.service - Conflicts=ntpd.service systemd-timesyncd.service - ConditionCapability=CAP_SYS_TIME - [Service] - Type=forking - PIDFile=/run/chrony/chronyd.pid - EnvironmentFile=-/etc/sysconfig/chronyd - ExecStart=/usr/sbin/chronyd $OPTIONS - ExecStartPost=/usr/libexec/chrony-helper update-daemon - PrivateTmp=yes - ProtectHome=yes - ProtectSystem=full - [Install] - WantedBy=multi-user.target - enabled: false - name: "chronyd.service" ----- -<1> Node role where you want to disable `chronyd`, for example, `master`. - -.. Create the `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f disable-chronyd.yaml ----- diff --git a/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc b/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc deleted file mode 100644 index 53d72c4b574e..000000000000 --- a/modules/cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-installing-a-distributed-unit.adoc - -[id="cnf-du-configuring-a-performance-profile-to-support-workload-partitioning.adoc_{context}"] - -= Configuring a performance profile to support workload partitioning - -After you have configured workload partitioning, you need to ensure that the Performance Addon Operator has been installed and that you configured a performance profile. - -The reserved CPU IDs in the performance profile must match the workload partitioning CPU IDs. diff --git a/modules/cnf-du-management-pods.adoc b/modules/cnf-du-management-pods.adoc deleted file mode 100644 index b7dd626c82b6..000000000000 --- a/modules/cnf-du-management-pods.adoc +++ /dev/null @@ -1,358 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-du-management-pods.adoc_{context}"] - -= Cluster Management pods - -For the purposes of achieving 2-core (4 HT CPU) installation of single-node clusters, the set of pods that are considered _management_ are limited to: - -* Core Operators -* Day 2 Operators -* ACM pods - -The following tables identify the namespaces and pods that can be restricted to a subset of the CPUs on a node by configuring workload partitioning. 
- -== Core Operators - -[cols="1,1"] -|=== -| Namespace | Pod - -| openshift-apiserver-operator -| openshift-apiserver-operator - -| openshift-apiserver -| apiserver - -| openshift-authentication-operator -| authentication-operator - -| openshift-authentication -| oauth-openshift - -| openshift-cloud-controller-manager-operator -| cluster-cloud-controller-manager - -| openshift-cloud-credential-operator -| cloud-credential-operator - -| openshift-cluster-machine-approver -| machine-approver - -| openshift-cluster-node-tuning-operator -| cluster-node-tuning-operator - -| openshift-cluster-node-tuning-operator -| tuned - -| openshift-cluster-samples-operator -| cluster-samples-operator - -| openshift-cluster-storage-operator -| cluster-storage-operator - -| openshift-cluster-storage-operator -| csi-snapshot-controller - -| openshift-cluster-storage-operator -| csi-snapshot-controller-operator - -| openshift-cluster-storage-operator -| csi-snapshot-webhook - -| openshift-cluster-version -| cluster-version-operator - -| openshift-config-operator -| openshift-config-operator - -| openshift-console-operator -| console-operator - -| openshift-console -| console - -| openshift-console -| downloads - -| openshift-controller-manager-operator -| openshift-controller-manager-operator - -| openshift-controller-manager -| controller-manager - -| openshift-dns-operator -| dns-operator - -| openshift-dns -| dns-default - -| openshift-dns -| node-resolver - -| openshift-etcd-operator -| etcd-operator - -| openshift-etcd -| etcd - -| openshift-image-registry -| cluster-image-registry-operator - -| openshift-image-registry -| image-pruner - -| openshift-image-registry -| node-ca - -| openshift-ingress-canary -| ingress-canary - -| openshift-ingress-operator -| ingress-operator - -| openshift-ingress -| router-default - -| openshift-insights -| insights-operator - -| openshift-kube-apiserver-operator -| kube-apiserver-operator - -| openshift-kube-apiserver -| kube-apiserver - -| openshift-kube-controller-manager-operator -| kube-controller-manager-operator - -| openshift-kube-controller-manager -| kube-controller-manager - -| openshift-kube-scheduler-operator -| openshift-kube-scheduler-operator - -| openshift-kube-scheduler -| openshift-kube-scheduler - -| openshift-kube-storage-version-migrator-operator -| kube-storage-version-migrator-operator - -| openshift-kube-storage-version-migrator -| migrator - -| openshift-machine-api -| cluster-autoscaler-operator - -| openshift-machine-api -| cluster-baremetal-operator - -| openshift-machine-api -| machine-api-operator - -| openshift-machine-config-operator -| machine-config-controller - -| openshift-machine-config-operator -| machine-config-daemon - -| openshift-marketplace -| certified-operators - -| openshift-machine-config-operator -| machine-config-operator - -| openshift-machine-config-operator -| machine-config-server - -| openshift-marketplace -| community-operators - -| openshift-marketplace -| marketplace-operator - -| openshift-marketplace -| redhat-marketplace - -| openshift-marketplace -| redhat-operators - -| openshift-monitoring -| alertmanager-main - -| openshift-monitoring -| cluster-monitoring-operator - -| openshift-monitoring -| grafana - -| openshift-monitoring -| kube-state-metrics - -| openshift-monitoring -| node-exporter - -| openshift-monitoring -| openshift-state-metrics - -| openshift-monitoring -| prometheus-adapter - -| openshift-monitoring -| prometheus-adapter - -| openshift-monitoring -| prometheus-k8s - -| openshift-monitoring -| 
prometheus-operator - -| openshift-monitoring -| telemeter-client - -| openshift-monitoring -| thanos-querier - -| openshift-multus -| multus-admission-controller - -| openshift-multus -| multus - -| openshift-multus -| network-metrics-daemon - -| openshift-multus -| multus-additional-cni-plugins - -| openshift-network-diagnostics -| network-check-source - -| openshift-network-diagnostics -| network-check-target - -| openshift-network-operator -| network-operator - -| openshift-oauth-apiserver -| apiserver - -| openshift-operator-lifecycle-manager -| catalog-operator - -| openshift-operator-lifecycle-manager -| olm-operator - -| openshift-operator-lifecycle-manager -| packageserver - -| openshift-operator-lifecycle-manager -| packageserver - -| openshift-ovn-kubernetes -| ovnkube-master - -| openshift-ovn-kubernetes -| ovnkube-node - -| openshift-ovn-kubernetes -| ovs-node - -| openshift-service-ca-operator -| service-ca-operator - -| openshift-service-ca -| service-ca -|=== - -== Day 2 Operators - -[cols="1,1"] -|=== -| Namespace | Pod - -| openshift-ptp -| ptp-operator - -| openshift-ptp -| linuxptp-daemon - -| openshift-performance-addon-operator -| performance-operator - -| openshift-sriov-network-operator -| network-resources-injector - -| openshift-sriov-network-operator -| operator-webhook - -| openshift-sriov-network-operator -| sriov-cni - -| openshift-sriov-network-operator -| sriov-device-plugin - -| openshift-sriov-network-operator -| sriov-network-config-daemon - -| openshift-sriov-network-operator -| sriov-network-operator - -| local-storage -| local-disks-local-diskmaker - -| local-storage -| local-disks-local-provisioner - -| local-storage -| local-storage-operator - -| openshift-logging -| cluster-logging-operator - -| openshift-logging -| fluentd -|=== - - -== ACM pods - -[cols="1,1"] -|=== -| Namespace | Pod - -| open-cluster-management-agent-addon -| klusterlet-addon-appmgr - -| open-cluster-management-agent-addon -| klusterlet-addon-certpolicyctrl - -| open-cluster-management-agent-addon -| klusterlet-addon-iampolicyctrl - -| open-cluster-management-agent-addon -| klusterlet-addon-operator - -| open-cluster-management-agent-addon -| klusterlet-addon-policyctrl-config-policy - -| open-cluster-management-agent-addon -| klusterlet-addon-policyctrl-framework - -| open-cluster-management-agent-addon -| klusterlet-addon-search - -| open-cluster-management-agent-addon -| klusterlet-addon-workmgr - -| open-cluster-management-agent -| klusterlet - -| open-cluster-management-agent -| klusterlet-registration-agent - -| open-cluster-management-agent -| klusterlet-work-agent -|=== diff --git a/modules/cnf-fast-event-notifications-api-refererence.adoc b/modules/cnf-fast-event-notifications-api-refererence.adoc deleted file mode 100644 index 2b2357e626cb..000000000000 --- a/modules/cnf-fast-event-notifications-api-refererence.adoc +++ /dev/null @@ -1,405 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -[id="cnf-fast-event-notifications-api-refererence_{context}"] -= Subscribing DU applications to PTP events REST API reference - -Use the PTP event notifications REST API to subscribe a distributed unit (DU) application to the PTP events that are generated on the parent node. - -Subscribe applications to PTP events by using the resource address `/cluster/node//ptp`, where `` is the cluster node running the DU application. 
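As a quick orientation, the following hedged example shows how a consumer might exercise the API from inside the DU application pod with `curl`. It assumes the default sidecar address described below (`localhost:8089`), the payload simply mirrors the example payload in the reference that follows, and the node name `compute-1.example.com` is illustrative:

[source,terminal]
----
$ curl http://localhost:8089/api/ocloudNotifications/v1/health
$ curl -X POST http://localhost:8089/api/ocloudNotifications/v1/subscriptions \
  -H "Content-Type: application/json" \
  -d '{"uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", "resource": "/cluster/node/compute-1.example.com/ptp"}'
----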
- -Deploy your `cloud-event-consumer` DU application container and `cloud-event-proxy` sidecar container in a separate DU application pod. The `cloud-event-consumer` DU application subscribes to the `cloud-event-proxy` container in the application pod. - -Use the following API endpoints to subscribe the `cloud-event-consumer` DU application to PTP events posted by the `cloud-event-proxy` container at [x-]`http://localhost:8089/api/ocloudNotifications/v1/` in the DU application pod: - -* `/api/ocloudNotifications/v1/subscriptions` -- `POST`: Creates a new subscription -- `GET`: Retrieves a list of subscriptions - -* `/api/ocloudNotifications/v1/subscriptions/` -- `GET`: Returns details for the specified subscription ID - -* `api/ocloudNotifications/v1/subscriptions/status/` -- `PUT`: Creates a new status ping request for the specified subscription ID - -* `/api/ocloudNotifications/v1/health` -- `GET`: Returns the health status of `ocloudNotifications` API - -* `api/ocloudNotifications/v1/publishers` -- `GET`: Returns an array of `os-clock-sync-state`, `ptp-clock-class-change`, and `lock-state` messages for the cluster node - -* `/api/ocloudnotifications/v1//CurrentState` -- `GET`: Returns the current state of one the following event types: `os-clock-sync-state`, `ptp-clock-class-change`, or `lock-state` events - -[NOTE] -==== -`9089` is the default port for the `cloud-event-consumer` container deployed in the application pod. You can configure a different port for your DU application as required. -==== - -== api/ocloudNotifications/v1/subscriptions - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Returns a list of subscriptions. If subscriptions exist, a `200 OK` status code is returned along with the list of subscriptions. - -.Example API response -[source,json] ----- -[ - { - "id": "75b1ad8f-c807-4c23-acf5-56f4b7ee3826", - "endpointUri": "http://localhost:9089/event", - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions/75b1ad8f-c807-4c23-acf5-56f4b7ee3826", - "resource": "/cluster/node/compute-1.example.com/ptp" - } -] ----- - -[discrete] -=== HTTP method - -`POST api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Creates a new subscription. If a subscription is successfully created, or if it already exists, a `201 Created` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| subscription -| data -|=== - -.Example payload -[source,json] ----- -{ - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", - "resource": "/cluster/node/compute-1.example.com/ptp" -} ----- - -== api/ocloudNotifications/v1/subscriptions/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions/` - -[discrete] -==== Description - -Returns details for the subscription with ID `` - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{ - "id":"48210fb3-45be-4ce0-aa9b-41a0e58730ab", - "endpointUri": "http://localhost:9089/event", - "uriLocation":"http://localhost:8089/api/ocloudNotifications/v1/subscriptions/48210fb3-45be-4ce0-aa9b-41a0e58730ab", - "resource":"/cluster/node/compute-1.example.com/ptp" -} ----- - -== api/ocloudNotifications/v1/subscriptions/status/ - -[discrete] -=== HTTP method - -`PUT api/ocloudNotifications/v1/subscriptions/status/` - -[discrete] -==== Description - -Creates a new status ping request for subscription with ID ``. 
If a subscription is present, the status request is successful and a `202 Accepted` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{"status":"ping sent"} ----- - -== api/ocloudNotifications/v1/health/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/health/` - -[discrete] -==== Description - -Returns the health status for the `ocloudNotifications` REST API. - -.Example API response -[source,terminal] ----- -OK ----- - -== api/ocloudNotifications/v1/publishers - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/publishers` - -[discrete] -==== Description - -Returns an array of `os-clock-sync-state`, `ptp-clock-class-change`, and `lock-state` details for the cluster node. The system generates notifications when the relevant equipment state changes. - -* `os-clock-sync-state` notifications describe the host operating system clock synchronization state. Can be in `LOCKED` or `FREERUN` state. -* `ptp-clock-class-change` notifications describe the current state of the PTP clock class. -* `lock-state` notifications describe the current status of the PTP equipment lock state. Can be in `LOCKED`, `HOLDOVER` or `FREERUN` state. - -.Example API response -[source,json] ----- -[ - { - "id": "0fa415ae-a3cf-4299-876a-589438bacf75", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/0fa415ae-a3cf-4299-876a-589438bacf75", - "resource": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state" - }, - { - "id": "28cd82df-8436-4f50-bbd9-7a9742828a71", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/28cd82df-8436-4f50-bbd9-7a9742828a71", - "resource": "/cluster/node/compute-1.example.com/sync/ptp-status/ptp-clock-class-change" - }, - { - "id": "44aa480d-7347-48b0-a5b0-e0af01fa9677", - "endpointUri": "http://localhost:9085/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:9085/api/ocloudNotifications/v1/publishers/44aa480d-7347-48b0-a5b0-e0af01fa9677", - "resource": "/cluster/node/compute-1.example.com/sync/ptp-status/lock-state" - } -] ----- - -You can find `os-clock-sync-state`, `ptp-clock-class-change` and `lock-state` events in the logs for the `cloud-event-proxy` container. 
For example: - -[source,terminal] ----- -$ oc logs -f linuxptp-daemon-cvgr6 -n openshift-ptp -c cloud-event-proxy ----- - -.Example os-clock-sync-state event -[source,json] ----- -{ - "id":"c8a784d1-5f4a-4c16-9a81-a3b4313affe5", - "type":"event.sync.sync-status.os-clock-sync-state-change", - "source":"/cluster/compute-1.example.com/ptp/CLOCK_REALTIME", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.906277159Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/sync-status/os-clock-sync-state", - "dataType":"notification", - "valueType":"enumeration", - "value":"LOCKED" - }, - { - "resource":"/sync/sync-status/os-clock-sync-state", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"-53" - } - ] - } -} ----- - -.Example ptp-clock-class-change event -[source,json] ----- -{ - "id":"69eddb52-1650-4e56-b325-86d44688d02b", - "type":"event.sync.ptp-status.ptp-clock-class-change", - "source":"/cluster/compute-1.example.com/ptp/ens2fx/master", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.147100033Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/ptp-status/ptp-clock-class-change", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"135" - } - ] - } -} ----- - -.Example lock-state event -[source,json] ----- -{ - "id":"305ec18b-1472-47b3-aadd-8f37933249a9", - "type":"event.sync.ptp-status.ptp-state-change", - "source":"/cluster/compute-1.example.com/ptp/ens2fx/master", - "dataContentType":"application/json", - "time":"2022-05-06T15:31:23.467684081Z", - "data":{ - "version":"v1", - "values":[ - { - "resource":"/sync/ptp-status/lock-state", - "dataType":"notification", - "valueType":"enumeration", - "value":"LOCKED" - }, - { - "resource":"/sync/ptp-status/lock-state", - "dataType":"metric", - "valueType":"decimal64.3", - "value":"62" - } - ] - } -} ----- - -== /api/ocloudnotifications/v1//CurrentState - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/cluster/node//sync/ptp-status/lock-state/CurrentState` - -`GET api/ocloudNotifications/v1/cluster/node//sync/sync-status/os-clock-sync-state/CurrentState` - -`GET api/ocloudNotifications/v1/cluster/node//sync/ptp-status/ptp-clock-class-change/CurrentState` - -[discrete] -==== Description - -Configure the `CurrentState` API endpoint to return the current state of the `os-clock-sync-state`, `ptp-clock-class-change`, or `lock-state` events for the cluster node. - -* `os-clock-sync-state` notifications describe the host operating system clock synchronization state. Can be in `LOCKED` or `FREERUN` state. -* `ptp-clock-class-change` notifications describe the current state of the PTP clock class. -* `lock-state` notifications describe the current status of the PTP equipment lock state. Can be in `LOCKED`, `HOLDOVER` or `FREERUN` state. 
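For example, a hedged spot check of the current `lock-state` for a node, using the illustrative node name that appears in the example responses:

[source,terminal]
----
$ curl http://localhost:8089/api/ocloudNotifications/v1/cluster/node/compute-1.example.com/sync/ptp-status/lock-state/CurrentState
----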
- -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example lock-state API response -[source,json] ----- -{ - "id": "c1ac3aa5-1195-4786-84f8-da0ea4462921", - "type": "event.sync.ptp-status.ptp-state-change", - "source": "/cluster/node/compute-1.example.com/sync/ptp-status/lock-state", - "dataContentType": "application/json", - "time": "2023-01-10T02:41:57.094981478Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "notification", - "valueType": "enumeration", - "value": "LOCKED" - }, - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "29" - } - ] - } -} ----- - -.Example os-clock-sync-state API response -[source,json] ----- -{ - "specversion": "0.3", - "id": "4f51fe99-feaa-4e66-9112-66c5c9b9afcb", - "source": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state", - "type": "event.sync.sync-status.os-clock-sync-state-change", - "subject": "/cluster/node/compute-1.example.com/sync/sync-status/os-clock-sync-state", - "datacontenttype": "application/json", - "time": "2022-11-29T17:44:22.202Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/CLOCK_REALTIME", - "dataType": "notification", - "valueType": "enumeration", - "value": "LOCKED" - }, - { - "resource": "/cluster/node/compute-1.example.com/CLOCK_REALTIME", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "27" - } - ] - } -} ----- - -.Example ptp-clock-class-change API response -[source,json] ----- -{ - "id": "064c9e67-5ad4-4afb-98ff-189c6aa9c205", - "type": "event.sync.ptp-status.ptp-clock-class-change", - "source": "/cluster/node/compute-1.example.com/sync/ptp-status/ptp-clock-class-change", - "dataContentType": "application/json", - "time": "2023-01-10T02:41:56.785673989Z", - "data": { - "version": "v1", - "values": [ - { - "resource": "/cluster/node/compute-1.example.com/ens5fx/master", - "dataType": "metric", - "valueType": "decimal64.3", - "value": "165" - } - ] - } -} ----- diff --git a/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc b/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc deleted file mode 100644 index 14ecc33bce7b..000000000000 --- a/modules/cnf-gathering-data-about-cluster-using-must-gather.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="gathering-data-about-your-cluster-using-must-gather_{context}"] -= Gathering data about your cluster using the must-gather command - -The Performance Profile Creator (PPC) tool requires `must-gather` data. As a cluster administrator, run the `must-gather` command to capture information about your cluster. - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. However, you must still use the `performance-addon-operator-must-gather` image when running the `must-gather` command. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Access to the Performance Addon Operator `must gather` image. -* The OpenShift CLI (`oc`) installed. - -.Procedure - -. 
Optional: Verify that a matching machine config pool exists with a label: -+ -[source,terminal] ----- -$ oc describe mcp/worker-rt ----- -+ -.Example output -[source,terminal] ----- -Name: worker-rt -Namespace: -Labels: machineconfiguration.openshift.io/role=worker-rt ----- - -. If a matching label does not exist add a label for a machine config pool (MCP) that matches with the MCP name: -+ -[source,terminal] ----- -$ oc label mcp ="" ----- - -. Navigate to the directory where you want to store the `must-gather` data. - -. Run `must-gather` on your cluster: -+ -[source,terminal] ----- -$ oc adm must-gather --image= --dest-dir= ----- -+ -[NOTE] -==== -The `must-gather` command must be run with the `performance-addon-operator-must-gather` image. The output can optionally be compressed. Compressed output is required if you are running the Performance Profile Creator wrapper script. -==== -+ -.Example -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image=registry.redhat.io/openshift4/performance-addon-operator-must-gather-rhel8:v{product-version} --dest-dir=/must-gather ----- -. Create a compressed file from the `must-gather` directory: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather/ ----- diff --git a/modules/cnf-how-run-podman-to-create-profile.adoc b/modules/cnf-how-run-podman-to-create-profile.adoc deleted file mode 100644 index ede62cb5a310..000000000000 --- a/modules/cnf-how-run-podman-to-create-profile.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -[id="how-to-run-podman-to-create-a-profile_{context}"] -= How to run `podman` to create a performance profile - -The following example illustrates how to run `podman` to create a performance profile with 20 reserved CPUs that are to be split across the NUMA nodes. - -Node hardware configuration: - -* 80 CPUs -* Hyperthreading enabled -* Two NUMA nodes -* Even numbered CPUs run on NUMA node 0 and odd numbered CPUs run on NUMA node 1 - -Run `podman` to create the performance profile: - -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true --split-reserved-cpus-across-numa=true --must-gather-dir-path /must-gather > my-performance-profile.yaml ----- - -The created profile is described in the following YAML: - -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: performance - spec: - cpu: - isolated: 10-39,50-79 - reserved: 0-9,40-49 - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true ----- - -[NOTE] -==== -In this case, 10 CPUs are reserved on NUMA node 0 and 10 are reserved on NUMA node 1. 
-==== diff --git a/modules/cnf-installing-amq-interconnect-messaging-bus.adoc b/modules/cnf-installing-amq-interconnect-messaging-bus.adoc deleted file mode 100644 index f5770addccb1..000000000000 --- a/modules/cnf-installing-amq-interconnect-messaging-bus.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-amq-interconnect-messaging-bus_{context}"] -= Installing the AMQ messaging bus - -To pass PTP fast event notifications between publisher and subscriber on a node, you can install and configure an AMQ messaging bus to run locally on the node. -To use AMQ messaging, you must install the AMQ Interconnect Operator. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -* Install the AMQ Interconnect Operator to its own `amq-interconnect` namespace. See link:https://access.redhat.com/documentation/en-us/red_hat_amq/2021.q1/html/deploying_amq_interconnect_on_openshift/adding-operator-router-ocp[Adding the Red Hat Integration - AMQ Interconnect Operator]. - -.Verification - -. Check that the AMQ Interconnect Operator is available and the required pods are running: -+ -[source,terminal] ----- -$ oc get pods -n amq-interconnect ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -amq-interconnect-645db76c76-k8ghs 1/1 Running 0 23h -interconnect-operator-5cb5fc7cc-4v7qm 1/1 Running 0 23h ----- - -. Check that the required `linuxptp-daemon` PTP event producer pods are running in the `openshift-ptp` namespace. -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -linuxptp-daemon-2t78p 3/3 Running 0 12h -linuxptp-daemon-k8n88 3/3 Running 0 12h ----- diff --git a/modules/cnf-installing-numa-resources-operator-cli.adoc b/modules/cnf-installing-numa-resources-operator-cli.adoc deleted file mode 100644 index c58047bcd178..000000000000 --- a/modules/cnf-installing-numa-resources-operator-cli.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-numa-resources-operator-cli_{context}"] -= Installing the NUMA Resources Operator using the CLI - -As a cluster administrator, you can install the Operator using the CLI. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-numaresources ----- - -.. Create the `Namespace` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-namespace.yaml ----- - -. Create the Operator group for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-operatorgroup.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: numaresources-operator - namespace: openshift-numaresources -spec: - targetNamespaces: - - openshift-numaresources ----- - -.. Create the `OperatorGroup` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-operatorgroup.yaml ----- - -. 
Create the subscription for the NUMA Resources Operator: - -.. Save the following YAML in the `nro-sub.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1 -kind: Subscription -metadata: - name: numaresources-operator - namespace: openshift-numaresources -spec: - channel: "{product-version}" - name: numaresources-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the `Subscription` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-sub.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource in the `openshift-numaresources` namespace. Run the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-numaresources ----- -+ -.Example output - -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -numaresources-operator.v{product-version}.2 numaresources-operator {product-version}.2 Succeeded ----- diff --git a/modules/cnf-installing-numa-resources-operator-console.adoc b/modules/cnf-installing-numa-resources-operator-console.adoc deleted file mode 100644 index b793ee53dc91..000000000000 --- a/modules/cnf-installing-numa-resources-operator-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-numa-resources-operator-console_{context}"] -= Installing the NUMA Resources Operator using the web console - -As a cluster administrator, you can install the NUMA Resources Operator using the web console. - -.Procedure - -. Install the NUMA Resources Operator using the {product-title} web console: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *NUMA Resources Operator* from the list of available Operators, and then click *Install*. - -. Optional: Verify that the NUMA Resources Operator installed successfully: - -.. Switch to the *Operators* -> *Installed Operators* page. - -.. Ensure that *NUMA Resources Operator* is listed in the *default* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== -+ -If the Operator does not appear as installed, to troubleshoot further: -+ -* Go to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -* Go to the *Workloads* -> *Pods* page and check the logs for pods in the `default` project. diff --git a/modules/cnf-installing-the-operators.adoc b/modules/cnf-installing-the-operators.adoc deleted file mode 100644 index d0781bc6f2c1..000000000000 --- a/modules/cnf-installing-the-operators.adoc +++ /dev/null @@ -1,238 +0,0 @@ -// CNF-950 4.7 Installing the operators -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -:_content-type: PROCEDURE -[id="cnf-installing-the-operators_{context}"] -= Installing the Operators - -[id="cnf-installing-the-performnce-addon-operator_{context}"] -== Installing the Performance Addon Operator - -Install the Performance Addon Operator using the {product-title} CLI. - -.Procedure - -. 
Create the Performance Addon Operator namespace: -+ -[source,terminal] ----- -cat <" <1> - name: performance-addon-operator - source: redhat-operators <2> - sourceNamespace: openshift-marketplace -EOF ----- -<1> Specify the value you obtained in the previous step for the `status.defaultChannel` parameter. -<2> You must specify the `redhat-operators` value. - -[id="cnf-installing-the-precision-time-protocol-operator_{context}"] -== Installing the Precision Time Protocol (PTP) Operator - -Install the PTP Operator using the {product-title} CLI or the web console. - -.Procedure - -. Apply the Operator namespace: -+ -[source,terminal] ----- -cat <` variable to the command specifying an appropriate label. -==== diff --git a/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc b/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc deleted file mode 100644 index 898d72bd4f1e..000000000000 --- a/modules/cnf-logging-associated-with-adjusting-nic-queues.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="logging-associated-with-adjusting-nic-queues_{context}"] -= Logging associated with adjusting NIC queues - -Log messages detailing the assigned devices are recorded in the respective Tuned daemon logs. The following messages might be recorded to the `/var/log/tuned/tuned.log` file: - -* An `INFO` message is recorded detailing the successfully assigned devices: -+ -[source, terminal] ----- -INFO tuned.plugins.base: instance net_test (net): assigning devices ens1, ens2, ens3 ----- -* A `WARNING` message is recorded if none of the devices can be assigned: -+ -[source, terminal] ----- -WARNING tuned.plugins.base: instance net_test: no matching devices available ----- diff --git a/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc b/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc deleted file mode 100644 index aecf8fdeee1e..000000000000 --- a/modules/cnf-managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// CNF-802 Infrastructure-provided interrupt processing for guaranteed pod CPUs -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -[id="managing-device-interrupt-processing-for-guaranteed-pod-isolated-cpus_{context}"] -= Managing device interrupt processing for guaranteed pod isolated CPUs - -The Node Tuning Operator can manage host CPUs by dividing them into reserved CPUs for cluster and operating system housekeeping duties, including pod infra containers, and isolated CPUs for application containers to run the workloads. This allows you to set CPUs for low latency workloads as isolated. - -Device interrupts are load balanced between all isolated and reserved CPUs to avoid CPUs being overloaded, with the exception of CPUs where there is a guaranteed pod running. Guaranteed pod CPUs are prevented from processing device interrupts when the relevant annotations are set for the pod. - -In the performance profile, `globallyDisableIrqLoadBalancing` is used to manage whether device interrupts are processed or not. For certain workloads, the reserved CPUs are not always sufficient for dealing with device interrupts, and for this reason, device interrupts are not globally disabled on the isolated CPUs. By default, Node Tuning Operator does not disable device interrupts on isolated CPUs. 
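To confirm which CPUs are currently permitted to service device interrupts on a node, one hedged approach is to read the default IRQ affinity mask from a debug shell; the node name is illustrative:

[source,terminal]
----
$ oc debug node/worker-0 -- chroot /host cat /proc/irq/default_smp_affinity
----

The value is a hexadecimal CPU mask; bits that are set correspond to CPUs that the kernel may use for default IRQ placement.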
- -To achieve low latency for workloads, some (but not all) pods require the CPUs they are running on to not process device interrupts. A pod annotation, `irq-load-balancing.crio.io`, is used to define whether device interrupts are processed or not. When configured, CRI-O disables device interrupts only as long as the pod is running. - -[id="disabling-cpu-cfs-quota_{context}"] -== Disabling CPU CFS quota - -To reduce CPU throttling for individual guaranteed pods, create a pod specification with the annotation `cpu-quota.crio.io: "disable"`. This annotation disables the CPU completely fair scheduler (CFS) quota at the pod run time. The following pod specification contains this annotation: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - cpu-quota.crio.io: "disable" -spec: - runtimeClassName: performance- -... ----- - -[NOTE] -==== -Only disable CPU CFS quota when the CPU manager static policy is enabled and for pods with guaranteed QoS that use whole CPUs. Otherwise, disabling CPU CFS quota can affect the performance of other containers in the cluster. -==== - -[id="configuring-global-device-interrupts-handling-for-isolated-cpus_{context}"] -== Disabling global device interrupts handling in Node Tuning Operator - -To configure Node Tuning Operator to disable global device interrupts for the isolated CPU set, set the `globallyDisableIrqLoadBalancing` field in the performance profile to `true`. When `true`, conflicting pod annotations are ignored. When `false`, IRQ loads are balanced across all CPUs. - -A performance profile snippet illustrates this setting: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - globallyDisableIrqLoadBalancing: true -... ----- - -[id="disabling_interrupt_processing_for_individual_pods_{context}"] -== Disabling interrupt processing for individual pods - -To disable interrupt processing for individual pods, ensure that `globallyDisableIrqLoadBalancing` is set to `false` in the performance profile. Then, in the pod specification, set the `irq-load-balancing.crio.io` pod annotation to `disable`. The following pod specification contains this annotation: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: Pod -metadata: - annotations: - irq-load-balancing.crio.io: "disable" -spec: - runtimeClassName: performance- -... ----- diff --git a/modules/cnf-measuring-latency.adoc b/modules/cnf-measuring-latency.adoc deleted file mode 100644 index 67e08afa1c59..000000000000 --- a/modules/cnf-measuring-latency.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: CONCEPT -[id="cnf-measuring-latency_{context}"] -= Measuring latency - -The `cnf-tests` image uses three tools to measure the latency of the system: - -* `hwlatdetect` -* `cyclictest` -* `oslat` - -Each tool has a specific use. Use the tools in sequence to achieve reliable test results. - -hwlatdetect:: Measures the baseline that the bare-metal hardware can achieve. Before proceeding with the next latency test, ensure that the latency reported by `hwlatdetect` meets the required threshold because you cannot fix hardware latency spikes by operating system tuning. - -cyclictest:: Verifies the real-time kernel scheduler latency after `hwlatdetect` passes validation. 
The `cyclictest` tool schedules a repeated timer and measures the difference between the desired and the actual trigger times. The difference can uncover basic issues with the tuning caused by interrupts or process priorities. The tool must run on a real-time kernel. - -oslat:: Behaves similarly to a CPU-intensive DPDK application and measures all the interruptions and disruptions to the busy loop that simulates CPU heavy data processing. - -The tests introduce the following environment variables: - -.Latency test environment variables -[cols="1,3", options="header"] -|==== -|Environment variables -|Description - -|`LATENCY_TEST_DELAY` -|Specifies the amount of time in seconds after which the test starts running. You can use the variable to allow the CPU manager reconcile loop to update the default CPU pool. The default value is 0. - -|`LATENCY_TEST_CPUS` -|Specifies the number of CPUs that the pod running the latency tests uses. If you do not set the variable, the default configuration includes all isolated CPUs. - -|`LATENCY_TEST_RUNTIME` -|Specifies the amount of time in seconds that the latency test must run. The default value is 300 seconds. - -|`HWLATDETECT_MAXIMUM_LATENCY` -|Specifies the maximum acceptable hardware latency in microseconds for the workload and operating system. If you do not set the value of `HWLATDETECT_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool compares the default expected threshold (20μs) and the actual maximum latency in the tool itself. Then, the test fails or succeeds accordingly. - -|`CYCLICTEST_MAXIMUM_LATENCY` -|Specifies the maximum latency in microseconds that all threads expect before waking up during the `cyclictest` run. If you do not set the value of `CYCLICTEST_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool skips the comparison of the expected and the actual maximum latency. - -|`OSLAT_MAXIMUM_LATENCY` -|Specifies the maximum acceptable latency in microseconds for the `oslat` test results. If you do not set the value of `OSLAT_MAXIMUM_LATENCY` or `MAXIMUM_LATENCY`, the tool skips the comparison of the expected and the actual maximum latency. - -|`MAXIMUM_LATENCY` -|Unified variable that specifies the maximum acceptable latency in microseconds. Applicable for all available latency tools. - -|`LATENCY_TEST_RUN` -|Boolean parameter that indicates whether the tests should run. `LATENCY_TEST_RUN` is set to `false` by default. To run the latency tests, set this value to `true`. -|==== - -[NOTE] -==== -Variables that are specific to a latency tool take precedence over unified variables. For example, if `OSLAT_MAXIMUM_LATENCY` is set to 30 microseconds and `MAXIMUM_LATENCY` is set to 10 microseconds, the `oslat` test will run with maximum acceptable latency of 30 microseconds. -==== diff --git a/modules/cnf-migrating-from-amqp-to-http-transport.adoc b/modules/cnf-migrating-from-amqp-to-http-transport.adoc deleted file mode 100644 index bac9bbec9429..000000000000 --- a/modules/cnf-migrating-from-amqp-to-http-transport.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-migrating-from-amqp-to-http-transport_{context}"] -= Migrating consumer applications to use HTTP transport for PTP or bare-metal events - -If you have previously deployed PTP or bare-metal events consumer applications, you need to update the applications to use HTTP message transport. 
- -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -* You have updated the PTP Operator or {redfish-operator} to version 4.13+ which uses HTTP transport by default. - -* Configure dynamic volume provisioning in the cluster or manually create `StorageClass`, `LocalVolume`, and `PersistentVolume` resources to persist the events subscription. -+ -[NOTE] -==== -When dynamic volume provisioning is enabled, a `PersistentVolume` resource is automatically created for the `PersistentVolumeClaim` that the PTP Operator or {redfish-operator} deploys. -==== - -.Procedure - -. Update your events consumer application to use HTTP transport. -Set the `http-event-publishers` variable for the cloud event sidecar deployment. -+ -For example, in a cluster with PTP events configured, the following YAML snippet illustrates a cloud event sidecar deployment: -+ -[source,yaml] ----- -containers: - - name: cloud-event-sidecar - image: cloud-event-sidecar - args: - - "--metrics-addr=127.0.0.1:9091" - - "--store-path=/store" - - "--transport-host=consumer-events-subscription-service.cloud-events.svc.cluster.local:9043" - - "--http-event-publishers=ptp-event-publisher-service-NODE_NAME.openshift-ptp.svc.cluster.local:9043" <1> - - "--api-port=8089" ----- -<1> The PTP Operator automatically resolves `NODE_NAME` to the host that is generating the PTP events. -For example, `compute-1.example.com`. -+ -In a cluster with bare-metal events configured, set the `http-event-publishers` field to `hw-event-publisher-service.openshift-bare-metal-events.svc.cluster.local:9043` in the cloud event sidecar deployment CR. - -. Deploy the `consumer-events-subscription-service` service alongside the events consumer application. -For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/scrape: "true" - service.alpha.openshift.io/serving-cert-secret-name: sidecar-consumer-secret - name: consumer-events-subscription-service - namespace: cloud-events - labels: - app: consumer-service -spec: - ports: - - name: sub-port - port: 9043 - selector: - app: consumer - clusterIP: None - sessionAffinity: None - type: ClusterIP ----- diff --git a/modules/cnf-modifying-and-applying-the-default-profile.adoc b/modules/cnf-modifying-and-applying-the-default-profile.adoc deleted file mode 100644 index fcd6dd80a0e7..000000000000 --- a/modules/cnf-modifying-and-applying-the-default-profile.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// CNF-950 4.7 Modifying and applying the default profile -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-modifying-and-applying-the-default-profile_{context}"] -= Modifying and applying the default profile - -You can apply the profile manually or with the toolset of your choice, such as ArgoCD. - -[NOTE] -==== -This procedure applies the DU profile step-by-step. If the profile is pulled together into a single project and applied in one step, issues will occur between the MCO and -the SRIOV operators if an Intel NIC is used for networking traffic. To avoid a race condition between the MCO and the SRIOV Operators, it is recommended that the DU application be applied in three steps: - -. Apply the profile without SRIOV. -. Wait for the cluster to settle. -. Apply the SRIOV portion. 
-==== diff --git a/modules/cnf-monitoring-fast-events-metrics.adoc b/modules/cnf-monitoring-fast-events-metrics.adoc deleted file mode 100644 index 4a801789ce1d..000000000000 --- a/modules/cnf-monitoring-fast-events-metrics.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-monitoring-fast-events-metrics_{context}"] -= Monitoring PTP fast event metrics - -You can monitor PTP fast events metrics from cluster nodes where the `linuxptp-daemon` is running. -You can also monitor PTP fast event metrics in the {product-title} web console by using the pre-configured and self-updating Prometheus monitoring stack. - -.Prerequisites - -* Install the {product-title} CLI `oc`. - -* Log in as a user with `cluster-admin` privileges. - -* Install and configure the PTP Operator on a node with PTP-capable hardware. - -.Procedure - -. Check for exposed PTP metrics on any node where the `linuxptp-daemon` is running. For example, run the following command: -+ -[source,terminal] ----- -$ curl http://:9091/metrics ----- -+ -.Example output ----- -# HELP openshift_ptp_clock_state 0 = FREERUN, 1 = LOCKED, 2 = HOLDOVER -# TYPE openshift_ptp_clock_state gauge -openshift_ptp_clock_state{iface="ens1fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens3fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens5fx",node="compute-1.example.com",process="ptp4l"} 1 -openshift_ptp_clock_state{iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 1 -# HELP openshift_ptp_delay_ns -# TYPE openshift_ptp_delay_ns gauge -openshift_ptp_delay_ns{from="master",iface="ens1fx",node="compute-1.example.com",process="ptp4l"} 842 -openshift_ptp_delay_ns{from="master",iface="ens3fx",node="compute-1.example.com",process="ptp4l"} 480 -openshift_ptp_delay_ns{from="master",iface="ens5fx",node="compute-1.example.com",process="ptp4l"} 584 -openshift_ptp_delay_ns{from="master",iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 482 -openshift_ptp_delay_ns{from="phc",iface="CLOCK_REALTIME",node="compute-1.example.com",process="phc2sys"} 547 -# HELP openshift_ptp_offset_ns -# TYPE openshift_ptp_offset_ns gauge -openshift_ptp_offset_ns{from="master",iface="ens1fx",node="compute-1.example.com",process="ptp4l"} -2 -openshift_ptp_offset_ns{from="master",iface="ens3fx",node="compute-1.example.com",process="ptp4l"} -44 -openshift_ptp_offset_ns{from="master",iface="ens5fx",node="compute-1.example.com",process="ptp4l"} -8 -openshift_ptp_offset_ns{from="master",iface="ens7fx",node="compute-1.example.com",process="ptp4l"} 3 -openshift_ptp_offset_ns{from="phc",iface="CLOCK_REALTIME",node="compute-1.example.com",process="phc2sys"} 12 ----- - -. To view the PTP event in the {product-title} web console, copy the name of the PTP metric you want to query, for example, `openshift_ptp_offset_ns`. - -. In the {product-title} web console, click *Observe* -> *Metrics*. - -. Paste the PTP metric name into the *Expression* field, and click *Run queries*. 
diff --git a/modules/cnf-performance-profile-creator-arguments.adoc b/modules/cnf-performance-profile-creator-arguments.adoc deleted file mode 100644 index 24abb8322fbc..000000000000 --- a/modules/cnf-performance-profile-creator-arguments.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - - -[id="performance-profile-creator-arguments_{context}"] -= Performance Profile Creator arguments - -.Performance Profile Creator arguments -[cols="30%,70%",options="header"] -|=== -| Argument | Description - -| `disable-ht` -a|Disable hyperthreading. - -Possible values: `true` or `false`. - -Default: `false`. - -[WARNING] -==== -If this argument is set to `true` you should not disable hyperthreading in the BIOS. Disabling hyperthreading is accomplished with a kernel command line argument. -==== - -| `info` -a| This captures cluster information and is used in discovery mode only. Discovery mode also requires the `must-gather-dir-path` argument. If any other arguments are set they are ignored. - -Possible values: - -* `log` -* `JSON` - -+ -[NOTE] -==== -These options define the output format with the JSON format being reserved for debugging. -==== - -Default: `log`. - -| `mcp-name` -|MCP name for example `worker-cnf` corresponding to the target machines. This parameter is required. - -| `must-gather-dir-path` -| Must gather directory path. This parameter is required. - -When the user runs the tool with the wrapper script `must-gather` is supplied by the script itself and the user must not specify it. - -| `offlined-cpu-count` -a| Number of offlined CPUs. - -[NOTE] -==== -This must be a natural number greater than 0. If not enough logical processors are offlined then error messages are logged. The messages are: -[source,terminal] ----- -Error: failed to compute the reserved and isolated CPUs: please ensure that reserved-cpu-count plus offlined-cpu-count should be in the range [0,1] ----- -[source,terminal] ----- -Error: failed to compute the reserved and isolated CPUs: please specify the offlined CPU count in the range [0,1] ----- -==== - -| `power-consumption-mode` -a|The power consumption mode. - -Possible values: - -* `default`: CPU partitioning with enabled power management and basic low-latency. -* `low-latency`: Enhanced measures to improve latency figures. -* `ultra-low-latency`: Priority given to optimal latency, at the expense of power management. - -Default: `default`. - -| `per-pod-power-management` -a|Enable per-pod power management. You cannot use this argument if you configured `ultra-low-latency` as the power consumption mode. - -Possible values: `true` or `false`. - -Default: `false`. - -| `profile-name` -| Name of the performance profile to create. -Default: `performance`. - -| `reserved-cpu-count` -a| Number of reserved CPUs. This parameter is required. - -[NOTE] -==== -This must be a natural number. A value of 0 is not allowed. -==== - -| `rt-kernel` -| Enable real-time kernel. This parameter is required. - -Possible values: `true` or `false`. - -| `split-reserved-cpus-across-numa` -| Split the reserved CPUs across NUMA nodes. - -Possible values: `true` or `false`. - -Default: `false`. - -| `topology-manager-policy` -a| Kubelet Topology Manager policy of the performance profile to be created. - -Possible values: - -* `single-numa-node` -* `best-effort` -* `restricted` - -Default: `restricted`. 
- -| `user-level-networking` -| Run with user level networking (DPDK) enabled. - -Possible values: `true` or `false`. - -Default: `false`. -|=== \ No newline at end of file diff --git a/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc b/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc deleted file mode 100644 index 71c2b0eaac31..000000000000 --- a/modules/cnf-performing-end-to-end-tests-disconnected-mode.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-disconnected-mode_{context}"] -= Running latency tests in a disconnected cluster - -The CNF tests image can run tests in a disconnected cluster that is not able to reach external registries. This requires two steps: - -. Mirroring the `cnf-tests` image to the custom disconnected registry. - -. Instructing the tests to consume the images from the custom disconnected registry. - -[discrete] -[id="cnf-performing-end-to-end-tests-mirroring-images-to-custom-registry_{context}"] -== Mirroring the images to a custom registry accessible from the cluster - -A `mirror` executable is shipped in the image to provide the input required by `oc` to mirror the test image to a local registry. - -. Run this command from an intermediate machine that has access to the cluster and link:https://catalog.redhat.com/software/containers/explore[registry.redhat.io]: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/mirror -registry | oc image mirror -f - ----- -+ -where: -+ --- - :: Is the disconnected mirror registry you have configured, for example, `my.local.registry:5000/`. --- - -. When you have mirrored the `cnf-tests` image into the disconnected registry, you must override the original registry used to fetch the images when running the tests, for example: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e IMAGE_REGISTRY="" \ --e CNF_TESTS_IMAGE="cnf-tests-rhel8:v{product-version}" \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- - -[discrete] -[id="cnf-performing-end-to-end-tests-image-parameters_{context}"] -== Configuring the tests to consume images from a custom registry - -You can run the latency tests using a custom test image and image registry using `CNF_TESTS_IMAGE` and `IMAGE_REGISTRY` variables. - -* To configure the latency tests to use a custom test image and image registry, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e IMAGE_REGISTRY="" \ --e CNF_TESTS_IMAGE="" \ --e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} /usr/bin/test-run.sh ----- -+ -where: -+ --- - :: is the custom image registry, for example, `custom.registry:5000/`. - :: is the custom cnf-tests image, for example, `custom-cnf-tests-image:latest`. 
--- - -[discrete] -[id="cnf-performing-end-to-end-tests-mirroring-to-cluster-internal-registry_{context}"] -== Mirroring images to the cluster {product-registry} - -{product-title} provides a built-in container image registry, which runs as a standard workload on the cluster. - -.Procedure - -. Gain external access to the registry by exposing it with a route: -+ -[source,terminal] ---- -$ oc patch configs.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"defaultRoute":true}}' --type=merge ---- - -. Fetch the registry endpoint by running the following command: -+ -[source,terminal] ---- -$ REGISTRY=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') ---- - -. Create a namespace for exposing the images: -+ -[source,terminal] ---- -$ oc create ns cnftests ---- - -. Make the image stream available to all the namespaces used for tests. This is required to allow the test namespaces to fetch the images from the `cnf-tests` image stream. Run the following commands: -+ -[source,terminal] ---- -$ oc policy add-role-to-user system:image-puller system:serviceaccount:cnf-features-testing:default --namespace=cnftests ---- -+ -[source,terminal] ---- -$ oc policy add-role-to-user system:image-puller system:serviceaccount:performance-addon-operators-testing:default --namespace=cnftests ---- - -. Retrieve the docker secret name and auth token by running the following commands: -+ -[source,terminal] ---- -$ SECRET=$(oc -n cnftests get secret | grep builder-docker | awk {'print $1'}) ---- -+ -[source,terminal] ---- -$ TOKEN=$(oc -n cnftests get secret $SECRET -o jsonpath="{.data['\.dockercfg']}" | base64 --decode | jq '.["image-registry.openshift-image-registry.svc:5000"].auth') ---- - -. Create a `dockerauth.json` file, for example: -+ -[source,bash] ---- -$ echo "{\"auths\": { \"$REGISTRY\": { \"auth\": $TOKEN } }}" > dockerauth.json ---- - -. Mirror the images: -+ -[source,terminal,subs="attributes+"] ---- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/mirror -registry $REGISTRY/cnftests | oc image mirror --insecure=true \ --a=$(pwd)/dockerauth.json -f - ---- - -. Run the tests: -+ -[source,terminal,subs="attributes+"] ---- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e IMAGE_REGISTRY=image-registry.openshift-image-registry.svc:5000/cnftests \ -cnf-tests-local:latest /usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ---- - -[discrete] -[id="mirroring-different-set-of-images_{context}"] -== Mirroring a different set of test images - -You can optionally change the default upstream images that are mirrored for the latency tests. - -.Procedure - -. The `mirror` command tries to mirror the upstream images by default. This can be overridden by passing a file with the following format to the image: -+ - [source,json,subs="attributes+"] ---- -[ - { - "registry": "public.registry.io:5000", - "image": "imageforcnftests:{product-version}" - } -] ---- - -. Pass the file to the `mirror` command, for example by saving it locally as `images.json`. With the following command, the local path is mounted in `/kubeconfig` inside the container so that the file can be passed to the `mirror` command.
-+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} /usr/bin/mirror \ ---registry "my.local.registry:5000/" --images "/kubeconfig/images.json" \ -| oc image mirror -f - ----- diff --git a/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc b/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc deleted file mode 100644 index be2ad8f1b5e1..000000000000 --- a/modules/cnf-performing-end-to-end-tests-junit-test-output.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-junit-test-output_{context}"] -= Generating a JUnit latency test report - -Use the following procedures to generate a JUnit latency test output and test failure report. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* Create a JUnit-compliant XML report by passing the `--junit` parameter together with the path to where the report is dumped: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -v $(pwd)/junitdest: \ --e KUBECONFIG=/kubeconfig/kubeconfig -e DISCOVERY_MODE=true -e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh --junit \ --ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the path to the folder where the junit report is generated --- diff --git a/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc b/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc deleted file mode 100644 index 608533e13786..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-cyclictest.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-cyclictest_{context}"] -= Running cyclictest - -The `cyclictest` tool measures the real-time kernel scheduler latency on the specified CPUs. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. - -* You have installed the real-time kernel in the cluster. - -* You have applied a cluster performance profile by using Node Tuning Operator. 
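For orientation, the last prerequisite refers to a performance profile applied through the Node Tuning Operator. A minimal sketch is shown below; the CPU ranges and node selector are assumptions and must match your own hardware and machine config pool:

[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: performance
spec:
  cpu:
    isolated: "2-15"   # CPUs dedicated to latency-sensitive workloads (illustrative)
    reserved: "0-1"    # CPUs kept for housekeeping tasks (illustrative)
  realTimeKernel:
    enabled: true      # required because cyclictest must run on a real-time kernel
  nodeSelector:
    node-role.kubernetes.io/worker-cnf: ""
----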
- -.Procedure - -* To perform the `cyclictest`, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_CPUS=10 -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="cyclictest" ----- -+ -The command runs the `cyclictest` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (in this example, 20 μs). Latency spikes of 20 μs and above are generally not acceptable for telco RAN workloads. -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. -==== -+ -.Example failure output -[source,terminal,subs="attributes+"] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=cyclictest -I0908 13:01:59.193776 27 request.go:601] Waited for 1.046228824s due to client-side throttling, not priority and fairness, request: GET:https://api.compute-1.example.com:6443/apis/packages.operators.coreos.com/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662642118 -Will run 1 of 194 specs - -[...] - -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the cyclictest image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:220 - -Ran 1 of 194 Specs in 161.151 seconds -FAIL! -- 0 Passed | 1 Failed | 0 Pending | 193 Skipped ---- FAIL: TestTest (161.48s) -FAIL ----- - -[discrete] -[id="cnf-performing-end-to-end-tests-example-results-cyclictest_{context}"] -== Example cyclictest results - -The same output can indicate different results for different workloads. For example, spikes up to 18μs are acceptable for 4G DU workloads, but not for 5G DU workloads. - -.Example of good results -[source, terminal] ----- -running cmd: cyclictest -q -D 10m -p 1 -t 16 -a 2,4,6,8,10,12,14,16,54,56,58,60,62,64,66,68 -h 30 -i 1000 -m -# Histogram -000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000001 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000002 579506 535967 418614 573648 532870 529897 489306 558076 582350 585188 583793 223781 532480 569130 472250 576043 -More histogram entries ... 
-# Total: 000600000 000600000 000600000 000599999 000599999 000599999 000599998 000599998 000599998 000599997 000599997 000599996 000599996 000599995 000599995 000599995 -# Min Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Avg Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Max Latencies: 00005 00005 00004 00005 00004 00004 00005 00005 00006 00005 00004 00005 00004 00004 00005 00004 -# Histogram Overflows: 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 00000 -# Histogram Overflow at cycle number: -# Thread 0: -# Thread 1: -# Thread 2: -# Thread 3: -# Thread 4: -# Thread 5: -# Thread 6: -# Thread 7: -# Thread 8: -# Thread 9: -# Thread 10: -# Thread 11: -# Thread 12: -# Thread 13: -# Thread 14: -# Thread 15: ----- - -.Example of bad results -[source, terminal] ----- -running cmd: cyclictest -q -D 10m -p 1 -t 16 -a 2,4,6,8,10,12,14,16,54,56,58,60,62,64,66,68 -h 30 -i 1000 -m -# Histogram -000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000001 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 000000 -000002 564632 579686 354911 563036 492543 521983 515884 378266 592621 463547 482764 591976 590409 588145 589556 353518 -More histogram entries ... -# Total: 000599999 000599999 000599999 000599997 000599997 000599998 000599998 000599997 000599997 000599996 000599995 000599996 000599995 000599995 000599995 000599993 -# Min Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Avg Latencies: 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 00002 -# Max Latencies: 00493 00387 00271 00619 00541 00513 00009 00389 00252 00215 00539 00498 00363 00204 00068 00520 -# Histogram Overflows: 00001 00001 00001 00002 00002 00001 00000 00001 00001 00001 00002 00001 00001 00001 00001 00002 -# Histogram Overflow at cycle number: -# Thread 0: 155922 -# Thread 1: 110064 -# Thread 2: 110064 -# Thread 3: 110063 155921 -# Thread 4: 110063 155921 -# Thread 5: 155920 -# Thread 6: -# Thread 7: 110062 -# Thread 8: 110062 -# Thread 9: 155919 -# Thread 10: 110061 155919 -# Thread 11: 155918 -# Thread 12: 155918 -# Thread 13: 110060 -# Thread 14: 110060 -# Thread 15: 110059 155917 ----- diff --git a/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc b/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc deleted file mode 100644 index 110a0d1f281f..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-hwlatdetect.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: CONCEPT -[id="cnf-performing-end-to-end-tests-running-hwlatdetect_{context}"] -= Running hwlatdetect - -The `hwlatdetect` tool is available in the `rt-kernel` package with a regular subscription of {op-system-base-full} {op-system-version}. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. 
To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have installed the real-time kernel in the cluster. - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. - -.Procedure - -* To run the `hwlatdetect` tests, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="hwlatdetect" ----- -+ -The `hwlatdetect` test runs for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (20 μs). -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. -==== -+ -.Example failure output -[source,terminal] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=hwlatdetect -I0908 15:25:20.023712 27 request.go:601] Waited for 1.046586367s due to client-side throttling, not priority and fairness, request: GET:https://api.hlxcl6.lab.eng.tlv2.redhat.com:6443/apis/imageregistry.operator.openshift.io/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662650718 -Will run 1 of 194 specs - -[...] - -• Failure [283.574 seconds] -[performance] Latency Test -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:62 - with the hwlatdetect image - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:228 - should succeed [It] - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:236 - - Log file created at: 2022/09/08 15:25:27 - Running on machine: hwlatdetect-b6n4n - Binary: Built with gc go1.17.12 for linux/amd64 - Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg - I0908 15:25:27.160620 1 node.go:39] Environment information: /proc/cmdline: BOOT_IMAGE=(hd1,gpt3)/ostree/rhcos-c6491e1eedf6c1f12ef7b95e14ee720bf48359750ac900b7863c625769ef5fb9/vmlinuz-4.18.0-372.19.1.el8_6.x86_64 random.trust_cpu=on console=tty0 console=ttyS0,115200n8 ignition.platform.id=metal ostree=/ostree/boot.1/rhcos/c6491e1eedf6c1f12ef7b95e14ee720bf48359750ac900b7863c625769ef5fb9/0 ip=dhcp root=UUID=5f80c283-f6e6-4a27-9b47-a287157483b2 rw rootflags=prjquota boot=UUID=773bf59a-bafd-48fc-9a87-f62252d739d3 skew_tick=1 nohz=on rcu_nocbs=0-3 tuned.non_isolcpus=0000ffff,ffffffff,fffffff0 systemd.cpu_affinity=4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79 intel_iommu=on iommu=pt isolcpus=managed_irq,0-3 nohz_full=0-3 tsc=nowatchdog nosoftlockup nmi_watchdog=0 mce=off skew_tick=1 rcutree.kthread_prio=11 + + - I0908 15:25:27.160830 1 node.go:46] Environment information: kernel version 
4.18.0-372.19.1.el8_6.x86_64 - I0908 15:25:27.160857 1 main.go:50] running the hwlatdetect command with arguments [/usr/bin/hwlatdetect --threshold 1 --hardlimit 1 --duration 100 --window 10000000us --width 950000us] - F0908 15:27:10.603523 1 main.go:53] failed to run hwlatdetect command; out: hwlatdetect: test duration 100 seconds - detector: tracer - parameters: - Latency threshold: 1us <1> - Sample window: 10000000us - Sample width: 950000us - Non-sampling period: 9050000us - Output File: None - - Starting test - test finished - Max Latency: 326us <2> - Samples recorded: 5 - Samples exceeding threshold: 5 - ts: 1662650739.017274507, inner:6, outer:6 - ts: 1662650749.257272414, inner:14, outer:326 - ts: 1662650779.977272835, inner:314, outer:12 - ts: 1662650800.457272384, inner:3, outer:9 - ts: 1662650810.697273520, inner:3, outer:2 - -[...] - -JUnit report was created: /junit.xml/cnftests-junit.xml - - -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the hwlatdetect image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:476 - -Ran 1 of 194 Specs in 365.797 seconds -FAIL! -- 0 Passed | 1 Failed | 0 Pending | 193 Skipped ---- FAIL: TestTest (366.08s) -FAIL ----- -<1> You can configure the latency threshold by using the `MAXIMUM_LATENCY` or the `HWLATDETECT_MAXIMUM_LATENCY` environment variables. -<2> The maximum latency value measured during the test. - -[discrete] -[id="cnf-performing-end-to-end-tests-example-results-hwlatdetect_{context}"] -== Example hwlatdetect test results - -You can capture the following types of results: - -* Rough results that are gathered after each run to create a history of impact on any changes made throughout the test. - -* The combined set of the rough tests with the best results and configuration settings. - -.Example of good results -[source, terminal] ----- -hwlatdetect: test duration 3600 seconds -detector: tracer -parameters: -Latency threshold: 10us -Sample window: 1000000us -Sample width: 950000us -Non-sampling period: 50000us -Output File: None - -Starting test -test finished -Max Latency: Below threshold -Samples recorded: 0 ----- - -The `hwlatdetect` tool only provides output if the sample exceeds the specified threshold. - -.Example of bad results -[source, terminal] ----- -hwlatdetect: test duration 3600 seconds -detector: tracer -parameters:Latency threshold: 10usSample window: 1000000us -Sample width: 950000usNon-sampling period: 50000usOutput File: None - -Starting tests:1610542421.275784439, inner:78, outer:81 -ts: 1610542444.330561619, inner:27, outer:28 -ts: 1610542445.332549975, inner:39, outer:38 -ts: 1610542541.568546097, inner:47, outer:32 -ts: 1610542590.681548531, inner:13, outer:17 -ts: 1610543033.818801482, inner:29, outer:30 -ts: 1610543080.938801990, inner:90, outer:76 -ts: 1610543129.065549639, inner:28, outer:39 -ts: 1610543474.859552115, inner:28, outer:35 -ts: 1610543523.973856571, inner:52, outer:49 -ts: 1610543572.089799738, inner:27, outer:30 -ts: 1610543573.091550771, inner:34, outer:28 -ts: 1610543574.093555202, inner:116, outer:63 ----- - -The output of `hwlatdetect` shows that multiple samples exceed the threshold. 
However, the same output can indicate different results based on the following factors: - -* The duration of the test -* The number of CPU cores -* The host firmware settings - -[WARNING] -==== -Before proceeding with the next latency test, ensure that the latency reported by `hwlatdetect` meets the required threshold. Fixing latencies introduced by hardware might require you to contact the system vendor support. - -Not all latency spikes are hardware related. Ensure that you tune the host firmware to meet your workload requirements. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/9/html-single/optimizing_rhel_9_for_real_time_for_low_latency_operation/index#setting-bios-parameters-for-system-tuning_optimizing-RHEL9-for-real-time-for-low-latency-operation[Setting firmware parameters for system tuning]. -==== diff --git a/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc b/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc deleted file mode 100644 index 06abc75cbe5b..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-in-single-node-cluster.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-in-single-node-cluster_{context}"] -= Running latency tests on a {sno} cluster - -You can run latency tests on {sno} clusters. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* To run the latency tests on a {sno} cluster, run the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=master \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -[NOTE] -==== -`ROLE_WORKER_CNF=master` is required because master is the only machine pool to which the node belongs. For more information about setting the required `MachineConfigPool` for the latency tests, see "Prerequisites for running latency tests". -==== -+ -After running the test suite, all the dangling resources are cleaned up. 
diff --git a/modules/cnf-performing-end-to-end-tests-running-oslat.adoc b/modules/cnf-performing-end-to-end-tests-running-oslat.adoc deleted file mode 100644 index a54d72cd5361..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-oslat.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-oslat_{context}"] -= Running oslat - -The `oslat` test simulates a CPU-intensive DPDK application and measures all the interruptions and disruptions to test how the cluster handles CPU heavy data processing. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Prerequisites - -* You have logged in to `registry.redhat.io` with your Customer Portal credentials. -* You have applied a cluster performance profile by using the Node Tuning Operator. - -.Procedure - -* To perform the `oslat` test, run the following command, substituting variable values as appropriate: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance -e ROLE_WORKER_CNF=worker-cnf \ --e LATENCY_TEST_CPUS=10 -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="oslat" ----- -+ -`LATENCY_TEST_CPUS` specifies the list of CPUs to test with the `oslat` command. -+ -The command runs the `oslat` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (20 μs). -+ -If the results exceed the latency threshold, the test fails. -+ -[IMPORTANT] -==== -For valid results, the test should run for at least 12 hours. -==== -+ -.Example failure output -[source,terminal,subs="attributes+"] ----- -running /usr/bin/cnftests -ginkgo.v -ginkgo.focus=oslat -I0908 12:51:55.999393 27 request.go:601] Waited for 1.044848101s due to client-side throttling, not priority and fairness, request: GET:https://compute-1.example.com:6443/apis/machineconfiguration.openshift.io/v1?timeout=32s -Running Suite: CNF Features e2e integration tests -================================================= -Random Seed: 1662641514 -Will run 1 of 194 specs - -[...] - -• Failure [77.833 seconds] -[performance] Latency Test -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:62 - with the oslat image - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:128 - should succeed [It] - /remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:153 - - The current latency 304 is bigger than the expected one 1 : <1> - -[...] 
- -Summarizing 1 Failure: - -[Fail] [performance] Latency Test with the oslat image [It] should succeed -/remote-source/app/vendor/github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/4_latency/latency.go:177 - -Ran 1 of 194 Specs in 161.091 seconds -FAIL! -- 0 Passed | 1 Failed | 0 Pending | 193 Skipped ---- FAIL: TestTest (161.42s) -FAIL ----- -<1> In this example, the measured latency is outside the maximum allowed value. diff --git a/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc b/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc deleted file mode 100644 index 018bdcde713a..000000000000 --- a/modules/cnf-performing-end-to-end-tests-running-the-tests.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-running-the-tests_{context}"] -= Running the latency tests - -Run the cluster latency tests to validate node tuning for your Cloud-native Network Functions (CNF) workload. - -[IMPORTANT] -==== -**Always** run the latency tests with `DISCOVERY_MODE=true` set. If you don't, the test suite will make changes to the running cluster configuration. -==== - -[NOTE] -==== -When executing `podman` commands as a non-root or non-privileged user, mounting paths can fail with `permission denied` errors. To make the `podman` command work, append `:Z` to the volumes creation; for example, `-v $(pwd)/:/kubeconfig:Z`. This allows `podman` to do the proper SELinux relabeling. -==== - -.Procedure - -. Open a shell prompt in the directory containing the `kubeconfig` file. -+ -You provide the test image with a `kubeconfig` file in current directory and its related `$KUBECONFIG` environment variable, mounted through a volume. This allows the running container to use the `kubeconfig` file from inside the container. - -. Run the latency tests by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e DISCOVERY_MODE=true -e FEATURES=performance registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="\[performance\]\ Latency\ Test" ----- - -. Optional: Append `-ginkgo.dryRun` to run the latency tests in dry-run mode. This is useful for checking what the tests run. - -. Optional: Append `-ginkgo.v` to run the tests with increased verbosity. - -. Optional: To run the latency tests against a specific performance profile, run the following command, substituting appropriate values: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ --e LATENCY_TEST_RUN=true -e FEATURES=performance -e LATENCY_TEST_RUNTIME=600 -e MAXIMUM_LATENCY=20 \ --e PERF_TEST_PROFILE= registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh -ginkgo.focus="[performance]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the name of the performance profile you want to run the latency tests against. --- -+ -[IMPORTANT] -==== -For valid latency test results, run the tests for at least 12 hours. 
-==== diff --git a/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc b/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc deleted file mode 100644 index f51213919621..000000000000 --- a/modules/cnf-performing-end-to-end-tests-test-failure-report.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-test-failure-report_{context}"] -= Generating a latency test failure report - -Use the following procedures to generate a JUnit latency test output and test failure report. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* Create a test failure report with information about the cluster state and resources for troubleshooting by passing the `--report` parameter with the path to where the report is dumped: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -v $(pwd)/reportdest: \ --e KUBECONFIG=/kubeconfig/kubeconfig -e DISCOVERY_MODE=true -e FEATURES=performance \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -/usr/bin/test-run.sh --report \ --ginkgo.focus="\[performance\]\ Latency\ Test" ----- -+ -where: -+ --- - :: Is the path to the folder where the report is generated. --- diff --git a/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc b/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc deleted file mode 100644 index 6d74e4644d6b..000000000000 --- a/modules/cnf-performing-end-to-end-tests-troubleshooting.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-performing-platform-verification-latency-tests.adoc - -:_content-type: PROCEDURE -[id="cnf-performing-end-to-end-tests-troubleshooting_{context}"] -= Troubleshooting errors with the cnf-tests container - -To run latency tests, the cluster must be accessible from within the `cnf-tests` container. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -* Verify that the cluster is accessible from inside the `cnf-tests` container by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig \ -registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \ -oc get nodes ----- -+ -If this command does not work, an error related to spanning across DNS, MTU size, or firewall access might be occurring. diff --git a/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc b/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc deleted file mode 100644 index d5f5e5493e4f..000000000000 --- a/modules/cnf-provisioning-deploying-a-distributed-unit-manually.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// CNF-950 4.7 Provisioning and deploying a Distributed Unit (DU) manually -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-provisioning-and-deploying-a-distributed-unit.adoc - -[id="cnf-provisioning-deploying-a-distributed-unit-manually_{context}"] -= Provisioning and deploying a distributed unit (DU) manually - -Radio access network (RAN) is composed of central units (CU), distributed units (DU), and radio units (RU). 
-RAN from the telecommunications standard perspective is shown below: - -image::135_OpenShift_Distributed_Unit_0121.svg[High level RAN overview] - -Of the three components composing the RAN, the CU and DU can be virtualized and implemented as cloud-native functions. - -The CU and DU split architecture is driven by real-time computing and networking requirements. A DU can be seen as a real-time part of a -telecommunication baseband unit. -One distributed unit may aggregate several cells. A CU can be seen as a non-realtime part of a baseband unit, aggregating -traffic from one or more distributed units. - -A cell in the context of a DU can be seen as a real-time application performing intensive digital signal processing, data transfer, -and algorithmic tasks. -Cells often use hardware acceleration (FPGA, GPU, eASIC) for DSP processing offload, but there are also software-only implementations -(FlexRAN), based on AVX-512 instructions. - -Running a cell application on COTS hardware requires the following features to be enabled: - -* Real-time kernel -* CPU isolation -* NUMA awareness -* Huge pages memory management -* Precision timing synchronization using PTP -* AVX-512 instruction set (for FlexRAN and/or FPGA implementations) -* Additional features depending on the RAN Operator requirements - -Accessing hardware acceleration devices and high throughput network interface controllers by virtualized software applications -requires the use of SR-IOV and passthrough PCI device virtualization. - -In addition to the compute and acceleration requirements, DUs operate on multiple internal and external networks. - -[id="cnf-manifest-structure_{context}"] -== The manifest structure - -The profile is built from one cluster-specific folder and one or more site-specific folders. -This is done to address a deployment that includes remote worker nodes, with several sites belonging to the same cluster. - -The [`cluster-config`](ran-profile/cluster-config) directory contains performance and PTP customizations based upon -Operator deployments in the [`deploy`](../feature-configs/deploy) folder. - -The [`site.1.fqdn`](site.1.fqdn) folder contains site-specific network customizations. - -[id="cnf-du-prerequisites_{context}"] -== Prerequisites - -Before installing the Operators and deploying the DU, perform the following steps. - -. Create a machine config pool for the RAN worker nodes. For example: -+ -[source,terminal] ---- -cat < node-role.kubernetes.io/worker-cnf="" ---- - -. Label the node as PTP slave (DU only): -+ -[source,terminal] ---- -$ oc label --overwrite node/ ptp/slave="" ---- - -[id="cnf-du-configuration-notes_{context}"] -== SR-IOV configuration notes - -The `SriovNetworkNodePolicy` object must be configured differently for different NIC models and placements. - -|==================== -|*Manufacturer* |*deviceType* |*isRdma* -|Intel |vfio-pci or netdevice |false -|Mellanox |netdevice |true -|==================== - -In addition, when configuring the `nicSelector`, the `pfNames` value must match the intended interface name on the specific host. - -If there is a mixed cluster where some of the nodes are deployed with Intel NICs and some with Mellanox, several SR-IOV configurations can be -created with the same `resourceName`. The device plugin will discover only the available ones and will put the capacity on the node accordingly.
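A sketch of a `SriovNetworkNodePolicy` that follows these notes for an Intel NIC is shown below. The policy name, resource name, interface name, and VF count are illustrative assumptions and must be adapted to the target hosts:

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: du-fh-policy                       # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: du_fh                      # resource advertised by the SR-IOV device plugin
  nodeSelector:
    node-role.kubernetes.io/worker-cnf: ""
  numVfs: 8                                # illustrative VF count
  nicSelector:
    pfNames: ["ens1f0"]                    # must match the interface name on the host
  deviceType: netdevice                    # per the table above for Intel NICs
  isRdma: false
----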
diff --git a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc b/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc deleted file mode 100644 index 0dd4836fd387..000000000000 --- a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc +++ /dev/null @@ -1,447 +0,0 @@ -// CNF-489 Real time and low latency workload provisioning -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="cnf-provisioning-real-time-and-low-latency-workloads_{context}"] -= Provisioning real-time and low latency workloads - -Many industries and organizations need extremely high performance computing and might require low and predictable latency, especially in the financial and telecommunications industries. For these industries, with their unique requirements, {product-title} provides the Node Tuning Operator to implement automatic tuning to achieve low latency performance and consistent response time for {product-title} applications. - -The cluster administrator can use this performance profile configuration to make these changes in a more reliable way. The administrator can specify whether to update the kernel to kernel-rt (real-time), reserve CPUs for cluster and operating system housekeeping duties, including pod infra containers, isolate CPUs for application containers to run the workloads, and disable unused CPUs to reduce power consumption. - -[WARNING] -==== -The usage of execution probes in conjunction with applications that require guaranteed CPUs can cause latency spikes. It is recommended to use other probes, such as a properly configured set of network probes, as an alternative. -==== - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance for OpenShift applications. In {product-title} 4.11 and later, these functions are part of the Node Tuning Operator. -==== - -[id="node-tuning-operator-known-limitations-for-real-time_{context}"] -== Known limitations for real-time - -[NOTE] -==== -In most deployments, kernel-rt is supported only on worker nodes when you use a standard cluster with three control plane nodes and three worker nodes. There are exceptions for compact and single nodes on {product-title} deployments. For installations on a single node, kernel-rt is supported on the single control plane node. -==== - -To fully utilize the real-time mode, the containers must run with elevated privileges. -See link:https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container[Set capabilities for a Container] for information on granting privileges. - -{product-title} restricts the allowed capabilities, so you might need to create a `SecurityContext` as well. - -[NOTE] -==== -This procedure is fully supported with bare metal installations using {op-system-first} systems. -==== - -Establishing the right performance expectations refers to the fact that the real-time kernel is not a panacea. Its objective is consistent, low-latency determinism offering predictable response times. There is some additional kernel overhead associated with the real-time kernel. This is due primarily to handling hardware interruptions in separately scheduled threads. The increased overhead in some workloads results in some degradation in overall throughput. The exact amount of degradation is very workload dependent, ranging from 0% to 30%. However, it is the cost of determinism. 
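As noted above, containers that rely on real-time scheduling usually need elevated privileges, which you grant through a security context. The following pod snippet is a minimal sketch; the capability names (`SYS_NICE`, `IPC_LOCK`) are assumptions that depend on what the workload actually needs:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: rt-app                # illustrative name
spec:
  containers:
  - name: rt-app
    image: rt-application     # illustrative image
    securityContext:
      capabilities:
        add:
        - SYS_NICE            # lets the application request real-time scheduling priorities (assumption)
        - IPC_LOCK            # lets the application lock memory pages (assumption)
----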
- -[id="node-tuning-operator-provisioning-worker-with-real-time-capabilities_{context}"] -== Provisioning a worker with real-time capabilities - -. Optional: Add a node to the {product-title} cluster. -See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/8/html/optimizing_rhel_8_for_real_time_for_low_latency_operation/setting-bios-parameters-for-system-tuning_optimizing-rhel8-for-real-time-for-low-latency-operation[Setting BIOS parameters for system tuning]. - -. Add the label `worker-rt` to the worker nodes that require the real-time capability by using the `oc` command. - -. Create a new machine config pool for real-time nodes: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-rt - labels: - machineconfiguration.openshift.io/role: worker-rt -spec: - machineConfigSelector: - matchExpressions: - - { - key: machineconfiguration.openshift.io/role, - operator: In, - values: [worker, worker-rt], - } - paused: false - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-rt: "" ----- -Note that a machine config pool worker-rt is created for group of nodes that have the label `worker-rt`. - -. Add the node to the proper machine config pool by using node role labels. -+ -[NOTE] -==== -You must decide which nodes are configured with real-time workloads. You could configure all of the nodes in the cluster, or a subset of the nodes. The Node Tuning Operator that expects all of the nodes are part of a dedicated machine config pool. If you use all of the nodes, you must point the Node Tuning Operator to the worker node role label. If you use a subset, you must group the nodes into a new machine config pool. -==== -. Create the `PerformanceProfile` with the proper set of housekeeping cores and `realTimeKernel: enabled: true`. - -. You must set `machineConfigPoolSelector` in `PerformanceProfile`: -+ -[source,yaml] ----- - apiVersion: performance.openshift.io/v2 - kind: PerformanceProfile - metadata: - name: example-performanceprofile - spec: - ... - realTimeKernel: - enabled: true - nodeSelector: - node-role.kubernetes.io/worker-rt: "" - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-rt ----- -. Verify that a matching machine config pool exists with a label: -+ -[source,terminal] ----- -$ oc describe mcp/worker-rt ----- -+ -.Example output -[source,yaml] ----- -Name: worker-rt -Namespace: -Labels: machineconfiguration.openshift.io/role=worker-rt ----- - -. {product-title} will start configuring the nodes, which might involve multiple reboots. Wait for the nodes to settle. This can take a long time depending on the specific hardware you use, but 20 minutes per node is expected. - -. Verify everything is working as expected. 
- -[id="node-tuning-operator-verifying-real-time-kernel-installation_{context}"] -== Verifying the real-time kernel installation - -Use this command to verify that the real-time kernel is installed: - -[source,terminal] ----- -$ oc get node -o wide ----- - -Note the worker with the role `worker-rt` that contains the string `4.18.0-305.30.1.rt7.102.el8_4.x86_64 cri-o://1.26.0-99.rhaos4.10.gitc3131de.el8`: - -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION INTERNAL-IP -EXTERNAL-IP OS-IMAGE KERNEL-VERSION -CONTAINER-RUNTIME -rt-worker-0.example.com Ready worker,worker-rt 5d17h v1.26.0 -128.66.135.107 Red Hat Enterprise Linux CoreOS 46.82.202008252340-0 (Ootpa) -4.18.0-305.30.1.rt7.102.el8_4.x86_64 cri-o://1.26.0-99.rhaos4.10.gitc3131de.el8 -[...] ----- - -[id="node-tuning-operator-creating-workload-that-works-in-real-time_{context}"] -== Creating a workload that works in real-time - -Use the following procedures for preparing a workload that will use real-time capabilities. - -.Procedure - -. Create a pod with a QoS class of `Guaranteed`. -. Optional: Disable CPU load balancing for DPDK. -. Assign a proper node selector. - -When writing your applications, follow the general recommendations described in -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_for_real_time/8/html-single/tuning_guide/index#chap-Application_Tuning_and_Deployment[Application tuning and deployment]. - -[id="node-tuning-operator-creating-pod-with-guaranteed-qos-class_{context}"] -== Creating a pod with a QoS class of `Guaranteed` - -Keep the following in mind when you create a pod that is given a QoS class of `Guaranteed`: - -* Every container in the pod must have a memory limit and a memory request, and they must be the same. -* Every container in the pod must have a CPU limit and a CPU request, and they must be the same. - -The following example shows the configuration file for a pod that has one container. The container has a memory limit and a memory request, both equal to 200 MiB. The container has a CPU limit and a CPU request, both equal to 1 CPU. - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: qos-demo - namespace: qos-example -spec: - containers: - - name: qos-demo-ctr - image: - resources: - limits: - memory: "200Mi" - cpu: "1" - requests: - memory: "200Mi" - cpu: "1" ----- - -. Create the pod: -+ -[source,terminal] ----- -$ oc apply -f qos-pod.yaml --namespace=qos-example ----- - -. View detailed information about the pod: -+ -[source,terminal] ----- -$ oc get pod qos-demo --namespace=qos-example --output=yaml ----- -+ -.Example output -[source,yaml] ----- -spec: - containers: - ... -status: - qosClass: Guaranteed ----- -+ -[NOTE] -==== -If a container specifies its own memory limit, but does not specify a memory request, {product-title} automatically assigns a memory request that matches the limit. Similarly, if a container specifies its own CPU limit, but does not specify a CPU request, {product-title} automatically assigns a CPU request that matches the limit. -==== - -[id="node-tuning-operator-disabling-cpu-load-balancing-for-dpdk_{context}"] -== Optional: Disabling CPU load balancing for DPDK - -Functionality to disable or enable CPU load balancing is implemented on the CRI-O level. The code under the CRI-O disables or enables CPU load balancing only when the following requirements are met. - -* The pod must use the `performance-` runtime class. 
You can get the proper name by looking at the status of the performance profile, as shown here: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -... -status: - ... - runtimeClass: performance-manual ----- - -[NOTE] -==== -Currently, disabling CPU load balancing is not supported with cgroup v2. -==== - -The Node Tuning Operator is responsible for the creation of the high-performance runtime handler config snippet under relevant nodes and for creation of the high-performance runtime class under the cluster. It will have the same content as default runtime handler except it enables the CPU load balancing configuration functionality. - -To disable the CPU load balancing for the pod, the `Pod` specification must include the following fields: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - ... - annotations: - ... - cpu-load-balancing.crio.io: "disable" - ... - ... -spec: - ... - runtimeClassName: performance- - ... ----- - -[NOTE] -==== -Only disable CPU load balancing when the CPU manager static policy is enabled and for pods with guaranteed QoS that use whole CPUs. Otherwise, disabling CPU load balancing can affect the performance of other containers in the cluster. -==== - -[id="node-tuning-operator-assigning-proper-node-selector_{context}"] -== Assigning a proper node selector - -The preferred way to assign a pod to nodes is to use the same node selector the performance profile used, as shown here: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: example -spec: - # ... - nodeSelector: - node-role.kubernetes.io/worker-rt: "" ----- - -For more information, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.5/html-single/nodes/index#nodes-scheduler-node-selectors[Placing pods on specific nodes using node selectors]. - -[id="node-tuning-operator-scheduling-workload-onto-worker-with-real-time-capabilities_{context}"] -== Scheduling a workload onto a worker with real-time capabilities - -Use label selectors that match the nodes attached to the machine config pool that was configured for low latency by the Node Tuning Operator. For more information, see link:https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/[Assigning pods to nodes]. - -[id="node-tuning-operator-disabling-CPUs-for-power-consumption_{context}"] -== Reducing power consumption by taking CPUs offline - -You can generally anticipate telecommunication workloads. When not all of the CPU resources are required, the Node Tuning Operator allows you take unused CPUs offline to reduce power consumption by manually updating the performance profile. - -To take unused CPUs offline, you must perform the following tasks: - -. Set the offline CPUs in the performance profile and save the contents of the YAML file: -+ -.Example performance profile with offlined CPUs -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - additionalKernelArgs: - - nmi_watchdog=0 - - audit=0 - - mce=off - - processor.max_cstate=1 - - intel_idle.max_cstate=0 - - idle=poll - cpu: - isolated: "2-23,26-47" - reserved: "0,1,24,25" - offlined: “48-59” <1> - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: single-numa-node - realTimeKernel: - enabled: true ----- -<1> Optional. You can list CPUs in the `offlined` field to take the specified CPUs offline. - -. 
Apply the updated profile by running the following command: -+ -[source,terminal] ----- -$ oc apply -f my-performance-profile.yaml ----- - -[id="node-tuning-operator-pod-power-saving-config_{context}"] -== Optional: Power saving configurations - -You can enable power savings for a node that has low priority workloads that are colocated with high priority workloads without impacting the latency or throughput of the high priority workloads. Power saving is possible without modifications to the workloads themselves. - -[IMPORTANT] -==== -The feature is supported on Intel Ice Lake and later generations of Intel CPUs. The capabilities of the processor might impact the latency and throughput of the high priority workloads. -==== - -When you configure a node with a power saving configuration, you must configure high priority workloads with performance configuration at the pod level, which means that the configuration applies to all the cores used by the pod. - -By disabling P-states and C-states at the pod level, you can configure high priority workloads for best performance and lowest latency. - -.Configuration for high priority workloads -[cols="1,2", options="header"] -|==== -|Annotation -|Description - -a|[source,yaml] ----- -annotations: - cpu-c-states.crio.io: "disable" - cpu-freq-governor.crio.io: "" ----- -|Provides the best performance for a pod by disabling C-states and specifying the governor type for CPU scaling. The `performance` governor is recommended for high priority workloads. -|==== - - -.Prerequisites - -* You enabled C-states and OS-controlled P-states in the BIOS - -.Procedure - -. Generate a `PerformanceProfile` with `per-pod-power-management` set to `true`: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v \ -/must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} \ ---mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true \ ---split-reserved-cpus-across-numa=false --topology-manager-policy=single-numa-node \ ---must-gather-dir-path /must-gather -power-consumption-mode=low-latency \ <1> ---per-pod-power-management=true > my-performance-profile.yaml ----- -<1> The `power-consumption-mode` must be `default` or `low-latency` when the `per-pod-power-management` is set to `true`. - -+ -.Example `PerformanceProfile` with `perPodPowerManagement` - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - [.....] - workloadHints: - realTime: true - highPowerConsumption: false - perPodPowerManagement: true ----- - -. Set the default `cpufreq` governor as an additional kernel argument in the `PerformanceProfile` custom resource (CR): -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - ... - additionalKernelArgs: - - cpufreq.default_governor=schedutil <1> ----- -<1> Using the `schedutil` governor is recommended, however, you can use other governors such as the `ondemand` or `powersave` governors. - -. Set the maximum CPU frequency in the `TunedPerformancePatch` CR: -+ -[source,yaml] ----- -spec: - profile: - - data: | - [sysfs] - /sys/devices/system/cpu/intel_pstate/max_perf_pct = <1> ----- -<1> The `max_perf_pct` controls the maximum frequency the `cpufreq` driver is allowed to set as a percentage of the maximum supported cpu frequency. This value applies to all CPUs. 
You can check the maximum supported frequency in `/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq`. As a starting point, you can use a percentage that caps all CPUs at the `All Cores Turbo` frequency. The `All Cores Turbo` frequency is the frequency that all cores will run at when the cores are all fully occupied. - -. Add the desired annotations to your high priority workload pods. The annotations override the `default` settings. -+ -.Example high priority workload annotation -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - ... - annotations: - ... - cpu-c-states.crio.io: "disable" - cpu-freq-governor.crio.io: "" - ... - ... -spec: - ... - runtimeClassName: performance- - ... ----- - -. Restart the pods. diff --git a/modules/cnf-reducing-netqueues-using-nto.adoc b/modules/cnf-reducing-netqueues-using-nto.adoc deleted file mode 100644 index 791022d4baf9..000000000000 --- a/modules/cnf-reducing-netqueues-using-nto.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/low-latency-tuning.adoc - -[id="reducing-nic-queues-using-the-node-tuning-operator_{context}"] -= Reducing NIC queues using the Node Tuning Operator - -The Node Tuning Operator allows you to adjust the network interface controller (NIC) queue count for each network device by configuring the performance profile. Device network queues allows the distribution of packets among different physical queues and each queue gets a separate thread for packet processing. - -In real-time or low latency systems, all the unnecessary interrupt request lines (IRQs) pinned to the isolated CPUs must be moved to reserved or housekeeping CPUs. - -In deployments with applications that require system, {product-title} networking or in mixed deployments with Data Plane Development Kit (DPDK) workloads, multiple queues are needed to achieve good throughput and the number of NIC queues should be adjusted or remain unchanged. For example, to achieve low latency the number of NIC queues for DPDK based workloads should be reduced to just the number of reserved or housekeeping CPUs. - -Too many queues are created by default for each CPU and these do not fit into the interrupt tables for housekeeping CPUs when tuning for low latency. Reducing the number of queues makes proper tuning possible. Smaller number of queues means a smaller number of interrupts that then fit in the IRQ table. - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. -==== diff --git a/modules/cnf-rfhe-notifications-api-refererence.adoc b/modules/cnf-rfhe-notifications-api-refererence.adoc deleted file mode 100644 index afb67c84bdc2..000000000000 --- a/modules/cnf-rfhe-notifications-api-refererence.adoc +++ /dev/null @@ -1,161 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: REFERENCE -[id="cnf-rfhe-notifications-api-refererence_{context}"] -= Subscribing applications to bare-metal events REST API reference - -Use the bare-metal events REST API to subscribe an application to the bare-metal events that are generated on the parent node. - -Subscribe applications to Redfish events by using the resource address `/cluster/node//redfish/event`, where `` is the cluster node running the application. 
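As an illustration of this resource address format, the following is a minimal sketch of creating a subscription with `curl` from inside the application pod, assuming the `cloud-event-proxy` sidecar described below is listening on `localhost:8089` and reusing the example node name from this reference:

[source,terminal]
----
$ curl -X POST http://localhost:8089/api/ocloudNotifications/v1/subscriptions \
  -H "Content-Type: application/json" \
  -d '{"uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", "resource": "/cluster/node/openshift-worker-0.openshift.example.com/redfish/event"}'
----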
- -Deploy your `cloud-event-consumer` application container and `cloud-event-proxy` sidecar container in a separate application pod. The `cloud-event-consumer` application subscribes to the `cloud-event-proxy` container in the application pod. - -Use the following API endpoints to subscribe the `cloud-event-consumer` application to Redfish events posted by the `cloud-event-proxy` container at [x-]`http://localhost:8089/api/ocloudNotifications/v1/` in the application pod: - -* `/api/ocloudNotifications/v1/subscriptions` -- `POST`: Creates a new subscription -- `GET`: Retrieves a list of subscriptions -* `/api/ocloudNotifications/v1/subscriptions/` -- `GET`: Returns details for the specified subscription ID -* `api/ocloudNotifications/v1/subscriptions/status/` -- `PUT`: Creates a new status ping request for the specified subscription ID -* `/api/ocloudNotifications/v1/health` -- `GET`: Returns the health status of `ocloudNotifications` API - -[NOTE] -==== -`9089` is the default port for the `cloud-event-consumer` container deployed in the application pod. You can configure a different port for your application as required. -==== - -[discrete] -== api/ocloudNotifications/v1/subscriptions - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Returns a list of subscriptions. If subscriptions exist, a `200 OK` status code is returned along with the list of subscriptions. - -.Example API response -[source,json] ----- -[ - { - "id": "ca11ab76-86f9-428c-8d3a-666c24e34d32", - "endpointUri": "http://localhost:9089/api/ocloudNotifications/v1/dummy", - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions/ca11ab76-86f9-428c-8d3a-666c24e34d32", - "resource": "/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" - } -] ----- - -[discrete] -=== HTTP method - -`POST api/ocloudNotifications/v1/subscriptions` - -[discrete] -==== Description - -Creates a new subscription. If a subscription is successfully created, or if it already exists, a `201 Created` status code is returned. - -.Query parameters -|=== -| Parameter | Type - -| subscription -| data -|=== - -.Example payload -[source,json] ----- -{ - "uriLocation": "http://localhost:8089/api/ocloudNotifications/v1/subscriptions", - "resource": "/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" -} ----- - -[discrete] -== api/ocloudNotifications/v1/subscriptions/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/subscriptions/` - -[discrete] -==== Description - -Returns details for the subscription with ID `` - -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{ - "id":"ca11ab76-86f9-428c-8d3a-666c24e34d32", - "endpointUri":"http://localhost:9089/api/ocloudNotifications/v1/dummy", - "uriLocation":"http://localhost:8089/api/ocloudNotifications/v1/subscriptions/ca11ab76-86f9-428c-8d3a-666c24e34d32", - "resource":"/cluster/node/openshift-worker-0.openshift.example.com/redfish/event" -} ----- - -[discrete] -== api/ocloudNotifications/v1/subscriptions/status/ - -[discrete] -=== HTTP method - -`PUT api/ocloudNotifications/v1/subscriptions/status/` - -[discrete] -==== Description - -Creates a new status ping request for subscription with ID ``. If a subscription is present, the status request is successful and a `202 Accepted` status code is returned. 
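For example, the following is a minimal sketch of sending a status ping with `curl`, reusing the subscription ID that appears in the example responses in this reference:

[source,terminal]
----
$ curl -X PUT http://localhost:8089/api/ocloudNotifications/v1/subscriptions/status/ca11ab76-86f9-428c-8d3a-666c24e34d32
----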
- -.Query parameters -|=== -| Parameter | Type - -| `` -| string -|=== - -.Example API response -[source,json] ----- -{"status":"ping sent"} ----- - -[discrete] -== api/ocloudNotifications/v1/health/ - -[discrete] -=== HTTP method - -`GET api/ocloudNotifications/v1/health/` - -[discrete] -==== Description - -Returns the health status for the `ocloudNotifications` REST API. - -.Example API response -[source,terminal] ----- -OK ----- diff --git a/modules/cnf-running-the-performance-creator-profile-offline.adoc b/modules/cnf-running-the-performance-creator-profile-offline.adoc deleted file mode 100644 index ba395b4ea2cb..000000000000 --- a/modules/cnf-running-the-performance-creator-profile-offline.adoc +++ /dev/null @@ -1,262 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="running-the-performance-profile-creator-wrapper-script_{context}"] -= Running the Performance Profile Creator wrapper script - -The performance profile wrapper script simplifies the running of the Performance Profile Creator (PPC) tool. It hides the complexities associated with running `podman` and specifying the mapping directories and it enables the creation of the performance profile. - -.Prerequisites - -* Access to the Node Tuning Operator image. -* Access to the `must-gather` tarball. - -.Procedure - -. Create a file on your local machine named, for example, `run-perf-profile-creator.sh`: -+ -[source,terminal] ----- -$ vi run-perf-profile-creator.sh ----- - -. Paste the following code into the file: -+ -[source,bash,subs="attributes+"] ----- -#!/bin/bash - -readonly CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman} -readonly CURRENT_SCRIPT=$(basename "$0") -readonly CMD="${CONTAINER_RUNTIME} run --entrypoint performance-profile-creator" -readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists" -readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull" -readonly MUST_GATHER_VOL="/must-gather" - -NTO_IMG="registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version}" -MG_TARBALL="" -DATA_DIR="" - -usage() { - print "Wrapper usage:" - print " ${CURRENT_SCRIPT} [-h] [-p image][-t path] -- [performance-profile-creator flags]" - print "" - print "Options:" - print " -h help for ${CURRENT_SCRIPT}" - print " -p Node Tuning Operator image" - print " -t path to a must-gather tarball" - - ${IMG_EXISTS_CMD} "${NTO_IMG}" && ${CMD} "${NTO_IMG}" -h -} - -function cleanup { - [ -d "${DATA_DIR}" ] && rm -rf "${DATA_DIR}" -} -trap cleanup EXIT - -exit_error() { - print "error: $*" - usage - exit 1 -} - -print() { - echo "$*" >&2 -} - -check_requirements() { - ${IMG_EXISTS_CMD} "${NTO_IMG}" || ${IMG_PULL_CMD} "${NTO_IMG}" || \ - exit_error "Node Tuning Operator image not found" - - [ -n "${MG_TARBALL}" ] || exit_error "Must-gather tarball file path is mandatory" - [ -f "${MG_TARBALL}" ] || exit_error "Must-gather tarball file not found" - - DATA_DIR=$(mktemp -d -t "${CURRENT_SCRIPT}XXXX") || exit_error "Cannot create the data directory" - tar -zxf "${MG_TARBALL}" --directory "${DATA_DIR}" || exit_error "Cannot decompress the must-gather tarball" - chmod a+rx "${DATA_DIR}" - - return 0 -} - -main() { - while getopts ':hp:t:' OPT; do - case "${OPT}" in - h) - usage - exit 0 - ;; - p) - NTO_IMG="${OPTARG}" - ;; - t) - MG_TARBALL="${OPTARG}" - ;; - ?) 
- exit_error "invalid argument: ${OPTARG}" - ;; - esac - done - shift $((OPTIND - 1)) - - check_requirements || exit 1 - - ${CMD} -v "${DATA_DIR}:${MUST_GATHER_VOL}:z" "${NTO_IMG}" "$@" --must-gather-dir-path "${MUST_GATHER_VOL}" - echo "" 1>&2 -} - -main "$@" ----- - -. Add execute permissions for everyone on this script: -+ -[source,terminal] ----- -$ chmod a+x run-perf-profile-creator.sh ----- - -. Optional: Display the `run-perf-profile-creator.sh` command usage: -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -h ----- -+ -.Expected output -+ -[source,terminal] ----- -Wrapper usage: - run-perf-profile-creator.sh [-h] [-p image][-t path] -- [performance-profile-creator flags] - -Options: - -h help for run-perf-profile-creator.sh - -p Node Tuning Operator image <1> - -t path to a must-gather tarball <2> -A tool that automates creation of Performance Profiles - -Usage: - performance-profile-creator [flags] - -Flags: - --disable-ht Disable Hyperthreading - -h, --help help for performance-profile-creator - --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log") - --mcp-name string MCP name corresponding to the target machines (required) - --must-gather-dir-path string Must gather directory path (default "must-gather") - --offlined-cpu-count int Number of offlined CPUs - --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default") - --profile-name string Name of the performance profile to be created (default "performance") - --reserved-cpu-count int Number of reserved CPUs (required) - --rt-kernel Enable Real Time Kernel (required) - --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes - --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted") - --user-level-networking Run with User level Networking(DPDK) enabled ----- -+ -[NOTE] -==== -There two types of arguments: - -* Wrapper arguments namely `-h`, `-p` and `-t` -* PPC arguments -==== -+ -<1> Optional: Specify the Node Tuning Operator image. If not set, the default upstream image is used: `registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version}`. -<2> `-t` is a required wrapper script argument and specifies the path to a `must-gather` tarball. - -. Run the performance profile creator tool in discovery mode: -+ -[NOTE] -==== -Discovery mode inspects your cluster using the output from `must-gather`. The output produced includes information on: - -* The NUMA cell partitioning with the allocated CPU IDs -* Whether hyperthreading is enabled - -Using this information you can set appropriate values for some of the arguments supplied to the Performance Profile Creator tool. -==== -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -t /must-gather/must-gather.tar.gz -- --info=log ----- -+ -[NOTE] -==== -The `info` option requires a value which specifies the output format. Possible values are log and JSON. The JSON format is reserved for debugging. -==== - -. 
Check the machine config pool: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output - -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-acd1358917e9f98cbdb599aea622d78b True False False 3 3 3 0 22h -worker-cnf rendered-worker-cnf-1d871ac76e1951d32b2fe92369879826 False True False 2 1 1 0 22h ----- - -. Create a performance profile: -+ -[source,terminal] ----- -$ ./run-perf-profile-creator.sh -t /must-gather/must-gather.tar.gz -- --mcp-name=worker-cnf --reserved-cpu-count=2 --rt-kernel=true > my-performance-profile.yaml ----- -+ -[NOTE] -==== -The Performance Profile Creator arguments are shown in the Performance Profile Creator arguments table. The following arguments are required: - -* `reserved-cpu-count` -* `mcp-name` -* `rt-kernel` - -The `mcp-name` argument in this example is set to `worker-cnf` based on the output of the command `oc get mcp`. For {sno} use `--mcp-name=master`. -==== - -. Review the created YAML file: -+ -[source,terminal] ----- -$ cat my-performance-profile.yaml ----- -.Example output -+ -[source,terminal] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 1-39,41-79 - reserved: 0,40 - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: false ----- - -. Apply the generated profile: -+ -[NOTE] -==== -Install the Node Tuning Operator before applying the profile. -==== - -+ -[source,terminal] ----- -$ oc apply -f my-performance-profile.yaml ----- diff --git a/modules/cnf-running-the-performance-creator-profile.adoc b/modules/cnf-running-the-performance-creator-profile.adoc deleted file mode 100644 index 69902b3c2695..000000000000 --- a/modules/cnf-running-the-performance-creator-profile.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-792 (4.8) -// * scalability_and_performance/cnf-create-performance-profiles.adoc - -:_content-type: PROCEDURE -[id="running-the-performance-profile-profile-cluster-using-podman_{context}"] -= Running the Performance Profile Creator using podman - -As a cluster administrator, you can run `podman` and the Performance Profile Creator to create a performance profile. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* A cluster installed on bare-metal hardware. -* A node with `podman` and OpenShift CLI (`oc`) installed. -* Access to the Node Tuning Operator image. - -.Procedure - -. Check the machine config pool: -+ -[source,terminal] ----- -$ oc get mcp ----- -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-acd1358917e9f98cbdb599aea622d78b True False False 3 3 3 0 22h -worker-cnf rendered-worker-cnf-1d871ac76e1951d32b2fe92369879826 False True False 2 1 1 0 22h ----- - -. Use Podman to authenticate to `registry.redhat.io`: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- -+ -[source,bash] ----- -Username: myrhusername -Password: ************ ----- - -. 
Optional: Display help for the PPC tool: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --rm --entrypoint performance-profile-creator registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} -h ----- -+ -.Example output -+ -[source,terminal] ----- -A tool that automates creation of Performance Profiles - -Usage: - performance-profile-creator [flags] - -Flags: - --disable-ht Disable Hyperthreading - -h, --help help for performance-profile-creator - --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log") - --mcp-name string MCP name corresponding to the target machines (required) - --must-gather-dir-path string Must gather directory path (default "must-gather") - --offlined-cpu-count int Number of offlined CPUs - --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default") - --profile-name string Name of the performance profile to be created (default "performance") - --reserved-cpu-count int Number of reserved CPUs (required) - --rt-kernel Enable Real Time Kernel (required) - --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes - --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted") - --user-level-networking Run with User level Networking(DPDK) enabled ----- - -. Run the Performance Profile Creator tool in discovery mode: -+ -[NOTE] -==== -Discovery mode inspects your cluster using the output from `must-gather`. The output produced includes information on: - -* The NUMA cell partitioning with the allocated CPU ids -* Whether hyperthreading is enabled - -Using this information you can set appropriate values for some of the arguments supplied to the Performance Profile Creator tool. -==== -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --info log --must-gather-dir-path /must-gather ----- -+ -[NOTE] -==== -This command uses the performance profile creator as a new entry point to `podman`. It maps the `must-gather` data for the host into the container image and invokes the required user-supplied profile arguments to produce the `my-performance-profile.yaml` file. - -The `-v` option can be the path to either: - -* The `must-gather` output directory -* An existing directory containing the `must-gather` decompressed tarball - -The `info` option requires a value which specifies the output format. Possible values are log and JSON. The JSON format is reserved for debugging. -==== - -. Run `podman`: -+ -[source,terminal,subs="attributes+"] ----- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z registry.redhat.io/openshift4/ose-cluster-node-tuning-operator:v{product-version} --mcp-name=worker-cnf --reserved-cpu-count=4 --rt-kernel=true --split-reserved-cpus-across-numa=false --must-gather-dir-path /must-gather --power-consumption-mode=ultra-low-latency --offlined-cpu-count=6 > my-performance-profile.yaml ----- -+ -[NOTE] -==== -The Performance Profile Creator arguments are shown in the Performance Profile Creator arguments table. 
The following arguments are required: - -* `reserved-cpu-count` -* `mcp-name` -* `rt-kernel` - -The `mcp-name` argument in this example is set to `worker-cnf` based on the output of the command `oc get mcp`. For {sno} use `--mcp-name=master`. -==== - -. Review the created YAML file: -+ -[source,terminal] ----- -$ cat my-performance-profile.yaml ----- -.Example output -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 2-39,48-79 - offlined: 42-47 - reserved: 0-1,40-41 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true - workloadHints: - highPowerConsumption: true - realTime: true ----- - -. Apply the generated profile: -+ -[source,terminal] ----- -$ oc apply -f my-performance-profile.yaml ----- diff --git a/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc b/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc deleted file mode 100644 index 78e74391fc4f..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-overview-with-manual-performance-settings.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc -:_content-type: CONCEPT -[id="cnf-scheduling-numa-aware-workloads-with-manual-perofrmance-settings_{context}"] -= Scheduling NUMA-aware workloads with manual performance settings - -Clusters running latency-sensitive workloads typically feature performance profiles that help to minimize workload latency and optimize performance. However, you can schedule NUMA-aware workloads in a pristine cluster that does not feature a performance profile. The following workflow features a pristine cluster that you can manually configure for performance by using the `KubeletConfig` resource. This is not the typical environment for scheduling NUMA-aware workloads. diff --git a/modules/cnf-scheduling-numa-aware-workloads-overview.adoc b/modules/cnf-scheduling-numa-aware-workloads-overview.adoc deleted file mode 100644 index 970da3a69bb9..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-overview.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc -:_content-type: CONCEPT -[id="cnf-scheduling-numa-aware-workloads-overview_{context}"] -= Scheduling NUMA-aware workloads - -Clusters running latency-sensitive workloads typically feature performance profiles that help to minimize workload latency and optimize performance. The NUMA-aware scheduler deploys workloads based on available node NUMA resources and with respect to any performance profile settings applied to the node. The combination of NUMA-aware deployments, and the performance profile of the workload, ensures that workloads are scheduled in a way that maximizes performance. 
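A quick way to inspect the per-node NUMA data that the NUMA-aware scheduler consumes is to list the node resource topology objects. The following is a minimal sketch, assuming the NUMA Resources Operator is installed as described in the related procedures:

[source,terminal]
----
$ oc get noderesourcetopologies.topology.node.k8s.io
----

The `oc describe` output for one of these objects, shown later in this document, reports the allocatable and available resources for each NUMA zone.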
\ No newline at end of file diff --git a/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc b/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc deleted file mode 100644 index 378b7b5136e3..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings.adoc +++ /dev/null @@ -1,200 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-scheduling-numa-aware-workloads-with-manual-performance-setttings_{context}"] -= Scheduling workloads with the NUMA-aware scheduler with manual performance settings - -You can schedule workloads with the NUMA-aware scheduler using `Deployment` CRs that specify the minimum required resources to process the workload. - -The following example deployment uses NUMA-aware scheduling for a sample workload. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Get the name of the NUMA-aware scheduler that is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Create a `Deployment` CR that uses scheduler named `topo-aware-scheduler`, for example: - -.. Save the following YAML in the `nro-deployment.yaml` file: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: numa-deployment-1 - namespace: <1> -spec: - replicas: 1 - selector: - matchLabels: - app: test - template: - metadata: - labels: - app: test - spec: - schedulerName: topo-aware-scheduler <2> - containers: - - name: ctnr - image: quay.io/openshifttest/hello-openshift:openshift - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "10" - requests: - memory: "100Mi" - cpu: "10" - - name: ctnr2 - image: gcr.io/google_containers/pause-amd64:3.0 - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "8" - requests: - memory: "100Mi" - cpu: "8" ----- -<1> Replace with the namespace for your deployment. -<2> `schedulerName` must match the name of the NUMA-aware scheduler that is deployed in your cluster, for example `topo-aware-scheduler`. - -.. Create the `Deployment` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-deployment.yaml ----- - -.Verification - -. Verify that the deployment was successful: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numa-deployment-1-56954b7b46-pfgw8 2/2 Running 0 129m -numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 15h -numaresourcesoperator-worker-dvj4n 2/2 Running 0 18h -numaresourcesoperator-worker-lcg4t 2/2 Running 0 16h -secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 18h ----- - -. 
Verify that the `topo-aware-scheduler` is scheduling the deployed pod by running the following command: -+ -[source,terminal] ----- -$ oc describe pod numa-deployment-1-56954b7b46-pfgw8 -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 130m topo-aware-scheduler Successfully assigned openshift-numaresources/numa-deployment-1-56954b7b46-pfgw8 to compute-0.example.com ----- -+ -[NOTE] -==== -Deployments that request more resources than is available for scheduling will fail with a `MinimumReplicasUnavailable` error. The deployment succeeds when the required resources become available. Pods remain in the `Pending` state until the required resources are available. -==== - -. Verify that the expected allocated resources are listed for the node. - -.. Identify the node that is running the deployment pod by running the following command, replacing with the namespace you specified in the `Deployment` CR: -+ -[source,terminal] ----- -$ oc get pods -n -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -numa-deployment-1-65684f8fcc-bw4bw 0/2 Running 0 82m 10.128.2.50 worker-0 ----- -+ -.. Run the following command, replacing with the name of that node that is running the deployment pod: -+ -[source,terminal] ----- -$ oc describe noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -... - -Zones: - Costs: - Name: node-0 - Value: 10 - Name: node-1 - Value: 21 - Name: node-0 - Resources: - Allocatable: 39 - Available: 21 <1> - Capacity: 40 - Name: cpu - Allocatable: 6442450944 - Available: 6442450944 - Capacity: 6442450944 - Name: hugepages-1Gi - Allocatable: 134217728 - Available: 134217728 - Capacity: 134217728 - Name: hugepages-2Mi - Allocatable: 262415904768 - Available: 262206189568 - Capacity: 270146007040 - Name: memory - Type: Node ----- -<1> The `Available` capacity is reduced because of the resources that have been allocated to the guaranteed pod. -+ -Resources consumed by guaranteed pods are subtracted from the available node resources listed under `noderesourcetopologies.topology.node.k8s.io`. - -. Resource allocations for pods with a `Best-effort` or `Burstable` quality of service (`qosClass`) are not reflected in the NUMA node resources under `noderesourcetopologies.topology.node.k8s.io`. If a pod's consumed resources are not reflected in the node resource calculation, verify that the pod has `qosClass` of `Guaranteed` and the CPU request is an integer value, not a decimal value. 
You can verify the that the pod has a `qosClass` of `Guaranteed` by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n -o jsonpath="{ .status.qosClass }" ----- -+ -.Example output -[source,terminal] ----- -Guaranteed ----- diff --git a/modules/cnf-scheduling-numa-aware-workloads.adoc b/modules/cnf-scheduling-numa-aware-workloads.adoc deleted file mode 100644 index 6e4e7c00ba6c..000000000000 --- a/modules/cnf-scheduling-numa-aware-workloads.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-scheduling-numa-aware-workloads_{context}"] -= Scheduling workloads with the NUMA-aware scheduler - -You can schedule workloads with the NUMA-aware scheduler using `Deployment` CRs that specify the minimum required resources to process the workload. - -The following example deployment uses NUMA-aware scheduling for a sample workload. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Get the name of the NUMA-aware scheduler that is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Create a `Deployment` CR that uses scheduler named `topo-aware-scheduler`, for example: - -.. Save the following YAML in the `nro-deployment.yaml` file: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: numa-deployment-1 - namespace: openshift-numaresources -spec: - replicas: 1 - selector: - matchLabels: - app: test - template: - metadata: - labels: - app: test - spec: - schedulerName: topo-aware-scheduler <1> - containers: - - name: ctnr - image: quay.io/openshifttest/hello-openshift:openshift - imagePullPolicy: IfNotPresent - resources: - limits: - memory: "100Mi" - cpu: "10" - requests: - memory: "100Mi" - cpu: "10" - - name: ctnr2 - image: gcr.io/google_containers/pause-amd64:3.0 - imagePullPolicy: IfNotPresent - command: ["/bin/sh", "-c"] - args: [ "while true; do sleep 1h; done;" ] - resources: - limits: - memory: "100Mi" - cpu: "8" - requests: - memory: "100Mi" - cpu: "8" ----- -<1> `schedulerName` must match the name of the NUMA-aware scheduler that is deployed in your cluster, for example `topo-aware-scheduler`. - -.. Create the `Deployment` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f nro-deployment.yaml ----- - -.Verification - -. Verify that the deployment was successful: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -numa-deployment-1-56954b7b46-pfgw8 2/2 Running 0 129m -numaresources-controller-manager-7575848485-bns4s 1/1 Running 0 15h -numaresourcesoperator-worker-dvj4n 2/2 Running 0 18h -numaresourcesoperator-worker-lcg4t 2/2 Running 0 16h -secondary-scheduler-56994cf6cf-7qf4q 1/1 Running 0 18h ----- - -. 
Verify that the `topo-aware-scheduler` is scheduling the deployed pod by running the following command: -+ -[source,terminal] ----- -$ oc describe pod numa-deployment-1-56954b7b46-pfgw8 -n openshift-numaresources ----- -+ -.Example output -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 130m topo-aware-scheduler Successfully assigned openshift-numaresources/numa-deployment-1-56954b7b46-pfgw8 to compute-0.example.com ----- -+ -[NOTE] -==== -Deployments that request more resources than is available for scheduling will fail with a `MinimumReplicasUnavailable` error. The deployment succeeds when the required resources become available. Pods remain in the `Pending` state until the required resources are available. -==== - -. Verify that the expected allocated resources are listed for the node. - -.. Identify the node that is running the deployment pod by running the following command, replacing with the namespace you specified in the `Deployment` CR: -+ -[source,terminal] ----- -$ oc get pods -n -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -numa-deployment-1-65684f8fcc-bw4bw 0/2 Running 0 82m 10.128.2.50 worker-0 ----- -+ -.. Run the following command, replacing with the name of that node that is running the deployment pod. -+ -[source,terminal] ----- -$ oc describe noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -... - -Zones: - Costs: - Name: node-0 - Value: 10 - Name: node-1 - Value: 21 - Name: node-0 - Resources: - Allocatable: 39 - Available: 21 <1> - Capacity: 40 - Name: cpu - Allocatable: 6442450944 - Available: 6442450944 - Capacity: 6442450944 - Name: hugepages-1Gi - Allocatable: 134217728 - Available: 134217728 - Capacity: 134217728 - Name: hugepages-2Mi - Allocatable: 262415904768 - Available: 262206189568 - Capacity: 270146007040 - Name: memory - Type: Node ----- -<1> The `Available` capacity is reduced because of the resources that have been allocated to the guaranteed pod. -+ -Resources consumed by guaranteed pods are subtracted from the available node resources listed under `noderesourcetopologies.topology.node.k8s.io`. - -. Resource allocations for pods with a `Best-effort` or `Burstable` quality of service (`qosClass`) are not reflected in the NUMA node resources under `noderesourcetopologies.topology.node.k8s.io`. If a pod's consumed resources are not reflected in the node resource calculation, verify that the pod has `qosClass` of `Guaranteed` and the CPU request is an integer value, not a decimal value. 
You can verify the that the pod has a `qosClass` of `Guaranteed` by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n -o jsonpath="{ .status.qosClass }" ----- -+ -.Example output -[source,terminal] ----- -Guaranteed ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc b/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc deleted file mode 100644 index c553c600912a..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-about-cgu-crs.adoc +++ /dev/null @@ -1,478 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-about-cgu-crs_{context}"] -= About the ClusterGroupUpgrade CR - -The {cgu-operator-first} builds the remediation plan from the `ClusterGroupUpgrade` CR for a group of clusters. You can define the following specifications in a `ClusterGroupUpgrade` CR: - -* Clusters in the group -* Blocking `ClusterGroupUpgrade` CRs -* Applicable list of managed policies -* Number of concurrent updates -* Applicable canary updates -* Actions to perform before and after the update -* Update timing - -You can control the start time of an update using the `enable` field in the `ClusterGroupUpgrade` CR. -For example, if you have a scheduled maintenance window of four hours, you can prepare a `ClusterGroupUpgrade` CR with the `enable` field set to `false`. - -You can set the timeout by configuring the `spec.remediationStrategy.timeout` setting as follows: -[source,yaml] ----- -spec - remediationStrategy: - maxConcurrency: 1 - timeout: 240 ----- - -You can use the `batchTimeoutAction` to determine what happens if an update fails for a cluster. -You can specify `continue` to skip the failing cluster and continue to upgrade other clusters, or `abort` to stop policy remediation for all clusters. -Once the timeout elapses, {cgu-operator} removes all `enforce` policies to ensure that no further updates are made to clusters. - -To apply the changes, you set the `enabled` field to `true`. - -For more information see the "Applying update policies to managed clusters" section. - -As {cgu-operator} works through remediation of the policies to the specified clusters, the `ClusterGroupUpgrade` CR can report true or false statuses for a number of conditions. - -[NOTE] -==== -After {cgu-operator} completes a cluster update, the cluster does not update again under the control of the same `ClusterGroupUpgrade` CR. You must create a new `ClusterGroupUpgrade` CR in the following cases: - -* When you need to update the cluster again -* When the cluster changes to non-compliant with the `inform` policy after being updated -==== - -[id="selecting_clusters_{context}"] -== Selecting clusters - -{cgu-operator} builds a remediation plan and selects clusters based on the following fields: - -* The `clusterLabelSelector` field specifies the labels of the clusters that you want to update. This consists of a list of the standard label selectors from `k8s.io/apimachinery/pkg/apis/meta/v1`. Each selector in the list uses either label value pairs or label expressions. Matches from each selector are added to the final list of clusters along with the matches from the `clusterSelector` field and the `cluster` field. -* The `clusters` field specifies a list of clusters to update. -* The `canaries` field specifies the clusters for canary updates. 
-* The `maxConcurrency` field specifies the number of clusters to update in a batch. -* The `actions` field specifies `beforeEnable` actions that {cgu-operator} takes as it begins the update process, and `afterCompletion` actions that {cgu-operator} takes as it completes policy remediation for each cluster. - -You can use the `clusters`, `clusterLabelSelector`, and `clusterSelector` fields together to create a combined list of clusters. - -The remediation plan starts with the clusters listed in the `canaries` field. Each canary cluster forms a single-cluster batch. - -.Sample `ClusterGroupUpgrade` CR with the enabled `field` set to `false` - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -Spec: - actions: - afterCompletion: <1> - addClusterLabels: - upgrade-done: "" - deleteClusterLabels: - upgrade-running: "" - deleteObjects: true - beforeEnable: <2> - addClusterLabels: - upgrade-running: "" - backup: false - clusters: <3> - - spoke1 - enable: false <4> - managedPolicies: <5> - - talm-policy - preCaching: false - remediationStrategy: <6> - canaries: <7> - - spoke1 - maxConcurrency: 2 <8> - timeout: 240 - clusterLabelSelectors: <9> - - matchExpressions: - - key: label1 - operator: In - values: - - value1a - - value1b - batchTimeoutAction: <10> -status: <11> - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected <12> - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated <13> - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Not enabled - reason: NotEnabled - status: 'False' - type: Progressing - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - - spoke2 - - spoke3 - status: ----- -<1> Specifies the action that {cgu-operator} takes when it completes policy remediation for each cluster. -<2> Specifies the action that {cgu-operator} takes as it begins the update process. -<3> Defines the list of clusters to update. -<4> The `enable` field is set to `false`. -<5> Lists the user-defined set of policies to remediate. -<6> Defines the specifics of the cluster updates. -<7> Defines the clusters for canary updates. -<8> Defines the maximum number of concurrent updates in a batch. The number of remediation batches is the number of canary clusters, plus the number of clusters, except the canary clusters, divided by the `maxConcurrency` value. The clusters that are already compliant with all the managed policies are excluded from the remediation plan. -<9> Displays the parameters for selecting clusters. -<10> Controls what happens if a batch times out. Possible values are `abort` or `continue`. If unspecified, the default is `continue`. -<11> Displays information about the status of the updates. -<12> The `ClustersSelected` condition shows that all selected clusters are valid. -<13> The `Validated` condition shows that all selected clusters have been validated. - -[NOTE] -==== -Any failures during the update of a canary cluster stops the update process. 
-==== - -When the remediation plan is successfully created, you can you set the `enable` field to `true` and {cgu-operator} starts to update the non-compliant clusters with the specified managed policies. - -[NOTE] -==== -You can only make changes to the `spec` fields if the `enable` field of the `ClusterGroupUpgrade` CR is set to `false`. -==== - -[id="validating_{context}"] -== Validating - -{cgu-operator} checks that all specified managed policies are available and correct, and uses the `Validated` condition to report the status and reasons as follows: - -* `true` -+ -Validation is completed. -* `false` -+ -Policies are missing or invalid, or an invalid platform image has been specified. - -[id="precaching_{context}"] -== Pre-caching - -Clusters might have limited bandwidth to access the container image registry, which can cause a timeout before the updates are completed. On {sno} clusters, you can use pre-caching to avoid this. The container image pre-caching starts when you create a `ClusterGroupUpgrade` CR with the `preCaching` field set to `true`. -{cgu-operator} compares the available disk space with the estimated {product-title} image size to ensure that there is enough space. If a cluster has insufficient space, {cgu-operator} cancels pre-caching for that cluster and does not remediate policies on it. - -{cgu-operator} uses the `PrecacheSpecValid` condition to report status information as follows: - -* `true` -+ -The pre-caching spec is valid and consistent. -* `false` -+ -The pre-caching spec is incomplete. - -{cgu-operator} uses the `PrecachingSucceeded` condition to report status information as follows: - -* `true` -+ -TALM has concluded the pre-caching process. If pre-caching fails for any cluster, the update fails for that cluster but proceeds for all other clusters. A message informs you if pre-caching has failed for any clusters. -* `false` -+ -Pre-caching is still in progress for one or more clusters or has failed for all clusters. - -For more information see the "Using the container image pre-cache feature" section. - -[id="creating_backup_{context}"] -== Creating a backup - -For {sno}, {cgu-operator} can create a backup of a deployment before an update. If the update fails, you can recover the previous version and restore a cluster to a working state without requiring a reprovision of applications. To use the backup feature you first create a `ClusterGroupUpgrade` CR with the `backup` field set to `true`. To ensure that the contents of the backup are up to date, the backup is not taken until you set the `enable` field in the `ClusterGroupUpgrade` CR to `true`. - -{cgu-operator} uses the `BackupSucceeded` condition to report the status and reasons as follows: - -* `true` -+ -Backup is completed for all clusters or the backup run has completed but failed for one or more clusters. If backup fails for any cluster, the update fails for that cluster but proceeds for all other clusters. -* `false` -+ -Backup is still in progress for one or more clusters or has failed for all clusters. - -For more information, see the "Creating a backup of cluster resources before upgrade" section. - -[id="updating_clusters_{context}"] -== Updating clusters -{cgu-operator} enforces the policies following the remediation plan. -Enforcing the policies for subsequent batches starts immediately after all the clusters of the current batch are compliant with all the managed policies. If the batch times out, {cgu-operator} moves on to the next batch. 
The timeout value of a batch is the `spec.timeout` field divided by the number of batches in the remediation plan. - -{cgu-operator} uses the `Progressing` condition to report the status and reasons as follows: - -* `true` -+ -{cgu-operator} is remediating non-compliant policies. -* `false` -+ -The update is not in progress. Possible reasons for this are: -+ -** All clusters are compliant with all the managed policies. -** The update has timed out as policy remediation took too long. -** Blocking CRs are missing from the system or have not yet completed. -** The `ClusterGroupUpgrade` CR is not enabled. -** Backup is still in progress. - -[NOTE] -==== -The managed policies apply in the order that they are listed in the `managedPolicies` field in the `ClusterGroupUpgrade` CR. One managed policy is applied to the specified clusters at a time. When a cluster complies with the current policy, the next managed policy is applied to it. -==== - -.Sample `ClusterGroupUpgrade` CR in the `Progressing` state - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -Spec: - actions: - afterCompletion: - deleteObjects: true - beforeEnable: {} - backup: false - clusters: - - spoke1 - enable: true - managedPolicies: - - talm-policy - preCaching: true - remediationStrategy: - canaries: - - spoke1 - maxConcurrency: 2 - timeout: 240 - clusterLabelSelectors: - - matchExpressions: - - key: label1 - operator: In - values: - - value1a - - value1b - batchTimeoutAction: -status: - clusters: - - name: spoke1 - state: complete - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Remediating non-compliant policies - reason: InProgress - status: 'True' - type: Progressing <1> - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - - spoke2 - - spoke3 - status: - currentBatch: 2 - currentBatchRemediationProgress: - spoke2: - state: Completed - spoke3: - policyIndex: 0 - state: InProgress - currentBatchStartedAt: '2022-11-18T16:27:16Z' - startedAt: '2022-11-18T16:27:15Z' ----- -<1> The `Progressing` fields show that {cgu-operator} is in the process of remediating policies. - -[id="update_status_{context}"] -== Update status - -{cgu-operator} uses the `Succeeded` condition to report the status and reasons as follows: - -* `true` -+ -All clusters are compliant with the specified managed policies. -* `false` -+ -Policy remediation failed as there were no clusters available for remediation, or because policy remediation took too long for one of the following reasons: -+ -** The current batch contains canary updates and the cluster in the batch does not comply with all the managed policies within the batch timeout. -** Clusters did not comply with the managed policies within the `timeout` value specified in the `remediationStrategy` field. 
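The following is a minimal sketch of checking the `Succeeded` condition directly, assuming a `ClusterGroupUpgrade` CR named `talm-cgu` in the `talm-namespace` namespace, as in the samples in this section:

[source,terminal]
----
$ oc get cgu talm-cgu -n talm-namespace \
  -o jsonpath='{.status.conditions[?(@.type=="Succeeded")].status}'
----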
- - -.Sample `ClusterGroupUpgrade` CR in the `Succeeded` state - -[source,yaml] ----- - apiVersion: ran.openshift.io/v1alpha1 - kind: ClusterGroupUpgrade - metadata: - name: cgu-upgrade-complete - namespace: default - spec: - clusters: - - spoke1 - - spoke4 - enable: true - managedPolicies: - - policy1-common-cluster-version-policy - - policy2-common-pao-sub-policy - remediationStrategy: - maxConcurrency: 1 - timeout: 240 - status: <3> - clusters: - - name: spoke1 - state: complete - - name: spoke4 - state: complete - conditions: - - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: "True" - type: ClustersSelected - - message: Completed validation - reason: ValidationCompleted - status: "True" - type: Validated - - message: All clusters are compliant with all the managed policies - reason: Completed - status: "False" - type: Progressing <1> - - message: All clusters are compliant with all the managed policies - reason: Completed - status: "True" - type: Succeeded <2> - managedPoliciesForUpgrade: - - name: policy1-common-cluster-version-policy - namespace: default - - name: policy2-common-pao-sub-policy - namespace: default - remediationPlan: - - - spoke1 - - - spoke4 - status: - completedAt: '2022-11-18T16:27:16Z' - startedAt: '2022-11-18T16:27:15Z' - ----- -<1> In the `Progressing` fields, the status is `false` as the update has completed; clusters are compliant with all the managed policies. -<2> The `Succeeded` fields show that the validations completed successfully. -<3> The `status` field includes a list of clusters and their respective statuses. The status of a cluster can be `complete` or `timedout`. - -.Sample `ClusterGroupUpgrade` CR in the `timedout` state - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - creationTimestamp: '2022-11-18T16:27:15Z' - finalizers: - - ran.openshift.io/cleanup-finalizer - generation: 1 - name: talm-cgu - namespace: talm-namespace - resourceVersion: '40451823' - uid: cca245a5-4bca-45fa-89c0-aa6af81a596c -spec: - actions: - afterCompletion: - deleteObjects: true - beforeEnable: {} - backup: false - clusters: - - spoke1 - - spoke2 - enable: true - managedPolicies: - - talm-policy - preCaching: false - remediationStrategy: - maxConcurrency: 2 - timeout: 240 -status: - clusters: - - name: spoke1 - state: complete - - currentPolicy: <1> - name: talm-policy - status: NonCompliant - name: spoke2 - state: timedout - computedMaxConcurrency: 2 - conditions: - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: All selected clusters are valid - reason: ClusterSelectionCompleted - status: 'True' - type: ClustersSelected - - lastTransitionTime: '2022-11-18T16:27:15Z' - message: Completed validation - reason: ValidationCompleted - status: 'True' - type: Validated - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Policy remediation took too long - reason: TimedOut - status: 'False' - type: Progressing - - lastTransitionTime: '2022-11-18T16:37:16Z' - message: Policy remediation took too long - reason: TimedOut - status: 'False' - type: Succeeded <2> - managedPoliciesForUpgrade: - - name: talm-policy - namespace: talm-namespace - managedPoliciesNs: - talm-policy: talm-namespace - remediationPlan: - - - spoke1 - - spoke2 - status: - startedAt: '2022-11-18T16:27:15Z' - completedAt: '2022-11-18T20:27:15Z' ----- -<1> If a cluster’s state is `timedout`, the `currentPolicy` field shows the name of the policy and the policy status. 
-<2> The status for `succeeded` is `false` and the message indicates that policy remediation took too long. diff --git a/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc b/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc deleted file mode 100644 index 3e49e4543f40..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-apply-policies.adoc +++ /dev/null @@ -1,381 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-apply-policies_{context}"] -= Applying update policies to managed clusters - -You can update your managed clusters by applying your policies. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR in the `cgu-1.yaml` file. -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-1 - namespace: default -spec: - managedPolicies: <1> - - policy1-common-cluster-version-policy - - policy2-common-nto-sub-policy - - policy3-common-ptp-sub-policy - - policy4-common-sriov-sub-policy - enable: false - clusters: <2> - - spoke1 - - spoke2 - - spoke5 - - spoke6 - remediationStrategy: - maxConcurrency: 2 <3> - timeout: 240 <4> - batchTimeoutAction: <5> ----- -<1> The name of the policies to apply. -<2> The list of clusters to update. -<3> The `maxConcurrency` field signifies the number of clusters updated at the same time. -<4> The update timeout in minutes. -<5> Controls what happens if a batch times out. Possible values are `abort` or `continue`. If unspecified, the default is `continue`. - -. Create the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f cgu-1.yaml ----- - -.. Check if the `ClusterGroupUpgrade` CR was created in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME AGE STATE DETAILS -default cgu-1 8m55 NotEnabled Not Enabled ----- - -.. 
Check the status of the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-1 -ojsonpath='{.status}' | jq ----- -+ -.Example output -+ -[source,json] ----- -{ - "computedMaxConcurrency": 2, - "conditions": [ - { - "lastTransitionTime": "2022-02-25T15:34:07Z", - "message": "Not enabled", <1> - "reason": "NotEnabled", - "status": "False", - "type": "Progressing" - } - ], - "copiedPolicies": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "managedPoliciesContent": { - "policy1-common-cluster-version-policy": "null", - "policy2-common-nto-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"node-tuning-operator\",\"namespace\":\"openshift-cluster-node-tuning-operator\"}]", - "policy3-common-ptp-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"ptp-operator-subscription\",\"namespace\":\"openshift-ptp\"}]", - "policy4-common-sriov-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"sriov-network-operator-subscription\",\"namespace\":\"openshift-sriov-network-operator\"}]" - }, - "managedPoliciesForUpgrade": [ - { - "name": "policy1-common-cluster-version-policy", - "namespace": "default" - }, - { - "name": "policy2-common-nto-sub-policy", - "namespace": "default" - }, - { - "name": "policy3-common-ptp-sub-policy", - "namespace": "default" - }, - { - "name": "policy4-common-sriov-sub-policy", - "namespace": "default" - } - ], - "managedPoliciesNs": { - "policy1-common-cluster-version-policy": "default", - "policy2-common-nto-sub-policy": "default", - "policy3-common-ptp-sub-policy": "default", - "policy4-common-sriov-sub-policy": "default" - }, - "placementBindings": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "placementRules": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "precaching": { - "spec": {} - }, - "remediationPlan": [ - [ - "spoke1", - "spoke2" - ], - [ - "spoke5", - "spoke6" - ] - ], - "status": {} -} ----- -<1> The `spec.enable` field in the `ClusterGroupUpgrade` CR is set to `false`. - -.. Check the status of the policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default cgu-policy1-common-cluster-version-policy enforce 17m <1> -default cgu-policy2-common-nto-sub-policy enforce 17m -default cgu-policy3-common-ptp-sub-policy enforce 17m -default cgu-policy4-common-sriov-sub-policy enforce 17m -default policy1-common-cluster-version-policy inform NonCompliant 15h -default policy2-common-nto-sub-policy inform NonCompliant 15h -default policy3-common-ptp-sub-policy inform NonCompliant 18m -default policy4-common-sriov-sub-policy inform NonCompliant 18m ----- -<1> The `spec.remediationAction` field of policies currently applied on the clusters is set to `enforce`. The managed policies in `inform` mode from the `ClusterGroupUpgrade` CR remain in `inform` mode during the update. - -. 
Change the value of the `spec.enable` field to `true` by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-1 \ ---patch '{"spec":{"enable":true}}' --type=merge ----- - -.Verification - -. Check the status of the update again by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-1 -ojsonpath='{.status}' | jq ----- -+ -.Example output -+ -[source,json] ----- -{ - "computedMaxConcurrency": 2, - "conditions": [ <1> - { - "lastTransitionTime": "2022-02-25T15:33:07Z", - "message": "All selected clusters are valid", - "reason": "ClusterSelectionCompleted", - "status": "True", - "type": "ClustersSelected", - "lastTransitionTime": "2022-02-25T15:33:07Z", - "message": "Completed validation", - "reason": "ValidationCompleted", - "status": "True", - "type": "Validated", - "lastTransitionTime": "2022-02-25T15:34:07Z", - "message": "Remediating non-compliant policies", - "reason": "InProgress", - "status": "True", - "type": "Progressing" - } - ], - "copiedPolicies": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "managedPoliciesContent": { - "policy1-common-cluster-version-policy": "null", - "policy2-common-nto-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"node-tuning-operator\",\"namespace\":\"openshift-cluster-node-tuning-operator\"}]", - "policy3-common-ptp-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"ptp-operator-subscription\",\"namespace\":\"openshift-ptp\"}]", - "policy4-common-sriov-sub-policy": "[{\"kind\":\"Subscription\",\"name\":\"sriov-network-operator-subscription\",\"namespace\":\"openshift-sriov-network-operator\"}]" - }, - "managedPoliciesForUpgrade": [ - { - "name": "policy1-common-cluster-version-policy", - "namespace": "default" - }, - { - "name": "policy2-common-nto-sub-policy", - "namespace": "default" - }, - { - "name": "policy3-common-ptp-sub-policy", - "namespace": "default" - }, - { - "name": "policy4-common-sriov-sub-policy", - "namespace": "default" - } - ], - "managedPoliciesNs": { - "policy1-common-cluster-version-policy": "default", - "policy2-common-nto-sub-policy": "default", - "policy3-common-ptp-sub-policy": "default", - "policy4-common-sriov-sub-policy": "default" - }, - "placementBindings": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "placementRules": [ - "cgu-policy1-common-cluster-version-policy", - "cgu-policy2-common-nto-sub-policy", - "cgu-policy3-common-ptp-sub-policy", - "cgu-policy4-common-sriov-sub-policy" - ], - "precaching": { - "spec": {} - }, - "remediationPlan": [ - [ - "spoke1", - "spoke2" - ], - [ - "spoke5", - "spoke6" - ] - ], - "status": { - "currentBatch": 1, - "currentBatchStartedAt": "2022-02-25T15:54:16Z", - "remediationPlanForBatch": { - "spoke1": 0, - "spoke2": 1 - }, - "startedAt": "2022-02-25T15:54:16Z" - } -} ----- -<1> Reflects the update progress of the current batch. Run this command again to receive updated information about the progress. - -. If the policies include Operator subscriptions, you can check the installation progress directly on the single-node cluster. - -.. 
Export the `KUBECONFIG` file of the single-node cluster you want to check the installation progress for by running the following command: -+ -[source,terminal] ----- -$ export KUBECONFIG= ----- - -.. Check all the subscriptions present on the single-node cluster and look for the one in the policy you are trying to install through the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc get subs -A | grep -i ----- -+ -.Example output for `cluster-logging` policy -+ -[source,terminal] ----- -NAMESPACE NAME PACKAGE SOURCE CHANNEL -openshift-logging cluster-logging cluster-logging redhat-operators stable ----- - -. If one of the managed policies includes a `ClusterVersion` CR, check the status of platform updates in the current batch by running the following command against the spoke cluster: -+ -[source,terminal] ----- -$ oc get clusterversion ----- -+ -.Example output -+ -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.9.5 True True 43s Working towards 4.9.7: 71 of 735 done (9% complete) ----- - -. Check the Operator subscription by running the following command: -+ -[source,terminal] ----- -$ oc get subs -n -ojsonpath="{.status}" ----- - -. Check the install plans present on the single-node cluster that is associated with the desired subscription by running the following command: -+ -[source,terminal] ----- -$ oc get installplan -n ----- -+ -.Example output for `cluster-logging` Operator -+ -[source,terminal] ----- -NAMESPACE NAME CSV APPROVAL APPROVED -openshift-logging install-6khtw cluster-logging.5.3.3-4 Manual true <1> ----- -<1> The install plans have their `Approval` field set to `Manual` and their `Approved` field changes from `false` to `true` after {cgu-operator} approves the install plan. -+ -[NOTE] -==== -When {cgu-operator} is remediating a policy containing a subscription, it automatically approves any install plans attached to that subscription. -Where multiple install plans are needed to get the operator to the latest known version, {cgu-operator} might approve multiple install plans, upgrading through one or more intermediate versions to get to the final version. -==== - -. Check if the cluster service version for the Operator of the policy that the `ClusterGroupUpgrade` is installing reached the `Succeeded` phase by running the following command: -+ -[source,terminal] ----- -$ oc get csv -n ----- -+ -.Example output for OpenShift Logging Operator -+ -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -cluster-logging.5.4.2 Red Hat OpenShift Logging 5.4.2 Succeeded ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc b/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc deleted file mode 100644 index 9507019846cc..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-autocreate-cgu-cr-ztp.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-precache-autocreated-cgu-for-ztp_{context}"] -= About the auto-created ClusterGroupUpgrade CR for {ztp} - -{cgu-operator} has a controller called `ManagedClusterForCGU` that monitors the `Ready` state of the `ManagedCluster` CRs on the hub cluster and creates the `ClusterGroupUpgrade` CRs for {ztp-first}. 
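As a quick way to see what this controller produces, you can list the `ManagedCluster` CRs together with their {ztp} progress labels and the auto-created `ClusterGroupUpgrade` CRs on the hub cluster. This is a minimal sketch that assumes the auto-created CRs are in the `ztp-install` namespace described below:

[source,terminal]
----
# Managed clusters with the ztp-done and ztp-running labels shown as columns
$ oc get managedcluster -L ztp-done,ztp-running

# ClusterGroupUpgrade CRs created by the ManagedClusterForCGU controller
$ oc get cgu -n ztp-install
----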
- -For any managed cluster in the `Ready` state without a `ztp-done` label applied, the `ManagedClusterForCGU` controller automatically creates a `ClusterGroupUpgrade` CR in the `ztp-install` namespace with its associated {rh-rhacm} policies that are created during the {ztp} process. {cgu-operator} then remediates the set of configuration policies that are listed in the auto-created `ClusterGroupUpgrade` CR to push the configuration CRs to the managed cluster. - -If there are no policies for the managed cluster at the time when the cluster becomes `Ready`, a `ClusterGroupUpgrade` CR with no policies is created. Upon completion of the `ClusterGroupUpgrade` the managed cluster is labeled as `ztp-done`. If there are policies that you want to apply for that managed cluster, manually create a `ClusterGroupUpgrade` as a day-2 operation. - -.Example of an auto-created `ClusterGroupUpgrade` CR for {ztp} - -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - generation: 1 - name: spoke1 - namespace: ztp-install - ownerReferences: - - apiVersion: cluster.open-cluster-management.io/v1 - blockOwnerDeletion: true - controller: true - kind: ManagedCluster - name: spoke1 - uid: 98fdb9b2-51ee-4ee7-8f57-a84f7f35b9d5 - resourceVersion: "46666836" - uid: b8be9cd2-764f-4a62-87d6-6b767852c7da -spec: - actions: - afterCompletion: - addClusterLabels: - ztp-done: "" <1> - deleteClusterLabels: - ztp-running: "" - deleteObjects: true - beforeEnable: - addClusterLabels: - ztp-running: "" <2> - clusters: - - spoke1 - enable: true - managedPolicies: - - common-spoke1-config-policy - - common-spoke1-subscriptions-policy - - group-spoke1-config-policy - - spoke1-config-policy - - group-spoke1-validator-du-policy - preCaching: false - remediationStrategy: - maxConcurrency: 1 - timeout: 240 ----- -<1> Applied to the managed cluster when {cgu-operator} completes the cluster configuration. -<2> Applied to the managed cluster when {cgu-operator} starts deploying the configuration policies. diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc deleted file mode 100644 index d0abba993491..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-concept.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-backup-feature-concept_{context}"] -= Creating a backup of cluster resources before upgrade - -For {sno}, the {cgu-operator-first} can create a backup of a deployment before an upgrade. If the upgrade fails, you can recover the previous version and restore a cluster to a working state without requiring a reprovision of applications. - -To use the backup feature you first create a `ClusterGroupUpgrade` CR with the `backup` field set to `true`. To ensure that the contents of the backup are up to date, the backup is not taken until you set the `enable` field in the `ClusterGroupUpgrade` CR to `true`. - -{cgu-operator} uses the `BackupSucceeded` condition to report the status and reasons as follows: - -* `true` -+ -Backup is completed for all clusters or the backup run has completed but failed for one or more clusters. If backup fails for any cluster, the update does not proceed for that cluster. 
-* `false` -+ -Backup is still in progress for one or more clusters or has failed for all clusters. The backup process running in the spoke clusters can have the following statuses: -+ -** `PreparingToStart` -+ -The first reconciliation pass is in progress. The {cgu-operator} deletes any spoke backup namespace and hub view resources that have been created in a failed upgrade attempt. -** `Starting` -+ -The backup prerequisites and backup job are being created. -** `Active` -+ -The backup is in progress. -** `Succeeded` -+ -The backup succeeded. -** `BackupTimeout` -+ -Artifact backup is partially done. -** `UnrecoverableError` -+ -The backup has ended with a non-zero exit code. - -[NOTE] -==== -If the backup of a cluster fails and enters the `BackupTimeout` or `UnrecoverableError` state, the cluster update does not proceed for that cluster. Updates to other clusters are not affected and continue. -==== diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc deleted file mode 100644 index f1f6baae6f82..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-feature.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-backup-start_and_update_{context}"] -= Creating a ClusterGroupUpgrade CR with backup - -You can create a backup of a deployment before an upgrade on {sno} clusters. If the upgrade fails you can use the `upgrade-recovery.sh` script generated by {cgu-operator-first} to return the system to its preupgrade state. -The backup consists of the following items: - -Cluster backup:: A snapshot of `etcd` and static pod manifests. -Content backup:: Backups of folders, for example, `/etc`, `/usr/local`, `/var/lib/kubelet`. -Changed files backup:: Any files managed by `machine-config` that have been changed. -Deployment:: A pinned `ostree` deployment. -Images (Optional):: Any container images that are in use. - - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. -* Install {rh-rhacm-first}. - -[NOTE] -==== -It is highly recommended that you create a recovery partition. -The following is an example `SiteConfig` custom resource (CR) for a recovery partition of 50 GB: - -[source,yaml] ----- -nodes: - - hostName: "snonode.sno-worker-0.e2e.bos.redhat.com" - role: "master" - rootDeviceHints: - hctl: "0:2:0:0" - deviceName: /dev/sda -........ -........ - #Disk /dev/sda: 893.3 GiB, 959119884288 bytes, 1873281024 sectors - diskPartition: - - device: /dev/sda - partitions: - - mount_point: /var/recovery - size: 51200 - start: 800000 ----- -==== - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR with the `backup` and `enable` fields set to `true` in the `clustergroupupgrades-group-du.yaml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: du-upgrade-4918 - namespace: ztp-group-du-sno -spec: - preCaching: true - backup: true - clusters: - - cnfdb1 - - cnfdb2 - enable: true - managedPolicies: - - du-upgrade-platform-upgrade - remediationStrategy: - maxConcurrency: 2 - timeout: 240 ----- - -. 
To start the update, apply the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f clustergroupupgrades-group-du.yaml ----- - -.Verification - -* Check the status of the upgrade in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -{ - "backup": { - "clusters": [ - "cnfdb2", - "cnfdb1" - ], - "status": { - "cnfdb1": "Succeeded", - "cnfdb2": "Failed" <1> - } -}, -"computedMaxConcurrency": 1, -"conditions": [ - { - "lastTransitionTime": "2022-04-05T10:37:19Z", - "message": "Backup failed for 1 cluster", <2> - "reason": "PartiallyDone", <3> - "status": "True", <4> - "type": "Succeeded" - } -], -"precaching": { - "spec": {} -}, -"status": {} ----- -<1> Backup has failed for one cluster. -<2> The message confirms that the backup failed for one cluster. -<3> The backup was partially successful. -<4> The backup process has finished. diff --git a/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc b/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc deleted file mode 100644 index 0c5ee05c1881..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-backup-recovery.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3901 (CNF-2133) (4.11), Story TELCODOCS-339 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-backup-recovery_{context}"] -= Recovering a cluster after a failed upgrade - -If an upgrade of a cluster fails, you can manually log in to the cluster and use the backup to return the cluster to its preupgrade state. There are two stages: - -Rollback:: If the attempted upgrade included a change to the platform OS deployment, you must roll back to the previous version before running the recovery script. - -[IMPORTANT] -==== -A rollback is only applicable to upgrades from TALM and single-node OpenShift. This process does not apply to rollbacks from any other upgrade type. -==== - -Recovery:: The recovery shuts down containers and uses files from the backup partition to relaunch containers and restore clusters. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Install {rh-rhacm-first}. -* Log in as a user with `cluster-admin` privileges. -* Run an upgrade that is configured for backup. - -.Procedure - -. Delete the previously created `ClusterGroupUpgrade` custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc delete cgu/du-upgrade-4918 -n ztp-group-du-sno ----- - -. Log in to the cluster that you want to recover. - -. Check the status of the platform OS deployment by running the following command: -+ -[source,terminal] ----- -$ ostree admin status ----- -.Example outputs -+ -[source,terminal] ----- -[root@lab-test-spoke2-node-0 core]# ostree admin status -* rhcos c038a8f08458bbed83a77ece033ad3c55597e3f64edad66ea12fda18cbdceaf9.0 - Version: 49.84.202202230006-0 - Pinned: yes <1> - origin refspec: c038a8f08458bbed83a77ece033ad3c55597e3f64edad66ea12fda18cbdceaf9 ----- -<1> The current deployment is pinned. A platform OS deployment rollback is not necessary. 
-+ -[source,terminal] ----- -[root@lab-test-spoke2-node-0 core]# ostree admin status -* rhcos f750ff26f2d5550930ccbe17af61af47daafc8018cd9944f2a3a6269af26b0fa.0 - Version: 410.84.202204050541-0 - origin refspec: f750ff26f2d5550930ccbe17af61af47daafc8018cd9944f2a3a6269af26b0fa -rhcos ad8f159f9dc4ea7e773fd9604c9a16be0fe9b266ae800ac8470f63abc39b52ca.0 (rollback) <1> - Version: 410.84.202203290245-0 - Pinned: yes <2> - origin refspec: ad8f159f9dc4ea7e773fd9604c9a16be0fe9b266ae800ac8470f63abc39b52ca ----- -<1> This platform OS deployment is marked for rollback. -<2> The previous deployment is pinned and can be rolled back. - -. To trigger a rollback of the platform OS deployment, run the following command: -+ -[source,terminal] ----- -$ rpm-ostree rollback -r ----- - -. The first phase of the recovery shuts down containers and restores files from the backup partition to the targeted directories. To begin the recovery, run the following command: -+ -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh ----- -+ - -. When prompted, reboot the cluster by running the following command: -+ -[source,terminal] ----- -$ systemctl reboot ----- -. After the reboot, restart the recovery by running the following command: -+ -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh --resume ----- - -[NOTE] -==== -If the recovery utility fails, you can retry with the `--restart` option: -[source,terminal] ----- -$ /var/recovery/upgrade-recovery.sh --restart ----- -==== - -.Verification -* To check the status of the recovery run the following command: -+ -[source,terminal] ----- -$ oc get clusterversion,nodes,clusteroperator ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -clusterversion.config.openshift.io/version 4.9.23 True False 86d Cluster version is 4.9.23 <1> - - -NAME STATUS ROLES AGE VERSION -node/lab-test-spoke1-node-0 Ready master,worker 86d v1.22.3+b93fd35 <2> - -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE -clusteroperator.config.openshift.io/authentication 4.9.23 True False False 2d7h <3> -clusteroperator.config.openshift.io/baremetal 4.9.23 True False False 86d - - -.............. ----- -<1> The cluster version is available and has the correct version. -<2> The node status is `Ready`. -<3> The `ClusterOperator` object's availability is `True`. diff --git a/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc b/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc deleted file mode 100644 index 8980d8a04b27..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-installation-cli.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="installing-topology-aware-lifecycle-manager-using-cli_{context}"] -= Installing the {cgu-operator-full} by using the CLI - -You can use the OpenShift CLI (`oc`) to install the {cgu-operator-first}. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Install the latest version of the {rh-rhacm} Operator. -* Set up a hub cluster with disconnected registry. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Subscription` CR: -.. 
Define the `Subscription` CR and save the YAML file, for example, `talm-subscription.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: openshift-topology-aware-lifecycle-manager-subscription - namespace: openshift-operators -spec: - channel: "stable" - name: topology-aware-lifecycle-manager - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the `Subscription` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f talm-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n openshift-operators ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -topology-aware-lifecycle-manager.{product-version}.x Topology Aware Lifecycle Manager {product-version}.x Succeeded ----- - -. Verify that the {cgu-operator} is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-operators ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE -openshift-operators cluster-group-upgrades-controller-manager 1/1 1 1 14s ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc b/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc deleted file mode 100644 index 88ea0d4d24e3..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-installation-web-console.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="installing-topology-aware-lifecycle-manager-using-web-console_{context}"] -= Installing the {cgu-operator-full} by using the web console - -You can use the {product-title} web console to install the {cgu-operator-full}. - -.Prerequisites - -// Based on polarion test cases - -* Install the latest version of the {rh-rhacm} Operator. -* Set up a hub cluster with disconnected regitry. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the *{cgu-operator-full}* from the list of available Operators, and then click *Install*. -. Keep the default selection of *Installation mode* ["All namespaces on the cluster (default)"] and *Installed Namespace* ("openshift-operators") to ensure that the Operator is installed properly. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `All Namespaces` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any containers in the `cluster-group-upgrades-controller-manager` pod that are reporting issues. 
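For example, a minimal way to follow that last troubleshooting step from the command line, assuming the Operator was installed with the defaults into the `openshift-operators` namespace, is to read the controller logs directly. The deployment name matches the one shown in the CLI installation verification:

[source,terminal]
----
# Find the controller pod and read its logs, including all containers in the pod
$ oc get pods -n openshift-operators | grep cluster-group-upgrades-controller-manager
$ oc logs -n openshift-operators deployment/cluster-group-upgrades-controller-manager --all-containers
----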
diff --git a/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc deleted file mode 100644 index c3bd2729b31b..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-operator-and-platform-update.adoc +++ /dev/null @@ -1,136 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-operator-and-platform-update_{context}"] -= Performing a platform and an Operator update together - -You can perform a platform and an Operator update at the same time. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Create the `PolicyGenTemplate` CR for the updates by following the steps described in the "Performing a platform update" and "Performing an Operator update" sections. - -. Apply the prep work for the platform and the Operator update. - -.. Save the content of the `ClusterGroupUpgrade` CR with the policies for platform update preparation work, catalog source updates, and target clusters to the `cgu-platform-operator-upgrade-prep.yml` file, for example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-operator-upgrade-prep - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade-prep - - du-upgrade-operator-catsrc-policy - clusterSelector: - - group-du-sno - remediationStrategy: - maxConcurrency: 10 - enable: true ----- - -.. Apply the `cgu-platform-operator-upgrade-prep.yml` file to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-operator-upgrade-prep.yml ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- - -. Create the `ClusterGroupUpdate` CR for the platform and the Operator update with the `spec.enable` field set to `false`. -.. Save the contents of the platform and Operator update `ClusterGroupUpdate` CR with the policies and the target clusters to the `cgu-platform-operator-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-du-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade <1> - - du-upgrade-operator-catsrc-policy <2> - - common-subscriptions-policy <3> - preCaching: true - clusterSelector: - - group-du-sno - remediationStrategy: - maxConcurrency: 1 - enable: false ----- -<1> This is the platform update policy. -<2> This is the policy containing the catalog source information for the Operators to be updated. It is needed for the pre-caching feature to determine which Operator images to download to the managed cluster. -<3> This is the policy to update the Operators. - -.. Apply the `cgu-platform-operator-upgrade.yml` file to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-operator-upgrade.yml ----- - -. Optional: Pre-cache the images for the platform and the Operator update. -.. 
Enable pre-caching in the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-du-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the update process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the managed cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talm-pre-cache ----- - -.. Check if the pre-caching is completed before starting the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu cgu-du-upgrade -ojsonpath='{.status.conditions}' ----- - -. Start the platform and Operator update. -.. Enable the `cgu-du-upgrade` `ClusterGroupUpgrade` CR to start the platform and the Operator update by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-du-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -[NOTE] -==== -The CRs for the platform and Operator updates can be created from the beginning by configuring the setting to `spec.enable: true`. In this case, the update starts immediately after pre-caching completes and there is no need to manually enable the CR. - -Both pre-caching and the update create extra resources, such as policies, placement bindings, placement rules, managed cluster actions, and managed cluster view, to help complete the procedures. Setting the `afterCompletion.deleteObjects` field to `true` deletes all these resources after the updates complete. -==== diff --git a/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc deleted file mode 100644 index 8cf75b7e0ba8..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-operator-update.adoc +++ /dev/null @@ -1,263 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-operator-update_{context}"] -= Performing an Operator update - -You can perform an Operator update with the {cgu-operator}. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Mirror the desired index image, bundle images, and all Operator images referenced in the bundle images. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Update the `PolicyGenTemplate` CR for the Operator update. -.. 
Update the `du-upgrade` `PolicyGenTemplate` CR with the following additional contents in the `du-upgrade.yaml` file: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - - fileName: DefaultCatsrc.yaml - remediationAction: inform - policyName: "operator-catsrc-policy" - metadata: - name: redhat-operators - spec: - displayName: Red Hat Operators Catalog - image: registry.example.com:5000/olm/redhat-operators:v{product-version} <1> - updateStrategy: <2> - registryPoll: - interval: 1h ----- -<1> The index image URL contains the desired Operator images. If the index images are always pushed to the same image name and tag, this change is not needed. -<2> Set how frequently the Operator Lifecycle Manager (OLM) polls the index image for new Operator versions with the `registryPoll.interval` field. This change is not needed if a new index image tag is always pushed for y-stream and z-stream Operator updates. The `registryPoll.interval` field can be set to a shorter interval to expedite the update, however shorter intervals increase computational load. To counteract this, you can restore `registryPoll.interval` to the default value once the update is complete. - - -.. This update generates one policy, `du-upgrade-operator-catsrc-policy`, to update the `redhat-operators` catalog source with the new index images that contain the desired Operators images. -+ -[NOTE] -==== -If you want to use the image pre-caching for Operators and there are Operators from a different catalog source other than `redhat-operators`, you must perform the following tasks: - -* Prepare a separate catalog source policy with the new index image or registry poll interval update for the different catalog source. -* Prepare a separate subscription policy for the desired Operators that are from the different catalog source. -==== -+ -For example, the desired SRIOV-FEC Operator is available in the `certified-operators` catalog source. To update the catalog source and the Operator subscription, add the following contents to generate two policies, `du-upgrade-fec-catsrc-policy` and `du-upgrade-subscriptions-fec-policy`: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - … - - fileName: DefaultCatsrc.yaml - remediationAction: inform - policyName: "fec-catsrc-policy" - metadata: - name: certified-operators - spec: - displayName: Intel SRIOV-FEC Operator - image: registry.example.com:5000/olm/far-edge-sriov-fec:v4.10 - updateStrategy: - registryPoll: - interval: 10m - - fileName: AcceleratorsSubscription.yaml - policyName: "subscriptions-fec-policy" - spec: - channel: "stable" - source: certified-operators ----- - -.. Remove the specified subscriptions channels in the common `PolicyGenTemplate` CR, if they exist. The default subscriptions channels from the {ztp} image are used for the update. -+ -[NOTE] -==== -The default channel for the Operators applied through {ztp} {product-version} is `stable`, except for the `performance-addon-operator`. As of {product-title} 4.11, the `performance-addon-operator` functionality was moved to the `node-tuning-operator`. For the 4.10 release, the default channel for PAO is `v4.10`. 
You can also specify the default channels in the common `PolicyGenTemplate` CR. -==== - -.. Push the `PolicyGenTemplate` CRs updates to the {ztp} Git repository. -+ -ArgoCD pulls the changes from the Git repository and generates the policies on the hub cluster. - -.. Check the created policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep -E "catsrc-policy|subscription" ----- - -. Apply the required catalog source updates before starting the Operator update. - -.. Save the content of the `ClusterGroupUpgrade` CR named `operator-upgrade-prep` with the catalog source policies and the target managed clusters to the `cgu-operator-upgrade-prep.yml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-operator-upgrade-prep - namespace: default -spec: - clusters: - - spoke1 - enable: true - managedPolicies: - - du-upgrade-operator-catsrc-policy - remediationStrategy: - maxConcurrency: 1 ----- - -.. Apply the policy to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-operator-upgrade-prep.yml ----- - -.. Monitor the update process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep -E "catsrc-policy" ----- - -. Create the `ClusterGroupUpgrade` CR for the Operator update with the `spec.enable` field set to `false`. -.. Save the content of the Operator update `ClusterGroupUpgrade` CR with the `du-upgrade-operator-catsrc-policy` policy and the subscription policies created from the common `PolicyGenTemplate` and the target clusters to the `cgu-operator-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-operator-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-operator-catsrc-policy <1> - - common-subscriptions-policy <2> - preCaching: false - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: false ----- -<1> The policy is needed by the image pre-caching feature to retrieve the operator images from the catalog source. -<2> The policy contains Operator subscriptions. If you have followed the structure and content of the reference `PolicyGenTemplates`, all Operator subscriptions are grouped into the `common-subscriptions-policy` policy. -+ -[NOTE] -==== -One `ClusterGroupUpgrade` CR can only pre-cache the images of the desired Operators defined in the subscription policy from one catalog source included in the `ClusterGroupUpgrade` CR. If the desired Operators are from different catalog sources, such as in the example of the SRIOV-FEC Operator, another `ClusterGroupUpgrade` CR must be created with `du-upgrade-fec-catsrc-policy` and `du-upgrade-subscriptions-fec-policy` policies for the SRIOV-FEC Operator images pre-caching and update. -==== - -.. Apply the `ClusterGroupUpgrade` CR to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-operator-upgrade.yml ----- - -. Optional: Pre-cache the images for the Operator update. - -.. 
Before starting image pre-caching, verify the subscription policy is `NonCompliant` at this point by running the following command: -+ -[source,terminal] ----- -$ oc get policy common-subscriptions-policy -n ----- -+ -.Example output -+ -[source,terminal] ----- -NAME REMEDIATION ACTION COMPLIANCE STATE AGE -common-subscriptions-policy inform NonCompliant 27d ----- - -.. Enable pre-caching in the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-operator-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the managed cluster: -+ -[source,terminal] ----- -$ oc get cgu cgu-operator-upgrade -o jsonpath='{.status.precaching.status}' ----- - -.. Check if the pre-caching is completed before starting the update by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n default cgu-operator-upgrade -ojsonpath='{.status.conditions}' | jq ----- -+ -.Example output -+ -[source,json] ----- -[ - { - "lastTransitionTime": "2022-03-08T20:49:08.000Z", - "message": "The ClusterGroupUpgrade CR is not enabled", - "reason": "UpgradeNotStarted", - "status": "False", - "type": "Ready" - }, - { - "lastTransitionTime": "2022-03-08T20:55:30.000Z", - "message": "Precaching is completed", - "reason": "PrecachingCompleted", - "status": "True", - "type": "PrecachingDone" - } -] ----- - -. Start the Operator update. - -.. Enable the `cgu-operator-upgrade` `ClusterGroupUpgrade` CR and disable pre-caching to start the Operator update by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-operator-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc deleted file mode 100644 index 796c2e4c5c0f..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-pao-update.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talm-pao-update_{context}"] -= Removing Performance Addon Operator subscriptions from deployed clusters - -In earlier versions of {product-title}, the Performance Addon Operator provided automatic, low latency performance tuning for applications. In {product-title} 4.11 or later, these functions are part of the Node Tuning Operator. - -Do not install the Performance Addon Operator on clusters running {product-title} 4.11 or later. If you upgrade to {product-title} 4.11 or later, the Node Tuning Operator automatically removes the Performance Addon Operator. - -[NOTE] -==== -You need to remove any policies that create Performance Addon Operator subscriptions to prevent a re-installation of the Operator. -==== - -The reference DU profile includes the Performance Addon Operator in the `PolicyGenTemplate` CR `common-ranGen.yaml`. To remove the subscription from deployed managed clusters, you must update `common-ranGen.yaml`. 
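Before and after you roll out this change, you might want to confirm whether a Performance Addon Operator subscription is still present on a given spoke cluster. The following is a sketch only; it assumes that the spoke cluster `KUBECONFIG` is exported and that the subscription name contains the string `performance-addon`:

[source,terminal]
----
# List any Performance Addon Operator subscriptions remaining on the spoke cluster
$ oc get subscriptions.operators.coreos.com -A | grep -i performance-addon
----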
- -[NOTE] -==== -If you install Performance Addon Operator 4.10.3-5 or later on {product-title} 4.11 or later, the Performance Addon Operator detects the cluster version and automatically hibernates to avoid interfering with the Node Tuning Operator functions. However, to ensure best performance, remove the Performance Addon Operator from your {product-title} 4.11 clusters. -==== - -.Prerequisites - -* Create a Git repository where you manage your custom site configuration data. The repository must be accessible from the hub cluster and be defined as a source repository for ArgoCD. - -* Update to {product-title} 4.11 or later. - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Change the `complianceType` to `mustnothave` for the Performance Addon Operator namespace, Operator group, and subscription in the `common-ranGen.yaml` file. -+ -[source,yaml] ----- - - fileName: PaoSubscriptionNS.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave - - fileName: PaoSubscriptionOperGroup.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave - - fileName: PaoSubscription.yaml - policyName: "subscriptions-policy" - complianceType: mustnothave ----- - -. Merge the changes with your custom site repository and wait for the ArgoCD application to synchronize the change to the hub cluster. The status of the `common-subscriptions-policy` policy changes to `Non-Compliant`. - -. Apply the change to your target clusters by using the {cgu-operator-full}. For more information about rolling out configuration changes, see the "Additional resources" section. - -. Monitor the process. When the status of the `common-subscriptions-policy` policy for a target cluster is `Compliant`, the Performance Addon Operator has been removed from the cluster. Get the status of the `common-subscriptions-policy` by running the following command: -+ -[source,terminal] ----- -$ oc get policy -n ztp-common common-subscriptions-policy ----- - -. Delete the Performance Addon Operator namespace, Operator group and subscription CRs from `.spec.sourceFiles` in the `common-ranGen.yaml` file. - -. Merge the changes with your custom site repository and wait for the ArgoCD application to synchronize the change to the hub cluster. The policy remains compliant. diff --git a/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc b/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc deleted file mode 100644 index e25a1d94db4f..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-platform-update.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-platform-update_{context}"] -= Performing a platform update - -You can perform a platform update with the {cgu-operator}. - -.Prerequisites - -* Install the {cgu-operator-first}. -* Update {ztp-first} to the latest version. -* Provision one or more managed clusters with {ztp}. -* Mirror the desired image repository. -* Log in as a user with `cluster-admin` privileges. -* Create {rh-rhacm} policies in the hub cluster. - -.Procedure - -. Create a `PolicyGenTemplate` CR for the platform update: -.. Save the following contents of the `PolicyGenTemplate` CR in the `du-upgrade.yaml` file. 
-+ -.Example of `PolicyGenTemplate` for platform update -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: ran.openshift.io/v1 -kind: PolicyGenTemplate -metadata: - name: "du-upgrade" - namespace: "ztp-group-du-sno" -spec: - bindingRules: - group-du-sno: "" - mcp: "master" - remediationAction: inform - sourceFiles: - - fileName: ImageSignature.yaml <1> - policyName: "platform-upgrade-prep" - binaryData: - ${DIGEST_ALGO}-${DIGEST_ENCODED}: ${SIGNATURE_BASE64} <2> - - fileName: DisconnectedICSP.yaml - policyName: "platform-upgrade-prep" - metadata: - name: disconnected-internal-icsp-for-ocp - spec: - repositoryDigestMirrors: <3> - - mirrors: - - quay-intern.example.com/ocp4/openshift-release-dev - source: quay.io/openshift-release-dev/ocp-release - - mirrors: - - quay-intern.example.com/ocp4/openshift-release-dev - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev - - fileName: ClusterVersion.yaml <4> - policyName: "platform-upgrade-prep" - metadata: - name: version - annotations: - ran.openshift.io/ztp-deploy-wave: "1" - spec: - channel: "stable-{product-version}" - upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} - - fileName: ClusterVersion.yaml <5> - policyName: "platform-upgrade" - metadata: - name: version - spec: - channel: "stable-{product-version}" - upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} - desiredUpdate: - version: {product-version}.4 - status: - history: - - version: {product-version}.4 - state: "Completed" ----- -<1> The `ConfigMap` CR contains the signature of the desired release image to update to. -<2> Shows the image signature of the desired {product-title} release. Get the signature from the `checksum-${OCP_RELASE_NUMBER}.yaml` file you saved when following the procedures in the "Setting up the environment" section. -<3> Shows the mirror repository that contains the desired {product-title} image. Get the mirrors from the `imageContentSources.yaml` file that you saved when following the procedures in the "Setting up the environment" section. -<4> Shows the `ClusterVersion` CR to update upstream. -<5> Shows the `ClusterVersion` CR to trigger the update. The `channel`, `upstream`, and `desiredVersion` fields are all required for image pre-caching. -+ -The `PolicyGenTemplate` CR generates two policies: - -* The `du-upgrade-platform-upgrade-prep` policy does the preparation work for the platform update. It creates the `ConfigMap` CR for the desired release image signature, creates the image content source of the mirrored release image repository, and updates the cluster version with the desired update channel and the update graph reachable by the managed cluster in the disconnected environment. - -* The `du-upgrade-platform-upgrade` policy is used to perform platform upgrade. - -.. Add the `du-upgrade.yaml` file contents to the `kustomization.yaml` file located in the {ztp} Git repository for the `PolicyGenTemplate` CRs and push the changes to the Git repository. -+ -ArgoCD pulls the changes from the Git repository and generates the policies on the hub cluster. - -.. Check the created policies by running the following command: -+ -[source,terminal] ----- -$ oc get policies -A | grep platform-upgrade ----- - -. Apply the required update resources before starting the platform update with the {cgu-operator}. - -.. 
Save the content of the `platform-upgrade-prep` `ClusterUpgradeGroup` CR with the `du-upgrade-platform-upgrade-prep` policy and the target managed clusters to the `cgu-platform-upgrade-prep.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-upgrade-prep - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade-prep - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: true ----- - -.. Apply the policy to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-upgrade-prep.yml ----- - -.. Monitor the update process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- - -. Create the `ClusterGroupUpdate` CR for the platform update with the `spec.enable` field set to `false`. - -.. Save the content of the platform update `ClusterGroupUpdate` CR with the `du-upgrade-platform-upgrade` policy and the target clusters to the `cgu-platform-upgrade.yml` file, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: cgu-platform-upgrade - namespace: default -spec: - managedPolicies: - - du-upgrade-platform-upgrade - preCaching: false - clusters: - - spoke1 - remediationStrategy: - maxConcurrency: 1 - enable: false ----- - -.. Apply the `ClusterGroupUpdate` CR to the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cgu-platform-upgrade.yml ----- - -. Optional: Pre-cache the images for the platform update. -.. Enable pre-caching in the `ClusterGroupUpdate` CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-platform-upgrade \ ---patch '{"spec":{"preCaching": true}}' --type=merge ----- - -.. Monitor the update process and wait for the pre-caching to complete. Check the status of pre-caching by running the following command on the hub cluster: -+ -[source,terminal] ----- -$ oc get cgu cgu-platform-upgrade -o jsonpath='{.status.precaching.status}' ----- - -. Start the platform update: -.. Enable the `cgu-platform-upgrade` policy and disable pre-caching by running the following command: -+ -[source,terminal] ----- -$ oc --namespace=default patch clustergroupupgrade.ran.openshift.io/cgu-platform-upgrade \ ---patch '{"spec":{"enable":true, "preCaching": false}}' --type=merge ----- - -.. Monitor the process. Upon completion, ensure that the policy is compliant by running the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- diff --git a/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc deleted file mode 100644 index f707f204b538..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-policies-concept.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-policies-concept_{context}"] -= Update policies on managed clusters - -The {cgu-operator-first} remediates a set of `inform` policies for the clusters specified in the `ClusterGroupUpgrade` CR. 
{cgu-operator} remediates `inform` policies by making `enforce` copies of the managed {rh-rhacm} policies. Each copied policy has its own corresponding {rh-rhacm} placement rule and {rh-rhacm} placement binding. - -One by one, {cgu-operator} adds each cluster from the current batch to the placement rule that corresponds with the applicable managed policy. If a cluster is already compliant with a policy, {cgu-operator} skips applying that policy on the compliant cluster. {cgu-operator} then moves on to applying the next policy to the non-compliant cluster. After {cgu-operator} completes the updates in a batch, all clusters are removed from the placement rules associated with the copied policies. Then, the update of the next batch starts. - -If a spoke cluster does not report any compliant state to {rh-rhacm}, the managed policies on the hub cluster can be missing status information that {cgu-operator} needs. {cgu-operator} handles these cases in the following ways: - -* If a policy's `status.compliant` field is missing, {cgu-operator} ignores the policy and adds a log entry. Then, {cgu-operator} continues looking at the policy's `status.status` field. -* If a policy's `status.status` is missing, {cgu-operator} produces an error. -* If a cluster's compliance status is missing in the policy's `status.status` field, {cgu-operator} considers that cluster to be non-compliant with that policy. - -The `ClusterGroupUpgrade` CR's `batchTimeoutAction` determines what happens if an upgrade fails for a cluster. You can specify `continue` to skip the failing cluster and continue to upgrade other clusters, or specify `abort` to stop the policy remediation for all clusters. Once the timeout elapses, {cgu-operator} removes all enforce policies to ensure that no further updates are made to clusters. - -For more information about {rh-rhacm} policies, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html-single/governance/index#policy-overview[Policy overview]. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc deleted file mode 100644 index 7a5800a3c622..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-concept.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-precache-feature-concept_{context}"] -= Using the container image pre-cache feature - -{sno-caps} clusters might have limited bandwidth to access the container image registry, which can cause a timeout before the updates are completed. - -[NOTE] -==== -The time of the update is not set by {cgu-operator}. You can apply the `ClusterGroupUpgrade` CR at the beginning of the update by manual application or by external automation. -==== - -The container image pre-caching starts when the `preCaching` field is set to `true` in the `ClusterGroupUpgrade` CR. - -{cgu-operator} uses the `PrecacheSpecValid` condition to report status information as follows: - -* `true` -+ -The pre-caching spec is valid and consistent. -* `false` -+ -The pre-caching spec is incomplete. - -{cgu-operator} uses the `PrecachingSucceeded` condition to report status information as follows: - -* `true` -+ -{cgu-operator} has concluded the pre-caching process. 
If pre-caching fails for any cluster, the update fails for that cluster but proceeds for all other clusters. A message informs you if pre-caching has failed for any clusters. -* `false` -+ -Pre-caching is still in progress for one or more clusters or has failed for all clusters. - -After a successful pre-caching process, you can start remediating policies. The remediation actions start when the `enable` field is set to `true`. If there is a pre-caching failure on a cluster, the upgrade fails for that cluster. The upgrade process continues for all other clusters that have a successful pre-cache. - -The pre-caching process can be in the following statuses: - -* `NotStarted` -+ -This is the initial state all clusters are automatically assigned to on the first reconciliation pass of the `ClusterGroupUpgrade` CR. In this state, {cgu-operator} deletes any pre-caching namespace and hub view resources of spoke clusters that remain from previous incomplete updates. {cgu-operator} then creates a new `ManagedClusterView` resource for the spoke pre-caching namespace to verify its deletion in the `PrecachePreparing` state. -* `PreparingToStart` -+ -Cleaning up any remaining resources from previous incomplete updates is in progress. -* `Starting` -+ -Pre-caching job prerequisites and the job are created. -* `Active` -+ -The job is in "Active" state. -* `Succeeded` -+ -The pre-cache job succeeded. -* `PrecacheTimeout` -+ -The artifact pre-caching is partially done. -* `UnrecoverableError` -+ -The job ends with a non-zero exit code. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc deleted file mode 100644 index 60c01cb5bd7d..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-feature.adoc +++ /dev/null @@ -1,160 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-2600 (CNF-2133) (4.10), Story TELCODOCS-285 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: PROCEDURE -[id="talo-precache-start_and_update_{context}"] -= Creating a ClusterGroupUpgrade CR with pre-caching - -For {sno}, the pre-cache feature allows the required container images to be present on the spoke cluster before the update starts. - -[NOTE] -==== -For pre-caching, {cgu-operator} uses the `spec.remediationStrategy.timeout` value from the `ClusterGroupUpgrade` CR. You must set a `timeout` value that allows sufficient time for the pre-caching job to complete. When you enable the `ClusterGroupUpgrade` CR after pre-caching has completed, you can change the `timeout` value to a duration that is appropriate for the update. -==== - -.Prerequisites - -* Install the {cgu-operator-first}. -* Provision one or more managed clusters. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Save the contents of the `ClusterGroupUpgrade` CR with the `preCaching` field set to `true` in the `clustergroupupgrades-group-du.yaml` file: -+ -[source,yaml] ----- -apiVersion: ran.openshift.io/v1alpha1 -kind: ClusterGroupUpgrade -metadata: - name: du-upgrade-4918 - namespace: ztp-group-du-sno -spec: - preCaching: true <1> - clusters: - - cnfdb1 - - cnfdb2 - enable: false - managedPolicies: - - du-upgrade-platform-upgrade - remediationStrategy: - maxConcurrency: 2 - timeout: 240 ----- -<1> The `preCaching` field is set to `true`, which enables {cgu-operator} to pull the container images before starting the update. - -. 
When you want to start pre-caching, apply the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f clustergroupupgrades-group-du.yaml ----- - -.Verification - -. Check if the `ClusterGroupUpgrade` CR exists in the hub cluster by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -A ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME AGE STATE DETAILS -ztp-group-du-sno du-upgrade-4918 10s InProgress Precaching is required and not done <1> ----- -<1> The CR is created. - -. Check the status of the pre-caching task by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -{ - "conditions": [ - { - "lastTransitionTime": "2022-01-27T19:07:24Z", - "message": "Precaching is required and not done", - "reason": "InProgress", - "status": "False", - "type": "PrecachingSucceeded" - }, - { - "lastTransitionTime": "2022-01-27T19:07:34Z", - "message": "Pre-caching spec is valid and consistent", - "reason": "PrecacheSpecIsWellFormed", - "status": "True", - "type": "PrecacheSpecValid" - } - ], - "precaching": { - "clusters": [ - "cnfdb1" <1> - "cnfdb2" - ], - "spec": { - "platformImage": "image.example.io"}, - "status": { - "cnfdb1": "Active" - "cnfdb2": "Succeeded"} - } -} ----- -<1> Displays the list of identified clusters. - -. Check the status of the pre-caching job by running the following command on the spoke cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talo-pre-cache ----- -+ -.Example output -+ -[source,terminal] ----- -NAME COMPLETIONS DURATION AGE -job.batch/pre-cache 0/1 3m10s 3m10s - -NAME READY STATUS RESTARTS AGE -pod/pre-cache--1-9bmlr 1/1 Running 0 3m10s ----- - - . Check the status of the `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc get cgu -n ztp-group-du-sno du-upgrade-4918 -o jsonpath='{.status}' ----- -+ -.Example output -+ -[source,json] ----- -"conditions": [ - { - "lastTransitionTime": "2022-01-27T19:30:41Z", - "message": "The ClusterGroupUpgrade CR has all clusters compliant with all the managed policies", - "reason": "UpgradeCompleted", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2022-01-27T19:28:57Z", - "message": "Precaching is completed", - "reason": "PrecachingCompleted", - "status": "True", - "type": "PrecachingSucceeded" <1> - } ----- -<1> The pre-cache tasks are done. diff --git a/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc b/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc deleted file mode 100644 index 3e2201430bee..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-precache-image-filter.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-6848 (4.13), Story TELCODOCS-949 -// * scalability_and_performance/cnf-talm-for-cluster-upgrades.adoc - -:_content-type: CONCEPT -[id="talo-precache-feature-image-filter_{context}"] -= Using the container image pre-cache filter - -The pre-cache feature typically downloads more images than a cluster needs for an update. You can control which pre-cache images are downloaded to a cluster. This decreases download time, and saves bandwidth and storage. 
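Before you configure any exclusions, it can help to estimate how much a set of patterns would save. The following is a minimal sketch, assuming a hypothetical `4.13.0` `x86_64` release image; the patterns are the same example values used in the `ConfigMap` that follows.

[source,terminal]
----
# Illustrative only: count how many images in the target release match the
# name patterns that you plan to exclude from pre-caching. The release pull
# spec and the pattern list are example values.
$ oc adm release info quay.io/openshift-release-dev/ocp-release:4.13.0-x86_64 \
  | grep -E 'azure|aws|vsphere|alibaba' | wc -l
----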
- -You can see a list of all images to be downloaded using the following command: - -[source,terminal] ----- -$ oc adm release info ----- - -The following `ConfigMap` example shows how you can exclude images using the `excludePrecachePatterns` field. - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-group-upgrade-overrides -data: - excludePrecachePatterns: | - azure <1> - aws - vsphere - alibaba ----- -<1> {cgu-operator} excludes all images with names that include any of the patterns listed here. \ No newline at end of file diff --git a/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc b/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc deleted file mode 100644 index 6434f48dd6f5..000000000000 --- a/modules/cnf-topology-aware-lifecycle-manager-preparing-for-updates.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ztp_far_edge/ztp-talm-updating-managed-policies.adoc - -:_content-type: PROCEDURE -[id="talo-platform-prepare-end-to-end_{context}"] -= Updating clusters in a disconnected environment - -You can upgrade managed clusters and Operators for managed clusters that you have deployed using {ztp-first} and {cgu-operator-first}. - -[id="talo-platform-prepare-for-update-env-setup_{context}"] -== Setting up the environment - -{cgu-operator} can perform both platform and Operator updates. - -You must mirror both the platform image and Operator images that you want to update to in your mirror registry before you can use {cgu-operator} to update your disconnected clusters. Complete the following steps to mirror the images: - -* For platform updates, you must perform the following steps: -+ -. Mirror the desired {product-title} image repository. Ensure that the desired platform image is mirrored by following the "Mirroring the {product-title} image repository" procedure linked in the Additional Resources. Save the contents of the `imageContentSources` section in the `imageContentSources.yaml` file: -+ -.Example output -[source,yaml] ----- -imageContentSources: - - mirrors: - - mirror-ocp-registry.ibmcloud.io.cpak:5000/openshift-release-dev/openshift4 - source: quay.io/openshift-release-dev/ocp-release - - mirrors: - - mirror-ocp-registry.ibmcloud.io.cpak:5000/openshift-release-dev/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- - -. Save the image signature of the desired platform image that was mirrored. You must add the image signature to the `PolicyGenTemplate` CR for platform updates. To get the image signature, perform the following steps: - -.. Specify the desired {product-title} tag by running the following command: -+ -[source,terminal] ----- -$ OCP_RELEASE_NUMBER= ----- - -.. Specify the architecture of the cluster by running the following command: -+ -[source,terminal] ----- -$ ARCHITECTURE= <1> ----- -<1> Specify the architecture of the cluster, such as `x86_64`, `aarch64`, `s390x`, or `ppc64le`. - - -.. Get the release image digest from Quay by running the following command -+ -[source,terminal] ----- -$ DIGEST="$(oc adm release info quay.io/openshift-release-dev/ocp-release:${OCP_RELEASE_NUMBER}-${ARCHITECTURE} | sed -n 's/Pull From: .*@//p')" ----- - -.. Set the digest algorithm by running the following command: -+ -[source,terminal] ----- -$ DIGEST_ALGO="${DIGEST%%:*}" ----- - -.. 
Set the digest signature by running the following command: -+ -[source,terminal] ----- -$ DIGEST_ENCODED="${DIGEST#*:}" ----- - -.. Get the image signature from the link:https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release/[mirror.openshift.com] website by running the following command: -+ -[source,terminal] ----- -$ SIGNATURE_BASE64=$(curl -s "https://mirror.openshift.com/pub/openshift-v4/signatures/openshift/release/${DIGEST_ALGO}=${DIGEST_ENCODED}/signature-1" | base64 -w0 && echo) ----- - -.. Save the image signature to the `checksum-.yaml` file by running the following commands: -+ -[source,terminal] ----- -$ cat >checksum-${OCP_RELEASE_NUMBER}.yaml <> -** <> -** <> -** <> - -To ensure that the `ClusterGroupUpgrade` configuration is functional, you can do the following: - -. Create the `ClusterGroupUpgrade` CR with the `spec.enable` field set to `false`. - -. Wait for the status to be updated and go through the troubleshooting questions. - -. If everything looks as expected, set the `spec.enable` field to `true` in the `ClusterGroupUpgrade` CR. - -[WARNING] -==== -After you set the `spec.enable` field to `true` in the `ClusterUpgradeGroup` CR, the update procedure starts and you cannot edit the CR's `spec` fields anymore. -==== - -[id="talo-troubleshooting-modify-cgu_{context}"] -== Cannot modify the ClusterUpgradeGroup CR - -Issue:: You cannot edit the `ClusterUpgradeGroup` CR after enabling the update. - -Resolution:: Restart the procedure by performing the following steps: -+ -. Remove the old `ClusterGroupUpgrade` CR by running the following command: -+ -[source,terminal] ----- -$ oc delete cgu -n ----- -+ -. Check and fix the existing issues with the managed clusters and policies. -.. Ensure that all the clusters are managed clusters and available. -.. Ensure that all the policies exist and have the `spec.remediationAction` field set to `inform`. -+ -. Create a new `ClusterGroupUpgrade` CR with the correct configurations. -+ -[source,terminal] ----- -$ oc apply -f ----- - -[id="talo-troubleshooting-managed-policies_{context}"] -== Managed policies - -[discrete] -== Checking managed policies on the system - -Issue:: You want to check if you have the correct managed policies on the system. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.managedPolicies}' ----- -+ -.Example output -+ -[source,json] ----- -["group-du-sno-validator-du-validator-policy", "policy2-common-nto-sub-policy", "policy3-common-ptp-sub-policy"] ----- - -[discrete] -== Checking remediationAction mode - -Issue:: You want to check if the `remediationAction` field is set to `inform` in the `spec` of the managed policies. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default policy1-common-cluster-version-policy inform NonCompliant 5d21h -default policy2-common-nto-sub-policy inform Compliant 5d21h -default policy3-common-ptp-sub-policy inform NonCompliant 5d21h -default policy4-common-sriov-sub-policy inform NonCompliant 5d21h ----- - -[discrete] -== Checking policy compliance state - -Issue:: You want to check the compliance state of policies. 
- -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get policies --all-namespaces ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME REMEDIATION ACTION COMPLIANCE STATE AGE -default policy1-common-cluster-version-policy inform NonCompliant 5d21h -default policy2-common-nto-sub-policy inform Compliant 5d21h -default policy3-common-ptp-sub-policy inform NonCompliant 5d21h -default policy4-common-sriov-sub-policy inform NonCompliant 5d21h ----- - -[id="talo-troubleshooting-clusters_{context}"] -== Clusters - -[discrete] -=== Checking if managed clusters are present - -Issue:: You want to check if the clusters in the `ClusterGroupUpgrade` CR are managed clusters. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedclusters ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.hub.example.com:6443 True Unknown 13d -spoke1 true https://api.spoke1.example.com:6443 True True 13d -spoke3 true https://api.spoke3.example.com:6443 True True 27h ----- - -. Alternatively, check the {cgu-operator} manager logs: - -.. Get the name of the {cgu-operator} manager by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n openshift-operators ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp 2/2 Running 0 45m ----- - -.. Check the {cgu-operator} manager logs by running the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-operators \ -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp -c manager ----- -+ -.Example output -+ -[source,terminal] ----- -ERROR controller-runtime.manager.controller.clustergroupupgrade Reconciler error {"reconciler group": "ran.openshift.io", "reconciler kind": "ClusterGroupUpgrade", "name": "lab-upgrade", "namespace": "default", "error": "Cluster spoke5555 is not a ManagedCluster"} <1> -sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem ----- -<1> The error message shows that the cluster is not a managed cluster. - -[discrete] -=== Checking if managed clusters are available - -Issue:: You want to check if the managed clusters specified in the `ClusterGroupUpgrade` CR are available. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedclusters ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.hub.testlab.com:6443 True Unknown 13d -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d <1> -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h <1> ----- -<1> The value of the `AVAILABLE` field is `True` for the managed clusters. - -[discrete] -=== Checking clusterLabelSelector - -Issue:: You want to check if the `clusterLabelSelector` field specified in the `ClusterGroupUpgrade` CR matches at least one of the managed clusters. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get managedcluster --selector=upgrade=true <1> ----- -<1> The label for the clusters you want to update is `upgrade:true`. 
-+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h ----- - -[discrete] -=== Checking if canary clusters are present - -Issue:: You want to check if the canary clusters are present in the list of clusters. -+ -.Example `ClusterGroupUpgrade` CR -[source,yaml] ----- -spec: - remediationStrategy: - canaries: - - spoke3 - maxConcurrency: 2 - timeout: 240 - clusterLabelSelectors: - - matchLabels: - upgrade: true ----- - -Resolution:: Run the following commands: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.clusters}' ----- -+ -.Example output -+ -[source,json] ----- -["spoke1", "spoke3"] ----- - -. Check if the canary clusters are present in the list of clusters that match `clusterLabelSelector` labels by running the following command: -+ -[source,terminal] ----- -$ oc get managedcluster --selector=upgrade=true ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -spoke1 true https://api.spoke1.testlab.com:6443 True True 13d -spoke3 true https://api.spoke3.testlab.com:6443 True True 27h ----- - -[NOTE] -==== -A cluster can be present in `spec.clusters` and also be matched by the `spec.clusterLabelSelector` label. -==== - -[discrete] -=== Checking the pre-caching status on spoke clusters - -. Check the status of pre-caching by running the following command on the spoke cluster: -+ -[source,terminal] ----- -$ oc get jobs,pods -n openshift-talo-pre-cache ----- - -[id="talo-troubleshooting-remediation-strategy_{context}"] -== Remediation Strategy - -[discrete] -=== Checking if remediationStrategy is present in the ClusterGroupUpgrade CR - -Issue:: You want to check if the `remediationStrategy` is present in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.remediationStrategy}' ----- -+ -.Example output -+ -[source,json] ----- -{"maxConcurrency":2, "timeout":240} ----- - -[discrete] -=== Checking if maxConcurrency is specified in the ClusterGroupUpgrade CR - -Issue:: You want to check if the `maxConcurrency` is specified in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.spec.remediationStrategy.maxConcurrency}' ----- -+ -.Example output -+ -[source,terminal] ----- -2 ----- - -[id="talo-troubleshooting-remediation-talo_{context}"] -== {cgu-operator-full} - -[discrete] -=== Checking condition message and status in the ClusterGroupUpgrade CR - -Issue:: You want to check the value of the `status.conditions` field in the `ClusterGroupUpgrade` CR. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.status.conditions}' ----- -+ -.Example output -+ -[source,json] ----- -{"lastTransitionTime":"2022-02-17T22:25:28Z", "message":"Missing managed policies:[policyList]", "reason":"NotAllManagedPoliciesExist", "status":"False", "type":"Validated"} ----- - -[discrete] -=== Checking corresponding copied policies - -Issue:: You want to check if every policy from `status.managedPoliciesForUpgrade` has a corresponding policy in `status.copiedPolicies`. 
- -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -oyaml ----- -+ -.Example output -+ -[source,yaml] ----- -status: - … - copiedPolicies: - - lab-upgrade-policy3-common-ptp-sub-policy - managedPoliciesForUpgrade: - - name: policy3-common-ptp-sub-policy - namespace: default ----- - -[discrete] -=== Checking if status.remediationPlan was computed - -Issue:: You want to check if `status.remediationPlan` is computed. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc get cgu lab-upgrade -ojsonpath='{.status.remediationPlan}' ----- -+ -.Example output -+ -[source,json] ----- -[["spoke2", "spoke3"]] ----- - -[discrete] -=== Errors in the {cgu-operator} manager container - -Issue:: You want to check the logs of the manager container of {cgu-operator}. - -Resolution:: Run the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-operators \ -cluster-group-upgrades-controller-manager-75bcc7484d-8k8xp -c manager ----- -+ -.Example output -+ -[source,terminal] ----- -ERROR controller-runtime.manager.controller.clustergroupupgrade Reconciler error {"reconciler group": "ran.openshift.io", "reconciler kind": "ClusterGroupUpgrade", "name": "lab-upgrade", "namespace": "default", "error": "Cluster spoke5555 is not a ManagedCluster"} <1> -sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem ----- -<1> Displays the error. - -[discrete] -=== Clusters are not compliant to some policies after a `ClusterGroupUpgrade` CR has completed - -Issue:: The policy compliance status that {cgu-operator} uses to decide if remediation is needed has not yet fully updated for all clusters. -This may be because: -* The CGU was run too soon after a policy was created or updated. -* The remediation of a policy affects the compliance of subsequent policies in the `ClusterGroupUpgrade` CR. - -Resolution:: Create and apply a new `ClusterGroupUpdate` CR with the same specification. - -[discrete] -[id="talo-troubleshooting-auto-create-policies_{context}"] -=== Auto-created `ClusterGroupUpgrade` CR in the {ztp} workflow has no managed policies - -Issue:: If there are no policies for the managed cluster when the cluster becomes `Ready`, a `ClusterGroupUpgrade` CR with no policies is auto-created. -Upon completion of the `ClusterGroupUpgrade` CR, the managed cluster is labeled as `ztp-done`. -If the `PolicyGenTemplate` CRs were not pushed to the Git repository within the required time after `SiteConfig` resources were pushed, this might result in no policies being available for the target cluster when the cluster became `Ready`. - -Resolution:: Verify that the policies you want to apply are available on the hub cluster, then create a `ClusterGroupUpgrade` CR with the required policies. - -You can either manually create the `ClusterGroupUpgrade` CR or trigger auto-creation again. To trigger auto-creation of the `ClusterGroupUpgrade` CR, remove the `ztp-done` label from the cluster and delete the empty `ClusterGroupUpgrade` CR that was previously created in the `zip-install` namespace. - -[discrete] -[id="talo-troubleshooting-pre-cache-failed_{context}"] -=== Pre-caching has failed - -Issue:: Pre-caching might fail for one of the following reasons: -* There is not enough free space on the node. -* For a disconnected environment, the pre-cache image has not been properly mirrored. -* There was an issue when creating the pod. - -Resolution:: -. 
To check if pre-caching has failed due to insufficient space, check the log of the pre-caching pod in the node. -.. Find the name of the pod using the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-talo-pre-cache ----- -+ -.. Check the logs to see if the error is related to insufficient space using the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-talo-pre-cache ----- -+ -. If there is no log, check the pod status using the following command: -+ -[source,terminal] ----- -$ oc describe pod -n openshift-talo-pre-cache ----- -+ -. If the pod does not exist, check the job status to see why it could not create a pod using the following command: -+ -[source,terminal] ----- -$ oc describe job -n openshift-talo-pre-cache pre-cache ----- diff --git a/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc b/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc deleted file mode 100644 index 204a38430681..000000000000 --- a/modules/cnf-troubleshooting-common-ptp-operator-issues.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="cnf-troubleshooting-common-ptp-operator-issues_{context}"] -= Troubleshooting common PTP Operator issues - -Troubleshoot common problems with the PTP Operator by performing the following steps. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the PTP Operator on a bare-metal cluster with hosts that support PTP. - -.Procedure - -. Check the Operator and operands are successfully deployed in the cluster for the configured nodes. -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-lmvgn 3/3 Running 0 4d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-qhfg7 3/3 Running 0 4d17h 10.1.196.25 compute-1.example.com -ptp-operator-6b8dcbf7f4-zndk7 1/1 Running 0 5d7h 10.129.0.61 control-plane-1.example.com ----- -+ -[NOTE] -==== -When the PTP fast event bus is enabled, the number of ready `linuxptp-daemon` pods is `3/3`. If the PTP fast event bus is not enabled, `2/2` is displayed. -==== - -. Check that supported hardware is found in the cluster. -+ -[source,terminal] ----- -$ oc -n openshift-ptp get nodeptpdevices.ptp.openshift.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -control-plane-0.example.com 10d -control-plane-1.example.com 10d -compute-0.example.com 10d -compute-1.example.com 10d -compute-2.example.com 10d ----- - -. Check the available PTP network interfaces for a node: -+ -[source,terminal] ----- -$ oc -n openshift-ptp get nodeptpdevices.ptp.openshift.io -o yaml ----- -+ -where: -+ -:: Specifies the node you want to query, for example, `compute-0.example.com`. -+ -.Example output -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: NodePtpDevice -metadata: - creationTimestamp: "2021-09-14T16:52:33Z" - generation: 1 - name: compute-0.example.com - namespace: openshift-ptp - resourceVersion: "177400" - uid: 30413db0-4d8d-46da-9bef-737bacd548fd -spec: {} -status: - devices: - - name: eno1 - - name: eno2 - - name: eno3 - - name: eno4 - - name: enp5s0f0 - - name: enp5s0f1 ----- - -. Check that the PTP interface is successfully synchronized to the primary clock by accessing the `linuxptp-daemon` pod for the corresponding node. - -.. 
Get the name of the `linuxptp-daemon` pod and corresponding node you want to troubleshoot by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-lmvgn 3/3 Running 0 4d17h 10.1.196.24 compute-0.example.com -linuxptp-daemon-qhfg7 3/3 Running 0 4d17h 10.1.196.25 compute-1.example.com -ptp-operator-6b8dcbf7f4-zndk7 1/1 Running 0 5d7h 10.129.0.61 control-plane-1.example.com ----- - -.. Remote shell into the required `linuxptp-daemon` container: -+ -[source,terminal] ----- -$ oc rsh -n openshift-ptp -c linuxptp-daemon-container ----- -+ -where: -+ -:: is the container you want to diagnose, for example `linuxptp-daemon-lmvgn`. - -.. In the remote shell connection to the `linuxptp-daemon` container, use the PTP Management Client (`pmc`) tool to diagnose the network interface. Run the following `pmc` command to check the sync status of the PTP device, for example `ptp4l`. -+ -[source,terminal] ----- -# pmc -u -f /var/run/ptp4l.0.config -b 0 'GET PORT_DATA_SET' ----- -+ -.Example output when the node is successfully synced to the primary clock -[source,terminal] ----- -sending: GET PORT_DATA_SET - 40a6b7.fffe.166ef0-1 seq 0 RESPONSE MANAGEMENT PORT_DATA_SET - portIdentity 40a6b7.fffe.166ef0-1 - portState SLAVE - logMinDelayReqInterval -4 - peerMeanPathDelay 0 - logAnnounceInterval -3 - announceReceiptTimeout 3 - logSyncInterval -4 - delayMechanism 1 - logMinPdelayReqInterval -4 - versionNumber 2 ----- diff --git a/modules/cnf-troubleshooting-missing-rte-config-maps.adoc b/modules/cnf-troubleshooting-missing-rte-config-maps.adoc deleted file mode 100644 index 3421bf9bb70c..000000000000 --- a/modules/cnf-troubleshooting-missing-rte-config-maps.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-troubleshooting-missing-rte-config-maps_{context}"] -= Correcting a missing resource topology exporter config map - -If you install the NUMA Resources Operator in a cluster with misconfigured cluster settings, in some circumstances, the Operator is shown as active but the logs of the resource topology exporter (RTE) daemon set pods show that the configuration for the RTE is missing, for example: - -[source,text] ----- -Info: couldn't find configuration in "/etc/resource-topology-exporter/config.yaml" ----- - -This log message indicates that the `kubeletconfig` with the required configuration was not properly applied in the cluster, resulting in a missing RTE `configmap`. For example, the following cluster is missing a `numaresourcesoperator-worker` `configmap` custom resource (CR): - -[source,terminal] ----- -$ oc get configmap ----- - -.Example output -[source,terminal] ----- -NAME DATA AGE -0e2a6bd3.openshift-kni.io 0 6d21h -kube-root-ca.crt 1 6d21h -openshift-service-ca.crt 1 6d21h -topo-aware-scheduler-config 1 6d18h ----- - -In a correctly configured cluster, `oc get configmap` also returns a `numaresourcesoperator-worker` `configmap` CR. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with cluster-admin privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. 
Compare the values for `spec.machineConfigPoolSelector.matchLabels` in `kubeletconfig` and -`metadata.labels` in the `MachineConfigPool` (`mcp`) worker CR using the following commands: - -.. Check the `kubeletconfig` labels by running the following command: -+ -[source,terminal] ----- -$ oc get kubeletconfig -o yaml ----- -+ -.Example output -[source,yaml] ----- -machineConfigPoolSelector: - matchLabels: - cnf-worker-tuning: enabled ----- - -.. Check the `mcp` labels by running the following command: -+ -[source,terminal] ----- -$ oc get mcp worker -o yaml ----- -+ -.Example output -[source,yaml] ----- -labels: - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" ----- -+ -The `cnf-worker-tuning: enabled` label is not present in the `MachineConfigPool` object. - -. Edit the `MachineConfigPool` CR to include the missing label, for example: -+ -[source,terminal] ----- -$ oc edit mcp worker -o yaml ----- -+ -.Example output -[source,yaml] ----- -labels: - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" - cnf-worker-tuning: enabled ----- - -. Apply the label changes and wait for the cluster to apply the updated configuration. Run the following command: - -.Verification - -* Check that the missing `numaresourcesoperator-worker` `configmap` CR is applied: -+ -[source,terminal] ----- -$ oc get configmap ----- -+ -.Example output -[source,terminal] ----- -NAME DATA AGE -0e2a6bd3.openshift-kni.io 0 6d21h -kube-root-ca.crt 1 6d21h -numaresourcesoperator-worker 1 5m -openshift-service-ca.crt 1 6d21h -topo-aware-scheduler-config 1 6d18h ----- diff --git a/modules/cnf-troubleshooting-numa-aware-workloads.adoc b/modules/cnf-troubleshooting-numa-aware-workloads.adoc deleted file mode 100644 index 9a753b1f34ec..000000000000 --- a/modules/cnf-troubleshooting-numa-aware-workloads.adoc +++ /dev/null @@ -1,208 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_content-type: PROCEDURE -[id="cnf-troubleshooting-numa-aware-workloads_{context}"] -= Troubleshooting NUMA-aware scheduling - -To troubleshoot common problems with NUMA-aware pod scheduling, perform the following steps. - -.Prerequisites - -* Install the {product-title} CLI (`oc`). - -* Log in as a user with cluster-admin privileges. - -* Install the NUMA Resources Operator and deploy the NUMA-aware secondary scheduler. - -.Procedure - -. Verify that the `noderesourcetopologies` CRD is deployed in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get crd | grep noderesourcetopologies ----- -+ -.Example output -[source,terminal] ----- -NAME CREATED AT -noderesourcetopologies.topology.node.k8s.io 2022-01-18T08:28:06Z ----- - -. Check that the NUMA-aware scheduler name matches the name specified in your NUMA-aware workloads by running the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesschedulers.nodetopology.openshift.io numaresourcesscheduler -o json | jq '.status.schedulerName' ----- -+ -.Example output -[source,terminal] ----- -topo-aware-scheduler ----- - -. Verify that NUMA-aware scheduable nodes have the `noderesourcetopologies` CR applied to them. 
Run the following command: -+ -[source,terminal] ----- -$ oc get noderesourcetopologies.topology.node.k8s.io ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -compute-0.example.com 17h -compute-1.example.com 17h ----- -+ -[NOTE] -==== -The number of nodes should equal the number of worker nodes that are configured by the machine config pool (`mcp`) worker definition. -==== - -. Verify the NUMA zone granularity for all scheduable nodes by running the following command: -+ -[source,terminal] ----- -$ oc get noderesourcetopologies.topology.node.k8s.io -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: topology.node.k8s.io/v1 - kind: NodeResourceTopology - metadata: - annotations: - k8stopoawareschedwg/rte-update: periodic - creationTimestamp: "2022-06-16T08:55:38Z" - generation: 63760 - name: worker-0 - resourceVersion: "8450223" - uid: 8b77be46-08c0-4074-927b-d49361471590 - topologyPolicies: - - SingleNUMANodeContainerLevel - zones: - - costs: - - name: node-0 - value: 10 - - name: node-1 - value: 21 - name: node-0 - resources: - - allocatable: "38" - available: "38" - capacity: "40" - name: cpu - - allocatable: "134217728" - available: "134217728" - capacity: "134217728" - name: hugepages-2Mi - - allocatable: "262352048128" - available: "262352048128" - capacity: "270107316224" - name: memory - - allocatable: "6442450944" - available: "6442450944" - capacity: "6442450944" - name: hugepages-1Gi - type: Node - - costs: - - name: node-0 - value: 21 - - name: node-1 - value: 10 - name: node-1 - resources: - - allocatable: "268435456" - available: "268435456" - capacity: "268435456" - name: hugepages-2Mi - - allocatable: "269231067136" - available: "269231067136" - capacity: "270573244416" - name: memory - - allocatable: "40" - available: "40" - capacity: "40" - name: cpu - - allocatable: "1073741824" - available: "1073741824" - capacity: "1073741824" - name: hugepages-1Gi - type: Node -- apiVersion: topology.node.k8s.io/v1 - kind: NodeResourceTopology - metadata: - annotations: - k8stopoawareschedwg/rte-update: periodic - creationTimestamp: "2022-06-16T08:55:37Z" - generation: 62061 - name: worker-1 - resourceVersion: "8450129" - uid: e8659390-6f8d-4e67-9a51-1ea34bba1cc3 - topologyPolicies: - - SingleNUMANodeContainerLevel - zones: <1> - - costs: - - name: node-0 - value: 10 - - name: node-1 - value: 21 - name: node-0 - resources: <2> - - allocatable: "38" - available: "38" - capacity: "40" - name: cpu - - allocatable: "6442450944" - available: "6442450944" - capacity: "6442450944" - name: hugepages-1Gi - - allocatable: "134217728" - available: "134217728" - capacity: "134217728" - name: hugepages-2Mi - - allocatable: "262391033856" - available: "262391033856" - capacity: "270146301952" - name: memory - type: Node - - costs: - - name: node-0 - value: 21 - - name: node-1 - value: 10 - name: node-1 - resources: - - allocatable: "40" - available: "40" - capacity: "40" - name: cpu - - allocatable: "1073741824" - available: "1073741824" - capacity: "1073741824" - name: hugepages-1Gi - - allocatable: "268435456" - available: "268435456" - capacity: "268435456" - name: hugepages-2Mi - - allocatable: "269192085504" - available: "269192085504" - capacity: "270534262784" - name: memory - type: Node -kind: List -metadata: - resourceVersion: "" - selfLink: "" ----- -<1> Each stanza under `zones` describes the resources for a single NUMA zone. -<2> `resources` describes the current state of the NUMA zone resources. 
Check that resources listed under `items.zones.resources.available` correspond to the exclusive NUMA zone resources allocated to each guaranteed pod. diff --git a/modules/cnf-troubleshooting-resource-topo-exporter.adoc b/modules/cnf-troubleshooting-resource-topo-exporter.adoc deleted file mode 100644 index c25952cd3cb0..000000000000 --- a/modules/cnf-troubleshooting-resource-topo-exporter.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// *scalability_and_performance/cnf-numa-aware-scheduling.adoc - -:_module-type: PROCEDURE -[id="cnf-troubleshooting-resource-topo-exporter_{context}"] -= Troubleshooting the resource topology exporter - -Troubleshoot `noderesourcetopologies` objects where unexpected results are occurring by inspecting the corresponding `resource-topology-exporter` logs. - -[NOTE] -==== -It is recommended that NUMA resource topology exporter instances in the cluster are named for nodes they refer to. For example, a worker node with the name `worker` should have a corresponding `noderesourcetopologies` object called `worker`. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Get the daemonsets managed by the NUMA Resources Operator. Each daemonset has a corresponding `nodeGroup` in the `NUMAResourcesOperator` CR. Run the following command: -+ -[source,terminal] ----- -$ oc get numaresourcesoperators.nodetopology.openshift.io numaresourcesoperator -o jsonpath="{.status.daemonsets[0]}" ----- -+ -.Example output -[source,json] ----- -{"name":"numaresourcesoperator-worker","namespace":"openshift-numaresources"} ----- - -. Get the label for the daemonset of interest using the value for `name` from the previous step: -+ -[source,terminal] ----- -$ oc get ds -n openshift-numaresources numaresourcesoperator-worker -o jsonpath="{.spec.selector.matchLabels}" ----- -+ -.Example output -[source,json] ----- -{"name":"resource-topology"} ----- - -. Get the pods using the `resource-topology` label by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-numaresources -l name=resource-topology -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -numaresourcesoperator-worker-5wm2k 2/2 Running 0 2d1h 10.135.0.64 compute-0.example.com -numaresourcesoperator-worker-pb75c 2/2 Running 0 2d1h 10.132.2.33 compute-1.example.com ----- - -. Examine the logs of the `resource-topology-exporter` container running on the worker pod that corresponds to the node you are troubleshooting. 
Run the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-numaresources -c resource-topology-exporter numaresourcesoperator-worker-pb75c ----- -+ -.Example output -[source,terminal] ----- -I0221 13:38:18.334140 1 main.go:206] using sysinfo: -reservedCpus: 0,1 -reservedMemory: - "0": 1178599424 -I0221 13:38:18.334370 1 main.go:67] === System information === -I0221 13:38:18.334381 1 sysinfo.go:231] cpus: reserved "0-1" -I0221 13:38:18.334493 1 sysinfo.go:237] cpus: online "0-103" -I0221 13:38:18.546750 1 main.go:72] -cpus: allocatable "2-103" -hugepages-1Gi: - numa cell 0 -> 6 - numa cell 1 -> 1 -hugepages-2Mi: - numa cell 0 -> 64 - numa cell 1 -> 128 -memory: - numa cell 0 -> 45758Mi - numa cell 1 -> 48372Mi ----- diff --git a/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc b/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc deleted file mode 100644 index d4cf37cdacd3..000000000000 --- a/modules/cnf-tuning-nodes-for-low-latency-via-performanceprofile.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-78 (4.4) -// Epic CNF-422 (4.5) -// scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="cnf-tuning-nodes-for-low-latency-via-performanceprofile_{context}"] -= Tuning nodes for low latency with the performance profile - -The performance profile lets you control latency tuning aspects of nodes that belong to a certain machine config pool. After you specify your settings, the `PerformanceProfile` object is compiled into multiple objects that perform the actual node level tuning: - -* A `MachineConfig` file that manipulates the nodes. -* A `KubeletConfig` file that configures the Topology Manager, the CPU Manager, and the {product-title} nodes. -* The Tuned profile that configures the Node Tuning Operator. - -You can use a performance profile to specify whether to update the kernel to kernel-rt, to allocate huge pages, and to partition the CPUs for performing housekeeping duties or running workloads. - -[NOTE] -==== -You can manually create the `PerformanceProfile` object or use the Performance Profile Creator (PPC) to generate a performance profile. See the additional resources below for more information on the PPC. -==== - -.Sample performance profile -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: "5-15" <1> - reserved: "0-4" <2> - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 16 - node: 0 - realTimeKernel: - enabled: true <3> - numa: <4> - topologyPolicy: "best-effort" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" <5> ----- -<1> Use this field to isolate specific CPUs to use with application containers for workloads. -<2> Use this field to reserve specific CPUs to use with infra containers for housekeeping. -<3> Use this field to install the real-time kernel on the node. Valid values are `true` or `false`. Setting the `true` value installs the real-time kernel. -<4> Use this field to configure the topology manager policy. Valid values are `none` (default), `best-effort`, `restricted`, and `single-numa-node`. For more information, see link:https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies[Topology Manager Policies]. -<5> Use this field to specify a node selector to apply the performance profile to specific nodes. 
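To illustrate the compilation step described above, the following sketch applies the sample profile and then looks for the objects that the Node Tuning Operator derives from it. It assumes the sample profile is saved as `performance-profile.yaml`; the generated object names typically embed the profile name, so treat the `grep` filters as assumptions and adjust them for your cluster.

[source,terminal]
----
# Apply the sample profile and confirm that it was created.
$ oc apply -f performance-profile.yaml
$ oc get performanceprofile performance

# Look for the MachineConfig, KubeletConfig, and Tuned objects generated
# from the profile. Filtering on the profile name is an assumption.
$ oc get machineconfig | grep performance
$ oc get kubeletconfig | grep performance
$ oc get tuned -n openshift-cluster-node-tuning-operator | grep performance
----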
diff --git a/modules/cnf-understanding-low-latency.adoc
deleted file mode 100644
index 9c2c126ca4e7..000000000000
--- a/modules/cnf-understanding-low-latency.adoc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Module included in the following assemblies:
-// Epic CNF-78 (4.4)
-// * scalability_and_performance/cnf-low-latency-tuning.adoc
-
-:_content-type: CONCEPT
-[id="cnf-understanding-low-latency_{context}"]
-= Understanding low latency
-
-The emergence of Edge computing in the area of Telco / 5G plays a key role in
-reducing latency and congestion problems and improving application performance.
-
-Simply put, latency determines how fast data (packets) moves from the sender to the receiver and returns to the sender after processing by the receiver. Maintaining a network architecture with the lowest possible latency is key to meeting the network performance requirements of 5G. Compared to 4G technology, with an average latency of 50 ms, 5G is targeted to reach latency numbers of 1 ms or less. This reduction in latency boosts wireless throughput by a factor of 10.
-
-Many of the deployed applications in the Telco space require low latency that can only tolerate zero packet loss. Tuning for zero packet loss helps mitigate the inherent issues that degrade network performance. For more information, see link:https://www.redhat.com/en/blog/tuning-zero-packet-loss-red-hat-openstack-platform-part-1[Tuning for Zero Packet Loss in {rh-openstack-first}].
-
-The Edge computing initiative also comes into play for reducing latency rates. Think of it as being on the edge of the cloud and closer to the user. This greatly reduces the distance between the user and distant data centers, resulting in reduced application response times and performance latency.
-
-Administrators must be able to manage their many Edge sites and local services in a centralized way so that all of the deployments can run at the lowest possible management cost. They also need an easy way to deploy and configure certain nodes of their cluster for real-time low latency and high-performance purposes. Low latency nodes are useful for applications such as Cloud-native Network Functions (CNF) and Data Plane Development Kit (DPDK).
-
-{product-title} currently provides mechanisms to tune software on an {product-title} cluster for real-time running and low latency (around <20 microseconds reaction time). This includes tuning the kernel and {product-title} set values, installing a kernel, and reconfiguring the machine. But this method requires setting up four different Operators and performing many configurations that, when done manually, are complex and prone to mistakes.
-
-{product-title} uses the Node Tuning Operator to implement automatic tuning to achieve low latency performance for {product-title} applications. The cluster administrator uses a performance profile configuration, which makes it easier to apply these changes in a reliable way. The administrator can specify whether to update the kernel to kernel-rt, reserve CPUs for cluster and operating system housekeeping duties, including pod infra containers, and isolate CPUs for application containers to run the workloads.
-
-[NOTE]
-====
-Currently, disabling CPU load balancing is not supported by cgroup v2. As a result, you might not get the desired behavior from performance profiles if you have cgroup v2 enabled. Enabling cgroup v2 is not recommended if you are using performance profiles.
-==== - -{product-title} also supports workload hints for the Node Tuning Operator that can tune the `PerformanceProfile` to meet the demands of different industry environments. Workload hints are available for `highPowerConsumption` (very low latency at the cost of increased power consumption) and `realTime` (priority given to optimum latency). A combination of `true/false` settings for these hints can be used to deal with application-specific workload profiles and requirements. - -Workload hints simplify the fine-tuning of performance to industry sector settings. Instead of a “one size fits all” approach, workload hints can cater to usage patterns such as placing priority on: - -* Low latency -* Real-time capability -* Efficient use of power - -In an ideal world, all of those would be prioritized: in real life, some come at the expense of others. The Node Tuning Operator is now aware of the workload expectations and better able to meet the demands of the workload. The cluster admin can now specify into which use case that workload falls. The Node Tuning Operator uses the `PerformanceProfile` to fine tune the performance settings for the workload. - -The environment in which an application is operating influences its behavior. For a typical data center with no strict latency requirements, only minimal default tuning is needed that enables CPU partitioning for some high performance workload pods. For data centers and workloads where latency is a higher priority, measures are still taken to optimize power consumption. The most complicated cases are clusters close to latency-sensitive equipment such as manufacturing machinery and software-defined radios. This last class of deployment is often referred to as Far edge. For Far edge deployments, ultra-low latency is the ultimate priority, and is achieved at the expense of power management. - -In {product-title} version 4.10 and previous versions, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance. Now this functionality is part of the Node Tuning Operator. diff --git a/modules/cnf-understanding-workload-hints.adoc b/modules/cnf-understanding-workload-hints.adoc deleted file mode 100644 index 99479338da11..000000000000 --- a/modules/cnf-understanding-workload-hints.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: CONCEPT -[id="cnf-understanding-workload-hints_{context}"] -= Understanding workload hints - -The following table describes how combinations of power consumption and real-time settings impact on latency. -[NOTE] -==== -The following workload hints can be configured manually. You can also work with workload hints using the Performance Profile Creator. For more information about the performance profile, see the "Creating a performance profile" section. -==== - -[cols="1,1,1,1",options="header"] -|=== - | Performance Profile creator setting| Hint | Environment | Description - - | Default - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: false -realTime: false ----- - | High throughput cluster without latency requirements - | Performance achieved through CPU partitioning only. - - - - | Low-latency - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: false -realTime: true ----- - | Regional datacenters - | Both energy savings and low-latency are desirable: compromise between power management, latency and throughput. 
- - - | Ultra-low-latency - a|[source,terminal] ----- -workloadHints: -highPowerConsumption: true -realTime: true ----- - | Far edge clusters, latency critical workloads - | Optimized for absolute minimal latency and maximum determinism at the cost of increased power consumption. - - | Per-pod power management - a|[source,terminal] ----- -workloadHints: -realTime: true -highPowerConsumption: false -perPodPowerManagement: true ----- - | Critical and non-critical workloads - | Allows for power management per pod. - -|=== \ No newline at end of file diff --git a/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc b/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc deleted file mode 100644 index f09231fed01d..000000000000 --- a/modules/cnf-use-device-interrupt-processing-for-isolated-cpus.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// CNF-802 Infrastructure-provided interrupt processing for guaranteed pod CPUs -// Module included in the following assemblies: -// -// *cnf-low-latency-tuning.adoc - -[id="use-device-interrupt-processing-for-isolated-cpus_{context}"] -= Upgrading the performance profile to use device interrupt processing - -When you upgrade the Node Tuning Operator performance profile custom resource definition (CRD) from v1 or v1alpha1 to v2, `globallyDisableIrqLoadBalancing` is set to `true` on existing profiles. - -[NOTE] -==== -`globallyDisableIrqLoadBalancing` toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to `true` it disables IRQ load balancing for the Isolated CPU set. Setting the option to `false` allows the IRQs to be balanced across all CPUs. -==== - -[id="nto_supported_api_versions_{context}"] -== Supported API Versions - -The Node Tuning Operator supports `v2`, `v1`, and `v1alpha1` for the performance profile `apiVersion` field. The v1 and v1alpha1 APIs are identical. The v2 API includes an optional boolean field `globallyDisableIrqLoadBalancing` with a default value of `false`. - -[id="upgrading_nto_api_from_v1alpha1_to_v1_{context}"] -=== Upgrading Node Tuning Operator API from v1alpha1 to v1 - -When upgrading Node Tuning Operator API version from v1alpha1 to v1, the v1alpha1 performance profiles are converted on-the-fly using a "None" Conversion strategy and served to the Node Tuning Operator with API version v1. - -[id="upgrading_nto_api_from_v1alpha1_to_v1_or_v2_{context}"] -=== Upgrading Node Tuning Operator API from v1alpha1 or v1 to v2 - -When upgrading from an older Node Tuning Operator API version, the existing v1 and v1alpha1 performance profiles are converted using a conversion webhook that injects the `globallyDisableIrqLoadBalancing` field with a value of `true`. diff --git a/modules/cnf-verifying-queue-status.adoc b/modules/cnf-verifying-queue-status.adoc deleted file mode 100644 index 06a90dc73ffe..000000000000 --- a/modules/cnf-verifying-queue-status.adoc +++ /dev/null @@ -1,225 +0,0 @@ -// Module included in the following assemblies: -//CNF-1483 (4.8) -// * scalability_and_performance/cnf-low-latency-tuning.adoc - -[id="verifying-queue-status_{context}"] -= Verifying the queue status - -In this section, a number of examples illustrate different performance profiles and how to verify the changes are applied. - -.Example 1 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported devices. 
- -The relevant section from the performance profile is: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true -# ... ----- - -* Display the status of the queues associated with a device using the following command: -+ -[NOTE] -==== -Run this command on the node where the performance profile was applied. -==== -+ -[source,terminal] ----- -$ ethtool -l ----- - -* Verify the queue status before the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- - -<1> The combined channel shows that the total count of reserved CPUs for _all_ supported devices is 2. This matches what is configured in the performance profile. - -.Example 2 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported network devices with a specific `vendorID`. - -The relevant section from the performance profile is: - -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true - devices: - - vendorID = 0x1af4 -# ... ----- - -* Display the status of the queues associated with a device using the following command: -+ -[NOTE] -==== -Run this command on the node where the performance profile was applied. -==== -+ -[source,terminal] ----- -$ ethtool -l ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- - -<1> The total count of reserved CPUs for all supported devices with `vendorID=0x1af4` is 2. -For example, if there is another network device `ens2` with `vendorID=0x1af4` it will also have total net queues of 2. This matches what is configured in the performance profile. - -.Example 3 - -In this example, the net queue count is set to the reserved CPU count (2) for _all_ supported network devices that match any of the defined device identifiers. - -The command `udevadm info` provides a detailed report on a device. In this example the devices are: - -[source,terminal] ----- -# udevadm info -p /sys/class/net/ens4 -... -E: ID_MODEL_ID=0x1000 -E: ID_VENDOR_ID=0x1af4 -E: INTERFACE=ens4 -... ----- - -[source,terminal] ----- -# udevadm info -p /sys/class/net/eth0 -... -E: ID_MODEL_ID=0x1002 -E: ID_VENDOR_ID=0x1001 -E: INTERFACE=eth0 -... 
----- - -* Set the net queues to 2 for a device with `interfaceName` equal to `eth0` and any devices that have a `vendorID=0x1af4` with the following performance profile: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -metadata: - name: performance -spec: - kind: PerformanceProfile - spec: - cpu: - reserved: 0-1 #total = 2 - isolated: 2-8 - net: - userLevelNetworking: true - devices: - - interfaceName = eth0 - - vendorID = 0x1af4 -... ----- - -* Verify the queue status after the profile is applied: -+ -[source,terminal] ----- -$ ethtool -l ens4 ----- -+ -.Example output -[source,terminal] ----- -Channel parameters for ens4: -Pre-set maximums: -RX: 0 -TX: 0 -Other: 0 -Combined: 4 -Current hardware settings: -RX: 0 -TX: 0 -Other: 0 -Combined: 2 <1> ----- -+ -<1> The total count of reserved CPUs for all supported devices with `vendorID=0x1af4` is set to 2. -For example, if there is another network device `ens2` with `vendorID=0x1af4`, it will also have the total net queues set to 2. Similarly, a device with `interfaceName` equal to `eth0` will have total net queues set to 2. diff --git a/modules/codeready-workspaces.adoc b/modules/codeready-workspaces.adoc deleted file mode 100644 index 6c64b30d0670..000000000000 --- a/modules/codeready-workspaces.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * adding_service_cluster/available-services.adoc -// * adding_service_cluster/rosa-available-services.adoc - -[id="codeready-workspaces_{context}"] -= {openshift-dev-spaces-productname} - -The {openshift-dev-spaces-productname} service is available as an add-on to your {product-title} cluster. {openshift-dev-spaces-productname} is a developer tool that makes cloud-native development practical for teams, using Kubernetes and containers to provide any member of the development or IT team with a consistent, preconfigured development environment. Developers can create code, build, and test in containers running on {product-title}. - -[NOTE] -==== -When using this service with {product-title}, {openshift-dev-spaces-productname} can be deployed to any namespace except `openshift-workspaces`. -==== - -[role="_additional-resources"] -.Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_dev_spaces/[{openshift-dev-spaces-productname}] documentation diff --git a/modules/collecting-docker-logs-windows.adoc b/modules/collecting-docker-logs-windows.adoc deleted file mode 100644 index 1f9b42554d59..000000000000 --- a/modules/collecting-docker-logs-windows.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-docker-logs-windows_{context}"] -= Collecting Docker logs for Windows containers - -The Windows Docker service does not stream its logs to stdout, but instead, logs to the event log for Windows. You can view the Docker event logs to investigate issues you think might be caused by the Windows Docker service. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -. SSH into the Windows node and enter PowerShell: -+ -[source,terminal] ----- -C:\> powershell ----- - -. 
View the Docker logs by running the following command: -+ -[source,terminal] ----- -C:\> Get-EventLog -LogName Application -Source Docker ----- diff --git a/modules/collecting-kube-node-logs-windows.adoc b/modules/collecting-kube-node-logs-windows.adoc deleted file mode 100644 index 5dc1c3a331c2..000000000000 --- a/modules/collecting-kube-node-logs-windows.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-kube-node-logs-windows_{context}"] -= Collecting Kubernetes node logs for Windows containers - -Windows container logging works differently from Linux container logging; the Kubernetes node logs for Windows workloads are streamed to the `C:\var\logs` directory by default. Therefore, you must gather the Windows node logs from that directory. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -. To view the logs under all directories in `C:\var\logs`, run the following command: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path= \ - /ip-10-0-138-252.us-east-2.compute.internal containers \ - /ip-10-0-138-252.us-east-2.compute.internal hybrid-overlay \ - /ip-10-0-138-252.us-east-2.compute.internal kube-proxy \ - /ip-10-0-138-252.us-east-2.compute.internal kubelet \ - /ip-10-0-138-252.us-east-2.compute.internal pods ----- - -. You can now list files in the directories using the same command and view the individual log files. For example, to view the kubelet logs, run the following command: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=/kubelet/kubelet.log ----- diff --git a/modules/collecting-windows-application-event-logs.adoc b/modules/collecting-windows-application-event-logs.adoc deleted file mode 100644 index 349032028392..000000000000 --- a/modules/collecting-windows-application-event-logs.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="collecting-windows-application-event-logs_{context}"] -= Collecting Windows application event logs - -The `Get-WinEvent` shim on the kubelet `logs` endpoint can be used to collect application event logs from Windows machines. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -* To view logs from all applications logging to the event logs on the Windows machine, run: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=journal ----- -+ -The same command is executed when collecting logs with `oc adm must-gather`. -+ -Other Windows application logs from the event log can also be collected by specifying the respective service with a `-u` flag. 
For example, you can run the following command to collect logs for the docker runtime service: -+ -[source,terminal] ----- -$ oc adm node-logs -l kubernetes.io/os=windows --path=journal -u docker ----- diff --git a/modules/compliance-anatomy.adoc b/modules/compliance-anatomy.adoc deleted file mode 100644 index c19f299fd4fa..000000000000 --- a/modules/compliance-anatomy.adoc +++ /dev/null @@ -1,346 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -[id="compliance-anatomy_{context}"] -= Anatomy of a scan - -The following sections outline the components and stages of Compliance Operator scans. - -[id="compliance-anatomy-compliance-sources_{context}"] -== Compliance sources -The compliance content is stored in `Profile` objects that are generated from a `ProfileBundle` object. The Compliance Operator creates a `ProfileBundle` object for the cluster and another for the cluster nodes. - -[source,terminal] ----- -$ oc get -n openshift-compliance profilebundle.compliance ----- - -[source,terminal] ----- -$ oc get -n openshift-compliance profile.compliance ----- - -The `ProfileBundle` objects are processed by deployments labeled with the `Bundle` name. To troubleshoot an issue with the `Bundle`, you can find the deployment and view logs of the pods in a deployment: - -[source,terminal] ----- -$ oc logs -n openshift-compliance -lprofile-bundle=ocp4 -c profileparser ----- - -[source,terminal] ----- -$ oc get -n openshift-compliance deployments,pods -lprofile-bundle=ocp4 ----- - -[source,terminal] ----- -$ oc logs -n openshift-compliance pods/ ----- - -[source,terminal] ----- -$ oc describe -n openshift-compliance pod/ -c profileparser ----- - -[id="compliance-anatomy-scan-setting-scan-binding-lifecycle_{context}"] -== The ScanSetting and ScanSettingBinding objects lifecycle and debugging -With valid compliance content sources, the high-level `ScanSetting` and `ScanSettingBinding` objects can be used to generate `ComplianceSuite` and `ComplianceScan` objects: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: my-companys-constraints -debug: true -# For each role, a separate scan will be created pointing -# to a node-role specified in roles -roles: - - worker ---- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: my-companys-compliance-requirements -profiles: - # Node checks - - name: rhcos4-e8 - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 - # Cluster checks - - name: ocp4-e8 - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 -settingsRef: - name: my-companys-constraints - kind: ScanSetting - apiGroup: compliance.openshift.io/v1alpha1 ----- - -Both `ScanSetting` and `ScanSettingBinding` objects are handled by the same controller tagged with `logger=scansettingbindingctrl`. These objects have no status. Any issues are communicated in form of events: - -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuiteCreated 9m52s scansettingbindingctrl ComplianceSuite openshift-compliance/my-companys-compliance-requirements created ----- - -Now a `ComplianceSuite` object is created. The flow continues to reconcile the newly created `ComplianceSuite`. - -[id="compliance-suite-lifecycle-debugging_{context}"] -== ComplianceSuite custom resource lifecycle and debugging -The `ComplianceSuite` CR is a wrapper around `ComplianceScan` CRs. 
The `ComplianceSuite` CR is handled by controller tagged with `logger=suitectrl`. -This controller handles creating scans from a suite, reconciling and aggregating individual Scan statuses into a single Suite status. If a suite is set to execute periodically, the `suitectrl` also handles creating a `CronJob` CR that re-runs the scans in the suite after the initial run is done: - -[source,terminal] ----- -$ oc get cronjobs ----- - -.Example output -[source,terminal] ----- -NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE - 0 1 * * * False 0 151m ----- - -For the most important issues, events are emitted. View them with `oc describe compliancesuites/`. The `Suite` objects also have a `Status` subresource that is updated when any of `Scan` objects that belong to this suite update their `Status` subresource. After all expected scans are created, control is passed to the scan controller. - -[id="compliance-scan-lifecycle-debugging_{context}"] -== ComplianceScan custom resource lifecycle and debugging -The `ComplianceScan` CRs are handled by the `scanctrl` controller. This is also where the actual scans happen and the scan results are created. Each scan goes through several phases: - -[id="compliance-scan-pending-phase_{context}"] -=== Pending phase -The scan is validated for correctness in this phase. If some parameters like storage size are invalid, the scan transitions to DONE with ERROR result, otherwise proceeds to the Launching phase. - -[id="compliance-scan-launching-phase_{context}"] -=== Launching phase -In this phase, several config maps that contain either environment for the scanner pods or directly the script that the scanner pods will be evaluating. List the config maps: - -[source,terminal] ----- -$ oc -n openshift-compliance get cm \ --l compliance.openshift.io/scan-name=rhcos4-e8-worker,complianceoperator.openshift.io/scan-script= ----- - -These config maps will be used by the scanner pods. If you ever needed to modify the scanner behavior, change the scanner debug level or print the raw results, modifying the config maps is the way to go. Afterwards, a persistent volume claim is created per scan to store the raw ARF results: - -[source,terminal] ----- -$ oc get pvc -n openshift-compliance -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -The PVCs are mounted by a per-scan `ResultServer` deployment. A `ResultServer` is a simple HTTP server where the individual scanner pods upload the full ARF results to. Each server can run on a different node. The full ARF results might be very large and you cannot presume that it would be possible to create a volume that could be mounted from multiple nodes at the same time. After the scan is finished, the `ResultServer` deployment is scaled down. The PVC with the raw results can be mounted from another custom pod and the results can be fetched or inspected. The traffic between the scanner pods and the `ResultServer` is protected by mutual TLS protocols. - -Finally, the scanner pods are launched in this phase; one scanner pod for a `Platform` scan instance and one scanner pod per matching node for a `node` scan instance. The per-node pods are labeled with the node name. 
Each pod is always labeled with the `ComplianceScan` name: - -[source,terminal] ----- -$ oc get pods -lcompliance.openshift.io/scan-name=rhcos4-e8-worker,workload=scanner --show-labels ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE LABELS -rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod 0/2 Completed 0 39m compliance.openshift.io/scan-name=rhcos4-e8-worker,targetNode=ip-10-0-169-90.eu-north-1.compute.internal,workload=scanner ----- -+ -The scan then proceeds to the Running phase. - -[id="compliance-scan-running-phase_{context}"] -=== Running phase -The running phase waits until the scanner pods finish. The following terms and processes are in use in the running phase: - -* *init container*: There is one init container called `content-container`. It runs the *contentImage* container and executes a single command that copies the *contentFile* to the `/content` directory shared with the other containers in this pod. - -* *scanner*: This container runs the scan. For node scans, the container mounts the node filesystem as `/host` and mounts the content delivered by the init container. The container also mounts the `entrypoint` `ConfigMap` created in the Launching phase and executes it. The default script in the entrypoint `ConfigMap` executes OpenSCAP and stores the result files in the `/results` directory shared between the pod's containers. Logs from this pod can be viewed to determine what the OpenSCAP scanner checked. More verbose output can be viewed with the `debug` flag. - -* *logcollector*: The logcollector container waits until the scanner container finishes. Then, it uploads the full ARF results to the `ResultServer` and separately uploads the XCCDF results along with scan result and OpenSCAP result code as a `ConfigMap.` These result config maps are labeled with the scan name (`compliance.openshift.io/scan-name=rhcos4-e8-worker`): -+ -[source,terminal] ----- -$ oc describe cm/rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod ----- -+ -.Example output -[source,terminal] ----- - Name: rhcos4-e8-worker-ip-10-0-169-90.eu-north-1.compute.internal-pod - Namespace: openshift-compliance - Labels: compliance.openshift.io/scan-name-scan=rhcos4-e8-worker - complianceoperator.openshift.io/scan-result= - Annotations: compliance-remediations/processed: - compliance.openshift.io/scan-error-msg: - compliance.openshift.io/scan-result: NON-COMPLIANT - OpenSCAP-scan-result/node: ip-10-0-169-90.eu-north-1.compute.internal - - Data - ==== - exit-code: - ---- - 2 - results: - ---- - - ... ----- - -Scanner pods for `Platform` scans are similar, except: - -* There is one extra init container called `api-resource-collector` that reads the OpenSCAP content provided by the content-container init, container, figures out which API resources the content needs to examine and stores those API resources to a shared directory where the `scanner` container would read them from. - -* The `scanner` container does not need to mount the host file system. - -When the scanner pods are done, the scans move on to the Aggregating phase. - -[id="compliance-scan-aggregating-phase_{context}"] -=== Aggregating phase -In the aggregating phase, the scan controller spawns yet another pod called the aggregator pod. Its purpose it to take the result `ConfigMap` objects, read the results and for each check result create the corresponding Kubernetes object. If the check failure can be automatically remediated, a `ComplianceRemediation` object is created. 
To provide human-readable metadata for the checks and remediations, the aggregator pod also mounts the OpenSCAP content using an init container. - -When a config map is processed by an aggregator pod, it is labeled the `compliance-remediations/processed` label. The result of this phase are `ComplianceCheckResult` objects: - -[source,terminal] ----- -$ oc get compliancecheckresults -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -rhcos4-e8-worker-accounts-no-uid-except-zero PASS high -rhcos4-e8-worker-audit-rules-dac-modification-chmod FAIL medium ----- -and `ComplianceRemediation` objects: - -[source,terminal] ----- -$ oc get complianceremediations -lcompliance.openshift.io/scan-name=rhcos4-e8-worker ----- - -.Example output -[source,terminal] ----- -NAME STATE -rhcos4-e8-worker-audit-rules-dac-modification-chmod NotApplied -rhcos4-e8-worker-audit-rules-dac-modification-chown NotApplied -rhcos4-e8-worker-audit-rules-execution-chcon NotApplied -rhcos4-e8-worker-audit-rules-execution-restorecon NotApplied -rhcos4-e8-worker-audit-rules-execution-semanage NotApplied -rhcos4-e8-worker-audit-rules-execution-setfiles NotApplied ----- - -After these CRs are created, the aggregator pod exits and the scan moves on to the Done phase. - -[id="compliance-scan-done-phase_{context}"] -=== Done phase -In the final scan phase, the scan resources are cleaned up if needed and the `ResultServer` deployment is either scaled down (if the scan was one-time) or deleted if the scan is continuous; the next scan instance would then recreate the deployment again. - -It is also possible to trigger a re-run of a scan in the Done phase by annotating it: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -After the scan reaches the Done phase, nothing else happens on its own unless the remediations are set to be applied automatically with `autoApplyRemediations: true`. The {product-title} administrator would now review the remediations and apply them as needed. If the remediations are set to be applied automatically, the `ComplianceSuite` controller takes over in the Done phase, pauses the machine config pool to which the scan maps to and applies all the remediations in one go. If a remediation is applied, the `ComplianceRemediation` controller takes over. - -[id="compliance-remediation-lifecycle-debugging_{context}"] -== ComplianceRemediation controller lifecycle and debugging -The example scan has reported some findings. One of the remediations can be enabled by toggling its `apply` attribute to `true`: - -[source,terminal] ----- -$ oc patch complianceremediations/rhcos4-e8-worker-audit-rules-dac-modification-chmod --patch '{"spec":{"apply":true}}' --type=merge ----- - -The `ComplianceRemediation` controller (`logger=remediationctrl`) reconciles the modified object. The result of the reconciliation is change of status of the remediation object that is reconciled, but also a change of the rendered per-suite `MachineConfig` object that contains all the applied remediations. 
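
As a rough sketch of what the reconciled object looks like after the patch above, the remediation carries the toggled `spec.apply` field and an application state in its status (referred to as `status.ApplicationState` in the applying-remediation module). The following is abbreviated and illustrative only; it is not captured cluster output:

[source,yaml]
----
apiVersion: compliance.openshift.io/v1alpha1
kind: ComplianceRemediation
metadata:
  name: rhcos4-e8-worker-audit-rules-dac-modification-chmod
  namespace: openshift-compliance
  labels:
    compliance.openshift.io/scan-name: rhcos4-e8-worker
spec:
  apply: true                # toggled by the oc patch command above
  object:
    current:                 # the fix that is rendered into the per-suite MachineConfig
      apiVersion: machineconfiguration.openshift.io/v1
      kind: MachineConfig
status:
  applicationState: Applied  # updated by the remediation controller after reconciliation
----
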
- -The `MachineConfig` object always begins with `75-` and is named after the scan and the suite: - -[source,terminal] ----- -$ oc get mc | grep 75- ----- - -.Example output -[source,terminal] ----- -75-rhcos4-e8-worker-my-companys-compliance-requirements 3.2.0 2m46s ----- - -The remediations the `mc` currently consists of are listed in the machine config's annotations: - -[source,terminal] ----- -$ oc describe mc/75-rhcos4-e8-worker-my-companys-compliance-requirements ----- - -.Example output -[source,terminal] ----- -Name: 75-rhcos4-e8-worker-my-companys-compliance-requirements -Labels: machineconfiguration.openshift.io/role=worker -Annotations: remediation/rhcos4-e8-worker-audit-rules-dac-modification-chmod: ----- - -The `ComplianceRemediation` controller's algorithm works like this: - -* All currently applied remediations are read into an initial remediation set. -* If the reconciled remediation is supposed to be applied, it is added to the set. -* A `MachineConfig` object is rendered from the set and annotated with names of remediations in the set. If the set is empty (the last remediation was unapplied), the rendered `MachineConfig` object is removed. -* If and only if the rendered machine config is different from the one already applied in the cluster, the applied MC is updated (or created, or deleted). -* Creating or modifying a `MachineConfig` object triggers a reboot of nodes that match the `machineconfiguration.openshift.io/role` label - see the Machine Config Operator documentation for more details. - -The remediation loop ends once the rendered machine config is updated, if needed, and the reconciled remediation object status is updated. In our case, applying the remediation would trigger a reboot. After the reboot, annotate the scan to re-run it: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -The scan will run and finish. Check for the remediation to pass: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -get compliancecheckresults/rhcos4-e8-worker-audit-rules-dac-modification-chmod ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -rhcos4-e8-worker-audit-rules-dac-modification-chmod PASS medium ----- - -[id="compliance-operator-useful-labels_{context}"] -== Useful labels - -Each pod that is spawned by the Compliance Operator is labeled specifically with the scan it belongs to and the work it does. The scan identifier is labeled with the `compliance.openshift.io/scan-name` label. The workload identifier is labeled with the `workload` label. - -The Compliance Operator schedules the following workloads: - -* *scanner*: Performs the compliance scan. - -* *resultserver*: Stores the raw results for the compliance scan. - -* *aggregator*: Aggregates the results, detects inconsistencies and outputs result objects (checkresults and remediations). - -* *suitererunner*: Will tag a suite to be re-run (when a schedule is set). - -* *profileparser*: Parses a datastream and creates the appropriate profiles, rules and variables. 
- -When debugging and logs are required for a certain workload, run: - -[source,terminal] ----- -$ oc logs -l workload= -c ----- diff --git a/modules/compliance-apply-remediation-for-customized-mcp.adoc b/modules/compliance-apply-remediation-for-customized-mcp.adoc deleted file mode 100644 index 4821ec97e0ad..000000000000 --- a/modules/compliance-apply-remediation-for-customized-mcp.adoc +++ /dev/null @@ -1,74 +0,0 @@ -:_content-type: PROCEDURE -[id="compliance-operator-apply-remediation-for-customized-mcp"] -= Applying remediation when using customized machine config pools - -When you create a custom `MachineConfigPool`, add a label to the `MachineConfigPool` so that `machineConfigPoolSelector` present in the `KubeletConfig` can match the label with `MachineConfigPool`. - -[IMPORTANT] -==== -Do not set `protectKernelDefaults: false` in the `KubeletConfig` file, because the `MachineConfigPool` object might fail to unpause unexpectedly after the Compliance Operator finishes applying remediation. -==== - -.Procedure - -. List the nodes. -+ -[source,terminal] ----- -$ oc get nodes -n openshift-compliance ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-128-92.us-east-2.compute.internal Ready master 5h21m v1.26.0 -ip-10-0-158-32.us-east-2.compute.internal Ready worker 5h17m v1.26.0 -ip-10-0-166-81.us-east-2.compute.internal Ready worker 5h17m v1.26.0 -ip-10-0-171-170.us-east-2.compute.internal Ready master 5h21m v1.26.0 -ip-10-0-197-35.us-east-2.compute.internal Ready master 5h22m v1.26.0 ----- - -. Add a label to nodes. -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -label node ip-10-0-166-81.us-east-2.compute.internal \ -node-role.kubernetes.io/= ----- -+ -.Example output -+ -[source,terminal] ----- -node/ip-10-0-166-81.us-east-2.compute.internal labeled ----- - -. Create custom `MachineConfigPool` CR. -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: - labels: - pools.operator.machineconfiguration.openshift.io/: '' <1> -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/: "" ----- -<1> The `labels` field defines label name to add for Machine config pool(MCP). - -. Verify MCP created successfully. -+ -[source,terminal] ----- -$ oc get mcp -w ----- diff --git a/modules/compliance-apply-remediations-from-scans.adoc b/modules/compliance-apply-remediations-from-scans.adoc deleted file mode 100644 index 712384bcf369..000000000000 --- a/modules/compliance-apply-remediations-from-scans.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-cli_{context}"] -= Applying remediations generated by suite scans - -Although you can use the `autoApplyRemediations` boolean parameter in a `ComplianceSuite` object, you can alternatively annotate the object with `compliance.openshift.io/apply-remediations`. This allows the Operator to apply all of the created remediations. 
- -.Procedure - -* Apply the `compliance.openshift.io/apply-remediations` annotation by running: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancesuites/workers-compliancesuite compliance.openshift.io/apply-remediations= ----- diff --git a/modules/compliance-applying-resource-requests-and-limits.adoc b/modules/compliance-applying-resource-requests-and-limits.adoc deleted file mode 100644 index 00f52953e297..000000000000 --- a/modules/compliance-applying-resource-requests-and-limits.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-applying-resource-requests-and-limits_{context}"] -= Applying resource requests and limits - -When the kubelet starts a container as part of a Pod, the kubelet passes that container's requests and limits for memory and CPU to the container runtime. In Linux, the container runtime configures the kernel cgroups that apply and enforce the limits you defined. - -The CPU limit defines how much CPU time the container can use. During each scheduling interval, the Linux kernel checks to see if this limit is exceeded. If so, the kernel waits before allowing the cgroup to resume execution. - -If several different containers (cgroups) want to run on a contended system, workloads with larger CPU requests are allocated more CPU time than workloads with small requests. The memory request is used during Pod scheduling. On a node that uses cgroups v2, the container runtime might use the memory request as a hint to set `memory.min` and `memory.low` values. - -If a container attempts to allocate more memory than this limit, the Linux kernel out-of-memory subsystem activates and intervenes by stopping one of the processes in the container that tried to allocate memory. The memory limit for the Pod or container can also apply to pages in memory-backed volumes, such as an emptyDir. - -The kubelet tracks `tmpfs` `emptyDir` volumes as container memory is used, rather than as local ephemeral storage. If a container exceeds its memory request and the node that it runs on becomes short of memory overall, the Pod's container might be evicted. - -[IMPORTANT] -==== -A container may not exceed its CPU limit for extended periods. Container run times do not stop Pods or containers for excessive CPU usage. To determine whether a container cannot be scheduled or is being killed due to resource limits, see _Troubleshooting the Compliance Operator_. -==== diff --git a/modules/compliance-applying.adoc b/modules/compliance-applying.adoc deleted file mode 100644 index d448f850ff83..000000000000 --- a/modules/compliance-applying.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -[id="compliance-applying_{context}"] -= Applying a remediation - -The boolean attribute `spec.apply` controls whether the remediation should be applied by the Compliance Operator. You can apply the remediation by setting the attribute to `true`: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -patch complianceremediations/-sysctl-net-ipv4-conf-all-accept-redirects \ ---patch '{"spec":{"apply":true}}' --type=merge ----- - -After the Compliance Operator processes the applied remediation, the `status.ApplicationState` attribute would change to *Applied* or to *Error* if incorrect. 
When a machine config remediation is applied, that remediation along with all other applied remediations are rendered into a `MachineConfig` object named `75-$scan-name-$suite-name`. That `MachineConfig` object is subsequently rendered by the Machine Config Operator and finally applied to all the nodes in a machine config pool by an instance of the machine control daemon running on each node. - -Note that when the Machine Config Operator applies a new `MachineConfig` object to nodes in a pool, all the nodes belonging to the pool are rebooted. This might be inconvenient when applying multiple remediations, each of which re-renders the composite `75-$scan-name-$suite-name` `MachineConfig` object. To prevent applying the remediation immediately, you can pause the machine config pool by setting the `.spec.paused` attribute of a `MachineConfigPool` object to `true`. - -The Compliance Operator can apply remediations automatically. Set `autoApplyRemediations: true` in the `ScanSetting` top-level object. - -[WARNING] -==== -Applying remediations automatically should only be done with careful consideration. -==== diff --git a/modules/compliance-auto-update-remediations.adoc b/modules/compliance-auto-update-remediations.adoc deleted file mode 100644 index 90e920ed854a..000000000000 --- a/modules/compliance-auto-update-remediations.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="automatically-update-remediations_{context}"] -= Automatically update remediations - -In some cases, a scan with newer content might mark remediations as `OUTDATED`. As an administrator, you can apply the `compliance.openshift.io/remove-outdated` annotation to apply new remediations and remove the outdated ones. - -.Procedure - -* Apply the `compliance.openshift.io/remove-outdated` annotation: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancesuites/workers-compliancesuite compliance.openshift.io/remove-outdated= ----- - -Alternatively, set the `autoUpdateRemediations` flag in a `ScanSetting` or `ComplianceSuite` object to update the remediations automatically. diff --git a/modules/compliance-crd-advanced-compliance-scan.adoc b/modules/compliance-crd-advanced-compliance-scan.adoc deleted file mode 100644 index 8ae9e5acfd84..000000000000 --- a/modules/compliance-crd-advanced-compliance-scan.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="advance-compliance-scan-object_{context}"] -= Advanced ComplianceScan Object -The Compliance Operator includes options for advanced users for debugging or integrating with existing tooling. While it is recommended that you not create a `ComplianceScan` object directly, you can instead manage it using a `ComplianceSuite` object. - -.Example Advanced `ComplianceScan` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceScan -metadata: - name: -spec: - scanType: Node <1> - profile: xccdf_org.ssgproject.content_profile_moderate <2> - content: ssg-ocp4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest <3> - rule: "xccdf_org.ssgproject.content_rule_no_netrc_files" <4> - nodeSelector: <5> - node-role.kubernetes.io/worker: "" -status: - phase: DONE <6> - result: NON-COMPLIANT <7> ----- - -<1> Specify either `Node` or `Platform`. 
Node profiles scan the cluster nodes and platform profiles scan the Kubernetes platform. -<2> Specify the XCCDF identifier of the profile that you want to run. -<3> Specify the container image that encapsulates the profile files. -<4> It is optional. Specify the scan to run a single rule. This rule has to be identified with the XCCDF ID, and has to belong to the specified profile. -+ -[NOTE] -==== -If you skip the `rule` parameter, then scan runs for all the available rules of the specified profile. -==== -<5> If you are on the {product-title} and wants to generate a remediation, then nodeSelector label has to match the `MachineConfigPool` label. -+ -[NOTE] -==== -If you do not specify `nodeSelector` parameter or match the `MachineConfig` label, scan will still run, but it will not create remediation. -==== -<6> Indicates the current phase of the scan. -<7> Indicates the verdict of the scan. - -[IMPORTANT] -==== -If you delete a `ComplianceSuite` object, then all the associated scans get deleted. -==== - -When the scan is complete, it generates the result as Custom Resources of the `ComplianceCheckResult` object. However, the raw results are available in ARF format. These results are stored in a Persistent Volume (PV), which has a Persistent Volume Claim (PVC) associated with the name of the scan. -You can programmatically fetch the `ComplianceScans` events. To generate events for the suite, run the following command: - -[source,terminal] ----- -oc get events --field-selector involvedObject.kind=ComplianceScan,involvedObject.name= ----- diff --git a/modules/compliance-crd-compliance-check-result.adoc b/modules/compliance-crd-compliance-check-result.adoc deleted file mode 100644 index c5ff012bb21d..000000000000 --- a/modules/compliance-crd-compliance-check-result.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-check-result_{context}"] -= ComplianceCheckResult object -When you run a scan with a specific profile, several rules in the profiles are verified. For each of these rules, a `ComplianceCheckResult` object is created, which provides the state of the cluster for a specific rule. - -.Example `ComplianceCheckResult` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceCheckResult -metadata: - labels: - compliance.openshift.io/check-severity: medium - compliance.openshift.io/check-status: FAIL - compliance.openshift.io/suite: example-compliancesuite - compliance.openshift.io/scan-name: workers-scan - name: workers-scan-no-direct-root-logins - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceScan - name: workers-scan -description: -instructions: -id: xccdf_org.ssgproject.content_rule_no_direct_root_logins -severity: medium <1> -status: FAIL <2> ----- - -<1> Describes the severity of the scan check. -<2> Describes the result of the check. The possible values are: -* PASS: check was successful. -* FAIL: check was unsuccessful. -* INFO: check was successful and found something not severe enough to be considered an error. -* MANUAL: check cannot automatically assess the status and manual check is required. -* INCONSISTENT: different nodes report different results. -* ERROR: check run successfully, but could not complete. -* NOTAPPLICABLE: check did not run as it is not applicable. 
- -To get all the check results from a suite, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l compliance.openshift.io/suite=workers-compliancesuite ----- diff --git a/modules/compliance-crd-compliance-remediation.adoc b/modules/compliance-crd-compliance-remediation.adoc deleted file mode 100644 index 9a8341e17f4d..000000000000 --- a/modules/compliance-crd-compliance-remediation.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-remediation-object_{context}"] -= ComplianceRemediation object -For a specific check you can have a datastream specified fix. However, if a Kubernetes fix is available, then the Compliance Operator creates a `ComplianceRemediation` object. - -.Example `ComplianceRemediation` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceRemediation -metadata: - labels: - compliance.openshift.io/suite: example-compliancesuite - compliance.openshift.io/scan-name: workers-scan - machineconfiguration.openshift.io/role: worker - name: workers-scan-disable-users-coredumps - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceCheckResult - name: workers-scan-disable-users-coredumps - uid: -spec: - apply: false <1> - object: - current: <2> - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - spec: - config: - ignition: - version: 2.2.0 - storage: - files: - - contents: - source: data:,%2A%20%20%20%20%20hard%20%20%20core%20%20%20%200 - filesystem: root - mode: 420 - path: /etc/security/limits.d/75-disable_users_coredumps.conf - outdated: {} <3> ----- - -<1> `true` indicates the remediation was applied. `false` indicates the remediation was not applied. -<2> Includes the definition of the remediation. -<3> Indicates remediation that was previously parsed from an earlier version of the content. The Compliance Operator still retains the outdated objects to give the administrator a chance to review the new remediations before applying them. - -To get all the remediations from a suite, run the following command: -[source,terminal] ----- -oc get complianceremediations \ --l compliance.openshift.io/suite=workers-compliancesuite ----- - -To list all failing checks that can be remediated automatically, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l 'compliance.openshift.io/check-status in (FAIL),compliance.openshift.io/automated-remediation' ----- - -To list all failing checks that can be remediated manually, run the following command: -[source,terminal] ----- -oc get compliancecheckresults \ --l 'compliance.openshift.io/check-status in (FAIL),!compliance.openshift.io/automated-remediation' ----- diff --git a/modules/compliance-crd-compliance-suite.adoc b/modules/compliance-crd-compliance-suite.adoc deleted file mode 100644 index 372d5a2288f6..000000000000 --- a/modules/compliance-crd-compliance-suite.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="compliance-suite-object_{context}"] -= ComplianceSuite object -The `ComplianceSuite` object helps you keep track of the state of the scans. It contains the raw settings to create scans and the overall result. 
- -For `Node` type scans, you should map the scan to the `MachineConfigPool`, since it contains the remediations for any issues. If you specify a label, ensure it directly applies to a pool. - -.Example `ComplianceSuite` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: -spec: - autoApplyRemediations: false <1> - schedule: "0 1 * * *" <2> - scans: <3> - - name: workers-scan - scanType: Node - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - rule: "xccdf_org.ssgproject.content_rule_no_netrc_files" - nodeSelector: - node-role.kubernetes.io/worker: "" -status: - Phase: DONE <4> - Result: NON-COMPLIANT <5> - scanStatuses: - - name: workers-scan - phase: DONE - result: NON-COMPLIANT ----- -<1> Set to `true` to enable auto remediations. Set to `false` to disable auto remediations. -<2> Specify how often the scan should be run in cron format. -<3> Specify a list of scan specifications to run in the cluster. -<4> Indicates the progress of the scans. -<5> Indicates the overall verdict of the suite. - -The suite in the background creates the `ComplianceScan` object based on the `scans` parameter. -You can programmatically fetch the `ComplianceSuites` events. To get the events for the suite, run the following command: -[source,terminal] ----- -$ oc get events --field-selector involvedObject.kind=ComplianceSuite,involvedObject.name= ----- - -[IMPORTANT] -==== -You might create errors when you manually define the `ComplianceSuite`, since it contains the XCCDF attributes. -==== diff --git a/modules/compliance-crd-profile-bundle.adoc b/modules/compliance-crd-profile-bundle.adoc deleted file mode 100644 index 971f4ceb9beb..000000000000 --- a/modules/compliance-crd-profile-bundle.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="profile-bundle-object_{context}"] -= ProfileBundle object -When you install the Compliance Operator, it includes ready-to-run `ProfileBundle` objects. The Compliance Operator parses the `ProfileBundle` object and creates a `Profile` object for each profile in the bundle. It also parses `Rule` and `Variable` objects, which are used by the `Profile` object. - - -.Example `ProfileBundle` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle - name: - namespace: openshift-compliance -status: - dataStreamStatus: VALID <1> ----- -<1> Indicates whether the Compliance Operator was able to parse the content files. - -[NOTE] -==== -When the `contentFile` fails, an `errorMessage` attribute appears, which provides details of the error that occurred. -==== - -.Troubleshooting - -When you roll back to a known content image from an invalid image, the `ProfileBundle` object stops responding and displays `PENDING` state. As a workaround, you can move to a different image than the previous one. Alternatively, you can delete and re-create the `ProfileBundle` object to return to the working state. 
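
For reference, the `spec` side of a `ProfileBundle`, which the example above omits, names the content image and the datastream file that the Compliance Operator parses. The following minimal sketch reuses the content image and datastream file shown in the `ComplianceScan` examples in these modules; the values are placeholders, not a recommendation:

[source,yaml]
----
apiVersion: compliance.openshift.io/v1alpha1
kind: ProfileBundle
metadata:
  name: ocp4
  namespace: openshift-compliance
spec:
  contentImage: quay.io/complianceascode/ocp4:latest <1>
  contentFile: ssg-ocp4-ds.xml <2>
----
<1> Container image that ships the compliance content.
<2> Datastream file inside the image that is parsed into `Profile`, `Rule`, and `Variable` objects.
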
diff --git a/modules/compliance-crd-profile.adoc b/modules/compliance-crd-profile.adoc deleted file mode 100644 index 9dbb9ba919e4..000000000000 --- a/modules/compliance-crd-profile.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="profile-object_{context}"] -= Profile object - -The `Profile` object defines the rules and variables that can be evaluated for a certain compliance standard. It contains parsed out details about an OpenSCAP profile, such as its XCCDF identifier and profile checks for a `Node` or `Platform` type. You can either directly use the `Profile` object or further customize it using a `TailorProfile` object. - -[NOTE] -==== -You cannot create or modify the `Profile` object manually because it is derived from a single `ProfileBundle` object. Typically, a single `ProfileBundle` object can include several `Profile` objects. -==== - -.Example `Profile` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -description: -id: xccdf_org.ssgproject.content_profile_moderate <1> -kind: Profile -metadata: - annotations: - compliance.openshift.io/product: - compliance.openshift.io/product-type: Node <2> - creationTimestamp: "YYYY-MM-DDTMM:HH:SSZ" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: - name: rhcos4-moderate - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: - uid: - resourceVersion: "" - selfLink: /apis/compliance.openshift.io/v1alpha1/namespaces/openshift-compliance/profiles/rhcos4-moderate - uid: -rules: <3> -- rhcos4-account-disable-post-pw-expiration -- rhcos4-accounts-no-uid-except-zero -- rhcos4-audit-rules-dac-modification-chmod -- rhcos4-audit-rules-dac-modification-chown -title: ----- -<1> Specify the XCCDF name of the profile. Use this identifier when you define a `ComplianceScan` object as the value of the profile attribute of the scan. -<2> Specify either a `Node` or `Platform`. Node profiles scan the cluster nodes and platform profiles scan the Kubernetes platform. -<3> Specify the list of rules for the profile. Each rule corresponds to a single check. diff --git a/modules/compliance-crd-rule.adoc b/modules/compliance-crd-rule.adoc deleted file mode 100644 index 1ee2b9c4608b..000000000000 --- a/modules/compliance-crd-rule.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="rule-object_{context}"] -= Rule object -The `Rule` object, which forms the profiles, are also exposed as objects. Use the `Rule` object to define your compliance check requirements and specify how it could be fixed. 
- -.Example `Rule` object -[source,yaml] ----- - apiVersion: compliance.openshift.io/v1alpha1 - checkType: Platform <1> - description: <description of the rule> - id: xccdf_org.ssgproject.content_rule_configure_network_policies_namespaces <2> - instructions: <manual instructions for the scan> - kind: Rule - metadata: - annotations: - compliance.openshift.io/rule: configure-network-policies-namespaces - control.compliance.openshift.io/CIS-OCP: 5.3.2 - control.compliance.openshift.io/NERC-CIP: CIP-003-3 R4;CIP-003-3 R4.2;CIP-003-3 - R5;CIP-003-3 R6;CIP-004-3 R2.2.4;CIP-004-3 R3;CIP-007-3 R2;CIP-007-3 R2.1;CIP-007-3 - R2.2;CIP-007-3 R2.3;CIP-007-3 R5.1;CIP-007-3 R6.1 - control.compliance.openshift.io/NIST-800-53: AC-4;AC-4(21);CA-3(5);CM-6;CM-6(1);CM-7;CM-7(1);SC-7;SC-7(3);SC-7(5);SC-7(8);SC-7(12);SC-7(13);SC-7(18) - labels: - compliance.openshift.io/profile-bundle: ocp4 - name: ocp4-configure-network-policies-namespaces - namespace: openshift-compliance - rationale: <description of why this rule is checked> - severity: high <3> - title: <summary of the rule> ----- -<1> Specify the type of check this rule executes. `Node` profiles scan the cluster nodes and `Platform` profiles scan the Kubernetes platform. An empty value indicates there is no automated check. -<2> Specify the XCCDF name of the rule, which is parsed directly from the datastream. -<3> Specify the severity of the rule when it fails. - -[NOTE] -==== -The `Rule` object gets an appropriate label for an easy identification of the associated `ProfileBundle` object. The `ProfileBundle` also gets specified in the `OwnerReferences` of this object. -==== diff --git a/modules/compliance-crd-scan-setting-binding.adoc b/modules/compliance-crd-scan-setting-binding.adoc deleted file mode 100644 index 974d0fa58350..000000000000 --- a/modules/compliance-crd-scan-setting-binding.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="scan-setting-binding-object_{context}"] -= ScanSettingBinding object - -Use the `ScanSettingBinding` object to specify your compliance requirements with reference to the `Profile` or `TailoredProfile` object. It is then linked to a `ScanSetting` object, which provides the operational constraints for the scan. Then the Compliance Operator generates the `ComplianceSuite` object based on the `ScanSetting` and `ScanSettingBinding` objects. - -.Example `ScanSettingBinding` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: <name of the scan> -profiles: <1> - # Node checks - - name: rhcos4-with-usb - kind: TailoredProfile - apiGroup: compliance.openshift.io/v1alpha1 - # Cluster checks - - name: ocp4-moderate - kind: Profile - apiGroup: compliance.openshift.io/v1alpha1 -settingsRef: <2> - name: my-companys-constraints - kind: ScanSetting - apiGroup: compliance.openshift.io/v1alpha1 ----- - -<1> Specify the details of `Profile` or `TailoredProfile` object to scan your environment. -<2> Specify the operational constraints, such as schedule and storage size. - -The creation of `ScanSetting` and `ScanSettingBinding` objects results in the compliance suite. To get the list of compliance suite, run the following command: -[source,terminal] ----- -$ oc get compliancesuites ----- - -[IMPORTANT] -==== -If you delete `ScanSettingBinding`, then compliance suite also is deleted. 
-==== diff --git a/modules/compliance-crd-scan-setting.adoc b/modules/compliance-crd-scan-setting.adoc deleted file mode 100644 index db839f74974c..000000000000 --- a/modules/compliance-crd-scan-setting.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="scan-setting-object_{context}"] -= ScanSetting object -Use the `ScanSetting` object to define and reuse the operational policies to run your scans. -By default, the Compliance Operator creates the following `ScanSetting` objects: - -* *default* - it runs a scan every day at 1 AM on both master and worker nodes using a 1Gi Persistent Volume (PV) and keeps the last three results. Remediation is neither applied nor updated automatically. -* *default-auto-apply* - it runs a scan every day at 1AM on both control plane and worker nodes using a 1Gi Persistent Volume (PV) and keeps the last three results. Both `autoApplyRemediations` and `autoUpdateRemediations` are set to true. - -.Example `ScanSetting` object -[source,yaml] ----- -Name: default-auto-apply -Namespace: openshift-compliance -Labels: <none> -Annotations: <none> -API Version: compliance.openshift.io/v1alpha1 -Auto Apply Remediations: true -Auto Update Remediations: true -Kind: ScanSetting -Metadata: - Creation Timestamp: 2022-10-18T20:21:00Z - Generation: 1 - Managed Fields: - API Version: compliance.openshift.io/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:autoApplyRemediations: <1> - f:autoUpdateRemediations: <2> - f:rawResultStorage: - .: - f:nodeSelector: - .: - f:node-role.kubernetes.io/master: - f:pvAccessModes: - f:rotation: - f:size: - f:tolerations: - f:roles: - f:scanTolerations: - f:schedule: - f:showNotApplicable: - f:strictNodeScan: - Manager: compliance-operator - Operation: Update - Time: 2022-10-18T20:21:00Z - Resource Version: 38840 - UID: 8cb0967d-05e0-4d7a-ac1c-08a7f7e89e84 -Raw Result Storage: - Node Selector: - node-role.kubernetes.io/master: - Pv Access Modes: - ReadWriteOnce - Rotation: 3 <3> - Size: 1Gi <4> - Tolerations: - Effect: NoSchedule - Key: node-role.kubernetes.io/master - Operator: Exists - Effect: NoExecute - Key: node.kubernetes.io/not-ready - Operator: Exists - Toleration Seconds: 300 - Effect: NoExecute - Key: node.kubernetes.io/unreachable - Operator: Exists - Toleration Seconds: 300 - Effect: NoSchedule - Key: node.kubernetes.io/memory-pressure - Operator: Exists -Roles: <6> - master - worker -Scan Tolerations: - Operator: Exists -Schedule: "0 1 * * *" <5> -Show Not Applicable: false -Strict Node Scan: true -Events: <none> ----- -<1> Set to `true` to enable auto remediations. Set to `false` to disable auto remediations. -<2> Set to `true` to enable auto remediations for content updates. Set to `false` to disable auto remediations for content updates. -<3> Specify the number of stored scans in the raw result format. The default value is `3`. As the older results get rotated, the administrator must store the results elsewhere before the rotation happens. -<4> Specify the storage size that should be created for the scan to store the raw results. The default value is `1Gi` -<5> Specify how often the scan should be run in cron format. -+ -[NOTE] -==== -To disable the rotation policy, set the value to `0`. -==== -<6> Specify the `node-role.kubernetes.io` label value to schedule the scan for `Node` type. This value has to match the name of a `MachineConfigPool`. 
diff --git a/modules/compliance-crd-tailored-profile.adoc b/modules/compliance-crd-tailored-profile.adoc deleted file mode 100644 index 96408f985628..000000000000 --- a/modules/compliance-crd-tailored-profile.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="tailored-profile-object_{context}"] -= TailoredProfile object - -Use the `TailoredProfile` object to modify the default `Profile` object based on your organization requirements. You can enable or disable rules, set variable values, and provide justification for the customization. After validation, the `TailoredProfile` object creates a `ConfigMap`, which can be referenced by a `ComplianceScan` object. - -[TIP] -==== -You can use the `TailoredProfile` object by referencing it in a `ScanSettingBinding` object. For more information about `ScanSettingBinding`, see ScanSettingBinding object. -==== - -.Example `TailoredProfile` object -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: rhcos4-with-usb -spec: - extends: rhcos4-moderate <1> - title: <title of the tailored profile> - disableRules: - - name: <name of a rule object to be disabled> - rationale: <description of why this rule is checked> -status: - id: xccdf_compliance.openshift.io_profile_rhcos4-with-usb <2> - outputRef: - name: rhcos4-with-usb-tp <3> - namespace: openshift-compliance - state: READY <4> ----- - -<1> This is optional. Name of the `Profile` object upon which the `TailoredProfile` is built. If no value is set, a new profile is created from the `enableRules` list. -<2> Specifies the XCCDF name of the tailored profile. -<3> Specifies the `ConfigMap` name, which can be used as the value of the `tailoringConfigMap.name` attribute of a `ComplianceScan`. -<4> Shows the state of the object such as `READY`, `PENDING`, and `FAILURE`. If the state of the object is `ERROR`, then the attribute `status.errorMessage` provides the reason for the failure. - -With the `TailoredProfile` object, it is possible to create a new `Profile` object using the `TailoredProfile` construct. To create a new `Profile`, set the following configuration parameters : - -* an appropriate title -* `extends` value must be empty -* scan type annotation on the `TailoredProfile` object: -+ -[source,yaml] ----- -compliance.openshift.io/product-type: Platform/Node ----- -+ -[NOTE] -==== -If you have not set the `product-type` annotation, the Compliance Operator defaults to `Platform` scan type. Adding the `-node` suffix to the name of the `TailoredProfile` object results in `node` scan type. -==== diff --git a/modules/compliance-crd-workflow.adoc b/modules/compliance-crd-workflow.adoc deleted file mode 100644 index a5f4cff79971..000000000000 --- a/modules/compliance-crd-workflow.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-crd.adoc - -:_content-type: CONCEPT -[id="custom-resource-definitions-workflow_{context}"] -= CRDs workflow - -The CRD provides you the following workflow to complete the compliance scans: - -. Define your compliance scan requirements -. Configure the compliance scan settings -. Process compliance requirements with compliance scans settings -. Monitor the compliance scans -. 
Check the compliance scan results diff --git a/modules/compliance-custom-node-pools.adoc b/modules/compliance-custom-node-pools.adoc deleted file mode 100644 index e0c7bd03c0ca..000000000000 --- a/modules/compliance-custom-node-pools.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-custom-node-pools_{context}"] -= Scanning custom node pools - -The Compliance Operator does not maintain a copy of each node pool configuration. The Compliance Operator aggregates consistent configuration options for all nodes within a single node pool into one copy of the configuration file. The Compliance Operator then uses the configuration file for a particular node pool to evaluate rules against nodes within that pool. - -If your cluster uses custom node pools outside the default `worker` and `master` node pools, you must supply additional variables to ensure the Compliance Operator aggregates a configuration file for that node pool. - -.Procedure - -. To check the configuration against all pools in an example cluster containing `master`, `worker`, and custom `example` node pools, set the value of the `ocp-var-role-master` and `opc-var-role-worker` fields to `example` in the `TailoredProfile` object: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: cis-example-tp -spec: - extends: ocp4-cis - title: My modified NIST profile to scan example nodes - setValues: - - name: ocp4-var-role-master - value: example - rationale: test for example nodes - - name: ocp4-var-role-worker - value: example - rationale: test for example nodes - description: cis-example-scan ----- - -. Add the `example` role to the `ScanSetting` object that will be stored in the `ScanSettingBinding` CR: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - rotation: 3 - size: 1Gi -roles: -- worker -- master -- example -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' ----- - -. Create a scan that uses the `ScanSettingBinding` CR: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: cis - namespace: openshift-compliance -profiles: -- apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-cis -- apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-cis-node -- apiGroup: compliance.openshift.io/v1alpha1 - kind: TailoredProfile - name: cis-example-tp -settingsRef: - apiGroup: compliance.openshift.io/v1alpha1 - kind: ScanSetting - name: default ----- - -The Compliance Operator checks the runtime `KubeletConfig` through the `Node/Proxy` API object and then uses variables such as `ocp-var-role-master` and `ocp-var-role-worker` to determine the nodes it performs the check against. In the `ComplianceCheckResult`, the `KubeletConfig` rules are shown as `ocp4-cis-kubelet-*`. The scan passes only if all selected nodes pass this check. - -.Verification - -* The Platform KubeletConfig rules are checked through the `Node/Proxy` object. 
You can find those rules by running the following command: -+ -[source,terminal] ----- -$ oc get rules -o json | jq '.items[] | select(.checkType == "Platform") | select(.metadata.name | contains("ocp4-kubelet-")) | .metadata.name' ----- - diff --git a/modules/compliance-custom-scc.adoc b/modules/compliance-custom-scc.adoc deleted file mode 100644 index fbb0ad118971..000000000000 --- a/modules/compliance-custom-scc.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-custom-scc_{context}"] -= Creating a custom SCC for the Compliance Operator - -In some environments, you must create a custom Security Context Constraints (SCC) file to ensure the correct permissions are available to the Compliance Operator `api-resource-collector`. - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure -. Define the SCC in a YAML file named `restricted-adjusted-compliance.yaml`: -+ -.`SecurityContextConstraints` object definition -[source,yaml] ----- - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegeEscalation: true - allowPrivilegedContainer: false - allowedCapabilities: null - apiVersion: security.openshift.io/v1 - defaultAddCapabilities: null - fsGroup: - type: MustRunAs - kind: SecurityContextConstraints - metadata: - name: restricted-adjusted-compliance - priority: 30 <1> - readOnlyRootFilesystem: false - requiredDropCapabilities: - - KILL - - SETUID - - SETGID - - MKNOD - runAsUser: - type: MustRunAsRange - seLinuxContext: - type: MustRunAs - supplementalGroups: - type: RunAsAny - users: - - system:serviceaccount:openshift-compliance:api-resource-collector <2> - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - projected - - secret ----- -<1> The priority of this SCC must be higher than any other SCC that applies to the `system:authenticated` group. -<2> Service Account used by Compliance Operator Scanner pod. - -. Create the SCC: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f restricted-adjusted-compliance.yaml ----- -+ -.Example output -[source,terminal] ----- -securitycontextconstraints.security.openshift.io/restricted-adjusted-compliance created ----- - -.Verification -. Verify the SCC was created: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance scc restricted-adjusted-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME PRIV CAPS SELINUX RUNASUSER FSGROUP SUPGROUP PRIORITY READONLYROOTFS VOLUMES -restricted-adjusted-compliance false <no value> MustRunAs MustRunAsRange MustRunAs RunAsAny 30 false ["configMap","downwardAPI","emptyDir","persistentVolumeClaim","projected","secret"] ----- diff --git a/modules/compliance-custom-storage.adoc b/modules/compliance-custom-storage.adoc deleted file mode 100644 index ccb7299dd930..000000000000 --- a/modules/compliance-custom-storage.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-custom-storage_{context}"] -= Setting custom storage size for results -While the custom resources such as `ComplianceCheckResult` represent an aggregated result of one check across all scanned nodes, it can be useful to review the raw results as produced by the scanner. 
The raw results are produced in the ARF format and can be large (tens of megabytes per node), so it is impractical to store them in a Kubernetes resource backed by the `etcd` key-value store. Instead, every scan creates a persistent volume (PV) that defaults to 1GB in size. Depending on your environment, you may want to increase the PV size accordingly. This is done using the `rawResultStorage.size` attribute that is exposed in both the `ScanSetting` and `ComplianceScan` resources. - -A related parameter is `rawResultStorage.rotation`, which controls how many scans are retained in the PV before the older scans are rotated. The default value is 3; setting the rotation policy to 0 disables the rotation. Given the default rotation policy and an estimate of 100MB per raw ARF scan report, you can calculate the right PV size for your environment. - -[id="using-custom-result-storage-values_{context}"] -== Using custom result storage values -Because {product-title} can be deployed in a variety of public clouds or bare metal, the Compliance Operator cannot determine available storage configurations. By default, the Compliance Operator will try to create the PV for storing results using the default storage class of the cluster, but a custom storage class can be configured using the `rawResultStorage.storageClassName` attribute. - -[IMPORTANT] -==== -If your cluster does not specify a default storage class, this attribute must be set. -==== - -Configure the `ScanSetting` custom resource to use a standard storage class and create persistent volumes that are 10GB in size and keep the last 10 results: - -.Example `ScanSetting` CR - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - storageClassName: standard - rotation: 10 - size: 10Gi -roles: -- worker -- master -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' ----- diff --git a/modules/compliance-evaluate-kubeletconfig-rules.adoc deleted file mode 100644 index 181e9a42a303..000000000000 --- a/modules/compliance-evaluate-kubeletconfig-rules.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: CONCEPT -[id="compliance-evaluate-kubeletconfig-rules_{context}"] -= Evaluating KubeletConfig rules against default configuration values - -{product-title} infrastructure might contain incomplete configuration files at run time, and nodes assume default configuration values for missing configuration options. Some configuration options can be passed as command line arguments. As a result, the Compliance Operator cannot verify if the configuration file on the node is complete because it might be missing options used in the rule checks. - -To prevent false negative results where the default configuration value passes a check, the Compliance Operator uses the Node/Proxy API to fetch the configuration for each node in a node pool, and then all configuration options that are consistent across nodes in the node pool are stored in a file that represents the configuration for all nodes within that node pool. This increases the accuracy of the scan results. - -No additional configuration changes are required to use this feature with the default `master` and `worker` node pool configurations.
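The module above refers to the Node/Proxy API as the source of the aggregated kubelet configuration. If you want to inspect the raw data for a single node yourself, you can query the kubelet `configz` endpoint through that same proxy path. This is only an illustrative sketch: `<node_name>` is a placeholder for one of your nodes, and `jq` is assumed to be available on the workstation running `oc`.

[source,terminal]
----
$ oc get --raw /api/v1/nodes/<node_name>/proxy/configz | jq '.kubeletconfig'
----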
\ No newline at end of file diff --git a/modules/compliance-filtering-results.adoc b/modules/compliance-filtering-results.adoc deleted file mode 100644 index 6f3356d5fd45..000000000000 --- a/modules/compliance-filtering-results.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="filtering-compliance-check-results_{context}"] -= Filters for compliance check results - -By default, the `ComplianceCheckResult` objects are labeled with several useful labels that allow you to query the checks and decide on the next steps after the results are generated. - -List checks that belong to a specific suite: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ - -l compliance.openshift.io/suite=workers-compliancesuite ----- - -List checks that belong to a specific scan: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l compliance.openshift.io/scan=workers-scan ----- - -Not all `ComplianceCheckResult` objects create `ComplianceRemediation` objects. Only `ComplianceCheckResult` objects that can be remediated automatically do. A `ComplianceCheckResult` object has a related remediation if it is labeled with the `compliance.openshift.io/automated-remediation` label. The name of the remediation is the same as the name of the check. - -List all failing checks that can be remediated automatically: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l 'compliance.openshift.io/check-status=FAIL,compliance.openshift.io/automated-remediation' ----- - - -List all failing checks sorted by severity: - -[source,terminal] ----- -$ oc get compliancecheckresults -n openshift-compliance \ --l 'compliance.openshift.io/check-status=FAIL,compliance.openshift.io/check-severity=high' ----- - -.Example output -[source,terminal] ----- -NAME STATUS SEVERITY -nist-moderate-modified-master-configure-crypto-policy FAIL high -nist-moderate-modified-master-coreos-pti-kernel-argument FAIL high -nist-moderate-modified-master-disable-ctrlaltdel-burstaction FAIL high -nist-moderate-modified-master-disable-ctrlaltdel-reboot FAIL high -nist-moderate-modified-master-enable-fips-mode FAIL high -nist-moderate-modified-master-no-empty-passwords FAIL high -nist-moderate-modified-master-selinux-state FAIL high -nist-moderate-modified-worker-configure-crypto-policy FAIL high -nist-moderate-modified-worker-coreos-pti-kernel-argument FAIL high -nist-moderate-modified-worker-disable-ctrlaltdel-burstaction FAIL high -nist-moderate-modified-worker-disable-ctrlaltdel-reboot FAIL high -nist-moderate-modified-worker-enable-fips-mode FAIL high -nist-moderate-modified-worker-no-empty-passwords FAIL high -nist-moderate-modified-worker-selinux-state FAIL high -ocp4-moderate-configure-network-policies-namespaces FAIL high -ocp4-moderate-fips-mode-enabled-on-all-nodes FAIL high ----- - -List all failing checks that must be remediated manually: - -[source,terminal] ----- -$ oc get -n openshift-compliance compliancecheckresults \ --l 'compliance.openshift.io/check-status=FAIL,!compliance.openshift.io/automated-remediation' ----- - -The manual remediation steps are typically stored in the `description` attribute in the `ComplianceCheckResult` object. 
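For example, to read the suggested manual steps for a single failing check, you can print its `description` field directly. This is a sketch only; `<check_name>` stands for one of the check names returned by the commands above:

[source,terminal]
----
$ oc get -n openshift-compliance compliancecheckresults <check_name> \
  -o jsonpath='{.description}{"\n"}'
----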
- -.ComplianceCheckResult Status -[cols="1,1",options="header"] -|=== -| ComplianceCheckResult Status | Description -| PASS -| Compliance check ran to completion and passed. -| FAIL -| Compliance check ran to completion and failed. -| INFO -| Compliance check ran to completion and found something not severe enough to be considered an error. -| MANUAL -| Compliance check does not have a way to automatically assess the success or failure and must be checked manually. -| INCONSISTENT -| Compliance check reports different results from different sources, typically cluster nodes. -| ERROR -| Compliance check ran, but could not complete properly. -| NOT-APPLICABLE -| Compliance check did not run because it is not applicable or not selected. -|=== diff --git a/modules/compliance-imagestreams.adoc b/modules/compliance-imagestreams.adoc deleted file mode 100644 index 4d7c883701af..000000000000 --- a/modules/compliance-imagestreams.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: PROCEDURE -[id="compliance-imagestreams_{context}"] -= Using image streams - -The `contentImage` reference points to a valid `ImageStreamTag`, and the Compliance Operator ensures that the content stays up to date automatically. - -[NOTE] -==== -`ProfileBundle` objects also accept `ImageStream` references. -==== - -.Example image stream -[source,terminal] ----- -$ oc get is -n openshift-compliance ----- - -.Example output -[source,terminal] ----- -NAME IMAGE REPOSITORY TAGS UPDATED -openscap-ocp4-ds image-registry.openshift-image-registry.svc:5000/openshift-compliance/openscap-ocp4-ds latest 32 seconds ago ----- - -.Procedure -. Ensure that the lookup policy is set to local: -+ -[source,terminal] ----- -$ oc patch is openscap-ocp4-ds \ - -p '{"spec":{"lookupPolicy":{"local":true}}}' \ - --type=merge - imagestream.image.openshift.io/openscap-ocp4-ds patched - -n openshift-compliance ----- - -. Use the name of the `ImageStreamTag` for the `ProfileBundle` by retrieving the `istag` name: -+ -[source,terminal] ----- -$ oc get istag -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME IMAGE REFERENCE UPDATED -openscap-ocp4-ds:latest image-registry.openshift-image-registry.svc:5000/openshift-compliance/openscap-ocp4-ds@sha256:46d7ca9b7055fe56ade818ec3e62882cfcc2d27b9bf0d1cbae9f4b6df2710c96 3 minutes ago ----- - -. Create the `ProfileBundle`: -+ -[source,terminal] ----- -$ cat << EOF | oc create -f - -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - name: mybundle - spec: - contentImage: openscap-ocp4-ds:latest - contentFile: ssg-rhcos4-ds.xml -EOF ----- - -This `ProfileBundle` will track the image and any changes that are applied to it, such as updating the tag to point to a different hash, will immediately be reflected in the `ProfileBundle`. diff --git a/modules/compliance-inconsistent.adoc b/modules/compliance-inconsistent.adoc deleted file mode 100644 index d0d734877259..000000000000 --- a/modules/compliance-inconsistent.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-inconsistent_{context}"] -= Inconsistent ComplianceScan -The `ScanSetting` object lists the node roles that the compliance scans generated from the `ScanSetting` or `ScanSettingBinding` objects would scan. 
Each node role usually maps to a machine config pool. - -[IMPORTANT] -==== -It is expected that all machines in a machine config pool are identical and all scan results from the nodes in a pool should be identical. -==== - -If some of the results are different from others, the Compliance Operator flags a `ComplianceCheckResult` object where some of the nodes will report as `INCONSISTENT`. All `ComplianceCheckResult` objects are also labeled with `compliance.openshift.io/inconsistent-check`. - -Because the number of machines in a pool might be quite large, the Compliance Operator attempts to find the most common state and list the nodes that differ from the common state. The most common state is stored in the `compliance.openshift.io/most-common-status` annotation and the annotation `compliance.openshift.io/inconsistent-source` contains pairs of `hostname:status` of check statuses that differ from the most common status. If no common state can be found, all the `hostname:status` pairs are listed in the `compliance.openshift.io/inconsistent-source annotation`. - -If possible, a remediation is still created so that the cluster can converge to a compliant status. However, this might not always be possible and correcting the difference between nodes must be done manually. The compliance scan must be re-run to get a consistent result by annotating the scan with the `compliance.openshift.io/rescan=` option: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- diff --git a/modules/compliance-increasing-operator-limits.adoc b/modules/compliance-increasing-operator-limits.adoc deleted file mode 100644 index 77c8f5bd17b8..000000000000 --- a/modules/compliance-increasing-operator-limits.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="compliance-increasing-operator-limits_{context}"] -= Increasing Compliance Operator resource limits - -In some cases, the Compliance Operator might require more memory than the default limits allow. The best way to mitigate this issue is to set custom resource limits. - -To increase the default memory and CPU limits of scanner pods, see _`ScanSetting` Custom resource_. - -.Procedure - -. To increase the Operator's memory limits to 500 Mi, create the following patch file named `co-memlimit-patch.yaml`: -+ -[source,yaml] ----- -spec: - config: - resources: - limits: - memory: 500Mi ----- - -. Apply the patch file: -+ -[source,terminal] ----- -$ oc patch sub compliance-operator -nopenshift-compliance --patch-file co-memlimit-patch.yaml --type=merge ----- \ No newline at end of file diff --git a/modules/compliance-kubeletconfig-sub-pool-remediation.adoc b/modules/compliance-kubeletconfig-sub-pool-remediation.adoc deleted file mode 100644 index 471299ca99c7..000000000000 --- a/modules/compliance-kubeletconfig-sub-pool-remediation.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-kubeletconfig-sub-pool-remediation_{context}"] -= Remediating `KubeletConfig` sub pools - -`KubeletConfig` remediation labels can be applied to `MachineConfigPool` sub-pools. 
- -.Procedure - -* Add a label to the sub-pool `MachineConfigPool` CR: -+ -[source,terminal] ----- -$ oc label mcp <sub-pool-name> pools.operator.machineconfiguration.openshift.io/<sub-pool-name>= ----- \ No newline at end of file diff --git a/modules/compliance-manual.adoc b/modules/compliance-manual.adoc deleted file mode 100644 index 52ef596ecb58..000000000000 --- a/modules/compliance-manual.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-manual_{context}"] -= Remediating a platform check manually - -Checks for Platform scans typically have to be remediated manually by the administrator for two reasons: - -* It is not always possible to automatically determine the value that must be set. One of the checks requires that a list of allowed registries is provided, but the scanner has no way of knowing which registries the organization wants to allow. - -* Different checks modify different API objects, requiring automated remediation to possess `root` or superuser access to modify objects in the cluster, which is not advised. - -.Procedure -. The example below uses the `ocp4-ocp-allowed-registries-for-import` rule, which would fail on a default {product-title} installation. Inspect the rule `oc get rule.compliance/ocp4-ocp-allowed-registries-for-import -oyaml`, the rule is to limit the registries the users are allowed to import images from by setting the `allowedRegistriesForImport` attribute, The _warning_ attribute of the rule also shows the API object checked, so it can be modified and remediate the issue: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -.Example output -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2020-09-10T10:12:54Z" - generation: 2 - name: cluster - resourceVersion: "363096" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: 2dcb614e-2f8a-4a23-ba9a-8e33cd0ff77e -spec: - allowedRegistriesForImport: - - domainName: registry.redhat.io -status: - externalRegistryHostnames: - - default-route-openshift-image-registry.apps.user-cluster-09-10-12-07.devcluster.openshift.com - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- - -. Re-run the scan: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- diff --git a/modules/compliance-new-tailored-profiles.adoc b/modules/compliance-new-tailored-profiles.adoc deleted file mode 100644 index 9c8468d3361c..000000000000 --- a/modules/compliance-new-tailored-profiles.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-tailor.adoc - -:_content-type: PROCEDURE -[id="compliance-new-tailored-profiles_{context}"] -= Creating a new tailored profile - -You can write a tailored profile from scratch using the `TailoredProfile` object. Set an appropriate `title` and `description` and leave the `extends` field empty. Indicate to the Compliance Operator what type of scan will this custom profile generate: - -* Node scan: Scans the Operating System. -* Platform scan: Scans the OpenShift configuration. 
- -.Procedure - -Set the following annotation on the `TailoredProfile` object: -+ -.Example `new-profile.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: new-profile - annotations: - compliance.openshift.io/product-type: Node <1> -spec: - extends: - description: My custom profile <2> - title: Custom profile <3> ----- -<1> Set `Node` or `Platform` accordingly. -<2> Use the `description` field to describe the function of the new `TailoredProfile` object. -<3> Give your `TailoredProfile` object a title with the `title` field. -+ -[NOTE] -==== -Adding the `-node` suffix to the `name` field of the `TailoredProfile` object is similar to adding the `Node` product type annotation and generates an Operating System scan. -==== \ No newline at end of file diff --git a/modules/compliance-objects.adoc b/modules/compliance-objects.adoc deleted file mode 100644 index 0db4e4b9a693..000000000000 --- a/modules/compliance-objects.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-objects_{context}"] -= Using the ComplianceSuite and ComplianceScan objects directly - -While it is recommended that users take advantage of the `ScanSetting` and `ScanSettingBinding` objects to define the suites and scans, there are valid use cases to define the `ComplianceSuite` objects directly: - -* Specifying only a single rule to scan. This can be useful for debugging together with the `debug: true` attribute which increases the OpenSCAP scanner verbosity, as the debug mode tends to get quite verbose otherwise. Limiting the test to one rule helps to lower the amount of debug information. - -* Providing a custom nodeSelector. In order for a remediation to be applicable, the nodeSelector must match a pool. - -* Pointing the Scan to a bespoke config map with a tailoring file. - -* For testing or development when the overhead of parsing profiles from bundles is not required. - -The following example shows a `ComplianceSuite` that scans the worker machines with only a single rule: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: workers-compliancesuite -spec: - scans: - - name: workers-scan - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - debug: true - rule: xccdf_org.ssgproject.content_rule_no_direct_root_logins - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -The `ComplianceSuite` object and the `ComplianceScan` objects referred to above specify several attributes in a format that OpenSCAP expects. - -To find out the profile, content, or rule values, you can start by creating a similar Suite from `ScanSetting` and `ScanSettingBinding` or inspect the objects parsed from the `ProfileBundle` objects like rules or profiles. Those objects contain the `xccdf_org` identifiers you can use to refer to them from a `ComplianceSuite`. 
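For example, the following commands print the XCCDF identifiers of a parsed profile and rule so they can be copied into a `ComplianceSuite` definition. The object names are illustrative and depend on which `ProfileBundle` objects are installed in your cluster:

[source,terminal]
----
$ oc get -n openshift-compliance profiles.compliance rhcos4-moderate -o jsonpath='{.id}{"\n"}'

$ oc get -n openshift-compliance rules.compliance rhcos4-no-empty-passwords -o jsonpath='{.id}{"\n"}'
----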
diff --git a/modules/compliance-operator-cli-installation.adoc b/modules/compliance-operator-cli-installation.adoc deleted file mode 100644 index f9cb58f3f487..000000000000 --- a/modules/compliance-operator-cli-installation.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-cli_{context}"] -= Installing the Compliance Operator using the CLI - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. Define a `Namespace` object: -+ -.Example `namespace-object.yaml` -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged <1> - name: openshift-compliance ----- -<1> In {product-title} {product-version}, the pod security label must be set to `privileged` at the namespace level. - -. Create the `Namespace` object: -+ -[source,terminal] ----- -$ oc create -f namespace-object.yaml ----- - -. Define an `OperatorGroup` object: -+ -.Example `operator-group-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: compliance-operator - namespace: openshift-compliance -spec: - targetNamespaces: - - openshift-compliance ----- - -. Create the `OperatorGroup` object: -+ -[source,terminal] ----- -$ oc create -f operator-group-object.yaml ----- - -. Define a `Subscription` object: -+ -.Example `subscription-object.yaml` -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: compliance-operator-sub - namespace: openshift-compliance -spec: - channel: "stable" - installPlanApproval: Automatic - name: compliance-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- -. Create the `Subscription` object: -+ -[source,terminal] ----- -$ oc create -f subscription-object.yaml ----- - -[NOTE] -==== -If you are setting the global scheduler feature and enable `defaultNodeSelector`, you must create the namespace manually and update the annotations of the `openshift-compliance` namespace, or the namespace where the Compliance Operator was installed, with `openshift.io/node-selector: “”`. This removes the default node selector and prevents deployment failures. -==== - -.Verification - -. Verify the installation succeeded by inspecting the CSV file: -+ -[source,terminal] ----- -$ oc get csv -n openshift-compliance ----- - -. Verify that the Compliance Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-compliance ----- diff --git a/modules/compliance-operator-cli-uninstall.adoc b/modules/compliance-operator-cli-uninstall.adoc deleted file mode 100644 index 47625212d37c..000000000000 --- a/modules/compliance-operator-cli-uninstall.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// security/compliance_operator/compliance-operator-uninstallation.adoc - -:_content-type: PROCEDURE -[id="compliance-operator-uninstall-cli_{context}"] -= Uninstalling the OpenShift Compliance Operator from {product-title} using the CLI - -To remove the Compliance Operator, you must first delete the objects in the namespace. After the objects are removed, you can remove the Operator and its namespace by deleting the *openshift-compliance* project. 
- -.Prerequisites - -* Access to an {product-title} cluster using an account with `cluster-admin` permissions. -* The OpenShift Compliance Operator must be installed. - -.Procedure - -. Delete all objects in the namespace. - -.. Delete the `ScanSettingBinding` objects: -+ -[source,terminal] ----- -$ oc delete ssb <ScanSettingBinding-name> -n openshift-compliance ----- - -.. Delete the `ScanSetting` objects: -+ -[source,terminal] ----- -$ oc delete ss <ScanSetting-name> -n openshift-compliance ----- - -.. Delete the `ComplianceSuite` objects: -+ -[source,terminal] ----- -$ oc delete suite <compliancesuite-name> -n openshift-compliance ----- - -.. Delete the `ComplianceScan` objects: -+ -[source,terminal] ----- -$ oc delete scan <compliancescan-name> -n openshift-compliance ----- - -.. Obtain the `ProfileBundle` objects: -+ -[source,terminal] ----- -$ oc get profilebundle.compliance -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME CONTENTIMAGE CONTENTFILE STATUS -ocp4 registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:<hash> ssg-ocp4-ds.xml VALID -rhcos4 registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:<hash> ssg-rhcos4-ds.xml VALID ----- - -.. Delete the `ProfileBundle` objects: -+ -[source,terminal] ----- -$ oc delete profilebundle.compliance ocp4 rhcos4 -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -profilebundle.compliance.openshift.io "ocp4" deleted -profilebundle.compliance.openshift.io "rhcos4" deleted ----- - -. Delete the Subscription object: -+ -[source,terminal] ----- -$ oc delete sub <Subscription-Name> -n openshift-compliance ----- - -. Delete the CSV object: -+ -[source,terminal] ----- -$ oc delete CSV -n openshift-compliance ----- - -. Delete the project: -+ -[source,terminal] ----- -$ oc delete project -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -project.project.openshift.io "openshift-compliance" deleted ----- - -.Verification - -. Confirm the namespace is deleted: -+ -[source,terminal] ----- -$ oc get project/openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -Error from server (NotFound): namespaces "openshift-compliance" not found ----- \ No newline at end of file diff --git a/modules/compliance-operator-console-installation.adoc b/modules/compliance-operator-console-installation.adoc deleted file mode 100644 index c0dd1e7ac2c4..000000000000 --- a/modules/compliance-operator-console-installation.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-compliance-operator-web-console_{context}"] -= Installing the Compliance Operator through the web console - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Compliance Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-compliance` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Compliance Operator is installed in the `openshift-compliance` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. 
Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-compliance` project that are reporting issues. diff --git a/modules/compliance-operator-uninstall.adoc deleted file mode 100644 index d45269360b85..000000000000 --- a/modules/compliance-operator-uninstall.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// security/compliance_operator/compliance-operator-uninstallation.adoc - -:_content-type: PROCEDURE -[id="compliance-operator-uninstall_{context}"] -= Uninstalling the OpenShift Compliance Operator from {product-title} using the web console - -To remove the Compliance Operator, you must first delete the objects in the namespace. After the objects are removed, you can remove the Operator and its namespace by deleting the *openshift-compliance* project. - -.Prerequisites - -* Access to an {product-title} cluster using an account with `cluster-admin` permissions. -* The OpenShift Compliance Operator must be installed. - -.Procedure - -To remove the Compliance Operator by using the {product-title} web console: - -. Go to the *Operators* -> *Installed Operators* -> *Compliance Operator* page. - -.. Click *All instances*. - -.. In *All namespaces*, click the Options menu {kebab} and delete all ScanSettingBinding, ComplianceSuite, ComplianceScan, and ProfileBundle objects. - -. Switch to the *Administration* -> *Operators* -> *Installed Operators* page. - -. Click the Options menu {kebab} on the *Compliance Operator* entry and select *Uninstall Operator*. - -. Switch to the *Home* -> *Projects* page. - -. Search for 'compliance'. - -. Click the Options menu {kebab} next to the *openshift-compliance* project, and select *Delete Project*. - -.. Confirm the deletion by typing `openshift-compliance` in the dialog box, and click *Delete*. \ No newline at end of file diff --git a/modules/compliance-priorityclass.adoc deleted file mode 100644 index fc62847beb78..000000000000 --- a/modules/compliance-priorityclass.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-priorityclass_{context}"] -= Setting `PriorityClass` for `ScanSetting` scans - -In large-scale environments, the priority of the default `PriorityClass` object can be too low to guarantee that pods execute scans on time. For clusters that must maintain compliance or guarantee automated scanning, it is recommended to set the `PriorityClass` variable to ensure that the Compliance Operator is always given priority in resource-constrained situations.
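The `ScanSetting` example in the following procedure references a `PriorityClass` named `compliance-high-priority`. If a suitable `PriorityClass` object does not already exist in your cluster, you can create one first. The following is only a minimal sketch; the name matches the example below, but the `value` shown is an assumption and should be chosen to fit your cluster's scheduling policy:

[source,yaml]
----
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: compliance-high-priority <1>
value: 1000000 <2>
globalDefault: false
description: "Priority class for Compliance Operator scan pods."
----
<1> Name referenced by the `priorityClass` field of the `ScanSetting` object in the procedure below.
<2> Relative priority; pods with a higher value are scheduled ahead of lower-priority pods. Adjust this value for your environment.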
- -.Procedure - -* Set the `PriorityClass` variable: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -strictNodeScan: true -metadata: - name: default - namespace: openshift-compliance -priorityClass: compliance-high-priority <1> -kind: ScanSetting -showNotApplicable: false -rawResultStorage: - nodeSelector: - node-role.kubernetes.io/master: '' - pvAccessModes: - - ReadWriteOnce - rotation: 3 - size: 1Gi - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 300 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 300 - - effect: NoSchedule - key: node.kubernetes.io/memory-pressure - operator: Exists -schedule: 0 1 * * * -roles: - - master - - worker -scanTolerations: - - operator: Exists ----- -<1> If the `PriorityClass` referenced in the `ScanSetting` cannot be found, the Operator will leave the `PriorityClass` empty, issue a warning, and continue scheduling scans without a `PriorityClass`. \ No newline at end of file diff --git a/modules/compliance-profilebundle.adoc b/modules/compliance-profilebundle.adoc deleted file mode 100644 index efad4006acb9..000000000000 --- a/modules/compliance-profilebundle.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: CONCEPT -[id="compliance-profilebundle_{context}"] -= ProfileBundle CR example - -The `ProfileBundle` object requires two pieces of information: the URL of a container image that contains the `contentImage` and the file that contains the compliance content. The `contentFile` parameter is relative to the root of the file system. You can define the built-in `rhcos4` `ProfileBundle` object as shown in the following example: - -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - creationTimestamp: "2022-10-19T12:06:30Z" - finalizers: - - profilebundle.finalizers.compliance.openshift.io - generation: 1 - name: rhcos4 - namespace: openshift-compliance - resourceVersion: "46741" - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d -spec: - contentFile: ssg-rhcos4-ds.xml <1> - contentImage: registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:900e... <2> -status: - conditions: - - lastTransitionTime: "2022-10-19T12:07:51Z" - message: Profile bundle successfully parsed - reason: Valid - status: "True" - type: Ready - dataStreamStatus: VALID ----- -<1> Location of the file containing the compliance content. -<2> Content image location. -+ -[IMPORTANT] -==== -The base image used for the content images must include `coreutils`. -==== diff --git a/modules/compliance-profiles.adoc b/modules/compliance-profiles.adoc deleted file mode 100644 index dbcbf8c3960c..000000000000 --- a/modules/compliance-profiles.adoc +++ /dev/null @@ -1,208 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-understanding.adoc - -:_content-type: CONCEPT -[id="compliance_profiles_{context}"] -= Compliance Operator profiles - -There are several profiles available as part of the Compliance Operator installation. You can use the `oc get` command to view available profiles, profile details, and specific rules. 
- -* View the available profiles: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance profiles.compliance ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -ocp4-cis 94m -ocp4-cis-node 94m -ocp4-e8 94m -ocp4-high 94m -ocp4-high-node 94m -ocp4-moderate 94m -ocp4-moderate-node 94m -ocp4-nerc-cip 94m -ocp4-nerc-cip-node 94m -ocp4-pci-dss 94m -ocp4-pci-dss-node 94m -rhcos4-e8 94m -rhcos4-high 94m -rhcos4-moderate 94m -rhcos4-nerc-cip 94m ----- -+ -These profiles represent different compliance benchmarks. Each profile has the product name that it applies to added as a prefix to the profile’s name. `ocp4-e8` applies the Essential 8 benchmark to the {product-title} product, while `rhcos4-e8` applies the Essential 8 benchmark to the {op-system-first} product. - -* Run the following command to view the details of the `rhcos4-e8` profile: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance -oyaml profiles.compliance rhcos4-e8 ----- -+ -.Example output -[%collapsible] -==== -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -description: 'This profile contains configuration checks for Red Hat Enterprise Linux - CoreOS that align to the Australian Cyber Security Centre (ACSC) Essential Eight. - A copy of the Essential Eight in Linux Environments guide can be found at the ACSC - website: https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers' -id: xccdf_org.ssgproject.content_profile_e8 -kind: Profile -metadata: - annotations: - compliance.openshift.io/image-digest: pb-rhcos4hrdkm - compliance.openshift.io/product: redhat_enterprise_linux_coreos_4 - compliance.openshift.io/product-type: Node - creationTimestamp: "2022-10-19T12:06:49Z" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: rhcos4 - name: rhcos4-e8 - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: rhcos4 - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d - resourceVersion: "43699" - uid: 86353f70-28f7-40b4-bf0e-6289ec33675b -rules: -- rhcos4-accounts-no-uid-except-zero -- rhcos4-audit-rules-dac-modification-chmod -- rhcos4-audit-rules-dac-modification-chown -- rhcos4-audit-rules-execution-chcon -- rhcos4-audit-rules-execution-restorecon -- rhcos4-audit-rules-execution-semanage -- rhcos4-audit-rules-execution-setfiles -- rhcos4-audit-rules-execution-setsebool -- rhcos4-audit-rules-execution-seunshare -- rhcos4-audit-rules-kernel-module-loading-delete -- rhcos4-audit-rules-kernel-module-loading-finit -- rhcos4-audit-rules-kernel-module-loading-init -- rhcos4-audit-rules-login-events -- rhcos4-audit-rules-login-events-faillock -- rhcos4-audit-rules-login-events-lastlog -- rhcos4-audit-rules-login-events-tallylog -- rhcos4-audit-rules-networkconfig-modification -- rhcos4-audit-rules-sysadmin-actions -- rhcos4-audit-rules-time-adjtimex -- rhcos4-audit-rules-time-clock-settime -- rhcos4-audit-rules-time-settimeofday -- rhcos4-audit-rules-time-stime -- rhcos4-audit-rules-time-watch-localtime -- rhcos4-audit-rules-usergroup-modification -- rhcos4-auditd-data-retention-flush -- rhcos4-auditd-freq -- rhcos4-auditd-local-events -- rhcos4-auditd-log-format -- rhcos4-auditd-name-format -- rhcos4-auditd-write-logs -- rhcos4-configure-crypto-policy -- rhcos4-configure-ssh-crypto-policy -- rhcos4-no-empty-passwords -- rhcos4-selinux-policytype -- rhcos4-selinux-state -- rhcos4-service-auditd-enabled -- 
rhcos4-sshd-disable-empty-passwords -- rhcos4-sshd-disable-gssapi-auth -- rhcos4-sshd-disable-rhosts -- rhcos4-sshd-disable-root-login -- rhcos4-sshd-disable-user-known-hosts -- rhcos4-sshd-do-not-permit-user-env -- rhcos4-sshd-enable-strictmodes -- rhcos4-sshd-print-last-log -- rhcos4-sshd-set-loglevel-info -- rhcos4-sysctl-kernel-dmesg-restrict -- rhcos4-sysctl-kernel-kptr-restrict -- rhcos4-sysctl-kernel-randomize-va-space -- rhcos4-sysctl-kernel-unprivileged-bpf-disabled -- rhcos4-sysctl-kernel-yama-ptrace-scope -- rhcos4-sysctl-net-core-bpf-jit-harden -title: Australian Cyber Security Centre (ACSC) Essential Eight ----- -==== - -* Run the following command to view the details of the `rhcos4-audit-rules-login-events` rule: -+ -[source,terminal] ----- -$ oc get -n openshift-compliance -oyaml rules rhcos4-audit-rules-login-events ----- -+ -.Example output -[%collapsible] -==== -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -checkType: Node -description: |- - The audit system already collects login information for all users and root. If the auditd daemon is configured to use the augenrules program to read audit rules during daemon startup (the default), add the following lines to a file with suffix.rules in the directory /etc/audit/rules.d in order to watch for attempted manual edits of files involved in storing logon events: - - -w /var/log/tallylog -p wa -k logins - -w /var/run/faillock -p wa -k logins - -w /var/log/lastlog -p wa -k logins - - If the auditd daemon is configured to use the auditctl utility to read audit rules during daemon startup, add the following lines to /etc/audit/audit.rules file in order to watch for unattempted manual edits of files involved in storing logon events: - - -w /var/log/tallylog -p wa -k logins - -w /var/run/faillock -p wa -k logins - -w /var/log/lastlog -p wa -k logins -id: xccdf_org.ssgproject.content_rule_audit_rules_login_events -kind: Rule -metadata: - annotations: - compliance.openshift.io/image-digest: pb-rhcos4hrdkm - compliance.openshift.io/rule: audit-rules-login-events - control.compliance.openshift.io/NIST-800-53: AU-2(d);AU-12(c);AC-6(9);CM-6(a) - control.compliance.openshift.io/PCI-DSS: Req-10.2.3 - policies.open-cluster-management.io/controls: AU-2(d),AU-12(c),AC-6(9),CM-6(a),Req-10.2.3 - policies.open-cluster-management.io/standards: NIST-800-53,PCI-DSS - creationTimestamp: "2022-10-19T12:07:08Z" - generation: 1 - labels: - compliance.openshift.io/profile-bundle: rhcos4 - name: rhcos4-audit-rules-login-events - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ProfileBundle - name: rhcos4 - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d - resourceVersion: "44819" - uid: 75872f1f-3c93-40ca-a69d-44e5438824a4 -rationale: Manual editing of these files may indicate nefarious activity, such as - an attacker attempting to remove evidence of an intrusion. -severity: medium -title: Record Attempts to Alter Logon and Logout Events -warning: Manual editing of these files may indicate nefarious activity, such as an - attacker attempting to remove evidence of an intrusion. ----- -==== - -[id="compliance_profile_types{context}"] -== Compliance Operator profile types - -There are two types of compliance profiles available: Platform and Node. - -Platform:: Platform scans target your {product-title} cluster. - -Node:: Node scans target the nodes of the cluster. 
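If you are not sure whether a particular profile is a Platform or a Node profile, the `compliance.openshift.io/product-type` annotation shown in the profile output above records the type. As an illustrative check, assuming `jq` is available, you can list every profile together with its type:

[source,terminal]
----
$ oc get -n openshift-compliance profiles.compliance -o json \
  | jq -r '.items[] | "\(.metadata.name) \(.metadata.annotations["compliance.openshift.io/product-type"])"'
----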
- -[IMPORTANT] -==== -For compliance profiles that have Node and Platform applications, such as `pci-dss` compliance profiles, you must run both in your {product-title} environment. -==== diff --git a/modules/compliance-raw-tailored.adoc b/modules/compliance-raw-tailored.adoc deleted file mode 100644 index f0cb71982f9d..000000000000 --- a/modules/compliance-raw-tailored.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -:_content-type: PROCEDURE -[id="compliance-raw-tailored_{context}"] -= Using raw tailored profiles -While the `TailoredProfile` CR enables the most common tailoring operations, the XCCDF standard allows even more flexibility in tailoring OpenSCAP profiles. In addition, if your organization has been using OpenScap previously, you may have an existing XCCDF tailoring file and can reuse it. - -The `ComplianceSuite` object contains an optional `TailoringConfigMap` attribute that you can point to a custom tailoring file. The value of the `TailoringConfigMap` attribute is a name of a config map which must contain a key called `tailoring.xml` and the value of this key is the tailoring contents. - -.Procedure -. Create the `ConfigMap` object from a file: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -create configmap nist-moderate-modified \ ---from-file=tailoring.xml=/path/to/the/tailoringFile.xml ----- - -. Reference the tailoring file in a scan that belongs to a suite: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceSuite -metadata: - name: workers-compliancesuite -spec: - debug: true - scans: - - name: workers-scan - profile: xccdf_org.ssgproject.content_profile_moderate - content: ssg-rhcos4-ds.xml - contentImage: quay.io/complianceascode/ocp4:latest - debug: true - tailoringConfigMap: - name: nist-moderate-modified - nodeSelector: - node-role.kubernetes.io/worker: "" ----- diff --git a/modules/compliance-removing-kubeletconfig.adoc b/modules/compliance-removing-kubeletconfig.adoc deleted file mode 100644 index be84a1b811b2..000000000000 --- a/modules/compliance-removing-kubeletconfig.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-removing-kubeletconfig_{context}"] -= Removing a KubeletConfig remediation -`KubeletConfig` remediations are included in node-level profiles. In order to remove a KubeletConfig remediation, you must manually remove it from the `KubeletConfig` objects. This example demonstrates how to remove the compliance check for the `one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available` remediation. - -.Procedure - -. 
Locate the `scan-name` and compliance check for the `one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available` remediation: -+ -[source,terminal] ----- -$ oc -n openshift-compliance get remediation \ one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ComplianceRemediation -metadata: - annotations: - compliance.openshift.io/xccdf-value-used: var-kubelet-evictionhard-imagefs-available - creationTimestamp: "2022-01-05T19:52:27Z" - generation: 1 - labels: - compliance.openshift.io/scan-name: one-rule-tp-node-master <1> - compliance.openshift.io/suite: one-rule-ssb-node - name: one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available - namespace: openshift-compliance - ownerReferences: - - apiVersion: compliance.openshift.io/v1alpha1 - blockOwnerDeletion: true - controller: true - kind: ComplianceCheckResult - name: one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available - uid: fe8e1577-9060-4c59-95b2-3e2c51709adc - resourceVersion: "84820" - uid: 5339d21a-24d7-40cb-84d2-7a2ebb015355 -spec: - apply: true - current: - object: - apiVersion: machineconfiguration.openshift.io/v1 - kind: KubeletConfig - spec: - kubeletConfig: - evictionHard: - imagefs.available: 10% <2> - outdated: {} - type: Configuration -status: - applicationState: Applied ----- -<1> The scan name of the remediation. -<2> The remediation that was added to the `KubeletConfig` objects. -+ -[NOTE] -==== -If the remediation invokes an `evictionHard` kubelet configuration, you must specify all of the `evictionHard` parameters: `memory.available`, `nodefs.available`, `nodefs.inodesFree`, `imagefs.available`, and `imagefs.inodesFree`. If you do not specify all parameters, only the specified parameters are applied and the remediation will not function properly. -==== - -. Remove the remediation: - -.. Set `apply` to false for the remediation object: -+ -[source,terminal] ----- -$ oc -n openshift-compliance patch \ -complianceremediations/one-rule-tp-node-master-kubelet-eviction-thresholds-set-hard-imagefs-available \ --p '{"spec":{"apply":false}}' --type=merge ----- -+ -.. Using the `scan-name`, find the `KubeletConfig` object that the remediation was applied to: -+ -[source,terminal] ----- -$ oc -n openshift-compliance get kubeletconfig \ ---selector compliance.openshift.io/scan-name=one-rule-tp-node-master ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -compliance-operator-kubelet-master 2m34s ----- -.. Manually remove the remediation, `imagefs.available: 10%`, from the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc edit -n openshift-compliance KubeletConfig compliance-operator-kubelet-master ----- -+ -[IMPORTANT] -==== -All affected nodes with the remediation will be rebooted. -==== - -[NOTE] -==== -You must also exclude the rule from any scheduled scans in your tailored profiles that auto-applies the remediation, otherwise, the remediation will be re-applied during the next scheduled scan. 
-==== - diff --git a/modules/compliance-rescan.adoc b/modules/compliance-rescan.adoc deleted file mode 100644 index 331f7deea867..000000000000 --- a/modules/compliance-rescan.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-advanced.adoc - -[id="compliance-rescan_{context}"] -= Performing a rescan -Typically you will want to re-run a scan on a defined schedule, like every Monday or daily. It can also be useful to re-run a scan once after fixing a problem on a node. To perform a single scan, annotate the scan with the `compliance.openshift.io/rescan=` option: - -[source,terminal] ----- -$ oc -n openshift-compliance \ -annotate compliancescans/rhcos4-e8-worker compliance.openshift.io/rescan= ----- - -A rescan generates four additional `mc` for `rhcos-moderate` profile: - -[source,terminal] ----- -$ oc get mc ----- - -.Example output -[source,terminal] ----- -75-worker-scan-chronyd-or-ntpd-specify-remote-server -75-worker-scan-configure-usbguard-auditbackend -75-worker-scan-service-usbguard-enabled -75-worker-scan-usbguard-allow-hid-and-hub ----- - -[IMPORTANT] -==== -When the scan setting `default-auto-apply` label is applied, remediations are applied automatically and outdated remediations automatically update. If there are remediations that were not applied due to dependencies, or remediations that had been outdated, rescanning applies the remediations and might trigger a reboot. Only remediations that use `MachineConfig` objects trigger reboots. If there are no updates or dependencies to be applied, no reboot occurs. -==== diff --git a/modules/compliance-results.adoc b/modules/compliance-results.adoc deleted file mode 100644 index 72d171cef7ff..000000000000 --- a/modules/compliance-results.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-raw-results.adoc - -:_content-type: PROCEDURE -[id="compliance-results_{context}"] -= Obtaining Compliance Operator raw results from a persistent volume - -.Procedure - -The Compliance Operator generates and stores the raw results in a persistent volume. These results are in Asset Reporting Format (ARF). - -. Explore the `ComplianceSuite` object: -+ -[source,terminal] ----- -$ oc get compliancesuites nist-moderate-modified \ --o json -n openshift-compliance | jq '.status.scanStatuses[].resultsStorage' ----- -+ -.Example output -[source,json] ----- -{ - "name": "ocp4-moderate", - "namespace": "openshift-compliance" -} -{ - "name": "nist-moderate-modified-master", - "namespace": "openshift-compliance" -} -{ - "name": "nist-moderate-modified-worker", - "namespace": "openshift-compliance" -} ----- -+ -This shows the persistent volume claims where the raw results are accessible. - -. Verify the raw data location by using the name and namespace of one of the results: -+ -[source,terminal] ----- -$ oc get pvc -n openshift-compliance rhcos4-moderate-worker ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -rhcos4-moderate-worker Bound pvc-548f6cfe-164b-42fe-ba13-a07cfbc77f3a 1Gi RWO gp2 92m ----- - -. 
Fetch the raw results by spawning a pod that mounts the volume and copying the results: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f pod.yaml ----- -+ -.Example pod.yaml -[source,yaml] ----- -apiVersion: "v1" -kind: Pod -metadata: - name: pv-extract -spec: - containers: - - name: pv-extract-pod - image: registry.access.redhat.com/ubi9/ubi - command: ["sleep", "3000"] - volumeMounts: - - mountPath: "/workers-scan-results" - name: workers-scan-vol - volumes: - - name: workers-scan-vol - persistentVolumeClaim: - claimName: rhcos4-moderate-worker ----- - -. After the pod is running, download the results: -+ -[source,terminal] ----- -$ oc cp pv-extract:/workers-scan-results -n openshift-compliance . ----- -+ -[IMPORTANT] -==== -Spawning a pod that mounts the persistent volume will keep the claim as `Bound`. If the volume's storage class in use has permissions set to `ReadWriteOnce`, the volume is only mountable by one pod at a time. You must delete the pod upon completion, or it will not be possible for the Operator to schedule a pod and continue storing results in this location. -==== - -. After the extraction is complete, the pod can be deleted: -+ -[source,terminal] ----- -$ oc delete pod pv-extract -n openshift-compliance ----- diff --git a/modules/compliance-review.adoc b/modules/compliance-review.adoc deleted file mode 100644 index 111ac9faa5a8..000000000000 --- a/modules/compliance-review.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -[id="compliance-review_{context}"] -= Reviewing a remediation - -Review both the `ComplianceRemediation` object and the `ComplianceCheckResult` object that owns the remediation. The `ComplianceCheckResult` object contains human-readable descriptions of what the check does and the hardening trying to prevent, as well as other `metadata` like the severity and the associated security controls. The `ComplianceRemediation` object represents a way to fix the problem described in the `ComplianceCheckResult`. After first scan, check for remediations with the state `MissingDependencies`. - -Below is an example of a check and a remediation called `sysctl-net-ipv4-conf-all-accept-redirects`. This example is redacted to only show `spec` and `status` and omits `metadata`: - -[source,yaml] ----- -spec: - apply: false - current: - object: - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/sysctl.d/75-sysctl_net_ipv4_conf_all_accept_redirects.conf - mode: 0644 - contents: - source: data:,net.ipv4.conf.all.accept_redirects%3D0 - outdated: {} -status: - applicationState: NotApplied ----- - -The remediation payload is stored in the `spec.current` attribute. The payload can be any Kubernetes object, but because this remediation was produced by a node scan, the remediation payload in the above example is a `MachineConfig` object. For Platform scans, the remediation payload is often a different kind of an object (for example, a `ConfigMap` or `Secret` object), but typically applying that remediation is up to the administrator, because otherwise the Compliance Operator would have required a very broad set of permissions to manipulate any generic Kubernetes object. An example of remediating a Platform check is provided later in the text. 
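If you decide that a node-level remediation such as this one should be applied, one way to do so is to set the `apply` flag on the `ComplianceRemediation` object. This is a sketch only; in a real cluster, `<scan_name>` is a placeholder because the remediation name is prefixed with the name of the scan that produced it:

[source,terminal]
----
$ oc -n openshift-compliance patch \
  complianceremediations/<scan_name>-sysctl-net-ipv4-conf-all-accept-redirects \
  -p '{"spec":{"apply":true}}' --type=merge
----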
- -To see exactly what the remediation does when applied, the `MachineConfig` object contents use the Ignition objects for the configuration. See the link:https://coreos.github.io/ignition/specs/[Ignition specification] for further information about the format. In our example, `the spec.config.storage.files[0].path` attribute specifies the file that is being create by this remediation (`/etc/sysctl.d/75-sysctl_net_ipv4_conf_all_accept_redirects.conf`) and the `spec.config.storage.files[0].contents.source` attribute specifies the contents of that file. - -[NOTE] -==== -The contents of the files are URL-encoded. -==== - -Use the following Python script to view the contents: - -[source,terminal] ----- -$ echo "net.ipv4.conf.all.accept_redirects%3D0" | python3 -c "import sys, urllib.parse; print(urllib.parse.unquote(''.join(sys.stdin.readlines())))" ----- - -.Example output -[source,terminal] ----- -net.ipv4.conf.all.accept_redirects=0 ----- diff --git a/modules/compliance-scansetting-cr.adoc b/modules/compliance-scansetting-cr.adoc deleted file mode 100644 index a76b1da48e4a..000000000000 --- a/modules/compliance-scansetting-cr.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-scansetting-cr_{context}"] -= `ScanSetting` Custom Resource - -The `ScanSetting` Custom Resource now allows you to override the default CPU and memory limits of scanner pods through the scan limits attribute. The Compliance Operator will use defaults of 500Mi memory, 100m CPU for the scanner container, and 200Mi memory with 100m CPU for the `api-resource-collector` container. To set the memory limits of the Operator, modify the `Subscription` object if installed through OLM or the Operator deployment itself. - -To increase the default CPU and memory limits of the Compliance Operator, see _Increasing Compliance Operator resource limits_. - -[IMPORTANT] -==== -Increasing the memory limit for the Compliance Operator or the scanner pods is needed if the default limits are not sufficient and the Operator or scanner pods are ended by the Out Of Memory (OOM) process. -==== diff --git a/modules/compliance-scheduling-pods-with-resource-requests.adoc b/modules/compliance-scheduling-pods-with-resource-requests.adoc deleted file mode 100644 index 2afb461050d6..000000000000 --- a/modules/compliance-scheduling-pods-with-resource-requests.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-scans.adoc - -:_content-type: CONCEPT -[id="compliance-scheduling-pods-with-resource-requests_{context}"] -= Scheduling Pods with container resource requests - -When a Pod is created, the scheduler selects a Node for the Pod to run on. Each node has a maximum capacity for each resource type in the amount of CPU and memory it can provide for the Pods. The scheduler ensures that the sum of the resource requests of the scheduled containers is less than the capacity nodes for each resource type. - -Although memory or CPU resource usage on nodes is very low, the scheduler might still refuse to place a Pod on a node if the capacity check fails to protect against a resource shortage on a node. 
-
-For each container, you can specify the following resource limits and requests:
-
-[source,terminal]
-----
-spec.containers[].resources.limits.cpu
-spec.containers[].resources.limits.memory
-spec.containers[].resources.limits.hugepages-<size>
-spec.containers[].resources.requests.cpu
-spec.containers[].resources.requests.memory
-spec.containers[].resources.requests.hugepages-<size>
-----
-
-Although you can specify requests and limits for only individual containers, it is also useful to consider the overall resource requests and limits for a pod. For a particular resource, a pod resource request or limit is the sum of the resource requests or limits of that type for each container in the pod.
-
-.Example container resource requests and limits
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
- name: frontend
-spec:
- containers:
- - name: app
- image: images.my-company.example/app:v4
- resources:
- requests: <1>
- memory: "64Mi"
- cpu: "250m"
- limits: <2>
- memory: "128Mi"
- cpu: "500m"
- - name: log-aggregator
- image: images.my-company.example/log-aggregator:v6
- resources:
- requests:
- memory: "64Mi"
- cpu: "250m"
- limits:
- memory: "128Mi"
- cpu: "500m"
-----
-<1> The container is requesting 64Mi of memory and 250m CPU.
-<2> The container's limits are 128Mi of memory and 500m CPU.
\ No newline at end of file
diff --git a/modules/compliance-supported-profiles.adoc b/modules/compliance-supported-profiles.adoc
deleted file mode 100644
index 77e6b1203c6f..000000000000
--- a/modules/compliance-supported-profiles.adoc
+++ /dev/null
@@ -1,135 +0,0 @@
-// Module included in the following assemblies:
-//
-// * security/compliance_operator/
-
-:_content-type: CONCEPT
-[id="compliance-supported-profiles_{context}"]
-= Compliance profiles
-
-The Compliance Operator provides the following compliance profiles:
-
-.Supported compliance profiles
-[cols="10%,40%,10%,10%,40%,10%", options="header"]
-
-|===
-|Profile
-|Profile title
-|Application
-|Compliance Operator version
-|Industry compliance benchmark
-|Supported architectures
-
-|ocp4-cis
-|CIS Red Hat OpenShift Container Platform 4 Benchmark v1.1.0
-|Platform
-|0.1.39+
-|link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks ™] ^[1]^
-|`x86_64` - `ppc64le` - `s390x`
-
-|ocp4-cis-node
-|CIS Red Hat OpenShift Container Platform 4 Benchmark v1.1.0
-|Node ^[2]^
-|0.1.39+
-|link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks ™] ^[1]^
-|`x86_64` - `ppc64le` - `s390x`
-
-|ocp4-e8
-|Australian Cyber Security Centre (ACSC) Essential Eight
-|Platform
-|0.1.39+
-|link:https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers[ACSC Hardening Linux Workstations and Servers]
-|`x86_64`
-
-|ocp4-moderate
-|NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Platform level
-|Platform
-|0.1.39+
-|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search]
-|`x86_64`
-
-|rhcos4-e8
-|Australian Cyber Security Centre (ACSC) Essential Eight
-|Node
-|0.1.39+
-|link:https://www.cyber.gov.au/acsc/view-all-content/publications/hardening-linux-workstations-and-servers[ACSC Hardening Linux Workstations and Servers]
-|`x86_64`
-
-|rhcos4-moderate
-|NIST 800-53 Moderate-Impact Baseline for Red Hat Enterprise Linux CoreOS
-|Node
-|0.1.39+
-|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search]
-|`x86_64`
-
-|ocp4-moderate-node
-|NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Node level
-|Node ^[2]^
-|0.1.44+
-|link:https://nvd.nist.gov/800-53/Rev4/impact/moderate[NIST SP-800-53 Release Search] -|`x86_64` - -|ocp4-nerc-cip -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for the Red Hat OpenShift Container Platform - Platform level -|Platform -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|ocp4-nerc-cip-node -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for the Red Hat OpenShift Container Platform - Node level -|Node ^[2]^ -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|rhcos4-nerc-cip -|North American Electric Reliability Corporation (NERC) Critical Infrastructure Protection (CIP) cybersecurity standards profile for Red Hat Enterprise Linux CoreOS -|Node -|0.1.44+ -|link:https://www.nerc.com/pa/Stand/Pages/CIPStandards.aspx[NERC CIP Standards] -|`x86_64` - -|ocp4-pci-dss -|PCI-DSS v3.2.1 Control Baseline for Red Hat OpenShift Container Platform 4 -|Platform -|0.1.47+ -|link:https://www.pcisecuritystandards.org/document_library?document=pci_dss[PCI Security Standards ® Council Document Library] -|`x86_64` - `ppc64le` - -|ocp4-pci-dss-node -|PCI-DSS v3.2.1 Control Baseline for Red Hat OpenShift Container Platform 4 -|Node ^[2]^ -|0.1.47+ -|link:https://www.pcisecuritystandards.org/document_library?document=pci_dss[PCI Security Standards ® Council Document Library] -|`x86_64` - `ppc64le` - -|ocp4-high -|NIST 800-53 High-Impact Baseline for Red Hat OpenShift - Platform level -|Platform -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` - -|ocp4-high-node -|NIST 800-53 High-Impact Baseline for Red Hat OpenShift - Node level -|Node ^[2]^ -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` - -|rhcos4-high -|NIST 800-53 High-Impact Baseline for Red Hat Enterprise Linux CoreOS -|Node -|0.1.52+ -|link:https://csrc.nist.gov/Projects/risk-management/sp800-53-controls/release-search#!/800-53[NIST SP-800-53 Release Search] -|`x86_64` -|=== -[.small] -1. To locate the CIS {product-title} v4 Benchmark, go to link:https://www.cisecurity.org/cis-benchmarks/[CIS Benchmarks] and type `Kubernetes` in the search box. Click on *Kubernetes* and then *Download Latest CIS Benchmark*, where you can then register to download the benchmark. -2. Node profiles must be used with the relevant Platform profile. For more information, see xref:../../security/compliance_operator/compliance-operator-understanding.adoc#compliance_profile_typesunderstanding-compliance[Compliance Operator profile types]. 
\ No newline at end of file diff --git a/modules/compliance-tailored-profiles.adoc b/modules/compliance-tailored-profiles.adoc deleted file mode 100644 index 620f2f082470..000000000000 --- a/modules/compliance-tailored-profiles.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-tailor.adoc - -:_content-type: PROCEDURE -[id="compliance-tailored-profiles_{context}"] -= Using tailored profiles to extend existing ProfileBundles -While the `TailoredProfile` CR enables the most common tailoring operations, the XCCDF standard allows even more flexibility in tailoring OpenSCAP profiles. In addition, if your organization has been using OpenScap previously, you may have an existing XCCDF tailoring file and can reuse it. - -The `ComplianceSuite` object contains an optional `TailoringConfigMap` attribute that you can point to a custom tailoring file. The value of the `TailoringConfigMap` attribute is a name of a config map, which must contain a key called `tailoring.xml` and the value of this key is the tailoring contents. - -.Procedure - -. Browse the available rules for the {op-system-first} `ProfileBundle`: -+ -[source,terminal] ----- -$ oc get rules.compliance -n openshift-compliance -l compliance.openshift.io/profile-bundle=rhcos4 ----- - -. Browse the available variables in the same `ProfileBundle`: -+ -[source,terminal] ----- -$ oc get variables.compliance -n openshift-compliance -l compliance.openshift.io/profile-bundle=rhcos4 ----- - -. Create a tailored profile named `nist-moderate-modified`: -.. Choose which rules you want to add to the `nist-moderate-modified` tailored profile. This example extends the `rhcos4-moderate` profile by disabling two rules and changing one value. Use the `rationale` value to describe why these changes were made: -+ -.Example `new-profile-node.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: nist-moderate-modified -spec: - extends: rhcos4-moderate - description: NIST moderate profile - title: My modified NIST moderate profile - disableRules: - - name: rhcos4-file-permissions-var-log-messages - rationale: The file contains logs of error messages in the system - - name: rhcos4-account-disable-post-pw-expiration - rationale: No need to check this as it comes from the IdP - setValues: - - name: rhcos4-var-selinux-state - rationale: Organizational requirements - value: permissive ----- -+ -.Attributes for spec variables -[cols="1,2a",options="header"] -|=== -|Attribute -|Description - -|`extends` -|Name of the `Profile` object upon which this `TailoredProfile` is built. - -|`title` -|Human-readable title of the `TailoredProfile`. - -|`disableRules` -|A list of name and rationale pairs. Each name refers to a name of a rule object that is to be disabled. The rationale value is human-readable text describing why the rule is disabled. - -|`manualRules` -| A list of name and rationale pairs. When a manual rule is added, the check result status will always be `manual` and remediation will not be generated. This attribute is automatic and by default has no values when set as a manual rule. - -|`enableRules` -|A list of name and rationale pairs. Each name refers to a name of a rule object that is to be enabled. The rationale value is human-readable text describing why the rule is enabled. - -|`description` -|Human-readable text describing the `TailoredProfile`. 
- -|`setValues` -| A list of name, rationale, and value groupings. Each name refers to a name of the value set. The rationale is human-readable text describing the set. The value is the actual setting. -|=== -+ -.. Add the `tailoredProfile.spec.manualRules` attribute: -+ -.Example `tailoredProfile.spec.manualRules.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: TailoredProfile -metadata: - name: ocp4-manual-scc-check -spec: - extends: ocp4-cis - description: This profile extends ocp4-cis by forcing the SCC check to always return MANUAL - title: OCP4 CIS profile with manual SCC check - manualRules: - - name: ocp4-scc-limit-container-allowed-capabilities - rationale: We use third party software that installs its own SCC with extra privileges ----- - -.. Create the `TailoredProfile` object: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f new-profile-node.yaml <1> ----- -<1> The `TailoredProfile` object is created in the default `openshift-compliance` namespace. -+ -.Example output -[source,terminal] ----- -tailoredprofile.compliance.openshift.io/nist-moderate-modified created ----- - -. Define the `ScanSettingBinding` object to bind the new `nist-moderate-modified` tailored profile to the default `ScanSetting` object. -+ -.Example `new-scansettingbinding.yaml` -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSettingBinding -metadata: - name: nist-moderate-modified -profiles: - - apiGroup: compliance.openshift.io/v1alpha1 - kind: Profile - name: ocp4-moderate - - apiGroup: compliance.openshift.io/v1alpha1 - kind: TailoredProfile - name: nist-moderate-modified -settingsRef: - apiGroup: compliance.openshift.io/v1alpha1 - kind: ScanSetting - name: default ----- - -. Create the `ScanSettingBinding` object: -+ -[source,terminal] ----- -$ oc create -n openshift-compliance -f new-scansettingbinding.yaml ----- -+ -.Example output -[source,terminal] ----- -scansettingbinding.compliance.openshift.io/nist-moderate-modified created ----- diff --git a/modules/compliance-timeout.adoc b/modules/compliance-timeout.adoc deleted file mode 100644 index 31d75ba13941..000000000000 --- a/modules/compliance-timeout.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="compliance-timeout_{context}"] -= Configuring ScanSetting timeout - -The `ScanSetting` object has a timeout option that can be specified in the `ComplianceScanSetting` object as a duration string, such as `1h30m`. If the scan does not finish within the specified timeout, the scan reattempts until the `maxRetryOnTimeout` limit is reached. - -.Procedure - -* To set a `timeout` and `maxRetryOnTimeout` in ScanSetting, modify an existing `ScanSetting` object: -+ -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ScanSetting -metadata: - name: default - namespace: openshift-compliance -rawResultStorage: - rotation: 3 - size: 1Gi -roles: -- worker -- master -scanTolerations: -- effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists -schedule: '0 1 * * *' -timeout: '10m0s' <1> -maxRetryOnTimeout: 3 <2> ----- -<1> The `timeout` variable is defined as a duration string, such as `1h30m`. The default value is `30m`. To disable the timeout, set the value to `0s`. -<2> The `maxRetryOnTimeout` variable defines how many times a retry is attempted. The default value is `3`. 
\ No newline at end of file diff --git a/modules/compliance-unapplying.adoc b/modules/compliance-unapplying.adoc deleted file mode 100644 index d214ca6ea313..000000000000 --- a/modules/compliance-unapplying.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-unapplying_{context}"] -= Unapplying a remediation -It might be required to unapply a remediation that was previously applied. - -.Procedure -. Set the `apply` flag to `false`: -+ -[source,terminal] ----- -$ oc -n openshift-compliance \ -patch complianceremediations/rhcos4-moderate-worker-sysctl-net-ipv4-conf-all-accept-redirects \ ---patch '{"spec":{"apply":false}}' --type=merge ----- - -. The remediation status will change to `NotApplied` and the composite `MachineConfig` object would be re-rendered to not include the remediation. -+ -[IMPORTANT] -==== -All affected nodes with the remediation will be rebooted. -==== diff --git a/modules/compliance-update.adoc b/modules/compliance-update.adoc deleted file mode 100644 index 2ed2be580eb1..000000000000 --- a/modules/compliance-update.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-manage.adoc - -:_content-type: CONCEPT -[id="compliance-update_{context}"] -= Updating security content - -Security content is included as container images that the `ProfileBundle` objects refer to. To accurately track updates to `ProfileBundles` and the custom resources parsed from the bundles such as rules or profiles, identify the container image with the compliance content using a digest instead of a tag: - -[source,terminal] ----- -$ oc -n openshift-compliance get profilebundles rhcos4 -oyaml ----- - -.Example output -[source,yaml] ----- -apiVersion: compliance.openshift.io/v1alpha1 -kind: ProfileBundle -metadata: - creationTimestamp: "2022-10-19T12:06:30Z" - finalizers: - - profilebundle.finalizers.compliance.openshift.io - generation: 1 - name: rhcos4 - namespace: openshift-compliance - resourceVersion: "46741" - uid: 22350850-af4a-4f5c-9a42-5e7b68b82d7d -spec: - contentFile: ssg-rhcos4-ds.xml - contentImage: registry.redhat.io/compliance/openshift-compliance-content-rhel8@sha256:900e... <1> -status: - conditions: - - lastTransitionTime: "2022-10-19T12:07:51Z" - message: Profile bundle successfully parsed - reason: Valid - status: "True" - type: Ready - dataStreamStatus: VALID ----- -<1> Security container image. - -Each `ProfileBundle` is backed by a deployment. When the Compliance Operator detects that the container image digest has changed, the deployment is updated to reflect the change and parse the content again. Using the digest instead of a tag ensures that you use a stable and predictable set of profiles. diff --git a/modules/compliance-updating.adoc b/modules/compliance-updating.adoc deleted file mode 100644 index 6b0673e27042..000000000000 --- a/modules/compliance-updating.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * security/compliance_operator/compliance-operator-remediation.adoc - -:_content-type: PROCEDURE -[id="compliance-updating_{context}"] -= Updating remediations - -When a new version of compliance content is used, it might deliver a new and different version of a remediation than the previous version. The Compliance Operator will keep the old version of the remediation applied. 
The {product-title} administrator is also notified of the new version to review and apply. A `ComplianceRemediation` object that was applied earlier, but was then updated, changes its status to *Outdated*. The outdated objects are labeled so that they can be searched for easily.
-
-The previously applied remediation contents are then stored in the `spec.outdated` attribute of the `ComplianceRemediation` object and the new, updated contents are stored in the `spec.current` attribute. After updating the content to a newer version, the administrator then needs to review the remediation. As long as the `spec.outdated` attribute exists, it is used to render the resulting `MachineConfig` object. After the `spec.outdated` attribute is removed, the Compliance Operator re-renders the resulting `MachineConfig` object, which causes the Operator to push the configuration to the nodes.
-
-.Procedure
-
-. Search for any outdated remediations:
-+
-[source,terminal]
-----
-$ oc -n openshift-compliance get complianceremediations \
--l complianceoperator.openshift.io/outdated-remediation=
-----
-+
-.Example output
-[source,terminal]
-----
-NAME STATE
-workers-scan-no-empty-passwords Outdated
-----
-+
-The currently applied remediation is stored in the `Outdated` attribute and the new, unapplied remediation is stored in the `Current` attribute. If you are satisfied with the new version, remove the `Outdated` field. If you want to keep the updated content, remove the `Current` and `Outdated` attributes.
-
-. Apply the newer version of the remediation:
-+
-[source,terminal]
-----
-$ oc -n openshift-compliance patch complianceremediations workers-scan-no-empty-passwords \
---type json -p '[{"op":"remove", "path":"/spec/outdated"}]'
-----
-
-. The remediation state will switch from `Outdated` to `Applied`:
-+
-[source,terminal]
-----
-$ oc get -n openshift-compliance complianceremediations workers-scan-no-empty-passwords
-----
-+
-.Example output
-[source,terminal]
-----
-NAME STATE
-workers-scan-no-empty-passwords Applied
-----
-
-. The nodes will apply the newer remediation version and reboot.
diff --git a/modules/compute-machineset-upi-reqs.adoc b/modules/compute-machineset-upi-reqs.adoc
deleted file mode 100644
index ff6d5f90e3c0..000000000000
--- a/modules/compute-machineset-upi-reqs.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-// Module included in the following assemblies:
-//
-// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc
-//
-// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP.
-
-ifeval::["{context}" == "creating-machineset-vsphere"]
-:vsphere:
-endif::[]
-
-:_content-type: CONCEPT
-[id="compute-machineset-upi-reqs_{context}"]
-= Requirements for clusters with user-provisioned infrastructure to use compute machine sets
-
-To use compute machine sets on clusters that have user-provisioned infrastructure, you must ensure that your cluster configuration supports using the Machine API.
- -ifeval::["{context}" == "creating-machineset-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/con_bmo-bare-metal-operator-architecture.adoc b/modules/con_bmo-bare-metal-operator-architecture.adoc deleted file mode 100644 index c263a5df8cf1..000000000000 --- a/modules/con_bmo-bare-metal-operator-architecture.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// This is included in the following assemblies: -// -// post_installation_configuration/bare-metal-configuration.adoc -:_content-type: CONCEPT -[id="bmo-bare-metal-operator-architecture_{context}"] -= Bare Metal Operator architecture - -The Bare Metal Operator (BMO) uses three resources to provision, manage, and inspect bare-metal hosts in your cluster. The following diagram illustrates the architecture of these resources: - -image::302_OpenShift_Bare_Metal_Operator_0223.png[BMO architecture overview] - -.BareMetalHost - -The `BareMetalHost` resource defines a physical host and its properties. When you provision a bare-metal host to the cluster, you must define a `BareMetalHost` resource for that host. For ongoing management of the host, you can inspect the information in the `BareMetalHost` or update this information. - -The `BareMetalHost` resource features provisioning information such as the following: - -* Deployment specifications such as the operating system boot image or the custom RAM disk -* Provisioning state -* Baseboard Management Controller (BMC) address -* Desired power state - -The `BareMetalHost` resource features hardware information such as the following: - -* Number of CPUs -* MAC address of a NIC -* Size of the host's storage device -* Current power state - -.HostFirmwareSettings -You can use the `HostFirmwareSettings` resource to retrieve and manage the firmware settings for a host. When a host moves to the `Available` state, the Ironic service reads the host's firmware settings and creates the `HostFirmwareSettings` resource. There is a one-to-one mapping between the `BareMetalHost` resource and the `HostFirmwareSettings` resource. - -You can use the `HostFirmwareSettings` resource to inspect the firmware specifications for a host or to update a host's firmware specifications. - -[NOTE] -==== -You must adhere to the schema specific to the vendor firmware when you edit the `spec` field of the `HostFirmwareSettings` resource. This schema is defined in the read-only `FirmwareSchema` resource. -==== - -.FirmwareSchema -Firmware settings vary among hardware vendors and host models. A `FirmwareSchema` resource is a read-only resource that contains the types and limits for each firmware setting on each host model. The data comes directly from the BMC by using the Ironic service. The `FirmwareSchema` resource enables you to identify valid values you can specify in the `spec` field of the `HostFirmwareSettings` resource. - -A `FirmwareSchema` resource can apply to many `BareMetalHost` resources if the schema is the same. 
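-
-To make these resources more concrete, the following minimal sketch shows roughly what a `BareMetalHost` definition looks like. The host name, MAC address, BMC address, and secret name are placeholders, and a real definition typically carries additional fields:
-
-[source,yaml]
-----
-apiVersion: metal3.io/v1alpha1
-kind: BareMetalHost
-metadata:
-  name: worker-3                      # placeholder host name
-  namespace: openshift-machine-api
-spec:
-  online: true                        # desired power state
-  bootMACAddress: 00:11:22:33:44:55   # placeholder MAC address of the provisioning NIC
-  bmc:
-    address: redfish://203.0.113.10/redfish/v1/Systems/1   # placeholder BMC address
-    credentialsName: worker-3-bmc-secret                   # Secret that holds the BMC credentials
-----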
- -[role="_additional-resources"] -.Additional resources -* link:https://metal3.io/[Metal³ API service for provisioning bare-metal hosts] -* link:https://ironicbaremetal.org/[Ironic API service for managing bare-metal infrastructure] diff --git a/modules/config-aws-access.adoc b/modules/config-aws-access.adoc deleted file mode 100644 index b037e04e3e96..000000000000 --- a/modules/config-aws-access.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: PROCEDURE -[id="config-aws-access_{context}"] - -= Configuring AWS infrastructure access - -// TODO: I see {AWS} and {GCP} only used a handful of time, but their written out form much more. Should all hardcoded instances be updated to use the attributes? -{AWS} infrastructure access allows link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. Administrators can select between `Network Management` or `Read-only` access options. - -.Prerequisites - -* An AWS account with IAM permissions. - -.Procedure - -. Log in to your AWS account. If necessary, you can create a new AWS account by following the link:https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/[AWS documentation]. - -. Create an IAM user with `STS:AllowAssumeRole` permissions within the AWS account. - -.. Open the link:https://console.aws.amazon.com/iam/home#/home[IAM dashboard] of the AWS Management Console. -.. In the *Policies* section, click *Create Policy*. -.. Select the *JSON* tab and replace the existing text with the following: -+ -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "sts:AssumeRole", - "Resource": "*" - } - ] -} ----- - -.. Click *Next:Tags*. -.. Optional: Add tags. Click *Next:Review* -.. Provide an appropriate name and description, then click *Create Policy*. -.. In the *Users* section, click *Add user*. -.. Provide an appropriate user name. -.. Select *AWS Management Console access* as the AWS access type. -.. Adjust the password requirements as necessary for your organization, then click *Next:Permissions*. -.. Click the *Attach existing policies directly* option. Search for and check the policy created in previous steps. -+ -[NOTE] -==== -It is not recommended to set a permissions boundary. -==== - -.. Click *Next: Tags*, then click *Next: Review*. Confirm the configuration is correct. -.. Click *Create user*, a success page appears. -.. Gather the IAM user’s Amazon Resource Name (ARN). The ARN will have the following format: `arn:aws:iam::000111222333:user/username`. Click *Close*. - -. Open {cluster-manager-url} in your browser and select the cluster you want to allow AWS infrastructure access. - -. Select the *Access control* tab, and scroll to the *AWS Infrastructure Access* section. - -. Paste the *AWS IAM ARN* and select *Network Management* or *Read-only* permissions, then click *Grant role*. - -. Copy the *AWS OSD console URL* to your clipboard. - -. Sign in to your AWS account with your Account ID or alias, IAM user name, and password. - -. In a new browser tab, paste the AWS OSD Console URL that will be used to route to the AWS Switch Role page. - -. Your account number and role will be filled in already. 
Choose a display name if necessary, then click *Switch Role*. - -.Verification - -* You now see *VPC* under *Recently visited services*. diff --git a/modules/config-github-idp.adoc b/modules/config-github-idp.adoc deleted file mode 100644 index dd6b0033da72..000000000000 --- a/modules/config-github-idp.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-github-idp_{context}"] -= Configuring a GitHub identity provider - - -Configure a GitHub identity provider to validate user names and passwords against GitHub or GitHub Enterprise’s OAuth authentication server and access your {product-title} cluster. OAuth facilitates a token exchange flow between {product-title} and GitHub or GitHub Enterprise. - -[WARNING] -==== -Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. -==== - -.Prerequisites - -* The OAuth application must be created directly within the GitHub link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/managing-organization-settings[organization settings] by the GitHub organization administrator. -* link:https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams[GitHub organizations or teams] are set up in your GitHub account. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *GitHub* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will use this to register the GitHub application. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/github ----- - -. link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[Register an application on GitHub]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* and *Client secret* provided by GitHub. - -. Enter a *hostname*. A hostname must be entered when using a hosted instance of GitHub Enterprise. - -. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Select *Use organizations* or *Use teams* to restrict access to a particular GitHub organization or a GitHub team. - -. Enter the name of the organization or team you would like to restrict access to. 
Click *Add more* to specify multiple organizations or teams that users can be a member of. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-gitlab-idp.adoc b/modules/config-gitlab-idp.adoc deleted file mode 100644 index ac89b1c27fdb..000000000000 --- a/modules/config-gitlab-idp.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-gitlab-idp_{context}"] -= Configuring a GitLab identity provider - - -Configure a GitLab identity provider to use link:https://gitlab.com/[GitLab.com] or any other GitLab instance as an identity provider. - -.Prerequisites - -- If you use GitLab version 7.7.0 to 11.0, you connect using the link:http://doc.gitlab.com/ce/integration/oauth_provider.html[OAuth integration]. If you use GitLab version 11.1 or later, you can use link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID Connect] (OIDC) to connect instead of OAuth. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *GitLab* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to GitLab. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/gitlab ----- - -. link:https://docs.gitlab.com/ee/integration/oauth_provider.html[Add a new application in GitLab]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* and *Client secret* provided by GitLab. - -. Enter the *URL* of your GitLab provider. - -. Optional: You can use a certificate authority (CA) file to validate server certificates for the configured GitLab URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. 
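-
-The GitLab settings that you enter in {cluster-manager} correspond, on the cluster, to a GitLab entry in the OAuth configuration. The following sketch is only an illustration of that mapping; the provider name, secret name, and URL are placeholders, and the actual configuration is applied for you by {cluster-manager}:
-
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: OAuth
-metadata:
-  name: cluster
-spec:
-  identityProviders:
-  - name: gitlabidp                 # placeholder identity provider name
-    mappingMethod: claim
-    type: GitLab
-    gitlab:
-      clientID: <client_id>
-      clientSecret:
-        name: gitlab-client-secret  # Secret that holds the client secret
-      url: https://gitlab.com       # URL of your GitLab provider
-      ca:
-        name: ca-config-map         # optional CA config map for a self-hosted instance
-----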
diff --git a/modules/config-google-idp.adoc b/modules/config-google-idp.adoc deleted file mode 100644 index 7e5069ef999e..000000000000 --- a/modules/config-google-idp.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-google-idp_{context}"] -= Configuring a Google identity provider - - -Configure a Google identity provider to allow users to authenticate with their Google credentials. - -[WARNING] -==== -Using Google as an identity provider allows any Google user to authenticate to your server. -You can limit authentication to members of a specific hosted domain with the -`hostedDomain` configuration attribute. -==== - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *Google* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. You will provide this URL to Google. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/google ----- - -. Configure a Google identity provider using link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter the *Client ID* of a registered Google project and the *Client secret* issued by Google. - -. Enter a hosted domain to restrict users to a Google Apps domain. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-htpasswd-idp.adoc b/modules/config-htpasswd-idp.adoc deleted file mode 100644 index beb8b0a5ca1e..000000000000 --- a/modules/config-htpasswd-idp.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -ifeval::["{context}" == "config-identity-providers"] -:osd-distro: -endif::[] -ifeval::["{context}" == "rosa-sts-config-identity-providers"] -:rosa-distro: -endif::[] -ifeval::["{context}" == "rosa-config-identity-providers"] -:rosa-distro: -endif::[] - -:_content-type: PROCEDURE -[id="config-htpasswd-idp_{context}"] -= Configuring an htpasswd identity provider - -Configure an htpasswd identity provider to create a single, static user with cluster administration privileges. You can log in to your cluster as the user to troubleshoot issues. 
- -[IMPORTANT] -==== -The htpasswd identity provider option is included only to enable the creation of a single, static administration user. htpasswd is not supported as a general-use identity provider for {product-title}. -==== - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select your cluster. - -. Select *Access control* -> *Identity providers*. - -. Click *Add identity provider*. - -. Select *HTPasswd* from the *Identity Provider* drop-down menu. - -. Add a unique name in the *Name* field for the identity provider. - -. Use the suggested username and password for the static user, or create your own. -+ -[NOTE] -==== -The credentials defined in this step are not visible after you select *Add* in the following step. If you lose the credentials, you must recreate the identity provider and define the credentials again. -==== - -. Select *Add* to create the htpasswd identity provider and the single, static user. - -. Grant the static user permission to manage the cluster: -.. Under *Access control* -> *Cluster Roles and Access*, select *Add user*. -.. Enter the *User ID* of the static user that you created in the preceding step. -ifdef::osd-distro[] -.. Select a *Group.* -** If you are installing {product-title} using the Customer Cloud Subscription (CCS) infrastructure type, choose either the `dedicated-admins` or `cluster-admins` group. Users in the `dedicated-admins` group have standard administrative privileges for {product-title}. Users in the `cluster-admins` group have full administrative access to the cluster. -** If you are installing {product-title} using the Red Hat cloud account infrastructure type, the `dedicated-admins` group is automatically selected. -endif::osd-distro[] -ifdef::rosa-distro[] -.. Select a *Group*. Users in the `dedicated-admins` group have standard administrative privileges for {product-title}. Users in the `cluster-admins` group have full administrative access to the cluster. -endif::rosa-distro[] -.. Select *Add user* to grant the administration privileges to the user. - -.Verification - -* The configured htpasswd identity provider is visible on the *Access control* -> *Identity providers* page. -+ -[NOTE] -==== -After creating the identity provider, synchronization usually completes within two minutes. You can log in to the cluster as the user after the htpasswd identity provider becomes available. -==== -* The single, administrative user is visible on the *Access control* -> *Cluster Roles and Access* page. The administration group membership of the user is also displayed. - -ifeval::["{context}" == "config-identity-providers"] -:!osd-distro: -endif::[] -ifeval::["{context}" == "rosa-sts-config-identity-providers"] -:!rosa-distro: -endif::[] -ifeval::["{context}" == "rosa-config-identity-providers"] -:!rosa-distro: -endif::[] diff --git a/modules/config-idp.adoc b/modules/config-idp.adoc deleted file mode 100644 index 576d1083b811..000000000000 --- a/modules/config-idp.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="config-idp_{context}"] -= Configuring an identity provider - -After you have installed {product-title}, you must configure your cluster to use an identity provider. You can then add members to your identity provider to grant them access to your cluster. - -You can configure different identity provider types for your {product-title} cluster. 
Supported types include GitHub, GitHub Enterprise, GitLab, Google, LDAP, OpenID Connect, and htpasswd identity providers. - -[IMPORTANT] -==== -The htpasswd identity provider option is included only to enable the creation of a single, static administration user. htpasswd is not supported as a general-use identity provider for {product-title}. -==== - -The following procedure configures a GitHub identity provider as an example. - -[WARNING] -==== -Configuring GitHub authentication allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you must restrict access to only those in specific GitHub organizations or teams. -==== - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You have a GitHub user account. -* You created a GitHub organization in your GitHub account. For more information, see link:https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/creating-a-new-organization-from-scratch[Creating a new organization from scratch] in the GitHub documentation. -* If you are restricting user access to a GitHub team, you have created a team within your GitHub organization. For more information, see link:https://docs.github.com/en/organizations/organizing-members-into-teams/creating-a-team[Creating a team] in the GitHub documentation. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Select *Access control* -> *Identity providers*. - -. Select the *GitHub* identity provider type from the *Add identity provider* drop-down menu. - -. Enter a unique name for the identity provider. The name cannot be changed later. - -. Register an OAuth application in your GitHub organization by following the steps in the link:https://docs.github.com/en/developers/apps/creating-an-oauth-app[GitHub documentation]. -+ -[NOTE] -==== -You must register the OAuth app under your GitHub organization. If you register an OAuth application that is not owned by the organization that contains your cluster users or teams, then user authentication to the cluster will not succeed. -==== - -* For the homepage URL in your GitHub OAuth app configuration, specify the `\https://oauth-openshift.apps.<cluster_name>.<cluster_domain>` portion of the *OAuth callback URL* that is automatically generated in the *Add a GitHub identity provider* page on {cluster-manager}. -+ -The following is an example of a homepage URL for a GitHub identity provider: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com ----- - -* For the authorization callback URL in your GitHub OAuth app configuration, specify the full *OAuth callback URL* that is automatically generated in the *Add a GitHub identity provider* page on {cluster-manager}. The full URL has the following syntax: -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- - -. Return to the *Edit identity provider: GitHub* dialog in {cluster-manager-url} and select *Claim* from the *Mapping method* drop-down menu. - -. Enter the *Client ID* and *Client secret* for your GitHub OAuth application. The GitHub page for your OAuth app provides the ID and secret. - -. Optional: Enter a *hostname*. -+ -[NOTE] -==== -A hostname must be entered when using a hosted instance of GitHub Enterprise. -==== - -. 
Optional: You can specify a certificate authority (CA) file to validate server certificates for a configured GitHub Enterprise URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Select *Use organizations* or *Use teams* to restrict access to a GitHub organization or a GitHub team within an organization. - -. Enter the name of the organization or team you would like to restrict access to. Click *Add more* to specify multiple organizations or teams. -+ -[NOTE] -==== -Specified organizations must own an OAuth app that was registered by using the preceding steps. If you specify a team, it must exist within an organization that owns an OAuth app that was registered by using the preceding steps. -==== - -. Click *Add* to apply the identity provider configuration. -+ -[NOTE] -==== -It might take approximately two minutes for the identity provider configuration to become active. -==== - -.Verification - -* After the configuration becomes active, the identity provider is listed under *Access control* -> *Identity providers* on the {cluster-manager-url} page for your cluster. diff --git a/modules/config-ldap-idp.adoc b/modules/config-ldap-idp.adoc deleted file mode 100644 index 71b855efff34..000000000000 --- a/modules/config-ldap-idp.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-ldap-idp_{context}"] -= Configuring a LDAP identity provider - - -Configure the LDAP identity provider to validate user names and passwords against an LDAPv3 server, using simple bind authentication. - -.Prerequisites - -* When configuring a LDAP identity provider, you will need to enter a configured *LDAP URL*. The configured URL is an RFC 2255 URL, which specifies the LDAP host and -search parameters to use. The syntax of the URL is: -+ ----- -ldap://host:port/basedn?attribute?scope?filter ----- -+ -[cols="2a,8a",options="header"] -|=== -|URL component | Description -.^|`ldap` | For regular LDAP, use the string `ldap`. For secure LDAP -(LDAPS), use `ldaps` instead. -.^|`host:port` | The name and port of the LDAP server. Defaults to -`localhost:389` for ldap and `localhost:636` for LDAPS. -.^|`basedn` | The DN of the branch of the directory where all searches should -start from. At the very least, this must be the top of your directory tree, but -it could also specify a subtree in the directory. -.^|`attribute` | The attribute to search for. Although RFC 2255 allows a -comma-separated list of attributes, only the first attribute will be used, no -matter how many are provided. If no attributes are provided, the default is to -use `uid`. It is recommended to choose an attribute that will be unique across -all entries in the subtree you will be using. -.^|`scope` | The scope of the search. Can be either `one` or `sub`. -If the scope is not provided, the default is to use a scope of `sub`. -.^|`filter` | A valid LDAP search filter. 
If not provided, defaults to -`(objectClass=*)` -|=== -+ -When doing searches, the attribute, filter, and provided user name are combined -to create a search filter that looks like: -+ ----- -(&(<filter>)(<attribute>=<username>)) ----- -+ -[IMPORTANT] -If the LDAP directory requires authentication to search, specify a `bindDN` and -`bindPassword` to use to perform the entry search. - - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *LDAP* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. - -. Select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter a *LDAP URL* to specify the LDAP search parameters to use. - -. Optional: Enter a *Bind DN* and *Bind password*. - -. Enter the attributes that will map LDAP attributes to identities. -** Enter an *ID* attribute whose value should be used as the user ID. Click *Add more* to add multiple ID attributes. -** Optional: Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple preferred username attributes. -** Optional: Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. - -. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your LDAP identity provider to validate server certificates for the configured URL. Click *Browse* to locate and attach a *CA file* to the identity provider. - -. Optional: Under the advanced options, you can choose to make the LDAP provider *Insecure*. If you select this option, a CA file cannot be used. -+ -[IMPORTANT] -==== -If you are using an insecure LDAP connection (ldap:// or port 389), then you must check the *Insecure* option in the configuration wizard. -==== - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. diff --git a/modules/config-openid-idp.adoc b/modules/config-openid-idp.adoc deleted file mode 100644 index 00e66b2c8eaa..000000000000 --- a/modules/config-openid-idp.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa-sts-config-identity-providers.adoc -// * rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-config-identity-providers.adoc - -:_content-type: PROCEDURE -[id="config-openid-idp_{context}"] -= Configuring an OpenID identity provider - - -Configure an OpenID identity provider to integrate with an OpenID Connect identity provider using an link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow]. - -[IMPORTANT] -==== -The Authentication Operator in {product-title} requires that the configured -OpenID Connect identity provider implements the -link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] -specification. 
-==== - -Claims are read from the JWT `id_token` returned from the OpenID identity -provider and, if specified, from the JSON returned by the Issuer URL. - -At least one claim must be configured to use as the user's identity. - -You can also indicate which claims to use as the user's preferred user name, -display name, and email address. If multiple claims are specified, the first one -with a non-empty value is used. The standard claims are: - -[cols="1,2",options="header"] -|=== - -|Claim -|Description - -|`preferred_username` -|The preferred user name when provisioning a user. A -shorthand name that the user wants to be referred to as, such as `janedoe`. Typically -a value that corresponding to the user's login or username in the authentication -system, such as username or email. - -|`email` -|Email address. - -|`name` -|Display name. - -|=== - -See the -link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation] -for more information. - -.Prerequisites -* Before you configure OpenID Connect, check the installation prerequisites for any Red Hat product or service you want to use with your {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you need to configure identity providers for. - -. Click the *Access control* tab. - -. Click *Add identity provider*. -+ -[NOTE] -==== -You can also click the *Add Oauth configuration* link in the warning message displayed after cluster creation to configure your identity providers. -==== - -. Select *OpenID* from the drop-down menu. - -. Enter a unique name for the identity provider. This name cannot be changed later. -** An *OAuth callback URL* is automatically generated in the provided field. -+ ----- -https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/openid ----- - -. Register a new OpenID Connect client in the OpenID identity provider by following the steps to link:https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest[create an authorization request]. - -. Return to {product-title} and select a mapping method from the drop-down menu. *Claim* is recommended in most cases. - -. Enter a *Client ID* and *Client secret* provided from OpenID. - -. Enter an *Issuer URL*. This is the URL that the OpenID provider asserts as the Issuer Identifier. It must use the https scheme with no URL query parameters or fragments. - -. Enter an *Email* attribute whose value should be used as the email address. Click *Add more* to add multiple email attributes. - -. Enter a *Name* attribute whose value should be used as the preferred username. Click *Add more* to add multiple preferred usernames. - -. Enter a *Preferred username* attribute whose value should be used as the display name. Click *Add more* to add multiple display names. - -. Optional: Click *Show advanced Options* to add a certificate authority (CA) file to your OpenID identity provider. - -. Optional: Under the advanced options, you can add *Additional scopes*. By default, the `OpenID` scope is requested. - -. Click *Confirm*. - -.Verification - -* The configured identity provider is now visible on the *Access control* tab of the *Clusters* page. 
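-
-As with the other identity provider types, the values that you enter in {cluster-manager} correspond to an OpenID entry in the cluster OAuth configuration. The following sketch is only an illustration of that mapping; the provider name, secret name, issuer URL, and claim lists are placeholders:
-
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: OAuth
-metadata:
-  name: cluster
-spec:
-  identityProviders:
-  - name: oidcidp                   # placeholder identity provider name
-    mappingMethod: claim
-    type: OpenID
-    openID:
-      clientID: <client_id>
-      clientSecret:
-        name: openid-client-secret  # Secret that holds the client secret
-      issuer: https://idp.example.com   # must use https with no query parameters or fragments
-      claims:
-        preferredUsername:
-        - preferred_username
-        name:
-        - name
-        email:
-        - email
-----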
diff --git a/modules/configmap-adding-ca.adoc b/modules/configmap-adding-ca.adoc deleted file mode 100644 index 7240c17f228b..000000000000 --- a/modules/configmap-adding-ca.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/setting-up-trusted-ca - -:_content-type: PROCEDURE -[id="configmap-adding-ca_{context}"] -= Adding certificate authorities to the cluster - -ifdef::openshift-enterprise,openshift-rosa,openshift-dedicated,openshift-webscale,openshift-origin[] -You can add certificate authorities (CA) to the cluster for use when pushing and pulling images with the following procedure. - -.Prerequisites - -ifdef::openshift-rosa[] -* You must have cluster administrator privileges. -endif::[] -ifdef::openshift-dedicated[] -* You must have at least dedicated administrator privileges. -endif::[] -* You must have access to the public certificates of the registry, usually a `hostname/ca.crt` file located in the `/etc/docker/certs.d/` directory. - -.Procedure - -. Create a `ConfigMap` in the `openshift-config` namespace containing the trusted certificates for the registries that use self-signed certificates. For each CA file, ensure the key in the `ConfigMap` is the hostname of the registry in the `hostname[..port]` format: -+ -[source,terminal] ----- -$ oc create configmap registry-cas -n openshift-config \ ---from-file=myregistry.corp.com..5000=/etc/docker/certs.d/myregistry.corp.com:5000/ca.crt \ ---from-file=otherregistry.com=/etc/docker/certs.d/otherregistry.com/ca.crt ----- - -. Update the cluster image configuration: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-cas"}}}' --type=merge ----- -endif::[] diff --git a/modules/configmap-removing-ca.adoc b/modules/configmap-removing-ca.adoc deleted file mode 100644 index a1b93ec8621a..000000000000 --- a/modules/configmap-removing-ca.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/setting-up-trusted-ca - -:_content-type: PROCEDURE -[id="configmap-removing-ca_{context}"] -= Removing certificate authorities on a {product-title} cluster - -You can remove certificate authorities (CA) from your cluster with the {product-title} (ROSA) CLI, `rosa`. - -.Prerequisites - -* You must have cluster administrator privileges. -* You have installed the ROSA CLI (`rosa`). -* Your cluster has certificate authorities added. - -.Procedure - -* Use the `rosa edit` command to modify the CA trust bundle. 
You must pass an empty string to the `--additional-trust-bundle-file` argument to clear the trust bundle from the cluster:
-+
-[source,terminal]
-----
-$ rosa edit cluster -c <cluster_name> --additional-trust-bundle-file ""
-----
-+
-.Example output
-+
-[source,terminal]
-----
-I: Updated cluster <cluster_name>
-----
-
-.Verification
-
-* You can verify that the trust bundle has been removed from the cluster by using the `rosa describe` command:
-+
-[source,terminal]
-----
-$ rosa describe cluster -c <cluster_name>
-----
-+
-Before removal, the Additional trust bundle section appears, with its value redacted for security purposes:
-+
-[source,terminal]
-----
-Name: <cluster_name>
-ID: <cluster_internal_id>
-External ID: <cluster_external_id>
-OpenShift Version: 4.13.0
-Channel Group: stable
-DNS: <dns>
-AWS Account: <aws_account_id>
-API URL: <api_url>
-Console URL: <console_url>
-Region: us-east-1
-Multi-AZ: false
-Nodes:
- - Control plane: 3
- - Infra: 2
- - Compute: 2
-Network:
- - Type: OVNKubernetes
- - Service CIDR: <service_cidr>
- - Machine CIDR: <machine_cidr>
- - Pod CIDR: <pod_cidr>
- - Host Prefix: <host_prefix>
-Proxy:
- - HTTPProxy: <proxy_url>
-Additional trust bundle: REDACTED
-----
-+
-After removing the trust bundle, the Additional trust bundle section is removed:
-+
-[source,terminal]
-----
-Name: <cluster_name>
-ID: <cluster_internal_id>
-External ID: <cluster_external_id>
-OpenShift Version: 4.13.0
-Channel Group: stable
-DNS: <dns>
-AWS Account: <aws_account_id>
-API URL: <api_url>
-Console URL: <console_url>
-Region: us-east-1
-Multi-AZ: false
-Nodes:
- - Control plane: 3
- - Infra: 2
- - Compute: 2
-Network:
- - Type: OVNKubernetes
- - Service CIDR: <service_cidr>
- - Machine CIDR: <machine_cidr>
- - Pod CIDR: <pod_cidr>
- - Host Prefix: <host_prefix>
-Proxy:
- - HTTPProxy: <proxy_url>
-----
diff --git a/modules/configuration-ovnk-network-plugin-json-object.adoc b/modules/configuration-ovnk-network-plugin-json-object.adoc
deleted file mode 100644
index c2192071ae4b..000000000000
--- a/modules/configuration-ovnk-network-plugin-json-object.adoc
+++ /dev/null
@@ -1,50 +0,0 @@
-:_content-type: REFERENCE
-[id="configuration-ovnk-network-plugin-json-object_{context}"]
-= OVN-Kubernetes network plugin JSON configuration table
-
-The following table describes the configuration parameters for the OVN-Kubernetes CNI network plugin:
-
-.OVN-Kubernetes network plugin JSON configuration table
-[cols=".^2,.^2,.^6",options="header"]
-|====
-|Field|Type|Description
-
-|`cniVersion`
-|`string`
-|The CNI specification version. The required value is `0.3.1`.
-
-|`name`
-|`string`
-|The name of the network. These networks are not namespaced. For example, you can have a network named `l2-network` referenced from two different `NetworkAttachmentDefinitions` that exist on two different namespaces. This ensures that pods making use of the `NetworkAttachmentDefinition` on their own different namespaces can communicate over the same secondary network. However, those two different `NetworkAttachmentDefinitions` must also share the same network-specific parameters such as `topology`, `subnets`, `mtu`, and `excludeSubnets`.
-
-|`type`
-|`string`
-|The name of the CNI plugin to configure. The required value is `ovn-k8s-cni-overlay`.
-
-|`topology`
-|`string`
-|The topological configuration for the network. The required value is `layer2`.
-
-|`subnets`
-|`string`
-| The subnet to use for the network across the cluster. When specifying `layer2` for the `topology`, only include the CIDR for the node.
For example, `10.100.200.0/24`. - -For `"topology":"layer2"` deployments, IPv6 (`2001:DBB::/64`) and dual-stack (`192.168.100.0/24,2001:DBB::/64`) subnets are supported. - -|`mtu` -|`string` -|The maximum transmission unit (MTU) for the network. The default value, `1300`, is automatically set by the kernel. - -|`netAttachDefName` -|`string` -|The metadata `namespace` and `name` of the network attachment definition object where this -configuration is included. For example, if this configuration is defined in a `NetworkAttachmentDefinition` in namespace `ns1` named `l2-network`, this should be set to `ns1/l2-network`. - -|`excludeSubnets` -|`string` -|A comma-separated list of CIDRs and IPs. IPs are removed from the assignable IP pool, and are never passed to the pods. When omitted, the logical switch implementing the network only provides layer 2 communication, and users must configure IPs for the pods. Port security only prevents MAC spoofing. - -|==== \ No newline at end of file diff --git a/modules/configuration-resource-overview.adoc deleted file mode 100644 index ceef1e265043..000000000000 --- a/modules/configuration-resource-overview.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * TBD - -[id="configuration-resource-overview_{context}"] -= About Configuration Resources in {product-title} - -You perform many customization and configuration tasks after you deploy your -cluster, including configuring networking and setting your identity provider. - -In {product-title}, you modify Configuration Resources to determine the behavior -of these integrations. The Configuration Resources are controlled by Operators -that are managed by the Cluster Version Operator, which manages all of the -Operators that run your cluster's control plane. - -You can customize the following Configuration Resources: - -[cols="3a,8a",options="header"] -|=== - -|Configuration Resource |Description -|Authentication -| - -|DNS -| - -|Samples -| * *ManagementState:* -** *Managed.* The operator updates the samples as the configuration dictates. -** *Unmanaged.* The operator ignores updates to the samples resource object and -any imagestreams or templates in the `openshift` namespace. -** *Removed.* The operator removes the set of managed imagestreams -and templates in the `openshift` namespace. It ignores new samples created by -the cluster administrator or any samples in the skipped lists. After the removals are -complete, the operator behaves as if it is in the `Unmanaged` state and ignores -any watch events on the sample resources, imagestreams, or templates. It -operates on secrets to facilitate the CentOS to RHEL switch. There are some -caveats around concurrent create and removal. -* *Samples Registry:* Overrides the registry from which images are imported. -* *Architecture:* Placeholder for choosing an architecture type. Currently, only x86 -is supported. -* *Skipped Imagestreams:* Imagestreams that are in the operator's -inventory, but that the cluster administrator wants the operator to ignore or not manage. -* *Skipped Templates:* Templates that are in the operator's inventory, but that -the cluster administrator wants the operator to ignore or not manage.
- -|Infrastructure -| - -|Ingress -| - -|Network -| - -|OAuth -| - -|=== - -While you can complete many other customizations and configure other integrations -with an {product-title} cluster, configuring these resources is a common first -step after you deploy a cluster. - -Like all Operators, the Configuration Resources are governed by -Custom Resource Definitions (CRD). You customize the CRD for each -Configuration Resource that you want to modify in your cluster. diff --git a/modules/configure-web-terminal-image-admin.adoc b/modules/configure-web-terminal-image-admin.adoc deleted file mode 100644 index 938567a40007..000000000000 --- a/modules/configure-web-terminal-image-admin.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE - -[id="configure-web-terminal-image-admin_{context}"] -= Configuring the web terminal image for all users - -You can use the *Administrator* perspective of the web console to set the default web terminal image for all users. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. -* You have installed the {web-terminal-op}. - -include::snippets/access-cluster-configuration-console.adoc[] - -. Click the *Web Terminal* tab, which opens the *Web Terminal Configuration* page. -. Enter the URL of the image that you want to use. -. Click *Save*. \ No newline at end of file diff --git a/modules/configure-web-terminal-timeout-admin.adoc b/modules/configure-web-terminal-timeout-admin.adoc deleted file mode 100644 index 44a4b39ee51c..000000000000 --- a/modules/configure-web-terminal-timeout-admin.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE - -[id="configure-web-terminal-timeout-admin_{context}"] -= Configuring the web terminal timeout for all users - -You can use the *Administrator* perspective of the web console to set the default web terminal timeout period for all users. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. -* You have installed the {web-terminal-op}. - -include::snippets/access-cluster-configuration-console.adoc[] - -. Click the *Web Terminal* tab, which opens the *Web Terminal Configuration* page. -. Set a value for the timeout. From the drop-down list, select a time interval of *Seconds*, *Minutes*, *Hours*, or *Milliseconds*. -. Click *Save*. \ No newline at end of file diff --git a/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc b/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc deleted file mode 100644 index 72593eab1508..000000000000 --- a/modules/configuring-a-provisioning-resource-to-scale-user-provisioned-clusters.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/scaling-a-user-provisioned-cluster-with-the-bare-metal-operator.adoc -:_content-type: PROCEDURE - -[id="configuring-a-provisioning-resource-to-scale-user-provisioned-clusters_{context}"] -= Configuring a provisioning resource to scale user-provisioned clusters - -Create a `Provisioning` custom resource (CR) to enable Metal platform components on a user-provisioned infrastructure cluster. 
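Before you create the resource, you can optionally check whether a `Provisioning` CR already exists on the cluster. The following is a minimal sketch that assumes the Bare Metal Operator is running, so that the `provisionings.metal3.io` custom resource definition is available:

[source,terminal]
----
$ oc get provisionings.metal3.io
----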
- -.Prerequisites - -* You installed a user-provisioned infrastructure cluster on bare metal. - -.Procedure - -. Create a `Provisioning` CR. - -.. Save the following YAML in the `provisioning.yaml` file: -+ -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-configuration -spec: - provisioningNetwork: "Disabled" - watchAllNamespaces: false ----- -+ -[NOTE] -==== -{product-title} {product-version} does not support enabling a provisioning network when you scale a user-provisioned cluster by using the Bare Metal Operator. -==== - -. Create the `Provisioning` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f provisioning.yaml ----- -+ -.Example output -[source,terminal] ----- -provisioning.metal3.io/provisioning-configuration created ----- - -.Verification - -* Verify that the provisioning service is running by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-autoscaler-operator-678c476f4c-jjdn5 2/2 Running 0 5d21h -cluster-baremetal-operator-6866f7b976-gmvgh 2/2 Running 0 5d21h -control-plane-machine-set-operator-7d8566696c-bh4jz 1/1 Running 0 5d21h -ironic-proxy-64bdw 1/1 Running 0 5d21h -ironic-proxy-rbggf 1/1 Running 0 5d21h -ironic-proxy-vj54c 1/1 Running 0 5d21h -machine-api-controllers-544d6849d5-tgj9l 7/7 Running 1 (5d21h ago) 5d21h -machine-api-operator-5c4ff4b86d-6fjmq 2/2 Running 0 5d21h -metal3-6d98f84cc8-zn2mx 5/5 Running 0 5d21h -metal3-image-customization-59d745768d-bhrp7 1/1 Running 0 5d21h ----- - diff --git a/modules/configuring-a-proxy-after-installation-cli.adoc b/modules/configuring-a-proxy-after-installation-cli.adoc deleted file mode 100644 index bee9d7ea26e6..000000000000 --- a/modules/configuring-a-proxy-after-installation-cli.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="configuring-a-proxy-after-installation-cli_{context}"] -= Configuring a proxy after installation using the CLI - -You can use the {product-title} (ROSA) CLI (`rosa`) to add a cluster-wide proxy configuration to an existing ROSA cluster in a Virtual Private Cloud (VPC). - -You can also use `rosa` to update an existing cluster-wide proxy configuration. For example, you might need to update the network address for the proxy or replace the additional trust bundle if any of the certificate authorities for the proxy expire. - -[IMPORTANT] -==== -The cluster applies the proxy configuration to the control plane and compute nodes. While applying the configuration, each cluster node is temporarily placed in an unschedulable state and drained of its workloads. Each node is restarted as part of the process. -==== - -.Prerequisites - -* You have installed and configured the latest ROSA (`rosa`) and OpenShift (`oc`) CLIs on your installation host. -* You have a ROSA cluster that is deployed in a VPC. 
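As a quick check of these prerequisites before you edit the cluster, you can confirm that both CLIs are installed and logged in. This is a minimal sketch and assumes the `rosa` and `oc` binaries are on your `PATH`:

[source,terminal]
----
$ rosa version <1>
$ rosa whoami <2>
$ oc whoami <3>
----
<1> Confirms that the ROSA CLI is installed and reports its version.
<2> Confirms that the ROSA CLI is authenticated to your Red Hat account.
<3> Confirms that the OpenShift CLI is logged in to the target cluster.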
- -.Procedure - -* Edit the cluster configuration to add or update the cluster-wide proxy details: -+ -[source,terminal] ----- -$ rosa edit cluster \ - --cluster $CLUSTER_NAME \ - --additional-trust-bundle-file <path_to_ca_bundle_file> \ <1> <2> <3> - --http-proxy http://<username>:<password>@<ip>:<port> \ <1> <4> - --https-proxy https://<username>:<password>@<ip>:<port> \ <1> <4> - --no-proxy example.com <5> ----- -+ --- -<1> The `additional-trust-bundle-file`, `http-proxy`, and `https-proxy` arguments are all optional. -<2> If you use the `additional-trust-bundle-file` argument without an `http-proxy` or `https-proxy` argument, the trust bundle is added to the trust store and used to verify cluster system egress traffic. In that scenario, the bundle is not configured to be used with a proxy. -<3> The `additional-trust-bundle-file` argument is a file path pointing to a bundle of PEM-encoded X.509 certificates, which are all concatenated together. The `additionalTrustBundle` parameter is required unless the identity certificate of the proxy is signed by an authority from the {op-system} trust bundle. If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional CAs, you must provide the MITM CA certificate. -+ -[NOTE] -==== -You should not attempt to change the proxy or additional trust bundle configuration on the cluster directly. These changes must be applied by using the ROSA CLI (`rosa`) or {cluster-manager-first}. Any changes that are made directly to the cluster will be reverted automatically. -==== -<4> The `http-proxy` and `https-proxy` arguments must point to a valid URL. -<5> A comma-separated list of destination domain names, IP addresses, or network CIDRs to exclude proxying. -+ -Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass proxy for all destinations. -If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues. -+ -This field is ignored if neither the `httpProxy` or `httpsProxy` fields are set. --- - -.Verification - -. List the status of the machine config pools and verify that they are updated: -+ -[source,terminal] ----- -$ oc get machineconfigpools ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-d9a03f612a432095dcde6dcf44597d90 True False False 3 3 3 0 31h -worker rendered-worker-f6827a4efe21e155c25c21b43c46f65e True False False 6 6 6 0 31h ----- - -. 
Display the proxy configuration for your cluster and verify that the details are as expected: -+ -[source,terminal] ----- -$ oc get proxy cluster -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -spec: - httpProxy: http://proxy.host.domain:<port> - httpsProxy: https://proxy.host.domain:<port> - <...more...> -status: - httpProxy: http://proxy.host.domain:<port> - httpsProxy: https://proxy.host.domain:<port> - <...more...> ----- diff --git a/modules/configuring-a-proxy-after-installation-ocm.adoc b/modules/configuring-a-proxy-after-installation-ocm.adoc deleted file mode 100644 index 6ede7996208f..000000000000 --- a/modules/configuring-a-proxy-after-installation-ocm.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="configuring-a-proxy-after-installation-ocm_{context}"] -= Configuring a proxy after installation using {cluster-manager} - -You can use {cluster-manager-first} to add a cluster-wide proxy configuration to an existing {product-title} cluster in a Virtual Private Cloud (VPC). -ifdef::openshift-dedicated[] -You can enable a proxy only for clusters that use the Customer Cloud Subscription (CCS) model. -endif::openshift-dedicated[] - -You can also use {cluster-manager} to update an existing cluster-wide proxy configuration. For example, you might need to update the network address for the proxy or replace the additional trust bundle if any of the certificate authorities for the proxy expire. - -[IMPORTANT] -==== -The cluster applies the proxy configuration to the control plane and compute nodes. While applying the configuration, each cluster node is temporarily placed in an unschedulable state and drained of its workloads. Each node is restarted as part of the process. -==== - -.Prerequisites - -* You have an {product-title} cluster -ifdef::openshift-dedicated[] - that uses the Customer Cloud Subscription (CCS) model -endif::openshift-dedicated[] -. -* Your cluster is deployed in a VPC. - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Under the *Virtual Private Cloud (VPC)* section on the *Networking* page, click *Edit cluster-wide proxy*. - -. On the *Edit cluster-wide proxy* page, provide your proxy configuration details: -.. Enter a value in at least one of the following fields: -** Specify a valid *HTTP proxy URL*. -** Specify a valid *HTTPS proxy URL*. -** In the *Additional trust bundle* field, provide a PEM encoded X.509 certificate bundle. If you are replacing an existing trust bundle file, select *Replace file* to view the field. The bundle is added to the trusted certificate store for the cluster nodes. An additional trust bundle file is required unless the identity certificate for the proxy is signed by an authority from the {op-system-first} trust bundle. -+ -If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional certificate authorities (CAs), you must provide the MITM CA certificate. -+ -[NOTE] -==== -If you upload an additional trust bundle file without specifying an HTTP or HTTPS proxy URL, the bundle is set on the cluster but is not configured to be used with the proxy. -==== -.. Click *Confirm*. - -.Verification - -* Under the *Virtual Private Cloud (VPC)* section on the *Networking* page, verify that the proxy configuration for your cluster is as expected. 
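Whether you update the proxy by using the ROSA CLI or {cluster-manager}, the configuration change drains and restarts each node. A minimal way to watch the rollout from a terminal, assuming you have `oc` access to the cluster, is shown in the following sketch:

[source,terminal]
----
$ oc get nodes -w <1>
$ oc get machineconfigpools <2>
----
<1> Watches node status while each node is cordoned, drained, and restarted.
<2> Confirms that the machine config pools report `UPDATED` as `True` after the change is applied.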
diff --git a/modules/configuring-a-proxy-during-installation-cli.adoc b/modules/configuring-a-proxy-during-installation-cli.adoc deleted file mode 100644 index 95315d2121a0..000000000000 --- a/modules/configuring-a-proxy-during-installation-cli.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="configuring-a-proxy-during-installation-cli_{context}"] -= Configuring a proxy during installation using the CLI - -If you are installing a {product-title} (ROSA) cluster into an existing Virtual Private Cloud (VPC), you can use the ROSA CLI (`rosa`) to enable a cluster-wide HTTP or HTTPS proxy during installation. - -The following procedure provides details about the ROSA CLI (`rosa`) arguments that are used to configure a cluster-wide proxy during installation. For general installation steps using the ROSA CLI, see _Creating a cluster with customizations using the CLI_. - -.Prerequisites - -* You have verified that the proxy is accessible from the VPC that the cluster is being installed into. The proxy must also be accessible from the private subnets of the VPC. - - -.Procedure -* Specify a proxy configuration when you create your cluster: -+ -[source,terminal] ----- -$ rosa create cluster \ - <other_arguments_here> \ - --additional-trust-bundle-file <path_to_ca_bundle_file> \ <1> <2> <3> - --http-proxy http://<username>:<password>@<ip>:<port> \ <1> <4> - --https-proxy https://<username>:<password>@<ip>:<port> \ <1> <4> - --no-proxy example.com <5> ----- -+ --- -<1> The `additional-trust-bundle-file`, `http-proxy`, and `https-proxy` arguments are all optional. -<2> If you use the `additional-trust-bundle-file` argument without an `http-proxy` or `https-proxy` argument, the trust bundle is added to the trust store and used to verify cluster system egress traffic. In that scenario, the bundle is not configured to be used with a proxy. -<3> The `additional-trust-bundle-file` argument is a file path pointing to a bundle of PEM-encoded X.509 certificates, which are all concatenated together. The `additionalTrustBundle` parameter is required unless the identity certificate of the proxy is signed by an authority from the {op-system} trust bundle. If you use an MITM transparent proxy network that does not require additional proxy configuration but requires additional CAs, you must provide the MITM CA certificate. -<4> The `http-proxy` and `https-proxy` arguments must point to a valid URL. -<5> A comma-separated list of destination domain names, IP addresses, or network CIDRs to exclude proxying. -+ -Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass proxy for all destinations. -If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues. -+ -This field is ignored if neither the `httpProxy` or `httpsProxy` fields are set. 
--- \ No newline at end of file diff --git a/modules/configuring-a-proxy-during-installation-ocm.adoc b/modules/configuring-a-proxy-during-installation-ocm.adoc deleted file mode 100644 index be66fe5fcd49..000000000000 --- a/modules/configuring-a-proxy-during-installation-ocm.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: CONCEPT -[id="configuring-a-proxy-during-installation-ocm_{context}"] -= Configuring a proxy during installation using {cluster-manager} - -If you are installing -ifdef::openshift-dedicated[] -an {product-title} -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -a {product-title} (ROSA) -endif::openshift-rosa[] -cluster into an existing Virtual Private Cloud (VPC), you can use {cluster-manager-first} to enable a cluster-wide HTTP or HTTPS proxy during installation. -ifdef::openshift-dedicated[] -You can enable a proxy only for clusters that use the Customer Cloud Subscription (CCS) model. -endif::openshift-dedicated[] - -Prior to the installation, you must verify that the proxy is accessible from the VPC that the cluster is being installed into. The proxy must also be accessible from the private subnets of the VPC. - -ifdef::openshift-dedicated[] -For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster on AWS with CCS_ or _Creating a cluster on GCP with CCS_. -endif::openshift-dedicated[] - -ifdef::openshift-rosa[] -For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster with customizations by using OpenShift Cluster Manager_. -endif::openshift-rosa[] diff --git a/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc b/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc deleted file mode 100644 index b0d559d590ea..000000000000 --- a/modules/configuring-a-proxy-trust-bundle-responsibilities.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-wide-proxy.adoc - -:_content-type: CONCEPT -[id="configuring-a-proxy-trust-bundle-responsibilities_{context}"] -= Responsibilities for additional trust bundles - -If you supply an additional trust bundle, you are responsible for the following requirements: - -* Ensuring that the contents of the additional trust bundle are valid -* Ensuring that the certificates, including intermediary certificates, contained in the additional trust bundle have not expired -* Tracking the expiry and performing any necessary renewals for certificates contained in the additional trust bundle -* Updating the cluster configuration with the updated additional trust bundle diff --git a/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc b/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc deleted file mode 100644 index 6e145452cda6..000000000000 --- a/modules/configuring-albo-on-sts-cluster-predefined-credentials.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-installing-albo-on-sts-cluster-predefined-credentials_{context}"] -= Configuring the AWS Load Balancer Operator on Security Token Service cluster by using specific credentials - -You can specify the credential secret by using the `spec.credentials` field in the AWS Load Balancer Controller custom 
resource (CR). You can use the predefined `CredentialsRequest` object of the controller to know which roles are required. - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. Download the CredentialsRequest custom resource (CR) of the AWS Load Balancer Controller, and create a directory to store it by running the following command: -+ -[source,terminal] ----- -$ curl --create-dirs -o <path-to-credrequests-dir>/cr.yaml https://raw.githubusercontent.com/openshift/aws-load-balancer-operator/main/hack/controller/controller-credentials-request.yaml ----- - -. Use the `ccoctl` tool to process the `CredentialsRequest` object of the controller: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name <name> --region=<aws_region> \ - --credentials-requests-dir=<path-to-credrequests-dir> \ - --identity-provider-arn <oidc-arn> ----- - -. Apply the secrets to your cluster: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Verify the credentials secret has been created for use by the controller: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get secret aws-load-balancer-controller-manual-cluster --template='{{index .data "credentials"}}' | base64 -d ----- -+ -.Example output ----- -[default] - sts_regional_endpoints = regional - role_arn = arn:aws:iam::999999999999:role/aws-load-balancer-operator-aws-load-balancer-controller - web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token ----- - -. Create the `AWSLoadBalancerController` resource YAML file, for example, `sample-aws-lb-manual-creds.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController <1> -metadata: - name: cluster <2> -spec: - credentials: - name: <secret-name> <3> ----- -<1> Defines the `AWSLoadBalancerController` resource. -<2> Defines the AWS Load Balancer Controller instance name. This instance name gets added as a suffix to all related resources. -<3> Specifies the secret name containing AWS credentials that the controller uses. - - diff --git a/modules/configuring-albo-on-sts-cluster.adoc b/modules/configuring-albo-on-sts-cluster.adoc deleted file mode 100644 index fc9d42a54f85..000000000000 --- a/modules/configuring-albo-on-sts-cluster.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// * networking/installing-albo-sts-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-installing-albo-on-sts-cluster_{context}"] -= Configuring AWS Load Balancer Operator on Security Token Service cluster by using managed `CredentialsRequest` objects - -.Prerequisites - -* You must extract and prepare the `ccoctl` binary. - -.Procedure - -. The AWS Load Balancer Operator creates the `CredentialsRequest` object in the `openshift-cloud-credential-operator` namespace for each `AWSLoadBalancerController` custom resource (CR). You can extract and save the created `CredentialsRequest` object in a directory by running the following command: -+ -[source,terminal] ----- -$ oc get credentialsrequest -n openshift-cloud-credential-operator \ - aws-load-balancer-controller-<cr-name> -o yaml > <path-to-credrequests-dir>/cr.yaml <1> ----- -<1> The `aws-load-balancer-controller-<cr-name>` parameter specifies the credential request name created by the AWS Load Balancer Operator. The `cr-name` specifies the name of the AWS Load Balancer Controller instance. - -. 
Use the `ccoctl` tool to process all `CredentialsRequest` objects in the `credrequests` directory by running the following command: -+ -[source,terminal] ----- -$ ccoctl aws create-iam-roles \ - --name <name> --region=<aws_region> \ - --credentials-requests-dir=<path-to-credrequests-dir> \ - --identity-provider-arn <oidc-arn> ----- - -. Apply the secrets generated in manifests directory to your cluster, by running the following command: -+ -[source,terminal] ----- -$ ls manifests/*-credentials.yaml | xargs -I{} oc apply -f {} ----- - -. Verify that the `aws-load-balancer-controller` pod is created: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get pods -NAME READY STATUS RESTARTS AGE -aws-load-balancer-controller-cluster-9b766d6-gg82c 1/1 Running 0 137m -aws-load-balancer-operator-controller-manager-b55ff68cc-85jzg 2/2 Running 0 3h26m ----- diff --git a/modules/configuring-cluster-monitoring.adoc b/modules/configuring-cluster-monitoring.adoc deleted file mode 100644 index 03fb61e3e452..000000000000 --- a/modules/configuring-cluster-monitoring.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc - -:_content-type: PROCEDURE -[id="configuring-cluster-monitoring_{context}"] -= Configuring cluster monitoring - -[role="_abstract"] -You can increase the storage capacity for the Prometheus component in the cluster monitoring stack. - -.Procedure - -To increase the storage capacity for Prometheus: - -. Create a YAML configuration file, `cluster-monitoring-config.yaml`. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -data: - config.yaml: | - prometheusK8s: - retention: {{PROMETHEUS_RETENTION_PERIOD}} <1> - nodeSelector: - node-role.kubernetes.io/infra: "" - volumeClaimTemplate: - spec: - storageClassName: {{STORAGE_CLASS}} <2> - resources: - requests: - storage: {{PROMETHEUS_STORAGE_SIZE}} <3> - alertmanagerMain: - nodeSelector: - node-role.kubernetes.io/infra: "" - volumeClaimTemplate: - spec: - storageClassName: {{STORAGE_CLASS}} <2> - resources: - requests: - storage: {{ALERTMANAGER_STORAGE_SIZE}} <4> -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring ----- -<1> A typical value is `PROMETHEUS_RETENTION_PERIOD=15d`. Units are measured in -time using one of these suffixes: s, m, h, d. -<2> The storage class for your cluster. -<3> A typical value is `PROMETHEUS_STORAGE_SIZE=2000Gi`. Storage values can be a -plain integer or as a fixed-point integer using one of these suffixes: E, P, T, -G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. -<4> A typical value is `ALERTMANAGER_STORAGE_SIZE=20Gi`. Storage values can be a -plain integer or as a fixed-point integer using one of these suffixes: E, P, T, -G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. - -. Add values for the retention period, storage class, and storage sizes. - -. Save the file. - -. 
Apply the changes by running: -+ -[source,terminal] ----- -$ oc create -f cluster-monitoring-config.yaml ----- diff --git a/modules/configuring-default-seccomp-profile.adoc deleted file mode 100644 index 17b3a5adfa68..000000000000 --- a/modules/configuring-default-seccomp-profile.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * security/seccomp-profiles.adoc - -:_content-type: PROCEDURE - -[id="verifying-default-seccomp-profile_{context}"] -= Verifying the default seccomp profile applied to a pod - -{product-title} ships with a default seccomp profile that is referenced as `runtime/default`. In {product-version}, newly created pods have the Security Context Constraint (SCC) set to `restricted-v2` and the default seccomp profile applies to the pod. - -.Procedure - -. You can verify the Security Context Constraint (SCC) and the default seccomp profile set on a pod by running the following commands: - -.. Verify what pods are running in the namespace: -+ -[source, terminal] ----- -$ oc get pods -n <namespace> ----- -+ -For example, to verify what pods are running in the `workshop` namespace, run the following: -+ -[source, terminal] ----- -$ oc get pods -n workshop ----- -+ -.Example output -+ -[source, terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-1-4xkwf 1/1 Running 0 2m17s -parksmap-1-deploy 0/1 Completed 0 2m22s ----- -+ -.. Inspect the pods: -+ -[source, terminal] ----- -$ oc get pod parksmap-1-4xkwf -n workshop -o yaml ----- -+ -.Example output -+ -[source, terminal] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/network-status: |- - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.18" - ], - "default": true, - "dns": {} - }] - k8s.v1.cni.cncf.io/network-status: |- - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.18" - ], - "default": true, - "dns": {} - }] - openshift.io/deployment-config.latest-version: "1" - openshift.io/deployment-config.name: parksmap - openshift.io/deployment.name: parksmap-1 - openshift.io/generated-by: OpenShiftWebConsole - openshift.io/scc: restricted-v2 <1> - seccomp.security.alpha.kubernetes.io/pod: runtime/default <2> ----- -<1> The `restricted-v2` SCC is added by default if your workload does not have access to a different SCC. -<2> Newly created pods in {product-version} have the seccomp profile configured to `runtime/default` as mandated by the SCC. - -[id="upgraded_cluster_{context}"] -== Upgraded cluster - -In clusters upgraded to {product-version}, all authenticated users have access to the `restricted` and `restricted-v2` SCCs. - -For example, a workload that was admitted by the `restricted` SCC on a {product-title} v4.10 cluster might be admitted by `restricted-v2` after the upgrade, because `restricted-v2` is the more restrictive of the two SCCs. -[NOTE] -==== -The workload must be able to run with `restricted-v2`. -==== - -Conversely, a workload that requires `privilegeEscalation: true` continues to have the `restricted` SCC available to any authenticated user, because `restricted-v2` does not allow `privilegeEscalation`. - -[id="newly_installed_{context}"] -== Newly installed cluster - -For newly installed {product-title} 4.11 or later clusters, `restricted-v2` replaces the `restricted` SCC as the SCC that is available to any authenticated user.
A workload with `privilegeEscalation: true` is not admitted into the cluster, because `restricted-v2` is the only SCC available to authenticated users by default. - -The `privilegeEscalation` feature is allowed by `restricted` but not by `restricted-v2`. In general, `restricted-v2` denies more features than the `restricted` SCC allowed. - -For a workload with `privilegeEscalation: true` to be admitted into a newly installed {product-title} 4.11 or later cluster, you must give the service account that runs the workload access to the `restricted` SCC, or to any other SCC that can admit the workload, by using a role binding. To do so, run the following command: - -[source, terminal] ----- -$ oc -n <workload-namespace> adm policy add-scc-to-user <scc-name> -z <serviceaccount_name> ----- - -In {product-title} {product-version}, the ability to add the pod annotations `seccomp.security.alpha.kubernetes.io/pod: runtime/default` and `container.seccomp.security.alpha.kubernetes.io/<container_name>: runtime/default` is deprecated. diff --git a/modules/configuring-dynamic-admission.adoc deleted file mode 100644 index 9dccc271ee65..000000000000 --- a/modules/configuring-dynamic-admission.adoc +++ /dev/null @@ -1,386 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/admission-plug-ins.adoc - -:_content-type: PROCEDURE -[id="configuring-dynamic-admission_{context}"] -= Configuring dynamic admission - -This procedure outlines high-level steps to configure dynamic admission. The functionality of the admission chain is extended by configuring a webhook admission plugin to call out to a webhook server. - -The webhook server is also configured as an aggregated API server. This allows other {product-title} components to communicate with the webhook using internal credentials and facilitates testing using the `oc` command. Additionally, this enables role-based access control (RBAC) into the webhook and prevents token information from other API servers from being disclosed to the webhook. - -.Prerequisites - -* An {product-title} account with cluster administrator access. -* The {product-title} CLI (`oc`) installed. -* A published webhook server container image. - -.Procedure - -. Build a webhook server container image and make it available to the cluster using an image registry. - -. Create a local CA key and certificate and use them to sign the webhook server's certificate signing request (CSR). - -. Create a new project for webhook resources: -+ -[source,terminal] ----- -$ oc new-project my-webhook-namespace <1> ----- -<1> Note that the webhook server might expect a specific name. - -.
Define RBAC rules for the aggregated API service in a file called `rbac.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: List -items: - -- apiVersion: rbac.authorization.k8s.io/v1 <1> - kind: ClusterRoleBinding - metadata: - name: auth-delegator-my-webhook-namespace - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:auth-delegator - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <2> - kind: ClusterRole - metadata: - annotations: - name: system:openshift:online:my-webhook-server - rules: - - apiGroups: - - online.openshift.io - resources: - - namespacereservations <3> - verbs: - - get - - list - - watch - -- apiVersion: rbac.authorization.k8s.io/v1 <4> - kind: ClusterRole - metadata: - name: system:openshift:online:my-webhook-requester - rules: - - apiGroups: - - admission.online.openshift.io - resources: - - namespacereservations <5> - verbs: - - create - -- apiVersion: rbac.authorization.k8s.io/v1 <6> - kind: ClusterRoleBinding - metadata: - name: my-webhook-server-my-webhook-namespace - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:openshift:online:my-webhook-server - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <7> - kind: RoleBinding - metadata: - namespace: kube-system - name: extension-server-authentication-reader-my-webhook-namespace - roleRef: - kind: Role - apiGroup: rbac.authorization.k8s.io - name: extension-apiserver-authentication-reader - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server - -- apiVersion: rbac.authorization.k8s.io/v1 <8> - kind: ClusterRole - metadata: - name: my-cluster-role - rules: - - apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch - -- apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: my-cluster-role - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: my-cluster-role - subjects: - - kind: ServiceAccount - namespace: my-webhook-namespace - name: server ----- -<1> Delegates authentication and authorization to the webhook server API. -<2> Allows the webhook server to access cluster resources. -<3> Points to resources. This example points to the `namespacereservations` resource. -<4> Enables the aggregated API server to create admission reviews. -<5> Points to resources. This example points to the `namespacereservations` resource. -<6> Enables the webhook server to access cluster resources. -<7> Role binding to read the configuration for terminating authentication. -<8> Default cluster role and cluster role bindings for an aggregated API server. - -. Apply those RBAC rules to the cluster: -+ -[source,terminal] ----- -$ oc auth reconcile -f rbac.yaml ----- - -. 
Create a YAML file called `webhook-daemonset.yaml` that is used to deploy a webhook as a daemon set server in a namespace: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - namespace: my-webhook-namespace - name: server - labels: - server: "true" -spec: - selector: - matchLabels: - server: "true" - template: - metadata: - name: server - labels: - server: "true" - spec: - serviceAccountName: server - containers: - - name: my-webhook-container <1> - image: <image_registry_username>/<image_path>:<tag> <2> - imagePullPolicy: IfNotPresent - command: - - <container_commands> <3> - ports: - - containerPort: 8443 <4> - volumeMounts: - - mountPath: /var/serving-cert - name: serving-cert - readinessProbe: - httpGet: - path: /healthz - port: 8443 <5> - scheme: HTTPS - volumes: - - name: serving-cert - secret: - defaultMode: 420 - secretName: server-serving-cert ----- -<1> Note that the webhook server might expect a specific container name. -<2> Points to a webhook server container image. Replace `<image_registry_username>/<image_path>:<tag>` with the appropriate value. -<3> Specifies webhook container run commands. Replace `<container_commands>` with the appropriate value. -<4> Defines the target port within pods. This example uses port 8443. -<5> Specifies the port used by the readiness probe. This example uses port 8443. - -. Deploy the daemon set: -+ -[source,terminal] ----- -$ oc apply -f webhook-daemonset.yaml ----- - -. Define a secret for the service serving certificate signer, within a YAML file called `webhook-secret.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - namespace: my-webhook-namespace - name: server-serving-cert -type: kubernetes.io/tls -data: - tls.crt: <server_certificate> <1> - tls.key: <server_key> <2> ----- -<1> References the signed webhook server certificate. Replace `<server_certificate>` with the appropriate certificate in base64 format. -<2> References the signed webhook server key. Replace `<server_key>` with the appropriate key in base64 format. - -. Create the secret: -+ -[source,terminal] ----- -$ oc apply -f webhook-secret.yaml ----- - -. Define a service account and service, within a YAML file called `webhook-service.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: List -items: - -- apiVersion: v1 - kind: ServiceAccount - metadata: - namespace: my-webhook-namespace - name: server - -- apiVersion: v1 - kind: Service - metadata: - namespace: my-webhook-namespace - name: server - annotations: - service.beta.openshift.io/serving-cert-secret-name: server-serving-cert - spec: - selector: - server: "true" - ports: - - port: 443 <1> - targetPort: 8443 <2> ----- -<1> Defines the port that the service listens on. This example uses port 443. -<2> Defines the target port within pods that the service forwards connections to. This example uses port 8443. - -. Expose the webhook server within the cluster: -+ -[source,terminal] ----- -$ oc apply -f webhook-service.yaml ----- - -. 
Define a custom resource definition for the webhook server, in a file called `webhook-crd.yaml`: -+ -[source,yaml] ----- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: namespacereservations.online.openshift.io <1> -spec: - group: online.openshift.io <2> - version: v1alpha1 <3> - scope: Cluster <4> - names: - plural: namespacereservations <5> - singular: namespacereservation <6> - kind: NamespaceReservation <7> ----- -<1> Reflects `CustomResourceDefinition` `spec` values and is in the format `<plural>.<group>`. This example uses the `namespacereservations` resource. -<2> REST API group name. -<3> REST API version name. -<4> Accepted values are `Namespaced` or `Cluster`. -<5> Plural name to be included in URL. -<6> Alias seen in `oc` output. -<7> The reference for resource manifests. - -. Apply the custom resource definition: -+ -[source,terminal] ----- -$ oc apply -f webhook-crd.yaml ----- - -. Configure the webhook server also as an aggregated API server, within a file called `webhook-api-service.yaml`: -+ -[source,yaml] ----- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.admission.online.openshift.io -spec: - caBundle: <ca_signing_certificate> <1> - group: admission.online.openshift.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: server - namespace: my-webhook-namespace - version: v1beta1 ----- -<1> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `<ca_signing_certificate>` with the appropriate certificate in base64 format. - -. Deploy the aggregated API service: -+ -[source,terminal] ----- -$ oc apply -f webhook-api-service.yaml ----- - -. Define the webhook admission plugin configuration within a file called `webhook-config.yaml`. This example uses the validating admission plugin: -+ -[source,yaml] ----- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: namespacereservations.admission.online.openshift.io <1> -webhooks: -- name: namespacereservations.admission.online.openshift.io <2> - clientConfig: - service: <3> - namespace: default - name: kubernetes - path: /apis/admission.online.openshift.io/v1beta1/namespacereservations <4> - caBundle: <ca_signing_certificate> <5> - rules: - - operations: - - CREATE - apiGroups: - - project.openshift.io - apiVersions: - - "*" - resources: - - projectrequests - - operations: - - CREATE - apiGroups: - - "" - apiVersions: - - "*" - resources: - - namespaces - failurePolicy: Fail ----- -<1> Name for the `ValidatingWebhookConfiguration` object. This example uses the `namespacereservations` resource. -<2> Name of the webhook to call. This example uses the `namespacereservations` resource. -<3> Enables access to the webhook server through the aggregated API. -<4> The webhook URL used for admission requests. This example uses the `namespacereservation` resource. -<5> A PEM-encoded CA certificate that signs the server certificate that is used by the webhook server. Replace `<ca_signing_certificate>` with the appropriate certificate in base64 format. - -. Deploy the webhook: -+ -[source,terminal] ----- -$ oc apply -f webhook-config.yaml ----- - -. Verify that the webhook is functioning as expected. For example, if you have configured dynamic admission to reserve specific namespaces, confirm that requests to create those namespaces are rejected and that requests to create non-reserved namespaces succeed. 
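A minimal sketch of that verification follows. The project names are hypothetical placeholders, and the sketch assumes that the validating webhook was configured to reserve specific namespace names:

[source,terminal]
----
$ oc new-project <reserved_project_name> <1>
$ oc new-project <unreserved_project_name> <2>
----
<1> This request is expected to be rejected by the validating webhook.
<2> This request is expected to succeed because the name is not reserved.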
diff --git a/modules/configuring-egress-proxy-edns-operator.adoc b/modules/configuring-egress-proxy-edns-operator.adoc deleted file mode 100644 index 98b318759b6b..000000000000 --- a/modules/configuring-egress-proxy-edns-operator.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-configuring-cluster-wide-egress-proxy.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-cluster-wide-proxy_{context}"] -= Configuring the External DNS Operator to trust the certificate authority of the cluster-wide proxy - -You can configure the External DNS Operator to trust the certificate authority of the cluster-wide proxy. - -.Procedure - -. Create the config map to contain the CA bundle in the `external-dns-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator create configmap trusted-ca ----- - -. To inject the trusted CA bundle into the config map, add the `config.openshift.io/inject-trusted-cabundle=true` label to the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true ----- - -. Update the subscription of the External DNS Operator by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator patch subscription external-dns-operator --type='json' -p='[{"op": "add", "path": "/spec/config", "value":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}]}}]' ----- - -.Verification - -* After the deployment of the External DNS Operator is completed, verify that the trusted CA environment variable is added to the `external-dns-operator` deployment by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator exec deploy/external-dns-operator -c external-dns-operator -- printenv TRUSTED_CA_CONFIGMAP_NAME ----- -+ -.Example output -[source,terminal] ----- -trusted-ca ----- \ No newline at end of file diff --git a/modules/configuring-egress-proxy.adoc b/modules/configuring-egress-proxy.adoc deleted file mode 100644 index b0b9f0f678ce..000000000000 --- a/modules/configuring-egress-proxy.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/configure-egress-proxy-aws-load-balancer-operator.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-cluster-wide-proxy_{context}"] -= Configuring the AWS Load Balancer Operator to trust the certificate authority of the cluster-wide proxy - -. Create the config map to contain the certificate authority (CA) bundle in the `aws-load-balancer-operator` namespace and inject a CA bundle that is trusted by {product-title} into a config map by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator create configmap trusted-ca ----- - -. To inject the trusted CA bundle into the config map, add the `config.openshift.io/inject-trusted-cabundle=true` label to the config map by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator label cm trusted-ca config.openshift.io/inject-trusted-cabundle=true ----- - -. 
Update the subscription of the AWS Load Balancer Operator to access the config map in the deployment of the AWS Load Balancer Operator by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator patch subscription aws-load-balancer-operator --type='merge' -p '{"spec":{"config":{"env":[{"name":"TRUSTED_CA_CONFIGMAP_NAME","value":"trusted-ca"}],"volumes":[{"name":"trusted-ca","configMap":{"name":"trusted-ca"}}],"volumeMounts":[{"name":"trusted-ca","mountPath":"/etc/pki/tls/certs/albo-tls-ca-bundle.crt","subPath":"ca-bundle.crt"}]}}}' ----- - -. After the deployment of the AWS Load Balancer Operator is completed, verify that the CA bundle is added to the `aws-load-balancer-operator-controller-manager` deployment by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator exec deploy/aws-load-balancer-operator-controller-manager -c manager -- bash -c "ls -l /etc/pki/tls/certs/albo-tls-ca-bundle.crt; printenv TRUSTED_CA_CONFIGMAP_NAME" ----- -+ -.Example output -[source,terminal] ----- --rw-r--r--. 1 root 1000690000 5875 Jan 11 12:25 /etc/pki/tls/certs/albo-tls-ca-bundle.crt -trusted-ca ----- - -. Optional: Restart deployment of the AWS Load Balancer Operator every time the config map changes by running the following command: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator rollout restart deployment/aws-load-balancer-operator-controller-manager ----- \ No newline at end of file diff --git a/modules/configuring-firewall.adoc b/modules/configuring-firewall.adoc deleted file mode 100644 index 115396f0cdf3..000000000000 --- a/modules/configuring-firewall.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/configuring-firewall.adoc - -:_content-type: PROCEDURE -[id="configuring-firewall_{context}"] -= Configuring your firewall for {product-title} - -Before you install {product-title}, you must configure your firewall to grant access to the sites that {product-title} requires. - -There are no special configuration considerations for services running on only controller nodes compared to worker nodes. - -[NOTE] -==== -If your environment has a dedicated load balancer in front of your {product-title} cluster, review the allowlists between your firewall and load balancer to prevent unwanted network restrictions to your cluster. -==== - -.Procedure - -. Allowlist the following registry URLs: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`registry.redhat.io` -|443, 80 -|Provides core container images - -|`access.redhat.com` -|443, 80 -|Provides core container images - -|`quay.io` -|443, 80 -|Provides core container images - -|`cdn.quay.io` -|443, 80 -|Provides core container images - -|`cdn01.quay.io` -|443, 80 -|Provides core container images - -|`cdn02.quay.io` -|443, 80 -|Provides core container images - -|`cdn03.quay.io` -|443, 80 -|Provides core container images - -|`sso.redhat.com` -|443, 80 -|The `https://console.redhat.com/openshift` site uses authentication from `sso.redhat.com` - -|=== -+ -You can use the wildcards `\*.quay.io` and `*.openshiftapps.com` instead of `cdn0[1-3].quay.io` in your allowlist. When you add a site, such as `quay.io`, to your allowlist, do not add a wildcard entry, such as `*.quay.io`, to your denylist. In most cases, image registries use a content delivery network (CDN) to serve images. 
If a firewall blocks access, image downloads are denied when the initial download request redirects to a hostname such as `cdn01.quay.io`. - -. Allowlist any site that provides resources for a language or framework that your builds require. - -. If you do not disable Telemetry, you must grant access to the following URLs to access Red Hat Insights: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`cert-api.access.redhat.com` -|443, 80 -|Required for Telemetry - -|`api.access.redhat.com` -|443, 80 -|Required for Telemetry - -|`infogw.api.openshift.com` -|443, 80 -|Required for Telemetry - -|`console.redhat.com/api/ingress`, `cloud.redhat.com/api/ingress` -|443, 80 -|Required for Telemetry and for `insights-operator` -|=== - -. If you use Alibaba Cloud, Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) to host your cluster, you must grant access to the URLs that provide the cloud provider API and DNS for that cloud: -+ -[cols="2a,8a,2a,8a",options="header"] -|=== -|Cloud |URL | Port |Function - -|Alibaba -|`*.aliyuncs.com` -|443, 80 -|Required to access Alibaba Cloud services and resources. Review the link:https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/sdk/endpoints/endpoints_config.go?spm=a2c4g.11186623.0.0.47875873ciGnC8&file=endpoints_config.go[Alibaba endpoints_config.go file] to determine the exact endpoints to allow for the regions that you use. - -.15+|AWS -|`*.amazonaws.com` - -Alternatively, if you choose to not use a wildcard for AWS APIs, you must allowlist the following URLs: -|443, 80 -|Required to access AWS services and resources. Review the link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS Service Endpoints] in the AWS documentation to determine the exact endpoints to allow for the regions that you use. - -|`ec2.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`events.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`iam.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`route53.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`s3.dualstack.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`sts.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`sts.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`tagging.us-east-1.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. This endpoint is always `us-east-1`, regardless of the region the cluster is deployed in. - -|`ec2.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`elasticloadbalancing.<aws_region>.amazonaws.com` -|443 -|Used to install and manage clusters in an AWS environment. - -|`servicequotas.<aws_region>.amazonaws.com` -|443, 80 -|Required. Used to confirm quotas for deploying the service. - -|`tagging.<aws_region>.amazonaws.com` -|443, 80 -|Allows the assignment of metadata about AWS resources in the form of tags. - -.2+|GCP -|`*.googleapis.com` -|443, 80 -|Required to access GCP services and resources. 
Review link:https://cloud.google.com/endpoints/[Cloud Endpoints] in the GCP documentation to determine the endpoints to allow for your APIs. - -|`accounts.google.com` -|443, 80 -| Required to access your GCP account. - -.4+|Azure -|`management.azure.com` -|443, 80 -|Required to access Azure services and resources. Review the link:https://docs.microsoft.com/en-us/rest/api/azure/[Azure REST API reference] in the Azure documentation to determine the endpoints to allow for your APIs. - -|`*.blob.core.windows.net` -|443, 80 -|Required to download Ignition files. - -|`login.microsoftonline.com` -|443, 80 -|Required to access Azure services and resources. Review the link:https://docs.microsoft.com/en-us/rest/api/azure/[Azure REST API reference] in the Azure documentation to determine the endpoints to allow for your APIs. - -|=== - -. Allowlist the following URLs: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`mirror.openshift.com` -|443, 80 -|Required to access mirrored installation content and images. This site is also a source of release image signatures, although the Cluster Version Operator needs only a single functioning source. - -|`storage.googleapis.com/openshift-release` -|443, 80 -|A source of release image signatures, although the Cluster Version Operator needs only a single functioning source. - -|`*.apps.<cluster_name>.<base_domain>` -|443, 80 -|Required to access the default cluster routes unless you set an ingress wildcard during installation. - -|`quayio-production-s3.s3.amazonaws.com` -|443, 80 -|Required to access Quay image content in AWS. - -|`api.openshift.com` -|443, 80 -|Required both for your cluster token and to check if updates are available for the cluster. - -|`rhcos.mirror.openshift.com` -|443, 80 -|Required to download {op-system-first} images. - -|`console.redhat.com/openshift` -|443, 80 -|Required for your cluster token. - -// |`registry.access.redhat.com` -// |443, 80 -// |Required for `odo` CLI. - -|`sso.redhat.com` -|443, 80 -|The `https://console.redhat.com/openshift` site uses authentication from `sso.redhat.com` - -|=== -Operators require route access to perform health checks. Specifically, the -authentication and web console Operators connect to two routes to verify that -the routes work. If you are the cluster administrator and do not want to allow -`*.apps.<cluster_name>.<base_domain>`, then allow these routes: -+ -* `oauth-openshift.apps.<cluster_name>.<base_domain>` -* `console-openshift-console.apps.<cluster_name>.<base_domain>`, or the hostname -that is specified in the `spec.route.hostname` field of the -`consoles.operator/cluster` object if the field is not empty. - -. Allowlist the following URLs for optional third-party content: -+ -[cols="3,2,4",options="header"] -|=== -|URL | Port | Function - -|`registry.connect.redhat.com` -|443, 80 -|Required for all third-party images and certified operators. - -|`rhc4tp-prod-z8cxf-image-registry-us-east-1-evenkyleffocxqvofrk.s3.dualstack.us-east-1.amazonaws.com` -|443, 80 -|Provides access to container images hosted on `registry.connect.redhat.com` - -|`oso-rhc4tp-docker-registry.s3-us-west-2.amazonaws.com` -|443, 80 -|Required for Sonatype Nexus, F5 Big IP operators. -|=== -+ -. If you use a default Red Hat Network Time Protocol (NTP) server allow the following URLs: -* `1.rhel.pool.ntp.org` -* `2.rhel.pool.ntp.org` -* `3.rhel.pool.ntp.org` - -[NOTE] -==== -If you do not use a default Red Hat NTP server, verify the NTP server for your platform and allow it in your firewall. 
-==== diff --git a/modules/configuring-haproxy-interval.adoc b/modules/configuring-haproxy-interval.adoc deleted file mode 100644 index f2cc8051051f..000000000000 --- a/modules/configuring-haproxy-interval.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-haproxy-interval_{context}"] -= Configuring HAProxy reload interval - -When you update a route or an endpoint associated with a route, {product-title} router updates the configuration for HAProxy. Then, HAProxy reloads the updated configuration for those changes to take effect. When HAProxy reloads, it generates a new process that handles new connections using the updated configuration. - -HAProxy keeps the old process running to handle existing connections until those connections are all closed. When old processes have long-lived connections, these processes can accumulate and consume resources. - -The default minimum HAProxy reload interval is five seconds. You can configure an Ingress Controller using its `spec.tuningOptions.reloadInterval` field to set a longer minimum reload interval. - -[WARNING] -==== -Setting a large value for the minimum HAProxy reload interval can cause latency in observing updates to routes and their endpoints. To lessen the risk, avoid setting a value larger than the tolerable latency for updates. -==== - -.Procedure - -* Change the minimum HAProxy reload interval of the default Ingress Controller to 15 seconds by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/default --type=merge --patch='{"spec":{"tuningOptions":{"reloadInterval":"15s"}}}' ----- diff --git a/modules/configuring-hpa-based-on-application-metrics.adoc b/modules/configuring-hpa-based-on-application-metrics.adoc deleted file mode 100644 index e907e6d862e4..000000000000 --- a/modules/configuring-hpa-based-on-application-metrics.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/configuring-hpa-for-an-application.adoc - -[id="configuring-hpa-based-on-application-metrics_{context}"] -= Configuring HPA based on application metrics - -If you configure an application to export metrics, you can set up Horizontal Pod Autoscaling (HPA) based on these metrics. - -.Procedure - -. Create a YAML file for your configuration. In this example, it is called `deploy.yaml`. - -. Add configuration for deploying the horizontal pod autoscaler for the application. This example configures and deploys HPA based on the application `http_requests_per_second` metric for the sample application configured in the "Application monitoring" section: -+ -[source,yaml] ----- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: example-app-scaler - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: example-app <1> - minReplicas: 3 <2> - maxReplicas: 10 <3> - metrics: - - type: Pods - pods: - metricName: http_requests_per_second <4> - targetAverageValue: 10 <5> ----- -<1> `name` specifies the application. -<2> `minReplicas` specifies the minimum number of replicas for the HPA to maintain for the application. -<3> `maxReplicas` specifies the maximum number of replicas for the HPA to maintain for the application. 
-<4> `metricName` specifies the metric upon which HPA is based. Here, specify the metric you previously exposed for your application. -<5> `targetAverageValue` specifies the value of the metric for the HPA to try to maintain by increasing or decreasing the number of replicas. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f deploy.yaml ----- diff --git a/modules/configuring-huge-pages.adoc b/modules/configuring-huge-pages.adoc deleted file mode 100644 index 91fa8949c382..000000000000 --- a/modules/configuring-huge-pages.adoc +++ /dev/null @@ -1,166 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="configuring-huge-pages_{context}"] -= Configuring huge pages at boot time - -Nodes must pre-allocate huge pages used in an {product-title} cluster. There are two ways of reserving huge pages: at boot time and at run time. Reserving at boot time increases the possibility of success because the memory has not yet been significantly fragmented. The Node Tuning Operator currently supports boot time allocation of huge pages on specific nodes. - -.Procedure - -To minimize node reboots, the order of the steps below needs to be followed: - -. Label all nodes that need the same huge pages setting by a label. -+ -[source,terminal] ----- -$ oc label node <node_using_hugepages> node-role.kubernetes.io/worker-hp= ----- - -. Create a file with the following content and name it `hugepages-tuned-boottime.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: hugepages <1> - namespace: openshift-cluster-node-tuning-operator -spec: - profile: <2> - - data: | - [main] - summary=Boot time configuration for hugepages - include=openshift-node - [bootloader] - cmdline_openshift_node_hugepages=hugepagesz=2M hugepages=50 <3> - name: openshift-node-hugepages - - recommend: - - machineConfigLabels: <4> - machineconfiguration.openshift.io/role: "worker-hp" - priority: 30 - profile: openshift-node-hugepages ----- -<1> Set the `name` of the Tuned resource to `hugepages`. -<2> Set the `profile` section to allocate huge pages. -<3> Note the order of parameters is important as some platforms support huge pages of various sizes. -<4> Enable machine config pool based matching. - -. Create the Tuned `hugepages` object -+ -[source,terminal] ----- -$ oc create -f hugepages-tuned-boottime.yaml ----- - -. Create a file with the following content and name it `hugepages-mcp.yaml`: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-hp - labels: - worker-hp: "" -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker-hp]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-hp: "" ----- - -. Create the machine config pool: -+ -[source,terminal] ----- -$ oc create -f hugepages-mcp.yaml ----- - -Given enough non-fragmented memory, all the nodes in the `worker-hp` machine config pool should now have 50 2Mi huge pages allocated. - -[source,terminal] ----- -$ oc get node <node_using_hugepages> -o jsonpath="{.status.allocatable.hugepages-2Mi}" -100Mi ----- - -ifndef::openshift-origin[] -[WARNING] -==== -The TuneD bootloader plugin is currently supported on {op-system-first} 8.x worker nodes. 
For {op-system-base-full} 7.x worker nodes, the TuneD bootloader plugin is currently not supported. -==== -endif::openshift-origin[] - -//// -For run-time allocation, kubelet changes are needed, see BZ1819719. -== At run time - -.Procedure - -. Label the node so that the Node Tuning Operator knows on which node to apply the tuned profile, which describes how many huge pages should be allocated: -+ -[source,terminal] ----- -$ oc label node <node_using_hugepages> hugepages=true ----- - -. Create a file with the following content and name it `hugepages-tuned-runtime.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: hugepages <1> - namespace: openshift-cluster-node-tuning-operator -spec: - profile: <2> - - data: | - [main] - summary=Run time configuration for hugepages - include=openshift-node - [vm] - transparent_hugepages=never - [sysfs] - /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages=50 - name: node-hugepages - - recommend: - - match: <3> - - label: hugepages - priority: 30 - profile: node-hugepages ----- -<1> Set the `name` of the Tuned resource to `hugepages`. -<2> Set the `profile` section to allocate huge pages. -<3> Set the `match` section to associate the profile to nodes with the `hugepages` label. - -. Create the custom `hugepages` tuned profile by using the `hugepages-tuned-runtime.yaml` file: -+ -[source,terminal] ----- -$ oc create -f hugepages-tuned-runtime.yaml ----- - -. After creating the profile, the Operator applies the new profile to the correct -node and allocates huge pages. Check the logs of a tuned pod on a node using -huge pages to verify: -+ -[source,terminal] ----- -$ oc logs <tuned_pod_on_node_using_hugepages> \ - -n openshift-cluster-node-tuning-operator | grep 'applied$' | tail -n1 ----- -+ ----- -2019-08-08 07:20:41,286 INFO tuned.daemon.daemon: static tuning from profile 'node-hugepages' applied ----- - -//// diff --git a/modules/configuring-hybrid-ovnkubernetes.adoc b/modules/configuring-hybrid-ovnkubernetes.adoc deleted file mode 100644 index fd0a14ac569f..000000000000 --- a/modules/configuring-hybrid-ovnkubernetes.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc -// * networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.adoc - -:_content-type: PROCEDURE -[id="configuring-hybrid-ovnkubernetes_{context}"] -= Configuring hybrid networking with OVN-Kubernetes - -You can configure your cluster to use hybrid networking with OVN-Kubernetes. This allows a hybrid cluster that supports different node networking configurations. For example, this is necessary to run both Linux and Windows nodes in a cluster. - -[IMPORTANT] -==== -You must configure hybrid networking with OVN-Kubernetes during the installation of your cluster. You cannot switch to hybrid networking after the installation process. -==== - -.Prerequisites - -* You defined `OVNKubernetes` for the `networking.networkType` parameter in the `install-config.yaml` file. See the installation documentation for configuring {product-title} network customizations on your chosen cloud provider for more information. - -.Procedure - -. 
Change to the directory that contains the installation program and create the manifests: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> ----- -+ --- -where: - -`<installation_directory>`:: Specifies the name of the directory that contains the `install-config.yaml` file for your cluster. --- - -. Create a stub manifest file for the advanced network configuration that is named `cluster-network-03-config.yml` in the `<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -$ cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: -EOF ----- -+ --- -where: - -`<installation_directory>`:: Specifies the directory name that contains the -`manifests/` directory for your cluster. --- - -. Open the `cluster-network-03-config.yml` file in an editor and configure OVN-Kubernetes with hybrid networking, such as in the following example: -+ --- -.Specify a hybrid networking configuration -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - hybridOverlayConfig: - hybridClusterNetwork: <1> - - cidr: 10.132.0.0/14 - hostPrefix: 23 - hybridOverlayVXLANPort: 9898 <2> ----- -<1> Specify the CIDR configuration used for nodes on the additional overlay network. The `hybridClusterNetwork` CIDR cannot overlap with the `clusterNetwork` CIDR. -<2> Specify a custom VXLAN port for the additional overlay network. This is required for running Windows nodes in a cluster installed on vSphere, and must not be configured for any other cloud provider. The custom port can be any open port excluding the default `4789` port. For more information on this requirement, see the Microsoft documentation on link:https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/common-problems#pod-to-pod-connectivity-between-hosts-is-broken-on-my-kubernetes-cluster-running-on-vsphere[Pod-to-pod connectivity between hosts is broken]. --- -+ -[NOTE] -==== -Windows Server Long-Term Servicing Channel (LTSC): Windows Server 2019 is not supported on clusters with a custom `hybridOverlayVXLANPort` value because this Windows server version does not support selecting a custom VXLAN port. -==== - -. Save the `cluster-network-03-config.yml` file and quit the text editor. -. Optional: Back up the `manifests/cluster-network-03-config.yml` file. The -installation program deletes the `manifests/` directory when creating the -cluster. diff --git a/modules/configuring-layer-three-routed-topology.adoc b/modules/configuring-layer-three-routed-topology.adoc deleted file mode 100644 index 9ad6d548be75..000000000000 --- a/modules/configuring-layer-three-routed-topology.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-layer-three-routed-topology_{context}"] -= Configuration for a routed topology - -The routed (layer 3) topology networks are a simplified topology for the cluster default network without egress or ingress. In this topology, there is one logical switch per node, each with a different subnet, and a router interconnecting all logical switches. - -This configuration can be used for IPv6 and dual-stack deployments. 
- -[NOTE] -==== -* Layer 3 routed topology networks only allow for the transfer of data packets between pods within a cluster. -* Creating a secondary network with an IPv6 subnet or dual-stack subnets fails on a single-stack {product-title} cluster. This is a known limitation and will be fixed in a future version of {product-title}. -==== - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a routed secondary network. - -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "ns1-l3-network", - "type": "ovn-k8s-cni-overlay", - "topology":"layer3", - "subnets": "10.128.0.0/16/24", - "mtu": 1300, - "netAttachDefName": "ns1/l3-network" - } ----- \ No newline at end of file diff --git a/modules/configuring-layer-two-switched-topology.adoc b/modules/configuring-layer-two-switched-topology.adoc deleted file mode 100644 index 90af949198a3..000000000000 --- a/modules/configuring-layer-two-switched-topology.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-layer-two-switched-topology_{context}"] -= Configuration for a switched topology - -The switched (layer 2) topology networks interconnect the workloads through a cluster-wide logical switch. This configuration can be used for IPv6 and dual-stack deployments. - -[NOTE] -==== -Layer 2 switched topology networks only allow for the transfer of data packets between pods within a cluster. -==== - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a switched secondary network. - -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "l2-network", - "type": "ovn-k8s-cni-overlay", - "topology":"layer2", - "subnets": "10.100.200.0/24", - "mtu": 1300, - "netAttachDefName": "ns1/l2-network", - "excludeSubnets": "10.100.200.0/29" - } ----- \ No newline at end of file diff --git a/modules/configuring-localnet-switched-topology.adoc b/modules/configuring-localnet-switched-topology.adoc deleted file mode 100644 index 3db11b0bb4a2..000000000000 --- a/modules/configuring-localnet-switched-topology.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/configuring-secondary-networks.adoc - -:_content-type: REFERENCE -[id="configuration-localnet-switched-topology_{context}"] -= Configuration for a localnet switched topology - -The switched (localnet) topology interconnects the workloads through a cluster-wide logical switch to a physical network. - -The following `NetworkAttachmentDefinition` custom resource definition (CRD) YAML describes the fields needed to configure a localnet secondary network.
- -[source,yaml] ----- - { - "cniVersion": "0.3.1", - "name": "ns1-localnet-network", - "type": "ovn-k8s-cni-overlay", - "topology":"localnet", - "subnets": "202.10.130.112/28", - "vlanID": 33, - "mtu": 1500, - "netAttachDefName": "ns1/localnet-network" - "excludeSubnets": "10.100.200.0/29" - - } ----- \ No newline at end of file diff --git a/modules/configuring-node-pools-for-hcp.adoc b/modules/configuring-node-pools-for-hcp.adoc deleted file mode 100644 index 6827d566ade8..000000000000 --- a/modules/configuring-node-pools-for-hcp.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * updates/updating-hosted-control-planes.adoc -// * hosted_control_planes/hcp-managing.adoc - -:_content-type: PROCEDURE -[id="configuring-node-pools-for-hcp_{context}"] -= Configuring node pools for hosted control planes - -On hosted control planes, you can configure node pools by creating a `MachineConfig` object inside of a config map in the management cluster. - -//.Prerequisites - -//Are any prerequisites needed for this procedure? i.e., does the customer need to perform an update first? - -.Procedure - -. To create a `MachineConfig` object inside of a config map in the management cluster, enter the following information: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: <configmap-name> - namespace: clusters -data: - config: | - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: <machineconfig-name> - spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:... - mode: 420 - overwrite: true - path: ${PATH} <1> ----- -<1> Sets the path on the node where the `MachineConfig` object is stored. - -. After you add the object to the config map, you can apply the config map to the node pool as follows: -+ -[source,yaml] ----- -spec: - config: - - name: ${CONFIGMAP_NAME} ----- - -//.Verification - -// Does the user need to do anything to verify that the procedure was successful? - - - - diff --git a/modules/configuring-ovnk-additional-networks.adoc b/modules/configuring-ovnk-additional-networks.adoc deleted file mode 100644 index fbc930409e57..000000000000 --- a/modules/configuring-ovnk-additional-networks.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuration-ovnk-additional-networks_{context}"] -= Configuration for an OVN-Kubernetes additional network - -The {openshift-networking} OVN-Kubernetes network plugin allows the configuration of secondary network interfaces for pods. To configure secondary network interfaces, you must define the configurations in the `NetworkAttachmentDefinition` custom resource definition (CRD). - -:FeatureName: Configuration for an OVN-Kubernetes additional network -include::snippets/technology-preview.adoc[] - -The following sections provide example configurations for each of the topologies that OVN-Kubernetes currently allows for secondary networks. - -[NOTE] -==== -Networks names must be unique. For example, creating multiple `NetworkAttachmentDefinition` CRDs with different configurations that reference the same network is unsupported. 
-==== \ No newline at end of file diff --git a/modules/configuring-ovs-log-level-permanently.adoc b/modules/configuring-ovs-log-level-permanently.adoc deleted file mode 100644 index d3a6a5ea1154..000000000000 --- a/modules/configuring-ovs-log-level-permanently.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-network-issues.adoc - -:_content-type: PROCEDURE -[id="configuring-ovs-log-level-permanently_{context}"] -= Configuring the Open vSwitch log level permanently - -For long-term changes to the Open vSwitch (OVS) log level, you can change the log level permanently. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a file, such as `99-change-ovs-loglevel.yaml`, with a `MachineConfig` object like the following example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master <1> - name: 99-change-ovs-loglevel -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - dropins: - - contents: | - [Service] - ExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:dbg <2> - ExecReload=-/usr/bin/ovs-appctl vlog/set syslog:dbg - name: 20-ovs-vswitchd-restart.conf - name: ovs-vswitchd.service ----- -<1> After you perform this procedure to configure control plane nodes, repeat the procedure and set the role to `worker` to configure worker nodes. -<2> Set the `syslog:<log_level>` value. Log levels are `off`, `emer`, `err`, `warn`, `info`, or `dbg`. Setting the value to `off` filters out all log messages. - -. Apply the machine config: -+ -[source,terminal] ----- -$ oc apply -f 99-change-ovs-loglevel.yaml ----- - -ifdef::ign-config-version[] -:!ign-config-version: -endif::[] - diff --git a/modules/configuring-ovs-log-level-temp.adoc b/modules/configuring-ovs-log-level-temp.adoc deleted file mode 100644 index a91b89b10c13..000000000000 --- a/modules/configuring-ovs-log-level-temp.adoc +++ /dev/null @@ -1,95 +0,0 @@ -:_content-type: PROCEDURE -[id="configuring-ovs-log-level-temp_{context}"] -= Configuring the Open vSwitch log level temporarily - -For short-term troubleshooting, you can configure the Open vSwitch (OVS) log level temporarily. -The following procedure does not require rebooting the node. -In addition, the configuration change does not persist whenever you reboot the node. - -After you perform this procedure to change the log level, you can receive log messages from the machine config daemon that indicate a content mismatch for the `ovs-vswitchd.service`. -To avoid the log messages, repeat this procedure and set the log level to the original value. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Start a debug pod for a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Set `/host` as the root directory within the debug shell. The debug pod mounts the root file system from the host in `/host` within the pod. By changing the root directory to `/host`, you can run binaries from the host file system: -+ -[source,terminal] ----- -# chroot /host ----- - -. 
View the current syslog level for OVS modules: -+ -[source,terminal] ----- -# ovs-appctl vlog/list ----- -+ -The following example output shows the log level for syslog set to `info`. -+ -.Example output -[source,terminal] ----- - console syslog file - ------- ------ ------ -backtrace OFF INFO INFO -bfd OFF INFO INFO -bond OFF INFO INFO -bridge OFF INFO INFO -bundle OFF INFO INFO -bundles OFF INFO INFO -cfm OFF INFO INFO -collectors OFF INFO INFO -command_line OFF INFO INFO -connmgr OFF INFO INFO -conntrack OFF INFO INFO -conntrack_tp OFF INFO INFO -coverage OFF INFO INFO -ct_dpif OFF INFO INFO -daemon OFF INFO INFO -daemon_unix OFF INFO INFO -dns_resolve OFF INFO INFO -dpdk OFF INFO INFO -... ----- - -. Specify the log level in the `/etc/systemd/system/ovs-vswitchd.service.d/10-ovs-vswitchd-restart.conf` file: -+ -[source,text] ----- -Restart=always -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /var/lib/openvswitch' -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /etc/openvswitch' -ExecStartPre=-/bin/sh -c '/usr/bin/chown -R :$${OVS_USER_ID##*:} /run/openvswitch' -ExecStartPost=-/usr/bin/ovs-appctl vlog/set syslog:dbg -ExecReload=-/usr/bin/ovs-appctl vlog/set syslog:dbg ----- -+ -In the preceding example, the log level is set to `dbg`. -Change the last two lines by setting `syslog:<log_level>` to `off`, `emer`, `err`, `warn`, `info`, or `dbg`. The `off` log level filters out all log messages. - -. Restart the service: -+ -[source,terminal] ----- -# systemctl daemon-reload ----- -+ -[source,terminal] ----- -# systemctl restart ovs-vswitchd ----- - diff --git a/modules/configuring-pods-secondary-network.adoc b/modules/configuring-pods-secondary-network.adoc deleted file mode 100644 index ff0c5becc876..000000000000 --- a/modules/configuring-pods-secondary-network.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: REFERENCE -[id="configuring-pods-secondary-network_{context}"] -= Configuring pods for additional networks - -You must specify the secondary network attachments through the `k8s.v1.cni.cncf.io/networks` annotation. - -The following example provisions a pod with two secondary attachments, one for each of the attachment configurations presented in this guide. - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: l2-network - name: tinypod - namespace: ns1 -spec: - containers: - - args: - - pause - image: k8s.gcr.io/e2e-test-images/agnhost:2.36 - imagePullPolicy: IfNotPresent - name: agnhost-container ----- \ No newline at end of file diff --git a/modules/configuring-pods-static-ip.adoc b/modules/configuring-pods-static-ip.adoc deleted file mode 100644 index 7c463ef9e61d..000000000000 --- a/modules/configuring-pods-static-ip.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: CONCEPT -[id="configuring-pods-static-ip_{context}"] -= Configuring pods with a static IP address - -The following example provisions a pod with a static IP address. - -[NOTE] -==== -* You can only specify the IP address for a pod's secondary network attachment for layer 2 attachments. -* Specifying a static IP address for the pod is only possible when the attachment configuration does not feature subnets. 
-==== - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: '[ - { - "name": "l2-network", <1> - "mac": "02:03:04:05:06:07", <2> - "interface": "myiface1", <3> - "ips": [ - "192.0.2.20/24" - ] <4> - } - ]' - name: tinypod - namespace: ns1 -spec: - containers: - - args: - - pause - image: k8s.gcr.io/e2e-test-images/agnhost:2.36 - imagePullPolicy: IfNotPresent - name: agnhost-container ----- -<1> The name of the network. This value must be unique across all `NetworkAttachmentDefinitions`. -<2> The MAC address to be assigned for the interface. -<3> The name of the network interface to be created for the pod. -<4> The IP addresses to be assigned to the network interface. diff --git a/modules/configuring-secret-for-wmco.adoc b/modules/configuring-secret-for-wmco.adoc deleted file mode 100644 index ac87fcdf07f5..000000000000 --- a/modules/configuring-secret-for-wmco.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/enabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="configuring-secret-for-wmco_{context}"] -= Configuring a secret for the Windows Machine Config Operator - -To run the Windows Machine Config Operator (WMCO), you must create a secret in the WMCO namespace containing a private key. This is required to allow the WMCO to communicate with the Windows virtual machine (VM). - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You created a PEM-encoded file containing an RSA key. - -.Procedure - -* Define the secret required to access the Windows VMs: -+ -[source,terminal] ----- -$ oc create secret generic cloud-private-key --from-file=private-key.pem=${HOME}/.ssh/<key> \ - -n openshift-windows-machine-config-operator <1> ----- - -<1> You must create the private key in the WMCO namespace, like `openshift-windows-machine-config-operator`. - -It is recommended to use a different private key than the one used when installing the cluster. diff --git a/modules/configuring-vsphere-connection-settings.adoc b/modules/configuring-vsphere-connection-settings.adoc deleted file mode 100644 index 2d4940e61ef8..000000000000 --- a/modules/configuring-vsphere-connection-settings.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-vSphere-connection-settings_{context}"] -= Configuring the vSphere connection settings - -[role="_abstract"] -Modify the following vSphere configuration settings as required: - -* vCenter address -* vCenter cluster -* vCenter username -* vCenter password -* vCenter address -* vSphere data center -* vSphere datastore -* Virtual machine folder - -.Prerequisites -* The {ai-full} has finished installing the cluster successfully. -* The cluster is connected to `https://console.redhat.com`. - -.Procedure -. In the Administrator perspective, navigate to *Home -> Overview*. -. Under *Status*, click *vSphere connection* to open the *vSphere connection configuration* wizard. -. In the *vCenter* field, enter the network address of the vSphere vCenter server. This can be either a domain name or an IP address. It appears in the vSphere web client URL; for example `https://[your_vCenter_address]/ui`. -. 
In the *vCenter cluster* field, enter the name of the vSphere vCenter cluster where {product-title} is installed. -+ -[IMPORTANT] -==== -This step is mandatory if you installed {product-title} 4.13 or later. -==== - -. In the *Username* field, enter your vSphere vCenter username. -. In the *Password* field, enter your vSphere vCenter password. -+ -[WARNING] -==== -The system stores the username and password in the `vsphere-creds` secret in the `kube-system` namespace of the cluster. An incorrect vCenter username or password makes the cluster nodes unschedulable. -==== -+ -. In the *Datacenter* field, enter the name of the vSphere data center that contains the virtual machines used to host the cluster; for example, `SDDC-Datacenter`. -. In the *Default data store* field, enter the path and name of the vSphere data store that stores the persistent data volumes; for example, `/SDDC-Datacenter/datastore/datastorename`. -+ -[WARNING] -==== -Updating the vSphere data center or default data store after the configuration has been saved detaches any active vSphere `PersistentVolumes`. -==== -+ -. In the *Virtual Machine Folder* field, enter the data center folder that contains the virtual machine of the cluster; for example, `/SDDC-Datacenter/vm/ci-ln-hjg4vg2-c61657-t2gzr`. For the {product-title} installation to succeed, all virtual machines comprising the cluster must be located in a single data center folder. -. Click *Save Configuration*. This updates the `cloud-provider-config` ConfigMap resource in the `openshift-config` namespace, and starts the configuration process. -. Reopen the *vSphere connection configuration* wizard and expand the *Monitored operators* panel. Check that the status of the operators is either *Progressing* or *Healthy*. diff --git a/modules/configuring-vsphere-regions-zones.adoc b/modules/configuring-vsphere-regions-zones.adoc deleted file mode 100644 index 47ad37085ed1..000000000000 --- a/modules/configuring-vsphere-regions-zones.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -//* installing/Installing-vsphere-installer-provisioned-customizations.adoc [IPI] -//* installing/installing-vsphere-installer-provisioned-network-customizations.adoc [IPI] -//* installing/installing-vsphere.adoc [UPI] -//* installing/installing-vsphere-network-customizations.adoc [UPI] -//* installing/installing-restricted-networks-installer-provisioned-vsphere.adoc [IPI] -//* installing/installing-restricted-networks-vsphere.adoc [IPI] - -:_content-type: PROCEDURE -[id="configuring-vsphere-regions-zones_{context}"] -= Configuring regions and zones for a VMware vCenter -You can modify the default installation configuration file, so that you can deploy an {product-title} cluster to multiple vSphere datacenters that run in a single VMware vCenter. - -The default `install-config.yaml` file configuration from the previous release of {product-title} is deprecated. You can continue to use the deprecated default configuration, but the `openshift-installer` will prompt you with a warning message that indicates the use of deprecated fields in the configuration file. - -[IMPORTANT] -==== -The example uses the `govc` command. The `govc` command is an open source command available from VMware; it is not available from Red Hat. The Red Hat support team does not maintain the `govc` command. 
Instructions for downloading and installing `govc` are found on the VMware documentation website -==== - -.Prerequisites -* You have an existing `install-config.yaml` installation configuration file. -+ -[IMPORTANT] -==== -You must specify at least one failure domain for your {product-title} cluster, so that you can provision datacenter objects for your VMware vCenter server. Consider specifying multiple failure domains if you need to provision virtual machine nodes in different datacenters, clusters, datastores, and other components. -==== - -.Procedure - -. Enter the following `govc` command-line tool commands to create the `openshift-region` and `openshift-zone` vCenter tag categories: -+ -[IMPORTANT] -==== -If you specify different names for the `openshift-region` and `openshift-zone` vCenter tag categories, the installation of the {product-title} cluster fails. -==== -+ -[source,terminal] ----- -$ govc tags.category.create -d "OpenShift region" openshift-region ----- -+ -[source,terminal] ----- -$ govc tags.category.create -d "OpenShift zone" openshift-zone ----- - -. To create a region tag for each region vSphere datacenter where you want to deploy your cluster, enter the following command in your terminal: -+ -[source,terminal] ----- -$ govc tags.create -c <region_tag_category> <region_tag> ----- - -. To create a zone tag for each vSphere cluster where you want to deploy your cluster, enter the following command: -+ -[source,terminal] ----- -$ govc tags.create -c <zone_tag_category> <zone_tag> ----- - -. Attach region tags to each vCenter datacenter object by entering the following command: -+ -[source,terminal] ----- -$ govc tags.attach -c <region_tag_category> <region_tag_1> /<datacenter_1> ----- - -. Attach the zone tags to each vCenter datacenter object by entering the following command: -+ -[source,terminal] ----- -$ govc tags.attach -c <zone_tag_category> <zone_tag_1> /<datacenter_1>/host/vcs-mdcnc-workload-1 ----- - -. Change to the directory that contains the installation program and initialize the cluster deployment according to your chosen installation requirements. 
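A minimal sketch of that final step is shown below, assuming the installation program binary is in your current directory and your customized `install-config.yaml` is in a placeholder directory named `<installation_directory>` (neither name is defined by this procedure). The sample `install-config.yaml` that follows the sketch shows the zone and failure domain layout that the command consumes:

[source,terminal]
----
$ ./openshift-install create cluster --dir <installation_directory> --log-level=info
----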
- -.Sample `install-config.yaml` file with multiple datacenters defined in a vSphere center - -[source,yaml] ----- ---- -compute: ---- - vsphere: - zones: - - "<machine_pool_zone_1>" - - "<machine_pool_zone_2>" ---- -controlPlane: ---- -vsphere: - zones: - - "<machine_pool_zone_1>" - - "<machine_pool_zone_2>" ---- -platform: - vsphere: - vcenters: ---- - datacenters: - - <datacenter1_name> - - <datacenter2_name> - failureDomains: - - name: <machine_pool_zone_1> - region: <region_tag_1> - zone: <zone_tag_1> - server: <fully_qualified_domain_name> - topology: - datacenter: <datacenter1> - computeCluster: "/<datacenter1>/host/<cluster1>" - networks: - - <VM_Network1_name> - datastore: "/<datacenter1>/datastore/<datastore1>" - resourcePool: "/<datacenter1>/host/<cluster1>/Resources/<resourcePool1>" - folder: "/<datacenter1>/vm/<folder1>" - - name: <machine_pool_zone_2> - region: <region_tag_2> - zone: <zone_tag_2> - server: <fully_qualified_domain_name> - topology: - datacenter: <datacenter2> - computeCluster: "/<datacenter2>/host/<cluster2>" - networks: - - <VM_Network2_name> - datastore: "/<datacenter2>/datastore/<datastore2>" - resourcePool: "/<datacenter2>/host/<cluster2>/Resources/<resourcePool2>" - folder: "/<datacenter2>/vm/<folder2>" ---- ----- diff --git a/modules/configuring-vsphere-verifying-configuration.adoc b/modules/configuring-vsphere-verifying-configuration.adoc deleted file mode 100644 index c8cfa6cd1c44..000000000000 --- a/modules/configuring-vsphere-verifying-configuration.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-vSphere-monitoring-configuration-completion{context}"] -= Verifying the configuration - -The connection configuration process updates operator statuses and control plane nodes. It takes approximately an hour to complete. During the configuration process, the nodes will reboot. Previously bound `PersistentVolumeClaims` objects might become disconnected. - -.Prerequisites -* You have saved the configuration settings in the *vSphere connection configuration* wizard. - -.Procedure - -. Check that the configuration process completed successfully: -+ --- -.. In the OpenShift Container Platform Administrator perspective, navigate to *Home -> Overview*. -.. Under *Status* click *Operators*. Wait for all operator statuses to change from *Progressing* to *All succeeded*. A *Failed* status indicates that the configuration failed. -.. Under *Status*, click *Control Plane*. Wait for the response rate of all Control Pane components to return to 100%. A *Failed* control plane component indicates that the configuration failed. --- -A failure indicates that at least one of the connection settings is incorrect. Change the settings in the *vSphere connection configuration* wizard and save the configuration again. - -. Check that you are able to bind `PersistentVolumeClaims` objects by performing the following steps: - -.. Create a `StorageClass` object using the following YAML: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: vsphere-sc -provisioner: kubernetes.io/vsphere-volume -parameters: - datastore: YOURVCENTERDATASTORE - diskformat: thin -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -.. 
Create a `PersistentVolumeClaims` object using the following YAML: -+ -[source,yaml] ----- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: test-pvc - namespace: openshift-config - annotations: - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume - finalizers: - - kubernetes.io/pvc-protection -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: vsphere-sc - volumeMode: Filesystem ----- -+ -If you are unable to create a `PersistentVolumeClaims` object, you can troubleshoot by navigating to *Storage* -> *PersistentVolumeClaims* in the *Administrator* perspective of the {product-title} web console. \ No newline at end of file diff --git a/modules/configuring_hyperthreading_for_a_cluster.adoc b/modules/configuring_hyperthreading_for_a_cluster.adoc deleted file mode 100644 index fe86e7445ada..000000000000 --- a/modules/configuring_hyperthreading_for_a_cluster.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/cnf-low-latency-tuning.adoc - -:_content-type: PROCEDURE -[id="configuring_hyperthreading_for_a_cluster_{context}"] -= Configuring hyperthreading for a cluster - -To configure hyperthreading for an {product-title} cluster, set the CPU threads in the performance profile to the same cores that are configured for the reserved or isolated CPU pools. - -[NOTE] -==== -If you configure a performance profile, and subsequently change the hyperthreading configuration for the host, ensure that you update the CPU `isolated` and `reserved` fields in the `PerformanceProfile` YAML to match the new configuration. -==== - -[WARNING] -==== -Disabling a previously enabled host hyperthreading configuration can cause the CPU core IDs listed in the `PerformanceProfile` YAML to be incorrect. This incorrect configuration can cause the node to become unavailable because the listed CPUs can no longer be found. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Install the OpenShift CLI (oc). - -.Procedure - -. Ascertain which threads are running on what CPUs for the host you want to configure. -+ -You can view which threads are running on the host CPUs by logging in to the cluster and running the following command: -+ -[source,terminal] ----- -$ lscpu --all --extended ----- -+ -.Example output -+ -[source,terminal] ----- -CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ -0 0 0 0 0:0:0:0 yes 4800.0000 400.0000 -1 0 0 1 1:1:1:0 yes 4800.0000 400.0000 -2 0 0 2 2:2:2:0 yes 4800.0000 400.0000 -3 0 0 3 3:3:3:0 yes 4800.0000 400.0000 -4 0 0 0 0:0:0:0 yes 4800.0000 400.0000 -5 0 0 1 1:1:1:0 yes 4800.0000 400.0000 -6 0 0 2 2:2:2:0 yes 4800.0000 400.0000 -7 0 0 3 3:3:3:0 yes 4800.0000 400.0000 ----- -+ -In this example, there are eight logical CPU cores running on four physical CPU cores. CPU0 and CPU4 are running on physical Core0, CPU1 and CPU5 are running on physical Core 1, and so on. -+ -Alternatively, to view the threads that are set for a particular physical CPU core (`cpu0` in the example below), open a command prompt and run the following: -+ -[source,terminal] ----- -$ cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list ----- -+ -.Example output -+ -[source,terminal] ----- -0-4 ----- - -. Apply the isolated and reserved CPUs in the `PerformanceProfile` YAML. For example, you can set logical cores CPU0 and CPU4 as `isolated`, and logical cores CPU1 to CPU3 and CPU5 to CPU7 as `reserved`. 
When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. -+ -[source,yaml] ----- -... - cpu: - isolated: 0,4 - reserved: 1-3,5-7 -... ----- -+ -[NOTE] -==== -The reserved and isolated CPU pools must not overlap and together must span all available cores in the worker node. -==== - -[IMPORTANT] -==== -Hyperthreading is enabled by default on most Intel processors. If you enable hyperthreading, all threads processed by a particular core must be isolated or processed on the same core. -==== - -[id="disabling_hyperthreading_for_low_latency_applications_{context}"] -== Disabling hyperthreading for low latency applications - -When configuring clusters for low latency processing, consider whether you want to disable hyperthreading before you deploy the cluster. To disable hyperthreading, do the following: - -. Create a performance profile that is appropriate for your hardware and topology. -. Set `nosmt` as an additional kernel argument. The following example performance profile illustrates this setting: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: example-performanceprofile -spec: - additionalKernelArgs: - - nmi_watchdog=0 - - audit=0 - - mce=off - - processor.max_cstate=1 - - idle=poll - - intel_idle.max_cstate=0 - - nosmt - cpu: - isolated: 2-3 - reserved: 0-1 - hugepages: - defaultHugepagesSize: 1G - pages: - - count: 2 - node: 0 - size: 1G - nodeSelector: - node-role.kubernetes.io/performance: '' - realTimeKernel: - enabled: true ----- -+ -[NOTE] -==== -When you configure reserved and isolated CPUs, the infra containers in pods use the reserved CPUs and the application containers use the isolated CPUs. -==== diff --git a/modules/connected-to-disconnected-config-registry.adoc b/modules/connected-to-disconnected-config-registry.adoc deleted file mode 100644 index bed513748336..000000000000 --- a/modules/connected-to-disconnected-config-registry.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -[id="connected-to-disconnected-config-registry_{context}"] -= Configuring the cluster for the mirror registry - -After creating and mirroring the images to the mirror registry, you must modify your cluster so that pods can pull images from the mirror registry. - -You must: - -* Add the mirror registry credentials to the global pull secret. -* Add the mirror registry server certificate to the cluster. -* Create an `ImageContentSourcePolicy` custom resource (ICSP), which associates the mirror registry with the source registry. - - - -. Add mirror registry credential to the cluster global pull-secret: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=<pull_secret_location> <1> ----- -<1> Provide the path to the new pull secret file. -+ -For example: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=.mirrorsecretconfigjson ----- - -. Add the CA-signed mirror registry server certificate to the nodes in the cluster: - -.. 
Create a config map that includes the server certificate for the mirror registry: -+ -[source,terminal] ----- -$ oc create configmap <config_map_name> --from-file=<mirror_address_host>..<port>=$path/ca.crt -n openshift-config ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file=mirror.registry.com..443=/root/certs/ca-chain.cert.pem -n openshift-config ----- - -.. Use the config map to update the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"<config_map_name>"}}}' --type=merge ----- -+ -For example: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}' --type=merge ----- - -. Create an ICSP to redirect container pull requests from the online registries to the mirror registry: - -.. Create the `ImageContentSourcePolicy` custom resource: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: ImageContentSourcePolicy -metadata: - name: mirror-ocp -spec: - repositoryDigestMirrors: - - mirrors: - - mirror.registry.com:443/ocp/release <1> - source: quay.io/openshift-release-dev/ocp-release <2> - - mirrors: - - mirror.registry.com:443/ocp/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- -<1> Specifies the name of the mirror image registry and repository. -<2> Specifies the online registry and repository containing the content that is mirrored. - -.. Create the ICSP object: -+ -[source,terminal] ----- -$ oc create -f registryrepomirror.yaml ----- -+ -.Example output -[source,terminal] ----- -imagecontentsourcepolicy.operator.openshift.io/mirror-ocp created ----- -+ -{product-title} applies the changes to this CR to all nodes in the cluster. - -. Verify that the credentials, CA, and ICSP for the mirror registry were added: - -.. Log into a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Check the `config.json` file for the credentials: -+ -[source,terminal] ----- -sh-4.4# cat /var/lib/kubelet/config.json ----- -+ -.Example output -[source,terminal] ----- -{"auths":{"brew.registry.redhat.io":{"auth":"xx=="},"brewregistry.stage.redhat.io":{"auth":"xxx=="},"mirror.registry.com:443":{"auth":"xx="}}} <1> ----- -<1> Ensure that the mirror registry and credentials are present. - -.. Change to the `certs.d` directory: -+ -[source,terminal] ----- -sh-4.4# cd /etc/docker/certs.d/ ----- - -.. List the certificates in the `certs.d` directory: -+ -[source,terminal] ----- -sh-4.4# ls ----- -+ -.Example output ----- -image-registry.openshift-image-registry.svc.cluster.local:5000 -image-registry.openshift-image-registry.svc:5000 -mirror.registry.com:443 <1> ----- -<1> Ensure that the mirror registry is in the list. - -..
Check that the ICSP added the mirror registry to the `registries.conf` file: -+ -[source,terminal] ----- -sh-4.4# cat /etc/containers/registries.conf ----- -+ -.Example output -+ -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "quay.io/openshift-release-dev/ocp-release" - mirror-by-digest-only = true - - [[registry.mirror]] - location = "mirror.registry.com:443/ocp/release" - -[[registry]] - prefix = "" - location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev" - mirror-by-digest-only = true - - [[registry.mirror]] - location = "mirror.registry.com:443/ocp/release" ----- -+ -The `registry.mirror` parameters indicate that the mirror registry is searched before the original registry. - -.. Exit the node. -+ -[source,terminal] ----- -sh-4.4# exit ----- - diff --git a/modules/connected-to-disconnected-disconnect.adoc b/modules/connected-to-disconnected-disconnect.adoc deleted file mode 100644 index cdc2f8d77d58..000000000000 --- a/modules/connected-to-disconnected-disconnect.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -[id="connected-to-disconnected-disconnect_{context}"] -= Disconnect the cluster from the network - -After mirroring all the required repositories and configuring your cluster to work as a disconnected cluster, you can disconnect the cluster from the network. - - - diff --git a/modules/connected-to-disconnected-mirror-images.adoc b/modules/connected-to-disconnected-mirror-images.adoc deleted file mode 100644 index 0eef46b81423..000000000000 --- a/modules/connected-to-disconnected-mirror-images.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-mirror-images_{context}"] -= Mirroring the images - -After the cluster is properly configured, you can mirror the images from your external repositories to the mirror repository. - -.Procedure - -. Mirror the Operator Lifecycle Manager (OLM) images: -// copied from olm-mirroring-catalog.adoc -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/redhat-operator-index:v{product-version} <mirror_registry>:<port>/olm -a <reg_creds> ----- -+ --- -where: - -`product-version`:: Specifies the tag that corresponds to the version of {product-title} to install, such as `4.8`. -`mirror_registry`:: Specifies the fully qualified domain name (FQDN) for the target registry and namespace to mirror the Operator content to, where `<namespace>` is any existing namespace on the registry. -`reg_creds`:: Specifies the location of your modified `.dockerconfigjson` file. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/redhat-operator-index:v4.8 mirror.registry.com:443/olm -a ./.dockerconfigjson --index-filter-by-os='.*' ----- - -. Mirror the content for any other Red Hat-provided Operator: -+ -[source,terminal] ----- -$ oc adm catalog mirror <index_image> <mirror_registry>:<port>/<namespace> -a <reg_creds> ----- -+ --- -where: - -`index_image`:: Specifies the index image for the catalog that you want to mirror. -`mirror_registry`:: Specifies the FQDN for the target registry and namespace to mirror the Operator content to, where `<namespace>` is any existing namespace on the registry. 
-`reg_creds`:: Optional: Specifies the location of your registry credentials file, if required. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm catalog mirror registry.redhat.io/redhat/community-operator-index:v4.8 mirror.registry.com:443/olm -a ./.dockerconfigjson --index-filter-by-os='.*' ----- - -. Mirror the {product-title} image repository: -+ -[source,terminal] ----- -$ oc adm release mirror -a .dockerconfigjson --from=quay.io/openshift-release-dev/ocp-release:v<product-version>-<architecture> --to=<local_registry>/<local_repository> --to-release-image=<local_registry>/<local_repository>:v<product-version>-<architecture> ----- -+ --- -where: - -`product-version`:: Specifies the tag that corresponds to the version of {product-title} to install, such as `4.8.15-x86_64`. -`architecture`:: Specifies the type of architecture for your server, such as `x86_64`. -`local_registry`:: Specifies the registry domain name for your mirror repository. -`local_repository`:: Specifies the name of the repository to create in your registry, such as `ocp4/openshift4`. --- -+ -For example: -+ -[source,terminal] ----- -$ oc adm release mirror -a .dockerconfigjson --from=quay.io/openshift-release-dev/ocp-release:4.8.15-x86_64 --to=mirror.registry.com:443/ocp/release --to-release-image=mirror.registry.com:443/ocp/release:4.8.15-x86_64 ----- -+ -.Example output -+ -[source,terminal] -+ ----- -info: Mirroring 109 images to mirror.registry.com/ocp/release ... -mirror.registry.com:443/ - ocp/release - manifests: - sha256:086224cadce475029065a0efc5244923f43fb9bb3bb47637e0aaf1f32b9cad47 -> 4.8.15-x86_64-thanos - sha256:0a214f12737cb1cfbec473cc301aa2c289d4837224c9603e99d1e90fc00328db -> 4.8.15-x86_64-kuryr-controller - sha256:0cf5fd36ac4b95f9de506623b902118a90ff17a07b663aad5d57c425ca44038c -> 4.8.15-x86_64-pod - sha256:0d1c356c26d6e5945a488ab2b050b75a8b838fc948a75c0fa13a9084974680cb -> 4.8.15-x86_64-kube-client-agent - -….. -sha256:66e37d2532607e6c91eedf23b9600b4db904ce68e92b43c43d5b417ca6c8e63c mirror.registry.com:443/ocp/release:4.5.41-multus-admission-controller -sha256:d36efdbf8d5b2cbc4dcdbd64297107d88a31ef6b0ec4a39695915c10db4973f1 mirror.registry.com:443/ocp/release:4.5.41-cluster-kube-scheduler-operator -sha256:bd1baa5c8239b23ecdf76819ddb63cd1cd6091119fecdbf1a0db1fb3760321a2 mirror.registry.com:443/ocp/release:4.5.41-aws-machine-controllers -info: Mirroring completed in 2.02s (0B/s) - -Success -Update image: mirror.registry.com:443/ocp/release:4.5.41-x86_64 -Mirror prefix: mirror.registry.com:443/ocp/release ----- - -. Mirror any other registries, as needed: -+ -[source,terminal] ----- -$ oc image mirror <online_registry>/my/image:latest <mirror_registry> ----- diff --git a/modules/connected-to-disconnected-prepare-mirror.adoc b/modules/connected-to-disconnected-prepare-mirror.adoc deleted file mode 100644 index ff61826f6bba..000000000000 --- a/modules/connected-to-disconnected-prepare-mirror.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-prepare-mirror_{context}"] -= Preparing the cluster for mirroring - -Before disconnecting your cluster, you must mirror, or copy, the images to a mirror registry that is reachable by every node in your disconnected cluster. In order to mirror the images, you must prepare your cluster by: - -* Adding the mirror registry certificates to the list of trusted CAs on your host. 
-* Creating a `.dockerconfigjson` file that contains your image pull secret, which is from the `cloud.openshift.com` token. - -.Procedure - -. Configuring credentials that allow image mirroring: - -.. Add the CA certificate for the mirror registry, in the simple PEM or DER file formats, to the list of trusted CAs. For example: -+ -[source,terminal] ----- -$ cp </path/to/cert.crt> /usr/share/pki/ca-trust-source/anchors/ ----- -+ --- -where:: -+ -`</path/to/cert.crt>`:: Specifies the path to the certificate on your local file system. --- - -.. Update the CA trust. For example, in Linux: -+ -[source,terminal] ----- -$ update-ca-trust ----- - -.. Extract the `.dockerconfigjson` file from the global pull secret: -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --confirm --to=. ----- -+ -.Example output -[source,terminal] ----- -.dockerconfigjson ----- - -.. Edit the `.dockerconfigjson` file to add your mirror registry and authentication credentials and save it as a new file: -// copied from olm-accessing-images-private-registries -+ -[source,terminal] ----- -{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}},"<registry>:<port>/<namespace>/":{"auth":"<token>"}}} ----- -+ -where: -+ -`<local_registry>`:: Specifies the registry domain name, and optionally the port, that your mirror registry uses to serve content. -`auth`:: Specifies the base64-encoded user name and password for your mirror registry. -`<registry>:<port>/<namespace>`:: Specifies the mirror registry details. -`<token>`:: Specifies the base64-encoded `username:password` for your mirror registry. -+ -For example: -+ -[source,terminal] ----- -$ {"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0Y3UjhGOVZPT0lOMEFaUjdPUzRGTA==","email":"user@example.com"}, -"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGOVZPT0lOMEFaUGSTd4VGVGVUjdPUzRGTA==","email":"user@example.com"}, -"registry.connect.redhat.com"{"auth":"NTE3MTMwNDB8dWhjLTFEZlN3VHkxOSTd4VGVGVU1MdTpleUpoYkdjaUailA==","email":"user@example.com"}, -"registry.redhat.io":{"auth":"NTE3MTMwNDB8dWhjLTFEZlN3VH3BGSTd4VGVGVU1MdTpleUpoYkdjaU9fZw==","email":"user@example.com"}, -"registry.svc.ci.openshift.org":{"auth":"dXNlcjpyWjAwWVFjSEJiT2RKVW1pSmg4dW92dGp1SXRxQ3RGN1pwajJhN1ZXeTRV"},"my-registry:5000/my-namespace/":{"auth":"dXNlcm5hbWU6cGFzc3dvcmQ="}}} ----- diff --git a/modules/connected-to-disconnected-restore-insights.adoc b/modules/connected-to-disconnected-restore-insights.adoc deleted file mode 100644 index 51bc38646bfd..000000000000 --- a/modules/connected-to-disconnected-restore-insights.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-restore-insights_{context}"] -= Restoring a degraded Insights Operator - -Disconnecting the cluster from the network necessarily causes the cluster to lose the Internet connection. The Insights Operator becomes degraded because it requires access to link:https://console.redhat.com[Red Hat Insights]. - -This topic describes how to recover from a degraded Insights Operator. - -.Procedure - -. Edit your `.dockerconfigjson` file to remove the `cloud.openshift.com` entry, for example: -+ -[source,terminal] ----- -"cloud.openshift.com":{"auth":"<hash>","email":"user@example.com"} ----- - -. Save the file. - -. 
Update the cluster secret with the edited `.dockerconfigjson` file: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=./.dockerconfigjson ----- - -. Verify that the Insights Operator is no longer degraded: -+ -[source,terminal] ----- -$ oc get co insights ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -insights 4.5.41 True False False 3d ----- - - diff --git a/modules/connected-to-disconnected-restore.adoc b/modules/connected-to-disconnected-restore.adoc deleted file mode 100644 index b670b45bcc09..000000000000 --- a/modules/connected-to-disconnected-restore.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-restore_{context}"] -= Restoring the network - -If you want to reconnect a disconnected cluster and pull images from online registries, delete the cluster's ImageContentSourcePolicy (ICSP) objects. Without the ICSP, pull requests to external registries are no longer redirected to the mirror registry. - -.Procedure - -. View the ICSP objects in your cluster: -+ -[source,terminal] ----- -$ oc get imagecontentsourcepolicy ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -mirror-ocp 6d20h -ocp4-index-0 6d18h -qe45-index-0 6d15h ----- - -. Delete all the ICSP objects you created when disconnecting your cluster: -+ -[source,terminal] ----- -$ oc delete imagecontentsourcepolicy <icsp_name> <icsp_name> <icsp_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete imagecontentsourcepolicy mirror-ocp ocp4-index-0 qe45-index-0 ----- -+ -.Example output -[source,terminal] ----- -imagecontentsourcepolicy.operator.openshift.io "mirror-ocp" deleted -imagecontentsourcepolicy.operator.openshift.io "ocp4-index-0" deleted -imagecontentsourcepolicy.operator.openshift.io "qe45-index-0" deleted ----- - -. Wait for all the nodes to restart and return to the READY status and verify that the `registries.conf` file is pointing to the original registries and not the mirror registries: - -.. Log into a node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Examine the `registries.conf` file: -+ -[source,terminal] ----- -sh-4.4# cat /etc/containers/registries.conf ----- -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] <1> ----- -<1> The `registry` and `registry.mirror` entries created by the ICSPs you deleted are removed. diff --git a/modules/connected-to-disconnected-verify.adoc b/modules/connected-to-disconnected-verify.adoc deleted file mode 100644 index 7eb6403be7b5..000000000000 --- a/modules/connected-to-disconnected-verify.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/connected-to-disconnected.adoc - -:_content-type: PROCEDURE -[id="connected-to-disconnected-verify_{context}"] -= Ensure applications continue to work - -Before disconnecting the cluster from the network, ensure that your cluster is working as expected and all of your applications are working as expected. 
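-
-As an optional first check, you can get a quick view of overall cluster health by listing the cluster Operators. All cluster Operators should report `True` in the `AVAILABLE` column and `False` in the `DEGRADED` column before you disconnect the cluster from the network. For example:
-
-[source,terminal]
----
-$ oc get clusteroperators
----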
- -.Procedure - -Use the following commands to check the status of your cluster: - -* Ensure your pods are running: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces ----- -+ -.Example output -[source,terinal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-0 1/1 Running 0 39m -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-1 1/1 Running 0 39m -kube-system apiserver-watcher-ci-ln-47ltxtb-f76d1-mrffg-master-2 1/1 Running 0 39m -openshift-apiserver-operator openshift-apiserver-operator-79c7c646fd-5rvr5 1/1 Running 3 45m -openshift-apiserver apiserver-b944c4645-q694g 2/2 Running 0 29m -openshift-apiserver apiserver-b944c4645-shdxb 2/2 Running 0 31m -openshift-apiserver apiserver-b944c4645-x7rf2 2/2 Running 0 33m - ... ----- - -* Ensure your nodes are in the READY status: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-47ltxtb-f76d1-mrffg-master-0 Ready master 42m v1.26.0 -ci-ln-47ltxtb-f76d1-mrffg-master-1 Ready master 42m v1.26.0 -ci-ln-47ltxtb-f76d1-mrffg-master-2 Ready master 42m v1.26.0 -ci-ln-47ltxtb-f76d1-mrffg-worker-a-gsxbz Ready worker 35m v1.26.0 -ci-ln-47ltxtb-f76d1-mrffg-worker-b-5qqdx Ready worker 35m v1.26.0 -ci-ln-47ltxtb-f76d1-mrffg-worker-c-rjkpq Ready worker 34m v1.26.0 ----- diff --git a/modules/console-operator.adoc b/modules/console-operator.adoc deleted file mode 100644 index 6937c3e17d2d..000000000000 --- a/modules/console-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -// operators/operator-reference.adoc -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: REFERENCE -[id="console-operator_{context}"] -ifdef::operator-ref[= Console Operator] -ifdef::cluster-caps[= Console capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Console Operator is an optional cluster capability that can be disabled by cluster administrators during installation. If you disable the Console Operator at installation, your cluster is still supported and upgradable. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Console Operator provides the features for the `Console` capability. - -endif::cluster-caps[] - -The Console Operator installs and maintains the {product-title} web console on a cluster. The Console Operator is installed by default and automatically maintains a console. 
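-
-For example, on a running cluster one way to check the current state of the Operator is a standard cluster Operator query; a healthy console reports `True` in the `AVAILABLE` column:
-
-[source,terminal]
----
-$ oc get clusteroperator console
----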
- -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/console-operator[console-operator] - -endif::operator-ref[] - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] -ifeval::["{context}" == "cluster-capabilities"] -:!cluster-caps: -endif::[] diff --git a/modules/consuming-huge-pages-resource-using-the-downward-api.adoc b/modules/consuming-huge-pages-resource-using-the-downward-api.adoc deleted file mode 100644 index 37b68a02f97b..000000000000 --- a/modules/consuming-huge-pages-resource-using-the-downward-api.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc - -:file-name: hugepages-volume-pod.yaml - -:_content-type: PROCEDURE -[id="consuming-huge-pages-resource-using-the-downward-api_{context}"] -= Consuming huge pages resources using the Downward API - -You can use the Downward API to inject information about the huge pages resources that are consumed by a container. - -You can inject the resource allocation as environment variables, a volume plugin, or both. Applications that you develop and run in the container can determine the resources that are available by reading the environment variables or files in the specified volumes. - -.Procedure - -. Create a `{file-name}` file that is similar to the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - generateName: hugepages-volume- - labels: - app: hugepages-example -spec: - containers: - - securityContext: - capabilities: - add: [ "IPC_LOCK" ] - image: rhel7:latest - command: - - sleep - - inf - name: example - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - - mountPath: /etc/podinfo - name: podinfo - resources: - limits: - hugepages-1Gi: 2Gi - memory: "1Gi" - cpu: "1" - requests: - hugepages-1Gi: 2Gi - env: - - name: REQUESTS_HUGEPAGES_1GI <.> - valueFrom: - resourceFieldRef: - containerName: example - resource: requests.hugepages-1Gi - volumes: - - name: hugepage - emptyDir: - medium: HugePages - - name: podinfo - downwardAPI: - items: - - path: "hugepages_1G_request" <.> - resourceFieldRef: - containerName: example - resource: requests.hugepages-1Gi - divisor: 1Gi ----- -<.> Specifies to read the resource use from `requests.hugepages-1Gi` and expose the value as the `REQUESTS_HUGEPAGES_1GI` environment variable. -<.> Specifies to read the resource use from `requests.hugepages-1Gi` and expose the value as the file `/etc/podinfo/hugepages_1G_request`. - -. Create the pod from the `{file-name}` file: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create -f {file-name} ----- - -.Verification - -. Check the value of the `REQUESTS_HUGEPAGES_1GI` environment variable: -+ -[source,terminal] ----- -$ oc exec -it $(oc get pods -l app=hugepages-example -o jsonpath='{.items[0].metadata.name}') \ - -- env | grep REQUESTS_HUGEPAGES_1GI ----- -+ -.Example output -[source,terminal] ----- -REQUESTS_HUGEPAGES_1GI=2147483648 ----- - -. 
Check the value of the `/etc/podinfo/hugepages_1G_request` file: -+ -[source,terminal] ----- -$ oc exec -it $(oc get pods -l app=hugepages-example -o jsonpath='{.items[0].metadata.name}') \ - -- cat /etc/podinfo/hugepages_1G_request ----- -+ -.Example output -[source,terminal] ----- -2 ----- - -:!file-name: diff --git a/modules/container-benefits.adoc b/modules/container-benefits.adoc deleted file mode 100644 index 769dca351a08..000000000000 --- a/modules/container-benefits.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_architecture/osd-architecture.adoc - -[id="container-benefits_{context}"] -= The benefits of containerized applications - - -Applications were once expected to be installed on operating systems that included all of the dependencies for the application. However, containers provide a standard way to package your application code, configurations, and dependencies into a single unit that can run as a resource-isolated process on a compute server. To run your app in Kubernetes on {product-title}, you must first containerize your app by creating a container image that you store in a container registry. - -[id="operating-system-benefits_{context}"] -== Operating system benefits - -Containers use small, dedicated Linux operating systems without a kernel. The file system, networking, cgroups, process tables, and namespaces are separate from the host Linux system, but the containers can integrate with the -hosts seamlessly when necessary. Being based on Linux allows containers to use all the advantages that come with the open source development model of rapid innovation. - -Because each container uses a dedicated operating system, you can deploy applications that require conflicting software dependencies on the same host. Each container carries its own dependent software and manages its own interfaces, such as networking and file systems, so applications never need to compete for those assets. - -[id="deployment-scaling-benefits_{context}"] -== Deployment benefits - -If you employ rolling upgrades between major releases of your application, you can continuously improve your applications without downtime and still maintain compatibility with the current release. - -You can also deploy and test a new version of an application alongside the existing version. Deploy the new application version in addition to the current version. If the container passes your tests, simply deploy more new containers and remove the old ones.  - -Since all the software dependencies for an application are resolved within the container itself, you can use a generic operating system on each host in your data center. You do not need to configure a specific operating system for each application host. When your data center needs more capacity, you can deploy another generic host system. diff --git a/modules/containers-about.adoc b/modules/containers-about.adoc deleted file mode 100644 index a6523095c368..000000000000 --- a/modules/containers-about.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="containers-about_{context}"] -= Containers - -The basic units of {product-title} applications are called containers. link:https://access.redhat.com/articles/1353593[Linux container technologies] are lightweight mechanisms for isolating running processes so that they are limited to interacting with only their designated resources. 
The word container is defined as a specific running or paused instance of a container image. - -Many application instances can be running in containers on a single host without visibility into each others' processes, files, network, and so on. Typically, each container provides a single service, often called a micro-service, such as a web server or a database, though containers can be used for arbitrary workloads. - -The Linux kernel has been incorporating capabilities for container technologies for years. The Docker project developed a convenient management interface for Linux containers on a host. More recently, the link:https://github.com/opencontainers/[Open Container Initiative] has developed open standards for container formats and container runtimes. {product-title} and Kubernetes add the ability to orchestrate OCI- and Docker-formatted containers across multi-host installations. - -Though you do not directly interact with container runtimes when using {product-title}, understanding their capabilities and terminology is important for understanding their role in {product-title} and how your applications function inside of containers. - -Tools such as link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/#using_podman_to_work_with_containers[podman] can be used to replace `docker` command-line tools for running and managing containers directly. Using `podman`, you can experiment with containers separately from {product-title}. diff --git a/modules/containers-signature-verify-application.adoc b/modules/containers-signature-verify-application.adoc deleted file mode 100644 index 664538bb45f5..000000000000 --- a/modules/containers-signature-verify-application.adoc +++ /dev/null @@ -1,259 +0,0 @@ -// Module included in the following assemblies: -// -// * security/container_security/security-container-signature.adoc - -:_content-type: PROCEDURE -[id="containers-signature-verify-application_{context}"] -= Verifying the signature verification configuration -After you apply the machine configs to the cluster, the Machine Config Controller detects the new `MachineConfig` object and generates a new `rendered-worker-<hash>` version. - -.Prerequisites -* You enabled signature verification by using a machine config file. - -.Procedure - -. 
On the command line, run the following command to display information about a desired worker: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output of initial worker monitoring -+ -[source,terminal] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= -Annotations: <none> -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool -Metadata: - Creation Timestamp: 2019-12-19T02:02:12Z - Generation: 3 - Resource Version: 16229 - Self Link: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/worker - UID: 92697796-2203-11ea-b48c-fa163e3940e5 -Spec: - Configuration: - Name: rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 51-worker-rh-registry-trust - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-ssh - Machine Config Selector: - Match Labels: - machineconfiguration.openshift.io/role: worker - Node Selector: - Match Labels: - node-role.kubernetes.io/worker: - Paused: false -Status: - Conditions: - Last Transition Time: 2019-12-19T02:03:27Z - Message: - Reason: - Status: False - Type: RenderDegraded - Last Transition Time: 2019-12-19T02:03:43Z - Message: - Reason: - Status: False - Type: NodeDegraded - Last Transition Time: 2019-12-19T02:03:43Z - Message: - Reason: - Status: False - Type: Degraded - Last Transition Time: 2019-12-19T02:28:23Z - Message: - Reason: - Status: False - Type: Updated - Last Transition Time: 2019-12-19T02:28:23Z - Message: All nodes are updating to rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Reason: - Status: True - Type: Updating - Configuration: - Name: rendered-worker-d9b3f4ffcfd65c30dcf591a0e8cf9b2e - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-ssh - Degraded Machine Count: 0 - Machine Count: 1 - Observed Generation: 3 - Ready Machine Count: 0 - Unavailable Machine Count: 1 - Updated Machine Count: 0 -Events: <none> ----- - -. Run the `oc describe` command again: -+ -[source,terminal] ----- -$ oc describe machineconfigpool/worker ----- -+ -.Example output after the worker is updated -+ -[source,terminal] ----- -... 
- Last Transition Time: 2019-12-19T04:53:09Z - Message: All nodes are updated with rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Reason: - Status: True - Type: Updated - Last Transition Time: 2019-12-19T04:53:09Z - Message: - Reason: - Status: False - Type: Updating - Configuration: - Name: rendered-worker-f6819366eb455a401c42f8d96ab25c02 - Source: - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 00-worker - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-container-runtime - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 01-worker-kubelet - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 51-worker-rh-registry-trust - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-92697796-2203-11ea-b48c-fa163e3940e5-registries - API Version: machineconfiguration.openshift.io/v1 - Kind: MachineConfig - Name: 99-worker-ssh - Degraded Machine Count: 0 - Machine Count: 3 - Observed Generation: 4 - Ready Machine Count: 3 - Unavailable Machine Count: 0 - Updated Machine Count: 3 -... ----- -+ -[NOTE] -==== -The `Observed Generation` parameter shows an increased count based on the generation of the controller-produced configuration. This controller updates this value even if it fails to process the specification and generate a revision. The `Configuration Source` value points to the `51-worker-rh-registry-trust` configuration. -==== - -. Confirm that the `policy.json` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/policy.json ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... -To use host binaries, run `chroot /host` -{ - "default": [ - { - "type": "insecureAcceptAnything" - } - ], - "transports": { - "docker": { - "registry.access.redhat.com": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ], - "registry.redhat.io": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ] - }, - "docker-daemon": { - "": [ - { - "type": "insecureAcceptAnything" - } - ] - } - } -} ----- - -. Confirm that the `registry.redhat.io.yaml` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/registries.d/registry.redhat.io.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... -To use host binaries, run `chroot /host` -docker: - registry.redhat.io: - sigstore: https://registry.redhat.io/containers/sigstore ----- - -. Confirm that the `registry.access.redhat.com.yaml` file exists with the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host cat /etc/containers/registries.d/registry.access.redhat.com.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -Starting pod/<node>-debug ... 
-To use host binaries, run `chroot /host` -docker: - registry.access.redhat.com: - sigstore: https://access.redhat.com/webassets/docker/content/sigstore ----- diff --git a/modules/containers-signature-verify-enable.adoc b/modules/containers-signature-verify-enable.adoc deleted file mode 100644 index e9949a5acdae..000000000000 --- a/modules/containers-signature-verify-enable.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * security/container_security/security-container-signature.adoc - -:_content-type: PROCEDURE -[id="containers-signature-verify-enable_{context}"] -= Enabling signature verification for Red Hat Container Registries -Enabling container signature validation for Red Hat Container Registries requires writing a signature verification policy file specifying the keys to verify images from these registries. For RHEL8 nodes, the registries are already defined in `/etc/containers/registries.d` by default. - -.Procedure - -. Create a Butane config file, `51-worker-rh-registry-trust.bu`, containing the necessary configuration for the worker nodes. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 51-worker-rh-registry-trust - labels: - machineconfiguration.openshift.io/role: worker -storage: - files: - - path: /etc/containers/policy.json - mode: 0644 - overwrite: true - contents: - inline: | - { - "default": [ - { - "type": "insecureAcceptAnything" - } - ], - "transports": { - "docker": { - "registry.access.redhat.com": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ], - "registry.redhat.io": [ - { - "type": "signedBy", - "keyType": "GPGKeys", - "keyPath": "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release" - } - ] - }, - "docker-daemon": { - "": [ - { - "type": "insecureAcceptAnything" - } - ] - } - } - } ----- - -. Use Butane to generate a machine config YAML file, `51-worker-rh-registry-trust.yaml`, containing the file to be written to disk on the worker nodes: -+ -[source,terminal] ----- -$ butane 51-worker-rh-registry-trust.bu -o 51-worker-rh-registry-trust.yaml ----- - -. Apply the created machine config: -+ -[source,terminal] ----- -$ oc apply -f 51-worker-rh-registry-trust.yaml ----- - -. Check that the worker machine config pool has rolled out with the new machine config: - -.. 
Check that the new machine config was created: -+ -[source,terminal] ----- -$ oc get mc ----- -+ -.Sample output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -00-worker a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-master-container-runtime a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-master-kubelet a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-worker-container-runtime a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -01-worker-kubelet a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -51-master-rh-registry-trust 3.2.0 13s -51-worker-rh-registry-trust 3.2.0 53s <1> -99-master-generated-crio-seccomp-use-default 3.2.0 25m -99-master-generated-registries a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -99-master-ssh 3.2.0 28m -99-worker-generated-crio-seccomp-use-default 3.2.0 25m -99-worker-generated-registries a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 25m -99-worker-ssh 3.2.0 28m -rendered-master-af1e7ff78da0a9c851bab4be2777773b a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 8s -rendered-master-cd51fd0c47e91812bfef2765c52ec7e6 a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 24m -rendered-worker-2b52f75684fbc711bd1652dd86fd0b82 a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 24m -rendered-worker-be3b3bce4f4aa52a62902304bac9da3c a2178ad522c49ee330b0033bb5cb5ea132060b0a 3.2.0 48s <2> ----- -<1> New machine config -<2> New rendered machine config - -.. Check that the worker machine config pool is updating with the new machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-af1e7ff78da0a9c851bab4be2777773b True False False 3 3 3 0 30m -worker rendered-worker-be3b3bce4f4aa52a62902304bac9da3c False True False 3 0 0 0 30m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the new machine config. When the field becomes `False`, the worker machine config pool has rolled out to the new machine config. - -. If your cluster uses any RHEL7 worker nodes, when the worker machine config pool is updated, create YAML files on those nodes in the `/etc/containers/registries.d` directory, which specify the location of the detached signatures for a given registry server. The following example works only for images hosted in `registry.access.redhat.com` and `registry.redhat.io`. - -.. Start a debug session to each RHEL7 worker node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -.. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -.. Create a `/etc/containers/registries.d/registry.redhat.io.yaml` file that contains the following: -+ -[source,terminal] ----- -docker: - registry.redhat.io: - sigstore: https://registry.redhat.io/containers/sigstore ----- - -.. Create a `/etc/containers/registries.d/registry.access.redhat.com.yaml` file that contains the following: -+ -[source,terminal] ----- -docker: - registry.access.redhat.com: - sigstore: https://access.redhat.com/webassets/docker/content/sigstore ----- - -.. Exit the debug session. 
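-
-Optionally, from a host with network access, you can confirm that the signature stores referenced in these files respond. The exact response depends on the endpoint, but a connection error usually points to a proxy or firewall issue. For example:
-
-[source,terminal]
----
-$ curl -I https://registry.redhat.io/containers/sigstore
-$ curl -I https://access.redhat.com/webassets/docker/content/sigstore
----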
diff --git a/modules/contributing-quick-starts.adoc b/modules/contributing-quick-starts.adoc deleted file mode 100644 index aa0c7b0a2523..000000000000 --- a/modules/contributing-quick-starts.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/creating-quick-start-tutorials.adoc - -:_content-type: PROCEDURE -[id="contributing-quick-starts_{context}"] -= Contributing quick starts - -{product-title} introduces the quick start custom resource, which is defined by a `ConsoleQuickStart` object. Operators and administrators can use this resource to contribute quick starts to the cluster. - -.Prerequisites - -* You must have cluster administrator privileges. - -.Procedure - -. To create a new quick start, run: -+ -[source,yaml] ----- -$ oc get -o yaml consolequickstart spring-with-s2i > my-quick-start.yaml ----- - -. Run: -+ -[source,yaml] ----- -$ oc create -f my-quick-start.yaml ----- - -. Update the YAML file using the guidance outlined in this documentation. - -. Save your edits. - -[id="viewing-quick-start-api-documentation_{context}"] -== Viewing the quick start API documentation - -.Procedure - -* To see the quick start API documentation, run: -+ -[source,terminal] ----- -$ oc explain consolequickstarts ----- - -Run `oc explain -h` for more information about `oc explain` usage. diff --git a/modules/control-plane-machine-set-operator.adoc b/modules/control-plane-machine-set-operator.adoc deleted file mode 100644 index 23fe485c8489..000000000000 --- a/modules/control-plane-machine-set-operator.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="control-plane-machine-set-operator_{context}"] -= Control Plane Machine Set Operator - -[NOTE] -==== -This Operator is available for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere. -==== - -[discrete] -== Purpose - -The Control Plane Machine Set Operator automates the management of control plane machine resources within an {product-title} cluster. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-control-plane-machine-set-operator[cluster-control-plane-machine-set-operator] - -[discrete] -== CRDs - -* `controlplanemachineset.machine.openshift.io` -** Scope: Namespaced -** CR: `ControlPlaneMachineSet` -** Validation: Yes \ No newline at end of file diff --git a/modules/copying-files-pods-and-containers.adoc b/modules/copying-files-pods-and-containers.adoc deleted file mode 100644 index 42c855304989..000000000000 --- a/modules/copying-files-pods-and-containers.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="copying-files-pods-and-containers_{context}"] -= Copying files to and from pods and containers - -You can copy files to and from a pod to test configuration changes or gather diagnostic information. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Copy a file to a pod: -+ -[source,terminal] ----- -$ oc cp <local_path> <pod_name>:/<path> -c <container_name> <1> ----- -<1> The first container in a pod is selected if the `-c` option is not specified. - -. 
Copy a file from a pod:
-+
-[source,terminal]
----
-$ oc cp <pod_name>:/<path> -c <container_name> <local_path> <1>
----
-<1> The first container in a pod is selected if the `-c` option is not specified.
-+
-[NOTE]
-====
-For `oc cp` to function, the `tar` binary must be available within the container.
-====
diff --git a/modules/core-user-password.adoc b/modules/core-user-password.adoc
deleted file mode 100644
index ffe50da84b55..000000000000
--- a/modules/core-user-password.adoc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Module included in the following assemblies:
-//
-// * post_installation_configuration/machine-configuration-tasks.adoc
-
-:_content-type: PROCEDURE
-[id="core-user-password_{context}"]
-= Changing the core user password for node access
-
-By default, {op-system-first} creates a user named `core` on the nodes in your cluster. You can use the `core` user to access the node through a cloud provider serial console or a bare metal baseboard management controller (BMC). This can be helpful, for example, if a node is down and you cannot access that node by using SSH or the `oc debug node` command. However, by default, there is no password for this user, so you cannot log in without creating one.
-
-You can create a password for the `core` user by using a machine config. The Machine Config Operator (MCO) assigns the password and injects the password into the `/etc/shadow` file, allowing you to log in with the `core` user. The MCO does not examine the password hash. As such, the MCO cannot report if there is a problem with the password.
-
-[NOTE]
-====
-* The password works only through a cloud provider serial console or a BMC. It does not work with SSH.
-
-* If you have a machine config that includes an `/etc/shadow` file or a systemd unit that sets a password, it takes precedence over the password hash.
-====
-
-You can change the password, if needed, by editing the machine config you used to create the password. Also, you can remove the password by deleting the machine config. Deleting the machine config does not remove the user account.
-
-.Prerequisites
-
-* Create a hashed password by using a tool that is supported by your operating system.
-
-.Procedure
-
-. Create a machine config file that contains the `core` username and the hashed password:
-+
-[source,yaml]
----
-apiVersion: machineconfiguration.openshift.io/v1
-kind: MachineConfig
-metadata:
-  labels:
-    machineconfiguration.openshift.io/role: worker
-  name: set-core-user-password
-spec:
-  config:
-    ignition:
-      version: 3.2.0
-    passwd:
-      users:
-      - name: core <1>
-        passwordHash: $6$2sE/010goDuRSxxv$o18K52wor.wIwZp <2>
----
-<1> This must be `core`.
-<2> The hashed password to use with the `core` account.
-
-. Create the machine config by running the following command:
-+
-[source,terminal]
----
-$ oc create -f <file-name>.yaml
----
-+
-The nodes do not reboot and should become available in a few moments. You can use the `oc get mcp` command to watch for the machine config pools to be updated, as shown in the following example:
-+
-[source,terminal]
----
-NAME     CONFIG                                             UPDATED   UPDATING   DEGRADED   MACHINECOUNT   READYMACHINECOUNT   UPDATEDMACHINECOUNT   DEGRADEDMACHINECOUNT   AGE
-master   rendered-master-d686a3ffc8fdec47280afec446fce8dd   True      False      False      3              3                   3                      0                      64m
-worker   rendered-worker-4605605a5b1f9de1d061e9d350f251e5   False     True       False      3              0                   0                      0                      64m
----
-
-.Verification
-
-. After the nodes return to the `UPDATED=True` state, start a debug session for a node by running the following command:
-+
-[source,terminal]
----
-$ oc debug node/<node_name>
----
-
-. 
Set `/host` as the root directory within the debug shell by running the following command: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -. Check the contents of the `/etc/shadow` file: -+ -.Example output -[source,terminal] ----- -... -core:$6$2sE/010goDuRSxxv$o18K52wor.wIwZp:19418:0:99999:7::: -... ----- -+ -The hashed password is assigned to the `core` user. - diff --git a/modules/coreos-layering-configuring.adoc b/modules/coreos-layering-configuring.adoc deleted file mode 100644 index 8b7861cbc422..000000000000 --- a/modules/coreos-layering-configuring.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: PROCEDURE -[id="coreos-layering-configuring_{context}"] -= Applying a {op-system} custom layered image - -You can easily configure {op-system-first} image layering on the nodes in specific machine config pools. The Machine Config Operator (MCO) reboots those nodes with the new custom layered image, overriding the base {op-system-first} image. - -To apply a custom layered image to your cluster, you must have the custom layered image in a repository that your cluster can access. Then, create a `MachineConfig` object that points to the custom layered image. You need a separate `MachineConfig` object for each machine config pool that you want to configure. - -[IMPORTANT] -==== -When you configure a custom layered image, {product-title} no longer automatically updates any node that uses the custom layered image. You become responsible for manually updating your nodes as appropriate. If you roll back the custom layer, {product-title} will again automatically update the node. See the Additional resources section that follows for important information about updating nodes that use a custom layered image. -==== - -.Prerequisites - -* You must create a custom layered image that is based on an {product-title} image digest, not a tag. -+ -[NOTE] -==== -You should use the same base {op-system} image that is installed on the rest of your cluster. Use the `oc adm release info --image-for rhel-coreos` command to obtain the base image being used in your cluster. -==== -+ -For example, the following Containerfile creates a custom layered image from an {product-title} 4.13 image and a Hotfix package: -+ -.Example Containerfile for a custom layer image -[source,yaml] ----- -# Using a 4.12.0 image -FROM quay.io/openshift-release/ocp-release@sha256... <1> -#Install hotfix rpm -RUN rpm-ostree override replace https://example.com/hotfixes/haproxy-1.0.16-5.el8.src.rpm && \ <2> - rpm-ostree cleanup -m && \ - ostree container commit ----- -<1> Specifies the {op-system} base image of your cluster. -<2> Specifies the path to the Hotfix package. -+ -[NOTE] -==== -Instructions on how to create a Containerfile are beyond the scope of this documentation. -==== - -* Because the process for building a custom layered image is performed outside of the cluster, you must use the `--authfile /path/to/pull-secret` option with Podman or Buildah. Alternatively, to have the pull secret read by these tools automatically, you can add it to one of the default file locations: `~/.docker/config.json`, `$XDG_RUNTIME_DIR/containers/auth.json`, `~/.docker/config.json`, or `~/.dockercfg`. Refer to the `containers-auth.json` man page for more information. - -* You must push the custom layered image to a repository that your cluster can access. - -.Procedure - -. Create a machine config file. - -.. 
Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker <1> - name: os-layer-custom -spec: - osImageURL: quay.io/my-registry/custom-image@sha256... <2> ----- -<1> Specifies the machine config pool to apply the custom layered image. -<2> Specifies the path to the custom layered image in the repository. - -.. Create the `MachineConfig` object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- -+ -[IMPORTANT] -==== -It is strongly recommended that you test your images outside of your production environment before rolling out to your cluster. -==== - -.Verification - -You can verify that the custom layered image is applied by performing any of the following checks: - -. Check that the worker machine config pool has rolled out with the new machine config: - -.. Check that the new machine config is created: -+ -[source,terminal] ----- -$ oc get mc ----- -+ -.Sample output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -00-worker 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-master-container-runtime 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-master-kubelet 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-worker-container-runtime 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -01-worker-kubelet 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-master-generated-registries 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-master-ssh 3.2.0 98m -99-worker-generated-registries 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -99-worker-ssh 3.2.0 98m -os-layer-custom 10s <1> -rendered-master-15961f1da260f7be141006404d17d39b 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -rendered-worker-5aff604cb1381a4fe07feaf1595a797e 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 95m -rendered-worker-5de4837625b1cbc237de6b22bc0bc873 5bdb57489b720096ef912f738b46330a8f577803 3.2.0 4s <2> ----- -<1> New machine config -<2> New rendered machine config - -.. Check that the `osImageURL` value in the new machine config points to the expected image: -+ -[source,terminal] ----- -$ oc describe mc rendered-master-4e8be63aef68b843b546827b6ebe0913 ----- -+ -.Example output -[source,terminal] ----- -Name: rendered-master-4e8be63aef68b843b546827b6ebe0913 -Namespace: -Labels: <none> -Annotations: machineconfiguration.openshift.io/generated-by-controller-version: 8276d9c1f574481043d3661a1ace1f36cd8c3b62 - machineconfiguration.openshift.io/release-image-version: 4.13.0-ec.3 -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfig -... - Os Image URL: quay.io/my-registry/custom-image@sha256... ----- - -.. Check that the associated machine config pool is updating with the new machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-6faecdfa1b25c114a58cf178fbaa45e2 True False False 3 3 3 0 39m -worker rendered-worker-6b000dbc31aaee63c6a2d56d04cd4c1b False True False 3 0 0 0 39m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the new machine config. When the field becomes `False`, the worker machine config pool has rolled out to the new machine config. - -.. 
Check the nodes to see that scheduling on the nodes is disabled. This indicates that the change is being applied: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.26.0 -ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.26.0 -ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.26.0 ----- - -. When the node is back in the `Ready` state, check that the node is using the custom layered image: - -.. Open an `oc debug` session to the node. For example: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-155-125.us-west-1.compute.internal ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Run the `rpm-ostree status` command to view that the custom layered image is in use: -+ -[source,terminal] ----- -sh-4.4# sudo rpm-ostree status ----- -+ -.Example output -+ ----- -State: idle -Deployments: -* ostree-unverified-registry:quay.io/my-registry/... - Digest: sha256:... ----- - diff --git a/modules/coreos-layering-removing.adoc b/modules/coreos-layering-removing.adoc deleted file mode 100644 index 582711e2700a..000000000000 --- a/modules/coreos-layering-removing.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: PROCEDURE -[id="coreos-layering-removing_{context}"] -= Removing a {op-system} custom layered image - -You can easily revert {op-system-first} image layering from the nodes in specific machine config pools. The Machine Config Operator (MCO) reboots those nodes with the cluster base {op-system-first} image, overriding the custom layered image. - -To remove a {op-system-first} custom layered image from your cluster, you need to delete the machine config that applied the image. - -.Procedure - -. Delete the machine config that applied the custom layered image. -+ -[source,terminal] ----- -$ oc delete mc os-layer-custom ----- -+ -After deleting the machine config, the nodes reboot. - -.Verification - -You can verify that the custom layered image is removed by performing any of the following checks: - -. Check that the worker machine config pool is updating with the previous machine config: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Sample output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-6faecdfa1b25c114a58cf178fbaa45e2 True False False 3 3 3 0 39m -worker rendered-worker-6b000dbc31aaee63c6a2d56d04cd4c1b False True False 3 0 0 0 39m <1> ----- -<1> When the `UPDATING` field is `True`, the machine config pool is updating with the previous machine config. When the field becomes `False`, the worker machine config pool has rolled out to the previous machine config. - -. Check the nodes to see that scheduling on the nodes is disabled. 
This indicates that the change is being applied: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-148-79.us-west-1.compute.internal Ready worker 32m v1.26.0 -ip-10-0-155-125.us-west-1.compute.internal Ready,SchedulingDisabled worker 35m v1.26.0 -ip-10-0-170-47.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-174-77.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-211-49.us-west-1.compute.internal Ready control-plane,master 42m v1.26.0 -ip-10-0-218-151.us-west-1.compute.internal Ready worker 31m v1.26.0 ----- - -. When the node is back in the `Ready` state, check that the node is using the base image: - -.. Open an `oc debug` session to the node. For example: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-155-125.us-west-1.compute.internal ----- - -.. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -.. Run the `rpm-ostree status` command to view that the custom layered image is in use: -+ -[source,terminal] ----- -sh-4.4# sudo rpm-ostree status ----- -+ -.Example output -+ ----- -State: idle -Deployments: -* ostree-unverified-registry:podman pull quay.io/openshift-release-dev/ocp-release@sha256:e2044c3cfebe0ff3a99fc207ac5efe6e07878ad59fd4ad5e41f88cb016dacd73 - Digest: sha256:e2044c3cfebe0ff3a99fc207ac5efe6e07878ad59fd4ad5e41f88cb016dacd73 ----- diff --git a/modules/coreos-layering-updating.adoc b/modules/coreos-layering-updating.adoc deleted file mode 100644 index d8b07552bbe7..000000000000 --- a/modules/coreos-layering-updating.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation_configuration/coreos-layering.adoc - -:_content-type: REFERENCE -[id="coreos-layering-updating_{context}"] -= Updating with a {op-system} custom layered image - -When you configure {op-system-first} image layering, {product-title} no longer automatically updates the node pool that uses the custom layered image. You become responsible to manually update your nodes as appropriate. - -To update a node that uses a custom layered image, follow these general steps: - -. The cluster automatically upgrades to version x.y.z+1, except for the nodes that use the custom layered image. - -. You could then create a new Containerfile that references the updated {product-title} image and the RPM that you had previously applied. - -. Create a new machine config that points to the updated custom layered image. - -Updating a node with a custom layered image is not required. However, if that node gets too far behind the current {product-title} version, you could experience unexpected results. 
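-
-The following is a minimal sketch of the rebuild and push steps described above. It assumes a hypothetical `Containerfile.update`, an `:updated` tag, and the `quay.io/my-registry/custom-image` repository used in the earlier examples; adjust the file name, tag, repository, and pull secret path for your environment:
-
-[source,terminal]
----
-# Identify the base image used by the updated cluster
-$ oc adm release info --image-for rhel-coreos
-
-# Rebuild the custom layered image against the new base image and push it
-$ podman build --authfile /path/to/pull-secret -t quay.io/my-registry/custom-image:updated -f Containerfile.update .
-$ podman push --authfile /path/to/pull-secret quay.io/my-registry/custom-image:updated
----
-
-After the push completes, create or update the `MachineConfig` object so that `osImageURL` points to the digest of the new image, as described in "Applying a {op-system} custom layered image".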
- diff --git a/modules/cpms-changing-aws-instance-type.adoc b/modules/cpms-changing-aws-instance-type.adoc deleted file mode 100644 index ceebcbbb0aeb..000000000000 --- a/modules/cpms-changing-aws-instance-type.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "recommended-control-plane-practices"] -:scale-host: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso-using: -endif::[] - -:_content-type: PROCEDURE -[id="cpms-changing-aws-instance-type_{context}"] -= Changing the Amazon Web Services instance type by using a control plane machine set - -You can change the Amazon Web Services (AWS) instance type that your control plane machines use by updating the specification in the control plane machine set custom resource (CR). - -.Prerequisites - -* Your AWS cluster uses a control plane machine set. - -.Procedure - -ifdef::scale-host[] -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-machine-api edit controlplanemachineset.machine.openshift.io cluster ----- -endif::scale-host[] - -. Edit the following line under the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - value: - ... - instanceType: <compatible_aws_instance_type> <1> ----- -<1> Specify a larger AWS instance type with the same base as the previous selection. For example, you can change `m6i.xlarge` to `m6i.2xlarge` or `m6i.4xlarge`. - -. Save your changes. - -ifdef::scale-host[] -** For clusters that use the default `RollingUpdate` update strategy, the Operator automatically propagates the changes to your control plane configuration. - -** For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. -endif::scale-host[] - -ifeval::["{context}" == "recommended-control-plane-practices"] -:!scale-host: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso-using: -endif::[] diff --git a/modules/cpmso-activating.adoc b/modules/cpmso-activating.adoc deleted file mode 100644 index b58bbb82120e..000000000000 --- a/modules/cpmso-activating.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc - -:_content-type: PROCEDURE -[id="cpmso-activating_{context}"] -= Activating the control plane machine set custom resource - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` custom resource (CR) with the correct settings for your cluster exists. On a cluster with a generated CR, you must verify that the configuration in the CR is correct for your cluster and activate it. - -[NOTE] -==== -For more information about the parameters in the CR, see "Control plane machine set configuration". -==== - -.Procedure - -. View the configuration of the CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-machine-api edit controlplanemachineset.machine.openshift.io cluster ----- - -. Change the values of any fields that are incorrect for your cluster configuration. - -. When the configuration is correct, activate the CR by setting the `.spec.state` field to `Active` and saving your changes. 
-+ -[IMPORTANT] -==== -To activate the CR, you must change the `.spec.state` field to `Active` in the same `oc edit` session that you use to update the CR configuration. If the CR is saved with the state left as `Inactive`, the control plane machine set generator resets the CR to its original settings. -==== \ No newline at end of file diff --git a/modules/cpmso-checking-status.adoc b/modules/cpmso-checking-status.adoc deleted file mode 100644 index c89547619dcf..000000000000 --- a/modules/cpmso-checking-status.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc -// * machine_management/cpmso-troubleshooting.adoc -// * machine_management/cpmso-disabling.adoc - -ifeval::["{context}" == "cpmso-disabling"] -:cpmso-disabling: -endif::[] - -:_content-type: PROCEDURE -[id="cpmso-checking-status_{context}"] -= Checking the control plane machine set custom resource state - -You can verify the existence and state of the `ControlPlaneMachineSet` custom resource (CR). - -.Procedure - -* Determine the state of the CR by running the following command: -+ -[source,terminal] ----- -$ oc get controlplanemachineset.machine.openshift.io cluster \ - --namespace openshift-machine-api ----- - -** A result of `Active` indicates that the `ControlPlaneMachineSet` CR exists and is activated. No administrator action is required. - -** A result of `Inactive` indicates that a `ControlPlaneMachineSet` CR exists but is not activated. - -** A result of `NotFound` indicates that there is no existing `ControlPlaneMachineSet` CR. - -ifndef::cpmso-disabling[] -.Next steps - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` CR with the correct settings for your cluster exists. - -* If your cluster has an existing CR, you must verify that the configuration in the CR is correct for your cluster. - -* If your cluster does not have an existing CR, you must create one with the correct configuration for your cluster. -endif::[] - -ifeval::["{context}" == "cpmso-disabling"] -:!cpmso-disabling: -endif::[] \ No newline at end of file diff --git a/modules/cpmso-control-plane-recovery.adoc b/modules/cpmso-control-plane-recovery.adoc deleted file mode 100644 index 4660ca0dfdc9..000000000000 --- a/modules/cpmso-control-plane-recovery.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: CONCEPT -[id="cpmso-control-plane-recovery_{context}"] -= Recovery of failed control plane machines - -The Control Plane Machine Set Operator automates the recovery of control plane machines. When a control plane machine is deleted, the Operator creates a replacement with the configuration that is specified in the `ControlPlaneMachineSet` custom resource (CR). - -For clusters that use control plane machine sets, you can configure a machine health check. The machine health check deletes unhealthy control plane machines so that they are replaced. - -[IMPORTANT] -==== -If you configure a `MachineHealthCheck` resource for the control plane, set the value of `maxUnhealthy` to `1`. - -This configuration ensures that the machine health check takes no action when multiple control plane machines appear to be unhealthy. Multiple unhealthy control plane machines can indicate that the etcd cluster is degraded or that a scaling operation to replace a failed machine is in progress. 
- -If the etcd cluster is degraded, manual intervention might be required. If a scaling operation is in progress, the machine health check should allow it to finish. -==== \ No newline at end of file diff --git a/modules/cpmso-creating-cr.adoc b/modules/cpmso-creating-cr.adoc deleted file mode 100644 index 1fbc26b04545..000000000000 --- a/modules/cpmso-creating-cr.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-getting-started.adoc - -:_content-type: PROCEDURE -[id="cpmso-creating-cr_{context}"] -= Creating a control plane machine set custom resource - -To use the control plane machine set, you must ensure that a `ControlPlaneMachineSet` custom resource (CR) with the correct settings for your cluster exists. On a cluster without a generated CR, you must create the CR manually and activate it. - -[NOTE] -==== -For more information about the structure and parameters of the CR, see "Control plane machine set configuration". -==== - -.Procedure - -. Create a YAML file using the following template: -+ --- -.Control plane machine set CR YAML file template -[source,yaml] ----- -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -metadata: - name: cluster - namespace: openshift-machine-api -spec: - replicas: 3 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <1> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - state: Active <2> - strategy: - type: RollingUpdate <3> - template: - machineType: machines_v1beta1_machine_openshift_io - machines_v1beta1_machine_openshift_io: - failureDomains: - platform: <platform> <4> - <platform_failure_domains> <5> - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <6> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - spec: - providerSpec: - value: - <platform_provider_spec> <7> ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. You must specify this value when you create a `ControlPlaneMachineSet` CR. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -<2> Specify the state of the Operator. When the state is `Inactive`, the Operator is not operational. You can activate the Operator by setting the value to `Active`. -+ -[IMPORTANT] -==== -Before you activate the CR, you must ensure that its configuration is correct for your cluster requirements. -==== -<3> Specify the update strategy for the cluster. Valid values are `OnDelete` and `RollingUpdate`. The default value is `RollingUpdate`. For more information about update strategies, see "Updating the control plane configuration". -<4> Specify your cloud provider platform name. Valid values are `AWS`, `Azure`, `GCP`, `Nutanix`, and `VSphere`. -<5> Add the `<platform_failure_domains>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample failure domain configuration for your cloud provider. -+ -[NOTE] -==== -VMware vSphere does not support failure domains. For vSphere clusters, replace `<platform_failure_domains>` with an empty `failureDomains:` parameter. -==== -<6> Specify the infrastructure ID. 
-<7> Add the `<platform_provider_spec>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample provider specification for your cloud provider. --- - -. Refer to the sample YAML for a control plane machine set CR and populate your file with values that are appropriate for your cluster configuration. - -. Refer to the sample failure domain configuration and sample provider specification for your cloud provider and update those sections of your file with the appropriate values. - -. When the configuration is correct, activate the CR by setting the `.spec.state` field to `Active` and saving your changes. - -. Create the CR from your YAML file by running the following command: -+ -[source,terminal] ----- -$ oc create -f <control_plane_machine_set>.yaml ----- -+ -where `<control_plane_machine_set>` is the name of the YAML file that contains the CR configuration. \ No newline at end of file diff --git a/modules/cpmso-deleting.adoc b/modules/cpmso-deleting.adoc deleted file mode 100644 index 305e305792b5..000000000000 --- a/modules/cpmso-deleting.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-disabling.adoc - -:_content-type: PROCEDURE -[id="cpmso-deleting_{context}"] -= Deleting the control plane machine set - -To stop managing control plane machines with the control plane machine set on your cluster, you must delete the `ControlPlaneMachineSet` custom resource (CR). - -.Procedure - -* Delete the control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc delete controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -.Verification - -* Check the control plane machine set custom resource state. A result of `Inactive` indicates that the removal and replacement process is successful. A `ControlPlaneMachineSet` CR exists but is not activated. diff --git a/modules/cpmso-failure-domains-balancing.adoc b/modules/cpmso-failure-domains-balancing.adoc deleted file mode 100644 index ae9a19f4831a..000000000000 --- a/modules/cpmso-failure-domains-balancing.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: CONCEPT -[id="cpmso-failure-domains-balancing_{context}"] -= Balancing control plane machines - -The control plane machine set balances control plane machines across the failure domains that are specified in the custom resource (CR). - -//If failure domains must be reused, they are selected alphabetically by name. -When possible, the control plane machine set uses each failure domain equally to ensure appropriate fault tolerance. If there are fewer failure domains than control plane machines, failure domains are selected for reuse alphabetically by name. For clusters with no failure domains specified, all control plane machines are placed within a single failure domain. - -Some changes to the failure domain configuration cause the control plane machine set to rebalance the control plane machines. For example, if you add failure domains to a cluster with fewer failure domains than control plane machines, the control plane machine set rebalances the machines across all available failure domains. 
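For example, in a hypothetical cluster with three control plane machines and only two failure domains, named `a` and `b`, each failure domain is used once and `a` is then reused because it comes first alphabetically, so two machines are placed in `a` and one machine is placed in `b`.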
\ No newline at end of file diff --git a/modules/cpmso-failure-domains-provider.adoc b/modules/cpmso-failure-domains-provider.adoc deleted file mode 100644 index fcf5aa850443..000000000000 --- a/modules/cpmso-failure-domains-provider.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-resiliency.adoc - -:_content-type: REFERENCE -[id="cpmso-failure-domains-provider_{context}"] -= Failure domain platform support and configuration - -The control plane machine set concept of a failure domain is analogous to existing concepts on cloud providers. Not all platforms support the use of failure domains. - -.Failure domain support matrix -[cols="<.^,^.^,^.^"] -|==== -|Cloud provider |Support for failure domains |Provider nomenclature - -|Amazon Web Services (AWS) -|X -|link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones[Availability Zone (AZ)] - -|Google Cloud Platform (GCP) -|X -|link:https://cloud.google.com/compute/docs/regions-zones[zone] - -|Nutanix -//link:https://portal.nutanix.com/page/documents/details?targetId=Web-Console-Guide-Prism-v6_1:arc-failure-modes-c.html[Availability domain] -| -|Not applicable ^[1]^ - -|Microsoft Azure -|X -|link:https://learn.microsoft.com/en-us/azure/azure-web-pubsub/concept-availability-zones[Azure availability zone] - -|VMware vSphere -| -|Not applicable -|==== -[.small] --- -1. Nutanix has a failure domain concept, but {product-title} {product-version} does not include support for this feature. --- - -The failure domain configuration in the control plane machine set custom resource (CR) is platform-specific. For more information about failure domain parameters in the CR, see the sample failure domain configuration for your provider. \ No newline at end of file diff --git a/modules/cpmso-feat-auto-update.adoc b/modules/cpmso-feat-auto-update.adoc deleted file mode 100644 index 562ebf30780f..000000000000 --- a/modules/cpmso-feat-auto-update.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-auto-update_{context}"] -= Automatically updating the control plane configuration - -You can use the `RollingUpdate` update strategy to automatically propagate changes to your control plane configuration. - -For clusters that use the default `RollingUpdate` update strategy, the Operator creates a replacement control plane machine with the configuration that is specified in the CR. When the replacement control plane machine is ready, the Operator deletes the control plane machine that is marked for replacement. The replacement machine then joins the control plane. - -If multiple control plane machines are marked for replacement, the Operator repeats this replacement process one machine at a time until each machine is replaced. 
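For example, one way to observe a rolling replacement while it is in progress is to watch the control plane machines. The following command is illustrative and assumes the standard control plane machine labels that are used elsewhere in this document:

[source,terminal]
----
$ oc get machines \
  -l machine.openshift.io/cluster-api-machine-role==master \
  -n openshift-machine-api \
  -w
----

The `-w` flag watches for changes, so you can see each replacement machine being provisioned and each outgoing machine being deleted.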
diff --git a/modules/cpmso-feat-config-update.adoc b/modules/cpmso-feat-config-update.adoc deleted file mode 100644 index be0e8bc919e8..000000000000 --- a/modules/cpmso-feat-config-update.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: PROCEDURE -[id="cpmso-feat-config-update_{context}"] -= Updating the control plane configuration - -You can make changes to the configuration of the machines in the control plane by updating the specification in the control plane machine set custom resource (CR). - -The Control Plane Machine Set Operator monitors the control plane machines and compares their configuration with the specification in the control plane machine set CR. When there is a discrepancy between the specification in the CR and the configuration of a control plane machine, the Operator marks that control plane machine for replacement. - -[NOTE] -==== -For more information about the parameters in the CR, see "Control plane machine set configuration". -==== - -.Prerequisites - -* Your cluster has an activated and functioning Control Plane Machine Set Operator. - -.Procedure - -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc edit controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -. Change the values of any fields that you want to update in your cluster configuration. - -. Save your changes. - -.Next steps - -* For clusters that use the default `RollingUpdate` update strategy, the changes to your control plane configuration are propagated automatically. - -* For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. \ No newline at end of file diff --git a/modules/cpmso-feat-test-changes.adoc b/modules/cpmso-feat-test-changes.adoc deleted file mode 100644 index d5513cf610e9..000000000000 --- a/modules/cpmso-feat-test-changes.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-test-changes_{context}"] -= Testing changes to the control plane configuration - -You can use the `OnDelete` update strategy to test changes to your control plane configuration. With this update strategy, you replace control plane machines manually. Manually replacing machines allows you to test changes to your configuration on a single machine before applying the changes more broadly. - -For clusters that are configured to use the `OnDelete` update strategy, the Operator creates a replacement control plane machine when you delete an existing machine. When the replacement control plane machine is ready, the etcd Operator allows the existing machine to be deleted. The replacement machine then joins the control plane. - -If multiple control plane machines are deleted, the Operator creates all of the required replacement machines simultaneously. 
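For example, with the `OnDelete` update strategy you can trigger the replacement of a single machine by deleting it. The following commands are illustrative and assume the standard control plane machine labels and the `<control_plane_machine_name>` placeholder that are used elsewhere in this document:

[source,terminal]
----
$ oc get machines \
  -l machine.openshift.io/cluster-api-machine-role==master \
  -n openshift-machine-api
----

[source,terminal]
----
$ oc delete machine <control_plane_machine_name> \
  -n openshift-machine-api
----

The Operator then provisions a replacement with the configuration in the `ControlPlaneMachineSet` CR, and you can verify the configuration on that machine before replacing the remaining machines.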
\ No newline at end of file diff --git a/modules/cpmso-feat-vertical-resize.adoc b/modules/cpmso-feat-vertical-resize.adoc deleted file mode 100644 index b9f08a8bda72..000000000000 --- a/modules/cpmso-feat-vertical-resize.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-about.adoc - -:_content-type: CONCEPT -[id="cpmso-feat-vertical-resize_{context}"] -= Vertical resizing of the control plane \ No newline at end of file diff --git a/modules/cpmso-overview.adoc b/modules/cpmso-overview.adoc deleted file mode 100644 index 20f2978e9997..000000000000 --- a/modules/cpmso-overview.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-about.adoc - -:_content-type: CONCEPT -[id="cpmso-overview_{context}"] -= Control Plane Machine Set Operator overview - -The Control Plane Machine Set Operator uses the `ControlPlaneMachineSet` custom resource (CR) to automate management of the control plane machine resources within your {product-title} cluster. - -When the state of the cluster control plane machine set is set to `Active`, the Operator ensures that the cluster has the correct number of control plane machines with the specified configuration. This allows the automated replacement of degraded control plane machines and rollout of changes to the control plane. - -A cluster has only one control plane machine set, and the Operator only manages objects in the `openshift-machine-api` namespace. \ No newline at end of file diff --git a/modules/cpmso-ts-ilb-missing.adoc b/modules/cpmso-ts-ilb-missing.adoc deleted file mode 100644 index 5e43319a9129..000000000000 --- a/modules/cpmso-ts-ilb-missing.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="cpmso-ts-ilb-missing_{context}"] -= Adding a missing Azure internal load balancer - -The `internalLoadBalancer` parameter is required in both the `ControlPlaneMachineSet` and control plane `Machine` custom resources (CRs) for Azure. If this parameter is not preconfigured on your cluster, you must add it to both CRs. - -For more information about where this parameter is located in the Azure provider specification, see the sample Azure provider specification. The placement in the control plane `Machine` CR is similar. - -.Procedure - -. List the control plane machines in your cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machines \ - -l machine.openshift.io/cluster-api-machine-role==master \ - -n openshift-machine-api ----- - -. For each control plane machine, edit the CR by running the following command: -+ -[source,terminal] ----- -$ oc edit machine <control_plane_machine_name> ----- - -. Add the `internalLoadBalancer` parameter with the correct details for your cluster and save your changes. - -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc edit controlplanemachineset.machine.openshift.io cluster \ - -n openshift-machine-api ----- - -. Add the `internalLoadBalancer` parameter with the correct details for your cluster and save your changes. - -.Next steps - -* For clusters that use the default `RollingUpdate` update strategy, the Operator automatically propagates the changes to your control plane configuration. 
- -* For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. \ No newline at end of file diff --git a/modules/cpmso-ts-mhc-etcd-degraded.adoc b/modules/cpmso-ts-mhc-etcd-degraded.adoc deleted file mode 100644 index 7ccad84470cb..000000000000 --- a/modules/cpmso-ts-mhc-etcd-degraded.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="cpmso-ts-etcd-degraded_{context}"] -= Recovering a degraded etcd Operator - -Certain situations can cause the etcd Operator to become degraded. - -For example, while performing remediation, the machine health check might delete a control plane machine that is hosting etcd. If the etcd member is not reachable at that time, the etcd Operator becomes degraded. - -When the etcd Operator is degraded, manual intervention is required to force the Operator to remove the failed member and restore the cluster state. - -.Procedure - -. List the control plane machines in your cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machines \ - -l machine.openshift.io/cluster-api-machine-role==master \ - -n openshift-machine-api \ - -o wide ----- -+ -Any of the following conditions might indicate a failed control plane machine: -+ --- -** The `STATE` value is `stopped`. -** The `PHASE` value is `Failed`. -** The `PHASE` value is `Deleting` for more than ten minutes. --- -+ -[IMPORTANT] -==== -Before continuing, ensure that your cluster has two healthy control plane machines. Performing the actions in this procedure on more than one control plane machine risks losing etcd quorum and can cause data loss. - -If you have lost the majority of your control plane hosts, leading to etcd quorum loss, then you must follow the disaster recovery procedure "Restoring to a previous cluster state" instead of this procedure. -==== - -. Edit the machine CR for the failed control plane machine by running the following command: -+ -[source,terminal] ----- -$ oc edit machine <control_plane_machine_name> ----- - -. Remove the contents of the `lifecycleHooks` parameter from the failed control plane machine and save your changes. -+ -The etcd Operator removes the failed machine from the cluster and can then safely add new etcd members. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-aws.adoc b/modules/cpmso-yaml-failure-domain-aws.adoc deleted file mode 100644 index c84f33886658..000000000000 --- a/modules/cpmso-yaml-failure-domain-aws.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-aws_{context}"] -= Sample AWS failure domain configuration - -The control plane machine set concept of a failure domain is analogous to existing AWS concept of an link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones[_Availability Zone (AZ)_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring AWS failure domains in the control plane machine set, you must specify the availability zone name and the subnet to use. 
- -.Sample AWS failure domain values -[source,yaml] ----- -failureDomains: - aws: - - placement: - availabilityZone: <aws_zone_a> <1> - subnet: <2> - filters: - - name: tag:Name - values: - - <cluster_id>-private-<aws_zone_a> <3> - type: Filters <4> - - placement: - availabilityZone: <aws_zone_b> <5> - subnet: - filters: - - name: tag:Name - values: - - <cluster_id>-private-<aws_zone_b> <6> - type: Filters - platform: AWS <7> ----- -<1> Specifies an AWS availability zone for the first failure domain. -<2> Specifies a subnet configuration. In this example, the subnet type is `Filters`, so there is a `filters` stanza. -<3> Specifies the subnet name for the first failure domain, using the infrastructure ID and the AWS availability zone. -<4> Specifies the subnet type. The allowed values are `ARN`, `Filters`, and `ID`. The default value is `Filters`. -<5> Specifies an AWS availability zone for an additional failure domain. -<6> Specifies the subnet name for the additional failure domain, using the infrastructure ID and the AWS availability zone. -<7> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-azure.adoc b/modules/cpmso-yaml-failure-domain-azure.adoc deleted file mode 100644 index 32b0efeeb309..000000000000 --- a/modules/cpmso-yaml-failure-domain-azure.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-azure_{context}"] -= Sample Azure failure domain configuration - -The control plane machine set concept of a failure domain is analogous to the existing Azure concept of an link:https://learn.microsoft.com/en-us/azure/azure-web-pubsub/concept-availability-zones[_Azure availability zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring Azure failure domains in the control plane machine set, you must specify the availability zone name. - -.Sample Azure failure domain values -[source,yaml] ----- -failureDomains: - azure: <1> - - zone: "1" - - zone: "2" - - zone: "3" - platform: Azure <2> ----- -<1> Each instance of `zone` specifies an Azure availability zone for a failure domain. -<2> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-failure-domain-gcp.adoc b/modules/cpmso-yaml-failure-domain-gcp.adoc deleted file mode 100644 index fcaa407f0c15..000000000000 --- a/modules/cpmso-yaml-failure-domain-gcp.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-failure-domain-gcp_{context}"] -= Sample GCP failure domain configuration - -The control plane machine set concept of a failure domain is analogous to the existing GCP concept of a link:https://cloud.google.com/compute/docs/regions-zones[_zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. - -When configuring GCP failure domains in the control plane machine set, you must specify the zone name to use.
- -.Sample GCP failure domain values -[source,yaml] ----- -failureDomains: - gcp: - - zone: <gcp_zone_a> <1> - - zone: <gcp_zone_b> <2> - - zone: <gcp_zone_c> - - zone: <gcp_zone_d> - platform: GCP <3> ----- -<1> Specifies a GCP zone for the first failure domain. -<2> Specifies an additional failure domain. Further failure domains are added the same way. -<3> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-aws.adoc b/modules/cpmso-yaml-provider-spec-aws.adoc deleted file mode 100644 index 257276d42306..000000000000 --- a/modules/cpmso-yaml-provider-spec-aws.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-aws_{context}"] -= Sample AWS provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -In the following example, `<cluster_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: - -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample AWS `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - ami: - id: ami-<ami_id_string> <1> - apiVersion: machine.openshift.io/v1beta1 - blockDevices: - - ebs: <2> - encrypted: true - iops: 0 - kmsKey: - arn: "" - volumeSize: 120 - volumeType: gp3 - credentialsSecret: - name: aws-cloud-credentials <3> - deviceIndex: 0 - iamInstanceProfile: - id: <cluster_id>-master-profile <4> - instanceType: m6i.xlarge <5> - kind: AWSMachineProviderConfig <6> - loadBalancers: <7> - - name: <cluster_id>-int - type: network - - name: <cluster_id>-ext - type: network - metadata: - creationTimestamp: null - metadataServiceOptions: {} - placement: <8> - region: <region> <9> - securityGroups: - - filters: - - name: tag:Name - values: - - <cluster_id>-master-sg <10> - subnet: {} <11> - userDataSecret: - name: master-user-data <12> ----- -<1> Specifies the {op-system-first} Amazon Machine Images (AMI) ID for the cluster. The AMI must belong to the same region as the cluster. If you want to use an AWS Marketplace image, you must complete the {product-title} subscription from the link:https://aws.amazon.com/marketplace/fulfillment?productId=59ead7de-2540-4653-a8b0-fa7926d5c845[AWS Marketplace] to obtain an AMI ID for your region. -<2> Specifies the configuration of an encrypted EBS volume. -<3> Specifies the secret name for the cluster. Do not change this value. -<4> Specifies the AWS Identity and Access Management (IAM) instance profile. Do not change this value. -<5> Specifies the AWS instance type for the control plane. -<6> Specifies the cloud provider platform type. Do not change this value. -<7> Specifies the internal (`int`) and external (`ext`) load balancers for the cluster. -<8> This parameter is configured in the failure domain, and is shown with an empty value here. 
If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. -<9> Specifies the AWS region for the cluster. -<10> Specifies the control plane machines security group. -<11> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. -<12> Specifies the control plane user data secret. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-azure.adoc b/modules/cpmso-yaml-provider-spec-azure.adoc deleted file mode 100644 index f2d6576efc61..000000000000 --- a/modules/cpmso-yaml-provider-spec-azure.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-azure_{context}"] -= Sample Azure provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane `Machine` CR that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -In the following example, `<cluster_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: - -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample Azure `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - acceleratedNetworking: true - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials <1> - namespace: openshift-machine-api - diagnostics: {} - image: <2> - offer: "" - publisher: "" - resourceID: /resourceGroups/<cluster_id>-rg/providers/Microsoft.Compute/galleries/gallery_<cluster_id>/images/<cluster_id>-gen2/versions/412.86.20220930 <3> - sku: "" - version: "" - internalLoadBalancer: <cluster_id>-internal <4> - kind: AzureMachineProviderSpec <5> - location: <region> <6> - managedIdentity: <cluster_id>-identity - metadata: - creationTimestamp: null - name: <cluster_id> - networkResourceGroup: <cluster_id>-rg - osDisk: <7> - diskSettings: {} - diskSizeGB: 1024 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: <cluster_id> <8> - resourceGroup: <cluster_id>-rg - subnet: <cluster_id>-master-subnet <9> - userDataSecret: - name: master-user-data <10> - vmSize: Standard_D8s_v3 - vnet: <cluster_id>-vnet - zone: "" <11> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the image details for your control plane machine set. -<3> Specifies an image that is compatible with your instance type. The Hyper-V generation V2 images created by the installation program have a `-gen2` suffix, while V1 images have the same name without the suffix. -<4> Specifies the internal load balancer for the control plane. This field might not be preconfigured but is required in both the `ControlPlaneMachineSet` and control plane `Machine` CRs. -<5> Specifies the cloud provider platform type. Do not change this value. -<6> Specifies the region to place control plane machines on. 
-<7> Specifies the disk configuration for the control plane. -<8> Specifies the public load balancer for the control plane. -<9> Specifies the subnet for the control plane. -<10> Specifies the control plane user data secret. Do not change this value. -<11> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-gcp.adoc b/modules/cpmso-yaml-provider-spec-gcp.adoc deleted file mode 100644 index 3b1f3e29fbad..000000000000 --- a/modules/cpmso-yaml-provider-spec-gcp.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-gcp_{context}"] -= Sample GCP provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. You can omit any field that is set in the failure domain section of the CR. - -[discrete] -[id="cpmso-yaml-provider-spec-gcp-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. - -Infrastructure ID:: The `<cluster_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -Image path:: The `<path_to_image>` string is the path to the image that was used to create the disk. If you have the OpenShift CLI installed, you can obtain the path to the image by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.machines_v1beta1_machine_openshift_io.spec.providerSpec.value.disks[0].image}{"\n"}' \ - get ControlPlaneMachineSet/cluster ----- - -.Sample GCP `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - canIPForward: false - credentialsSecret: - name: gcp-cloud-credentials <1> - deletionProtection: false - disks: - - autoDelete: true - boot: true - image: <path_to_image> <2> - labels: null - sizeGb: 200 - type: pd-ssd - kind: GCPMachineProviderSpec <3> - machineType: e2-standard-4 - metadata: - creationTimestamp: null - metadataServiceOptions: {} - networkInterfaces: - - network: <cluster_id>-network - subnetwork: <cluster_id>-master-subnet - projectID: <project_name> <4> - region: <region> <5> - serviceAccounts: - - email: <cluster_id>-m@<project_name>.iam.gserviceaccount.com - scopes: - - https://www.googleapis.com/auth/cloud-platform - shieldedInstanceConfig: {} - tags: - - <cluster_id>-master - targetPools: - - <cluster_id>-api - userDataSecret: - name: master-user-data <6> - zone: "" <7> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the path to the image that was used to create the disk. 
-+ -To use a GCP Marketplace image, specify the offer to use: -+ --- -* {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-48-x86-64-202210040145` -* {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-48-x86-64-202206140145` -* {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-48-x86-64-202206140145` --- -<3> Specifies the cloud provider platform type. Do not change this value. -<4> Specifies the name of the GCP project that you use for your cluster. -<5> Specifies the GCP region for the cluster. -<6> Specifies the control plane user data secret. Do not change this value. -<7> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-nutanix.adoc b/modules/cpmso-yaml-provider-spec-nutanix.adoc deleted file mode 100644 index 9d02f6ec0367..000000000000 --- a/modules/cpmso-yaml-provider-spec-nutanix.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-nutanix_{context}"] -= Sample Nutanix provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. - -[discrete] -[id="cpmso-yaml-provider-spec-nutanix-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. - -Infrastructure ID:: The `<cluster_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -.Sample Nutanix `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1 - bootType: "" <1> - categories: <2> - - key: <category_name> - value: <category_value> - cluster: <3> - type: uuid - uuid: <cluster_uuid> - credentialsSecret: - name: nutanix-credentials <4> - image: <5> - name: <cluster_id>-rhcos - type: name - kind: NutanixMachineProviderConfig <6> - memorySize: 16Gi <7> - metadata: - creationTimestamp: null - project: <8> - type: name - name: <project_name> - subnets: <9> - - type: uuid - uuid: <subnet_uuid> - systemDiskSize: 120Gi <10> - userDataSecret: - name: master-user-data <11> - vcpuSockets: 8 <12> - vcpusPerSocket: 1 <13> ----- -<1> Specifies the boot type that the control plane machines use. For more information about boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. Valid values are `Legacy`, `SecureBoot`, or `UEFI`. The default is `Legacy`. -+ -[NOTE] -==== -You must use the `Legacy` boot type in {product-title} {product-version}. 
-==== -<2> Specifies one or more Nutanix Prism categories to apply to control plane machines. This stanza requires `key` and `value` parameters for a category key-value pair that exists in Prism Central. For more information about categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -<3> Specifies a Nutanix Prism Element cluster configuration. In this example, the cluster type is `uuid`, so there is a `uuid` stanza. -<4> Specifies the secret name for the cluster. Do not change this value. -<5> Specifies the image that was used to create the disk. -<6> Specifies the cloud provider platform type. Do not change this value. -<7> Specifies the memory allocated for the control plane machines. -<8> Specifies the Nutanix project that you use for your cluster. In this example, the project type is `name`, so there is a `name` stanza. -<9> Specifies a subnet configuration. In this example, the subnet type is `uuid`, so there is a `uuid` stanza. -<10> Specifies the VM disk size for the control plane machines. -<11> Specifies the control plane user data secret. Do not change this value. -<12> Specifies the number of vCPU sockets allocated for the control plane machines. -<13> Specifies the number of vCPUs for each control plane vCPU socket. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-vsphere.adoc b/modules/cpmso-yaml-provider-spec-vsphere.adoc deleted file mode 100644 index 7dfe933f4cba..000000000000 --- a/modules/cpmso-yaml-provider-spec-vsphere.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-provider-spec-vsphere_{context}"] -= Sample vSphere provider specification - -When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that is created by the installation program. - -.Sample vSphere `providerSpec` values -[source,yaml] ----- -providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: vsphere-cloud-credentials <1> - diskGiB: 120 <2> - kind: VSphereMachineProviderSpec <3> - memoryMiB: 16384 <4> - metadata: - creationTimestamp: null - network: <5> - devices: - - networkName: <vm_network_name> - numCPUs: 4 <6> - numCoresPerSocket: 4 <7> - snapshot: "" - template: <vm_template_name> <8> - userDataSecret: - name: master-user-data <9> - workspace: - datacenter: <vcenter_datacenter_name> <10> - datastore: <vcenter_datastore_name> <11> - folder: <path_to_vcenter_vm_folder> <12> - resourcePool: <vsphere_resource_pool> <13> - server: <vcenter_server_ip> <14> ----- -<1> Specifies the secret name for the cluster. Do not change this value. -<2> Specifies the VM disk size for the control plane machines. -<3> Specifies the cloud provider platform type. Do not change this value. -<4> Specifies the memory allocated for the control plane machines. -<5> Specifies the network on which the control plane is deployed. -<6> Specifies the number of CPUs allocated for the control plane machines. -<7> Specifies the number of cores for each control plane CPU. -<8> Specifies the vSphere VM template to use, such as `user-5ddjd-rhcos`. -<9> Specifies the control plane user data secret. Do not change this value. -<10> Specifies the vCenter Datacenter for the control plane. 
-<11> Specifies the vCenter Datastore for the control plane. -<12> Specifies the path to the vSphere VM folder in vCenter, such as `/dc1/vm/user-inst-5ddjd`. -<13> Specifies the vSphere resource pool for your VMs. -<14> Specifies the vCenter server IP or fully qualified domain name. \ No newline at end of file diff --git a/modules/cpmso-yaml-sample-cr.adoc b/modules/cpmso-yaml-sample-cr.adoc deleted file mode 100644 index e8008b36e217..000000000000 --- a/modules/cpmso-yaml-sample-cr.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/cpmso-configuration.adoc - -:_content-type: REFERENCE -[id="cpmso-yaml-sample-cr_{context}"] -= Sample YAML for a control plane machine set custom resource - -The base of the `ControlPlaneMachineSet` CR is structured the same way for all platforms. - -.Sample `ControlPlaneMachineSet` CR YAML file -[source,yaml] ----- -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -metadata: - name: cluster <1> - namespace: openshift-machine-api -spec: - replicas: 3 <2> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <cluster_id> <3> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - state: Active <4> - strategy: - type: RollingUpdate <5> - template: - machineType: machines_v1beta1_machine_openshift_io - machines_v1beta1_machine_openshift_io: - failureDomains: - platform: <platform> <6> - <platform_failure_domains> <7> - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <cluster_id> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - spec: - providerSpec: - value: - <platform_provider_spec> <8> ----- -<1> Specifies the name of the `ControlPlaneMachineSet` CR, which is `cluster`. Do not change this value. -<2> Specifies the number of control plane machines. Only clusters with three control plane machines are supported, so the `replicas` value is `3`. Horizontal scaling is not supported. Do not change this value. -<3> Specifies the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. You must specify this value when you create a `ControlPlaneMachineSet` CR. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -<4> Specifies the state of the Operator. When the state is `Inactive`, the Operator is not operational. You can activate the Operator by setting the value to `Active`. -+ -[IMPORTANT] -==== -Before you activate the Operator, you must ensure that the `ControlPlaneMachineSet` CR configuration is correct for your cluster requirements. For more information about activating the Control Plane Machine Set Operator, see "Getting started with control plane machine sets". -==== -<5> Specifies the update strategy for the cluster. The allowed values are `OnDelete` and `RollingUpdate`. The default value is `RollingUpdate`. For more information about update strategies, see "Updating the control plane configuration". -<6> Specifies the cloud provider platform name. Do not change this value. -<7> Specifies the `<platform_failure_domains>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample failure domain configuration for your cloud provider. 
-+ -[NOTE] -==== -VMware vSphere does not support failure domains. -==== -<8> Specifies the `<platform_provider_spec>` configuration for the cluster. The format and values of this section are provider-specific. For more information, see the sample provider specification for your cloud provider. \ No newline at end of file diff --git a/modules/crd-creating-aggregated-cluster-roles.adoc b/modules/crd-creating-aggregated-cluster-roles.adoc deleted file mode 100644 index a7c7c1b9726e..000000000000 --- a/modules/crd-creating-aggregated-cluster-roles.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-aggregated-cluster-role_{context}"] -= Creating cluster roles for custom resource definitions - -Cluster administrators can grant permissions to existing cluster-scoped custom resource definitions (CRDs). If you use the `admin`, `edit`, and `view` default cluster roles, you can take advantage of cluster role aggregation for their rules. - -[IMPORTANT] -==== -You must explicitly assign permissions to each of these roles. The roles with more permissions do not inherit rules from roles with fewer permissions. If you assign a rule to a role, you must also assign that verb to roles that have more permissions. For example, if you grant the `get crontabs` permission to the view role, you must also grant it to the `edit` and `admin` roles. The `admin` or `edit` role is usually assigned to the user that created a project through the project template. -==== - -.Prerequisites - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -- Create a CRD. -endif::[] - -.Procedure - -. Create a cluster role definition file for the CRD. The cluster role definition is a YAML file that contains the rules that apply to each cluster role. An {product-title} controller adds the rules that you specify to the default cluster roles. -+ -.Example YAML file for a cluster role definition -[source,yaml] ----- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 <1> -metadata: - name: aggregate-cron-tabs-admin-edit <2> - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" <3> - rbac.authorization.k8s.io/aggregate-to-edit: "true" <4> -rules: -- apiGroups: ["stable.example.com"] <5> - resources: ["crontabs"] <6> - verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] <7> ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: aggregate-cron-tabs-view <2> - labels: - # Add these permissions to the "view" default role. - rbac.authorization.k8s.io/aggregate-to-view: "true" <8> - rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" <9> -rules: -- apiGroups: ["stable.example.com"] <5> - resources: ["crontabs"] <6> - verbs: ["get", "list", "watch"] <7> ----- -<1> Use the `rbac.authorization.k8s.io/v1` API. -<2> Specify a name for the definition. -<3> Specify this label to grant permissions to the admin default role. -<4> Specify this label to grant permissions to the edit default role. -<5> Specify the group name of the CRD. -<6> Specify the plural name of the CRD that these rules apply to. -<7> Specify the verbs that represent the permissions that are granted to the role. For example, apply read and write permissions to the `admin` and `edit` roles and only read permission to the `view` role. -<8> Specify this label to grant permissions to the `view` default role. 
-<9> Specify this label to grant permissions to the `cluster-reader` default role. - -. Create the cluster role: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/crd-creating-crds.adoc b/modules/crd-creating-crds.adoc deleted file mode 100644 index e39db189f15a..000000000000 --- a/modules/crd-creating-crds.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-custom-resources-definition_{context}"] -= Creating a custom resource definition - -To create custom resource (CR) objects, cluster administrators must first create a custom resource definition (CRD). - -.Prerequisites - -- Access to an {product-title} cluster with `cluster-admin` user privileges. - -.Procedure - -To create a CRD: - -. Create a YAML file that contains the following field types: -+ -.Example YAML file for a CRD -[source,yaml] ----- -apiVersion: apiextensions.k8s.io/v1 <1> -kind: CustomResourceDefinition -metadata: - name: crontabs.stable.example.com <2> -spec: - group: stable.example.com <3> - versions: - name: v1 <4> - scope: Namespaced <5> - names: - plural: crontabs <6> - singular: crontab <7> - kind: CronTab <8> - shortNames: - - ct <9> ----- -<1> Use the `apiextensions.k8s.io/v1` API. -<2> Specify a name for the definition. This must be in the `<plural-name>.<group>` format using the values from the `group` and `plural` fields. -<3> Specify a group name for the API. An API group is a collection of objects that are logically related. For example, all batch objects like `Job` or `ScheduledJob` could be in the batch API group (such as `batch.api.example.com`). A good practice is to use a fully-qualified-domain name (FQDN) of your organization. -<4> Specify a version name to be used in the URL. Each API group can exist in multiple versions, for example `v1alpha`, `v1beta`, `v1`. -<5> Specify whether the custom objects are available to a project (`Namespaced`) or all projects in the cluster (`Cluster`). -<6> Specify the plural name to use in the URL. The `plural` field is the same as a resource in an API URL. -<7> Specify a singular name to use as an alias on the CLI and for display. -<8> Specify the kind of objects that can be created. The type can be in CamelCase. -<9> Specify a shorter string to match your resource on the CLI. -+ -[NOTE] -==== -By default, a CRD is cluster-scoped and available to all projects. -==== - -. Create the CRD object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- -+ -A new RESTful API endpoint is created at: -+ -[source,terminal] ----- -/apis/<spec:group>/<spec:version>/<scope>/*/<names-plural>/... ----- -+ -For example, using the example file, the following endpoint is created: -+ -[source,terminal] ----- -/apis/stable.example.com/v1/namespaces/*/crontabs/... ----- -+ -You can now use this endpoint URL to create and manage CRs. The object kind is based on the `spec.kind` field of the CRD object you created. 
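As an optional check, assuming the example `crontabs.stable.example.com` CRD shown above, you can confirm that the API server is serving the new resource by listing the resources in its API group. The following command is illustrative:

[source,terminal]
----
$ oc api-resources --api-group=stable.example.com
----

The output should list `crontabs` with its short name, kind, and scope.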
diff --git a/modules/crd-creating-custom-resources-from-file.adoc b/modules/crd-creating-custom-resources-from-file.adoc deleted file mode 100644 index 0b55f960ce33..000000000000 --- a/modules/crd-creating-custom-resources-from-file.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Useful paired with modules/crd-inspecting-custom-resources.adoc -// -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-creating-custom-resources-from-file_{context}"] -= Creating custom resources from a file - -After a custom resource definition (CRD) has been added to the cluster, custom resources (CRs) can be created with the CLI from a file using the CR specification. - -.Prerequisites - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -- CRD added to the cluster by a cluster administrator. -endif::[] - -.Procedure - -. Create a YAML file for the CR. In the following example definition, the `cronSpec` and `image` custom fields are set in a CR of `Kind: CronTab`. The `Kind` comes from the `spec.kind` field of the CRD object: -+ -.Example YAML file for a CR -[source,yaml] ----- -apiVersion: "stable.example.com/v1" <1> -kind: CronTab <2> -metadata: - name: my-new-cron-object <3> - finalizers: <4> - - finalizer.stable.example.com -spec: <5> - cronSpec: "* * * * /5" - image: my-awesome-cron-image ----- -+ -<1> Specify the group name and API version (name/version) from the CRD. -<2> Specify the type in the CRD. -<3> Specify a name for the object. -<4> Specify the link:https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#finalizers[finalizers] for the object, if any. Finalizers allow controllers to implement conditions that must be completed before the object can be deleted. -<5> Specify conditions specific to the type of object. - -. After you create the file, create the object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/crd-custom-resource-definitions.adoc b/modules/crd-custom-resource-definitions.adoc deleted file mode 100644 index 59500f91841d..000000000000 --- a/modules/crd-custom-resource-definitions.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -[id="crd-custom-resource-definitions_{context}"] -= Custom resource definitions - -In the Kubernetes API, a _resource_ is an endpoint that stores a collection of API objects of a certain kind. For example, the built-in `Pods` resource contains a collection of `Pod` objects. - -A _custom resource definition_ (CRD) object defines a new, unique object type, called a _kind_, in the cluster and lets the Kubernetes API server handle its entire lifecycle. - -_Custom resource_ (CR) objects are created from CRDs that have been added to the cluster by a cluster administrator, allowing all cluster users to add the new resource type into projects. - -ifeval::["{context}" == "crd-extending-api-with-crds"] -When a cluster administrator adds a new CRD to the cluster, the Kubernetes API server reacts by creating a new RESTful resource path that can be accessed by the entire cluster or a single project (namespace) and begins serving the specified CR.
- -Cluster administrators that want to grant access to the CRD to other users can use cluster role aggregation to grant access to users with the `admin`, `edit`, or `view` default cluster roles. Cluster role aggregation allows the insertion of custom policy rules into these cluster roles. This behavior integrates the new resource into the RBAC policy of the cluster as if it was a built-in resource. -endif::[] - -Operators in particular make use of CRDs by packaging them with any required RBAC policy and other software-specific logic. -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -Cluster administrators can also add CRDs manually to the cluster outside of the lifecycle of an Operator, making them available to all users. - -[NOTE] -==== -While only cluster administrators can create CRDs, developers can create the CR from an existing CRD if they have read and write permission to it. -==== -endif::[] diff --git a/modules/crd-inspecting-custom-resources.adoc b/modules/crd-inspecting-custom-resources.adoc deleted file mode 100644 index abae97060767..000000000000 --- a/modules/crd-inspecting-custom-resources.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Useful paired with modules/crd-creating-custom-resources-from-file.adoc -// -// Module included in the following assemblies: -// -// * operators/understanding/crds/crd-managing-resources-from-crds.adoc -// * operators/understanding/crds/extending-api-with-crds.adoc - -:_content-type: PROCEDURE -[id="crd-inspecting-custom-resources_{context}"] -= Inspecting custom resources - -You can inspect custom resource (CR) objects that exist in your cluster using the CLI. - -.Prerequisites - -* A CR object exists in a namespace to which you have access. - -.Procedure - -. To get information on a specific kind of a CR, run: -+ -[source,terminal] ----- -$ oc get <kind> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get crontab ----- -+ -.Example output -[source,terminal] ----- -NAME KIND -my-new-cron-object CronTab.v1.stable.example.com ----- -+ -Resource names are not case-sensitive, and you can use either the singular or plural forms defined in the CRD, as well as any short name. For example: -+ -[source,terminal] ----- -$ oc get crontabs ----- -+ -[source,terminal] ----- -$ oc get crontab ----- -+ -[source,terminal] ----- -$ oc get ct ----- - -. You can also view the raw YAML data for a CR: -+ -[source,terminal] ----- -$ oc get <kind> -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get ct -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -items: -- apiVersion: stable.example.com/v1 - kind: CronTab - metadata: - clusterName: "" - creationTimestamp: 2017-05-31T12:56:35Z - deletionGracePeriodSeconds: null - deletionTimestamp: null - name: my-new-cron-object - namespace: default - resourceVersion: "285" - selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object - uid: 9423255b-4600-11e7-af6a-28d2447dc82b - spec: - cronSpec: '* * * * /5' <1> - image: my-awesome-cron-image <1> ----- -<1> Custom data from the YAML that you used to create the object displays. 
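You can also view a human-readable summary of a CR by using `oc describe`. For example, assuming the `my-new-cron-object` CR shown above:

[source,terminal]
----
$ oc describe crontab my-new-cron-object
----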
diff --git a/modules/create-a-containerruntimeconfig-crd.adoc b/modules/create-a-containerruntimeconfig-crd.adoc deleted file mode 100644 index a4715f0cc228..000000000000 --- a/modules/create-a-containerruntimeconfig-crd.adoc +++ /dev/null @@ -1,236 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="create-a-containerruntimeconfig_{context}"] -= Creating a ContainerRuntimeConfig CR to edit CRI-O parameters - -You can change some of the settings associated with the {product-title} CRI-O runtime for the nodes associated with a specific machine config pool (MCP). Using a `ContainerRuntimeConfig` custom resource (CR), you set the configuration values and add a label to match the MCP. The MCO then rebuilds the `crio.conf` and `storage.conf` configuration files on the associated nodes with the updated values. - -[NOTE] -==== -To revert the changes implemented by using a `ContainerRuntimeConfig` CR, you must delete the CR. Removing the label from the machine config pool does not revert the changes. -==== - -You can modify the following settings by using a `ContainerRuntimeConfig` CR: - -* **PIDs limit**: Setting the PIDs limit in the `ContainerRuntimeConfig` is expected to be deprecated. If PIDs limits are required, it is recommended to use the `podPidsLimit` field in the `KubeletConfig` CR instead. The default value of the `podPidsLimit` field is `4096`. -+ -[NOTE] -==== -The CRI-O flag is applied on the cgroup of the container, while the Kubelet flag is set on the cgroup of the pod. Please adjust the PIDs limit accordingly. -==== - -* **Log level**: The `logLevel` parameter sets the CRI-O `log_level` parameter, which is the level of verbosity for log messages. The default is `info` (`log_level = info`). Other options include `fatal`, `panic`, `error`, `warn`, `debug`, and `trace`. -* **Overlay size**: The `overlaySize` parameter sets the CRI-O Overlay storage driver `size` parameter, which is the maximum size of a container image. -* **Maximum log size**: Setting the maximum log size in the `ContainerRuntimeConfig` is expected to be deprecated. If a maximum log size is required, it is recommended to use the `containerLogMaxSize` field in the `KubeletConfig` CR instead. -* **Container runtime**: The `defaultRuntime` parameter sets the container runtime to either `runc` or `crun`. The default is `runc`. - -You should have one `ContainerRuntimeConfig` CR for each machine config pool with all the config changes you want for that pool. If you are applying the same content to all the pools, you only need one `ContainerRuntimeConfig` CR for all the pools. - -You should edit an existing `ContainerRuntimeConfig` CR to modify existing settings or add new settings instead of creating a new CR for each change. It is recommended to create a new `ContainerRuntimeConfig` CR only to modify a different machine config pool, or for changes that are intended to be temporary so that you can revert the changes. - -You can create multiple `ContainerRuntimeConfig` CRs, as needed, with a limit of 10 per cluster. For the first `ContainerRuntimeConfig` CR, the MCO creates a machine config appended with `containerruntime`. With each subsequent CR, the controller creates a new `containerruntime` machine config with a numeric suffix. For example, if you have a `containerruntime` machine config with a `-2` suffix, the next `containerruntime` machine config is appended with `-3`. 
- -If you want to delete the machine configs, you should delete them in reverse order to avoid exceeding the limit. For example, you should delete the `containerruntime-3` machine config before deleting the `containerruntime-2` machine config. - -[NOTE] -==== -If you have a machine config with a `containerruntime-9` suffix, and you create another `ContainerRuntimeConfig` CR, a new machine config is not created, even if there are fewer than 10 `containerruntime` machine configs. -==== - -.Example showing multiple `ContainerRuntimeConfig` CRs -[source,terminal] ----- -$ oc get ctrcfg ----- - -.Example output -[source, terminal] ----- -NAME AGE -ctr-pid 24m -ctr-overlay 15m -ctr-level 5m45s ----- - -.Example showing multiple `containerruntime` machine configs -[source,terminal] ----- -$ oc get mc | grep container ----- - -.Example output -[source, terminal] ----- -... -01-master-container-runtime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 57m -... -01-worker-container-runtime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 57m -... -99-worker-generated-containerruntime b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 26m -99-worker-generated-containerruntime-1 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 17m -99-worker-generated-containerruntime-2 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 7m26s -... ----- - -The following example raises the `pids_limit` to 2048, sets the `log_level` to `debug`, sets the overlay size to 8 GB, and sets the `log_size_max` to unlimited: - -.Example `ContainerRuntimeConfig` CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: ContainerRuntimeConfig -metadata: - name: overlay-size -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: '' <1> - containerRuntimeConfig: - pidsLimit: 2048 <2> - logLevel: debug <3> - overlaySize: 8G <4> - logSizeMax: "-1" <5> - defaultRuntime: "crun" <6> ----- -<1> Specifies the machine config pool label. -<2> Optional: Specifies the maximum number of processes allowed in a container. -<3> Optional: Specifies the level of verbosity for log messages. -<4> Optional: Specifies the maximum size of a container image. -<5> Optional: Specifies the maximum size allowed for the container log file. If - set to a positive number, it must be at least 8192. -<6> Optional: Specifies the container runtime to deploy to new containers. The default is `runc`. - -.Prerequisite - -* To enable crun, you must enable the `TechPreviewNoUpgrade` feature set. -+ -[NOTE] -==== -Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. -==== - -.Procedure - -To change CRI-O settings using the `ContainerRuntimeConfig` CR: - -. Create a YAML file for the `ContainerRuntimeConfig` CR: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: ContainerRuntimeConfig -metadata: - name: overlay-size -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: '' <1> - containerRuntimeConfig: <2> - pidsLimit: 2048 - logLevel: debug - overlaySize: 8G - logSizeMax: "-1" ----- -<1> Specify a label for the machine config pool that you want you want to modify. -<2> Set the parameters as needed. - -. Create the `ContainerRuntimeConfig` CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - -. 
Verify that the CR is created: -+ -[source,terminal] ----- -$ oc get ContainerRuntimeConfig ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -overlay-size 3m19s ----- - -. Check that a new `containerruntime` machine config is created: -+ -[source,terminal] ----- -$ oc get machineconfigs | grep containerrun ----- -+ -.Example output -[source,terminal] ----- -99-worker-generated-containerruntime 2c9371fbb673b97a6fe8b1c52691999ed3a1bfc2 3.2.0 31s ----- - -. Monitor the machine config pool until all are shown as ready: -+ -[source,terminal] ----- -$ oc get mcp worker ----- -+ -.Example output -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -worker rendered-worker-169 False True False 3 1 1 0 9h ----- - -. Verify that the settings were applied in CRI-O: - -.. Open an `oc debug` session to a node in the machine config pool and run `chroot /host`. -+ -[source, terminal] ----- -$ oc debug node/<node_name> ----- -+ -[source, terminal] ----- -sh-4.4# chroot /host ----- - -.. Verify the changes in the `crio.conf` file: -+ -[source,terminal] ----- -sh-4.4# crio config | egrep 'log_level|pids_limit|log_size_max' ----- -+ -.Example output -+ -[source,terminal] ----- -pids_limit = 2048 -log_size_max = -1 -log_level = "debug" ----- - -.. Verify the changes in the `storage.conf`file: -+ -[source,terminal] ----- -sh-4.4# head -n 7 /etc/containers/storage.conf ----- -+ -.Example output -+ ----- -[storage] - driver = "overlay" - runroot = "/var/run/containers/storage" - graphroot = "/var/lib/containers/storage" - [storage.options] - additionalimagestores = [] - size = "8G" ----- diff --git a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc b/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc deleted file mode 100644 index 5206bed8c78e..000000000000 --- a/modules/create-a-kubeletconfig-crd-to-edit-kubelet-parameters.adoc +++ /dev/null @@ -1,254 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/node-tasks.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="create-a-kubeletconfig-crd-to-edit-kubelet-parameters_{context}"] -= Creating a KubeletConfig CRD to edit kubelet parameters - -The kubelet configuration is currently serialized as an Ignition configuration, so it can be directly edited. However, there is also a new `kubelet-config-controller` added to the Machine Config Controller (MCC). This lets you use a `KubeletConfig` custom resource (CR) to edit the kubelet parameters. - -[NOTE] -==== -As the fields in the `kubeletConfig` object are passed directly to the kubelet from upstream Kubernetes, the kubelet validates those values directly. Invalid values in the `kubeletConfig` object might cause cluster nodes to become unavailable. For valid values, see the link:https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/[Kubernetes documentation]. -==== - -Consider the following guidance: - -* Create one `KubeletConfig` CR for each machine config pool with all the config changes you want for that pool. If you are applying the same content to all of the pools, you need only one `KubeletConfig` CR for all of the pools. - -* Edit an existing `KubeletConfig` CR to modify existing settings or add new settings, instead of creating a CR for each change. 
It is recommended that you create a CR only to modify a different machine config pool, or for changes that are intended to be temporary, so that you can revert the changes. - -* As needed, create multiple `KubeletConfig` CRs with a limit of 10 per cluster. For the first `KubeletConfig` CR, the Machine Config Operator (MCO) creates a machine config appended with `kubelet`. With each subsequent CR, the controller creates another `kubelet` machine config with a numeric suffix. For example, if you have a `kubelet` machine config with a `-2` suffix, the next `kubelet` machine config is appended with `-3`. - -If you want to delete the machine configs, delete them in reverse order to avoid exceeding the limit. For example, you delete the `kubelet-3` machine config before deleting the `kubelet-2` machine config. - -[NOTE] -==== -If you have a machine config with a `kubelet-9` suffix, and you create another `KubeletConfig` CR, a new machine config is not created, even if there are fewer than 10 `kubelet` machine configs. -==== - -.Example `KubeletConfig` CR -[source,terminal] ----- -$ oc get kubeletconfig ----- - -[source, terminal] ----- -NAME AGE -set-max-pods 15m ----- - -.Example showing a `KubeletConfig` machine config -[source,terminal] ----- -$ oc get mc | grep kubelet ----- - -[source, terminal] ----- -... -99-worker-generated-kubelet-1 b5c5119de007945b6fe6fb215db3b8e2ceb12511 3.2.0 26m -... ----- - -The following procedure is an example to show how to configure the maximum number of pods per node on the worker nodes. - -.Prerequisites - -. Obtain the label associated with the static `MachineConfigPool` CR for the type of node you want to configure. -Perform one of the following steps: - -.. View the machine config pool: -+ -[source,terminal] ----- -$ oc describe machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: 2019-02-08T14:52:39Z - generation: 1 - labels: - custom-kubelet: set-max-pods <1> ----- -<1> If a label has been added it appears under `labels`. - -.. If the label is not present, add a key/value pair: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=set-max-pods ----- - -.Procedure - -. View the available machine configuration objects that you can select: -+ -[source,terminal] ----- -$ oc get machineconfig ----- -+ -By default, the two kubelet-related configs are `01-master-kubelet` and `01-worker-kubelet`. - -. Check the current value for the maximum pods per node: -+ -[source,terminal] ----- -$ oc describe node <node_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe node ci-ln-5grqprb-f76d1-ncnqq-worker-a-mdv94 ----- -+ -Look for `value: pods: <value>` in the `Allocatable` stanza: -+ -.Example output -[source,terminal] ----- -Allocatable: - attachable-volumes-aws-ebs: 25 - cpu: 3500m - hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 15341844Ki - pods: 250 ----- - -. Set the maximum pods per node on the worker nodes by creating a custom resource file that contains the kubelet configuration: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-max-pods -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods <1> - kubeletConfig: - maxPods: 500 <2> ----- -<1> Enter the label from the machine config pool. 
-<2> Add the kubelet configuration. In this example, use `maxPods` to set the maximum pods per node. -+ -[NOTE] -==== -The rate at which the kubelet talks to the API server depends on queries per second (QPS) and burst values. The default values, `50` for `kubeAPIQPS` and `100` for `kubeAPIBurst`, are sufficient if there are limited pods running on each node. It is recommended to update the kubelet QPS and burst rates if there are enough CPU and memory resources on the node. - -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-max-pods -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods - kubeletConfig: - maxPods: <pod_count> - kubeAPIBurst: <burst_rate> - kubeAPIQPS: <QPS> ----- -==== -.. Update the machine config pool for workers with the label: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=set-max-pods ----- - -.. Create the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc create -f change-maxPods-cr.yaml ----- - -.. Verify that the `KubeletConfig` object is created: -+ -[source,terminal] ----- -$ oc get kubeletconfig ----- -+ -.Example output -[source, terminal] ----- -NAME AGE -set-max-pods 15m ----- -+ -Depending on the number of worker nodes in the cluster, wait for the worker nodes to be rebooted one by one. For a cluster with 3 worker nodes, this could take about 10 to 15 minutes. - -. Verify that the changes are applied to the node: - -.. Check on a worker node that the `maxPods` value changed: -+ -[source,terminal] ----- -$ oc describe node <node_name> ----- - -.. Locate the `Allocatable` stanza: -+ -[source,terminal] ----- - ... -Allocatable: - attachable-volumes-gce-pd: 127 - cpu: 3500m - ephemeral-storage: 123201474766 - hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 14225400Ki - pods: 500 <1> - ... ----- -<1> In this example, the `pods` parameter should report the value you set in the `KubeletConfig` object. - -. Verify the change in the `KubeletConfig` object: -+ -[source,terminal] ----- -$ oc get kubeletconfigs set-max-pods -o yaml ----- -+ -This should show a status of `True` and `type:Success`, as shown in the following example: -+ -[source,yaml] ----- -spec: - kubeletConfig: - maxPods: 500 - machineConfigPoolSelector: - matchLabels: - custom-kubelet: set-max-pods -status: - conditions: - - lastTransitionTime: "2021-06-30T17:04:07Z" - message: Success - status: "True" - type: Success ----- diff --git a/modules/creating-a-custom-ingress-controller.adoc b/modules/creating-a-custom-ingress-controller.adoc deleted file mode 100644 index 9e395c8e60f9..000000000000 --- a/modules/creating-a-custom-ingress-controller.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// *ingress-controller-dnsmgt.adoc - -:_content-type: PROCEDURE -[id="creating-a-custom-ingress-controller_{context}"] -= Creating a custom Ingress Controller with the `Unmanaged` DNS management policy - -As a cluster administrator, you can create a new custom Ingress Controller with the `Unmanaged` DNS management policy. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create a custom resource (CR) file named `sample-ingress.yaml` containing the following: - -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - namespace: openshift-ingress-operator - name: <name> <1> -spec: - domain: <domain> <2> - endpointPublishingStrategy: - type: LoadBalancerService - loadBalancer: - scope: External <3> - dnsManagementPolicy: Unmanaged <4> ----- -<1> Specify the `<name>` with a name for the `IngressController` object. -<2> Specify the `domain` based on the DNS record that was created as a prerequisite. -<3> Specify the `scope` as `External` to expose the load balancer externally. -<4> `dnsManagementPolicy` indicates if the Ingress Controller is managing the lifecycle of the wildcard DNS record associated with the load balancer. -The valid values are `Managed` and `Unmanaged`. The default value is `Managed`. - - -. Save the file to apply the changes. -+ -[source,terminal] ----- -oc apply -f <name>.yaml <1> ----- diff --git a/modules/creating-a-machine-pool-cli.adoc b/modules/creating-a-machine-pool-cli.adoc deleted file mode 100644 index 10432a15aaad..000000000000 --- a/modules/creating-a-machine-pool-cli.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="creating_machine_pools_cli_{context}"] -= Creating a machine pool using the ROSA CLI - -You can create additional machine pools for your {product-title} (ROSA) cluster by using the ROSA CLI (`rosa`). - -.Prerequisites - -* You installed and configured the latest {product-title} (ROSA) CLI, `rosa`, on your workstation. -* You logged in to your Red Hat account using the ROSA CLI (`rosa`). -* You created a ROSA cluster. - -.Procedure - -* To add a machine pool that does not use autoscaling, create the machine pool and define the instance type, compute (also known as worker) node count, and node labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=<cluster-name> \ - --name=<machine_pool_id> \ <1> - --replicas=<replica_count> \ <2> - --instance-type=<instance_type> \ <3> - --labels=<key>=<value>,<key>=<value> \ <4> - --taints=<key>=<value>:<effect>,<key>=<value>:<effect> \ <5> - --use-spot-instances \ <6> - --spot-max-price=0.5 <7> ----- -<1> Specifies the name of the machine pool. Replace `<machine_pool_id>` with the name of your machine pool. -<2> Specifies the number of compute nodes to provision. If you deployed ROSA using a single availability zone, this defines the number of compute nodes to provision to the machine pool for the zone. If you deployed your cluster using multiple availability zones, this defines the number of compute nodes to provision in total across all zones and the count must be a multiple of 3. The `--replicas` argument is required when autoscaling is not configured. -<3> Optional: Sets the instance type for the compute nodes in your machine pool. The instance type defines the vCPU and memory allocation for each compute node in the pool. Replace `<instance_type>` with an instance type. The default is `m5.xlarge`. You cannot change the instance type for a machine pool after the pool is created. -<4> Optional: Defines the labels for the machine pool. Replace `<key>=<value>,<key>=<value>` with a comma-delimited list of key-value pairs, for example `--labels=key1=value1,key2=value2`. -<5> Optional: Defines the taints for the machine pool. 
Replace `<key>=<value>:<effect>,<key>=<value>:<effect>` with a key, value, and effect for each taint, for example `--taints=key1=value1:NoSchedule,key2=value2:NoExecute`. Available effects include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -<6> Optional: Configures your machine pool to deploy machines as non-guaranteed AWS Spot Instances. For information, see link:https://aws.amazon.com/ec2/spot/[Amazon EC2 Spot Instances] in the AWS documentation. If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -<7> Optional: If you have opted to use Spot Instances, you can specify this argument to define a maximum hourly price for a Spot Instance. If this argument is not specified, the on-demand price is used. -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -The following example creates a machine pool called `mymachinepool` that uses the `m5.xlarge` instance type and has 2 compute node replicas. The example also adds 2 workload-specific labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=mycluster --name=mymachinepool --replicas=2 --instance-type=m5.xlarge --labels=app=db,tier=backend ----- -+ -.Example output -[source,terminal] ----- -I: Machine pool 'mymachinepool' created successfully on cluster 'mycluster' -I: To view all machine pools, run 'rosa list machinepools -c mycluster' ----- - -* To add a machine pool that uses autoscaling, create the machine pool and define the autoscaling configuration, instance type and node labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=<cluster-name> \ - --name=<machine_pool_id> \ <1> - --enable-autoscaling \ <2> - --min-replicas=<minimum_replica_count> \ <3> - --max-replicas=<maximum_replica_count> \ <3> - --instance-type=<instance_type> \ <4> - --labels=<key>=<value>,<key>=<value> \ <5> - --taints=<key>=<value>:<effect>,<key>=<value>:<effect> \ <6> - --use-spot-instances \ <7> - --spot-max-price=0.5 <8> ----- -<1> Specifies the name of the machine pool. Replace `<machine_pool_id>` with the name of your machine pool. -<2> Enables autoscaling in the machine pool to meet the deployment needs. -<3> Defines the minimum and maximum compute node limits. The cluster autoscaler does not reduce or increase the machine pool node count beyond the limits that you specify. If you deployed ROSA using a single availability zone, the `--min-replicas` and `--max-replicas` arguments define the autoscaling limits in the machine pool for the zone. If you deployed your cluster using multiple availability zones, the arguments define the autoscaling limits in total across all zones and the counts must be multiples of 3. -<4> Optional: Sets the instance type for the compute nodes in your machine pool. The instance type defines the vCPU and memory allocation for each compute node in the pool. Replace `<instance_type>` with an instance type. The default is `m5.xlarge`. You cannot change the instance type for a machine pool after the pool is created. -<5> Optional: Defines the labels for the machine pool. Replace `<key>=<value>,<key>=<value>` with a comma-delimited list of key-value pairs, for example `--labels=key1=value1,key2=value2`. -<6> Optional: Defines the taints for the machine pool. 
Replace `<key>=<value>:<effect>,<key>=<value>:<effect>` with a key, value, and effect for each taint, for example `--taints=key1=value1:NoSchedule,key2=value2:NoExecute`. Available effects include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -<7> Optional: Configures your machine pool to deploy machines as non-guaranteed AWS Spot Instances. For information, see link:https://aws.amazon.com/ec2/spot/[Amazon EC2 Spot Instances] in the AWS documentation. If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -<8> Optional: If you have opted to use Spot Instances, you can specify this argument to define a maximum hourly price for a Spot Instance. If this argument is not specified, the on-demand price is used. -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -The following example creates a machine pool called `mymachinepool` that uses the `m5.xlarge` instance type and has autoscaling enabled. The minimum compute node limit is 3 and the maximum is 6 overall. The example also adds 2 workload-specific labels: -+ -[source,terminal] ----- -$ rosa create machinepool --cluster=mycluster --name=mymachinepool --enable-autoscaling --min-replicas=3 --max-replicas=6 --instance-type=m5.xlarge --labels=app=db,tier=backend ----- -+ -.Example output -[source,terminal] ----- -I: Machine pool 'mymachinepool' created successfully on cluster 'mycluster' -I: To view all machine pools, run 'rosa list machinepools -c mycluster' ----- - -.Verification - -. List the available machine pools in your cluster: -+ -[source,terminal] ----- -$ rosa list machinepools --cluster=<cluster_name> ----- -+ -.Example output -[source,terminal] ----- -ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY ZONES SPOT INSTANCES -Default No 3 m5.xlarge us-east-1a, us-east-1b, us-east-1c N/A -mymachinepool Yes 3-6 m5.xlarge app=db, tier=backend us-east-1a, us-east-1b, us-east-1c No ----- - -. Verify that the machine pool is included in the output and the configuration is as expected. diff --git a/modules/creating-a-machine-pool-ocm.adoc b/modules/creating-a-machine-pool-ocm.adoc deleted file mode 100644 index c205af14ed4b..000000000000 --- a/modules/creating-a-machine-pool-ocm.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc -// * nodes/rosa-managing-worker-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="creating_machine_pools_ocm_{context}"] -ifndef::openshift-rosa[] -= Creating a machine pool -endif::openshift-rosa[] -ifdef::openshift-rosa[] -= Creating a machine pool using OpenShift Cluster Manager -endif::openshift-rosa[] - -ifndef::openshift-rosa[] -A default machine pool is created when you install an {product-title} cluster. After installation, you can create additional machine pools for your cluster by using {cluster-manager}. -endif::openshift-rosa[] -ifdef::openshift-rosa[] -You can create additional machine pools for your {product-title} (ROSA) cluster by using {cluster-manager}. 
-endif::openshift-rosa[] - -ifndef::openshift-rosa[] -[IMPORTANT] -==== -The compute (also known as worker) node instance types, autoscaling options, and node counts that are available to you depend on your -ifdef::openshift-rosa[] -ROSA -endif::openshift-rosa[] -ifndef::openshift-rosa[] -{product-title} -endif::[] -subscriptions, resource quotas and deployment scenario. For more information, contact your sales representative or Red Hat support. -==== -endif::openshift-rosa[] - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You created an {product-title} cluster. -endif::[] - -.Procedure - -. Navigate to {cluster-manager-url} and select your cluster. - -. Under the *Machine pools* tab, click *Add machine pool*. - -. Add a *Machine pool name*. - -. Select a *Worker node instance type* from the drop-down menu. The instance type defines the vCPU and memory allocation for each compute node in the machine pool. -+ -[NOTE] -==== -You cannot change the instance type for a machine pool after the pool is created. -==== - -. Optional: Configure autoscaling for the machine pool: -.. Select *Enable autoscaling* to automatically scale the number of machines in your machine pool to meet the deployment needs. -ifdef::openshift-dedicated[] -+ -[NOTE] -==== -The *Enable autoscaling* option is only available for {product-title} if you have the `capability.cluster.autoscale_clusters` subscription. For more information, contact your sales representative or Red Hat support. -==== -endif::openshift-dedicated[] -.. Set the minimum and maximum node count limits for autoscaling. The cluster autoscaler does not reduce or increase the machine pool node count beyond the limits that you specify. -** If you deployed your cluster using a single availability zone, set the *Minimum and maximum node count*. This defines the minimum and maximum compute node limits in the availability zone. -** If you deployed your cluster using multiple availability zones, set the *Minimum nodes per zone* and *Maximum nodes per zone*. This defines the minimum and maximum compute node limits per zone. -+ -[NOTE] -==== -Alternatively, you can set your autoscaling preferences for the machine pool after the machine pool is created. -==== - -. If you did not enable autoscaling, select a compute node count: -* If you deployed your cluster using a single availability zone, select a *Worker node count* from the drop-down menu. This defines the number of compute nodes to provision to the machine pool for the zone. -* If you deployed your cluster using multiple availability zones, select a *Worker node count (per zone)* from the drop-down menu. This defines the number of compute nodes to provision to the machine pool per zone. - -. Optional: Add node labels and taints for your machine pool: -.. Expand the *Edit node labels and taints* menu. -.. Under *Node labels*, add *Key* and *Value* entries for your node labels. -.. Under *Taints*, add *Key* and *Value* entries for your taints. -.. For each taint, select an *Effect* from the drop-down menu. Available options include `NoSchedule`, `PreferNoSchedule`, and `NoExecute`. -+ -[NOTE] -==== -Alternatively, you can add the node labels and taints after you create the machine pool. -==== - -ifdef::openshift-dedicated[] -. 
Optional: If you deployed {product-title} on AWS using the Customer Cloud Subscription (CCS) model, use Amazon EC2 Spot Instances if you want to configure your machine pool to deploy machines as non-guaranteed AWS Spot Instances: -.. Select *Use Amazon EC2 Spot Instances*. -.. Leave *Use On-Demand instance price* selected to use the on-demand instance price. Alternatively, select *Set maximum price* to define a maximum hourly price for a Spot Instance. -+ -For more information about Amazon EC2 Spot Instances, see the link:https://aws.amazon.com/ec2/spot/[AWS documentation]. -endif::openshift-dedicated[] -ifdef::openshift-rosa[] -. Optional: Use Amazon EC2 Spot Instances if you want to configure your machine pool to deploy machines as non-guaranteed AWS Spot Instances: -.. Select *Use Amazon EC2 Spot Instances*. -.. Leave *Use On-Demand instance price* selected to use the on-demand instance price. Alternatively, select *Set maximum price* to define a maximum hourly price for a Spot Instance. -+ -For more information about Amazon EC2 Spot Instances, see the link:https://aws.amazon.com/ec2/spot/[AWS documentation]. -endif::openshift-rosa[] -+ -[IMPORTANT] -==== -Your Amazon EC2 Spot Instances might be interrupted at any time. Use Amazon EC2 Spot Instances only for workloads that can tolerate interruptions. -==== -+ -[NOTE] -==== -If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot disable the option after the machine pool is created. -==== - -. Click *Add machine pool* to create the machine pool. - -.Verification - -* Verify that the machine pool is visible on the *Machine pools* page and the configuration is as expected. diff --git a/modules/creating-a-machine-pool.adoc b/modules/creating-a-machine-pool.adoc deleted file mode 100644 index aa35f2adedb3..000000000000 --- a/modules/creating-a-machine-pool.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: CONCEPT -[id="creating_a_machine_pool_{context}"] -= Creating a machine pool - -A default machine pool is created when you install a {product-title} (ROSA) cluster. After installation, you can create additional machine pools for your cluster by using {cluster-manager} or the ROSA CLI (`rosa`). diff --git a/modules/creating-a-project-using-the-CLI.adoc b/modules/creating-a-project-using-the-CLI.adoc deleted file mode 100644 index edec035d947e..000000000000 --- a/modules/creating-a-project-using-the-CLI.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="creating-a-project-using-the-CLI_{context}"] -= Creating a project using the CLI - -If allowed by your cluster administrator, you can create a new project. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are considered critical by {product-title}. As such, {product-title} does not allow you to create Projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these Projects using the `oc adm new-project` command. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. 
-==== - -.Procedure - -* Run: -+ -[source,terminal] ----- -$ oc new-project <project_name> \ - --description="<description>" --display-name="<display_name>" ----- -+ -For example: -+ -[source,terminal] ----- -$ oc new-project hello-openshift \ - --description="This is an example project" \ - --display-name="Hello OpenShift" ----- - -[NOTE] -==== -The number of projects you are allowed to create -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -might be limited by the system administrator. -endif::[] -ifdef::openshift-online[] -is limited. -endif::[] -After your limit is reached, you might have to delete an existing project in -order to create a new one. -==== diff --git a/modules/creating-a-project-using-the-web-console.adoc b/modules/creating-a-project-using-the-web-console.adoc deleted file mode 100644 index e6aa4786edce..000000000000 --- a/modules/creating-a-project-using-the-web-console.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="creating-a-project-using-the-web-console_{context}"] -= Creating a project using the web console - -If allowed by your cluster administrator, you can create a new project. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are considered critical by {product-title}. As such, {product-title} does not allow you to create Projects starting with `openshift-` using the web console. -==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. -==== - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Click *Create Project*. - -. Enter your project details. - -. Click *Create*. diff --git a/modules/creating-a-service-account-in-your-project.adoc b/modules/creating-a-service-account-in-your-project.adoc deleted file mode 100644 index 8fa8055bafa0..000000000000 --- a/modules/creating-a-service-account-in-your-project.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="creating-a-service-account-in-your-project_{context}"] -= Creating a service account in your project - -Add a service account in your user-defined project. Include an `eks.amazonaws.com/role-arn` annotation in the service account configuration that references the Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that you want the service account to assume. - -.Prerequisites - -* You have created an AWS IAM role for your service account. For more information, see _Setting up an AWS IAM role for a service account_. -* You have access to a {product-title} with AWS Security Token Service (STS) cluster. Admin-level user privileges are not required. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. In your {product-title} cluster, create a project: -+ -[source,terminal] ----- -$ oc new-project <project_name> <1> ----- -<1> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -+ -[NOTE] -==== -You are automatically switched to the project when it is created. -==== - -. 
Create a file named `test-service-account.yaml` with the following service account configuration: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: <service_account_name> <1> - namespace: <project_name> <2> - annotations: - eks.amazonaws.com/role-arn: "<aws_iam_role_arn>" <3> ----- -// Add these annotations in the preceding code block later: -// eks.amazonaws.com/sts-regional-endpoints: "true" <4> -// eks.amazonaws.com/token-expiration: "86400" <5> -<1> Replace `<service_account_name>` with the name of your service account. The name must match the service account name that you specified in your AWS IAM role configuration. -<2> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -<3> Specifies the ARN of the AWS IAM role that the service account assumes for use within your pod. Replace `<aws_iam_role_arn>` with the ARN for the AWS IAM role that you created for your service account. The format of the role ARN is `arn:aws:iam::<aws_account_id>:role/<aws_iam_role_name>`. -// Add these call outs when the additional annotations are added later: -//<4> Optional: When set to `true`, the `AWS_STS_REGIONAL_ENDPOINTS=regional` environment variable is defined in the pod and AWS STS requests are sent to endpoints for the active region. When this option is not set to `true`, the AWS STS requests are by default sent to the global endpoint \https://sts.amazonaws.com. For more information, see link:https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html[AWS STS Regionalized endpoints] in the AWS documentation. -//<5> Optional: Specifies the token expiration time in seconds. The default is `86400`. - -. Create the service account in your project: -+ -[source,terminal] ----- -$ oc create -f test-service-account.yaml ----- -+ -.Example output: -[source,terminal] ----- -serviceaccount/<service_account_name> created ----- - -. Review the details of the service account: -+ -[source,terminal] ----- -$ oc describe serviceaccount <service_account_name> <1> ----- -<1> Replace `<service_account_name>` with the name of your service account. -+ -.Example output: -+ -[source,terminal] ----- -Name: <service_account_name> <1> -Namespace: <project_name> <2> -Labels: <none> -Annotations: eks.amazonaws.com/role-arn: <aws_iam_role_arn> <3> -Image pull secrets: <service_account_name>-dockercfg-rnjkq -Mountable secrets: <service_account_name>-dockercfg-rnjkq -Tokens: <service_account_name>-token-4gbjp -Events: <none> ----- -// Add these annotations in the preceding code block later: -// eks.amazonaws.com/sts-regional-endpoints: true <3> -// eks.amazonaws.com/token-expiration: 86400 <3> -<1> Specifies the name of the service account. -<2> Specifies the project that contains the service account. -<3> Lists the annotation for the ARN of the AWS IAM role that the service account assumes. -// Update the preceding call out to the following when the additional annotations are added later: -//<3> Lists the annotations for the ARN of the AWS IAM role that the service account assumes, the optional regional endpoint configuration, and the optional token expiration specification. 
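For quick experiments, the same service account could also be created and annotated imperatively instead of from a YAML file. The following commands are a sketch; the project name, service account name, and role ARN are placeholders that must match your AWS IAM role configuration:

[source,terminal]
----
$ oc create serviceaccount <service_account_name> -n <project_name>
----

[source,terminal]
----
$ oc annotate serviceaccount <service_account_name> -n <project_name> \
    eks.amazonaws.com/role-arn="arn:aws:iam::<aws_account_id>:role/<aws_iam_role_name>"
----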
diff --git a/modules/creating-an-example-aws-sdk-container-image.adoc b/modules/creating-an-example-aws-sdk-container-image.adoc deleted file mode 100644 index 0b935539f853..000000000000 --- a/modules/creating-an-example-aws-sdk-container-image.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="creating-an-example-aws-sdk-container-image_{context}"] -= Creating an example AWS SDK container image - -The steps in this procedure provide an example method to create a container image that includes an AWS SDK. - -The example steps use Podman to create the container image and Quay.io to host the image. For more information about Quay.io, see link:https://docs.quay.io/solution/getting-started.html[Getting Started with Quay.io]. The container image can be used to deploy pods that can run AWS SDK operations. - -[NOTE] -==== -In this example procedure, the AWS Boto3 SDK for Python is installed into a container image. For more information about installing and using the AWS Boto3 SDK, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. For details about other AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. -==== - -.Prerequisites - -* You have installed Podman on your installation host. -* You have a Quay.io user account. - -.Procedure - -. Add the following configuration to a file named `Containerfile`: -+ -[source,terminal] ----- -FROM ubi9/ubi <1> -RUN dnf makecache && dnf install -y python3-pip && dnf clean all && pip3 install boto3>=1.15.0 <2> ----- -<1> Specifies the Red Hat Universal Base Image version 9. -<2> Installs the AWS Boto3 SDK by using the `pip` package management system. In this example, AWS Boto3 SDK version 1.15.0 or later is installed. - -. From the directory that contains the file, build a container image named `awsboto3sdk`: -+ -[source,terminal] ----- -$ podman build -t awsboto3sdk . ----- - -. Log in to Quay.io: -+ -[source,terminal] ----- -$ podman login quay.io ----- - -. Tag the image in preparation for the upload to Quay.io: -+ -[source,terminal] ----- -$ podman tag localhost/awsboto3sdk quay.io/<quay_username>/awsboto3sdk:latest <1> ----- -<1> Replace `<quay_username>` with your Quay.io username. - -. Push the tagged container image to Quay.io: -+ -[source,terminal] ----- -$ podman push quay.io/<quay_username>/awsboto3sdk:latest <1> ----- -<1> Replace `<quay_username>` with your Quay.io username. - -. Make the Quay.io repository that contains the image public. This publishes the image so that it can be used to deploy a pod in your {product-title} cluster: -.. On https://quay.io/, navigate to the *Repository Settings* page for repository that contains the image. -.. Click *Make Public* to make the repository publicly available. 
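A hedged sketch of how the published image might be used: the pod below runs the `awsboto3sdk` image with the annotated service account from the previous module so that SDK calls made inside the container can assume the AWS IAM role. All names are placeholders, and the sleep command only keeps the container running so that you can open a shell in it.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: awsboto3sdk
  namespace: <project_name>                  # placeholder: project that contains the service account
spec:
  serviceAccountName: <service_account_name> # placeholder: service account with the eks.amazonaws.com/role-arn annotation
  containers:
  - name: awsboto3sdk
    image: quay.io/<quay_username>/awsboto3sdk:latest
    command:
    - /bin/sh
    - -c
    - sleep infinity                         # keep the container running for interactive use
----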
diff --git a/modules/creating-an-infra-node.adoc b/modules/creating-an-infra-node.adoc deleted file mode 100644 index 397957a500ba..000000000000 --- a/modules/creating-an-infra-node.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="creating-an-infra-node_{context}"] -= Creating an infrastructure node - -[IMPORTANT] -==== -See Creating infrastructure machine sets for installer-provisioned infrastructure environments or for any cluster where the control plane nodes are managed by the machine API. -==== - -Requirements of the cluster dictate that infrastructure, also called `infra` nodes, be provisioned. The installer only provides provisions for control plane and worker nodes. Worker nodes can be designated as infrastructure nodes or application, also called `app`, nodes through labeling. - -.Procedure - -. Add a label to the worker node that you want to act as application node: -+ -[source,terminal] ----- -$ oc label node <node-name> node-role.kubernetes.io/app="" ----- - -. Add a label to the worker nodes that you want to act as infrastructure nodes: -+ -[source,terminal] ----- -$ oc label node <node-name> node-role.kubernetes.io/infra="" ----- - -. Check to see if applicable nodes now have the `infra` role and `app` roles: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. Create a default cluster-wide node selector. The default node selector is applied to pods created in all namespaces. This creates an intersection with any existing node selectors on a pod, which additionally constrains the pod's selector. -+ -[IMPORTANT] -==== -If the default node selector key conflicts with the key of a pod's label, then the default node selector is not applied. - -However, do not set a default node selector that might cause a pod to become unschedulable. For example, setting the default node selector to a specific node role, such as `node-role.kubernetes.io/infra=""`, when a pod's label is set to a different node role, such as `node-role.kubernetes.io/master=""`, can cause the pod to become unschedulable. For this reason, use caution when setting the default node selector to specific node roles. - -You can alternatively use a project node selector to avoid cluster-wide node selector key conflicts. -==== - -.. Edit the `Scheduler` object: -+ -[source,terminal] ----- -$ oc edit scheduler cluster ----- - -.. Add the `defaultNodeSelector` field with the appropriate node selector: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -... -spec: - defaultNodeSelector: topology.kubernetes.io/region=us-east-1 <1> -... ----- -<1> This example node selector deploys pods on nodes in the `us-east-1` region by default. - -.. Save the file to apply the changes. - -You can now move infrastructure resources to the newly labeled `infra` nodes. diff --git a/modules/creating-custom-links.adoc b/modules/creating-custom-links.adoc deleted file mode 100644 index 3636d2e74d4a..000000000000 --- a/modules/creating-custom-links.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-links_{context}"] -= Creating custom links in the web console - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleLink*. -. 
Select *Instances* tab -. Click *Create Console Link* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: example -spec: - href: 'https://www.example.com' - location: HelpMenu <1> - text: Link 1 ----- -<1> Valid location settings are `HelpMenu`, `UserMenu`, `ApplicationMenu`, and -`NamespaceDashboard`. -+ -To make the custom link appear in all namespaces, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: namespaced-dashboard-link-for-all-namespaces -spec: - href: 'https://www.example.com' - location: NamespaceDashboard - text: This appears in all namespaces ----- -+ -To make the custom link appear in only some namespaces, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: namespaced-dashboard-for-some-namespaces -spec: - href: 'https://www.example.com' - location: NamespaceDashboard - # This text will appear in a box called "Launcher" under "namespace" or "project" in the web console - text: Custom Link Text - namespaceDashboard: - namespaces: - # for these specific namespaces - - my-namespace - - your-namespace - - other-namespace ----- -+ -To make the custom link appear in the application menu, follow this example: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: application-menu-link-1 -spec: - href: 'https://www.example.com' - location: ApplicationMenu - text: Link 1 - applicationMenu: - section: My New Section - # image that is 24x24 in size - imageURL: https://via.placeholder.com/24 ----- - -. Click *Save* to apply your changes. diff --git a/modules/creating-custom-live-rhcos-iso.adoc b/modules/creating-custom-live-rhcos-iso.adoc deleted file mode 100644 index 951d9dda35d7..000000000000 --- a/modules/creating-custom-live-rhcos-iso.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_sno/install-sno-installing-sno.adoc - -:_module-type: PROCEDURE -[id="create-custom-live-rhcos-iso_{context}"] -= Creating a custom live {op-system} ISO for remote server access - -In some cases, you cannot attach an external disk drive to a server, however, you need to access the server remotely to provision a node. -It is recommended to enable SSH access to the server. -You can create a live {op-system} ISO with SSHd enabled and with predefined credentials so that you can access the server after it boots. - -.Prerequisites - -* You installed the `butane` utility. - -.Procedure - -. Download the `coreos-installer` binary from the `coreos-installer` image link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[mirror] page. - -. Download the latest live {op-system} ISO from link:https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.12/latest/[mirror.openshift.com]. - -. Create the `embedded.yaml` file that the `butane` utility uses to create the Ignition file: -+ -[source,yaml,subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: sshd - labels: - machineconfiguration.openshift.io/role: worker -passwd: - users: - - name: core <1> - ssh_authorized_keys: - - '<ssh_key>' ----- -<1> The `core` user has sudo privileges. - -. Run the `butane` utility to create the Ignition file using the following command: -+ -[source,terminal] ----- -$ butane -pr embedded.yaml -o embedded.ign ----- - -. 
After the Ignition file is created, you can include the configuration in a new live {op-system} ISO, which is named `rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso`, with the `coreos-installer` utility: -+ -[source,terminal,subs="attributes+"] ----- -$ coreos-installer iso ignition embed -i embedded.ign rhcos-{product-version}.0-x86_64-live.x86_64.iso -o rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso ----- - -.Verification - -* Check that the custom live ISO can be used to boot the server by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -# coreos-installer iso ignition show rhcos-sshd-{product-version}.0-x86_64-live.x86_64.iso ----- - -+ -.Example output -[source,json] ----- -{ - "ignition": { - "version": "3.2.0" - }, - "passwd": { - "users": [ - { - "name": "core", - "sshAuthorizedKeys": [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCZnG8AIzlDAhpyENpK2qKiTT8EbRWOrz7NXjRzopbPu215mocaJgjjwJjh1cYhgPhpAp6M/ttTk7I4OI7g4588Apx4bwJep6oWTU35LkY8ZxkGVPAJL8kVlTdKQviDv3XX12l4QfnDom4tm4gVbRH0gNT1wzhnLP+LKYm2Ohr9D7p9NBnAdro6k++XWgkDeijLRUTwdEyWunIdW1f8G0Mg8Y1Xzr13BUo3+8aey7HLKJMDtobkz/C8ESYA/f7HJc5FxF0XbapWWovSSDJrr9OmlL9f4TfE+cQk3s+eoKiz2bgNPRgEEwihVbGsCN4grA+RzLCAOpec+2dTJrQvFqsD alosadag@sonnelicht.local" - ] - } - ] - } -} ----- diff --git a/modules/creating-custom-seccomp-profile.adoc b/modules/creating-custom-seccomp-profile.adoc deleted file mode 100644 index 68bf177695aa..000000000000 --- a/modules/creating-custom-seccomp-profile.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * security/seccomp-profiles.adoc - -:_content-type: PROCEDURE -[id="creating-custom-seccomp-profile_{context}"] -= Creating seccomp profiles -You can use the `MachineConfig` object to create profiles. - -Seccomp can restrict system calls (syscalls) within a container, limiting the access of your application. - -.Prerequisites - -* You have cluster admin permissions. -* You have created a custom security context constraints (SCC). For more information, see _Additional resources_. - -.Procedure - -* Create the `MachineConfig` object: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: custom-seccomp -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;charset=utf-8;base64,<hash> - filesystem: root - mode: 0644 - path: /var/lib/kubelet/seccomp/seccomp-nostat.json ----- \ No newline at end of file diff --git a/modules/creating-ibm-power-vs-workspace-procedure.adoc b/modules/creating-ibm-power-vs-workspace-procedure.adoc deleted file mode 100644 index ef98301347dc..000000000000 --- a/modules/creating-ibm-power-vs-workspace-procedure.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// * installing/installing_ibm_powervs/creating-ibm-power-vs-workspace.adoc - -:_content-type: PROCEDURE -[id="creating-ibm-power-vs-workspace-procedure_{context}"] -= Creating an {ibmpowerProductName} Virtual Server workspace - -Use the following procedure to create an {ibmpowerProductName} Virtual Server workspace. - -.Procedure - -. To create an {ibmpowerProductName} Virtual Server workspace, complete step 1 to step 5 from the IBM Cloud documentation for link:https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server[Creating an IBM Power Virtual Server]. - -. 
After it has finished provisioning, retrieve the 32-character alphanumeric ID of your new workspace by entering the following command: -+ -[source,terminal] ----- -$ ibmcloud resource service-instances | grep <workspace name> ----- -+ diff --git a/modules/creating-infra-machines.adoc b/modules/creating-infra-machines.adoc deleted file mode 100644 index b2da9ecaace2..000000000000 --- a/modules/creating-infra-machines.adoc +++ /dev/null @@ -1,160 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="creating-infra-machines_{context}"] -= Creating a machine config pool for infrastructure machines - -If you need infrastructure machines to have dedicated configurations, you must create an infra pool. - -.Procedure - -. Add a label to the node you want to assign as the infra node with a specific label: -+ -[source,terminal] ----- -$ oc label node <node_name> <label> ----- -+ -[source,terminal] ----- -$ oc label node ci-ln-n8mqwr2-f76d1-xscn2-worker-c-6fmtx node-role.kubernetes.io/infra= ----- - -. Create a machine config pool that contains both the worker role and your custom role as machine config selector: -+ -[source,terminal] ----- -$ cat infra.mcp.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: infra -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,infra]} <1> - nodeSelector: - matchLabels: - node-role.kubernetes.io/infra: "" <2> ----- -<1> Add the worker role and your custom role. -<2> Add the label you added to the node as a `nodeSelector`. -+ -[NOTE] -==== -Custom machine config pools inherit machine configs from the worker pool. Custom pools use any machine config targeted for the worker pool, but add the ability to also deploy changes that are targeted at only the custom pool. Because a custom pool inherits resources from the worker pool, any change to the worker pool also affects the custom pool. -==== - -. After you have the YAML file, you can create the machine config pool: -+ -[source,terminal] ----- -$ oc create -f infra.mcp.yaml ----- - -. 
Check the machine configs to ensure that the infrastructure configuration rendered successfully: -+ -[source,terminal] ----- -$ oc get machineconfig ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED -00-master 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -00-worker 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-master-container-runtime 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-master-kubelet 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-worker-container-runtime 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -01-worker-kubelet 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-master-1ae2a1e0-a115-11e9-8f14-005056899d54-registries 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-master-ssh 3.2.0 31d -99-worker-1ae64748-a115-11e9-8f14-005056899d54-registries 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 31d -99-worker-ssh 3.2.0 31d -rendered-infra-4e48906dca84ee702959c71a53ee80e7 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 23m -rendered-master-072d4b2da7f88162636902b074e9e28e 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-master-3e88ec72aed3886dec061df60d16d1af 02c07496ba0417b3e12b78fb32baf6293d314f79 3.2.0 31d -rendered-master-419bee7de96134963a15fdf9dd473b25 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 17d -rendered-master-53f5c91c7661708adce18739cc0f40fb 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 13d -rendered-master-a6a357ec18e5bce7f5ac426fc7c5ffcd 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 7d3h -rendered-master-dc7f874ec77fc4b969674204332da037 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-1a75960c52ad18ff5dfa6674eb7e533d 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-2640531be11ba43c61d72e82dc634ce6 5b6fb8349a29735e48446d435962dec4547d3090 3.2.0 31d -rendered-worker-4e48906dca84ee702959c71a53ee80e7 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 7d3h -rendered-worker-4f110718fe88e5f349987854a1147755 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 17d -rendered-worker-afc758e194d6188677eb837842d3b379 02c07496ba0417b3e12b78fb32baf6293d314f79 3.2.0 31d -rendered-worker-daa08cc1e8f5fcdeba24de60cd955cc3 365c1cfd14de5b0e3b85e0fc815b0060f36ab955 3.2.0 13d ----- -+ -You should see a new machine config, with the `rendered-infra-*` prefix. - -. Optional: To deploy changes to a custom pool, create a machine config that uses the custom pool name as the label, such as `infra`. Note that this is not required and only shown for instructional purposes. In this manner, you can apply any custom configurations specific to only your infra nodes. -+ -[NOTE] -==== -After you create the new machine config pool, the MCO generates a new rendered config for that pool, and associated nodes of that pool reboot to apply the new configuration. -==== - -.. Create a machine config: -+ -[source,terminal] ----- -$ cat infra.mc.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - name: 51-infra - labels: - machineconfiguration.openshift.io/role: infra <1> -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/infratest - mode: 0644 - contents: - source: data:,infra ----- -<1> Add the label you added to the node as a `nodeSelector`. - -.. Apply the machine config to the infra-labeled nodes: -+ -[source,terminal] ----- -$ oc create -f infra.mc.yaml ----- - -. 
Confirm that your new machine config pool is available: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -infra rendered-infra-60e35c2e99f42d976e084fa94da4d0fc True False False 1 1 1 0 4m20s -master rendered-master-9360fdb895d4c131c7c4bebbae099c90 True False False 3 3 3 0 91m -worker rendered-worker-60e35c2e99f42d976e084fa94da4d0fc True False False 2 2 2 0 91m ----- -+ -In this example, a worker node was changed to an infra node. diff --git a/modules/creating-instance-aws-load-balancer-controller.adoc b/modules/creating-instance-aws-load-balancer-controller.adoc deleted file mode 100644 index 3485ac2842c5..000000000000 --- a/modules/creating-instance-aws-load-balancer-controller.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/create-instance-aws-load-balancer-controller.adoc - -:_content-type: PROCEDURE -[id="nw-creating-instance-aws-load-balancer-controller_{context}"] -= Creating an instance of the AWS Load Balancer Controller using AWS Load Balancer Operator - -You can install only a single instance of the `aws-load-balancer-controller` in a cluster. You can create the AWS Load Balancer Controller by using CLI. The AWS Load Balancer(ALB) Operator reconciles only the resource with the name `cluster`. - -.Prerequisites - -* You have created the `echoserver` namespace. -* You have access to the OpenShift CLI (`oc`). - -.Procedure - -. Create an `aws-load-balancer-controller` resource YAML file, for example, `sample-aws-lb.yaml`, as follows: -+ -[source,yaml] ----- -apiVersion: networking.olm.openshift.io/v1 -kind: AWSLoadBalancerController <1> -metadata: - name: cluster <2> -spec: - subnetTagging: Auto <3> - additionalResourceTags: <4> - - key: example.org/security-scope - value: staging - ingressClass: cloud <5> - config: - replicas: 2 <6> - enabledAddons: <7> - - AWSWAFv2 <8> ----- -<1> Defines the `aws-load-balancer-controller` resource. -<2> Defines the AWS Load Balancer Controller instance name. This instance name gets added as a suffix to all related resources. -<3> Valid options are `Auto` and `Manual`. When the value is set to `Auto`, the Operator attempts to determine the subnets that belong to the cluster and tags them appropriately. The Operator cannot determine the role correctly if the internal subnet tags are not present on internal subnet. If you installed your cluster on user-provided infrastructure, you can manually tag the subnets with the appropriate role tags and set the subnet tagging policy to `Manual`. -<4> Defines the tags used by the controller when it provisions AWS resources. -<5> The default value for this field is `alb`. The Operator provisions an `IngressClass` resource with the same name if it does not exist. -<6> Specifies the number of replicas of the controller. -<7> Specifies add-ons for AWS load balancers, which get specified through annotations. -<8> Enables the `alb.ingress.kubernetes.io/wafv2-acl-arn` annotation. - -. Create a `aws-load-balancer-controller` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-aws-lb.yaml ----- - -. 
After the AWS Load Balancer Controller is running, create a `deployment` resource: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment <1> -metadata: - name: <echoserver> <2> - namespace: echoserver -spec: - selector: - matchLabels: - app: echoserver - replicas: 3 <3> - template: - metadata: - labels: - app: echoserver - spec: - containers: - - image: openshift/origin-node - command: - - "/bin/socat" - args: - - TCP4-LISTEN:8080,reuseaddr,fork - - EXEC:'/bin/bash -c \"printf \\\"HTTP/1.0 200 OK\r\n\r\n\\\"; sed -e \\\"/^\r/q\\\"\"' - imagePullPolicy: Always - name: echoserver - ports: - - containerPort: 8080 ----- -<1> Defines the deployment resource. -<2> Specifies the deployment name. -<3> Specifies the number of replicas of the deployment. - -. Create a `service` resource: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service <1> -metadata: - name: <echoserver> <2> - namespace: echoserver -spec: - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - type: NodePort - selector: - app: echoserver ----- -<1> Defines the service resource. -<2> Specifies the name of the service. - -. Deploy an ALB-backed `Ingress` resource: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress <1> -metadata: - name: <echoserver> <2> - namespace: echoserver - annotations: - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/target-type: instance -spec: - ingressClassName: alb - rules: - - http: - paths: - - path: / - pathType: Exact - backend: - service: - name: <echoserver> <3> - port: - number: 80 ----- -<1> Defines the ingress resource. -<2> Specifies the name of the ingress resource. -<3> Specifies the name of the service resource. - -.Verification - -* Verify the status of the `Ingress` resource to show the host of the provisioned AWS Load Balancer (ALB) by running the following command: -+ -[source,terminal] ----- -$ HOST=$(oc get ingress -n echoserver echoserver --template='{{(index .status.loadBalancer.ingress 0).hostname}}') ----- - -* Verify the status of the provisioned AWS Load Balancer (ALB) host by running the following command: -+ -[source,terminal] ----- -$ curl $HOST ----- diff --git a/modules/creating-machines-bare-metal.adoc b/modules/creating-machines-bare-metal.adoc deleted file mode 100644 index 5a7dc5f4f28f..000000000000 --- a/modules/creating-machines-bare-metal.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc - -[id="creating-machines-bare-metal_{context}"] -= Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on bare metal infrastructure that you provision, you must install {op-system-first} on the machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -To install {op-system} on the machines, follow either the steps to use an ISO image or network PXE booting. - -[NOTE] -==== -The compute node deployment steps included in this installation document are {op-system}-specific. 
If you choose instead to deploy {op-system-base}-based compute nodes, you take responsibility for all operating system life cycle management and maintenance, including performing system updates, applying patches, and completing all other required tasks. Only {op-system-base} 8 compute machines are supported. -==== - -You can configure {op-system} during ISO and PXE installations by using the following methods: - -* Kernel arguments: You can use kernel arguments to provide installation-specific information. For example, you can specify the locations of the {op-system} installation files that you uploaded to your HTTP server and the location of the Ignition config file for the type of node you are installing. For a PXE installation, you can use the `APPEND` parameter to pass the arguments to the kernel of the live installer. For an ISO installation, you can interrupt the live installation boot process to add the kernel arguments. In both installation cases, you can use special `coreos.inst.*` arguments to direct the live installer, as well as standard installation boot arguments for turning standard kernel services on or off. - -* Ignition configs: {product-title} Ignition config files (`*.ign`) are specific to the type of node you are installing. You pass the location of a bootstrap, control plane, or compute node Ignition config file during the {op-system} installation so that it takes effect on first boot. In special cases, you can create a separate, limited Ignition config to pass to the live system. That Ignition config could do a certain set of tasks, such as reporting success to a provisioning system after completing installation. This special Ignition config is consumed by the `coreos-installer` to be applied on first boot of the installed system. Do not provide the standard control plane and compute node Ignition configs to the live ISO directly. - -* `coreos-installer`: You can boot the live ISO installer to a shell prompt, which allows you to prepare the permanent system in a variety of ways before first boot. In particular, you can run the `coreos-installer` command to identify various artifacts to include, work with disk partitions, and set up networking. In some cases, you can configure features on the live system and copy them to the installed system. - -Whether to use an ISO or PXE install depends on your situation. A PXE install requires an available DHCP service and more preparation, but can make the installation process more automated. An ISO install is a more manual process and can be inconvenient if you are setting up more than a few machines. - -[NOTE] -==== -As of {product-title} 4.6, the {op-system} ISO and other installation artifacts provide support for installation on disks with 4K sectors. -==== diff --git a/modules/creating-multiple-ingress-through-single-alb.adoc b/modules/creating-multiple-ingress-through-single-alb.adoc deleted file mode 100644 index 184f75c324f3..000000000000 --- a/modules/creating-multiple-ingress-through-single-alb.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/multiple-ingress-through-single-alb.adoc - -:_content-type: PROCEDURE -[id="nw-creating-multiple-ingress-through-single-alb_{context}"] -= Creating multiple ingresses through a single AWS Load Balancer - -You can route the traffic to multiple Ingresses through a single AWS Load Balancer (ALB) by using the CLI. - -.Prerequisites - -* You have an access to the OpenShift CLI (`oc`). - -.Procedure - -. 
Create an `IngressClassParams` resource YAML file, for example, `sample-single-lb-params.yaml`, as follows:
-+
-[source,yaml]
-----
-apiVersion: elbv2.k8s.aws/v1beta1 <1>
-kind: IngressClassParams
-metadata:
-  name: <single-lb-params> <2>
-spec:
-  group:
-    name: single-lb <3>
-----
-<1> Defines the API group and version of the `IngressClassParams` resource.
-<2> Specifies the name of the `IngressClassParams` resource.
-<3> Specifies the name of the `IngressGroup`. All Ingresses of this class belong to this `IngressGroup`.
-
-. Create an `IngressClassParams` resource by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f sample-single-lb-params.yaml
-----
-
-. Create an `IngressClass` resource YAML file, for example, `sample-single-lb.yaml`, as follows:
-+
-[source,yaml]
-----
-apiVersion: networking.k8s.io/v1 <1>
-kind: IngressClass
-metadata:
-  name: <single-lb> <2>
-spec:
-  controller: ingress.k8s.aws/alb <3>
-  parameters:
-    apiGroup: elbv2.k8s.aws <4>
-    kind: IngressClassParams <5>
-    name: single-lb-params <6>
-----
-<1> Defines the API group and the version of the `IngressClass` resource.
-<2> Specifies the name of the `IngressClass`.
-<3> Defines the controller name, which is common to all `IngressClass` resources that the `aws-load-balancer-controller` reconciles.
-<4> Defines the API group of the `IngressClassParams` resource.
-<5> Defines the resource type of the `IngressClassParams` resource.
-<6> Defines the name of the `IngressClassParams` resource. The name must match the `IngressClassParams` resource that you created in the previous step.
-
-. Create an `IngressClass` resource by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f sample-single-lb.yaml
-----
-
-. Create an `Ingress` resource YAML file, for example, `sample-multiple-ingress.yaml`, as follows:
-+
-[source,yaml]
-----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: <example-1> <1>
-  annotations:
-    alb.ingress.kubernetes.io/scheme: internet-facing <2>
-    alb.ingress.kubernetes.io/group.order: "1" <3>
-spec:
-  ingressClassName: single-lb <4>
-  rules:
-  - host: example.com <5>
-    http:
-      paths:
-      - path: /blog <6>
-        pathType: Prefix
-        backend:
-          service:
-            name: <example-1> <7>
-            port:
-              number: 80 <8>
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: <example-2>
-  annotations:
-    alb.ingress.kubernetes.io/scheme: internet-facing
-    alb.ingress.kubernetes.io/group.order: "2"
-spec:
-  ingressClassName: single-lb
-  rules:
-  - host: example.com
-    http:
-      paths:
-      - path: /store
-        pathType: Prefix
-        backend:
-          service:
-            name: <example-2>
-            port:
-              number: 80
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: <example-3>
-  annotations:
-    alb.ingress.kubernetes.io/scheme: internet-facing
-    alb.ingress.kubernetes.io/group.order: "3"
-spec:
-  ingressClassName: single-lb
-  rules:
-  - host: example.com
-    http:
-      paths:
-      - path: /
-        pathType: Prefix
-        backend:
-          service:
-            name: <example-3>
-            port:
-              number: 80
-----
-<1> Specifies the name of an ingress.
-<2> Indicates the load balancer to provision in the public subnet and makes it accessible over the internet.
-<3> Specifies the order in which the rules from the Ingresses are matched when the request is received at the load balancer.
-<4> Specifies the `IngressClass` that this Ingress belongs to. Because all three Ingresses use the `IngressClass` created in the previous steps, they share a single AWS Load Balancer.
-<5> Defines the name of a domain used for request routing.
-<6> Defines the path that must route to the service.
-<7> Defines the name of the service that serves the endpoint configured in the ingress.
-<8> Defines the port on the service that serves the endpoint.
-
-. 
Create the `Ingress` resources by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-multiple-ingress.yaml ----- diff --git a/modules/creating-new-osdk-v0-1-0-project.adoc b/modules/creating-new-osdk-v0-1-0-project.adoc deleted file mode 100644 index 20af6c50aada..000000000000 --- a/modules/creating-new-osdk-v0-1-0-project.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_content-type: PROCEDURE -[id="creating-new-operator-sdk-v0-1-0-project_{context}"] -= Creating a new Operator SDK v0.1.0 project - -Rename your Operator SDK v0.0.x project and create a new v0.1.0 project in its -place. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK - -.Procedure - -. Ensure the SDK version is v0.1.0: -+ ----- -$ operator-sdk --version -operator-sdk version 0.1.0 ----- - -. Create a new project: -+ ----- -$ mkdir -p $GOPATH/src/github.com/example-inc/ -$ cd $GOPATH/src/github.com/example-inc/ -$ mv memcached-operator old-memcached-operator -$ operator-sdk new memcached-operator --skip-git-init -$ ls -memcached-operator old-memcached-operator ----- - -. Copy over `.git` from old project: -+ ----- -$ cp -rf old-memcached-operator/.git memcached-operator/.git ----- diff --git a/modules/creating-rolling-deployments-CLI.adoc b/modules/creating-rolling-deployments-CLI.adoc deleted file mode 100644 index ae99abf954de..000000000000 --- a/modules/creating-rolling-deployments-CLI.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-creating-rolling-deployment_{context}"] -= Creating a rolling deployment - -Rolling deployments are the default type in {product-title}. You can create a rolling deployment using the CLI. - -.Procedure - -. Create an application based on the example deployment images found in link:https://quay.io/repository/openshifttest/deployment-example[Quay.io]: -+ -[source,terminal] ----- -$ oc new-app quay.io/openshifttest/deployment-example:latest ----- - -. If you have the router installed, make the application available via a route or use the service IP directly. -+ -[source,terminal] ----- -$ oc expose svc/deployment-example ----- - -. Browse to the application at `deployment-example.<project>.<router_domain>` to verify you see the `v1` image. - -. Scale the `DeploymentConfig` object up to three replicas: -+ -[source,terminal] ----- -$ oc scale dc/deployment-example --replicas=3 ----- - -. Trigger a new deployment automatically by tagging a new version of the example as the `latest` tag: -+ -[source,terminal] ----- -$ oc tag deployment-example:v2 deployment-example:latest ----- - -. In your browser, refresh the page until you see the `v2` image. - -. When using the CLI, the following command shows how many pods are on version 1 and how many are on version 2. In the web console, the pods are progressively added to v2 and removed from v1: -+ -[source,terminal] ----- -$ oc describe dc deployment-example ----- - -During the deployment process, the new replication controller is incrementally scaled up. After the new pods are marked as `ready` (by passing their readiness check), the deployment process continues. 
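-
-Optionally, you can watch the rollout progress from the CLI. The following command is a minimal illustration and assumes the `deployment-example` deployment configuration used earlier in this procedure:
-
-[source,terminal]
-----
-$ oc rollout status dc/deployment-example
-----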
- -If the pods do not become ready, the process aborts, and the deployment rolls back to its previous version. diff --git a/modules/creating-runtimeclass.adoc b/modules/creating-runtimeclass.adoc deleted file mode 100644 index a16d6f4a6ed9..000000000000 --- a/modules/creating-runtimeclass.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/scheduling-windows-workloads.adoc - -:_content-type: PROCEDURE -[id="creating-runtimeclass_{context}"] -= Creating a RuntimeClass object to encapsulate scheduling mechanisms - -Using a `RuntimeClass` object simplifies the use of scheduling mechanisms like taints and tolerations; you deploy a runtime class that encapsulates your taints and tolerations and then apply it to your pods to schedule them to the appropriate node. Creating a runtime class is also necessary in clusters that support multiple operating system variants. - -.Procedure - -. Create a `RuntimeClass` object YAML file. For example, `runtime-class.yaml`: -+ -[source,yaml] ----- -apiVersion: node.k8s.io/v1beta1 -kind: RuntimeClass -metadata: - name: <runtime_class_name> <1> -handler: 'runhcs-wcow-process' -scheduling: - nodeSelector: <2> - kubernetes.io/os: 'windows' - kubernetes.io/arch: 'amd64' - node.kubernetes.io/windows-build: '10.0.17763' - tolerations: <3> - - effect: NoSchedule - key: os - operator: Equal - value: "Windows" ----- -<1> Specify the `RuntimeClass` object name, which is defined in the pods you want to be managed by this runtime class. -<2> Specify labels that must be present on nodes that support this runtime class. Pods using this runtime class can only be scheduled to a node matched by this selector. The node selector of the runtime class is merged with the existing node selector of the pod. Any conflicts prevent the pod from being scheduled to the node. -<3> Specify tolerations to append to pods, excluding duplicates, running with this runtime class during admission. This combines the set of nodes tolerated by the pod and the runtime class. - -. Create the `RuntimeClass` object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f runtime-class.yaml ----- - -. Apply the `RuntimeClass` object to your pod to ensure it is scheduled to the appropriate operating system variant: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-windows-pod -spec: - runtimeClassName: <runtime_class_name> <1> -... ----- -<1> Specify the runtime class to manage the scheduling of your pod. diff --git a/modules/creating-serverless-apps-admin-console.adoc b/modules/creating-serverless-apps-admin-console.adoc deleted file mode 100644 index 1c8b8d5ddcf9..000000000000 --- a/modules/creating-serverless-apps-admin-console.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// serverless/admin_guide/serverless-cluster-admin-serving.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-admin-console_{context}"] -= Creating serverless applications using the Administrator perspective - -include::snippets/serverless-apps.adoc[] - -After the service is created and the application is deployed, Knative creates an immutable revision for this version of the application. Knative also performs network programming to create a route, ingress, service, and load balancer for your application and automatically scales your pods up and down based on traffic. 
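-
-For reference, a minimal Knative `Service` definition that you could enter in the editor is shown below. This is only an illustrative sketch; the `showcase` name is a placeholder, and the image is the sample application image used elsewhere in this documentation:
-
-[source,yaml]
-----
-apiVersion: serving.knative.dev/v1
-kind: Service
-metadata:
-  name: showcase
-  namespace: default
-spec:
-  template:
-    spec:
-      containers:
-      - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest
-----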
- -.Prerequisites - -To create serverless applications using the *Administrator* perspective, ensure that you have completed the following steps. - -* The {ServerlessOperatorName} and Knative Serving are installed. -* You have logged in to the web console and are in the *Administrator* perspective. - -.Procedure - -. Navigate to the *Serverless* -> *Serving* page. -. In the *Create* list, select *Service*. -. Manually enter YAML or JSON definitions, or by dragging and dropping a file into the editor. -. Click *Create*. diff --git a/modules/creating-serverless-apps-kn.adoc b/modules/creating-serverless-apps-kn.adoc deleted file mode 100644 index d43d64d8725f..000000000000 --- a/modules/creating-serverless-apps-kn.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/develop/serverless-applications.adoc -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-kn_{context}"] -= Creating serverless applications by using the Knative CLI - -Using the Knative (`kn`) CLI to create serverless applications provides a more streamlined and intuitive user interface over modifying YAML files directly. You can use the `kn service create` command to create a basic serverless application. - -.Prerequisites - -* {ServerlessOperatorName} and Knative Serving are installed on your cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. - -.Procedure - -* Create a Knative service: -+ -[source,terminal] ----- -$ kn service create <service-name> --image <image> --tag <tag-value> ----- -+ -Where: -+ -** `--image` is the URI of the image for the application. -** `--tag` is an optional flag that can be used to add a tag to the initial revision that is created with the service. -+ -.Example command -[source,terminal] ----- -$ kn service create event-display \ - --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest ----- -+ -.Example output -[source,terminal] ----- -Creating service 'event-display' in namespace 'default': - - 0.271s The Route is still working to reflect the latest desired specification. - 0.580s Configuration "event-display" is waiting for a Revision to become ready. - 3.857s ... - 3.861s Ingress has not yet been reconciled. - 4.270s Ready to serve. - -Service 'event-display' created with latest revision 'event-display-bxshg-1' and URL: -http://event-display-default.apps-crc.testing ----- diff --git a/modules/creating-serverless-apps-yaml.adoc b/modules/creating-serverless-apps-yaml.adoc deleted file mode 100644 index 874b60304db7..000000000000 --- a/modules/creating-serverless-apps-yaml.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/develop/serverless-applications.adoc - -:_content-type: PROCEDURE -[id="creating-serverless-apps-yaml_{context}"] -= Creating serverless applications using YAML - -Creating Knative resources by using YAML files uses a declarative API, which enables you to describe applications declaratively and in a reproducible manner. To create a serverless application by using YAML, you must create a YAML file that defines a Knative `Service` object, then apply it by using `oc apply`. - -After the service is created and the application is deployed, Knative creates an immutable revision for this version of the application. 
Knative also performs network programming to create a route, ingress, service, and load balancer for your application and automatically scales your pods up and down based on traffic. - -.Prerequisites - -* {ServerlessOperatorName} and Knative Serving are installed on your cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* Install the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file containing the following sample code: -+ -[source,yaml] ----- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - name: event-delivery - namespace: default -spec: - template: - spec: - containers: - - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest - env: - - name: RESPONSE - value: "Hello Serverless!" ----- -. Navigate to the directory where the YAML file is contained, and deploy the application by applying the YAML file: -+ -[source,terminal] ----- -$ oc apply -f <filename> ----- diff --git a/modules/creating-the-vsphere-windows-vm-golden-image.adoc b/modules/creating-the-vsphere-windows-vm-golden-image.adoc deleted file mode 100644 index 1dd90a54be73..000000000000 --- a/modules/creating-the-vsphere-windows-vm-golden-image.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc - -:_content-type: PROCEDURE -[id="creating-the-vsphere-windows-vm-golden-image_{context}"] -= Creating the vSphere Windows VM golden image - -Create a vSphere Windows virtual machine (VM) golden image. - -.Prerequisites - -* You have created a private/public key pair, which is used to configure key-based authentication in the OpenSSH server. The private key must also be configured in the Windows Machine Config Operator (WMCO) namespace. This is required to allow the WMCO to communicate with the Windows VM. See the "Configuring a secret for the Windows Machine Config Operator" section for more details. - -[NOTE] -==== -You must use link:https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell[Microsoft PowerShell] commands in several cases when creating your Windows VM. PowerShell commands in this guide are distinguished by the `PS C:\>` prefix. -==== - -.Procedure - -. Select a compatible Windows Server version. Currently, the Windows Machine Config Operator (WMCO) stable version supports Windows Server 2022 Long-Term Servicing Channel with the OS-level container networking patch link:https://support.microsoft.com/en-us/topic/april-25-2022-kb5012637-os-build-20348-681-preview-2233d69c-d4a5-4be9-8c24-04a450861a8d[KB5012637]. - -. Create a new VM in the vSphere client using the VM golden image with a compatible Windows Server version. For more information about compatible versions, see the "Windows Machine Config Operator prerequisites" section of the "Red Hat OpenShift support for Windows Containers release notes." -+ -[IMPORTANT] -==== -The virtual hardware version for your VM must meet the infrastructure requirements for {product-title}. For more information, see the "VMware vSphere infrastructure requirements" section in the {product-title} documentation. Also, you can refer to VMware's documentation on link:https://kb.vmware.com/s/article/1003746[virtual machine hardware versions]. -==== - -. Install and configure VMware Tools version 11.0.6 or greater on the Windows VM. 
See the link:https://docs.vmware.com/en/VMware-Tools/index.html[VMware Tools documentation] for more information. - -. After installing VMware Tools on the Windows VM, verify the following: - -.. The `C:\ProgramData\VMware\VMware Tools\tools.conf` file exists with the following entry: -+ -[source,ini] ----- -exclude-nics= ----- -+ -If the `tools.conf` file does not exist, create it with the `exclude-nics` option uncommented and set as an empty value. -+ -This entry ensures the cloned vNIC generated on the Windows VM by the hybrid-overlay is not ignored. - -.. The Windows VM has a valid IP address in vCenter: -+ -[source,terminal] ----- -C:\> ipconfig ----- - -.. The VMTools Windows service is running: -+ -[source,posh] ----- -PS C:\> Get-Service -Name VMTools | Select Status, StartType ----- - -. Install and configure the OpenSSH Server on the Windows VM. See Microsoft's documentation on link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse[installing OpenSSH] for more details. - -. Set up SSH access for an administrative user. See Microsoft's documentation on the link:https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_keymanagement#administrative-user[Administrative user] to do this. -+ -[IMPORTANT] -==== -The public key used in the instructions must correspond to the private key you create later in the WMCO namespace that holds your secret. See the "Configuring a secret for the Windows Machine Config Operator" section for more details. -==== - -. You must create a new firewall rule in the Windows VM that allows incoming connections for container logs. Run the following PowerShell command to create the firewall rule on TCP port 10250: -+ -[source,posh] ----- -PS C:\> New-NetFirewallRule -DisplayName "ContainerLogsPort" -LocalPort 10250 -Enabled True -Direction Inbound -Protocol TCP -Action Allow -EdgeTraversalPolicy Allow ----- - -. Clone the Windows VM so it is a reusable image. Follow the VMware documentation on how to link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-1E185A80-0B97-4B46-A32B-3EF8F309BEED.html[clone an existing virtual machine] for more details. - -. In the cloned Windows VM, run the link:+++https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/sysprep--generalize--a-windows-installation+++[Windows Sysprep tool]: -+ -[source,terminal] ----- -C:\> C:\Windows\System32\Sysprep\sysprep.exe /generalize /oobe /shutdown /unattend:<path_to_unattend.xml> <1> ----- -<1> Specify the path to your `unattend.xml` file. -+ -[NOTE] -==== -There is a limit on how many times you can run the `sysprep` command on a Windows image. Consult Microsoft's link:+++https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/sysprep--generalize--a-windows-installation#limits-on-how-many-times-you-can-run-sysprep+++[documentation] for more information. -==== -+ -An example `unattend.xml` is provided, which maintains all the changes needed for the WMCO. You must modify this example; it cannot be used directly. 
-+ -.Example `unattend.xml` -[%collapsible] -==== -[source,xml] ----- -<?xml version="1.0" encoding="UTF-8"?> -<unattend xmlns="urn:schemas-microsoft-com:unattend"> - <settings pass="specialize"> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-International-Core" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <InputLocale>0409:00000409</InputLocale> - <SystemLocale>en-US</SystemLocale> - <UILanguage>en-US</UILanguage> - <UILanguageFallback>en-US</UILanguageFallback> - <UserLocale>en-US</UserLocale> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Security-SPP-UX" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <SkipAutoActivation>true</SkipAutoActivation> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-SQMApi" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <CEIPEnabled>0</CEIPEnabled> - </component> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <ComputerName>winhost</ComputerName> <1> - </component> - </settings> - <settings pass="oobeSystem"> - <component xmlns:wcm="http://schemas.microsoft.com/WMIConfig/2002/State" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Microsoft-Windows-Shell-Setup" processorArchitecture="amd64" publicKeyToken="31bf3856ad364e35" language="neutral" versionScope="nonSxS"> - <AutoLogon> - <Enabled>false</Enabled> <2> - </AutoLogon> - <OOBE> - <HideEULAPage>true</HideEULAPage> - <HideLocalAccountScreen>true</HideLocalAccountScreen> - <HideOEMRegistrationScreen>true</HideOEMRegistrationScreen> - <HideOnlineAccountScreens>true</HideOnlineAccountScreens> - <HideWirelessSetupInOOBE>true</HideWirelessSetupInOOBE> - <NetworkLocation>Work</NetworkLocation> - <ProtectYourPC>1</ProtectYourPC> - <SkipMachineOOBE>true</SkipMachineOOBE> - <SkipUserOOBE>true</SkipUserOOBE> - </OOBE> - <RegisteredOrganization>Organization</RegisteredOrganization> - <RegisteredOwner>Owner</RegisteredOwner> - <DisableAutoDaylightTimeSet>false</DisableAutoDaylightTimeSet> - <TimeZone>Eastern Standard Time</TimeZone> - <UserAccounts> - <AdministratorPassword> - <Value>MyPassword</Value> <3> - <PlainText>true</PlainText> - </AdministratorPassword> - </UserAccounts> - </component> - </settings> -</unattend> ----- -<1> Specify the `ComputerName`, which must follow the link:https://kubernetes.io/docs/concepts/overview/working-with-objects/names[Kubernetes' names specification]. These specifications also apply to Guest OS customization performed on the resulting template while creating new VMs. -<2> Disable the automatic logon to avoid the security issue of leaving an open terminal with Administrator privileges at boot. This is the default value and must not be changed. -<3> Replace the `MyPassword` placeholder with the password for the Administrator account. This prevents the built-in Administrator account from having a blank password by default. 
Follow Microsoft's link:https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/password-must-meet-complexity-requirements[best practices for choosing a password]. -==== -+ -After the Sysprep tool has completed, the Windows VM will power off. You must not use or power on this VM anymore. - -. Convert the Windows VM to link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-5B3737CC-28DB-4334-BD18-6E12011CDC9F.html[a template in vCenter]. diff --git a/modules/creating-your-first-content.adoc b/modules/creating-your-first-content.adoc deleted file mode 100644 index 20316e749317..000000000000 --- a/modules/creating-your-first-content.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// assembly_getting-started-modular-docs-ocp.adoc - -// Base the file name and the ID on the module title. For example: -// * file name: doing-procedure-a.adoc -// * ID: [id="doing-procedure-a"] -// * Title: = Doing procedure A - -[id="creating-your-first-content_{context}"] -= Creating your first content - -In this procedure, you will create your first example content using modular -docs for the OpenShift docs repository. - -.Prerequisites - -* You have forked and then cloned the OpenShift docs repository locally. -* You have downloaded and are using Atom text editor for creating content. -* You have installed AsciiBinder (the build tool for OpenShift docs). - -.Procedure - -. Navigate to your locally cloned OpenShift docs repository on a command line. - -. Create a new feature branch: - -+ ----- -git checkout master -git checkout -b my_first_mod_docs ----- -+ -. If there is no `modules` directory in the root folder, create one. - -. In this `modules` directory, create a file called `my-first-module.adoc`. - -. Open this newly created file in Atom and copy into this file the contents from -the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template] -from Modular docs repository. - -. Replace the content in this file with some example text using the guidelines -in the comments. Give this module the title `My First Module`. Save this file. -You have just created your first module. - -. Create a new directory from the root of your OpenShift docs repository and -call it `my_guide`. - -. In this my_guide directory, create a new file called -`assembly_my-first-assembly.adoc`. - -. Open this newly created file in Atom and copy into this file the contents from -the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_ASSEMBLY_a-collection-of-modules.adoc[assembly template] -from Modular docs repository. - -. Replace the content in this file with some example text using the guidelines -in the comments. Give this assembly the title: `My First Assembly`. - -. Before the first anchor id in this assembly file, add a `:context:` attribute: - -+ -`:context: assembly-first-content` - -. After the Prerequisites section, add the module created earlier (the following is -deliberately spelled incorrectly to pass validation. Use 'include' instead of 'ilude'): - -+ -`ilude::modules/my-first-module.adoc[leveloffset=+1]` - -+ -Remove the other includes that are present in this file. Save this file. - -. Open up `my-first-module.adoc` in the `modules` folder. 
At the top of -this file, in the comments section, add the following to indicate in which -assembly this module is being used: - -+ ----- -// Module included in the following assemblies: -// -// my_guide/assembly_my-first-assembly.adoc ----- - -. Open up `_topic_map.yml` from the root folder and add these lines at the end -of this file and then save. - -+ ----- ---- -Name: OpenShift CCS Mod Docs First Guide -Dir: my_guide -Distros: openshift-* -Topics: -- Name: My First Assembly - File: assembly_my-first-assembly ----- - -. On the command line, run `asciibinder` from the root folder of openshift-docs. -You don't have to add or commit your changes for asciibinder to run. - -. After the asciibinder build completes, open up your browser and navigate to -<YOUR-LOCAL-GIT-REPO-LOCATION>/openshift-docs/_preview/openshift-enterprise/my_first_mod_docs/my_guide/assembly_my-first-assembly.html - -. Confirm that your book `my_guide` has an assembly `My First Assembly` with the -contents from your module `My First Module`. - -NOTE: You can delete this branch now if you are done testing. This branch -shouldn't be submitted to the upstream openshift-docs repository. diff --git a/modules/csr-management.adoc b/modules/csr-management.adoc deleted file mode 100644 index c3e8fa33d704..000000000000 --- a/modules/csr-management.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc -// installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// installing/installing_bare_metal/installing-bare-metal.adoc -// installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// installing/installing_gcp/installing-gcp-user-infra.adoc -// installing/installing_gcp/installing-restricted-networks-gcp.adoc -// installing/installing_ibm_power/installing-ibm-power.adoc -// installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// installing/installing_ibm_z/installing-ibm-z.adoc -// installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// installing/installing_vsphere/installing-vsphere.adoc -// machine_management/adding-rhel-compute.adoc -// machine_management/more-rhel-compute.adoc -// post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="csr-management_{context}"] -= Certificate signing requests management - -Because your cluster has limited access to automatic machine management when you use infrastructure that you provision, you must provide a mechanism for approving cluster certificate signing requests (CSRs) after installation. The `kube-controller-manager` only approves the kubelet client CSRs. 
The `machine-approver` cannot guarantee the validity of a serving certificate that is requested by using kubelet credentials because it cannot confirm that the correct machine issued the request. You must determine and implement a method of verifying the validity of the kubelet serving certificate requests and approving them. diff --git a/modules/custom-tuning-example.adoc b/modules/custom-tuning-example.adoc deleted file mode 100644 index 79459fdd625a..000000000000 --- a/modules/custom-tuning-example.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -[id="custom-tuning-example_{context}"] -= Custom tuning examples - -*Using TuneD profiles from the default CR* - -The following CR applies custom node-level tuning for -{product-title} nodes with label -`tuned.openshift.io/ingress-node-label` set to any value. - -.Example: custom tuning using the openshift-control-plane TuneD profile -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: ingress - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=A custom OpenShift ingress profile - include=openshift-control-plane - [sysctl] - net.ipv4.ip_local_port_range="1024 65535" - net.ipv4.tcp_tw_reuse=1 - name: openshift-ingress - recommend: - - match: - - label: tuned.openshift.io/ingress-node-label - priority: 10 - profile: openshift-ingress ----- - -[IMPORTANT] -==== -Custom profile writers are strongly encouraged to include the default TuneD -daemon profiles shipped within the default Tuned CR. The example above uses the -default `openshift-control-plane` profile to accomplish this. -==== - -*Using built-in TuneD profiles* - -Given the successful rollout of the NTO-managed daemon set, the TuneD operands -all manage the same version of the TuneD daemon. To list the built-in TuneD -profiles supported by the daemon, query any TuneD pod in the following way: - -[source,terminal] ----- -$ oc exec $tuned_pod -n openshift-cluster-node-tuning-operator -- find /usr/lib/tuned/ -name tuned.conf -printf '%h\n' | sed 's|^.*/||' ----- - -You can use the profile names retrieved by this in your custom tuning specification. - -.Example: using built-in hpc-compute TuneD profile -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-node-hpc-compute - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift node profile for HPC compute workloads - include=openshift-node,hpc-compute - name: openshift-node-hpc-compute - - recommend: - - match: - - label: tuned.openshift.io/openshift-node-hpc-compute - priority: 20 - profile: openshift-node-hpc-compute ----- - -In addition to the built-in `hpc-compute` profile, the example above includes -the `openshift-node` TuneD daemon profile shipped within the default -Tuned CR to use OpenShift-specific tuning for compute nodes. - -// Note the issues with including profiles sharing the same ancestor: see link:https://bugzilla.redhat.com/show_bug.cgi?id=1825882[BZ#1825882] - -*Overriding host-level sysctls* - -Various kernel parameters can be changed at runtime by using `/run/sysctl.d/`, `/etc/sysctl.d/`, and `/etc/sysctl.conf` host configuration files. {product-title} adds several host configuration files which set kernel parameters at runtime; for example, `net.ipv[4-6].`, `fs.inotify.`, and `vm.max_map_count`. 
These runtime parameters provide basic functional tuning for the system prior to the kubelet and the Operator start. - -The Operator does not override these settings unless the `reapply_sysctl` option is set to `false`. Setting this option to `false` results in `TuneD` not applying the settings from the host configuration files after it applies its custom profile. - -.Example: overriding host-level sysctls -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-no-reapply-sysctl - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift profile - include=openshift-node - [sysctl] - vm.max_map_count=>524288 - name: openshift-no-reapply-sysctl - recommend: - - match: - - label: tuned.openshift.io/openshift-no-reapply-sysctl - priority: 15 - profile: openshift-no-reapply-sysctl - operand: - tunedConfig: - reapply_sysctl: false ----- diff --git a/modules/custom-tuning-specification.adoc b/modules/custom-tuning-specification.adoc deleted file mode 100644 index 463a83e74a32..000000000000 --- a/modules/custom-tuning-specification.adoc +++ /dev/null @@ -1,394 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc -// * rosa_hcp/rosa-tuning-config.adoc - -ifeval::["{context}" == "rosa-tuning-config"] -:rosa-hcp-tuning: -endif::[] - -[id="custom-tuning-specification_{context}"] -= Custom tuning specification - -The custom resource (CR) for the Operator has two major sections. The first section, `profile:`, is a list of TuneD profiles and their names. The second, `recommend:`, defines the profile selection logic. - -Multiple custom tuning specifications can co-exist as multiple CRs in the Operator's namespace. The existence of new CRs or the deletion of old CRs is detected by the Operator. All existing custom tuning specifications are merged and appropriate objects for the containerized TuneD daemons are updated. - -*Management state* - -The Operator Management state is set by adjusting the default Tuned CR. By default, the Operator is in the Managed state and the `spec.managementState` field is not present in the default Tuned CR. Valid values for the Operator Management state are as follows: - - * Managed: the Operator will update its operands as configuration resources are updated - * Unmanaged: the Operator will ignore changes to the configuration resources - * Removed: the Operator will remove its operands and resources the Operator provisioned - -*Profile data* - -The `profile:` section lists TuneD profiles and their names. - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -profile: -- name: tuned_profile_1 - data: | - # TuneD profile specification - [main] - summary=Description of tuned_profile_1 profile - - [sysctl] - net.ipv4.ip_forward=1 - # ... other sysctl's or other TuneD daemon plugins supported by the containerized TuneD - -# ... - -- name: tuned_profile_n - data: | - # TuneD profile specification - [main] - summary=Description of tuned_profile_n profile - - # tuned_profile_n profile settings ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -{ - "profile": [ - { - "name": "tuned_profile_1", - "data": "# TuneD profile specification\n[main]\nsummary=Description of tuned_profile_1 profile\n\n[sysctl]\nnet.ipv4.ip_forward=1\n# ... 
other sysctl's or other TuneD daemon plugins supported by the containerized TuneD\n" - }, - { - "name": "tuned_profile_n", - "data": "# TuneD profile specification\n[main]\nsummary=Description of tuned_profile_n profile\n\n# tuned_profile_n profile settings\n" - } - ] -} ----- -endif::[] - -*Recommended profiles* - -The `profile:` selection logic is defined by the `recommend:` section of the CR. The `recommend:` section is a list of items to recommend the profiles based on a selection criteria. - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -recommend: -<recommend-item-1> -# ... -<recommend-item-n> ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -"recommend": [ - { - "recommend-item-1": details_of_recommendation, - # ... - "recommend-item-n": details_of_recommendation, - } - ] ----- -endif::[] - -The individual items of the list: - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- machineConfigLabels: <1> - <mcLabels> <2> - match: <3> - <match> <4> - priority: <priority> <5> - profile: <tuned_profile_name> <6> - operand: <7> - debug: <bool> <8> - tunedConfig: - reapply_sysctl: <bool> <9> ----- -<1> Optional. -<2> A dictionary of key/value `MachineConfig` labels. The keys must be unique. -<3> If omitted, profile match is assumed unless a profile with a higher priority matches first or `machineConfigLabels` is set. -<4> An optional list. -<5> Profile ordering priority. Lower numbers mean higher priority (`0` is the highest priority). -<6> A TuneD profile to apply on a match. For example `tuned_profile_1`. -<7> Optional operand configuration. -<8> Turn debugging on or off for the TuneD daemon. Options are `true` for on or `false` for off. The default is `false`. -<9> Turn `reapply_sysctl` functionality on or off for the TuneD daemon. Options are `true` for on and `false` for off. -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,json] ----- -{ - "profile": [ - { - # ... - } - ], - "recommend": [ - { - "profile": <tuned_profile_name>, <1> - "priority": <priority>, <2> - "machineConfigLabels": { <Key_Pair_for_MachineConfig> <3> - }, - "match": [ <4> - { - "label": <label_information> <5> - }, - ] - }, - ] -} ----- -<1> Profile ordering priority. Lower numbers mean higher priority (`0` is the highest priority). -<2> A TuneD profile to apply on a match. For example `tuned_profile_1`. -<3> Optional: A dictionary of key-value pairs `MachineConfig` labels. The keys must be unique. -<4> If omitted, profile match is assumed unless a profile with a higher priority matches first or `machineConfigLabels` is set. -<5> The label for the profile matched items. -endif::[] - -`<match>` is an optional list recursively defined as follows: - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- label: <label_name> <1> - value: <label_value> <2> - type: <label_type> <3> - <match> <4> ----- -<1> Node or pod label name. -<2> Optional node or pod label value. If omitted, the presence of `<label_name>` is enough to match. -<3> Optional object type (`node` or `pod`). If omitted, `node` is assumed. -<4> An optional `<match>` list. -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,yaml] ----- -"match": [ - { - "label": <1> - }, -] ----- -<1> Node or pod label name. -endif::[] - -If `<match>` is not omitted, all nested `<match>` sections must also evaluate to `true`. Otherwise, `false` is assumed and the profile with the respective `<match>` section will not be applied or recommended. Therefore, the nesting (child `<match>` sections) works as logical AND operator. 
Conversely, if any item of the `<match>` list matches, the entire `<match>` list evaluates to `true`. Therefore, the list acts as logical OR operator. - -If `machineConfigLabels` is defined, machine config pool based matching is turned on for the given `recommend:` list item. `<mcLabels>` specifies the labels for a machine config. The machine config is created automatically to apply host settings, such as kernel boot parameters, for the profile `<tuned_profile_name>`. This involves finding all machine config pools with machine config selector matching `<mcLabels>` and setting the profile `<tuned_profile_name>` on all nodes that are assigned the found machine config pools. To target nodes that have both master and worker roles, you must use the master role. - -The list items `match` and `machineConfigLabels` are connected by the logical OR operator. The `match` item is evaluated first in a short-circuit manner. Therefore, if it evaluates to `true`, the `machineConfigLabels` item is not considered. - -[IMPORTANT] -==== -When using machine config pool based matching, it is advised to group nodes with the same hardware configuration into the same machine config pool. Not following this practice might result in TuneD operands calculating conflicting kernel parameters for two or more nodes sharing the same machine config pool. -==== - -.Example: node or pod label based matching - -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -- match: - - label: tuned.openshift.io/elasticsearch - match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - type: pod - priority: 10 - profile: openshift-control-plane-es -- match: - - label: node-role.kubernetes.io/master - - label: node-role.kubernetes.io/infra - priority: 20 - profile: openshift-control-plane -- priority: 30 - profile: openshift-node ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -[ - { - "match": [ - { - "label": "tuned.openshift.io/elasticsearch", - "match": [ - { - "label": "node-role.kubernetes.io/master" - }, - { - "label": "node-role.kubernetes.io/infra" - } - ], - "type": "pod" - } - ], - "priority": 10, - "profile": "openshift-control-plane-es" - }, - { - "match": [ - { - "label": "node-role.kubernetes.io/master" - }, - { - "label": "node-role.kubernetes.io/infra" - } - ], - "priority": 20, - "profile": "openshift-control-plane" - }, - { - "priority": 30, - "profile": "openshift-node" - } -] ----- -endif::[] - -The CR above is translated for the containerized TuneD daemon into its `recommend.conf` file based on the profile priorities. The profile with the highest priority (`10`) is `openshift-control-plane-es` and, therefore, it is considered first. The containerized TuneD daemon running on a given node looks to see if there is a pod running on the same node with the `tuned.openshift.io/elasticsearch` label set. If not, the entire `<match>` section evaluates as `false`. If there is such a pod with the label, in order for the `<match>` section to evaluate to `true`, the node label also needs to be `node-role.kubernetes.io/master` or `node-role.kubernetes.io/infra`. - -If the labels for the profile with priority `10` matched, `openshift-control-plane-es` profile is applied and no other profile is considered. If the node/pod label combination did not match, the second highest priority profile (`openshift-control-plane`) is considered. 
This profile is applied if the containerized TuneD pod runs on a node with labels `node-role.kubernetes.io/master` or `node-role.kubernetes.io/infra`. - -Finally, the profile `openshift-node` has the lowest priority of `30`. It lacks the `<match>` section and, therefore, will always match. It acts as a profile catch-all to set `openshift-node` profile, if no other profile with higher priority matches on a given node. - -image::node-tuning-operator-workflow-revised.png[Decision workflow] - -.Example: machine config pool based matching -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: openshift-node-custom - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom OpenShift node profile with an additional kernel parameter - include=openshift-node - [bootloader] - cmdline_openshift_node_custom=+skew_tick=1 - name: openshift-node-custom - - recommend: - - machineConfigLabels: - machineconfiguration.openshift.io/role: "worker-custom" - priority: 20 - profile: openshift-node-custom ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -{ - "apiVersion": "tuned.openshift.io/v1", - "kind": "Tuned", - "metadata": { - "name": "openshift-node-custom", - "namespace": "openshift-cluster-node-tuning-operator" - }, - "spec": { - "profile": [ - { - "data": "[main]\nsummary=Custom OpenShift node profile with an additional kernel parameter\ninclude=openshift-node\n[bootloader]\ncmdline_openshift_node_custom=+skew_tick=1\n", - "name": "openshift-node-custom" - } - ], - "recommend": [ - { - "machineConfigLabels": { - "machineconfiguration.openshift.io/role": "worker-custom" - }, - "priority": 20, - "profile": "openshift-node-custom" - } - ] - } -} ----- -endif::[] - -To minimize node reboots, label the target nodes with a label the machine config pool's node selector will match, then create the Tuned CR above and finally create the custom machine config pool itself. - -// $ oc label node <node> node-role.kubernetes.io/worker-custom= -// $ oc create -f <tuned-cr-above> -// $ oc create -f- <<EOF -// apiVersion: machineconfiguration.openshift.io/v1 -// kind: MachineConfigPool -// metadata: -// name: worker-custom -// labels: -// worker-custom: "" -// spec: -// machineConfigSelector: -// matchExpressions: -// - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker-custom]} -// nodeSelector: -// matchLabels: -// node-role.kubernetes.io/worker-custom: "" -// EOF - -*Cloud provider-specific TuneD profiles* - -With this functionality, all Cloud provider-specific nodes can conveniently be assigned a TuneD profile specifically tailored to a given Cloud provider on a {product-title} cluster. This can be accomplished without adding additional node labels or grouping nodes into machine config pools. - -This functionality takes advantage of `spec.providerID` node object values in the form of `<cloud-provider>://<cloud-provider-specific-id>` and writes the file `/var/lib/tuned/provider` with the value `<cloud-provider>` in NTO operand containers. The content of this file is then used by TuneD to load `provider-<cloud-provider>` profile if such profile exists. - -The `openshift` profile that both `openshift-control-plane` and `openshift-node` profiles inherit settings from is now updated to use this functionality through the use of conditional profile loading. Neither NTO nor TuneD currently include any Cloud provider-specific profiles. 
However, it is possible to create a custom profile `provider-<cloud-provider>` that will be applied to all Cloud provider-specific cluster nodes. - -.Example GCE Cloud provider profile -ifndef::rosa-hcp-tuning[] -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: provider-gce - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=GCE Cloud provider-specific profile - # Your tuning for GCE Cloud provider goes here. - name: provider-gce ----- -endif::rosa-hcp-tuning[] -ifdef::rosa-hcp-tuning[] -[source,JSON] ----- -{ - "apiVersion": "tuned.openshift.io/v1", - "kind": "Tuned", - "metadata": { - "name": "provider-gce", - "namespace": "openshift-cluster-node-tuning-operator" - }, - "spec": { - "profile": [ - { - "data": "[main]\nsummary=GCE Cloud provider-specific profile\n# Your tuning for GCE Cloud provider goes here.\n", - "name": "provider-gce" - } - ] - } -} ----- -endif::[] - -[NOTE] -==== -Due to profile inheritance, any setting specified in the `provider-<cloud-provider>` profile will be overwritten by the `openshift` profile and its child profiles. -==== diff --git a/modules/customize-certificates-add-service-serving-apiservice.adoc b/modules/customize-certificates-add-service-serving-apiservice.adoc deleted file mode 100644 index 5be455b0eac7..000000000000 --- a/modules/customize-certificates-add-service-serving-apiservice.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-apiservice_{context}"] -= Add the service CA bundle to an API service - -You can annotate an `APIService` object with `service.beta.openshift.io/inject-cabundle=true` to have its `spec.caBundle` field populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -.Procedure - -. Annotate the API service with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate apiservice <api_service_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<api_service_name>` with the name of the API service to annotate. -+ -For example, use the following command to annotate the API service `test1`: -+ -[source,terminal] ----- -$ oc annotate apiservice test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the API service to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get apiservice <api_service_name> -o yaml ----- -+ -The CA bundle is displayed in the `spec.caBundle` field in the YAML output: -+ -[source,terminal] ----- -apiVersion: apiregistration.k8s.io/v1 -kind: APIService -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -spec: - caBundle: <CA_BUNDLE> -... 
----- diff --git a/modules/customize-certificates-add-service-serving-configmap.adoc b/modules/customize-certificates-add-service-serving-configmap.adoc deleted file mode 100644 index 8fd6008e56e3..000000000000 --- a/modules/customize-certificates-add-service-serving-configmap.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-configmap_{context}"] -= Add the service CA bundle to a config map - -A pod can access the service CA certificate by mounting a `ConfigMap` object that -is annotated with `service.beta.openshift.io/inject-cabundle=true`. -Once annotated, the cluster automatically injects the service CA -certificate into the `service-ca.crt` key on the config map. Access to -this CA certificate allows TLS clients to verify connections to -services using service serving certificates. - -[IMPORTANT] -==== -After adding this annotation to a config map all existing data in it is -deleted. It is recommended to use a separate config map to contain the -`service-ca.crt`, instead of using the same config map that stores your -pod configuration. -==== - -.Procedure - -. Annotate the config map with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate configmap <config_map_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<config_map_name>` with the name of the config map to annotate. -+ -[NOTE] -==== -Explicitly referencing the `service-ca.crt` key in a volume mount will prevent a pod from starting until the config map has been injected with the CA bundle. This behavior can be overridden by setting the `optional` field to `true` for the volume's serving certificate configuration. -==== -+ -For example, use the following command to annotate the config map `test1`: -+ -[source,terminal] ----- -$ oc annotate configmap test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the config map to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get configmap <config_map_name> -o yaml ----- -+ -The CA bundle is displayed as the value of the `service-ca.crt` key in the YAML output: -+ -[source,terminal] ----- -apiVersion: v1 -data: - service-ca.crt: | - -----BEGIN CERTIFICATE----- -... ----- diff --git a/modules/customize-certificates-add-service-serving-crd.adoc b/modules/customize-certificates-add-service-serving-crd.adoc deleted file mode 100644 index 0414151a5aad..000000000000 --- a/modules/customize-certificates-add-service-serving-crd.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-crd_{context}"] -= Add the service CA bundle to a custom resource definition - -You can annotate a `CustomResourceDefinition` (CRD) object with `service.beta.openshift.io/inject-cabundle=true` to have its `spec.conversion.webhook.clientConfig.caBundle` field populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -The service CA bundle will only be injected into the CRD if the CRD is configured to use a webhook for conversion. It is only useful to inject the service CA bundle if a CRD's webhook is secured with a service CA certificate. -==== - -.Procedure - -. 
Annotate the CRD with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate crd <crd_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<crd_name>` with the name of the CRD to annotate. -+ -For example, use the following command to annotate the CRD `test1`: -+ -[source,terminal] ----- -$ oc annotate crd test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the CRD to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get crd <crd_name> -o yaml ----- -+ -The CA bundle is displayed in the `spec.conversion.webhook.clientConfig.caBundle` field in the YAML output: -+ -[source,terminal] ----- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: <CA_BUNDLE> -... ----- diff --git a/modules/customize-certificates-add-service-serving-mutating-webhook.adoc b/modules/customize-certificates-add-service-serving-mutating-webhook.adoc deleted file mode 100644 index 284eda698829..000000000000 --- a/modules/customize-certificates-add-service-serving-mutating-webhook.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-mutating-webhook_{context}"] -= Add the service CA bundle to a mutating webhook configuration - -You can annotate a `MutatingWebhookConfiguration` object with `service.beta.openshift.io/inject-cabundle=true` to have the `clientConfig.caBundle` field of each webhook populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -Do not set this annotation for admission webhook configurations that need to specify different CA bundles for different webhooks. If you do, then the service CA bundle will be injected for all webhooks. -==== - -.Procedure - -. Annotate the mutating webhook configuration with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate mutatingwebhookconfigurations <mutating_webhook_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<mutating_webhook_name>` with the name of the mutating webhook configuration to annotate. -+ -For example, use the following command to annotate the mutating webhook configuration `test1`: -+ -[source,terminal] ----- -$ oc annotate mutatingwebhookconfigurations test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the mutating webhook configuration to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get mutatingwebhookconfigurations <mutating_webhook_name> -o yaml ----- -+ -The CA bundle is displayed in the `clientConfig.caBundle` field of all webhooks in the YAML output: -+ -[source,terminal] ----- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -webhooks: -- myWebhook: - - v1beta1 - clientConfig: - caBundle: <CA_BUNDLE> -... 
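# Note: the same injected <CA_BUNDLE> value is populated in the clientConfig of
# every webhook listed in this configuration.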
----- diff --git a/modules/customize-certificates-add-service-serving-validating-webhook.adoc b/modules/customize-certificates-add-service-serving-validating-webhook.adoc deleted file mode 100644 index 2cbc9e8c278d..000000000000 --- a/modules/customize-certificates-add-service-serving-validating-webhook.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate-validating-webhook_{context}"] -= Add the service CA bundle to a validating webhook configuration - -You can annotate a `ValidatingWebhookConfiguration` object with `service.beta.openshift.io/inject-cabundle=true` to have the `clientConfig.caBundle` field of each webhook populated with the service CA bundle. This allows the Kubernetes API server to validate the service CA certificate used to secure the targeted endpoint. - -[NOTE] -==== -Do not set this annotation for admission webhook configurations that need to specify different CA bundles for different webhooks. If you do, then the service CA bundle will be injected for all webhooks. -==== - -.Procedure - -. Annotate the validating webhook configuration with `service.beta.openshift.io/inject-cabundle=true`: -+ -[source,terminal] ----- -$ oc annotate validatingwebhookconfigurations <validating_webhook_name> \//<1> - service.beta.openshift.io/inject-cabundle=true ----- -<1> Replace `<validating_webhook_name>` with the name of the validating webhook configuration to annotate. -+ -For example, use the following command to annotate the validating webhook configuration `test1`: -+ -[source,terminal] ----- -$ oc annotate validatingwebhookconfigurations test1 service.beta.openshift.io/inject-cabundle=true ----- - -. View the validating webhook configuration to ensure that the service CA bundle has been injected: -+ -[source,terminal] ----- -$ oc get validatingwebhookconfigurations <validating_webhook_name> -o yaml ----- -+ -The CA bundle is displayed in the `clientConfig.caBundle` field of all webhooks in the YAML output: -+ -[source,terminal] ----- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" -... -webhooks: -- myWebhook: - - v1beta1 - clientConfig: - caBundle: <CA_BUNDLE> -... ----- diff --git a/modules/customize-certificates-add-service-serving.adoc b/modules/customize-certificates-add-service-serving.adoc deleted file mode 100644 index 346faa2a2949..000000000000 --- a/modules/customize-certificates-add-service-serving.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="add-service-certificate_{context}"] -= Add a service certificate - -To secure communication to your service, generate a signed serving certificate and key pair into a secret in the same namespace as the service. - -The generated certificate is only valid for the internal service DNS name `<service.name>.<service.namespace>.svc`, and is only valid for internal communications. If your service is a headless service (no `clusterIP` value set), the generated certificate also contains a wildcard subject in the format of `*.<service.name>.<service.namespace>.svc`. 
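For illustration, after the secret has been generated you can confirm exactly which DNS names the certificate covers. The following is a minimal sketch that assumes a service named `my-service` in the `my-namespace` namespace with a generated secret named `my-service-tls`; all three names are hypothetical, so substitute your own values:

[source,terminal]
----
$ oc get secret/my-service-tls -n my-namespace \
    -o template='{{index .data "tls.crt"}}' \
    | base64 --decode \
    | openssl x509 -noout -text \
    | grep -A1 "Subject Alternative Name"
----

For a headless service, the output additionally lists the `*.my-service.my-namespace.svc` wildcard entry described above.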
- -[IMPORTANT] -==== -Because the generated certificates contain wildcard subjects for headless services, you must not use the service CA if your client must differentiate between individual pods. In this case: - -* Generate individual TLS certificates by using a different CA. -* Do not accept the service CA as a trusted CA for connections that are directed to individual pods and must not be impersonated by other pods. These connections must be configured to trust the CA that was used to generate the individual TLS certificates. -==== - -.Prerequisites: - -* You must have a service defined. - -.Procedure - -. Annotate the service with `service.beta.openshift.io/serving-cert-secret-name`: -+ -[source,terminal] ----- -$ oc annotate service <service_name> \//<1> - service.beta.openshift.io/serving-cert-secret-name=<secret_name> //<2> ----- -<1> Replace `<service_name>` with the name of the service to secure. -<2> `<secret_name>` will be the name of the generated secret containing the -certificate and key pair. For convenience, it is recommended that this -be the same as `<service_name>`. -+ -For example, use the following command to annotate the service `test1`: -+ -[source,terminal] ----- -$ oc annotate service test1 service.beta.openshift.io/serving-cert-secret-name=test1 ----- - -. Examine the service to confirm that the annotations are present: -+ -[source,terminal] ----- -$ oc describe service <service_name> ----- -+ -.Example output -[source,terminal] ----- -... -Annotations: service.beta.openshift.io/serving-cert-secret-name: <service_name> - service.beta.openshift.io/serving-cert-signed-by: openshift-service-serving-signer@1556850837 -... ----- - -. After the cluster generates a secret for your service, your `Pod` spec can -mount it, and the pod will run after it becomes available. diff --git a/modules/customize-certificates-api-add-named.adoc b/modules/customize-certificates-api-add-named.adoc deleted file mode 100644 index 740ff7c7eaf8..000000000000 --- a/modules/customize-certificates-api-add-named.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/api-server.adoc - -:_content-type: PROCEDURE -[id="customize-certificates-api-add-named_{context}"] -= Add an API server named certificate - -The default API server certificate is issued by an internal {product-title} -cluster CA. You can add one or more alternative certificates that the API -server will return based on the fully qualified domain name (FQDN) requested by -the client, for example when a reverse proxy or load balancer is used. - -.Prerequisites - -* You must have a certificate for the FQDN and its corresponding private key. Each should be in a separate PEM format file. -* The private key must be unencrypted. If your key is encrypted, decrypt it -before importing it into {product-title}. -* The certificate must include the `subjectAltName` extension showing the FQDN. -* The certificate file can contain one or more certificates in a chain. The -certificate for the API server FQDN must be the first certificate in the file. -It can then be followed with any intermediate certificates, and the file should -end with the root CA certificate. - -[WARNING] -==== -Do not provide a named certificate for the internal load balancer (host -name `api-int.<cluster_name>.<base_domain>`). Doing so will leave your -cluster in a degraded state. -==== - -.Procedure - -. Login to the new API as the `kubeadmin` user. 
-+ -[source,terminal] ----- -$ oc login -u kubeadmin -p <password> https://FQDN:6443 ----- - -. Get the `kubeconfig` file. -+ -[source,terminal] ----- -$ oc config view --flatten > kubeconfig-newapi ----- - -. Create a secret that contains the certificate chain and private key in the -`openshift-config` namespace. -+ -[source,terminal] ----- -$ oc create secret tls <secret> \//<1> - --cert=</path/to/cert.crt> \//<2> - --key=</path/to/cert.key> \//<3> - -n openshift-config ----- -<1> `<secret>` is the name of the secret that will contain the certificate chain and private key. -<2> `</path/to/cert.crt>` is the path to the certificate chain on your local file system. -<3> `</path/to/cert.key>` is the path to the private key associated with this certificate. - -. Update the API server to reference the created secret. -+ -[source,terminal] ----- -$ oc patch apiserver cluster \ - --type=merge -p \ - '{"spec":{"servingCerts": {"namedCertificates": - [{"names": ["<FQDN>"], //<1> - "servingCertificate": {"name": "<secret>"}}]}}}' <2> ----- -<1> Replace `<FQDN>` with the FQDN that the API server should provide the certificate for. -<2> Replace `<secret>` with the name used for the secret in the previous step. - -. Examine the `apiserver/cluster` object and confirm the secret is now -referenced. -+ -[source,terminal] ----- -$ oc get apiserver cluster -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -spec: - servingCerts: - namedCertificates: - - names: - - <FQDN> - servingCertificate: - name: <secret> -... ----- - -. Check the `kube-apiserver` operator, and verify that a new revision of the Kubernetes API server rolls out. -It may take a minute for the operator to detect the configuration change and trigger a new deployment. -While the new revision is rolling out, `PROGRESSING` will report `True`. -+ -[source,terminal] ----- -$ oc get clusteroperators kube-apiserver ----- -+ -Do not continue to the next step until `PROGRESSING` is listed as `False`, as shown in the following output: -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -kube-apiserver {product-version}.0 True False False 145m ----- -+ -If `PROGRESSING` is showing `True`, wait a few minutes and try again. -+ -[NOTE] -==== -A new revision of the Kubernetes API server only rolls out if the API server named certificate is added for the first time. When the API server named certificate is renewed, a new revision of the Kubernetes API server does not roll out because the `kube-apiserver` pods dynamically reload the updated certificate. -==== diff --git a/modules/customize-certificates-manually-rotate-service-ca.adoc b/modules/customize-certificates-manually-rotate-service-ca.adoc deleted file mode 100644 index 9c5a0161ea5b..000000000000 --- a/modules/customize-certificates-manually-rotate-service-ca.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/certificates/service-signing-certificates.adoc - -:_content-type: PROCEDURE -[id="manually-rotate-service-ca_{context}"] -= Manually rotate the service CA certificate - -The service CA is valid for 26 months and is automatically refreshed when there is less than 13 months validity left. - -If necessary, you can manually refresh the service CA by using the following procedure. - -[WARNING] -==== -A manually-rotated service CA does not maintain trust with the previous service CA. 
You might experience a temporary service disruption until the pods in the cluster are restarted, which ensures that pods are using service serving certificates issued by the new service CA. -==== - -.Prerequisites - -* You must be logged in as a cluster admin. - -.Procedure - -. View the expiration date of the current service CA certificate by -using the following command. -+ -[source,terminal] ----- -$ oc get secrets/signing-key -n openshift-service-ca \ - -o template='{{index .data "tls.crt"}}' \ - | base64 --decode \ - | openssl x509 -noout -enddate ----- - -. Manually rotate the service CA. This process generates a new service CA -which will be used to sign the new service certificates. -+ -[source,terminal] ----- -$ oc delete secret/signing-key -n openshift-service-ca ----- - -. To apply the new certificates to all services, restart all the pods -in your cluster. This command ensures that all services use the -updated certificates. -+ -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- -+ -[WARNING] -==== -This command will cause a service interruption, as it goes through and -deletes every running pod in every namespace. These pods will automatically -restart after they are deleted. -==== diff --git a/modules/customize-certificates-replace-default-router.adoc b/modules/customize-certificates-replace-default-router.adoc deleted file mode 100644 index 85672192b088..000000000000 --- a/modules/customize-certificates-replace-default-router.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/replacing-default-ingress-certificate.adoc - -:_content-type: PROCEDURE -[id="replacing-default-ingress_{context}"] -= Replacing the default ingress certificate - -You can replace the default ingress certificate for all -applications under the `.apps` subdomain. After you replace -the certificate, all applications, including the web console -and CLI, will have encryption provided by specified certificate. - -.Prerequisites - -* You must have a wildcard certificate for the fully qualified `.apps` subdomain -and its corresponding private key. Each should be in a separate PEM format file. -* The private key must be unencrypted. If your key is encrypted, decrypt it -before importing it into {product-title}. -* The certificate must include the `subjectAltName` extension showing -`*.apps.<clustername>.<domain>`. -* The certificate file can contain one or more certificates in a chain. The -wildcard certificate must be the first certificate in the file. It can then be -followed with any intermediate certificates, and the file should end with the -root CA certificate. -* Copy the root CA certificate into an additional PEM format file. - -.Procedure - -. Create a config map that includes only the root CA certificate used to sign the wildcard certificate: -+ -[source,terminal] ----- -$ oc create configmap custom-ca \ - --from-file=ca-bundle.crt=</path/to/example-ca.crt> \//<1> - -n openshift-config ----- -<1> `</path/to/example-ca.crt>` is the path to the root CA certificate file on your local file system. - -. Update the cluster-wide proxy configuration with the newly created config map: -+ -[source,terminal] ----- -$ oc patch proxy/cluster \ - --type=merge \ - --patch='{"spec":{"trustedCA":{"name":"custom-ca"}}}' ----- - -. 
Create a secret that contains the wildcard certificate chain and key: -+ -[source,terminal] ----- -$ oc create secret tls <secret> \//<1> - --cert=</path/to/cert.crt> \//<2> - --key=</path/to/cert.key> \//<3> - -n openshift-ingress ----- -<1> `<secret>` is the name of the secret that will contain the certificate chain -and private key. -<2> `</path/to/cert.crt>` is the path to the certificate chain on your local -file system. -<3> `</path/to/cert.key>` is the path to the private key associated -with this certificate. - -. Update the Ingress Controller configuration with the newly created -secret: -+ -[source,terminal] ----- -$ oc patch ingresscontroller.operator default \ - --type=merge -p \ - '{"spec":{"defaultCertificate": {"name": "<secret>"}}}' \//<1> - -n openshift-ingress-operator ----- -<1> Replace `<secret>` with the name used for the secret in -the previous step. diff --git a/modules/customize-certificates-rotate-service-serving.adoc b/modules/customize-certificates-rotate-service-serving.adoc deleted file mode 100644 index e79214866e98..000000000000 --- a/modules/customize-certificates-rotate-service-serving.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: PROCEDURE -[id="rotate-service-serving_{context}"] -= Manually rotate the generated service certificate - -You can rotate the service certificate by deleting the -associated secret. Deleting the secret results in a new one -being automatically created, resulting in a new certificate. - -.Prerequisites - -* A secret containing the certificate and key pair must -have been generated for the service. - -.Procedure - -. Examine the service to determine the secret containing the -certificate. This is found in the `serving-cert-secret-name` -annotation, as seen below. -+ -[source,terminal] ----- -$ oc describe service <service_name> ----- -+ -.Example output -[source,terminal] ----- -... -service.beta.openshift.io/serving-cert-secret-name: <secret> -... ----- - -. Delete the generated secret for the service. This process -will automatically recreate the secret. -+ -[source,terminal] ----- -$ oc delete secret <secret> //<1> ----- -<1> Replace `<secret>` with the name of the secret from the previous -step. - -. Confirm that the certificate has been recreated -by obtaining the new secret and examining the `AGE`. -+ -[source,terminal] ----- -$ oc get secret <service_name> ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE DATA AGE -<service.name> kubernetes.io/tls 2 1s ----- diff --git a/modules/customize-certificates-understanding-default-router.adoc b/modules/customize-certificates-understanding-default-router.adoc deleted file mode 100644 index 2ac5764f66b5..000000000000 --- a/modules/customize-certificates-understanding-default-router.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// security/certificates/replacing-default-ingress-certificate.adoc - -:_content-type: CONCEPT -[id="understanding-default-ingress_{context}"] -= Understanding the default ingress certificate - -By default, {product-title} uses the Ingress Operator to -create an internal CA and issue a wildcard certificate that is valid for -applications under the `.apps` sub-domain. Both the web console and CLI -use this certificate as well. - -The internal infrastructure CA certificates are self-signed. 
-While this process might be perceived as bad practice by some security or -PKI teams, any risk here is minimal. The only clients that implicitly -trust these certificates are other components within the cluster. -Replacing the default wildcard certificate with one that is issued by a -public CA already included in the CA bundle as provided by the container userspace -allows external clients to connect securely to applications running under the `.apps` sub-domain. diff --git a/modules/customize-certificates-understanding-service-serving.adoc b/modules/customize-certificates-understanding-service-serving.adoc deleted file mode 100644 index 990857d97dc7..000000000000 --- a/modules/customize-certificates-understanding-service-serving.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * security/certificates/service-serving-certificate.adoc - -:_content-type: CONCEPT -[id="understanding-service-serving_{context}"] -= Understanding service serving certificates - -Service serving certificates are intended to support complex -middleware applications that require encryption. These certificates are -issued as TLS web server certificates. - -The `service-ca` controller uses the `x509.SHA256WithRSA` signature -algorithm to generate service certificates. - -The generated certificate and key are in PEM format, stored in `tls.crt` -and `tls.key` respectively, within a created secret. The -certificate and key are automatically replaced when they get close to -expiration. - -The service CA certificate, which issues the service certificates, is valid for 26 months and is automatically rotated when there is less than 13 months validity left. After rotation, the previous service CA configuration is still trusted until its expiration. This allows a grace period for all affected services to refresh their key material before the expiration. If you do not upgrade your cluster during this grace period, which restarts services and refreshes their key material, you might need to manually restart services to avoid failures after the previous service CA expires. - -[NOTE] -==== -You can use the following command to manually restart all pods in the cluster. Be aware that running this command causes a service interruption, because it deletes every running pod in every namespace. These pods will automatically restart after they are deleted. - -[source,terminal] ----- -$ for I in $(oc get ns -o jsonpath='{range .items[*]} {.metadata.name}{"\n"} {end}'); \ - do oc delete pods --all -n $I; \ - sleep 1; \ - done ----- -==== diff --git a/modules/customizing-cli-downloads.adoc b/modules/customizing-cli-downloads.adoc deleted file mode 100644 index 4511b2322a48..000000000000 --- a/modules/customizing-cli-downloads.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="creating-custom-CLI-downloads_{context}"] -= Customizing CLI downloads - -You can configure links for downloading the CLI with custom link text and URLs, -which can point directly to file packages or to an external page that provides -the packages. - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. Navigate to *Administration* -> *Custom Resource Definitions*. - -. Select *ConsoleCLIDownload* from the list of Custom Resource Definitions (CRDs). - -. 
Click the *YAML* tab, and then make your edits: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleCLIDownload -metadata: - name: example-cli-download-links-for-foo -spec: - description: | - This is an example of download links for foo - displayName: example-foo - links: - - href: 'https://www.example.com/public/foo.tar' - text: foo for linux - - href: 'https://www.example.com/public/foo.mac.zip' - text: foo for mac - - href: 'https://www.example.com/public/foo.win.zip' - text: foo for windows ----- - -. Click the *Save* button. diff --git a/modules/customizing-project-request-message.adoc b/modules/customizing-project-request-message.adoc deleted file mode 100644 index 67a5cb425afa..000000000000 --- a/modules/customizing-project-request-message.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: PROCEDURE -[id="customizing-project-request-message_{context}"] -= Customizing the project request message - -When a developer or a service account that is unable to self-provision projects -makes a project creation request using the web console or CLI, the following -error message is returned by default: - -[source,terminal] ----- -You may not request a new project via this API. ----- - -Cluster administrators can customize this message. Consider updating it to -provide further instructions on how to request a new project specific to your -organization. For example: - -* To request a project, contact your system administrator at -[x-]`projectname@example.com`. -* To request a new project, fill out the project request form located at -[x-]`https://internal.example.com/openshift-project-request`. - -To customize the project request message: - -.Procedure - -. Edit the project configuration resource using the web console or CLI. - -** Using the web console: -... Navigate to the *Administration* -> *Cluster Settings* page. -... Click *Configuration* to view all configuration resources. -... Find the entry for *Project* and click *Edit YAML*. - -** Using the CLI: -... Log in as a user with `cluster-admin` privileges. -... Edit the `project.config.openshift.io/cluster` resource: -+ -[source,terminal] ----- -$ oc edit project.config.openshift.io/cluster ----- - -. Update the `spec` section to include the `projectRequestMessage` parameter and -set the value to your custom message: -+ -.Project configuration resource with custom project request message -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Project -metadata: - ... -spec: - projectRequestMessage: <message_string> ----- -+ -For example: - -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Project -metadata: - ... -spec: - projectRequestMessage: To request a project, contact your system administrator at projectname@example.com. ----- - -. After you save your changes, attempt to create a new project as a developer or -service account that is unable to self-provision projects to verify that your -changes were successfully applied. 
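For example, you can check the configured message and then confirm what a restricted user sees. This is a sketch only: run the second command as a user or service account that cannot self-provision projects, note that the project name `test-project` is hypothetical, and expect the returned error text to mirror whatever message you configured:

[source,terminal]
----
$ oc get project.config.openshift.io/cluster \
    -o jsonpath='{.spec.projectRequestMessage}'
----

[source,terminal]
----
$ oc new-project test-project
----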
diff --git a/modules/customizing-the-jenkins-image-stream-tag.adoc b/modules/customizing-the-jenkins-image-stream-tag.adoc deleted file mode 100644 index 18e44e51c516..000000000000 --- a/modules/customizing-the-jenkins-image-stream-tag.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/important-changes-to-openshift-jenkins-images.adoc -:_content-type: PROCEDURE - -[id="customizing-the-jenkins-image-stream-tag_{context}"] -= Customizing the Jenkins image stream tag - -To override the default upgrade behavior and control how the Jenkins image is upgraded, you set the image stream tag value that your Jenkins deployment configurations use. - -The default upgrade behavior is the behavior that existed when the Jenkins image was part of the install payload. The image stream tag names, `2` and `ocp-upgrade-redeploy`, in the `jenkins-rhel.json` image stream file use SHA-specific image references. Therefore, when those tags are updated with a new SHA, the {product-title} image change controller automatically redeploys the Jenkins deployment configuration from the associated templates, such as `jenkins-ephemeral.json` or `jenkins-persistent.json`. - -For new deployments, to override that default value, you change the value of the `JENKINS_IMAGE_STREAM_TAG` in the `jenkins-ephemeral.json` Jenkins template. For example, replace the `2` in `"value": "jenkins:2"` with one of the following image stream tags: - -* `ocp-upgrade-redeploy`, the default value, updates your Jenkins image when you upgrade {product-title}. -* `user-maintained-upgrade-redeploy` requires you to manually redeploy Jenkins by running `$ oc import-image jenkins:user-maintained-upgrade-redeploy -n openshift` after upgrading {product-title}. -* `scheduled-upgrade-redeploy` periodically checks the given `<image>:<tag>` combination for changes and upgrades the image when it changes. The image change controller pulls the changed image and redeploys the Jenkins deployment configuration provisioned by the templates. For more information about this scheduled import policy, see the "Adding tags to image streams" in the following "Additional resources." - -[NOTE] -==== -To override the current upgrade value for existing deployments, change the values of the environment variables that correspond to those template parameters. -==== - -.Prerequisites - -* You are running OpenShift Jenkins on {product-title} 4.13. -* You know the namespace where OpenShift Jenkins is deployed. - -.Procedure - -* Set the image stream tag value, replacing `<namespace>` with namespace where OpenShift Jenkins is deployed and `<image_stream_tag>` with an image stream tag: -+ -.Example -[source,terminal] ----- -$ oc patch dc jenkins -p '{"spec":{"triggers":[{"type":"ImageChange","imageChangeParams":{"automatic":true,"containerNames":["jenkins"],"from":{"kind":"ImageStreamTag","namespace":"<namespace>","name":"jenkins:<image_stream_tag>"}}}]}}' ----- -+ -[TIP] -==== -Alternatively, to edit the Jenkins deployment configuration YAML, enter `$ oc edit dc/jenkins -n <namespace>` and update the `value: 'jenkins:<image_stream_tag>'` line. 
-==== diff --git a/modules/customizing-the-login-page.adoc b/modules/customizing-the-login-page.adoc deleted file mode 100644 index ec9889b4e7c3..000000000000 --- a/modules/customizing-the-login-page.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="customizing-the-login-page_{context}"] -= Customizing the login page - -Create Terms of Service information with custom login pages. Custom login pages -can also be helpful if you use a third-party login provider, such as GitHub or -Google, to show users a branded page that they trust and expect before being -redirected to the authentication provider. You can also render custom error -pages during the authentication process. - -[NOTE] -==== -Customizing the error template is limited to identity providers (IDPs) that use redirects, such as request header and OIDC-based IDPs. It does not have an effect on IDPs that use direct password authentication, such as LDAP and htpasswd. -==== - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. Run the following commands to create templates you can modify: -+ -[source,terminal] ----- -$ oc adm create-login-template > login.html ----- -+ -[source,terminal] ----- -$ oc adm create-provider-selection-template > providers.html ----- -+ -[source,terminal] ----- -$ oc adm create-error-template > errors.html ----- - -. Create the secrets: -+ -[source,terminal] ----- -$ oc create secret generic login-template --from-file=login.html -n openshift-config ----- -+ -[source,terminal] ----- -$ oc create secret generic providers-template --from-file=providers.html -n openshift-config ----- -+ -[source,terminal] ----- -$ oc create secret generic error-template --from-file=errors.html -n openshift-config ----- - -. Run: -+ -[source,terminal] ----- -$ oc edit oauths cluster ----- - -. Update the specification: -+ -[source,yaml] ----- -spec: - templates: - error: - name: error-template - login: - name: login-template - providerSelection: - name: providers-template ----- -+ -Run `oc explain oauths.spec.templates` to understand the options. diff --git a/modules/customizing-the-web-console-URL.adoc b/modules/customizing-the-web-console-URL.adoc deleted file mode 100644 index aada0fac09c4..000000000000 --- a/modules/customizing-the-web-console-URL.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="customizing-the-web-console-url_{context}"] -= Customizing console routes - -For `console` and `downloads` routes, custom routes functionality uses the `ingress` config route configuration API. If the `console` custom route is set up in both the `ingress` config and `console-operator` config, then the new `ingress` config custom route configuration takes precedent. The route configuration with the `console-operator` config is deprecated. - -[id="customizing-the-console-route_{context}"] -== Customizing the console route - -You can customize the console route by setting the custom hostname and TLS certificate in the `spec.componentRoutes` field of the cluster `Ingress` configuration. - -.Prerequisites - -* You have logged in to the cluster as a user with administrative privileges. -* You have created a secret in the `openshift-config` namespace containing the TLS certificate and key. 
This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. -+ -[TIP] -==== -You can create a TLS secret by using the `oc create secret tls` command. -==== - -.Procedure - -. Edit the cluster `Ingress` configuration: -+ -[source,terminal] ----- -$ oc edit ingress.config.openshift.io cluster ----- - -. Set the custom hostname and optionally the serving certificate and key: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - componentRoutes: - - name: console - namespace: openshift-console - hostname: <custom_hostname> <1> - servingCertKeyPairSecret: - name: <secret_name> <2> ----- -<1> The custom hostname. -<2> Reference to a secret in the `openshift-config` namespace that contains a TLS certificate (`tls.crt`) and key (`tls.key`). This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. - -. Save the file to apply the changes. - -[id="customizing-the-download-route_{context}"] -== Customizing the download route - -You can customize the download route by setting the custom hostname and TLS certificate in the `spec.componentRoutes` field of the cluster `Ingress` configuration. - -.Prerequisites - -* You have logged in to the cluster as a user with administrative privileges. -* You have created a secret in the `openshift-config` namespace containing the TLS certificate and key. This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. -+ -[TIP] -==== -You can create a TLS secret by using the `oc create secret tls` command. -==== - -.Procedure - -. Edit the cluster `Ingress` configuration: -+ -[source,terminal] ----- -$ oc edit ingress.config.openshift.io cluster ----- - -. Set the custom hostname and optionally the serving certificate and key: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - componentRoutes: - - name: downloads - namespace: openshift-console - hostname: <custom_hostname> <1> - servingCertKeyPairSecret: - name: <secret_name> <2> ----- -<1> The custom hostname. -<2> Reference to a secret in the `openshift-config` namespace that contains a TLS certificate (`tls.crt`) and key (`tls.key`). This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. - -. Save the file to apply the changes. diff --git a/modules/data-storage-management.adoc b/modules/data-storage-management.adoc deleted file mode 100644 index 5093c5df74ac..000000000000 --- a/modules/data-storage-management.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/optimizing-storage.adoc - -[id="data-storage-management_{context}"] -= Data storage management - -The following table summarizes the main directories that {product-title} components write data to. - -.Main directories for storing {product-title} data -[options="header,footer"] -|=== -|Directory|Notes|Sizing|Expected growth - -|*_/var/lib/etcd_* -|Used for etcd storage when storing the database. -|Less than 20 GB. - -Database can grow up to 8 GB. -|Will grow slowly with the environment. Only storing metadata. - -Additional 20-25 GB for every additional 8 GB of memory. 
- -|*_/var/lib/containers_* -|This is the mount point for the CRI-O runtime. Storage used for active container runtimes, including pods, and storage of local images. Not used for registry storage. -|50 GB for a node with 16 GB memory. Note that this sizing should not be used to determine minimum cluster requirements. - -Additional 20-25 GB for every additional 8 GB of memory. -|Growth is limited by capacity for running containers. - -|*_/var/lib/kubelet_* -|Ephemeral volume storage for pods. This includes anything external that is mounted into a container at runtime. Includes environment variables, kube secrets, and data volumes not backed by persistent volumes. -|Varies -|Minimal if pods requiring storage are using persistent volumes. If using ephemeral storage, this can grow quickly. - -|*_/var/log_* -|Log files for all components. -|10 to 30 GB. -|Log files can grow quickly; size can be managed by growing disks or by using log rotate. - -|=== diff --git a/modules/dedicated-accessing-your-cluster.adoc b/modules/dedicated-accessing-your-cluster.adoc deleted file mode 100644 index 1781bfe06286..000000000000 --- a/modules/dedicated-accessing-your-cluster.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/accessing-your-services.adoc - -[id="dedicated-accessing-your-cluster_{context}"] -= Accessing your cluster - -Use the following steps to access your {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to access. - - . Click *Launch Console*. diff --git a/modules/dedicated-admin-granting-permissions.adoc b/modules/dedicated-admin-granting-permissions.adoc deleted file mode 100644 index b959243257fd..000000000000 --- a/modules/dedicated-admin-granting-permissions.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-admin-granting-permissions_{context}"] -= Granting permissions to users or groups - -To grant permissions to other users or groups, you can add, or _bind_, a role to -them using the following commands: - -[source,terminal] ----- -$ oc adm policy add-role-to-user <role> <user_name> -$ oc adm policy add-role-to-group <role> <group_name> ----- diff --git a/modules/dedicated-aws-dc-existing.adoc b/modules/dedicated-aws-dc-existing.adoc deleted file mode 100644 index 878293949393..000000000000 --- a/modules/dedicated-aws-dc-existing.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-dc-existing"] -= Connecting to an existing Direct Connect Gateway - -.Prerequisites - -* Confirm the CIDR range of the {product-title} VPC will not conflict with any other VGWs you have associated. -* Gather the following information: -** The Direct Connect Gateway ID. -** The AWS Account ID associated with the virtual interface. -** The BGP ASN assigned for the DXGateway. Optional: the Amazon default ASN may also be used. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *VPC* from the *Services* menu. -. From *VPN Connections*, select *Virtual Private Gateways*. -. Select *Create Virtual Private Gateway*. -. Give the Virtual Private Gateway a suitable name. -. 
Click *Custom ASN* and enter the *Amazon side ASN* value gathered previously or use the Amazon Provided ASN. -. Create the Virtual Private Gateway. -. In the *Navigation* pane of the {product-title} AWS Account Dashboard, choose *Virtual private gateways* and select the virtual private gateway. Choose *View details*. -. Choose *Direct Connect gateway associations* and click *Associate Direct Connect gateway*. -. Under *Association account type*, for Account owner, choose *Another account*. -. For *Direct Connect gateway owner*, enter the ID of the AWS account that owns the Direct Connect gateway. -. Under *Association settings*, for Direct Connect gateway ID, enter the ID of the Direct Connect gateway. -. Under *Association settings*, for Virtual interface owner, enter the ID of the AWS account that owns the virtual interface for the association. -. Optional: Add prefixes to Allowed prefixes, separating them using commas. -. Choose *Associate Direct Connect gateway*. -. After the Association Proposal has been sent, it will be waiting for your -acceptance. The final steps you must perform are available in the -link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[AWS Documentation]. diff --git a/modules/dedicated-aws-dc-hvif.adoc b/modules/dedicated-aws-dc-hvif.adoc deleted file mode 100644 index a57b2273b2fa..000000000000 --- a/modules/dedicated-aws-dc-hvif.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-dc-hvif"] -= Creating the hosted Virtual Interface - -.Prerequisites - -* Gather {product-title} AWS Account ID. - -[id="dedicated-aws-dc-hvif-type"] -== Determining the type of Direct Connect connection - -View the Direct Connect Virtual Interface details to determine the type of -connection. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. Select *Direct Connect* from the *Services* menu. -. There will be one or more Virtual Interfaces waiting to be accepted, select one of them to view the *Summary*. -. View the Virtual Interface type: private or public. -. Record the *Amazon side ASN* value. - -If the Direct Connect Virtual Interface type is Private, a Virtual Private -Gateway is created. If the Direct Connect Virtual Interface is Public, a Direct -Connect Gateway is created. - -[id="dedicated-aws-dc-hvif-private"] -== Creating a Private Direct Connect - -A Private Direct Connect is created if the Direct Connect Virtual Interface type -is Private. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the AWS region, select *VPC* from the *Services* menu. -. Select *Virtual Private Gateways* from *VPN Connections*. -. Click *Create Virtual Private Gateway*. -. Give the Virtual Private Gateway a suitable name. -. Select *Custom ASN* and enter the *Amazon side ASN* value gathered previously. -. Create the Virtual Private Gateway. -. Click the newly created Virtual Private Gateway and choose *Attach to VPC* from the *Actions* tab. -. Select the *{product-title} Cluster VPC* from the list, and attach the Virtual Private Gateway to the VPC. -. From the *Services* menu, click *Direct Connect*. Choose one of the Direct Connect Virtual Interfaces from the list. -. 
Acknowledge the *I understand that Direct Connect port charges apply once I click Accept Connection* message, then choose *Accept Connection*. -. Choose to *Accept* the Virtual Private Gateway Connection and select the Virtual Private Gateway that was created in the previous steps. -. Select *Accept* to accept the connection. -. Repeat the previous steps if there is more than one Virtual Interface. - -[id="dedicated-aws-dc-hvif-public"] -== Creating a Public Direct Connect - -A Public Direct Connect is created if the Direct Connect Virtual Interface type -is Public. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. -. Select *Direct Connect Gateways* and *Create Direct Connect Gateway*. -. Give the Direct Connect Gateway a suitable name. -. In the *Amazon side ASN*, enter the Amazon side ASN value gathered previously. -. Create the Direct Connect Gateway. -. Select *Direct Connect* from the *Services* menu. -. Select one of the Direct Connect Virtual Interfaces from the list. -. Acknowledge the *I understand that Direct Connect port charges apply once I click Accept Connection* message, then choose *Accept Connection*. -. Choose to *Accept* the Direct Connect Gateway Connection and select the Direct Connect Gateway that was created in the previous steps. -. Click *Accept* to accept the connection. -. Repeat the previous steps if there is more than one Virtual Interface. - -[id="dedicated-aws-dc-hvif-verifying"] -== Verifying the Virtual Interfaces - -After the Direct Connect Virtual Interfaces have been accepted, wait a short -period and view the status of the Interfaces. - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard and select the correct region. -. From the {product-title} AWS Account region, select *Direct Connect* from the *Services* menu. -. Select one of the Direct Connect Virtual Interfaces from the list. -. Check the Interface State has become *Available* -. Check the Interface BGP Status has become *Up*. -. Repeat this verification for any remaining Direct Connect Interfaces. - -After the Direct Connect Virtual Interfaces are available, you can log in to the -{product-title} AWS Account Dashboard and download the Direct Connect configuration file for -configuration on your side. diff --git a/modules/dedicated-aws-dc-methods.adoc b/modules/dedicated-aws-dc-methods.adoc deleted file mode 100644 index ee1aa058e17c..000000000000 --- a/modules/dedicated-aws-dc-methods.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-dc.adoc - -[id="dedicated-aws-dc-methods"] -= AWS Direct Connect methods - -A Direct Connect connection requires a hosted Virtual Interface (VIF) connected -to a Direct Connect Gateway (DXGateway), which is in turn associated to a -Virtual Gateway (VGW) or a Transit Gateway in order to access a remote VPC in -the same or another account. - -If you do not have an existing DXGateway, the typical process involves creating -the hosted VIF, with the DXGateway and VGW being created in the {product-title} AWS Account. - -If you have an existing DXGateway connected to one or more existing VGWs, the -process involves the {product-title} AWS Account sending an Association Proposal -to the DXGateway owner. 
The DXGateway owner must ensure that the proposed CIDR -will not conflict with any other VGWs they have associated. - -See the following AWS documentation for more details: - -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithVirtualInterfaces.html[Virtual Interfaces] -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html[Direct Connect Gateways] -* link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/multi-account-associate-vgw.html[Associating a VGW across accounts] - -[IMPORTANT] -==== -When connecting to an existing DXGateway, you are responsible for the -link:https://aws.amazon.com/directconnect/pricing/[costs]. -==== - -There are two configuration options available: - -[horizontal] -Method 1:: Create the hosted VIF and then the DXGateway and VGW. -Method 2:: Request a connection via an existing Direct Connect Gateway that you own. diff --git a/modules/dedicated-aws-vpc-accepting-peering.adoc b/modules/dedicated-aws-vpc-accepting-peering.adoc deleted file mode 100644 index d6fc3a90f00c..000000000000 --- a/modules/dedicated-aws-vpc-accepting-peering.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-accepting-peering"] -= Accepting the VPC peer request - -After you create the VPC peering connection, you must accept the request in the -Customer AWS Account. - -.Prerequisites - -* Initiate the VPC peer request. - -.Procedure - -. Log in to the AWS Web Console. -. Navigate to *VPC Service*. -. Go to *Peering Connections*. -. Click on *Pending peering connection*. -. Confirm the AWS Account and VPC ID that the request originated from. This should -be from the {product-title} AWS Account and {product-title} Cluster VPC. -. Click *Accept Request*. diff --git a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc b/modules/dedicated-aws-vpc-configuring-routing-tables.adoc deleted file mode 100644 index 13f250517ddf..000000000000 --- a/modules/dedicated-aws-vpc-configuring-routing-tables.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-configuring-routing-tables"] -= Configuring the routing tables - -After you accept the VPC peering request, both VPCs must configure their routes -to communicate across the peering connection. - -.Prerequisites - -* Initiate and accept the VPC peer request. - -.Procedure - -. Log in to the AWS Web Console for the {product-title} AWS Account. -. Navigate to the *VPC Service*, then *Route Tables*. -. Select the Route Table for the {product-title} Cluster VPC. -+ -[NOTE] -==== -On some clusters, there may be more than one route table for a particular VPC. -Select the private one that has a number of explicitly associated subnets. -==== - -. Select the *Routes* tab, then *Edit*. -. Enter the Customer VPC CIDR block in the *Destination* text box. -. Enter the Peering Connection ID in the *Target* text box. -. Click *Save*. - -. You must complete the same process with the other VPC's CIDR block: -.. Log into the Customer AWS Web Console → *VPC Service* → *Route Tables*. -.. Select the Route Table for your VPC. -.. Select the *Routes* tab, then *Edit*. -.. Enter the {product-title} Cluster VPC CIDR block in the *Destination* text box. -.. 
Enter the Peering Connection ID in the *Target* text box. -.. Click *Save*. - -The VPC peering connection is now complete. Follow the verification procedure to -ensure connectivity across the peering connection is working. diff --git a/modules/dedicated-aws-vpc-initiating-peering.adoc b/modules/dedicated-aws-vpc-initiating-peering.adoc deleted file mode 100644 index 1ba72f76680c..000000000000 --- a/modules/dedicated-aws-vpc-initiating-peering.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-initiating-peering"] -= Initiating the VPC peer request - -You can send a VPC peering connection request from the {product-title} AWS Account to the -Customer AWS Account. - -.Prerequisites - -* Gather the following information about the Customer VPC required to initiate the -peering request: -** Customer AWS account number -** Customer VPC ID -** Customer VPC Region -** Customer VPC CIDR -* Check the CIDR block used by the {product-title} Cluster VPC. If it overlaps or -matches the CIDR block for the Customer VPC, then peering between these two VPCs -is not possible; see the Amazon VPC -link:https://docs.aws.amazon.com/vpc/latest/peering/invalid-peering-configurations.html[Unsupported VPC Peering Configurations] -documentation for details. If the CIDR blocks do not overlap, you can continue -with the procedure. - -.Procedure - -. Log in to the Web Console for the {product-title} AWS Account and navigate to the -*VPC Dashboard* in the region where the cluster is being hosted. -. Go to the *Peering Connections* page and click the *Create Peering Connection* -button. -. Verify the details of the account you are logged in to and the details of the -account and VPC you are connecting to: -.. *Peering connection name tag*: Set a descriptive name for the VPC Peering Connection. -.. *VPC (Requester)*: Select the {product-title} Cluster VPC ID from the dropdown -*list. -.. *Account*: Select *Another account* and provide the Customer AWS Account number -*(without dashes). -.. *Region*: If the Customer VPC Region differs from the current region, select -*Another Region* and select the customer VPC Region from the dropdown list. -.. *VPC (Accepter)*: Set the Customer VPC ID. -. Click *Create Peering Connection*. -. Confirm that the request enters a *Pending* state. If it enters a *Failed* -state, confirm the details and repeat the process. diff --git a/modules/dedicated-aws-vpc-peering-terms.adoc b/modules/dedicated-aws-vpc-peering-terms.adoc deleted file mode 100644 index 159ded0b695f..000000000000 --- a/modules/dedicated-aws-vpc-peering-terms.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -[id="dedicated-aws-vpc-peering-terms"] -= VPC peering terms - -When setting up a VPC peering connection between two VPCs on two separate AWS -accounts, the following terms are used: - -[horizontal] -{product-title} AWS Account:: The AWS account that contains the {product-title} cluster. -{product-title} Cluster VPC:: The VPC that contains the {product-title} cluster. -Customer AWS Account:: Your non-{product-title} AWS Account that you would like to peer with. -Customer VPC:: The VPC in your AWS Account that you would like to peer with. -Customer VPC Region:: The region where the customer's VPC resides. 
- -[NOTE] -==== -As of July 2018, AWS supports inter-region VPC peering between all commercial regions link:https://aws.amazon.com/vpc/faqs/#Peering_Connections[excluding China]. -==== diff --git a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc b/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc deleted file mode 100644 index 85f30e1b9b85..000000000000 --- a/modules/dedicated-aws-vpc-verifying-troubleshooting.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-peering.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpc-verifying-troubleshooting"] -= Verifying and troubleshooting VPC peering - -After you set up a VPC peering connection, it is best to confirm it has been -configured and is working correctly. - -.Prerequisites - -* Initiate and accept the VPC peer request. -* Configure the routing tables. - -.Procedure - -* In the AWS console, look at the route table for the cluster VPC that is peered. -Ensure that the steps for configuring the routing tables were followed and that -there is a route table entry pointing the VPC CIDR range destination to the -peering connection target. -+ -If the routes look correct on both the {product-title} Cluster VPC route table -and Customer VPC route table, then the connection should be tested using the -`netcat` method below. If the test calls are successful, then VPC peering is -working correctly. - -* To test network connectivity to an endpoint device, `nc` (or `netcat`) is a -helpful troubleshooting tool. It is included in the default image and provides -quick and clear output if a connection can be established: - -.. Create a temporary pod using the `busybox` image, which cleans up after itself: -+ ----- -$ oc run netcat-test \ - --image=busybox -i -t \ - --restart=Never --rm \ - -- /bin/sh ----- - -.. Check the connection using `nc`. -+ --- -* Example successful connection results: -+ ----- -/ nc -zvv 192.168.1.1 8080 -10.181.3.180 (10.181.3.180:8080) open -sent 0, rcvd 0 ----- - -* Example failed connection results: -+ ----- -/ nc -zvv 192.168.1.2 8080 -nc: 10.181.3.180 (10.181.3.180:8081): Connection refused -sent 0, rcvd 0 ----- --- - -.. Exit the container, which automatically deletes the Pod: -+ ----- -/ exit ----- diff --git a/modules/dedicated-aws-vpn-creating.adoc b/modules/dedicated-aws-vpn-creating.adoc deleted file mode 100644 index ed6974d85835..000000000000 --- a/modules/dedicated-aws-vpn-creating.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpn-creating"] -= Creating a VPN connection - -You can configure an Amazon Web Services (AWS) {product-title} cluster to use a -customer's on-site hardware VPN device using the following procedures. - -.Prerequisites - -* Hardware VPN gateway device model and software version, for example Cisco ASA -running version 8.3. See the Amazon VPC -link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#DevicesTested[Network Administrator Guide] -to confirm whether your gateway device is supported by AWS. -* Public, static IP address for the VPN gateway device. -* BGP or static routing: if BGP, the ASN is required. If static routing, you must -configure at least one static route. -* Optional: IP and Port/Protocol of a reachable service to test the VPN connection. 
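
The route-table check in the peering verification procedure above can also be performed without the console. This is a sketch only, assuming an authenticated AWS CLI and a placeholder VPC ID; in the output, look for a route whose destination is the peer VPC CIDR and whose target is the `pcx-` peering connection ID:

[source,terminal]
----
$ aws ec2 describe-route-tables \
    --filters Name=vpc-id,Values=<cluster_vpc_id> \
    --query 'RouteTables[].Routes[]' \
    --output table
----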
- -[id="dedicated-aws-vpn-creating-configuring"] -== Configuring the VPN connection - -.Procedure - -. Log in to the {product-title} AWS Account Dashboard, and navigate to the VPC Dashboard. -. Click on *Your VPCs* and identify the name and VPC ID for the VPC containing the {product-title} cluster. -. From the VPC Dashboard, click *Customer Gateway*. -. Click *Create Customer Gateway* and give it a meaningful name. -. Select the routing method: *Dynamic* or *Static*. -. If Dynamic, enter the BGP ASN in the field that appears. -. Paste in the VPN gateway endpoint IP address. -. Click *Create*. -. If you do not already have a Virtual Private Gateway attached to the intended VPC: -.. From the VPC Dashboard, click on *Virtual Private Gateway*. -.. Click *Create Virtual Private Gateway*, give it a meaningful name, and click *Create*. -.. Leave the default Amazon default ASN. -.. Select the newly created gateway, click *Attach to VPC*, and attach it to the cluster VPC you identified earlier. - -[id="dedicated-aws-vpn-creating-establishing"] -== Establishing the VPN Connection - -.Procedure - -. From the VPC dashboard, click on *Site-to-Site VPN Connections*. -. Click *Create VPN Connection*. -.. Give it a meaningful name tag. -.. Select the virtual private gateway created previously. -.. For Customer Gateway, select *Existing*. -.. Select the customer gateway device by name. -.. If the VPN will use BGP, select *Dynamic*, otherwise select *Static*. Enter -Static IP CIDRs. If there are multiple CIDRs, add each CIDR as *Another Rule*. -.. Click *Create*. -.. Wait for VPN status to change to *Available*, approximately 5 to 10 minutes. -. Select the VPN you just created and click *Download Configuration*. -.. From the dropdown list, select the vendor, platform, and version of the customer -gateway device, then click *Download*. -.. The *Generic* vendor configuration is also available for retrieving information -in a plain text format. - -[NOTE] -==== -After the VPN connection has been established, be sure to set up Route -Propagation or the VPN may not function as expected. -==== - -[NOTE] -==== -Note the VPC subnet information, which you must add to your configuration as the -remote network. -==== - -[id="dedicated-aws-vpn-creating-propagation"] -== Enabling VPN route propagation - -After you have set up the VPN connection, you must ensure that route propagation -is enabled so that the necessary routes are added to the VPC's route table. - -.Procedure - -. From the VPC Dashboard, click on *Route Tables*. -. Select the private Route table associated with the VPC that contains your -{product-title} cluster. -+ -[NOTE] -==== -On some clusters, there may be more than one route table for a particular VPC. -Select the private one that has a number of explicitly associated subnets. -==== -. Click on the *Route Propagation* tab. -. In the table that appears, you should see the virtual private gateway you -created previously. Check the value in the *Propagate column*. -.. If Propagate is set to *No*, click *Edit route propagation*, check the Propagate -checkbox next to the virtual private gateway's name and click *Save*. - -After you configure your VPN tunnel and AWS detects it as *Up*, your static or -BGP routes are automatically added to the route table. 
diff --git a/modules/dedicated-aws-vpn-troubleshooting.adoc b/modules/dedicated-aws-vpn-troubleshooting.adoc deleted file mode 100644 index 3db525ad6248..000000000000 --- a/modules/dedicated-aws-vpn-troubleshooting.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -[id="dedicated-aws-vpn-troubleshooting"] -= Troubleshooting the VPN connection - -[discrete] -[id="dedicated-aws-vpn-tunnel-down"] -== Tunnel does not connect - -If the tunnel connection is still *Down*, there are several things you can verify: - -* The AWS tunnel will not initiate a VPN connection. The connection attempt must be initiated from the Customer Gateway. -* Ensure that your source traffic is coming from the same IP as the configured customer gateway. AWS will silently drop all traffic to the gateway whose source IP address does not match. -* Ensure that your configuration matches values link:https://docs.aws.amazon.com/vpc/latest/adminguide/Introduction.html#CGRequirements[supported by AWS]. This includes IKE versions, DH groups, IKE lifetime, and more. -* Recheck the route table for the VPC. Ensure that propagation is enabled and that there are entries in the route table that have the virtual private gateway you created earlier as a target. -* Confirm that you do not have any firewall rules that could be causing an interruption. -* Check if you are using a policy-based VPN as this can cause complications depending on how it is configured. -* Further troubleshooting steps can be found at the link:https://aws.amazon.com/premiumsupport/knowledge-center/vpn-tunnel-troubleshooting/[AWS Knowledge Center]. - -[discrete] -[id="dedicated-aws-vpn-tunnel-stay-connected"] -== Tunnel does not stay connected - -If the tunnel connection has trouble staying *Up* consistently, know that all -AWS tunnel connections must be initiated from your gateway. AWS tunnels -link:https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html#CustomerGateway[do -not initiate tunneling]. - -Red Hat recommends setting up an SLA Monitor (Cisco ASA) or some device on your -side of the tunnel that constantly sends "interesting" traffic, for example -`ping`, `nc`, or `telnet`, at any IP address configured within the VPC CIDR -range. It does not matter whether the connection is successful, just that the -traffic is being directed at the tunnel. - -[discrete] -[id="dedicated-aws-vpn-secondary-tunnel-down"] -== Secondary tunnel in Down state - -When a VPN tunnel is created, AWS creates an additional failover tunnel. -Depending upon the gateway device, sometimes the secondary tunnel will be seen -as in the *Down* state. - -The AWS Notification is as follows: - ----- -You have new non-redundant VPN connections - -One or more of your vpn connections are not using both tunnels. This mode of -operation is not highly available and we strongly recommend you configure your -second tunnel. View your non-redundant VPN connections. 
----- diff --git a/modules/dedicated-aws-vpn-verifying.adoc b/modules/dedicated-aws-vpn-verifying.adoc deleted file mode 100644 index d0a766715fea..000000000000 --- a/modules/dedicated-aws-vpn-verifying.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-vpn.adoc - -:_content-type: PROCEDURE -[id="dedicated-aws-vpn-verifying"] -= Verifying the VPN connection - -After you have set up your side of the VPN tunnel, you can verify that the -tunnel is up in the AWS console and that connectivity across the tunnel is -working. - -.Prerequisites - -* Created a VPN connection. - -.Procedure - -. *Verify the tunnel is up in AWS.* - -.. From the VPC Dashboard, click on *VPN Connections*. -.. Select the VPN connection you created previously and click the *Tunnel Details* tab. -.. You should be able to see that at least one of the VPN tunnels is *Up*. - -. *Verify the connection.* -+ -To test network connectivity to an endpoint device, `nc` (or `netcat`) is a -helpful troubleshooting tool. It is included in the default image and provides -quick and clear output if a connection can be established: - -.. Create a temporary pod using the `busybox` image, which cleans up after itself: -+ ----- -$ oc run netcat-test \ - --image=busybox -i -t \ - --restart=Never --rm \ - -- /bin/sh ----- - -.. Check the connection using `nc`. -+ --- -* Example successful connection results: -+ ----- -/ nc -zvv 192.168.1.1 8080 -10.181.3.180 (10.181.3.180:8080) open -sent 0, rcvd 0 ----- - -* Example failed connection results: -+ ----- -/ nc -zvv 192.168.1.2 8080 -nc: 10.181.3.180 (10.181.3.180:8081): Connection refused -sent 0, rcvd 0 ----- --- - -.. Exit the container, which automatically deletes the Pod: -+ ----- -/ exit ----- diff --git a/modules/dedicated-cluster-admin-enable.adoc b/modules/dedicated-cluster-admin-enable.adoc deleted file mode 100644 index 00259b718368..000000000000 --- a/modules/dedicated-cluster-admin-enable.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/cluster-admin-role.adoc - -[id="dedicated-cluster-admin-enable"] -= Enabling the cluster-admin role for your cluster - -The cluster-admin role must be enabled at the cluster level before it can be assigned to a user. - -.Prerequisites -. Open a technical support case with Red Hat to request that `cluster-admin` be enabled for your cluster. - -.Procedure -. In {cluster-manager}, select the cluster you want to assign cluster-admin privileges. -. Under the *Actions* dropdown menu, select *Allow cluster-admin access*. diff --git a/modules/dedicated-cluster-admin-grant.adoc b/modules/dedicated-cluster-admin-grant.adoc deleted file mode 100644 index bd2cd2c01c17..000000000000 --- a/modules/dedicated-cluster-admin-grant.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/cluster-admin-role.adoc - -[id="dedicated-cluster-admin-grant"] -= Granting the cluster-admin role to users - -After enabling cluster-admin rights on your cluster, you can assign the role to users. - -.Prerequisites -* Cluster access with cluster owner permissions - -.Procedure -. In {cluster-manager}, select the cluster you want to assign cluster-admin privileges. -. Under the *Access Control* tab, locate the *Cluster Administrative Users* section. Click *Add user*. -. 
After determining an appropriate User ID, select *cluster-admin* from the *Group* selection, then click *Add user*. -+ -[NOTE] -==== -Cluster-admin user creation can take several minutes to complete. -==== -+ -[NOTE] -==== -Existing dedicated-admin users cannot elevate their role to cluster-admin. A new user must be created with the cluster-admin role assigned. -==== diff --git a/modules/dedicated-cluster-install-deploy.adoc b/modules/dedicated-cluster-install-deploy.adoc deleted file mode 100644 index f4fe1c3c2f66..000000000000 --- a/modules/dedicated-cluster-install-deploy.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/dedicated-cluster-deploying.adoc - -[id="dedicated-cluster-install-deploy"] - -= Installing OpenShift Logging and OpenShift Elasticsearch Operators - -You can use the {product-title} console to install OpenShift Logging by deploying instances of -the OpenShift Logging and OpenShift Elasticsearch Operators. The Red Hat OpenShift Logging Operator -creates and manages the components of the logging stack. The OpenShift Elasticsearch Operator -creates and manages the Elasticsearch cluster used by OpenShift Logging. - -[NOTE] -==== -The OpenShift Logging solution requires that you install both the -Red Hat OpenShift Logging Operator and OpenShift Elasticsearch Operator. When you deploy an instance -of the Red Hat OpenShift Logging Operator, it also deploys an instance of the OpenShift Elasticsearch -Operator. -==== - -Your OpenShift Dedicated cluster includes 600 GiB of persistent storage that is -exclusively available for deploying Elasticsearch for OpenShift Logging. - -Elasticsearch is a memory-intensive application. Each Elasticsearch node needs -16Gi of memory for both memory requests and limits. Each Elasticsearch node can -operate with a lower memory setting, though this is not recommended for -production deployments. - -.Procedure - -. Install the OpenShift Elasticsearch Operator from the OperatorHub: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *OpenShift Elasticsearch Operator* from the list of available Operators, and click *Install*. - -.. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-logging*. -Then, click *Install*. - -. Install the Red Hat OpenShift Logging Operator from the OperatorHub: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. - -.. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-logging*. -Then, click *Install*. - -. Verify the operator installations: - -.. Switch to the *Operators* → *Installed Operators* page. - -.. Ensure that *Red Hat OpenShift Logging* and *OpenShift Elasticsearch* Operators are listed in the -*openshift-logging* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an operator might display a *Failed* status. If the operator then installs with an *InstallSucceeded* message, -you can safely ignore the *Failed* message. -==== -+ -If either operator does not appear as installed, to troubleshoot further: -+ -* Switch to the *Operators* → *Installed Operators* page and inspect -the *Status* column for any errors or failures. -* Switch to the *Workloads* → *Pods* page and check the logs in each pod in the -`openshift-logging` project that is reporting issues. - -. 
Create and deploy an OpenShift Logging instance: - -.. Switch to the *Operators* → *Installed Operators* page. - -.. Click the installed *Red Hat OpenShift Logging* Operator. - -.. Under the *Details* tab, in the *Provided APIs* section, in the -*Cluster Logging* box, click *Create Instance* . Select *YAML View* and paste the following YAML definition into the window -that displays. -+ -.Cluster Logging custom resource (CR) -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogging" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - managementState: "Managed" - logStore: - type: "elasticsearch" - elasticsearch: - nodeCount: 3 - storage: - storageClassName: gp2 - size: "200Gi" - redundancyPolicy: "SingleRedundancy" - nodeSelector: - node-role.kubernetes.io/worker: "" - resources: - limits: - memory: "16Gi" - requests: - memory: "16Gi" - visualization: - type: "kibana" - kibana: - replicas: 1 - nodeSelector: - node-role.kubernetes.io/worker: "" - collection: - logs: - type: "fluentd" - fluentd: {} - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -.. Click *Create* to deploy the logging instance, which creates the Cluster -Logging and Elasticsearch custom resources. - -. Verify that the pods for the OpenShift Logging instance deployed: - -.. Switch to the *Workloads* → *Pods* page. - -.. Select the *openshift-logging* project. -+ -You should see several pods for OpenShift Logging, Elasticsearch, Fluentd, and Kibana similar to the following list: -+ -* cluster-logging-operator-cb795f8dc-xkckc -* elasticsearch-cdm-b3nqzchd-1-5c6797-67kfz -* elasticsearch-cdm-b3nqzchd-2-6657f4-wtprv -* elasticsearch-cdm-b3nqzchd-3-588c65-clg7g -* fluentd-2c7dg -* fluentd-9z7kk -* fluentd-br7r2 -* fluentd-fn2sb -* fluentd-pb2f8 -* fluentd-zqgqx -* kibana-7fb4fd4cc9-bvt4p - -. Access the OpenShift Logging interface, *Kibana*, from the *Observe* → -*Logging* page of the {product-title} web console. diff --git a/modules/dedicated-configuring-your-application-routes.adoc b/modules/dedicated-configuring-your-application-routes.adoc deleted file mode 100644 index 2620204b24ed..000000000000 --- a/modules/dedicated-configuring-your-application-routes.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * welcome/accessing-your-services.adoc - -[id="dedicated-configuring-your-application-routes_{context}"] -= Configuring your application routes - -When your cluster is provisioned, an AWS elastic load balancer (ELB) is created -to route application traffic into the cluster. The domain for your ELB is configured to route application traffic via -`http(s)://*.<cluster-id>.<shard-id>.p1.openshiftapps.com`. The `<shard-id>` is a -random four-character string assigned to your cluster at creation time. - -If you want to use custom domain names for your application routes, {product-title} supports -CNAME records in your DNS configuration that point to -`elb.apps.<cluster-id>.<shard-id>.p1.openshiftapps.com`. While `elb` is recommended as a -reminder for where this record is pointing, you can use any string for this -value. You can create these CNAME records for each custom route you have, or you -can create a wildcard CNAME record. For example: - -[source,text] ----- -*.openshift.example.com CNAME elb.apps.my-example.a1b2.p1.openshiftapps.com ----- - -This allows you to create routes like *_app1.openshift.example.com_* and -*_app2.openshift.example.com_* without having to update your DNS every time. 
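
With a wildcard CNAME record such as the one above in place, exposing a service under the custom domain and confirming DNS resolution might look like the following sketch; the service name is a placeholder and the hostnames reuse the example values:

[source,terminal]
----
$ oc expose service <service_name> --hostname=app1.openshift.example.com

$ dig +short app1.openshift.example.com CNAME
----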
- -//// - -Customers with configured VPC peering or VPN connections have the option of -requesting a second ELB, so that application routes can be configured as -internal-only or externally available. The domain for this ELB will be identical -to the first, with a different `<shard-id>` value. By default, application -routes are handled by the internal-only router. To expose an application or -service externally, you must create a new route with a specific label, -`route=external`. - -To expose a new route for an existing service, apply the label `route=external` -and define a hostname that contains the secondary, public router shard ID: - ----- -$ oc expose service <service-name> -l route=external --name=<custom-route-name> --hostname=<custom-hostname>.<shard-id>.<cluster-id>.openshiftapps.com ----- - -Alternatively, you can use a custom domain: - ----- -$ oc expose service <service-name> -l route=external --name=<custom-route-name> --hostname=<custom-domain> ----- - -//// diff --git a/modules/dedicated-creating-your-cluster.adoc b/modules/dedicated-creating-your-cluster.adoc deleted file mode 100644 index fefa226154a9..000000000000 --- a/modules/dedicated-creating-your-cluster.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/accessing-your-services.adoc - -[id="dedicated-creating-your-cluster_{context}"] -= Creating your cluster - -Use the following steps to create your {product-title} cluster. - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select *Create Cluster* -> *Red Hat OpenShift Dedicated*. - -. Enter your *Cluster name*, number of *Compute nodes*, and select an *AWS Region*. - -. Select your *Node Type*. The number and types of nodes available to you depend -upon your {product-title} subscription. - -. If you want to configure your networking IP ranges under *Advanced Options*, the -following are the default ranges available to use: - -.. Node CIDR: 10.0.0.0/16 - -.. Service CIDR: 172.30.0.0/16 - -.. Pod CIDR: 10.128.0.0/14 - -. Add your Identity provider by clicking the *Add OAuth Configuration* link or using the *Access Control* tab. - -. Add a _Dedicated Admin_ user by clicking the *Access Control* tab, then *Add User*. - -. Input the user's name, then click *Add*. - -In the *Overview* tab under the *Details* heading will have a *Status* -indicator. This will indicate that your cluster is *Ready* for use. diff --git a/modules/dedicated-enable-private-cluster-existing.adoc b/modules/dedicated-enable-private-cluster-existing.adoc deleted file mode 100644 index 01edb0d53b12..000000000000 --- a/modules/dedicated-enable-private-cluster-existing.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-private-cluster-existing"] -= Enabling private cluster on an existing cluster - -You can enable private clusters after a cluster has been created: - -.Prerequisites - -* AWS VPC Peering, VPN, DirectConnect, or TransitGateway has been configured to allow private access. - -.Procedure - -. Access your cluster in {cluster-manager}. -. Navigate to the *Networking* tab. -. Select *Make API private* under *Master API endpoint* and click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. 
-==== diff --git a/modules/dedicated-enable-private-cluster-new.adoc b/modules/dedicated-enable-private-cluster-new.adoc deleted file mode 100644 index f711bea16ff4..000000000000 --- a/modules/dedicated-enable-private-cluster-new.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-private-cluster-new"] -= Enabling private cluster on a new cluster - -You can enable private cluster settings when creating a new cluster: - -.Prerequisites - -* AWS VPC Peering, VPN, DirectConnect, or TransitGateway has been configured to allow private access. - -.Procedure - -. In {cluster-manager-first}, click *Create cluster* and select *{product-title}*. -. Configure your cluster details, then select *Advanced* in the Networking section. -. Determine your CIDR requirements for your network and input the required fields. -+ -[IMPORTANT] -==== -CIDR configurations cannot be changed later. Confirm your selections with your network administrator before proceeding. -==== -. Under *Cluster Privacy*, select *Private*. diff --git a/modules/dedicated-enable-public-cluster.adoc b/modules/dedicated-enable-public-cluster.adoc deleted file mode 100644 index 9ac737c6ecbc..000000000000 --- a/modules/dedicated-enable-public-cluster.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc - -[id="dedicated-enable-public-cluster"] -= Enabling public cluster on a private cluster - -You can set a private cluster to public facing: - -.Procedure - -. Access your cluster in {cluster-manager-first}. -. Navigate to the *Networking* tab. -. Deselect *Make API private* under *Master API endpoint* and click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/dedicated-exposing-TCP-services.adoc b/modules/dedicated-exposing-TCP-services.adoc deleted file mode 100644 index f5dcb989cca5..000000000000 --- a/modules/dedicated-exposing-TCP-services.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * welcome/accessing-your-services.adoc - -[id="dedicated-exposing-TCP-services_{context}"] -= Exposing TCP services - -{product-title} routes expose applications by proxying traffic through -HTTP/HTTPS(SNI)/TLS(SNI) to pods and services. A -link:https://kubernetes.io/docs/concepts/services-networking/#loadbalancer[LoadBalancer] -service creates an AWS Elastic Load Balancer (ELB) for your {product-title} -cluster, enabling direct TCP access to applications exposed by your LoadBalancer -service. - -[NOTE] -==== -LoadBalancer services require an additional purchase. Contact your sales team if -you are interested in using LoadBalancer services for your {product-title} -cluster. -==== - -== Checking your LoadBalancer Quota - -By purchasing LoadBalancer services, you are provided with a quota of -LoadBalancers available for your {product-title} cluster. - -[source,terminal] ----- -$ oc describe clusterresourcequota loadbalancer-quota ----- - -.Example output -[source,text] ----- -Name: loadbalancer-quota -Labels: <none> -... -Resource Used Hard --------- ---- ---- -services.loadbalancers 0 4 ----- - -== Exposing TCP service - -You can expose your applications over an external LoadBalancer service, enabling -access over the public internet. 
- -[source,terminal] ----- -$ oc expose dc httpd-example --type=LoadBalancer --name=lb-service ----- - -.Example output -[source,text] ----- -service/lb-service created ----- - -== Creating an internal-only TCP service - -You can alternatively expose your applications internally only, enabling access -only through AWS VPC Peering or a VPN connection. - -[source,terminal] ----- -$ oc expose dc httpd-example --type=LoadBalancer --name=internal-lb --dry-run -o yaml | awk '1;/metadata:/{ print " annotations:\n service.beta.kubernetes.io/aws-load-balancer-internal: \"true\"" }' | oc create -f - ----- - -.Example output -[source,terminal] ----- -service/internal-lb created ----- - -== Enabling LoadBalancer access logs - -You may, optionally, create an S3 bucket within your own AWS account, and configure the LoadBalancer service to send access logs to this S3 bucket at predefined intervals. - -=== Prerequisites - -You must first create the S3 bucket within your own AWS account, in the same AWS region that your {product-title} cluster is deployed. This S3 bucket can be configured with all public access blocked, including system permissions. After your S3 bucket is created, you must attach a policy to your bucket as https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy[outlined by AWS]. - -=== Configuring the LoadBalancer service - -Update and apply the following annotations to your service YAML definition, prior to creating the object in your cluster. - -[source,yaml] ----- -metadata: - name: my-service - annotations: - # Specifies whether access logs are enabled for the load balancer - service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true" - # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes). - service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60" - # The name of the Amazon S3 bucket where the access logs are stored - service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket" - # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod` - # This must match the prefix specified in the S3 policy - service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod" ----- - -=== Creating the LoadBalancer service - -Once the annotations have been saved into a YAML file, you can create it from the command line: - -[source,terminal] ----- -$ oc create -f loadbalancer.yaml ----- - -.Example output -[source,text] ----- -service/my-service created ----- - -== Using your TCP Service - -Once your LoadBalancer service is created, you can access your service by using -the URL provided to you by {product-title}. The `LoadBalancer Ingress` value is -a URL unique to your service that remains static as long as the service is not -deleted. If you prefer to use a custom domain, you can create a CNAME DNS record -for this URL. 
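
If you only need the `LoadBalancer Ingress` hostname, for example to create that CNAME record from a script, one way to retrieve just that value is sketched below; the full `oc describe` output that follows shows the same field:

[source,terminal]
----
$ oc get service lb-service \
    -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
----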
- -[source,terminal] ----- -$ oc describe svc lb-service ----- - -.Example output -[source,text] ----- -Name: lb-service -Namespace: default -Labels: app=httpd-example -Annotations: <none> -Selector: name=httpd-example -Type: LoadBalancer -IP: 10.120.182.252 -LoadBalancer Ingress: a5387ba36201e11e9ba901267fd7abb0-1406434805.us-east-1.elb.amazonaws.com -Port: <unset> 8080/TCP -TargetPort: 8080/TCP -NodePort: <unset> 31409/TCP -Endpoints: <none> -Session Affinity: None -External Traffic Policy: Cluster ----- diff --git a/modules/dedicated-logging-in-and-verifying-permissions.adoc b/modules/dedicated-logging-in-and-verifying-permissions.adoc deleted file mode 100644 index cc077569a2cd..000000000000 --- a/modules/dedicated-logging-in-and-verifying-permissions.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-admin-logging-in-verifying-permissions_{context}"] -= Logging in and verifying permissions - -You can log in as an {product-title} cluster administration via the web console -or CLI, just as you would if you were an application developer. - -When you log in to the web console, all user-created projects across the cluster -are visible from the main *Projects* page. - -Use the standard `oc login` command to log in with the CLI: - -[source,terminal] ----- -$ oc login <your_instance_url> ----- - -All projects are visible using: - -[source,terminal] ----- -$ oc get projects ----- - -When your account has the `dedicated-admins-cluster` cluster role bound to it, -you are automatically bound to the `dedicated-admins-project` for any new -projects that are created by users in the cluster. - -To verify if your account has administrator privileges, run the following -command against a user-created project to view its default role bindings. If you -are a cluster administrator, you will see your account listed under subjects for -the `dedicated-admins-project-0` and `dedicated-admins-project-1` role bindings -for the project: - -[source,terminal] ----- -$ oc describe rolebinding.rbac -n <project_name> ----- - -.Example output -[source,text] ----- -Name: admin -Labels: <none> -Annotations: <none> -Role: - Kind: ClusterRole - Name: admin -Subjects: - Kind Name Namespace - ---- ---- --------- - User fred@example.com <1> - - -Name: dedicated-admins-project -Labels: <none> -Annotations: <none> -Role: - Kind: ClusterRole - Name: dedicated-admins-project -Subjects: - Kind Name Namespace - ---- ---- --------- - User alice@example.com <2> - User bob@example.com <2> -... ----- -<1> The `fred@example.com` user is a normal, project-scoped administrator for -this project. -<2> The `alice@example.com` and `bob@example.com` users are cluster -administrators. 
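
Independently of inspecting role bindings, `oc auth can-i` offers a quick spot check of what the logged-in account is allowed to do. This is a generic sketch rather than a statement of the exact permissions granted by the dedicated-admin roles:

[source,terminal]
----
$ oc auth can-i get pods --all-namespaces

$ oc auth can-i --list -n <project_name>
----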
- -To view details on your increased permissions, and the sets of -verbs and resources associated with the `dedicated-admins-cluster` and -`dedicated-admins-project` roles, run the following: - -[source,terminal] ----- -$ oc describe clusterrole.rbac dedicated-admins-cluster -$ oc describe clusterrole.rbac dedicated-admins-project ----- diff --git a/modules/dedicated-managing-dedicated-administrators.adoc b/modules/dedicated-managing-dedicated-administrators.adoc deleted file mode 100644 index eb27a22a3c19..000000000000 --- a/modules/dedicated-managing-dedicated-administrators.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-dedicated-administrators_{context}"] -= Managing {product-title} administrators - -Administrator roles are managed using a `dedicated-admins` group on the cluster. Existing members of this group can edit membership via {cluster-manager-url}. - -[id="dedicated-administrators-adding-user_{context}"] -== Adding a user -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Click *Add user*. (first user only) -. Enter the user name and select the group (*dedicated-admins*) -. Click *Add*. - -[id="dedicated-administrators-removing-user_{context}"] -== Removing a user -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Click the 3 vertical dots to the right of the user / group combination to show a menu, then click on *Delete*. diff --git a/modules/dedicated-managing-quotas-and-limit-ranges.adoc b/modules/dedicated-managing-quotas-and-limit-ranges.adoc deleted file mode 100644 index 01b546be893a..000000000000 --- a/modules/dedicated-managing-quotas-and-limit-ranges.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-quotas-and-limit-ranges_{context}"] -= Managing quotas and limit ranges - -As an administrator, you are able to view, create, and modify quotas and limit -ranges on other projects. This allows you to better constrain how compute -resources and objects are consumed by users across the cluster. diff --git a/modules/dedicated-managing-service-accounts.adoc b/modules/dedicated-managing-service-accounts.adoc deleted file mode 100644 index 8a0264f21be4..000000000000 --- a/modules/dedicated-managing-service-accounts.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/dedicated-admin-role.adoc - -[id="dedicated-managing-service-accounts_{context}"] -= Managing service accounts - -Service accounts are API objects that exist within each project. To manage -service accounts, you can use the `oc` command with the `sa` or `serviceaccount` -object type or use the web console. - -The *dedicated-admin* service creates the *dedicated-admins* group. This group is -granted the roles at the cluster or individual project level. Users can be -assigned to this group and group membership defines who has OpenShift Dedicated -administrator access. However, by design, service accounts cannot be added to -regular groups. - -Instead, the dedicated-admin service creates a special project for this purpose -named *dedicated-admin*. The service account group for this project is granted -OpenShift Dedicated *admin* roles, granting OpenShift Dedicated administrator -access to all service accounts within the *dedicated-admin* project. 
These service -accounts can then be used to perform any actions that require OpenShift -Dedicated administrator access. - -Users that are members of the *dedicated-admins* group, and thus have been granted -the *dedicated-admin* role, have `edit` access to the *dedicated-admin* project. This -allows these users to manage the service accounts in this project and create new -ones as needed. - -To get a list of existing service accounts in the current project, run: - -[source,terminal] ----- -$ oc get sa ----- - -.Example output -[source,text] ----- -NAME SECRETS AGE -builder 2 2d -default 2 2d -deployer 2 2d ----- - -To create a new service account, run: - -[source,terminal] ----- -$ oc create sa <service-account-name> ----- - -As soon as a service account is created, two secrets are automatically added to -it: - -* an API token -* credentials for the OpenShift Container Registry - -These can be seen by describing the service account: - -[source,terminal] ----- -$ oc describe sa <service-account-name> ----- - -The system ensures that service accounts always have an API token and registry -credentials. - -The generated API token and registry credentials do not expire, but they can be -revoked by deleting the secret. When the secret is deleted, a new one is -automatically generated to take its place. diff --git a/modules/dedicated-scaling-your-cluster.adoc b/modules/dedicated-scaling-your-cluster.adoc deleted file mode 100644 index b2d7a5ce1101..000000000000 --- a/modules/dedicated-scaling-your-cluster.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/scaling_your_cluster.adoc - -[id="dedicated-scaling-your-cluster_{context}"] -= Scaling your cluster - -To scale your {product-title} cluster: - -. From {cluster-manager-url}, click on the cluster you want to resize. - -. Click *Actions*, then *Scale Cluster*. - -. Select how many compute nodes are required, then click *Apply*. - -Scaling occurs automatically. In the *Overview* tab under the *Details* heading,the *Status* indicator shows that your cluster is *Ready* for use. diff --git a/modules/dedicated-storage-expanding-filesystem-pvc.adoc b/modules/dedicated-storage-expanding-filesystem-pvc.adoc deleted file mode 100644 index a8e88df9af5f..000000000000 --- a/modules/dedicated-storage-expanding-filesystem-pvc.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/expanding-persistent-volume.adoc - -[id="dedicated-storage-expanding-filesystem-pvc_{context}"] -= Expanding {product-title} Persistent Volume Claims (PVCs) - -Expanding PVCs based on volume types that need file system re-sizing, -such as AWS EBS, is a two-step process. -This process involves expanding volume objects in the cloud provider and -then expanding the file system on the actual node. These steps occur automatically -after the PVC object is edited and might require a pod restart to take effect. - -Expanding the file system on the node only happens when a new pod is started -with the volume. - -.Prerequisites - -* The controlling StorageClass must have `allowVolumeExpansion` set -to `true`. This is the default configuration in {product-title}. - -+ -[IMPORTANT] -==== -Decreasing the size of an Amazon Elastic Block Store (EBS) volume is not supported. However, you -can create a smaller volume and then migrate your data to it by using a -tool such as `oc rsync`. 
After modifying a volume, you must wait at least six hours before -making additional modifications to the same volume. -==== - -.Procedure - -. Edit the PVC and request a new size by editing the `spec.resources.requests.storage` -value. The following `oc patch` command will change the PVC's size: -+ ----- -$ oc patch pvc <pvc_name> -p '{"spec": {"resources": {"requests": {"storage": "8Gi"}}}}' ----- - -. After the cloud provider object has finished re-sizing, the PVC might be set to -`FileSystemResizePending`. The following command is used to check -the condition: -+ ----- -$ oc describe pvc mysql - -Name: mysql -Namespace: my-project -StorageClass: gp2 -Status: Bound -Volume: pvc-5fa3feb4-7115-4735-8652-8ebcfec91bb9 -Labels: app=cakephp-mysql-persistent - template=cakephp-mysql-persistent - template.openshift.io/template-instance-owner=6c7f7c56-1037-4105-8c08-55a6261c39ca -Annotations: pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/aws-ebs - volume.kubernetes.io/selected-node: ip-10-0-128-221.us-east-2.compute.internal - volume.kubernetes.io/storage-resizer: kubernetes.io/aws-ebs -Finalizers: [kubernetes.io/pvc-protection] -Capacity: 1Gi <1> -Access Modes: RWO -VolumeMode: Filesystem -Conditions: <2> - Type Status LastProbeTime LastTransitionTime Reason Message - ---- ------ ----------------- ------------------ ------ ------- - FileSystemResizePending True <Timestamp> <Timestamp> Waiting for user to (re-)start a Pod to - finish file system resize of volume on node. -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal WaitForFirstConsumer 36m persistentvolume-controller waiting for first consumer to be created before binding - Normal ProvisioningSucceeded 36m persistentvolume-controller Successfully provisioned volume - pvc-5fa3feb4-7115-4735-8652-8ebcfec91bb9 using - kubernetes.io/aws-ebs -Mounted By: mysql-1-q4nz7 <3> ----- -<1> The current capacity of the PVC. -<2> Any relevant conditions are displayed here. -<3> The pod that is currently mounting this volume - -. If the output of the previous command included a message to restart the pod, delete the mounting pod that it specified: -+ ----- -$ oc delete pod mysql-1-q4nz7 ----- - -. After the pod is running, the newly requested size is available and the -`FileSystemResizePending` condition is removed from the PVC. diff --git a/modules/defining-template-for-external-log-link.adoc b/modules/defining-template-for-external-log-link.adoc deleted file mode 100644 index d18d19988232..000000000000 --- a/modules/defining-template-for-external-log-link.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="defining-template-for-external-log-links_{context}"] -= Defining a template for an external log link - -If you are connected to a service that helps you browse your logs, but you need -to generate URLs in a particular way, then you can define a template for your -link. - -.Prerequisites - -* You must have administrator privileges. - -.Procedure - -. From *Administration* -> *Custom Resource Definitions*, click on -*ConsoleExternalLogLink*. -. Select *Instances* tab -. 
Click *Create Console External Log Link* and edit the file: -+ -[source,yaml] ----- -apiVersion: console.openshift.io/v1 -kind: ConsoleExternalLogLink -metadata: - name: example -spec: - hrefTemplate: >- - https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels} - text: Example Logs ----- diff --git a/modules/delete-hosted-cluster.adoc b/modules/delete-hosted-cluster.adoc deleted file mode 100644 index 073e88185ba8..000000000000 --- a/modules/delete-hosted-cluster.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assembly: -// -// * hosted_control_planes/hcp-managing.adoc - -:_content-type: PROCEDURE -[id="delete-hosted-cluster_{context}"] -= Deleting a hosted cluster - -The steps to delete a hosted cluster differ depending on which provider you use. - -.Procedure - -* If the cluster is on AWS, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-aws[Destroying a hosted cluster on AWS]. - -* If the cluster is on bare metal, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-bm[Destroying a hosted cluster on bare metal]. - -* If the cluster is on {VirtProductName}, follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#hypershift-cluster-destroy-kubevirt[Destroying a hosted cluster on OpenShift Virtualization]. - -.Next steps - -If you want to disable the hosted control plane feature, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#disable-hosted-control-planes[Disabling the hosted control plane feature]. \ No newline at end of file diff --git a/modules/delete-kn-trigger.adoc b/modules/delete-kn-trigger.adoc deleted file mode 100644 index 784862981990..000000000000 --- a/modules/delete-kn-trigger.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/eventing/triggers/delete-triggers-cli.adoc - -:_content-type: PROCEDURE -[id="delete-kn-trigger_{context}"] -= Deleting a trigger by using the Knative CLI - -You can use the `kn trigger delete` command to delete a trigger. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on your {product-title} cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. - -.Procedure - -* Delete a trigger: -+ -[source,terminal] ----- -$ kn trigger delete <trigger_name> ----- - -.Verification - -. List existing triggers: -+ -[source,terminal] ----- -$ kn trigger list ----- - -. Verify that the trigger no longer exists: -+ -.Example output -[source,terminal] ----- -No triggers found. 
----- diff --git a/modules/deleting-a-namespace-using-the-web-console.adoc b/modules/deleting-a-namespace-using-the-web-console.adoc deleted file mode 100644 index b2d87dab26aa..000000000000 --- a/modules/deleting-a-namespace-using-the-web-console.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/install/uninstalling-virt.adoc - -:_content-type: PROCEDURE -[id="deleting-a-namespace-using-the-web-console_{context}"] -= Deleting a namespace using the web console - -You can delete a namespace by using the {product-title} web console. - -.Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. - -.Procedure - -. Navigate to *Administration* -> *Namespaces*. - -. Locate the namespace that you want to delete in the list of namespaces. - -. On the far right side of the namespace listing, select *Delete Namespace* from the -Options menu {kebab}. - -. When the *Delete Namespace* pane opens, enter the name of the namespace that -you want to delete in the field. - -. Click *Delete*. diff --git a/modules/deleting-a-project-using-the-CLI.adoc b/modules/deleting-a-project-using-the-CLI.adoc deleted file mode 100644 index 08308f10b8aa..000000000000 --- a/modules/deleting-a-project-using-the-CLI.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="deleting-a-project-using-the-CLI_{context}"] -= Deleting a project using the CLI - -When you delete a project, the server updates the project status to -*Terminating* from *Active*. Then, the server clears all content from a project -that is in the *Terminating* state before finally removing the project. While a -project is in *Terminating* status, you cannot add new content to the project. -Projects can be deleted from the CLI or the web console. - -.Procedure - -. Run: -+ -[source,terminal] ----- -$ oc delete project <project_name> ----- diff --git a/modules/deleting-a-project-using-the-web-console.adoc b/modules/deleting-a-project-using-the-web-console.adoc deleted file mode 100644 index a06b7e2e7d1a..000000000000 --- a/modules/deleting-a-project-using-the-web-console.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="deleting-a-project-using-the-web-console_{context}"] -= Deleting a project using the web console - -You can delete a project by using the {product-title} web console. - -[NOTE] -==== -If you do not have permissions to delete the project, the *Delete Project* -option is not available. -==== - -.Procedure - -. Navigate to *Home* -> *Projects*. - -. Locate the project that you want to delete from the list of projects. - -. On the far right side of the project listing, select *Delete Project* from the -Options menu {kebab}. - -. When the *Delete Project* pane opens, enter the name of the project that -you want to delete in the field. - -. Click *Delete*. 
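
With either deletion method, the transition through the *Terminating* phase described earlier can be watched from the CLI. A small sketch with a placeholder project name; after deletion completes, the same command returns a `NotFound` error:

[source,terminal]
----
$ oc get project <project_name> -o jsonpath='{.status.phase}'
----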
\ No newline at end of file diff --git a/modules/deleting-cluster.adoc b/modules/deleting-cluster.adoc deleted file mode 100644 index 57eb621134ba..000000000000 --- a/modules/deleting-cluster.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_install_access_delete_cluster/osd-deleting-a-cluster.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="deleting-cluster_{context}"] -= Deleting your cluster - -You can delete your {product-title} cluster in {cluster-manager-first}. - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to delete. - -. Select *Delete cluster* from the *Actions* drop-down menu. - -. Type the name of the cluster highlighted in bold, then click *Delete*. Cluster deletion occurs automatically. diff --git a/modules/deleting-machine-pools-cli.adoc b/modules/deleting-machine-pools-cli.adoc deleted file mode 100644 index dc445f69d4fb..000000000000 --- a/modules/deleting-machine-pools-cli.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools-cli{context}"] -= Deleting a machine pool using the ROSA CLI -You can delete a machine pool for your Red Hat OpenShift Service on AWS (ROSA) cluster by using the ROSA CLI. - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You have created an {product-title} cluster. -endif::[] - -.Procedure -. From the ROSA CLI, run the following command: -+ -[source,terminal] ----- -$ rosa delete machinepool -c=<cluster_name> <machine_pool_ID> ----- -+ -.Example output -[source,terminal] ----- -? Are you sure you want to delete machine pool <machine_pool_ID> on cluster <cluster_name>? (y/N) ----- -. Enter 'y' to delete the machine pool. -+ -The selected machine pool is deleted. diff --git a/modules/deleting-machine-pools-ocm.adoc b/modules/deleting-machine-pools-ocm.adoc deleted file mode 100644 index e15a56d3320d..000000000000 --- a/modules/deleting-machine-pools-ocm.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc -// * nodes/rosa-managing-worker-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools-ocm{context}"] -ifndef::openshift-rosa[] -= Deleting a machine pool -endif::openshift-rosa[] -ifdef::openshift-rosa[] -= Deleting a machine pool using OpenShift Cluster Manager -endif::openshift-rosa[] - -You can delete a machine pool for your Red Hat OpenShift Service on AWS (ROSA) cluster by using OpenShift Cluster Manager. - -.Prerequisites - -ifdef::openshift-rosa[] -* You created a ROSA cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. -endif::openshift-rosa[] -ifndef::openshift-rosa[] -* You have created an {product-title} cluster. -* The newly created cluster is in the ready state. 
-endif::[] - -.Procedure -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that contains the machine pool that you want to delete. - -. On the selected cluster, select the *Machine pools* tab. - -. Under the *Machine pools* tab, click the options menu {kebab} for the machine pool that you want to delete. -. Click Delete. - -The selected machine pool is deleted. \ No newline at end of file diff --git a/modules/deleting-machine-pools.adoc b/modules/deleting-machine-pools.adoc deleted file mode 100644 index 472e1ceb87ad..000000000000 --- a/modules/deleting-machine-pools.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-managing-worker-nodes.adoc - -:_content-type: PROCEDURE -[id="deleting-machine-pools{context}"] -= Deleting a machine pool - -You can delete a machine pool in the event that your workload requirements have changed and your current machine pools no longer meet your needs. -// Over time, users may find that their workload needs have changed, and may want to modify the various machine pool settings. While many of these settings can be modified, certain settings (for example, instance types and availability zones) cannot be changed once a machine pool is created. If a user finds that these settings are no longer meeting their needs, they can delete the machine pool in question and create a new machine pool with the desired settings. -You can delete machine pools using the -ifdef::openshift-rosa[] -Openshift Cluster Manager or the ROSA CLI (`rosa`). -endif::openshift-rosa[] -ifndef::openshift-rosa[] -Openshift Cluster Manager. -endif::[] - -// Users that wish to delete the default machine pool that is automatically created during the installation of a {product-title} (ROSA) cluster can do so using the OCM or ROSA CLI. -// - -ifndef::openshift-rosa[] -.Prerequisites - -* You have created an {product-title} cluster. -* The cluster is in the ready state. -* You have an existing machine pool without any taints and with at least two instances for a single-AZ cluster or three instances for a multi-AZ cluster. - -.Procedure -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that contains the machine pool that you want to delete. - -. On the selected cluster, select the *Machine pools* tab. - -. Under the *Machine pools* tab, click the options menu {kebab} for the machine pool that you want to delete. -. Click *Delete*. - -The selected machine pool is deleted. -endif::openshift-rosa[] diff --git a/modules/deleting-service-cli.adoc b/modules/deleting-service-cli.adoc deleted file mode 100644 index 1d96f5e21be3..000000000000 --- a/modules/deleting-service-cli.adoc +++ /dev/null @@ -1,19 +0,0 @@ - -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="deleting-service-cli_{context}"] -= Deleting an add-on service using the CLI - -You can delete an add-on service from your {product-title} cluster by using the {cluster-manager-first} CLI (`ocm`). 
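
The delete command in the procedure below requires a cluster ID and an add-on ID. If you do not already have them, one way to look them up with the same CLI is sketched here; the API path mirrors the one used by the delete call:

[source,terminal]
----
$ ocm list clusters

$ ocm get api/clusters_mgmt/v1/clusters/<cluster_id>/addons
----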
- -.Procedure - -* To delete the add-on service from your cluster through {cluster-manager} CLI, enter the following command: -+ -[source,terminal] ----- -$ ocm delete api/clusters_mgmt/v1/clusters/<cluster_id>/addons/<addon_id> ----- diff --git a/modules/deleting-service.adoc b/modules/deleting-service.adoc deleted file mode 100644 index 5809ae44fbe2..000000000000 --- a/modules/deleting-service.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/adding-service.adoc - -:_content-type: PROCEDURE -[id="deleting-service_{context}"] -= Deleting an add-on service using {cluster-manager-first} - -You can delete an add-on service from your {product-title} -ifdef::openshift-rosa[] -(ROSA) -endif::openshift-rosa[] -cluster by using {cluster-manager-first}. - -.Procedure - -. Navigate to the *Clusters* page in {cluster-manager-url}. - -. Click the cluster with the installed service that you want to delete. - -. Navigate to the *Add-ons* tab, and locate the installed service that you want to delete. - -. From the installed service option, click the menu and select *Uninstall add-on* from the drop-down menu. - -. You must type the name of the service that you want to delete in the confirmation message that appears. - -. Click *Uninstall*. You are returned to the *Add-ons* tab and an uninstalling state icon is present on the service option you deleted. diff --git a/modules/deleting-wmco-namespace.adoc b/modules/deleting-wmco-namespace.adoc deleted file mode 100644 index 14e230ab81d2..000000000000 --- a/modules/deleting-wmco-namespace.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/disabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="deleting-wmco-namespace_{context}"] -= Deleting the Windows Machine Config Operator namespace - -You can delete the namespace that was generated for the Windows Machine Config Operator (WMCO) by default. - -.Prerequisites - -* The WMCO is removed from your cluster. - -.Procedure - -. Remove all Windows workloads that were created in the `openshift-windows-machine-config-operator` namespace: -+ -[source,terminal] ----- -$ oc delete --all pods --namespace=openshift-windows-machine-config-operator ----- - -. Verify that all pods in the `openshift-windows-machine-config-operator` namespace are deleted or are reporting a terminating state: -+ -[source,terminal] ----- -$ oc get pods --namespace openshift-windows-machine-config-operator ----- - -. Delete the `openshift-windows-machine-config-operator` namespace: -+ -[source,terminal] ----- -$ oc delete namespace openshift-windows-machine-config-operator ----- diff --git a/modules/deploy-app.adoc b/modules/deploy-app.adoc deleted file mode 100644 index 352ba6f15c4d..000000000000 --- a/modules/deploy-app.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_getting_started/rosa-getting-started.adoc -// * rosa_getting_started/rosa-quickstart-guide-ui.adoc -// * osd_getting_started/osd-getting-started.adoc - -:_content-type: PROCEDURE -[id="deploy-app_{context}"] -= Deploying an application from the Developer Catalog - -ifeval::["{context}" == "rosa-getting-started"] -:getting-started: -endif::[] -ifeval::["{context}" == "rosa-quickstart"] -:quickstart: -endif::[] - -From the {product-title} web console, you can deploy a test application from the Developer Catalog and expose it with a route. 
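
The console flow in the following procedure also has a rough CLI equivalent. The sketch below is illustrative only, assuming you are logged in with `oc`; the repository is the commonly used OpenShift Node.js sample, and the generated resource names follow the repository name:

[source,terminal]
----
$ oc new-project <project_name>

$ oc new-app nodejs~https://github.com/sclorg/nodejs-ex

$ oc expose service/nodejs-ex

$ oc get route nodejs-ex
----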
- -ifndef::quickstart[] -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. -endif::[] - -.Procedure - -. From the {cluster-manager} {hybrid-console-second}, click *Open console*. - -. In the *Administrator* perspective, select *Home* -> *Projects* -> *Create Project*. - -. Enter a name for your project and optionally add a *Display Name* and *Description*. - -. Click *Create* to create the project. - -. Switch to the *Developer* perspective and select *+Add*. Verify that the selected *Project* is the one that you just created. - -. In the *Developer Catalog* dialog, select *All services*. - -. In the *Developer Catalog* page, select *Languages* -> *JavaScript* from the menu. - -. Click *Node.js*, and then click *Create* to open the *Create Source-to-Image application* page. -+ -[NOTE] -==== -You might need to click *Clear All Filters* to display the *Node.js* option. -==== - -. In the *Git* section, click *Try sample*. - -. Add a unique name in the *Name* field. The value will be used to name the associated resources. - -. Confirm that *Deployment* and *Create a route* are selected. - -. Click *Create* to deploy the application. It will take a few minutes for the pods to deploy. - -. Optional: Check the status of the pods in the *Topology* pane by selecting your *nodejs* app and reviewing its sidebar. You must wait for the `nodejs` build to complete and for the `nodejs` pod to be in a *Running* state before continuing. - -. When the deployment is complete, click the route URL for the application, which has a format similar to the following: -+ ----- -https://nodejs-<project>.<cluster_name>.<hash>.<region>.openshiftapps.com/ ----- -+ -A new tab in your browser opens with a message similar to the following: -+ ----- -Welcome to your Node.js application on OpenShift ----- - -. Optional: Delete the application and clean up the resources that you created: -.. In the *Administrator* perspective, navigate to *Home* -> *Projects*. -.. Click the action menu for your project and select *Delete Project*. - -ifeval::["{context}" == "rosa-getting-started"] -:getting-started: -endif::[] -ifeval::["{context}" == "rosa-quickstart"] -:quickstart: -endif::[] diff --git a/modules/deploy-red-hat-openshift-container-storage.adoc b/modules/deploy-red-hat-openshift-container-storage.adoc deleted file mode 100644 index 089a18788feb..000000000000 --- a/modules/deploy-red-hat-openshift-container-storage.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/storage-configuration.adoc - -[options="header",cols="1,1"] -|=== - -|If you are looking for {rh-storage-first} information about... 
-|See the following {rh-storage-first} documentation: - -|What's new, known issues, notable bug fixes, and Technology Previews -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/4.12_release_notes[OpenShift Data Foundation 4.12 Release Notes] - -|Supported workloads, layouts, hardware and software requirements, sizing and scaling recommendations -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/planning_your_deployment[Planning your OpenShift Data Foundation 4.12 deployment] - -|Instructions on deploying {rh-storage} to use an external Red Hat Ceph Storage cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_in_external_mode[Deploying OpenShift Data Foundation 4.12 in external mode] - -|Instructions on deploying {rh-storage} to local storage on bare metal infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_bare_metal_infrastructure[Deploying OpenShift Data Foundation 4.12 using bare metal infrastructure] - -|Instructions on deploying {rh-storage} on Red Hat {product-title} VMware vSphere clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_on_vmware_vsphere[Deploying OpenShift Data Foundation 4.12 on VMware vSphere] - -|Instructions on deploying {rh-storage} using Amazon Web Services for local or cloud storage -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_amazon_web_services[Deploying OpenShift Data Foundation 4.12 using Amazon Web Services] - -|Instructions on deploying and managing {rh-storage} on existing Red Hat {product-title} Google Cloud clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_and_managing_openshift_data_foundation_using_google_cloud[Deploying and managing OpenShift Data Foundation 4.12 using Google Cloud] - -|Instructions on deploying and managing {rh-storage} on existing Red Hat {product-title} Azure clusters -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_microsoft_azure/index[Deploying and managing OpenShift Data Foundation 4.12 using Microsoft Azure] - -|Instructions on deploying {rh-storage} to use local storage on IBM Power infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html-single/deploying_openshift_data_foundation_using_ibm_power/index[Deploying OpenShift Data Foundation on IBM Power] - -|Instructions on deploying {rh-storage} to use local storage on IBM Z infrastructure -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_ibm_z_infrastructure/index[Deploying OpenShift Data Foundation on IBM Z infrastructure] - -|Allocating storage to core services and hosted applications in {rh-storage-first}, including snapshot and clone -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/managing_and_allocating_storage_resources[Managing and allocating resources] - -|Managing storage resources across a hybrid cloud or multicloud environment using the Multicloud Object Gateway 
(NooBaa) -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/managing_hybrid_and_multicloud_resources[Managing hybrid and multicloud resources] - -|Safely replacing storage devices for {rh-storage-first} -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/replacing_devices[Replacing devices] - -|Safely replacing a node in a {rh-storage-first} cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/replacing_nodes[Replacing nodes] - -|Scaling operations in {rh-storage-first} -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/scaling_storage[Scaling storage] - -|Monitoring a {rh-storage-first} 4.12 cluster -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/monitoring_openshift_data_foundation[Monitoring Red Hat OpenShift Data Foundation 4.12] - -|Resolve issues encountered during operations -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/troubleshooting_openshift_data_foundation[Troubleshooting OpenShift Data Foundation 4.12] - -|Migrating your {product-title} cluster from version 3 to version 4 -|link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/migrating_from_version_3_to_4/index[Migration] - -|=== diff --git a/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc b/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc deleted file mode 100644 index 93d51d709083..000000000000 --- a/modules/deploying-a-pod-that-includes-an-aws-sdk.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/assuming-an-aws-iam-role-for-a-service-account.adoc - -:_content-type: PROCEDURE -[id="deploying-a-pod-that-includes-an-aws-sdk_{context}"] -= Deploying a pod that includes an AWS SDK - -Deploy a pod in a user-defined project from a container image that includes an AWS SDK. In your pod configuration, specify the service account that includes the `eks.amazonaws.com/role-arn` annotation. - -With the service account reference in place for your pod, the pod identity webhook injects the AWS environment variables, the volume mount, and the token volume into your pod. The pod mutation enables the service account to automatically assume the AWS IAM role in the pod. - -.Prerequisites - -* You have created an AWS Identity and Access Management (IAM) role for your service account. For more information, see _Setting up an AWS IAM role for a service account_. -* You have access to a {product-title} cluster that uses the AWS Security Token Service (STS). Admin-level user privileges are not required. -* You have installed the OpenShift CLI (`oc`). -* You have created a service account in your project that includes an `eks.amazonaws.com/role-arn` annotation that references the Amazon Resource Name (ARN) for the IAM role that you want the service account to assume. -* You have a container image that includes an AWS SDK and the image is available to your cluster. For detailed steps, see _Creating an example AWS SDK container image_. -+ -[NOTE] -==== -In this example procedure, the AWS Boto3 SDK for Python is used. For more information about installing and using the AWS Boto3 SDK, see the link:https://boto3.amazonaws.com/v1/documentation/api/latest/index.html[AWS Boto3 documentation]. 
For details about other AWS SDKs, see link:https://docs.aws.amazon.com/sdkref/latest/guide/overview.html[AWS SDKs and Tools Reference Guide] in the AWS documentation. -==== - -.Procedure - -. Create a file named `awsboto3sdk-pod.yaml` with the following pod configuration: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - namespace: <project_name> <1> - name: awsboto3sdk <2> -spec: - serviceAccountName: <service_account_name> <3> - containers: - - name: awsboto3sdk - image: quay.io/<quay_username>/awsboto3sdk:latest <4> - command: - - /bin/bash - - "-c" - - "sleep 100000" <5> - terminationGracePeriodSeconds: 0 - restartPolicy: Never ----- -<1> Replace `<project_name>` with the name of your project. The name must match the project name that you specified in your AWS IAM role configuration. -<2> Specifies the name of the pod. -<3> Replace `<service_account_name>` with the name of the service account that is configured to assume the AWS IAM role. The name must match the service account name that you specified in your AWS IAM role configuration. -<4> Specifies the location of your `awsboto3sdk` container image. Replace `<quay_username>` with your Quay.io username. -<5> In this example pod configuration, this line keeps the pod running for 100000 seconds to enable verification testing in the pod directly. For detailed verification steps, see _Verifying the assumed IAM role in your pod_. - -. Deploy an `awsboto3sdk` pod: -+ -[source,terminal] ----- -$ oc create -f awsboto3sdk-pod.yaml ----- -+ -.Example output: -+ -[source,terminal] ----- -pod/awsboto3sdk created ----- diff --git a/modules/deploying-lvms-on-sno-cluster.adoc b/modules/deploying-lvms-on-sno-cluster.adoc deleted file mode 100644 index 622a97d4925f..000000000000 --- a/modules/deploying-lvms-on-sno-cluster.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: CONCEPT -[id="lvms-preface-sno-ran_{context}"] -= Deploying {lvms} on {sno} clusters - -You can deploy {lvms} on a {sno} bare-metal or user-provisioned infrastructure cluster and configure it to dynamically provision storage for your workloads. - -{lvms} creates a volume group using all the available unused disks and creates a single thin pool with a size of 90% of the volume group. -The remaining 10% of the volume group is left free to enable data recovery by expanding the thin pool when required. -You might need to manually perform such recovery. - -You can use persistent volume claims (PVCs) and volume snapshots provisioned by {lvms} to request storage and create volume snapshots. - -{lvms} configures a default overprovisioning limit of 10 to take advantage of the thin-provisioning feature. -The total size of the volumes and volume snapshots that can be created on the {sno} clusters is 10 times the size of the thin pool. - -You can deploy {lvms} on {sno} clusters using one of the following: - -* {rh-rhacm-first} -* {product-title} Web Console - -[id="lvms-deployment-requirements-for-sno-ran_{context}"] -== Requirements - -Before you begin deploying {lvms} on {sno} clusters, ensure that the following requirements are met: - -* You have installed {rh-rhacm-first} on an {product-title} cluster. -* Every managed {sno} cluster has dedicated disks that are used to provision storage. 
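Although the deployment procedures themselves are not shown in this excerpt, the volume group, thin pool, and overprovisioning behavior described at the start of this section is expressed through a single `LVMCluster` custom resource. The following is a minimal sketch only; the API version and field names are assumptions based on the LVM Storage Operator and should be verified against the CRD installed on your cluster:

[source,yaml]
----
apiVersion: lvm.topolvm.io/v1alpha1
kind: LVMCluster
metadata:
  name: my-lvmcluster
spec:
  storage:
    deviceClasses:
    - name: vg1 <1>
      thinPoolConfig:
        name: thin-pool-1
        sizePercent: 90 <2>
        overprovisionRatio: 10 <3>
----
<1> A single device class; the available unused disks are added to its volume group.
<2> The thin pool is created with 90% of the volume group, leaving the remaining 10% free for recovery.
<3> The default overprovisioning limit of 10 described above.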
- -Before you deploy {lvms} on {sno} clusters, be aware of the following limitations: - -* You can only create a single instance of the `LVMCluster` custom resource (CR) on an {product-title} cluster. -* You can make only a single `deviceClass` entry in the `LVMCluster` CR. -* When a device becomes part of the `LVMCluster` CR, it cannot be removed. - -[id="lvms-deployment-limitations-for-sno-ran_{context}"] -== Limitations - -For deploying {sno}, LVM Storage has the following limitations: - -* The total storage size is limited by the size of the underlying Logical Volume Manager (LVM) thin pool and the overprovisioning factor. -* The size of the logical volume depends on the size of the Physical Extent (PE) and the Logical Extent (LE). -** It is possible to define the size of PE and LE during the physical and logical device creation. -** The default PE and LE size is 4 MB. -** If the size of the PE is increased, the maximum size of the LVM is determined by the kernel limits and your disk space. - -.Size limits for different architectures using the default PE and LE size -[cols="1,1,1,1,1", width="100%", options="header"] -|==== -|Architecture -|RHEL 5 -|RHEL 6 -|RHEL 7 -|RHEL 8 - -|32-bit -|16 TB -|16 TB -|- -|- - -|64-bit -|8 EB ^[1]^ - -|8 EB ^[1]^ - -100 TB ^[2]^ -|8 EB ^[1]^ - -500 TB ^[2]^ -|8 EB - -|==== -[.small] --- -1. Theoretical size. -2. Tested size. --- \ No newline at end of file diff --git a/modules/deploying-resource.adoc b/modules/deploying-resource.adoc deleted file mode 100644 index d1f7a0153087..000000000000 --- a/modules/deploying-resource.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Be sure to set the :FeatureName: and :FeatureResourceName: values in each assembly on the lines before -// the include statement for this module. For example, add the following lines to the assembly: -// :FeatureName: cluster autoscaler -// :FeatureResourceName: ClusterAutoscaler -// -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="{FeatureResourceName}-deploying_{context}"] -= Deploying the {FeatureName} - -To deploy the {FeatureName}, you create an instance of the `{FeatureResourceName}` resource. - -.Procedure - -. Create a YAML file for the `{FeatureResourceName}` resource that contains the customized resource definition. - -. Create the resource in the cluster: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml <1> ----- -<1> `<filename>` is the name of the resource file that you customized. - -// Undefine attributes, so that any mistakes are easily spotted -:!FeatureName: -:!FeatureResourceName: diff --git a/modules/deployment-plug-in-cluster.adoc b/modules/deployment-plug-in-cluster.adoc deleted file mode 100644 index 1e457239846d..000000000000 --- a/modules/deployment-plug-in-cluster.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="deploy-on-cluster_{context}"] -= Deploy your plugin on a cluster - -After pushing an image with your changes to a registry, you can deploy the plugin to a cluster. - -.Procedure - -. To deploy your plugin to a cluster, install a Helm chart with the name of the plugin as the Helm release name into a new namespace or an existing namespace as specified by the `-n` command-line option. 
Provide the location of the image within the `plugin.image` parameter by using the following command: - -+ -[source,terminal] ----- -$ helm upgrade -i my-plugin charts/openshift-console-plugin -n my-plugin-namespace --create-namespace --set plugin.image=my-plugin-image-location ----- -+ -Where: -+ --- -`n <my-plugin-namespace>`:: Specifies an existing namespace to deploy your plugin into. -`--create-namespace`:: Optional: If deploying to a new namespace, use this parameter. -`--set plugin.image=my-plugin-image-location`:: Specifies the location of the image within the `plugin.image` parameter. --- - -. Optional: You can specify any additional parameters by using the set of supported parameters in the `charts/openshift-console-plugin/values.yaml` file. -+ -[source,yaml] ----- -plugin: - name: "" - description: "" - image: "" - imagePullPolicy: IfNotPresent - replicas: 2 - port: 9443 - securityContext: - enabled: true - podSecurityContext: - enabled: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - resources: - requests: - cpu: 10m - memory: 50Mi - basePath: / - certificateSecretName: "" - serviceAccount: - create: true - annotations: {} - name: "" - patcherServiceAccount: - create: true - annotations: {} - name: "" - jobs: - patchConsoles: - enabled: true - image: "registry.redhat.io/openshift4/ose-tools-rhel8@sha256:e44074f21e0cca6464e50cb6ff934747e0bd11162ea01d522433a1a1ae116103" - podSecurityContext: - enabled: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - resources: - requests: - cpu: 10m - memory: 50Mi ----- - -.Verification -* View the list of enabled plugins by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *Console* `operator.openshift.io` -> *Console plugins* or by visiting the *Overview* page. - - -[NOTE] -==== -It can take a few minutes for the new plugin configuration to appear. If you do not see your plugin, you might need to refresh your browser if the plugin was recently enabled. If you receive any errors at runtime, check the JS console in browser developer tools to look for any errors in your plugin code. -==== diff --git a/modules/deployments-ab-testing-lb.adoc b/modules/deployments-ab-testing-lb.adoc deleted file mode 100644 index 4c2006eaa31f..000000000000 --- a/modules/deployments-ab-testing-lb.adoc +++ /dev/null @@ -1,269 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-ab-testing-lb_{context}"] -= Load balancing for A/B testing - -The user sets up a route with multiple services. Each service handles a version of the application. - -Each service is assigned a `weight` and the portion of requests to each service is the `service_weight` divided by the `sum_of_weights`. The `weight` for each service is distributed to the service's endpoints so that the sum of the endpoint `weights` is the service `weight`. - -The route can have up to four services. The `weight` for the service can be between `0` and `256`. When the `weight` is `0`, the service does not participate in load-balancing but continues to serve existing persistent connections. When the service `weight` is not `0`, each endpoint has a minimum `weight` of `1`. 
Because of this, a service with a lot of endpoints can end up with higher `weight` than intended. In this case, reduce the number of pods to get the expected load balance `weight`. - -//// -See the -xref:../../architecture/networking/routes.adoc#alternateBackends[Alternate -Backends and Weights] section for more information. - -The web console allows users to set the weighting and show balance between them: - -weighting.png[Visualization of Alternate Back Ends in the Web Console] -//// - -.Procedure - -To set up the A/B environment: - -. Create the two applications and give them different names. Each creates a `Deployment` object. The applications are versions of the same program; one is usually the current production version and the other the proposed new version. -.. Create the first application. The following example creates an application called `ab-example-a`: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example --name=ab-example-a ----- -+ -.. Create the second application: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 --name=ab-example-b ----- -+ -Both applications are deployed and services are created. - -. Make the application available externally via a route. At this point, you can expose either. It can be convenient to expose the current production version first and later modify the route to add the new version. -+ -[source,terminal] ----- -$ oc expose svc/ab-example-a ----- -+ -Browse to the application at `ab-example-a.<project>.<router_domain>` to verify that you see the expected version. - -. When you deploy the route, the router balances the traffic according to the `weights` specified for the services. At this point, there is a single service with default `weight=1` so all requests go to it. Adding the other service as an `alternateBackends` and adjusting the `weights` brings the A/B setup to life. This can be done by the `oc set route-backends` command or by editing the route. -+ -Setting the `oc set route-backend` to `0` means the service does not participate in load-balancing, but continues to serve existing persistent connections. -+ -[NOTE] -==== -Changes to the route just change the portion of traffic to the various services. You might have to scale the deployment to adjust the number of pods to handle the anticipated loads. -==== -+ -To edit the route, run: -+ -[source,terminal] ----- -$ oc edit route <route_name> ----- -+ -.Example output -[source,terminal] ----- -... -metadata: - name: route-alternate-service - annotations: - haproxy.router.openshift.io/balance: roundrobin -spec: - host: ab-example.my-project.my-domain - to: - kind: Service - name: ab-example-a - weight: 10 - alternateBackends: - - kind: Service - name: ab-example-b - weight: 15 -... ----- - -[id="deployments-ab-testing-lb-web_{context}"] -== Managing weights of an existing route using the web console - -.Procedure - -. Navigate to the *Networking* -> *Routes* page. - -. Click the Actions menu {kebab} next to the route you want to edit and select *Edit Route*. - -. Edit the YAML file. Update the `weight` to be an integer between `0` and `256` that specifies the relative weight of the target against other target reference objects. The value `0` suppresses requests to this back end. The default is `100`. Run `oc explain routes.spec.alternateBackends` for more information about the options. - -. Click *Save*. - -[id="deployments-ab-testing-lb-web-new-route_{context}"] -== Managing weights of an new route using the web console - -. 
Navigate to the *Networking* -> *Routes* page. - -. Click *Create Route*. - -. Enter the route *Name*. - -. Select the *Service*. - -. Click *Add Alternate Service*. - -. Enter a value for *Weight* and *Alternate Service Weight*. Enter a number between `0` and `255` that specifies the relative weight compared with other targets. The default is `100`. - -. Select the *Target Port*. - -. Click *Create*. - -[id="deployments-ab-testing-lb-cli_{context}"] -== Managing weights using the CLI - -.Procedure - -. To manage the services and corresponding weights load balanced by the route, use the `oc set route-backends` command: -+ -[source,terminal] ----- -$ oc set route-backends ROUTENAME \ - [--zero|--equal] [--adjust] SERVICE=WEIGHT[%] [...] [options] ----- -+ -For example, the following sets `ab-example-a` as the primary service with `weight=198` and `ab-example-b` as the first alternate service with a `weight=2`: -+ -[source,terminal] ----- -$ oc set route-backends ab-example ab-example-a=198 ab-example-b=2 ----- -+ -This means 99% of traffic is sent to service `ab-example-a` and 1% to service `ab-example-b`. -+ -This command does not scale the deployment. You might be required to do so to have enough pods to handle the request load. - -. Run the command with no flags to verify the current configuration: -+ -[source,terminal] ----- -$ oc set route-backends ab-example ----- -+ -.Example output -[source,terminal] ----- -NAME KIND TO WEIGHT -routes/ab-example Service ab-example-a 198 (99%) -routes/ab-example Service ab-example-b 2 (1%) ----- - -. To alter the weight of an individual service relative to itself or to the primary service, use the `--adjust` flag. Specifying a percentage adjusts the service relative to either the primary or the first alternate (if you specify the primary). If there are other backends, their weights are kept proportional to the changed one. -+ -The following example alters the weight of `ab-example-a` and `ab-example-b` services: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-a=200 ab-example-b=10 ----- -+ -Alternatively, alter the weight of a service by specifying a percentage: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-b=5% ----- -+ -By specifying `+` before the percentage declaration, you can adjust a weighting relative to the current setting. For example: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --adjust ab-example-b=+15% ----- -+ -The `--equal` flag sets the `weight` of all services to `100`: -+ -[source,terminal] ----- -$ oc set route-backends ab-example --equal ----- -+ -The `--zero` flag sets the `weight` of all services to `0`. All requests then return with a 503 error. -+ -[NOTE] -==== -Not all routers may support multiple or weighted backends. -==== - -[id="deployments-ab-one-service-multi-dc_{context}"] -== One service, multiple `Deployment` objects - -.Procedure - -. Create a new application, adding a label `ab-example=true` that will be common to all shards: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example --name=ab-example-a --as-deployment-config=true --labels=ab-example=true --env=SUBTITLE\=shardA -$ oc delete svc/ab-example-a ----- -+ -The application is deployed and a service is created. This is the first shard. - -.
Make the application available via a route, or use the service IP directly: -+ -[source,terminal] ----- -$ oc expose deployment ab-example-a --name=ab-example --selector=ab-example\=true -$ oc expose service ab-example ----- - -. Browse to the application at `ab-example-<project_name>.<router_domain>` to verify you see the `v1` image. - -. Create a second shard based on the same source image and label as the first shard, but with a different tagged version and unique environment variables: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 \ - --name=ab-example-b --labels=ab-example=true \ - SUBTITLE="shard B" COLOR="red" --as-deployment-config=true -$ oc delete svc/ab-example-b ----- - -. At this point, both sets of pods are being served under the route. However, because both browsers (by leaving a connection open) and the router (by default, through a cookie) attempt to preserve your connection to a back-end server, you might not see both shards being returned to you. -+ -To force your browser to one or the other shard: - -.. Use the `oc scale` command to reduce replicas of `ab-example-a` to `0`. -+ -[source,terminal] ----- -$ oc scale dc/ab-example-a --replicas=0 ----- -+ -Refresh your browser to show `v2` and `shard B` (in red). - -.. Scale `ab-example-a` to `1` replica and `ab-example-b` to `0`: -+ -[source,terminal] ----- -$ oc scale dc/ab-example-a --replicas=1; oc scale dc/ab-example-b --replicas=0 ----- -+ -Refresh your browser to show `v1` and `shard A` (in blue). - -. If you trigger a deployment on either shard, only the pods in that shard are affected. You can trigger a deployment by changing the `SUBTITLE` environment variable in either `Deployment` object: -+ -[source,terminal] ----- -$ oc edit dc/ab-example-a ----- -+ -or -+ -[source,terminal] ----- -$ oc edit dc/ab-example-b ----- diff --git a/modules/deployments-ab-testing.adoc b/modules/deployments-ab-testing.adoc deleted file mode 100644 index f0f4ea28202a..000000000000 --- a/modules/deployments-ab-testing.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-ab-testing_{context}"] -= A/B deployments - -The A/B deployment strategy lets you try a new version of the application in a -limited way in the production environment. You can specify that the production -version gets most of the user requests while a limited fraction of requests go -to the new version. - -Because you control the portion of requests to each version, as testing -progresses you can increase the fraction of requests to the new version and -ultimately stop using the previous version. As you adjust the request load on -each version, the number of pods in each service might have to be scaled as well -to provide the expected performance. - -In addition to upgrading software, you can use this feature to experiment with -versions of the user interface. Since some users get the old version and some -the new, you can evaluate the user's reaction to the different versions to -inform design decisions. - -For this to be effective, both the old and new versions must be similar enough -that both can run at the same time. This is common with bug fix releases and -when new features do not interfere with the old. The versions require N-1 -compatibility to properly work together. - -{product-title} supports N-1 compatibility through the web console as well as -the CLI. 
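As a brief illustration of the request split described above, the weights on a route can be shifted from the CLI as testing progresses. This is a minimal sketch that reuses the `ab-example` route and services from the load balancing procedure earlier in this section; the 90/10 split is only an example starting point:

[source,terminal]
----
$ oc set route-backends ab-example ab-example-a=90 ab-example-b=10
----

Rerunning the command with new weights, for example `ab-example-a=50 ab-example-b=50`, sends more traffic to the new version until the previous version can be retired.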
diff --git a/modules/deployments-accessing-private-repos.adoc b/modules/deployments-accessing-private-repos.adoc deleted file mode 100644 index e20d9a1a6a07..000000000000 --- a/modules/deployments-accessing-private-repos.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-accessing-private-repos_{context}"] -= Accessing private repositories from DeploymentConfig objects - -You can add a secret to your `DeploymentConfig` object so that it can access images from a private repository. This procedure shows the {product-title} web console method. - -.Procedure - -. Create a new project. - -. From the *Workloads* page, create a secret that contains credentials for accessing a private image repository. - -. Create a `DeploymentConfig` object. - -. On the `DeploymentConfig` object editor page, set the *Pull Secret* and save your changes. diff --git a/modules/deployments-assigning-pods-to-nodes.adoc b/modules/deployments-assigning-pods-to-nodes.adoc deleted file mode 100644 index 1f7d7706ae87..000000000000 --- a/modules/deployments-assigning-pods-to-nodes.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-assigning-pods-to-nodes_{context}"] -= Assigning pods to specific nodes - -You can use node selectors in conjunction with labeled nodes to control pod -placement. - -Cluster administrators can set the default node selector for a project in order -to restrict pod placement to specific nodes. As a developer, you can set a node -selector on a `Pod` configuration to restrict nodes even further. - -.Procedure - -. To add a node selector when creating a pod, edit the `Pod` configuration, and add -the `nodeSelector` value. This can be added to a single `Pod` configuration, or in -a `Pod` template: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -spec: - nodeSelector: - disktype: ssd -... ----- -+ -Pods created when the node selector is in place are assigned to nodes with the -specified labels. The labels specified here are used in conjunction with the -labels added by a cluster administrator. -+ -For example, if a project has the `type=user-node` and `region=east` labels -added to a project by the cluster administrator, and you add the above -`disktype: ssd` label to a pod, the pod is only ever scheduled on nodes that -have all three labels. -+ -[NOTE] -==== -Labels can only be set to one value, so setting a node selector of `region=west` -in a `Pod` configuration that has `region=east` as the administrator-set default, -results in a pod that will never be scheduled. -==== diff --git a/modules/deployments-blue-green.adoc b/modules/deployments-blue-green.adoc deleted file mode 100644 index e73648eb4200..000000000000 --- a/modules/deployments-blue-green.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-blue-green_{context}"] -= Blue-green deployments - -Blue-green deployments involve running two versions of an application at the same time and moving traffic from the in-production version (the blue version) to the newer version (the green version). You can use a rolling strategy or switch services in a route. 
- -Because many applications depend on persistent data, you must have an application that supports _N-1 compatibility_, which means it shares data and implements live migration between the database, store, or disk by creating two copies of the data layer. - -Consider the data used in testing the new version. If it is the production data, a bug in the new version can break the production version. - -[id="deployments-blue-green-setting-up_{context}"] -== Setting up a blue-green deployment - -Blue-green deployments use two `Deployment` objects. Both are running, and the one in production depends on the service the route specifies, with each `Deployment` object exposed to a different service. - -[NOTE] -==== -Routes are intended for web (HTTP and HTTPS) traffic, so this technique is best suited for web applications. -==== - -You can create a new route to the new version and test it. When ready, change the service in the production route to point to the new service and the new (green) version is live. - -If necessary, you can roll back to the older (blue) version by switching the service back to the previous version. - -.Procedure - -. Create two independent application components. -.. Create a copy of the example application running the `v1` image under the `example-blue` service: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v1 --name=example-blue ----- -+ -.. Create a second copy that uses the `v2` image under the `example-green` service: -+ -[source,terminal] ----- -$ oc new-app openshift/deployment-example:v2 --name=example-green ----- - -. Create a route that points to the old service: -+ -[source,terminal] ----- -$ oc expose svc/example-blue --name=bluegreen-example ----- - -. Browse to the application at `bluegreen-example-<project>.<router_domain>` to verify you see the `v1` image. - -. Edit the route and change the service name to `example-green`: -+ -[source,terminal] ----- -$ oc patch route/bluegreen-example -p '{"spec":{"to":{"name":"example-green"}}}' ----- - -. To verify that the route has changed, refresh the browser until you see the `v2` image. diff --git a/modules/deployments-canary-deployments.adoc b/modules/deployments-canary-deployments.adoc deleted file mode 100644 index 6ac3331f5571..000000000000 --- a/modules/deployments-canary-deployments.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-canary-deployments_{context}"] -= Canary deployments - -All rolling deployments in {product-title} are _canary deployments_; a new version (the canary) is tested before all of the old instances are replaced. If the readiness check never succeeds, the canary instance is removed and the `DeploymentConfig` object will be automatically rolled back. - -The readiness check is part of the application code and can be as sophisticated as necessary to ensure the new instance is ready to be used. If you must implement more complex checks of the application (such as sending real user workloads to the new instance), consider implementing a custom deployment or using a blue-green deployment strategy. 
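Because the readiness check gates the canary rollout, it is typically expressed as a readiness probe on the pod template. The following is a minimal sketch rather than an example taken from this document; the `/healthz` path, port `8080`, and timing values are assumptions for an application that exposes such an endpoint:

[source,yaml]
----
spec:
  template:
    spec:
      containers:
      - name: helloworld
        image: openshift/deployment-example:v2
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 10
          failureThreshold: 3
----

If the probe never succeeds, the behavior described above applies: the canary instance is removed and the rollout is rolled back instead of replacing the existing instances.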
diff --git a/modules/deployments-comparing-deploymentconfigs.adoc b/modules/deployments-comparing-deploymentconfigs.adoc deleted file mode 100644 index 3c8231353dfa..000000000000 --- a/modules/deployments-comparing-deploymentconfigs.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-comparing-deploymentconfigs_{context}"] -= Comparing Deployment and DeploymentConfig objects - -Both Kubernetes `Deployment` objects and {product-title}-provided `DeploymentConfig` objects are supported in {product-title}; however, it is recommended to use `Deployment` objects unless you need a specific feature or behavior provided by `DeploymentConfig` objects. - -The following sections go into more detail on the differences between the two object types to further help you decide which type to use. - -[id="deployments-design_{context}"] -== Design - -One important difference between `Deployment` and `DeploymentConfig` objects is the properties of the link:https://en.wikipedia.org/wiki/CAP_theorem[CAP theorem] that each design has chosen for the rollout process. `DeploymentConfig` objects prefer consistency, whereas `Deployments` objects take availability over consistency. - -For `DeploymentConfig` objects, if a node running a deployer pod goes down, it will not get replaced. The process waits until the node comes back online or is manually deleted. Manually deleting the node also deletes the corresponding pod. This means that you can not delete the pod to unstick the rollout, as the kubelet is responsible for deleting the associated pod. - -However, deployment rollouts are driven from a controller manager. The controller manager runs in high availability mode on masters and uses leader election algorithms to value availability over consistency. During a failure it is possible for other masters to act on the same deployment at the same time, but this issue will be reconciled shortly after the failure occurs. - -[id="delpoymentconfigs-specific-features_{context}"] -== DeploymentConfig object-specific features - -[discrete] -==== Automatic rollbacks - -Currently, deployments do not support automatically rolling back to the last successfully deployed replica set in case of a failure. - -[discrete] -==== Triggers - -Deployments have an implicit config change trigger in that every change in the pod template of a deployment automatically triggers a new rollout. -If you do not want new rollouts on pod template changes, pause the deployment: - -[source,terminal] ----- -$ oc rollout pause deployments/<name> ----- - -[discrete] -==== Lifecycle hooks - -Deployments do not yet support any lifecycle hooks. - -[discrete] -==== Custom strategies - -Deployments do not support user-specified custom deployment strategies yet. - -[id="delpoyments-specific-features_{context}"] -== Deployment-specific features - -[discrete] -==== Rollover - -The deployment process for `Deployment` objects is driven by a controller loop, in contrast to `DeploymentConfig` objects which use deployer pods for every new rollout. This means that the `Deployment` object can have as many active replica sets as possible, and eventually the deployment controller will scale down all old replica sets and scale up the newest one. - -`DeploymentConfig` objects can have at most one deployer pod running, otherwise multiple deployers end up conflicting while trying to scale up what they think should be the newest replication controller. 
Because of this, only two replication controllers can be active at any point in time. Ultimately, this translates to faster rapid rollouts for `Deployment` objects. - -[discrete] -==== Proportional scaling - -Because the deployment controller is the sole source of truth for the sizes of new and old replica sets owned by a `Deployment` object, it is able to scale ongoing rollouts. Additional replicas are distributed proportionally based on the size of each replica set. - -`DeploymentConfig` objects cannot be scaled when a rollout is ongoing because the controller will end up having issues with the deployer process about the size of the new replication controller. - -[discrete] -==== Pausing mid-rollout - -Deployments can be paused at any point in time, meaning you can also pause ongoing rollouts. On the other hand, you cannot pause deployer pods -currently, so if you try to pause a deployment in the middle of a rollout, the deployer process will not be affected and will continue until it finishes. diff --git a/modules/deployments-custom-strategy.adoc b/modules/deployments-custom-strategy.adoc deleted file mode 100644 index 66dcb82cc8b6..000000000000 --- a/modules/deployments-custom-strategy.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-custom-strategy_{context}"] -= Custom strategy - -The custom strategy allows you to provide your own deployment behavior. - -.Example custom strategy definition -[source,yaml] ----- -strategy: - type: Custom - customParams: - image: organization/strategy - command: [ "command", "arg1" ] - environment: - - name: ENV_1 - value: VALUE_1 ----- - -In the above example, the `organization/strategy` container image provides the deployment behavior. The optional `command` array overrides any `CMD` directive specified in the image's `Dockerfile`. The optional environment variables provided are added to the execution environment of the strategy process. - -Additionally, {product-title} provides the following environment variables to the deployment process: - -[cols="4,8",options="header"] -|=== -|Environment variable |Description - -.^|`OPENSHIFT_DEPLOYMENT_NAME` -|The name of the new deployment, a replication controller. - -.^|`OPENSHIFT_DEPLOYMENT_NAMESPACE` -|The name space of the new deployment. -|=== - -The replica count of the new deployment will initially be zero. The responsibility of the strategy is to make the new deployment active using the -logic that best serves the needs of the user. - -Alternatively, use the `customParams` object to inject the custom deployment logic into the existing deployment strategies. Provide a custom shell script logic and call the `openshift-deploy` binary. 
Users do not have to supply their custom deployer container image; in this case, the default {product-title} deployer image is used instead: - -[source,yaml] ----- -strategy: - type: Rolling - customParams: - command: - - /bin/sh - - -c - - | - set -e - openshift-deploy --until=50% - echo Halfway there - openshift-deploy - echo Complete ----- - -This results in following deployment: - -[source,terminal] ----- -Started deployment #2 ---> Scaling up custom-deployment-2 from 0 to 2, scaling down custom-deployment-1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) - Scaling custom-deployment-2 up to 1 ---> Reached 50% (currently 50%) -Halfway there ---> Scaling up custom-deployment-2 from 1 to 2, scaling down custom-deployment-1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) - Scaling custom-deployment-1 down to 1 - Scaling custom-deployment-2 up to 2 - Scaling custom-deployment-1 down to 0 ---> Success -Complete ----- - -If the custom deployment strategy process requires access to the {product-title} API or the Kubernetes API the container that executes the strategy can use the service account token available inside the container for authentication. diff --git a/modules/deployments-deploymentconfigs.adoc b/modules/deployments-deploymentconfigs.adoc deleted file mode 100644 index 63f5468a15b9..000000000000 --- a/modules/deployments-deploymentconfigs.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-and-deploymentconfigs_{context}"] -= DeploymentConfig objects - -Building on replication controllers, {product-title} adds expanded support for the software development and deployment lifecycle with the concept of `DeploymentConfig` objects. In the simplest case, a `DeploymentConfig` object creates a new replication controller and lets it start up pods. - -However, {product-title} deployments from `DeploymentConfig` objects also provide the ability to transition from an existing deployment of an image to a new one and also define hooks to be run before or after creating the replication controller. - -The `DeploymentConfig` deployment system provides the following capabilities: - -* A `DeploymentConfig` object, which is a template for running applications. -* Triggers that drive automated deployments in response to events. -* User-customizable deployment strategies to transition from the previous version to the new version. A strategy runs inside a pod commonly referred as the deployment process. -* A set of hooks (lifecycle hooks) for executing custom behavior in different points during the lifecycle of a deployment. -* Versioning of your application to support rollbacks either manually or automatically in case of deployment failure. -* Manual replication scaling and autoscaling. - -When you create a `DeploymentConfig` object, a replication controller is created representing the `DeploymentConfig` object's pod template. If the deployment changes, a new replication controller is created with the latest pod template, and a deployment process runs to scale down the old replication controller and scale up the new one. - -Instances of your application are automatically added and removed from both service load balancers and routers as they are created. As long as your application supports graceful shutdown when it receives the `TERM` signal, you can ensure that running user connections are given a chance to complete normally. 
- -The {product-title} `DeploymentConfig` object defines the following details: - -. The elements of a `ReplicationController` definition. -. Triggers for creating a new deployment automatically. -. The strategy for transitioning between deployments. -. Lifecycle hooks. - -Each time a deployment is triggered, whether manually or automatically, a deployer pod manages the deployment (including scaling down the old -replication controller, scaling up the new one, and running hooks). The deployment pod remains for an indefinite amount of time after it completes the deployment to retain its logs of the deployment. When a deployment is superseded by another, the previous replication controller is retained to enable easy rollback if needed. - -.Example `DeploymentConfig` definition -[source,yaml] ----- -apiVersion: apps.openshift.io/v1 -kind: DeploymentConfig -metadata: - name: frontend -spec: - replicas: 5 - selector: - name: frontend - template: { ... } - triggers: - - type: ConfigChange <1> - - imageChangeParams: - automatic: true - containerNames: - - helloworld - from: - kind: ImageStreamTag - name: hello-openshift:latest - type: ImageChange <2> - strategy: - type: Rolling <3> ----- -<1> A configuration change trigger results in a new replication controller whenever changes are detected in the pod template of the deployment configuration. -<2> An image change trigger causes a new deployment to be created each time a new version of the backing image is available in the named image stream. -<3> The default `Rolling` strategy makes a downtime-free transition between deployments. diff --git a/modules/deployments-exec-cmd-in-container.adoc b/modules/deployments-exec-cmd-in-container.adoc deleted file mode 100644 index 5c6edc97af18..000000000000 --- a/modules/deployments-exec-cmd-in-container.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-exe-cmd-in-container_{context}"] -= Executing commands inside a container - -You can add a command to a container, which modifies the container's startup behavior by overruling the image's `ENTRYPOINT`. This is different from a lifecycle hook, which instead can be run once per deployment at a specified time. - -.Procedure - -. Add the `command` parameters to the `spec` field of the `DeploymentConfig` object. You can also add an `args` field, which modifies the `command` (or the `ENTRYPOINT` if `command` does not exist). 
-+ -[source,yaml] ----- -spec: - containers: - - name: <container_name> - image: 'image' - command: - - '<command>' - args: - - '<argument_1>' - - '<argument_2>' - - '<argument_3>' ----- -+ -For example, to execute the `java` command with the `-jar` and `/opt/app-root/springboots2idemo.jar` arguments: -+ -[source,yaml] ----- -spec: - containers: - - name: example-spring-boot - image: 'image' - command: - - java - args: - - '-jar' - - /opt/app-root/springboots2idemo.jar ----- diff --git a/modules/deployments-graceful-termination.adoc b/modules/deployments-graceful-termination.adoc deleted file mode 100644 index ba10afd42803..000000000000 --- a/modules/deployments-graceful-termination.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-graceful-termination_{context}"] -= Graceful termination - -{product-title} and Kubernetes give application instances time to shut down before removing them from load balancing rotations. However, applications must ensure they cleanly terminate user connections as well before they exit. - -On shutdown, {product-title} sends a `TERM` signal to the processes in the container. Application code, on receiving `SIGTERM`, stops accepting new connections. This ensures that load balancers route traffic to other active instances. The application code then waits until all open connections are closed, or gracefully terminates individual connections at the next opportunity, before exiting. - -After the graceful termination period expires, a process that has not exited is sent the `KILL` signal, which immediately ends the process. The -`terminationGracePeriodSeconds` attribute of a pod or pod template controls the graceful termination period (default 30 seconds) and can be customized per application as necessary. diff --git a/modules/deployments-kube-deployments.adoc b/modules/deployments-kube-deployments.adoc deleted file mode 100644 index 831d85a783f8..000000000000 --- a/modules/deployments-kube-deployments.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-kube-deployments_{context}"] -= Deployments - -Kubernetes provides a first-class, native API object type in {product-title} called `Deployment`. `Deployment` objects serve as a descendant of the {product-title}-specific `DeploymentConfig` object. - -Like `DeploymentConfig` objects, `Deployment` objects describe the desired state of a particular component of an application as a pod template. Deployments create replica sets, which orchestrate pod lifecycles.
- -For example, the following deployment definition creates a replica set to bring up one `hello-openshift` pod: - -.Deployment definition -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hello-openshift -spec: - replicas: 1 - selector: - matchLabels: - app: hello-openshift - template: - metadata: - labels: - app: hello-openshift - spec: - containers: - - name: hello-openshift - image: openshift/hello-openshift:latest - ports: - - containerPort: 80 ----- diff --git a/modules/deployments-lifecycle-hooks.adoc b/modules/deployments-lifecycle-hooks.adoc deleted file mode 100644 index a373a6866b68..000000000000 --- a/modules/deployments-lifecycle-hooks.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="deployments-lifecycle-hooks_{context}"] -= Lifecycle hooks - -The rolling and recreate strategies support _lifecycle hooks_, or deployment hooks, which allow behavior to be injected into the deployment process at predefined points within the strategy: - -.Example `pre` lifecycle hook -[source,yaml] ----- -pre: - failurePolicy: Abort - execNewPod: {} <1> ----- -<1> `execNewPod` is a pod-based lifecycle hook. - -Every hook has a _failure policy_, which defines the action the strategy should take when a hook failure is encountered: - -[cols="2,8"] -|=== - -.^|`Abort` -|The deployment process will be considered a failure if the hook fails. - -.^|`Retry` -|The hook execution should be retried until it succeeds. - -.^|`Ignore` -|Any hook failure should be ignored and the deployment should proceed. -|=== - -Hooks have a type-specific field that describes how to execute the hook. Currently, pod-based hooks are the only supported hook type, specified by the `execNewPod` field. - -[discrete] -==== Pod-based lifecycle hook - -Pod-based lifecycle hooks execute hook code in a new pod derived from the template in a `DeploymentConfig` object. - -The following simplified example deployment uses the rolling strategy. Triggers and some other minor details are omitted for brevity: - -[source,yaml] ----- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: frontend -spec: - template: - metadata: - labels: - name: frontend - spec: - containers: - - name: helloworld - image: openshift/origin-ruby-sample - replicas: 5 - selector: - name: frontend - strategy: - type: Rolling - rollingParams: - pre: - failurePolicy: Abort - execNewPod: - containerName: helloworld <1> - command: [ "/usr/bin/command", "arg1", "arg2" ] <2> - env: <3> - - name: CUSTOM_VAR1 - value: custom_value1 - volumes: - - data <4> ----- -<1> The `helloworld` name refers to `spec.template.spec.containers[0].name`. -<2> This `command` overrides any `ENTRYPOINT` defined by the `openshift/origin-ruby-sample` image. -<3> `env` is an optional set of environment variables for the hook container. -<4> `volumes` is an optional set of volume references for the hook container. - -In this example, the `pre` hook will be executed in a new pod using the `openshift/origin-ruby-sample` image from the `helloworld` container. The hook pod has the following properties: - -* The hook command is `/usr/bin/command arg1 arg2`. -* The hook container has the `CUSTOM_VAR1=custom_value1` environment variable. -* The hook failure policy is `Abort`, meaning the deployment process fails if the hook fails. -* The hook pod inherits the `data` volume from the `DeploymentConfig` object pod. 
- -[id="deployments-setting-lifecycle-hooks_{context}"] -== Setting lifecycle hooks - -You can set lifecycle hooks, or deployment hooks, for a deployment using the CLI. - -.Procedure - -. Use the `oc set deployment-hook` command to set the type of hook you want: `--pre`, `--mid`, or `--post`. For example, to set a pre-deployment hook: -+ -[source,terminal] ----- -$ oc set deployment-hook dc/frontend \ - --pre -c helloworld -e CUSTOM_VAR1=custom_value1 \ - --volumes data --failure-policy=abort -- /usr/bin/command arg1 arg2 ----- diff --git a/modules/deployments-n1-compatibility.adoc b/modules/deployments-n1-compatibility.adoc deleted file mode 100644 index 52ad646196d2..000000000000 --- a/modules/deployments-n1-compatibility.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-n1-compatibility_{context}"] -= N-1 compatibility - -Applications that have new code and old code running at the same time must be -careful to ensure that data written by the new code can be read and handled (or -gracefully ignored) by the old version of the code. This is sometimes called -_schema evolution_ and is a complex problem. - -This can take many forms: data stored on disk, in a database, in a temporary -cache, or that is part of a user's browser session. While most web applications -can support rolling deployments, it is important to test and design your -application to handle it. - -For some applications, the period of time that old code and new code is running -side by side is short, so bugs or some failed user transactions are acceptable. -For others, the failure pattern may result in the entire application becoming -non-functional. - -One way to validate N-1 compatibility is to use an A/B deployment: run the old -code and new code at the same time in a controlled way in a test environment, -and verify that traffic that flows to the new deployment does not cause failures -in the old deployment. diff --git a/modules/deployments-proxy-shards.adoc b/modules/deployments-proxy-shards.adoc deleted file mode 100644 index 9a0cc7dc89b2..000000000000 --- a/modules/deployments-proxy-shards.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/route-based-deployment-strategies.adoc - -[id="deployments-proxy-shard_{context}"] -= Proxy shards and traffic splitting - -In production environments, you can precisely control the distribution of -traffic that lands on a particular shard. When dealing with large numbers of -instances, you can use the relative scale of individual shards to implement -percentage based traffic. That combines well with a _proxy shard_, which -forwards or splits the traffic it receives to a separate service or application -running elsewhere. - -In the simplest configuration, the proxy forwards requests unchanged. In -more complex setups, you can duplicate the incoming requests and send to -both a separate cluster as well as to a local instance of the application, and -compare the result. Other patterns include keeping the caches of a DR -installation warm, or sampling incoming traffic for analysis purposes. - -Any TCP (or UDP) proxy could be run under the desired shard. Use the `oc scale` -command to alter the relative number of instances serving requests under the -proxy shard. For more complex traffic management, consider customizing the -{product-title} router with proportional balancing capabilities. 
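As a sketch of the relative-scale approach described above, assume two hypothetical deployments, `app-main` serving the bulk of the traffic and `app-proxy-shard` acting as the proxy shard; both names are placeholders used only for illustration. Giving the proxy shard roughly ten percent of the serving instances looks like the following:

[source,terminal]
----
$ oc scale deployment/app-main --replicas=9
$ oc scale deployment/app-proxy-shard --replicas=1
----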
diff --git a/modules/deployments-recreate-strategy.adoc b/modules/deployments-recreate-strategy.adoc deleted file mode 100644 index dca822276508..000000000000 --- a/modules/deployments-recreate-strategy.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-recreate-strategy_{context}"] -= Recreate strategy - -The recreate strategy has basic rollout behavior and supports lifecycle hooks for injecting code into the deployment process. - -.Example recreate strategy definition -[source,yaml] ----- -strategy: - type: Recreate - recreateParams: <1> - pre: {} <2> - mid: {} - post: {} ----- - -<1> `recreateParams` are optional. -<2> `pre`, `mid`, and `post` are lifecycle hooks. - -The recreate strategy: - -. Executes any `pre` lifecycle hook. -. Scales down the previous deployment to zero. -. Executes any `mid` lifecycle hook. -. Scales up the new deployment. -. Executes any `post` lifecycle hook. - -[IMPORTANT] -==== -During scale up, if the replica count of the deployment is greater than one, the first replica of the deployment is validated for readiness before the deployment is fully scaled up. If the validation of the first replica fails, the deployment is considered a failure. -==== - -*When to use a recreate deployment:* - -- When you must run migrations or other data transformations before your new code starts. -- When you do not support having new and old versions of your application code running at the same time. -- When you want to use an RWO volume, which cannot be shared between multiple replicas. - -A recreate deployment incurs downtime because, for a brief period, no instances of your application are running. However, your old code and new code do not run at the same time. diff --git a/modules/deployments-replicasets.adoc b/modules/deployments-replicasets.adoc deleted file mode 100644 index fe2bbb4820c6..000000000000 --- a/modules/deployments-replicasets.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-repliasets_{context}"] -= Replica sets - -Similar to a replication controller, a `ReplicaSet` is a native Kubernetes API object that ensures a specified number of pod replicas are running at any given time. The difference between a replica set and a replication controller is that a replica set supports set-based selector requirements whereas a replication controller only supports equality-based selector requirements. - -[NOTE] -==== -Only use replica sets if you require custom update orchestration or do not require updates at all. Otherwise, use deployments. Replica sets can be used independently, but are used by deployments to orchestrate pod creation, deletion, and updates. Deployments manage their replica sets automatically and provide declarative updates to pods, so you do not have to manually manage the replica sets that they create.
-==== - -The following is an example `ReplicaSet` definition: - -[source,yaml] ----- -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: frontend-1 - labels: - tier: frontend -spec: - replicas: 3 - selector: <1> - matchLabels: <2> - tier: frontend - matchExpressions: <3> - - {key: tier, operator: In, values: [frontend]} - template: - metadata: - labels: - tier: frontend - spec: - containers: - - image: openshift/hello-openshift - name: helloworld - ports: - - containerPort: 8080 - protocol: TCP - restartPolicy: Always ----- -<1> A label query over a set of resources. The result of `matchLabels` and `matchExpressions` are logically conjoined. -<2> Equality-based selector to specify resources with labels that match the selector. -<3> Set-based selector to filter keys. This selects all resources with key equal to `tier` and value equal to `frontend`. diff --git a/modules/deployments-replicationcontrollers.adoc b/modules/deployments-replicationcontrollers.adoc deleted file mode 100644 index eef11545854f..000000000000 --- a/modules/deployments-replicationcontrollers.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/what-deployments-are.adoc - -[id="deployments-replicationcontrollers_{context}"] -= Replication controllers - -A replication controller ensures that a specified number of replicas of a pod are running at all times. If pods exit or are deleted, the replication controller acts to instantiate more up to the defined number. Likewise, if there are more running than desired, it deletes as many as necessary to match the defined amount. - -A replication controller configuration consists of: - -* The number of replicas desired, which can be adjusted at run time. -* A `Pod` definition to use when creating a replicated pod. -* A selector for identifying managed pods. - -A selector is a set of labels assigned to the pods that are managed by the replication controller. These labels are included in the `Pod` definition that the replication controller instantiates. The replication controller uses the selector to determine how many instances of the pod are already running in order to adjust as needed. - -The replication controller does not perform auto-scaling based on load or traffic, as it does not track either. Rather, this requires its replica -count to be adjusted by an external auto-scaler. - -The following is an example definition of a replication controller: - -[source,yaml] ----- -apiVersion: v1 -kind: ReplicationController -metadata: - name: frontend-1 -spec: - replicas: 1 <1> - selector: <2> - name: frontend - template: <3> - metadata: - labels: <4> - name: frontend <5> - spec: - containers: - - image: openshift/hello-openshift - name: helloworld - ports: - - containerPort: 8080 - protocol: TCP - restartPolicy: Always ----- -<1> The number of copies of the pod to run. -<2> The label selector of the pod to run. -<3> A template for the pod the controller creates. -<4> Labels on the pod should include those from the label selector. -<5> The maximum name length after expanding any parameters is 63 characters. 
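To see how the selector relates to the pods it manages, you can inspect the replication controller and list the pods that carry the matching label. The following commands assume the `frontend-1` example above:

[source,terminal]
----
$ oc get rc frontend-1
$ oc get pods -l name=frontend
----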
diff --git a/modules/deployments-retrying-deployment.adoc b/modules/deployments-retrying-deployment.adoc deleted file mode 100644 index c2f04a91d147..000000000000 --- a/modules/deployments-retrying-deployment.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-retrying-deployment_{context}"] -= Retrying a deployment - -If the current revision of your `DeploymentConfig` object failed to deploy, you can restart the deployment process. - -.Procedure - -. To restart a failed deployment process: -+ -[source,terminal] ----- -$ oc rollout retry dc/<name> ----- -+ -If the latest revision of it was deployed successfully, the command displays a message and the deployment process is not retried. -+ -[NOTE] -==== -Retrying a deployment restarts the deployment process and does not create a new deployment revision. The restarted replication controller has the same configuration it had when it failed. -==== diff --git a/modules/deployments-rolling-back.adoc b/modules/deployments-rolling-back.adoc deleted file mode 100644 index b8845944f77f..000000000000 --- a/modules/deployments-rolling-back.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-rolling-back_{context}"] -= Rolling back a deployment - -Rollbacks revert an application back to a previous revision and can be performed using the REST API, the CLI, or the web console. - -.Procedure - -. To rollback to the last successful deployed revision of your configuration: -+ -[source,terminal] ----- -$ oc rollout undo dc/<name> ----- -+ -The `DeploymentConfig` object's template is reverted to match the deployment revision specified in the undo command, and a new replication controller is started. If no revision is specified with `--to-revision`, then the last successfully deployed revision is used. - -. Image change triggers on the `DeploymentConfig` object are disabled as part of the rollback to prevent accidentally starting a new deployment process soon after the rollback is complete. -+ -To re-enable the image change triggers: -+ -[source,terminal] ----- -$ oc set triggers dc/<name> --auto ----- - -[NOTE] -==== -Deployment configs also support automatically rolling back to the last successful revision of the configuration in case the latest deployment process fails. In that case, the latest template that failed to deploy stays intact by the system and it is up to users to fix their configurations. -==== diff --git a/modules/deployments-rolling-strategy.adoc b/modules/deployments-rolling-strategy.adoc deleted file mode 100644 index 3105608436d2..000000000000 --- a/modules/deployments-rolling-strategy.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -[id="deployments-rolling-strategy_{context}"] -= Rolling strategy - -A rolling deployment slowly replaces instances of the previous version of an application with instances of the new version of the application. The rolling strategy is the default deployment strategy used if no strategy is specified on a `DeploymentConfig` object. - -A rolling deployment typically waits for new pods to become `ready` via a readiness check before scaling down the old components. 
If a significant issue occurs, the rolling deployment can be aborted. - -*When to use a rolling deployment:* - -- When you want to take no downtime during an application update. -- When your application supports having old code and new code running at the same time. - -A rolling deployment means you have both old and new versions of your code running at the same time. This typically requires that your application handle N-1 compatibility. - -.Example rolling strategy definition -[source,yaml] ----- -strategy: - type: Rolling - rollingParams: - updatePeriodSeconds: 1 <1> - intervalSeconds: 1 <2> - timeoutSeconds: 120 <3> - maxSurge: "20%" <4> - maxUnavailable: "10%" <5> - pre: {} <6> - post: {} ----- -<1> The time to wait between individual pod updates. If unspecified, this value defaults to `1`. -<2> The time to wait between polling the deployment status after update. If unspecified, this value defaults to `1`. -<3> The time to wait for a scaling event before giving up. Optional; the default is `600`. Here, _giving up_ means automatically rolling back to the previous complete deployment. -<4> `maxSurge` is optional and defaults to `25%` if not specified. See the information below the following procedure. -<5> `maxUnavailable` is optional and defaults to `25%` if not specified. See the information below the following procedure. -<6> `pre` and `post` are both lifecycle hooks. - -The rolling strategy: - -. Executes any `pre` lifecycle hook. -. Scales up the new replication controller based on the surge count. -. Scales down the old replication controller based on the max unavailable count. -. Repeats this scaling until the new replication controller has reached the desired replica count and the old replication controller has been scaled to zero. -. Executes any `post` lifecycle hook. - -[IMPORTANT] -==== -When scaling down, the rolling strategy waits for pods to become ready so it can decide whether further scaling would affect availability. If scaled up pods never become ready, the deployment process eventually times out and results in a deployment failure. -==== - -The `maxUnavailable` parameter is the maximum number of pods that can be unavailable during the update. The `maxSurge` parameter is the maximum number of pods that can be scheduled above the original number of pods. Both parameters can be set to either a percentage (for example, `10%`) or an absolute value (for example, `2`). The default value for both is `25%`. - -These parameters allow the deployment to be tuned for availability and speed. For example: - -- `maxUnavailable=0` and `maxSurge=20%` maintains full capacity during the update and scales up rapidly. -- `maxUnavailable=10%` and `maxSurge=0` performs an update using no extra capacity (an in-place update). -- `maxUnavailable=10%` and `maxSurge=10%` scales up and down quickly with some potential for capacity loss. - -Generally, if you want fast rollouts, use `maxSurge`. If you have to take into account resource quota and can accept partial unavailability, use -`maxUnavailable`.
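For instance, a minimal sketch of an in-place update that stays within a tight resource quota could set the parameters as follows; adjust the values to match your own availability requirements:

[source,yaml]
----
strategy:
  type: Rolling
  rollingParams:
    maxSurge: "0%"
    maxUnavailable: "10%"
----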
diff --git a/modules/deployments-running-pod-svc-acct.adoc b/modules/deployments-running-pod-svc-acct.adoc deleted file mode 100644 index b02377390a70..000000000000 --- a/modules/deployments-running-pod-svc-acct.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-running-pod-svc-acct_{context}"] -= Running a pod with a different service account - -You can run a pod with a service account other than the default. - -.Procedure - -. Edit the `DeploymentConfig` object: -+ -[source,terminal] ----- -$ oc edit dc/<deployment_config> ----- - -. Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` field, and specify the service account you want to use: -+ -[source,yaml] ----- -spec: - securityContext: {} - serviceAccount: <service_account> - serviceAccountName: <service_account> ----- diff --git a/modules/deployments-scaling-manually.adoc b/modules/deployments-scaling-manually.adoc deleted file mode 100644 index 4df16c9cca92..000000000000 --- a/modules/deployments-scaling-manually.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-scaling-manually_{context}"] -= Scaling manually - -In addition to rollbacks, you can exercise fine-grained control over the number of replicas by manually scaling them. - -[NOTE] -==== -Pods can also be auto-scaled using the `oc autoscale` command. -==== - -.Procedure - -. To manually scale a `DeploymentConfig` object, use the `oc scale` command. For example, the following command sets the replicas in the `frontend` `DeploymentConfig` object to `3`. -+ -[source,terminal] ----- -$ oc scale dc frontend --replicas=3 ----- -+ -The number of replicas eventually propagates to the desired and current state of the deployment configured by the `DeploymentConfig` object `frontend`. diff --git a/modules/deployments-setting-resources.adoc b/modules/deployments-setting-resources.adoc deleted file mode 100644 index 2f2bbd2a01ae..000000000000 --- a/modules/deployments-setting-resources.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-setting-resources_{context}"] -= Setting deployment resources - -A deployment is completed by a pod that consumes resources (memory, CPU, and ephemeral storage) on a node. By default, pods consume unbounded node resources. However, if a project specifies default container limits, then pods consume resources up to those limits. - -[NOTE] -==== -The minimum memory limit for a deployment is 12 MB. If a container fails to start due to a `Cannot allocate memory` pod event, the memory limit is too low. Either increase or remove the memory limit. Removing the limit allows pods to consume unbounded node resources. -==== - -You can also limit resource use by specifying resource limits as part of the deployment strategy. Deployment resources can be used with the recreate, rolling, or custom deployment strategies. - -.Procedure - -. 
In the following example, each of `resources`, `cpu`, `memory`, and `ephemeral-storage` is optional: -+ -[source,yaml] ----- -type: "Recreate" -resources: - limits: - cpu: "100m" <1> - memory: "256Mi" <2> - ephemeral-storage: "1Gi" <3> ----- -<1> `cpu` is in CPU units: `100m` represents 0.1 CPU units (100 * 1e-3). -<2> `memory` is in bytes: `256Mi` represents 268435456 bytes (256 * 2 ^ 20). -<3> `ephemeral-storage` is in bytes: `1Gi` represents 1073741824 bytes (2 ^ 30). -+ -However, if a quota has been defined for your project, one of the following two items is required: -+ --- -- A `resources` section set with an explicit `requests`: -+ -[source,yaml] ----- - type: "Recreate" - resources: - requests: <1> - cpu: "100m" - memory: "256Mi" - ephemeral-storage: "1Gi" ----- -<1> The `requests` object contains the list of resources that correspond to the list of resources in the quota. - -- A limit range defined in your project, where the defaults from the `LimitRange` object apply to pods created during the deployment process. --- -+ -To set deployment resources, choose one of the above options. Otherwise, deploy pod creation fails, citing a failure to satisfy quota. diff --git a/modules/deployments-setting-triggers.adoc b/modules/deployments-setting-triggers.adoc deleted file mode 100644 index b31f1b6cc9cc..000000000000 --- a/modules/deployments-setting-triggers.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-setting-triggers_{context}"] -= Setting deployment triggers - -.Procedure - -. You can set deployment triggers for a `DeploymentConfig` object using the `oc set triggers` command. For example, to set a image change trigger, use the following command: -+ -[source,terminal] ----- -$ oc set triggers dc/<dc_name> \ - --from-image=<project>/<image>:<tag> -c <container_name> ----- diff --git a/modules/deployments-starting-deployment.adoc b/modules/deployments-starting-deployment.adoc deleted file mode 100644 index 5bbce6150665..000000000000 --- a/modules/deployments-starting-deployment.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-starting-a-deployment_{context}"] -= Starting a deployment - -You can start a rollout to begin the deployment process of your application. - -.Procedure - -. To start a new deployment process from an existing `DeploymentConfig` object, run the following command: -+ -[source,terminal] ----- -$ oc rollout latest dc/<name> ----- -+ -[NOTE] -==== -If a deployment process is already in progress, the command displays a message and a new replication controller will not be deployed. -==== diff --git a/modules/deployments-triggers.adoc b/modules/deployments-triggers.adoc deleted file mode 100644 index 5140677f9507..000000000000 --- a/modules/deployments-triggers.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -[id="deployments-triggers_{context}"] -= Deployment triggers - -A `DeploymentConfig` object can contain triggers, which drive the creation of new deployment processes in response to events inside the cluster. - -[WARNING] -==== -If no triggers are defined on a `DeploymentConfig` object, a config change trigger is added by default. 
If triggers are defined as an empty field, deployments must be started manually. -==== - -[discrete] -[id="deployments-configchange-trigger_{context}"] -=== Config change deployment triggers - -The config change trigger results in a new replication controller whenever configuration changes are detected in the pod template of the `DeploymentConfig` object. - -[NOTE] -==== -If a config change trigger is defined on a `DeploymentConfig` object, the first replication controller is automatically created soon after the `DeploymentConfig` object itself is created and it is not paused. -==== - -.Config change deployment trigger -[source,yaml] ----- -triggers: - - type: "ConfigChange" ----- - -[discrete] -[id="deployments-imagechange-trigger_{context}"] -=== Image change deployment triggers - -The image change trigger results in a new replication controller whenever the content of an image stream tag changes (when a new version of the image is pushed). - -.Image change deployment trigger -[source,yaml] ----- -triggers: - - type: "ImageChange" - imageChangeParams: - automatic: true <1> - from: - kind: "ImageStreamTag" - name: "origin-ruby-sample:latest" - namespace: "myproject" - containerNames: - - "helloworld" ----- -<1> If the `imageChangeParams.automatic` field is set to `false`, the trigger is disabled. - -With the above example, when the `latest` tag value of the `origin-ruby-sample` image stream changes and the new image value differs from the current image specified in the `DeploymentConfig` object's `helloworld` container, a new replication controller is created using the new image for the `helloworld` container. - -[NOTE] -==== -If an image change trigger is defined on a `DeploymentConfig` object (with a config change trigger and `automatic=false`, or with `automatic=true`) and the image stream tag pointed by the image change trigger does not exist yet, the initial deployment process will automatically start as soon as an image is imported or pushed by a build to the image stream tag. -==== diff --git a/modules/deployments-viewing-deployment.adoc b/modules/deployments-viewing-deployment.adoc deleted file mode 100644 index 15ac06a8168a..000000000000 --- a/modules/deployments-viewing-deployment.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-viewing-a-deployment_{context}"] -= Viewing a deployment - -You can view a deployment to get basic information about all the available revisions of your application. - -.Procedure - -. To show details about all recently created replication controllers for the provided `DeploymentConfig` object, including any currently running deployment process, run the following command: -+ -[source,terminal] ----- -$ oc rollout history dc/<name> ----- - -. To view details specific to a revision, add the `--revision` flag: -+ -[source,terminal] ----- -$ oc rollout history dc/<name> --revision=1 ----- - -. 
For more detailed information about a `DeploymentConfig` object and its latest revision, use the `oc describe` command: -+ -[source,terminal] ----- -$ oc describe dc <name> ----- diff --git a/modules/deployments-viewing-logs.adoc b/modules/deployments-viewing-logs.adoc deleted file mode 100644 index 417ea9833a8f..000000000000 --- a/modules/deployments-viewing-logs.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/managing-deployment-processes.adoc - -:_content-type: PROCEDURE -[id="deployments-viewing-logs_{context}"] -= Viewing deployment logs - -.Procedure - -. To stream the logs of the latest revision for a given `DeploymentConfig` object: -+ -[source,terminal] ----- -$ oc logs -f dc/<name> ----- -+ -If the latest revision is running or failed, the command returns the logs of the process that is responsible for deploying your pods. If it is successful, it returns the logs from a pod of your application. - -. You can also view logs from older failed deployment processes, if and only if these processes (old replication controllers and their deployer pods) exist and have not been pruned or deleted manually: -+ -[source,terminal] ----- -$ oc logs --version=1 dc/<name> ----- diff --git a/modules/deprecated-feature.adoc b/modules/deprecated-feature.adoc deleted file mode 100644 index 5e6f5b7e3445..000000000000 --- a/modules/deprecated-feature.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// When including this file, ensure that {FeatureName} is set immediately before -// the include. Otherwise it will result in an incorrect replacement. - -[IMPORTANT] -==== -[subs="attributes+"] -{FeatureName} is a deprecated feature. Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -For the most recent list of major functionality that has been deprecated or removed within {product-title}, refer to the _Deprecated and removed features_ section of the {product-title} release notes. -==== -// Undefine {FeatureName} attribute, so that any mistakes are easily spotted -:!FeatureName: diff --git a/modules/describe-function-kn.adoc b/modules/describe-function-kn.adoc deleted file mode 100644 index d86daf880a8e..000000000000 --- a/modules/describe-function-kn.adoc +++ /dev/null @@ -1,35 +0,0 @@ -:_content-type: PROCEDURE -[id="describe-function-kn_{context}"] -= Describing a function - -The `kn func info` command prints information about a deployed function, such as the function name, image, namespace, Knative service information, route information, and event subscriptions. 
- -.Procedure - -* Describe a function: -+ -[source,termnal] ----- -$ kn func info [-f <format> -n <namespace> -p <path>] ----- -+ -.Example command -[source,terminal] ----- -$ kn func info -p function/example-function ----- -+ -.Example output -[source,terminal] ----- -Function name: - example-function -Function is built in image: - docker.io/user/example-function:latest -Function is deployed as Knative Service: - example-function -Function is deployed in namespace: - default -Routes: - http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com ----- diff --git a/modules/determining-upgrade-viability-conditiontype.adoc b/modules/determining-upgrade-viability-conditiontype.adoc deleted file mode 100644 index 775b6e771261..000000000000 --- a/modules/determining-upgrade-viability-conditiontype.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/index.adoc - -:_content-type: CONCEPT -[id="understanding_clusteroperator_conditiontypes_{context}"] -= Understanding cluster Operator condition types - -The status of cluster Operators includes their condition type, which informs you of the current state of your Operator's health. The following definitions cover a list of some common ClusterOperator condition types. Operators that have additional condition types and use Operator-specific language have been omitted. - -The Cluster Version Operator (CVO) is responsible for collecting the status conditions from cluster Operators so that cluster administrators can better understand the state of the {product-title} cluster. - -//Condition types, as well as additional information about your operator, can be retrieved in either YAML or JSON format through the `oc get clusterversion -o` command: - -//[source,terminal] -//---- -//$ oc get clusterversion -o yaml -//---- - - -* Available: -The condition type `Available` indicates that an Operator is functional and available in the cluster. If the status is `False`, at least one part of the operand is non-functional and the condition requires an administrator to intervene. - -* Progressing: -The condition type `Progressing` indicates that an Operator is actively rolling out new code, propagating configuration changes, or otherwise moving from one steady state to another. -+ -Operators do not report the condition type `Progressing` as `True` when they are reconciling a previous known state. If the observed cluster state has changed and the Operator is reacting to it, then the status reports back as `True`, since it is moving from one steady state to another. -+ -* Degraded: -The condition type `Degraded` indicates that an Operator has a current state that does not match its required state over a period of time. The period of time can vary by component, but a `Degraded` status represents persistent observation of an Operator's condition. As a result, an Operator does not fluctuate in and out of the `Degraded` state. -+ -There might be a different condition type if the transition from one state to another does not persist over a long enough period to report `Degraded`. -An Operator does not report `Degraded` during the course of a normal update. An Operator may report `Degraded` in response to a persistent infrastructure failure that requires eventual administrator intervention. -+ -[NOTE] -==== -This condition type is only an indication that something may need investigation and adjustment. 
As long as the Operator is available, the `Degraded` condition does not cause user workload failure or application downtime. -==== -+ -* Upgradeable: -The condition type `Upgradeable` indicates whether the Operator is safe to update based on the current cluster state. The message field contains a human-readable description of what the administrator needs to do for the cluster to successfully update. The CVO allows updates when this condition is `True`, `Unknown` or missing. -+ -When the `Upgradeable` status is `False`, only minor updates are impacted, and the CVO prevents the cluster from performing impacted updates unless forced. diff --git a/modules/determining-upgrade-viability-cv-conditiontype.adoc b/modules/determining-upgrade-viability-cv-conditiontype.adoc deleted file mode 100644 index 17d7b7a08457..000000000000 --- a/modules/determining-upgrade-viability-cv-conditiontype.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/index.adoc - -:_content-type: CONCEPT -[id="understanding-clusterversion-conditiontypes_{context}"] -= Understanding cluster version condition types - -The Cluster Version Operator (CVO) monitors cluster Operators and other components, and is responsible for collecting the status of both the cluster version and its Operators. This status includes the condition type, which informs you of the health and current state of the {product-title} cluster. - -In addition to `Available`, `Progressing`, and `Upgradeable`, there are condition types that affect cluster versions and Operators. - -* Failing: -The cluster version condition type `Failing` indicates that a cluster cannot reach its desired state, is unhealthy, and requires an administrator to intervene. - -* Invalid: -The cluster version condition type `Invalid` indicates that the cluster version has an error that prevents the server from taking action. The CVO only reconciles the current state as long as this condition is set. - -* RetrievedUpdates: -The cluster version condition type `RetrievedUpdates` indicates whether or not available updates have been retrieved from the upstream update server. The condition is `Unknown` before retrieval, `False` if the updates either recently failed or could not be retrieved, or `True` if the `availableUpdates` field is both recent and accurate. - -* ReleaseAccepted: -The cluster version condition type `ReleaseAccepted` with a `True` status indicates that the requested release payload was successfully loaded without failure during image verification and precondition checking. - -* ImplicitlyEnabledCapabilities: -The cluster version condition type `ImplicitlyEnabledCapabilities` with a `True` status indicates that there are enabled capabilities that the user is not currently requesting through `spec.capabilities`. The CVO does not support disabling capabilities if any associated resources were previously managed by the CVO. 
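A quick way to review these conditions on a running cluster is to print the status of the `ClusterVersion` resource, which includes the condition types described above, for example:

[source,terminal]
----
$ oc get clusterversion version -o yaml
----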
- - diff --git a/modules/determining-where-installation-issues-occur.adoc b/modules/determining-where-installation-issues-occur.adoc deleted file mode 100644 index 2df257792878..000000000000 --- a/modules/determining-where-installation-issues-occur.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -[id="determining-where-installation-issues-occur_{context}"] -= Determining where installation issues occur - -When troubleshooting {product-title} installation issues, you can monitor installation logs to determine at which stage issues occur. Then, retrieve diagnostic data relevant to that stage. - -{product-title} installation proceeds through the following stages: - -. Ignition configuration files are created. - -. The bootstrap machine boots and starts hosting the remote resources required for the control plane machines to boot. - -. The control plane machines fetch the remote resources from the bootstrap machine and finish booting. - -. The control plane machines use the bootstrap machine to form an etcd cluster. - -. The bootstrap machine starts a temporary Kubernetes control plane using the new etcd cluster. - -. The temporary control plane schedules the production control plane to the control plane machines. - -. The temporary control plane shuts down and passes control to the production control plane. - -. The bootstrap machine adds {product-title} components into the production control plane. - -. The installation program shuts down the bootstrap machine. - -. The control plane sets up the worker nodes. - -. The control plane installs additional services in the form of a set of Operators. - -. The cluster downloads and configures remaining components needed for the day-to-day operation, including the creation of worker machines in supported environments. diff --git a/modules/developer-cli-odo-about-devfiles-in-odo.adoc b/modules/developer-cli-odo-about-devfiles-in-odo.adoc deleted file mode 100644 index e4e97bc98203..000000000000 --- a/modules/developer-cli-odo-about-devfiles-in-odo.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: CONCEPT -[id="about-the-devfile-in-odo"] -= About the devfile in {odo-title} - -The devfile is a portable file that describes your development environment. -With the devfile, you can define a portable developmental environment without the need for reconfiguration. - -With the devfile, you can describe your development environment, such as the source code, IDE tools, application runtimes, and predefined commands. To learn more about the devfile, see link:https://redhat-developer.github.io/devfile/[the devfile documentation]. - -With `{odo-title}`, you can create components from the devfiles. When creating a component by using a devfile, `{odo-title}` transforms the devfile into a workspace consisting of multiple containers that run on {product-title}, Kubernetes, or Docker. -`{odo-title}` automatically uses the default devfile registry but users can add their own registries. 
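For example, to see which devfile components the configured registries currently provide before creating one, you can list them; the exact output depends on the registries in use:

[source,terminal]
----
$ odo catalog list components
----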
diff --git a/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc b/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc deleted file mode 100644 index 290da8336df9..000000000000 --- a/modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="adding-a-custom-builder-to-specify-a-build-image_{context}"] -= Adding a custom builder to specify a build image - -With {product-title}, you can add a custom image to bridge the gap between the creation of custom images. - -The following example demonstrates the successful import and use of the `redhat-openjdk-18` image: - -.Prerequisites -* The OpenShift CLI (oc) is installed. - -.Procedure - -. Import the image into {product-title}: -+ -[source,terminal] ----- -$ oc import-image openjdk18 \ ---from=registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift \ ---confirm ----- -. Tag the image to make it accessible to {odo-title}: -+ -[source,terminal] ----- -$ oc annotate istag/openjdk18:latest tags=builder ----- -. Deploy the image with {odo-title}: -+ -[source,terminal] ----- -$ odo create openjdk18 --git \ -https://github.com/openshift-evangelists/Wild-West-Backend ----- diff --git a/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc b/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc deleted file mode 100644 index 7ae9158d6d8f..000000000000 --- a/modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -//creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE -[id="adding-storage-to-a-specific-container_{context}"] -= Adding storage to a specific container - -If your devfile has multiple containers, you can use the `--container` flag to specify the container you want to attach storage to. - -.Procedure - -. Create a devfile with multiple containers: -+ -[source,yaml] ----- -components: - - name: runtime <1> - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000-tcp" - targetPort: 3000 - mountSources: true - - name: funtime <2> - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi ----- -<1> The `runtime` container. -<2> The `funtime` container. - -. To create storage for the `runtime` container: -+ -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi --container runtime ----- -+ -.Output of the command: -+ -[source,terminal] ----- -✓ Added storage store to nodejs-testing-xnfg - Please use `odo push` command to make the storage accessible to the component ----- - -. Verify that the storage is now attached to your component by listing all storage in the component: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs-testing-xnfg' has the following storage attached: - NAME SIZE PATH CONTAINER STATE - store 1Gi /data runtime Not Pushed ----- - -. 
Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc b/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc deleted file mode 100644 index 8237f11d6462..000000000000 --- a/modules/developer-cli-odo-adding-storage-to-the-application-components.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE -[id="adding-storage-to-the-application-components_{context}"] -= Adding storage to the application components - -Use the `odo storage` command to add persistent data to your application. Examples of data that must persist include database files, dependencies, and build artifacts, such as a `.m2` Maven directory. - -.Procedure - -. Add the storage to your component: -+ -[source,terminal] ----- -$ odo storage create <storage_name> --path=<path_to_the_directory> --size=<size> ----- - -. Push the storage to the cluster: -+ -[source,terminal] ----- -$ odo push ----- - -. Verify that the storage is now attached to your component by listing all storage in the component: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs' has the following storage attached: -NAME SIZE PATH STATE -mystorage 1Gi /data Pushed ----- - -. Delete the storage from your component: -+ -[source,terminal] ----- -$ odo storage delete <storage_name> ----- - -. List all storage to verify that the storage state is `Locally Deleted`: -+ -[source,terminal] ----- -$ odo storage list ----- -+ -.Example output -+ -[source,terminal] ----- -The component 'nodejs' has the following storage attached: -NAME SIZE PATH STATE -mystorage 1Gi /data Locally Deleted ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-configuring-debugging-parameters.adoc b/modules/developer-cli-odo-configuring-debugging-parameters.adoc deleted file mode 100644 index 51c14736c32b..000000000000 --- a/modules/developer-cli-odo-configuring-debugging-parameters.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc - -:_content-type: PROCEDURE -[id="configuring-debugging-parameters_{context}"] - -= Configuring debugging parameters - -You can specify a remote port with `odo config` command and a local port with the `odo debug` command. - -.Procedure - -* To set a remote port on which the debugging agent should run, run: -+ -[source,terminal] ----- -$ odo config set DebugPort 9292 ----- -+ -[NOTE] -==== -You must redeploy your component for this value to be reflected on the component. -==== - -* To set a local port to port forward, run: -+ -[source,terminal] ----- -$ odo debug port-forward --local-port 9292 ----- -+ -[NOTE] -==== -The local port value does not persist. You must provide it every time you need to change the port. 
-==== diff --git a/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc b/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc deleted file mode 100644 index 4835230bfbf4..000000000000 --- a/modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-java-application-with-a-database - -:_content-type: PROCEDURE -[id="connecting-a-java-application-to-a-database_{context}"] -= Connecting a Java application to a database - -To connect your Java application to the database, use the `odo link` command. - -.Procedure - -. Display the list of services: -+ -[source,terminal] ----- -$ odo service list ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -Database/sampledatabase 6m31s ----- - -. Connect the database to your application: -+ -[source,terminal] ----- -$ odo link Database/sampledatabase ----- - -. Push the changes to your cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -After the link has been created and pushed, a secret that contains the database connection data is created. - -. Check the component for values injected from the database service: -+ -[source,sh] ----- -$ odo exec -- bash -c 'env | grep DATABASE' -declare -x DATABASE_CLUSTERIP="10.106.182.173" -declare -x DATABASE_DB_NAME="sampledb" -declare -x DATABASE_DB_PASSWORD="samplepwd" -declare -x DATABASE_DB_USER="sampleuser" ----- - -. Open the URL of your Java application and navigate to the `CreatePerson.xhtml` data entry page. Enter a username and age by using the form. Click *Save*. -+ -Note that now you can see the data in the database by clicking the *View Persons Record List* link. -+ -You can also use a CLI tool such as `psql` to manipulate the database. diff --git a/modules/developer-cli-odo-connecting-the-database.adoc b/modules/developer-cli-odo-connecting-the-database.adoc deleted file mode 100644 index b60d9afcef5e..000000000000 --- a/modules/developer-cli-odo-connecting-the-database.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -[id="Connecting-the-database-to-the-front-end-application_{context}"] -= Connecting the database to the front-end application - -. Link the database to the front-end service: -+ -[source,terminal] ----- -$ odo link mongodb-persistent ----- -+ -.Example output -[source,terminal] ----- - ✓ Service mongodb-persistent has been successfully linked from the component nodejs-nodejs-ex-mhbb - -Following environment variables were added to nodejs-nodejs-ex-mhbb component: -- database_name -- password -- uri -- username -- admin_password ----- - -. See the environment variables of the application and the database in the pod: - -.. Get the pod name: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -mongodb-1-gsznc 1/1 Running 0 28m -nodejs-nodejs-ex-mhbb-app-4-vkn9l 1/1 Running 0 1m ----- - -.. Connect to the pod: -+ -[source,terminal] ----- -$ oc rsh nodejs-nodejs-ex-mhbb-app-4-vkn9l ----- - -.. 
Check the environment variables: -+ -[source,terminal] ----- -sh-4.2$ env ----- -+ -.Example output -[source,terminal] ----- -uri=mongodb://172.30.126.3:27017 -password=dHIOpYneSkX3rTLn -database_name=sampledb -username=user43U -admin_password=NCn41tqmx7RIqmfv ----- - -. Open the URL in the browser and notice the database configuration in the bottom right: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Example output -[source,terminal] ----- -Request information -Page view count: 24 - -DB Connection Info: -Type: MongoDB -URL: mongodb://172.30.126.3:27017/sampledb ----- diff --git a/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc b/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc deleted file mode 100644 index 8351710b60c3..000000000000 --- a/modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="connecting-your-application-to-multiple-services-using-openshift-service-catalog_{context}"] - -= Connecting your application to multiple services using OpenShift Service Catalog - -The OpenShift service catalog is an implementation of the Open Service Broker API (OSB API) for Kubernetes. You can use it to connect applications deployed in {product-title} to a variety of services. - -.Prerequisites - -* You have a running {product-title} cluster. -* The service catalog is installed and enabled on your cluster. - -.Procedure - -* To list the services: -+ -[source,terminal] ----- -$ odo catalog list services ----- - -* To use service catalog-related operations: -+ -[source,terminal] ----- -$ odo service <verb> <service_name> ----- diff --git a/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc b/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc deleted file mode 100644 index c88d9727c68e..000000000000 --- a/modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module is included in the following assemblies: -// -// cli_reference/developer_cli_odo/using-devfiles-in-odo.adoc - -:_content-type: PROCEDURE -[id="converting-an-s2i-component-into-a-devfile-component_{context}"] -= Converting an S2I component into a devfile component - -With `{odo-title}`, you can create both Source-to-Image (S2I) and devfile components. If you have an existing S2I component, you can convert it into a devfile component using the `odo utils` command. - -.Procedure - -Run all the commands from the S2I component directory. - -. Run the `odo utils convert-to-devfile` command, which creates `devfile.yaml` and `env.yaml` based on your component: -+ -[source,terminal] ----- -$ odo utils convert-to-devfile ----- - -. Push the component to your cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -[NOTE] -==== -If the devfile component deployment failed, delete it by running: `odo delete -a` -==== -+ - -. Verify that the devfile component deployed successfully: -+ -[source,terminal] ----- -$ odo list ----- - -. 
Delete the S2I component: -+ -[source,terminal] ----- -$ odo delete --s2i ----- diff --git a/modules/developer-cli-odo-creating-a-database-with-odo.adoc b/modules/developer-cli-odo-creating-a-database-with-odo.adoc deleted file mode 100644 index 1a17ec3f3593..000000000000 --- a/modules/developer-cli-odo-creating-a-database-with-odo.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: - -:_content-type: PROCEDURE -[id="creating-a-database-with-odo_{context}"] - -= Creating a database with `odo` - -To create a database, you must have an access to the database Operator. For this example, Dev4Devs PostgreSQL Operator is used. - -.Procedure - - -. View the list of the services in your project: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -.Example output ----- -Operators available in the cluster -NAME CRDs -postgresql-operator.v0.1.1 Backup, Database ----- - -. Store the YAML of the service in a file: -+ -[source,terminal] ----- -$ odo service create postgresql-operator.v0.1.1/Database --dry-run > db.yaml ----- - -. Add the following values under the `metadata:` section in the `db.yaml` file: -+ -[source,yaml] ----- - name: sampledatabase - annotations: - service.binding/db.name: 'path={.spec.databaseName}' - service.binding/db.password: 'path={.spec.databasePassword}' - service.binding/db.user: 'path={.spec.databaseUser}' ----- -+ -This configuration ensures that when a database service is started, appropriate annotations are added to it. Annotations help the Service Binding Operator in injecting the values for `databaseName`, `databasePassword`, and `databaseUser` into the application. - -. Change the following values under the `spec:` section of the YAML file: -+ -[source,yaml] ----- - databaseName: "sampledb" - databasePassword: "samplepwd" - databaseUser: "sampleuser" ----- - -. Create a database from the YAML file: -+ -[source,terminal] ----- -$ odo service create --from-file db.yaml ----- -+ -A database instance is now present in your project. diff --git a/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc b/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc deleted file mode 100644 index b5ec4fd3bb21..000000000000 --- a/modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc +++ /dev/null @@ -1,194 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/ - -:_content-type: PROCEDURE -[id="developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster_{context}"] - -= Creating a Java application by using a devfile in a disconnected cluster - -[WARNING] -==== -This procedure is using external dependencies such as `quay.io/eclipse/che-java11-maven:nightly` or an example application `springboot-ex` that are not maintained by Red Hat. These dependencies are not maintained with the documentation and their functionality cannot be guaranteed. -==== - -.Prerequisites -* You have created and logged into a disconnected cluster. -* You have added `quay.io`, `registry.access.redhat.com`, `apache.org`, `quayio-production-s3.s3.amazonaws.com` URLs in your proxy configuration. - -.Procedure - -. 
Define your Java application in a devfile: -+ -.Example of a devfile -[source,yaml] ----- -schemaVersion: 2.0.0 -metadata: - name: java-maven - version: 1.1.0 -starterProjects: - - name: springbootproject - git: - remotes: - origin: "https://github.com/odo-devfiles/springboot-ex.git" -components: - - name: tools - container: - image: quay.io/eclipse/che-java11-maven:nightly - memoryLimit: 512Mi - mountSources: true - endpoints: - - name: 'http-8080' - targetPort: 8080 - volumeMounts: - - name: m2 - path: /home/user/.m2 - - name: m2 - volume: {} -commands: - - id: mvn-package - exec: - component: tools - commandLine: "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" - group: - kind: build - isDefault: true - - id: run - exec: - component: tools - commandLine: "java -jar target/*.jar" - group: - kind: run - isDefault: true - - id: debug - exec: - component: tools - commandLine: "java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=${DEBUG_PORT},suspend=n -jar target/*.jar" - group: - kind: debug - isDefault: true ----- - -. Create a Java application: -+ -[source,terminal] ----- -$ odo create java-maven --devfile <path-to-your-devfile> --starter ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking devfile existence [87716ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [107247ns] - ✓ Validating devfile component [396971ns] - - Starter Project - ✓ Downloading starter project springbootproject from https://github.com/odo-devfiles/springboot-ex.git [2s] - -Please use `odo push` command to create the component with source deployed ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -I0224 14:43:18.802512 34741 util.go:727] HTTPGetRequest: https://raw.githubusercontent.com/openshift/odo/master/build/VERSION -I0224 14:43:18.833631 34741 context.go:115] absolute devfile path: '/Users/pkumari/go/src/github.com/openshift/odo/testim/devfile.yaml' -[...] -Downloaded from central: https://repo.maven.apache.org/maven2/org/codehaus/plexus/plexus-utils/3.2.1/plexus-utils-3.2.1.jar (262 kB at 813 kB/s) -[INFO] Replacing main artifact with repackaged archive -[INFO] ------------------------------------------------------------------------ -[INFO] BUILD SUCCESS -[INFO] ------------------------------------------------------------------------ -[INFO] Total time: 19.638 s -[INFO] Finished at: 2021-02-24T08:59:30Z -[INFO] ------------------------------------------------------------------------ - ✓ Executing mvn-package command "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" [23s] - • Executing run command "java -jar target/*.jar" ... -I0224 14:29:30.557676 34426 exec.go:27] Executing command [/opt/odo/bin/supervisord ctl start devrun] for pod: java-maven-5b8f99fcdb-9dnk6 in container: tools -devrun: started - ✓ Executing run command "java -jar target/*.jar" [3s] - -Pushing devfile component java-maven - ✓ Changes successfully pushed to component ----- - -. 
Display the logs to verify that the application has started: -+ -[source,terminal] ----- -$ odo log ----- -+ -.Example output -[source,terminal] ----- -time="2021-02-24T08:58:58Z" level=info msg="create process:devrun" -time="2021-02-24T08:58:58Z" level=info msg="create process:debugrun" -time="2021-02-24T08:59:32Z" level=debug msg="no auth required" -time="2021-02-24T08:59:32Z" level=debug msg="succeed to find process:devrun" -time="2021-02-24T08:59:32Z" level=info msg="try to start program" program=devrun -time="2021-02-24T08:59:32Z" level=info msg="success to start program" program=devrun -ODO_COMMAND_RUN is java -jar target/*.jar -Executing command java -jar target/*.jar -[...] ----- - -. Create storage for your application: -+ -[source,terminal] ----- -$ odo storage create storage-name --path /data --size 5Gi ----- -+ -.Example output -[source,terminal] ----- -✓ Added storage storage-name to java-maven - -Please use `odo push` command to make the storage accessible to the component ----- - -. Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Output -[source,terminal] ----- -✓ Waiting for component to start [310ms] - -Validation - ✓ Validating the devfile [100798ns] - -Creating Kubernetes resources for component java-maven - ✓ Waiting for component to start [30s] - ✓ Waiting for component to start [303ms] - -Applying URL changes - ✓ URLs are synced with the cluster, no changes are required. - -Syncing to component java-maven - ✓ Checking file changes for pushing [5ms] - ✓ Syncing files to the component [4s] - -Executing devfile commands for component java-maven - ✓ Waiting for component to start [526ms] - ✓ Executing mvn-package command "mvn -Dmaven.repo.local=/home/user/.m2/repository -Dhttp.proxyHost=<proxy-host> -Dhttp.proxyPort=<proxy-port> -Dhttps.proxyHost=<proxy-host> -Dhttps.proxyPort=<proxy-port> package" [10s] - ✓ Executing run command "java -jar target/*.jar" [3s] - -Pushing devfile component java-maven - ✓ Changes successfully pushed to component ----- diff --git a/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc b/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc deleted file mode 100644 index f604baea521a..000000000000 --- a/modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc +++ /dev/null @@ -1,110 +0,0 @@ -= Creating a Java MicroServices JPA application - -With `odo`, you can create and manage a sample Java MicroServices JPA application. - -.Procedure - -. Clone the sample application: -+ -[source,terminal] ----- -$ git clone -b jpa-sample https://github.com/redhat-developer/application-stack-samples.git ----- - -. Navigate to the application directory: -+ -[source,terminal] ----- -$ cd ./application-stack-samples/jpa ----- - -. Initialize the project: -+ -[source,terminal] ----- -$ odo create java-openliberty java-application ----- - -. Push the application to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -The application is now deployed to the cluster. - -. View the status of the cluster by streaming the {product-title} logs to the terminal: -+ -[source,terminal] ----- -$ odo log ----- -+ -Notice the test failures and `UnknownDatabaseHostException` error. 
This is because your application does not have a database yet: -+ -[source,terminal] ----- -[INFO] [err] java.net.UnknownHostException: ${DATABASE_CLUSTERIP} -[INFO] [err] at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:220) -[INFO] [err] at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403) -[INFO] [err] at java.base/java.net.Socket.connect(Socket.java:609) -[INFO] [err] at org.postgresql.core.PGStream.<init>(PGStream.java:68) -[INFO] [err] at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:144) -[INFO] [err] ... 86 more -[ERROR] Tests run: 2, Failures: 1, Errors: 1, Skipped: 0, Time elapsed: 0.706 s <<< FAILURE! - in org.example.app.it.DatabaseIT -[ERROR] testGetAllPeople Time elapsed: 0.33 s <<< FAILURE! -org.opentest4j.AssertionFailedError: Expected at least 2 people to be registered, but there were only: [] ==> expected: <true> but was: <false> - at org.example.app.it.DatabaseIT.testGetAllPeople(DatabaseIT.java:57) - -[ERROR] testGetPerson Time elapsed: 0.047 s <<< ERROR! -java.lang.NullPointerException - at org.example.app.it.DatabaseIT.testGetPerson(DatabaseIT.java:41) - -[INFO] -[INFO] Results: -[INFO] -[ERROR] Failures: -[ERROR] DatabaseIT.testGetAllPeople:57 Expected at least 2 people to be registered, but there were only: [] ==> expected: <true> but was: <false> -[ERROR] Errors: -[ERROR] DatabaseIT.testGetPerson:41 NullPointer -[INFO] -[ERROR] Tests run: 2, Failures: 1, Errors: 1, Skipped: 0 -[INFO] -[ERROR] Integration tests failed: There are test failures. ----- - -. Create an ingress URL to access the application: -+ -[source,terminal] ----- -$ odo url create --port 8080 ----- - -. Push the changes to your cluster: -+ -[source,terminal] ----- -$ odo push ----- - -. Display the created URL: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Example output -[source,terminal] ----- -Found the following URLs for component mysboproj -NAME STATE URL PORT SECURE KIND -java-application-8080 Pushed http://java-application-8080.apps-crc.testing 8080 false ingress ----- -+ -The application is now deployed to the cluster and you can access it by using the URL that is created. - -. Use the URL to navigate to the `CreatePerson.xhtml` data entry page and enter a username and age by using the form. Click *Save*. -+ -Note that you cannot see the data by clicking the *View Persons Record List* link since your application does not have a database connected yet. diff --git a/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc b/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc deleted file mode 100644 index 266c404ec41b..000000000000 --- a/modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/ - -:_content-type: PROCEDURE -[id="developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster_{context}"] - -= Creating a NodeJS application by using a devfile in a disconnected cluster - -[WARNING] -==== -This procedure is using external dependencies such as `nodejs-ex.git` application that are not maintained by Red Hat. These dependencies are not maintained with the documentation and their functionality cannot be guaranteed. 
-==== - -.Prerequisites -* You have created and logged into a disconnected cluster. -* You have added `raw.githubusercontent.com`, `registry.access.redhat.com`, and `registry.npmjs.org` URLs in your proxy. - -.Procedure - -. Define your NodeJS application in a devfile: -+ -.Example of a devfile -[source,yaml] ----- -schemaVersion: 2.0.0 -metadata: -name: nodejs -starterProjects: -- name: nodejs-starter - git: - remotes: - origin: "https://github.com/odo-devfiles/nodejs-ex.git" -components: -- name: runtime - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000/tcp" - targetPort: 3000 - env: - - name: HTTP_PROXY - value: http://<proxy-host>:<proxy-port> - - name: HTTPS_PROXY - value: http://<proxy-host>:<proxy-port> - mountSources: true -commands: -- id: devbuild - exec: - component: runtime - commandLine: npm install - workingDir: ${PROJECTS_ROOT} - group: - kind: build - isDefault: true -- id: build - exec: - component: runtime - commandLine: npm install - workingDir: ${PROJECTS_ROOT} - group: - kind: build -- id: devrun - exec: - component: runtime - commandLine: npm start - workingDir: ${PROJECTS_ROOT} - group: - kind: run - isDefault: true -- id: run - exec: - component: runtime - commandLine: npm start - workingDir: ${PROJECTS_ROOT} - group: - kind: run ----- - -. Create the application and push the changes to the cluster: -+ -[source,terminal] ----- -$ odo create nodejs --devfile <path-to-your-devfile> --starter $$ odo push ----- -+ -.Example output -[source,terminal] ----- -[...] -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- - -. Create a URL to access your application and push it to the cluster: -+ -[source,terminal] ----- -$ odo url create url1 --port 3000 --host example.com --ingress && odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Validating the devfile [145374ns] - -Creating Kubernetes resources for component nodejs - ✓ Waiting for component to start [14s] - -Applying URL changes - ✓ URL url1: http://url1.abcdr.com/ created - -Syncing to component nodejs - ✓ Checking file changes for pushing [2ms] - ✓ Syncing files to the component [3s] - -Executing devfile commands for component nodejs - ✓ Executing devbuild command "npm install" [4s] - ✓ Executing devrun command "npm start" [3s] - -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- - -. Add the storage to your application -+ -[source,terminal] ----- -$ odo storage create <storage-name> --path /data --size 5Gi ----- -+ -.Example output -[source,terminal] ----- -✓ Added storage abcde to nodejs - -Please use `odo push` command to make the storage accessible to the component ----- - -. 
Push the changes to the cluster: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-creating-a-project.adoc b/modules/developer-cli-odo-creating-a-project.adoc deleted file mode 100644 index acf1ffc53218..000000000000 --- a/modules/developer-cli-odo-creating-a-project.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects - -:_content-type: PROCEDURE -[id="creating-a-project_{context}"] -= Creating a project - -Create a project to keep your source code, tests, and libraries organized in a separate single unit. - -.Procedure - -. Log in to an {product-title} cluster: -+ -[source,terminal] ----- -$ odo login -u developer -p developer ----- - -. Create a project: -+ -[source,terminal] ----- -$ odo project create myproject ----- -+ -.Example output -[source,terminal] ----- - ✓ Project 'myproject' is ready for use - ✓ New project created and now using project : myproject ----- diff --git a/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc b/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc deleted file mode 100644 index debfd096a114..000000000000 --- a/modules/developer-cli-odo-creating-a-service-from-an-operator.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="creating-a-service-from-an-operator_{context}"] - -= Creating a service from an Operator - -If an Operator has valid values defined in its `metadata` to start the requested service, you can use the service with `odo service create`. - -. Print the YAML of the service as a file on your local drive: -+ -[source,terminal] ----- -$ oc get csv/etcdoperator.v0.9.4 -o yaml ----- - -. Verify that the values of the service are valid: -+ -[source,terminal] ----- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - name: example -spec: - size: 3 - version: 3.2.13 ----- - -. Start an `EtcdCluster` service from the `etcdoperator.v0.9.4` Operator: -+ -[source,terminal] ----- -$ odo service create etcdoperator.v0.9.4 EtcdCluster ----- - -. 
Verify that a service has started: -+ -[source,terminal] ----- -$ oc get EtcdCluster ----- diff --git a/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc b/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc deleted file mode 100644 index 77b3746a88ad..000000000000 --- a/modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo -// * cli_reference/developer_cli_odo/using-odo-in-a-restricted-environment/creating-and-deploying-a-component-to-the-restricted-cluster -ifeval::["{context}" == "creating-a-single-component-application-with-odo"] -:single: -endif::[] -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:multi: -endif::[] - -:_content-type: PROCEDURE -[id="creating-a-nodejs-application-with-odo_{context}"] -= Creating a Node.js application with {odo-title} - -To create a Node.js component, download the Node.js application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -ifdef::single[] - -. Create a directory for your components: -+ -[source,terminal] ----- -$ mkdir my_components && cd my_components ----- - -. Download the example Node.js application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift/nodejs-ex ----- -endif::single[] - -ifdef::multi[] - -. Download the example front-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift-evangelists/Wild-West-Frontend frontend ----- - -endif::multi[] - - -. Change the current directory to the directory with your application: -+ -[source,terminal] ----- -$ cd <directory_name> ----- - -. Add a component of the type Node.js to your application: -+ -[source,terminal] ----- -$ odo create nodejs ----- -+ -[NOTE] -==== -By default, the latest image is used. You can also explicitly specify an image version by using `odo create openshift/nodejs:8`. -==== - -. Push the initial source code to the component: -+ -[source,terminal] ----- -$ odo push ----- -+ -Your component is now deployed to {product-title}. - -. Create a URL and add an entry in the local configuration file as follows: -+ -[source,terminal] ----- -$ odo url create --port 8080 ----- -+ -. Push the changes. This creates a URL on the cluster. -+ -[source,terminal] ----- -$ odo push ----- -+ -. List the URLs to check the desired URL for the component. -+ -[source,terminal] ----- -$ odo url list ----- -+ -. View your deployed application using the generated URL. 
-+ -[source,terminal] ----- -$ curl <url> ----- - -ifeval::["{context}" == "creating-a-single-component-application-with-odo"] -:!single: -endif::[] -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:!multi: -endif::[] diff --git a/modules/developer-cli-odo-creating-services-from-yaml-files.adoc b/modules/developer-cli-odo-creating-services-from-yaml-files.adoc deleted file mode 100644 index f33b7a9e91e0..000000000000 --- a/modules/developer-cli-odo-creating-services-from-yaml-files.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="creating-services-from-yaml-files_{context}"] - -= Creating services from YAML files - -If the YAML definition of the service or custom resource (CR) has invalid or placeholder data, you can use the `--dry-run` flag to get the YAML definition, specify the correct values, and start the service using the corrected YAML definition. -Printing and modifying the YAML used to start a service -`{odo-title}` provides the feature to print the YAML definition of the service or CR provided by the Operator before starting a service. - -. To display the YAML of the service, run: -+ -[source,terminal] ----- -$ odo service create <operator-name> --dry-run ----- -+ -For example, to print YAML definition of `EtcdCluster` provided by the `etcdoperator.v0.9.4` Operator, run: -+ -[source,terminal] ----- -$ odo service create etcdoperator.v0.9.4 --dry-run ----- -+ -The YAML is saved as the `etcd.yaml` file. - -. Modify the `etcd.yaml` file: -+ -[source,yaml] ----- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - name: my-etcd-cluster // <1> -spec: - size: 1 // <2> - version: 3.2.13 ----- -+ -<1> Change the name from `example` to `my-etcd-cluster` -<2> Reduce the size from `3` to `1` - -. Start a service from the YAML file: -+ -[source,terminal] ----- -$ odo service create --from-file etcd.yaml ----- - -. Verify that the `EtcdCluster` service has started with one pod instead of the pre-configured three pods: -+ -[source,terminal] ----- -$ oc get pods | grep my-etcd-cluster ----- diff --git a/modules/developer-cli-odo-debugging-an-application.adoc b/modules/developer-cli-odo-debugging-an-application.adoc deleted file mode 100644 index a4fd9cc5d0ba..000000000000 --- a/modules/developer-cli-odo-debugging-an-application.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc - -:_content-type: PROCEDURE -[id="debugging-an-application_{context}"] - -= Debugging an application - -You can debug your application in `odo` with the `odo debug` command. - -.Procedure - -. Download the sample application that contains the necessary `debugrun` step within its devfile: -+ -[source,terminal] ----- -$ odo create nodejs --starter ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking devfile existence [11498ns] - ✓ Checking devfile compatibility [15714ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [17565ns] - ✓ Validating devfile component [113876ns] - -Starter Project - ✓ Downloading starter project nodejs-starter from https://github.com/odo-devfiles/nodejs-ex.git [428ms] - -Please use `odo push` command to create the component with source deployed ----- - -. 
Push the application with the `--debug` flag, which is required for all debugging deployments: -+ -[source,terminal] ----- -$ odo push --debug ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Validating the devfile [29916ns] - -Creating Kubernetes resources for component nodejs - ✓ Waiting for component to start [38ms] - -Applying URL changes - ✓ URLs are synced with the cluster, no changes are required. - -Syncing to component nodejs - ✓ Checking file changes for pushing [1ms] - ✓ Syncing files to the component [778ms] - -Executing devfile commands for component nodejs - ✓ Executing install command "npm install" [2s] - ✓ Executing debug command "npm run debug" [1s] - -Pushing devfile component nodejs - ✓ Changes successfully pushed to component ----- -+ -[NOTE] -==== -You can specify a custom debug command by using the `--debug-command="custom-step"` flag. -==== - -. Port forward to the local port to access the debugging interface: -+ -[source,terminal] ----- -$ odo debug port-forward ----- -+ -.Example output -[source,terminal] ----- -Started port forwarding at ports - 5858:5858 ----- -+ -[NOTE] -==== -You can specify a port by using the `--local-port` flag. -==== - -. Check that the debug session is running in a separate terminal window: -+ -[source,terminal] ----- -$ odo debug info ----- -+ -.Example output -[source,terminal] ----- -Debug is running for the component on the local port : 5858 ----- - -. Attach the debugger that is bundled in your IDE of choice. Instructions vary depending on your IDE, for example: link:https://code.visualstudio.com/docs/nodejs/nodejs-debugging#_remote-debugging[VSCode debugging interface]. diff --git a/modules/developer-cli-odo-deleting-an-application.adoc b/modules/developer-cli-odo-deleting-an-application.adoc deleted file mode 100644 index 3427953a2747..000000000000 --- a/modules/developer-cli-odo-deleting-an-application.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="deleting-an-application_{context}"] -= Deleting an application - -Use the `odo app delete` command to delete your application. - -.Procedure - -. List the applications in the current project: -+ -[source,terminal] ----- -$ odo app list ----- -+ -.Example output -[source,terminal] ----- - The project '<project_name>' has the following applications: - NAME - app ----- - -. List the components associated with the applications. These components will be deleted with the application: -+ -[source,terminal] ----- -$ odo component list ----- -+ -.Example output -[source,terminal] ----- - APP NAME TYPE SOURCE STATE - app nodejs-nodejs-ex-elyf nodejs file://./ Pushed ----- - -. Delete the application: -+ -[source,terminal] ----- -$ odo app delete <application_name> ----- -+ -.Example output -[source,terminal] ----- - ? Are you sure you want to delete the application: <application_name> from project: <project_name> ----- -. Confirm the deletion with `Y`. You can suppress the confirmation prompt using the `-f` flag. 
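For scripted cleanup, the confirmation prompt can be skipped entirely. A minimal sketch, assuming the application is named `app` as in the example output above:

[source,terminal]
----
$ odo app delete app -f
----

This removes the application and its associated components without the interactive `Y` confirmation shown in the previous step.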
diff --git a/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc b/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc deleted file mode 100644 index 7459638ad96e..000000000000 --- a/modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -:_content-type: PROCEDURE -[id="deploying-a-database-in-interactive-mode_{context}"] -= Deploying a database in interactive mode - -{odo-title} provides a command-line interactive mode which simplifies deployment. - -.Procedure - -* Run the interactive mode and answer the prompts: -+ -[source,terminal] ----- -$ odo service create ----- -+ -.Example output -[source,terminal] ----- -? Which kind of service do you wish to create database -? Which database service class should we use mongodb-persistent -? Enter a value for string property DATABASE_SERVICE_NAME (Database Service Name): mongodb -? Enter a value for string property MEMORY_LIMIT (Memory Limit): 512Mi -? Enter a value for string property MONGODB_DATABASE (MongoDB Database Name): sampledb -? Enter a value for string property MONGODB_VERSION (Version of MongoDB Image): 3.2 -? Enter a value for string property VOLUME_CAPACITY (Volume Capacity): 1Gi -? Provide values for non-required properties No -? How should we name your service mongodb-persistent -? Output the non-interactive version of the selected options No -? Wait for the service to be ready No - ✓ Creating service [32ms] - ✓ Service 'mongodb-persistent' was created -Progress of the provisioning will not be reported and might take a long time. -You can see the current status by executing 'odo service list' ----- - -[NOTE] -==== -Your password or username will be passed to the front-end application as environment variables. -==== diff --git a/modules/developer-cli-odo-deploying-a-database-manually.adoc b/modules/developer-cli-odo-deploying-a-database-manually.adoc deleted file mode 100644 index 8a2f83462279..000000000000 --- a/modules/developer-cli-odo-deploying-a-database-manually.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -[id="deploying-a-database-manually_{context}"] -= Deploying a database manually - -. List the available services: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -.Example output -[source,terminal] ----- -NAME PLANS -django-psql-persistent default -jenkins-ephemeral default -jenkins-pipeline-example default -mariadb-persistent default -mongodb-persistent default -mysql-persistent default -nodejs-mongo-persistent default -postgresql-persistent default -rails-pgsql-persistent default ----- - -. 
Choose the `mongodb-persistent` type of service and see the required parameters: -+ -[source,terminal] ----- -$ odo catalog describe service mongodb-persistent ----- -+ -.Example output -[source,terminal] ----- - *********************** | ***************************************************** - Name | default - ----------------- | ----------------- - Display Name | - ----------------- | ----------------- - Short Description | Default plan - ----------------- | ----------------- - Required Params without a | - default value | - ----------------- | ----------------- - Required Params with a default | DATABASE_SERVICE_NAME - value | (default: 'mongodb'), - | MEMORY_LIMIT (default: - | '512Mi'), MONGODB_VERSION - | (default: '3.2'), - | MONGODB_DATABASE (default: - | 'sampledb'), VOLUME_CAPACITY - | (default: '1Gi') - ----------------- | ----------------- - Optional Params | MONGODB_ADMIN_PASSWORD, - | NAMESPACE, MONGODB_PASSWORD, - | MONGODB_USER ----- - -. Pass the required parameters as flags and wait for the deployment of the database: -+ -[source,terminal] ----- -$ odo service create mongodb-persistent --plan default --wait -p DATABASE_SERVICE_NAME=mongodb -p MEMORY_LIMIT=512Mi -p MONGODB_DATABASE=sampledb -p VOLUME_CAPACITY=1Gi ----- diff --git a/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc b/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc deleted file mode 100644 index 6e5bd97d0c8b..000000000000 --- a/modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/creating-a-java-application-using-devfile - -:_content-type: PROCEDURE -[id="deploying-a-java-application-using-a-devfile_{context}"] -= Deploying a Java application using a devfile - -In this section, you will learn how to deploy a sample Java project that uses Maven and Java 8 JDK using a devfile. - -.Procedure - -. Create a directory to store the source code of your component: -+ -[source,terminal] ----- -$ mkdir <directory-name> ----- - -. Create a component configuration of Spring Boot component type named `myspring` and download its sample project: -+ -[source,terminal] ----- -$ odo create java-springboot myspring --starter ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Validation -✓ Checking devfile compatibility [195728ns] -✓ Creating a devfile component from registry: DefaultDevfileRegistry [170275ns] -✓ Validating devfile component [281940ns] - -Please use `odo push` command to create the component with source deployed ----- -+ -The `odo create` command downloads the associated `devfile.yaml` file from the recorded devfile registries. - -. List the contents of the directory to confirm that the devfile and the sample Java application were downloaded: -+ -[source,terminal] ----- -$ ls ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -README.md devfile.yaml pom.xml src ----- - -. Create a URL to access the deployed component: -+ -[source,terminal] ----- -$ odo url create --host apps-crc.testing ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -✓ URL myspring-8080.apps-crc.testing created for component: myspring - -To apply the URL configuration changes, please use odo push ----- -+ -[NOTE] -==== -You must use your cluster host domain name when creating the URL. -==== - -. 
Push the component to the cluster: -+ -[source,terminal] ----- -$ odo push ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Validation - ✓ Validating the devfile [81808ns] - -Creating Kubernetes resources for component myspring - ✓ Waiting for component to start [5s] - -Applying URL changes - ✓ URL myspring-8080: http://myspring-8080.apps-crc.testing created - -Syncing to component myspring - ✓ Checking files for pushing [2ms] - ✓ Syncing files to the component [1s] - -Executing devfile commands for component myspring - ✓ Executing devbuild command "/artifacts/bin/build-container-full.sh" [1m] - ✓ Executing devrun command "/artifacts/bin/start-server.sh" [2s] - -Pushing devfile component myspring - ✓ Changes successfully pushed to component ----- - -. List the URLs of the component to verify that the component was pushed successfully: -+ -[source,terminal] ----- -$ odo url list ----- -+ -The previous command produces the following output: -+ -[source,terminal] ----- -Found the following URLs for component myspring -NAME URL PORT SECURE -myspring-8080 http://myspring-8080.apps-crc.testing 8080 false ----- - -. View your deployed application by using the generated URL: -+ -[source,terminal] ----- -$ curl http://myspring-8080.apps-crc.testing ----- diff --git a/modules/developer-cli-odo-deploying-the-back-end-component.adoc b/modules/developer-cli-odo-deploying-the-back-end-component.adoc deleted file mode 100644 index 99e01e88ef9c..000000000000 --- a/modules/developer-cli-odo-deploying-the-back-end-component.adoc +++ /dev/null @@ -1,214 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="deploying-the-back-end-component_{context}"] - -= Deploying the back-end component - -To create a Java component, import the Java builder image, download the Java application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -. Import `openjdk18` into the cluster: -+ -[source,terminal] ----- -$ oc import-image openjdk18 \ ---from=registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift --confirm ----- - -. Tag the image as `builder` to make it accessible for {odo-title}: -+ -[source,terminal] ----- -$ oc annotate istag/openjdk18:latest tags=builder ----- - -. 
Run `odo catalog list components` to see the created image: -+ -[source,terminal] ----- -$ odo catalog list components ----- -+ -.Example output -[source,terminal] ----- -Odo Devfile Components: -NAME DESCRIPTION REGISTRY -java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry -java-openliberty Open Liberty microservice in Java DefaultDevfileRegistry -java-quarkus Upstream Quarkus with Java+GraalVM DefaultDevfileRegistry -java-springboot Spring Boot® using Java DefaultDevfileRegistry -nodejs Stack with NodeJS 12 DefaultDevfileRegistry - -Odo OpenShift Components: -NAME PROJECT TAGS SUPPORTED -java openshift 11,8,latest YES -dotnet openshift 2.1,3.1,latest NO -golang openshift 1.13.4-ubi7,1.13.4-ubi8,latest NO -httpd openshift 2.4-el7,2.4-el8,latest NO -nginx openshift 1.14-el7,1.14-el8,1.16-el7,1.16-el8,latest NO -nodejs openshift 10-ubi7,10-ubi8,12-ubi7,12-ubi8,latest NO -perl openshift 5.26-el7,5.26-ubi8,5.30-el7,latest NO -php openshift 7.2-ubi7,7.2-ubi8,7.3-ubi7,7.3-ubi8,latest NO -python openshift 2.7-ubi7,2.7-ubi8,3.6-ubi7,3.6-ubi8,3.8-ubi7,3.8-ubi8,latest NO -ruby openshift 2.5-ubi7,2.5-ubi8,2.6-ubi7,2.6-ubi8,2.7-ubi7,latest NO -wildfly openshift 10.0,10.1,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,8.1,9.0,latest NO ----- - -. Create a directory for your components: -+ -[source,terminal] ----- -$ mkdir my_components && cd my_components ----- - -. Download the example back-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift-evangelists/Wild-West-Backend backend ----- - -. Change to the back-end source directory: -+ -[source,terminal] ----- -$ cd backend ----- - -. Check that you have the correct files in the directory: -+ -[source,terminal] ----- -$ ls ----- -+ -.Example output -[source,terminal] ----- -debug.sh pom.xml src ----- - -. Build the back-end source files with Maven to create a JAR file: -+ -[source,terminal] ----- -$ mvn package ----- -+ -.Example output -[source,terminal] ----- -... -[INFO] -------------------------------------- -[INFO] BUILD SUCCESS -[INFO] -------------------------------------- -[INFO] Total time: 2.635 s -[INFO] Finished at: 2019-09-30T16:11:11-04:00 -[INFO] Final Memory: 30M/91M -[INFO] -------------------------------------- ----- - -. Create a component configuration of Java component-type named `backend`: -+ -[source,terminal] ----- -$ odo create --s2i openjdk18 backend --binary target/wildwest-1.0.jar ----- -+ -.Example output -[source,terminal] ----- - ✓ Validating component [1ms] - Please use `odo push` command to create the component with source deployed ----- -+ -Now the configuration file `config.yaml` is in the local directory of the back-end component that contains information about the component for deployment. - -. Check the configuration settings of the back-end component in the `config.yaml` file using: -+ -[source,terminal] ----- -$ odo config view ----- -+ -.Example output -[source,terminal] ----- -COMPONENT SETTINGS ------------------------------------------------- -PARAMETER CURRENT_VALUE -Type openjdk18 -Application app -Project myproject -SourceType binary -Ref -SourceLocation target/wildwest-1.0.jar -Ports 8080/TCP,8443/TCP,8778/TCP -Name backend -MinMemory -MaxMemory -DebugPort -Ignore -MinCPU -MaxCPU ----- - -. Push the component to the {product-title} cluster. 
-+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [6ms] - -Configuration changes - ✓ Initializing component - ✓ Creating component [124ms] - -Pushing to component backend of type binary - ✓ Checking files for pushing [1ms] - ✓ Waiting for component to start [48s] - ✓ Syncing files to the component [811ms] - ✓ Building component [3s] ----- -+ -Using `odo push`, {product-title} creates a container to host the back-end component, deploys the container into a pod running on the {product-title} cluster, and starts the `backend` component. - -. Validate: - -* The status of the action in {odo-title}: -+ -[source,terminal] ----- -$ odo log -f ----- -+ -.Example output -[source,terminal] ----- -2019-09-30 20:14:19.738 INFO 444 --- [ main] c.o.wildwest.WildWestApplication : Starting WildWestApplication v1.0 onbackend-app-1-9tnhc with PID 444 (/deployments/wildwest-1.0.jar started by jboss in /deployments) ----- - -* The status of the back-end component: -+ -[source,terminal] ----- -$ odo list ----- -+ -.Example output -[source,terminal] ----- -APP NAME TYPE SOURCE STATE -app backend openjdk18 file://target/wildwest-1.0.jar Pushed ----- diff --git a/modules/developer-cli-odo-deploying-the-front-end-component.adoc b/modules/developer-cli-odo-deploying-the-front-end-component.adoc deleted file mode 100644 index 269e3da609ff..000000000000 --- a/modules/developer-cli-odo-deploying-the-front-end-component.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -// * cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc - -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:multi: -endif::[] -ifeval::["{context}" == "creating-an-application-with-a-database"] -:database: -endif::[] - -:_content-type: PROCEDURE -[id="deploying-the-front-end-component_{context}"] - -= Deploying the front-end component - -To create and deploy a front-end component, download the Node.js application and push the source code to your cluster with `{odo-title}`. - -.Procedure - -. Download the example front-end application: -+ -[source,terminal] ----- -$ git clone https://github.com/openshift/nodejs-ex frontend ----- - -. Change the current directory to the front-end directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. List the contents of the directory to see that the front end is a Node.js application. -+ -[source,terminal] ----- -$ ls ----- -+ -.Example output -[source,terminal] ----- -README.md openshift server.js views -helm package.json tests ----- -+ -[NOTE] -==== -The front-end component is written in an interpreted language (Node.js); it does not need to be built. -==== - -. Create a component configuration of Node.js component-type named `frontend`: -+ -[source,terminal] ----- -$ odo create --s2i nodejs frontend ----- -+ -.Example output -[source,terminal] ----- - ✓ Validating component [5ms] -Please use `odo push` command to create the component with source deployed ----- - -ifdef::database[] -. Create a URL to access the frontend interface. -+ -[source,terminal] ----- -$ odo url create myurl ----- -+ -.Example output -[source,terminal] ----- - ✓ URL myurl created for component: nodejs-nodejs-ex-pmdp ----- - -. Push the component to the {product-title} cluster. 
-+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [7ms] - - Configuration changes - ✓ Initializing component - ✓ Creating component [134ms] - - Applying URL changes - ✓ URL myurl: http://myurl-app-myproject.192.168.42.79.nip.io created - - Pushing to component nodejs-nodejs-ex-mhbb of type local - ✓ Checking files for pushing [657850ns] - ✓ Waiting for component to start [6s] - ✓ Syncing files to the component [408ms] - ✓ Building component [7s] - ✓ Changes successfully pushed to component ----- -endif::database[] - -ifdef::multi[] -. Push the component to a running container. -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [8ms] - -Configuration changes - ✓ Initializing component - ✓ Creating component [83ms] - -Pushing to component frontend of type local - ✓ Checking files for pushing [2ms] - ✓ Waiting for component to start [45s] - ✓ Syncing files to the component [3s] - ✓ Building component [18s] - ✓ Changes successfully pushed to component ----- -endif::multi[] - -ifeval::["{context}" == "creating-a-multicomponent-application-with-odo"] -:!multi: -endif::[] -ifeval::["{context}" == "creating-an-application-with-a-database"] -:!database: -endif::[] diff --git a/modules/developer-cli-odo-developer-setup.adoc b/modules/developer-cli-odo-developer-setup.adoc deleted file mode 100644 index 6f12c5abc80e..000000000000 --- a/modules/developer-cli-odo-developer-setup.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="developer-setup_{context}"] - -= Developer setup - -With {odo-title} you can create and deploy application on {product-title} clusters from a terminal. Code editor plugins use {odo-title} which allows users to interact with {product-title} clusters from their IDE terminals. Examples of plugins that use {odo-title}: VS Code OpenShift Connector, OpenShift Connector for Intellij, Codewind for Eclipse Che. - -{odo-title} works on Windows, macOS, and Linux operating systems and from any terminal. {odo-title} provides autocompletion for bash and zsh command line shells. - -{odo-title} supports Node.js and Java components. diff --git a/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc b/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc deleted file mode 100644 index 3478a5de5959..000000000000 --- a/modules/developer-cli-odo-exposing-the-components-to-the-public.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc - -:_content-type: PROCEDURE -[id="exposing-the-components-to-the-public_{context}"] - -= Exposing components to the public - -.Procedure - -. Navigate to the `frontend` directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. Create an external URL for the application: -+ -[source,terminal] ----- -$ odo url create frontend --port 8080 ----- -+ -.Example output -[source,terminal] ----- - ✓ URL frontend created for component: frontend - -To create URL on the OpenShift cluster, use `odo push` ----- - -. 
Apply the changes: -+ -[source,terminal] ----- -$ odo push ----- -+ -.Example output -[source,terminal] ----- -Validation - ✓ Checking component [21ms] - -Configuration changes - ✓ Retrieving component data [35ms] - ✓ Applying configuration [29ms] - -Applying URL changes - ✓ URL frontend: http://frontend-app-myproject.192.168.42.79.nip.io created - -Pushing to component frontend of type local - ✓ Checking file changes for pushing [1ms] - ✓ No file changes detected, skipping build. Use the '-f' flag to force the build. ----- - -. Open the URL in a browser to view the application. - -[NOTE] -==== -If an application requires permissions to the active service account to access the {product-title} namespace and delete active pods, the following error may occur when looking at `odo log` from the back-end component: - -`Message: Forbidden!Configured service account doesn't have access. Service account may have been revoked` - -To resolve this error, add permissions for the service account role: - -[source,terminal] ----- -$ oc policy add-role-to-group view system:serviceaccounts -n <project> ----- - -[source,terminal] ----- -$ oc policy add-role-to-group edit system:serviceaccounts -n <project> ----- - -Do not do this on a production cluster. -==== diff --git a/modules/developer-cli-odo-ignoring-files-or-patterns.adoc b/modules/developer-cli-odo-ignoring-files-or-patterns.adoc deleted file mode 100644 index 66b6f6808461..000000000000 --- a/modules/developer-cli-odo-ignoring-files-or-patterns.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -[id="ignoring-files-or-patterns_{context}"] -= Ignoring files or patterns - -You can configure a list of files or patterns to ignore by modifying the `.odoignore` file in the root directory of your application. This applies to both `odo push` and `odo watch`. - -If the `.odoignore` file does _not_ exist, the `.gitignore` file is used instead for ignoring specific files and folders. - -To ignore `.git` files, any files with the `.js` extension, and the folder `tests`, add the following to either the `.odoignore` or the `.gitignore` file: - ----- -.git -*.js -tests/ ----- - -The `.odoignore` file allows any glob expressions. \ No newline at end of file diff --git a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc b/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc deleted file mode 100644 index 34fd462aec70..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -[id="installing-odo-on-linux-on-ibm-power_{context}"] -= Installing {odo-title} on Linux on IBM Power - -[id="installing-odo-on-linux-on-ibm-power-binary_{context}"] -== Binary installation - -.Procedure - -. Obtain the binary: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le -o /usr/local/bin/odo ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- - -[id="installing-odo-on-linux-on-ibm-power-tarball_{context}"] -== Tarball installation - -.Procedure - -. Obtain the tarball: -+ -[source,terminal] ----- -# sh -c 'curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le.tar.gz | gzip -d > /usr/local/bin/odo' ----- - -. 
Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc b/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc deleted file mode 100644 index 38ab5abb40a1..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -[id="installing-odo-on-linux-on-ibm-z"] - -= Installing {odo-title} on Linux on {ibmzProductName} and {linuxoneProductName} - -== Binary installation - -.Procedure - -. Obtain the binary: -+ -[source,terminal] ----- -# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-s390x -o /usr/local/bin/odo ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- - -== Tarball installation - -.Procedure - -. Obtain the tarball: -+ -[source,terminal] ----- -# sh -c 'curl -L https://mirror.openshift.com/pub/openshift-v4/clients/odo/latest/odo-linux-s390x.tar.gz | gzip -d > /usr/local/bin/odo' ----- - -. Change the permissions on the file: -+ -[source,terminal] ----- -# chmod +x /usr/local/bin/odo ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc b/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc deleted file mode 100644 index 73dcde59a2ce..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux-rpm.adoc +++ /dev/null @@ -1,57 +0,0 @@ -:_content-type: PROCEDURE -[id="installing-odo-on-linux-rpm_{context}"] - -= Installing {odo-title} on {op-system-base-full} using an RPM - -For {op-system-base-full}, you can install the `{odo-title}` CLI as an RPM. - -.Procedure - -. Register with Red Hat Subscription Manager: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Pull the latest subscription data: -+ -[source,terminal] ----- -# subscription-manager refresh ----- - -. List the available subscriptions: -+ -[source,terminal] ----- -# subscription-manager list --available --matches '*OpenShift Developer Tools and Services*' ----- - -. In the output of the previous command, find the `Pool ID` field for your {product-title} subscription and attach the subscription to the registered system: -+ -[source,terminal] ----- -# subscription-manager attach --pool=<pool_id> ----- - -. Enable the repositories required by `{odo-title}`: -+ -[source,terminal] ----- -# subscription-manager repos --enable="ocp-tools-4.13-for-rhel-8-x86_64-rpms" ----- - -. Install the `{odo-title}` package: -+ -[source,terminal] ----- -# yum install odo ----- - -. 
Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- diff --git a/modules/developer-cli-odo-installing-odo-on-linux.adoc b/modules/developer-cli-odo-installing-odo-on-linux.adoc deleted file mode 100644 index f1598be3fc7d..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-linux.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-linux_{context}"] - -= Installing {odo-title} on Linux - -The `{odo-title}` CLI is available to download as a binary and as a tarball for multiple operating systems and architectures including: - -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|Linux|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64[odo-linux-amd64] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64.tar.gz[odo-linux-amd64.tar.gz] -|Linux on {ibmpowerProductName}|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le[odo-linux-ppc64le] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-ppc64le.tar.gz[odo-linux-ppc64le.tar.gz] -|Linux on {ibmzProductName} and {linuxoneProductName}|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-s390x[odo-linux-s390x] |link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-s390x.tar.gz[odo-linux-s390x.tar.gz] -|=== - - -.Procedure - -. Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file for your operating system and architecture. -** If you download the binary, rename it to `odo`: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64 -o odo ----- -** If you download the tarball, extract the binary: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-linux-amd64.tar.gz -o odo.tar.gz -$ tar xvzf odo.tar.gz ----- -. Change the permissions on the binary: -+ -[source,terminal] ----- -$ chmod +x <filename> ----- -. Place the `{odo-title}` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- diff --git a/modules/developer-cli-odo-installing-odo-on-macos.adoc b/modules/developer-cli-odo-installing-odo-on-macos.adoc deleted file mode 100644 index eac77e49908d..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-macos.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-macos_{context}"] - -= Installing {odo-title} on macOS - -The `{odo-title}` CLI for macOS is available to download as a binary and as a tarball. 
- -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|macOS|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64[odo-darwin-amd64]|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64.tar.gz[odo-darwin-amd64.tar.gz] -|=== - -.Procedure - -. Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file: -** If you download the binary, rename it to `odo`: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64 -o odo ----- -** If you download the tarball, extract the binary: -+ -[source,terminal] ----- -$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-darwin-amd64.tar.gz -o odo.tar.gz -$ tar xvzf odo.tar.gz ----- -. Change the permissions on the binary: -+ -[source,terminal] ----- -# chmod +x odo ----- -. Place the `{odo-title}` binary in a directory that is on your `PATH`. -+ -To check your `PATH`, execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -$ odo version ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-installing-odo-on-vs-code.adoc b/modules/developer-cli-odo-installing-odo-on-vs-code.adoc deleted file mode 100644 index ed7438a4d40a..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-vs-code.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-vs-code_{context}"] - -= Installing odo on VS Code - -The https://marketplace.visualstudio.com/items?itemName=redhat.vscode-openshift-connector[OpenShift VS Code extension] uses both `odo` and the `oc` binary to interact with your {product-title} cluster. To work with these features, install the OpenShift VS Code extension on VS Code. - -.Prerequisites - -* You have installed VS Code. - -.Procedure - -. Open VS Code. - -. Launch VS Code Quick Open with `Ctrl`+`P`. - -. Enter the following command: -+ ----- -$ ext install redhat.vscode-openshift-connector ----- diff --git a/modules/developer-cli-odo-installing-odo-on-windows.adoc b/modules/developer-cli-odo-installing-odo-on-windows.adoc deleted file mode 100644 index 934fdb7c42ee..000000000000 --- a/modules/developer-cli-odo-installing-odo-on-windows.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/installing-odo.adoc - -:_content-type: PROCEDURE -[id="installing-odo-on-windows_{context}"] - -= Installing {odo-title} on Windows - -The `{odo-title}` CLI for Windows is available to download as a binary and as an archive. - -[cols="2,1,1",options="header"] -|=== -|Operating System|Binary|Tarball -|Windows|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-windows-amd64.exe[odo-windows-amd64.exe]|link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/odo-windows-amd64.exe.zip[odo-windows-amd64.exe.zip] -|=== - -.Procedure - -. 
Navigate to the link:https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/latest/[content gateway] and download the appropriate file: -** If you download the binary, rename it to `odo.exe`. -** If you download the archive, unzip the binary with a ZIP program and then rename it to `odo.exe`. -. Move the `odo.exe` binary to a directory that is on your `PATH`. -+ -To check your `PATH`, open the command prompt and execute the following command: -+ -[source,terminal] ----- -C:\> path ----- -. Verify that `{odo-title}` is now available on your system: -+ -[source,terminal] ----- -C:\> odo version ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-linking-both-components.adoc b/modules/developer-cli-odo-linking-both-components.adoc deleted file mode 100644 index 009d6027756d..000000000000 --- a/modules/developer-cli-odo-linking-both-components.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent-application-with-odo.adoc -:_content-type: PROCEDURE -[id="linking-both-components_{context}"] - -= Linking both components - -Components running on the cluster need to be connected to interact. {product-title} provides linking mechanisms to publish communication bindings from a program to its clients. - -.Procedure - -. List all the components that are running on the cluster: -+ -[source,terminal] ----- -$ odo list ----- -+ -.Example output -[source,terminal] ----- -OpenShift Components: -APP NAME PROJECT TYPE SOURCETYPE STATE -app backend testpro openjdk18 binary Pushed -app frontend testpro nodejs local Pushed ----- - - -. Link the current front-end component to the back end: -+ -[source,terminal] ----- -$ odo link backend --port 8080 ----- -+ -.Example output -[source,terminal] ----- - ✓ Component backend has been successfully linked from the component frontend - -Following environment variables were added to frontend component: -- COMPONENT_BACKEND_HOST -- COMPONENT_BACKEND_PORT ----- -+ -The configuration information of the back-end component is added to the front-end component and the front-end component restarts. diff --git a/modules/developer-cli-odo-listing-available-devfile-components.adoc b/modules/developer-cli-odo-listing-available-devfile-components.adoc deleted file mode 100644 index d04a6f69d92f..000000000000 --- a/modules/developer-cli-odo-listing-available-devfile-components.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/creating-a-java-application-using-devfile - -:_content-type: PROCEDURE -[id="listing-available-devfile-components_{context}"] -= Listing available devfile components - -With `{odo-title}`, you can display all the components that are available for you on the cluster. Components that are available depend on the configuration of your cluster. - -.Procedure - -. 
To list available devfile components on your cluster, run: -+ -[source,terminal] ----- -$ odo catalog list components ----- -+ -The output lists the available `{odo-title}` components: -+ -[source,terminal] ----- -Odo Devfile Components: -NAME DESCRIPTION REGISTRY -java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry -java-openliberty Open Liberty microservice in Java DefaultDevfileRegistry -java-quarkus Upstream Quarkus with Java+GraalVM DefaultDevfileRegistry -java-springboot Spring Boot® using Java DefaultDevfileRegistry -nodejs Stack with NodeJS 12 DefaultDevfileRegistry - -Odo OpenShift Components: -NAME PROJECT TAGS SUPPORTED -java openshift 11,8,latest YES -dotnet openshift 2.1,3.1,latest NO -golang openshift 1.13.4-ubi7,1.13.4-ubi8,latest NO -httpd openshift 2.4-el7,2.4-el8,latest NO -nginx openshift 1.14-el7,1.14-el8,1.16-el7,1.16-el8,latest NO -nodejs openshift 10-ubi7,10-ubi8,12-ubi7,12-ubi8,latest NO -perl openshift 5.26-el7,5.26-ubi8,5.30-el7,latest NO -php openshift 7.2-ubi7,7.2-ubi8,7.3-ubi7,7.3-ubi8,latest NO -python openshift 2.7-ubi7,2.7-ubi8,3.6-ubi7,3.6-ubi8,3.8-ubi7,3.8-ubi8,latest NO -ruby openshift 2.5-ubi7,2.5-ubi8,2.6-ubi7,2.6-ubi8,2.7-ubi7,latest NO -wildfly openshift 10.0,10.1,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,8.1,9.0,latest NO ----- diff --git a/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc b/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc deleted file mode 100644 index 07b5c2f44682..000000000000 --- a/modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-the-operators.adoc - -[id="listing-available-services-from-the-operators-installed-on-the-cluster_{context}"] - -= Listing available services from the Operators installed on the cluster - -With `{odo-title}`, you can display the list of the Operators installed on your cluster, and the services they provide. - -* To list the Operators installed in current project, run: -+ -[source,terminal] ----- -$ odo catalog list services ----- -+ -The command lists Operators and the CRDs. -The output of the command shows the Operators installed on your cluster. For example: -+ -[source,terminal] ----- -Operators available in the cluster -NAME CRDs -etcdoperator.v0.9.4 EtcdCluster, EtcdBackup, EtcdRestore -mongodb-enterprise.v1.4.5 MongoDB, MongoDBUser, MongoDBOpsManager ----- -+ -`etcdoperator.v0.9.4` is the Operator, `EtcdCluster`, `EtcdBackup` and `EtcdRestore` are the CRDs provided by the Operator. 
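The Operator and CRD names shown in this listing are the values that `odo service create` expects, as described in the earlier module on creating a service from an Operator. A minimal sketch, reusing the `etcdoperator.v0.9.4` example from the output above:

[source,terminal]
----
$ odo service create etcdoperator.v0.9.4 EtcdCluster
----

You can then confirm that the service started by running `oc get EtcdCluster`, as shown earlier.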
diff --git a/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc b/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc deleted file mode 100644 index 2c041622ab7d..000000000000 --- a/modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster - -:_content-type: PROCEDURE -[id="mirroring-a-supported-builder-image_{context}"] -= Mirroring a supported builder image - -To use npm packages for Node.js dependencies and Maven packages for Java dependencies and configure a runtime environment for your application, you must mirror a respective builder image from the mirror registry. - - -.Procedure - -. Verify that the required images tag is not imported: -+ -[source,terminal] ----- -$ oc describe is nodejs -n openshift ----- -+ -.Example output -[source,terminal] ----- -Name: nodejs -Namespace: openshift -[...] - -10 - tagged from <mirror-registry>:<port>/rhoar-nodejs/nodejs-10 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs, hidden - Example Repo: https://github.com/sclorg/nodejs-ex.git - - ! error: Import failed (NotFound): dockerimage.image.openshift.io "<mirror-registry>:<port>/rhoar-nodejs/nodejs-10:latest" not found - About an hour ago - -10-SCL (latest) - tagged from <mirror-registry>:<port>/rhscl/nodejs-10-rhel7 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs - Example Repo: https://github.com/sclorg/nodejs-ex.git - - ! error: Import failed (NotFound): dockerimage.image.openshift.io "<mirror-registry>:<port>/rhscl/nodejs-10-rhel7:latest" not found - About an hour ago - -[...] ----- - -. Mirror the supported image tag to the private registry: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/rhscl/nodejs-10-rhel7:<tag> <private_registry>/rhscl/nodejs-10-rhel7:<tag> ----- - -. Import the image: -+ -[source,terminal] ----- -$ oc tag <mirror-registry>:<port>/rhscl/nodejs-10-rhel7:<tag> nodejs-10-rhel7:latest --scheduled ----- -+ -You must periodically re-import the image. The `--scheduled` flag enables automatic re-import of the image. - -. Verify that the images with the given tag have been imported: -+ -[source,terminal] ----- -$ oc describe is nodejs -n openshift ----- -+ -.Example output -[source,terminal] ----- -Name: nodejs -[...] -10-SCL (latest) - tagged from <mirror-registry>:<port>/rhscl/nodejs-10-rhel7 - prefer registry pullthrough when referencing this tag - - Build and run Node.js 10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/nodeshift/centos7-s2i-nodejs. - Tags: builder, nodejs - Example Repo: https://github.com/sclorg/nodejs-ex.git - - * <mirror-registry>:<port>/rhscl/nodejs-10-rhel7@sha256:d669ecbc11ac88293de50219dae8619832c6a0f5b04883b480e073590fab7c54 - 3 minutes ago - -[...] 
----- diff --git a/modules/developer-cli-odo-modifying-the-running-application.adoc b/modules/developer-cli-odo-modifying-the-running-application.adoc deleted file mode 100644 index fd4b4295a83d..000000000000 --- a/modules/developer-cli-odo-modifying-the-running-application.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-multicomponent--application-with-odo.adoc - -:_content-type: PROCEDURE -[id="modifying-the-running-application_{context}"] - -= Modifying the running application - -.Procedure - -. Change the local directory to the front-end directory: -+ -[source,terminal] ----- -$ cd frontend ----- - -. Monitor the changes on the file system using: -+ -[source,terminal] ----- -$ odo watch ----- - -. Edit the `index.html` file to change the displayed name for the game. -+ -[NOTE] -==== -A slight delay is possible before {odo-title} recognizes the change. -==== -+ -{odo-title} pushes the changes to the front-end component and prints its status to the terminal: -+ -[source,terminal] ----- -File /root/frontend/index.html changed -File changed -Pushing files... - ✓ Waiting for component to start - ✓ Copying files to component - ✓ Building component ----- - -. Refresh the application page in the web browser. The new name is now displayed. diff --git a/modules/developer-cli-odo-modifying-your-application-code.adoc b/modules/developer-cli-odo-modifying-your-application-code.adoc deleted file mode 100644 index ddef73601a08..000000000000 --- a/modules/developer-cli-odo-modifying-your-application-code.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/creating-a-single-component-application-with-odo.adoc - -[id="modifying-your-application-code_{context}"] -= Modifying your application code - -You can modify your application code and have the changes applied to your application on {product-title}. - -. Edit one of the layout files within the Node.js directory with your preferred text editor. - -. Update your component: -+ -[source,terminal] ----- -$ odo push ----- -. Refresh your application in the browser to see the changes. diff --git a/modules/developer-cli-odo-openshift-cluster-objects.adoc b/modules/developer-cli-odo-openshift-cluster-objects.adoc deleted file mode 100644 index bd6e81441545..000000000000 --- a/modules/developer-cli-odo-openshift-cluster-objects.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="openshift-cluster-objects_{context}"] -= OpenShift cluster objects - -== Init Containers -Init containers are specialized containers that run before the application container starts and configure the necessary environment for the application containers to run. Init containers can have files that application images do not have, for example setup scripts. Init containers always run to completion and the application container does not start if any of the init containers fails. - -The pod created by {odo-title} executes two Init Containers: - -* The `copy-supervisord` Init container. -* The `copy-files-to-volume` Init container. - -=== `copy-supervisord` - -The `copy-supervisord` Init container copies necessary files onto an `emptyDir` volume. The main application container utilizes these files from the `emptyDir` volume. 
- -.Files that are copied onto the `emptyDir` volume: -* Binaries: -** `go-init` is a minimal init system. It runs as the first process (PID 1) inside the application container. go-init starts the `SupervisorD` daemon which runs the developer code. go-init is required to handle orphaned processes. -** `SupervisorD` is a process control system. It watches over configured processes and ensures that they are running. It also restarts services when necessary. For {odo-title}, `SupervisorD` executes and monitors the developer code. - -* Configuration files: -** `supervisor.conf` is the configuration file necessary for the SupervisorD daemon to start. -* Scripts: -** `assemble-and-restart` is an OpenShift S2I concept to build and deploy user-source code. The assemble-and-restart script first assembles the user source code inside the application container and then restarts SupervisorD for user changes to take effect. -** `Run` is an OpenShift S2I concept of executing the assembled source code. The `run` script executes the assembled code created by the `assemble-and-restart` script. -** `s2i-setup` is a script that creates files and directories which are necessary for the `assemble-and-restart` and run scripts to execute successfully. The script is executed whenever the application container starts. - -* Directories: -** `language-scripts`: OpenShift S2I allows custom `assemble` and `run` scripts. A few language specific custom scripts are present in the `language-scripts` directory. The custom scripts provide additional configuration to make {odo-title} debug work. - -The `emptyDir` volume is mounted at the `/opt/odo` mount point for both the Init container and the application container. - -=== `copy-files-to-volume` -The `copy-files-to-volume` Init container copies files that are in `/opt/app-root` in the S2I builder image onto the persistent volume. The volume is then mounted at the same location (`/opt/app-root`) in an application container. - -Without the persistent volume on `/opt/app-root` the data in this directory is lost when the persistent volume claim is mounted at the same location. - -The PVC is mounted at the `/mnt` mount point inside the Init container. - -== Application container -Application container is the main container inside of which the user-source code executes. - -Application container is mounted with two volumes: - -* `emptyDir` volume mounted at `/opt/odo` -* The persistent volume mounted at `/opt/app-root` - -`go-init` is executed as the first process inside the application container. The `go-init` process then starts the `SupervisorD` daemon. - -`SupervisorD` executes and monitors the user assembled source code. If the user process crashes, `SupervisorD` restarts it. - -== Persistent volumes and persistent volume claims -A persistent volume claim (PVC) is a volume type in Kubernetes which provisions a persistent volume. The life of a persistent volume is independent of a pod lifecycle. The data on the persistent volume persists across pod restarts. - -The `copy-files-to-volume` Init container copies necessary files onto the persistent volume. The main application container utilizes these files at runtime for execution. - -The naming convention of the persistent volume is <component_name>-s2idata. 
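As a quick check after pushing a component, you can inspect the claim that backs this volume with a standard `oc` command. This is a sketch under the assumption that the persistent volume claim follows the same `<component_name>-s2idata` naming convention described above; `frontend` is a placeholder component name:

[source,terminal]
----
$ oc get pvc frontend-s2idata
----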
- -[options="header"] -|=== -| Container | PVC mounted at -| `copy-files-to-volume` -| `/mnt` - -| Application container -| `/opt/app-root` -|=== - -== emptyDir volume -An `emptyDir` volume is created when a pod is assigned to a node, and exists as long as that pod is running on the node. If the container is restarted or moved, the content of the `emptyDir` is removed, Init container restores the data back to the `emptyDir`. `emptyDir` is initially empty. - -The `copy-supervisord` Init container copies necessary files onto the `emptyDir` volume. These files are then utilized by the main application container at runtime for execution. - -[options="header"] -|=== -| Container | `emptyDir volume` mounted at -| `copy-supervisord` -| `/opt/odo` - -| Application container -| `/opt/odo` -|=== - -== Service -A service is a Kubernetes concept of abstracting the way of communicating with a set of pods. - -{odo-title} creates a service for every application pod to make it accessible for communication. diff --git a/modules/developer-cli-odo-openshift-source-to-image.adoc b/modules/developer-cli-odo-openshift-source-to-image.adoc deleted file mode 100644 index 7341c89caf1d..000000000000 --- a/modules/developer-cli-odo-openshift-source-to-image.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="openshift-source-to-image_{context}"] - -= OpenShift source-to-image - -OpenShift Source-to-Image (S2I) is an open-source project which helps in building artifacts from source code and injecting these into container images. S2I produces ready-to-run images by building source code without the need of a Dockerfile. -{odo-title} uses S2I builder image for executing developer source code inside a container. diff --git a/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc b/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc deleted file mode 100644 index f970a33f62e9..000000000000 --- a/modules/developer-cli-odo-overwriting-a-mirror-registry.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster - -:_content-type: PROCEDURE -[id="overwriting-the-mirror-registry_{context}"] -= Overwriting the mirror registry - -To download npm packages for Node.js dependencies and Maven packages for Java dependencies from a private mirror registry, you must create and configure a mirror npm or Maven registry on the cluster. You can then overwrite the mirror registry on an existing component or when you create a new component. 
- -.Procedure - -* To overwrite the mirror registry on an existing component: -+ -[source,terminal] ----- -$ odo config set --env NPM_MIRROR=<npm_mirror_registry> ----- - -* To overwrite the mirror registry when creating a component: -+ -[source,terminal] ----- -$ odo component create nodejs --env NPM_MIRROR=<npm_mirror_registry> ----- diff --git a/modules/developer-cli-odo-preference-table.adoc b/modules/developer-cli-odo-preference-table.adoc deleted file mode 100644 index 5fd8e5d2a27f..000000000000 --- a/modules/developer-cli-odo-preference-table.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-preference-table_{context}"] -= Preference key table - -The following table shows the available options for setting preference keys for the `odo` CLI: - -[cols="1,3,1"] -|=== -|Preference key |Description |Default value - -|`UpdateNotification` -|Control whether a notification to update `odo` is shown. -|True - -|`NamePrefix` -|Set a default name prefix for an `odo` resource. For example, `component` or `storage`. -|Current directory name - -|`Timeout` -|Timeout for the Kubernetes server connection check. -|1 second - -|`BuildTimeout` -|Timeout for waiting for a build of the git component to complete. -|300 seconds - -|`PushTimeout` -|Timeout for waiting for a component to start. -|240 seconds - -|`Ephemeral` -|Controls whether `odo` should create an `emptyDir` volume to store source code. -|True - -|`ConsentTelemetry` -|Controls whether `odo` can collect telemetry for the user's `odo` usage. -|False - -|=== diff --git a/modules/developer-cli-odo-push-workflow.adoc b/modules/developer-cli-odo-push-workflow.adoc deleted file mode 100644 index 555e90be5113..000000000000 --- a/modules/developer-cli-odo-push-workflow.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/odo-architecture.adoc - -[id="odo-push-workflow_{context}"] - -= {odo-title} push workflow -This section describes `{odo-title} push` workflow. {odo-title} push deploys user code on an {product-title} cluster with all the necessary {product-title} resources. - -. Creating resources -+ -If not already created, `{odo-title}` push creates the following {product-title} resources: -+ -* `DeploymentConfig` object: -** Two init containers are executed: `copy-supervisord` and `copy-files-to-volume`. The init containers copy files onto the `emptyDir` and the `PersistentVolume` type of volumes respectively. -** The application container starts. The first process in the application container is the `go-init` process with PID=1. -** `go-init` process starts the SupervisorD daemon. -+ -[NOTE] -==== -The user application code has not been copied into the application container yet, so the `SupervisorD` daemon does not execute the `run` script. -==== -+ -* `Service` object -* `Secret` objects -* `PersistentVolumeClaim` object - - -. Indexing files -+ -* A file indexer indexes the files in the source code directory. The indexer traverses through the source code directories recursively and finds files which have been created, deleted, or renamed. -* A file indexer maintains the indexed information in an {odo-title} index file inside the `.odo` directory. -* If the {odo-title} index file is not present, it means that the file indexer is being executed for the first time, and creates a new {odo-title} index JSON file. 
-The {odo-title} index JSON file contains a file map - the relative file paths of the traversed files and the absolute paths of the changed and deleted files. - -. Pushing code -+ -Local code is copied into the application container, usually under `/tmp/src`. - -. Executing `assemble-and-restart` -+ -On a successful copy of the source code, the `assemble-and-restart` script is executed inside the running application container. diff --git a/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc b/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc deleted file mode 100644 index 4deb48351980..000000000000 --- a/modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc +++ /dev/null @@ -1,134 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc - -:_content-type: PROCEDURE -[id="pushing-the-odo-init-image-to-a-mirror-registry_{context}"] -= Pushing the `odo` init image to a mirror registry - -Depending on your operating system, you can push the `odo` init image to a cluster with a mirror registry as follows: - -[id="pushing-the-init-image-to-a-mirror-registry-on-linux_{context}"] - -== Pushing the init image to a mirror registry on Linux - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. Copy the encoded root CA certificate to the appropriate location: -+ -[source,terminal] ----- -$ sudo cp ./disconnect-ca.crt /etc/pki/ca-trust/source/anchors/<mirror-registry>.crt ----- - -. Trust a CA in your client platform and log in to the {product-title} mirror registry: -+ -[source,terminal] ----- -$ sudo update-ca-trust enable && sudo systemctl daemon-reload && sudo systemctl restart / docker && docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - - -[id="pushing-the-init-image-to-a-mirror-registry-on-macos_{context}"] - -== Pushing the init image to a mirror registry on MacOS - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. Copy the encoded root CA certificate to the appropriate location: -+ -.. Restart Docker using the Docker UI. -+ -.. Run the following command: -+ -[source,terminal] ----- -$ docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -$ oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. 
Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -[id="pushing-the-init-image-to-a-mirror-registry-on-windows_{context}"] - -== Pushing the init image to a mirror registry on Windows - -.Procedure - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -PS C:\> echo <content_of_additional_ca> | base64 --decode > disconnect-ca.crt ----- - -. As an administrator, copy the encoded root CA certificate to the appropriate location by executing the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> certutil -addstore -f "ROOT" disconnect-ca.crt ----- - -. Trust a CA in your client platform and log in to the {product-title} mirror registry: -+ -.. Restart Docker using the Docker UI. -+ -.. Run the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> docker login <mirror-registry>:5000 -u <username> -p <password> ----- - -. Mirror the `odo` init image: -+ -[source,terminal] ----- -PS C:\> oc image mirror registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -PS C:\> $env:ODO_BOOTSTRAPPER_IMAGE="<mirror-registry>:5000/openshiftdo/odo-init-image-rhel7:<tag>" ----- diff --git a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc b/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc deleted file mode 100644 index 19da7763d919..000000000000 --- a/modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc +++ /dev/null @@ -1,239 +0,0 @@ -// Module included in the following assemblies: -// -// cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc - -:_content-type: PROCEDURE -[id="pushing-the-odo-init-image-to-an-internal-registry-directly_{context}"] -= Pushing the `odo` init image to an {product-registry} directly - -If your cluster allows images to be pushed to the {product-registry} directly, push the `odo` init image to the registry as follows: - -[id="pushing-the-init-image-directly-on-linux_{context}"] - -== Pushing the init image directly on Linux - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. Get a wildcard route CA: -+ -[source,terminal] ----- -$ oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <tls.crt> | base64 --decode > ca.crt ----- - -. 
Trust a CA in your client platform: -+ -[source,terminal] ----- -$ sudo cp ca.crt /etc/pki/ca-trust/source/anchors/externalroute.crt && sudo update-ca-trust enable && sudo systemctl daemon-reload && sudo systemctl restart docker ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -$ oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -$ docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -$ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<registry_path>/openshiftdo/odo-init-image-rhel7:1.0.1 ----- - - -[id="pushing-the-init-image-directly-on-macos_{context}"] - -== Pushing the init image directly on MacOS - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. Get a wildcard route CA: -+ -[source,terminal] ----- -$ oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -$ echo <tls.crt> | base64 --decode > ca.crt ----- - -. Trust a CA in your client platform: -+ -[source,terminal] ----- -$ sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ca.crt ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -$ oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -$ docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -$ docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -$ docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -$ export ODO_BOOTSTRAPPER_IMAGE=<registry_path>/openshiftdo/odo-init-image-rhel7:1.0.1 ----- - - -[id="pushing-the-init-image-directly-on-windows_{context}"] - -== Pushing the init image directly on Windows - -.Procedure - -. Enable the default route: -+ -[source,terminal] ----- -PS C:\> oc patch configs.imageregistry.operator.openshift.io cluster -p '{"spec":{"defaultRoute":true}}' --type='merge' -n openshift-image-registry ----- - -. 
Get a wildcard route CA: -+ -[source,terminal] ----- -PS C:\> oc get secret router-certs-default -n openshift-ingress -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -data: - tls.crt: ************************** - tls.key: ################## -kind: Secret -metadata: - [...] -type: kubernetes.io/tls ----- - -. Use `base64` to encode the root certification authority (CA) content of your mirror registry: -+ -[source,terminal] ----- -PS C:\> echo <tls.crt> | base64 --decode > ca.crt ----- - -. As an administrator, trust a CA in your client platform by executing the following command: -+ -[source,terminal] ----- -PS C:\WINDOWS\system32> certutil -addstore -f "ROOT" ca.crt ----- - -. Log in to the {product-registry}: -+ -[source,terminal] ----- -PS C:\> oc get route -n openshift-image-registry -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -default-route <registry_path> image-registry <all> reencrypt None - -PS C:\> docker login <registry_path> -u kubeadmin -p $(oc whoami -t) ----- - -. Push the `odo` init image: -+ -[source,terminal] ----- -PS C:\> docker pull registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> - -PS C:\> docker tag registry.access.redhat.com/openshiftdo/odo-init-image-rhel7:<tag> <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> - -PS C:\> docker push <registry_path>/openshiftdo/odo-init-image-rhel7:<tag> ----- - -. Override the default `odo` init image path by setting the `ODO_BOOTSTRAPPER_IMAGE` environment variable: -+ -[source,terminal] ----- -PS C:\> $env:ODO_BOOTSTRAPPER_IMAGE="<registry_path>/openshiftdo/odo-init-image-rhel7:<tag>" ----- diff --git a/modules/developer-cli-odo-ref-build-images.adoc b/modules/developer-cli-odo-ref-build-images.adoc deleted file mode 100644 index 583725081047..000000000000 --- a/modules/developer-cli-odo-ref-build-images.adoc +++ /dev/null @@ -1,24 +0,0 @@ -:_content-type: REFERENCE -[id="odo-build-images_{context}"] -= odo build-images - -`odo` can build container images based on Dockerfiles, and push these images to their registries. - -When running the `odo build-images` command, `odo` searches for all components in the `devfile.yaml` with the `image` type, for example: - -[source,yaml] ----- -components: -- image: - imageName: quay.io/myusername/myimage - dockerfile: - uri: ./Dockerfile <1> - buildContext: ${PROJECTS_ROOT} <2> - name: component-built-from-dockerfile ----- -<1> The `uri` field indicates the relative path of the Dockerfile to use, relative to the directory containing the `devfile.yaml`. The devfile specification indicates that `uri` could also be an HTTP URL, but this case is not supported by odo yet. -<2> The `buildContext` indicates the directory used as build context. The default value is `+${PROJECTS_ROOT}+`. - -For each image component, odo executes either `podman` or `docker` (the first one found, in this order), to build the image with the specified Dockerfile, build context, and arguments. - -If the `--push` flag is passed to the command, the images are pushed to their registries after they are built. diff --git a/modules/developer-cli-odo-ref-catalog.adoc b/modules/developer-cli-odo-ref-catalog.adoc deleted file mode 100644 index cb3d28106157..000000000000 --- a/modules/developer-cli-odo-ref-catalog.adoc +++ /dev/null @@ -1,208 +0,0 @@ -:_content-type: REFERENCE -[id="odo-catalog_{context}"] -= odo catalog - -`odo` uses different _catalogs_ to deploy _components_ and _services_. 
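At a glance, the two catalogs can be listed with the commands that the following sections describe in detail:

[source,terminal]
----
$ odo catalog list components
$ odo catalog list services
----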
- -== Components - -`odo` uses the portable _devfile_ format to describe the components. It can connect to various devfile registries to download devfiles for different languages and frameworks. -See `odo registry` for more information. - -=== Listing components - -To list all the _devfiles_ available on the different registries, run the command: - -[source,terminal] ----- -$ odo catalog list components ----- - -.Example output -[source,terminal] ----- - NAME DESCRIPTION REGISTRY - go Stack with the latest Go version DefaultDevfileRegistry - java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry - nodejs Stack with Node.js 14 DefaultDevfileRegistry - php-laravel Stack with Laravel 8 DefaultDevfileRegistry - python Python Stack with Python 3.7 DefaultDevfileRegistry - [...] ----- - -=== Getting information about a component - -To get more information about a specific component, run the command: - -[source,terminal] ----- -$ odo catalog describe component ----- - -For example, run the command: - -[source,terminal] ----- -$ odo catalog describe component nodejs ----- - -.Example output -[source,terminal] ----- -* Registry: DefaultDevfileRegistry <1> - -Starter Projects: <2> ---- -name: nodejs-starter -attributes: {} -description: "" -subdir: "" -projectsource: - sourcetype: "" - git: - gitlikeprojectsource: - commonprojectsource: {} - checkoutfrom: null - remotes: - origin: https://github.com/odo-devfiles/nodejs-ex.git - zip: null - custom: null ----- -<1> _Registry_ is the registry from which the devfile is retrieved. -<2> _Starter projects_ are sample projects in the same language and framework of the devfile, that can help you start a new project. - - -See `odo create` for more information on creating a project from a starter project. - - -== Services - -`odo` can deploy _services_ with the help of _Operators_. - -Only Operators deployed with the help of the https://olm.operatorframework.io/[_Operator Lifecycle Manager_] are supported by odo. - -//// -See link:/docs/getting-started/cluster-setup/kubernetes#installing-the-operator-lifecycle-manager-olm[Installing the Operator Lifecycle Manager (OLM)] for more information. -//// - -=== Listing services - -To list the available Operators and their associated services, run the command: - -[source,terminal] ----- -$ odo catalog list services ----- - -.Example output -[source,terminal] ----- - Services available through Operators - NAME CRDs - postgresql-operator.v0.1.1 Backup, Database - redis-operator.v0.8.0 RedisCluster, Redis ----- - -In this example, two Operators are installed in the cluster. The `postgresql-operator.v0.1.1` Operator deploys services related to PostgreSQL: `Backup` and `Database`. -The `redis-operator.v0.8.0` Operator deploys services related to Redis: `RedisCluster` and `Redis`. - -[NOTE] -==== -To get a list of all the available Operators, `odo` fetches the ClusterServiceVersion (CSV) resources of the current namespace that are in a _Succeeded_ phase. -For Operators that support cluster-wide access, when a new namespace is created, these resources are automatically added to it. However, it may take some time before they are in the _Succeeded_ phase, and `odo` may return an empty list until the resources are ready. 
-==== - -=== Searching services - -To search for a specific service by a keyword, run the command: - -[source,terminal] ----- -$ odo catalog search service ----- - -For example, to retrieve the PostgreSQL services, run the command: - -[source,terminal] ----- -$ odo catalog search service postgres ----- - -.Example output -[source,terminal] ----- - Services available through Operators - NAME CRDs - postgresql-operator.v0.1.1 Backup, Database ----- - -You will see a list of Operators that contain the searched keyword in their name. - -=== Getting information about a service - -To get more information about a specific service, run the command: - -[source,terminal] ----- -$ odo catalog describe service ----- - -For example: - -[source,terminal] ----- -$ odo catalog describe service postgresql-operator.v0.1.1/Database ----- - -.Example output -[source,terminal] ----- -KIND: Database -VERSION: v1alpha1 - -DESCRIPTION: - Database is the Schema for the the Database Database API - -FIELDS: - awsAccessKeyId (string) - AWS S3 accessKey/token ID - - Key ID of AWS S3 storage. Default Value: nil Required to create the Secret - with the data to allow send the backup files to AWS S3 storage. -[...] ----- - -A service is represented in the cluster by a CustomResourceDefinition (CRD) resource. The previous command displays the details about the CRD such as `kind`, `version`, and the list of fields available to define an instance of this custom resource. - -The list of fields is extracted from the _OpenAPI schema_ included in the CRD. This information is optional in a CRD, and if it is not present, it is extracted from the ClusterServiceVersion (CSV) resource representing the service instead. - -It is also possible to request the description of an Operator-backed service, without providing CRD type information. To describe the Redis Operator on a cluster, without CRD, run the following command: - - -[source,terminal] ----- -$ odo catalog describe service redis-operator.v0.8.0 ----- - -.Example output -[source,terminal] ----- -NAME: redis-operator.v0.8.0 -DESCRIPTION: - - A Golang based redis operator that will make/oversee Redis - standalone/cluster mode setup on top of the Kubernetes. It can create a - redis cluster setup with best practices on Cloud as well as the Bare metal - environment. Also, it provides an in-built monitoring capability using - -... (cut short for beverity) - - Logging Operator is licensed under [Apache License, Version - 2.0](https://github.com/OT-CONTAINER-KIT/redis-operator/blob/master/LICENSE) - - -CRDs: - NAME DESCRIPTION - RedisCluster Redis Cluster - Redis Redis ----- diff --git a/modules/developer-cli-odo-ref-create.adoc b/modules/developer-cli-odo-ref-create.adoc deleted file mode 100644 index 028c99c804b2..000000000000 --- a/modules/developer-cli-odo-ref-create.adoc +++ /dev/null @@ -1,107 +0,0 @@ -:_content-type: REFERENCE -[id="odo-create_{context}"] -= odo create - - -`odo` uses a link:https://devfile.io[_devfile_] to store the configuration of a component and to describe the component's resources such as storage and services. The _odo create_ command generates this file. - -== Creating a component - -To create a _devfile_ for an existing project, run the `odo create` command with the name and type of your component (for example, `nodejs` or `go`): - -[source,terminal] ----- -odo create nodejs mynodejs ----- - -In the example, `nodejs` is the type of the component and `mynodejs` is the name of the component that `odo` creates for you. 
- -[NOTE] -==== -For a list of all the supported component types, run the command `odo catalog list components`. -==== - -If your source code exists outside the current directory, the `--context` flag can be used to specify the path. -For example, if the source for the nodejs component is in a folder called `node-backend` relative to the current working directory, run the command: - -[source,terminal] ----- -odo create nodejs mynodejs --context ./node-backend ----- - -The `--context` flag supports relative and absolute paths. - -To specify the project or app where your component will be deployed, use the `--project` and `--app` flags. -For example, to create a component that is part of the `myapp` app inside the `backend` project, run the command: - -[source,terminal] ----- -odo create nodejs --app myapp --project backend ----- - -[NOTE] -==== -If these flags are not specified, they will default to the active app and project. -==== - -== Starter projects - -Use the starter projects if you do not have existing source code but want to get up and running quickly to experiment with devfiles and components. -To use a starter project, add the `--starter` flag to the `odo create` command. - -To get a list of available starter projects for a component type, run the `odo catalog describe component` command. -For example, to get all available starter projects for the nodejs component type, run the command: - -[source,terminal] ----- -odo catalog describe component nodejs ----- - -Then specify the desired project using the `--starter` flag on the `odo create` command: - -[source,terminal] ----- -odo create nodejs --starter nodejs-starter ----- - -This will download the example template corresponding to the chosen component type, in this instance, `nodejs`. -The template is downloaded to your current directory, or to the location specified by the `--context` flag. -If a starter project has its own devfile, then this devfile will be preserved. - -== Using an existing devfile - -If you want to create a new component from an existing devfile, you can do so by specifying the path to the devfile using the `--devfile` flag. -For example, to create a component called `mynodejs`, based on a devfile from GitHub, use the following command: - -[source,terminal] ----- -odo create mynodejs --devfile https://raw.githubusercontent.com/odo-devfiles/registry/master/devfiles/nodejs/devfile.yaml ----- - -== Interactive creation - -You can also run the `odo create` command interactively, to guide you through the steps needed to create a component: - -[source,terminal,subs="verbatim,quotes"] ----- -$ odo create - -? Which devfile component type do you wish to create *go* -? What do you wish to name the new devfile component *go-api* -? What project do you want the devfile component to be created in *default* -Devfile Object Validation - ✓ Checking devfile existence [164258ns] - ✓ Creating a devfile component from registry: DefaultDevfileRegistry [246051ns] -Validation - ✓ Validating if devfile name is correct [92255ns] -? Do you want to download a starter project *Yes* - -Starter Project - ✓ Downloading starter project go-starter from https://github.com/devfile-samples/devfile-stack-go.git [429ms] - -Please use `odo push` command to create the component with source deployed ----- - -You are prompted to choose the component type, name, and the project for the component. You can also choose whether or not to download a starter project. Once finished, a new `devfile.yaml` file is created in the working directory. 
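To see what was generated, inspect the new file. The following is an illustrative, abbreviated sketch only: the exact content depends on the stack you selected (the `go` stack in the interactive session above), and the image and command values shown here are placeholders rather than real registry content:

[source,terminal]
----
$ cat devfile.yaml
schemaVersion: 2.0.0
metadata:
  name: go-api
components:
  - name: runtime
    container:
      image: <builder_image>
      [...]
commands:
  - id: build
    exec:
      component: runtime
      commandLine: go build main.go
      [...]
----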
- -To deploy these resources to your cluster, run the command `odo push`. diff --git a/modules/developer-cli-odo-ref-delete.adoc b/modules/developer-cli-odo-ref-delete.adoc deleted file mode 100644 index 1271628c1c61..000000000000 --- a/modules/developer-cli-odo-ref-delete.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: REFERENCE -[id="odo-delete_{context}"] -= odo delete - - -The `odo delete` command is useful for deleting resources that are managed by `odo`. - -== Deleting a component - -To delete a _devfile_ component, run the `odo delete` command: - -[source,terminal] ----- -$ odo delete ----- - -If the component has been pushed to the cluster, the component is deleted from the cluster, along with its dependent storage, URL, secrets, and other resources. -If the component has not been pushed, the command exits with an error stating that it could not find the resources on the cluster. - -Use the `-f` or `--force` flag to avoid the confirmation questions. - -== Undeploying devfile Kubernetes components - -To undeploy the devfile Kubernetes components, that have been deployed with `odo deploy`, execute the `odo delete` command with the `--deploy` flag: - -[source,terminal] ----- -$ odo delete --deploy ----- - -Use the `-f` or `--force` flag to avoid the confirmation questions. - -== Delete all - -To delete all artifacts including the following items, run the `odo delete` command with the `--all` flag : - -* _devfile_ component -* Devfile Kubernetes component that was deployed using the `odo deploy` command -* Devfile -* Local configuration - -[source,terminal] ----- -$ odo delete --all ----- - -== Available flags - -`-f`, `--force`:: Use this flag to avoid the confirmation questions. -`-w`, `--wait`:: Use this flag to wait for component deletion and any dependencies. This flag does not work when undeploying. - -The documentation on _Common Flags_ provides more information on the flags available for commands. diff --git a/modules/developer-cli-odo-ref-deploy.adoc b/modules/developer-cli-odo-ref-deploy.adoc deleted file mode 100644 index 441b52f4a4c3..000000000000 --- a/modules/developer-cli-odo-ref-deploy.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: REFERENCE -[id="odo-deploy_{context}"] -= odo deploy - - -`odo` can be used to deploy components in a manner similar to how they would be deployed using a CI/CD system. -First, `odo` builds the container images, and then it deploys the Kubernetes resources required to deploy the components. - -When running the command `odo deploy`, `odo` searches for the default command of kind `deploy` in the devfile, and executes this command. -The kind `deploy` is supported by the devfile format starting from version 2.2.0. - -The `deploy` command is typically a _composite_ command, composed of several _apply_ commands: - -* A command referencing an `image` component that, when applied, will build the image of the container to deploy, and then push it to its registry. -* A command referencing a link:https://devfile.io/docs/devfile/2.2.0/user-guide/adding-kubernetes-component-to-a-devfile.html[Kubernetes component] that, when applied, will create a Kubernetes resource in the cluster. - -With the following example `devfile.yaml` file, a container image is built using the `Dockerfile` present in the directory. -The image is pushed to its registry and then a Kubernetes Deployment resource is created in the cluster, using this freshly built image. - -[source,terminal] ----- -schemaVersion: 2.2.0 -[...] 
-variables: - CONTAINER_IMAGE: quay.io/phmartin/myimage -commands: - - id: build-image - apply: - component: outerloop-build - - id: deployk8s - apply: - component: outerloop-deploy - - id: deploy - composite: - commands: - - build-image - - deployk8s - group: - kind: deploy - isDefault: true -components: - - name: outerloop-build - image: - imageName: "{{CONTAINER_IMAGE}}" - dockerfile: - uri: ./Dockerfile - buildContext: ${PROJECTS_ROOT} - - name: outerloop-deploy - kubernetes: - inlined: | - kind: Deployment - apiVersion: apps/v1 - metadata: - name: my-component - spec: - replicas: 1 - selector: - matchLabels: - app: node-app - template: - metadata: - labels: - app: node-app - spec: - containers: - - name: main - image: {{CONTAINER_IMAGE}} ----- diff --git a/modules/developer-cli-odo-ref-flags.adoc b/modules/developer-cli-odo-ref-flags.adoc deleted file mode 100644 index 3e20ae4bf374..000000000000 --- a/modules/developer-cli-odo-ref-flags.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: REFERENCE -[id="odo-flags_{context}"] -= Common flags - -The following flags are available with most `odo` commands: - -.odo flags - -[width="100%",cols="30%,78%",options="header",] -|=== -|Command |Description - -| `--context` -| Set the context directory where the component is defined. - -| `--project` -| Set the project for the component. Defaults to the project defined in the local configuration. If none is available, then current project on the cluster. - - -| `--app` -| Set the application of the component. Defaults to the application defined in the local configuration. If none is available, then _app_. - - -| `--kubeconfig` -| Set the path to the `kubeconfig` value if not using the default configuration. - - -| `--show-log` -| Use this flag to see the logs. - -| `-f`, `--force` -| Use this flag to tell the command not to prompt the user for confirmation. - -| `-v`, `--v` -| Set the verbosity level. See link:https://github.com/redhat-developer/odo/wiki/Logging-in-odo[Logging in odo] for more information. - -| `-h`, `--help` -| Output the help for a command. - -|=== - -[NOTE] -==== -Some flags might not be available for some commands. Run the command with the `--help` flag to get a list of all the available flags. -==== diff --git a/modules/developer-cli-odo-ref-json-output.adoc b/modules/developer-cli-odo-ref-json-output.adoc deleted file mode 100644 index d9d850b4318d..000000000000 --- a/modules/developer-cli-odo-ref-json-output.adoc +++ /dev/null @@ -1,144 +0,0 @@ -:_content-type: REFERENCE -[id="odo-json-output_{context}"] -= JSON output - -The `odo` commands that output content generally accept a `-o json` flag to output this content in JSON format, suitable for other programs to parse this output more easily. - -The output structure is similar to Kubernetes resources, with the `kind`, `apiVersion`, `metadata`, `spec`, and `status` fields. - -_List_ commands return a `List` resource, containing an `items` (or similar) field listing the items of the list, with each item also being similar to Kubernetes resources. - -_Delete_ commands return a `Status` resource; see the link:https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/status/[Status Kubernetes resource]. - -Other commands return a resource associated with the command, for example, `Application`, `Storage`, `URL`, and so on. - -The full list of commands currently accepting the `-o json` flag is: - -|=== -| Commands | Kind (version) | Kind (version) of list items | Complete content? 
- -| odo application describe -| Application (odo.dev/v1alpha1) -| _n/a_ -| no - -| odo application list -| List (odo.dev/v1alpha1) -| Application (odo.dev/v1alpha1) -| ? - -| odo catalog list components -| List (odo.dev/v1alpha1) -| _missing_ -| yes - -| odo catalog list services -| List (odo.dev/v1alpha1) -| ClusterServiceVersion (operators.coreos.com/v1alpha1) -| ? - -| odo catalog describe component -| _missing_ -| _n/a_ -| yes - -| odo catalog describe service -| CRDDescription (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component create -| Component (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component describe -| Component (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo component list -| List (odo.dev/v1alpha1) -| Component (odo.dev/v1alpha1) -| yes - -| odo config view -| DevfileConfiguration (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo debug info -| OdoDebugInfo (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo env view -| EnvInfo (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo preference view -| PreferenceList (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project create -| Project (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project delete -| Status (v1) -| _n/a_ -| yes - -| odo project get -| Project (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo project list -| List (odo.dev/v1alpha1) -| Project (odo.dev/v1alpha1) -| yes - -| odo registry list -| List (odo.dev/v1alpha1) -| _missing_ -| yes - -| odo service create -| Service -| _n/a_ -| yes - -| odo service describe -| Service -| _n/a_ -| yes - -| odo service list -| List (odo.dev/v1alpha1) -| Service -| yes - -| odo storage create -| Storage (odo.dev/v1alpha1) -| _n/a_ -| yes - -| odo storage delete -| Status (v1) -| _n/a_ -| yes - -| odo storage list -| List (odo.dev/v1alpha1) -| Storage (odo.dev/v1alpha1) -| yes - -| odo url list -| List (odo.dev/v1alpha1) -| URL (odo.dev/v1alpha1) -| yes -|=== diff --git a/modules/developer-cli-odo-ref-link.adoc b/modules/developer-cli-odo-ref-link.adoc deleted file mode 100644 index faf1b4d1be2f..000000000000 --- a/modules/developer-cli-odo-ref-link.adoc +++ /dev/null @@ -1,491 +0,0 @@ -:_content-type: REFERENCE -[id="odo-link_{context}"] -= odo link - - -The `odo link` command helps link an `odo` component to an Operator-backed service or another `odo` component. It does this by using the link:https://github.com/redhat-developer/service-binding-operator[Service Binding Operator]. Currently, `odo` makes use of the Service Binding library and not the Operator itself to achieve the desired functionality. - -//// -In this document we will cover various options to create link between a component & a service, and a component & another component. The steps in this document are going to be based on the https://github.com/dharmit/odo-quickstart/[odo quickstart project] that we covered in link:/docs/getting-started/quickstart[Quickstart guide]. The outputs mentioned in this document are based on commands executed on link:/docs/getting-started/cluster-setup/kubernetes[minikube cluster]. - -This document assumes that you know how to link:/docs/command-reference/create[create components] and link:/docs/command-reference/service[services]. It also assumes that you have cloned the https://github.com/dharmit/odo-quickstart/[odo quickstart project]. Terminology used in this document: - -* _quickstart project_: git clone of the odo quickstart project having below directory structure: -+ -[,shell] ----- - $ tree -L 1 - . 
- ├── backend - ├── frontend - ├── postgrescluster.yaml - ├── quickstart.code-workspace - └── README.md - - 2 directories, 3 files ----- - -* _backend component_: `backend` directory in above tree structure -* _frontend component_: `frontend` directory in above tree structure -* _Postgres service_: Operator backed service created from _backend component_ using the `odo service create --from-file ../postgrescluster.yaml` command. -//// - -== Various linking options - -`odo` provides various options for linking a component with an Operator-backed service or another `odo` component. All these options (or flags) can be used whether you are linking a component to a service or to another component. - -=== Default behavior - -By default, the `odo link` command creates a directory named `kubernetes/` in your component directory and stores the information (YAML manifests) about services and links there. When you use `odo push`, `odo` compares these manifests with the state of the resources on the Kubernetes cluster and decides whether it needs to create, modify or destroy resources to match what is specified by the user. - -=== The `--inlined` flag - -If you specify the `--inlined` flag to the `odo link` command, `odo` stores the link information inline in the `devfile.yaml` in the component directory, instead of creating a file under the `kubernetes/` directory. The behavior of the `--inlined` flag is similar in both the `odo link` and `odo service create` commands. This flag is helpful if you want everything stored in a single `devfile.yaml`. You have to remember to use `--inlined` flag with each `odo link` and `odo service create` command that you execute for the component. - -=== The `--map` flag - -Sometimes, you might want to add more binding information to the component, in addition to what is available by default. For example, if you are linking the component with a service and would like to bind some information from the service's spec (short for specification), you could use the `--map` flag. Note that `odo` does not do any validation against the spec of the service or component being linked. Using this flag is only recommended if you are comfortable using the Kubernetes YAML manifests. - -=== The `--bind-as-files` flag - -For all the linking options discussed so far, `odo` injects the binding information into the component as environment variables. If you would like to mount this information as files instead, you can use the `--bind-as-files` flag. This will make `odo` inject the binding information as files into the `/bindings` location within your component's Pod. Compared to the environment variables scenario, when you use `--bind-as-files`, the files are named after the keys and the value of these keys is stored as the contents of these files. - -== Examples - -=== Default `odo link` - -In the following example, the backend component is linked with the PostgreSQL service using the default `odo link` command. 
For the backend component, make sure that your component and service are pushed to the cluster: - -[source,terminal] ----- -$ odo list ----- - -.Sample output -[source,terminal] ----- -APP NAME PROJECT TYPE STATE MANAGED BY ODO -app backend myproject spring Pushed Yes ----- - -[source,terminal] ----- -$ odo service list ----- - -.Sample output -[source,terminal] ----- -NAME MANAGED BY ODO STATE AGE -PostgresCluster/hippo Yes (backend) Pushed 59m41s ----- - -Now, run `odo link` to link the backend component with the PostgreSQL service: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo ----- - - -.Example output -[source,terminal] ----- - ✓ Successfully created link between component "backend" and service "PostgresCluster/hippo" - -To apply the link, please use `odo push` ----- - -And then run `odo push` to actually create the link on the Kubernetes cluster. - -After a successful `odo push`, you will see a few outcomes: - -. When you open the URL for the application deployed by backend component, it shows a list of `todo` items in the database. For example, in the output for the `odo url list` command, the path where `todos` are listed is included: -+ -[source,terminal] ----- -$ odo url list ----- -+ -.Sample output -[source,terminal] ----- -Found the following URLs for component backend -NAME STATE URL PORT SECURE KIND -8080-tcp Pushed http://8080-tcp.192.168.39.112.nip.io 8080 false ingress ----- -+ -The correct path for the URL would be \http://8080-tcp.192.168.39.112.nip.io/api/v1/todos. The exact URL depends on your setup. Also note that there are no `todos` in the database unless you add some, so the URL might just show an empty JSON object. - -. You can see binding information related to the Postgres service injected into the backend component. This binding information is injected, by default, as environment variables. 
You can check it using the `odo describe` command from the backend component's directory: -+ -[source,terminal] ----- -$ odo describe ----- -+ -.Example output: -[source,terminal] ----- -Component Name: backend -Type: spring -Environment Variables: - · PROJECTS_ROOT=/projects - · PROJECT_SOURCE=/projects - · DEBUG_PORT=5858 -Storage: - · m2 of size 3Gi mounted to /home/user/.m2 -URLs: - · http://8080-tcp.192.168.39.112.nip.io exposed via 8080 -Linked Services: - · PostgresCluster/hippo - Environment Variables: - · POSTGRESCLUSTER_PGBOUNCER-EMPTY - · POSTGRESCLUSTER_PGBOUNCER.INI - · POSTGRESCLUSTER_ROOT.CRT - · POSTGRESCLUSTER_VERIFIER - · POSTGRESCLUSTER_ID_ECDSA - · POSTGRESCLUSTER_PGBOUNCER-VERIFIER - · POSTGRESCLUSTER_TLS.CRT - · POSTGRESCLUSTER_PGBOUNCER-URI - · POSTGRESCLUSTER_PATRONI.CRT-COMBINED - · POSTGRESCLUSTER_USER - · pgImage - · pgVersion - · POSTGRESCLUSTER_CLUSTERIP - · POSTGRESCLUSTER_HOST - · POSTGRESCLUSTER_PGBACKREST_REPO.CONF - · POSTGRESCLUSTER_PGBOUNCER-USERS.TXT - · POSTGRESCLUSTER_SSH_CONFIG - · POSTGRESCLUSTER_TLS.KEY - · POSTGRESCLUSTER_CONFIG-HASH - · POSTGRESCLUSTER_PASSWORD - · POSTGRESCLUSTER_PATRONI.CA-ROOTS - · POSTGRESCLUSTER_DBNAME - · POSTGRESCLUSTER_PGBOUNCER-PASSWORD - · POSTGRESCLUSTER_SSHD_CONFIG - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.KEY - · POSTGRESCLUSTER_PGBACKREST_INSTANCE.CONF - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.CA-ROOTS - · POSTGRESCLUSTER_PGBOUNCER-HOST - · POSTGRESCLUSTER_PORT - · POSTGRESCLUSTER_ROOT.KEY - · POSTGRESCLUSTER_SSH_KNOWN_HOSTS - · POSTGRESCLUSTER_URI - · POSTGRESCLUSTER_PATRONI.YAML - · POSTGRESCLUSTER_DNS.CRT - · POSTGRESCLUSTER_DNS.KEY - · POSTGRESCLUSTER_ID_ECDSA.PUB - · POSTGRESCLUSTER_PGBOUNCER-FRONTEND.CRT - · POSTGRESCLUSTER_PGBOUNCER-PORT - · POSTGRESCLUSTER_CA.CRT ----- -+ -Some of these variables are used in the backend component's `src/main/resources/application.properties` file so that the Java Spring Boot application can connect to the PostgreSQL database service. - -. Lastly, `odo` has created a directory called `kubernetes/` in your backend component's directory that contains the following files: -+ -[source,terminal] ----- -$ ls kubernetes -odo-service-backend-postgrescluster-hippo.yaml odo-service-hippo.yaml ----- -+ -These files contain the information (YAML manifests) for two resources: - -.. `odo-service-hippo.yaml` - the Postgres _service_ created using `odo service create --from-file ../postgrescluster.yaml` command. -.. `odo-service-backend-postgrescluster-hippo.yaml` - the _link_ created using `odo link` command. - -=== Using odo link with the --inlined flag - -Using the `--inlined` flag with the `odo link` command has the same effect as an `odo link` command without the flag, in that it injects binding information. However, the subtle difference is that in the above case, there are two manifest files under `kubernetes/` directory, one for the Postgres service and another for the link between the backend component and this service. However, when you pass the `--inlined` flag, `odo` does not create a file under the `kubernetes/` directory to store the YAML manifest, but rather stores it inline in the `devfile.yaml` file. - -To see this, unlink the component from the PostgreSQL service first: - -[source,terminal] ----- -$ odo unlink PostgresCluster/hippo ----- - -.Example output: -[source,terminal] ----- - ✓ Successfully unlinked component "backend" from service "PostgresCluster/hippo" - -To apply the changes, please use `odo push` ----- - -To unlink them on the cluster, run `odo push`. 
Now if you inspect the `kubernetes/` directory, you see only one file: - -[source,terminal] ----- -$ ls kubernetes -odo-service-hippo.yaml ----- - -Next, use the `--inlined` flag to create a link: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --inlined ----- - -.Example output: -[source,terminal] ----- - ✓ Successfully created link between component "backend" and service "PostgresCluster/hippo" - -To apply the link, please use `odo push` ----- - -You need to run `odo push` for the link to get created on the cluster, like the procedure that omits the `--inlined` flag. `odo` stores the configuration in `devfile.yaml`. In this file, you can see an entry like the following: - -[source,yaml] ----- - kubernetes: - inlined: | - apiVersion: binding.operators.coreos.com/v1alpha1 - kind: ServiceBinding - metadata: - creationTimestamp: null - name: backend-postgrescluster-hippo - spec: - application: - group: apps - name: backend-app - resource: deployments - version: v1 - bindAsFiles: false - detectBindingResources: true - services: - - group: postgres-operator.crunchydata.com - id: hippo - kind: PostgresCluster - name: hippo - version: v1beta1 - status: - secret: "" - name: backend-postgrescluster-hippo ----- - -Now if you were to run `odo unlink PostgresCluster/hippo`, `odo` would first remove the link information from the `devfile.yaml`, and then a subsequent `odo push` would delete the link from the cluster. - -=== Custom bindings - -`odo link` accepts the flag `--map` which can inject custom binding information into the component. Such binding information will be fetched from the manifest of the resource that you are linking to your component. For example, in the context of the backend component and PostgreSQL service, you can inject information from the PostgreSQL service's manifest `postgrescluster.yaml` file into the backend component. - -If the name of your `PostgresCluster` service is `hippo` (or the output of `odo service list`, if your PostgresCluster service is named differently), when you want to inject the value of `postgresVersion` from that YAML definition into your backend component, run the command: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' ----- - -Note that, if the name of your Postgres service is different from `hippo`, you will have to specify that in the above command in the place of `.hippo` in the value for `pgVersion`. - -After a link operation, run `odo push` as usual. Upon successful completion of the push operation, you can run the following command from your backend component directory, to validate if the custom mapping got injected properly: - -[source,terminal] ----- -$ odo exec -- env | grep pgVersion ----- - -.Example output: -[source,terminal] ----- -pgVersion=13 ----- - -Since you might want to inject more than just one piece of custom binding information, `odo link` accepts multiple key-value pairs of mappings. The only constraint is that these should be specified as `--map <key>=<value>`. For example, if you want to also inject PostgreSQL image information along with the version, you could run: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' --map pgImage='{{ .hippo.spec.image }}' ----- - -and then run `odo push`. 
To validate that both mappings were injected correctly, run the following command: - -[source,terminal] ----- -$ odo exec -- env | grep -e "pgVersion\|pgImage" ----- - -.Example output: -[source,terminal] ----- -pgVersion=13 -pgImage=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 ----- - -==== To inline or not? - -You can accept the default behavior where `odo link` generates a manifest file for the link under the `kubernetes/` directory. Alternatively, you can use the `--inlined` flag if you prefer to store everything in a single `devfile.yaml` file. - -== Binding as files - -Another helpful flag that `odo link` provides is `--bind-as-files`. When this flag is passed, the binding information is not injected into the component's Pod as environment variables but is mounted as a filesystem. - -Ensure that there are no existing links between the backend component and the PostgreSQL service. You can do this by running `odo describe` in the backend component's directory and checking whether you see output similar to the following: - -[source,terminal] ----- -Linked Services: - · PostgresCluster/hippo ----- - -Unlink the service from the component by using: - -[source,terminal] ----- -$ odo unlink PostgresCluster/hippo -$ odo push ----- - -== --bind-as-files examples - -=== Using the default odo link - -By default, `odo` creates the manifest file under the `kubernetes/` directory for storing the link information. Link the backend component and PostgreSQL service by using: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --bind-as-files -$ odo push ----- - -.Example `odo describe` output: -[source,terminal] ----- -$ odo describe - -Component Name: backend -Type: spring -Environment Variables: - · PROJECTS_ROOT=/projects - · PROJECT_SOURCE=/projects - · DEBUG_PORT=5858 - · SERVICE_BINDING_ROOT=/bindings - · SERVICE_BINDING_ROOT=/bindings -Storage: - · m2 of size 3Gi mounted to /home/user/.m2 -URLs: - · http://8080-tcp.192.168.39.112.nip.io exposed via 8080 -Linked Services: - · PostgresCluster/hippo - Files: - · /bindings/backend-postgrescluster-hippo/pgbackrest_instance.conf - · /bindings/backend-postgrescluster-hippo/user - · /bindings/backend-postgrescluster-hippo/ssh_known_hosts - · /bindings/backend-postgrescluster-hippo/clusterIP - · /bindings/backend-postgrescluster-hippo/password - · /bindings/backend-postgrescluster-hippo/patroni.yaml - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.crt - · /bindings/backend-postgrescluster-hippo/pgbouncer-host - · /bindings/backend-postgrescluster-hippo/root.key - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.key - · /bindings/backend-postgrescluster-hippo/pgbouncer.ini - · /bindings/backend-postgrescluster-hippo/uri - · /bindings/backend-postgrescluster-hippo/config-hash - · /bindings/backend-postgrescluster-hippo/pgbouncer-empty - · /bindings/backend-postgrescluster-hippo/port - · /bindings/backend-postgrescluster-hippo/dns.crt - · /bindings/backend-postgrescluster-hippo/pgbouncer-uri - · /bindings/backend-postgrescluster-hippo/root.crt - · /bindings/backend-postgrescluster-hippo/ssh_config - · /bindings/backend-postgrescluster-hippo/dns.key - · /bindings/backend-postgrescluster-hippo/host - · /bindings/backend-postgrescluster-hippo/patroni.crt-combined - · /bindings/backend-postgrescluster-hippo/pgbouncer-frontend.ca-roots - · /bindings/backend-postgrescluster-hippo/tls.key - · /bindings/backend-postgrescluster-hippo/verifier - · /bindings/backend-postgrescluster-hippo/ca.crt - · 
/bindings/backend-postgrescluster-hippo/dbname - · /bindings/backend-postgrescluster-hippo/patroni.ca-roots - · /bindings/backend-postgrescluster-hippo/pgbackrest_repo.conf - · /bindings/backend-postgrescluster-hippo/pgbouncer-port - · /bindings/backend-postgrescluster-hippo/pgbouncer-verifier - · /bindings/backend-postgrescluster-hippo/id_ecdsa - · /bindings/backend-postgrescluster-hippo/id_ecdsa.pub - · /bindings/backend-postgrescluster-hippo/pgbouncer-password - · /bindings/backend-postgrescluster-hippo/pgbouncer-users.txt - · /bindings/backend-postgrescluster-hippo/sshd_config - · /bindings/backend-postgrescluster-hippo/tls.crt ----- - -Everything that was an environment variable in the `key=value` format in the earlier `odo describe` output is now mounted as a file. Use the `cat` command to view the contents of some of these files: - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/password ----- - -.Example output: -[source,terminal] ----- -q({JC:jn^mm/Bw}eu+j.GX{k ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/user ----- - -.Example output: -[source,terminal] ----- -hippo ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/clusterIP ----- - -.Example output: -[source,terminal] ----- -10.101.78.56 ----- - -=== Using `--inlined` - -The result of using `--bind-as-files` and `--inlined` together is similar to using `odo link --inlined`. The manifest of the link gets stored in the `devfile.yaml`, instead of being stored in a separate file under `kubernetes/` directory. Other than that, the `odo describe` output would be the same as earlier. - -=== Custom bindings - -When you pass custom bindings while linking the backend component with the PostgreSQL service, these custom bindings are injected not as environment variables but are mounted as files. For example: - -[source,terminal] ----- -$ odo link PostgresCluster/hippo --map pgVersion='{{ .hippo.spec.postgresVersion }}' --map pgImage='{{ .hippo.spec.image }}' --bind-as-files -$ odo push ----- - -These custom bindings get mounted as files instead of being injected as environment variables. To validate that this worked, run the following command: - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgVersion ----- - -.Example output: -[source,terminal] ----- -13 ----- - -.Example command: -[source,terminal] ----- -$ odo exec -- cat /bindings/backend-postgrescluster-hippo/pgImage ----- - -.Example output: -[source,terminal] ----- -registry.developers.crunchydata.com/crunchydata/crunchy-postgres-ha:centos8-13.4-0 ----- diff --git a/modules/developer-cli-odo-ref-registry.adoc b/modules/developer-cli-odo-ref-registry.adoc deleted file mode 100644 index abc4734bc63d..000000000000 --- a/modules/developer-cli-odo-ref-registry.adoc +++ /dev/null @@ -1,92 +0,0 @@ -:_content-type: REFERENCE -[id="odo-registry_{context}"] -= odo registry - - -`odo` uses the portable _devfile_ format to describe the components. `odo` can connect to various devfile registries, to download devfiles for different languages and frameworks. - -You can connect to publicly available devfile registries, or you can install your own _Secure Registry_. - -You can use the `odo registry` command to manage the registries that are used by `odo` to retrieve devfile information. 
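For orientation, a typical workflow (a minimal sketch; the component type and name are examples only) is to check which registries are configured and then create a component whose devfile `odo` downloads from one of those registries:

[source,terminal]
----
$ odo registry list
$ odo create nodejs mynode
----

The following sections describe each `odo registry` operation in detail.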
- -== Listing the registries - -To list the registries currently contacted by `odo`, run the command: - -[source,terminal] ----- -$ odo registry list ----- - -.Example output: -[source,terminal] ----- -NAME URL SECURE -DefaultDevfileRegistry https://registry.devfile.io No ----- - - -`DefaultDevfileRegistry` is the default registry used by odo; it is provided by the https://devfile.io[devfile.io] project. - -== Adding a registry - -To add a registry, run the command: - -[source,terminal] ----- -$ odo registry add ----- - -.Example output: -[source,terminal] ----- -$ odo registry add StageRegistry https://registry.stage.devfile.io -New registry successfully added ----- - - -If you are deploying your own Secure Registry, you can specify the personal access token to authenticate to the secure registry with the `--token` flag: - -[source,terminal] ----- -$ odo registry add MyRegistry https://myregistry.example.com --token <access_token> -New registry successfully added ----- - -== Deleting a registry - -To delete a registry, run the command: - -[source,terminal] ----- -$ odo registry delete ----- - -.Example output: -[source,terminal] ----- -$ odo registry delete StageRegistry -? Are you sure you want to delete registry "StageRegistry" Yes -Successfully deleted registry ----- - -Use the `--force` (or `-f`) flag to force the deletion of the registry without confirmation. - -== Updating a registry - -To update the URL or the personal access token of a registry already registered, run the command: - -[source,terminal] ----- -$ odo registry update ----- - -.Example output: -[source,terminal] ----- - $ odo registry update MyRegistry https://otherregistry.example.com --token <other_access_token> - ? Are you sure you want to update registry "MyRegistry" Yes - Successfully updated registry ----- - -Use the `--force` (or `-f`) flag to force the update of the registry without confirmation. diff --git a/modules/developer-cli-odo-ref-service.adoc b/modules/developer-cli-odo-ref-service.adoc deleted file mode 100644 index e75982c528f8..000000000000 --- a/modules/developer-cli-odo-ref-service.adoc +++ /dev/null @@ -1,298 +0,0 @@ -:_content-type: REFERENCE -[id="odo-service_{context}"] -= odo service - -`odo` can deploy _services_ with the help of _Operators_. - -The list of available Operators and services available for installation can be found using the `odo catalog` command. - -Services are created in the context of a _component_, so run the `odo create` command before you deploy services. - -A service is deployed using two steps: - -. Define the service and store its definition in the devfile. -. Deploy the defined service to the cluster, using the `odo push` command. - -== Creating a new service - -To create a new service, run the command: - -[source,terminal] ----- -$ odo service create ----- - -For example, to create an instance of a Redis service named `my-redis-service`, you can run the following command: - -.Example output -[source,terminal] ----- -$ odo catalog list services -Services available through Operators -NAME CRDs -redis-operator.v0.8.0 RedisCluster, Redis - -$ odo service create redis-operator.v0.8.0/Redis my-redis-service -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -This command creates a Kubernetes manifest in the `kubernetes/` directory, containing the definition of the service, and this file is referenced from the `devfile.yaml` file. 
- -[source,terminal] ----- -$ cat kubernetes/odo-service-my-redis-service.yaml ----- - -.Example output -[source,yaml] ----- - apiVersion: redis.redis.opstreelabs.in/v1beta1 - kind: Redis - metadata: - name: my-redis-service - spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 101m - memory: 128Mi - requests: - cpu: 101m - memory: 128Mi - serviceType: ClusterIP - redisExporter: - enabled: false - image: quay.io/opstree/redis-exporter:1.0 - storage: - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ----- - -.Example command -[source,terminal] ----- -$ cat devfile.yaml ----- - -.Example output -[source,yaml] ----- -[...] -components: -- kubernetes: - uri: kubernetes/odo-service-my-redis-service.yaml - name: my-redis-service -[...] ----- - - -Note that the name of the created instance is optional. If you do not provide a name, it will be the lowercase name of the service. For example, the following command creates an instance of a Redis service named `redis`: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis ----- - -=== Inlining the manifest - -By default, a new manifest is created in the `kubernetes/` directory, referenced from the `devfile.yaml` file. It is possible to inline the manifest inside the `devfile.yaml` file using the `--inlined` flag: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis my-redis-service --inlined -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - - -.Example command -[source,terminal] ----- -$ cat devfile.yaml ----- - -.Example output -[source,yaml] ----- -[...] -components: -- kubernetes: - inlined: | - apiVersion: redis.redis.opstreelabs.in/v1beta1 - kind: Redis - metadata: - name: my-redis-service - spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 101m - memory: 128Mi - requests: - cpu: 101m - memory: 128Mi - serviceType: ClusterIP - redisExporter: - enabled: false - image: quay.io/opstree/redis-exporter:1.0 - storage: - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - name: my-redis-service -[...] ----- - -=== Configuring the service - -Without specific customization, the service will be created with a default configuration. You can use either command-line arguments or a file to specify your own configuration. - -==== Using command-line arguments - -Use the `--parameters` (or `-p`) flag to specify your own configuration. 
- -The following example configures the Redis service with three parameters: - -[source,terminal] ----- -$ odo service create redis-operator.v0.8.0/Redis my-redis-service \ - -p kubernetesConfig.image=quay.io/opstree/redis:v6.2.5 \ - -p kubernetesConfig.serviceType=ClusterIP \ - -p redisExporter.image=quay.io/opstree/redis-exporter:1.0 -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -.Example command -[source,terminal] ----- -$ cat kubernetes/odo-service-my-redis-service.yaml ----- - -.Example output -[source,yaml] ----- -apiVersion: redis.redis.opstreelabs.in/v1beta1 -kind: Redis -metadata: - name: my-redis-service -spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - serviceType: ClusterIP - redisExporter: - image: quay.io/opstree/redis-exporter:1.0 ----- - -You can obtain the possible parameters for a specific service using the `odo catalog describe service` command. - -==== Using a file - -Use a YAML manifest to configure your own specification. In the following example, the Redis service is configured with three parameters. - -. Create a manifest: -+ -[source,terminal] ----- -$ cat > my-redis.yaml <<EOF -apiVersion: redis.redis.opstreelabs.in/v1beta1 -kind: Redis -metadata: - name: my-redis-service -spec: - kubernetesConfig: - image: quay.io/opstree/redis:v6.2.5 - serviceType: ClusterIP - redisExporter: - image: quay.io/opstree/redis-exporter:1.0 -EOF ----- - -. Create the service from the manifest: -+ -[source,terminal] ----- -$ odo service create --from-file my-redis.yaml -Successfully added service to the configuration; do 'odo push' to create service on the cluster ----- - -== Deleting a service - -To delete a service, run the command: - -[source,terminal] ----- -$ odo service delete ----- - -.Example output -[source,terminal] ----- -$ odo service list -NAME MANAGED BY ODO STATE AGE -Redis/my-redis-service Yes (api) Deleted locally 5m39s ----- - -[source,terminal] ----- -$ odo service delete Redis/my-redis-service -? Are you sure you want to delete Redis/my-redis-service Yes -Service "Redis/my-redis-service" has been successfully deleted; do 'odo push' to delete service from the cluster ----- - -Use the `--force` (or `-f`) flag to force the deletion of the service without confirmation. - -== Listing services - -To list the services created for your component, run the command: - -[source,terminal] ----- -$ odo service list ----- - -.Example output -[source,terminal] ----- -$ odo service list -NAME MANAGED BY ODO STATE AGE -Redis/my-redis-service-1 Yes (api) Not pushed -Redis/my-redis-service-2 Yes (api) Pushed 52s -Redis/my-redis-service-3 Yes (api) Deleted locally 1m22s ----- - -For each service, `STATE` indicates if the service has been pushed to the cluster using the `odo push` command, or if the service is still running on the cluster but removed from the devfile locally using the `odo service delete` command. 
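To reconcile this local state with the cluster, run `odo push`: services in the `Not pushed` state are created on the cluster, and services in the `Deleted locally` state are removed from it. A minimal sketch (the listing is illustrative and abbreviated):

[source,terminal]
----
$ odo push
$ odo service list
NAME                       MANAGED BY ODO     STATE      AGE
Redis/my-redis-service-1   Yes (api)          Pushed     30s
Redis/my-redis-service-2   Yes (api)          Pushed     2m30s
----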
- -== Getting information about a service - -To get details of a service such as its kind, version, name, and list of configured parameters, run the command: - -[source,terminal] ----- -$ odo service describe ----- - -.Example output -[source,terminal] ----- -$ odo service describe Redis/my-redis-service -Version: redis.redis.opstreelabs.in/v1beta1 -Kind: Redis -Name: my-redis-service -Parameters: -NAME VALUE -kubernetesConfig.image quay.io/opstree/redis:v6.2.5 -kubernetesConfig.serviceType ClusterIP -redisExporter.image quay.io/opstree/redis-exporter:1.0 ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-ref-storage.adoc b/modules/developer-cli-odo-ref-storage.adoc deleted file mode 100644 index 467337948839..000000000000 --- a/modules/developer-cli-odo-ref-storage.adoc +++ /dev/null @@ -1,123 +0,0 @@ -:_content-type: REFERENCE -[id="odo-storage_{context}"] -= odo storage - - -`odo` lets users manage storage volumes that are attached to the components. A storage volume can be either an ephemeral volume using an `emptyDir` Kubernetes volume, or a link:https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim[Persistent Volume Claim] (PVC). A PVC allows users to claim a persistent volume (such as a GCE PersistentDisk or an iSCSI volume) without understanding the details of the particular cloud environment. The persistent storage volume can be used to persist data across restarts and rebuilds of the component. - -== Adding a storage volume - -To add a storage volume to the cluster, run the command: - -[source,terminal] ----- -$ odo storage create ----- - -.Example output: -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi -✓ Added storage store to nodejs-project-ufyy - -$ odo storage create tempdir --path /tmp --size 2Gi --ephemeral -✓ Added storage tempdir to nodejs-project-ufyy - -Please use `odo push` command to make the storage accessible to the component ----- - - -In the above example, the first storage volume has been mounted to the `/data` path and has a size of `1Gi`, and the second volume has been mounted to `/tmp` and is ephemeral. - -== Listing the storage volumes - -To check the storage volumes currently used by the component, run the command: - -[source,terminal] ----- -$ odo storage list ----- - -.Example output: -[source,terminal] ----- -$ odo storage list -The component 'nodejs-project-ufyy' has the following storage attached: -NAME SIZE PATH STATE -store 1Gi /data Not Pushed -tempdir 2Gi /tmp Not Pushed ----- - -== Deleting a storage volume - -To delete a storage volume, run the command: - -[source,terminal] ----- -$ odo storage delete ----- - -.Example output: -[source,terminal] ----- -$ odo storage delete store -f -Deleted storage store from nodejs-project-ufyy - -Please use `odo push` command to delete the storage from the cluster ----- - -In the above example, using the `-f` flag force deletes the storage without asking user permission. - -== Adding storage to specific container - -If your devfile has multiple containers, you can specify which container you want the storage to attach to, using the `--container` flag in the `odo storage create` command. 
- -The following example is an excerpt from a devfile with multiple containers : - -[source,yaml] ----- -components: - - name: nodejs1 - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi - endpoints: - - name: "3000-tcp" - targetPort: 3000 - mountSources: true - - name: nodejs2 - container: - image: registry.access.redhat.com/ubi8/nodejs-12:1-36 - memoryLimit: 1024Mi ----- - -In the example, there are two containers,`nodejs1` and `nodejs2`. To attach storage to the `nodejs2` container, use the following command: - -[source,terminal] ----- -$ odo storage create --container ----- - -.Example output: -[source,terminal] ----- -$ odo storage create store --path /data --size 1Gi --container nodejs2 -✓ Added storage store to nodejs-testing-xnfg - -Please use `odo push` command to make the storage accessible to the component ----- - -You can list the storage resources, using the `odo storage list` command: - -[source,terminal] ----- -$ odo storage list ----- - -.Example output: -[source,terminal] ----- -The component 'nodejs-testing-xnfg' has the following storage attached: -NAME SIZE PATH CONTAINER STATE -store 1Gi /data nodejs2 Not Pushed ----- \ No newline at end of file diff --git a/modules/developer-cli-odo-sample-applications-binary.adoc b/modules/developer-cli-odo-sample-applications-binary.adoc deleted file mode 100644 index b921553ac80f..000000000000 --- a/modules/developer-cli-odo-sample-applications-binary.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-binary_{context}"] -= Binary example applications - -Use the following commands to build and run sample applications from a binary file for a particular runtime. - -[id="odo-sample-applications-binary-java_{context}"] -== java - -Java can be used to deploy a binary artifact as follows: - -[source,terminal] ----- -$ git clone https://github.com/spring-projects/spring-petclinic.git -$ cd spring-petclinic -$ mvn package -$ odo create java test3 --binary target/*.jar -$ odo push ----- - - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-binary-wildfly_{context}"] -== wildfly - -WildFly can be used to deploy a binary application as follows: - -[source,terminal] ----- -$ git clone https://github.com/openshiftdemos/os-sample-java-web.git -$ cd os-sample-java-web -$ mvn package -$ cd .. -$ mkdir example && cd example -$ mv ../os-sample-java-web/target/ROOT.war example.war -$ odo create wildfly --binary example.war ----- -//// diff --git a/modules/developer-cli-odo-sample-applications-git.adoc b/modules/developer-cli-odo-sample-applications-git.adoc deleted file mode 100644 index f2bc7361ed8e..000000000000 --- a/modules/developer-cli-odo-sample-applications-git.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-github_{context}"] -= Git repository example applications - -Use the following commands to build and run sample applications from a Git repository for a particular runtime. - -[id="odo-sample-applications-github-httpd_{context}"] -== httpd - -This example helps build and serve static content using httpd on CentOS 7. 
For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository]. - -[source,terminal] ----- -$ odo create httpd --git https://github.com/openshift/httpd-ex.git ----- - -[id="odo-sample-applications-github-java_{context}"] -== java - -This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image]. - -[source,terminal] ----- -$ odo create java --git https://github.com/spring-projects/spring-petclinic.git ----- - -[id="odo-sample-applications-github-nodejs_{context}"] -== nodejs - -Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image]. - -[source,terminal] ----- -$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git ----- - -[id="odo-sample-applications-github-perl_{context}"] -== perl - -This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image]. - -[source,terminal] ----- -$ odo create perl --git https://github.com/openshift/dancer-ex.git ----- - -[id="odo-sample-applications-github-php_{context}"] -== php - -This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image]. - -[source,terminal] ----- -$ odo create php --git https://github.com/openshift/cakephp-ex.git ----- - -[id="odo-sample-applications-github-python_{context}"] -== python - -This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image]. - -[source,terminal] ----- -$ odo create python --git https://github.com/openshift/django-ex.git ----- - -[id="odo-sample-applications-github-ruby_{context}"] -== ruby - -This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image]. - -[source,terminal] ----- -$ odo create ruby --git https://github.com/openshift/ruby-ex.git ----- - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-github-wildfly_{context}"] -== wildfly - -This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift]. 
- -[source,terminal] ----- -$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git ----- -//// diff --git a/modules/developer-cli-odo-sample-applications-github.adoc b/modules/developer-cli-odo-sample-applications-github.adoc deleted file mode 100644 index 5b809f0af981..000000000000 --- a/modules/developer-cli-odo-sample-applications-github.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/using-sample-applications.adoc - -[id="odo-sample-applications-github_{context}"] -= Git repository sample application examples - -Use the following commands to build and run sample applications from a Git repository for a particular runtime. - -[id="odo-sample-applications-github-httpd_{context}"] -== httpd - -This example helps build and serve static content using httpd on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository]. - -[source,terminal] ----- -$ odo create httpd --git https://github.com/openshift/httpd-ex.git ----- - -[id="odo-sample-applications-github-java_{context}"] -== java - -This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image]. - -[source,terminal] ----- -$ odo create java --git https://github.com/spring-projects/spring-petclinic.git ----- - -[id="odo-sample-applications-github-nodejs_{context}"] -== nodejs - -Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image]. - -[source,terminal] ----- -$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git ----- - -[id="odo-sample-applications-github-perl_{context}"] -== perl - -This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image]. - -[source,terminal] ----- -$ odo create perl --git https://github.com/openshift/dancer-ex.git ----- - -[id="odo-sample-applications-github-php_{context}"] -== php - -This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image]. - -[source,terminal] ----- -$ odo create php --git https://github.com/openshift/cakephp-ex.git ----- - -[id="odo-sample-applications-github-python_{context}"] -== python - -This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image]. 
- -[source,terminal] ----- -$ odo create python --git https://github.com/openshift/django-ex.git ----- - -[id="odo-sample-applications-github-ruby_{context}"] -== ruby - -This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image]. - -[source,terminal] ----- -$ odo create ruby --git https://github.com/openshift/ruby-ex.git ----- - -//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623 -//// -[id="odo-sample-applications-github-wildfly_{context}"] -== wildfly - -This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift]. - -[source,terminal] ----- -$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git ----- -//// diff --git a/modules/developer-cli-odo-set-config.adoc b/modules/developer-cli-odo-set-config.adoc deleted file mode 100644 index b946c88b6d88..000000000000 --- a/modules/developer-cli-odo-set-config.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-set-config_{context}"] -= Setting a value - -You can set a value for a preference key by using the following command: - -[source,terminal] ----- -$ odo preference set <key> <value> ----- - -[NOTE] -==== -Preference keys are case-insensitive. -==== - -.Example command -[source,terminal] ----- -$ odo preference set updatenotification false ----- - -.Example output -[source,terminal] ----- -Global preference was successfully updated ----- diff --git a/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc b/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc deleted file mode 100644 index f386929b35d5..000000000000 --- a/modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc - -:_content-type: PROCEDURE -[id="setting-and-unsetting-environment-variables._{context}"] - -= Setting and unsetting environment variables - -.Procedure - -* To set an environment variable in a component: -+ -[source,terminal] ----- -$ odo config set --env <variable>=<value> ----- - -* To unset an environment variable in a component: -+ -[source,terminal] ----- -$ odo config unset --env <variable> ----- - -* To list all environment variables in a component: -+ -[source,terminal] ----- -$ odo config view ----- diff --git a/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc b/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc deleted file mode 100644 index a412f1509437..000000000000 --- a/modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// *cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc - -:_content-type: PROCEDURE 
-[id="switching-between-ephemeral-and-persistent-storage_{context}"] -= Switching between ephemeral and persistent storage - -You can switch between ephemeral and persistent storage in your project by using the `odo preference` command. `odo preference` modifies the global preference in your cluster. - -When persistent storage is enabled, the cluster stores the information between the restarts. - -When ephemeral storage is enabled, the cluster does not store the information between the restarts. - -Ephemeral storage is enabled by default. - -.Procedure - -. See the preference currently set in your project: -+ -[source,terminal] ----- -$ odo preference view ----- -+ -.Example output -+ -[source,terminal] ----- -PARAMETER CURRENT_VALUE -UpdateNotification -NamePrefix -Timeout -BuildTimeout -PushTimeout -Experimental -PushTarget -Ephemeral true ----- - -. To unset the ephemeral storage and set the persistent storage: -+ -[source,terminal] ----- -$ odo preference set Ephemeral false ----- - -. To set the ephemeral storage again: -+ -[source,terminal] ----- -$ odo preference set Ephemeral true ----- -+ -The `odo preference` command changes the global settings of all your currently deployed components as well as ones you will deploy in future. - -. Run `odo push` to make `odo` create a specified storage for your component: -+ -[source,terminal] ----- -$ odo push ----- diff --git a/modules/developer-cli-odo-unset-config.adoc b/modules/developer-cli-odo-unset-config.adoc deleted file mode 100644 index a18da10de1ce..000000000000 --- a/modules/developer-cli-odo-unset-config.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-unset-config_{context}"] -= Unsetting a value - -You can unset a value for a preference key by using the following command: - -[source,terminal] ----- -$ odo preference unset <key> ----- - -[NOTE] -==== -You can use the `-f` flag to skip the confirmation. -==== - -.Example command -[source,terminal] ----- -$ odo preference unset updatenotification -? Do you want to unset updatenotification in the preference (y/N) y ----- - -.Example output -[source,terminal] ----- -Global preference was successfully updated ----- diff --git a/modules/developer-cli-odo-using-command-completion.adoc b/modules/developer-cli-odo-using-command-completion.adoc deleted file mode 100644 index b17ad00196d3..000000000000 --- a/modules/developer-cli-odo-using-command-completion.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: PROCEDURE -[id="using-command-completion_{context}"] -= Using command completion - -[NOTE] -==== -Currently command completion is only supported for bash, zsh, and fish shells. -==== - -{odo-title} provides a smart completion of command parameters based on user input. For this to work, {odo-title} needs to integrate with the executing shell. - -.Procedure - -* To install command completion automatically: -. Run: -+ -[source,terminal] ----- -$ odo --complete ----- -+ -. Press `y` when prompted to install the completion hook. - -* To install the completion hook manually, add `complete -o nospace -C <full_path_to_your_odo_binary> odo` to your shell configuration file. After any modification to your shell configuration file, restart your shell. - -* To disable completion: -. 
Run: -+ -[source,terminal] ----- -$ odo --uncomplete ----- -+ -. Press `y` when prompted to uninstall the completion hook. - -[NOTE] -==== -Re-enable command completion if you either rename the {odo-title} executable or move it to a different directory. -==== diff --git a/modules/developer-cli-odo-view-config.adoc b/modules/developer-cli-odo-view-config.adoc deleted file mode 100644 index d8cd5ef914ec..000000000000 --- a/modules/developer-cli-odo-view-config.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc - -:_content-type: REFERENCE -[id="developer-cli-odo-view-config_{context}"] -= Viewing the current configuration - -You can view the current `odo` CLI configuration by using the following command: - -[source,terminal] ----- -$ odo preference view ----- - -.Example output -[source,terminal] ----- -PARAMETER CURRENT_VALUE -UpdateNotification -NamePrefix -Timeout -BuildTimeout -PushTimeout -Ephemeral -ConsentTelemetry true ----- diff --git a/modules/differences-between-machinesets-and-machineconfigpool.adoc b/modules/differences-between-machinesets-and-machineconfigpool.adoc deleted file mode 100644 index be0e2f35297e..000000000000 --- a/modules/differences-between-machinesets-and-machineconfigpool.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/node-tasks.adoc -// * post_installation_configuration/cluster-tasks.adoc - - -:_content-type: CONCEPT -[id="differences-between-machinesets-and-machineconfigpool_{context}"] -= Understanding the difference between compute machine sets and the machine config pool - -`MachineSet` objects describe {product-title} nodes with respect to the cloud or machine provider. - -The `MachineConfigPool` object allows `MachineConfigController` components to define and provide the status of machines in the context of upgrades. - -The `MachineConfigPool` object allows users to configure how upgrades are rolled out to the {product-title} nodes in the machine config pool. - -The `NodeSelector` object can be replaced with a reference to the `MachineSet` object. diff --git a/modules/digging-into-machine-config.adoc b/modules/digging-into-machine-config.adoc deleted file mode 100644 index 7daead328690..000000000000 --- a/modules/digging-into-machine-config.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture_rhcos.adoc - -[id="digging-into-machine-config_{context}"] -= Changing Ignition configs after installation - -Machine config pools manage a cluster of nodes and their corresponding machine -configs. Machine configs contain configuration information for a cluster. 
-To list all machine config pools that are known: - -[source,terminal] ----- -$ oc get machineconfigpools ----- - -.Example output -[source,terminal] ----- -NAME   CONFIG                                  UPDATED UPDATING DEGRADED -master master-1638c1aea398413bb918e76632f20799 False   False    False -worker worker-2feef4f8288936489a5a832ca8efe953 False   False    False ----- - -To list all machine configs: - -[source,terminal] ----- -$ oc get machineconfig ----- - -.Example output -[source,terminal] ----- -NAME                                      GENERATEDBYCONTROLLER   IGNITIONVERSION   CREATED   OSIMAGEURL - -00-master                                 4.0.0-0.150.0.0-dirty   3.2.0             16m -00-master-ssh                             4.0.0-0.150.0.0-dirty                     16m -00-worker                                 4.0.0-0.150.0.0-dirty   3.2.0             16m -00-worker-ssh                             4.0.0-0.150.0.0-dirty                     16m -01-master-kubelet                         4.0.0-0.150.0.0-dirty   3.2.0             16m -01-worker-kubelet                         4.0.0-0.150.0.0-dirty   3.2.0             16m -master-1638c1aea398413bb918e76632f20799   4.0.0-0.150.0.0-dirty   3.2.0             16m -worker-2feef4f8288936489a5a832ca8efe953   4.0.0-0.150.0.0-dirty   3.2.0             16m ----- - -The Machine Config Operator acts somewhat differently than Ignition when it -comes to applying these machine configs. The machine configs are read in order -(from 00* to 99*). Labels inside the machine configs identify the type of node -each is for (master or worker). If the same file appears in multiple -machine config files, the last one wins. So, for example, any file that appears -in a 99* file would replace the same file that appeared in a 00* file. -The input `MachineConfig` objects are unioned into a "rendered" `MachineConfig` -object, which will be used as a target by the operator and is the value you -can see in the machine config pool. - -To see what files are being managed from a machine config, look for "Path:" -inside a particular `MachineConfig` object. For example: - -[source,terminal] ----- -$ oc describe machineconfigs 01-worker-container-runtime | grep Path: ----- - -.Example output -[source,terminal] ----- -            Path:            /etc/containers/registries.conf -            Path:            /etc/containers/storage.conf -            Path:            /etc/crio/crio.conf ----- - -Be sure to give the machine config file a later name -(such as 10-worker-container-runtime). Keep in mind that the content of each -file is in URL-style data. Then apply the new machine config to the cluster. diff --git a/modules/disable-quickstarts-admin-console.adoc b/modules/disable-quickstarts-admin-console.adoc deleted file mode 100644 index 8131dea0c0c0..000000000000 --- a/modules/disable-quickstarts-admin-console.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/configuring-web-console.adoc - -[id="disable-quickstarts-admin-console_{context}"] -= Disabling quick starts in the web console - -You can use the *Administrator* perspective of the web console to disable one or more quick starts. - -.Prerequisites - -* You have cluster administrator permissions and are logged in to the web console. - -include::snippets/access-cluster-configuration-console.adoc[] - -. 
On the *General* tab, in the *Quick starts* section, you can select items in either the *Enabled* or *Disabled* list, and move them from one list to the other by using the arrow buttons. - -** To enable or disable a single quick start, click the quick start, then use the single arrow buttons to move the quick start to the appropriate list. -** To enable or disable multiple quick starts at once, press Ctrl and click the quick starts you want to move. Then, use the single arrow buttons to move the quick starts to the appropriate list. -** To enable or disable all quick starts at once, click the double arrow buttons to move all of the quick starts to the appropriate list. diff --git a/modules/disabling-etcd-encryption.adoc b/modules/disabling-etcd-encryption.adoc deleted file mode 100644 index 6f5d1ec95546..000000000000 --- a/modules/disabling-etcd-encryption.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="disabling-etcd-encryption_{context}"] -= Disabling etcd encryption - -You can disable encryption of etcd data in your cluster. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Modify the `APIServer` object: -+ -[source,terminal] ----- -$ oc edit apiserver ----- - -. Set the `encryption` field type to `identity`: -+ -[source,yaml] ----- -spec: - encryption: - type: identity <1> ----- -<1> The `identity` type is the default value and means that no encryption is performed. - -. Save the file to apply the changes. -+ -The decryption process starts. It can take 20 minutes or longer for this process to complete, depending on the size of your cluster. - -. Verify that etcd decryption was successful. - -.. Review the `Encrypted` status condition for the OpenShift API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the Kubernetes API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. - -.. 
Review the `Encrypted` status condition for the OpenShift OAuth API server to verify that its resources were successfully decrypted: -+ -[source,terminal] ----- -$ oc get authentication.operator.openshift.io -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `DecryptionCompleted` upon successful decryption: -+ -[source,terminal] ----- -DecryptionCompleted -Encryption mode set to identity and everything is decrypted ----- -+ -If the output shows `DecryptionInProgress`, decryption is still in progress. Wait a few minutes and try again. diff --git a/modules/disabling-insights-advisor-recommendations.adoc b/modules/disabling-insights-advisor-recommendations.adoc deleted file mode 100644 index 2b0d256497fb..000000000000 --- a/modules/disabling-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="disabling-insights-advisor-recommendations_{context}"] -= Disabling Insights Advisor recommendations - -You can disable specific recommendations that affect your clusters, so that they no longer appear in your reports. It is possible to disable a recommendation for a single cluster or all of your clusters. - -[NOTE] -==== -Disabling a recommendation for all of your clusters also applies to any future clusters. -==== - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is registered on {cluster-manager-url}. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Click the name of the recommendation to disable. You are directed to the single recommendation page. -. To disable the recommendation for a single cluster: -.. Click the *Options* menu {kebab} for that cluster, and then click *Disable recommendation for cluster*. -.. Enter a justification note and click *Save*. -. To disable the recommendation for all of your clusters: -.. Click *Actions* -> *Disable recommendation*. -.. Enter a justification note and click *Save*. diff --git a/modules/disabling-insights-operator-alerts.adoc b/modules/disabling-insights-operator-alerts.adoc deleted file mode 100644 index 564ecd965a48..000000000000 --- a/modules/disabling-insights-operator-alerts.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: CONCEPT -[id="disabling-insights-operator-alerts_{context}"] -= Disabling Insights Operator alerts - -You can stop Insights Operator from firing alerts to the cluster Prometheus instance. - -. Navigate to *Workloads* -> *Secrets*. -. On the *Secrets* page, select *All Projects* from the *Project* list, and then set *Show default projects* to on. -. Select the *openshift-config* project from the *Projects* list. -. Search for the *support* secret using the *Search by name* field. If the secret does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Enter `disableInsightsAlerts` as the key with the value `True`, and click *Save*. 
- -After you save the changes, Insights Operator will no longer send alerts to the cluster Prometheus instance. diff --git a/modules/disabling-insights-operator-gather.adoc b/modules/disabling-insights-operator-gather.adoc deleted file mode 100644 index abfc419f6e0a..000000000000 --- a/modules/disabling-insights-operator-gather.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: PROCEDURE -[id="disabling-insights-operator-gather_{context}"] -= Disabling the Insights Operator gather operations - -You can disable the Insights Operator gather operations. Disabling the gather operations gives you the ability to increase privacy for your organization as Insights Operator will no longer gather and send Insights cluster reports to Red Hat. This will disable Insights analysis and recommendations for your cluster without affecting other core functions that require communication with Red Hat such as cluster transfers. You can view a list of attempted gather operations for your cluster from the `/insights-operator/gathers.json` file in your Insights Operator archive. Be aware that some gather operations only occur when certain conditions are met and might not appear in your most recent archive. - -:FeatureName: The `InsightsDataGather` custom resource -include::snippets/technology-preview.adoc[] - -.Prerequisites - -* You are logged in to the {product-title} web console as a user with `cluster-admin` role. - -.Procedure - -. Navigate to *Administration* -> *CustomResourceDefinitions*. -. On the *CustomResourceDefinitions* page, use the *Search by name* field to find the *InsightsDataGather* resource definition and click it. -. On the *CustomResourceDefinition details* page, click the *Instances* tab. -. Click *cluster*, and then click the *YAML* tab. -. To disable all the gather operations, edit the `InsightsDataGather` configuration file: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1alpha1 -kind: InsightsDataGather -metadata: -.... - -spec: <1> - gatherConfig: - disabledGatherers: - - all <2> ----- -+ --- -<1> The `spec` parameter specifies gather configurations. -<2> The `all` value disables all gather operations. --- -To disable individual gather operations, enter their values under the `disabledGatherers` key: -+ -[source,yaml] ----- -spec: - gatherConfig: - disabledGatherers: - - clusterconfig/container_images <1> - - clusterconfig/host_subnets - - workloads/workload_info ----- -+ --- -<1> Example individual gather operation --- -+ -. Click *Save*. -+ -After you save the changes, the Insights Operator gather configurations are updated and the operations will no longer occur. - -[NOTE] -==== -Disabling gather operations degrades Insights Advisor's ability to offer effective recommendations for your cluster. -==== diff --git a/modules/disabling-plug-in-browser.adoc b/modules/disabling-plug-in-browser.adoc deleted file mode 100644 index 465c1b1a8626..000000000000 --- a/modules/disabling-plug-in-browser.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/deploy-plugin-cluster.adoc - -:_content-type: PROCEDURE -[id="disabling-your-plugin-browser_{context}"] -= Disabling your plugin in the browser - -Console users can use the `disable-plugins` query parameter to disable specific or all dynamic plugins that would normally get loaded at run-time. 
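For illustration (the host is a placeholder and the plugin names are hypothetical), the query parameter is appended to a console URL. The first form leaves the value empty; the second supplies a comma-separated list of plugin names:

[source,text]
----
https://<console_url>/?disable-plugins=
https://<console_url>/?disable-plugins=plugin-a,plugin-b
----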
- -.Procedure - -* To disable a specific plugin(s), remove the plugin you want to disable from the comma-separated list of plugin names. - -* To disable all plugins, leave an empty string in the `disable-plugins` query parameter. - -[NOTE] -==== -Cluster administrators can disable plugins in the *Cluster Settings* page of the web console -==== diff --git a/modules/disabling-project-self-provisioning.adoc b/modules/disabling-project-self-provisioning.adoc deleted file mode 100644 index 662545ba8532..000000000000 --- a/modules/disabling-project-self-provisioning.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc - -:_content-type: PROCEDURE -[id="disabling-project-self-provisioning_{context}"] -= Disabling project self-provisioning - -You can prevent an authenticated user group from self-provisioning new projects. - -.Procedure - -. Log in as a user with `cluster-admin` privileges. - -. View the `self-provisioners` cluster role binding usage by running the following command: -+ -[source,terminal] ----- -$ oc describe clusterrolebinding.rbac self-provisioners ----- -+ -.Example output -[source,terminal] ----- -Name: self-provisioners -Labels: <none> -Annotations: rbac.authorization.kubernetes.io/autoupdate=true -Role: - Kind: ClusterRole - Name: self-provisioner -Subjects: - Kind Name Namespace - ---- ---- --------- - Group system:authenticated:oauth ----- -+ -Review the subjects in the `self-provisioners` section. - -. Remove the `self-provisioner` cluster role from the group `system:authenticated:oauth`. - -** If the `self-provisioners` cluster role binding binds only the `self-provisioner` role to the `system:authenticated:oauth` group, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterrolebinding.rbac self-provisioners -p '{"subjects": null}' ----- - -** If the `self-provisioners` cluster role binding binds the `self-provisioner` role to more users, groups, or service accounts than the `system:authenticated:oauth` group, run the following command: -+ -[source,terminal] ----- -$ oc adm policy \ - remove-cluster-role-from-group self-provisioner \ - system:authenticated:oauth ----- - -. Edit the `self-provisioners` cluster role binding to prevent automatic updates to the role. Automatic updates reset the cluster roles to the default state. - -** To update the role binding using the CLI: - -... Run the following command: -+ -[source,terminal] ----- -$ oc edit clusterrolebinding.rbac self-provisioners ----- - -... In the displayed role binding, set the `rbac.authorization.kubernetes.io/autoupdate` parameter value to `false`, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: authorization.openshift.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "false" - ... ----- - -** To update the role binding by using a single command: -+ -[source,terminal] ----- -$ oc patch clusterrolebinding.rbac self-provisioners -p '{ "metadata": { "annotations": { "rbac.authorization.kubernetes.io/autoupdate": "false" } } }' ----- - -. Log in as an authenticated user and verify that it can no longer self-provision a project: -+ -[source,terminal] ----- -$ oc new-project test ----- -+ -.Example output -[source,terminal] ----- -Error from server (Forbidden): You may not request a new project via this API. 
----- -+ -Consider customizing this project request message to provide more helpful instructions specific to your organization. diff --git a/modules/disabling-transparent-huge-pages.adoc b/modules/disabling-transparent-huge-pages.adoc deleted file mode 100644 index 528252bfcba2..000000000000 --- a/modules/disabling-transparent-huge-pages.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc - -:_content-type: PROCEDURE -[id="disable-thp_{context}"] -= Disabling Transparent Huge Pages - -Transparent Huge Pages (THP) attempt to automate most aspects of creating, managing, and using huge pages. Since THP automatically manages the huge pages, this is not always handled optimally for all types of workloads. THP can lead to performance regressions, since many applications handle huge pages on their own. Therefore, consider disabling THP. The following steps describe how to disable THP using the Node Tuning Operator (NTO). - -.Procedure - -. Create a file with the following content and name it `thp-disable-tuned.yaml`: -+ -[source,yaml] ----- -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - name: thp-workers-profile - namespace: openshift-cluster-node-tuning-operator -spec: - profile: - - data: | - [main] - summary=Custom tuned profile for OpenShift to turn off THP on worker nodes - include=openshift-node - - [vm] - transparent_hugepages=never - name: openshift-thp-never-worker - - recommend: - - match: - - label: node-role.kubernetes.io/worker - priority: 25 - profile: openshift-thp-never-worker ----- - -. Create the Tuned object: -+ -[source,terminal] ----- -$ oc create -f thp-disable-tuned.yaml ----- - -. Check the list of active profiles: -+ -[source,terminal] ----- -$ oc get profile -n openshift-cluster-node-tuning-operator ----- - -.Verification - -* Log in to one of the nodes and do a regular THP check to verify if the nodes applied the profile successfully: -+ -[source,terminal] ----- -$ cat /sys/kernel/mm/transparent_hugepage/enabled ----- -+ -.Example output -[source,terminal] ----- -always madvise [never] ----- diff --git a/modules/disconnected-osus-overview.adoc b/modules/disconnected-osus-overview.adoc deleted file mode 100644 index 19ff74f791af..000000000000 --- a/modules/disconnected-osus-overview.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/understanding-openshift-updates.adoc - -:_content-type: CONCEPT -[id="update-service-overview_{context}"] - -= Using the OpenShift Update Service in a disconnected environment - -The OpenShift Update Service (OSUS) provides update recommendations to {product-title} clusters. Red Hat publicly hosts the OpenShift Update Service, and clusters in a connected environment can connect to the service through public APIs to retrieve update recommendations. - -However, clusters in a disconnected environment cannot access these public APIs to retrieve update information. To have a similar update experience in a disconnected environment, you can install and configure the OpenShift Update Service locally so that it is available within the disconnected environment. - -The following sections describe how to install a local OSUS instance and configure it to provide update recommendations to a cluster. 
\ No newline at end of file diff --git a/modules/displaying-all-insights-advisor-recommendations.adoc b/modules/displaying-all-insights-advisor-recommendations.adoc deleted file mode 100644 index 770fef46d25c..000000000000 --- a/modules/displaying-all-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-all-insights-advisor-recommendations_{context}"] -= Displaying all Insights Advisor recommendations - -The Recommendations view, by default, only displays the recommendations that are detected on your clusters. However, you can view all of the recommendations in the advisor archive. - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is link:https://console.redhat.com/openshift/register[registered] on Red Hat Hybrid Cloud Console. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Click the *X* icons next to the *Clusters Impacted* and *Status* filters. -+ -You can now browse through all of the potential recommendations for your cluster. diff --git a/modules/displaying-ovs-logs.adoc b/modules/displaying-ovs-logs.adoc deleted file mode 100644 index ca1e90ec49e5..000000000000 --- a/modules/displaying-ovs-logs.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_content-type: PROCEDURE -[id="displaying-ovs-logs_{context}"] -= Displaying Open vSwitch logs - -Use the following procedure to display Open vSwitch (OVS) logs. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* Run one of the following commands: - -** Display the logs by using the `oc` command from outside the cluster: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> -u ovs-vswitchd ----- - -** Display the logs after logging on to a node in the cluster: -+ -[source,terminal] ----- -# journalctl -b -f -u ovs-vswitchd.service ----- -+ -One way to log on to a node is by using the `oc debug node/<node_name>` command. diff --git a/modules/displaying-potential-issues-with-your-cluster.adoc b/modules/displaying-potential-issues-with-your-cluster.adoc deleted file mode 100644 index 2d441f83da21..000000000000 --- a/modules/displaying-potential-issues-with-your-cluster.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-potential-issues-with-your-cluster_{context}"] -= Displaying potential issues with your cluster - -This section describes how to display the Insights report in *Insights Advisor* on {cluster-manager-url}. - -Note that Insights repeatedly analyzes your cluster and shows the latest results. These results can change, for example, if you fix an issue or a new issue has been detected. - -.Prerequisites - -* Your cluster is registered on {cluster-manager-url}. -* Remote health reporting is enabled, which is the default. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. 
Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -+ -Depending on the result, Insights Advisor displays one of the following: -+ -* *No matching recommendations found*, if Insights did not identify any issues. -+ -* A list of issues Insights has detected, grouped by risk (low, moderate, important, and critical). -+ -* *No clusters yet*, if Insights has not yet analyzed the cluster. The analysis starts shortly after the cluster has been installed, registered, and connected to the internet. - -. If any issues are displayed, click the *>* icon in front of the entry for more details. -+ -Depending on the issue, the details can also contain a link to more information from Red Hat about the issue. diff --git a/modules/displaying-the-insights-status-in-the-web-console.adoc b/modules/displaying-the-insights-status-in-the-web-console.adoc deleted file mode 100644 index dfbd2add1ecd..000000000000 --- a/modules/displaying-the-insights-status-in-the-web-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="displaying-the-insights-status-in-the-web-console_{context}"] -= Displaying the Insights status in the web console - -Insights repeatedly analyzes your cluster and you can display the status of identified potential issues of your cluster in the {product-title} web console. This status shows the number of issues in the different categories and, for further details, links to the reports in {cluster-manager-url}. - -.Prerequisites - -* Your cluster is registered in {cluster-manager-url}. -* Remote health reporting is enabled, which is the default. -* You are logged in to the {product-title} web console. - -.Procedure - -. Navigate to *Home* -> *Overview* in the {product-title} web console. - -. Click *Insights* on the *Status* card. -+ -The pop-up window lists potential issues grouped by risk. Click the individual categories or *View all recommendations in Insights Advisor* to display more details. diff --git a/modules/distr-tracing-accessing-jaeger-console.adoc b/modules/distr-tracing-accessing-jaeger-console.adoc deleted file mode 100644 index 69ada6de56b7..000000000000 --- a/modules/distr-tracing-accessing-jaeger-console.adoc +++ /dev/null @@ -1,61 +0,0 @@ -//// -Module included in the following assemblies: -* distr_tracing/distr_tracing_install/distr-tracing-deploying-jaeger.adoc -* distr_tracing/distr_tracing_install/distr-tracing-deploying-otel.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-accessing-jaeger-console_{context}"] -= Accessing the Jaeger console - -To access the Jaeger console you must have either {SMProductName} or {DTProductName} installed, and {JaegerName} installed, configured, and deployed. - -The installation process creates a route to access the Jaeger console. - -If you know the URL for the Jaeger console, you can access it directly. If you do not know the URL, use the following directions. - -.Procedure from OpenShift console -. Log in to the {product-title} web console as a user with cluster-admin rights. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Networking* -> *Routes*. - -. On the *Routes* page, select the control plane project, for example `tracing-system`, from the *Namespace* menu. 
-+ -The *Location* column displays the linked address for each route. -+ -. If necessary, use the filter to find the `jaeger` route. Click the route *Location* to launch the console. - -. Click *Log In With OpenShift*. - -//// -.Procedure from Kiali console - -. Launch the Kiali console. - -. Click *Distributed Tracing* in the left navigation pane. - -. Click *Log In With OpenShift*. -//// - -.Procedure from the CLI - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443 ----- -+ -. To query for details of the route using the command line, enter the following command. In this example, `tracing-system` is the control plane namespace. -+ -[source,terminal] ----- -$ export JAEGER_URL=$(oc get route -n tracing-system jaeger -o jsonpath='{.spec.host}') ----- -+ -. Launch a browser and navigate to ``\https://<JAEGER_URL>``, where `<JAEGER_URL>` is the route that you discovered in the previous step. - -. Log in using the same user name and password that you use to access the {product-title} console. - -. If you have added services to the service mesh and have generated traces, you can use the filters and *Find Traces* button to search your trace data. -+ -If you are validating the console installation, there is no trace data to display. diff --git a/modules/distr-tracing-architecture.adoc b/modules/distr-tracing-architecture.adoc deleted file mode 100644 index fe03b691610b..000000000000 --- a/modules/distr-tracing-architecture.adoc +++ /dev/null @@ -1,30 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc --dist_tracing_arch/distr-tracing-architecture.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-architecture_{context}"] -= {DTProductName} architecture - -{DTProductName} is made up of several components that work together to collect, store, and display tracing data. - -* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -** *Client* (Jaeger client, Tracer, Reporter, instrumented application, client libraries) - The {JaegerShortName} clients are language-specific implementations of the OpenTracing API. They can be used to instrument applications for distributed tracing either manually or with a variety of existing open source frameworks, such as Camel (Fuse), Spring Boot (RHOAR), MicroProfile (RHOAR/Thorntail), Wildfly (EAP), and many more, that are already integrated with OpenTracing. - -** *Agent* (Jaeger agent, Server Queue, Processor Workers) - The {JaegerShortName} agent is a network daemon that listens for spans sent over User Datagram Protocol (UDP), which it batches and sends to the Collector. The agent is meant to be placed on the same host as the instrumented application. This is typically accomplished by having a sidecar in container environments such as Kubernetes. - -** *Jaeger Collector* (Collector, Queue, Workers) - Similar to the Jaeger agent, the Jaeger Collector receives spans and places them in an internal queue for processing. This allows the Jaeger Collector to return immediately to the client/agent instead of waiting for the span to make its way to the storage. - -** *Storage* (Data Store) - Collectors require a persistent storage backend. {JaegerName} has a pluggable mechanism for span storage.
Note that for this release, the only supported storage is Elasticsearch. - -** *Query* (Query Service) - Query is a service that retrieves traces from storage. - -** *Ingester* (Ingester Service) - {DTProductName} can use Apache Kafka as a buffer between the Collector and the actual Elasticsearch backing storage. Ingester is a service that reads data from Kafka and writes to the Elasticsearch storage backend. - -** *Jaeger Console* – With the {JaegerName} user interface, you can visualize your distributed tracing data. On the Search page, you can find traces and explore details of the spans that make up an individual trace. - -* *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. - -** *OpenTelemetry Collector* - The OpenTelemetry Collector is a vendor-agnostic way to receive, process, and export telemetry data. The OpenTelemetry Collector supports open-source observability data formats, for example, Jaeger and Prometheus, sending to one or more open-source or commercial back-ends. The Collector is the default location instrumentation libraries export their telemetry data. diff --git a/modules/distr-tracing-change-operator-20.adoc b/modules/distr-tracing-change-operator-20.adoc deleted file mode 100644 index 9ec4c14fad64..000000000000 --- a/modules/distr-tracing-change-operator-20.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//// -This module included in the following assemblies: -- dist_tracing/dist_tracing_install/dist-tracing-updating.adoc -//// - -[id="distr-tracing-changing-operator-channel_{context}"] -= Changing the Operator channel for 2.0 - -{DTProductName} 2.0.0 made the following changes: - -* Renamed the Red Hat OpenShift Jaeger Operator to the {JaegerName} Operator. - -* Stopped support for individual release channels. Going forward, the {JaegerName} Operator will only support the *stable* Operator channel. Maintenance channels, for example *1.24-stable*, will no longer be supported by future Operators. - -As part of the update to version 2.0, you must update your OpenShift Elasticsearch and {JaegerName} Operator subscriptions. - -.Prerequisites - -* The {product-title} version is 4.6 or later. -* You have updated the OpenShift Elasticsearch Operator. -* You have backed up the Jaeger custom resource file. -* An account with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. diff --git a/modules/distr-tracing-config-default.adoc b/modules/distr-tracing-config-default.adoc deleted file mode 100644 index 3c4488dd88a2..000000000000 --- a/modules/distr-tracing-config-default.adoc +++ /dev/null @@ -1,123 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-default_{context}"] -= Distributed tracing default configuration options - -The Jaeger custom resource (CR) defines the architecture and settings to be used when creating the {JaegerShortName} resources. You can modify these parameters to customize your {JaegerShortName} implementation to your business needs. 
- -.Jaeger generic YAML example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: name -spec: - strategy: <deployment_strategy> - allInOne: - options: {} - resources: {} - agent: - options: {} - resources: {} - collector: - options: {} - resources: {} - sampling: - options: {} - storage: - type: - options: {} - query: - options: {} - resources: {} - ingester: - options: {} - resources: {} - options: {} ----- - -.Jaeger parameters -[options="header"] -|=== -|Parameter |Description |Values |Default value - -|`apiVersion:` -||API version to use when creating the object. -|`jaegertracing.io/v1` -|`jaegertracing.io/v1` - -|`kind:` -|Defines the kind of Kubernetes object to create. -|`jaeger` -| - -|`metadata:` -|Data that helps uniquely identify the object, including a `name` string, `UID`, and optional `namespace`. -| -|{product-title} automatically generates the `UID` and completes the `namespace` with the name of the project where the object is created. - -|`name:` -|Name for the object. -|The name of your {JaegerShortName} instance. -|`jaeger-all-in-one-inmemory` - -|`spec:` -|Specification for the object to be created. -|Contains all of the configuration parameters for your {JaegerShortName} instance. When a common definition for all Jaeger components is required, it is defined under the `spec` node. When the definition relates to an individual component, it is placed under the `spec/<component>` node. -|N/A - -|`strategy:` -|Jaeger deployment strategy -|`allInOne`, `production`, or `streaming` -|`allInOne` - -|`allInOne:` -|Because the `allInOne` image deploys the Agent, Collector, Query, Ingester, and Jaeger UI in a single pod, configuration for this deployment must nest component configuration under the `allInOne` parameter. -| -| - -|`agent:` -|Configuration options that define the Agent. -| -| - -|`collector:` -|Configuration options that define the Jaeger Collector. -| -| - -|`sampling:` -|Configuration options that define the sampling strategies for tracing. -| -| - -|`storage:` -|Configuration options that define the storage. All storage-related options must be placed under `storage`, rather than under the `allInOne` or other component options. -| -| - -|`query:` -|Configuration options that define the Query service. -| -| - -|`ingester:` -|Configuration options that define the Ingester service. -| -| -|=== - -The following example YAML is the minimum required to create a {JaegerName} deployment using the default settings. - -.Example minimum required dist-tracing-all-in-one.yaml -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-all-in-one-inmemory ----- diff --git a/modules/distr-tracing-config-ingester.adoc b/modules/distr-tracing-config-ingester.adoc deleted file mode 100644 index a6b4930dbbb8..000000000000 --- a/modules/distr-tracing-config-ingester.adoc +++ /dev/null @@ -1,76 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-ingester_{context}"] -= Ingester configuration options - -Ingester is a service that reads from a Kafka topic and writes to the Elasticsearch storage backend. If you are using the `allInOne` or `production` deployment strategies, you do not need to configure the Ingester service. 
- -.Jaeger parameters passed to the Ingester -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|spec: - ingester: - options: {} -|Configuration options that define the Ingester service. -| - -|options: - deadlockInterval: -|Specifies the interval, in seconds or minutes, that the Ingester must wait for a message before terminating. -The deadlock interval is disabled by default (set to `0`), to avoid terminating the Ingester when no messages arrive during system initialization. -|Minutes and seconds, for example, `1m0s`. Default value is `0`. - -|options: - kafka: - consumer: - topic: -|The `topic` parameter identifies the Kafka configuration used by the collector to produce the messages, and the Ingester to consume the messages. -|Label for the consumer. For example, `jaeger-spans`. - -|options: - kafka: - consumer: - brokers: -|Identifies the Kafka configuration used by the Ingester to consume the messages. -|Label for the broker, for example, `my-cluster-kafka-brokers.kafka:9092`. - -|options: - log-level: -|Logging level for the Ingester. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `dpanic`, `panic`. -|=== - -.Streaming Collector and Ingester example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-streaming -spec: - strategy: streaming - collector: - options: - kafka: - producer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ingester: - options: - kafka: - consumer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ingester: - deadlockInterval: 5 - storage: - type: elasticsearch - options: - es: - server-urls: http://elasticsearch:9200 ----- diff --git a/modules/distr-tracing-config-jaeger-collector.adoc b/modules/distr-tracing-config-jaeger-collector.adoc deleted file mode 100644 index 6a32b00d4b15..000000000000 --- a/modules/distr-tracing-config-jaeger-collector.adoc +++ /dev/null @@ -1,66 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-jaeger-collector_{context}"] -= Jaeger Collector configuration options - -The Jaeger Collector is the component responsible for receiving the spans that were captured by the tracer and writing them to persistent Elasticsearch storage when using the `production` strategy, or to AMQ Streams when using the `streaming` strategy. - -The Collectors are stateless and thus many instances of Jaeger Collector can be run in parallel. Collectors require almost no configuration, except for the location of the Elasticsearch cluster. - -.Parameters used by the Operator to define the Jaeger Collector -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|collector: - replicas: -|Specifies the number of Collector replicas to create. -|Integer, for example, `5` -|=== - - -.Configuration parameters passed to the Collector -[options="header"] -[cols="l, a, a"] -|=== -|Parameter |Description |Values -|spec: - collector: - options: {} -|Configuration options that define the Jaeger Collector. -| - -|options: - collector: - num-workers: -|The number of workers pulling from the queue. -|Integer, for example, `50` - -|options: - collector: - queue-size: -|The size of the Collector queue. 
-|Integer, for example, `2000` - -|options: - kafka: - producer: - topic: jaeger-spans -|The `topic` parameter identifies the Kafka configuration used by the Collector to produce the messages, and the Ingester to consume the messages. -|Label for the producer. - -|options: - kafka: - producer: - brokers: my-cluster-kafka-brokers.kafka:9092 -|Identifies the Kafka configuration used by the Collector to produce the messages. If brokers are not specified, and you have AMQ Streams 1.4.0+ installed, the {JaegerName} Operator will self-provision Kafka. -| - -|options: - log-level: -|Logging level for the Collector. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`. -|=== diff --git a/modules/distr-tracing-config-otel-collector.adoc b/modules/distr-tracing-config-otel-collector.adoc deleted file mode 100644 index 3156c704d9e0..000000000000 --- a/modules/distr-tracing-config-otel-collector.adoc +++ /dev/null @@ -1,128 +0,0 @@ -//// -This module included in the following assemblies: --distr_tracing_install/distributed-tracing-deploying-otel.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-otel-collector_{context}"] -= OpenTelemetry Collector configuration options - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -The OpenTelemetry Collector consists of three components that access telemetry data: - -* *Receivers* - A receiver, which can be push or pull based, is how data gets into the Collector. Generally, a receiver accepts data in a specified format, translates it into the internal format and passes it to processors and exporters defined in the applicable pipelines. By default, no receivers are configured. One or more receivers must be configured. Receivers may support one or more data sources. - -* *Processors* - (Optional) Processors are run on data between being received and being exported. By default, no processors are enabled. Processors must be enabled for every data source. Not all processors support all data sources. Depending on the data source, it may be recommended that multiple processors be enabled. In addition, it is important to note that the order of processors matters. - -* *Exporters* - An exporter, which can be push or pull based, is how you send data to one or more backends/destinations. By default, no exporters are configured. One or more exporters must be configured. Exporters may support one or more data sources. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings. - -You can define multiple instances of components in a custom resource YAML file. Once configured, these components must be enabled through pipelines defined in the `spec.config.service` section of the YAML file. As a best practice you should only enable the components that you need. 
- -.Sample OpenTelemetry Collector custom resource file -[source,yaml] ----- -apiVersion: opentelemetry.io/v1alpha1 -kind: OpenTelemetryCollector -metadata: - name: cluster-collector - namespace: tracing-system -spec: - mode: deployment - config: | - receivers: - otlp: - protocols: - grpc: - http: - processors: - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" - service: - pipelines: - traces: - receivers: [otlp] - processors: [] - exporters: [jaeger] ----- - -[NOTE] -==== -If a component is configured but not defined within the `service` section, then it is not enabled. -==== - -.Parameters used by the Operator to define the OpenTelemetry Collector -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default -|receivers: -|A receiver is how data gets into the Collector. By default, no receivers are configured. There must be at least one enabled receiver for a configuration to be considered valid. Receivers are enabled by being added to a pipeline. -|`otlp`, `jaeger` -|None - -|receivers: - otlp: -|The `otlp` and `jaeger` receivers come with default settings; specifying the name of the receiver is enough to configure it. -| -| - -|processors: -|Processors run on data between being received and being exported. By default, no processors are enabled. -| -|None - -|exporters: -|An exporter sends data to one or more backends/destinations. By default, no exporters are configured. There must be at least one enabled exporter for a configuration to be considered valid. Exporters are enabled by being added to a pipeline. Exporters may come with default settings, but many require configuration to specify at least the destination and security settings. -|`logging`, `jaeger` -|None - -|exporters: - jaeger: - endpoint: - -|The `jaeger` exporter’s endpoint must be of the form `<name>-collector-headless.<namespace>.svc`, with the name and namespace of the Jaeger deployment, for a secure connection to be established. -| -| - -|exporters: - jaeger: - tls: - ca_file: -|Path to the CA certificate. For a client, this verifies the server certificate. For a server, this verifies client certificates. If empty, the system root CA is used. -| -| - -|service: - pipelines: -|Components are enabled by adding them to a pipeline under `service.pipelines`. -| -| - -|service: - pipelines: - traces: - receivers: -|You enable receivers for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - processors: -|You enable processors for tracing by adding them under `service.pipelines.traces`. -| -|None - -|service: - pipelines: - traces: - exporters: -|You enable exporters for tracing by adding them under `service.pipelines.traces`. -| -|None -|=== diff --git a/modules/distr-tracing-config-query.adoc b/modules/distr-tracing-config-query.adoc deleted file mode 100644 index 1aa8ba06d64e..000000000000 --- a/modules/distr-tracing-config-query.adoc +++ /dev/null @@ -1,67 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-query_{context}"] -= Query configuration options - -Query is a service that retrieves traces from storage and hosts the user interface to display them.
- -.Parameters used by the {JaegerName} Operator to define Query -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value - -|spec: - query: - replicas: -|Specifies the number of Query replicas to create. -|Integer, for example, `2` -| -|=== - - -.Configuration parameters passed to Query -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value - -|spec: - query: - options: {} -|Configuration options that define the Query service. -| -| - -|options: - log-level: -|Logging level for Query. -|Possible values: `debug`, `info`, `warn`, `error`, `fatal`, `panic`. -| - -|options: - query: - base-path: -|The base path for all jaeger-query HTTP routes can be set to a non-root value, for example, `/jaeger` would cause all UI URLs to start with `/jaeger`. This can be useful when running jaeger-query behind a reverse proxy. -|/<path> -| -|=== - -.Sample Query configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: "Jaeger" -metadata: - name: "my-jaeger" -spec: - strategy: allInOne - allInOne: - options: - log-level: debug - query: - base-path: /jaeger ----- diff --git a/modules/distr-tracing-config-sampling.adoc b/modules/distr-tracing-config-sampling.adoc deleted file mode 100644 index 9e743c4072d9..000000000000 --- a/modules/distr-tracing-config-sampling.adoc +++ /dev/null @@ -1,99 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-sampling_{context}"] -= Distributed tracing sampling configuration options - -The {JaegerName} Operator can be used to define sampling strategies that will be supplied to tracers that have been configured to use a remote sampler. - -While all traces are generated, only a few are sampled. Sampling a trace marks the trace for further processing and storage. - -[NOTE] -==== -This is not relevant if a trace was started by the Envoy proxy, as the sampling decision is made there. The Jaeger sampling decision is only relevant when the trace is started by an application using the client. -==== - -When a service receives a request that contains no trace context, the client starts a new trace, assigns it a random trace ID, and makes a sampling decision based on the currently installed sampling strategy. The sampling decision propagates to all subsequent requests in the trace so that other services are not making the sampling decision again. - -{JaegerShortName} libraries support the following samplers: - -* *Probabilistic* - The sampler makes a random sampling decision with the probability of sampling equal to the value of the `sampling.param` property. For example, using `sampling.param=0.1` samples approximately 1 in 10 traces. - -* *Rate Limiting* - The sampler uses a leaky bucket rate limiter to ensure that traces are sampled with a certain constant rate. For example, using `sampling.param=2.0` samples requests with the rate of 2 traces per second. - -.Jaeger sampling options -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|spec: - sampling: - options: {} - default_strategy: - service_strategy: -|Configuration options that define the sampling strategies for tracing. -| -|If you do not provide configuration, the Collectors will return the default probabilistic sampling policy with 0.001 (0.1%) probability for all services. - -|default_strategy: - type: -service_strategy: - type: -|Sampling strategy to use. 
See descriptions above. -|Valid values are `probabilistic`, and `ratelimiting`. -|`probabilistic` - -|default_strategy: - param: -service_strategy: - param: -|Parameters for the selected sampling strategy. -|Decimal and integer values (0, .1, 1, 10) -|1 -|=== - -This example defines a default sampling strategy that is probabilistic, with a 50% chance of the trace instances being sampled. - -.Probabilistic sampling example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: with-sampling -spec: - sampling: - options: - default_strategy: - type: probabilistic - param: 0.5 - service_strategies: - - service: alpha - type: probabilistic - param: 0.8 - operation_strategies: - - operation: op1 - type: probabilistic - param: 0.2 - - operation: op2 - type: probabilistic - param: 0.4 - - service: beta - type: ratelimiting - param: 5 ----- - -If there are no user-supplied configurations, the {JaegerShortName} uses the following settings: - -.Default sampling -[source,yaml] ----- -spec: - sampling: - options: - default_strategy: - type: probabilistic - param: 1 ----- diff --git a/modules/distr-tracing-config-security-ossm-cli.adoc b/modules/distr-tracing-config-security-ossm-cli.adoc deleted file mode 100644 index 8fe1119a7a54..000000000000 --- a/modules/distr-tracing-config-security-ossm-cli.adoc +++ /dev/null @@ -1,93 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-config-security-ossm-cli_{context}"] -= Configuring distributed tracing security for service mesh from the command line - -You can modify the Jaeger resource to configure {JaegerShortName} security for use with {SMproductShortName} from the command line using the `oc` utility. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* The {SMProductName} Operator must be installed. -* The `ServiceMeshControlPlane` deployed to the cluster. -* You have access to the OpenShift CLI (oc) that matches your OpenShift Container Platform version. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -+ -[source,terminal] ----- -$ oc login https://<HOSTNAME>:6443 ----- -+ -. Change to the project where you installed the control plane, for example `istio-system`, by entering the following command: -+ -[source,terminal] ----- -$ oc project istio-system ----- -+ -. Run the following command to edit the Jaeger custom resource file, where `jaeger.yaml` is the name of your Jaeger custom resource. -+ -[source,terminal] ----- -$ oc edit -n tracing-system -f jaeger.yaml ----- -+ -. Edit the `Jaeger` custom resource file to add the `htpasswd` configuration as shown in the following example. 
- -* `spec.ingress.openshift.htpasswdFile` -* `spec.volumes` -* `spec.volumeMounts` -+ -.Example Jaeger resource showing `htpasswd` configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -spec: - ingress: - enabled: true - openshift: - htpasswdFile: /etc/proxy/htpasswd/auth - sar: '{"namespace": "istio-system", "resource": "pods", "verb": "get"}' - options: {} - resources: {} - security: oauth-proxy - volumes: - - name: secret-htpasswd - secret: - secretName: htpasswd - - configMap: - defaultMode: 420 - items: - - key: ca-bundle.crt - path: tls-ca-bundle.pem - name: trusted-ca-bundle - optional: true - name: trusted-ca-bundle - volumeMounts: - - mountPath: /etc/proxy/htpasswd - name: secret-htpasswd - - mountPath: /etc/pki/ca-trust/extracted/pem/ - name: trusted-ca-bundle - readOnly: true ----- -+ -. Run the following command to apply your changes, where <jaeger.yaml> is the name of your Jaeger custom resource. -+ -[source,terminal] ----- -$ oc apply -n tracing-system -f <jaeger.yaml> ----- -+ -. Run the following command to watch the progress of the pod deployment: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- diff --git a/modules/distr-tracing-config-security-ossm-web.adoc b/modules/distr-tracing-config-security-ossm-web.adoc deleted file mode 100644 index 9759190ae799..000000000000 --- a/modules/distr-tracing-config-security-ossm-web.adoc +++ /dev/null @@ -1,74 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: PROCEDURE -[id="distr-tracing-config-security-ossm-web_{context}"] -= Configuring distributed tracing security for service mesh from the OpenShift console - -You can modify the Jaeger resource to configure {JaegerShortName} security for use with {SMproductShortName} in the OpenShift console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* The {SMProductName} Operator must be installed. -* The `ServiceMeshControlPlane` deployed to the cluster. -* You have access to the OpenShift Container Platform web console. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Navigate to Operators → Installed Operators. - -. Click the *Project* menu and select the project where your `ServiceMeshControlPlane` resource is deployed from the list, for example `istio-system`. - -. Click the *{JaegerName} Operator*. - -. On the *Operator Details* page, click the *Jaeger* tab. - -. Click the name of your Jaeger instance. - -. On the Jaeger details page, click the `YAML` tab to modify your configuration. - -. Edit the `Jaeger` custom resource file to add the `htpasswd` configuration as shown in the following example. 
- -* `spec.ingress.openshift.htpasswdFile` -* `spec.volumes` -* `spec.volumeMounts` -+ -.Example Jaeger resource showing `htpasswd` configuration -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -spec: - ingress: - enabled: true - openshift: - htpasswdFile: /etc/proxy/htpasswd/auth - sar: '{"namespace": "istio-system", "resource": "pods", "verb": "get"}' - options: {} - resources: {} - security: oauth-proxy - volumes: - - name: secret-htpasswd - secret: - secretName: htpasswd - - configMap: - defaultMode: 420 - items: - - key: ca-bundle.crt - path: tls-ca-bundle.pem - name: trusted-ca-bundle - optional: true - name: trusted-ca-bundle - volumeMounts: - - mountPath: /etc/proxy/htpasswd - name: secret-htpasswd - - mountPath: /etc/pki/ca-trust/extracted/pem/ - name: trusted-ca-bundle - readOnly: true ----- -+ -. Click *Save*. diff --git a/modules/distr-tracing-config-security-ossm.adoc b/modules/distr-tracing-config-security-ossm.adoc deleted file mode 100644 index 14bca9be19b5..000000000000 --- a/modules/distr-tracing-config-security-ossm.adoc +++ /dev/null @@ -1,11 +0,0 @@ -//// -This module included in the following assemblies: -service_mesh/v2x/ossm-reference-jaeger.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-config-security-ossm_{context}"] -= Configuring distributed tracing security for service mesh - -The {JaegerShortName} uses OAuth for default authentication. However {SMProductName} uses a secret called `htpasswd` to facilitate communication between dependent services such as Grafana, Kiali, and the {JaegerShortName}. When you configure your {JaegerShortName} in the `ServiceMeshControlPlane` the {SMProductShortName} automatically configures security settings to use `htpasswd`. - -If you are specifying your {JaegerShortName} configuration in a Jaeger custom resource, you must manually configure the `htpasswd` settings and ensure the `htpasswd` secret is mounted into your Jaeger instance so that Kiali can communicate with it. diff --git a/modules/distr-tracing-config-storage.adoc b/modules/distr-tracing-config-storage.adoc deleted file mode 100644 index 8947448d3432..000000000000 --- a/modules/distr-tracing-config-storage.adoc +++ /dev/null @@ -1,728 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-config-storage_{context}"] -= Distributed tracing storage configuration options - -You configure storage for the Collector, Ingester, and Query services under `spec.storage`. Multiple instances of each of these components can be provisioned as required for performance and resilience purposes. - -.General storage parameters used by the {JaegerName} Operator to define distributed tracing storage - -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|spec: - storage: - type: -|Type of storage to use for the deployment. -|`memory` or `elasticsearch`. -Memory storage is only appropriate for development, testing, demonstrations, and proof of concept environments as the data does not persist if the pod is shut down. For production environments {JaegerShortName} supports Elasticsearch for persistent storage. -|`memory` - -|storage: - secretname: -|Name of the secret, for example `tracing-secret`. -| -|N/A - -|storage: - options: {} -|Configuration options that define the storage. 
-| -| -|=== - -.Elasticsearch index cleaner parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|storage: - esIndexCleaner: - enabled: -|When using Elasticsearch storage, by default a job is created to clean old traces from the index. This parameter enables or disables the index cleaner job. -|`true`/ `false` -|`true` - -|storage: - esIndexCleaner: - numberOfDays: -|Number of days to wait before deleting an index. -|Integer value -|`7` - -|storage: - esIndexCleaner: - schedule: -|Defines the schedule for how often to clean the Elasticsearch index. -|Cron expression -|"55 23 * * *" -|=== - -[id="distributed-tracing-config-auto-provisioning-es_{context}"] -== Auto-provisioning an Elasticsearch instance - -When you deploy a Jaeger custom resource, the {JaegerName} Operator uses the OpenShift Elasticsearch Operator to create an Elasticsearch cluster based on the configuration provided in the `storage` section of the custom resource file. The {JaegerName} Operator will provision Elasticsearch if the following configurations are set: - -* `spec.storage:type` is set to `elasticsearch` -* `spec.storage.elasticsearch.doNotProvision` set to `false` -* `spec.storage.options.es.server-urls` is not defined, that is, there is no connection to an Elasticsearch instance that was not provisioned by the Red Hat Elasticsearch Operator. - -When provisioning Elasticsearch, the {JaegerName} Operator sets the Elasticsearch custom resource `name` to the value of `spec.storage.elasticsearch.name` from the Jaeger custom resource. If you do not specify a value for `spec.storage.elasticsearch.name`, the Operator uses `elasticsearch`. - -.Restrictions - -* You can have only one {JaegerShortName} with self-provisioned Elasticsearch instance per namespace. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. -* There can be only one Elasticsearch per namespace. - -[NOTE] -==== -If you already have installed Elasticsearch as part of OpenShift Logging, the {JaegerName} Operator can use the installed OpenShift Elasticsearch Operator to provision storage. -==== - -The following configuration parameters are for a _self-provisioned_ Elasticsearch instance, that is an instance created by the {JaegerName} Operator using the OpenShift Elasticsearch Operator. You specify configuration options for self-provisioned Elasticsearch under `spec:storage:elasticsearch` in your configuration file. - -.Elasticsearch resource configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|elasticsearch: - properties: - doNotProvision: -|Use to specify whether or not an Elasticsearch instance should be provisioned by the {JaegerName} Operator. -|`true`/`false` -|`true` - -|elasticsearch: - properties: - name: -|Name of the Elasticsearch instance. The {JaegerName} Operator uses the Elasticsearch instance specified in this parameter to connect to Elasticsearch. -|string -|`elasticsearch` - -|elasticsearch: - nodeCount: -|Number of Elasticsearch nodes. For high availability use at least 3 nodes. Do not use 2 nodes as “split brain” problem can happen. -|Integer value. For example, Proof of concept = 1, -Minimum deployment =3 -|3 - -|elasticsearch: - resources: - requests: - cpu: -|Number of central processing units for requests, based on your environment's configuration. -|Specified in cores or millicores, for example, 200m, 0.5, 1. 
For example, Proof of concept = 500m, -Minimum deployment =1 -|1 - -|elasticsearch: - resources: - requests: - memory: -|Available memory for requests, based on your environment's configuration. -|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, -Minimum deployment = 16Gi* -|16Gi - -|elasticsearch: - resources: - limits: - cpu: -|Limit on number of central processing units, based on your environment's configuration. -|Specified in cores or millicores, for example, 200m, 0.5, 1. For example, Proof of concept = 500m, -Minimum deployment =1 -| - -|elasticsearch: - resources: - limits: - memory: -|Available memory limit based on your environment's configuration. -|Specified in bytes, for example, 200Ki, 50Mi, 5Gi. For example, Proof of concept = 1Gi, -Minimum deployment = 16Gi* -| - -|elasticsearch: - redundancyPolicy: -|Data replication policy defines how Elasticsearch shards are replicated across data nodes in the cluster. If not specified, the {JaegerName} Operator automatically determines the most appropriate replication based on number of nodes. -|`ZeroRedundancy`(no replica shards), `SingleRedundancy`(one replica shard), `MultipleRedundancy`(each index is spread over half of the Data nodes), `FullRedundancy` (each index is fully replicated on every Data node in the cluster). -| - -|elasticsearch: - useCertManagement: -|Use to specify whether or not {JaegerShortName} should use the certificate management feature of the Red Hat Elasticsearch Operator. This feature was added to {logging-title} 5.2 in {product-title} 4.7 and is the preferred setting for new Jaeger deployments. -|`true`/`false` -|`true` - -| -3+|*Each Elasticsearch node can operate with a lower memory setting though this is NOT recommended for production deployments. For production use, you should have no less than 16Gi allocated to each pod by default, but preferably allocate as much as you can, up to 64Gi per pod. -|=== - -.Production storage example -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - nodeCount: 3 - resources: - requests: - cpu: 1 - memory: 16Gi - limits: - memory: 16Gi ----- - -.Storage example with persistent storage: -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - nodeCount: 1 - storage: # <1> - storageClassName: gp2 - size: 5Gi - resources: - requests: - cpu: 200m - memory: 4Gi - limits: - memory: 4Gi - redundancyPolicy: ZeroRedundancy ----- - -<1> Persistent storage configuration. In this case AWS `gp2` with `5Gi` size. When no value is specified, {JaegerShortName} uses `emptyDir`. The OpenShift Elasticsearch Operator provisions `PersistentVolumeClaim` and `PersistentVolume` which are not removed with {JaegerShortName} instance. You can mount the same volumes if you create a {JaegerShortName} instance with the same name and namespace. - - -[id="distributed-tracing-config-external-es_{context}"] -== Connecting to an existing Elasticsearch instance - -You can use an existing Elasticsearch cluster for storage with {DTShortName}. An existing Elasticsearch cluster, also known as an _external_ Elasticsearch instance, is an instance that was not installed by the {JaegerName} Operator or by the Red Hat Elasticsearch Operator. 
- -When you deploy a Jaeger custom resource, the {JaegerName} Operator will not provision Elasticsearch if the following configurations are set: - -* `spec.storage.elasticsearch.doNotProvision` set to `true` -* `spec.storage.options.es.server-urls` has a value -* `spec.storage.elasticsearch.name` has a value, or if the Elasticsearch instance name is `elasticsearch`. - -The {JaegerName} Operator uses the Elasticsearch instance specified in `spec.storage.elasticsearch.name` to connect to Elasticsearch. - -.Restrictions - -* You cannot share or reuse a {product-title} logging Elasticsearch instance with {JaegerShortName}. The Elasticsearch cluster is meant to be dedicated for a single {JaegerShortName} instance. - -[NOTE] -==== -Red Hat does not provide support for your external Elasticsearch instance. You can review the tested integrations matrix on the link:https://access.redhat.com/articles/5381021[Customer Portal]. -==== - -The following configuration parameters are for an already existing Elasticsearch instance, also known as an _external_ Elasticsearch instance. In this case, you specify configuration options for Elasticsearch under `spec:storage:options:es` in your custom resource file. - -.General ES configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - server-urls: -|URL of the Elasticsearch instance. -|The fully-qualified domain name of the Elasticsearch server. -|`http://elasticsearch.<namespace>.svc:9200` - -|es: - max-doc-count: -|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. If you set both `es.max-doc-count` and `es.max-num-spans`, Elasticsearch will use the smaller value of the two. -| -|10000 - -|es: - max-num-spans: -|[*Deprecated* - Will be removed in a future release, use `es.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. If you set both `es.max-num-spans` and `es.max-doc-count`, Elasticsearch will use the smaller value of the two. -| -|10000 - -|es: - max-span-age: -|The maximum lookback for spans in Elasticsearch. -| -|72h0m0s - -|es: - sniffer: -|The sniffer configuration for Elasticsearch. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es: - sniffer-tls-enabled: -|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default -|`true`/ `false` -|`false` - -|es: - timeout: -|Timeout used for queries. When set to zero there is no timeout. -| -|0s - -|es: - username: -|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es.password`. -| -| - -|es: - password: -|The password required by Elasticsearch. See also, `es.username`. -| -| - -|es: - version: -|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. -| -|0 -|=== - -.ES data replication parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - num-replicas: -|The number of replicas per index in Elasticsearch. -| -|1 - -|es: - num-shards: -|The number of shards per index in Elasticsearch. 
-| -|5 -|=== - -.ES index configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - create-index-templates: -|Automatically create index templates at application startup when set to `true`. When templates are installed manually, set to `false`. -|`true`/ `false` -|`true` - -|es: - index-prefix: -|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". -| -| -|=== - -.ES bulk processor configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - bulk: - actions: -|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. -| -|1000 - -//What is the default here? The original text said "Set to zero to disable. By default, this is disabled." -|es: - bulk: - flush-interval: -|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. -| -|200ms - -|es: - bulk: - size: -|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. -| -|5000000 - -|es: - bulk: - workers: -|The number of workers that are able to receive and commit bulk requests to Elasticsearch. -| -|1 -|=== - -.ES TLS configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es: - tls: - ca: -|Path to a TLS Certification Authority (CA) file used to verify the remote servers. -| -|Will use the system truststore by default. - -|es: - tls: - cert: -|Path to a TLS Certificate file, used to identify this process to the remote servers. -| -| - -|es: - tls: - enabled: -|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. -|`true`/ `false` -|`false` - -|es: - tls: - key: -|Path to a TLS Private Key file, used to identify this process to the remote servers. -| -| - -|es: - tls: - server-name: -|Override the expected TLS server name in the certificate of the remote servers. -| -| -//Clarification of "if specified" for `token-file` and `username`, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es.tls.ca has a value?) -|es: - token-file: -|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. -| -| -|=== - -.ES archive configuration parameters -[options="header"] -[cols="l, a, a, a"] -|=== -|Parameter |Description |Values |Default value -|es-archive: - bulk: - actions: -|The number of requests that can be added to the queue before the bulk processor decides to commit updates to disk. -| -|0 - -//What is the default here? The original text said "Set to zero to disable. By default, this is disabled." -|es-archive: - bulk: - flush-interval: -|A `time.Duration` after which bulk requests are committed, regardless of other thresholds. To disable the bulk processor flush interval, set this to zero. -| -|0s - -|es-archive: - bulk: - size: -|The number of bytes that the bulk requests can take up before the bulk processor decides to commit updates to disk. -| -|0 - -|es-archive: - bulk: - workers: -|The number of workers that are able to receive and commit bulk requests to Elasticsearch. -| -|0 - -|es-archive: - create-index-templates: -|Automatically create index templates at application startup when set to `true`. 
When templates are installed manually, set to `false`. -|`true`/ `false` -|`false` - -|es-archive: - enabled: -|Enable extra storage. -|`true`/ `false` -|`false` - -|es-archive: - index-prefix: -|Optional prefix for {JaegerShortName} indices. For example, setting this to "production" creates indices named "production-tracing-*". -| -| - -|es-archive: - max-doc-count: -|The maximum document count to return from an Elasticsearch query. This will also apply to aggregations. -| -|0 - -|es-archive: - max-num-spans: -|[*Deprecated* - Will be removed in a future release, use `es-archive.max-doc-count` instead.] The maximum number of spans to fetch at a time, per query, in Elasticsearch. -| -|0 - -|es-archive: - max-span-age: -|The maximum lookback for spans in Elasticsearch. -| -|0s - -|es-archive: - num-replicas: -|The number of replicas per index in Elasticsearch. -| -|0 - -|es-archive: - num-shards: -|The number of shards per index in Elasticsearch. -| -|0 - -|es-archive: - password: -|The password required by Elasticsearch. See also, `es.username`. -| -| - -|es-archive: - server-urls: -|The comma-separated list of Elasticsearch servers. Must be specified as fully qualified URLs, for example, `\http://localhost:9200`. -| -| - -|es-archive: - sniffer: -|The sniffer configuration for Elasticsearch. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - sniffer-tls-enabled: -|Option to enable TLS when sniffing an Elasticsearch Cluster. The client uses the sniffing process to find all nodes automatically. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - timeout: -|Timeout used for queries. When set to zero there is no timeout. -| -|0s - -|es-archive: - tls: - ca: -|Path to a TLS Certification Authority (CA) file used to verify the remote servers. -| -|Will use the system truststore by default. - -|es-archive: - tls: - cert: -|Path to a TLS Certificate file, used to identify this process to the remote servers. -| -| - -|es-archive: - tls: - enabled: -|Enable transport layer security (TLS) when talking to the remote servers. Disabled by default. -|`true`/ `false` -|`false` - -|es-archive: - tls: - key: -|Path to a TLS Private Key file, used to identify this process to the remote servers. -| -| - -|es-archive: - tls: - server-name: -|Override the expected TLS server name in the certificate of the remote servers. -| -| - -//Clarification of "if specified" for next two rows, does that mean if this is set? Or that it only loads the CA if one is specified (that is, if es-archive.tls.ca has a value?) -|es-archive: - token-file: -|Path to a file containing the bearer token. This flag also loads the Certification Authority (CA) file if it is specified. -| -| - -|es-archive: - username: -|The username required by Elasticsearch. The basic authentication also loads CA if it is specified. See also `es-archive.password`. -| -| - -|es-archive: - version: -|The major Elasticsearch version. If not specified, the value will be auto-detected from Elasticsearch. 
-| -|0 -|=== - - -.Storage example with volume mounts -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - options: - es: - server-urls: https://quickstart-es-http.default.svc:9200 - index-prefix: my-prefix - tls: - ca: /es/certificates/ca.crt - secretName: tracing-secret - volumeMounts: - - name: certificates - mountPath: /es/certificates/ - readOnly: true - volumes: - - name: certificates - secret: - secretName: quickstart-es-http-certs-public ----- - -The following example shows a Jaeger CR using an external Elasticsearch cluster with TLS CA certificate mounted from a volume and user/password stored in a secret. - -.External Elasticsearch example: -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: simple-prod -spec: - strategy: production - storage: - type: elasticsearch - options: - es: - server-urls: https://quickstart-es-http.default.svc:9200 # <1> - index-prefix: my-prefix - tls: # <2> - ca: /es/certificates/ca.crt - secretName: tracing-secret # <3> - volumeMounts: # <4> - - name: certificates - mountPath: /es/certificates/ - readOnly: true - volumes: - - name: certificates - secret: - secretName: quickstart-es-http-certs-public ----- -<1> URL to Elasticsearch service running in default namespace. -<2> TLS configuration. In this case only CA certificate, but it can also contain es.tls.key and es.tls.cert when using mutual TLS. -<3> Secret which defines environment variables ES_PASSWORD and ES_USERNAME. Created by kubectl create secret generic tracing-secret --from-literal=ES_PASSWORD=changeme --from-literal=ES_USERNAME=elastic -<4> Volume mounts and volumes which are mounted into all storage components. - -[id="distr-tracing-manage-es-certificates_{context}"] -= Managing certificates with Elasticsearch - -You can create and manage certificates using the Red Hat Elasticsearch Operator. Managing certificates using the Red Hat Elasticsearch Operator also lets you use a single Elasticsearch cluster with multiple Jaeger Collectors. - -:FeatureName: Managing certificates with Elasticsearch -include::snippets/technology-preview.adoc[leveloffset=+1] - -Starting with version 2.4, the {JaegerName} Operator delegates certificate creation to the Red Hat Elasticsearch Operator by using the following annotations in the Elasticsearch custom resource: - -* `logging.openshift.io/elasticsearch-cert-management: "true"` -* `logging.openshift.io/elasticsearch-cert.jaeger-<shared-es-node-name>: "user.jaeger"` -* `logging.openshift.io/elasticsearch-cert.curator-<shared-es-node-name>: "system.logging.curator"` - -Where the `<shared-es-node-name>` is the name of the Elasticsearch node. For example, if you create an Elasticsearch node named `custom-es`, your custom resource might look like the following example. 
- -.Example Elasticsearch CR showing annotations -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: Elasticsearch -metadata: - annotations: - logging.openshift.io/elasticsearch-cert-management: "true" - logging.openshift.io/elasticsearch-cert.jaeger-custom-es: "user.jaeger" - logging.openshift.io/elasticsearch-cert.curator-custom-es: "system.logging.curator" - name: custom-es -spec: - managementState: Managed - nodeSpec: - resources: - limits: - memory: 16Gi - requests: - cpu: 1 - memory: 16Gi - nodes: - - nodeCount: 3 - proxyResources: {} - resources: {} - roles: - - master - - client - - data - storage: {} - redundancyPolicy: ZeroRedundancy ----- - -.Prerequisites - -* {product-title} 4.7 -* {logging-title} 5.2 -* The Elasticsearch node and the Jaeger instances must be deployed in the same namespace. For example, `tracing-system`. - -You enable certificate management by setting `spec.storage.elasticsearch.useCertManagement` to `true` in the Jaeger custom resource. - -.Example showing `useCertManagement` -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-prod -spec: - strategy: production - storage: - type: elasticsearch - elasticsearch: - name: custom-es - doNotProvision: true - useCertManagement: true ----- - -The {JaegerName} Operator sets the Elasticsearch custom resource `name` to the value of `spec.storage.elasticsearch.name` from the Jaeger custom resource when provisioning Elasticsearch. - -The certificates are provisioned by the Red Hat Elasticsearch Operator and the {JaegerName} Operator injects the certificates. diff --git a/modules/distr-tracing-deploy-default.adoc b/modules/distr-tracing-deploy-default.adoc deleted file mode 100644 index 627adb8bc6f7..000000000000 --- a/modules/distr-tracing-deploy-default.adoc +++ /dev/null @@ -1,116 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-default_{context}"] -= Deploying the {DTShortName} default strategy from the web console - -The custom resource definition (CRD) defines the configuration used when you deploy an instance of {DTProductName}. The default CR is named `jaeger-all-in-one-inmemory` and it is configured with minimal resources to ensure that you can successfully install it on a default {product-title} installation. You can use this default configuration to create a {JaegerName} instance that uses the `AllInOne` deployment strategy, or you can define your own custom resource file. - -[NOTE] -==== -In-memory storage is not persistent. If the Jaeger pod shuts down, restarts, or is replaced, your trace data will be lost. For persistent storage, you must use the `production` or `streaming` strategies, which use Elasticsearch as the default storage. -==== - -.Prerequisites - -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. 
Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Details* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. On the *Create Jaeger* page, to install using the defaults, click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-all-in-one-inmemory`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until the pod has a status of "Running" before continuing. - - -[id="distr-tracing-deploy-default-cli_{context}"] -== Deploying the {DTShortName} default strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The {JaegerName} Operator has been installed and verified. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger.yaml` that contains the following text: -+ -.Example jaeger-all-in-one.yaml -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-all-in-one-inmemory ----- - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger.yaml ----- - -. Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -jaeger-all-in-one-inmemory-cdff7897b-qhfdx 2/2 Running 0 24s ----- diff --git a/modules/distr-tracing-deploy-production-es.adoc b/modules/distr-tracing-deploy-production-es.adoc deleted file mode 100644 index 7420097adead..000000000000 --- a/modules/distr-tracing-deploy-production-es.adoc +++ /dev/null @@ -1,137 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-production_{context}"] -= Deploying the {DTShortName} production strategy from the web console - -The `production` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. - -.Prerequisites - -* The OpenShift Elasticsearch Operator has been installed. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. 
Create a new project, for example `tracing-system`. -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your production YAML configuration, for example: - -+ -.Example jaeger-production.yaml file with Elasticsearch -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-production - namespace: -spec: - strategy: production - ingress: - security: oauth-proxy - storage: - type: elasticsearch - elasticsearch: - nodeCount: 3 - redundancyPolicy: SingleRedundancy - esIndexCleaner: - enabled: true - numberOfDays: 7 - schedule: 55 23 * * * - esRollover: - schedule: '*/30 * * * *' ----- -+ - -. Click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-prod-elasticsearch`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing. - - -[id="distr-tracing-deploy-production-cli_{context}"] -== Deploying the {DTShortName} production strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The OpenShift Elasticsearch Operator has been installed. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger-production.yaml` that contains the text of the example file in the previous procedure. - -. Run the following command to deploy {JaegerShortName}: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger-production.yaml ----- -+ -. 
Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-jaegersystemjaegerproduction-1-6676cf568gwhlw 2/2 Running 0 10m -elasticsearch-cdm-jaegersystemjaegerproduction-2-bcd4c8bf5l6g6w 2/2 Running 0 10m -elasticsearch-cdm-jaegersystemjaegerproduction-3-844d6d9694hhst 2/2 Running 0 10m -jaeger-production-collector-94cd847d-jwjlj 1/1 Running 3 8m32s -jaeger-production-query-5cbfbd499d-tv8zf 3/3 Running 3 8m32s ----- diff --git a/modules/distr-tracing-deploy-streaming.adoc b/modules/distr-tracing-deploy-streaming.adoc deleted file mode 100644 index 473e522f7d12..000000000000 --- a/modules/distr-tracing-deploy-streaming.adoc +++ /dev/null @@ -1,156 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-deploy-streaming_{context}"] -= Deploying the {DTShortName} streaming strategy from the web console - -The `streaming` deployment strategy is intended for production environments that require a more scalable and highly available architecture, and where long-term storage of trace data is important. - -The `streaming` strategy provides a streaming capability that sits between the Collector and the Elasticsearch storage. This reduces the pressure on the storage under high load situations, and enables other trace post-processing capabilities to tap into the real-time span data directly from the Kafka streaming platform. - -[NOTE] -==== -The streaming strategy requires an additional Red Hat subscription for AMQ Streams. If you do not have an AMQ Streams subscription, contact your sales representative for more information. -==== - -[NOTE] -==== -The streaming deployment strategy is currently unsupported on {ibmzProductName}. -==== - -.Prerequisites - -* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. - -. Create a new project, for example `tracing-system`. - -+ -[NOTE] -==== -If you are installing as part of Service Mesh, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource, for example `istio-system`. -==== -+ - -.. Navigate to *Home* -> *Projects*. - -.. Click *Create Project*. - -.. Enter `tracing-system` in the *Name* field. - -.. Click *Create*. - -. Navigate to *Operators* -> *Installed Operators*. - -. If necessary, select `tracing-system` from the *Project* menu. You may have to wait a few moments for the Operators to be copied to the new project. - -. Click the {JaegerName} Operator. On the *Overview* tab, under *Provided APIs*, the Operator provides a single link. - -. Under *Jaeger*, click *Create Instance*. - -. 
On the *Create Jaeger* page, replace the default `all-in-one` YAML text with your streaming YAML configuration, for example: - -.Example jaeger-streaming.yaml file -[source,yaml] ----- -apiVersion: jaegertracing.io/v1 -kind: Jaeger -metadata: - name: jaeger-streaming -spec: - strategy: streaming - collector: - options: - kafka: - producer: - topic: jaeger-spans - #Note: If brokers are not defined,AMQStreams 1.4.0+ will self-provision Kafka. - brokers: my-cluster-kafka-brokers.kafka:9092 - storage: - type: elasticsearch - ingester: - options: - kafka: - consumer: - topic: jaeger-spans - brokers: my-cluster-kafka-brokers.kafka:9092 - ----- -//TODO - find out if this storage configuration is correct for OpenShift - -. Click *Create* to create the {JaegerShortName} instance. - -. On the *Jaegers* page, click the name of the {JaegerShortName} instance, for example, `jaeger-streaming`. - -. On the *Jaeger Details* page, click the *Resources* tab. Wait until all the pods have a status of "Running" before continuing. - - -[id="distr-tracing-deploy-streaming-cli_{context}"] -== Deploying the {DTShortName} streaming strategy from the CLI - -Follow this procedure to create an instance of {JaegerShortName} from the command line. - -.Prerequisites - -* The AMQ Streams Operator has been installed. If using version 1.4.0 or higher you can use self-provisioning. Otherwise you must create the Kafka instance. -* The {JaegerName} Operator has been installed. -* You have reviewed the instructions for how to customize the deployment. -* You have access to the OpenShift CLI (`oc`) that matches your {product-title} version. -* You have access to the cluster as a user with the `cluster-admin` role. - -Procedure - -. Log in to the {product-title} CLI as a user with the `cluster-admin` role. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> https://<HOSTNAME>:8443 ----- - -. Create a new project named `tracing-system`. -+ -[source,terminal] ----- -$ oc new-project tracing-system ----- - -. Create a custom resource file named `jaeger-streaming.yaml` that contains the text of the example file in the previous procedure. - -. Run the following command to deploy Jaeger: -+ -[source,terminal] ----- -$ oc create -n tracing-system -f jaeger-streaming.yaml ----- -+ -. 
Run the following command to watch the progress of the pods during the installation process: -+ -[source,terminal] ----- -$ oc get pods -n tracing-system -w ----- -+ -After the installation process has completed, you should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -elasticsearch-cdm-jaegersystemjaegerstreaming-1-697b66d6fcztcnn 2/2 Running 0 5m40s -elasticsearch-cdm-jaegersystemjaegerstreaming-2-5f4b95c78b9gckz 2/2 Running 0 5m37s -elasticsearch-cdm-jaegersystemjaegerstreaming-3-7b6d964576nnz97 2/2 Running 0 5m5s -jaeger-streaming-collector-6f6db7f99f-rtcfm 1/1 Running 0 80s -jaeger-streaming-entity-operator-6b6d67cc99-4lm9q 3/3 Running 2 2m18s -jaeger-streaming-ingester-7d479847f8-5h8kc 1/1 Running 0 80s -jaeger-streaming-kafka-0 2/2 Running 0 3m1s -jaeger-streaming-query-65bf5bb854-ncnc7 3/3 Running 0 80s -jaeger-streaming-zookeeper-0 2/2 Running 0 3m39s ----- diff --git a/modules/distr-tracing-deployment-best-practices.adoc b/modules/distr-tracing-deployment-best-practices.adoc deleted file mode 100644 index 8e6098bc6140..000000000000 --- a/modules/distr-tracing-deployment-best-practices.adoc +++ /dev/null @@ -1,15 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-deployment-best-practices_{context}"] -= Deployment best practices - -* {DTProductName} instance names must be unique. If you want to have multiple {JaegerName} instances and are using sidecar injected agents, then the {JaegerName} instances should have unique names, and the injection annotation should explicitly specify the {JaegerName} instance name the tracing data should be reported to. - -* If you have a multitenant implementation and tenants are separated by namespaces, deploy a {JaegerName} instance to each tenant namespace. - -** Agent as a daemonset is not supported for multitenant installations or {product-dedicated}. Agent as a sidecar is the only supported configuration for these use cases. - -* If you are installing {DTShortName} as part of {SMProductName}, the {DTShortName} resources must be installed in the same namespace as the `ServiceMeshControlPlane` resource. diff --git a/modules/distr-tracing-features.adoc b/modules/distr-tracing-features.adoc deleted file mode 100644 index 127c08d91319..000000000000 --- a/modules/distr-tracing-features.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc --dist_tracing_arch/distr-tracing-architecture.adoc -//// - -[id="distr-tracing-features_{context}"] -= {DTProductName} features - -{DTProductName} provides the following capabilities: - -* Integration with Kiali – When properly configured, you can view {DTShortName} data from the Kiali console. - -* High scalability – The {DTShortName} back end is designed to have no single points of failure and to scale with the business needs. - -* Distributed Context Propagation – Enables you to connect data from different components together to create a complete end-to-end trace. - -* Backwards compatibility with Zipkin – {DTProductName} has APIs that enable it to be used as a drop-in replacement for Zipkin, but Red Hat is not supporting Zipkin compatibility in this release. 
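
The deployment best practices above call for unique {JaegerName} instance names and, in multitenant installations, one {JaegerShortName} instance per tenant namespace. The following minimal sketch illustrates that layout; the `tenant-a` and `tenant-b` namespaces and the instance names are hypothetical placeholders, and each custom resource relies on the default deployment strategy.

.Example: one {JaegerShortName} instance per tenant namespace (illustrative sketch)
[source,yaml]
----
# Hypothetical tenant namespaces; each tenant gets its own uniquely named instance.
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: jaeger-tenant-a # unique instance name
  namespace: tenant-a   # tenant namespace
---
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
  name: jaeger-tenant-b
  namespace: tenant-b
----

With sidecar injection, the `sidecar.jaegertracing.io/inject` annotation on each tenant workload would then name the instance in its own namespace explicitly, as the best practices recommend.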
diff --git a/modules/distr-tracing-install-elasticsearch.adoc b/modules/distr-tracing-install-elasticsearch.adoc deleted file mode 100644 index 822df38406e2..000000000000 --- a/modules/distr-tracing-install-elasticsearch.adoc +++ /dev/null @@ -1,57 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-operator-install-elasticsearch_{context}"] -= Installing the OpenShift Elasticsearch Operator - -The default {JaegerName} deployment uses in-memory storage because it is designed to be installed quickly for those evaluating {DTProductName}, giving demonstrations, or using {JaegerName} in a test environment. If you plan to use {JaegerName} in production, you must install and configure a persistent storage option, in this case, Elasticsearch. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -[NOTE] -==== -If you have already installed the OpenShift Elasticsearch Operator as part of OpenShift Logging, you do not need to install the OpenShift Elasticsearch Operator again. The {JaegerName} Operator creates the Elasticsearch instance using the installed OpenShift Elasticsearch Operator. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *Elasticsearch* into the filter box to locate the OpenShift Elasticsearch Operator. - -. Click the *OpenShift Elasticsearch Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators-redhat` project and makes the Operator available to all projects in the cluster. -+ -[NOTE] -==== -The Elasticsearch installation requires the *openshift-operators-redhat* namespace for the OpenShift Elasticsearch Operator. The other {DTProductName} Operators are installed in the `openshift-operators` namespace. -==== -+ - -* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== - -. Click *Install*. - -. On the *Installed Operators* page, select the `openshift-operators-redhat` project. Wait until you see that the OpenShift Elasticsearch Operator shows a status of "InstallSucceeded" before continuing. 
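
The procedure above installs the OpenShift Elasticsearch Operator through the web console. As a rough equivalent, the console creates an OLM `Subscription` similar to the following sketch. The package name `elasticsearch-operator` and the `redhat-operators` catalog source are assumptions; the channel, approval strategy, and `openshift-operators-redhat` namespace follow the values described above.

.Example `Subscription` sketch for the OpenShift Elasticsearch Operator
[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: elasticsearch-operator
  namespace: openshift-operators-redhat # namespace required by this Operator, as noted above
spec:
  channel: stable                 # stable update channel
  installPlanApproval: Automatic  # or Manual, which requires an administrator to approve updates
  name: elasticsearch-operator    # assumed package name in the catalog
  source: redhat-operators        # assumed catalog source
  sourceNamespace: openshift-marketplace
----

An `OperatorGroup` that targets all namespaces would also need to exist in `openshift-operators-redhat` to match the *All namespaces on the cluster (default)* option selected in the console.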
diff --git a/modules/distr-tracing-install-jaeger-operator.adoc b/modules/distr-tracing-install-jaeger-operator.adoc deleted file mode 100644 index 37bcc6ba7cd7..000000000000 --- a/modules/distr-tracing-install-jaeger-operator.adoc +++ /dev/null @@ -1,53 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-jaeger-operator-install_{context}"] -= Installing the {JaegerName} Operator - -To install {JaegerName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {JaegerName} Operator. - -By default, the Operator is installed in the `openshift-operators` project. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. -* If you require persistent storage, you must also install the OpenShift Elasticsearch Operator before installing the {JaegerName} Operator. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *distributed tracing platform* into the filter to locate the {JaegerName} Operator. - -. Click the *{JaegerName} Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, select the *stable* Update Channel. This automatically updates your Operator as new versions are released. -//If you select a maintenance channel, for example, *Stable*, you will receive bug fixes and security patches for the length of the support cycle for that version. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. - -* Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== -+ - -. Click *Install*. - -. Navigate to *Operators* -> *Installed Operators*. - -. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {JaegerName} Operator shows a status of "Succeeded" before continuing. 
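
Like the Elasticsearch Operator installation, the web console steps above correspond roughly to an OLM `Subscription` in the `openshift-operators` project. The following sketch assumes the {JaegerName} Operator is published under the package name `jaeger-product` in the `redhat-operators` catalog; verify the actual package name in OperatorHub before using it.

.Example `Subscription` sketch for the {JaegerName} Operator
[source,yaml]
----
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: jaeger-product
  namespace: openshift-operators # default namespace for this Operator, as described above
spec:
  channel: stable                 # stable update channel
  installPlanApproval: Automatic  # automatic upgrades through OLM
  name: jaeger-product            # assumed package name
  source: redhat-operators        # assumed catalog source
  sourceNamespace: openshift-marketplace
----

The {OTELName} Operator described in the next module follows the same pattern with its own package name.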
diff --git a/modules/distr-tracing-install-otel-operator.adoc b/modules/distr-tracing-install-otel-operator.adoc deleted file mode 100644 index d85255daa30c..000000000000 --- a/modules/distr-tracing-install-otel-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-install-otel-operator_{context}"] -= Installing the {OTELName} Operator - -:FeatureName: The {OTELName} Operator -include::snippets/technology-preview.adoc[leveloffset=+1] - -To install {OTELName}, you use the link:https://operatorhub.io/[OperatorHub] to install the {OTELName} Operator. - -By default, the Operator is installed in the `openshift-operators` project. - -.Prerequisites -* You have access to the {product-title} web console. -* You have access to the cluster as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -[WARNING] -==== -Do not install Community versions of the Operators. Community Operators are not supported. -==== - -.Procedure - -. Log in to the {product-title} web console as a user with the `cluster-admin` role. If you use {product-dedicated}, you must have an account with the `dedicated-admin` role. - -. Navigate to *Operators* -> *OperatorHub*. - -. Type *distributed tracing data collection* into the filter to locate the {OTELName} Operator. - -. Click the *{OTELName} Operator* provided by Red Hat to display information about the Operator. - -. Click *Install*. - -. On the *Install Operator* page, accept the default *stable* Update channel. This automatically updates your Operator as new versions are released. - -. Accept the default *All namespaces on the cluster (default)*. This installs the Operator in the default `openshift-operators` project and makes the Operator available to all projects in the cluster. - -. Accept the default *Automatic* approval strategy. By accepting the default, when a new version of this Operator is available, Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without human intervention. If you select *Manual* updates, when a newer version of an Operator is available, OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. -+ -[NOTE] -==== -The *Manual* approval strategy requires a user with appropriate credentials to approve the Operator install and subscription process. -==== -+ - -. Click *Install*. - -. Navigate to *Operators* -> *Installed Operators*. - -. On the *Installed Operators* page, select the `openshift-operators` project. Wait until you see that the {OTELName} Operator shows a status of "Succeeded" before continuing. diff --git a/modules/distr-tracing-install-overview.adoc b/modules/distr-tracing-install-overview.adoc deleted file mode 100644 index d082c4e98944..000000000000 --- a/modules/distr-tracing-install-overview.adoc +++ /dev/null @@ -1,20 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-installing.adoc -//// - -:_content-type: CONCEPT -[id="distr-tracing-install-overview_{context}"] -= {DTProductName} installation overview - -The steps for installing {DTProductName} are as follows: - -* Review the documentation and determine your deployment strategy. 
- -* If your deployment strategy requires persistent storage, install the OpenShift Elasticsearch Operator via the OperatorHub. - -* Install the {JaegerName} Operator via the OperatorHub. - -* Modify the custom resource YAML file to support your deployment strategy. - -* Deploy one or more instances of {JaegerName} to your {product-title} environment. diff --git a/modules/distr-tracing-product-overview.adoc b/modules/distr-tracing-product-overview.adoc deleted file mode 100644 index d7519178e294..000000000000 --- a/modules/distr-tracing-product-overview.adoc +++ /dev/null @@ -1,28 +0,0 @@ -//// -This module included in the following assemblies: --service_mesh/v2x/ossm-architecture.adoc -- distributed-tracing-release-notes.adoc --distr_tracing_arch/distr-tracing-architecture.adoc --serverless/serverless-tracing.adoc -//// - -:_content-type: CONCEPT -[id="distr-tracing-product-overview_{context}"] -= Distributed tracing overview - -As a service owner, you can use distributed tracing to instrument your services to gather insights into your service architecture. -You can use {DTShortName} for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. - -With {DTShortName} you can perform the following functions: - -* Monitor distributed transactions - -* Optimize performance and latency - -* Perform root cause analysis - -{DTProductName} consists of two main components: - -* *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -* *{OTELNAME}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. diff --git a/modules/distr-tracing-removing-instance-cli.adoc b/modules/distr-tracing-removing-instance-cli.adoc deleted file mode 100644 index cf9d94810e7f..000000000000 --- a/modules/distr-tracing-removing-instance-cli.adoc +++ /dev/null @@ -1,90 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/dist-tracing-removing.adoc -//// - -[id="distr-tracing-removing-instance-cli_{context}"] -= Removing a {JaegerName} instance from the CLI - -. Log in to the {product-title} CLI. -+ -[source,terminal] ----- -$ oc login --username=<NAMEOFUSER> ----- -+ -. To display the {JaegerShortName} instances run the command: -+ -[source,terminal] ----- -$ oc get deployments -n <jaeger-project> ----- -+ -For example, -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -The names of Operators have the suffix `-operator`. The following example shows two {JaegerName} Operators and four {JaegerShortName} instances: -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -You should see output similar to the following: -+ -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -elasticsearch-operator 1/1 1 1 93m -jaeger-operator 1/1 1 1 49m -jaeger-test 1/1 1 1 7m23s -jaeger-test2 1/1 1 1 6m48s -tracing1 1/1 1 1 7m8s -tracing2 1/1 1 1 35m ----- -+ -. To remove an instance of {JaegerShortName}, run the following command: -+ -[source,terminal] ----- -$ oc delete jaeger <deployment-name> -n <jaeger-project> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete jaeger tracing2 -n openshift-operators ----- -+ - -. 
To verify the deletion, run the `oc get deployments` command again: -+ -[source,terminal] ----- -$ oc get deployments -n <jaeger-project> ----- - -+ -For example: -+ -[source,terminal] ----- -$ oc get deployments -n openshift-operators ----- -+ -You should see output similar to the following example: -+ -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -elasticsearch-operator 1/1 1 1 94m -jaeger-operator 1/1 1 1 50m -jaeger-test 1/1 1 1 8m14s -jaeger-test2 1/1 1 1 7m39s -tracing1 1/1 1 1 7m59s ----- diff --git a/modules/distr-tracing-removing-instance.adoc b/modules/distr-tracing-removing-instance.adoc deleted file mode 100644 index 809277e3c3bc..000000000000 --- a/modules/distr-tracing-removing-instance.adoc +++ /dev/null @@ -1,29 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/dist-tracing-removing.adoc -//// - -:_content-type: PROCEDURE -[id="distr-tracing-removing-instance_{context}"] -= Removing a {JaegerName} instance using the web console - -[NOTE] -==== -When deleting an instance that uses the in-memory storage, all data is permanently lost. Data stored in persistent storage such as Elasticsearch is not deleted when a {JaegerName} instance is removed. -==== - -.Procedure - -. Log in to the {product-title} web console. - -. Navigate to *Operators* -> *Installed Operators*. - -. Select the name of the project where the Operators are installed from the *Project* menu, for example, `openshift-operators`. - -. Click the {JaegerName} Operator. - -. Click the *Jaeger* tab. - -. Click the Options menu {kebab} next to the instance you want to delete and select *Delete Jaeger*. - -. In the confirmation message, click *Delete*. diff --git a/modules/distr-tracing-rn-fixed-issues.adoc b/modules/distr-tracing-rn-fixed-issues.adoc deleted file mode 100644 index 6fffaa723b2f..000000000000 --- a/modules/distr-tracing-rn-fixed-issues.adoc +++ /dev/null @@ -1,57 +0,0 @@ -//// -Module included in the following assemblies: -* distributed-tracing-release-notes.adoc -* service_mesh/v2x/servicemesh-release-notes.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-rn-fixed-issues_{context}"] -= {DTProductName} fixed issues -//// -Provide the following info for each issue if possible: -Consequence - What user action or situation would make this problem appear (If you have the foo option enabled and did x)? What did the customer experience as a result of the issue? What was the symptom? -Cause - Why did this happen? -Fix - What did we change to fix the problem? -Result - How has the behavior changed as a result? Try to avoid “It is fixed” or “The issue is resolved” or “The error no longer presents”. -//// - -* link:https://issues.redhat.com/browse/OSSM-1910[OSSM-1910] -Because of an issue introduced in version 2.6, TLS connections could not be established with {product-title} {SMProductShortName}. -This update resolves the issue by changing the service port names to match conventions used by {product-title} {SMProductShortName} and Istio. - -* link:https://issues.redhat.com/browse/OBSDA-208[OBSDA-208] - Before this update, the default 200m CPU and 256Mi memory resource limits could cause {OTELShortName} to restart continuously on large clusters. - This update resolves the issue by removing these resource limits. - -* link:https://issues.redhat.com/browse/OBSDA-222[OBSDA-222] -Before this update, spans could be dropped in the {product-title} {JaegerShortName}.
-To help prevent this issue from occurring, this release updates version dependencies. - -* link:https://issues.redhat.com/browse/TRACING-2337[TRACING-2337] -Jaeger is logging a repetitive warning message in the Jaeger logs similar to the following: -+ -[source,terminal] ----- -{"level":"warn","ts":1642438880.918793,"caller":"channelz/logging.go:62","msg":"[core]grpc: Server.Serve failed to create ServerTransport: connection error: desc = \"transport: http2Server.HandleStreams received bogus greeting from client: \\\"\\\\x16\\\\x03\\\\x01\\\\x02\\\\x00\\\\x01\\\\x00\\\\x01\\\\xfc\\\\x03\\\\x03vw\\\\x1a\\\\xc9T\\\\xe7\\\\xdaCj\\\\xb7\\\\x8dK\\\\xa6\\\"\"","system":"grpc","grpc_log":true} ----- -+ -This issue was resolved by exposing only the HTTP(S) port of the query service, and not the gRPC port. - -* link:https://issues.redhat.com/browse/TRACING-2009[TRACING-2009] The Jaeger Operator has been updated to include support for the Strimzi Kafka Operator 0.23.0. - -* link:https://issues.redhat.com/browse/TRACING-1907[TRACING-1907] The Jaeger agent sidecar injection was failing due to missing config maps in the application namespace. The config maps were getting automatically deleted due to an incorrect `OwnerReference` field setting and as a result, the application pods were not moving past the "ContainerCreating" stage. The incorrect settings have been removed. - -* link:https://issues.redhat.com/browse/TRACING-1725[TRACING-1725] Follow-up to TRACING-1631. Additional fix to ensure that Elasticsearch certificates are properly reconciled when there are multiple Jaeger production instances, using same name but within different namespaces. See also link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920]. - -* link:https://issues.jboss.org/browse/TRACING-1631[TRACING-1631] Multiple Jaeger production instances, using same name but within different namespaces, causing Elasticsearch certificate issue. When multiple service meshes were installed, all of the Jaeger Elasticsearch instances had the same Elasticsearch secret instead of individual secrets, which prevented the OpenShift Elasticsearch Operator from communicating with all of the Elasticsearch clusters. - -* link:https://issues.redhat.com/browse/TRACING-1300[TRACING-1300] Failed connection between Agent and Collector when using Istio sidecar. An update of the Jaeger Operator enabled TLS communication by default between a Jaeger sidecar agent and the Jaeger Collector. - -* link:https://issues.redhat.com/browse/TRACING-1208[TRACING-1208] Authentication "500 Internal Error" when accessing Jaeger UI. When trying to authenticate to the UI using OAuth, I get a 500 error because oauth-proxy sidecar doesn't trust the custom CA bundle defined at installation time with the `additionalTrustBundle`. - -* link:https://issues.redhat.com/browse/TRACING-1166[TRACING-1166] It is not currently possible to use the Jaeger streaming strategy within a disconnected environment. When a Kafka cluster is being provisioned, it results in a error: `Failed to pull image registry.redhat.io/amq7/amq-streams-kafka-24-rhel7@sha256:f9ceca004f1b7dccb3b82d9a8027961f9fe4104e0ed69752c0bdd8078b4a1076`. - -* link:https://issues.redhat.com/browse/TRACING-809[TRACING-809] Jaeger Ingester is incompatible with Kafka 2.3. When there are two or more instances of the Jaeger Ingester and enough traffic it will continuously generate rebalancing messages in the logs. This is due to a regression in Kafka 2.3 that was fixed in Kafka 2.3.1. 
For more information, see https://github.com/jaegertracing/jaeger/issues/1819[Jaegertracing-1819]. - -* link:https://bugzilla.redhat.com/show_bug.cgi?id=1918920[BZ-1918920]/link:https://issues.redhat.com/browse/LOG-1619[LOG-1619] The Elasticsearch pods do not get restarted automatically after an update. -+ -Workaround: Restart the pods manually. diff --git a/modules/distr-tracing-rn-known-issues.adoc b/modules/distr-tracing-rn-known-issues.adoc deleted file mode 100644 index 737bc1fbe259..000000000000 --- a/modules/distr-tracing-rn-known-issues.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//// -Module included in the following assemblies: -* service_mesh/v2x/servicemesh-release-notes.adoc -* distributed-tracing--release-notes.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-rn-known-issues_{context}"] -= {DTProductName} known issues - -//// -Consequence - What user action or situation would make this problem appear (Selecting the Foo option with the Bar version 1.3 plugin enabled results in an error message)? What did the customer experience as a result of the issue? What was the symptom? -Cause (if it has been identified) - Why did this happen? -Workaround (If there is one) - What can you do to avoid or negate the effects of this issue in the meantime? Sometimes if there is no workaround it is worthwhile telling readers to contact support for advice. Never promise future fixes. -Result - If the workaround does not completely address the problem. -//// - -These limitations exist in {DTProductName}: - -* Apache Spark is not supported. -ifndef::openshift-rosa[] - -* The streaming deployment via AMQ/Kafka is unsupported on IBM Z and IBM Power Systems. -endif::openshift-rosa[] - -These are the known issues for {DTProductName}: - -* link:https://issues.redhat.com/browse/OBSDA-220[OBSDA-220] In some cases, if you try to pull an image using {OTELShortName}, the image pull fails and a `Failed to pull image` error message appears. -There is no workaround for this issue. - -* link:https://issues.redhat.com/browse/TRACING-2057[TRACING-2057] The Kafka API has been updated to `v1beta2` to support the Strimzi Kafka Operator 0.23.0. However, this API version is not supported by AMQ Streams 1.6.3. If you have the following environment, your Jaeger services will not be upgraded, and you cannot create new Jaeger services or modify existing Jaeger services: - -** Jaeger Operator channel: *1.17.x stable* or *1.20.x stable* -** AMQ Streams Operator channel: *amq-streams-1.6.x* -+ -To resolve this issue, switch the subscription channel for your AMQ Streams Operator to either *amq-streams-1.7.x* or *stable*. diff --git a/modules/distr-tracing-rn-new-features.adoc b/modules/distr-tracing-rn-new-features.adoc deleted file mode 100644 index cc2674c57079..000000000000 --- a/modules/distr-tracing-rn-new-features.adoc +++ /dev/null @@ -1,236 +0,0 @@ -//// -Module included in the following assemblies: -- distributed-tracing-release-notes.adoc -//// -//// -Feature – Describe the new functionality available to the customer. For enhancements, try to describe as specifically as possible where the customer will see changes. -Reason – If known, include why the enhancement has been implemented (use case, performance, technology, etc.). For example, showcases integration of X with Y, demonstrates Z API feature, includes latest framework bug fixes. -Result – If changed, describe the current user experience.
-//// -:_content-type: REFERENCE -[id="distr-tracing-rn-new-features_{context}"] -= New features and enhancements - -This release adds improvements related to the following components and concepts. - -== New features and enhancements {DTProductName} 2.8 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.8 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.42 - -|{OTELName} -|OpenTelemetry -|0.74.0 - -|{TempoName} -|{TempoShortName} -|0.1.0 -|=== - -== New features and enhancements {DTProductName} 2.7 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.7 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.39 - -|{OTELName} -|OpenTelemetry -|0.63.1 -|=== - -== New features and enhancements {DTProductName} 2.6 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.6 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.38 - -|{OTELName} -|OpenTelemetry -|0.60 -|=== - -== New features and enhancements {DTProductName} 2.5 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -This release introduces support for ingesting OpenTelemetry protocol (OTLP) to the {JaegerName} Operator. The Operator now automatically enables the OTLP ports: - -* Port 4317 is used for OTLP gRPC protocol. -* Port 4318 is used for OTLP HTTP protocol. - -This release also adds support for collecting Kubernetes resource attributes to the {OTELName} Operator. - -=== Component versions supported in {DTProductName} version 2.5 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.36 - -|{OTELName} -|OpenTelemetry -|0.56 -|=== - - -== New features and enhancements {DTProductName} 2.4 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -This release also adds support for auto-provisioning certificates using the Red Hat Elasticsearch Operator. - -* Self-provisioning, which means using the {JaegerName} Operator to call the Red Hat Elasticsearch Operator during installation. Self provisioning is fully supported with this release. -* Creating the Elasticsearch instance and certificates first and then configuring the {JaegerShortName} to use the certificate is a Technology Preview for this release. - -[NOTE] -==== -When upgrading to {DTProductName} 2.4, the Operator recreates the Elasticsearch instance, which might take five to ten minutes. Distributed tracing will be down and unavailable for that period. -==== - -=== Component versions supported in {DTProductName} version 2.4 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.34.1 - -|{OTELName} -|OpenTelemetry -|0.49 -|=== - -== New features and enhancements {DTProductName} 2.3.1 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. 
- -=== Component versions supported in {DTProductName} version 2.3.1 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.2 - -|{OTELName} -|OpenTelemetry -|0.44.1-1 -|=== - -== New features and enhancements {DTProductName} 2.3.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -With this release, the {JaegerName} Operator is now installed to the `openshift-distributed-tracing` namespace by default. Before this update, the default installation had been in the `openshift-operators` namespace. - -=== Component versions supported in {DTProductName} version 2.3.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.1 - -|{OTELName} -|OpenTelemetry -|0.44.0 -|=== - -== New features and enhancements {DTProductName} 2.2.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.2.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.30.0 - -|{OTELName} -|OpenTelemetry -|0.42.0 -|=== - -== New features and enhancements {DTProductName} 2.1.0 - -This release of {DTProductName} addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.1.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.29.1 - -|{OTELName} -|OpenTelemetry -|0.41.1 -|=== - -== New features and enhancements {DTProductName} 2.0.0 - -This release marks the rebranding of Red Hat OpenShift Jaeger to {DTProductName}. This release consists of the following changes, additions, and improvements: - -* {DTProductName} now consists of the following two main components: - -** *{JaegerName}* - This component is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. - -** *{OTELName}* - This component is based on the open source link:https://opentelemetry.io/[OpenTelemetry project]. - -* Updates {JaegerName} Operator to Jaeger 1.28. Going forward, {DTProductName} will only support the `stable` Operator channel. Channels for individual releases are no longer supported. - -* Introduces a new {OTELName} Operator based on OpenTelemetry 0.33. Note that this Operator is a Technology Preview feature. - -* Adds support for OpenTelemetry protocol (OTLP) to the Query service. - -* Introduces a new distributed tracing icon that appears in the OpenShift OperatorHub. - -* Includes rolling updates to the documentation to support the name change and new features. - -This release also addresses Common Vulnerabilities and Exposures (CVEs) and bug fixes. - -=== Component versions supported in {DTProductName} version 2.0.0 - -[options="header"] -|=== -|Operator |Component |Version -|{JaegerName} -|Jaeger -|1.28.0 - -|{OTELName} -|OpenTelemetry -|0.33.0 -|=== diff --git a/modules/distr-tracing-rn-technology-preview.adoc b/modules/distr-tracing-rn-technology-preview.adoc deleted file mode 100644 index bb06420e5a5d..000000000000 --- a/modules/distr-tracing-rn-technology-preview.adoc +++ /dev/null @@ -1,86 +0,0 @@ -//// -Module included in the following assemblies: -- rhbjaeger-release-notes.adoc -//// -:_content-type: CONCEPT -[id="distr-tracing-rn-technology-preview_{context}"] -= {DTProductName} Technology Preview -//// -Provide the following info for each issue if possible: -Description - Describe the new functionality available to the customer. 
For enhancements, try to describe as specifically as possible where the customer will see changes. Avoid the word “supports” as in [product] now supports [feature] to avoid customer confusion with full support. Say, for example, “available as a Technology Preview.” -Package - A brief description of what the customer has to install or enable to use the Technology Preview feature. (e.g., available in quickstart.zip on customer portal, JDF website, container on registry, enable option, etc.) -//// - -[IMPORTANT] -==== -Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. -==== - -== {DTProductName} 2.8.0 Technology Preview - -This release introduces support for {TempoName} as a Technology Preview feature for {DTProductName}. -The feature uses version 0.1.0 of {TempoName} and version 2.0.1 of the upstream {TempoShortName} components. - -You can use {TempoName} to replace Jaeger so that you can use S3-compatible storage instead of ElasticSearch. -Most users who use {TempoName} instead of Jaeger will not notice any difference in functionality because {TempoShortName} supports the same ingestion and query protocols as Jaeger and uses the same user interface. - -If you enable this Technology Preview feature, note the following limitations of the current implementation: - -* {TempoName} currently does not support disconnected installations. (link:https://issues.redhat.com/browse/TRACING-3145[TRACING-3145]) - -* When you use the Jaeger user interface (UI) with {TempoName}, the Jaeger UI lists only services that have sent traces within the last 15 minutes. -For services that have not sent traces within the last 15 minutes, those traces are still stored even though they are not visible in the Jaeger UI. (link:https://issues.redhat.com/browse/TRACING-3139[TRACING-3139]) - -Expanded support for the Tempo Operator is planned for future releases of {DTProductName}. -Possible additional features might include support for TLS authentication, multitenancy, and multiple clusters. -For more information about the Tempo Operator, see link:https://tempo-operator.netlify.app[the Tempo community documentation]. - -== {DTProductName} 2.4.0 Technology Preview - -This release also adds support for auto-provisioning certificates using the Red Hat Elasticsearch Operator. - -* Self-provisioning, which means using the {JaegerName} Operator to call the Red Hat Elasticsearch Operator during installation. Self provisioning is fully supported with this release. -* Creating the Elasticsearch instance and certificates first and then configuring the {JaegerShortName} to use the certificate is a Technology Preview for this release. - -== {DTProductName} 2.2.0 Technology Preview - -Unsupported OpenTelemetry Collector components included in the 2.1 release have been removed. - -== {DTProductName} 2.1.0 Technology Preview - -This release introduces a breaking change to how to configure certificates in the OpenTelemetry custom resource file. 
In the new version, the `ca_file` moves under `tls` in the custom resource, as shown in the following examples. - -.CA file configuration for OpenTelemetry version 0.33 -[source,yaml] ----- -spec: - mode: deployment - config: | - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" ----- - -.CA file configuration for OpenTelemetry version 0.41.1 -[source,yaml] ----- -spec: - mode: deployment - config: | - exporters: - jaeger: - endpoint: jaeger-production-collector-headless.tracing-system.svc:14250 - tls: - ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" ----- - -== {DTProductName} 2.0.0 Technology Preview - -This release includes the addition of the {OTELName}, which you install using the {OTELName} Operator. {OTELName} is based on the link:https://opentelemetry.io/[OpenTelemetry] APIs and instrumentation. - -{OTELName} includes the OpenTelemetry Operator and Collector. The Collector can be used to receive traces in either the OpenTelemetry or Jaeger protocol and send the trace data to {DTProductName}. Other capabilities of the Collector are not supported at this time. - -The OpenTelemetry Collector allows developers to instrument their code with vendor agnostic APIs, avoiding vendor lock-in and enabling a growing ecosystem of observability tooling. diff --git a/modules/distr-tracing-sidecar-automatic.adoc b/modules/distr-tracing-sidecar-automatic.adoc deleted file mode 100644 index 9c425dc12c7a..000000000000 --- a/modules/distr-tracing-sidecar-automatic.adoc +++ /dev/null @@ -1,39 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="dist-tracing-sidecar-automatic_{context}"] -= Automatically injecting sidecars - -The {JaegerName} Operator can inject Jaeger Agent sidecars into Deployment workloads. To enable automatic injection of sidecars, add the `sidecar.jaegertracing.io/inject` annotation set to either the string `true` or to the {JaegerShortName} instance name that is returned by running `$ oc get jaegers`. -When you specify `true`, there should be only a single {JaegerShortName} instance for the same namespace as the deployment, otherwise, the Operator cannot determine which {JaegerShortName} instance to use. A specific {JaegerShortName} instance name on a deployment has a higher precedence than `true` applied on its namespace. - -The following snippet shows a simple application that will inject a sidecar, with the agent pointing to the single {JaegerShortName} instance available in the same namespace: - -.Automatic sidecar injection example -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: myapp - annotations: - "sidecar.jaegertracing.io/inject": "true" # <1> -spec: - selector: - matchLabels: - app: myapp - template: - metadata: - labels: - app: myapp - spec: - containers: - - name: myapp - image: acme/myapp:myversion ----- - -<1> Set to either the string `true` or to the Jaeger instance name. - -When the sidecar is injected, the agent can then be accessed at its default location on `localhost`. 
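
The automatic injection module above notes that the annotation can also name a specific {JaegerShortName} instance, and that an explicit name takes precedence over `true` set on the namespace. The following sketch repeats the earlier Deployment example with a named instance; the instance name `my-jaeger` is a hypothetical value that would be returned by `$ oc get jaegers`.

.Example: sidecar injection that targets a named {JaegerShortName} instance
[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  annotations:
    "sidecar.jaegertracing.io/inject": "my-jaeger" # hypothetical instance name instead of "true"
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: acme/myapp:myversion
----

Naming the instance removes any ambiguity when more than one {JaegerShortName} instance exists, which is why the deployment best practices recommend this form for multitenant installations.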
diff --git a/modules/distr-tracing-sidecar-manual.adoc b/modules/distr-tracing-sidecar-manual.adoc deleted file mode 100644 index 1f914ed319cb..000000000000 --- a/modules/distr-tracing-sidecar-manual.adoc +++ /dev/null @@ -1,58 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-deploying-jaeger.adoc -//// -:_content-type: REFERENCE -[id="distr-tracing-sidecar-manual_{context}"] -= Manually injecting sidecars - - -The {JaegerName} Operator can only automatically inject Jaeger Agent sidecars into Deployment workloads. For controller types other than `Deployments`, such as `StatefulSets`and `DaemonSets`, you can manually define the Jaeger agent sidecar in your specification. - -The following snippet shows the manual definition you can include in your containers section for a Jaeger agent sidecar: - -.Sidecar definition example for a `StatefulSet` -[source,yaml] ----- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: example-statefulset - namespace: example-ns - labels: - app: example-app -spec: - - spec: - containers: - - name: example-app - image: acme/myapp:myversion - ports: - - containerPort: 8080 - protocol: TCP - - name: jaeger-agent - image: registry.redhat.io/distributed-tracing/jaeger-agent-rhel7:<version> - # The agent version must match the Operator version - imagePullPolicy: IfNotPresent - ports: - - containerPort: 5775 - name: zk-compact-trft - protocol: UDP - - containerPort: 5778 - name: config-rest - protocol: TCP - - containerPort: 6831 - name: jg-compact-trft - protocol: UDP - - containerPort: 6832 - name: jg-binary-trft - protocol: UDP - - containerPort: 14271 - name: admin-http - protocol: TCP - args: - - --reporter.grpc.host-port=dns:///jaeger-collector-headless.example-ns:14250 - - --reporter.type=grpc ----- - -The agent can then be accessed at its default location on localhost. diff --git a/modules/distr-tracing-upgrading-es5-es6.adoc b/modules/distr-tracing-upgrading-es5-es6.adoc deleted file mode 100644 index d2c0107a0622..000000000000 --- a/modules/distr-tracing-upgrading-es5-es6.adoc +++ /dev/null @@ -1,88 +0,0 @@ -//// -This module included in the following assemblies: -- distr_tracing_install/distr-tracing-updating -//// - -[id="distr-tracing-upgrading-es5-es6_{context}"] -= Upgrading from Elasticsearch 5 to 6 - -When updating from Elasticsearch 5 to 6, you must delete your {JaegerShortName} instance, and then recreate the {JaegerShortName} instance because of an issue with certificates. Re-creating the {JaegerShortName} instance triggers the creation of a new set of certificates. If you are using persistent storage, the same volumes can be mounted for the new {JaegerShortName} instance as long as the {JaegerShortName} name and namespace for the new {JaegerShortName} instance are the same as the deleted {JaegerShortName} instance. - -.Procedure if {JaegerShortName} is installed as part of {SMProductName} - -. Determine the name of your Jaeger custom resource file. In this example, `istio-system` is the control plane namespace. -+ -[source,terminal] ----- -$ oc get jaeger -n <istio-system> ----- -+ -You should see something like the following: -+ -[source,terminal] ----- -NAME STATUS VERSION STRATEGY STORAGE AGE -jaeger Running 1.24.1 production elasticsearch d21h ----- -+ -. Copy the generated custom resource file into a temporary directory: -+ -[source,terminal] ----- -$ oc get jaeger jaeger -oyaml -n <istio-system> > /tmp/jaeger-cr.yaml ----- -+ -. 
Delete the {JaegerShortName} instance: -+ -[source,terminal] ----- -$ oc delete jaeger jaeger -n <istio-system> ----- -+ -. Recreate the {JaegerShortName} instance from your copy of the custom resource file: -+ -[source,terminal] ----- -$ oc create -f /tmp/jaeger-cr.yaml -n <istio-system> ----- -+ -. Delete the copy of the generated custom resource file: -+ -[source,terminal] ----- -$ rm /tmp/jaeger-cr.yaml ----- - - -.Procedure if {JaegerShortName} not installed as part of {SMProductName} - -Before you begin, create a copy of your Jaeger custom resource file. - -. Delete the {JaegerShortName} instance by deleting the custom resource file: -+ -[source,terminal] ----- -$ oc delete -f <jaeger-cr-file> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc delete -f jaeger-prod-elasticsearch.yaml ----- -+ -. Recreate your {JaegerShortName} instance from the backup copy of your custom resource file: -+ -[source,terminal] ----- -$ oc create -f <jaeger-cr-file> ----- -+ -. Validate that your pods have restarted: -+ -[source,terminal] ----- -$ oc get pods -n <tracing-system> -w ----- -+ diff --git a/modules/dr-hosted-cluster-within-aws-region-backup.adoc b/modules/dr-hosted-cluster-within-aws-region-backup.adoc deleted file mode 100644 index cb30ed2bac4d..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-backup.adoc +++ /dev/null @@ -1,222 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-backup_{context}"] -= Backing up a hosted cluster - -To recover your hosted cluster in your target management cluster, you first need to back up all of the relevant data. - -.Procedure - -. Create a configmap file to declare the source management cluster by entering this command: -+ -[source,terminal] ----- -$ oc create configmap mgmt-parent-cluster -n default --from-literal=from=${MGMT_CLUSTER_NAME} ----- - -. Shut down the reconciliation in the hosted cluster and in the node pools by entering these commands: -+ -[source,terminal] ----- -PAUSED_UNTIL="true" -oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator ----- -+ -[source,terminal] ----- -PAUSED_UNTIL="true" -oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc patch -n ${HC_CLUSTER_NS} nodepools/${NODEPOOLS} -p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator ----- - -. Back up etcd and upload the data to an S3 bucket by running this bash script: -+ -[TIP] -==== -Wrap this script in a function and call it from the main function. 
-==== -+ -[source,terminal] ----- -# ETCD Backup -ETCD_PODS="etcd-0" -if [ "${CONTROL_PLANE_AVAILABILITY_POLICY}" = "HighlyAvailable" ]; then - ETCD_PODS="etcd-0 etcd-1 etcd-2" -fi - -for POD in ${ETCD_PODS}; do - # Create an etcd snapshot - oc exec -it ${POD} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- env ETCDCTL_API=3 /usr/bin/etcdctl --cacert /etc/etcd/tls/client/etcd-client-ca.crt --cert /etc/etcd/tls/client/etcd-client.crt --key /etc/etcd/tls/client/etcd-client.key --endpoints=localhost:2379 snapshot save /var/lib/data/snapshot.db - oc exec -it ${POD} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- env ETCDCTL_API=3 /usr/bin/etcdctl -w table snapshot status /var/lib/data/snapshot.db - - FILEPATH="/${BUCKET_NAME}/${HC_CLUSTER_NAME}-${POD}-snapshot.db" - CONTENT_TYPE="application/x-compressed-tar" - DATE_VALUE=`date -R` - SIGNATURE_STRING="PUT\n\n${CONTENT_TYPE}\n${DATE_VALUE}\n${FILEPATH}" - - set +x - ACCESS_KEY=$(grep aws_access_key_id ${AWS_CREDS} | head -n1 | cut -d= -f2 | sed "s/ //g") - SECRET_KEY=$(grep aws_secret_access_key ${AWS_CREDS} | head -n1 | cut -d= -f2 | sed "s/ //g") - SIGNATURE_HASH=$(echo -en ${SIGNATURE_STRING} | openssl sha1 -hmac "${SECRET_KEY}" -binary | base64) - set -x - - # FIXME: this is pushing to the OIDC bucket - oc exec -it etcd-0 -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -- curl -X PUT -T "/var/lib/data/snapshot.db" \ - -H "Host: ${BUCKET_NAME}.s3.amazonaws.com" \ - -H "Date: ${DATE_VALUE}" \ - -H "Content-Type: ${CONTENT_TYPE}" \ - -H "Authorization: AWS ${ACCESS_KEY}:${SIGNATURE_HASH}" \ - https://${BUCKET_NAME}.s3.amazonaws.com/${HC_CLUSTER_NAME}-${POD}-snapshot.db -done ----- -+ -For more information about backing up etcd, see "Backing up and restoring etcd on a hosted cluster". - -. Back up Kubernetes and {product-title} objects by entering the following commands. 
You need to back up the following objects: - - * `HostedCluster` and `NodePool` objects from the HostedCluster namespace - * `HostedCluster` secrets from the HostedCluster namespace - * `HostedControlPlane` from the Hosted Control Plane namespace - * `Cluster` from the Hosted Control Plane namespace - * `AWSCluster`, `AWSMachineTemplate`, and `AWSMachine` from the Hosted Control Plane namespace - * `MachineDeployments`, `MachineSets`, and `Machines` from the Hosted Control Plane namespace - * `ControlPlane` secrets from the Hosted Control Plane namespace -+ -[source,terminal] ----- -mkdir -p ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS} ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -chmod 700 ${BACKUP_DIR}/namespaces/ - -# HostedCluster -echo "Backing Up HostedCluster Objects:" -oc get hc ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml -echo "--> HostedCluster" -sed -i '' -e '/^status:$/,$d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml - -# NodePool -oc get np ${NODEPOOLS} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml -echo "--> NodePool" -sed -i '' -e '/^status:$/,$ d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml - -# Secrets in the HC Namespace -echo "--> HostedCluster Secrets:" -for s in $(oc get secret -n ${HC_CLUSTER_NS} | grep "^${HC_CLUSTER_NAME}" | awk '{print $1}'); do - oc get secret -n ${HC_CLUSTER_NS} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-${s}.yaml -done - -# Secrets in the HC Control Plane Namespace -echo "--> HostedCluster ControlPlane Secrets:" -for s in $(oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} | egrep -v "docker|service-account-token|oauth-openshift|NAME|token-${HC_CLUSTER_NAME}" | awk '{print $1}'); do - oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-${s}.yaml -done - -# Hosted Control Plane -echo "--> HostedControlPlane:" -oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-${HC_CLUSTER_NAME}.yaml - -# Cluster -echo "--> Cluster:" -CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME}) -oc get cluster ${CL_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-${HC_CLUSTER_NAME}.yaml - -# AWS Cluster -echo "--> AWS Cluster:" -oc get awscluster ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-${HC_CLUSTER_NAME}.yaml - -# AWS MachineTemplate -echo "--> AWS Machine Template:" -oc get awsmachinetemplate ${NODEPOOLS} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-${HC_CLUSTER_NAME}.yaml - -# AWS Machines -echo "--> AWS Machine:" -CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME}) -for s in $(oc get awsmachines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --no-headers | grep ${CL_NAME} | cut -f1 -d\ ); do - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} awsmachines $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-${s}.yaml -done - -# MachineDeployments -echo "--> HostedCluster 
MachineDeployments:" -for s in $(oc get machinedeployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - mdp_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-${mdp_name}.yaml -done - -# MachineSets -echo "--> HostedCluster MachineSets:" -for s in $(oc get machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - ms_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-${ms_name}.yaml -done - -# Machines -echo "--> HostedCluster Machine:" -for s in $(oc get machine -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - m_name=$(echo ${s} | cut -f 2 -d /) - oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-${m_name}.yaml -done ----- - -. Clean up the `ControlPlane` routes by entering this command: -+ -[source,terminal] ----- -$ oc delete routes -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all ----- -+ -By entering that command, you enable the ExternalDNS Operator to delete the Route53 entries. - -. Verify that the Route53 entries are clean by running this script: -+ -[source,terminal] ----- -function clean_routes() { - - if [[ -z "${1}" ]];then - echo "Give me the NS where to clean the routes" - exit 1 - fi - - # Constants - if [[ -z "${2}" ]];then - echo "Give me the Route53 zone ID" - exit 1 - fi - - ZONE_ID=${2} - ROUTES=10 - timeout=40 - count=0 - - # This allows us to remove the ownership in the AWS for the API route - oc delete route -n ${1} --all - - while [ ${ROUTES} -gt 2 ] - do - echo "Waiting for ExternalDNS Operator to clean the DNS Records in AWS Route53 where the zone id is: ${ZONE_ID}..." - echo "Try: (${count}/${timeout})" - sleep 10 - if [[ $count -eq timeout ]];then - echo "Timeout waiting for cleaning the Route53 DNS records" - exit 1 - fi - count=$((count+1)) - ROUTES=$(aws route53 list-resource-record-sets --hosted-zone-id ${ZONE_ID} --max-items 10000 --output json | grep -c ${EXTERNAL_DNS_DOMAIN}) - done -} - -# SAMPLE: clean_routes "<HC ControlPlane Namespace>" "<AWS_ZONE_ID>" -clean_routes "${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" "${AWS_ZONE_ID}" ----- - -.Verification - -Check all of the {product-title} objects and the S3 bucket to verify that everything looks as expected. - -.Next steps - -Restore your hosted cluster. diff --git a/modules/dr-hosted-cluster-within-aws-region-delete.adoc b/modules/dr-hosted-cluster-within-aws-region-delete.adoc deleted file mode 100644 index 887e8c67ed50..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-delete.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-delete_{context}"] -= Deleting a hosted cluster from your source management cluster - -After you back up your hosted cluster and restore it to your destination management cluster, you shut down and delete the hosted cluster on your source management cluster. - -.Prerequisites - -You backed up your data and restored it to your source management cluster. - -[TIP] -==== -Ensure that the `kubeconfig` file of the destination management cluster is placed as it is set in the `KUBECONFIG` variable or, if you use the script, in the `MGMT_KUBECONFIG` variable. 
Use `export KUBECONFIG=<Kubeconfig FilePath>` or, if you use the script, use `export KUBECONFIG=${MGMT_KUBECONFIG}`. -==== - -.Procedure - -. Scale the `deployment` and `statefulset` objects by entering these commands: -+ -[source,terminal] ----- -# Just in case -export KUBECONFIG=${MGMT_KUBECONFIG} - -# Scale down deployments -oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all -oc scale statefulset.apps -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 --all -sleep 15 ----- - -. Delete the `NodePool` objects by entering these commands: -+ -[source,terminal] ----- -NODEPOOLS=$(oc get nodepools -n ${HC_CLUSTER_NS} -o=jsonpath='{.items[?(@.spec.clusterName=="'${HC_CLUSTER_NAME}'")].metadata.name}') -if [[ ! -z "${NODEPOOLS}" ]];then - oc patch -n "${HC_CLUSTER_NS}" nodepool ${NODEPOOLS} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' - oc delete np -n ${HC_CLUSTER_NS} ${NODEPOOLS} -fi ----- - -. Delete the `machine` and `machineset` objects by entering these commands: -+ -[source,terminal] ----- -# Machines -for m in $(oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do - oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true - oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true -done - -oc delete machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all || true ----- - -. Delete the cluster object by entering these commands: -+ -[source,terminal] ----- -# Cluster -C_NAME=$(oc get cluster -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) -oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${C_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' -oc delete cluster.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all ----- - -. Delete the AWS machines (Kubernetes objects) by entering these commands. Do not worry about deleting the real AWS machines. The cloud instances will not be affected. -+ -[source,terminal] ----- -# AWS Machines -for m in $(oc get awsmachine.infrastructure.cluster.x-k8s.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name) -do - oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' || true - oc delete -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ${m} || true -done ----- - -. Delete the `HostedControlPlane` and `ControlPlane` HC namespace objects by entering these commands: -+ -[source,terminal] ----- -# Delete HCP and ControlPlane HC NS -oc patch -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} hostedcontrolplane.hypershift.openshift.io ${HC_CLUSTER_NAME} --type=json --patch='[ { "op":"remove", "path": "/metadata/finalizers" }]' -oc delete hostedcontrolplane.hypershift.openshift.io -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all -oc delete ns ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} || true ----- - -. 
Delete the `HostedCluster` and HC namespace objects by entering these commands: -+ -[source,terminal] ----- -# Delete HC and HC Namespace -oc -n ${HC_CLUSTER_NS} patch hostedclusters ${HC_CLUSTER_NAME} -p '{"metadata":{"finalizers":null}}' --type merge || true -oc delete hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} || true -oc delete ns ${HC_CLUSTER_NS} || true ----- - -.Verification - -* To verify that everything works, enter these commands: -+ -[source,terminal] ----- -# Validations -export KUBECONFIG=${MGMT2_KUBECONFIG} - -oc get hc -n ${HC_CLUSTER_NS} -oc get np -n ${HC_CLUSTER_NS} -oc get pod -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -oc get machines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} - -# Inside the HostedCluster -export KUBECONFIG=${HC_KUBECONFIG} -oc get clusterversion -oc get nodes ----- - -.Next steps - -Delete the OVN pods in the hosted cluster so that you can connect to the new OVN control plane that runs in the new management cluster: - -. Load the `KUBECONFIG` environment variable with the hosted cluster's kubeconfig path. - -. Enter this command: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-ovn-kubernetes --all ----- diff --git a/modules/dr-hosted-cluster-within-aws-region-restore.adoc b/modules/dr-hosted-cluster-within-aws-region-restore.adoc deleted file mode 100644 index 2e73f04aa59e..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-restore.adoc +++ /dev/null @@ -1,157 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-restore_{context}"] -= Restoring a hosted cluster - -Gather all of the objects that you backed up and restore them in your destination management cluster. - -.Prerequisites - -You backed up the data from your source management cluster. - -[TIP] -==== -Ensure that the `kubeconfig` file of the destination management cluster is placed as it is set in the `KUBECONFIG` variable or, if you use the script, in the `MGMT2_KUBECONFIG` variable. Use `export KUBECONFIG=<Kubeconfig FilePath>` or, if you use the script, use `export KUBECONFIG=${MGMT2_KUBECONFIG}`. -==== - -.Procedure - -. Verify that the new management cluster does not contain any namespaces from the cluster that you are restoring by entering these commands: -+ -[source,terminal] ----- -# Just in case -export KUBECONFIG=${MGMT2_KUBECONFIG} -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -# Namespace deletion in the destination Management cluster -$ oc delete ns ${HC_CLUSTER_NS} || true -$ oc delete ns ${HC_CLUSTER_NS}-{HC_CLUSTER_NAME} || true ----- - -. Re-create the deleted namespaces by entering these commands: -+ -[source,terminal] ----- -# Namespace creation -$ oc new-project ${HC_CLUSTER_NS} -$ oc new-project ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} ----- - -. Restore the secrets in the HC namespace by entering this command: -+ -[source,terminal] ----- -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-* ----- - -. Restore the objects in the `HostedCluster` control plane namespace by entering these commands: -+ -[source,terminal] ----- -# Secrets -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-* - -# Cluster -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-* ----- - -. 
If you are recovering the nodes and the node pool to reuse AWS instances, restore the objects in the HC control plane namespace by entering these commands: -+ -[source,terminal] ----- -# AWS -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-* - -# Machines -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-* -$ oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-* ----- - -. Restore the etcd data and the hosted cluster by running this bash script: -+ -[source,terminal] ----- -ETCD_PODS="etcd-0" -if [ "${CONTROL_PLANE_AVAILABILITY_POLICY}" = "HighlyAvailable" ]; then - ETCD_PODS="etcd-0 etcd-1 etcd-2" -fi - -HC_RESTORE_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}-restore.yaml -HC_BACKUP_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml -HC_NEW_FILE=${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}-new.yaml -cat ${HC_BACKUP_FILE} > ${HC_NEW_FILE} -cat > ${HC_RESTORE_FILE} <<EOF - restoreSnapshotURL: -EOF - -for POD in ${ETCD_PODS}; do - # Create a pre-signed URL for the etcd snapshot - ETCD_SNAPSHOT="s3://${BUCKET_NAME}/${HC_CLUSTER_NAME}-${POD}-snapshot.db" - ETCD_SNAPSHOT_URL=$(AWS_DEFAULT_REGION=${MGMT2_REGION} aws s3 presign ${ETCD_SNAPSHOT}) - - # FIXME no CLI support for restoreSnapshotURL yet - cat >> ${HC_RESTORE_FILE} <<EOF - - "${ETCD_SNAPSHOT_URL}" -EOF -done - -cat ${HC_RESTORE_FILE} - -if ! grep ${HC_CLUSTER_NAME}-snapshot.db ${HC_NEW_FILE}; then - sed -i '' -e "/type: PersistentVolume/r ${HC_RESTORE_FILE}" ${HC_NEW_FILE} - sed -i '' -e '/pausedUntil:/d' ${HC_NEW_FILE} -fi - -HC=$(oc get hc -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} -o name || true) -if [[ ${HC} == "" ]];then - echo "Deploying HC Cluster: ${HC_CLUSTER_NAME} in ${HC_CLUSTER_NS} namespace" - oc apply -f ${HC_NEW_FILE} -else - echo "HC Cluster ${HC_CLUSTER_NAME} already exists, avoiding step" -fi ----- - -. If you are recovering the nodes and the node pool to reuse AWS instances, restore the node pool by entering this command: -+ -[source,terminal] ----- -oc apply -f ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-* ----- - -.Verification - -* To verify that the nodes are fully restored, use this function: -+ -[source,terminal] ----- -timeout=40 -count=0 -NODE_STATUS=$(oc get nodes --kubeconfig=${HC_KUBECONFIG} | grep -v NotReady | grep -c "worker") || NODE_STATUS=0 - -while [ ${NODE_POOL_REPLICAS} != ${NODE_STATUS} ] -do - echo "Waiting for Nodes to be Ready in the destination MGMT Cluster: ${MGMT2_CLUSTER_NAME}" - echo "Try: (${count}/${timeout})" - sleep 30 - if [[ $count -eq timeout ]];then - echo "Timeout waiting for Nodes in the destination MGMT Cluster" - exit 1 - fi - count=$((count+1)) - NODE_STATUS=$(oc get nodes --kubeconfig=${HC_KUBECONFIG} | grep -v NotReady | grep -c "worker") || NODE_STATUS=0 -done ----- - -.Next steps - -Shut down and delete your cluster. 
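Editor's note: before moving on to shut down and delete the source cluster, you might also confirm that the restored control plane reports as available. The following sketch assumes the environment variables defined earlier in this procedure (`MGMT2_KUBECONFIG`, `HC_CLUSTER_NS`, `HC_CLUSTER_NAME`) are still exported and that the `HostedCluster` resource exposes an `Available` condition; it is an optional check, not part of the documented procedure.

[source,terminal]
----
# Optional check: confirm that the restored HostedCluster reports
# Available=True on the destination management cluster.
export KUBECONFIG=${MGMT2_KUBECONFIG}
oc get hostedcluster -n ${HC_CLUSTER_NS} ${HC_CLUSTER_NAME} \
  -o jsonpath='{.status.conditions[?(@.type=="Available")].status}{"\n"}'
----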
- - diff --git a/modules/dr-hosted-cluster-within-aws-region-script.adoc b/modules/dr-hosted-cluster-within-aws-region-script.adoc deleted file mode 100644 index a1049b78ee52..000000000000 --- a/modules/dr-hosted-cluster-within-aws-region-script.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assembly: -// -// * hcp-backup-restore-dr.adoc - -:_content-type: PROCEDURE -[id="dr-hosted-cluster-within-aws-region-script_{context}"] -= Running a script to back up and restore a hosted cluster - -To expedite the process to back up a hosted cluster and restore it within the same region on AWS, you can modify and run a script. - -.Procedure - -. Replace the variables in the following script with your information: -+ -[source,terminal] ----- -# Fill the Common variables to fit your environment, this is just a sample -SSH_KEY_FILE=${HOME}/.ssh/id_rsa.pub -BASE_PATH=${HOME}/hypershift -BASE_DOMAIN="aws.sample.com" -PULL_SECRET_FILE="${HOME}/pull_secret.json" -AWS_CREDS="${HOME}/.aws/credentials" -CONTROL_PLANE_AVAILABILITY_POLICY=SingleReplica -HYPERSHIFT_PATH=${BASE_PATH}/src/hypershift -HYPERSHIFT_CLI=${HYPERSHIFT_PATH}/bin/hypershift -HYPERSHIFT_IMAGE=${HYPERSHIFT_IMAGE:-"quay.io/${USER}/hypershift:latest"} -NODE_POOL_REPLICAS=${NODE_POOL_REPLICAS:-2} - -# MGMT Context -MGMT_REGION=us-west-1 -MGMT_CLUSTER_NAME="${USER}-dev" -MGMT_CLUSTER_NS=${USER} -MGMT_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT_CLUSTER_NS}-${MGMT_CLUSTER_NAME}" -MGMT_KUBECONFIG="${MGMT_CLUSTER_DIR}/kubeconfig" - -# MGMT2 Context -MGMT2_CLUSTER_NAME="${USER}-dest" -MGMT2_CLUSTER_NS=${USER} -MGMT2_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${MGMT2_CLUSTER_NS}-${MGMT2_CLUSTER_NAME}" -MGMT2_KUBECONFIG="${MGMT2_CLUSTER_DIR}/kubeconfig" - -# Hosted Cluster Context -HC_CLUSTER_NS=clusters -HC_REGION=us-west-1 -HC_CLUSTER_NAME="${USER}-hosted" -HC_CLUSTER_DIR="${BASE_PATH}/hosted_clusters/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}" -HC_KUBECONFIG="${HC_CLUSTER_DIR}/kubeconfig" -BACKUP_DIR=${HC_CLUSTER_DIR}/backup - -BUCKET_NAME="${USER}-hosted-${MGMT_REGION}" - -# DNS -AWS_ZONE_ID="Z026552815SS3YPH9H6MG" -EXTERNAL_DNS_DOMAIN="guest.jpdv.aws.kerbeross.com" ----- - -. Save the script to your local file system. - -. Run the script by entering the following command: -+ -[source,terminal] ----- -source <env_file> ----- -+ -where: `env_file` is the name of the file where you saved the script. -+ -The migration script is maintained at the following repository: link:https://github.com/openshift/hypershift/blob/main/contrib/migration/migrate-hcp.sh[https://github.com/openshift/hypershift/blob/main/contrib/migration/migrate-hcp.sh]. \ No newline at end of file diff --git a/modules/dr-recover-expired-control-plane-certs.adoc b/modules/dr-recover-expired-control-plane-certs.adoc deleted file mode 100644 index c70cda469977..000000000000 --- a/modules/dr-recover-expired-control-plane-certs.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-3-expired-certs.adoc - -:_content-type: PROCEDURE -[id="dr-scenario-3-recovering-expired-certs_{context}"] -= Recovering from expired control plane certificates - -The cluster can automatically recover from expired control plane certificates. - -However, you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. For user-provisioned installations, you might also need to approve pending kubelet serving CSRs. 
- -Use the following steps to approve the pending CSRs: - -.Procedure - -. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output ----- -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-2s94x 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4bd6t 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending -csr-4hl85 13m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -csr-zhhhp 3m8s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -... ----- -<1> A pending kubelet service CSR (for user-provisioned installations). -<2> A pending `node-bootstrapper` CSR. - -. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -. Approve each valid `node-bootstrapper` CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. For user-provisioned installations, approve each valid kubelet serving CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- diff --git a/modules/dr-restoring-cluster-state-about.adoc b/modules/dr-restoring-cluster-state-about.adoc deleted file mode 100644 index afba3a3db5eb..000000000000 --- a/modules/dr-restoring-cluster-state-about.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc - -:_content-type: CONCEPT -[id="dr-scenario-2-restoring-cluster-state-about_{context}"] -= About restoring cluster state - -You can use an etcd backup to restore your cluster to a previous state. This can be used to recover from the following situations: - -* The cluster has lost the majority of control plane hosts (quorum loss). -* An administrator has deleted something critical and must restore to recover the cluster. - -[WARNING] -==== -Restoring to a previous cluster state is a destructive and destablizing action to take on a running cluster. This should only be used as a last resort. - -If you are able to retrieve data using the Kubernetes API server, then etcd is available and you should not restore using an etcd backup. -==== - -Restoring etcd effectively takes a cluster back in time and all clients will experience a conflicting, parallel history. This can impact the behavior of watching components like kubelets, Kubernetes controller managers, SDN controllers, and persistent volume controllers. - -It can cause Operator churn when the content in etcd does not match the actual content on disk, causing Operators for the Kubernetes API server, Kubernetes controller manager, Kubernetes scheduler, and etcd to get stuck when files on disk conflict with content in etcd. This can require manual actions to resolve the issues. - -In extreme cases, the cluster can lose track of persistent volumes, delete critical workloads that no longer exist, reimage machines, and rewrite CA bundles with expired certificates. 
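Editor's note: as a rough rule of thumb before committing to a restore, you can check whether the API server still answers simple requests; if the commands in the following sketch succeed, etcd is available and a restore from backup is probably not the right tool. This is only an illustrative check, not a substitute for the prerequisites in the restore procedure.

[source,terminal]
----
# If these commands return data, the Kubernetes API server (and therefore
# etcd) is still serving requests, and restoring from backup is discouraged.
$ oc whoami
$ oc get nodes
$ oc -n openshift-etcd get pods -l k8s-app=etcd
----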
diff --git a/modules/dr-restoring-cluster-state.adoc b/modules/dr-restoring-cluster-state.adoc deleted file mode 100644 index aa45e1690263..000000000000 --- a/modules/dr-restoring-cluster-state.adoc +++ /dev/null @@ -1,730 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc -// * post_installation_configuration/cluster-tasks.adoc - - -:_content-type: PROCEDURE -[id="dr-scenario-2-restoring-cluster-state_{context}"] -= Restoring to a previous cluster state - -You can use a saved etcd backup to restore a previous cluster state or restore a cluster that has lost the majority of control plane hosts. - -[NOTE] -==== -If your cluster uses a control plane machine set, see "Troubleshooting the control plane machine set" for a more simple etcd recovery procedure. -==== - -[IMPORTANT] -==== -When you restore your cluster, you must use an etcd backup that was taken from the same z-stream release. For example, an {product-title} 4.7.2 cluster must use an etcd backup that was taken from 4.7.2. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* A healthy control plane host to use as the recovery host. -* SSH access to control plane hosts. -* A backup directory containing both the etcd snapshot and the resources for the static pods, which were from the same backup. The file names in the directory must be in the following formats: `snapshot_<datetimestamp>.db` and `static_kuberesources_<datetimestamp>.tar.gz`. - -[IMPORTANT] -==== -For non-recovery control plane nodes, it is not required to establish SSH connectivity or to stop the static pods. You can delete and recreate other non-recovery, control plane machines, one by one. -==== - -.Procedure - -. Select a control plane host to use as the recovery host. This is the host that you will run the restore operation on. - -. Establish SSH connectivity to each of the control plane nodes, including the recovery host. -+ -The Kubernetes API server becomes inaccessible after the restore process starts, so you cannot access the control plane nodes. For this reason, it is recommended to establish SSH connectivity to each control plane host in a separate terminal. -+ -[IMPORTANT] -==== -If you do not complete this step, you will not be able to access the control plane hosts to complete the restore procedure, and you will be unable to recover your cluster from this state. -==== - -. Copy the etcd backup directory to the recovery control plane host. -+ -This procedure assumes that you copied the `backup` directory containing the etcd snapshot and the resources for the static pods to the `/home/core/` directory of your recovery control plane host. - -. Stop the static pods on any other control plane nodes. -+ -[NOTE] -==== -It is not required to manually stop the pods on the recovery host. The recovery script will stop the pods on the recovery host. -==== - -.. Access a control plane host that is not the recovery host. - -.. Move the existing etcd pod file out of the kubelet manifest directory: -+ -[source,terminal] ----- -$ sudo mv /etc/kubernetes/manifests/etcd-pod.yaml /tmp ----- - -.. Verify that the etcd pods are stopped. -+ -[source,terminal] ----- -$ sudo crictl ps | grep etcd | egrep -v "operator|etcd-guard" ----- -+ -The output of this command should be empty. If it is not empty, wait a few minutes and check again. - -.. 
Move the existing Kubernetes API server pod file out of the kubelet manifest directory: -+ -[source,terminal] ----- -$ sudo mv /etc/kubernetes/manifests/kube-apiserver-pod.yaml /tmp ----- - -.. Verify that the Kubernetes API server pods are stopped. -+ -[source,terminal] ----- -$ sudo crictl ps | grep kube-apiserver | egrep -v "operator|guard" ----- -+ -The output of this command should be empty. If it is not empty, wait a few minutes and check again. - -.. Move the etcd data directory to a different location: -+ -[source,terminal] ----- -$ sudo mv /var/lib/etcd/ /tmp ----- - -.. Repeat this step on each of the other control plane hosts that is not the recovery host. - -. Access the recovery control plane host. - -. If the cluster-wide proxy is enabled, be sure that you have exported the `NO_PROXY`, `HTTP_PROXY`, and `HTTPS_PROXY` environment variables. -+ -[TIP] -==== -You can check whether the proxy is enabled by reviewing the output of `oc get proxy cluster -o yaml`. The proxy is enabled if the `httpProxy`, `httpsProxy`, and `noProxy` fields have values set. -==== - -. Run the restore script on the recovery control plane host and pass in the path to the etcd backup directory: -+ -[source,terminal] ----- -$ sudo -E /usr/local/bin/cluster-restore.sh /home/core/backup ----- -+ -.Example script output -[source,terminal] ----- -...stopping kube-scheduler-pod.yaml -...stopping kube-controller-manager-pod.yaml -...stopping etcd-pod.yaml -...stopping kube-apiserver-pod.yaml -Waiting for container etcd to stop -.complete -Waiting for container etcdctl to stop -.............................complete -Waiting for container etcd-metrics to stop -complete -Waiting for container kube-controller-manager to stop -complete -Waiting for container kube-apiserver to stop -..........................................................................................complete -Waiting for container kube-scheduler to stop -complete -Moving etcd data-dir /var/lib/etcd/member to /var/lib/etcd-backup -starting restore-etcd static pod -starting kube-apiserver-pod.yaml -static-pod-resources/kube-apiserver-pod-7/kube-apiserver-pod.yaml -starting kube-controller-manager-pod.yaml -static-pod-resources/kube-controller-manager-pod-7/kube-controller-manager-pod.yaml -starting kube-scheduler-pod.yaml -static-pod-resources/kube-scheduler-pod-8/kube-scheduler-pod.yaml ----- -+ -[NOTE] -==== -The restore process can cause nodes to enter the `NotReady` state if the node certificates were updated after the last etcd backup. -==== - -. Check the nodes to ensure they are in the `Ready` state. - -.. Run the following command: -+ -[source,terminal] ----- -$ oc get nodes -w ----- -+ -.Sample output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -host-172-25-75-28 Ready master 3d20h v1.26.0 -host-172-25-75-38 Ready infra,worker 3d20h v1.26.0 -host-172-25-75-40 Ready master 3d20h v1.26.0 -host-172-25-75-65 Ready master 3d20h v1.26.0 -host-172-25-75-74 Ready infra,worker 3d20h v1.26.0 -host-172-25-75-79 Ready worker 3d20h v1.26.0 -host-172-25-75-86 Ready worker 3d20h v1.26.0 -host-172-25-75-98 Ready infra,worker 3d20h v1.26.0 ----- -+ -It can take several minutes for all nodes to report their state. - -.. If any nodes are in the `NotReady` state, log in to the nodes and remove all of the PEM files from the `/var/lib/kubelet/pki` directory on each node. You can SSH into the nodes or use the terminal window in the web console. 
-+ -[source,terminal] ----- -$ ssh -i <ssh-key-path> core@<master-hostname> ----- -+ -.Sample `pki` directory -[source,terminal] ----- -sh-4.4# pwd -/var/lib/kubelet/pki -sh-4.4# ls -kubelet-client-2022-04-28-11-24-09.pem kubelet-server-2022-04-28-11-24-15.pem -kubelet-client-current.pem kubelet-server-current.pem ----- - -. Restart the kubelet service on all control plane hosts. - -.. From the recovery host, run the following command: -+ -[source,terminal] ----- -$ sudo systemctl restart kubelet.service ----- - -.. Repeat this step on all other control plane hosts. - -. Approve the pending CSRs: -+ -[NOTE] -==== -Clusters with no worker nodes, such as single-node clusters or clusters consisting of three schedulable control plane nodes, will not have any pending CSRs to approve. In those scenarios, you can skip this step. -==== - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output ----- -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-2s94x 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4bd6t 8m3s kubernetes.io/kubelet-serving system:node:<node_name> Pending <1> -csr-4hl85 13m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -csr-zhhhp 3m8s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending <2> -... ----- -<1> A pending kubelet service CSR (for user-provisioned installations). -<2> A pending `node-bootstrapper` CSR. - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -.. Approve each valid `node-bootstrapper` CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -.. For user-provisioned installations, approve each valid kubelet service CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. Verify that the single member control plane has started successfully. - -.. From the recovery host, verify that the etcd container is running. -+ -[source,terminal] ----- -$ sudo crictl ps | grep etcd | egrep -v "operator|etcd-guard" ----- -+ -.Example output -[source,terminal] ----- -3ad41b7908e32 36f86e2eeaaffe662df0d21041eb22b8198e0e58abeeae8c743c3e6e977e8009 About a minute ago Running etcd 0 7c05f8af362f0 ----- - -.. From the recovery host, verify that the etcd pod is running. -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd ----- -+ -[NOTE] -==== -If you attempt to run `oc login` prior to running this command and receive the following error, wait a few moments for the authentication controllers to start and try again. - -[source,terminal] ----- -Unable to connect to the server: EOF ----- -==== -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -etcd-ip-10-0-143-125.ec2.internal 1/1 Running 1 2m47s ----- -+ -If the status is `Pending`, or the output lists more than one running etcd pod, wait a few minutes and check again. -+ -[NOTE] -==== -Perform the following step only if you are using `OVNKubernetes` network plugin. -==== - -. Delete the node objects that are associated with control plane hosts that are not the recovery control plane host. -+ -[source,terminal] ----- -$ oc delete node <non-recovery-controlplane-host-1> <non-recovery-controlplane-host-2> ----- - -. 
Verify that the Cluster Network Operator (CNO) redeploys the OVN-Kubernetes control plane and that it no longer references the non-recovery controller IP addresses. To verify this result, regularly check the output of the following command. Wait until it returns an empty result before you proceed with the next step. -+ -[source,terminal] ----- -$ oc -n openshift-ovn-kubernetes get ds/ovnkube-master -o yaml | grep -E '<non-recovery_controller_ip_1>|<non-recovery_controller_ip_2>' ----- -+ -[NOTE] -==== -It can take at least 5-10 minutes for the OVN-Kubernetes control plane to be redeployed and the previous command to return empty output. -==== - -. Restart the Open Virtual Network (OVN) Kubernetes pods on all the hosts. -+ -[NOTE] -==== -Validating and mutating admission webhooks can reject pods. If you add any additional webhooks with the `failurePolicy` set to `Fail`, then they can reject pods and the restoration process can fail. You can avoid this by saving and deleting webhooks while restoring the cluster state. After the cluster state is restored successfully, you can enable the webhooks again. - -Alternatively, you can temporarily set the `failurePolicy` to `Ignore` while restoring the cluster state. After the cluster state is restored successfully, you can set the `failurePolicy` to `Fail`. -==== - -.. Remove the northbound database (nbdb) and southbound database (sbdb). Access the recovery host and the remaining control plane nodes by using Secure Shell (SSH) and run the following command: -+ -[source,terminal] ----- -$ sudo rm -f /var/lib/ovn/etc/*.db ----- - -.. Delete all OVN-Kubernetes control plane pods by running the following command: -+ -[source,terminal] ----- -$ oc delete pods -l app=ovnkube-master -n openshift-ovn-kubernetes ----- - -.. Ensure that any OVN-Kubernetes control plane pods are deployed again and are in a `Running` state by running the following command: -+ -[source,terminal] ----- -$ oc get pods -l app=ovnkube-master -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ovnkube-master-nb24h 4/4 Running 0 48s ----- - -.. Delete all `ovnkube-node` pods by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes -o name | grep ovnkube-node | while read p ; do oc delete $p -n openshift-ovn-kubernetes ; done ----- - -.. Ensure that all the `ovnkube-node` pods are deployed again and are in a `Running` state by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes | grep ovnkube-node ----- - -. Delete and re-create other non-recovery, control plane machines, one by one. After the machines are re-created, a new revision is forced and etcd automatically scales up. -+ -** If you use a user-provisioned bare metal installation, you can re-create a control plane machine by using the same method that you used to originally create it. For more information, see "Installing a user-provisioned cluster on bare metal". -+ -[WARNING] -==== -Do not delete and re-create the machine for the recovery host. -==== -+ -** If you are running installer-provisioned infrastructure, or you used the Machine API to create your machines, follow these steps: -+ -[WARNING] -==== -Do not delete and re-create the machine for the recovery host. - -For bare metal installations on installer-provisioned infrastructure, control plane machines are not re-created. For more information, see "Replacing a bare-metal control plane node". -==== -.. 
Obtain the machine for one of the lost control plane hosts. -+ -In a terminal that has access to the cluster as a cluster-admin user, run the following command: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-0 Running m4.xlarge us-east-1 us-east-1a 3h37m ip-10-0-131-183.ec2.internal aws:///us-east-1a/i-0ec2782f8287dfb7e stopped <1> -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- -<1> This is the control plane machine for the lost control plane host, `ip-10-0-131-183.ec2.internal`. - -.. Save the machine configuration to a file on your file system: -+ -[source,terminal] ----- -$ oc get machine clustername-8qw5l-master-0 \ <1> - -n openshift-machine-api \ - -o yaml \ - > new-master-machine.yaml ----- -<1> Specify the name of the control plane machine for the lost control plane host. - -.. Edit the `new-master-machine.yaml` file that was created in the previous step to assign a new name and remove unnecessary fields. - -... Remove the entire `status` section: -+ -[source,terminal] ----- -status: - addresses: - - address: 10.0.131.183 - type: InternalIP - - address: ip-10-0-131-183.ec2.internal - type: InternalDNS - - address: ip-10-0-131-183.ec2.internal - type: Hostname - lastUpdated: "2020-04-20T17:44:29Z" - nodeRef: - kind: Node - name: ip-10-0-131-183.ec2.internal - uid: acca4411-af0d-4387-b73e-52b2484295ad - phase: Running - providerStatus: - apiVersion: awsproviderconfig.openshift.io/v1beta1 - conditions: - - lastProbeTime: "2020-04-20T16:53:50Z" - lastTransitionTime: "2020-04-20T16:53:50Z" - message: machine successfully created - reason: MachineCreationSucceeded - status: "True" - type: MachineCreation - instanceId: i-0fdb85790d76d0c3f - instanceState: stopped - kind: AWSMachineProviderStatus ----- - -... Change the `metadata.name` field to a new name. -+ -It is recommended to keep the same base name as the old machine and change the ending number to the next available number. In this example, `clustername-8qw5l-master-0` is changed to `clustername-8qw5l-master-3`: -+ -[source,terminal] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... - name: clustername-8qw5l-master-3 - ... ----- - -... Remove the `spec.providerID` field: -+ -[source,terminal] ----- -providerID: aws:///us-east-1a/i-0fdb85790d76d0c3f ----- - -... Remove the `metadata.annotations` and `metadata.generation` fields: -+ -[source,terminal] ----- -annotations: - machine.openshift.io/instance-state: running -... -generation: 2 ----- - -... 
Remove the `metadata.resourceVersion` and `metadata.uid` fields: -+ -[source,terminal] ----- -resourceVersion: "13291" -uid: a282eb70-40a2-4e89-8009-d05dd420d31a ----- - -.. Delete the machine of the lost control plane host: -+ -[source,terminal] ----- -$ oc delete machine -n openshift-machine-api clustername-8qw5l-master-0 <1> ----- -<1> Specify the name of the control plane machine for the lost control plane host. - -.. Verify that the machine was deleted: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- - -.. Create a machine by using the `new-master-machine.yaml` file: -+ -[source,terminal] ----- -$ oc apply -f new-master-machine.yaml ----- - -.. Verify that the new machine has been created: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api -o wide ----- -+ -Example output: -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE NODE PROVIDERID STATE -clustername-8qw5l-master-1 Running m4.xlarge us-east-1 us-east-1b 3h37m ip-10-0-143-125.ec2.internal aws:///us-east-1b/i-096c349b700a19631 running -clustername-8qw5l-master-2 Running m4.xlarge us-east-1 us-east-1c 3h37m ip-10-0-154-194.ec2.internal aws:///us-east-1c/i-02626f1dba9ed5bba running -clustername-8qw5l-master-3 Provisioning m4.xlarge us-east-1 us-east-1a 85s ip-10-0-173-171.ec2.internal aws:///us-east-1a/i-015b0888fe17bc2c8 running <1> -clustername-8qw5l-worker-us-east-1a-wbtgd Running m4.large us-east-1 us-east-1a 3h28m ip-10-0-129-226.ec2.internal aws:///us-east-1a/i-010ef6279b4662ced running -clustername-8qw5l-worker-us-east-1b-lrdxb Running m4.large us-east-1 us-east-1b 3h28m ip-10-0-144-248.ec2.internal aws:///us-east-1b/i-0cb45ac45a166173b running -clustername-8qw5l-worker-us-east-1c-pkg26 Running m4.large us-east-1 us-east-1c 3h28m ip-10-0-170-181.ec2.internal aws:///us-east-1c/i-06861c00007751b0a running ----- -<1> The new machine, `clustername-8qw5l-master-3` is being created and is ready after the phase changes from `Provisioning` to `Running`. -+ -It might take a few minutes for the new machine to be created. The etcd cluster Operator will automatically sync when the machine or node returns to a healthy state. - -.. Repeat these steps for each lost control plane host that is not the recovery host. - -. Turn off the quorum guard by entering the following command: -+ -[source,terminal] ----- -$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}' ----- -+ -This command ensures that you can successfully re-create secrets and roll out the static pods. - -. 
In a separate terminal window, log in to the cluster as a user with the `cluster-admin` role by entering the following command: -+ -[source,terminal] ----- -$ oc login -u <cluster_admin> <1> ----- -<1> For `<cluster_admin>`, specify a user name with the `cluster-admin` role. - -. Force etcd redeployment. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge <1> ----- -<1> The `forceRedeploymentReason` value must be unique, which is why a timestamp is appended. -+ -When the etcd cluster Operator performs a redeployment, the existing nodes are started with new pods similar to the initial bootstrap scale up. - -. Turn the quorum guard back on by entering the following command: -+ -[source,terminal] ----- -$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}' ----- - -. You can verify that the `unsupportedConfigOverrides` section is removed from the object by entering this command: -+ -[source,terminal] ----- -$ oc get etcd/cluster -oyaml ----- - -. Verify all nodes are updated to the latest revision. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc get etcd -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for etcd to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -. After etcd is redeployed, force new rollouts for the control plane. The Kubernetes API server will reinstall itself on the other nodes because the kubelet is connected to API servers using an internal load balancer. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following commands. - -.. Force a new rollout for the Kubernetes API server: -+ -[source,terminal] ----- -$ oc patch kubeapiserver cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -.. 
Force a new rollout for the Kubernetes controller manager: -+ -[source,terminal] ----- -$ oc patch kubecontrollermanager cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubecontrollermanager -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -.. Force a new rollout for the Kubernetes scheduler: -+ -[source,terminal] ----- -$ oc patch kubescheduler cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge ----- -+ -Verify all nodes are updated to the latest revision. -+ -[source,terminal] ----- -$ oc get kubescheduler -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 7 <1> ----- -<1> In this example, the latest revision number is `7`. -+ -If the output includes multiple revision numbers, such as `2 nodes are at revision 6; 1 nodes are at revision 7`, this means that the update is still in progress. Wait a few minutes and try again. - -. Verify that all control plane hosts have started and joined the cluster. -+ -In a terminal that has access to the cluster as a `cluster-admin` user, run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd ----- -+ -.Example output -[source,terminal] ----- -etcd-ip-10-0-143-125.ec2.internal 2/2 Running 0 9h -etcd-ip-10-0-154-194.ec2.internal 2/2 Running 0 9h -etcd-ip-10-0-173-171.ec2.internal 2/2 Running 0 9h ----- - -To ensure that all workloads return to normal operation following a recovery procedure, restart each pod that stores Kubernetes API information. This includes {product-title} components such as routers, Operators, and third-party components. - -[NOTE] -==== -On completion of the previous procedural steps, you might need to wait a few minutes for all services to return to their restored state. For example, authentication by using `oc login` might not immediately work until the OAuth server pods are restarted. - -Consider using the `system:admin` `kubeconfig` file for immediate authentication. This method basis its authentication on SSL/TLS client certificates as against OAuth tokens. 
You can authenticate with this file by issuing the following command: - -[source,terminal] ----- -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig ----- - -Issue the following command to display your authenticated user name: - -[source,terminal] ----- -$ oc whoami ----- -==== diff --git a/modules/dr-scenario-cluster-state-issues.adoc b/modules/dr-scenario-cluster-state-issues.adoc deleted file mode 100644 index 8d5dba8e2fb6..000000000000 --- a/modules/dr-scenario-cluster-state-issues.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * disaster_recovery/scenario-2-restoring-cluster-state.adoc -// * post_installation_configuration/cluster-tasks.adoc - -[id="dr-scenario-cluster-state-issues_{context}"] -= Issues and workarounds for restoring a persistent storage state - -If your {product-title} cluster uses persistent storage of any form, a state of the cluster is typically stored outside etcd. It might be an Elasticsearch cluster running in a pod or a database running in a `StatefulSet` object. When you restore from an etcd backup, the status of the workloads in {product-title} is also restored. However, if the etcd snapshot is old, the status might be invalid or outdated. - -[IMPORTANT] -==== -The contents of persistent volumes (PVs) are never part of the etcd snapshot. When you restore an {product-title} cluster from an etcd snapshot, non-critical workloads might gain access to critical data, or vice-versa. -==== - -The following are some example scenarios that produce an out-of-date status: - -* MySQL database is running in a pod backed up by a PV object. Restoring {product-title} from an etcd snapshot does not bring back the volume on the storage provider, and does not produce a running MySQL pod, despite the pod repeatedly attempting to start. You must manually restore this pod by restoring the volume on the storage provider, and then editing the PV to point to the new volume. - -* Pod P1 is using volume A, which is attached to node X. If the etcd snapshot is taken while another pod uses the same volume on node Y, then when the etcd restore is performed, pod P1 might not be able to start correctly due to the volume still being attached to node Y. {product-title} is not aware of the attachment, and does not automatically detach it. When this occurs, the volume must be manually detached from node Y so that the volume can attach on node X, and then pod P1 can start. - -* Cloud provider or storage provider credentials were updated after the etcd snapshot was taken. This causes any CSI drivers or Operators that depend on the those credentials to not work. You might have to manually update the credentials required by those drivers or Operators. - -* A device is removed or renamed from {product-title} nodes after the etcd snapshot is taken. The Local Storage Operator creates symlinks for each PV that it manages from `/dev/disk/by-id` or `/dev` directories. This situation might cause the local PVs to refer to devices that no longer exist. -+ -To fix this problem, an administrator must: - -. Manually remove the PVs with invalid devices. -. Remove symlinks from respective nodes. -. Delete `LocalVolume` or `LocalVolumeSet` objects (see _Storage_ -> _Configuring persistent storage_ -> _Persistent storage using local volumes_ -> _Deleting the Local Storage Operator Resources_). 
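Editor's note: the local-volume cleanup steps listed above might look like the following sketch for a single affected volume. Every name, the node, and the symlink path are hypothetical placeholders and depend on how the Local Storage Operator was configured in your cluster; adjust them before use.

[source,terminal]
----
# Illustrative cleanup for one stale local PV (placeholder names throughout).
# 1. Remove the PV that points at a device that no longer exists.
$ oc delete pv example-local-pv

# 2. Remove the stale symlink on the node (path is an example; it depends on
#    the storage class configured for the Local Storage Operator).
$ oc debug node/example-node -- chroot /host rm /mnt/local-storage/example-storage-class/example-device

# 3. Delete the LocalVolume object that referenced the device.
$ oc delete localvolume example-local-volume -n openshift-local-storage
----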
diff --git a/modules/dynamic-plug-in-development.adoc b/modules/dynamic-plug-in-development.adoc deleted file mode 100644 index e7fc7936f804..000000000000 --- a/modules/dynamic-plug-in-development.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugins-get-started.adoc - -:_content-type: PROCEDURE -[id="dynamic-plugin-development_{context}"] -= Dynamic plugin development - -You can run the plugin using a local development environment. The {product-title} web console runs in a container connected to the cluster you have logged into. - -.Prerequisites -* You must have an OpenShift cluster running. -* You must have the OpenShift CLI (`oc`) installed. -* You must have link:https://yarnpkg.com/[`yarn`] installed. -* You must have link:https://www.docker.com/[Docker] v3.2.0 or newer or link:https://podman.io/[Podman] installed and running. - -.Procedure - -. In your terminal, run the following command to install the dependencies for your plugin using yarn. - -+ -[source,terminal] ----- -$ yarn install ----- - -. After installing, run the following command to start yarn. - -+ -[source,terminal] ----- -$ yarn run start ----- - -. In another terminal window, login to the {product-title} through the CLI. -+ -[source,terminal] ----- -$ oc login ----- - -. Run the {product-title} web console in a container connected to the cluster you have logged into by running the following command: -+ -[source,terminal] ----- -$ yarn run start-console ----- - -.Verification -* Visit link:http://localhost:9000/example[localhost:9000] to view the running plugin. Inspect the value of `window.SERVER_FLAGS.consolePlugins` to see the list of plugins which load at runtime. diff --git a/modules/dynamic-plugin-api.adoc b/modules/dynamic-plugin-api.adoc deleted file mode 100644 index f7b3357eac4a..000000000000 --- a/modules/dynamic-plugin-api.adoc +++ /dev/null @@ -1,1512 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugins-reference.adoc - -:power-bi-url: https://microsoft.github.io/PowerBI-JavaScript/interfaces/_node_modules_typedoc_node_modules_typescript_lib_lib_dom_d_.requestinit.html -//needed to add an attribute for the url because escaping the underscore in the link would not work and the build was failing. - -:_content-type: REFERENCE -[id="dynamic-plugin-api_{context}"] -= {product-title} console API - -[discrete] -== `useActivePerspective` - -Hook that provides the currently active perspective and a callback for setting the active perspective. It returns a tuple containing the current active perspective and setter callback. - -.Example -[source,tsx] ----- -const Component: React.FC = (props) => { - const [activePerspective, setActivePerspective] = useActivePerspective(); - return <select - value={activePerspective} - onChange={(e) => setActivePerspective(e.target.value)} - > - { - // ...perspective options - } - </select> -} ----- - -[discrete] -== `GreenCheckCircleIcon` - -Component for displaying a green check mark circle icon. - -.Example -[source,tsx] ----- -<GreenCheckCircleIcon title="Healthy" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) -|=== - -[discrete] -== `RedExclamationCircleIcon` - -Component for displaying a red exclamation mark circle icon. 
- -.Example -[source,tsx] ----- -<RedExclamationCircleIcon title="Failed" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) -|=== - -[discrete] -== `YellowExclamationTriangleIcon` - -Component for displaying a yellow triangle exclamation icon. - -.Example -[source,tsx] ----- -<YellowExclamationTriangleIcon title="Warning" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: (`sm`, `md`, `lg`, `xl`) -|=== - -[discrete] -== `BlueInfoCircleIcon` - -Component for displaying a blue info circle icon. - -.Example -[source,tsx] ----- -<BlueInfoCircleIcon title="Info" /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) additional class name for the component -|`title` |(optional) icon title -|`size` |(optional) icon size: ('sm', 'md', 'lg', 'xl') -|=== - -[discrete] -== `ErrorStatus` - -Component for displaying an error status popover. - -.Example -[source,tsx] ----- -<ErrorStatus title={errorMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `InfoStatus` - -Component for displaying an information status popover. - -.Example -[source,tsx] ----- -<InfoStatus title={infoMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `ProgressStatus` - -Component for displaying a progressing status popover. - -.Example -[source,tsx] ----- -<ProgressStatus title={progressMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `SuccessStatus` - -Component for displaying a success status popover. - -.Example -[source,tsx] ----- -<SuccessStatus title={successMsg} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |(optional) status text -|`iconOnly` |(optional) if true, only displays icon -|`noTooltip` |(optional) if true, tooltip won't be displayed -|`className` |(optional) additional class name for the component -|`popoverTitle` |(optional) title for popover -|=== - -[discrete] -== `checkAccess` - -Provides information about user access to a given resource. It returns an object with resource access information. 
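A minimal usage sketch follows, assuming that `checkAccess` returns a promise that resolves to a `SelfSubjectAccessReview`-style object whose `status.allowed` field carries the decision; the return shape and the specific resource attributes shown are assumptions, not taken verbatim from this reference.

.Example
[source,ts]
----
// Hedged sketch: resolves to true only when the current user may delete pods
// in the given namespace. The status.allowed field is an assumption.
const canDeletePods = async (namespace: string): Promise<boolean> => {
  const review = await checkAccess({
    group: '',
    resource: 'pods',
    verb: 'delete',
    namespace,
  });
  return review?.status?.allowed ?? false;
};
----

For reactive components, the `useAccessReview` hook described below covers the same check without manual promise handling.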
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useAccessReview` - -Hook that provides information about user access to a given resource. It returns an array with `isAllowed` and `loading` values. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useResolvedExtensions` - -React hook for consuming Console extensions with resolved `CodeRef` properties. This hook accepts the same argument(s) as `useExtensions` hook and returns an adapted list of extension instances, resolving all code references within each extension's properties. - -Initially, the hook returns an empty array. After the resolution is complete, the React component is re-rendered with the hook returning an adapted list of extensions. When the list of matching extensions changes, the resolution is restarted. The hook will continue to return the previous result until the resolution completes. - -The hook's result elements are guaranteed to be referentially stable across re-renders. It returns a tuple containing a list of adapted extension instances with resolved code references, a boolean flag indicating whether the resolution is complete, and a list of errors detected during the resolution. - -.Example -[source,ts] ----- -const [navItemExtensions, navItemsResolved] = useResolvedExtensions<NavItem>(isNavItem); -// process adapted extensions and render your component ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`typeGuards` |A list of callbacks that each accept a dynamic plugin -extension as an argument and return a boolean flag indicating whether or -not the extension meets desired type constraints -|=== - -[discrete] -== `HorizontalNav` - -A component that creates a Navigation bar for a page. Routing is handled as part of the component. `console.tab/horizontalNav` can be used to add additional content to any horizontal navigation. - -.Example - -[source,jsx] ----- -const HomePage: React.FC = (props) => { - const page = { - href: '/home', - name: 'Home', - component: () => <>Home</> - } - return <HorizontalNav match={props.match} pages={[page]} /> -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |The resource associated with this Navigation, an object of -K8sResourceCommon type - -|`pages` |An array of page objects - -|`match` |match object provided by React Router -|=== - -[discrete] -== `VirtualizedTable` - -A component for making virtualized tables. 
- -.Example -[source,text] ----- -const MachineList: React.FC<MachineListProps> = (props) => { - return ( - <VirtualizedTable<MachineKind> - {...props} - aria-label='Machines' - columns={getMachineColumns} - Row={getMachineTableRow} - /> - ); -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |data for table -|`loaded` |flag indicating data is loaded -|`loadError` |error object if issue loading data -|`columns` |column setup -|`Row` |row setup -|`unfilteredData` |original data without filter -|`NoDataEmptyMsg` |(optional) no data empty message component -|`EmptyMsg` |(optional) empty message component -|`scrollNode` |(optional) function to handle scroll -|`label` |(optional) label for table -|`ariaLabel` |(optional) aria label -|`gridBreakPoint` |sizing of how to break up grid for responsiveness -|`onSelect` |(optional) function for handling select of table -|`rowData` |(optional) data specific to row -|=== - -[discrete] -== `TableData` - -Component for displaying table data within a table row. - -.Example -[source,jsx] - ----- -const PodRow: React.FC<RowProps<K8sResourceCommon>> = ({ obj, activeColumnIDs }) => { - return ( - <> - <TableData id={columns[0].id} activeColumnIDs={activeColumnIDs}> - <ResourceLink kind="Pod" name={obj.metadata.name} namespace={obj.metadata.namespace} /> - </TableData> - <TableData id={columns[1].id} activeColumnIDs={activeColumnIDs}> - <ResourceLink kind="Namespace" name={obj.metadata.namespace} /> - </TableData> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`id` |unique ID for table -|`activeColumnIDs` |active columns -|`className` |(optional) option class name for styling -|=== - -[discrete] -== `useActiveColumns` - -A hook that provides a list of user-selected active TableColumns. - -.Example -[source,text] ----- -// See implementation for more details on TableColumn type - const [activeColumns, userSettingsLoaded] = useActiveColumns({ - columns, - showNamespaceOverride: false, - columnManagementID, - }); - return userSettingsAreLoaded ? <VirtualizedTable columns={activeColumns} {...otherProps} /> : null ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as a key-value map - -|`\{TableColumn[]} options.columns` | An array of all available -TableColumns - -|`\{boolean} [options.showNamespaceOverride]` |(optional) If true, a -namespace column will be included, regardless of column management -selections - -|`\{string} [options.columnManagementID]` |(optional) A unique ID -used to persist and retrieve column management selections to and from -user settings. Usually a group/version/kind (GVK) string for a resource. -|=== - -A tuple containing the current user selected active columns (a subset of options.columns), and a boolean flag indicating whether user settings have been loaded. - -[discrete] -== `ListPageHeader` - -Component for generating a page header. - -.Example -[source,jsx] ----- -const exampleList: React.FC = () => { - return ( - <> - <ListPageHeader title="Example List Page"/> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`title` |heading title -|`helpText` |(optional) help section as react node -|`badge` |(optional) badge icon as react node -|=== - -[discrete] -== `ListPageCreate` - -Component for adding a create button for a specific resource kind that automatically generates a link to the create YAML for this resource. 
- -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreate groupVersionKind="Pod">Create Pod</ListPageCreate> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`groupVersionKind` |the resource group/version/kind to represent -|=== - -[discrete] -== `ListPageCreateLink` - -Component for creating a stylized link. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateLink to={'/link/to/my/page'}>Create Item</ListPageCreateLink> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`to` |string location where link should direct - -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`children` |(optional) children for the component -|=== - -[discrete] -== `ListPageCreateButton` - -Component for creating button. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateButton createAccessReview={access}>Create Pod</ListPageCreateButton> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`pfButtonProps` |(optional) Patternfly Button props -|=== - -[discrete] -== `ListPageCreateDropdown` - -Component for creating a dropdown wrapped with permissions check. - -.Example -[source,jsx] ----- -const exampleList: React.FC<MyProps> = () => { - const items = { - SAVE: 'Save', - DELETE: 'Delete', - } - return ( - <> - <ListPageHeader title="Example Pod List Page"/> - <ListPageCreateDropdown createAccessReview={access} items={items}>Actions</ListPageCreateDropdown> - </ListPageHeader> - </> - ); -}; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`items` |key:ReactNode pairs of items to display in dropdown component - -|`onClick` |callback function for click on dropdown items - -|`createAccessReview` |(optional) object with namespace and kind used to -determine access - -|`children` |(optional) children for the dropdown toggle -|=== - -[discrete] -== `ListPageFilter` - -Component that generates filter for list page. - -.Example -[source,tsx] ----- - // See implementation for more details on RowFilter and FilterValue types - const [staticData, filteredData, onFilterChange] = useListPageFilter( - data, - rowFilters, - staticFilters, - ); - // ListPageFilter updates filter state based on user interaction and resulting filtered data can be rendered in an independent component. 
- return ( - <> - <ListPageHeader .../> - <ListPageBody> - <ListPageFilter data={staticData} onFilterChange={onFilterChange} /> - <List data={filteredData} /> - </ListPageBody> - </> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |An array of data points - -|`loaded` |indicates that data has loaded - -|`onFilterChange` |callback function for when filter is updated - -|`rowFilters` |(optional) An array of RowFilter elements that define the -available filter options - -|`nameFilterPlaceholder` |(optional) placeholder for name filter - -|`labelFilterPlaceholder` |(optional) placeholder for label filter - -|`hideLabelFilter` |(optional) only shows the name filter instead of -both name and label filter - -|`hideNameLabelFilter` |(optional) hides both name and label filter - -|`columnLayout` |(optional) column layout object - -|`hideColumnManagement` |(optional) flag to hide the column management -|=== - -[discrete] -== `useListPageFilter` - -A hook that manages filter state for the ListPageFilter component. It returns a tuple containing the data filtered by all static filters, the data filtered by all static and row filters, and a callback that updates rowFilters. - -.Example -[source,tsx] ----- - // See implementation for more details on RowFilter and FilterValue types - const [staticData, filteredData, onFilterChange] = useListPageFilter( - data, - rowFilters, - staticFilters, - ); - // ListPageFilter updates filter state based on user interaction and the resulting filtered data can be rendered in an independent component. - return ( - <> - <ListPageHeader .../> - <ListPageBody> - <ListPageFilter data={staticData} onFilterChange={onFilterChange} /> - <List data={filteredData} /> - </ListPageBody> - </> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`data` |An array of data points - -|`rowFilters` |(optional) An array of RowFilter elements that define the -available filter options - -|`staticFilters` |(optional) An array of FilterValue elements that are -statically applied to the data -|=== - -[discrete] -== `ResourceLink` - -Component that creates a link to a specific resource type with an icon badge. - -.Example -[source,tsx] ----- - <ResourceLink - kind="Pod" - name="testPod" - title={metadata.uid} - /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`kind` |(optional) the kind of resource i.e. Pod, Deployment, Namespace - -|`groupVersionKind` |(optional) object with group, version, and kind - -|`className` |(optional) class style for component - -|`displayName` |(optional) display name for component, overwrites the -resource name if set - -|`inline` |(optional) flag to create icon badge and name inline with -children - -|`linkTo` |(optional) flag to create a Link object - defaults to true - -|`name` |(optional) name of resource - -|`namespace` |(optional) specific namespace for the kind resource to -link to - -|`hideIcon` |(optional) flag to hide the icon badge - -|`title` |(optional) title for the link object (not displayed) - -|`dataTest` |(optional) identifier for testing - -|`onClick` |(optional) callback function for when component is clicked - -|`truncate` |(optional) flag to truncate the link if too long -|=== - -[discrete] -== `ResourceIcon` - -Component that creates an icon badge for a specific resource type. - -.Example -[source,tsx] ----- -<ResourceIcon kind="Pod"/> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`kind` |(optional) the kind of resource i.e.
Pod, Deployment, Namespace -|`groupVersionKind` |(optional) object with group, version, and kind -|`className` |(optional) class style for component -|=== - -[discrete] -== `useK8sModel` - -Hook that retrieves the k8s model for provided K8sGroupVersionKind from redux. It returns an array with the first item as k8s model and second item as `inFlight` status. - -.Example -[source,ts] ----- -const Component: React.FC = () => { - const [model, inFlight] = useK8sModel({ group: 'app'; version: 'v1'; kind: 'Deployment' }); - return ... -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`groupVersionKind` |group, version, kind of k8s resource -K8sGroupVersionKind is preferred alternatively can pass reference for -group, version, kind which is deprecated, i.e, group/version/kind (GVK) K8sResourceKindReference. -|=== - -[discrete] -== `useK8sModels` - -Hook that retrieves all current k8s models from redux. It returns an array with the first item as the list of k8s model and second item as `inFlight` status. - -.Example -[source,ts] ----- -const Component: React.FC = () => { - const [models, inFlight] = UseK8sModels(); - return ... -} ----- - -[discrete] -== `useK8sWatchResource` - -Hook that retrieves the k8s resource along with status for loaded and error. It returns an array with first item as resource(s), second item as loaded status and third item as error state if any. - -.Example -[source,ts] ----- -const Component: React.FC = () => { - const watchRes = { - ... - } - const [data, loaded, error] = useK8sWatchResource(watchRes) - return ... -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initResource` |options needed to watch for resource. -|=== - -[discrete] -== `useK8sWatchResources` - -Hook that retrieves the k8s resources along with their respective status for loaded and error. It returns a map where keys are as provided in initResouces and value has three properties data, loaded and error. - -.Example -[source,tsx] ----- -const Component: React.FC = () => { - const watchResources = { - 'deployment': {...}, - 'pod': {...} - ... - } - const {deployment, pod} = useK8sWatchResources(watchResources) - return ... -} ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initResources` |Resources must be watched as key-value pair, -wherein key will be unique to resource and value will be options needed -to watch for the respective resource. -|=== - -[discrete] -== `consoleFetch` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts.It also validates the response status code and throws appropriate error or logs out the user if required. It returns a promise that resolves to the response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch -|`options` |The options to pass to fetch -|`timeout` |The timeout in milliseconds -|=== - -[discrete] -== `consoleFetchJSON` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws appropriate error or logs out the user if required. It returns the response as a JSON object. Uses `consoleFetch` internally. It returns a promise that resolves to the response as JSON object. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch - -|`method` |The HTTP method to use. 
Defaults to GET - -|`options` |The options to pass to fetch - -|`timeout` |The timeout in milliseconds - -|`cluster` |The name of the cluster to make the request to. Defaults to -the active cluster the user has selected -|=== - -[discrete] -== `consoleFetchText` - -A custom wrapper around `fetch` that adds console specific headers and allows for retries and timeouts. It also validates the response status code and throws an appropriate error or logs out the user if required. It returns the response as text. Uses `consoleFetch` internally. It returns a promise that resolves to the response as text. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`url` |The URL to fetch - -|`options` |The options to pass to fetch - -|`timeout` |The timeout in milliseconds - -|`cluster` |The name of the cluster to make the request to. Defaults to -the active cluster the user has selected -|=== - -[discrete] -== `getConsoleRequestHeaders` - -A function that creates impersonation and multicluster related headers for API requests using current redux state. It returns an object containing the appropriate impersonation and cluster request headers, based on redux state. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`targetCluster` |Override the current active cluster with the provided -targetCluster -|=== - -[discrete] -== `k8sGetResource` - -It fetches a resource from the cluster, based on the provided options. If the name is provided, it returns a promise that resolves to the response as a JSON object for that one resource; otherwise, it resolves to all the resources matching the -model. In case of failure, the promise gets rejected with an HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.name` |The name of the resource, if not provided then it will -look for all the resources matching the model. - -|`options.ns` | The namespace to look into, should not be specified -for cluster-scoped resources. - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. -|=== - -[discrete] -== `k8sCreateResource` - -It creates a resource in the cluster, based on the provided options. It returns a promise that resolves to the response of the resource created. In case of failure, the promise gets rejected with an HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.data` |Payload for the resource to be created - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sUpdateResource` - -It updates the entire resource in the cluster, based on the provided options. When a client needs to replace an existing resource entirely, they can use k8sUpdate. Alternatively, they can use k8sPatch to perform a partial update. It returns a promise that resolves to the response of the updated resource. In case of failure, the promise gets rejected with an HTTP error response.
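A minimal sketch of a full-replace update follows, assuming a `ConfigMapModel` k8s model and an existing `configMap` object are already in scope; both names are illustrative and not taken from this reference. The option names match the parameter table below.

.Example
[source,ts]
----
// Hedged sketch: replaces the entire ConfigMap with a modified copy.
// ConfigMapModel and configMap are assumed to exist in the calling scope.
const replaceConfigMap = async () => {
  const updated = await k8sUpdateResource({
    model: ConfigMapModel,
    data: { ...configMap, data: { 'example-key': 'example-value' } },
    ns: configMap.metadata.namespace,
    name: configMap.metadata.name,
  });
  return updated;
};
----

For partial updates that avoid sending the full object, see `k8sPatchResource` below.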
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pair in the map - -|`options.model` | k8s model - -|`options.data` |Payload for the k8s resource to be updated - -|`options.ns` |Namespace to look into, it should not be specified for -cluster-scoped resources. - -|`options.name` |Resource name to be updated. - -|`options.path` | Appends as subpath if provided - -|`options.queryParams` | The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sPatchResource` - -It patches any resource in the cluster, based on provided options. When a client needs to perform the partial update, they can use -k8sPatch. Alternatively can use k8sUpdate to replace an existing resource entirely. See link:https://datatracker.ietf.org/doc/html/rfc6902[Data Tracker] for more. It returns a promise that resolves to the response of the resource patched. In case of failure promise gets rejected with HTTP error response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map. - -|`options.model` | k8s model - -|`options.resource` |The resource to be patched. - -|`options.data` |Only the data to be patched on existing resource -with the operation, path, and value. - -|`options.path` |Appends as subpath if provided. - -|`options.queryParams` | The query parameters to be included in the -URL. -|=== - -[discrete] -== `k8sDeleteResource` - -It deletes resources from the cluster, based on the provided model, resource. The garbage collection works based on `Foreground`|`Background` can be configured with propagationPolicy property in provided model or passed in json. It returns a promise that resolves to the response of kind Status. In case of failure promise gets rejected with HTTP error response. - -.Example -`kind: 'DeleteOptions', apiVersion: 'v1', propagationPolicy` - - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pair in the map. - -|`options.model` | k8s model - -|`options.resource` | The resource to be deleted. - -|`options.path` |Appends as subpath if provided - -|`options.queryParams` |The query parameters to be included in the -URL. - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. - - -|`options.json` |Can control garbage collection of resources -explicitly if provided else will default to model's "propagationPolicy". -|=== - -[discrete] -== `k8sListResource` - -Lists the resources as an array in the cluster, based on provided options. It returns a promise that resolves to the response. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`options` |Which are passed as key-value pairs in the map - -|`options.model` |k8s model - -|`options.queryParams` |The query parameters to be included in the -URL and can pass label selector's as well with key "labelSelector". - -|`options.requestInit` |The fetch init object to use. This can have -request headers, method, redirect, etc. See link:{power-bi-url}[Interface RequestInit] for more. -|=== - -[discrete] -== `k8sListResourceItems` - -Same interface as k8sListResource but returns the sub items. It returns the apiVersion for the model, i.e., `group/version`. - -[discrete] -== `getAPIVersionForModel` - -Provides apiVersion for a k8s model. 
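A one-line sketch, assuming a `DeploymentModel` k8s model with `apiGroup: 'apps'` and `apiVersion: 'v1'`; the model name is illustrative, not taken from this reference.

.Example
[source,ts]
----
// Hedged sketch: for a model in the 'apps' group at version 'v1',
// the returned string is expected to be 'apps/v1'.
const apiVersion = getAPIVersionForModel(DeploymentModel);
----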
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`model` |k8s model -|=== - -[discrete] -== `getGroupVersionKindForResource` - -Provides a group, version, and kind for a resource. It returns the group, version, kind for the provided resource. If the resource does not have an API group, group "core" will be returned. If the resource has an invalid apiVersion, then it will throw an Error. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |k8s resource -|=== - -[discrete] -== `getGroupVersionKindForModel` - -Provides a group, version, and kind for a k8s model. This returns the group, version, kind for the provided model. If the model does not have an apiGroup, group "core" will be returned. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`model` |k8s model -|=== - -[discrete] -== `StatusPopupSection` - -Component that shows the status in a popup window. Helpful component for building `console.dashboards/overview/health/resource` extensions. - -.Example -[source,tsx] ----- - <StatusPopupSection - firstColumn={ - <> - <span>{title}</span> - <span className="text-secondary"> - My Example Item - </span> - </> - } - secondColumn='Status' - > ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`firstColumn` |values for first column of popup -|`secondColumn` |(optional) values for second column of popup -|`children` |(optional) children for the popup -|=== - -[discrete] -== `StatusPopupItem` - -Status element used in status popup; used in `StatusPopupSection`. - -.Example -[source,text] ----- -<StatusPopupSection - firstColumn='Example' - secondColumn='Status' -> - <StatusPopupItem icon={healthStateMapping[MCGMetrics.state]?.icon}> - Complete - </StatusPopupItem> - <StatusPopupItem icon={healthStateMapping[RGWMetrics.state]?.icon}> - Pending - </StatusPopupItem> -</StatusPopupSection> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`value` |(optional) text value to display -|`icon` |(optional) icon to display -|`children` |child elements -|=== - -[discrete] -== `Overview` - -Creates a wrapper component for a dashboard. - -.Example -[source,text] ----- - <Overview> - <OverviewGrid mainCards={mainCards} leftCards={leftCards} rightCards={rightCards} /> - </Overview> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`className` |(optional) style class for div -|`children` |(optional) elements of the dashboard -|=== - -[discrete] -== `OverviewGrid` - -Creates a grid of card elements for a dashboard; used within `Overview`. - -.Example -[source,text] ----- - <Overview> - <OverviewGrid mainCards={mainCards} leftCards={leftCards} rightCards={rightCards} /> - </Overview> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`mainCards` |cards for grid -|`leftCards` |(optional) cards for left side of grid -|`rightCards` |(optional) cards for right side of grid -|=== - -[discrete] -== `InventoryItem` - -Creates an inventory card item. 
- -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the item -|=== - -[discrete] -== `InventoryItemTitle` - -Creates a title for an inventory card item; used within `InventoryItem`. - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the title -|=== - -[discrete] -== `InventoryItemBody` - -Creates the body of an inventory card; used within `InventoryCard` and can be used with `InventoryTitle`. - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`children` |elements to render inside the Inventory Card or title -|`error` |elements of the div -|=== - -[discrete] -== `InventoryItemStatus` - -Creates a count and icon for an inventory card with optional link address; used within `InventoryItemBody` - -.Example -[source,tsx] ----- - return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - <InventoryItemBody error={loadError}> - {loaded && <InventoryItemStatus count={workerNodes.length} icon={<MonitoringIcon />} />} - </InventoryItemBody> - </InventoryItem> - ) ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`count` |count for display -|`icon` |icon for display -|`linkTo` |(optional) link address -|=== - -[discrete] -== `InventoryItemLoading` - -Creates a skeleton container for when an inventory card is loading; used with `InventoryItem` and related components - -.Example -[source,tsx] ----- -if (loadError) { - title = <Link to={workerNodesLink}>{t('Worker Nodes')}</Link>; -} else if (!loaded) { - title = <><InventoryItemLoading /><Link to={workerNodesLink}>{t('Worker Nodes')}</Link></>; -} -return ( - <InventoryItem> - <InventoryItemTitle>{title}</InventoryItemTitle> - </InventoryItem> -) ----- - -[discrete] -== `useFlag` - -Hook that returns the given feature flag from FLAGS redux state. It returns the boolean value of the requested feature flag or undefined. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`flag` |The feature flag to return -|=== - -[discrete] -== `YAMLEditor` - -A basic lazy loaded YAML editor with hover help and completion. - -.Example -[source,text] ----- -<React.Suspense fallback={<LoadingBox />}> - <YAMLEditor - value={code} - /> -</React.Suspense> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`value` |String representing the yaml code to render. - -|`options` |Monaco editor options. - -|`minHeight` |Minimum editor height in valid CSS height values. - -|`showShortcuts` |Boolean to show shortcuts on top of the editor. 
- -|`toolbarLinks` |Array of ReactNode rendered on the toolbar links -section on top of the editor. - -|`onChange` |Callback for on code change event. - -|`onSave` |Callback called when the command CTRL / CMD + S is triggered. - -|`ref` |React reference to `{ editor?: IStandaloneCodeEditor }`. Using -the `editor` property, you are able to access all methods to control -the editor. -|=== - -[discrete] -== `ResourceYAMLEditor` - -A lazy loaded YAML editor for Kubernetes resources with hover help and completion. The component uses the YAMLEditor and adds more functionality on top of it, such as resource update handling, alerts, save, cancel, and reload buttons, accessibility, and more. Unless an `onSave` callback is provided, the resource update is handled automatically. It should be wrapped in a `React.Suspense` component. - -.Example -[source,text] ----- -<React.Suspense fallback={<LoadingBox />}> - <ResourceYAMLEditor - initialResource={resource} - header="Create resource" - onSave={(content) => updateResource(content)} - /> -</React.Suspense> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initialResource` |YAML/Object representing a resource to be shown by -the editor. This prop is used only during the initial render - -|`header` |Add a header on top of the YAML editor - -|`onSave` |Callback for the Save button. Passing it will override the -default update performed on the resource by the editor -|=== - -[discrete] -== `ResourceEventStream` - -A component to show events related to a particular resource. - -.Example -[source,tsx] ----- -const [resource, loaded, loadError] = useK8sWatchResource(clusterResource); -return <ResourceEventStream resource={resource} /> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resource` |An object whose related events should be shown. -|=== - -[discrete] -== `usePrometheusPoll` - -Sets up a poll to Prometheus for a single query. It returns a tuple containing the query response, a boolean flag indicating whether the response has completed, and any errors encountered during the request or post-processing of the request. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`\{PrometheusEndpoint} props.endpoint` |one of the -PrometheusEndpoint (label, query, range, rules, targets) - -|`\{string} [props.query]` |(optional) Prometheus query string. If -empty or undefined, polling is not started. - -|`\{number} [props.delay]` |(optional) polling delay interval (ms) - -|`\{number} [props.endTime]` |(optional) for QUERY_RANGE endpoint, end -of the query range - -|`\{number} [props.samples]` |(optional) for QUERY_RANGE endpoint - -|`\{number} [options.timespan]` | (optional) for QUERY_RANGE endpoint - -|`\{string} [options.namespace]` | (optional) a search param to append - -|`\{string} [options.timeout]` | (optional) a search param to append -|=== - -[discrete] -== `Timestamp` - -A component to render a timestamp. The timestamps are synchronized between individual instances of the Timestamp component. The provided timestamp is formatted according to the user locale. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`timestamp` |the timestamp to render. Format is expected to be ISO 8601 -(used by Kubernetes), epoch timestamp, or an instance of a Date. - -|`simple` |render simple version of the component omitting icon and -tooltip. - -|`omitSuffix` |formats the date omitting the suffix. - -|`className` |additional class name for the component.
-|=== - -[discrete] -== `useModal` - -A hook to launch modals. - -.Example -[source,tsx] ----- -const AppPage: React.FC = () => { - const [launchModal] = useModal(); - const onClick = () => launchModal(ModalComponent); - return ( - <Button onClick={onClick}>Launch a Modal</Button> - ) -} ----- - -[discrete] -== `ActionServiceProvider` - -Component that allows receiving contributions from other plugins for the `console.action/provider` extension type. - -.Example -[source,text] ----- - const context: ActionContext = { 'a-context-id': { dataFromDynamicPlugin } }; - - ... - - <ActionServiceProvider context={context}> - {({ actions, options, loaded }) => - loaded && ( - <ActionMenu actions={actions} options={options} variant={ActionMenuVariant.DROPDOWN} /> - ) - } - </ActionServiceProvider> ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`context` |Object with contextId and optional plugin data -|=== - -[discrete] -== `NamespaceBar` - -A component that renders a horizontal toolbar with a namespace dropdown menu in the leftmost position. Additional components can be passed in as children and will be rendered to the right of the namespace dropdown. This component is designed to be used at the top of the page. It should be used on pages where the user needs to be able to change the active namespace, such as on pages with k8s resources. - -.Example -[source,text] ----- - const logNamespaceChange = (namespace) => console.log(`New namespace: ${namespace}`); - - ... - - <NamespaceBar onNamespaceChange={logNamespaceChange}> - <NamespaceBarApplicationSelector /> - </NamespaceBar> - <Page> - - ... ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`onNamespaceChange` |(optional) A function that is executed when a -namespace option is selected. It accepts the new namespace in the form -of a string as its only argument. The active namespace is updated -automatically when an option is selected, but additional logic can be -applied via this function. When the namespace is changed, the namespace -parameter in the URL will be changed from the previous namespace to the -newly selected namespace. - -|`isDisabled` |(optional) A boolean flag that disables the namespace -dropdown if set to true. This option only applies to the namespace -dropdown and has no effect on child components. - -|`children` |(optional) Additional elements to be rendered inside the -toolbar to the right of the namespace dropdown. -|=== - -[discrete] -== `ErrorBoundaryFallbackPage` - -Creates a full-page ErrorBoundaryFallbackPage component to display the "Oh no! Something went wrong." message along with the stack trace and other helpful debugging information. This is to be used in conjunction with an `ErrorBoundary` component. - -.Example -[source,tsx] ----- -//in ErrorBoundary component - if (this.state.hasError) { - return <ErrorBoundaryFallbackPage errorMessage={errorString} componentStack={componentStackString} - stack={stackTraceString} title={errorString}/>; - } - - return this.props.children; ----- - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`errorMessage` |text description of the error message -|`componentStack` |component trace of the exception -|`stack` |stack trace of the exception -|`title` |title to render as the header of the error boundary page -|=== - -[discrete] -== `PerspectiveContext` - -Deprecated: Use the provided `usePerspectiveContext` instead. Creates the perspective context.
- -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`PerspectiveContextType` |object with active perspective and setter -|=== - -[discrete] -== `useAccessReviewAllowed` - -Deprecated: Use `useAccessReview` from `@console/dynamic-plugin-sdk` instead. Hook that provides allowed status about user access to a given resource. It returns the `isAllowed` boolean value. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`resourceAttributes` |resource attributes for access review -|`impersonate` |impersonation details -|=== - -[discrete] -== `useSafetyFirst` - -Deprecated: This hook is not related to console functionality. Hook that ensures a safe asynchronnous setting of React state in case a given component could be unmounted. It returns an array with a pair of state value and its set function. - -[cols=",",options="header",] -|=== -|Parameter Name |Description -|`initialState` |initial state value -|=== - -:!power-bi-url: \ No newline at end of file diff --git a/modules/dynamic-plugin-sdk-extensions.adoc b/modules/dynamic-plugin-sdk-extensions.adoc deleted file mode 100644 index 71cdc5861983..000000000000 --- a/modules/dynamic-plugin-sdk-extensions.adoc +++ /dev/null @@ -1,1475 +0,0 @@ -// Module is included in the following assemblies: -// -// * web_console/dynamic-plugin/dynamic-plugins-reference.adoc - -:_content-type: CONCEPT -[id="dynamic-plugin-sdk-extensions_{context}"] -= Dynamic plugin extension types - -[discrete] -== `console.action/filter` - -`ActionFilter` can be used to filter an action. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no |The context ID helps to narrow the scope of -contributed actions to a particular area of the application. Examples include `topology` and `helm`. - -|`filter` |`CodeRef<(scope: any, action: Action) => boolean>` |no |A -function that will filter actions based on some conditions. - -`scope`: The scope -in which actions should be provided for. A hook might be required if you want to -remove the `ModifyCount` action from a deployment with a horizontal pod -autoscaler (HPA). -|=== - -[discrete] -== `console.action/group` - -`ActionGroup` contributes an action group that can also be a submenu. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action section. - -|`label` |`string` |yes |The label to display in the UI. Required for -submenus. - -|`submenu` |`boolean` |yes |Whether this group should be displayed as -submenu. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. -|=== - -[discrete] -== `console.action/provider` - -`ActionProvider` contributes a hook that returns list of actions for specific context. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no |The context ID helps to narrow the scope of -contributed actions to a particular area of the application. Examples include `topology` and `helm`. - -|`provider` |`CodeRef<ExtensionHook<Action[], any>>` |no |A React hook -that returns actions for the given scope. If `contextId` = `resource`, then -the scope will always be a Kubernetes resource object. 
-|=== - -[discrete] -== `console.action/resource-provider` - -`ResourceActionProvider` contributes a hook that returns list of actions for specific resource model. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sKindVersionModel` |no |The model for which this -provider provides actions for. - -|`provider` |`CodeRef<ExtensionHook<Action[], any>>` |no |A react hook -which returns actions for the given resource model -|=== - -[discrete] -== `console.alert-action` - -This extension can be used to trigger a specific action when a specific Prometheus alert is observed by the Console based on its `rule.name` value. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`alert` |`string` |no | Alert name as defined by `alert.rule.name` property - -|`text` |`string` |no | - -|`action` |`CodeRef<(alert: any) => void>` |no | Function to perform side effect | -|=== - -[discrete] -== `console.catalog/item-filter` - -This extension can be used for plugins to contribute a handler that can filter specific catalog items. For example, the plugin can contribute a filter that filters helm charts from specific provider. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`filter` |`CodeRef<(item: CatalogItem) => boolean>` |no |Filters items -of a specific type. Value is a function that takes `CatalogItem[]` and -returns a subset based on the filter criteria. -|=== - -[discrete] -== `console.catalog/item-metadata` - -This extension can be used to contribute a provider that adds extra metadata to specific catalog items. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`provider` -|`CodeRef<ExtensionHook<CatalogItemMetadataProviderFunction, CatalogExtensionHookOptions>>` -|no |A hook which returns a function that will be used to provide metadata to catalog items of a specific type. -|=== - -[discrete] -== `console.catalog/item-provider` - -This extension allows plugins to contribute a provider for a catalog item type. For example, a Helm Plugin can add a provider that fetches all the Helm Charts. This extension can also be used by other plugins to add more items to a specific catalog item type. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`catalogId` |`string` \| `string[]` |no |The unique identifier for the -catalog this provider contributes to. - -|`type` |`string` |no |Type ID for the catalog item type. - -|`title` |`string` |no |Title for the catalog item provider - -|`provider` -|`CodeRef<ExtensionHook<CatalogItem<any>[], CatalogExtensionHookOptions>>` -|no |Fetch items and normalize it for the catalog. Value is a react -effect hook. - -|`priority` |`number` |yes |Priority for this provider. Defaults to `0`. -Higher priority providers may override catalog items provided by other -providers. -|=== - -[discrete] -== `console.catalog/item-type` - -This extension allows plugins to contribute a new type of catalog item. For example, a Helm plugin can define a new catalog item type as HelmCharts that it wants to contribute to the Developer Catalog. 
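The `provider` values referenced by the catalog extensions above are React hooks delivered as code references. The following is a minimal sketch of such a hook for `console.catalog/item-provider`; the `CatalogItem` fields shown (`uid`, `type`, `name`) and the `[items, loaded, loadError]` return tuple are assumptions based on the types named in these tables, not a verbatim SDK example.

[source,tsx]
----
// Hedged sketch of a provider hook that could back console.catalog/item-provider.
// The CatalogItem fields and the return tuple shape are assumptions.
const useExampleCatalogItems: ExtensionHook<CatalogItem[]> = () => {
  const items: CatalogItem[] = [
    { uid: 'example-1', type: 'ExampleType', name: 'Example item' },
  ];
  const loaded = true;
  const loadError = null;
  return [items, loaded, loadError];
};
----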
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`type` |`string` |no |Type for the catalog item. - -|`title` |`string` |no |Title for the catalog item. - -|`catalogDescription` |`string` \| `CodeRef<React.ReactNode>` |yes -|Description for the type specific catalog. - -|`typeDescription` |`string` |yes |Description for the catalog item -type. - -|`filters` |`CatalogItemAttribute[]` |yes |Custom filters specific to -the catalog item. - -|`groupings` |`CatalogItemAttribute[]` |yes |Custom groupings specific -to the catalog item. -|=== - -[discrete] -== `console.catalog/item-type-metadata` - -This extension allows plugins to contribute extra metadata like custom filters or groupings for any catalog item type. For example, a plugin can attach a custom filter for HelmCharts that can filter based on chart provider. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`type` |`string` |no |Type for the catalog item. - -|`filters` |`CatalogItemAttribute[]` |yes |Custom filters specific to -the catalog item. - -|`groupings` |`CatalogItemAttribute[]` |yes |Custom groupings specific -to the catalog item. -|=== - -[discrete] -== `console.cluster-overview/inventory-item` - -Adds a new inventory item into cluster overview page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{}>>` |no |The component to -be rendered. -|=== - -[discrete] -== `console.cluster-overview/multiline-utilization-item` - -Adds a new cluster overview multi-line utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQueries` |`CodeRef<GetMultilineQueries>` |no |Prometheus -utilization query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`TopConsumerPopovers` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>[]>` |yes |Shows -Top consumer popover instead of plain value. -|=== - -[discrete] -== `console.cluster-overview/utilization-item` - -Adds a new cluster overview utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQuery` |`CodeRef<GetQuery>` |no |Prometheus utilization -query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`getTotalQuery` |`CodeRef<GetQuery>` |yes |Prometheus total query. - -|`getRequestQuery` |`CodeRef<GetQuery>` |yes |Prometheus request query. - -|`getLimitQuery` |`CodeRef<GetQuery>` |yes |Prometheus limit query. - -|`TopConsumerPopover` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>>` |yes |Shows Top -consumer popover instead of plain value. -|=== - -[discrete] -== `console.context-provider` - -Adds a new React context provider to the web console application root. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`provider` |`CodeRef<Provider<T>>` |no |Context Provider component. -|`useValueHook` |`CodeRef<() => T>` |no |Hook for the Context value. -|=== - -[discrete] -== `console.dashboards/card` - -Adds a new dashboard card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`tab` |`string` |no |The ID of the dashboard tab to which the card will -be added. 
- -|`position` |`'LEFT' \| 'RIGHT' \| 'MAIN'` |no |The grid position of the -card on the dashboard. - -|`component` |`CodeRef<React.ComponentType<{}>>` |no |Dashboard card -component. - -|`span` |`OverviewCardSpan` |yes |Card's vertical span in the column. -Ignored for small screens; defaults to `12`. -|=== - -[discrete] -== `console.dashboards/custom/overview/detail/item` - -Adds an item to the Details card of Overview Dashboard. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no | Details card title - -|`component` |`CodeRef<React.ComponentType<{}>>` |no | The value, rendered by the OverviewDetailItem component - -|`valueClassName` |`string` |yes | Value for a className - -|`isLoading` |`CodeRef<() => boolean>` |yes | Function returning the loading state of the component - -| `error` | `CodeRef<() => string>` | yes | Function returning errors to be displayed by the component -|=== - -[discrete] -== `console.dashboards/overview/activity/resource` - -Adds an activity to the Activity Card of Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`k8sResource` |`CodeRef<FirehoseResource & { isList: true; }>` |no |The -utilization item to be replaced. - -|`component` |`CodeRef<React.ComponentType<K8sActivityProps<T>>>` |no -|The action component. - -|`isActivity` |`CodeRef<(resource: T) => boolean>` |yes |Function which -determines if the given resource represents the action. If not defined, -every resource represents activity. - -|`getTimestamp` |`CodeRef<(resource: T) => Date>` |yes |Time stamp for -the given action, which will be used for ordering. -|=== - -[discrete] -== `console.dashboards/overview/health/operator` - -Adds a health subsystem to the status card of the *Overview* dashboard, where the source of status is a Kubernetes REST API. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |Title of Operators section in the pop-up menu. - -|`resources` |`CodeRef<FirehoseResource[]>` |no |Kubernetes resources -which will be fetched and passed to `healthHandler`. - -|`getOperatorsWithStatuses` |`CodeRef<GetOperatorsWithStatuses<T>>` |yes -|Resolves status for the Operators. - -|`operatorRowLoader` -|`CodeRef<React.ComponentType<OperatorRowProps<T>>>` |yes |Loader for -pop-up row component. - -|`viewAllLink` |`string` |yes |Links to all resources page. If not -provided, then a list page of the first resource from resources prop is -used. -|=== - -[discrete] -== `console.dashboards/overview/health/prometheus` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is Prometheus. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`queries` |`string[]` |no |The Prometheus queries. - -|`healthHandler` |`CodeRef<PrometheusHealthHandler>` |no |Resolve the -subsystem's health. - -|`additionalResource` |`CodeRef<FirehoseResource>` |yes |Additional -resource which will be fetched and passed to `healthHandler`. - -|`popupComponent` -|`CodeRef<React.ComponentType<PrometheusHealthPopupProps>>` |yes |Loader -for pop-up menu content. If defined, a health item is represented as a -link, which opens a pop-up menu with the given content. - -|`popupTitle` |`string` |yes |The title of the popover. 
- -|`disallowedControlPlaneTopology` |`string[]` |yes |Control plane -topology for which the subsystem should be hidden. -|=== - -[discrete] -== `console.dashboards/overview/health/resource` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes Resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`resources` |`CodeRef<WatchK8sResources<T>>` |no |Kubernetes resources -that will be fetched and passed to `healthHandler`. - -|`healthHandler` |`CodeRef<ResourceHealthHandler<T>>` |no |Resolve the -subsystem's health. - -|`popupComponent` |`CodeRef<WatchK8sResults<T>>` |yes |Loader for pop-up menu content. If defined, a health item is represented as a link, which -opens a pop-up menu with the given content. - -|`popupTitle` |`string` |yes |The title of the popover. -|=== - -[discrete] -== `console.dashboards/overview/health/url` - -Adds a health subsystem to the status card of Overview dashboard where the source of status is a Kubernetes REST API. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The display name of the subsystem. - -|`url` |`string` |no |The URL to fetch data from. It will be prefixed -with base Kubernetes URL. - -|`healthHandler`|`CodeRef<URLHealthHandler<T, K8sResourceCommon \| K8sResourceCommon[]>>`|no |Resolve the subsystem's health. - -|`additionalResource` |`CodeRef<FirehoseResource>` |yes |Additional -resource which will be fetched and passed to `healthHandler`. - -|`popupComponent`|`CodeRef<React.ComponentType<{ healthResult?: T; healthResultError?: any; k8sResult?: FirehoseResult<R>; }>>`|yes |Loader for popup content. If defined, a health item will be -represented as a link which opens popup with given content. - -|`popupTitle` |`string` |yes |The title of the popover. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item` - -Adds a resource tile to the overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be -fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item/group` - -Adds an inventory status group. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The ID of the status group. - -|`icon` -|`CodeRef<React.ReactElement<any, string` \| `React.JSXElementConstructor<any>>>` -|no |React component representing the status group icon. -|=== - -[discrete] -== `console.dashboards/overview/inventory/item/replacement` - -Replaces an overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. 
-|=== - -[discrete] -== `console.dashboards/overview/prometheus/activity/resource` - -Adds an activity to the Activity Card of Prometheus Overview Dashboard where the triggering of activity is based on watching a Kubernetes resource. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`queries` |`string[]` |no |Queries to watch. - -|`component` |`CodeRef<React.ComponentType<PrometheusActivityProps>>` -|no |The action component. - -|`isActivity` |`CodeRef<(results: PrometheusResponse[]) => boolean>` -|yes |Function which determines if the given resource represents the -action. If not defined, every resource represents activity. -|=== - -[discrete] -== `console.dashboards/project/overview/item` - -Adds a resource tile to the project overview inventory card. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`CodeRef<T>` |no |The model for `resource` which will be -fetched. Used to get the model's `label` or `abbr`. - -|`mapper` |`CodeRef<StatusGroupMapper<T, R>>` |yes |Function which maps -various statuses to groups. - -|`additionalResources` |`CodeRef<WatchK8sResources<R>>` |yes |Additional -resources which will be fetched and passed to the `mapper` function. -|=== - -[discrete] -== `console.dashboards/tab` - -Adds a new dashboard tab, placed after the *Overview* tab. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique tab identifier, used as tab link `href` -and when adding cards to this tab. - -|`navSection` |`'home' \| 'storage'` |no |Navigation section to which the tab belongs to. - -|`title` |`string` |no |The title of the tab. -|=== - -[discrete] -== `console.file-upload` - -This extension can be used to provide a handler for the file drop action on specific file extensions. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`fileExtensions` |`string[]` |no |Supported file extensions. - -|`handler` |`CodeRef<FileUploadHandler>` |no |Function which handles the -file drop action. -|=== - -[discrete] -== `console.flag` - -Gives full control over the web console feature flags. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`handler` |`CodeRef<FeatureFlagHandler>` |no |Used to set or unset arbitrary feature flags. -|=== - -[discrete] -== `console.flag/hookProvider` - -Gives full control over the web console feature flags with hook handlers. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`handler` |`CodeRef<FeatureFlagHandler>` |no |Used to set or unset arbitrary feature flags. -|=== - -[discrete] -== `console.flag/model` - -Adds a new web console feature flag driven by the presence of a `CustomResourceDefinition` (CRD) object on the cluster. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`flag` |`string` |no |The name of the flag to set after the CRD is detected. - -|`model` |`ExtensionK8sModel` |no |The model which refers to a -CRD. -|=== - -[discrete] -== `console.global-config` - -This extension identifies a resource used to manage the configuration of the cluster. A link to the resource will be added to the *Administration* -> *Cluster Settings* -> *Configuration* page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |Unique identifier for the cluster config resource -instance. - -|`name` |`string` |no |The name of the cluster config resource instance. 
- -|`model` |`ExtensionK8sModel` |no |The model which refers to a cluster -config resource. - -|`namespace` |`string` |no |The namespace of the cluster config resource -instance. -|=== - -[discrete] -== `console.model-metadata` - -Customize the display of models by overriding values retrieved and generated through API discovery. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupModel` |no |The model to customize. May -specify only a group, or optional version and kind. - -|`badge` |`ModelBadge` |yes |Whether to consider this model reference as -Technology Preview or Developer Preview. - -|`color` |`string` |yes |The color to associate to this model. - -|`label` |`string` |yes |Override the label. Requires `kind` be -provided. - -|`labelPlural` |`string` |yes |Override the plural label. Requires -`kind` be provided. - -|`abbr` |`string` |yes |Customize the abbreviation. Defaults to all -uppercase characters in `kind`, up to 4 characters long. Requires that `kind` is provided. -|=== - -[discrete] -== `console.navigation/href` - -This extension can be used to contribute a navigation item that points to a specific link in the UI. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`name` |`string` |no |The name of this item. - -|`href` |`string` |no |The link `href` value. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`namespaced` |`boolean` |yes |If `true`, adds `/ns/active-namespace` to the end. - -|`prefixNamespaced` |`boolean` |yes |If `true`, adds `/k8s/ns/active-namespace` to the beginning. -|=== - -[discrete] -== `console.navigation/resource-cluster` - -This extension can be used to contribute a navigation item that points to a cluster resource details page. The K8s model of that resource can be used to define the navigation item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`model` |`ExtensionK8sModel` |no |The model for which this navigation item -links to. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top-level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. 
- -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Overrides the default name. If not supplied the -name of the link will equal the plural value of the model. -|=== - -[discrete] -== `console.navigation/resource-ns` - -This extension can be used to contribute a navigation item that points to a namespaced resource details page. The K8s model of that resource can be used to define the navigation item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`model` |`ExtensionK8sModel` |no |The model for which this navigation item -links to. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top-level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`startsWith` |`string[]` |yes |Mark this item as active when the URL -starts with one of these paths. - -|`insertBefore` |`string \| string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Overrides the default name. If not supplied the -name of the link will equal the plural value of the model. -|=== - -[discrete] -== `console.navigation/section` - -This extension can be used to define a new section of navigation items in the navigation tab. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. - -|`name` |`string` |yes |Name of this section. If not supplied, only a -separator will be shown above the section. -|=== - -[discrete] -== `console.navigation/separator` - -This extension can be used to add a separator between navigation items in the navigation. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this item. - -|`perspective` |`string` |yes |The perspective ID to which this item -belongs to. If not specified, contributes to the default perspective. - -|`section` |`string` |yes |Navigation section to which this item belongs -to. If not specified, render this item as a top level link. - -|`dataAttributes` |`{ [key: string]: string; }` |yes |Adds data -attributes to the DOM. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. 
- -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is used. -`insertBefore` takes precedence. -|=== - -[discrete] -== `console.page/resource/details` - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<{ match: match<{}>; namespace: string; model: ExtensionK8sModel; }>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.page/resource/list` - -Adds new resource list page to Console router. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<{ match: match<{}>; namespace: string; model: ExtensionK8sModel; }>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.page/route` - -Adds a new page to the web console router. See link:https://v5.reactrouter.com/[React Router]. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`path` |`string` \| `string[]` |no |Valid URL path or array of paths that -`path-to-regexp@^1.7.0` understands. - -|`perspective` |`string` |yes |The perspective to which this page -belongs to. If not specified, contributes to all perspectives. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. -|=== - -[discrete] -== `console.page/route/standalone` - -Adds a new standalone page, rendered outside the common page layout, to the web console router. See link:https://v5.reactrouter.com/[React Router]. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`path` |`string` \| `string[]` |no |Valid URL path or array of paths that -`path-to-regexp@^1.7.0` understands. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. -|=== - -[discrete] -== `console.perspective` - -This extension contributes a new perspective to the console, which enables customization of the navigation menu. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The perspective identifier. - -|`name` |`string` |no |The perspective display name. - -|`icon` |`CodeRef<LazyComponent>` |no |The perspective display icon. - -|`landingPageURL` -|`CodeRef<(flags: { [key: string]: boolean; }, isFirstVisit: boolean) => string>` -|no |The function to get perspective landing page URL. - -|`importRedirectURL` |`CodeRef<(namespace: string) => string>` |no |The -function to get redirect URL for import flow. - -|`default` |`boolean` |yes |Whether the perspective is the default. -There can only be one default. 
- -|`defaultPins` |`ExtensionK8sModel[]` |yes |Default pinned resources on -the nav - -|`usePerspectiveDetection` |`CodeRef<() => [boolean, boolean]>` |yes -|The hook to detect default perspective -|=== - -[discrete] -== `console.project-overview/inventory-item` - -Adds a new inventory item into the *Project Overview* page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{ projectName: string; }>>` -|no |The component to be rendered. -|=== - -[discrete] -== `console.project-overview/utilization-item` - -Adds a new project overview utilization item. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`title` |`string` |no |The title of the utilization item. - -|`getUtilizationQuery` |`CodeRef<GetProjectQuery>` |no |Prometheus -utilization query. - -|`humanize` |`CodeRef<Humanize>` |no |Convert Prometheus data to human-readable form. - -|`getTotalQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus total -query. - -|`getRequestQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus request -query. - -|`getLimitQuery` |`CodeRef<GetProjectQuery>` |yes |Prometheus limit -query. - -|`TopConsumerPopover` -|`CodeRef<React.ComponentType<TopConsumerPopoverProps>>` |yes |Shows the top consumer popover instead of plain value. -|=== - -[discrete] -== `console.pvc/alert` - -This extension can be used to contribute custom alerts on the PVC details page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`alert` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |The alert component. -|=== - -[discrete] -== `console.pvc/create-prop` - -This extension can be used to specify additional properties that will be used when creating PVC resources on the PVC list page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`label` |`string` |no |Label for the create prop action. -|`path` |`string` |no |Path for the create prop action. -|=== - -[discrete] -== `console.pvc/delete` - -This extension allows hooking into deleting PVC resources. It can provide an alert with additional information and custom PVC delete logic. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`predicate` |`CodeRef<(pvc: K8sResourceCommon) => boolean>` |no -|Predicate that tells whether to use the extension or not. - -|`onPVCKill` |`CodeRef<(pvc: K8sResourceCommon) => Promise<void>>` |no -|Method for the PVC delete operation. - -|`alert` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |Alert component to show additional information. -|=== - -[discrete] -== `console.pvc/status` - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`priority` |`number` |no |Priority for the status component. A larger value means higher priority. - -|`status` |`CodeRef<React.ComponentType<{ pvc: K8sResourceCommon; }>>` -|no |The status component. - -|`predicate` |`CodeRef<(pvc: K8sResourceCommon) => boolean>` |no -|Predicate that tells whether to render the status component or not. -|=== - -[discrete] -== `console.redux-reducer` - -Adds new reducer to Console Redux store which operates on `plugins.<scope>` substate. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`scope` |`string` |no |The key to represent the reducer-managed -substate within the Redux state object. 
- -|`reducer` |`CodeRef<Reducer<any, AnyAction>>` |no |The reducer -function, operating on the reducer-managed substate. -|=== - -[discrete] -== `console.resource/create` - -This extension allows plugins to provide a custom component (for example, a wizard or form) for specific resources, which will be rendered when users try to create a new resource instance. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sModel` |no |The model for which this create -resource page will be rendered - -|`component` -|`CodeRef<React.ComponentType<CreateResourceComponentProps>>` |no |The -component to be rendered when the model matches -|=== - -[discrete] -== `console.storage-class/provisioner` - -Adds a new storage class provisioner as an option during storage class creation. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`CSI` |`ProvisionerDetails` |yes | Container Storage Interface provisioner type - -|`OTHERS` -|`ProvisionerDetails` -|yes -|Other provisioner type -|=== - -[discrete] -== `console.storage-provider` - -This extension can be used to contribute a new storage provider to select when attaching storage, along with a provider-specific component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`name` |`string` |no | Displayed name of the provider. - -|`Component` -|`CodeRef<React.ComponentType<Partial<RouteComponentProps<{}, StaticContext, any>>>>` -|no | Provider specific component to render. | -|=== - -[discrete] -== `console.tab` - -Adds a tab to a horizontal nav matching the `contextId`. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`contextId` |`string` |no | Context ID assigned to the horizontal nav in which the tab will be injected. Possible values: `dev-console-observe` - - -|`name` |`string` |no | The display label of the tab - -|`href` |`string` |no | The `href` appended to the existing URL - -|`component` -|`CodeRef<React.ComponentType<PageComponentProps<K8sResourceCommon>>>` -|no |Tab content component. -|=== - -[discrete] -== `console.tab/horizontalNav` - -This extension can be used to add a tab to the resource details page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sKindVersionModel` |no |The model for which this -provider shows a tab. - -|`page` |`{ name: string; href: string; }` |no |The page to be shown in -the horizontal tab. It takes the tab name as `name` and the `href` of the tab. - -|`component` -|`CodeRef<React.ComponentType<PageComponentProps<K8sResourceCommon>>>` -|no |The component to be rendered when the route matches. -|=== - -[discrete] -== `console.telemetry/listener` - -This component can be used to register a listener function receiving telemetry events. These events include user identification, page navigation, and other application-specific events. The listener may use this data for reporting and analytics purposes. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`listener` |`CodeRef<TelemetryEventListener>` |no |Listen for telemetry -events -|=== - -[discrete] -== `console.topology/adapter/build` - -`BuildAdapter` contributes an adapter to adapt an element to data that can be used by the Build component. 
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => AdapterDataType<BuildConfigData> \| undefined>` -|no |Adapter to adapt an element to data that can be used by the Build component. -|=== - -[discrete] -== `console.topology/adapter/network` - -`NetworkAdapter` contributes an adapter to adapt an element to data that can be used by the `Networking` component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => NetworkAdapterType \| undefined>` -|no |Adapter to adapt an element to data that can be used by the Networking component. -|=== - -[discrete] -== `console.topology/adapter/pod` - -`PodAdapter` contributes an adapter to adapt an element to data that can be used by the `Pod` component. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`adapt` -|`CodeRef<(element: GraphElement) => AdapterDataType<PodsAdapterDataType> \| undefined>` -|no |Adapter to adapt an element to data that can be used by the Pod component. | -|=== - -[discrete] -== `console.topology/component/factory` - -Getter for a `ViewComponentFactory`. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getFactory` |`CodeRef<ViewComponentFactory>` |no |Getter for a `ViewComponentFactory`. -|=== - -[discrete] -== `console.topology/create/connector` - -Getter for the create connector function. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getCreateConnector` |`CodeRef<CreateConnectionGetter>` |no |Getter for -the create connector function. -|=== - -[discrete] -== `console.topology/data/factory` - -Topology Data Model Factory Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |Unique ID for the factory. - -|`priority` |`number` |no |Priority for the factory. - -|`resources` |`WatchK8sResourcesGeneric` |yes |Resources to be fetched -from `useK8sWatchResources` hook. - -|`workloadKeys` |`string[]` |yes |Keys in resources containing -workloads. - -|`getDataModel` |`CodeRef<TopologyDataModelGetter>` |yes |Getter for the -data model factory. - -|`isResourceDepicted` |`CodeRef<TopologyDataModelDepicted>` |yes |Getter -for function to determine if a resource is depicted by this model factory. - -|`getDataModelReconciler` |`CodeRef<TopologyDataModelReconciler>` |yes -|Getter for function to reconcile data model after all extensions' models have loaded. -|=== - -[discrete] -== `console.topology/decorator/provider` - -Topology Decorator Provider Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID for topology decorator specific to the extension -|`priority` |`number` |no |Priority for topology decorator specific to the extension -|`quadrant` |`TopologyQuadrant` |no |Quadrant for topology decorator specific to the extension -|`decorator` |`CodeRef<TopologyDecoratorGetter>` |no |Decorator specific to the extension | -|=== - -[discrete] -== `console.topology/details/resource-alert` - -`DetailsResourceAlert` contributes an alert for a specific topology context or graph element. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |The ID of this alert. Used to save state if the -alert should not be shown after being dismissed. 
- -|`contentProvider` -|`CodeRef<(element: GraphElement) => DetailsResourceAlertContent \| null>` -|no |Hook to return the contents of the alert. -|=== - -[discrete] -== `console.topology/details/resource-link` - -`DetailsResourceLink` contributes a link for specific topology context or graph element. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`link`|`CodeRef<(element: GraphElement) => React.Component \| undefined>` |no -|Return the resource link if provided, otherwise undefined. Use the `ResourceIcon` and `ResourceLink` properties for styles. - -|`priority` |`number` |yes |A higher priority factory will get the first -chance to create the link. -|=== - -[discrete] -== `console.topology/details/tab` - -`DetailsTab` contributes a tab for the topology details panel. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this details tab. - -|`label` |`string` |no |The tab label to display in the UI. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. -|=== - -[discrete] -== `console.topology/details/tab-section` - -`DetailsTabSection` contributes a section for a specific tab in the topology details panel. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |A unique identifier for this details tab section. - -|`tab` |`string` |no |The parent tab ID that this section should -contribute to. - -|`provider` |`CodeRef<DetailsTabSectionExtensionHook>` |no |A hook that -returns a component, or if null or undefined, renders in the -topology sidebar. SDK component: `<Section title=\{}>...` padded area - -|`section`|`CodeRef<(element: GraphElement, renderNull?: () => null) => React.Component \| undefined>` -|no |Deprecated: Fallback if no provider is defined. renderNull is a -no-op already. - -|`insertBefore` |`string` \| `string[]` |yes |Insert this item before the -item referenced here. For arrays, the first one found in order is used. - -|`insertAfter` |`string` \| `string[]` |yes |Insert this item after the -item referenced here. For arrays, the first one found in order is -used. The `insertBefore` value takes precedence. 
-|=== - -[discrete] -== `console.topology/display/filters` - -Topology Display Filters Extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`getTopologyFilters` |`CodeRef<() => TopologyDisplayOption[]>` |no | Getter for topology filters specific to the extension -|`applyDisplayOptions` |`CodeRef<TopologyApplyDisplayOptions>` |no | Function to apply filters to the model -|=== - -[discrete] -== `console.topology/relationship/provider` - -Topology relationship provider connector extension - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`provides` |`CodeRef<RelationshipProviderProvides>` |no |Use to determine if a connection can be created between the source and target node -|`tooltip` |`string` |no |Tooltip to show when connector operation is hovering over the drop target, for example, "Create a Visual Connector" -|`create` |`CodeRef<RelationshipProviderCreate>` |no |Callback to execute when connector is drop over target node to create a connection -|`priority` |`number` |no |Priority for relationship, higher will be preferred in case of multiple -|=== - -[discrete] -== `console.user-preference/group` - -This extension can be used to add a group on the console user-preferences page. It will appear as a vertical tab option on the console user-preferences page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the user preference group. - -|`label` |`string` |no |The label of the user preference group - -|`insertBefore` |`string` |yes |ID of user preference group before which -this group should be placed - -|`insertAfter` |`string` |yes |ID of user preference group after which -this group should be placed -|=== - -[discrete] -== `console.user-preference/item` - -This extension can be used to add an item to the user preferences group on the console user preferences page. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the user preference item and -referenced in insertAfter and insertBefore to define the item order - -|`label` |`string` |no |The label of the user preference - -|`description` |`string` |no |The description of the user preference - -|`field` |`UserPreferenceField` |no |The input field options used to -render the values to set the user preference - -|`groupId` |`string` |yes |IDs used to identify the user preference -groups the item would belong to - -|`insertBefore` |`string` |yes |ID of user preference item before which -this item should be placed - -|`insertAfter` |`string` |yes |ID of user preference item after which -this item should be placed -|=== - -[discrete] -== `console.yaml-template` - -YAML templates for editing resources via the yaml editor. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sModel` |no |Model associated with the template. - -|`template` |`CodeRef<string>` |no |The YAML template. - -|`name` |`string` |no |The name of the template. Use the name `default` -to mark this as the default template. -|=== - -[discrete] -== `dev-console.add/action` - -This extension allows plugins to contribute an add action item to the add page of developer perspective. For example, a Serverless plugin can add a new action item for adding serverless functions to the add page of developer console. 
- -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action. - -|`label` |`string` |no |The label of the action. - -|`description` |`string` |no |The description of the action. - -|`href` |`string` |no |The `href` to navigate to. - -|`groupId` |`string` |yes |IDs used to identify the action groups the -action would belong to. - -|`icon` |`CodeRef<React.ReactNode>` |yes |The perspective display icon. - -|`accessReview` |`AccessReviewResourceAttributes[]` |yes |Optional -access review to control the visibility or enablement of the action. -|=== - -[discrete] -== `dev-console.add/action-group` - -This extension allows plugins to contribute a group in the add page of the developer console. Groups can be referenced by actions, which will be grouped together in the add action page based on their extension definition. For example, a Serverless plugin can contribute a Serverless group together with multiple add actions. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`id` |`string` |no |ID used to identify the action group - -|`name` |`string` |no |The title of the action group - -|`insertBefore` |`string` |yes |ID of action group before which this -group should be placed - -|`insertAfter` |`string` |yes |ID of action group after which this group -should be placed -|=== - -[discrete] -== `dev-console.import/environment` - -This extension can be used to specify extra build environment variable fields under the builder image selector in the developer console git import form. When set, the fields will override environment variables of the same name in the build section. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`imageStreamName` |`string` |no |Name of the image stream to provide -custom environment variables for - -|`imageStreamTags` |`string[]` |no |List of supported image stream tags - -|`environments` |`ImageEnvironment[]` |no |List of environment variables -|=== - -[discrete] -== `console.dashboards/overview/detail/item` - -Deprecated. Use the `CustomOverviewDetailItem` type instead. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`component` |`CodeRef<React.ComponentType<{}>>` |no |The value, based -on the `DetailItem` component -|=== - -[discrete] -== `console.page/resource/tab` - -Deprecated. Use `console.tab/horizontalNav` instead. Adds a new resource tab page to Console router. - -[cols=",,,",options="header",] -|=== -|Name |Value Type |Optional |Description -|`model` |`ExtensionK8sGroupKindModel` |no |The model for which this -resource page links to. - -|`component` -|`CodeRef<React.ComponentType<RouteComponentProps<{}, StaticContext, any>>>` -|no |The component to be rendered when the route matches. - -|`name` |`string` |no |The name of the tab. - -|`href` |`string` |yes |The optional `href` for the tab link. If not -provided, the first `path` is used. - -|`exact` |`boolean` |yes |When true, will only match if the path matches -the `location.pathname` exactly. 
-|=== diff --git a/modules/dynamic-provisioning-about.adoc b/modules/dynamic-provisioning-about.adoc deleted file mode 100644 index dc6ac59dbbdf..000000000000 --- a/modules/dynamic-provisioning-about.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - -:_content-type: CONCEPT -[id="about_{context}"] -= About dynamic provisioning - -The `StorageClass` resource object describes and classifies storage that can -be requested, as well as provides a means for passing parameters for -dynamically provisioned storage on demand. `StorageClass` objects can also -serve as a management mechanism for controlling different levels of -storage and access to the storage. Cluster Administrators (`cluster-admin`) - or Storage Administrators (`storage-admin`) define and create the -`StorageClass` objects that users can request without needing any detailed -knowledge about the underlying storage volume sources. - -The {product-title} persistent volume framework enables this functionality -and allows administrators to provision a cluster with persistent storage. -The framework also gives users a way to request those resources without -having any knowledge of the underlying infrastructure. - -Many storage types are available for use as persistent volumes in -{product-title}. While all of them can be statically provisioned by an -administrator, some types of storage are created dynamically using the -built-in provider and plugin APIs. diff --git a/modules/dynamic-provisioning-annotations.adoc b/modules/dynamic-provisioning-annotations.adoc deleted file mode 100644 index 52479a617ed5..000000000000 --- a/modules/dynamic-provisioning-annotations.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="storage-class-annotations_{context}"] -= Storage class annotations - -To set a storage class as the cluster-wide default, add -the following annotation to your storage class metadata: - -[source,yaml] ----- -storageclass.kubernetes.io/is-default-class: "true" ----- - -For example: - -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - annotations: - storageclass.kubernetes.io/is-default-class: "true" -... ----- - -This enables any persistent volume claim (PVC) that does not specify a -specific storage class to automatically be provisioned through the -default storage class. However, your cluster can have more than one storage class, but only one of them can be the default storage class. - -[NOTE] -==== -The beta annotation `storageclass.beta.kubernetes.io/is-default-class` is -still working; however, it will be removed in a future release. -==== - -To set a storage class description, add the following annotation -to your storage class metadata: - -[source,yaml] ----- -kubernetes.io/description: My Storage Class Description ----- - -For example: - -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - annotations: - kubernetes.io/description: My Storage Class Description -... 
----- diff --git a/modules/dynamic-provisioning-available-plugins.adoc b/modules/dynamic-provisioning-available-plugins.adoc deleted file mode 100644 index 32f4b0297923..000000000000 --- a/modules/dynamic-provisioning-available-plugins.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="available-plug-ins_{context}"] -= Available dynamic provisioning plugins - -{product-title} provides the following provisioner plugins, which have -generic implementations for dynamic provisioning that use the cluster's -configured provider's API to create new storage resources: - - -[options="header",cols="1,1,1"] -|=== - -|Storage type -|Provisioner plugin name -|Notes - -ifndef::openshift-dedicated,openshift-rosa[] -|{rh-openstack-first} Cinder -|`kubernetes.io/cinder` -| - -|{rh-openstack} Manila Container Storage Interface (CSI) -|`manila.csi.openstack.org` -|Once installed, the OpenStack Manila CSI Driver Operator and ManilaDriver automatically create the required storage classes for all available Manila share types needed for dynamic provisioning. -endif::openshift-dedicated,openshift-rosa[] - -|AWS Elastic Block Store (EBS) -|`kubernetes.io/aws-ebs` -|For dynamic provisioning when using multiple clusters in different zones, -tag each node with `Key=kubernetes.io/cluster/<cluster_name>,Value=<cluster_id>` -where `<cluster_name>` and `<cluster_id>` are unique per cluster. - -ifndef::openshift-dedicated,openshift-rosa[] -|Azure Disk -|`kubernetes.io/azure-disk` -| - -|Azure File -|`kubernetes.io/azure-file` -|The `persistent-volume-binder` service account requires permissions to create -and get secrets to store the Azure storage account and keys. -endif::openshift-dedicated,openshift-rosa[] - -ifndef::openshift-rosa[] -|GCE Persistent Disk (gcePD) -|`kubernetes.io/gce-pd` -|In multi-zone configurations, it is advisable to run one {product-title} -cluster per GCE project to avoid PVs from being created in zones where -no node in the current cluster exists. -endif::openshift-rosa[] - -//|GlusterFS -//|`kubernetes.io/glusterfs` -//| - -//|Ceph RBD -//|`kubernetes.io/rbd` -//| - -//|Trident from NetApp -//|`netapp.io/trident` -//|Storage orchestrator for NetApp ONTAP, SolidFire, and E-Series storage. - -ifndef::openshift-dedicated,openshift-rosa[] -|link:https://www.vmware.com/support/vsphere.html[VMware vSphere] -|`kubernetes.io/vsphere-volume` -| -endif::openshift-dedicated,openshift-rosa[] - -//|HPE Nimble Storage -//|`hpe.com/nimble` -//|Dynamic provisioning of HPE Nimble Storage resources using the -//HPE Nimble Kube Storage Controller. - -|=== - -[IMPORTANT] -==== -Any chosen provisioner plugin also requires configuration for the relevant -cloud, host, or third-party provider as per the relevant documentation. 
-==== diff --git a/modules/dynamic-provisioning-aws-definition.adoc b/modules/dynamic-provisioning-aws-definition.adoc deleted file mode 100644 index 025245513c64..000000000000 --- a/modules/dynamic-provisioning-aws-definition.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="aws-definition_{context}"] -= AWS Elastic Block Store (EBS) object definition - -.aws-ebs-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 <2> - iopsPerGB: "10" <3> - encrypted: "true" <4> - kmsKeyId: keyvalue <5> - fsType: ext4 <6> ----- -<1> (required) Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> (required) Select from `io1`, `gp3`, `sc1`, `st1`. The default is `gp3`. -See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for valid Amazon Resource Name (ARN) values. -<3> Optional: Only for *io1* volumes. I/O operations per second per GiB. -The AWS volume plugin multiplies this with the size of the requested -volume to compute IOPS of the volume. The value cap is 20,000 IOPS, which -is the maximum supported by AWS. See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for further details. -<4> Optional: Denotes whether to encrypt the EBS volume. Valid values -are `true` or `false`. -<5> Optional: The full ARN of the key to use when encrypting the volume. -If none is supplied, but `encrypted` is set to `true`, then AWS generates a -key. See the -link:http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html[AWS documentation] -for a valid ARN value. -<6> Optional: File system that is created on dynamically provisioned -volumes. This value is copied to the `fsType` field of dynamically -provisioned persistent volumes and the file system is created when the -volume is mounted for the first time. The default value is `ext4`. diff --git a/modules/dynamic-provisioning-azure-disk-definition.adoc b/modules/dynamic-provisioning-azure-disk-definition.adoc deleted file mode 100644 index 6b1fa8306bc4..000000000000 --- a/modules/dynamic-provisioning-azure-disk-definition.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="azure-disk-definition_{context}"] -= Azure Disk object definition - -.azure-advanced-disk-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/azure-disk -volumeBindingMode: WaitForFirstConsumer <2> -allowVolumeExpansion: true -parameters: - kind: Managed <3> - storageaccounttype: Premium_LRS <4> -reclaimPolicy: Delete ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Using `WaitForFirstConsumer` is strongly recommended. This provisions the volume while allowing enough storage to schedule the pod on a free worker node from an available zone. -<3> Possible values are `Shared` (default), `Managed`, and `Dedicated`. 
-+ -[IMPORTANT] -==== -Red Hat only supports the use of `kind: Managed` in the storage class. - -With `Shared` and `Dedicated`, Azure creates unmanaged disks, while {product-title} creates a managed disk for machine OS (root) disks. But because Azure Disk does not allow the use of both managed and unmanaged disks on a node, unmanaged disks created with `Shared` or `Dedicated` cannot be attached to {product-title} nodes. -==== - -<4> Azure storage account SKU tier. Default is empty. Note that Premium VMs can attach both `Standard_LRS` and `Premium_LRS` disks, Standard VMs can only attach `Standard_LRS` disks, Managed VMs can only attach managed disks, and unmanaged VMs can only attach unmanaged disks. -+ -.. If `kind` is set to `Shared`, Azure creates all unmanaged disks in a few shared storage accounts in the same resource group as the cluster. -.. If `kind` is set to `Managed`, Azure creates new managed disks. -.. If `kind` is set to `Dedicated` and a `storageAccount` is specified, Azure uses the specified storage account for the new unmanaged disk in the same resource group as the cluster. For this to work: - * The specified storage account must be in the same region. - * Azure Cloud Provider must have write access to the storage account. -.. If `kind` is set to `Dedicated` and a `storageAccount` is not specified, Azure creates a new dedicated storage account for the new unmanaged disk in the same resource group as the cluster. diff --git a/modules/dynamic-provisioning-azure-file-considerations.adoc b/modules/dynamic-provisioning-azure-file-considerations.adoc deleted file mode 100644 index 5a89d1d66e99..000000000000 --- a/modules/dynamic-provisioning-azure-file-considerations.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent-storage-azure-file.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="azure-file-considerations_{context}"] -= Considerations when using Azure File - -The following file system features are not supported by the default Azure File storage class: - -* Symlinks -* Hard links -* Extended attributes -* Sparse files -* Named pipes - -Additionally, the owner user identifier (UID) of the Azure File mounted directory is different from the process UID of the container. The `uid` mount option can be specified in the `StorageClass` object to define -a specific user identifier to use for the mounted directory. - -The following `StorageClass` object demonstrates modifying the user and group identifier, along with enabling symlinks for the mounted directory. - -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: azure-file -mountOptions: - - uid=1500 <1> - - gid=1500 <2> - - mfsymlinks <3> -provisioner: kubernetes.io/azure-file -parameters: - location: eastus - skuName: Standard_LRS -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -<1> Specifies the user identifier to use for the mounted directory. -<2> Specifies the group identifier to use for the mounted directory. -<3> Enables symlinks. 
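
For reference, a persistent volume claim can request storage from the class in the previous example by name. The following claim is a minimal sketch: the claim name and requested size are illustrative only, and it assumes the `azure-file` storage class shown above exists in the cluster.

.azure-file-pvc.yaml (example)
[source,yaml]
----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: azure-file-claim <1>
spec:
  accessModes:
    - ReadWriteMany <2>
  storageClassName: azure-file <3>
  resources:
    requests:
      storage: 5Gi <4>
----
<1> Example claim name.
<2> Azure File shares can be mounted by multiple nodes, so `ReadWriteMany` is a common choice.
<3> Name of the storage class defined in the previous example.
<4> Example requested size.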
diff --git a/modules/dynamic-provisioning-azure-file-definition.adoc b/modules/dynamic-provisioning-azure-file-definition.adoc deleted file mode 100644 index 62301a8f7ce0..000000000000 --- a/modules/dynamic-provisioning-azure-file-definition.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - - -:_content-type: PROCEDURE -[id="azure-file-definition_{context}"] -= Azure File object definition - -The Azure File storage class uses secrets to store the Azure storage account name -and the storage account key that are required to create an Azure Files share. These -permissions are created as part of the following procedure. - -.Procedure - -. Define a `ClusterRole` object that allows access to create and view secrets: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: -# name: system:azure-cloud-provider - name: <persistent-volume-binder-role> <1> -rules: -- apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] ----- -<1> The name of the cluster role to view and create secrets. - -. Add the cluster role to the service account: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user <persistent-volume-binder-role> system:serviceaccount:kube-system:persistent-volume-binder ----- - -. Create the Azure File `StorageClass` object: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <azure-file> <1> -provisioner: kubernetes.io/azure-file -parameters: - location: eastus <2> - skuName: Standard_LRS <3> - storageAccount: <storage-account> <4> -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Location of the Azure storage account, such as `eastus`. Default is empty, meaning that a new Azure storage account will be created in the {product-title} cluster's location. -<3> SKU tier of the Azure storage account, such as `Standard_LRS`. Default is empty, meaning that a new Azure storage account will be created with the `Standard_LRS` SKU. -<4> Name of the Azure storage account. If a storage account is provided, then -`skuName` and `location` are ignored. If no storage account is provided, then -the storage class searches for any storage account that is associated with the -resource group for any accounts that match the defined `skuName` and `location`. diff --git a/modules/dynamic-provisioning-ceph-rbd-definition.adoc b/modules/dynamic-provisioning-ceph-rbd-definition.adoc deleted file mode 100644 index 6dbaed5ef5af..000000000000 --- a/modules/dynamic-provisioning-ceph-rbd-definition.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="ceph-rbd-definition_{context}"] -= Ceph RBD object definition - -.ceph-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: cephfs -provisioner: kubernetes.io/rbd -parameters: - monitors: 10.16.153.105:6789 <1> - adminId: admin <2> - adminSecretName: ceph-secret <3> - adminSecretNamespace: kube-system <4> - pool: kube <5> - userId: kube <6> - userSecretName: ceph-secret-user <7> - fsType: ext4 <8> - imageFormat: "2" <9> ----- -<1> (required) A comma-delimited list of Ceph monitors. 
-<2> Optional: Ceph client ID that is capable of creating images in the -pool. Default is `admin`. -<3> (required) Secret Name for `adminId`. The provided secret must have -type `kubernetes.io/rbd`. -<4> Optional: The namespace for `adminSecret`. Default is `default`. -<5> Optional: Ceph RBD pool. Default is `rbd`. -<6> Optional: Ceph client ID that is used to map the Ceph RBD image. -Default is the same as `adminId`. -<7> (required) The name of Ceph Secret for `userId` to map Ceph RBD image. -It must exist in the same namespace as PVCs. -<8> Optional: File system that is created on dynamically provisioned -volumes. This value is copied to the `fsType` field of dynamically -provisioned persistent volumes and the file system is created when the -volume is mounted for the first time. The default value is `ext4`. -<9> Optional: Ceph RBD image format. The default value is `2`. diff --git a/modules/dynamic-provisioning-change-default-class.adoc b/modules/dynamic-provisioning-change-default-class.adoc deleted file mode 100644 index 4692a88acf0e..000000000000 --- a/modules/dynamic-provisioning-change-default-class.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="change-default-storage-class_{context}"] -= Changing the default storage class - -Use the following procedure to change the default storage class. - -For example, if you have two defined storage classes, `gp3` and `standard`, and you want to change the default storage class from `gp3` to `standard`. - -.Prerequisites - -* Access to the cluster with cluster-admin privileges. - -.Procedure - -To change the default storage class: - -. List the storage classes: -+ -[source,terminal] ----- -$ oc get storageclass ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE -gp3 (default) kubernetes.io/aws-ebs <1> -standard kubernetes.io/aws-ebs ----- -<1> `(default)` indicates the default storage class. - -. Make the desired storage class the default. -+ -For the desired storage class, set the `storageclass.kubernetes.io/is-default-class` annotation to `true` by running the following command: -+ -[source,terminal] ----- -$ oc patch storageclass standard -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' ----- -+ -[NOTE] -==== -You can have multiple default storage classes for a short time. However, you should ensure that only one default storage class exists eventually. - -With multiple default storage classes present, any persistent volume claim (PVC) requesting the default storage class (`pvc.spec.storageClassName`=nil) gets the most recently created default storage class, regardless of the default status of that storage class, and the administrator receives an alert in the alerts dashboard that there are multiple default storage classes, `MultipleDefaultStorageClasses`. - -// add xref to multi/no default SC module -==== - -. Remove the default storage class setting from the old default storage class. -+ -For the old default storage class, change the value of the `storageclass.kubernetes.io/is-default-class` annotation to `false` by running the following command: -+ -[source,terminal] ----- -$ oc patch storageclass gp3 -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' ----- - -. 
Verify the changes: -+ -[source,terminal] ----- -$ oc get storageclass ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE -gp3 kubernetes.io/aws-ebs -standard (default) kubernetes.io/aws-ebs ----- diff --git a/modules/dynamic-provisioning-cinder-definition.adoc b/modules/dynamic-provisioning-cinder-definition.adoc deleted file mode 100644 index 1a8e8698bc19..000000000000 --- a/modules/dynamic-provisioning-cinder-definition.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="openstack-cinder-storage-class_{context}"] -= {rh-openstack} Cinder object definition - -.cinder-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/cinder -parameters: - type: fast <2> - availability: nova <3> - fsType: ext4 <4> ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Volume type created in Cinder. Default is empty. -<3> Availability Zone. If not specified, volumes are generally -round-robined across all active zones where the {product-title} cluster -has a node. -<4> File system that is created on dynamically provisioned volumes. This -value is copied to the `fsType` field of dynamically provisioned -persistent volumes and the file system is created when the volume is -mounted for the first time. The default value is `ext4`. diff --git a/modules/dynamic-provisioning-defining-storage-class.adoc b/modules/dynamic-provisioning-defining-storage-class.adoc deleted file mode 100644 index 38f61a83146e..000000000000 --- a/modules/dynamic-provisioning-defining-storage-class.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="defining-storage-classes_{context}"] -= Defining a storage class - -`StorageClass` objects are currently a globally scoped object and must be -created by `cluster-admin` or `storage-admin` users. - -ifndef::microshift,openshift-rosa[] -[IMPORTANT] -==== -The Cluster Storage Operator might install a default storage class depending -on the platform in use. This storage class is owned and controlled by the -Operator. It cannot be deleted or modified beyond defining annotations -and labels. If different behavior is desired, you must define a custom -storage class. -==== -endif::microshift,openshift-rosa[] -ifdef::openshift-rosa[] -[IMPORTANT] -==== -The Cluster Storage Operator installs a default storage class. This storage class is owned and controlled by the Operator. It cannot be deleted or modified beyond defining annotations and labels. If different behavior is desired, you must define a custom storage class. -==== -endif::openshift-rosa[] - -The following sections describe the basic definition for a -`StorageClass` object and specific examples for each of the supported plugin types. 
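
As a complement to the default storage class behavior described above, a claim that omits `spec.storageClassName` is provisioned by using whichever storage class is currently marked as the default. The following sketch is illustrative only; the claim name and size are examples.

[source,yaml]
----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: default-class-claim <1>
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi <2>
----
<1> Example claim name. Because no `storageClassName` is set, the default storage class provisions the volume.
<2> Example requested size.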
diff --git a/modules/dynamic-provisioning-gce-definition.adoc b/modules/dynamic-provisioning-gce-definition.adoc deleted file mode 100644 index 1dbe8c21a7cd..000000000000 --- a/modules/dynamic-provisioning-gce-definition.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - -[id="gce-persistentdisk-storage-class_{context}"] -= GCE PersistentDisk (gcePD) object definition - -.gce-pd-storageclass.yaml -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-standard <2> - replication-type: none -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -reclaimPolicy: Delete ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> Select either `pd-standard` or `pd-ssd`. The default is `pd-standard`. diff --git a/modules/dynamic-provisioning-gluster-definition.adoc b/modules/dynamic-provisioning-gluster-definition.adoc deleted file mode 100644 index 51934a58e2d3..000000000000 --- a/modules/dynamic-provisioning-gluster-definition.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="gluster-definition_{context}"] -= GlusterFS object definition - -.glusterfs-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: slow -provisioner: kubernetes.io/glusterfs -parameters: <1> - resturl: http://127.0.0.1:8081 <2> - restuser: admin <3> - secretName: heketi-secret <4> - secretNamespace: default <5> - gidMin: "40000" <6> - gidMax: "50000" <7> - volumeoptions: group metadata-cache, nl-cache on <8> - volumetype: replicate:3 <9> ----- -<1> Listed are mandatory and a few optional parameters. Refer to -link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/3.10/html-single/operations_guide/#sect_file_reg_storageclass[Registering a Storage Class] for additional parameters. -<2> link:https://github.com/heketi/heketi[heketi] (volume management REST -service for Gluster) URL that provisions GlusterFS volumes on demand. The -general format should be `{http/https}://{IPaddress}:{Port}`. This is a -mandatory parameter for the GlusterFS dynamic provisioner. If the heketi -service is exposed as a routable service in the {product-title}, it will -have a resolvable fully qualified domain name (FQDN) and heketi service URL. -<3> heketi user who has access to create volumes. This is typically `admin`. -<4> Identification of a Secret that contains a user password to use when -talking to heketi. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. -The provided secret must be of type `"kubernetes.io/glusterfs"`. -<5> The namespace of mentioned `secretName`. An empty password will be used -when both `secretNamespace` and `secretName` are omitted. The provided -Secret must be of type `"kubernetes.io/glusterfs"`. -<6> Optional. The minimum value of the GID range for volumes of this -StorageClass. -<7> Optional. The maximum value of the GID range for volumes of this -StorageClass. -<8> Optional. Options for newly created volumes. It allows for -performance tuning. 
See -link:https://docs.gluster.org/en/v3/Administrator%20Guide/Managing%20Volumes/#tuning-volume-options[Tuning Volume Options] -for more GlusterFS volume options. -<9> Optional. The -link:https://docs.gluster.org/en/v3/Quick-Start-Guide/Architecture/[type of volume] -to use. - -[NOTE] -==== -When the `gidMin` and `gidMax` values are not specified, their defaults are -2000 and 2147483647 respectively. Each dynamically provisioned volume -will be given a GID in this range (`gidMin-gidMax`). This GID is released -from the pool when the respective volume is deleted. The GID pool is -per StorageClass. -If two or more storage classes have GID ranges that overlap there may be -duplicate GIDs dispatched by the provisioner. -==== - -When heketi authentication is used, a Secret containing the admin key must -also exist. - -[source,terminal] ----- -$ oc create secret generic heketi-secret --from-literal=key=<password> -n <namespace> --type=kubernetes.io/glusterfs ----- - -This results in the following configuration: - -.heketi-secret.yaml -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: heketi-secret - namespace: namespace - ... -data: - key: cGFzc3dvcmQ= <1> -type: kubernetes.io/glusterfs ----- -<1> base64 encoded password - -[NOTE] -==== -When the PVs are dynamically provisioned, the GlusterFS plugin -automatically creates an Endpoints and a headless Service named -`gluster-dynamic-<claimname>`. When the PVC is deleted, these dynamic -resources are deleted automatically. -==== diff --git a/modules/dynamic-provisioning-manila-csi-definition.adoc b/modules/dynamic-provisioning-manila-csi-definition.adoc deleted file mode 100644 index 844bcda0f8de..000000000000 --- a/modules/dynamic-provisioning-manila-csi-definition.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc - -[id="openstack-manila-csi-definition_{context}"] -= {rh-openstack} Manila Container Storage Interface (CSI) object definition - -Once installed, the OpenStack Manila CSI Driver Operator and ManilaDriver automatically create the required storage classes for all available Manila share types needed for dynamic provisioning. diff --git a/modules/dynamic-provisioning-storage-class-definition.adoc b/modules/dynamic-provisioning-storage-class-definition.adoc deleted file mode 100644 index 436a78ed6962..000000000000 --- a/modules/dynamic-provisioning-storage-class-definition.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc -// * microshift_storage/dynamic-provisioning-microshift.adoc - - -[id="basic-storage-class-definition_{context}"] -= Basic StorageClass object definition - -The following resource shows the parameters and default values that you -use to configure a storage class. This example uses the AWS -ElasticBlockStore (EBS) object definition. - - -.Sample `StorageClass` definition -[source,yaml] ----- -kind: StorageClass <1> -apiVersion: storage.k8s.io/v1 <2> -metadata: - name: <storage-class-name> <3> - annotations: <4> - storageclass.kubernetes.io/is-default-class: 'true' - ... -provisioner: kubernetes.io/aws-ebs <5> -parameters: <6> - type: gp3 -... ----- -<1> (required) The API object type. -<2> (required) The current apiVersion. -<3> (required) The name of the storage class. -<4> (optional) Annotations for the storage class. 
-<5> (required) The type of provisioner associated with this storage class. -<6> (optional) The parameters required for the specific provisioner, this -will change from plugin to plug-iin. diff --git a/modules/dynamic-provisioning-vsphere-definition.adoc b/modules/dynamic-provisioning-vsphere-definition.adoc deleted file mode 100644 index ee169124a417..000000000000 --- a/modules/dynamic-provisioning-vsphere-definition.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following definitions: -// -// * storage/dynamic-provisioning.adoc -// * post_installation_configuration/storage-configuration.adoc - - -[id="vsphere-definition_{context}"] -= VMware vSphere object definition - -.vsphere-storageclass.yaml -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: <storage-class-name> <1> -provisioner: kubernetes.io/vsphere-volume <2> -parameters: - diskformat: thin <3> ----- -<1> Name of the storage class. The persistent volume claim uses this storage class for provisioning the associated persistent volumes. -<2> For more information about using VMware vSphere with {product-title}, -see the -link:https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/index.html[VMware vSphere documentation]. -<3> `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick` are all -valid disk formats. See vSphere docs for additional details regarding the -disk format types. The default value is `thin`. diff --git a/modules/eco-about-node-maintenance-standalone.adoc b/modules/eco-about-node-maintenance-standalone.adoc deleted file mode 100644 index 7482ac3101ca..000000000000 --- a/modules/eco-about-node-maintenance-standalone.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: CONCEPT -[id="eco-about-node-maintenance-operator_{context}"] -= About the Node Maintenance Operator - -The Node Maintenance Operator watches for new or deleted `NodeMaintenance` CRs. When a new `NodeMaintenance` CR is detected, no new workloads are scheduled and the node is cordoned off from the rest of the cluster. All pods that can be evicted are evicted from the node. When a `NodeMaintenance` CR is deleted, the node that is referenced in the CR is made available for new workloads. - -[NOTE] -==== -Using a `NodeMaintenance` CR for node maintenance tasks achieves the same results as the `oc adm cordon` and `oc adm drain` commands using standard {product-title} CR processing. -==== diff --git a/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc b/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc deleted file mode 100644 index 6a6df77203ea..000000000000 --- a/modules/eco-checking_status_of_node_maintenance_cr_tasks.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-checking_status_of_node_maintenance_cr_tasks_{context}"] -= Checking status of current NodeMaintenance CR tasks - -You can check the status of current `NodeMaintenance` CR tasks. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in as a user with `cluster-admin` privileges. 
- -.Procedure - -* Check the status of current node maintenance tasks, for example the `NodeMaintenance` CR or `nm` object, by running the following command: -+ -[source,terminal] ----- -$ oc get nm -o yaml ----- -+ -.Example output -+ -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: nodemaintenance.medik8s.io/v1beta1 - kind: NodeMaintenance - metadata: -... - spec: - nodeName: node-1.example.com - reason: Node maintenance - status: - drainProgress: 100 <1> - evictionPods: 3 <2> - lastError: "Last failure message" <3> - lastUpdate: "2022-06-23T11:43:18Z" <4> - phase: Succeeded - totalpods: 5 <5> -... ----- -<1> The percentage completion of draining the node. -<2> The number of pods scheduled for eviction. -<3> The latest eviction error, if any. -<4> The last time the status was updated. -<5> The total number of pods before the node entered maintenance mode. diff --git a/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc b/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc deleted file mode 100644 index 6bf269cf942b..000000000000 --- a/modules/eco-configuring-control-plane-machine-health-check-with-self-node-remediation.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-poison-pill-operator.adoc - -:_content-type: PROCEDURE -[id="configuring-control-plane-machine-health-check-with-self-node-remediation-operator_{context}"] -= Configuring control-plane machine health checks to use the Self Node Remediation Operator - -Use the following procedure to configure the control-plane machine health checks to use the Self Node Remediation Operator as a remediation provider. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `SelfNodeRemediationTemplate` CR: - -.. Define the `SelfNodeRemediationTemplate` CR: -+ -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - namespace: openshift-machine-api - name: selfnoderemediationtemplate-sample -spec: - template: - spec: - remediationStrategy: ResourceDeletion <1> ----- -<1> Specifies the remediation strategy. The default strategy is `ResourceDeletion`. - -.. To create the `SelfNodeRemediationTemplate` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <snrt-name>.yaml ----- - -. Create or update the `MachineHealthCheck` CR to point to the `SelfNodeRemediationTemplate` CR: - -.. Define or update the `MachineHealthCheck` CR: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: machine-health-check - namespace: openshift-machine-api -spec: - selector: - matchLabels: - machine.openshift.io/cluster-api-machine-role: "control-plane" - machine.openshift.io/cluster-api-machine-type: "control-plane" - unhealthyConditions: - - type: "Ready" - timeout: "300s" - status: "False" - - type: "Ready" - timeout: "300s" - status: "Unknown" - maxUnhealthy: "40%" - nodeStartupTimeout: "10m" - remediationTemplate: <1> - kind: SelfNodeRemediationTemplate - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: selfnoderemediationtemplate-sample ----- -<1> Specifies the details for the remediation template. - -+ -.. To create a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <mhc-name>.yaml ----- - -.. 
To update a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc apply -f <mhc-name>.yaml ----- diff --git a/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc b/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc deleted file mode 100644 index 95cab1ac7228..000000000000 --- a/modules/eco-configuring-machine-health-check-with-self-node-remediation.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// *nodes/nodes/eco-poison-pill-operator.adoc - -:_content-type: PROCEDURE -[id="configuring-machine-health-check-with-self-node-remediation-operator_{context}"] -= Configuring machine health checks to use the Self Node Remediation Operator - -Use the following procedure to configure the worker or control-plane machine health checks to use the Self Node Remediation Operator as a remediation provider. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `SelfNodeRemediationTemplate` CR: - -.. Define the `SelfNodeRemediationTemplate` CR: -+ -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - namespace: openshift-machine-api - name: selfnoderemediationtemplate-sample -spec: - template: - spec: - remediationStrategy: ResourceDeletion <1> ----- -<1> Specifies the remediation strategy. The default strategy is `ResourceDeletion`. - -.. To create the `SelfNodeRemediationTemplate` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <snrt-name>.yaml ----- - -. Create or update the `MachineHealthCheck` CR to point to the `SelfNodeRemediationTemplate` CR: - -.. Define or update the `MachineHealthCheck` CR: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: machine-health-check - namespace: openshift-machine-api -spec: - selector: - matchLabels: <1> - machine.openshift.io/cluster-api-machine-role: "worker" - machine.openshift.io/cluster-api-machine-type: "worker" - unhealthyConditions: - - type: "Ready" - timeout: "300s" - status: "False" - - type: "Ready" - timeout: "300s" - status: "Unknown" - maxUnhealthy: "40%" - nodeStartupTimeout: "10m" - remediationTemplate: <2> - kind: SelfNodeRemediationTemplate - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: selfnoderemediationtemplate-sample ----- -<1> Selects whether the machine health check is for `worker` or `control-plane` nodes. The label can also be user-defined. -<2> Specifies the details for the remediation template. - -+ -.. To create a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f <mhc-name>.yaml ----- - -.. 
To update a `MachineHealthCheck` CR, run the following command: -+ -[source,terminal] ----- -$ oc apply -f <mhc-name>.yaml ----- diff --git a/modules/eco-maintaining-bare-metal-nodes.adoc b/modules/eco-maintaining-bare-metal-nodes.adoc deleted file mode 100644 index 0f778eaa1888..000000000000 --- a/modules/eco-maintaining-bare-metal-nodes.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: CONCEPT -[id="eco-maintaining-bare-metal-nodes_{context}"] -= Maintaining bare-metal nodes - -When you deploy {product-title} on bare-metal infrastructure, you must take additional considerations into account compared to deploying on cloud infrastructure. Unlike in cloud environments, where the cluster nodes are considered ephemeral, reprovisioning a bare-metal node requires significantly more time and effort for maintenance tasks. - -When a bare-metal node fails due to a kernel error or a NIC card hardware failure, workloads on the failed node need to be restarted on another node in the cluster while the problem node is repaired or replaced. Node maintenance mode allows cluster administrators to gracefully turn-off nodes, move workloads to other parts of the cluster, and ensure that workloads do not get interrupted. Detailed progress and node status details are provided during maintenance. - diff --git a/modules/eco-node-health-check-operator-about.adoc b/modules/eco-node-health-check-operator-about.adoc deleted file mode 100644 index 04a60196e055..000000000000 --- a/modules/eco-node-health-check-operator-about.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="about-node-health-check-operator_{context}"] -= About the Node Health Check Operator - -The Node Health Check Operator detects the health of the nodes in a cluster. The `NodeHealthCheck` controller creates the `NodeHealthCheck` custom resource (CR), which defines a set of criteria and thresholds to determine the health of a node. - -The Node Health Check Operator also installs the Self Node Remediation Operator as a default remediation provider. - -When the Node Health Check Operator detects an unhealthy node, it creates a remediation CR that triggers the remediation provider. For example, the controller creates the `SelfNodeRemediation` CR, which triggers the Self Node Remediation Operator to remediate the unhealthy node. - -The `NodeHealthCheck` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: remediation.medik8s.io/v1alpha1 -kind: NodeHealthCheck -metadata: - name: nodehealthcheck-sample -spec: - minHealthy: 51% <1> - pauseRequests: <2> - - <pause-test-cluster> - remediationTemplate: <3> - apiVersion: self-node-remediation.medik8s.io/v1alpha1 - name: self-node-remediation-resource-deletion-template - namespace: openshift-operators - kind: SelfNodeRemediationTemplate - selector: <4> - matchExpressions: - - key: node-role.kubernetes.io/worker - operator: Exists - unhealthyConditions: <5> - - type: Ready - status: "False" - duration: 300s <6> - - type: Ready - status: Unknown - duration: 300s <6> ----- - -<1> Specifies the amount of healthy nodes(in percentage or number) required for a remediation provider to concurrently remediate nodes in the targeted pool. If the number of healthy nodes equals to or exceeds the limit set by `minHealthy`, remediation occurs. The default value is 51%. 
-<2> Prevents any new remediation from starting, while allowing any ongoing remediations to persist. The default value is empty. However, you can enter an array of strings that identify the cause of pausing the remediation. For example, `pause-test-cluster`. -+ -[NOTE] -==== -During the upgrade process, nodes in the cluster might become temporarily unavailable and get identified as unhealthy. In the case of worker nodes, when the Operator detects that the cluster is upgrading, it stops remediating new unhealthy nodes to prevent such nodes from rebooting. -==== -<3> Specifies a remediation template from the remediation provider. For example, from the Self Node Remediation Operator. -<4> Specifies a `selector` that matches labels or expressions that you want to check. The default value is empty, which selects all nodes. -<5> Specifies a list of the conditions that determine whether a node is considered unhealthy. -<6> Specifies the timeout duration for a node condition. If a condition is met for the duration of the timeout, the node will be remediated. Long timeouts can result in long periods of downtime for a workload on an unhealthy node. - -[id="understanding-nhc-operator-workflow_{context}"] -== Understanding the Node Health Check Operator workflow - -When a node is identified as unhealthy, the Node Health Check Operator checks how many other nodes are unhealthy. If the number of healthy nodes exceeds the amount that is specified in the `minHealthy` field of the `NodeHealthCheck` CR, the controller creates a remediation CR from the details that are provided in the external remediation template by the remediation provider. After remediation, the kubelet updates the node's health status. - -When the node turns healthy, the controller deletes the external remediation template. - -[id="how-nhc-prevent-conflict-with-mhc_{context}"] -== About how node health checks prevent conflicts with machine health checks - -When both, node health checks and machine health checks are deployed, the node health check avoids conflict with the machine health check. - -[NOTE] -==== -{product-title} deploys `machine-api-termination-handler` as the default `MachineHealthCheck` resource. -==== - -The following list summarizes the system behavior when node health checks and machine health checks are deployed: - -* If only the default machine health check exists, the node health check continues to identify unhealthy nodes. However, the node health check ignores unhealthy nodes in a Terminating state. The default machine health check handles the unhealthy nodes with a Terminating state. -+ -.Example log message -[source,terminal] ----- -INFO MHCChecker ignoring unhealthy Node, it is terminating and will be handled by MHC {"NodeName": "node-1.example.com"} ----- - -* If the default machine health check is modified (for example, the `unhealthyConditions` is `Ready`), or if additional machine health checks are created, the node health check is disabled. -+ -.Example log message ----- -INFO controllers.NodeHealthCheck disabling NHC in order to avoid conflict with custom MHCs configured in the cluster {"NodeHealthCheck": "/nhc-worker-default"} ----- - -* When, again, only the default machine health check exists, the node health check is re-enabled. 
-+ -.Example log message ----- -INFO controllers.NodeHealthCheck re-enabling NHC, no conflicting MHC configured in the cluster {"NodeHealthCheck": "/nhc-worker-default"} ----- diff --git a/modules/eco-node-health-check-operator-control-plane-fencing.adoc b/modules/eco-node-health-check-operator-control-plane-fencing.adoc deleted file mode 100644 index b8276e8d11b8..000000000000 --- a/modules/eco-node-health-check-operator-control-plane-fencing.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="control-plane-fencing-node-health-check-operator_{context}"] -= Control plane fencing - -In earlier releases, you could enable Self Node Remediation and Node Health Check on worker nodes. In the event of node failure, you can now also follow remediation strategies on control plane nodes. - -Do not use the same `NodeHealthCheck` CR for worker nodes and control plane nodes. Grouping worker nodes and control plane nodes together can result in incorrect evaluation of the minimum healthy node count, and cause unexpected or missing remediations. This is because of the way the Node Health Check Operator handles control plane nodes. You should group the control plane nodes in their own group and the worker nodes in their own group. If required, you can also create multiple groups of worker nodes. - -Considerations for remediation strategies: - -* Avoid Node Health Check configurations that involve multiple configurations overlapping the same nodes because they can result in unexpected behavior. This suggestion applies to both worker and control plane nodes. -* The Node Health Check Operator implements a hardcoded limitation of remediating a maximum of one control plane node at a time. Multiple control plane nodes should not be remediated at the same time. diff --git a/modules/eco-node-health-check-operator-creating-node-health-check.adoc b/modules/eco-node-health-check-operator-creating-node-health-check.adoc deleted file mode 100644 index 0e4c36d42d36..000000000000 --- a/modules/eco-node-health-check-operator-creating-node-health-check.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * ../nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: PROCEDURE -[id="eco-node-health-check-operator-creating-node-health-check_{context}"] -= Creating a node health check -Using the web console, you can create a node health check to identify unhealthy nodes and specify the remediation type and strategy to fix them. - -.Procedure - -. From the *Administrator* perspective of the {product-title} web console, click *Compute* -> *NodeHealthChecks* -> *CreateNodeHealthCheck*. -. Specify whether to configure the node health check using the *Form view* or the *YAML view*. -. Enter a *Name* for the node health check. The name must consist of lower case, alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. -. Specify the *Remediator* type, and *Self node remediation* or *Other*. The Self node remediation option is part of the Self Node Remediation Operator that is installed with the Node Health Check Operator. Selecting *Other* requires an *API version*, *Kind*, *Name*, and *Namespace* to be entered, which then points to the remediation template resource of a remediator. -. Make a *Nodes* selection by specifying the labels of the nodes you want to remediate. The selection matches labels that you want to check. 
If more than one label is specified, the nodes must contain each label. The default value is empty, which selects both worker and control-plane nodes. -+ -[NOTE] -==== -When creating a node health check with the Self Node Remediation Operator, you must select either `node-role.kubernetes.io/worker` or `node-role.kubernetes.io/control-plane` as the value. -==== -+ -. Specify the minimum number of healthy nodes, using either a percentage or a number, required for a *NodeHealthCheck* to remediate nodes in the targeted pool. If the number of healthy nodes equals to or exceeds the limit set by *Min healthy*, remediation occurs. The default value is 51%. -. Specify a list of *Unhealthy conditions* that if a node meets determines whether the node is considered unhealthy, and requires remediation. You can specify the *Type*, *Status* and *Duration*. You can also create your own custom type. -. Click *Create* to create the node health check. - -.Verification - -* Navigate to the *Compute* -> *NodeHealthCheck* page and verify that the corresponding node health check is listed, and their status displayed. Once created, node health checks can be paused, modified, and deleted. diff --git a/modules/eco-node-health-check-operator-installation-cli.adoc b/modules/eco-node-health-check-operator-installation-cli.adoc deleted file mode 100644 index bca1ea8e8f6c..000000000000 --- a/modules/eco-node-health-check-operator-installation-cli.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/node-health-check-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-node-health-check-operator-using-cli_{context}"] -= Installing the Node Health Check Operator by using the CLI -You can use the OpenShift CLI (`oc`) to install the Node Health Check Operator. - -To install the Operator in your own namespace, follow the steps in the procedure. - -To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Namespace` custom resource (CR) for the Node Health Check Operator: -.. Define the `Namespace` CR and save the YAML file, for example, `node-health-check-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: node-health-check ----- -.. To create the `Namespace` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-namespace.yaml ----- - -. Create an `OperatorGroup` CR: -.. Define the `OperatorGroup` CR and save the YAML file, for example, `node-health-check-operator-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: node-health-check-operator - namespace: node-health-check ----- -.. To create the `OperatorGroup` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-operator-group.yaml ----- - -. Create a `Subscription` CR: -.. 
Define the `Subscription` CR and save the YAML file, for example, `node-health-check-subscription.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: node-health-check-operator - namespace: node-health-check <1> -spec: - channel: stable <2> - installPlanApproval: Manual <3> - name: node-healthcheck-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - package: node-healthcheck-operator ----- -<1> Specify the `Namespace` where you want to install the Node Health Check Operator. To install the Node Health Check Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR. -<2> Specify the channel name for your subscription. To upgrade to the latest version of the Node Health Check Operator, you must manually change the channel name for your subscription from `candidate` to `stable`. -<3> Set the approval strategy to Manual in case your specified version is superseded by a later version in the catalog. This plan prevents an automatic upgrade to a later version and requires manual approval before the starting CSV can complete the installation. - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f node-health-check-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -node-healthcheck-operator.v0.2.0. Node Health Check Operator 0.2.0 Succeeded ----- -. Verify that the Node Health Check Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -node-health-check-operator-controller-manager 1/1 1 1 10d ----- diff --git a/modules/eco-node-health-check-operator-installation-web-console.adoc b/modules/eco-node-health-check-operator-installation-web-console.adoc deleted file mode 100644 index 76c8924f9e90..000000000000 --- a/modules/eco-node-health-check-operator-installation-web-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/node-health-check-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-node-health-check-operator-using-web-console_{context}"] -= Installing the Node Health Check Operator by using the web console - -You can use the {product-title} web console to install the Node Health Check Operator. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Node Health Check Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. -. Ensure that the *Console plug-in* is set to `Enable`. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. 
Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-operators` project that are reporting issues. diff --git a/modules/eco-node-maintenance-operator-installation-cli.adoc deleted file mode 100644 index 1bdf3016f167..000000000000 --- a/modules/eco-node-maintenance-operator-installation-cli.adoc +++ /dev/null @@ -1,120 +0,0 @@
-// Module included in the following assemblies:
-//
-// nodes/nodes/eco-node-maintenance-operator.adoc
-
-:_content-type: PROCEDURE
-[id="installing-maintenance-operator-using-cli_{context}"]
-= Installing the Node Maintenance Operator by using the CLI
-You can use the OpenShift CLI (`oc`) to install the Node Maintenance Operator.
-
-You can install the Node Maintenance Operator in your own namespace or in the `openshift-operators` namespace.
-
-To install the Operator in your own namespace, follow the steps in the procedure.
-
-To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required.
-
-.Prerequisites
-
-* Install the OpenShift CLI (`oc`).
-* Log in as a user with `cluster-admin` privileges.
-
-.Procedure
-
-. Create a `Namespace` CR for the Node Maintenance Operator:
-.. Define the `Namespace` CR and save the YAML file, for example, `node-maintenance-namespace.yaml`:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: nmo-test
-----
-.. To create the `Namespace` CR, run the following command:
-+
-[source,terminal]
-----
-$ oc create -f node-maintenance-namespace.yaml
-----
-
-. Create an `OperatorGroup` CR:
-.. Define the `OperatorGroup` CR and save the YAML file, for example, `node-maintenance-operator-group.yaml`:
-+
-[source,yaml]
-----
-apiVersion: operators.coreos.com/v1
-kind: OperatorGroup
-metadata:
-  name: node-maintenance-operator
-  namespace: nmo-test
-----
-.. To create the `OperatorGroup` CR, run the following command:
-+
-[source,terminal]
-----
-$ oc create -f node-maintenance-operator-group.yaml
-----
-
-. Create a `Subscription` CR:
-.. Define the `Subscription` CR and save the YAML file, for example, `node-maintenance-subscription.yaml`:
-+
-[source,yaml,subs="attributes+"]
-----
-apiVersion: operators.coreos.com/v1alpha1
-kind: Subscription
-metadata:
-  name: node-maintenance-operator
-  namespace: nmo-test <1>
-spec:
-  channel: stable
-  installPlanApproval: Automatic
-  name: node-maintenance-operator
-  source: redhat-operators
-  sourceNamespace: openshift-marketplace
-  startingCSV: node-maintenance-operator.v{product-version}.0
-----
-+
-<1> Specify the `Namespace` where you want to install the Node Maintenance Operator.
-+
-[IMPORTANT]
-====
-To install the Node Maintenance Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR.
-====
-
-.. To create the `Subscription` CR, run the following command:
-+
-[source,terminal]
-----
-$ oc create -f node-maintenance-subscription.yaml
-----
-
-.Verification
-
-. Verify that the installation succeeded by inspecting the CSV resource:
-+
-[source,terminal]
-----
-$ oc get csv -n openshift-operators
-----
-+
-.Example output
-
-[source,terminal]
-----
-NAME                              DISPLAY                     VERSION   REPLACES   PHASE
-node-maintenance-operator.v4.13   Node Maintenance Operator   4.13                 Succeeded
-----
-. 
Verify that the Node Maintenance Operator is running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-operators ----- -+ -.Example output - -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -node-maintenance-operator-controller-manager 1/1 1 1 10d ----- diff --git a/modules/eco-node-maintenance-operator-installation-web-console.adoc b/modules/eco-node-maintenance-operator-installation-web-console.adoc deleted file mode 100644 index 319561f3bbca..000000000000 --- a/modules/eco-node-maintenance-operator-installation-web-console.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="installing-node-maintenance-operator-using-web-console_{context}"] -= Installing the Node Maintenance Operator by using the web console - -You can use the {product-title} web console to install the Node Maintenance Operator. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Node Maintenance Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-operators` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Operators* -> *Installed Operators* -> *Node Maintenance Operator* -> *Details* page, and inspect the `Conditions` section for errors before pod creation. -. Navigate to the *Workloads* -> *Pods* page, search for the `Node Maintenance Operator` pod in the installed namespace, and check the logs in the `Logs` tab. diff --git a/modules/eco-resuming-node-maintenance-actions-web-console.adoc b/modules/eco-resuming-node-maintenance-actions-web-console.adoc deleted file mode 100644 index 85004f7c989c..000000000000 --- a/modules/eco-resuming-node-maintenance-actions-web-console.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-maintenance-actions-web-console_{context}"] -= Resuming a bare-metal node from maintenance mode -Resume a bare-metal node from maintenance mode using the Options menu {kebab} found on each node in the *Compute* -> *Nodes* list, or using the *Actions* control of the *Node Details* screen. - -.Procedure - -. From the *Administrator* perspective of the web console, click *Compute* -> *Nodes*. -. You can resume the node from this screen, which makes it easier to perform actions on multiple nodes, or from the *Node Details* screen, where you can view comprehensive details of the selected node: -** Click the Options menu {kebab} at the end of the node and select -*Stop Maintenance*. -** Click the node name to open the *Node Details* screen and click -*Actions* -> *Stop Maintenance*. -. Click *Stop Maintenance* in the confirmation window. - -The node becomes schedulable. 
If it had virtual machine instances that were running on the node prior to maintenance, then they will not automatically migrate back to this node. - -.Verification - -* Navigate to the *Compute* -> *Nodes* page and verify that the corresponding node has a status of `Ready`. diff --git a/modules/eco-resuming-node-maintenance-cr-cli.adoc b/modules/eco-resuming-node-maintenance-cr-cli.adoc deleted file mode 100644 index 0dbbed583f85..000000000000 --- a/modules/eco-resuming-node-maintenance-cr-cli.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-from-maintenance-mode-with-cr_{context}"] -= Resuming a node from maintenance mode by using the CLI - -You can resume a node from maintenance mode that was initiated with a `NodeMaintenance` CR by deleting the `NodeMaintenance` CR. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in to the cluster as a user with `cluster-admin` privileges. - -.Procedure - -* When your node maintenance task is complete, delete the active `NodeMaintenance` CR: -+ -[source,terminal] ----- -$ oc delete -f nodemaintenance-cr.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -nodemaintenance.nodemaintenance.medik8s.io "maintenance-example" deleted ----- - -.Verification - -. Check the progress of the maintenance task by running the following command: -+ -[source,terminal] ----- -$ oc describe node <node-name> ----- -+ -where `<node-name>` is the name of your node; for example, `node-1.example.com` - -. Check the example output: -+ -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeSchedulable 2m kubelet Node node-1.example.com status is now: NodeSchedulable ----- diff --git a/modules/eco-resuming-node-maintenance-cr-web-console.adoc b/modules/eco-resuming-node-maintenance-cr-web-console.adoc deleted file mode 100644 index 87703af9ebc6..000000000000 --- a/modules/eco-resuming-node-maintenance-cr-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-resuming-node-maintenance-web-console_{context}"] -= Resuming a node from maintenance mode by using the web console - -To resume a node from maintenance mode, you can delete a `NodeMaintenance` custom resource (CR) by using the web console. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. - -.Procedure - -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. - -. Select the Node Maintenance Operator from the list of Operators. - -. In the *Node Maintenance* tab, select the `NodeMaintenance` CR that you want to delete. - -. Click the Options menu {kebab} at the end of the node and select *Delete NodeMaintenance*. - -.Verification - -. In the {product-title} console, click *Compute → Nodes*. - -. Inspect the `Status` column of the node for which you deleted the `NodeMaintenance` CR and verify that its status is `Ready`. 
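To double-check from the CLI that the node accepts workloads again after maintenance ends, a check along the following lines can complement the console verification. The node name `node-1.example.com` is only an example value reused from earlier output in this document:

[source,terminal]
----
$ oc get node node-1.example.com -o jsonpath='{.spec.unschedulable}'
----

Empty output (or `false`) indicates that the node is schedulable again; `true` means the node is still cordoned.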
diff --git a/modules/eco-self-node-remediation-about-watchdog.adoc b/modules/eco-self-node-remediation-about-watchdog.adoc deleted file mode 100644 index 0b7a1af0cc56..000000000000 --- a/modules/eco-self-node-remediation-about-watchdog.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="about-watchdog-devices_{context}"] -= About watchdog devices - -Watchdog devices can be any of the following: - -* Independently powered hardware devices -* Hardware devices that share power with the hosts they control -* Virtual devices implemented in software, or `softdog` - -Hardware watchdog and `softdog` devices have electronic or software timers, respectively. These watchdog devices are used to ensure that the machine enters a safe state when an error condition is detected. The cluster is required to repeatedly reset the watchdog timer to prove that it is in a healthy state. This timer might elapse due to fault conditions, such as deadlocks, CPU starvation, and loss of network or disk access. If the timer expires, the watchdog device assumes that a fault has occurred and the device triggers a forced reset of the node. - -Hardware watchdog devices are more reliable than `softdog` devices. - -[id="understanding-pp-watchdog_{context}"] -== Understanding Self Node Remediation Operator behavior with watchdog devices - -The Self Node Remediation Operator determines the remediation strategy based on the watchdog devices that are present. - -If a hardware watchdog device is configured and available, the Operator uses it for remediation. If a hardware watchdog device is not configured, the Operator enables and uses a `softdog` device for remediation. - -If neither watchdog devices are supported, either by the system or by the configuration, the Operator remediates nodes by using software reboot. \ No newline at end of file diff --git a/modules/eco-self-node-remediation-operator-about.adoc b/modules/eco-self-node-remediation-operator-about.adoc deleted file mode 100644 index cf4ffa94d28a..000000000000 --- a/modules/eco-self-node-remediation-operator-about.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="about-self-node-remediation-operator_{context}"] -= About the Self Node Remediation Operator - -The Self Node Remediation Operator runs on the cluster nodes and reboots nodes that are identified as unhealthy. The Operator uses the `MachineHealthCheck` or `NodeHealthCheck` controller to detect the health of a node in the cluster. When a node is identified as unhealthy, the `MachineHealthCheck` or the `NodeHealthCheck` resource creates the `SelfNodeRemediation` custom resource (CR), which triggers the Self Node Remediation Operator. - -The `SelfNodeRemediation` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediation -metadata: - name: selfnoderemediation-sample - namespace: openshift-operators -spec: -status: - lastError: <last_error_message> <1> ----- - -<1> Displays the last error that occurred during remediation. When remediation succeeds or if no errors occur, the field is left empty. - -The Self Node Remediation Operator minimizes downtime for stateful applications and restores compute capacity if transient failures occur. 
You can use this Operator regardless of the management interface, such as IPMI or an API to provision a node, and regardless of the cluster installation type, such as installer-provisioned infrastructure or user-provisioned infrastructure. diff --git a/modules/eco-self-node-remediation-operator-configuring.adoc b/modules/eco-self-node-remediation-operator-configuring.adoc deleted file mode 100644 index 2345a3bdd55b..000000000000 --- a/modules/eco-self-node-remediation-operator-configuring.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: CONCEPT -[id="configuring-self-node-remediation-operator_{context}"] -= Configuring the Self Node Remediation Operator - -The Self Node Remediation Operator creates the `SelfNodeRemediationConfig` CR and the `SelfNodeRemediationTemplate` Custom Resource Definition (CRD). - -[id="understanding-self-node-remediation-operator-config_{context}"] -== Understanding the Self Node Remediation Operator configuration - -The Self Node Remediation Operator creates the `SelfNodeRemediationConfig` CR with the name `self-node-remediation-config`. The CR is created in the namespace of the Self Node Remediation Operator. - -A change in the `SelfNodeRemediationConfig` CR re-creates the Self Node Remediation daemon set. - -The `SelfNodeRemediationConfig` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationConfig -metadata: - name: self-node-remediation-config - namespace: openshift-operators -spec: - safeTimeToAssumeNodeRebootedSeconds: 180 <1> - watchdogFilePath: /dev/watchdog <2> - isSoftwareRebootEnabled: true <3> - apiServerTimeout: 15s <4> - apiCheckInterval: 5s <5> - maxApiErrorThreshold: 3 <6> - peerApiServerTimeout: 5s <7> - peerDialTimeout: 5s <8> - peerRequestTimeout: 5s <9> - peerUpdateInterval: 15m <10> ----- - -<1> Specify the timeout duration for the surviving peer, after which the Operator can assume that an unhealthy node has been rebooted. The Operator automatically calculates the lower limit for this value. However, if different nodes have different watchdog timeouts, you must change this value to a higher value. -<2> Specify the file path of the watchdog device in the nodes. If you enter an incorrect path to the watchdog device, the Self Node Remediation Operator automatically detects the softdog device path. -+ -If a watchdog device is unavailable, the `SelfNodeRemediationConfig` CR uses a software reboot. -<3> Specify if you want to enable software reboot of the unhealthy nodes. By default, the value of `isSoftwareRebootEnabled` is set to `true`. To disable the software reboot, set the parameter value to `false`. -<4> Specify the timeout duration to check connectivity with each API server. When this duration elapses, the Operator starts remediation. The timeout duration must be greater than or equal to 10 milliseconds. -<5> Specify the frequency to check connectivity with each API server. The timeout duration must be greater than or equal to 1 second. -<6> Specify a threshold value. After reaching this threshold, the node starts contacting its peers. The threshold value must be greater than or equal to 1 second. -<7> Specify the duration of the timeout for the peer to connect the API server. The timeout duration must be greater than or equal to 10 milliseconds. -<8> Specify the duration of the timeout for establishing connection with the peer. 
The timeout duration must be greater than or equal to 10 milliseconds. -<9> Specify the duration of the timeout to get a response from the peer. The timeout duration must be greater than or equal to 10 milliseconds. -<10> Specify the frequency to update peer information, such as IP address. The timeout duration must be greater than or equal to 10 seconds. - -[NOTE] -==== -You can edit the `self-node-remediation-config` CR that is created by the Self Node Remediation Operator. However, when you try to create a new CR for the Self Node Remediation Operator, the following message is displayed in the logs: - -[source,text] ----- -controllers.SelfNodeRemediationConfig -ignoring selfnoderemediationconfig CRs that are not named 'self-node-remediation-config' -or not in the namespace of the operator: -'openshift-operators' {"selfnoderemediationconfig": -"openshift-operators/selfnoderemediationconfig-copy"} ----- -==== - -[id="understanding-self-node-remediation-remediation-template-config_{context}"] -== Understanding the Self Node Remediation Template configuration - -The Self Node Remediation Operator also creates the `SelfNodeRemediationTemplate` Custom Resource Definition (CRD). This CRD defines the remediation strategy for the nodes. The following remediation strategies are available: - -`ResourceDeletion`:: This remediation strategy removes the pods and associated volume attachments on the node rather than the node object. This strategy helps to recover workloads faster. `ResourceDeletion` is the default remediation strategy. - -`NodeDeletion`:: This remediation strategy is deprecated and will be removed in a future release. In the current release, the `ResourceDeletion` strategy is used even if the `NodeDeletion` strategy is selected. - -The Self Node Remediation Operator creates the `SelfNodeRemediationTemplate` CR for the strategy `self-node-remediation-resource-deletion-template`, which the `ResourceDeletion` remediation strategy uses. - -The `SelfNodeRemediationTemplate` CR resembles the following YAML file: - -[source,yaml] ----- -apiVersion: self-node-remediation.medik8s.io/v1alpha1 -kind: SelfNodeRemediationTemplate -metadata: - creationTimestamp: "2022-03-02T08:02:40Z" - name: self-node-remediation-<remediation_object>-deletion-template <1> - namespace: openshift-operators -spec: - template: - spec: - remediationStrategy: <remediation_strategy> <2> ----- -<1> Specifies the type of remediation template based on the remediation strategy. Replace `<remediation_object>` with either `resource` or `node`; for example, `self-node-remediation-resource-deletion-template`. -//<2> Specifies the remediation strategy. The remediation strategy can either be `ResourceDeletion` or `NodeDeletion`. -<2> Specifies the remediation strategy. The remediation strategy is `ResourceDeletion`. diff --git a/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc b/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc deleted file mode 100644 index d34ae660e348..000000000000 --- a/modules/eco-self-node-remediation-operator-control-plane-fencing.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-node-health-check-operator.adoc - -:_content-type: CONCEPT -[id="control-plane-fencing-self-node-remediation-operator_{context}"] -= Control plane fencing - -In earlier releases, you could enable Self Node Remediation and Node Health Check on worker nodes. 
In the event of node failure, you can now also follow remediation strategies on control plane nodes. - -Self Node Remediation occurs in two primary scenarios. - -* API Server Connectivity -** In this scenario, the control plane node to be remediated is not isolated. It can be directly connected to the API Server, or it can be indirectly connected to the API Server through worker nodes or control-plane nodes, that are directly connected to the API Server. -** When there is API Server Connectivity, the control plane node is remediated only if the Node Health Check Operator has created a `SelfNodeRemediation` custom resource (CR) for the node. - -* No API Server Connectivity -** In this scenario, the control plane node to be remediated is isolated from the API Server. The node cannot connect directly or indirectly to the API Server. -** When there is no API Server Connectivity, the control plane node will be remediated as outlined with these steps: - - -*** Check the status of the control plane node with the majority of the peer worker nodes. If the majority of the peer worker nodes cannot be reached, the node will be analyzed further. -**** Self-diagnose the status of the control plane node -***** If self diagnostics passed, no action will be taken. -***** If self diagnostics failed, the node will be fenced and remediated. -***** The self diagnostics currently supported are checking the `kubelet` service status, and checking endpoint availability using `opt in` configuration. -*** If the node did not manage to communicate to most of its worker peers, check the connectivity of the control plane node with other control plane nodes. If the node can communicate with any other control plane peer, no action will be taken. Otherwise, the node will be fenced and remediated. diff --git a/modules/eco-self-node-remediation-operator-installation-cli.adoc b/modules/eco-self-node-remediation-operator-installation-cli.adoc deleted file mode 100644 index e68ea8230e84..000000000000 --- a/modules/eco-self-node-remediation-operator-installation-cli.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: PROCEDURE -[id="installing-self-node-remediation-operator-using-cli_{context}"] -= Installing the Self Node Remediation Operator by using the CLI - -You can use the OpenShift CLI (`oc`) to install the Self Node Remediation Operator. - -You can install the Self Node Remediation Operator in your own namespace or in the `openshift-operators` namespace. - -To install the Operator in your own namespace, follow the steps in the procedure. - -To install the Operator in the `openshift-operators` namespace, skip to step 3 of the procedure because the steps to create a new `Namespace` custom resource (CR) and an `OperatorGroup` CR are not required. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `Namespace` custom resource (CR) for the Self Node Remediation Operator: -.. Define the `Namespace` CR and save the YAML file, for example, `self-node-remediation-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: self-node-remediation ----- -.. To create the `Namespace` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-namespace.yaml ----- - -. Create an `OperatorGroup` CR: -.. 
Define the `OperatorGroup` CR and save the YAML file, for example, `self-node-remediation-operator-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: self-node-remediation-operator - namespace: self-node-remediation ----- -.. To create the `OperatorGroup` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-operator-group.yaml ----- - -. Create a `Subscription` CR: -.. Define the `Subscription` CR and save the YAML file, for example, `self-node-remediation-subscription.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: self-node-remediation-operator - namespace: self-node-remediation <1> -spec: - channel: stable - installPlanApproval: Manual <2> - name: self-node-remediation-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - package: self-node-remediation ----- -<1> Specify the `Namespace` where you want to install the Self Node Remediation Operator. To install the Self Node Remediation Operator in the `openshift-operators` namespace, specify `openshift-operators` in the `Subscription` CR. -<2> Set the approval strategy to Manual in case your specified version is superseded by a later version in the catalog. This plan prevents an automatic upgrade to a later version and requires manual approval before the starting CSV can complete the installation. - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f self-node-remediation-subscription.yaml ----- - -.Verification - -. Verify that the installation succeeded by inspecting the CSV resource: -+ -[source,terminal] ----- -$ oc get csv -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -self-node-remediation.v.0.4.0 Self Node Remediation Operator v.0.4.0 Succeeded ----- - -. Verify that the Self Node Remediation Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -self-node-remediation-controller-manager 1/1 1 1 28h ----- - -. Verify that the Self Node Remediation Operator created the `SelfNodeRemediationConfig` CR: -+ -[source,terminal] ----- -$ oc get selfnoderemediationconfig -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -self-node-remediation-config 28h ----- -. Verify that each self node remediation pod is scheduled and running on each worker node: -+ -[source,terminal] ----- -$ oc get daemonset -n self-node-remediation ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -self-node-remediation-ds 3 3 3 3 3 <none> 28h ----- -+ -[NOTE] -==== -This command is unsupported for the control plane nodes. 
-==== \ No newline at end of file diff --git a/modules/eco-self-node-remediation-operator-installation-web-console.adoc b/modules/eco-self-node-remediation-operator-installation-web-console.adoc deleted file mode 100644 index 1f44cdb63811..000000000000 --- a/modules/eco-self-node-remediation-operator-installation-web-console.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// *nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: PROCEDURE -[id="installing-self-node-remediation-operator-using-web-console_{context}"] -= Installing the Self Node Remediation Operator by using the web console - -You can use the {product-title} web console to install the Self Node Remediation Operator. - -[NOTE] -==== -The Node Health Check Operator also installs the Self Node Remediation Operator as a default remediation provider. -==== - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the Self Node Remediation Operator from the list of available Operators, and then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator is installed to the `openshift-operators` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-operators` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `self-node-remediation-controller-manager` project that are reporting issues. diff --git a/modules/eco-self-node-remediation-operator-troubleshooting.adoc b/modules/eco-self-node-remediation-operator-troubleshooting.adoc deleted file mode 100644 index 26935f3bcbf0..000000000000 --- a/modules/eco-self-node-remediation-operator-troubleshooting.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/eco-self-node-remediation-operator.adoc - -:_content-type: REFERENCE -[id="troubleshooting-self-node-remediation-operator_{context}"] -= Troubleshooting the Self Node Remediation Operator - -[id="general-troubleshooting-self-node-remediation-operator_{context}"] -== General troubleshooting - -Issue:: -You want to troubleshoot issues with the Self Node Remediation Operator. - -Resolution:: -Check the Operator logs. - -[id="checking-daemon-set_{context}"] -== Checking the daemon set -Issue:: The Self Node Remediation Operator is installed but the daemon set is not available. - -Resolution:: Check the Operator logs for errors or warnings. - -[id="unsuccessful_remediation{context}"] -== Unsuccessful remediation -Issue:: An unhealthy node was not remediated. - -Resolution:: Verify that the `SelfNodeRemediation` CR was created by running the following command: -+ -[source,terminal] ----- -$ oc get snr -A ----- -+ -If the `MachineHealthCheck` controller did not create the `SelfNodeRemediation` CR when the node turned unhealthy, check the logs of the `MachineHealthCheck` controller. Additionally, ensure that the `MachineHealthCheck` CR includes the required specification to use the remediation template. 
-+ -If the `SelfNodeRemediation` CR was created, ensure that its name matches the unhealthy node or the machine object. - -[id="daemon-set-exists_{context}"] -== Daemon set and other Self Node Remediation Operator resources exist even after uninstalling the Operator -Issue:: The Self Node Remediation Operator resources, such as the daemon set, configuration CR, and the remediation template CR, exist even after uninstalling the Operator. - -Resolution:: To remove the Self Node Remediation Operator resources, delete the resources by running the following commands for each resource type: -+ -[source,terminal] ----- -$ oc delete ds <self-node-remediation-ds> -n <namespace> ----- -+ -[source,terminal] ----- -$ oc delete snrc <self-node-remediation-config> -n <namespace> ----- -+ -[source,terminal] ----- -$ oc delete snrt <self-node-remediation-template> -n <namespace> ----- \ No newline at end of file diff --git a/modules/eco-setting-node-maintenance-actions-web-console.adoc deleted file mode 100644 index 40caf350883c..000000000000 --- a/modules/eco-setting-node-maintenance-actions-web-console.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-actions-web-console_{context}"] -= Setting a bare-metal node to maintenance mode -Set a bare-metal node to maintenance mode by using the Options menu {kebab} found on each node in the *Compute* -> *Nodes* list, or by using the *Actions* control of the *Node Details* screen. - -.Procedure - -. From the *Administrator* perspective of the web console, click *Compute* -> *Nodes*. -. You can set the node to maintenance from this screen, which makes it easier to perform actions on multiple nodes, or from the *Node Details* screen, where you can view comprehensive details of the selected node: -** Click the Options menu {kebab} at the end of the node and select *Start Maintenance*. -** Click the node name to open the *Node Details* screen and click -*Actions* -> *Start Maintenance*. -. Click *Start Maintenance* in the confirmation window. - -The node is no longer schedulable. If the node has virtual machines with the `LiveMigration` eviction strategy, those virtual machines are live migrated. All other pods and virtual machines on the node are deleted and recreated on another node. - -.Verification - -* Navigate to the *Compute* -> *Nodes* page and verify that the corresponding node has a status of `Under maintenance`. diff --git a/modules/eco-setting-node-maintenance-cr-cli.adoc deleted file mode 100644 index 31ef35f46993..000000000000 --- a/modules/eco-setting-node-maintenance-cr-cli.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-cr-cli_{context}"] -= Setting a node to maintenance mode by using the CLI - -You can put a node into maintenance mode with a `NodeMaintenance` custom resource (CR). When you apply a `NodeMaintenance` CR, all allowed pods are evicted and the node is rendered unschedulable. Evicted pods are queued to be moved to another node in the cluster. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in to the cluster as a user with `cluster-admin` privileges. - -.Procedure - -.
Create the following `NodeMaintenance` CR, and save the file as `nodemaintenance-cr.yaml`: -+ -[source,yaml] ----- -apiVersion: nodemaintenance.medik8s.io/v1beta1 -kind: NodeMaintenance -metadata: - name: nodemaintenance-cr <1> -spec: - nodeName: node-1.example.com <2> - reason: "NIC replacement" <3> ----- -<1> The name of the node maintenance CR. -<2> The name of the node to be put into maintenance mode. -<3> A plain text description of the reason for maintenance. -+ -. Apply the node maintenance CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f nodemaintenance-cr.yaml ----- - -.Verification - -. Check the progress of the maintenance task by running the following command: -+ -[source,terminal] ----- -$ oc describe node <node-name> ----- -+ -where `<node-name>` is the name of your node; for example, `node-1.example.com` - -. Check the example output: -+ -[source,terminal] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeNotSchedulable 61m kubelet Node node-1.example.com status is now: NodeNotSchedulable ----- diff --git a/modules/eco-setting-node-maintenance-cr-web-console.adoc b/modules/eco-setting-node-maintenance-cr-web-console.adoc deleted file mode 100644 index 77b6371406ef..000000000000 --- a/modules/eco-setting-node-maintenance-cr-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -//nodes/nodes/eco-node-maintenance-operator.adoc - -:_content-type: PROCEDURE -[id="eco-setting-node-maintenance-web-console_{context}"] -= Setting a node to maintenance mode by using the web console - -To set a node to maintenance mode, you can create a `NodeMaintenance` custom resource (CR) by using the web console. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. -* Install the Node Maintenance Operator from the *OperatorHub*. - -.Procedure - -. From the *Administrator* perspective in the web console, navigate to *Operators* → *Installed Operators*. - -. Select the Node Maintenance Operator from the list of Operators. - -. In the *Node Maintenance* tab, click *Create NodeMaintenance*. - -. In the *Create NodeMaintenance* page, select the *Form view* or the *YAML view* to configure the `NodeMaintenance` CR. - -. To apply the `NodeMaintenance` CR that you have configured, click *Create*. - -.Verification - -In the *Node Maintenance* tab, inspect the `Status` column and verify that its status is `Succeeded`. diff --git a/modules/edge-machine-pools-aws-local-zones.adoc b/modules/edge-machine-pools-aws-local-zones.adoc deleted file mode 100644 index 302d29b2b059..000000000000 --- a/modules/edge-machine-pools-aws-local-zones.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: CONCEPT -[id="edge-machine-pools-aws-local-zones_{context}"] -= Edge compute pools and AWS Local Zones - -Edge worker nodes are tainted worker nodes that run in AWS Local Zones locations. - -When deploying a cluster that uses Local Zones: - -* Amazon EC2 instances in the Local Zones are more expensive than Amazon EC2 instances in the Availability Zones. -* Latency between applications and end users is lower in Local Zones, and it may vary by location. There is a latency impact for some workloads if, for example, routers are mixed between Local Zones and Availability Zones. 
-* The cluster-network Maximum Transmission Unit (MTU) is adjusted automatically to the lower value required by AWS when Local Zone subnets are detected in the `install-config.yaml` file, according to the network plugin. For example, the adjusted values are 1200 for OVN-Kubernetes and 1250 for OpenShift SDN. If additional features are enabled, manual MTU adjustment might be necessary. - -[IMPORTANT] -==== -Generally, the Maximum Transmission Unit (MTU) between an Amazon EC2 instance in a Local Zone and an Amazon EC2 instance in the Region is 1300. For more information, see link:https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html[How Local Zones work] in the AWS documentation. -The cluster network MTU must always be less than the EC2 MTU to account for the overhead. The specific overhead is determined by the network plugin, for example: - -- OVN-Kubernetes: `100 bytes` -- OpenShift SDN: `50 bytes` - -The network plugin can provide additional features, such as IPsec, that also require the MTU to be decreased. For additional information, see the documentation. -==== diff --git a/modules/enable-aws-access.adoc deleted file mode 100644 index 82c2bf046caf..000000000000 --- a/modules/enable-aws-access.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/aws-private-connections.adoc - -:_content-type: CONCEPT -[id="enable-aws-access"] -= Understanding AWS cloud infrastructure access - -[NOTE] -==== -AWS cloud infrastructure access does not apply to the Customer Cloud Subscription (CCS) infrastructure type that is chosen when you create a cluster because CCS clusters are deployed onto your account. -==== - - -{AWS} infrastructure access permits link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators] and cluster owners to enable AWS Identity and Access Management (IAM) users to have federated access to the AWS Management Console for their {product-title} cluster. AWS access can be granted for customer AWS users, and private cluster access can be implemented to suit the needs of your {product-title} environment. - -. Get started with configuring AWS infrastructure access for your {product-title} cluster by creating an AWS user and account and providing that user with access to the {product-title} AWS account. - -. After you have access to the {product-title} AWS account, use one or more of the following methods to establish a private connection to your cluster: - -- Configuring AWS VPC peering: Enable VPC peering to route network traffic between two private IP addresses. - -- Configuring AWS VPN: Establish a Virtual Private Network to securely connect your private network to your Amazon Virtual Private Cloud. - -- Configuring AWS Direct Connect: Configure AWS Direct Connect to establish a dedicated network connection between your private network and an AWS Direct Connect location. - -// TODO: Was this supposed to be an xref that got yanked? Looks a little odd as is. I'd yank this and add it as an xref in an additional resources or next steps section in the assembly. -After configuring your cloud infrastructure access, learn more about Configuring a private cluster.
diff --git a/modules/enable-private-cluster-existing.adoc b/modules/enable-private-cluster-existing.adoc deleted file mode 100644 index 7b4e3fdfe902..000000000000 --- a/modules/enable-private-cluster-existing.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-private-cluster-existing_{context}"] -= Enabling an existing cluster to be private - - -After a cluster has been created, you can later enable the cluster to be private. - -.Prerequisites - -* The following private connections must be configured to allow private access: -** VPC Peering -** Cloud VPN -** DirectConnect (AWS only) -** TransitGateway (AWS only) -** Cloud Interconnect (GCP only) - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select the public cluster you would like to make private. - -. On the *Networking* tab, select *Make API private* under *Control Plane API endpoint*. -+ - -[WARNING] -==== -When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. -==== - -. Click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/enable-private-cluster-new.adoc b/modules/enable-private-cluster-new.adoc deleted file mode 100644 index d65f85c3d6bd..000000000000 --- a/modules/enable-private-cluster-new.adoc +++ /dev/null @@ -1,40 +0,0 @@ - -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-private-cluster-new_{context}"] -= Enabling a private cluster during cluster creation - - -You can enable private cluster settings when creating a new cluster. - -.Prerequisites - -* The following private connections must be configured to allow private access: -** VPC Peering -** Cloud VPN -** DirectConnect (AWS only) -** TransitGateway (AWS only) -** Cloud Interconnect (GCP only) - - -.Procedure - -. Log in to {cluster-manager-url}. -. Click *Create cluster* -> *{product-title}* -> *Create cluster*. -. Configure your cluster details. -. When selecting your preferred network configuration, select *Advanced*. -. Select *Private*. -+ -[WARNING] -==== -When set to *Private*, you cannot access your cluster unless you have configured the private connections in your cloud provider as outlined in the prerequisites. -==== - -. Click *Create cluster*. The cluster creation process begins and takes about 30-40 minutes to complete. - -.Verification - -* The *Installing cluster* heading, under the *Overview* tab, indicates that the cluster is installing and you can view the installation logs from this heading. The *Status* indicator under the *Details* heading indicates when your cluster is *Ready* for use. diff --git a/modules/enable-public-cluster.adoc b/modules/enable-public-cluster.adoc deleted file mode 100644 index 04e7954a735e..000000000000 --- a/modules/enable-public-cluster.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd_private_connections/private-cluster.adoc - -:_content-type: PROCEDURE -[id="enable-public-cluster_{context}"] -= Enabling an existing private cluster to be public -// TODO: These wordings of "enabling the cluster "to be public/private" could probably be improved. 
At the very least, these two modules should probably use "Configuring" instead of "Enabling", as it is worded now. - -After a private cluster has been created, you can later enable the cluster to be public. - -.Procedure - -. Log in to {cluster-manager-url}. - -. Select the private cluster you would like to make public. - -. On the *Networking* tab, deselect *Make API private* under *Control Plane API endpoint*. - -. Click *Change settings*. -+ -[NOTE] -==== -Transitioning your cluster between private and public can take several minutes to complete. -==== diff --git a/modules/enabling-additional-enabled-capabilities.adoc deleted file mode 100644 index 3f20ee5b09a0..000000000000 --- a/modules/enabling-additional-enabled-capabilities.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// *post_installation_configuration/cluster-capabilities.adoc - -[id="setting_additional_enabled_capabilities_{context}"] -= Enabling the cluster capabilities by setting additional enabled capabilities - -As a cluster administrator, you can enable the cluster capabilities by setting `additionalEnabledCapabilities`. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. View the additional enabled capabilities by running the following command: -+ -[source,terminal] ----- -$ oc get clusterversion version -o jsonpath='{.spec.capabilities.additionalEnabledCapabilities}{"\n"}' ----- - -+ -.Example output -[source,terminal] ----- -["openshift-samples"] ----- - -. To set the `additionalEnabledCapabilities`, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterversion/version --type merge -p '{"spec":{"capabilities":{"additionalEnabledCapabilities":["openshift-samples", "marketplace"]}}}' ----- - -[IMPORTANT] -==== -It is not possible to disable a capability which is already enabled in a cluster. The Cluster Version Operator (CVO) continues to reconcile a capability which is already enabled in the cluster. -==== - - -If you try to disable a capability, the CVO shows the divergent spec: -[source,terminal] ----- -$ oc get clusterversion version -o jsonpath='{.status.conditions[?(@.type=="ImplicitlyEnabledCapabilities")]}{"\n"}' ----- - -.Example output -[source,terminal] ----- -{"lastTransitionTime":"2022-07-22T03:14:35Z","message":"The following capabilities could not be disabled: openshift-samples","reason":"CapabilitiesImplicitlyEnabled","status":"True","type":"ImplicitlyEnabledCapabilities"} ----- - -[NOTE] -==== -During a cluster upgrade, it is possible that a given capability could be implicitly enabled. If a resource was already running on the cluster before the upgrade, then any capabilities that are part of that resource are enabled. For example, during a cluster upgrade, a resource that is already running on the cluster might be changed by the system to be part of the `marketplace` capability. Even if a cluster administrator did not explicitly enable the `marketplace` capability, it is implicitly enabled by the system.
-==== diff --git a/modules/enabling-baseline-capability-set.adoc b/modules/enabling-baseline-capability-set.adoc deleted file mode 100644 index 711ad6fefec4..000000000000 --- a/modules/enabling-baseline-capability-set.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// *post_installation_configuration/cluster-capabilities.adoc - -[id="setting_baseline_capability_set_{context}"] -= Enabling the cluster capabilities by setting baseline capability set - -As a cluster administrator, you can enable the capabilities by setting `baselineCapabilitySet`. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* To set the `baselineCapabilitySet`, run the following command: -+ -[source,terminal] ----- -$ oc patch clusterversion version --type merge -p '{"spec":{"capabilities":{"baselineCapabilitySet":"vCurrent"}}}' <1> ----- -+ -<1> For `baselineCapabilitySet` you can specify `vCurrent`, `v4.12`, `v4.13`, or `None`. - -include::snippets/capabilities-table.adoc[] diff --git a/modules/enabling-encapsulation.adoc b/modules/enabling-encapsulation.adoc deleted file mode 100644 index faaf4cdfe183..000000000000 --- a/modules/enabling-encapsulation.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/optimization/optimizing-cpu-usage.adoc - -:_content-type: PROCEDURE -[id="enabling-encapsulation_{context}"] -= Configuring mount namespace encapsulation - -You can configure mount namespace encapsulation so that a cluster runs with less resource overhead. - -[NOTE] -==== -Mount namespace encapsulation is a Technology Preview feature and it is disabled by default. To use it, you must enable the feature manually. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a file called `mount_namespace_config.yaml` with the following YAML: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 99-kubens-master -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - enabled: true - name: kubens.service ---- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 99-kubens-worker -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - enabled: true - name: kubens.service ----- - -. Apply the mount namespace `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc apply -f mount_namespace_config.yaml ----- -+ -.Example output -[source,terminal] ----- -machineconfig.machineconfiguration.openshift.io/99-kubens-master created -machineconfig.machineconfiguration.openshift.io/99-kubens-worker created ----- - -. The `MachineConfig` CR can take up to 30 minutes to finish being applied in the cluster. You can check the status of the `MachineConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-03d4bc4befb0f4ed3566a2c8f7636751 False True False 3 0 0 0 45m -worker rendered-worker-10577f6ab0117ed1825f8af2ac687ddf False True False 3 1 1 ----- - -. 
Wait for the `MachineConfig` CR to be applied successfully across all control plane and worker nodes by running the following command: -+ -[source,terminal] ----- -$ oc wait --for=condition=Updated mcp --all --timeout=30m ----- -+ -.Example output -[source,terminal] ----- -machineconfigpool.machineconfiguration.openshift.io/master condition met -machineconfigpool.machineconfiguration.openshift.io/worker condition met ----- - -.Verification - -To verify encapsulation for a cluster host, run the following commands: - -. Open a debug shell to the cluster host: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Open a `chroot` session: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -. Check the systemd mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/1/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531953] ----- - -. Check the kubelet mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/$(pgrep kubelet)/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531840] ----- - -. Check the CRI-O mount namespace: -+ -[source,terminal] ----- -sh-4.4# readlink /proc/$(pgrep crio)/ns/mnt ----- -+ -.Example output -[source,terminal] ----- -mnt:[4026531840] ----- - -These commands return the mount namespaces associated with systemd, kubelet, and the container runtime. In {product-title}, the container runtime is CRI-O. - -Encapsulation is in effect if systemd is in a different mount namespace from kubelet and CRI-O, as in the preceding example. -Encapsulation is not in effect if all three processes are in the same mount namespace. diff --git a/modules/enabling-etcd-encryption.adoc deleted file mode 100644 index a639a82f6066..000000000000 --- a/modules/enabling-etcd-encryption.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="enabling-etcd-encryption_{context}"] -= Enabling etcd encryption - -You can enable etcd encryption to encrypt sensitive resources in your cluster. - -[WARNING] -==== -Do not back up etcd resources until the initial encryption process is completed. If the encryption process is not completed, the backup might be only partially encrypted. - -After you enable etcd encryption, several changes can occur: - -* The etcd encryption might affect the memory consumption of a few resources. -* You might notice a transient effect on backup performance because the leader must serve the backup. -* Disk I/O can affect the node that receives the backup state. -==== - -You can encrypt the etcd database by using either AES-GCM or AES-CBC encryption. - -[NOTE] -==== -To migrate your etcd database from one encryption type to the other, you can modify the API server's `spec.encryption.type` field. Migration of the etcd data to the new encryption type occurs automatically. -==== - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Modify the `APIServer` object: -+ -[source,terminal] ----- -$ oc edit apiserver ----- - -. Set the `spec.encryption.type` field to `aesgcm` or `aescbc`: -+ -[source,yaml] ----- -spec: - encryption: - type: aesgcm <1> ----- -<1> Set to `aesgcm` for AES-GCM encryption or `aescbc` for AES-CBC encryption. - -. Save the file to apply the changes. -+ -The encryption process starts.
It can take 20 minutes or longer for this process to complete, depending on the size of the etcd database. - -. Verify that etcd encryption was successful. - -.. Review the `Encrypted` status condition for the OpenShift API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get openshiftapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: routes.route.openshift.io ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the Kubernetes API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: secrets, configmaps ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. - -.. Review the `Encrypted` status condition for the OpenShift OAuth API server to verify that its resources were successfully encrypted: -+ -[source,terminal] ----- -$ oc get authentication.operator.openshift.io -o=jsonpath='{range .items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -The output shows `EncryptionCompleted` upon successful encryption: -+ -[source,terminal] ----- -EncryptionCompleted -All resources encrypted: oauthaccesstokens.oauth.openshift.io, oauthauthorizetokens.oauth.openshift.io ----- -+ -If the output shows `EncryptionInProgress`, encryption is still in progress. Wait a few minutes and try again. diff --git a/modules/enabling-insights-advisor-recommendations.adoc b/modules/enabling-insights-advisor-recommendations.adoc deleted file mode 100644 index f11d3363ba36..000000000000 --- a/modules/enabling-insights-advisor-recommendations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: PROCEDURE -[id="enabling-insights-advisor-recommendations_{context}"] -= Enabling a previously disabled Insights Advisor recommendation - -When a recommendation is disabled for all clusters, you will no longer see the recommendation in Insights Advisor. You can change this behavior. - -.Prerequisites - -* Remote health reporting is enabled, which is the default. -* Your cluster is registered on {cluster-manager-url}. -* You are logged in to {cluster-manager-url}. - -.Procedure - -. Navigate to *Advisor* -> *Recommendations* on {cluster-manager-url}. -. Filter the recommendations by *Status* -> *Disabled*. -. Locate the recommendation to enable. -. Click the *Options* menu {kebab}, and then click *Enable recommendation*. 
diff --git a/modules/enabling-internal-api-server-vsphere.adoc deleted file mode 100644 index db927aaf9c7d..000000000000 --- a/modules/enabling-internal-api-server-vsphere.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc - -:_content-type: PROCEDURE -[id="enabling-internal-api-server-vsphere_{context}"] -= Enabling communication with the internal API server for the WMCO on vSphere - -The Windows Machine Config Operator (WMCO) downloads the Ignition config files from the internal API server endpoint. You must enable communication with the internal API server so that your Windows virtual machine (VM) can download the Ignition config files, and the kubelet on the configured VM can only communicate with the internal API server. - -.Prerequisites - -* You have installed a cluster on vSphere. - -.Procedure - -* Add a new DNS entry for `api-int.<cluster_name>.<base_domain>` that points to the external API server URL `api.<cluster_name>.<base_domain>`. This can be a CNAME or an additional A record. - -[NOTE] -==== -The external API endpoint was already created as part of the initial cluster installation on vSphere. -==== diff --git a/modules/enabling-multi-cluster-console.adoc deleted file mode 100644 index e8eb5c7bb5d4..000000000000 --- a/modules/enabling-multi-cluster-console.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/web-console.adoc - -:_content-type: PROCEDURE -[id="enable-multi-cluster-console_{context}"] -= Enabling multicluster in the web console - -:FeatureName: Multicluster console -include::snippets/technology-preview.adoc[leveloffset=+1] -// - -.Prerequisites -* Your cluster must be using the latest version of {product-title}. -* You must have link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/install/index[Red Hat Advanced Cluster Management (ACM) for Kubernetes 2.5] or the link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.5/html/multicluster_engine/index[multicluster engine (MCE) Operator] installed. -* You must have administrator privileges. - -[WARNING] -==== -Do not set this feature gate on production clusters. You will not be able to upgrade your cluster after applying the feature gate, and it cannot be undone. -==== - -.Procedure - -. Log in to the {product-title} web console using your credentials. - -. Enable ACM in the administrator perspective by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *Console* `console.operator.openshift.io` -> *Console Plugins* and click *Enable* for `acm`. - -. A pop-up window appears, notifying you that enabling this console plugin prompts a console refresh after the plugin is updated. Select `Enable` and click *Save*. - -. Repeat the previous two steps for the `mce` console plugin immediately after enabling `acm`. - -. A pop-up window that states that a web console update is available appears a few moments after you enable the plugins. Click *Refresh the web console* in the pop-up window to update. -+ -[NOTE] -==== -You might see the pop-up window to refresh the web console twice if the second redeployment has not occurred by the time you click *Refresh the web console*.
-==== - -** *local-cluster* and *All Clusters* is now visible above the perspectives in the navigation section. - -. Enable the feature gate by navigating from *Administration* -> *Cluster Settings* -> *Configuration* -> *FeatureGate*, and edit the YAML template as follows: -+ -[source,yaml] - ----- -spec: - featureSet: TechPreviewNoUpgrade ----- - -. Click *Save* to enable the multicluster console for all clusters. -+ -[IMPORTANT] -==== -After you save, this feature is enabled and cannot be undone. -==== diff --git a/modules/enabling-plug-in-browser.adoc b/modules/enabling-plug-in-browser.adoc deleted file mode 100644 index a58ddc2c0c1e..000000000000 --- a/modules/enabling-plug-in-browser.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * web_console/dynamic-plug-ins.adoc - -:_content-type: PROCEDURE -[id="enable-plug-in-browser_{context}"] -= Enable dynamic plugins in the web console -Cluster administrators can enable plugins in the web console browser. Dynamic plugins are disabled by default. In order to enable, a cluster administrator will need to enable them in the `console-operator` configuration. - -.Procedure - -. In the *Administration* -> *Cluster Settings* page of the web console, click the *Configuration* tab. - -. Click the `Console` `operator.openshift.io` configuration resource. - -. From there, click the *Console plugins* tab to view the dynamic plugins running. - -. In the `Status` column, click `Enable console plugin` in the pop-over menu, which will launch the `Console plugin enablement` modal. - -. Click `Enable` and `Save`. - -.Verification - -* Refresh the browser to view the enabled plugin. diff --git a/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc b/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc deleted file mode 100644 index 071d522d529a..000000000000 --- a/modules/ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes.adoc +++ /dev/null @@ -1,37 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-additional-details-about-volumeattributes-on-shared-resource-pod-volumes_{context}"] -= Additional details about VolumeAttributes on shared resource pod volumes - -[role="_abstract"] -The following attributes affect shared resource pod volumes in various ways: - -* The `refreshResource` attribute in the `volumeAttributes` properties. -* The `refreshResources` attribute in the Shared Resource CSI Driver configuration. -* The `sharedSecret` and `sharedConfigMap` attributes in the `volumeAttributes` properties. - -== The `refreshResource` attribute - -The Shared Resource CSI Driver honors the `refreshResource` attribute in `volumeAttributes` properties of the volume. This attribute controls whether updates to the contents of the underlying `Secret` or `ConfigMap` object are copied to the volume *after* the volume is initially provisioned as part of pod startup. The default value of `refreshResource` is `true`, which means that the contents are updated. - -[IMPORTANT] -==== -If the Shared Resource CSI Driver configuration has disabled the refreshing of both the shared `SharedSecret` and `SharedConfigMap` custom resource (CR) instances, then the `refreshResource` attribute in the `volumeAttribute` properties has no effect. The intent of this attribute is to disable refresh for specific volume mounts when refresh is generally allowed. 
==== - -== The `refreshResources` attribute - -You can use a global switch to enable or disable refreshing of shared resources. This switch is the `refreshResources` attribute in the `csi-driver-shared-resource-config` config map for the Shared Resource CSI Driver, which you can find in the `openshift-cluster-csi-drivers` namespace. If you set this `refreshResources` attribute to `false`, none of the `Secret` or `ConfigMap` object-related content stored in the volume is updated after the initial provisioning of the volume. - -[IMPORTANT] -==== -Using this Shared Resource CSI Driver configuration to disable refreshing affects all the cluster's volume mounts that use the Shared Resource CSI Driver, regardless of the `refreshResource` attribute in the `volumeAttributes` properties of any of those volumes. -==== - -== Validation of volumeAttributes before provisioning a shared resource volume for a pod - -In the `volumeAttributes` of a single volume, you must set either a `sharedSecret` or a `sharedConfigMap` attribute to the value of a `SharedSecret` or a `SharedConfigMap` CR instance. Otherwise, when the volume is provisioned during pod startup, a validation checks the `volumeAttributes` of that volume and returns an error to the kubelet under the following conditions: - -* Both `sharedSecret` and `sharedConfigMap` attributes have specified values. -* Neither `sharedSecret` nor `sharedConfigMap` attributes have specified values. -* The value of the `sharedSecret` or `sharedConfigMap` attribute does not correspond to the name of a `SharedSecret` or `SharedConfigMap` CR instance on the cluster. diff --git a/modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc deleted file mode 100644 index a205bece172a..000000000000 --- a/modules/ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver.adoc +++ /dev/null @@ -1,13 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-additional-support-limitations-for-shared-resource-csi-driver_{context}"] -= Additional support limitations for the Shared Resource CSI Driver - -[role="_abstract"] -The Shared Resource CSI Driver has the following noteworthy limitations: - -* The driver is subject to the limitations of Container Storage Interface (CSI) inline ephemeral volumes. -* The value of the `readOnly` field must be `true`. On `Pod` creation, a validating admission webhook rejects the pod creation if `readOnly` is `false`. If the validating admission webhook cannot be contacted, the driver returns an error to the kubelet when the volume is provisioned during pod startup. Requiring `readOnly` to be `true` is in keeping with proposed best practices for the upstream Kubernetes CSI Driver to apply SELinux labels to associated volumes. -* The driver ignores the `FSType` field because it only supports `tmpfs` volumes. -* The driver ignores the `NodePublishSecretRef` field. Instead, it uses `SubjectAccessReviews` with the `use` verb to evaluate whether a pod can obtain a volume that contains `SharedSecret` or `SharedConfigMap` custom resource (CR) instances. -* You cannot create `SharedSecret` or `SharedConfigMap` custom resource (CR) instances whose names start with `openshift`.
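To illustrate the `refreshResource` and `readOnly` behavior described in the preceding modules, the following minimal sketch shows a pod that mounts a `SharedSecret` CR instance named `my-share` through the Shared Resource CSI Driver with refreshing disabled for that one volume mount. The pod name, namespace, and container are hypothetical placeholders, and the pod's service account is assumed to already have RBAC permission to use the `SharedSecret` CR instance:

[source,yaml]
----
kind: Pod
apiVersion: v1
metadata:
  name: my-refresh-disabled-app <1>
  namespace: my-namespace
spec:
  serviceAccountName: default
  containers:
    - name: my-frontend
      image: busybox
      command: [ "sleep", "1000000" ]
      volumeMounts:
        - mountPath: "/data"
          name: my-csi-volume
  volumes:
    - name: my-csi-volume
      csi:
        readOnly: true <2>
        driver: csi.sharedresource.openshift.io
        volumeAttributes:
          sharedSecret: my-share <3>
          refreshResource: "false" <4>
----
<1> A hypothetical pod name used only for this example.
<2> `readOnly` must be `true` for the Shared Resource CSI Driver.
<3> The name of the `SharedSecret` CR instance to mount.
<4> Disables content refresh for this volume mount only. If refreshing is disabled globally through the `refreshResources` attribute in the `csi-driver-shared-resource-config` config map, this per-volume attribute has no additional effect.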
diff --git a/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc b/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc deleted file mode 100644 index 6ddf45dbcb9d..000000000000 --- a/modules/ephemeral-storage-csi-inline-overview-admin-plugin.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline.adoc - -:_content-type: CONCEPT -[id="ephemeral-storage-csi-overview-admin-plugin_{context}"] -= CSI Volume Admission plugin - -The Container Storage Interface (CSI) Volume Admission plugin allows you to restrict the use of an individual CSI driver capable of provisioning CSI ephemeral volumes on pod admission. Administrators can add a `csi-ephemeral-volume-profile` label, and this label is then inspected by the Admission plugin and used in enforcement, warning, and audit decisions. - -[id="overview-admission-plugin"] -== Overview - -To use the CSI Volume Admission plugin, administrators add the `security.openshift.io/csi-ephemeral-volume-profile` label to a `CSIDriver` object, which declares the CSI driver’s effective pod security profile when it is used to provide CSI ephemeral volumes, as shown in the following example: - -[source, yaml] ----- -kind: CSIDriver -metadata: - name: csi.mydriver.company.org - labels: - security.openshift.io/csi-ephemeral-volume-profile: restricted <1> ----- -<1> CSI driver object YAML file with the `csi-ephemeral-volume-profile` label set to "restricted" - -This “effective profile” communicates that a pod can use the CSI driver to mount CSI ephemeral volumes when the pod’s namespace is governed by a pod security standard. - -The CSI Volume Admission plugin inspects pod volumes when pods are created; existing pods that use CSI volumes are not affected. If a pod uses a container storage interface (CSI) volume, the plugin looks up the `CSIDriver` object and inspects the `csi-ephemeral-volume-profile` label, and then use the label’s value in its enforcement, warning, and audit decisions. - -[id="security-profile-enforcement"] -== Pod security profile enforcement - -When a CSI driver has the `csi-ephemeral-volume-profile` label, pods using the CSI driver to mount CSI ephemeral volumes must run in a namespace that enforces a pod security standard of equal or greater permission. If the namespace enforces a more restrictive standard, the CSI Volume Admission plugin denies admission. The following table describes the enforcement behavior for different pod security profiles for given label values. - -.Pod security profile enforcement -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|Allowed -|Denied -|Denied - -|Baseline -|Allowed -|Allowed -|Denied - -|Privileged -|Allowed -|Allowed -|Allowed -|=== - -[id="security-profile-warning"] -== Pod security profile warning -The CSI Volume Admission plugin can warn you if the CSI driver’s effective profile is more permissive than the pod security warning profile for the pod namespace. The following table shows when a warning occurs for different pod security profiles for given label values. 
- -.Pod security profile warning -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|No warning -|Warning -|Warning - -|Baseline -|No warning -|No warning -|Warning - -|Privileged -|No warning -|No warning -|No warning -|=== - -[id="security-profile-audit"] -== Pod security profile audit -The CSI Volume Admission plugin can apply audit annotations to the pod if the CSI driver’s effective profile is more permissive than the pod security audit profile for the pod namespace. The following table shows the audit annotation applied for different pod security profiles for given label values. - -.Pod security profile audit -[cols=",^v,^v,^v,^v width="100%",options="header"] -|=== -|Pod security profile|Driver label: restricted| Driver label: baseline | Driver label: privileged - -|Restricted -|No audit -|Audit -|Audit - -|Baseline -|No audit -|No audit -|Audit - -|Privileged -|No audit -|No audit -|No audit -|=== - -[id="admission-plugin-default-behavior"] -== Default behavior for the CSI Volume Admission plugin - -If the referenced CSI driver for a CSI ephemeral volume does not have the `csi-ephemeral-volume-profile` label, the CSI Volume Admission plugin considers the driver to have the privileged profile for enforcement, warning, and audit behaviors. Likewise, if the pod’s namespace does not have the pod security admission label set, the Admission plugin assumes the restricted profile is allowed for enforcement, warning, and audit decisions. Therefore, if no labels are set, CSI ephemeral volumes using that CSI driver are only usable in privileged namespaces by default. - -The CSI drivers that ship with {product-title} and support ephemeral volumes have a reasonable default set for the `csi-ephemeral-volume-profile` label: - -* Shared Resource CSI driver: restricted - -* Azure File CSI driver: privileged - -An admin can change the default value of the label if desired. \ No newline at end of file diff --git a/modules/ephemeral-storage-csi-inline-overview.adoc b/modules/ephemeral-storage-csi-inline-overview.adoc deleted file mode 100644 index cb97b8bdaf70..000000000000 --- a/modules/ephemeral-storage-csi-inline-overview.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline.adoc - -:_content-type: CONCEPT -[id="ephemeral-storage-csi-inline-overview_{context}"] -= Overview of CSI inline ephemeral volumes - -Traditionally, volumes that are backed by Container Storage Interface (CSI) drivers can only be used with a `PersistentVolume` and `PersistentVolumeClaim` object combination. - -This feature allows you to specify CSI volumes directly in the `Pod` specification, rather than in a `PersistentVolume` object. Inline volumes are ephemeral and do not persist across pod restarts. - -== Support limitations - -By default, {product-title} supports CSI inline ephemeral volumes with these limitations: - -* Support is only available for CSI drivers. In-tree and FlexVolumes are not supported. -* The Shared Resource CSI Driver supports using inline ephemeral volumes only to access `Secrets` or `ConfigMaps` across multiple namespaces as a Technology Preview feature. -* Community or storage vendors provide other CSI drivers that support these volumes. Follow the installation instructions provided by the CSI driver provider. 
- -CSI drivers might not have implemented the inline volume functionality, including `Ephemeral` capacity. For details, see the CSI driver documentation. - -:FeatureName: Shared Resource CSI Driver -include::snippets/technology-preview.adoc[leveloffset=+0] diff --git a/modules/ephemeral-storage-csi-inline-pod.adoc b/modules/ephemeral-storage-csi-inline-pod.adoc deleted file mode 100644 index 67f8c93e9cb2..000000000000 --- a/modules/ephemeral-storage-csi-inline-pod.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/container_storage_interface/ephemeral-storage-csi-inline-pod-scheduling.adoc - -:_content-type: PROCEDURE -[id="ephemeral-storage-csi-inline-pod_{context}"] -= Embedding a CSI inline ephemeral volume in the pod specification - -You can embed a CSI inline ephemeral volume in the `Pod` specification in {product-title}. At runtime, nested inline volumes follow the ephemeral lifecycle of their associated pods so that the CSI driver handles all phases of volume operations as pods are created and destroyed. - -.Procedure - -. Create the `Pod` object definition and save it to a file. - -. Embed the CSI inline ephemeral volume in the file. -+ -.my-csi-app.yaml -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - name: my-csi-app -spec: - containers: - - name: my-frontend - image: busybox - volumeMounts: - - mountPath: "/data" - name: my-csi-inline-vol - command: [ "sleep", "1000000" ] - volumes: <1> - - name: my-csi-inline-vol - csi: - driver: inline.storage.kubernetes.io - volumeAttributes: - foo: bar ----- -<1> The name of the volume that is used by pods. - -. Create the object definition file that you saved in the previous step. -+ -[source,terminal] ----- -$ oc create -f my-csi-app.yaml ----- diff --git a/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc b/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc deleted file mode 100644 index 62691d8cd385..000000000000 --- a/modules/ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: REFERENCE - -[id="ephemeral-storage-integration-between-shared-resources-insights-operator-and-openshift-builds_{context}"] -= Integration between shared resources, Insights Operator, and {product-title} Builds - -[role="_abstract"] -Integration between shared resources, Insights Operator, and {product-title} Builds makes using Red Hat subscriptions (RHEL entitlements) easier in {product-title} Builds. - -Previously, in {product-title} 4.9.x and earlier, you manually imported your credentials and copied them to each project or namespace where you were running builds. - -Now, in {product-title} 4.10 and later, {product-title} Builds can use Red Hat subscriptions (RHEL entitlements) by referencing shared resources and the simple content access feature provided by Insights Operator: - -* The simple content access feature imports your subscription credentials to a well-known `Secret` object. See the links in the following "Additional resources" section. -* The cluster administrator creates a `SharedSecret` custom resource (CR) instance around that `Secret` object and grants permission to particular projects or namespaces. In particular, the cluster administrator gives the `builder` service account permission to use that `SharedSecret` CR instance. 
-* Builds that run within those projects or namespaces can mount a CSI Volume that references the `SharedSecret` CR instance and its entitled RHEL content. diff --git a/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc b/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc deleted file mode 100644 index 9da4cf670d59..000000000000 --- a/modules/ephemeral-storage-sharing-configmaps-across-namespaces.adoc +++ /dev/null @@ -1,36 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-sharing-configmaps-across-namespaces_{context}"] -= Sharing a config map across namespaces - -[role="_abstract"] -To share a config map across namespaces in a cluster, you create a `SharedConfigMap` custom resource (CR) instance for that config map. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedconfigmaps.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings across the namespaces in the cluster to control which service accounts in pods that mount your Container Storage Interface (CSI) volume can use those instances. -* Access the namespaces that contain the Secrets you want to share. - -.Procedure - -. Create a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedConfigMap -metadata: - name: my-share -spec: - configMapRef: - name: <name of configmap> - namespace: <namespace of configmap> -EOF ----- - -.Next steps diff --git a/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc b/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc deleted file mode 100644 index fd80147e7c5c..000000000000 --- a/modules/ephemeral-storage-sharing-secrets-across-namespaces.adoc +++ /dev/null @@ -1,34 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-sharing-secrets-across-namespaces_{context}"] -= Sharing secrets across namespaces - -[role="_abstract"] -To share a secret across namespaces in a cluster, you create a `SharedSecret` custom resource (CR) instance for the `Secret` object that you want to share. - -.Prerequisites - -You must have permission to perform the following actions: - -* Create instances of the `sharedsecrets.sharedresource.openshift.io` custom resource definition (CRD) at a cluster-scoped level. -* Manage roles and role bindings across the namespaces in the cluster to control which users can get, list, and watch those instances. -* Manage roles and role bindings to control whether the service account specified by a pod can mount a Container Storage Interface (CSI) volume that references the `SharedSecret` CR instance you want to use. -* Access the namespaces that contain the Secrets you want to share. 
- -.Procedure - -* Create a `SharedSecret` CR instance for the `Secret` object you want to share across namespaces in the cluster: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: sharedresource.openshift.io/v1alpha1 -kind: SharedSecret -metadata: - name: my-share -spec: - secretRef: - name: <name of secret> - namespace: <namespace of secret> -EOF ----- diff --git a/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc b/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc deleted file mode 100644 index 552ea3fc2c29..000000000000 --- a/modules/ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-using-a-sharedconfigmap-object-in-a-pod_{context}"] -= Using a SharedConfigMap instance in a pod - -[role="_abstract"] -To access a `SharedConfigMap` custom resource (CR) instance from a pod, you grant a given service account RBAC permissions to use that `SharedConfigMap` CR instance. - -.Prerequisites - -* You have created a `SharedConfigMap` CR instance for the config map that you want to share across namespaces in the cluster. -* You must have permission to perform the following actions: -** Discover which `SharedConfigMap` CR instances are available by entering the `oc get sharedconfigmaps` command and getting a non-empty list back. -** Determine if the service account your pod specifies is allowed to use the given `SharedSecret` CR instance. That is, you can run `oc adm policy who-can use <identifier of specific SharedSecret>` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedConfigMap` CR instances and enable service accounts to use `SharedConfigMap` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedConfigMap` CR instance in its pod by using `oc apply` with YAML content. -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming a `SharedConfigMap` CR instance. -==== -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: shared-resource-my-share - namespace: my-namespace -rules: - - apiGroups: - - sharedresource.openshift.io - resources: - - sharedconfigmaps - resourceNames: - - my-share - verbs: - - use -EOF ----- - -. Create the `RoleBinding` associated with the role by using the `oc` command: -+ -[source,terminal] ----- -oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:builder ----- - -. Access the `SharedConfigMap` CR instance from a pod: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -kind: Pod -apiVersion: v1 -metadata: - name: my-app - namespace: my-namespace -spec: - serviceAccountName: default - -# containers omitted …. 
Follow standard use of ‘volumeMounts’ for referencing your shared resource volume - - volumes: - - name: my-csi-volume - csi: - readOnly: true - driver: csi.sharedresource.openshift.io - volumeAttributes: - sharedConfigMap: my-share - -EOF ----- diff --git a/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc b/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc deleted file mode 100644 index 8ffe930a3526..000000000000 --- a/modules/ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod.adoc +++ /dev/null @@ -1,82 +0,0 @@ -:_content-type: PROCEDURE - -[id="ephemeral-storage-using-a-sharedsecrets-resource-in-a-pod_{context}"] -= Using a SharedSecret instance in a pod - -[role="_abstract"] -To access a `SharedSecret` custom resource (CR) instance from a pod, you grant a given service account RBAC permissions to use that `SharedSecret` CR instance. - -.Prerequisites - -* You have created a `SharedSecret` CR instance for the secret you want to share across namespaces in the cluster. -* You must have permission to perform the following actions -** Discover which `SharedSecret` CR instances are available by entering the `oc get sharedsecrets` command and getting a non-empty list back. -** Determine if the service account your pod specifies is allowed to use the given `SharedSecret` CR instance. That is, you can run `oc adm policy who-can use <identifier of specific SharedSecret>` to see if the service account in your namespace is listed. -** Determine if the service account your pod specifies is allowed to use `csi` volumes, or if you, as the requesting user who created the pod directly, are allowed to use `csi` volumes. See "Understanding and managing pod security admission" for details. - -[NOTE] -==== -If neither of the last two prerequisites in this list are met, create, or ask someone to create, the necessary role-based access control (RBAC) so that you can discover `SharedSecret` CR instances and enable service accounts to use `SharedSecret` CR instances. -==== - -.Procedure - -. Grant a given service account RBAC permissions to use the `SharedSecret` CR instance in its pod by using `oc apply` with YAML content: -+ -[NOTE] -==== -Currently, `kubectl` and `oc` have hard-coded special case logic restricting the `use` verb to roles centered around pod security. Therefore, you cannot use `oc create role ...` to create the role needed for consuming `SharedSecret` CR instances. -==== -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: shared-resource-my-share - namespace: my-namespace -rules: - - apiGroups: - - sharedresource.openshift.io - resources: - - sharedsecrets - resourceNames: - - my-share - verbs: - - use -EOF ----- - -. Create the `RoleBinding` associated with the role by using the `oc` command: -+ -[source,terminal] ----- -$ oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:builder ----- - -. Access the `SharedSecret` CR instance from a pod: -+ -[source,terminal] ----- -$ oc apply -f - <<EOF -kind: Pod -apiVersion: v1 -metadata: - name: my-app - namespace: my-namespace -spec: - serviceAccountName: default - -# containers omitted …. 
Follow standard use of ‘volumeMounts’ for referencing your shared resource volume - - volumes: - - name: my-csi-volume - csi: - readOnly: true - driver: csi.sharedresource.openshift.io - volumeAttributes: - sharedSecret: my-share - -EOF ----- diff --git a/modules/etcd-defrag.adoc b/modules/etcd-defrag.adoc deleted file mode 100644 index 0611d436cce1..000000000000 --- a/modules/etcd-defrag.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc -// * scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc - -:_content-type: PROCEDURE -[id="etcd-defrag_{context}"] -= Defragmenting etcd data - -For large and dense clusters, etcd can suffer from poor performance if the keyspace grows too large and exceeds the space quota. Periodically maintain and defragment etcd to free up space in the data store. Monitor Prometheus for etcd metrics and defragment it when required; otherwise, etcd can raise a cluster-wide alarm that puts the cluster into a maintenance mode that accepts only key reads and deletes. - -Monitor these key metrics: - -* `etcd_server_quota_backend_bytes`, which is the current quota limit -* `etcd_mvcc_db_total_size_in_use_in_bytes`, which indicates the actual database usage after a history compaction -* `etcd_mvcc_db_total_size_in_bytes`, which shows the database size, including free space waiting for defragmentation - -Defragment etcd data to reclaim disk space after events that cause disk fragmentation, such as etcd history compaction. - -History compaction is performed automatically every five minutes and leaves gaps in the back-end database. This fragmented space is available for use by etcd, but is not available to the host file system. You must defragment etcd to make this space available to the host file system. - -Defragmentation occurs automatically, but you can also trigger it manually. - -[NOTE] -==== -Automatic defragmentation is good for most cases, because the etcd operator uses cluster information to determine the most efficient operation for the user. -==== - -[id="automatic-defrag-etcd-data_{context}"] -== Automatic defragmentation - -The etcd Operator automatically defragments disks. No manual intervention is needed. - -Verify that the defragmentation process is successful by viewing one of these logs: - -* etcd logs -* cluster-etcd-operator pod -* operator status error log - -[WARNING] -==== -Automatic defragmentation can cause leader election failure in various OpenShift core components, such as the Kubernetes controller manager, which triggers a restart of the failing component. The restart is harmless and either triggers failover to the next running instance or the component resumes work again after the restart. -==== - -.Example log output for successful defragmentation -[source,terminal] -[subs="+quotes"] ----- -etcd member has been defragmented: __<member_name>__, memberID: __<member_id>__ ----- - -.Example log output for unsuccessful defragmentation -[source,terminal] -[subs="+quotes"] ----- -failed defrag on member: __<member_name>__, memberID: __<member_id>__: __<error_message>__ ----- - -[id="manual-defrag-etcd-data_{context}"] -== Manual defragmentation - -//You can monitor the `etcd_db_total_size_in_bytes` metric to determine whether manual defragmentation is necessary. - -A Prometheus alert indicates when you need to use manual defragmentation. 
The alert is displayed in two cases: - - * When etcd uses more than 50% of its available space for more than 10 minutes - * When etcd is actively using less than 50% of its total database size for more than 10 minutes - -You can also determine whether defragmentation is needed by checking the etcd database size in MB that will be freed by defragmentation with the PromQL expression: `(etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes)/1024/1024` - -[WARNING] -==== -Defragmenting etcd is a blocking action. The etcd member will not respond until defragmentation is complete. For this reason, wait at least one minute between defragmentation actions on each of the pods to allow the cluster to recover. -==== - -Follow this procedure to defragment etcd data on each etcd member. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Determine which etcd member is the leader, because the leader should be defragmented last. - -.. Get the list of etcd pods: -+ -[source,terminal] ----- -$ oc -n openshift-etcd get pods -l k8s-app=etcd -o wide ----- -+ -.Example output -[source,terminal] ----- -etcd-ip-10-0-159-225.example.redhat.com 3/3 Running 0 175m 10.0.159.225 ip-10-0-159-225.example.redhat.com <none> <none> -etcd-ip-10-0-191-37.example.redhat.com 3/3 Running 0 173m 10.0.191.37 ip-10-0-191-37.example.redhat.com <none> <none> -etcd-ip-10-0-199-170.example.redhat.com 3/3 Running 0 176m 10.0.199.170 ip-10-0-199-170.example.redhat.com <none> <none> ----- - -.. Choose a pod and run the following command to determine which etcd member is the leader: -+ -[source,terminal] ----- -$ oc rsh -n openshift-etcd etcd-ip-10-0-159-225.example.redhat.com etcdctl endpoint status --cluster -w table ----- -+ -.Example output -[source,terminal] ----- -Defaulting container name to etcdctl. -Use 'oc describe pod/etcd-ip-10-0-159-225.example.redhat.com -n openshift-etcd' to see all of the containers in this pod. -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| https://10.0.191.37:2379 | 251cd44483d811c3 | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.159.225:2379 | 264c7c58ecbdabee | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.199.170:2379 | 9ac311f93915cc79 | 3.4.9 | 104 MB | true | false | 7 | 91624 | 91624 | | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ ----- -+ -Based on the `IS LEADER` column of this output, the [x-]`https://10.0.199.170:2379` endpoint is the leader. Matching this endpoint with the output of the previous step, the pod name of the leader is `etcd-ip-10-0-199-170.example.redhat.com`. - -. Defragment an etcd member. - -.. Connect to the running etcd container, passing in the name of a pod that is _not_ the leader: -+ -[source,terminal] ----- -$ oc rsh -n openshift-etcd etcd-ip-10-0-159-225.example.redhat.com ----- - -.. Unset the `ETCDCTL_ENDPOINTS` environment variable: -+ -[source,terminal] ----- -sh-4.4# unset ETCDCTL_ENDPOINTS ----- - -.. 
Defragment the etcd member: -+ -[source,terminal] ----- -sh-4.4# etcdctl --command-timeout=30s --endpoints=https://localhost:2379 defrag ----- -+ -.Example output -[source,terminal] ----- -Finished defragmenting etcd member[https://localhost:2379] ----- -+ -If a timeout error occurs, increase the value for `--command-timeout` until the command succeeds. - -.. Verify that the database size was reduced: -+ -[source,terminal] ----- -sh-4.4# etcdctl endpoint status -w table --cluster ----- -+ -.Example output -[source,terminal] ----- -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ -| https://10.0.191.37:2379 | 251cd44483d811c3 | 3.4.9 | 104 MB | false | false | 7 | 91624 | 91624 | | -| https://10.0.159.225:2379 | 264c7c58ecbdabee | 3.4.9 | 41 MB | false | false | 7 | 91624 | 91624 | | <1> -| https://10.0.199.170:2379 | 9ac311f93915cc79 | 3.4.9 | 104 MB | true | false | 7 | 91624 | 91624 | | -+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+ ----- -This example shows that the database size for this etcd member is now 41 MB as opposed to the starting size of 104 MB. - -.. Repeat these steps to connect to each of the other etcd members and defragment them. Always defragment the leader last. -+ -Wait at least one minute between defragmentation actions to allow the etcd pod to recover. Until the etcd pod recovers, the etcd member will not respond. - -. If any `NOSPACE` alarms were triggered due to the space quota being exceeded, clear them. - -.. Check if there are any `NOSPACE` alarms: -+ -[source,terminal] ----- -sh-4.4# etcdctl alarm list ----- -+ -.Example output -[source,terminal] ----- -memberID:12345678912345678912 alarm:NOSPACE ----- - -.. Clear the alarms: -+ -[source,terminal] ----- -sh-4.4# etcdctl alarm disarm ----- - -.Next steps - -After defragmentation, if etcd still uses more than 50% of its available space, consider increasing the disk quota for etcd. diff --git a/modules/etcd-encryption-types.adoc b/modules/etcd-encryption-types.adoc deleted file mode 100644 index a06811bb20f9..000000000000 --- a/modules/etcd-encryption-types.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/encrypting-etcd.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="etcd-encryption-types_{context}"] -= Supported encryption types - -The following encryption types are supported for encrypting etcd data in {product-title}: - -AES-CBC:: Uses AES-CBC with PKCS#7 padding and a 32 byte key to perform the encryption. The encryption keys are rotated weekly. - -AES-GCM:: Uses AES-GCM with a random nonce and a 32 byte key to perform the encryption. The encryption keys are rotated weekly. 
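The module above lists the supported types but does not show where the type is chosen. As a minimal sketch, and assuming the type is selected through the cluster-scoped `APIServer` resource named `cluster` (the resource and field names below are assumptions, not taken from this module), the selection might look like this:

[source,terminal]
----
$ oc edit apiserver cluster
----

[source,yaml]
----
# Sketch only: selects one of the supported types listed above.
# Resource and field names are assumed, not confirmed by this module.
spec:
  encryption:
    type: aesgcm  # or aescbc
----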
diff --git a/modules/etcd-operator.adoc b/modules/etcd-operator.adoc deleted file mode 100644 index 7c8600c604ee..000000000000 --- a/modules/etcd-operator.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="etcd-cluster-operator_{context}"] -= etcd cluster Operator - -[discrete] -== Purpose - -The etcd cluster Operator automates etcd cluster scaling, enables etcd monitoring and metrics, and simplifies disaster recovery procedures. -[discrete] -== Project - -link:https://github.com/openshift/cluster-etcd-operator/[cluster-etcd-operator] - -[discrete] -== CRDs - -* `etcds.operator.openshift.io` -** Scope: Cluster -** CR: `etcd` -** Validation: Yes - -[discrete] -== Configuration objects - -[source,terminal] ----- -$ oc edit etcd cluster ----- diff --git a/modules/etcd-overview.adoc b/modules/etcd-overview.adoc deleted file mode 100644 index 431353d9f2e1..000000000000 --- a/modules/etcd-overview.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc - - -:_content-type: CONCEPT -[id="etcd-overview_{context}"] -= Overview of etcd - -etcd is a consistent, distributed key-value store that holds small amounts of data that can fit entirely in memory. Although etcd is a core component of many projects, it is the primary data store for Kubernetes, which is the standard system for container orchestration. - -[id="etcd-benefits_{context}"] -== Benefits of using etcd - -By using etcd, you can benefit in several ways: - -* Maintain consistent uptime for your cloud-native applications, and keep them working even if individual servers fail -* Store and replicate all cluster states for Kubernetes -* Distribute configuration data to provide redundancy and resiliency for the configuration of nodes - -[id="etcd-architecture_{context}"] -== How etcd works - -To ensure a reliable approach to cluster configuration and management, etcd uses the etcd Operator. The Operator simplifies the use of etcd on a Kubernetes container platform like {product-title}. With the etcd Operator, you can create or delete etcd members, resize clusters, perform backups, and upgrade etcd. - -The etcd Operator observes, analyzes, and acts: - -. It observes the cluster state by using the Kubernetes API. -. It analyzes differences between the current state and the state that you want. -. It fixes the differences through the etcd cluster management APIs, the Kubernetes API, or both. - -etcd holds the cluster state, which is constantly updated. This state is continuously persisted, which leads to a high number of small changes at high frequency. As a result, it is critical to back the etcd cluster member with fast, low-latency I/O. For more information about best practices for etcd, see "Recommended etcd practices". diff --git a/modules/example-apache-httpd-configuration.adoc b/modules/example-apache-httpd-configuration.adoc deleted file mode 100644 index 1eef9c54072c..000000000000 --- a/modules/example-apache-httpd-configuration.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -[id="example-apache-httpd-configuration_{context}"] -= Example Apache HTTPD configuration for basic identity providers - -The basic identify provider (IDP) configuration in {product-title} 4 requires -that the IDP server respond with JSON for success and failures. 
You can use CGI -scripting in Apache HTTPD to accomplish this. This section provides examples. - -.Example `/etc/httpd/conf.d/login.conf` ----- -<VirtualHost *:443> - # CGI Scripts in here - DocumentRoot /var/www/cgi-bin - - # SSL Directives - SSLEngine on - SSLCipherSuite PROFILE=SYSTEM - SSLProxyCipherSuite PROFILE=SYSTEM - SSLCertificateFile /etc/pki/tls/certs/localhost.crt - SSLCertificateKeyFile /etc/pki/tls/private/localhost.key - - # Configure HTTPD to execute scripts - ScriptAlias /basic /var/www/cgi-bin - - # Handles a failed login attempt - ErrorDocument 401 /basic/fail.cgi - - # Handles authentication - <Location /basic/login.cgi> - AuthType Basic - AuthName "Please Log In" - AuthBasicProvider file - AuthUserFile /etc/httpd/conf/passwords - Require valid-user - </Location> -</VirtualHost> ----- - -.Example `/var/www/cgi-bin/login.cgi` ----- -#!/bin/bash -echo "Content-Type: application/json" -echo "" -echo '{"sub":"userid", "name":"'$REMOTE_USER'"}' -exit 0 ----- - -.Example `/var/www/cgi-bin/fail.cgi` ----- -#!/bin/bash -echo "Content-Type: application/json" -echo "" -echo '{"error": "Login failure"}' -exit 0 ----- - -== File requirements - -These are the requirements for the files you create on an Apache HTTPD web -server: - -* `login.cgi` and `fail.cgi` must be executable (`chmod +x`). -* `login.cgi` and `fail.cgi` must have proper SELinux contexts if SELinux is -enabled: `restorecon -RFv /var/www/cgi-bin`, or ensure that the context is -`httpd_sys_script_exec_t` using `ls -laZ`. -* `login.cgi` is only executed if your user successfully logs in per `Require -and Auth` directives. -* `fail.cgi` is executed if the user fails to log in, resulting in an `HTTP 401` -response. diff --git a/modules/explanation-of-capabilities.adoc b/modules/explanation-of-capabilities.adoc deleted file mode 100644 index 98ff249574ba..000000000000 --- a/modules/explanation-of-capabilities.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/cluster-capabilities.adoc - -:_content-type: REFERENCE -[id="explanation_of_capabilities_{context}"] -= Optional cluster capabilities in {product-title} {product-version} - -Currently, cluster Operators provide the features for these optional capabilities. The following summarizes the features provided by each capability and what functionality you lose if it is disabled. diff --git a/modules/feature-gate-features.adoc b/modules/feature-gate-features.adoc deleted file mode 100644 index 55a72730f25f..000000000000 --- a/modules/feature-gate-features.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-enabling-features.adoc - -[id="feature-gate-features_{context}"] -= Features that are affected by FeatureGates - -The following Technology Preview features included in {product-title}: - -[options="header"] -|=== -| FeatureGate| Description| Default - -|`RotateKubeletServerCertificate` -|Enables the rotation of the server TLS certificate on the cluster. -|True - -|`SupportPodPidsLimit` -|Enables support for limiting the number of processes (PIDs) running in a pod. -|True - -|`MachineHealthCheck` -|Enables automatically repairing unhealthy machines in a machine pool. -|True - -|`LocalStorageCapacityIsolation` -|Enable the consumption of local ephemeral storage and also the `sizeLimit` property of an `emptyDir` volume. -|False - -|=== - -You can enable these features by editing the Feature Gate Custom Resource. 
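As a sketch of the preceding sentence, and assuming that the Feature Gate Custom Resource is the cluster-scoped `FeatureGate` object named `cluster` and that the `TechPreviewNoUpgrade` feature set is what switches these gates on (both names are assumptions rather than values taken from this table), enabling them might look like this:

[source,yaml]
----
# Sketch only: assumed resource name and feature set value.
apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: TechPreviewNoUpgrade
----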
-Turning on these features cannot be undone and prevents the ability to upgrade your cluster. diff --git a/modules/file-integrity-CR-phases.adoc b/modules/file-integrity-CR-phases.adoc deleted file mode 100644 index 7376bba95055..000000000000 --- a/modules/file-integrity-CR-phases.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-CR-phases_{context}"] -= FileIntegrity custom resource phases - -* `Pending` - The phase after the custom resource (CR) is created. -* `Active` - The phase when the backing daemon set is up and running. -* `Initializing` - The phase when the AIDE database is being reinitialized. diff --git a/modules/file-integrity-events.adoc b/modules/file-integrity-events.adoc deleted file mode 100644 index e9ddd20a3868..000000000000 --- a/modules/file-integrity-events.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: CONCEPT -[id="file-integrity-events_{context}"] -= Understanding events - -Transitions in the status of the `FileIntegrity` and `FileIntegrityNodeStatus` objects are logged by _events_. The creation time of the event reflects the latest transition, such as `Initializing` to `Active`, and not necessarily the latest scan result. However, the newest event always reflects the most recent status. - -[source,terminal] ----- -$ oc get events --field-selector reason=FileIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -97s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Pending -67s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Initializing -37s Normal FileIntegrityStatus fileintegrity/example-fileintegrity Active ----- - -When a node scan fails, an event is created with the `add/changed/removed` and config map information. - -[source,terminal] ----- -$ oc get events --field-selector reason=NodeIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-134-173.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-168-238.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-169-175.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-152-92.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-158-144.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-131-30.ec2.internal -87m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:1,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed ----- - -Changes to the number of added, changed, or removed files results in a new event, even if the status of the node has not transitioned. 
- -[source,terminal] ----- -$ oc get events --field-selector reason=NodeIntegrityStatus ----- - -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-134-173.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-168-238.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-169-175.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-152-92.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-158-144.ec2.internal -114m Normal NodeIntegrityStatus fileintegrity/example-fileintegrity no changes to node ip-10-0-131-30.ec2.internal -87m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:1,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed -40m Warning NodeIntegrityStatus fileintegrity/example-fileintegrity node ip-10-0-152-92.ec2.internal has changed! a:3,c:1,r:0 \ log:openshift-file-integrity/aide-ds-example-fileintegrity-ip-10-0-152-92.ec2.internal-failed ----- diff --git a/modules/file-integrity-examine-default-config.adoc b/modules/file-integrity-examine-default-config.adoc deleted file mode 100644 index 6c6327b32db7..000000000000 --- a/modules/file-integrity-examine-default-config.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: PROCEDURE -[id="file-integrity-examine-default-config_{context}"] -= Examine the default configuration - -The default File Integrity Operator configuration is stored in a config map with -the same name as the `FileIntegrity` CR. - -.Procedure - -* To examine the default config, run: -+ -[source,terminal] ----- -$ oc describe cm/worker-fileintegrity ----- diff --git a/modules/file-integrity-important-attributes.adoc b/modules/file-integrity-important-attributes.adoc deleted file mode 100644 index 887cdf067178..000000000000 --- a/modules/file-integrity-important-attributes.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="important-file-integrity-object-attributes_{context}"] -= Important attributes - -.Important `spec` and `spec.config` attributes - -[%header,cols=2*] -|=== -|Attribute -|Description - -|`spec.nodeSelector` -|A map of key-values pairs that must match with node's labels in order for the -AIDE pods to be schedulable on that node. The typical use is to set only a -single key-value pair where `node-role.kubernetes.io/worker: ""` schedules AIDE on -all worker nodes, `node.openshift.io/os_id: "rhcos"` schedules on all -{op-system-first} nodes. - -|`spec.debug` -|A boolean attribute. If set to `true`, the daemon running in the AIDE deamon set's -pods would output extra information. - -|`spec.tolerations` -|Specify tolerations to schedule on nodes with custom taints. When not specified, -a default toleration is applied, which allows tolerations to run on control plane nodes. - -|`spec.config.gracePeriod` -|The number of seconds to pause in between AIDE integrity checks. 
Frequent AIDE -checks on a node can be resource intensive, so it can be useful to specify a -longer interval. Defaults to `900`, or 15 minutes. - -|`maxBackups` -|The maximum number of AIDE database and log backups leftover from the `re-init` process to keep on a node. Older backups beyond this number are automatically pruned by the daemon. - -|`spec.config.name` -| Name of a configMap that contains custom AIDE configuration. If omitted, a default configuration is created. - -|`spec.config.namespace` -|Namespace of a configMap that contains custom AIDE configuration. If unset, the FIO generates a default configuration suitable for {op-system} systems. - -|`spec.config.key` -|Key that contains actual AIDE configuration in a config map specified by `name` and `namespace`. The default value is `aide.conf`. - -|`spec.config.initialDelay` -|The number of seconds to wait before starting the first AIDE integrity check. Default is set to 0. This attribute is optional. -|=== diff --git a/modules/file-integrity-node-status-failure.adoc b/modules/file-integrity-node-status-failure.adoc deleted file mode 100644 index 4914b9a76da9..000000000000 --- a/modules/file-integrity-node-status-failure.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-failure_{context}"] -= FileIntegrityNodeStatus CR failure status example - -To simulate a failure condition, modify one of the files AIDE tracks. For example, modify `/etc/resolv.conf` on one of the worker nodes: - -[source,terminal] ----- -$ oc debug node/ip-10-0-130-192.ec2.internal ----- - -.Example output -[source,terminal] ----- -Creating debug namespace/openshift-debug-node-ldfbj ... -Starting pod/ip-10-0-130-192ec2internal-debug ... -To use host binaries, run `chroot /host` -Pod IP: 10.0.130.192 -If you don't see a command prompt, try pressing enter. -sh-4.2# echo "# integrity test" >> /host/etc/resolv.conf -sh-4.2# exit - -Removing debug pod ... -Removing debug namespace/openshift-debug-node-ldfbj ... ----- - -After some time, the `Failed` condition is reported in the results array of the corresponding `FileIntegrityNodeStatus` object. The previous `Succeeded` condition is retained, which allows you to pinpoint the time the check failed. 
- -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io/worker-fileintegrity-ip-10-0-130-192.ec2.internal -ojsonpath='{.results}' | jq -r ----- - -Alternatively, if you are not mentioning the object name, run: - -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io -ojsonpath='{.items[*].results}' | jq ----- - -.Example output -[source,terminal] ----- -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:54:14Z" - }, - { - "condition": "Failed", - "filesChanged": 1, - "lastProbeTime": "2020-09-15T12:57:20Z", - "resultConfigMapName": "aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed", - "resultConfigMapNamespace": "openshift-file-integrity" - } -] ----- - -The `Failed` condition points to a config map that gives more details about what exactly failed and why: - -[source,terminal] ----- -$ oc describe cm aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed ----- - -.Example output -[source,terminal] ----- -Name: aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed -Namespace: openshift-file-integrity -Labels: file-integrity.openshift.io/node=ip-10-0-130-192.ec2.internal - file-integrity.openshift.io/owner=worker-fileintegrity - file-integrity.openshift.io/result-log= -Annotations: file-integrity.openshift.io/files-added: 0 - file-integrity.openshift.io/files-changed: 1 - file-integrity.openshift.io/files-removed: 0 - -Data - -integritylog: ------- -AIDE 0.15.1 found differences between database and filesystem!! -Start timestamp: 2020-09-15 12:58:15 - -Summary: - Total number of files: 31553 - Added files: 0 - Removed files: 0 - Changed files: 1 - - ---------------------------------------------------- -Changed files: ---------------------------------------------------- - -changed: /hostroot/etc/resolv.conf - ---------------------------------------------------- -Detailed information about changes: ---------------------------------------------------- - - -File: /hostroot/etc/resolv.conf - SHA512 : sTQYpB/AL7FeoGtu/1g7opv6C+KT1CBJ , qAeM+a8yTgHPnIHMaRlS+so61EN8VOpg - -Events: <none> ----- - -Due to the config map data size limit, AIDE logs over 1 MB are added to the failure config map as a base64-encoded gzip archive. In this case, you want to pipe the output of the above command to `base64 --decode | gunzip`. Compressed logs are indicated by the presence of a `file-integrity.openshift.io/compressed` annotation key in the config map. diff --git a/modules/file-integrity-node-status-success.adoc b/modules/file-integrity-node-status-success.adoc deleted file mode 100644 index c21827745454..000000000000 --- a/modules/file-integrity-node-status-success.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-success_{context}"] -= FileIntegrityNodeStatus CR success example - -.Example output of a condition with a success status - -[source,terminal] ----- -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:45:57Z" - } -] -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:46:03Z" - } -] -[ - { - "condition": "Succeeded", - "lastProbeTime": "2020-09-15T12:45:48Z" - } -] ----- - -In this case, all three scans succeeded and so far there are no other conditions. 
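The failure example above notes that AIDE logs larger than 1 MB are stored in the failure config map as a base64-encoded gzip archive. A minimal sketch of retrieving such a log, assuming the compressed data is still exposed under the `integritylog` key and reusing the config map name from that example:

[source,terminal]
----
$ oc get cm aide-ds-worker-fileintegrity-ip-10-0-130-192.ec2.internal-failed \
    -n openshift-file-integrity \
    -o jsonpath='{.data.integritylog}' | base64 --decode | gunzip
----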
diff --git a/modules/file-integrity-node-status.adoc b/modules/file-integrity-node-status.adoc deleted file mode 100644 index 4cec74251a91..000000000000 --- a/modules/file-integrity-node-status.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -[id="file-integrity-node-status-types_{context}"] -= FileIntegrityNodeStatus CR status types - -These conditions are reported in the results array of the corresponding `FileIntegrityNodeStatus` CR status: - -* `Succeeded` - The integrity check passed; the files and directories covered by the AIDE check have not been modified since the database was last initialized. - -* `Failed` - The integrity check failed; some files or directories covered by the AIDE check have been modified since the database was last initialized. - -* `Errored` - The AIDE scanner encountered an internal error. diff --git a/modules/file-integrity-operator-changing-custom-config.adoc b/modules/file-integrity-operator-changing-custom-config.adoc deleted file mode 100644 index fe5889a6c2ea..000000000000 --- a/modules/file-integrity-operator-changing-custom-config.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="file-integrity-operator-changing-custom-config_{context}"] -= Changing the custom File Integrity configuration - -To change the File Integrity configuration, never change the generated -config map. Instead, change the config map that is linked to the `FileIntegrity` -object through the `spec.name`, `namespace`, and `key` attributes. diff --git a/modules/file-integrity-operator-defining-custom-config.adoc b/modules/file-integrity-operator-defining-custom-config.adoc deleted file mode 100644 index f6d472aa7929..000000000000 --- a/modules/file-integrity-operator-defining-custom-config.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: PROCEDURE -[id="file-integrity-operator-defining-custom-config_{context}"] -= Defining a custom File Integrity Operator configuration - -This example focuses on defining a custom configuration for a scanner that runs -on the control plane nodes based on the default configuration provided for the -`worker-fileintegrity` CR. This workflow might be useful if you are planning -to deploy a custom software running as a daemon set and storing its data under -`/opt/mydaemon` on the control plane nodes. - -.Procedure - -. Make a copy of the default configuration. - -. Edit the default configuration with the files that must be watched or excluded. - -. Store the edited contents in a new config map. - -. Point the `FileIntegrity` object to the new config map through the attributes in -`spec.config`. - -. Extract the default configuration: -+ -[source,terminal] ----- -$ oc extract cm/worker-fileintegrity --keys=aide.conf ----- -+ -This creates a file named `aide.conf` that you can edit. 
To illustrate how the -Operator post-processes the paths, this example adds an exclude directory -without the prefix: -+ -[source,terminal] ----- -$ vim aide.conf ----- -+ -.Example output -[source,terminal] ----- -/hostroot/etc/kubernetes/static-pod-resources -!/hostroot/etc/kubernetes/aide.* -!/hostroot/etc/kubernetes/manifests -!/hostroot/etc/docker/certs.d -!/hostroot/etc/selinux/targeted -!/hostroot/etc/openvswitch/conf.db ----- -+ -Exclude a path specific to control plane nodes: -+ -[source,terminal] ----- -!/opt/mydaemon/ ----- -+ -Store the other content in `/etc`: -+ -[source,terminal] ----- -/hostroot/etc/ CONTENT_EX ----- - -. Create a config map based on this file: -+ -[source,terminal] ----- -$ oc create cm master-aide-conf --from-file=aide.conf ----- - -. Define a `FileIntegrity` CR manifest that references the config map: -+ -[source,yaml] ----- -apiVersion: fileintegrity.openshift.io/v1alpha1 -kind: FileIntegrity -metadata: - name: master-fileintegrity - namespace: openshift-file-integrity -spec: - nodeSelector: - node-role.kubernetes.io/master: "" - config: - name: master-aide-conf - namespace: openshift-file-integrity ----- -+ -The Operator processes the provided config map file and stores the result in a -config map with the same name as the `FileIntegrity` object: -+ -[source,terminal] ----- -$ oc describe cm/master-fileintegrity | grep /opt/mydaemon ----- -+ -.Example output -[source,terminal] ----- -!/hostroot/opt/mydaemon ----- diff --git a/modules/file-integrity-operator-exploring-daemon-sets.adoc b/modules/file-integrity-operator-exploring-daemon-sets.adoc deleted file mode 100644 index 87c4a3b03a9b..000000000000 --- a/modules/file-integrity-operator-exploring-daemon-sets.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -[id="file-integrity-operator-exploring-daemon-sets_{context}"] -= Exploring the daemon sets - -Each `FileIntegrity` object represents a scan on a number of nodes. The scan -itself is performed by pods managed by a daemon set. - -To find the daemon set that represents a `FileIntegrity` object, run: - -[source,terminal] ----- -$ oc -n openshift-file-integrity get ds/aide-worker-fileintegrity ----- - -To list the pods in that daemon set, run: - -[source,terminal] ----- -$ oc -n openshift-file-integrity get pods -lapp=aide-worker-fileintegrity ----- - -To view logs of a single AIDE pod, call `oc logs` on one of the pods. - -[source,terminal] ----- -$ oc -n openshift-file-integrity logs pod/aide-worker-fileintegrity-mr8x6 ----- - -.Example output -[source,terminal] ----- -Starting the AIDE runner daemon -initializing AIDE db -initialization finished -running aide check -... ----- - -The config maps created by the AIDE daemon are not retained and are deleted -after the File Integrity Operator processes them. However, on failure and error, -the contents of these config maps are copied to the config map that the -`FileIntegrityNodeStatus` object points to. 
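Because failed or errored scan contents are copied to the config map that the `FileIntegrityNodeStatus` object points to, a short sketch of walking from a node status to that config map can help; the object name is reused from the earlier examples and the `jq` filter is only illustrative:

[source,terminal]
----
$ oc get fileintegritynodestatuses.fileintegrity.openshift.io \
    worker-fileintegrity-ip-10-0-130-192.ec2.internal -o json \
    | jq -r '.results[] | select(.condition=="Failed") | .resultConfigMapName'

$ oc describe cm <result_configmap_name> -n openshift-file-integrity
----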
diff --git a/modules/file-integrity-operator-installing-cli.adoc b/modules/file-integrity-operator-installing-cli.adoc deleted file mode 100644 index 25b574cb1ed8..000000000000 --- a/modules/file-integrity-operator-installing-cli.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-file-integrity-operator-using-cli_{context}"] -= Installing the File Integrity Operator using the CLI - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. Create a `Namespace` object YAML file by running: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged <1> - name: openshift-file-integrity ----- -<1> In {product-title} {product-version}, the pod security label must be set to `privileged` at the namespace level. - -. Create the `OperatorGroup` object YAML file: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: file-integrity-operator - namespace: openshift-file-integrity -spec: - targetNamespaces: - - openshift-file-integrity ----- - -. Create the `Subscription` object YAML file: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: file-integrity-operator - namespace: openshift-file-integrity -spec: - channel: "stable" - installPlanApproval: Automatic - name: file-integrity-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.Verification - -. Verify the installation succeeded by inspecting the CSV file: -+ -[source,terminal] ----- -$ oc get csv -n openshift-file-integrity ----- - -. Verify that the File Integrity Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n openshift-file-integrity ----- diff --git a/modules/file-integrity-operator-installing-web-console.adoc b/modules/file-integrity-operator-installing-web-console.adoc deleted file mode 100644 index 902b0195d9a8..000000000000 --- a/modules/file-integrity-operator-installing-web-console.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-installation.adoc - -:_content-type: PROCEDURE -[id="installing-file-integrity-operator-using-web-console_{context}"] -= Installing the File Integrity Operator using the web console - -.Prerequisites - -* You must have `admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. -. Search for the File Integrity Operator, then click *Install*. -. Keep the default selection of *Installation mode* and *namespace* to ensure that the Operator will be installed to the `openshift-file-integrity` namespace. -. Click *Install*. - -.Verification - -To confirm that the installation is successful: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Check that the Operator is installed in the `openshift-file-integrity` namespace and its status is `Succeeded`. - -If the Operator is not installed successfully: - -. 
Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. -. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-file-integrity` project that are reporting issues. diff --git a/modules/file-integrity-operator-machine-config-integration.adoc b/modules/file-integrity-operator-machine-config-integration.adoc deleted file mode 100644 index 8e29ade7ce97..000000000000 --- a/modules/file-integrity-operator-machine-config-integration.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -[id="file-integrity-operator-machine-config-integration_{context}"] -= Machine config integration - -In {product-title} 4, the cluster node configuration is delivered through -`MachineConfig` objects. You can assume that the changes to files that are -caused by a `MachineConfig` object are expected and should not cause the file -integrity scan to fail. To suppress changes to files caused by `MachineConfig` -object updates, the File Integrity Operator watches the node objects; when a -node is being updated, the AIDE scans are suspended for the duration of the -update. When the update finishes, the database is reinitialized and the scans -resume. - -This pause and resume logic only applies to updates through the `MachineConfig` -API, as they are reflected in the node object annotations. diff --git a/modules/file-integrity-operator-reinitializing-database.adoc b/modules/file-integrity-operator-reinitializing-database.adoc deleted file mode 100644 index 17f49f63bf53..000000000000 --- a/modules/file-integrity-operator-reinitializing-database.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-advanced-usage.adoc - -:_content-type: PROCEDURE -[id="file-integrity-operator-reinitializing-database_{context}"] -= Reinitializing the database - -If the File Integrity Operator detects a change that was planned, it might be required to reinitialize the database. - -.Procedure - -* Annotate the `FileIntegrity` custom resource (CR) with `file-integrity.openshift.io/re-init`: -+ -[source,terminal] ----- -$ oc annotate fileintegrities/worker-fileintegrity file-integrity.openshift.io/re-init= ----- -+ -The old database and log files are backed up and a new database is initialized. The old database and logs are retained on the nodes under `/etc/kubernetes`, as -seen in the following output from a pod spawned using `oc debug`: -+ -.Example output -[source,terminal] ----- - ls -lR /host/etc/kubernetes/aide.* --rw-------. 1 root root 1839782 Sep 17 15:08 /host/etc/kubernetes/aide.db.gz --rw-------. 1 root root 1839783 Sep 17 14:30 /host/etc/kubernetes/aide.db.gz.backup-20200917T15_07_38 --rw-------. 1 root root 73728 Sep 17 15:07 /host/etc/kubernetes/aide.db.gz.backup-20200917T15_07_55 --rw-r--r--. 1 root root 0 Sep 17 15:08 /host/etc/kubernetes/aide.log --rw-------. 1 root root 613 Sep 17 15:07 /host/etc/kubernetes/aide.log.backup-20200917T15_07_38 --rw-r--r--. 1 root root 0 Sep 17 15:07 /host/etc/kubernetes/aide.log.backup-20200917T15_07_55 ----- -+ -To provide some permanence of record, the resulting config maps are not owned by the `FileIntegrity` object, so manual cleanup is necessary. As a -result, any previous integrity failures would still be visible in the `FileIntegrityNodeStatus` object. 
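Because the resulting config maps are not owned by the `FileIntegrity` object, the manual cleanup mentioned above has to be done by hand. A sketch, assuming the result config maps carry the `file-integrity.openshift.io/owner` label shown in the earlier failure example; verify the labels on your cluster before deleting anything:

[source,terminal]
----
$ oc -n openshift-file-integrity get cm \
    -l file-integrity.openshift.io/owner=worker-fileintegrity

$ oc -n openshift-file-integrity delete cm <old_result_configmap_name>
----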
diff --git a/modules/file-integrity-operator-viewing-attributes.adoc b/modules/file-integrity-operator-viewing-attributes.adoc deleted file mode 100644 index b7372cfdcafc..000000000000 --- a/modules/file-integrity-operator-viewing-attributes.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="viewing-file-integrity-object-attributes_{context}"] -= Viewing FileIntegrity object attributes - -As with any Kubernetes custom resources (CRs), you can run `oc explain fileintegrity`, and then look at the individual attributes using: - -[source,terminal] ----- -$ oc explain fileintegrity.spec ----- - -[source,terminal] ----- -$ oc explain fileintegrity.spec.config ----- diff --git a/modules/file-integrity-supplying-custom-aide-config.adoc b/modules/file-integrity-supplying-custom-aide-config.adoc deleted file mode 100644 index 45996b582b88..000000000000 --- a/modules/file-integrity-supplying-custom-aide-config.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -[id="file-integrity-operator-supplying-custom-aide-config_{context}"] -= Supplying a custom AIDE configuration - -Any entries that configure AIDE internal behavior such as `DBDIR`, `LOGDIR`, -`database`, and `database_out` are overwritten by the Operator. The Operator -would add a prefix to `/hostroot/` before all paths to be watched for integrity -changes. This makes reusing existing AIDE configs that might often not be -tailored for a containerized environment and start from the root directory -easier. - -[NOTE] -==== -`/hostroot` is the directory where the pods running AIDE mount the host's -file system. Changing the configuration triggers a reinitializing of the database. 
-==== diff --git a/modules/file-integrity-understanding-default-config.adoc b/modules/file-integrity-understanding-default-config.adoc deleted file mode 100644 index dc8acd071cb6..000000000000 --- a/modules/file-integrity-understanding-default-config.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-configuring.adoc - -:_content-type: CONCEPT -[id="file-integrity-understanding-default-config_{context}"] -= Understanding the default File Integrity Operator configuration - -Below is an excerpt from the `aide.conf` key of the config map: - -[source,bash] ----- -@@define DBDIR /hostroot/etc/kubernetes -@@define LOGDIR /hostroot/etc/kubernetes -database=file:@@{DBDIR}/aide.db.gz -database_out=file:@@{DBDIR}/aide.db.gz -gzip_dbout=yes -verbose=5 -report_url=file:@@{LOGDIR}/aide.log -report_url=stdout -PERMS = p+u+g+acl+selinux+xattrs -CONTENT_EX = sha512+ftype+p+u+g+n+acl+selinux+xattrs - -/hostroot/boot/ CONTENT_EX -/hostroot/root/\..* PERMS -/hostroot/root/ CONTENT_EX ----- - -The default configuration for a `FileIntegrity` instance provides coverage for -files under the following directories: - -* `/root` -* `/boot` -* `/usr` -* `/etc` - -The following directories are not covered: - -* `/var` -* `/opt` -* Some {product-title}-specific excludes under `/etc/` diff --git a/modules/file-integrity-understanding-file-integrity-cr.adoc b/modules/file-integrity-understanding-file-integrity-cr.adoc deleted file mode 100644 index 8aa9455512ed..000000000000 --- a/modules/file-integrity-understanding-file-integrity-cr.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: PROCEDURE -[id="understanding-file-integrity-custom-resource_{context}"] -= Creating the FileIntegrity custom resource - -An instance of a `FileIntegrity` custom resource (CR) represents a set of continuous file integrity scans for one or more nodes. - -Each `FileIntegrity` CR is backed by a daemon set running AIDE on the nodes matching the `FileIntegrity` CR specification. - -.Procedure - -. Create the following example `FileIntegrity` CR named `worker-fileintegrity.yaml` to enable scans on worker nodes: -+ -.Example FileIntegrity CR -[source,yaml] ----- -apiVersion: fileintegrity.openshift.io/v1alpha1 -kind: FileIntegrity -metadata: - name: worker-fileintegrity - namespace: openshift-file-integrity -spec: - nodeSelector: <1> - node-role.kubernetes.io/worker: "" - tolerations: <2> - - key: "myNode" - operator: "Exists" - effect: "NoSchedule" - config: <3> - name: "myconfig" - namespace: "openshift-file-integrity" - key: "config" - gracePeriod: 20 <4> - maxBackups: 5 <5> - initialDelay: 60 <6> - debug: false -status: - phase: Active <7> ----- -<1> Defines the selector for scheduling node scans. -<2> Specify `tolerations` to schedule on nodes with custom taints. When not specified, a default toleration allowing running on main and infra nodes is applied. -<3> Define a `ConfigMap` containing an AIDE configuration to use. -<4> The number of seconds to pause in between AIDE integrity checks. Frequent AIDE checks on a node might be resource intensive, so it can be useful to specify a longer interval. Default is 900 seconds (15 minutes). -<5> The maximum number of AIDE database and log backups (leftover from the re-init process) to keep on a node. 
Older backups beyond this number are automatically pruned by the daemon. Default is set to 5. -<6> The number of seconds to wait before starting the first AIDE integrity check. Default is set to 0. -<7> The running status of the `FileIntegrity` instance. Statuses are `Initializing`, `Pending`, or `Active`. -+ -[horizontal] -`Initializing`:: The `FileIntegrity` object is currently initializing or re-initializing the AIDE database. -`Pending`:: The `FileIntegrity` deployment is still being created. -`Active`:: The scans are active and ongoing. - -. Apply the YAML file to the `openshift-file-integrity` namespace: -+ -[source,terminal] ----- -$ oc apply -f worker-fileintegrity.yaml -n openshift-file-integrity ----- - -.Verification - -* Confirm the `FileIntegrity` object was created successfully by running the following command: -+ -[source,terminal] ----- -$ oc get fileintegrities -n openshift-file-integrity ----- -+ -.Example output -+ -[source,terminal] ----- -NAME AGE -worker-fileintegrity 14s ----- \ No newline at end of file diff --git a/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc b/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc deleted file mode 100644 index b3b7f28825f0..000000000000 --- a/modules/file-integrity-understanding-file-integrity-node-statuses-object.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * security/file_integrity_operator/file-integrity-operator-understanding.adoc - -:_content-type: CONCEPT -[id="understanding-file-integrity-node-statuses-object_{context}"] -= Understanding the FileIntegrityNodeStatuses object - -The scan results of the `FileIntegrity` CR are reported in another object called `FileIntegrityNodeStatuses`. - -[source,terminal] ----- -$ oc get fileintegritynodestatuses ----- - -.Example output -[source,terminal] ----- -NAME AGE -worker-fileintegrity-ip-10-0-130-192.ec2.internal 101s -worker-fileintegrity-ip-10-0-147-133.ec2.internal 109s -worker-fileintegrity-ip-10-0-165-160.ec2.internal 102s ----- - -[NOTE] -==== -It might take some time for the `FileIntegrityNodeStatus` object results to be available. -==== - -There is one result object per node. The `nodeName` attribute of each `FileIntegrityNodeStatus` object corresponds to the node being scanned. The -status of the file integrity scan is represented in the `results` array, which holds scan conditions. - -[source,terminal] ----- -$ oc get fileintegritynodestatuses.fileintegrity.openshift.io -ojsonpath='{.items[*].results}' | jq ----- - -The `fileintegritynodestatus` object reports the latest status of an AIDE run and exposes the status as `Failed`, `Succeeded`, or `Errored` in a `status` field. 
- -[source,terminal] ----- -$ oc get fileintegritynodestatuses -w ----- - -.Example output -[source,terminal] ----- -NAME NODE STATUS -example-fileintegrity-ip-10-0-134-186.us-east-2.compute.internal ip-10-0-134-186.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-150-230.us-east-2.compute.internal ip-10-0-150-230.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-169-137.us-east-2.compute.internal ip-10-0-169-137.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-180-200.us-east-2.compute.internal ip-10-0-180-200.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-194-66.us-east-2.compute.internal ip-10-0-194-66.us-east-2.compute.internal Failed -example-fileintegrity-ip-10-0-222-188.us-east-2.compute.internal ip-10-0-222-188.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-134-186.us-east-2.compute.internal ip-10-0-134-186.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-222-188.us-east-2.compute.internal ip-10-0-222-188.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-194-66.us-east-2.compute.internal ip-10-0-194-66.us-east-2.compute.internal Failed -example-fileintegrity-ip-10-0-150-230.us-east-2.compute.internal ip-10-0-150-230.us-east-2.compute.internal Succeeded -example-fileintegrity-ip-10-0-180-200.us-east-2.compute.internal ip-10-0-180-200.us-east-2.compute.internal Succeeded ----- diff --git a/modules/functions-list-kn.adoc b/modules/functions-list-kn.adoc deleted file mode 100644 index e77055534907..000000000000 --- a/modules/functions-list-kn.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies - -// * /serverless/cli_tools/kn-func-ref.adoc - -:_content-type: PROCEDURE -[id="functions-list-kn_{context}"] -= Listing existing functions - -You can list existing functions by using `kn func list`. If you want to list functions that have been deployed as Knative services, you can also use `kn service list`. - -.Procedure - -* List existing functions: -+ -[source,terminal] ----- -$ kn func list [-n <namespace> -p <path>] ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE RUNTIME URL READY -example-function default node http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com True ----- - -* List functions deployed as Knative services: -+ -[source,terminal] ----- -$ kn service list -n <namespace> ----- -+ -.Example output -[source,terminal] ----- -NAME URL LATEST AGE CONDITIONS READY REASON -example-function http://example-function.default.apps.ci-ln-g9f36hb-d5d6b.origin-ci-int-aws.dev.rhcloud.com example-function-gzl4c 16m 3 OK / 3 True ----- diff --git a/modules/gathering-application-diagnostic-data.adoc b/modules/gathering-application-diagnostic-data.adoc deleted file mode 100644 index 71e47442a9a2..000000000000 --- a/modules/gathering-application-diagnostic-data.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-s2i.adoc - -:_content-type: PROCEDURE -[id="gathering-application-diagnostic-data_{context}"] -= Gathering application diagnostic data to investigate application failures - -Application failures can occur within running application pods. In these situations, you can retrieve diagnostic information with these strategies: - -* Review events relating to the application pods. 
-* Review the logs from the application pods, including application-specific log files that are not collected by the OpenShift Logging framework. -* Test application functionality interactively and run diagnostic tools in an application container. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. List events relating to a specific application pod. The following example retrieves events for an application pod named `my-app-1-akdlg`: -+ -[source,terminal] ----- -$ oc describe pod/my-app-1-akdlg ----- - -. Review logs from an application pod: -+ -[source,terminal] ----- -$ oc logs -f pod/my-app-1-akdlg ----- - -. Query specific logs within a running application pod. Logs that are sent to stdout are collected by the OpenShift Logging framework and are included in the output of the preceding command. The following query is only required for logs that are not sent to stdout. -+ -.. If an application log can be accessed without root privileges within a pod, concatenate the log file as follows: -+ -[source,terminal] ----- -$ oc exec my-app-1-akdlg -- cat /var/log/my-application.log ----- -+ -.. If root access is required to view an application log, you can start a debug container with root privileges and then view the log file from within the container. Start the debug container from the project's `DeploymentConfig` object. Pod users typically run with non-root privileges, but running troubleshooting pods with temporary root privileges can be useful during issue investigation: -+ -[source,terminal] ----- -$ oc debug dc/my-deployment-configuration --as-root -- cat /var/log/my-application.log ----- -+ -[NOTE] -==== -You can access an interactive shell with root access within the debug pod if you run `oc debug dc/<deployment_configuration> --as-root` without appending `-- <command>`. -==== - -. Test application functionality interactively and run diagnostic tools, in an application container with an interactive shell. -.. Start an interactive shell on the application container: -+ -[source,terminal] ----- -$ oc exec -it my-app-1-akdlg /bin/bash ----- -+ -.. Test application functionality interactively from within the shell. For example, you can run the container's entry point command and observe the results. Then, test changes from the command line directly, before updating the source code and rebuilding the application container through the S2I process. -+ -.. Run diagnostic binaries available within the container. -+ -[NOTE] -==== -Root privileges are required to run some diagnostic binaries. In these situations you can start a debug pod with root access, based on a problematic pod's `DeploymentConfig` object, by running `oc debug dc/<deployment_configuration> --as-root`. Then, you can run diagnostic binaries as root from within the debug pod. -==== - -. If diagnostic binaries are not available within a container, you can run a host's diagnostic binaries within a container's namespace by using `nsenter`. The following example runs `ip ad` within a container's namespace, using the host`s `ip` binary. -.. Enter into a debug session on the target node. This step instantiates a debug pod called `<node_name>-debug`: -+ -[source,terminal] ----- -$ oc debug node/my-cluster-node ----- -+ -.. Set `/host` as the root directory within the debug shell. The debug pod mounts the host's root file system in `/host` within the pod. 
By changing the root directory to `/host`, you can run binaries contained in the host's executable paths: -+ -[source,terminal] ----- -# chroot /host ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>` instead. -==== -+ -.. Determine the target container ID: -+ -[source,terminal] ----- -# crictl ps ----- -+ -.. Determine the container's process ID. In this example, the target container ID is `a7fe32346b120`: -+ -[source,terminal] ----- -# crictl inspect a7fe32346b120 --output yaml | grep 'pid:' | awk '{print $2}' ----- -+ -.. Run `ip ad` within the container's namespace, using the host's `ip` binary. This example uses `31150` as the container's process ID. The `nsenter` command enters the namespace of a target process and runs a command in its namespace. Because the target process in this example is a container's process ID, the `ip ad` command is run in the container's namespace from the host: -+ -[source,terminal] ----- -# nsenter -n -t 31150 -- ip ad ----- -+ -[NOTE] -==== -Running a host's diagnostic binaries within a container's namespace is only possible if you are using a privileged container such as a debug node. -==== diff --git a/modules/gathering-bootstrap-diagnostic-data.adoc b/modules/gathering-bootstrap-diagnostic-data.adoc deleted file mode 100644 index c9acb3aa81e7..000000000000 --- a/modules/gathering-bootstrap-diagnostic-data.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="gathering-bootstrap-diagnostic-data_{context}"] -= Gathering bootstrap node diagnostic data - -When experiencing bootstrap-related issues, you can gather `bootkube.service` `journald` unit logs and container logs from the bootstrap node. - -.Prerequisites - -* You have SSH access to your bootstrap node. -* You have the fully qualified domain name of the bootstrap node. -* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. - -.Procedure - -. If you have access to the bootstrap node's console, monitor the console until the node reaches the login prompt. - -. Verify the Ignition file configuration. -+ -* If you are hosting Ignition configuration files by using an HTTP server. -+ -.. Verify the bootstrap node Ignition file URL. Replace `<http_server_fqdn>` with HTTP server's fully qualified domain name: -+ -[source,terminal] ----- -$ curl -I http://<http_server_fqdn>:<port>/bootstrap.ign <1> ----- -<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. -+ -.. To verify that the Ignition file was received by the bootstrap node, query the HTTP server logs on the serving host. 
For example, if you are using an Apache web server to serve Ignition files, enter the following command: -+ -[source,terminal] ----- -$ grep -is 'bootstrap.ign' /var/log/httpd/access_log ----- -+ -If the bootstrap Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. -+ -.. If the Ignition file was not received, check that the Ignition files exist and that they have the appropriate file and web server permissions on the serving host directly. -+ -* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. -+ -.. Review the bootstrap node's console to determine if the mechanism is injecting the bootstrap node Ignition file correctly. - -. Verify the availability of the bootstrap node's assigned storage device. - -. Verify that the bootstrap node has been assigned an IP address from the DHCP server. - -. Collect `bootkube.service` journald unit logs from the bootstrap node. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> journalctl -b -f -u bootkube.service ----- -+ -[NOTE] -==== -The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on control plane nodes. After etcd has started on each control plane node and the nodes have joined the cluster, the errors should stop. -==== -+ -. Collect logs from the bootstrap node containers. -.. Collect the logs using `podman` on the bootstrap node. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> 'for pod in $(sudo podman ps -a -q); do sudo podman logs $pod; done' ----- - -. If the bootstrap process fails, verify the following. -+ -* You can resolve `api.<cluster_name>.<base_domain>` from the installation host. -* The load balancer proxies port 6443 connections to bootstrap and control plane nodes. Ensure that the proxy configuration meets {product-title} installation requirements. diff --git a/modules/gathering-crio-logs.adoc b/modules/gathering-crio-logs.adoc deleted file mode 100644 index a6939d6d5ddd..000000000000 --- a/modules/gathering-crio-logs.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-crio-issues.adoc - -:_content-type: PROCEDURE -[id="gathering-crio-logs_{context}"] -= Gathering CRI-O journald unit logs - -If you experience CRI-O issues, you can obtain CRI-O journald unit logs from a node. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). -* You have the fully qualified domain names of the control plane or control plane machines. - -.Procedure - -. Gather CRI-O journald unit logs. The following example collects logs from all control plane nodes (within the cluster: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u crio ----- - -. Gather CRI-O journald unit logs from a specific node: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> -u crio ----- - -. If the API is not functional, review the logs using SSH instead. 
Replace `<node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<node>.<cluster_name>.<base_domain> journalctl -b -f -u crio.service ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== diff --git a/modules/gathering-data-audit-logs.adoc b/modules/gathering-data-audit-logs.adoc deleted file mode 100644 index 2a64fd91fa3e..000000000000 --- a/modules/gathering-data-audit-logs.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-collecting-virt-data.adoc -// * support/gathering-cluster-data.adoc - -ifeval::["{context}" == "gathering-cluster-data"] -:support: -endif::[] -ifeval::["{context}" == "audit-log-view"] -:viewing: -endif::[] - -:_content-type: PROCEDURE -[id="gathering-data-audit-logs_{context}"] -= Gathering audit logs - -ifdef::support[] -You can gather audit logs, which are a security-relevant chronological set of records documenting the sequence of activities that have affected the system by individual users, administrators, or other components of the system. You can gather audit logs for: - -* etcd server -* Kubernetes API server -* OpenShift OAuth API server -* OpenShift API server - -endif::support[] -ifdef::viewing[] -You can use the must-gather tool to collect the audit logs for debugging your cluster, which you can review or send to Red Hat Support. -endif::viewing[] - -.Procedure - -. Run the `oc adm must-gather` command with `-- /usr/bin/gather_audit_logs`: -+ -[source,terminal] ----- -$ oc adm must-gather -- /usr/bin/gather_audit_logs ----- - -ifndef::openshift-origin[] -. Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.472290403699006248 <1> ----- -<1> Replace `must-gather-local.472290403699006248` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. -endif::openshift-origin[] - -ifeval::["{context}" == "gathering-cluster-data"] -:!support: -endif::[] -ifeval::["{context}" == "audit-log-view"] -:!viewing: -endif::[] diff --git a/modules/gathering-data-network-logs.adoc b/modules/gathering-data-network-logs.adoc deleted file mode 100644 index d02e7b1a53e8..000000000000 --- a/modules/gathering-data-network-logs.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * support/gathering-cluster-data.adoc - - -:_content-type: PROCEDURE -[id="gathering-data-network-logs_{context}"] -= Gathering network logs - -You can gather network logs on all nodes in a cluster. - -.Procedure - -. Run the `oc adm must-gather` command with `-- gather_network_logs`: -+ -[source,terminal] ----- -$ oc adm must-gather -- gather_network_logs ----- - -. 
Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.472290403699006248 <1> ----- -<1> Replace `must-gather-local.472290403699006248` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. \ No newline at end of file diff --git a/modules/gathering-data-specific-features.adoc b/modules/gathering-data-specific-features.adoc deleted file mode 100644 index 7ff8dcad633c..000000000000 --- a/modules/gathering-data-specific-features.adoc +++ /dev/null @@ -1,363 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-collecting-virt-data.adoc -// * support/gathering-cluster-data.adoc - -//This file contains UI elements and/or package names that need to be updated. - -ifeval::["{context}" == "gathering-cluster-data"] -:from-main-support-section: -:VirtProductName: OpenShift Virtualization -endif::[] - -:_content-type: PROCEDURE -[id="gathering-data-specific-features_{context}"] -= Gathering data about specific features - -You can gather debugging information about specific features by using the `oc adm must-gather` CLI command with the `--image` or `--image-stream` argument. The `must-gather` tool supports multiple images, so you can gather data about more than one feature by running a single command. - -ifdef::from-main-support-section[] - -ifndef::openshift-origin[] - -.Supported must-gather images -[cols="2,2",options="header",subs="attributes+"] -|=== -|Image |Purpose - -|`registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v<installed_version_virt>` -|Data collection for {VirtProductName}. - -|`registry.redhat.io/openshift-serverless-1/svls-must-gather-rhel8` -|Data collection for OpenShift Serverless. - -|`registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:v<installed_version_service_mesh>` -|Data collection for Red Hat OpenShift Service Mesh. - -ifndef::openshift-dedicated[] -|`registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v<installed_version_migration_toolkit>` -|Data collection for the {mtc-full}. -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -|`registry.redhat.io/rhcam-1-2/openshift-migration-must-gather-rhel8` -|Data collection for migration-related information. -endif::openshift-dedicated[] - -|`registry.redhat.io/odf4/ocs-must-gather-rhel8:v<installed_version_ODF>` -|Data collection for {rh-storage-first}. - -|`registry.redhat.io/openshift-logging/cluster-logging-rhel8-operator` -|Data collection for OpenShift Logging. - -|`registry.redhat.io/openshift4/ose-csi-driver-shared-resource-mustgather-rhel8` -|Data collection for OpenShift Shared Resource CSI Driver. - -ifndef::openshift-dedicated[] -|`registry.redhat.io/openshift4/ose-local-storage-mustgather-rhel8:v<installed_version_LSO>` -|Data collection for Local Storage Operator. -endif::openshift-dedicated[] - -|`registry.redhat.io/openshift-sandboxed-containers/osc-must-gather-rhel8:v<installed_version_sandboxed_containers>` -|Data collection for {sandboxed-containers-first}. - -|`registry.redhat.io/workload-availability/self-node-remediation-must-gather-rhel8:v<installed-version-SNR>` -|Data collection for the Self Node Remediation (SNR) Operator and the Node Health Check (NHC) Operator. 
- -|`registry.redhat.io/workload-availability/node-maintenance-must-gather-rhel8:v<installed-version-NMO>` -|Data collection for the Node Maintenance Operator (NMO). - -|`registry.redhat.io/openshift-gitops-1/gitops-must-gather-rhel8:v<installed_version_GitOps>` -|Data collection for {gitops-title}. -|=== - -[NOTE] -==== -To determine the latest version for an {product-title} component's image, see the link:https://access.redhat.com/support/policy/updates/openshift[Red Hat {product-title} Life Cycle Policy] web page on the Red Hat Customer Portal. -==== - -endif::openshift-origin[] - -ifdef::openshift-origin[] - -.Available must-gather images -[cols="2,2",options="header"] -|=== -|Image |Purpose - -|`quay.io/kubevirt/must-gather` -|Data collection for KubeVirt. - -|`quay.io/openshift-knative/must-gather` -|Data collection for Knative. - -|`docker.io/maistra/istio-must-gather` -|Data collection for service mesh. - -|`quay.io/konveyor/must-gather` -|Data collection for migration-related information. - -|`quay.io/ocs-dev/ocs-must-gather` -|Data collection for {rh-storage}. - -|`quay.io/openshift/origin-cluster-logging-operator` -|Data collection for OpenShift Logging. - -ifndef::openshift-dedicated[] -|`quay.io/openshift/origin-local-storage-mustgather` -|Data collection for Local Storage Operator. -endif::openshift-dedicated[] - -|=== - -endif::openshift-origin[] - -endif::from-main-support-section[] - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -ifndef::openshift-dedicated[] -* The {product-title} CLI (`oc`) installed. -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -* The OpenShift CLI (`oc`) installed. -endif::openshift-dedicated[] - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. - -ifndef::openshift-origin[] - -. Run the `oc adm must-gather` command with one or more `--image` or `--image-stream` arguments. -+ -[NOTE] -==== -* To collect the default `must-gather` data in addition to specific feature data, add the `--image-stream=openshift/must-gather` argument. - -* For information on gathering data about the Custom Metrics Autoscaler, see the Additional resources section that follows. -==== -+ -For example, the following command gathers both the default cluster data and information specific to {VirtProductName}: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather \ - --image-stream=openshift/must-gather \ <1> - --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v{HCOVersion} <2> ----- -<1> The default {product-title} `must-gather` image -<2> The must-gather image for {VirtProductName} -+ -You can use the `must-gather` tool with additional arguments to gather data that is specifically related to OpenShift Logging and the -ifndef::openshift-dedicated[] -Red Hat OpenShift -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] -Cluster -endif::openshift-dedicated[] -Logging Operator in your cluster. 
For OpenShift Logging, run the following command: -+ -[source,terminal] ----- -$ oc adm must-gather --image=$(oc -n openshift-logging get deployment.apps/cluster-logging-operator \ - -o jsonpath='{.spec.template.spec.containers[?(@.name == "cluster-logging-operator")].image}') ----- -+ -.Example `must-gather` output for OpenShift Logging -[%collapsible] -==== -[source,terminal] ----- -├── cluster-logging -│ ├── clo -│ │ ├── cluster-logging-operator-74dd5994f-6ttgt -│ │ ├── clusterlogforwarder_cr -│ │ ├── cr -│ │ ├── csv -│ │ ├── deployment -│ │ └── logforwarding_cr -│ ├── collector -│ │ ├── fluentd-2tr64 -ifdef::openshift-dedicated[] -│ ├── curator -│ │ └── curator-1596028500-zkz4s -endif::openshift-dedicated[] -│ ├── eo -│ │ ├── csv -│ │ ├── deployment -│ │ └── elasticsearch-operator-7dc7d97b9d-jb4r4 -│ ├── es -│ │ ├── cluster-elasticsearch -│ │ │ ├── aliases -│ │ │ ├── health -│ │ │ ├── indices -│ │ │ ├── latest_documents.json -│ │ │ ├── nodes -│ │ │ ├── nodes_stats.json -│ │ │ └── thread_pool -│ │ ├── cr -│ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -│ │ └── logs -│ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -│ ├── install -│ │ ├── co_logs -│ │ ├── install_plan -│ │ ├── olmo_logs -│ │ └── subscription -│ └── kibana -│ ├── cr -│ ├── kibana-9d69668d4-2rkvz -├── cluster-scoped-resources -│ └── core -│ ├── nodes -│ │ ├── ip-10-0-146-180.eu-west-1.compute.internal.yaml -│ └── persistentvolumes -│ ├── pvc-0a8d65d9-54aa-4c44-9ecc-33d9381e41c1.yaml -├── event-filter.html -├── gather-debug.log -└── namespaces - ├── openshift-logging - │ ├── apps - │ │ ├── daemonsets.yaml - │ │ ├── deployments.yaml - │ │ ├── replicasets.yaml - │ │ └── statefulsets.yaml - │ ├── batch - │ │ ├── cronjobs.yaml - │ │ └── jobs.yaml - │ ├── core - │ │ ├── configmaps.yaml - │ │ ├── endpoints.yaml - │ │ ├── events -ifndef::openshift-dedicated[] - │ │ │ ├── elasticsearch-im-app-1596020400-gm6nl.1626341a296c16a1.yaml - │ │ │ ├── elasticsearch-im-audit-1596020400-9l9n4.1626341a2af81bbd.yaml - │ │ │ ├── elasticsearch-im-infra-1596020400-v98tk.1626341a2d821069.yaml - │ │ │ ├── elasticsearch-im-app-1596020400-cc5vc.1626341a3019b238.yaml - │ │ │ ├── elasticsearch-im-audit-1596020400-s8d5s.1626341a31f7b315.yaml - │ │ │ ├── elasticsearch-im-infra-1596020400-7mgv8.1626341a35ea59ed.yaml -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] - │ │ │ ├── curator-1596021300-wn2ks.162634ebf0055a94.yaml - │ │ │ ├── curator.162638330681bee2.yaml - │ │ │ ├── elasticsearch-delete-app-1596020400-gm6nl.1626341a296c16a1.yaml - │ │ │ ├── elasticsearch-delete-audit-1596020400-9l9n4.1626341a2af81bbd.yaml - │ │ │ ├── elasticsearch-delete-infra-1596020400-v98tk.1626341a2d821069.yaml - │ │ │ ├── elasticsearch-rollover-app-1596020400-cc5vc.1626341a3019b238.yaml - │ │ │ ├── elasticsearch-rollover-audit-1596020400-s8d5s.1626341a31f7b315.yaml - │ │ │ ├── elasticsearch-rollover-infra-1596020400-7mgv8.1626341a35ea59ed.yaml -endif::openshift-dedicated[] - │ │ ├── events.yaml - │ │ ├── persistentvolumeclaims.yaml - │ │ ├── pods.yaml - │ │ ├── replicationcontrollers.yaml - │ │ ├── secrets.yaml - │ │ └── services.yaml - │ ├── openshift-logging.yaml - │ ├── pods - │ │ ├── cluster-logging-operator-74dd5994f-6ttgt - │ │ │ ├── cluster-logging-operator - │ │ │ │ └── cluster-logging-operator - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ └── cluster-logging-operator-74dd5994f-6ttgt.yaml - │ │ ├── cluster-logging-operator-registry-6df49d7d4-mxxff - │ │ │ ├── 
cluster-logging-operator-registry - │ │ │ │ └── cluster-logging-operator-registry - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── cluster-logging-operator-registry-6df49d7d4-mxxff.yaml - │ │ │ └── mutate-csv-and-generate-sqlite-db - │ │ │ └── mutate-csv-and-generate-sqlite-db - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log -ifdef::openshift-dedicated[] - │ │ ├── curator-1596028500-zkz4s -endif::openshift-dedicated[] - │ │ ├── elasticsearch-cdm-lp8l38m0-1-794d6dd989-4jxms -ifndef::openshift-dedicated[] - │ │ ├── elasticsearch-im-app-1596030300-bpgcx - │ │ │ ├── elasticsearch-im-app-1596030300-bpgcx.yaml -endif::openshift-dedicated[] -ifdef::openshift-dedicated[] - │ │ ├── elasticsearch-delete-app-1596030300-bpgcx - │ │ │ ├── elasticsearch-delete-app-1596030300-bpgcx.yaml -endif::openshift-dedicated[] - │ │ │ └── indexmanagement - │ │ │ └── indexmanagement - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ ├── fluentd-2tr64 - │ │ │ ├── fluentd - │ │ │ │ └── fluentd - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── fluentd-2tr64.yaml - │ │ │ └── fluentd-init - │ │ │ └── fluentd-init - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ ├── kibana-9d69668d4-2rkvz - │ │ │ ├── kibana - │ │ │ │ └── kibana - │ │ │ │ └── logs - │ │ │ │ ├── current.log - │ │ │ │ ├── previous.insecure.log - │ │ │ │ └── previous.log - │ │ │ ├── kibana-9d69668d4-2rkvz.yaml - │ │ │ └── kibana-proxy - │ │ │ └── kibana-proxy - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ └── route.openshift.io - │ └── routes.yaml - └── openshift-operators-redhat - ├── ... ----- -==== -endif::openshift-origin[] - -. Run the `oc adm must-gather` command with one or more `--image` or `--image-stream` arguments. For example, the following command gathers both the default cluster data and information specific to KubeVirt: -+ -[source,terminal] ----- -$ oc adm must-gather \ - --image-stream=openshift/must-gather \ <1> - --image=quay.io/kubevirt/must-gather <2> ----- -<1> The default {product-title} `must-gather` image -<2> The must-gather image for KubeVirt - -ifndef::openshift-origin[] -. Create a compressed file from the `must-gather` directory that was just created in your working directory. For example, on a computer that uses a Linux -operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.5421342344627712289/ <1> ----- -<1> Make sure to replace `must-gather-local.5421342344627712289/` with the -actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. 
-endif::openshift-origin[] - -ifeval::["{context}" == "gathering-cluster-data"] -:!from-main-support-section: -:!VirtProductName: -endif::[] diff --git a/modules/gathering-kubelet-logs.adoc b/modules/gathering-kubelet-logs.adoc deleted file mode 100644 index f6bbce89771b..000000000000 --- a/modules/gathering-kubelet-logs.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: PROCEDURE -[id="gathering-kubelet-logs_{context}"] -= Gathering kubelet logs - -.Procedure - -* After the kubelet's log level verbosity is configured properly, you can gather logs by running the following commands: -+ -[source,terminal] ----- -$ oc adm node-logs --role master -u kubelet ----- -+ -[source,terminal] ----- -$ oc adm node-logs --role worker -u kubelet ----- -+ -Alternatively, inside the node, run the following command: -+ -[source,terminal] ----- -$ journalctl -b -f -u kubelet.service ----- - -* To collect master container logs, run the following command: -+ -[source,terminal] ----- -$ sudo tail -f /var/log/containers/* ----- - -* To directly gather the logs of all nodes, run the following command: -+ -[source,terminal] ----- -- for n in $(oc get node --no-headers | awk '{print $1}'); do oc adm node-logs $n | gzip > $n.log.gz; done ----- diff --git a/modules/gathering-operator-logs.adoc b/modules/gathering-operator-logs.adoc deleted file mode 100644 index db808849d4d1..000000000000 --- a/modules/gathering-operator-logs.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-operator-issues.adoc - -:_content-type: PROCEDURE -[id="gathering-operator-logs_{context}"] -= Gathering Operator logs - -If you experience Operator issues, you can gather detailed diagnostic information from Operator pod logs. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). -* You have the fully qualified domain names of the control plane or control plane machines. - -.Procedure - -. List the Operator pods that are running in the Operator's namespace, plus the pod status, restarts, and age: -+ -[source,terminal] ----- -$ oc get pods -n <operator_namespace> ----- - -. Review logs for an Operator pod: -+ -[source,terminal] ----- -$ oc logs pod/<pod_name> -n <operator_namespace> ----- -+ -If an Operator pod has multiple containers, the preceding command will produce an error that includes the name of each container. Query logs from an individual container: -+ -[source,terminal] ----- -$ oc logs pod/<operator_pod_name> -c <container_name> -n <operator_namespace> ----- - -. If the API is not functional, review Operator pod and container logs on each control plane node by using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values. -.. List pods on each control plane node: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl pods ----- -+ -.. For any Operator pods not showing a `Ready` status, inspect the pod's status in detail. Replace `<operator_pod_id>` with the Operator pod's ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspectp <operator_pod_id> ----- -+ -.. List containers related to an Operator pod: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl ps --pod=<operator_pod_id> ----- -+ -.. 
For any Operator container not showing a `Ready` status, inspect the container's status in detail. Replace `<container_id>` with a container ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspect <container_id> ----- -+ -.. Review the logs for any Operator containers not showing a `Ready` status. Replace `<container_id>` with a container ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl logs -f <container_id> ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== diff --git a/modules/gathering-s2i-diagnostic-data.adoc b/modules/gathering-s2i-diagnostic-data.adoc deleted file mode 100644 index 2a9dcaf18369..000000000000 --- a/modules/gathering-s2i-diagnostic-data.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-s2i.adoc - -:_content-type: PROCEDURE -[id="gathering-s2i-diagnostic-data_{context}"] -= Gathering Source-to-Image diagnostic data - -The S2I tool runs a build pod and a deployment pod in sequence. The deployment pod is responsible for deploying the application pods based on the application container image created in the build stage. Watch build, deployment and application pod status to determine where in the S2I process a failure occurs. Then, focus diagnostic data collection accordingly. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Watch the pod status throughout the S2I process to determine at which stage a failure occurs: -+ -[source,terminal] ----- -$ oc get pods -w <1> ----- -<1> Use `-w` to monitor pods for changes until you quit the command using `Ctrl+C`. - -. Review a failed pod's logs for errors. -+ -* *If the build pod fails*, review the build pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-build ----- -+ -[NOTE] -==== -Alternatively, you can review the build configuration's logs using `oc logs -f bc/<application_name>`. The build configuration's logs include the logs from the build pod. -==== -+ -* *If the deployment pod fails*, review the deployment pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-deploy ----- -+ -[NOTE] -==== -Alternatively, you can review the deployment configuration's logs using `oc logs -f dc/<application_name>`. This outputs logs from the deployment pod until the deployment pod completes successfully. The command outputs logs from the application pods if you run it after the deployment pod has completed. After a deployment pod completes, its logs can still be accessed by running `oc logs -f pod/<application_name>-<build_number>-deploy`. 
-==== -+ -* *If an application pod fails, or if an application is not behaving as expected within a running application pod*, review the application pod's logs: -+ -[source,terminal] ----- -$ oc logs -f pod/<application_name>-<build_number>-<random_string> ----- diff --git a/modules/gcp-limits.adoc b/modules/gcp-limits.adoc deleted file mode 100644 index 91b114dda5d6..000000000000 --- a/modules/gcp-limits.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_planning/gcp-ccs.adoc - -[id="gcp-limits_{context}"] -= GCP account limits - - -The {product-title} cluster uses a number of Google Cloud Platform (GCP) components, but the default link:https://cloud.google.com/docs/quota[quotas] do not affect your ability to install an {product-title} cluster. - -A standard {product-title} cluster uses the following resources. Note that some resources are required only during the bootstrap process and are removed after the cluster deploys. - -.GCP resources used in a default cluster - -[cols="2a,2a,2a,2a,2a",options="header"] -|=== -|Service -|Component -|Location -|Total resources required -|Resources removed after bootstrap - - -|Service account |IAM |Global |5 |0 -|Firewall Rules |Compute |Global |11 |1 -|Forwarding Rules |Compute |Global |2 |0 -|In-use global IP addresses |Compute |Global |4 |1 -|Health checks |Compute |Global |3 |0 -|Images |Compute |Global |1 |0 -|Networks |Compute |Global |2 |0 -|Static IP addresses |Compute |Region |4 |1 -|Routers |Compute |Global |1 |0 -|Routes |Compute |Global |2 |0 -|Subnetworks |Compute |Global |2 |0 -|Target Pools |Compute |Global |3 |0 -|CPUs |Compute |Region |28 |4 -|Persistent Disk SSD (GB) |Compute |Region |896 |128 - -|=== - -[NOTE] -==== -If any of the quotas are insufficient during installation, the installation program displays an error that states both which quota was exceeded and the region. -==== - -Be sure to consider your actual cluster size, planned cluster growth, and any usage from other clusters that are associated with your account. The CPU, Static IP addresses, and Persistent Disk SSD (Storage) quotas are the ones that are most likely to be insufficient. - -If you plan to deploy your cluster in one of the following regions, you will exceed the maximum storage quota and are likely to exceed the CPU quota limit: - -* asia-east2 -* asia-northeast2 -* asia-south1 -* australia-southeast1 -* europe-north1 -* europe-west2 -* europe-west3 -* europe-west6 -* northamerica-northeast1 -* southamerica-east1 -* us-west2 - -You can increase resource quotas from the link:https://console.cloud.google.com/iam-admin/quotas[GCP console], but you might need to file a support ticket. Be sure to plan your cluster size early so that you can allow time to resolve the support ticket before you install your {product-title} cluster. 
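As a rough pre-installation check, you can compare the values in the preceding table with your current regional usage. The following sketch assumes that the `gcloud` CLI is installed and authenticated, and uses `us-east1` only as an example region:

[source,terminal]
----
$ gcloud compute regions describe us-east1 <1>
----
<1> The output includes a `quotas` list that reports the `limit` and current `usage` for metrics such as `CPUS`, `STATIC_ADDRESSES`, and `SSD_TOTAL_GB`, which are the quotas most likely to be insufficient.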
diff --git a/modules/generating-icsp-object-scoped-to-a-registry.adoc b/modules/generating-icsp-object-scoped-to-a-registry.adoc deleted file mode 100644 index 01d67e424b25..000000000000 --- a/modules/generating-icsp-object-scoped-to-a-registry.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/updating-restricted-network-cluster/restricted-network-update.adoc - -:_content-type: PROCEDURE -[id="generating-icsp-object-scoped-to-a-registry_{context}"] -= Widening the scope of the mirror image catalog to reduce the frequency of cluster node reboots - -You can scope the mirrored image catalog at the repository level or the wider registry level. A widely scoped `ImageContentSourcePolicy` resource reduces the number of times the nodes need to reboot in response to changes to the resource. - -To widen the scope of the mirror image catalog in the `ImageContentSourcePolicy` resource, perform the following procedure. - -.Prerequisites - -* Install the {product-title} CLI `oc`. -* Log in as a user with `cluster-admin` privileges. -* Configure a mirrored image catalog for use in your disconnected cluster. - -.Procedure - -. Run the following command, specifying values for `<local_registry>`, `<pull_spec>`, and `<pull_secret_file>`: -+ -[source,terminal] ----- -$ oc adm catalog mirror <local_registry>/<pull_spec> <local_registry> -a <pull_secret_file> --icsp-scope=registry ----- -+ -where: -+ --- -<local_registry>:: is the local registry you have configured for your disconnected cluster, for example, `local.registry:5000`. -<pull_spec>:: is the pull specification as configured in your disconnected registry, for example, `redhat/redhat-operator-index:v{product-version}` -<pull_secret_file>:: is the `registry.redhat.io` pull secret in `.json` file format. You can download the {cluster-manager-url-pull}. --- -+ -The `oc adm catalog mirror` command creates a `/redhat-operator-index-manifests` directory and generates `imageContentSourcePolicy.yaml`, `catalogSource.yaml`, and `mapping.txt` files. - -. Apply the new `ImageContentSourcePolicy` resource to the cluster: -+ -[source,terminal] ----- -$ oc apply -f imageContentSourcePolicy.yaml ----- - -.Verification - -* Verify that `oc apply` successfully applied the change to `ImageContentSourcePolicy`: -+ -[source,terminal] ----- -$ oc get ImageContentSourcePolicy -o yaml ----- -+ -.Example output - -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1alpha1 - kind: ImageContentSourcePolicy - metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operator.openshift.io/v1alpha1","kind":"ImageContentSourcePolicy","metadata":{"annotations":{},"name":"redhat-operator-index"},"spec":{"repositoryDigestMirrors":[{"mirrors":["local.registry:5000"],"source":"registry.redhat.io"}]}} -... ----- - -After you update the `ImageContentSourcePolicy` resource, {product-title} deploys the new settings to each node and the cluster starts using the mirrored repository for requests to the source repository. 
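The node updates that follow an `ImageContentSourcePolicy` change are rolled out through the machine config pools. If you want to confirm when the rollout finishes, one option is to monitor the pools; the `UPDATING` column reports `True` while nodes apply the change, and `UPDATED` returns to `True` for each pool when the rollout is complete:

[source,terminal]
----
$ oc get machineconfigpool
----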
diff --git a/modules/getting-cluster-version-status-and-update-details.adoc b/modules/getting-cluster-version-status-and-update-details.adoc deleted file mode 100644 index fddb5c515e0b..000000000000 --- a/modules/getting-cluster-version-status-and-update-details.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -:_content-type: PROCEDURE -[id="getting-cluster-version-and-update-details_{context}"] -= Getting cluster version, status, and update details - -You can view the cluster version and status by running the `oc get clusterversion` command. If the status shows that the installation is still progressing, you can review the status of the Operators for more information. - -You can also list the current update channel and review the available cluster updates. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Obtain the cluster version and overall status: -+ -[source,terminal] ----- -$ oc get clusterversion ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.6.4 True False 6m25s Cluster version is 4.6.4 ----- -+ -The example output indicates that the cluster has been installed successfully. - -. If the cluster status indicates that the installation is still progressing, you can obtain more detailed progress information by checking the status of the Operators: -+ -[source,terminal] ----- -$ oc get clusteroperators.config.openshift.io ----- - -. View a detailed summary of cluster specifications, update availability, and update history: -+ -[source,terminal] ----- -$ oc describe clusterversion ----- - -. List the current update channel: -+ -[source,terminal] ----- -$ oc get clusterversion -o jsonpath='{.items[0].spec}{"\n"}' ----- -+ -.Example output -[source,terminal] ----- -{"channel":"stable-4.6","clusterID":"245539c1-72a3-41aa-9cec-72ed8cf25c5c"} ----- - -. Review the available cluster updates: -+ -[source,terminal] ----- -$ oc adm upgrade ----- -+ -.Example output -[source,terminal] ----- -Cluster version is 4.6.4 - -Updates: - -VERSION IMAGE -4.6.6 quay.io/openshift-release-dev/ocp-release@sha256:c7e8f18e8116356701bd23ae3a23fb9892dd5ea66c8300662ef30563d7104f39 ----- diff --git a/modules/getting-started-cli-connecting-a-database.adoc b/modules/getting-started-cli-connecting-a-database.adoc deleted file mode 100644 index f535643f446b..000000000000 --- a/modules/getting-started-cli-connecting-a-database.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-connecting-database_{context}"] -= Connecting to a database - -Deploy and connect a MongoDB database where the `national-parks-app` application stores location information. -Once you mark the `national-parks-app` application as a backend for the map visualization tool, `parksmap` deployment uses the {product-title} discover mechanism to display the map automatically. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. 
- -.Procedure - -* To connect to a database, enter the following command: -+ -[source,terminal] ----- -$ oc new-app quay.io/centos7/mongodb-36-centos7 --name mongodb-nationalparks -e MONGODB_USER=mongodb -e MONGODB_PASSWORD=mongodb -e MONGODB_DATABASE=mongodb -e MONGODB_ADMIN_PASSWORD=mongodb -l 'app.kubernetes.io/part-of=national-parks-app,app.kubernetes.io/name=mongodb' ----- -+ -.Example output -+ -[source,terminal] ----- ---> Found container image dc18f52 (8 months old) from quay.io for "quay.io/centos7/mongodb-36-centos7" - - MongoDB 3.6 - ----------- - MongoDB (from humongous) is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemas. This container image contains programs to run mongod server. - - Tags: database, mongodb, rh-mongodb36 - - * An image stream tag will be created as "mongodb-nationalparks:latest" that will track this image - ---> Creating resources with label app.kubernetes.io/name=mongodb,app.kubernetes.io/part-of=national-parks-app ... - imagestream.image.openshift.io "mongodb-nationalparks" created - deployment.apps "mongodb-nationalparks" created - service "mongodb-nationalparks" created ---> Success ----- diff --git a/modules/getting-started-cli-creating-new-project.adoc b/modules/getting-started-cli-creating-new-project.adoc deleted file mode 100644 index dd9c062da08a..000000000000 --- a/modules/getting-started-cli-creating-new-project.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-new-project_{context}"] -= Creating a new project - -A project enables a community of users to organize and manage their content in isolation. Projects are {product-title} extensions to Kubernetes namespaces. Projects have additional features that enable user self-provisioning. - -Users must receive access to projects from administrators. Cluster administrators can allow developers to create their own projects. In most cases, users automatically have access to their own projects. - -Each project has its own set of objects, policies, constraints, and service accounts. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -.Procedure - -* To create a new project, enter the following command: -+ -[source,terminal] ----- -$ oc new-project user-getting-started --display-name="Getting Started with OpenShift" ----- -+ -.Example output - -[source,terminal] ----- -Now using project "user-getting-started" on server "https://openshift.example.com:6443". ----- diff --git a/modules/getting-started-cli-creating-route.adoc b/modules/getting-started-cli-creating-route.adoc deleted file mode 100644 index d13359c57614..000000000000 --- a/modules/getting-started-cli-creating-route.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-route_{context}"] -= Creating a route - -External clients can access applications running on {product-title} through the routing layer and the data object behind that is a _route_. The default {product-title} router (HAProxy) uses the HTTP header of the incoming request to determine where to proxy the connection. - -Optionally, you can define security, such as TLS, for the route. 
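The procedure below creates an edge-terminated route that uses the router's default certificate. If you want to supply your own certificate instead, the `oc create route edge` command also accepts certificate options; the following sketch uses `tls.crt` and `tls.key` as placeholder file names for your certificate and private key:

[source,terminal]
----
$ oc create route edge parksmap --service=parksmap --cert=tls.crt --key=tls.key <1>
----
<1> `tls.crt` and `tls.key` are examples only; replace them with the paths to your own certificate and key files.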
- -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. -* You must have `cluster-admin` or `project-admin` privileges. - -.Procedure - -. To retrieve the created application service, enter the following command: -+ -[source,terminal] ----- -$ oc get service ----- -+ -.Example output -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -parksmap ClusterIP <your-cluster-IP> <123.456.789> 8080/TCP 8m29s ----- - -. To create a route, enter the following command: -+ -[source,terminal] ----- -$ oc create route edge parksmap --service=parksmap ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/parksmap created ----- - -. To retrieve the created application route, enter the following command: -+ -[source,terminal] ----- -$ oc get route ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- diff --git a/modules/getting-started-cli-creating-secret.adoc b/modules/getting-started-cli-creating-secret.adoc deleted file mode 100644 index b906e6e5bf2c..000000000000 --- a/modules/getting-started-cli-creating-secret.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-creating-secret_{context}"] - -= Creating a secret - -The `Secret` object provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, private source repository credentials, and so on. -Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin or the system can use secrets to perform actions on behalf of a pod. -The following procedure adds the secret `nationalparks-mongodb-parameters` and mounts it to the `nationalparks` workload. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. To create a secret, enter the following command: -+ -[source,terminal] ----- -$ oc create secret generic nationalparks-mongodb-parameters --from-literal=DATABASE_SERVICE_NAME=mongodb-nationalparks --from-literal=MONGODB_USER=mongodb --from-literal=MONGODB_PASSWORD=mongodb --from-literal=MONGODB_DATABASE=mongodb --from-literal=MONGODB_ADMIN_PASSWORD=mongodb ----- -+ -.Example output -+ -[source,terminal] ----- -secret/nationalparks-mongodb-parameters created ----- - -. To update the environment variable to attach the mongodb secret to the `nationalpartks` workload, enter the following command: -+ -[source,terminal] ------ -$ oc set env --from=secret/nationalparks-mongodb-parameters deploy/nationalparks ------ -+ -.Example output -+ -[source,terminal] ----- -deployment.apps/nationalparks updated ----- - -. To show the status of the `nationalparks` deployment, enter the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -deployment "nationalparks" successfully rolled out ----- - -. 
To show the status of the `mongodb-nationalparks` deployment, enter the following command: -+ -[source,terminal] ----- -$ oc rollout status deployment mongodb-nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -deployment "nationalparks" successfully rolled out -deployment "mongodb-nationalparks" successfully rolled out ----- diff --git a/modules/getting-started-cli-deploying-first-image.adoc b/modules/getting-started-cli-deploying-first-image.adoc deleted file mode 100644 index 85819051fc35..000000000000 --- a/modules/getting-started-cli-deploying-first-image.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-deploying-first-image_{context}"] -= Deploying your first image - -The simplest way to deploy an application in {product-title} is to run an existing container image. The following procedure deploys a front-end component of an application called `national-parks-app`. The web application displays an interactive map. The map displays the location of major national parks across the world. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* Install the OpenShift CLI (`oc`). - -.Procedure - -* To deploy an application, enter the following command: -+ -[source,terminal] ----- -$ oc new-app quay.io/openshiftroadshow/parksmap:latest --name=parksmap -l 'app=national-parks-app,component=parksmap,role=frontend,app.kubernetes.io/part-of=national-parks-app' ----- -+ -.Example output -[source,text] ----- ---> Found container image 0c2f55f (12 months old) from quay.io for "quay.io/openshiftroadshow/parksmap:latest" - - * An image stream tag will be created as "parksmap:latest" that will track this image - ---> Creating resources with label app=national-parks-app,app.kubernetes.io/part-of=national-parks-app,component=parksmap,role=frontend ... - imagestream.image.openshift.io "parksmap" created - deployment.apps "parksmap" created - service "parksmap" created ---> Success ----- diff --git a/modules/getting-started-cli-deploying-python-app.adoc b/modules/getting-started-cli-deploying-python-app.adoc deleted file mode 100644 index 2d13040c2f02..000000000000 --- a/modules/getting-started-cli-deploying-python-app.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-deploying-python-app_{context}"] -= Deploying a Python application - -The following procedure deploys a back-end service for the `parksmap` application. The Python application performs 2D geo-spatial queries against a MongoDB database to locate and return map coordinates of all national parks in the world. - -The deployed back-end service is `nationalparks`. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. 
To create a new Python application, enter the following command: -+ -[source,terminal] ----- -$ oc new-app python~https://github.com/openshift-roadshow/nationalparks-py.git --name nationalparks -l 'app=national-parks-app,component=nationalparks,role=backend,app.kubernetes.io/part-of=national-parks-app,app.kubernetes.io/name=python' --allow-missing-images=true ----- -+ -.Example output -+ -[source,text] ----- ---> Found image 0406f6c (13 days old) in image stream "openshift/python" under tag "3.9-ubi9" for "python" - - Python 3.9 - ---------- - Python 3.9 available as container is a base platform for building and running various Python 3.9 applications and frameworks. Python is an easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming. Python's elegant syntax and dynamic typing, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. - - Tags: builder, python, python39, python-39, rh-python39 - - * A source build using source code from https://github.com/openshift-roadshow/nationalparks-py.git will be created - * The resulting image will be pushed to image stream tag "nationalparks:latest" - * Use 'oc start-build' to trigger a new build - ---> Creating resources with label app=national-parks-app,app.kubernetes.io/name=python,app.kubernetes.io/part-of=national-parks-app,component=nationalparks,role=backend ... - imagestream.image.openshift.io "nationalparks" created - buildconfig.build.openshift.io "nationalparks" created - deployment.apps "nationalparks" created - service "nationalparks" created ---> Success ----- - -. To create a route to expose your application, `nationalparks`, enter the following command: -+ -[source,terminal] ----- -$ oc create route edge nationalparks --service=nationalparks ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/parksmap created ----- - -. To retrieve the created application route, enter the following command: -+ -[source,terminal] ----- -$ oc get route ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -nationalparks nationalparks-user-getting-started.apps.cluster.example.com nationalparks 8080-tcp edge None -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- diff --git a/modules/getting-started-cli-examining-pod.adoc b/modules/getting-started-cli-examining-pod.adoc deleted file mode 100644 index 2d37160deb11..000000000000 --- a/modules/getting-started-cli-examining-pod.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-examining-pod_{context}"] -= Examining the pod - -{product-title} leverages the Kubernetes concept of a pod, which is one or more containers deployed together on one host, and the smallest compute unit that can be defined, deployed, and managed. -Pods are the rough equivalent of a machine instance, physical or virtual, to a container. - -You can view the pods in your cluster and to determine the health of those pods and the cluster as a whole. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure -. 
To list all pods with node names, enter the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-5f9579955-6sng8 1/1 Running 0 77s ----- - -. To list all pod details, enter the following command: -+ -[source,terminal] ----- -$ oc describe pods ----- -+ -.Example output -[source,terminal] ----- -Name: parksmap-848bd4954b-5pvcc -Namespace: user-getting-started -Priority: 0 -Node: ci-ln-fr1rt92-72292-4fzf9-worker-a-g9g7c/10.0.128.4 -Start Time: Sun, 13 Feb 2022 14:14:14 -0500 -Labels: app=national-parks-app - app.kubernetes.io/part-of=national-parks-app - component=parksmap - deployment=parksmap - pod-template-hash=848bd4954b - role=frontend -Annotations: k8s.v1.cni.cncf.io/network-status: - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.14" - ], - "default": true, - "dns": {} - }] - k8s.v1.cni.cncf.io/network-status: - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.131.0.14" - ], - "default": true, - "dns": {} - }] - openshift.io/generated-by: OpenShiftNewApp - openshift.io/scc: restricted -Status: Running -IP: 10.131.0.14 -IPs: - IP: 10.131.0.14 -Controlled By: ReplicaSet/parksmap-848bd4954b -Containers: - parksmap: - Container ID: cri-o://4b2625d4f61861e33cc95ad6d455915ea8ff6b75e17650538cc33c1e3e26aeb8 - Image: quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b - Image ID: quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b - Port: 8080/TCP - Host Port: 0/TCP - State: Running - Started: Sun, 13 Feb 2022 14:14:25 -0500 - Ready: True - Restart Count: 0 - Environment: <none> - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-6f844 (ro) -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True -Volumes: - kube-api-access-6f844: - Type: Projected (a volume that contains injected data from multiple sources) - TokenExpirationSeconds: 3607 - ConfigMapName: kube-root-ca.crt - ConfigMapOptional: <nil> - DownwardAPI: true - ConfigMapName: openshift-service-ca.crt - ConfigMapOptional: <nil> -QoS Class: BestEffort -Node-Selectors: <none> -Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s - node.kubernetes.io/unreachable:NoExecute op=Exists for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 46s default-scheduler Successfully assigned user-getting-started/parksmap-848bd4954b-5pvcc to ci-ln-fr1rt92-72292-4fzf9-worker-a-g9g7c - Normal AddedInterface 44s multus Add eth0 [10.131.0.14/23] from openshift-sdn - Normal Pulling 44s kubelet Pulling image "quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b" - Normal Pulled 35s kubelet Successfully pulled image "quay.io/openshiftroadshow/parksmap@sha256:89d1e324846cb431df9039e1a7fd0ed2ba0c51aafbae73f2abd70a83d5fa173b" in 9.49243308s - Normal Created 35s kubelet Created container parksmap - Normal Started 35s kubelet Started container parksmap ----- diff --git a/modules/getting-started-cli-granting-permissions.adoc b/modules/getting-started-cli-granting-permissions.adoc deleted file mode 100644 index 814b42b5b3fc..000000000000 --- a/modules/getting-started-cli-granting-permissions.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - 
-:_content-type: PROCEDURE -[id="getting-started-cli-granting-permissions_{context}"] -= Granting view permissions - -{product-title} automatically creates a few special service accounts in every project. The default service account takes responsibility for running the pods. {product-title} uses and injects this service account into every pod that launches. - -The following procedure creates a `RoleBinding` object for the default `ServiceAccount` object. The service account communicates with the {product-title} API to learn about pods, services, and resources within the project. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. -* You must have `cluster-admin` or `project-admin` privileges. - -.Procedure - -* To add the view role to the default service account in the `user-getting-started project`, enter the following command: -+ -[source,terminal] ----- -$ oc adm policy add-role-to-user view -z default -n user-getting-started ----- diff --git a/modules/getting-started-cli-load-data-output.adoc b/modules/getting-started-cli-load-data-output.adoc deleted file mode 100644 index 6ae8521b30e7..000000000000 --- a/modules/getting-started-cli-load-data-output.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-load-data-output_{context}"] - -= Loading data and displaying the national parks map - -You deployed the `parksmap` and `nationalparks` applications and then deployed the `mongodb-nationalparks` database. However, no data has been loaded _into_ the database. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -. To load national parks data, enter the following command: -+ -[source,terminal] ----- -$ oc exec $(oc get pods -l component=nationalparks | tail -n 1 | awk '{print $1;}') -- curl -s http://localhost:8080/ws/data/load ----- -+ -.Example output -+ -[source,text] ----- -"Items inserted in database: 2893" ----- - -. To verify that your data is loaded properly, enter the following command: -+ -[source,terminal] ----- -$ oc exec $(oc get pods -l component=nationalparks | tail -n 1 | awk '{print $1;}') -- curl -s http://localhost:8080/ws/data/all ----- -+ -.Example output (trimmed) -+ -[source,terminal] ----- -, {"id": "Great Zimbabwe", "latitude": "-20.2674635", "longitude": "30.9337986", "name": "Great Zimbabwe"}] ----- - -. To add labels to the route, enter the following command: -+ -[source,terminal] ----- -$ oc label route nationalparks type=parksmap-backend ----- -+ -.Example output -+ -[source,terminal] ----- -route.route.openshift.io/nationalparks labeled ----- - -. To retrieve your routes to view your map, enter the following command: -+ -[source,terminal] ----- -$ oc get routes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -nationalparks nationalparks-user-getting-started.apps.cluster.example.com nationalparks 8080-tcp edge None -parksmap parksmap-user-getting-started.apps.cluster.example.com parksmap 8080-tcp edge None ----- - -. Copy and paste the `HOST/PORT` path you retrieved above into your web browser. Your browser should display a map of the national parks across the world. 
-+ -.National parks across the world -image::getting-started-map-national-parks.png[A map of the national parks across the world is displayed with location tracking.] diff --git a/modules/getting-started-cli-login.adoc b/modules/getting-started-cli-login.adoc deleted file mode 100644 index 369673fd0b43..000000000000 --- a/modules/getting-started-cli-login.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-login_{context}"] -= Logging in to the CLI - -You can log in to the OpenShift CLI (`oc`) to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). - -.Procedure - -* Log into {product-title} from the CLI using your username and password or with an OAuth token: -** With username and password: -+ -[source,terminal] ----- -$ oc login -u=<username> -p=<password> --server=<your-openshift-server> --insecure-skip-tls-verify ----- -** With an OAuth token: -+ -[source,terminal] ----- -$ oc login <https://api.your-openshift-server.com> --token=<tokenID> ----- - -You can now create a project or issue other commands for managing your cluster. diff --git a/modules/getting-started-cli-scaling-app.adoc b/modules/getting-started-cli-scaling-app.adoc deleted file mode 100644 index 20e433ac8ad4..000000000000 --- a/modules/getting-started-cli-scaling-app.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-cli.adoc - -:_content-type: PROCEDURE -[id="getting-started-cli-scaling-app_{context}"] -= Scaling the application - -In Kubernetes, a `Deployment` object defines how an application deploys. In most cases, users use `Pod`, `Service`, `ReplicaSets`, and `Deployment` resources together. In most cases, {product-title} creates the resources for you. - -When you deploy the `national-parks-app` image, a deployment resource is created. In this example, only one `Pod` is deployed. - -The following procedure scales the `national-parks-image` to use two instances. - -.Prerequisites - -* You must have access to an {product-title} cluster. -* You must have installed the OpenShift CLI (`oc`). -* You have a deployed image. - -.Procedure - -* To scale your application from one pod instance to two pod instances, enter the following command: -+ -[source,terminal] ----- -$ oc scale --current-replicas=1 --replicas=2 deployment/parksmap ----- -+ -.Example output -[source,text] ----- -deployment.apps/parksmap scaled ----- - -.Verification -. To ensure that your application scaled properly, enter the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -parksmap-5f9579955-6sng8 1/1 Running 0 7m39s -parksmap-5f9579955-8tgft 1/1 Running 0 24s ----- - -. 
To scale your application back down to one pod instance, enter the following command: -+ -[source,terminal] ----- -$ oc scale --current-replicas=2 --replicas=1 deployment/parksmap ----- diff --git a/modules/getting-started-openshift-common-terms.adoc b/modules/getting-started-openshift-common-terms.adoc deleted file mode 100644 index 66e46a9e2c2d..000000000000 --- a/modules/getting-started-openshift-common-terms.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-overview.adoc - -:_content-type: REFERENCE -[id="getting-started-openshift-common-terms_{context}"] -= Glossary of common terms for {product-title} - -This glossary defines common Kubernetes and {product-title} terms. - -Kubernetes:: -Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. - -Containers:: -Containers are application instances and components that run in OCI-compliant containers on the worker nodes. A container is the runtime of an Open Container Initiative (OCI)-compliant image. An image is a binary application. A worker node can run many containers. A node capacity is related to memory and CPU capabilities of the underlying resources whether they are cloud, hardware, or virtualized. - -Pod:: -A pod is one or more containers deployed together on one host. It consists of a colocated group of containers with shared resources such as volumes and IP addresses. A pod is also the smallest compute unit defined, deployed, and managed. -+ -In {product-title}, pods replace individual application containers as the smallest deployable unit. -+ -Pods are the orchestrated unit in {product-title}. {product-title} schedules and runs all containers in a pod on the same node. Complex applications are made up of many pods, each with their own containers. They interact externally and also with another inside the {product-title} environment. - -Replica set and replication controller:: -The Kubernetes replica set and the {product-title} replication controller are both available. The job of this component is to ensure the specified number of pod replicas are running at all times. If pods exit or are deleted, the replica set or replication controller starts more. If more pods are running than needed, the replica set deletes as many as necessary to match the specified number of replicas. - -Deployment and DeploymentConfig:: -{product-title} implements both Kubernetes `Deployment` objects and {product-title} `DeploymentConfigs` objects. Users may select either. -+ -`Deployment` objects control how an application is rolled out as pods. They identify the name of the container image to be taken from the registry and deployed as a pod on a node. They set the number of replicas of the pod to deploy, creating a replica set to manage the process. The labels indicated instruct the scheduler onto which nodes to deploy the pod. The set of labels is included in the pod definition that the replica set instantiates. -+ -`Deployment` objects are able to update the pods deployed onto the worker nodes based on the version of the `Deployment` objects and the various rollout strategies for managing acceptable application availability. {product-title} `DeploymentConfig` objects add the additional features of change triggers, which are able to automatically create new versions of the `Deployment` objects as new versions of the container image are available, or other changes. 
- -Service:: -A service defines a logical set of pods and access policies. It provides permanent internal IP addresses and hostnames for other applications to use as pods are created and destroyed. -+ -Service layers connect application components together. For example, a front-end web service connects to a database instance by communicating with its service. Services allow for simple internal load balancing across application components. {product-title} automatically injects service information into running containers for ease of discovery. - -Route:: -A route is a way to expose a service by giving it an externally reachable hostname, such as www.example.com. Each route consists of a route name, a service selector, and optionally a security configuration. A router can consume a defined route and the endpoints identified by its service to provide a name that lets external clients reach your applications. While it is easy to deploy a complete multi-tier application, traffic from anywhere outside the {product-title} environment cannot reach the application without the routing layer. - -Build:: -A build is the process of transforming input parameters into a resulting object. Most often, the process is used to transform input parameters or source code into a runnable image. A `BuildConfig` object is the definition of the entire build process. {product-title} leverages Kubernetes by creating containers from build images and pushing them to the integrated registry. - -Project:: -{product-title} uses projects to allow groups of users or developers to work together, serving as the unit of isolation and collaboration. It defines the scope of resources, allows project administrators and collaborators to manage resources, and restricts and tracks the user’s resources with quotas and limits. -+ -A project is a Kubernetes namespace with additional annotations. It is the central vehicle for managing access to resources for regular users. A project lets a community of users organize and manage their content in isolation from other communities. Users must receive access to projects from administrators. But cluster administrators can allow developers to create their own projects, in which case users automatically have access to their own projects. -+ -Each project has its own set of objects, policies, constraints, and service accounts. -+ -Projects are also known as namespaces. - -Operators:: -An Operator is a Kubernetes-native application. The goal of an Operator is to put operational knowledge into software. Previously this knowledge only resided in the minds of administrators, various combinations or shell scripts or automation software such as Ansible. It was outside your Kubernetes cluster and hard to integrate. With Operators, all of this changes. -+ -Operators are purpose-built for your applications. They implement and automate common Day 1 activities such as installation and configuration as well as Day 2 activities such as scaling up and down, reconfiguration, updates, backups, fail overs, and restores in a piece of software running inside your Kubernetes cluster by integrating natively with Kubernetes concepts and APIs. This is called a Kubernetes-native application. -+ -With Operators, applications must not be treated as a collection of primitives, such as pods, deployments, services, or config maps. Instead, Operators should be treated as a single object that exposes the options that make sense for the application. 
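As a rough illustration of how these terms appear in a running project, the following sketch lists several of the object types described above for the sample application in this guide. It assumes the `app=national-parks-app` label used elsewhere in these procedures:

[source,terminal]
----
$ oc get deployments,replicasets,pods,services,routes -l app=national-parks-app
----

Each resource type in the output corresponds to one of the glossary entries: the deployment manages a replica set, the replica set keeps the pods running, and the service and route expose them.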
diff --git a/modules/getting-started-web-console-connecting-a-database.adoc b/modules/getting-started-web-console-connecting-a-database.adoc deleted file mode 100644 index 585c35aac815..000000000000 --- a/modules/getting-started-web-console-connecting-a-database.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-connecting-database_{context}"] -= Connecting to a database - -Deploy and connect a MongoDB database where the `national-parks-app` application stores location information. -Once you mark the `national-parks-app` application as a backend for the map visualization tool, `parksmap` deployment uses the {product-title} discover mechanism to display the map automatically. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Container images* to open a dialog. -. In the *Image Name* field, enter `quay.io/centos7/mongodb-36-centos7`. -. In the *Runtime icon* field, search for `mongodb`. -. Scroll down to the *General* section. -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `mongodb-nationalparks` -. Select *Deployment* as the *Resource*. -. Unselect the checkbox next to *Create route to the application*. -. In the *Advanced Options* section, click *Deployment* to add environment variables to add the following environment variables: -+ -.Environment variable names and values -[cols="1,1"] -|=== -|Name |Value - -|`MONGODB_USER`|`mongodb` -|`MONGODB_PASSWORD`|`mongodb` -|`MONGODB_DATABASE`|`mongodb` -|`MONGODB_ADMIN_PASSWORD`|`mongodb` -|=== - -. Click *Create*. diff --git a/modules/getting-started-web-console-creating-new-project.adoc b/modules/getting-started-web-console-creating-new-project.adoc deleted file mode 100644 index 7fea42282167..000000000000 --- a/modules/getting-started-web-console-creating-new-project.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-creating-new-project_{context}"] -= Creating a new project - -A project enables a community of users to organize and manage their content in isolation. Projects are {product-title} extensions to Kubernetes namespaces. Projects have additional features that enable user self-provisioning. - -Users must receive access to projects from administrators. Cluster administrators can allow developers to create their own projects. In most cases, users automatically have access to their own projects. - -Each project has its own set of objects, policies, constraints, and service accounts. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have the appropriate roles and permissions in a project to create applications and other workloads in {product-title}. - -.Procedure - -. In the *+Add* view, select *Project* -> *Create Project*. -. In the *Name* field, enter `user-getting-started`. -. Optional: In the *Display name* field, enter `Getting Started with OpenShift`. -+ -[NOTE] -==== -*Display name* and *Description* fields are optional. -==== -. Click *Create*. - -You have created your first project on {product-title}. 
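If you prefer the OpenShift CLI (`oc`), a roughly equivalent sketch, assuming the same project name and optional display name, is:

[source,terminal]
----
$ oc new-project user-getting-started --display-name="Getting Started with OpenShift"
----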
diff --git a/modules/getting-started-web-console-creating-secret.adoc b/modules/getting-started-web-console-creating-secret.adoc deleted file mode 100644 index 04fbef14e8bc..000000000000 --- a/modules/getting-started-web-console-creating-secret.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-creating-secret_{context}"] -= Creating a secret - -The `Secret` object provides a mechanism to hold sensitive information such as passwords, {product-title} client configuration files, private source repository credentials, and so on. -Secrets decouple sensitive content from the pods. You can mount secrets into containers using a volume plugin or the system can use secrets to perform actions on behalf of a pod. -The following procedure adds the secret `nationalparks-mongodb-parameters` and mounts it to the `nationalparks` workload. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *Developer* perspective, navigate to *Secrets* on the left hand navigation and click *Secrets*. -. Click *Create* -> *Key/value secret*. -.. In the *Secret name* field, enter `nationalparks-mongodb-parameters`. -.. Enter the following values for *Key* and *Value*: -+ -.Secret keys and values -[cols="1,1"] -|=== -|Key |Value - -|`MONGODB_USER`|`mongodb` -|`DATABASE_SERVICE_NAME`|`mongodb-nationalparks` -|`MONGODB_PASSWORD`|`mongodb` -|`MONGODB_DATABASE`|`mongodb` -|`MONGODB_ADMIN_PASSWORD`|`mongodb` -|=== - -.. Click *Create*. -. Click *Add Secret to workload*. -.. From the drop down menu, select `nationalparks` as the workload to add. -.. Click *Save*. - -This change in configuration triggers a new rollout of the `nationalparks` deployment with the environment variables properly injected. diff --git a/modules/getting-started-web-console-deploying-first-image.adoc b/modules/getting-started-web-console-deploying-first-image.adoc deleted file mode 100644 index 5e65cef4675f..000000000000 --- a/modules/getting-started-web-console-deploying-first-image.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-deploying-first-image_{context}"] -= Deploying your first image - -The simplest way to deploy an application in {product-title} is to run an existing container image. The following procedure deploys a front end component of an application called `national-parks-app`. The web application displays an interactive map. The map displays the location of major national parks across the world. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have the appropriate roles and permissions in a project to create applications and other workloads in {product-title}. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Container images* to open a dialog. -. In the *Image Name* field, enter the following: `quay.io/openshiftroadshow/parksmap:latest` -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `parksmap` -. Select *Deployment* as the *Resource*. -. Select *Create route to the application*. -. 
In the *Advanced Options* section, click *Labels* and add labels to better identify this deployment later. Labels help identify and filter components in the web console and in the command line. Add the following labels: -** `app=national-parks-app` -** `component=parksmap` -** `role=frontend` -. Click *Create*. - -You are redirected to the *Topology* page where you can see the `parksmap` deployment in the `national-parks-app` application. diff --git a/modules/getting-started-web-console-deploying-python-app.adoc b/modules/getting-started-web-console-deploying-python-app.adoc deleted file mode 100644 index f799d1a7f86d..000000000000 --- a/modules/getting-started-web-console-deploying-python-app.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-deploying-python-app_{context}"] -= Deploying a Python application - -The following procedure deploys a back-end service for the `parksmap` application. The Python application performs 2D geo-spatial queries against a MongoDB database to locate and return map coordinates of all national parks in the world. - -The deployed back-end service is `nationalparks`. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *+Add* view in the *Developer* perspective, click *Import from Git* to open a dialog. -. Enter the following URL in the *Git Repo URL* field: -[x-]`https://github.com/openshift-roadshow/nationalparks-py.git` -+ -A builder image is automatically detected. -+ -[NOTE] -==== -If the detected builder image is Dockerfile, select *Edit Import Strategy*. Select *Builder Image* and then click *Python*. -==== -. Scroll to the *General* section. -. Ensure that you have the current values for the following: -.. Application: `national-parks-app` -.. Name: `nationalparks` -. Select *Deployment* as the *Resource*. -. Select *Create route to the application*. -. In the *Advanced Options* section, click *Labels* and add labels to better identify this deployment later. Labels help identify and filter components in the web console and in the command line. Add the following labels: -.. `app=national-parks-app` -.. `component=nationalparks` -.. `role=backend` -.. `type=parksmap-backend` -. Click *Create*. -. From the *Topology* view, select the `nationalparks` application. -+ -[NOTE] -==== -Click the *Resources* tab. In the *Builds* section, you can see your build running. -==== diff --git a/modules/getting-started-web-console-examining-pod.adoc b/modules/getting-started-web-console-examining-pod.adoc deleted file mode 100644 index 65134f3c9b5d..000000000000 --- a/modules/getting-started-web-console-examining-pod.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-examining-pod_{context}"] -= Examining the pod - -{product-title} leverages the Kubernetes concept of a pod, which is one or more containers deployed together on one host, and the smallest compute unit that can be defined, deployed, and managed. -Pods are the rough equivalent of a machine instance, physical or virtual, to a container. - -The *Overview* panel enables you to access many features of the `parksmap` deployment.
The *Details* and *Resources* tabs enable you to scale application pods, check the build status, and view services and routes. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -* Click `D parksmap` in the *Topology* view to open the *Overview* panel. -+ -.Parksmap deployment -image::getting-started-examine-pod.png[] -+ -The *Overview* panel includes tabs for *Details*, *Resources*, and *Observe*. The *Details* tab might be displayed by default. - -+ -.Overview panel tab definitions -[cols="2,3"] -|=== -|Tab |Definition - -|*Details* | Enables you to scale your application and view pod configuration such as labels, annotations, and the status of the application. -1.4+|*Resources* |Displays the resources that are associated with the deployment. -|Pods are the basic units of {product-title} applications. You can see how many pods are being used, what their status is, and you can view the logs. -|*Services* that are created for your pod and assigned ports are listed under the *Services* heading. -|*Routes* enable external access to the pods and a URL is used to access them. -|*Observe*|View various *Events* and *Metrics* information as it relates to your pod. -|=== diff --git a/modules/getting-started-web-console-granting-permissions.adoc b/modules/getting-started-web-console-granting-permissions.adoc deleted file mode 100644 index d9734cb23d72..000000000000 --- a/modules/getting-started-web-console-granting-permissions.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-granting-permissions_{context}"] -= Granting view permissions - -{product-title} automatically creates a few special service accounts in every project. The default service account takes responsibility for running the pods. {product-title} uses and injects this service account into every pod that launches. - -The following procedure creates a `RoleBinding` object for the default `ServiceAccount` object. The service account communicates with the {product-title} API to learn about pods, services, and resources within the project. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You have a deployed image. -* You are in the *Administrator* perspective. - -.Procedure - -. Navigate to *User Management* and then click *RoleBindings*. -. Click *Create binding*. -. Select `Namespace role binding (RoleBinding)`. -. In the *Name* field, enter `sa-user-account`. -. In the *Namespace* field, search for and select `user-getting-started`. -. In the *Role name* field, search for `view` and select `view`. -. In the *Subject* field, select `ServiceAccount`. -. In the *Subject namespace* field, search for and select `user-getting-started`. -. In the *Subject name* field, enter `default`. -. Click *Create*.
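You can create the same role binding from the OpenShift CLI (`oc`). The following command mirrors the CLI version of this procedure and assumes the `user-getting-started` project:

[source,terminal]
----
$ oc adm policy add-role-to-user view -z default -n user-getting-started
----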
diff --git a/modules/getting-started-web-console-load-data-output.adoc b/modules/getting-started-web-console-load-data-output.adoc deleted file mode 100644 index 96dd70dda84b..000000000000 --- a/modules/getting-started-web-console-load-data-output.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-load-data-output_{context}"] - -= Loading data and displaying the national parks map - -You deployed the `parksmap` and `nationalparks` applications and then deployed the `mongodb-nationalparks` database. However, no data has been loaded _into_ the database. -Before loading the data, add the proper labels to the `mongodb-nationalparks` and `nationalparks` deployment. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. From the *Topology* view, navigate to `nationalparks` deployment and click *Resources* and retrieve your route information. -. Copy and paste the URL into your web browser and add the following at the end of the URL: -+ -[source,text] ----- -/ws/data/load ----- -+ -.Example output -+ -[source,text] ----- -Items inserted in database: 2893 ----- -. From the *Topology* view, navigate to `parksmap` deployment and click *Resources* and retrieve your route information. -. Copy and paste the URL into your web browser to view your national parks across the world map. - -.National parks across the world -image::getting-started-map-national-parks.png[] diff --git a/modules/getting-started-web-console-login.adoc b/modules/getting-started-web-console-login.adoc deleted file mode 100644 index 29868c71a24f..000000000000 --- a/modules/getting-started-web-console-login.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-login_{context}"] -= Logging in to the web console - -You can log in to the {product-title} web console to access and manage your cluster. - -.Prerequisites - -* You must have access to an {product-title} cluster. - -.Procedure - -* Log in to the {product-title} web console using your login credentials. - -You are redirected to the *Projects* page. For non-administrative users, the default view is the *Developer* perspective. For cluster administrators, the default view is the *Administrator* perspective. If you do not have `cluster-admin` privileges, you will not see the *Administrator* perspective in your web console. - -The web console provides two perspectives: the *Administrator* perspective and *Developer* perspective. The *Developer* perspective provides workflows specific to the developer use cases. - -.Perspective switcher -image::web_console_perspectives.png[500,300] - -Use the perspective switcher to switch to the *Developer* perspective. The *Topology* view with options to create an application is displayed. 
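If you are already logged in with the OpenShift CLI (`oc`), one way to find the web console URL for your cluster is the following sketch; the returned URL depends on your environment:

[source,terminal]
----
$ oc whoami --show-console
----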
diff --git a/modules/getting-started-web-console-scaling-app.adoc b/modules/getting-started-web-console-scaling-app.adoc deleted file mode 100644 index fa7f80b108e3..000000000000 --- a/modules/getting-started-web-console-scaling-app.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * getting-started/openshift-web-console.adoc - -:_content-type: PROCEDURE -[id="getting-started-web-console-scaling-app_{context}"] -= Scaling the application - -In Kubernetes, a `Deployment` object defines how an application deploys. In most cases, users use `Pod`, `Service`, `ReplicaSets`, and `Deployment` resources together. In most cases, {product-title} creates the resources for you. - -When you deploy the `national-parks-app` image, a deployment resource is created. In this example, only one `Pod` is deployed. - -The following procedure scales the `national-parks-image` to use two instances. - -.Prerequisites - -* You are logged in to the {product-title} web console. -* You are in the *Developer* perspective. -* You have a deployed image. - -.Procedure - -. In the *Topology* view, click the `national-parks-app` application. -. Click the *Details* tab. -. Use the up arrow to scale the pod to two instances. -+ -.Scaling application -image::getting-started-scaling-pod.png[500,300] -+ -[NOTE] -==== -Application scaling can happen quickly because {product-title} is launching a new instance of an existing image. -==== - -. Use the down arrow to scale the pod down to one instance. diff --git a/modules/gitops-release-notes-1-9-0.adoc b/modules/gitops-release-notes-1-9-0.adoc index 949c705df00f..9da27358304c 100644 --- a/modules/gitops-release-notes-1-9-0.adoc +++ b/modules/gitops-release-notes-1-9-0.adoc @@ -127,3 +127,7 @@ metadata: <2> Requests the Cluster Network Operator to inject the merged bundle. + After creating this config map, the `user-ca-bundle` content from the `openshift-config` namespace automatically gets injected into this config map, even merged with the system ca-bundle. link:https://issues.redhat.com/browse/GITOPS-1482[GITOPS-1482] + +[role="_additional-resources"] +.Additional resources +* link:https://docs.openshift.com/container-platform/latest/operators/admin/olm-configuring-proxy-support.html#olm-inject-custom-ca_olm-configuring-proxy-support[Injecting a custom CA certificate] \ No newline at end of file diff --git a/modules/graceful-restart.adoc b/modules/graceful-restart.adoc deleted file mode 100644 index 1bfbcda827da..000000000000 --- a/modules/graceful-restart.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/graceful-cluster-restart.adoc - -:_content-type: PROCEDURE -[id="graceful-restart_{context}"] -= Restarting the cluster - -You can restart your cluster after it has been shut down gracefully. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* This procedure assumes that you gracefully shut down the cluster. - -.Procedure - -. Power on any cluster dependencies, such as external storage or an LDAP server. - -. Start all cluster machines. -+ -Use the appropriate method for your cloud environment to start the machines, for example, from your cloud provider's web console. -+ -Wait approximately 10 minutes before continuing to check the status of control plane nodes. - -. Verify that all control plane nodes are ready. 
-+ -[source,terminal] ----- -$ oc get nodes -l node-role.kubernetes.io/master ----- -+ -The control plane nodes are ready if the status is `Ready`, as shown in the following output: -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-168-251.ec2.internal Ready master 75m v1.26.0 -ip-10-0-170-223.ec2.internal Ready master 75m v1.26.0 -ip-10-0-211-16.ec2.internal Ready master 75m v1.26.0 ----- - -. If the control plane nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved. - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -.. Approve each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. After the control plane nodes are ready, verify that all worker nodes are ready. -+ -[source,terminal] ----- -$ oc get nodes -l node-role.kubernetes.io/worker ----- -+ -The worker nodes are ready if the status is `Ready`, as shown in the following output: -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-179-95.ec2.internal Ready worker 64m v1.26.0 -ip-10-0-182-134.ec2.internal Ready worker 64m v1.26.0 -ip-10-0-250-100.ec2.internal Ready worker 64m v1.26.0 ----- - -. If the worker nodes are _not_ ready, then check whether there are any pending certificate signing requests (CSRs) that must be approved. - -.. Get the list of current CSRs: -+ -[source,terminal] ----- -$ oc get csr ----- - -.. Review the details of a CSR to verify that it is valid: -+ -[source,terminal] ----- -$ oc describe csr <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -.. Approve each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> ----- - -. Verify that the cluster started properly. - -.. Check that there are no degraded cluster Operators. -+ -[source,terminal] ----- -$ oc get clusteroperators ----- -+ -Check that there are no cluster Operators with the `DEGRADED` condition set to `True`. -+ -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.13.0 True False False 59m -cloud-credential 4.13.0 True False False 85m -cluster-autoscaler 4.13.0 True False False 73m -config-operator 4.13.0 True False False 73m -console 4.13.0 True False False 62m -csi-snapshot-controller 4.13.0 True False False 66m -dns 4.13.0 True False False 76m -etcd 4.13.0 True False False 76m -... ----- - -.. Check that all nodes are in the `Ready` state: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -Check that the status for all nodes is `Ready`. -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-168-251.ec2.internal Ready master 82m v1.26.0 -ip-10-0-170-223.ec2.internal Ready master 82m v1.26.0 -ip-10-0-179-95.ec2.internal Ready worker 70m v1.26.0 -ip-10-0-182-134.ec2.internal Ready worker 70m v1.26.0 -ip-10-0-211-16.ec2.internal Ready master 82m v1.26.0 -ip-10-0-250-100.ec2.internal Ready worker 69m v1.26.0 ----- - -If the cluster did not start properly, you might need to restore your cluster using an etcd backup. 
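As a convenience, the node and cluster Operator checks in this procedure can be approximated with `oc wait`. This is a sketch only; adjust the timeouts to suit your environment:

[source,terminal]
----
$ oc wait --for=condition=Ready node --all --timeout=30m
$ oc wait --for=condition=Degraded=False clusteroperator --all --timeout=30m
----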
diff --git a/modules/graceful-shutdown.adoc b/modules/graceful-shutdown.adoc deleted file mode 100644 index 8f7199a21804..000000000000 --- a/modules/graceful-shutdown.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/graceful-cluster-shutdown.adoc - -:_content-type: PROCEDURE -[id="graceful-shutdown_{context}"] -= Shutting down the cluster - -You can shut down your cluster in a graceful manner so that it can be restarted at a later date. - -[NOTE] -==== -You can shut down a cluster until a year from the installation date and expect it to restart gracefully. After a year from the installation date, the cluster certificates expire. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have taken an etcd backup. -+ -[IMPORTANT] -==== -It is important to take an etcd backup before performing this procedure so that your cluster can be restored if you encounter any issues when restarting the cluster. - -For example, the following conditions can cause the restarted cluster to malfunction: - -* etcd data corruption during shutdown -* Node failure due to hardware -* Network connectivity issues - -If your cluster fails to recover, follow the steps to restore to a previous cluster state. -==== - -.Procedure - -. If you plan to shut down the cluster for an extended period of time, determine the date that cluster certificates expire. -+ -You must restart the cluster prior to the date that certificates expire. As the cluster restarts, the process might require you to manually approve the pending certificate signing requests (CSRs) to recover kubelet certificates. - -.. Check the expiration date for the `kube-apiserver-to-kubelet-signer` CA certificate: -+ -[source,terminal] ----- -$ oc -n openshift-kube-apiserver-operator get secret kube-apiserver-to-kubelet-signer -o jsonpath='{.metadata.annotations.auth\.openshift\.io/certificate-not-after}{"\n"}' ----- -+ -.Example output -[source,terminal] ----- -2023-08-05T14:37:50Z ----- - -.. Check the expiration date for the kubelet certificates: - -... Start a debug session for a control plane node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -... Change your root directory to `/host` by running the following command: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -... Check the kubelet client certificate expiration date by running the following command: -+ -[source,terminal] ----- -sh-5.1# openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -enddate ----- -+ -.Example output -[source,terminal] ----- -notAfter=Jun 6 10:50:07 2023 GMT ----- - -... Check the kubelet server certificate expiration date by running the following command: -+ -[source,terminal] ----- -sh-5.1# openssl x509 -in /var/lib/kubelet/pki/kubelet-server-current.pem -noout -enddate ----- -+ -.Example output -[source,terminal] ----- -notAfter=Jun 6 10:50:07 2023 GMT ----- - -... Exit the debug session. - -... Repeat these steps to check certificate expiration dates on all control plane nodes. To ensure that the cluster can restart gracefully, plan to restart it before the earliest certificate expiration date. - -. Shut down all of the nodes in the cluster. 
You can do this from your cloud provider's web console, or run the following loop: -+ -[source,terminal] ----- -$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do oc debug node/${node} -- chroot /host shutdown -h 1; done <1> ----- -<1> `-h 1` indicates how long, in minutes, this process lasts before the control-plane nodes are shut down. For large-scale clusters with 10 nodes or more, set to 10 minutes or longer to make sure all the compute nodes have time to shut down first. -+ -.Example output ----- -Starting pod/ip-10-0-130-169us-east-2computeinternal-debug ... -To use host binaries, run `chroot /host` -Shutdown scheduled for Mon 2021-09-13 09:36:17 UTC, use 'shutdown -c' to cancel. - -Removing debug pod ... -Starting pod/ip-10-0-150-116us-east-2computeinternal-debug ... -To use host binaries, run `chroot /host` -Shutdown scheduled for Mon 2021-09-13 09:36:29 UTC, use 'shutdown -c' to cancel. ----- -+ -Shutting down the nodes using one of these methods allows pods to terminate gracefully, which reduces the chance for data corruption. -+ -[NOTE] -==== -Adjust the shut down time to be longer for large-scale clusters: -[source,terminal] ----- -$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do oc debug node/${node} -- chroot /host shutdown -h 10; done ----- -==== -+ -[NOTE] -==== -It is not necessary to drain control plane nodes of the standard pods that ship with {product-title} prior to shutdown. - -Cluster administrators are responsible for ensuring a clean restart of their own workloads after the cluster is restarted. If you drained control plane nodes prior to shutdown because of custom workloads, you must mark the control plane nodes as schedulable before the cluster will be functional again after restart. -==== - -. Shut off any cluster dependencies that are no longer needed, such as external storage or an LDAP server. Be sure to consult your vendor's documentation before doing so. -+ -[IMPORTANT] -==== -If you deployed your cluster on a cloud-provider platform, do not shut down, suspend, or delete the associated cloud resources. If you delete the cloud resources of a suspended virtual machine, {product-title} might not restore successfully. -==== diff --git a/modules/helm-adding-helm-chart-repositories.adoc b/modules/helm-adding-helm-chart-repositories.adoc deleted file mode 100644 index c2e708f494c7..000000000000 --- a/modules/helm-adding-helm-chart-repositories.adoc +++ /dev/null @@ -1,52 +0,0 @@ -:_content-type: PROCEDURE -[id="adding-helm-chart-repositories_{context}"] -= Adding custom Helm chart repositories - -As a cluster administrator, you can add custom Helm chart repositories to your cluster and enable access to the Helm charts from these repositories in the *Developer Catalog*. - -.Procedure - -. To add a new Helm Chart Repository, you must add the Helm Chart Repository custom resource (CR) to your cluster. 
-+ -.Sample Helm Chart Repository CR - -[source,yaml] ----- -apiVersion: helm.openshift.io/v1beta1 -kind: HelmChartRepository -metadata: - name: <name> -spec: - # optional name that might be used by console - # name: <chart-display-name> - connectionConfig: - url: <helm-chart-repository-url> ----- -+ -For example, to add an Azure sample chart repository, run: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply -f - -apiVersion: helm.openshift.io/v1beta1 -kind: HelmChartRepository -metadata: - name: azure-sample-repo -spec: - name: azure-sample-repo - connectionConfig: - url: https://raw.githubusercontent.com/Azure-Samples/helm-charts/master/docs -EOF ----- -+ -. Navigate to the *Developer Catalog* in the web console to verify that the Helm charts from the chart repository are displayed. -+ -For example, use the *Chart repositories* filter to search for a Helm chart from the repository. -+ -.Chart repositories filter -image::odc_helm_chart_repo_filter.png[] -+ -[NOTE] -==== -If a cluster administrator removes all of the chart repositories, then you cannot view the Helm option in the *+Add* view, *Developer Catalog*, and left navigation panel. -==== diff --git a/modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc b/modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc deleted file mode 100644 index 4d4c8bd3bd6f..000000000000 --- a/modules/helm-adding-namespace-scoped-helm-chart-repositories.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc - -:_content-type: PROCEDURE -[id="adding-namespace-scoped-helm-chart-repositories.adoc_{context}"] -= Adding namespace-scoped custom Helm chart repositories - -[role="_abstract"] -The cluster-scoped `HelmChartRepository` custom resource definition (CRD) for Helm repository provides the ability for administrators to add Helm repositories as custom resources. The namespace-scoped `ProjectHelmChartRepository` CRD allows project members with the appropriate role-based access control (RBAC) permissions to create Helm repository resources of their choice but scoped to their namespace. Such project members can see charts from both cluster-scoped and namespace-scoped Helm repository resources. - -[NOTE] -==== -* Administrators can limit users from creating namespace-scoped Helm repository resources. By limiting users, administrators have the flexibility to control the RBAC through a namespace role instead of a cluster role. This avoids unnecessary permission elevation for the user and prevents access to unauthorized services or applications. -* The addition of the namespace-scoped Helm repository does not impact the behavior of the existing cluster-scoped Helm repository. -==== - -As a regular user or project member with the appropriate RBAC permissions, you can add custom namespace-scoped Helm chart repositories to your cluster and enable access to the Helm charts from these repositories in the *Developer Catalog*. - -.Procedure - -. To add a new namespace-scoped Helm Chart Repository, you must add the Helm Chart Repository custom resource (CR) to your namespace. 
-+ -.Sample Namespace-scoped Helm Chart Repository CR - -[source,yaml] ----- -apiVersion: helm.openshift.io/v1beta1 -kind: ProjectHelmChartRepository -metadata: - name: <name> -spec: - url: https://my.chart-repo.org/stable - - # optional name that might be used by console - name: <chart-repo-display-name> - - # optional and only needed for UI purposes - description: <My private chart repo> - - # required: chart repository URL - connectionConfig: - url: <helm-chart-repository-url> ----- -+ -For example, to add an Azure sample chart repository scoped to your `my-namespace` namespace, run: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply --namespace my-namespace -f - -apiVersion: helm.openshift.io/v1beta1 -kind: ProjectHelmChartRepository -metadata: - name: azure-sample-repo -spec: - name: azure-sample-repo - connectionConfig: - url: https://raw.githubusercontent.com/Azure-Samples/helm-charts/master/docs -EOF ----- -+ -The output verifies that the namespace-scoped Helm Chart Repository CR is created: -+ -.Example output ----- -projecthelmchartrepository.helm.openshift.io/azure-sample-repo created ----- - -. Navigate to the *Developer Catalog* in the web console to verify that the Helm charts from the chart repository are displayed in your `my-namespace` namespace. -+ -For example, use the *Chart repositories* filter to search for a Helm chart from the repository. -+ -.Chart repositories filter in your namespace -image::odc_namespace_helm_chart_repo_filter.png[] -+ -Alternatively, run: -+ -[source,terminal] ----- -$ oc get projecthelmchartrepositories --namespace my-namespace ----- -+ -.Example output ----- -NAME AGE -azure-sample-repo 1m ----- -+ -[NOTE] -==== -If a cluster administrator or a regular user with appropriate RBAC permissions removes all of the chart repositories in a specific namespace, then you cannot view the Helm option in the *+Add* view, *Developer Catalog*, and left navigation panel for that specific namespace. -==== diff --git a/modules/helm-creating-a-custom-helm-chart-on-openshift.adoc b/modules/helm-creating-a-custom-helm-chart-on-openshift.adoc deleted file mode 100644 index 4da0edb38d91..000000000000 --- a/modules/helm-creating-a-custom-helm-chart-on-openshift.adoc +++ /dev/null @@ -1,85 +0,0 @@ -:_content-type: PROCEDURE -[id="creating-a-custom-helm-chart-on-openshift_{context}"] -= Creating a custom Helm chart on {product-title} - -.Procedure -. Create a new project: -+ -[source,terminal] ----- -$ oc new-project nodejs-ex-k ----- - -. Download an example Node.js chart that contains {product-title} objects: -+ -[source,terminal] ----- -$ git clone https://github.com/redhat-developer/redhat-helm-charts ----- - -. Go to the directory with the sample chart: -+ -[source,terminal] ----- -$ cd redhat-helm-charts/alpha/nodejs-ex-k/ ----- - -. Edit the `Chart.yaml` file and add a description of your chart: -+ -[source,yaml] ----- -apiVersion: v2 <1> -name: nodejs-ex-k <2> -description: A Helm chart for OpenShift <3> -icon: https://static.redhat.com/libs/redhat/brand-assets/latest/corp/logo.svg <4> -version: 0.2.1 <5> ----- -+ -<1> The chart API version. It should be `v2` for Helm charts that require at least Helm 3. -<2> The name of your chart. -<3> The description of your chart. -<4> The URL to an image to be used as an icon. -<5> The Version of your chart as per the Semantic Versioning (SemVer) 2.0.0 Specification. - -. 
Verify that the chart is formatted properly: -+ -[source,terminal] ----- -$ helm lint ----- -+ -.Example output -[source,terminal] ----- -[INFO] Chart.yaml: icon is recommended - -1 chart(s) linted, 0 chart(s) failed ----- - -. Navigate to the previous directory level: -+ -[source,terminal] ----- -$ cd .. ----- - -. Install the chart: -+ -[source,terminal] ----- -$ helm install nodejs-chart nodejs-ex-k ----- - -. Verify that the chart has installed successfully: -+ -[source,terminal] ----- -$ helm list ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION -nodejs-chart nodejs-ex-k 1 2019-12-05 15:06:51.379134163 -0500 EST deployed nodejs-0.1.0 1.16.0 ----- diff --git a/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc b/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc deleted file mode 100644 index 43a6f87231ca..000000000000 --- a/modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc +++ /dev/null @@ -1,86 +0,0 @@ -:_content-type: PROCEDURE -[id="creating-credentials-and-certificates-to-add-helm-repositories_{context}"] -= Creating credentials and CA certificates to add Helm chart repositories - -Some Helm chart repositories need credentials and custom certificate authority (CA) certificates to connect to it. You can use the web console as well as the CLI to add credentials and certificates. - -.Procedure -To configure the credentials and certificates, and then add a Helm chart repository using the CLI: - -. In the `openshift-config` namespace, create a `ConfigMap` object with a custom CA certificate in PEM encoded format, and store it under the `ca-bundle.crt` key within the config map: -+ -[source,terminal] ----- -$ oc create configmap helm-ca-cert \ ---from-file=ca-bundle.crt=/path/to/certs/ca.crt \ --n openshift-config ----- -+ -. In the `openshift-config` namespace, create a `Secret` object to add the client TLS configurations: -+ -[source,terminal] ----- -$ oc create secret tls helm-tls-configs \ ---cert=/path/to/certs/client.crt \ ---key=/path/to/certs/client.key \ --n openshift-config ----- -+ -Note that the client certificate and key must be in PEM encoded format and stored under the keys `tls.crt` and `tls.key`, respectively. - -. Add the Helm repository as follows: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply -f - -apiVersion: helm.openshift.io/v1beta1 -kind: HelmChartRepository -metadata: - name: <helm-repository> -spec: - name: <helm-repository> - connectionConfig: - url: <URL for the Helm repository> - tlsConfig: - name: helm-tls-configs - ca: - name: helm-ca-cert -EOF ----- -+ -The `ConfigMap` and `Secret` are consumed in the HelmChartRepository CR using the `tlsConfig` and `ca` fields. These certificates are used to connect to the Helm repository URL. -. By default, all authenticated users have access to all configured charts. 
However, for chart repositories where certificates are needed, you must provide users with read access to the `helm-ca-cert` config map and `helm-tls-configs` secret in the `openshift-config` namespace, as follows: -+ -[source,terminal] ----- -$ cat <<EOF | kubectl apply -f - -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: openshift-config - name: helm-chartrepos-tls-conf-viewer -rules: -- apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["helm-ca-cert"] - verbs: ["get"] -- apiGroups: [""] - resources: ["secrets"] - resourceNames: ["helm-tls-configs"] - verbs: ["get"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: openshift-config - name: helm-chartrepos-tls-conf-viewer -subjects: - - kind: Group - apiGroup: rbac.authorization.k8s.io - name: 'system:authenticated' -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: helm-chartrepos-tls-conf-viewer -EOF ----- diff --git a/modules/helm-disabling-helm-chart-repositories.adoc b/modules/helm-disabling-helm-chart-repositories.adoc deleted file mode 100644 index 67565113e2cd..000000000000 --- a/modules/helm-disabling-helm-chart-repositories.adoc +++ /dev/null @@ -1,40 +0,0 @@ -:_content-type: PROCEDURE -[id="helm-disabling-helm-chart-repositories_{context}"] -= Disabling Helm Chart repositories - -You can disable Helm Charts from a particular Helm Chart Repository in the catalog by setting the `disabled` property in the `HelmChartRepository` custom resource to `true`. - -.Procedure - -* To disable a Helm Chart repository by using the CLI, add the `disabled: true` property to the custom resource. For example, to disable an Azure sample chart repository, run: -+ ----- -$ cat <<EOF | oc apply -f - -apiVersion: helm.openshift.io/v1beta1 -kind: HelmChartRepository -metadata: - name: azure-sample-repo -spec: - connectionConfig: - url: https://raw.githubusercontent.com/Azure-Samples/helm-charts/master/docs - disabled: true -EOF ----- - -* To disable a recently added Helm Chart repository by using the web console: -+ -. Go to *Custom Resource Definitions* and search for the `HelmChartRepository` custom resource. - -. Go to *Instances*, find the repository you want to disable, and click its name. - -. Go to the *YAML* tab, add the `disabled: true` property in the `spec` section, and click *Save*. -+ -.Example ----- -spec: - connectionConfig: - url: <url-of-the-repository-to-be-disabled> - disabled: true ----- -+ -The repository is now disabled and will not appear in the catalog. diff --git a/modules/helm-filtering-helm-charts-by-certification-level.adoc b/modules/helm-filtering-helm-charts-by-certification-level.adoc deleted file mode 100644 index b7f030d649f3..000000000000 --- a/modules/helm-filtering-helm-charts-by-certification-level.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: PROCEDURE -[id="filtering-helm-charts-by-certification-level_{context}"] -= Filtering Helm Charts by their certification level - -You can filter Helm charts based on their certification level in the *Developer Catalog*. - -.Procedure - -. In the *Developer* perspective, navigate to the *+Add* view and select a project. - -. From the *Developer Catalog* tile, select the *Helm Chart* option to see all the Helm charts in the *Developer Catalog*. - -. Use the filters to the left of the list of Helm charts to filter the required charts: -* Use the *Chart Repositories* filter to filter charts provided by *Red Hat Certification Charts* or *OpenShift Helm Charts*.
-* Use the *Source* filter to filter charts sourced from *Partners*, *Community*, or *Red Hat*. Certified charts are indicated with the (image:odc_verified_icon.png[title="Certified icon"]) icon. - -[NOTE] -==== -The *Source* filter will not be visible when there is only one provider type. -==== - -You can now select the required chart and install it. diff --git a/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc b/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc deleted file mode 100644 index fe43cdb4dbae..000000000000 --- a/modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: PROCEDURE -[id="installing-a-helm-chart-on-an-openshift-cluster_{context}"] - -= Installing a Helm chart on an {product-title} cluster - -.Prerequisites -* You have a running {product-title} cluster and you have logged into it. -* You have installed Helm. - -.Procedure - -. Create a new project: -+ -[source,terminal] ----- -$ oc new-project vault ----- - -. Add a repository of Helm charts to your local Helm client: -+ -[source,terminal] ----- -$ helm repo add openshift-helm-charts https://charts.openshift.io/ ----- -+ -.Example output -[source,terminal] ----- -"openshift-helm-charts" has been added to your repositories ----- - -. Update the repository: -+ -[source,terminal] ----- -$ helm repo update ----- - -. Install an example HashiCorp Vault: -+ -[source,terminal] ----- -$ helm install example-vault openshift-helm-charts/hashicorp-vault ----- -+ -.Example output -[source,terminal] ----- -NAME: example-vault -LAST DEPLOYED: Fri Mar 11 12:02:12 2022 -NAMESPACE: vault -STATUS: deployed -REVISION: 1 -NOTES: -Thank you for installing HashiCorp Vault! ----- - -. Verify that the chart has installed successfully: -+ -[source,terminal] ----- -$ helm list ----- -+ -.Example output -[source,terminal] ----- -NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION -example-vault vault 1 2022-03-11 12:02:12.296226673 +0530 IST deployed vault-0.19.0 1.9.2 ----- diff --git a/modules/hosted-control-planes-concepts-personas.adoc b/modules/hosted-control-planes-concepts-personas.adoc deleted file mode 100644 index d10d8bd282b0..000000000000 --- a/modules/hosted-control-planes-concepts-personas.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-concepts-personas_{context}"] -= Glossary of common concepts and personas for hosted control planes - -When you use hosted control planes for {product-title}, it is important to understand its key concepts and the personas that are involved. - -[id="hosted-control-planes-concepts_{context}"] -== Concepts - -hosted cluster:: An {product-title} API endpoint for the tenant cluster that is managed by the HyperShift Operator. - -hosted cluster infrastructure:: Network, compute, and storage resources that exist in the tenant or end-user cloud account. - -hosted control plane:: An {product-title} control plane that runs on the management cluster, which is exposed by the API endpoint of a hosted cluster. The components of a control plane include etcd, the Kubernetes API server, the Kubernetes controller manager, and a VPN. - -hosting cluster:: See _management cluster_. - -management cluster:: An {product-title} cluster where the HyperShift Operator is deployed and where the control planes for tenant clusters are hosted. 
The management cluster is synonymous with the _hosting cluster_. - -management cluster infrastructure:: Network, compute, and storage resources of the management cluster. - -[id="hosted-control-planes-personas_{context}"] -== Personas - -cluster instance administrator:: Users who assume this role are the equivalent of administrators in standalone {product-title}. This user has the `cluster-admin` role in the provisioned cluster, but might not have power over when or how the cluster is updated or configured. This user might have read-only access to see some configuration projected into the cluster. - -cluster instance user:: Users who assume this role are the equivalent of developers in standalone {product-title}. This user does not have a view into OperatorHub or machines. - -cluster service consumer:: Users who assume this role can request control planes and worker nodes, drive updates, or modify externalized configurations. Typically, this user does not manage or access cloud credentials or infrastructure encryption keys. The cluster service consumer persona can request hosted clusters and interact with node pools. Users who assume this role have RBAC to create, read, update, or delete hosted clusters and node pools within a logical boundary. - -cluster service provider:: Users who assume this role typically have the `cluster-admin` role on the management cluster and have RBAC to monitor and own the availability of the HyperShift Operator as well as the control planes for the tenant's hosted clusters. The cluster service provider persona is responsible for several activities, including the following examples: -** Owning service-level objects for control plane availability, uptime, and stability -** Configuring the cloud account for the management cluster to host control planes -** Configuring the user-provisioned infrastructure, which includes the host awareness of available compute resources - - - diff --git a/modules/hosted-control-planes-overview.adoc b/modules/hosted-control-planes-overview.adoc deleted file mode 100644 index 5393edd5cbd6..000000000000 --- a/modules/hosted-control-planes-overview.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-overview_{context}"] -= Introduction to hosted control planes (Technology Preview) - -You can use hosted control planes for Red Hat {product-title} to reduce management costs, optimize cluster deployment time, and separate management and workload concerns so that you can focus on your applications. - -You can enable hosted control planes as a Technology Preview feature by using the link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#cluster_mce_overview[multicluster engine for Kubernetes operator version 2.0 or later] on Amazon Web Services (AWS), bare metal by using the Agent provider, or {VirtProductName}. - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -[id="hosted-control-planes-architecture_{context}"] -== Architecture of hosted control planes - -{product-title} is often deployed in a coupled, or standalone, model, where a cluster consists of a control plane and a data plane. The control plane includes an API endpoint, a storage endpoint, a workload scheduler, and an actuator that ensures state. 
The data plane includes compute, storage, and networking where workloads and applications run. - -The standalone control plane is hosted by a dedicated group of nodes, which can be physical or virtual, with a minimum number to ensure quorum. The network stack is shared. Administrator access to a cluster offers visibility into the cluster's control plane, machine management APIs, and other components that contribute to the state of a cluster. - -Although the standalone model works well, some situations require an architecture where the control plane and data plane are decoupled. In those cases, the data plane is on a separate network domain with a dedicated physical hosting environment. The control plane is hosted by using high-level primitives such as deployments and stateful sets that are native to Kubernetes. The control plane is treated as any other workload. - -image::hosted-control-planes-diagram.png[Diagram that compares the hosted control plane model against OpenShift with a coupled control plane and workers] - -[id="hosted-control-planes-benefits_{context}"] -== Benefits of hosted control planes - -With hosted control planes for {product-title}, you can pave the way for a true hybrid-cloud approach and enjoy several other benefits. - -* The security boundaries between management and workloads are stronger because the control plane is decoupled and hosted on a dedicated hosting service cluster. As a result, you are less likely to leak credentials for clusters to other users. Because infrastructure secret account management is also decoupled, cluster infrastructure administrators cannot accidentally delete control plane infrastructure. - -* With hosted control planes, you can run many control planes on fewer nodes. As a result, clusters are more affordable. - -* Because the control planes consist of pods that are launched on {product-title}, control planes start quickly. The same principles apply to control planes and workloads, such as monitoring, logging, and auto-scaling. - -* From an infrastructure perspective, you can push registries, HAProxy, cluster monitoring, storage nodes, and other infrastructure components to the tenant's cloud provider account, isolating usage to the tenant. - -* From an operational perspective, multicluster management is more centralized, which results in fewer external factors that affect the cluster status and consistency. Site reliability engineers have a central place to debug issues and navigate to the cluster data plane, which can lead to shorter Time to Resolution (TTR) and greater productivity. \ No newline at end of file diff --git a/modules/hosted-control-planes-version-support.adoc b/modules/hosted-control-planes-version-support.adoc deleted file mode 100644 index 754c9a5e0c1b..000000000000 --- a/modules/hosted-control-planes-version-support.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/control-plane.adoc -// * hosted-control-planes/index.adoc - - -:_content-type: CONCEPT -[id="hosted-control-planes-version-support_{context}"] -= Versioning for hosted control planes - -With each major, minor, or patch version release of {product-title}, two components of hosted control planes are released: - -* HyperShift Operator -* Command-line interface (CLI) - -The HyperShift Operator manages the lifecycle of hosted clusters that are represented by `HostedCluster` API resources. The HyperShift Operator is released with each {product-title} release. 
After the HyperShift Operator is installed, it creates a config map called `supported-versions` in the HyperShift namespace, as shown in the following example. The config map describes the HostedCluster versions that can be deployed. - -[source,yaml] ----- - apiVersion: v1 - data: - supported-versions: '{"versions":["4.13","4.12","4.11"]}' - kind: ConfigMap - metadata: - labels: - hypershift.openshift.io/supported-versions: "true" - name: supported-versions - namespace: hypershift ----- - -The CLI is a helper utility for development purposes. The CLI is released as part of any HyperShift Operator release. No compatibility policies are guaranteed. - -The API, `hypershift.openshift.io`, provides a way to create and manage lightweight, flexible, heterogeneous {product-title} clusters at scale. The API exposes two user-facing resources: `HostedCluster` and `NodePool`. A `HostedCluster` resource encapsulates the control plane and common data plane configuration. When you create a `HostedCluster` resource, you have a fully functional control plane with no attached nodes. A `NodePool` resource is a scalable set of worker nodes that is attached to a `HostedCluster` resource. - -The API version policy generally aligns with the policy for link:https://kubernetes.io/docs/reference/using-api/#api-versioning[Kubernetes API versioning]. - - - diff --git a/modules/how-huge-pages-are-consumed-by-apps.adoc b/modules/how-huge-pages-are-consumed-by-apps.adoc deleted file mode 100644 index 70670301b09d..000000000000 --- a/modules/how-huge-pages-are-consumed-by-apps.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="how-huge-pages-are-consumed-by-apps_{context}"] -= How huge pages are consumed by apps - -Nodes must pre-allocate huge pages in order for the node to report its huge page -capacity. A node can only pre-allocate huge pages for a single size. - -Huge pages can be consumed through container-level resource requirements using the -resource name `hugepages-<size>`, where size is the most compact binary -notation using integer values supported on a particular node. For example, if a -node supports 2048KiB page sizes, it exposes a schedulable resource -`hugepages-2Mi`. Unlike CPU or memory, huge pages do not support over-commitment. - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - generateName: hugepages-volume- -spec: - containers: - - securityContext: - privileged: true - image: rhel7:latest - command: - - sleep - - inf - name: example - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - resources: - limits: - hugepages-2Mi: 100Mi <1> - memory: "1Gi" - cpu: "1" - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> Specify the amount of memory for `hugepages` as the exact amount to be -allocated. Do not specify this value as the amount of memory for `hugepages` -multiplied by the size of the page. For example, given a huge page size of 2MB, -if you want to use 100MB of huge-page-backed RAM for your application, then you -would allocate 50 huge pages. {product-title} handles the math for you. As in -the above example, you can specify `100MB` directly. - -*Allocating huge pages of a specific size* - -Some platforms support multiple huge page sizes. 
To allocate huge pages of a -specific size, precede the huge pages boot command parameters with a huge page -size selection parameter `hugepagesz=<size>`. The `<size>` value must be -specified in bytes with an optional scale suffix [`kKmMgG`]. The default huge -page size can be defined with the `default_hugepagesz=<size>` boot parameter. - -*Huge page requirements* - -* Huge page requests must equal the limits. This is the default if limits are -specified, but requests are not. - -* Huge pages are isolated at a pod scope. Container isolation is planned in a -future iteration. - -* `EmptyDir` volumes backed by huge pages must not consume more huge page memory -than the pod request. - -* Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must run -with a supplemental group that matches *_proc/sys/vm/hugetlb_shm_group_*. diff --git a/modules/how-to-plan-your-environment-according-to-application-requirements.adoc b/modules/how-to-plan-your-environment-according-to-application-requirements.adoc deleted file mode 100644 index 67578dbdc63b..000000000000 --- a/modules/how-to-plan-your-environment-according-to-application-requirements.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc - -[id="how-to-plan-according-to-application-requirements_{context}"] -= How to plan your environment according to application requirements - -Consider an example application environment: - -[options="header",cols="5"] -|=== -|Pod type |Pod quantity |Max memory |CPU cores |Persistent storage - -|apache -|100 -|500 MB -|0.5 -|1 GB - -|node.js -|200 -|1 GB -|1 -|1 GB - -|postgresql -|100 -|1 GB -|2 -|10 GB - -|JBoss EAP -|100 -|1 GB -|1 -|1 GB -|=== - -Extrapolated requirements: 550 CPU cores, 450GB RAM, and 1.4TB storage. - -Instance size for nodes can be modulated up or down, depending on your -preference. Nodes are often resource overcommitted. In this deployment -scenario, you can choose to run additional smaller nodes or fewer larger nodes -to provide the same amount of resources. Factors such as operational agility and -cost-per-instance should be considered. - -[options="header",cols="4"] -|=== -|Node type |Quantity |CPUs |RAM (GB) - -|Nodes (option 1) -|100 -|4 -|16 - -|Nodes (option 2) -|50 -|8 -|32 - -|Nodes (option 3) -|25 -|16 -|64 -|=== - -Some applications lend themselves well to overcommitted environments, and some -do not. Most Java applications and applications that use huge pages are examples -of applications that would not allow for overcommitment. That memory can not be -used for other applications. In the example above, the environment would be -roughly 30 percent overcommitted, a common ratio. - -The application pods can access a service either by using environment variables or DNS. -If using environment variables, for each active service the variables are injected by the -kubelet when a pod is run on a node. A cluster-aware DNS server watches the Kubernetes API -for new services and creates a set of DNS records for each one. If DNS is enabled throughout -your cluster, then all pods should automatically be able to resolve services by their DNS name. -Service discovery using DNS can be used in case you must go beyond 5000 services. When using -environment variables for service discovery, the argument list exceeds the allowed length after -5000 services in a namespace, then the pods and deployments will start failing. 
Disable the service -links in the deployment's service specification file to overcome this: - -[source,yaml] ----- ---- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: deployment-config-template - creationTimestamp: - annotations: - description: This template will create a deploymentConfig with 1 replica, 4 env vars and a service. - tags: '' -objects: -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - name: deploymentconfig${IDENTIFIER} - spec: - template: - metadata: - labels: - name: replicationcontroller${IDENTIFIER} - spec: - enableServiceLinks: false - containers: - - name: pause${IDENTIFIER} - image: "${IMAGE}" - ports: - - containerPort: 8080 - protocol: TCP - env: - - name: ENVVAR1_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR2_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR3_${IDENTIFIER} - value: "${ENV_VALUE}" - - name: ENVVAR4_${IDENTIFIER} - value: "${ENV_VALUE}" - resources: {} - imagePullPolicy: IfNotPresent - capabilities: {} - securityContext: - capabilities: {} - privileged: false - restartPolicy: Always - serviceAccount: '' - replicas: 1 - selector: - name: replicationcontroller${IDENTIFIER} - triggers: - - type: ConfigChange - strategy: - type: Rolling -- apiVersion: v1 - kind: Service - metadata: - name: service${IDENTIFIER} - spec: - selector: - name: replicationcontroller${IDENTIFIER} - ports: - - name: serviceport${IDENTIFIER} - protocol: TCP - port: 80 - targetPort: 8080 - clusterIP: '' - type: ClusterIP - sessionAffinity: None - status: - loadBalancer: {} -parameters: -- name: IDENTIFIER - description: Number to append to the name of resources - value: '1' - required: true -- name: IMAGE - description: Image to use for deploymentConfig - value: gcr.io/google-containers/pause-amd64:3.0 - required: false -- name: ENV_VALUE - description: Value to use for environment variables - generate: expression - from: "[A-Za-z0-9]{255}" - required: false -labels: - template: deployment-config-template ----- - -The number of application pods that can run in a namespace is dependent on the number of services and the length of the service name when the environment variables are used for service discovery. `ARG_MAX` on the system defines the maximum argument length for a new process and it is set to 2097152 bytes (2 MiB) by default. The Kubelet injects environment variables in to each pod scheduled to run in the namespace including: - -* `<SERVICE_NAME>_SERVICE_HOST=<IP>` -* `<SERVICE_NAME>_SERVICE_PORT=<PORT>` -* `<SERVICE_NAME>_PORT=tcp://<IP>:<PORT>` -* `<SERVICE_NAME>_PORT_<PORT>_TCP=tcp://<IP>:<PORT>` -* `<SERVICE_NAME>_PORT_<PORT>_TCP_PROTO=tcp` -* `<SERVICE_NAME>_PORT_<PORT>_TCP_PORT=<PORT>` -* `<SERVICE_NAME>_PORT_<PORT>_TCP_ADDR=<ADDR>` - -The pods in the namespace will start to fail if the argument length exceeds the allowed value and the number of -characters in a service name impacts it. For example, in a namespace with 5000 services, the limit on the service name -is 33 characters, which enables you to run 5000 pods in the namespace. 
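A rough way to check how close a namespace is to this argument-length limit is sketched below; this is a minimal sketch only, `<namespace>` and `<pod_name>` are placeholders rather than values from this module, and the counts are an approximation of the injected variable payload, not an exact byte count:

[source,terminal]
----
$ getconf ARG_MAX <1>
$ oc get services -n <namespace> --no-headers | wc -l <2>
$ oc exec <pod_name> -n <namespace> -- env | grep -c '_SERVICE_HOST=' <3>
----
<1> The maximum argument length for a new process, 2097152 bytes (2 MiB) by default.
<2> The number of services in the namespace that contribute environment variables to new pods.
<3> The number of services whose variables were injected into an existing pod, assuming `enableServiceLinks` is not set to `false`.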
diff --git a/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc b/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc deleted file mode 100644 index b410d1fd9944..000000000000 --- a/modules/how-to-plan-your-environment-according-to-cluster-maximums.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/planning-your-environment-according-to-object-maximums.adoc - -[id="how-to-plan-according-to-cluster-maximums_{context}"] -= How to plan your environment according to tested cluster maximums - -[IMPORTANT] -==== -Oversubscribing the physical resources on a node affects resource guarantees the Kubernetes scheduler makes during pod placement. Learn what measures you can take to avoid memory swapping. - -Some of the tested maximums are stretched only in a single dimension. They will vary when many objects are running on the cluster. - -The numbers noted in this documentation are based on Red Hat's test methodology, setup, configuration, and tunings. These numbers can vary based on your own individual setup and environments. -==== - -While planning your environment, determine how many pods are expected to fit per node: - ----- -required pods per cluster / pods per node = total number of nodes needed ----- - -The default maximum number of pods per node is 250. However, the number of pods that fit on a node is dependent on the application itself. Consider the application's memory, CPU, and storage requirements, as described in "How to plan your environment according to application requirements". - -.Example scenario - -If you want to scope your cluster for 2200 pods per cluster, you would need at least five nodes, assuming that there are 500 maximum pods per node: - ----- -2200 / 500 = 4.4 ----- - -If you increase the number of nodes to 20, then the pod distribution changes to 110 pods per node: - ----- -2200 / 20 = 110 ----- - -Where: - ----- -required pods per cluster / total number of nodes = expected pods per node ----- - -{product-title} comes with several system pods, such as SDN, DNS, Operators, and others, which run across every worker node by default. Therefore, the result of the above formula can vary. diff --git a/modules/hw-installing-amq-interconnect-messaging-bus.adoc b/modules/hw-installing-amq-interconnect-messaging-bus.adoc deleted file mode 100644 index 914b93dbce69..000000000000 --- a/modules/hw-installing-amq-interconnect-messaging-bus.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="hw-installing-amq-interconnect-messaging-bus_{context}"] -= Installing the AMQ messaging bus - -To pass Redfish bare-metal event notifications between publisher and subscriber on a node, you can install and configure an AMQ messaging bus to run locally on the node. You do this by installing the AMQ Interconnect Operator for use in the cluster. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -.Prerequisites - -* Install the {product-title} CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -* Install the AMQ Interconnect Operator to its own `amq-interconnect` namespace. See link:https://access.redhat.com/documentation/en-us/red_hat_amq/2021.q1/html/deploying_amq_interconnect_on_openshift/adding-operator-router-ocp[Installing the AMQ Interconnect Operator]. - -.Verification - -. 
Verify that the AMQ Interconnect Operator is available and the required pods are running: -+ -[source,terminal] ----- -$ oc get pods -n amq-interconnect ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -amq-interconnect-645db76c76-k8ghs 1/1 Running 0 23h -interconnect-operator-5cb5fc7cc-4v7qm 1/1 Running 0 23h ----- - -. Verify that the required `bare-metal-event-relay` bare-metal event producer pod is running in the `openshift-bare-metal-events` namespace: -+ -[source,terminal] ----- -$ oc get pods -n openshift-bare-metal-events ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -hw-event-proxy-operator-controller-manager-74d5649b7c-dzgtl 2/2 Running 0 25s ----- - - - diff --git a/modules/ibm-z-boost-networking-performance-with-rfs.adoc b/modules/ibm-z-boost-networking-performance-with-rfs.adoc deleted file mode 100644 index 61ed4c0b1e39..000000000000 --- a/modules/ibm-z-boost-networking-performance-with-rfs.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-boost-networking-performance-with-rfs_{context}"] -= Boost networking performance with Receive Flow Steering - -Receive Flow Steering (RFS) extends Receive Packet Steering (RPS) by further reducing network latency. RFS is technically based on RPS, and improves the efficiency of packet processing by increasing the CPU cache hit rate. RFS achieves this, and in addition considers queue length, by determining the most convenient CPU for computation so that cache hits are more likely to occur within the CPU. Thus, the CPU cache is invalidated less and requires fewer cycles to rebuild the cache. This can help reduce packet processing run time. - -[id="use-the-mco-to-activate-rfs_{context}"] -== Use the Machine Config Operator (MCO) to activate RFS - -.Procedure - -. Copy the following MCO sample profile into a YAML file. For example, `enable-rfs.yaml`: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 50-enable-rfs -spec: - config: - ignition: - version: 2.2.0 - storage: - files: - - contents: - source: data:text/plain;charset=US-ASCII,%23%20turn%20on%20Receive%20Flow%20Steering%20%28RFS%29%20for%20all%20network%20interfaces%0ASUBSYSTEM%3D%3D%22net%22%2C%20ACTION%3D%3D%22add%22%2C%20RUN%7Bprogram%7D%2B%3D%22/bin/bash%20-c%20%27for%20x%20in%20/sys/%24DEVPATH/queues/rx-%2A%3B%20do%20echo%208192%20%3E%20%24x/rps_flow_cnt%3B%20%20done%27%22%0A - filesystem: root - mode: 0644 - path: /etc/udev/rules.d/70-persistent-net.rules - - contents: - source: data:text/plain;charset=US-ASCII,%23%20define%20sock%20flow%20enbtried%20for%20%20Receive%20Flow%20Steering%20%28RFS%29%0Anet.core.rps_sock_flow_entries%3D8192%0A - filesystem: root - mode: 0644 - path: /etc/sysctl.d/95-enable-rps.conf ----- - -. Create the MCO profile: -+ -[source,terminal] ----- -$ oc create -f enable-rfs.yaml ----- - -. Verify that an entry named `50-enable-rfs` is listed: -+ -[source,terminal] ----- -$ oc get mc ----- - -. 
To deactivate, enter: -+ -[source,terminal] ----- -$ oc delete mc 50-enable-rfs ----- - diff --git a/modules/ibm-z-choose-networking-setup.adoc b/modules/ibm-z-choose-networking-setup.adoc deleted file mode 100644 index c6b72387e26d..000000000000 --- a/modules/ibm-z-choose-networking-setup.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-choose-networking-setup_{context}"] -= Choose your networking setup - -The networking stack is one of the most important components for a Kubernetes-based product like {product-title}. For {ibmzProductName} setups, the networking setup depends on the hypervisor of your choice. Depending on the workload and the application, the best fit usually changes with the use case and the traffic pattern. - -Depending on your setup, consider these best practices: - -* Consider all options regarding networking devices to optimize your traffic pattern. Explore the advantages of OSA-Express, RoCE Express, HiperSockets, z/VM VSwitch, Linux Bridge (KVM), and others to decide which option leads to the greatest benefit for your setup. -* Always use the latest available NIC version. For example, OSA Express 7S 10 GbE shows great improvement compared to OSA Express 6S 10 GbE with transactional workload types, although both are 10 GbE adapters. -* Each virtual switch adds an additional layer of latency. -* The load balancer plays an important role for network communication outside the cluster. Consider using a production-grade hardware load balancer if this is critical for your application. -* {product-title} SDN introduces flows and rules, which impact the networking performance. Make sure to consider pod affinities and placements, to benefit from the locality of services where communication is critical. -* Balance the trade-off between performance and functionality. diff --git a/modules/ibm-z-disable-thp.adoc b/modules/ibm-z-disable-thp.adoc deleted file mode 100644 index 7a706757ef98..000000000000 --- a/modules/ibm-z-disable-thp.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-disable-thp_{context}"] -= Disable Transparent Huge Pages - -Transparent Huge Pages (THP) attempt to automate most aspects of creating, managing, and using huge pages. Since THP automatically manages the huge pages, this is not always handled optimally for all types of workloads. THP can lead to performance regressions, since many applications handle huge pages on their own. Therefore, consider disabling THP. diff --git a/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc b/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc deleted file mode 100644 index acdd66b32547..000000000000 --- a/modules/ibm-z-ensure-high-disk-performance-hyperpav.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-ensure-high-disk-performance-hyperpav_{context}"] -= Ensure high disk performance with HyperPAV on z/VM - -DASD and ECKD devices are commonly used disk types in {ibmzProductName} environments. In a typical {product-title} setup in z/VM environments, DASD disks are commonly used to support the local storage for the nodes. 
You can set up HyperPAV alias devices to provide more throughput and overall better I/O performance for the DASD disks that support the z/VM guests. - -Using HyperPAV for the local storage devices leads to a significant performance benefit. However, you must be aware that there is a trade-off between throughput and CPU costs. - -[id="use-the-mco-to-activate-hyperpav-aliases-in-nodes-using-zvm-full-pack-minidisks_{context}"] -== Use the Machine Config Operator (MCO) to activate HyperPAV aliases in nodes using z/VM full-pack minidisks - -For z/VM-based {product-title} setups that use full-pack minidisks, you can leverage the advantage of MCO profiles by activating HyperPAV aliases in all of the nodes. You must add YAML configurations for both control plane and compute nodes. - -.Procedure - -. Copy the following MCO sample profile into a YAML file for the control plane node. For example, `05-master-kernelarg-hpav.yaml`: -+ -[source,terminal] ----- -$ cat 05-master-kernelarg-hpav.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 05-master-kernelarg-hpav -spec: - config: - ignition: - version: 3.1.0 - kernelArguments: - - rd.dasd=800-805 ----- - -. Copy the following MCO sample profile into a YAML file for the compute node. For example, `05-worker-kernelarg-hpav.yaml`: -+ -[source,terminal] ----- -$ cat 05-worker-kernelarg-hpav.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 05-worker-kernelarg-hpav -spec: - config: - ignition: - version: 3.1.0 - kernelArguments: - - rd.dasd=800-805 ----- - -+ -[NOTE] -==== -You must modify the `rd.dasd` arguments to fit the device IDs. -==== - -. Create the MCO profiles: -+ -[source,terminal] ----- -$ oc create -f 05-master-kernelarg-hpav.yaml ----- - -+ -[source,terminal] ----- -$ oc create -f 05-worker-kernelarg-hpav.yaml ----- - -. To deactivate, enter: -+ -[source,terminal] ----- -$ oc delete -f 05-master-kernelarg-hpav.yaml ----- - -+ -[source,terminal] ----- -$ oc delete -f 05-worker-kernelarg-hpav.yaml ----- \ No newline at end of file diff --git a/modules/ibm-z-managing-cpu-overcommitment.adoc b/modules/ibm-z-managing-cpu-overcommitment.adoc deleted file mode 100644 index 5c7c9dab1cc3..000000000000 --- a/modules/ibm-z-managing-cpu-overcommitment.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: CONCEPT -[id="ibm-z-managing-cpu-overcommitment_{context}"] -= Managing CPU overcommitment - -In a highly virtualized {ibmzProductName} environment, you must carefully plan the infrastructure setup and sizing. One of the most important features of virtualization is the capability to do resource overcommitment, allocating more resources to the virtual machines than actually available at the hypervisor level. This is very workload dependent and there is no golden rule that can be applied to all setups. - -Depending on your setup, consider these best practices regarding CPU overcommitment: - -* At LPAR level (PR/SM hypervisor), avoid assigning all available physical cores (IFLs) to each LPAR. For example, with four physical IFLs available, you should not define three LPARs with four logical IFLs each. -* Check and understand LPAR shares and weights. -* An excessive number of virtual CPUs can adversely affect performance. 
Do not define more virtual processors to a guest than logical processors are defined to the LPAR. -* Configure the number of virtual processors per guest for peak workload, not more. -* Start small and monitor the workload. Increase the vCPU number incrementally if necessary. -* Not all workloads are suitable for high overcommitment ratios. If the workload is CPU intensive, you will probably not be able to achieve high ratios without performance problems. Workloads that are more I/O intensive can keep consistent performance even with high overcommitment ratios. \ No newline at end of file diff --git a/modules/ibm-z-rhel-kvm-host-recommendations.adoc b/modules/ibm-z-rhel-kvm-host-recommendations.adoc deleted file mode 100644 index 2ec915c7b189..000000000000 --- a/modules/ibm-z-rhel-kvm-host-recommendations.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/ibm-z-recommended-host-practices.adoc - -:_content-type: PROCEDURE -[id="ibm-z-rhel-kvm-host-recommendations_{context}"] -= {op-system-base} KVM on {ibmzProductName} host recommendations - -Optimizing a KVM virtual server environment strongly depends on the workloads of the virtual servers and on the available resources. The same action that enhances performance in one environment can have adverse effects in another. Finding the best balance for a particular setting can be a challenge and often involves experimentation. - -The following section introduces some best practices when using {product-title} with {op-system-base} KVM on {ibmzProductName} and {linuxoneProductName} environments. - -[id="use-multiple-queues-for-your-virtio-network-interfaces_{context}"] -== Use multiple queues for your VirtIO network interfaces - -With multiple virtual CPUs, you can transfer packages in parallel if you provide multiple queues for incoming and outgoing packets. Use the `queues` attribute of the `driver` element to configure multiple queues. Specify an integer of at least 2 that does not exceed the number of virtual CPUs of the virtual server. - -The following example specification configures two input and output queues for a network interface: - -[source,xml] ----- -<interface type="direct"> - <source network="net01"/> - <model type="virtio"/> - <driver ... queues="2"/> -</interface> ----- - -Multiple queues are designed to provide enhanced performance for a network interface, but they also use memory and CPU resources. Start with defining two queues for busy interfaces. Next, try two queues for interfaces with less traffic or more than two queues for busy interfaces. - -[id="use-io-threads-for-your-virtual-block-devices_{context}"] -== Use I/O threads for your virtual block devices - -To make virtual block devices use I/O threads, you must configure one or more I/O threads for the virtual server and each virtual block device to use one of these I/O threads. - -The following example specifies `<iothreads>3</iothreads>` to configure three I/O threads, with consecutive decimal thread IDs 1, 2, and 3. The `iothread="2"` parameter specifies the driver element of the disk device to use the I/O thread with ID 2. - - -.Sample I/O thread specification -[source,xml] ----- -... -<domain> - <iothreads>3</iothreads><1> - ... - <devices> - ... - <disk type="block" device="disk"><2> -<driver ... iothread="2"/> - </disk> - ... - </devices> - ... -</domain> ----- -<1> The number of I/O threads. -<2> The driver element of the disk device. 
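For reference, one way to confirm the I/O thread layout of a running virtual server is sketched below; `guest01` is a placeholder domain name, not a value from this module:

[source,terminal]
----
# virsh iothreadinfo guest01 <1>
# virsh dumpxml guest01 | grep iothread <2>
----
<1> Lists the I/O threads defined for the domain and their CPU affinity.
<2> Shows the `<iothreads>` count and the `iothread` ID that each disk driver element references.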
- -Threads can increase the performance of I/O operations for disk devices, but they also use memory and CPU resources. You can configure multiple devices to use the same thread. The best mapping of threads to devices depends on the available resources and the workload. - -Start with a small number of I/O threads. Often, a single I/O thread for all disk devices is sufficient. Do not configure more threads than the number of virtual CPUs, and do not configure idle threads. - -You can use the `virsh iothreadadd` command to add I/O threads with specific thread IDs to a running virtual server. - -[id="avoid-virtual-scsi-devices_{context}"] -== Avoid virtual SCSI devices - -Configure virtual SCSI devices only if you need to address the device through SCSI-specific interfaces. Configure disk space as virtual block devices rather than virtual SCSI devices, regardless of the backing on the host. - -However, you might need SCSI-specific interfaces for: - -* A LUN for a SCSI-attached tape drive on the host. - -* A DVD ISO file on the host file system that is mounted on a virtual DVD drive. - -[id="configure-guest-caching-for-disk_{context}"] -== Configure guest caching for disk - -Configure your disk devices to do caching by the guest and not by the host. - -Ensure that the driver element of the disk device includes the `cache="none"` and `io="native"` parameters. - -[source,xml] ----- -<disk type="block" device="disk"> - <driver name="qemu" type="raw" cache="none" io="native" iothread="1"/> -... -</disk> ----- - -[id="exclude-the-memory-ballon-device_{context}"] -== Exclude the memory balloon device - -Unless you need a dynamic memory size, do not define a memory balloon device and ensure that libvirt does not create one for you. Include the `memballoon` parameter as a child of the devices element in your domain configuration XML file. - -* Check the list of active profiles: -+ -[source,xml] ----- -<memballoon model="none"/> ----- - -[id="tune-the-cpu-migration-algorithm-of-the-host-scheduler_{context}"] -== Tune the CPU migration algorithm of the host scheduler - -[IMPORTANT] -==== -Do not change the scheduler settings unless you are an expert who understands the implications. Do not apply changes to production systems without testing them and confirming that they have the intended effect. -==== - -The `kernel.sched_migration_cost_ns` parameter specifies a time interval in nanoseconds. After the last execution of a task, the CPU cache is considered to have useful content until this interval expires. Increasing this interval results in fewer task migrations. The default value is 500000 ns. - -If the CPU idle time is higher than expected when there are runnable processes, try reducing this interval. If tasks bounce between CPUs or nodes too often, try increasing it. - -To dynamically set the interval to 60000 ns, enter the following command: - -[source,terminal] ----- -# sysctl kernel.sched_migration_cost_ns=60000 ----- - -To persistently change the value to 60000 ns, add the following entry to `/etc/sysctl.conf`: - -[source,config] ----- -kernel.sched_migration_cost_ns=60000 ----- - -[id="disable-the-cpuset-cgroup-controller_{context}"] -== Disable the cpuset cgroup controller - -[NOTE] -==== -This setting applies only to KVM hosts with cgroups version 1. To enable CPU hotplug on the host, disable the cgroup controller. -==== - -.Procedure - -. Open `/etc/libvirt/qemu.conf` with an editor of your choice. - -. Go to the `cgroup_controllers` line. - -. 
Duplicate the entire line and remove the leading number sign (#) from the copy. - -. Remove the `cpuset` entry, as follows: -+ -[source,config] ----- -cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuacct" ] ----- - -. For the new setting to take effect, you must restart the libvirtd daemon: - -.. Stop all virtual machines. - -.. Run the following command: -+ -[source,terminal] ----- -# systemctl restart libvirtd ----- - -.. Restart the virtual machines. - -This setting persists across host reboots. - -[id="tune-the-polling-period-for-idle-virtual-cpus_{context}"] -== Tune the polling period for idle virtual CPUs - -When a virtual CPU becomes idle, KVM polls for wakeup conditions for the virtual CPU before allocating the host resource. You can specify the time interval, during which polling takes place in sysfs at `/sys/module/kvm/parameters/halt_poll_ns`. During the specified time, polling reduces the wakeup latency for the virtual CPU at the expense of resource usage. Depending on the workload, a longer or shorter time for polling can be beneficial. The time interval is specified in nanoseconds. The default is 50000 ns. - -* To optimize for low CPU consumption, enter a small value or write 0 to disable polling: - -+ -[source,terminal] ----- -# echo 0 > /sys/module/kvm/parameters/halt_poll_ns ----- - -* To optimize for low latency, for example for transactional workloads, enter a large value: - -+ -[source,terminal] ----- -# echo 80000 > /sys/module/kvm/parameters/halt_poll_ns ----- - diff --git a/modules/ibm-z-secure-execution.adoc b/modules/ibm-z-secure-execution.adoc deleted file mode 100644 index c0c65cdb4ef2..000000000000 --- a/modules/ibm-z-secure-execution.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -:_content-type: PROCEDURE -[id="installing-rhcos-using-ibm-secure-execution_{context}"] -= Installing {op-system} using IBM Secure Execution - -Before you install {op-system} using IBM Secure Execution, you must prepare the underlying infrastructure. - -.Prerequisites - -* IBM z15 or later, or {linuxoneProductName} III or later. -* {op-system-base-full} 8 or later. -* You have a bootstrap Ignition file. The file is not protected, enabling others to view and edit it. -* You have verified that the boot image has not been altered after installation. -* You must run all your nodes as IBM Secure Execution guests. - -.Procedure - -. Prepare your {op-system-base} KVM host to support IBM Secure Execution. - -** By default, KVM hosts do not support guests in IBM Secure Execution mode. To support guests in IBM Secure Execution mode, KVM hosts must boot in LPAR mode with the kernel parameter specification `prot_virt=1`. To enable `prot_virt=1` on {op-system-base} 8, follow these steps: - -.. Navigate to `/boot/loader/entries/` to modify your bootloader configuration file `*.conf`. -.. Add the kernel command line parameter `prot_virt=1`. -.. Run the `zipl` command and reboot your system. -+ -KVM hosts that successfully start with support for IBM Secure Execution for Linux issue the following kernel message: -+ -[source,terminal] ----- -prot_virt: Reserving <amount>MB as ultravisor base storage. ----- -.. 
To verify that the KVM host now supports IBM Secure Execution, run the following command: -+ -[source,terminal] ----- -# cat /sys/firmware/uv/prot_virt_host ----- -+ -.Example output -+ -[source,terminal] ----- -1 ----- -The value of this attribute is 1 for Linux instances that detect their environment as consistent with that of a secure host. For other instances, the value is 0. - -. Add your host keys to the KVM guest via Ignition. -+ -During the first boot, {op-system} looks for your host keys to re-encrypt itself with them. {op-system} searches for files starting with `ibm-z-hostkey-` in the `/etc/se-hostkeys` directory. All host keys, for each machine the cluster is running on, must be loaded into the directory by the administrator. After first boot, you cannot run the VM on any other machines. -+ -[NOTE] -==== -You need to prepare your Ignition file on a safe system. For example, another IBM Secure Execution guest. -==== -+ -For example: -+ -[source,terminal] ----- -{ - "ignition": { "version": "3.0.0" }, - "storage": { - "files": [ - { - "path": "/etc/se-hostkeys/ibm-z-hostkey-<your-hostkey>.crt", - "contents": { - "source": "data:;base64,<base64 encoded hostkey document>" - }, - "mode": 420 - }, - { - "path": "/etc/se-hostkeys/ibm-z-hostkey-<your-hostkey>.crt", - "contents": { - "source": "data:;base64,<base64 encoded hostkey document>" - }, - "mode": 420 - } - ] - } -} -``` ----- -+ -[NOTE] -==== -You can add as many host keys as required if you want your node to be able to run on multiple {ibmzProductName} machines. -==== -. To generate the Base64 encoded string, run the following command: -+ -[source,terminal] ----- -base64 <your-hostkey>.crt ----- -+ -Compared to guests not running IBM Secure Execution, the first boot of the machine is longer because the entire image is encrypted with a randomly generated LUKS passphrase before the Ignition phase. - -. Add Ignition protection -+ -To protect the secrets that are stored in the Ignition config file from being read or even modified, you must encrypt the Ignition config file. -+ -[NOTE] -==== -To achieve the desired security, Ignition logging and local login are disabled by default when running IBM Secure Execution. -==== -.. Fetch the public GPG key for the `secex-qemu.qcow2` image and encrypt the Ignition config with the key by running the following command: -+ -[source,terminal] ----- -gpg --recipient-file /path/to/ignition.gpg.pub --yes --output /path/to/config.ign.gpg --verbose --armor --encrypt /path/to/config.ign ----- -+ -[NOTE] -==== -Before starting the VM, replace `serial=ignition` with `serial=ignition_crypted` when mounting the Ignition file. -==== -+ -When Ignition runs on the first boot, and the decryption is successful, you will see an output like the following example: -+ -.Example output -[source,terminal] ----- -[ 2.801433] systemd[1]: Starting coreos-ignition-setup-user.service - CoreOS Ignition User Config Setup... - -[ 2.803959] coreos-secex-ignition-decrypt[731]: gpg: key <key_name>: public key "Secure Execution (secex) 38.20230323.dev.0" imported -[ 2.808874] coreos-secex-ignition-decrypt[740]: gpg: encrypted with rsa4096 key, ID <key_name>, created <yyyy-mm-dd> -[ OK ] Finished coreos-secex-igni…S Secex Ignition Config Decryptor. ----- -+ -If the decryption fails, you will see an output like the following example: -+ -.Example output -[source,terminal] ----- -Starting coreos-ignition-s…reOS Ignition User Config Setup... 
-[ 2.863675] coreos-secex-ignition-decrypt[729]: gpg: key <key_name>: public key "Secure Execution (secex) 38.20230323.dev.0" imported -[ 2.869178] coreos-secex-ignition-decrypt[738]: gpg: encrypted with RSA key, ID <key_name> -[ 2.870347] coreos-secex-ignition-decrypt[738]: gpg: public key decryption failed: No secret key -[ 2.870371] coreos-secex-ignition-decrypt[738]: gpg: decryption failed: No secret key ----- -+ - -. Follow the fast-track installation procedure to install nodes using the IBM Secure Exection QCOW image. \ No newline at end of file diff --git a/modules/ibmz-configure-devices-manually.adoc b/modules/ibmz-configure-devices-manually.adoc deleted file mode 100644 index 951a6f4823b4..000000000000 --- a/modules/ibmz-configure-devices-manually.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="configure-additional-devices-manually_{context}"] -= Configuring additional devices manually - -Tasks in this section describe how to manually configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. This configuration method is persistent over node restarts but not {product-title} native and you need to redo the steps if you replace the node. - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* The device must be available to the node. -* In a z/VM environment, the device must be attached to the z/VM guest. - -.Procedure - -. Connect to the node via SSH by running the following command: -+ -[source,terminal] ----- -$ ssh <user>@<node_ip_address> ----- -+ -You can also start a debug session to the node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. To enable the devices with the `chzdev` command, enter the following command: -+ -[source,terminal] ----- -$ sudo chzdev -e 0.0.8000 - sudo chzdev -e 1000-1002 - sude chzdev -e 4444 - sudo chzdev -e 0.0.8000:0x500507680d760026:0x00bc000000000000 ----- \ No newline at end of file diff --git a/modules/ibmz-configure-devices-mco.adoc b/modules/ibmz-configure-devices-mco.adoc deleted file mode 100644 index 84789c7732bb..000000000000 --- a/modules/ibmz-configure-devices-mco.adoc +++ /dev/null @@ -1,263 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="configure-additional-devices-using-mco_{context}"] -= Configuring additional devices using the Machine Config Operator (MCO) - -Tasks in this section describe how to use features of the Machine Config Operator (MCO) to configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. Configuring devices with the MCO is persistent but only allows specific configurations for compute nodes. MCO does not allow control plane nodes to have different configurations. - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* The device must be available to the z/VM guest. -* The device is already attached. -* The device is not included in the `cio_ignore` list, which can be set in the kernel parameters. 
-* You have created a `MachineConfig` object file with the following YAML: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker0 -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,worker0]} - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker0: "" ----- - -[id="configuring-fcp-host"] -== Configuring a Fibre Channel Protocol (FCP) host - -The following is an example of how to configure an FCP host adapter with N_Port Identifier Virtualization (NPIV) by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `441-zfcp-host-0.0.8000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.8000", DRIVER=="zfcp", GOTO="cfg_zfcp_host_0.0.8000" -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="zfcp", TEST=="[ccw/0.0.8000]", GOTO="cfg_zfcp_host_0.0.8000" -GOTO="end_zfcp_host_0.0.8000" - -LABEL="cfg_zfcp_host_0.0.8000" -ATTR{[ccw/0.0.8000]online}="1" - -LABEL="end_zfcp_host_0.0.8000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64,<encoded_base64_string> <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-zfcp-host-0.0.8000.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-fcp-lun"] -== Configuring an FCP LUN -The following is an example of how to configure an FCP LUN by adding a udev rule. You can add new FCP LUNs or add additional paths to LUNs that are already configured with multipathing. - -.Procedure - -. Take the following sample udev rule `41-zfcp-lun-0.0.8000:0x500507680d760026:0x00bc000000000000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEMS=="ccw", KERNELS=="0.0.8000", GOTO="start_zfcp_lun_0.0.8207" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="start_zfcp_lun_0.0.8000" -SUBSYSTEM=="fc_remote_ports", ATTR{port_name}=="0x500507680d760026", GOTO="cfg_fc_0.0.8000_0x500507680d760026" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="cfg_fc_0.0.8000_0x500507680d760026" -ATTR{[ccw/0.0.8000]0x500507680d760026/unit_add}="0x00bc000000000000" -GOTO="end_zfcp_lun_0.0.8000" - -LABEL="end_zfcp_lun_0.0.8000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64,<encoded_base64_string> <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-zfcp-lun-0.0.8000:0x500507680d760026:0x00bc000000000000.rules <3> ----- -<1> The role you have defined in the machine config file. 
-<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-dasd"] -== Configuring DASD - -The following is an example of how to configure a DASD device by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `41-dasd-eckd-0.0.4444.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.4444", DRIVER=="dasd-eckd", GOTO="cfg_dasd_eckd_0.0.4444" -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="dasd-eckd", TEST=="[ccw/0.0.4444]", GOTO="cfg_dasd_eckd_0.0.4444" -GOTO="end_dasd_eckd_0.0.4444" - -LABEL="cfg_dasd_eckd_0.0.4444" -ATTR{[ccw/0.0.4444]online}="1" - -LABEL="end_dasd_eckd_0.0.4444" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64,<encoded_base64_string> <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-dasd-eckd-0.0.4444.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. - -[id="configuring-qeth"] -== Configuring qeth - -The following is an example of how to configure a qeth device by adding a udev rule. - -.Procedure - -. Take the following sample udev rule `41-qeth-0.0.1000.rules`: -+ -[source,terminal] ----- -ACTION=="add", SUBSYSTEM=="drivers", KERNEL=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1000", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1001", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccw", KERNEL=="0.0.1002", DRIVER=="qeth", GOTO="group_qeth_0.0.1000" -ACTION=="add", SUBSYSTEM=="ccwgroup", KERNEL=="0.0.1000", DRIVER=="qeth", GOTO="cfg_qeth_0.0.1000" -GOTO="end_qeth_0.0.1000" - -LABEL="group_qeth_0.0.1000" -TEST=="[ccwgroup/0.0.1000]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1000]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1001]", GOTO="end_qeth_0.0.1000" -TEST!="[ccw/0.0.1002]", GOTO="end_qeth_0.0.1000" -ATTR{[drivers/ccwgroup:qeth]group}="0.0.1000,0.0.1001,0.0.1002" -GOTO="end_qeth_0.0.1000" - -LABEL="cfg_qeth_0.0.1000" -ATTR{[ccwgroup/0.0.1000]online}="1" - -LABEL="end_qeth_0.0.1000" ----- - -. Convert the rule to Base64 encoded by running the following command: -+ -[source,terminal] ----- -$ base64 /path/to/file/ ----- - -. Copy the following MCO sample profile into a YAML file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker0 <1> - name: 99-worker0-devices -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - contents: - source: data:text/plain;base64,<encoded_base64_string> <2> - filesystem: root - mode: 420 - path: /etc/udev/rules.d/41-dasd-eckd-0.0.4444.rules <3> ----- -<1> The role you have defined in the machine config file. -<2> The Base64 encoded string that you have generated in the previous step. -<3> The path where the udev rule is located. 
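One possible way to verify the rollout after creating machine configs like the samples above is sketched below; it reuses the `99-worker0-devices` and `worker0` names from those samples, and `<node_name>` is a placeholder for an affected compute node:

[source,terminal]
----
$ oc get machineconfig 99-worker0-devices <1>
$ oc get machineconfigpool worker0 <2>
$ oc debug node/<node_name> -- chroot /host ls /etc/udev/rules.d/ <3>
----
<1> Confirms that the machine config object was created.
<2> Shows whether the `worker0` pool has finished applying the updated rendered configuration.
<3> Lists the udev rule files that were written to the node.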
- diff --git a/modules/ibmz-configure-nbde-with-static-ip.adoc b/modules/ibmz-configure-nbde-with-static-ip.adoc deleted file mode 100644 index eb4bb24794f2..000000000000 --- a/modules/ibmz-configure-nbde-with-static-ip.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] - -:_content-type: PROCEDURE -[id="configuring-nbde-static-ip-ibmz-linuxone-environment_{context}"] -= Configuring NBDE with static IP in an {ibmzProductName} or {linuxoneProductName} environment - -Enabling NBDE disk encryption in an {ibmzProductName} or {linuxoneProductName} environment requires additional steps, which are described in detail in this section. - -.Prerequisites - -* You have set up the External Tang Server. See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#network-bound-disk-encryption_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Network-bound disk encryption] for instructions. -* You have installed the `butane` utility. -* You have reviewed the instructions for how to create machine configs with Butane. - -.Procedure - -. Create Butane configuration files for the control plane and compute nodes. -+ -The following example of a Butane configuration for a control plane node creates a file named `master-storage.bu` for disk encryption: -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: master-storage - labels: - machineconfiguration.openshift.io/role: master -storage: - luks: - - clevis: - tang: - - thumbprint: QcPr_NHFJammnRCA3fFMVdNBwjs - url: http://clevis.example.com:7500 - options: <1> - - --cipher - - aes-cbc-essiv:sha256 -ifndef::ibm-z-kvm[] - device: /dev/disk/by-partlabel/root <2> -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] - device: /dev/disk/by-partlabel/root -endif::ibm-z-kvm[] - label: luks-root - name: root - wipe_volume: true - filesystems: - - device: /dev/mapper/root - format: xfs - label: root - wipe_filesystem: true -openshift: -ifndef::ibm-z-kvm[] - fips: true <3> -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] - fips: true <2> -endif::ibm-z-kvm[] ----- -ifdef::ibm-z-kvm[] -<1> The cipher option is only required if FIPS mode is enabled. Omit the entry if FIPS is disabled. -<2> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -endif::ibm-z-kvm[] -ifndef::ibm-z-kvm[] -<1> The cipher option is only required if FIPS mode is enabled. Omit the entry if FIPS is disabled. -<2> For installations on DASD-type disks, replace with `device: /dev/disk/by-label/root`. -<3> Whether to enable or disable FIPS mode. 
By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -endif::ibm-z-kvm[] - -. Create a customized initramfs file to boot the machine, by running the following command: -+ -[source,terminal] ----- -$ coreos-installer pxe customize \ - /root/rhcos-bootfiles/rhcos-<release>-live-initramfs.s390x.img \ - --dest-device /dev/sda --dest-karg-append \ - ip=<ip-address>::<gateway-ip>:<subnet-mask>::<network-device>:none \ - --dest-karg-append nameserver=<nameserver-ip> \ - --dest-karg-append rd.neednet=1 -o \ - /root/rhcos-bootfiles/<Node-name>-initramfs.s390x.img ----- -+ -[NOTE] -==== -Before first boot, you must customize the initramfs for each node in the cluster, and add PXE kernel parameters. -==== - -. Create a parameter file that includes `ignition.platform.id=metal` and `ignition.firstboot`. -+ -.Example kernel parameter file for the control plane machine: -+ -ifndef::ibm-z-kvm[] -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -coreos.inst.install_dev=/dev/dasda \ <1> -ignition.firstboot ignition.platform.id=metal \ -coreos.live.rootfs_url=http://10.19.17.25/redhat/ocp/rhcos-413.86.202302201445-0/rhcos-413.86.202302201445-0-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://bastion.ocp-cluster1.example.com:8080/ignition/master.ign \ -ip=10.19.17.2::10.19.17.1:255.255.255.0::enbdd0:none nameserver=10.19.17.1 \ -zfcp.allow_lun_scan=0 \ <2> -rd.znet=qeth,0.0.bdd0,0.0.bdd1,0.0.bdd2,layer2=1 \ -rd.zfcp=0.0.5677,0x600606680g7f0056,0x034F000000000000 \ <3> -zfcp.allow_lun_scan=0 \ -rd.znet=qeth,0.0.bdd0,0.0.bdd1,0.0.bdd2,layer2=1 \ -rd.zfcp=0.0.5677,0x600606680g7f0056,0x034F000000000000 ----- -<1> For installations on DASD-type disks, add `coreos.inst.install_dev=/dev/dasda`. Omit this value for FCP-type disks. -<2> For installations on FCP-type disks, add `zfcp.allow_lun_scan=0`. Omit this value for DASD-type disks. -<3> For installations on DASD-type disks, replace with `rd.dasd=0.0.3490` to specify the DASD device. -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -ignition.firstboot ignition.platform.id=metal \ -coreos.live.rootfs_url=http://10.19.17.25/redhat/ocp/rhcos-413.86.202302201445-0/rhcos-413.86.202302201445-0-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://bastion.ocp-cluster1.example.com:8080/ignition/master.ign \ -ip=10.19.17.2::10.19.17.1:255.255.255.0::enbdd0:none nameserver=10.19.17.1 \ -zfcp.allow_lun_scan=0 \ -rd.znet=qeth,0.0.bdd0,0.0.bdd1,0.0.bdd2,layer2=1 \ -rd.zfcp=0.0.5677,0x600606680g7f0056,0x034F000000000000 ----- -endif::ibm-z-kvm[] -+ -[NOTE] -==== -Write all options in the parameter file as a single line and make sure you have no newline characters. 
-==== - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] \ No newline at end of file diff --git a/modules/ibmz-enable-multipathing-fcp-luns.adoc b/modules/ibmz-enable-multipathing-fcp-luns.adoc deleted file mode 100644 index 44e1bc4cd578..000000000000 --- a/modules/ibmz-enable-multipathing-fcp-luns.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * post-installation-configuration/ibmz-post-install.adoc - -:_content-type: PROCEDURE -[id="enabling-multipathing-fcp-luns_{context}"] -= Enabling multipathing for FCP LUNs - -Tasks in this section describe how to manually configure additional devices in an {ibmzProductName} or {linuxoneProductName} environment. This configuration method is persistent over node restarts but not {product-title} native and you need to redo the steps if you replace the node. - -[IMPORTANT] -==== -On {ibmzProductName} and {linuxoneProductName}, you can enable multipathing only if you configured your cluster for it during installation. For more information, see "Installing {op-system} and starting the {product-title} bootstrap process" in _Installing a cluster with z/VM on {ibmzProductName} and {linuxoneProductName}_. -==== - -.Prerequisites - -* You are logged in to the cluster as a user with administrative privileges. -* You have configured multiple paths to a LUN with either method explained above. - -.Procedure - -. Connect to the node via SSH by running the following command: -+ -[source,terminal] ----- -$ ssh <user>@<node_ip_address> ----- -+ -You can also start a debug session to the node by running the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. To enable multipathing, run the following command: -+ -[source,terminal] ----- -$ sudo /sbin/mpathconf --enable ----- - -. To start the `multipathd` daemon, run the following command: -+ -[source,terminal] ----- -$ sudo multipath ----- - -. 
Optional: To format your multipath device with fdisk, run the following command: -+ -[source,terminal] ----- -$ sudo fdisk /dev/mapper/mpatha ----- - -.Verification - -* To verify that the devices have been grouped, run the following command: -+ -[source,terminal] ----- -$ sudo multipath -II ----- -+ -.Example output -+ -[source,terminal] ----- -mpatha (20017380030290197) dm-1 IBM,2810XIV - size=512G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw - -+- policy='service-time 0' prio=50 status=enabled - |- 1:0:0:6 sde 68:16 active ready running - |- 1:0:1:6 sdf 69:24 active ready running - |- 0:0:0:6 sdg 8:80 active ready running - `- 0:0:1:6 sdh 66:48 active ready running ----- \ No newline at end of file diff --git a/modules/identity-provider-about-basic-authentication.adoc b/modules/identity-provider-about-basic-authentication.adoc deleted file mode 100644 index aa7370657006..000000000000 --- a/modules/identity-provider-about-basic-authentication.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-basic-authentication_{context}"] -= About basic authentication - -Basic authentication is a generic back-end integration mechanism that allows -users to log in to {product-title} with credentials validated against a remote -identity provider. - -Because basic authentication is generic, you can use this identity -provider for advanced authentication configurations. - -[IMPORTANT] -==== -Basic authentication must use an HTTPS connection to the remote server to -prevent potential snooping of the user ID and password and man-in-the-middle -attacks. -==== - -With basic authentication configured, users send their user name -and password to {product-title}, which then validates those credentials against -a remote server by making a server-to-server request, passing the credentials as -a basic authentication header. This requires users to send their credentials to -{product-title} during login. - -[NOTE] -==== -This only works for user name/password login mechanisms, and {product-title} must -be able to make network requests to the remote authentication server. -==== - -User names and passwords are validated against a remote URL that is protected -by basic authentication and returns JSON. - -A `401` response indicates failed authentication. - -A non-`200` status, or the presence of a non-empty "error" key, indicates an -error: - -[source,terminal] ----- -{"error":"Error message"} ----- - -A `200` status with a `sub` (subject) key indicates success: - -[source,terminal] ----- -{"sub":"userid"} <1> ----- -<1> The subject must be unique to the authenticated user and must not be able to -be modified. - -A successful response can optionally provide additional data, such as: - -* A display name using the `name` key. For example: -+ -[source,terminal] ----- -{"sub":"userid", "name": "User Name", ...} ----- -+ -* An email address using the `email` key. For example: -+ -[source,terminal] ----- -{"sub":"userid", "email":"user@example.com", ...} ----- -+ -* A preferred user name using the `preferred_username` key. This is useful when -the unique, unchangeable subject is a database key or UID, and a more -human-readable name exists. This is used as a hint when provisioning the -{product-title} user for the authenticated identity. 
For example: -+ -[source,terminal] ----- -{"sub":"014fbff9a07c", "preferred_username":"bob", ...} ----- diff --git a/modules/identity-provider-about-ldap.adoc b/modules/identity-provider-about-ldap.adoc deleted file mode 100644 index 32b452b4ba4d..000000000000 --- a/modules/identity-provider-about-ldap.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-ldap_{context}"] -= About LDAP authentication - -During authentication, the LDAP directory is searched for an entry that matches -the provided user name. If a single unique match is found, a simple bind is -attempted using the distinguished name (DN) of the entry plus the provided -password. - -These are the steps taken: - -. Generate a search filter by combining the attribute and filter in the -configured `url` with the user-provided user name. -. Search the directory using the generated filter. If the search does not return -exactly one entry, deny access. -. Attempt to bind to the LDAP server using the DN of the entry retrieved from -the search, and the user-provided password. -. If the bind is unsuccessful, deny access. -. If the bind is successful, build an identity using the configured attributes -as the identity, email address, display name, and preferred user name. - -The configured `url` is an RFC 2255 URL, which specifies the LDAP host and -search parameters to use. The syntax of the URL is: - ----- -ldap://host:port/basedn?attribute?scope?filter ----- - -For this URL: - -[cols="2a,8a",options="header"] -|=== -|URL component | Description -.^|`ldap` | For regular LDAP, use the string `ldap`. For secure LDAP -(LDAPS), use `ldaps` instead. -.^|`host:port` | The name and port of the LDAP server. Defaults to -`localhost:389` for ldap and `localhost:636` for LDAPS. -.^|`basedn` | The DN of the branch of the directory where all searches should -start from. At the very least, this must be the top of your directory tree, but -it could also specify a subtree in the directory. -.^|`attribute` | The attribute to search for. Although RFC 2255 allows a -comma-separated list of attributes, only the first attribute will be used, no -matter how many are provided. If no attributes are provided, the default is to -use `uid`. It is recommended to choose an attribute that will be unique across -all entries in the subtree you will be using. -.^|`scope` | The scope of the search. Can be either `one` or `sub`. -If the scope is not provided, the default is to use a scope of `sub`. -.^|`filter` | A valid LDAP search filter. If not provided, defaults to -`(objectClass=*)` -|=== - -When doing searches, the attribute, filter, and provided user name are combined -to create a search filter that looks like: - ----- -(&(<filter>)(<attribute>=<username>)) ----- - -For example, consider a URL of: - ----- -ldap://ldap.example.com/o=Acme?cn?sub?(enabled=true) ----- - -When a client attempts to connect using a user name of `bob`, the resulting -search filter will be `(&(enabled=true)(cn=bob))`. - -If the LDAP directory requires authentication to search, specify a `bindDN` and -`bindPassword` to use to perform the entry search. 
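To sanity-check an LDAP configuration before wiring it into the identity provider, you can run the same search by hand with `ldapsearch`. This is an illustrative sketch only, built from the example URL above; the bind DN is a hypothetical service account, and you can drop `-D`/`-W` if your directory allows anonymous searches.

[source,terminal]
----
# Base DN, scope, and filter correspond to ldap://ldap.example.com/o=Acme?cn?sub?(enabled=true)
# combined with the user name bob, which yields the filter (&(enabled=true)(cn=bob)).
$ ldapsearch -x -H ldap://ldap.example.com -D "cn=search,dc=example,dc=com" -W \
    -b "o=Acme" -s sub "(&(enabled=true)(cn=bob))" cn
----

The search should return exactly one entry; zero or multiple matches mean the provider would deny access for that user name.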
diff --git a/modules/identity-provider-about-request-header.adoc b/modules/identity-provider-about-request-header.adoc deleted file mode 100644 index 17f0d8dd902a..000000000000 --- a/modules/identity-provider-about-request-header.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -:_content-type: CONCEPT -[id="identity-provider-about-request-header_{context}"] -= About request header authentication - -A request header identity provider identifies users from request -header values, such as `X-Remote-User`. It is typically used in combination with -an authenticating proxy, which sets the request header value. The -request header identity provider cannot be combined with other identity providers -that use direct password logins, such as htpasswd, Keystone, LDAP or basic authentication. - -[NOTE] -==== -You can also use the request header identity provider for advanced configurations -such as the community-supported link:https://github.com/openshift/request-header-saml-service-provider[SAML authentication]. -Note that this solution is not supported by Red Hat. -==== - -For users to authenticate using this identity provider, they must access -`https://_<namespace_route>_/oauth/authorize` (and subpaths) via an authenticating proxy. -To accomplish this, configure the OAuth server to redirect unauthenticated -requests for OAuth tokens to the proxy endpoint that proxies to -`https://_<namespace_route>_/oauth/authorize`. - -To redirect unauthenticated requests from clients expecting browser-based login flows: - -* Set the `provider.loginURL` parameter to the authenticating proxy URL that -will authenticate interactive clients and then proxy the request to -`https://_<namespace_route>_/oauth/authorize`. - -To redirect unauthenticated requests from clients expecting `WWW-Authenticate` challenges: - -* Set the `provider.challengeURL` parameter to the authenticating proxy URL that -will authenticate clients expecting `WWW-Authenticate` challenges and then proxy -the request to `https://_<namespace_route>_/oauth/authorize`. - -The `provider.challengeURL` and `provider.loginURL` parameters can include -the following tokens in the query portion of the URL: - -* `${url}` is replaced with the current URL, escaped to be safe in a query parameter. -+ -For example: [x-]`https://www.example.com/sso-login?then=${url}` - -* `${query}` is replaced with the current query string, unescaped. -+ -For example: [x-]`https://www.example.com/auth-proxy/oauth/authorize?${query}` - -[IMPORTANT] -==== -As of {product-title} 4.1, your proxy must support mutual TLS. -==== - -[id="sspi-windows_{context}"] -== SSPI connection support on Microsoft Windows - -ifdef::openshift-enterprise,openshift-webscale[] - -:FeatureName: Using SSPI connection support on Microsoft Windows -include::snippets/technology-preview.adoc[leveloffset=+1] - -endif::[] - -The OpenShift CLI (`oc`) supports the Security Support Provider Interface (SSPI) to allow for SSO -flows on Microsft Windows. If you use the request header identity provider with a -GSSAPI-enabled proxy to connect an Active Directory server to {product-title}, -users can automatically authenticate to {product-title} by using the `oc` command -line interface from a domain-joined Microsoft Windows computer. 
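A quick, illustrative way to check that unauthenticated requests are being sent to the authenticating proxy is to look up the OAuth route and request the authorize endpoint directly. This mirrors the verification steps shown later for the Apache proxy example and assumes nothing beyond the commands already used in this document; quote the URL so the shell does not interpret the `&` character.

[source,terminal]
----
# Look up the OAuth server route; its host is the <namespace_route> value used above.
$ oc get route -n openshift-authentication

# With challengeURL configured, a challenge-style request to the authorize endpoint
# should redirect to the authenticating proxy.
$ curl -k -v -H 'X-Csrf-Token: 1' \
    "https://<namespace_route>/oauth/authorize?client_id=openshift-challenging-client&response_type=token"
----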
diff --git a/modules/identity-provider-add.adoc b/modules/identity-provider-add.adoc deleted file mode 100644 index 78a9612ec1fd..000000000000 --- a/modules/identity-provider-add.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -// GitHub and Google IDPs do not support username/password login commands -ifeval::["{context}" == "configuring-github-identity-provider"] -:no-username-password-login: -endif::[] -ifeval::["{context}" == "configuring-google-identity-provider"] -:no-username-password-login: -endif::[] -// Only some OIDC IDPs support username/password login commands -ifeval::["{context}" == "configuring-oidc-identity-provider"] -:no-username-password-login: -:oidc: -endif::[] - -:_content-type: PROCEDURE -[id="add-identity-provider_{context}"] -= Adding an identity provider to your cluster - -After you install your cluster, add an identity provider to it so your -users can authenticate. - -.Prerequisites - -* Create an {product-title} cluster. -* Create the custom resource (CR) for your identity providers. -* You must be logged in as an administrator. - -.Procedure - -. Apply the defined CR: -+ -[source,terminal] ----- -$ oc apply -f </path/to/CR> ----- -+ -[NOTE] -==== -If a CR does not exist, `oc apply` creates a new CR and might trigger the following warning: `Warning: oc apply should be used on resources created by either oc create --save-config or oc apply`. In this case you can safely ignore this warning. -==== - -ifndef::no-username-password-login[] -. Log in to the cluster as a user from your identity provider, entering the -password when prompted. -+ -[source,terminal] ----- -$ oc login -u <username> ----- -endif::no-username-password-login[] - -ifdef::no-username-password-login[] - -. Obtain a token from the OAuth server. -+ -As long as the `kubeadmin` user has been removed, the `oc login` command provides instructions on how to access a web page where you can retrieve the token. -+ -You can also access this page from the web console by navigating to *(?) Help* -> *Command Line Tools* -> *Copy Login Command*. - -. Log in to the cluster, passing in the token to authenticate. -+ -[source,terminal] ----- -$ oc login --token=<token> ----- -+ -[NOTE] -==== -ifdef::oidc[] -If your OpenID Connect identity provider supports the resource owner password credentials (ROPC) grant flow, you can log in with a user name and password. You might need to take steps to enable the ROPC grant flow for your identity provider. 
- -After the OIDC identity provider is configured in {product-title}, you can log in by using the following command, which prompts for your user name and password: - -[source,terminal] ----- -$ oc login -u <identity_provider_username> --server=<api_server_url_and_port> ----- -endif::oidc[] - -ifndef::oidc[] -This identity provider does not support logging in with a user name and password. -endif::oidc[] -==== -endif::no-username-password-login[] - -. Confirm that the user logged in successfully, and display the user name. -+ -[source,terminal] ----- -$ oc whoami ----- - -// Undefining attributes -ifeval::["{context}" == "configuring-google-identity-provider"] -:!no-username-password-login: -endif::[] -ifeval::["{context}" == "configuring-oidc-identity-provider"] -:!no-username-password-login: -:!oidc: -endif::[] -ifeval::["{context}" == "configuring-github-identity-provider"] -:!no-username-password-login: -endif::[] diff --git a/modules/identity-provider-apache-custom-proxy-configuration.adoc b/modules/identity-provider-apache-custom-proxy-configuration.adoc deleted file mode 100644 index 843f50171681..000000000000 --- a/modules/identity-provider-apache-custom-proxy-configuration.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -[id="identity-provider-apache-custom-proxy-configuration_{context}"] -= Custom proxy configuration - -Using the `mod_auth_gssapi` module is a popular way to configure the Apache -authentication proxy using the request header identity provider; however, it is -not required. Other proxies can easily be used if the following requirements are -met: - -* Block the `X-Remote-User` header from client requests to prevent spoofing. -* Enforce client certificate authentication in the `RequestHeaderIdentityProvider` -configuration. -* Require the `X-Csrf-Token` header be set for all authentication requests using -the challenge flow. -* Make sure only the `/oauth/authorize` endpoint and its subpaths are proxied; -redirects must be rewritten to allow the backend server to send the client to -the correct location. -* The URL that proxies to `\https://<namespace_route>/oauth/authorize` must end -with `/authorize` with no trailing slash. For example, `\https://proxy.example.com/login-proxy/authorize?...` -must proxy to `\https://<namespace_route>/oauth/authorize?...`. -+ -* Subpaths of the URL that proxies to `\https://<namespace_route>/oauth/authorize` -must proxy to subpaths of `\https://<namespace_route>/oauth/authorize`. For -example, `\https://proxy.example.com/login-proxy/authorize/approve?...` must -proxy to `\https://<namespace_route>/oauth/authorize/approve?...`. - -[NOTE] -==== -The `\https://<namespace_route>` address is the route to the OAuth server and -can be obtained by running `oc get route -n openshift-authentication`. 
-==== diff --git a/modules/identity-provider-basic-authentication-CR.adoc b/modules/identity-provider-basic-authentication-CR.adoc deleted file mode 100644 index dd184c921558..000000000000 --- a/modules/identity-provider-basic-authentication-CR.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -[id="identity-provider-basic-authentication-CR_{context}"] -= Sample basic authentication CR - -The following custom resource (CR) shows the parameters and acceptable values for a -basic authentication identity provider. - -.Basic authentication CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: basicidp <1> - mappingMethod: claim <2> - type: BasicAuth - basicAuth: - url: https://www.example.com/remote-idp <3> - ca: <4> - name: ca-config-map - tlsClientCert: <5> - name: client-cert-secret - tlsClientKey: <6> - name: client-key-secret ----- -<1> This provider name is prefixed to the returned user ID to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> URL accepting credentials in Basic authentication headers. -<4> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. -<5> Optional: Reference to an {product-title} `Secret` object containing the client -certificate to present when making requests to the configured URL. -<6> Reference to an {product-title} `Secret` object containing the key for the -client certificate. Required if `tlsClientCert` is specified. diff --git a/modules/identity-provider-basic-authentication-troubleshooting.adoc b/modules/identity-provider-basic-authentication-troubleshooting.adoc deleted file mode 100644 index b8b3856043d0..000000000000 --- a/modules/identity-provider-basic-authentication-troubleshooting.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc - -[id="identity-provider-basic-authentication-troubleshooting_{context}"] -= Basic authentication troubleshooting - -The most common issue relates to network connectivity to the backend server. For -simple debugging, run `curl` commands on the master. To test for a successful -login, replace the `<user>` and `<password>` in the following example command -with valid credentials. To test an invalid login, replace them with false -credentials. - -[source,terminal] ----- -$ curl --cacert /path/to/ca.crt --cert /path/to/client.crt --key /path/to/client.key -u <user>:<password> -v https://www.example.com/remote-idp ----- - -*Successful responses* - -A `200` status with a `sub` (subject) key indicates success: - -[source,terminal] ----- -{"sub":"userid"} ----- -The subject must be unique to the authenticated user, and must not be able to -be modified. 
- -A successful response can optionally provide additional data, such as: - -* A display name using the `name` key: -+ -[source,terminal] ----- -{"sub":"userid", "name": "User Name", ...} ----- -* An email address using the `email` key: -+ -[source,terminal] ----- -{"sub":"userid", "email":"user@example.com", ...} ----- -* A preferred user name using the `preferred_username` key: -+ -[source,terminal] ----- -{"sub":"014fbff9a07c", "preferred_username":"bob", ...} ----- -+ -The `preferred_username` key is useful when -the unique, unchangeable subject is a database key or UID, and a more -human-readable name exists. This is used as a hint when provisioning the -{product-title} user for the authenticated identity. - -*Failed responses* - -- A `401` response indicates failed authentication. -- A non-`200` status or the presence of a non-empty "error" key indicates an -error: `{"error":"Error message"}` diff --git a/modules/identity-provider-config-map.adoc b/modules/identity-provider-config-map.adoc deleted file mode 100644 index 4932e8d886e6..000000000000 --- a/modules/identity-provider-config-map.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -ifeval::["{context}" == "configuring-github-identity-provider"] -:github: -endif::[] - -:_content-type: PROCEDURE -[id="identity-provider-creating-configmap_{context}"] -= Creating a config map - -Identity providers use {product-title} `ConfigMap` objects in the `openshift-config` -namespace to contain the certificate authority bundle. These are primarily -used to contain certificate bundles needed by the identity provider. - -ifdef::github[] -[NOTE] -==== -This procedure is only required for GitHub Enterprise. -==== -endif::github[] - -.Procedure - -* Define an {product-title} `ConfigMap` object containing the -certificate authority by using the following command. The certificate -authority must be stored in the `ca.crt` key of the `ConfigMap` object. 
-+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -data: - ca.crt: | - <CA_certificate_PEM> ----- -==== - -// Undefining attributes -ifeval::["{context}" == "configuring-google-identity-provider"] -:!github: -endif::[] diff --git a/modules/identity-provider-configuring-apache-request-header.adoc b/modules/identity-provider-configuring-apache-request-header.adoc deleted file mode 100644 index 88515151d9a1..000000000000 --- a/modules/identity-provider-configuring-apache-request-header.adoc +++ /dev/null @@ -1,261 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-configuring-apache-request-header_{context}"] -= Configuring Apache authentication using request header - -This example uses the `mod_auth_gssapi` module to configure an Apache -authentication proxy using the request header identity provider. - -.Prerequisites - -* Obtain the `mod_auth_gssapi` module from the -link:https://access.redhat.com/solutions/392003[Optional channel]. -You must have the following packages installed on your local machine: -+ -** `httpd` -** `mod_ssl` -** `mod_session` -** `apr-util-openssl` -** `mod_auth_gssapi` - -* Generate a CA for validating requests that submit the trusted header. Define -an {product-title} `ConfigMap` object containing the CA. This is done by running: -+ -[source,terminal] ----- -$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config <1> ----- -<1> The CA must be stored in the `ca.crt` key of the `ConfigMap` object. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ca-config-map - namespace: openshift-config -data: - ca.crt: | - <CA_certificate_PEM> ----- -==== - -* Generate a client certificate for the proxy. You can generate this certificate -by using any x509 certificate tooling. The client certificate must be signed by -the CA you generated for validating requests that submit the trusted header. - -* Create the custom resource (CR) for your identity providers. - -.Procedure - -This proxy uses a client certificate to connect to the OAuth server, which -is configured to trust the `X-Remote-User` header. - -. Create the certificate for the Apache configuration. The certificate that you -specify as the `SSLProxyMachineCertificateFile` parameter value is the proxy's -client certificate that is used to authenticate the proxy to the server. It must -use `TLS Web Client Authentication` as the extended key type. - -. Create the Apache configuration. Use the following template to provide your -required settings and values: -+ -[IMPORTANT] -==== -Carefully review the template and customize its contents to fit your -environment. -==== -+ ----- -LoadModule request_module modules/mod_request.so -LoadModule auth_gssapi_module modules/mod_auth_gssapi.so -# Some Apache configurations might require these modules. -# LoadModule auth_form_module modules/mod_auth_form.so -# LoadModule session_module modules/mod_session.so - -# Nothing needs to be served over HTTP. This virtual host simply redirects to -# HTTPS. 
-<VirtualHost *:80> - DocumentRoot /var/www/html - RewriteEngine On - RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R,L] -</VirtualHost> - -<VirtualHost *:443> - # This needs to match the certificates you generated. See the CN and X509v3 - # Subject Alternative Name in the output of: - # openssl x509 -text -in /etc/pki/tls/certs/localhost.crt - ServerName www.example.com - - DocumentRoot /var/www/html - SSLEngine on - SSLCertificateFile /etc/pki/tls/certs/localhost.crt - SSLCertificateKeyFile /etc/pki/tls/private/localhost.key - SSLCACertificateFile /etc/pki/CA/certs/ca.crt - - SSLProxyEngine on - SSLProxyCACertificateFile /etc/pki/CA/certs/ca.crt - # It is critical to enforce client certificates. Otherwise, requests can - # spoof the X-Remote-User header by accessing the /oauth/authorize endpoint - # directly. - SSLProxyMachineCertificateFile /etc/pki/tls/certs/authproxy.pem - - # To use the challenging-proxy, an X-Csrf-Token must be present. - RewriteCond %{REQUEST_URI} ^/challenging-proxy - RewriteCond %{HTTP:X-Csrf-Token} ^$ [NC] - RewriteRule ^.* - [F,L] - - <Location /challenging-proxy/oauth/authorize> - # Insert your backend server name/ip here. - ProxyPass https://<namespace_route>/oauth/authorize - AuthName "SSO Login" - # For Kerberos - AuthType GSSAPI - Require valid-user - RequestHeader set X-Remote-User %{REMOTE_USER}s - - GssapiCredStore keytab:/etc/httpd/protected/auth-proxy.keytab - # Enable the following if you want to allow users to fallback - # to password based authentication when they do not have a client - # configured to perform kerberos authentication. - GssapiBasicAuth On - - # For ldap: - # AuthBasicProvider ldap - # AuthLDAPURL "ldap://ldap.example.com:389/ou=People,dc=my-domain,dc=com?uid?sub?(objectClass=*)" - </Location> - - <Location /login-proxy/oauth/authorize> - # Insert your backend server name/ip here. - ProxyPass https://<namespace_route>/oauth/authorize - - AuthName "SSO Login" - AuthType GSSAPI - Require valid-user - RequestHeader set X-Remote-User %{REMOTE_USER}s env=REMOTE_USER - - GssapiCredStore keytab:/etc/httpd/protected/auth-proxy.keytab - # Enable the following if you want to allow users to fallback - # to password based authentication when they do not have a client - # configured to perform kerberos authentication. - GssapiBasicAuth On - - ErrorDocument 401 /login.html - </Location> - -</VirtualHost> - -RequestHeader unset X-Remote-User ----- -+ -[NOTE] -==== -The `\https://<namespace_route>` address is the route to the OAuth server and -can be obtained by running `oc get route -n openshift-authentication`. -==== - -. Update the `identityProviders` stanza in the custom resource (CR): -+ -[source,yaml] ----- -identityProviders: - - name: requestheaderidp - type: RequestHeader - requestHeader: - challengeURL: "https://<namespace_route>/challenging-proxy/oauth/authorize?${query}" - loginURL: "https://<namespace_route>/login-proxy/oauth/authorize?${query}" - ca: - name: ca-config-map - clientCommonNames: - - my-auth-proxy - headers: - - X-Remote-User ----- - -. Verify the configuration. - -.. Confirm that you can bypass the proxy by requesting a token by supplying the -correct client certificate and header: -+ -[source,terminal] ----- -# curl -L -k -H "X-Remote-User: joe" \ - --cert /etc/pki/tls/certs/authproxy.pem \ - https://<namespace_route>/oauth/token/request ----- - -.. 
Confirm that requests that do not supply the client certificate fail by -requesting a token without the certificate: -+ -[source,terminal] ----- -# curl -L -k -H "X-Remote-User: joe" \ - https://<namespace_route>/oauth/token/request ----- - -.. Confirm that the `challengeURL` redirect is active: -+ -[source,terminal] ----- -# curl -k -v -H 'X-Csrf-Token: 1' \ - https://<namespace_route>/oauth/authorize?client_id=openshift-challenging-client&response_type=token ----- -+ -Copy the `challengeURL` redirect to use in the next step. - -.. Run this command to show a `401` response with a `WWW-Authenticate` basic -challenge, a negotiate challenge, or both challenges: -+ -[source,terminal] ----- -# curl -k -v -H 'X-Csrf-Token: 1' \ - <challengeURL_redirect + query> ----- - -.. Test logging in to the OpenShift CLI (`oc`) with and without using a Kerberos -ticket: -... If you generated a Kerberos ticket by using `kinit`, destroy it: -+ -[source,terminal] ----- -# kdestroy -c cache_name <1> ----- -+ -<1> Make sure to provide the name of your Kerberos cache. -... Log in to the `oc` tool by using your Kerberos credentials: -+ -[source,terminal] ----- -# oc login -u <username> ----- -+ -Enter your Kerberos password at the prompt. -... Log out of the `oc` tool: -+ -[source,terminal] ----- -# oc logout ----- -... Use your Kerberos credentials to get a ticket: -+ -[source,terminal] ----- -# kinit ----- -+ -Enter your Kerberos user name and password at the prompt. -... Confirm that you can log in to the `oc` tool: -+ -[source,terminal] ----- -# oc login ----- -+ -If your configuration is correct, you are logged in without entering separate -credentials. diff --git a/modules/identity-provider-configuring-using-web-console.adoc b/modules/identity-provider-configuring-using-web-console.adoc deleted file mode 100644 index 883ec4e64652..000000000000 --- a/modules/identity-provider-configuring-using-web-console.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -//* authentication/identity_providers/configuring-oidc-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-configuring-using-the-web-console_{context}"] -= Configuring identity providers using the web console - -Configure your identity provider (IDP) through the web console instead of the CLI. - -.Prerequisites - -* You must be logged in to the web console as a cluster administrator. - -.Procedure - -. Navigate to *Administration* -> *Cluster Settings*. -. Under the *Configuration* tab, click *OAuth*. -. Under the *Identity Providers* section, select your identity provider from the -*Add* drop-down menu. - -[NOTE] -==== -You can specify multiple IDPs through the web console without overwriting -existing IDPs. 
-==== diff --git a/modules/identity-provider-creating-htpasswd-file-linux.adoc b/modules/identity-provider-creating-htpasswd-file-linux.adoc deleted file mode 100644 index 204ee422d26d..000000000000 --- a/modules/identity-provider-creating-htpasswd-file-linux.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-file-linux_{context}"] -= Creating an htpasswd file using Linux - -To use the htpasswd identity provider, you must generate a flat file that -contains the user names and passwords for your cluster by using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -.Prerequisites - -* Have access to the `htpasswd` utility. On Red Hat Enterprise Linux -this is available by installing the `httpd-tools` package. - -.Procedure - -. Create or update your flat file with a user name and hashed password: -+ -[source,terminal] ----- -$ htpasswd -c -B -b </path/to/users.htpasswd> <user_name> <password> ----- -+ -The command generates a hashed version of the password. -+ -For example: -+ -[source,terminal] ----- -$ htpasswd -c -B -b users.htpasswd user1 MyPassword! ----- -+ -.Example output -[source,terminal] ----- -Adding password for user user1 ----- - -. Continue to add or update credentials to the file: -+ -[source,terminal] ----- -$ htpasswd -B -b </path/to/users.htpasswd> <user_name> <password> ----- diff --git a/modules/identity-provider-creating-htpasswd-file-windows.adoc b/modules/identity-provider-creating-htpasswd-file-windows.adoc deleted file mode 100644 index 61784c60c228..000000000000 --- a/modules/identity-provider-creating-htpasswd-file-windows.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-file-windows_{context}"] -= Creating an htpasswd file using Windows - -To use the htpasswd identity provider, you must generate a flat file that -contains the user names and passwords for your cluster by using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. - -.Prerequisites - -* Have access to `htpasswd.exe`. This file is included in the `\bin` -directory of many Apache httpd distributions. - -.Procedure - -. Create or update your flat file with a user name and hashed password: -+ -[source,terminal] ----- -> htpasswd.exe -c -B -b <\path\to\users.htpasswd> <user_name> <password> ----- -+ -The command generates a hashed version of the password. -+ -For example: -+ -[source,terminal] ----- -> htpasswd.exe -c -B -b users.htpasswd user1 MyPassword! ----- -+ -.Example output -[source,terminal] ----- -Adding password for user user1 ----- - -. 
Continue to add or update credentials to the file: -+ -[source,terminal] ----- -> htpasswd.exe -b <\path\to\users.htpasswd> <user_name> <password> ----- diff --git a/modules/identity-provider-default-CR.adoc b/modules/identity-provider-default-CR.adoc deleted file mode 100644 index e6b081ca123b..000000000000 --- a/modules/identity-provider-default-CR.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="identity-provider-default-CR_{context}"] -= Sample identity provider CR - -The following custom resource (CR) shows the parameters and default -values that you use to configure an identity provider. This example -uses the htpasswd identity provider. - -.Sample identity provider CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_identity_provider <1> - mappingMethod: claim <2> - type: HTPasswd - htpasswd: - fileData: - name: htpass-secret <3> ----- -<1> This provider name is prefixed to provider user names to form an -identity name. -<2> Controls how mappings are established between this provider's -identities and `User` objects. -<3> An existing secret containing a file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. diff --git a/modules/identity-provider-github-CR.adoc b/modules/identity-provider-github-CR.adoc deleted file mode 100644 index aba23a0b8066..000000000000 --- a/modules/identity-provider-github-CR.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -[id="identity-provider-github-CR_{context}"] -= Sample GitHub CR - -The following custom resource (CR) shows the parameters and acceptable values for a -GitHub identity provider. - -.GitHub CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: githubidp <1> - mappingMethod: claim <2> - type: GitHub - github: - ca: <3> - name: ca-config-map - clientID: {...} <4> - clientSecret: <5> - name: github-secret - hostname: ... <6> - organizations: <7> - - myorganization1 - - myorganization2 - teams: <8> - - myorganization1/team-a - - myorganization2/team-b ----- -<1> This provider name is prefixed to the GitHub numeric user ID to form an -identity name. It is also used to build the callback URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. Only for use in GitHub Enterprise -with a non-publicly trusted root certificate. -<4> The client ID of a -link:https://github.com/settings/applications/new[registered GitHub OAuth -application]. The application must be configured with a callback URL of -`\https://oauth-openshift.apps.<cluster-name>.<cluster-domain>/oauth2callback/<idp-provider-name>`. -<5> Reference to an {product-title} `Secret` object containing the client secret -issued by GitHub. -<6> For GitHub Enterprise, you must provide the hostname of your instance, such as -`example.com`. 
This value must match the GitHub Enterprise `hostname` value in -in the `/setup/settings` file and cannot include a port number. If this -value is not set, then either `teams` or `organizations` must be defined. -For GitHub, omit this parameter. -<7> The list of organizations. Either the `organizations` or `teams` field must be set unless the `hostname` field is set, or if `mappingMethod` is set to `lookup`. Cannot be used in combination with the `teams` field. -<8> The list of teams. Either the `teams` or `organizations` field must be set unless the `hostname` field is set, or if `mappingMethod` is set to `lookup`. Cannot be used in combination with the `organizations` field. - -[NOTE] -==== -If `organizations` or `teams` is specified, only GitHub users that are members of -at least one of the listed organizations will be allowed to log in. If the GitHub OAuth -application configured in `clientID` is not owned by the organization, an organization -owner must grant third-party access to use this option. This can be done during -the first GitHub login by the organization's administrator, or from the GitHub organization settings. -==== diff --git a/modules/identity-provider-github-about.adoc b/modules/identity-provider-github-about.adoc deleted file mode 100644 index ea36c362006d..000000000000 --- a/modules/identity-provider-github-about.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -[id="identity-provider-github-about_{context}"] -= About GitHub authentication - -Configuring link:https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/authorizing-oauth-apps[GitHub authentication] allows users to log in to {product-title} with their GitHub credentials. To prevent anyone with any GitHub user ID from logging in to your {product-title} cluster, you can restrict access to only those in specific GitHub organizations. diff --git a/modules/identity-provider-gitlab-CR.adoc b/modules/identity-provider-gitlab-CR.adoc deleted file mode 100644 index 7c4b59b5b2f5..000000000000 --- a/modules/identity-provider-gitlab-CR.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc - -[id="identity-provider-gitlab-CR_{context}"] -= Sample GitLab CR - -The following custom resource (CR) shows the parameters and acceptable values for a -GitLab identity provider. - -.GitLab CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: gitlabidp <1> - mappingMethod: claim <2> - type: GitLab - gitlab: - clientID: {...} <3> - clientSecret: <4> - name: gitlab-secret - url: https://gitlab.com <5> - ca: <6> - name: ca-config-map ----- -<1> This provider name is prefixed to the GitLab numeric user ID to form an -identity name. It is also used to build the callback URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a -link:https://docs.gitlab.com/ce/api/oauth2.html[registered GitLab OAuth application]. -The application must be configured with a callback URL of -`\https://oauth-openshift.apps.<cluster-name>.<cluster-domain>/oauth2callback/<idp-provider-name>`. -<4> Reference to an {product-title} `Secret` object containing the client secret -issued by GitLab. -<5> The host URL of a GitLab provider. 
This could either be `\https://gitlab.com/` -or any other self hosted instance of GitLab. -<6> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. diff --git a/modules/identity-provider-gitlab-about.adoc b/modules/identity-provider-gitlab-about.adoc deleted file mode 100644 index 5895124d044d..000000000000 --- a/modules/identity-provider-gitlab-about.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc - -[id="identity-provider-gitlab-about_{context}"] -= About GitLab authentication - -Configuring GitLab authentication allows users to log in to {product-title} with their GitLab credentials. - -If you use GitLab version 7.7.0 to 11.0, you connect using the link:http://doc.gitlab.com/ce/integration/oauth_provider.html[OAuth integration]. If you use GitLab version 11.1 or later, you can use link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID Connect] (OIDC) to connect instead of OAuth. diff --git a/modules/identity-provider-google-CR.adoc b/modules/identity-provider-google-CR.adoc deleted file mode 100644 index 273c4e332c74..000000000000 --- a/modules/identity-provider-google-CR.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-google-identity-provider.adoc - -[id="identity-provider-google-CR_{context}"] -= Sample Google CR - -The following custom resource (CR) shows the parameters and acceptable -values for a Google identity provider. - -.Google CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: googleidp <1> - mappingMethod: claim <2> - type: Google - google: - clientID: {...} <3> - clientSecret: <4> - name: google-secret - hostedDomain: "example.com" <5> ----- -<1> This provider name is prefixed to the Google numeric user ID to form an -identity name. It is also used to build the redirect URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a link:https://console.developers.google.com/[registered -Google project]. The project must be configured with a redirect URI of -`\https://oauth-openshift.apps.<cluster-name>.<cluster-domain>/oauth2callback/<idp-provider-name>`. -<4> Reference to an {product-title} `Secret` object containing the client secret -issued by Google. -<5> A -link:https://developers.google.com/identity/protocols/OpenIDConnect#hd-param[hosted domain] -used to restrict sign-in accounts. Optional if the `lookup` `mappingMethod` -is used. If empty, any Google account is allowed to authenticate. diff --git a/modules/identity-provider-google-about.adoc b/modules/identity-provider-google-about.adoc deleted file mode 100644 index 097cb79ff1fe..000000000000 --- a/modules/identity-provider-google-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-google-identity-provider.adoc - -[id="identity-provider-google-about_{context}"] -= About Google authentication - -Using Google as an identity provider allows any Google user to authenticate to your server. You can limit authentication to members of a specific hosted domain with the `hostedDomain` configuration attribute. 
- -[NOTE] -==== -Using Google as an identity provider requires users to get a token using `<namespace_route>/oauth/token/request` to use with command-line tools. -==== diff --git a/modules/identity-provider-htpasswd-CR.adoc b/modules/identity-provider-htpasswd-CR.adoc deleted file mode 100644 index 14b9cd75cd3b..000000000000 --- a/modules/identity-provider-htpasswd-CR.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -[id="identity-provider-htpasswd-CR_{context}"] -= Sample htpasswd CR - -The following custom resource (CR) shows the parameters and acceptable values for an -htpasswd identity provider. - -.htpasswd CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: my_htpasswd_provider <1> - mappingMethod: claim <2> - type: HTPasswd - htpasswd: - fileData: - name: htpass-secret <3> ----- -<1> This provider name is prefixed to provider user names to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> An existing secret containing a file generated using -link:http://httpd.apache.org/docs/2.4/programs/htpasswd.html[`htpasswd`]. diff --git a/modules/identity-provider-htpasswd-about.adoc b/modules/identity-provider-htpasswd-about.adoc deleted file mode 100644 index bd84363eaea4..000000000000 --- a/modules/identity-provider-htpasswd-about.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -[id="identity-provider-htpasswd-about_{context}"] -= About htpasswd authentication - -Using htpasswd authentication in {product-title} allows you to identify users based on an htpasswd file. An htpasswd file is a flat file that contains the user name and hashed password for each user. You can use the `htpasswd` utility to create this file. diff --git a/modules/identity-provider-htpasswd-secret.adoc b/modules/identity-provider-htpasswd-secret.adoc deleted file mode 100644 index e43d172923a5..000000000000 --- a/modules/identity-provider-htpasswd-secret.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-htpasswd-secret_{context}"] -= Creating the htpasswd secret - -To use the htpasswd identity provider, you must define a secret that -contains the htpasswd user file. - -.Prerequisites - -* Create an htpasswd file. - -.Procedure - -* Create a `Secret` object that contains the htpasswd users file: -+ -[source,terminal] ----- -$ oc create secret generic htpass-secret --from-file=htpasswd=<path_to_users.htpasswd> -n openshift-config <1> ----- -<1> The secret key containing the users file for the `--from-file` argument must be named `htpasswd`, as shown in the above command. 
-+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: htpass-secret - namespace: openshift-config -type: Opaque -data: - htpasswd: <base64_encoded_htpasswd_file_contents> ----- -==== diff --git a/modules/identity-provider-htpasswd-update-users.adoc b/modules/identity-provider-htpasswd-update-users.adoc deleted file mode 100644 index 5a2b2d128c11..000000000000 --- a/modules/identity-provider-htpasswd-update-users.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-htpasswd-update-users_{context}"] -= Updating users for an htpasswd identity provider - -You can add or remove users from an existing htpasswd identity provider. - -.Prerequisites - -* You have created a `Secret` object that contains the htpasswd user file. This procedure assumes that it is named `htpass-secret`. -* You have configured an htpasswd identity provider. This procedure assumes that it is named `my_htpasswd_provider`. -* You have access to the `htpasswd` utility. On Red Hat Enterprise Linux this is available by installing the `httpd-tools` package. -* You have cluster administrator privileges. - -.Procedure - -. Retrieve the htpasswd file from the `htpass-secret` `Secret` object and save the file to your file system: -+ -[source,terminal] ----- -$ oc get secret htpass-secret -ojsonpath={.data.htpasswd} -n openshift-config | base64 --decode > users.htpasswd ----- - -. Add or remove users from the `users.htpasswd` file. - -** To add a new user: -+ -[source,terminal] ----- -$ htpasswd -bB users.htpasswd <username> <password> ----- -+ -.Example output -[source,terminal] ----- -Adding password for user <username> ----- - -** To remove an existing user: -+ -[source,terminal] ----- -$ htpasswd -D users.htpasswd <username> ----- -+ -.Example output -[source,terminal] ----- -Deleting password for user <username> ----- - -. Replace the `htpass-secret` `Secret` object with the updated users in the `users.htpasswd` file: -+ -[source,terminal] ----- -$ oc create secret generic htpass-secret --from-file=htpasswd=users.htpasswd --dry-run=client -o yaml -n openshift-config | oc replace -f - ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to replace the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: htpass-secret - namespace: openshift-config -type: Opaque -data: - htpasswd: <base64_encoded_htpasswd_file_contents> ----- -==== - -. If you removed one or more users, you must additionally remove existing resources for each user. - -.. Delete the `User` object: -+ -[source,terminal] ----- -$ oc delete user <username> ----- -+ -.Example output -[source,terminal] ----- -user.user.openshift.io "<username>" deleted ----- -+ -Be sure to remove the user, otherwise the user can continue using their token as long as it has not expired. - -.. 
Delete the `Identity` object for the user: -+ -[source,terminal] ----- -$ oc delete identity my_htpasswd_provider:<username> ----- -+ -.Example output -[source,terminal] ----- -identity.user.openshift.io "my_htpasswd_provider:<username>" deleted ----- diff --git a/modules/identity-provider-keystone-CR.adoc b/modules/identity-provider-keystone-CR.adoc deleted file mode 100644 index 09cb3c7c63de..000000000000 --- a/modules/identity-provider-keystone-CR.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -[id="identity-provider-keystone-CR_{context}"] -= Sample Keystone CR - -The following custom resource (CR) shows the parameters and acceptable values for a -Keystone identity provider. - -.Keystone CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: keystoneidp <1> - mappingMethod: claim <2> - type: Keystone - keystone: - domainName: default <3> - url: https://keystone.example.com:5000 <4> - ca: <5> - name: ca-config-map - tlsClientCert: <6> - name: client-cert-secret - tlsClientKey: <7> - name: client-key-secret ----- -<1> This provider name is prefixed to provider user names to form an identity name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Keystone domain name. In Keystone, usernames are domain-specific. Only a single domain is supported. -<4> The URL to use to connect to the Keystone server (required). This must -use https. -<5> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. -<6> Optional: Reference to an {product-title} `Secret` object containing the client -certificate to present when making requests to the configured URL. -<7> Reference to an {product-title} `Secret` object containing the key for the -client certificate. Required if `tlsClientCert` is specified. diff --git a/modules/identity-provider-keystone-about.adoc b/modules/identity-provider-keystone-about.adoc deleted file mode 100644 index e86bc8c196d3..000000000000 --- a/modules/identity-provider-keystone-about.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -[id="identity-provider-keystone-about_{context}"] -= About Keystone authentication - -link:http://docs.openstack.org/developer/keystone/[Keystone] is an OpenStack project that provides identity, token, catalog, and policy services. - -You can configure the integration with Keystone so that the new {product-title} users are based on either the Keystone user names or unique Keystone IDs. With both methods, users log in by entering their Keystone user name and password. Basing the {product-title} users on the Keystone ID is more secure because if you delete a Keystone user and create a new Keystone user with that user name, the new user might have access to the old user's resources. 
diff --git a/modules/identity-provider-ldap-CR.adoc b/modules/identity-provider-ldap-CR.adoc deleted file mode 100644 index e78a8c9080d9..000000000000 --- a/modules/identity-provider-ldap-CR.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -[id="identity-provider-ldap-CR_{context}"] -= Sample LDAP CR - -The following custom resource (CR) shows the parameters and acceptable values for an -LDAP identity provider. - -.LDAP CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: ldapidp <1> - mappingMethod: claim <2> - type: LDAP - ldap: - attributes: - id: <3> - - dn - email: <4> - - mail - name: <5> - - cn - preferredUsername: <6> - - uid - bindDN: "" <7> - bindPassword: <8> - name: ldap-secret - ca: <9> - name: ca-config-map - insecure: false <10> - url: "ldaps://ldaps.example.com/ou=users,dc=acme,dc=com?uid" <11> ----- -<1> This provider name is prefixed to the returned user ID to form an identity -name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> List of attributes to use as the identity. First non-empty attribute is -used. At least one attribute is required. If none of the listed attribute -have a value, authentication fails. Defined attributes are retrieved as raw, -allowing for binary values to be used. -<4> List of attributes to use as the email address. First non-empty -attribute is used. -<5> List of attributes to use as the display name. First non-empty -attribute is used. -<6> List of attributes to use as the preferred user name when provisioning a -user for this identity. First non-empty attribute is used. -<7> Optional DN to use to bind during the search phase. Must be set if -`bindPassword` is defined. -<8> Optional reference to an {product-title} `Secret` object containing the bind -password. Must be set if `bindDN` is defined. -<9> Optional: Reference to an {product-title} `ConfigMap` object containing the -PEM-encoded certificate authority bundle to use in validating server -certificates for the configured URL. Only used when `insecure` is `false`. -<10> When `true`, no TLS connection is made to the server. When `false`, -`ldaps://` URLs connect using TLS, and `ldap://` URLs are upgraded to TLS. -This must be set to `false` when `ldaps://` URLs are in use, as these -URLs always attempt to connect using TLS. -<11> An RFC 2255 URL which specifies the LDAP host and search parameters to use. - -[NOTE] -==== -To whitelist users for an LDAP integration, use the `lookup` mapping method. -Before a login from LDAP would be allowed, a cluster administrator must create -an `Identity` object and a `User` object for each LDAP user. -==== diff --git a/modules/identity-provider-ldap-secret.adoc b/modules/identity-provider-ldap-secret.adoc deleted file mode 100644 index 124eed26a24d..000000000000 --- a/modules/identity-provider-ldap-secret.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-ldap-secret_{context}"] -= Creating the LDAP secret - -To use the identity provider, you must define an {product-title} `Secret` object that contains the `bindPassword` field. 
- -.Procedure - -* Create a `Secret` object that contains the `bindPassword` field: -+ -[source,terminal] ----- -$ oc create secret generic ldap-secret --from-literal=bindPassword=<secret> -n openshift-config <1> ----- -<1> The secret key containing the bindPassword for the `--from-literal` argument must be called `bindPassword`. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: ldap-secret - namespace: openshift-config -type: Opaque -data: - bindPassword: <base64_encoded_bind_password> ----- -==== diff --git a/modules/identity-provider-oidc-CR.adoc b/modules/identity-provider-oidc-CR.adoc deleted file mode 100644 index 940b9293329c..000000000000 --- a/modules/identity-provider-oidc-CR.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-CR_{context}"] -= Sample OpenID Connect CRs - -The following custom resources (CRs) show the parameters and acceptable values for an OpenID Connect identity provider. - -If you must specify a custom certificate bundle, extra scopes, extra authorization request parameters, or a `userInfo` URL, use the full OpenID Connect CR. - -.Standard OpenID Connect CR -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: oidcidp <1> - mappingMethod: claim <2> - type: OpenID - openID: - clientID: ... <3> - clientSecret: <4> - name: idp-secret - claims: <5> - preferredUsername: - - preferred_username - name: - - name - email: - - email - groups: - - groups - issuer: https://www.idp-issuer.com <6> ----- -<1> This provider name is prefixed to the value of the identity claim to form an identity name. It is also used to build the redirect URL. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> The client ID of a client registered with the OpenID provider. The client must be allowed to redirect to `\https://oauth-openshift.apps.<cluster_name>.<cluster_domain>/oauth2callback/<idp_provider_name>`. -<4> A reference to an {product-title} `Secret` object containing the client secret. -<5> The list of claims to use as the identity. The first non-empty claim is used. -<6> The link:https://openid.net/specs/openid-connect-core-1_0.html#IssuerIdentifier[Issuer Identifier] described in the OpenID spec. Must use `https` without query or fragment component. - -.Full OpenID Connect CR -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: oidcidp - mappingMethod: claim - type: OpenID - openID: - clientID: ... - clientSecret: - name: idp-secret - ca: <1> - name: ca-config-map - extraScopes: <2> - - email - - profile - extraAuthorizeParameters: <3> - include_granted_scopes: "true" - claims: - preferredUsername: <4> - - preferred_username - - email - name: <5> - - nickname - - given_name - - name - email: <6> - - custom_email_claim - - email - groups: <7> - - groups - issuer: https://www.idp-issuer.com ----- -<1> Optional: Reference to an {product-title} config map containing the PEM-encoded certificate authority bundle to use in validating server certificates for the configured URL. -<2> Optional: The list of scopes to request, in addition to the `openid` scope, during the authorization token request. 
-<3> Optional: A map of extra parameters to add to the authorization token request. -<4> The list of claims to use as the preferred user name when provisioning a user -for this identity. The first non-empty claim is used. -<5> The list of claims to use as the display name. The first non-empty claim is used. -<6> The list of claims to use as the email address. The first non-empty claim is used. -<7> The list of claims to use to synchronize groups from the OpenID Connect provider to {product-title} upon user login. The first non-empty claim is used. diff --git a/modules/identity-provider-oidc-about.adoc b/modules/identity-provider-oidc-about.adoc deleted file mode 100644 index 9c4db575d893..000000000000 --- a/modules/identity-provider-oidc-about.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-about_{context}"] -= About OpenID Connect authentication - -The Authentication Operator in {product-title} requires that the configured OpenID Connect identity provider implements the link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery] specification. - -ifdef::openshift-origin[] -You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#openshift[configure a Keycloak] server as an OpenID Connect identity provider for {product-title}. -endif::[] - -[NOTE] -==== -`ID Token` and `UserInfo` decryptions are not supported. -==== - -By default, the `openid` scope is requested. If required, extra scopes can be specified in the `extraScopes` field. - -Claims are read from the JWT `id_token` returned from the OpenID identity provider and, if specified, from the JSON returned by the `UserInfo` URL. - -At least one claim must be configured to use as the user's identity. The standard identity claim is `sub`. - -You can also indicate which claims to use as the user's preferred user name, display name, and email address. If multiple claims are specified, the first one with a non-empty value is used. The following table lists the standard claims: - -[cols="1,2",options="header"] -|=== - -|Claim -|Description - -|`sub` -|Short for "subject identifier." The remote identity for the user at the -issuer. - -|`preferred_username` -|The preferred user name when provisioning a user. A shorthand name that the user wants to be referred to as, such as `janedoe`. Typically a value that corresponding to the user's login or username in the authentication system, such as username or email. - -|`email` -|Email address. - -|`name` -|Display name. -|=== - -See the link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation] for more information. - -[NOTE] -==== -Unless your OpenID Connect identity provider supports the resource owner password credentials (ROPC) grant flow, users must get a token from `<namespace_route>/oauth/token/request` to use with command-line tools. 
-==== diff --git a/modules/identity-provider-oidc-supported.adoc b/modules/identity-provider-oidc-supported.adoc deleted file mode 100644 index d2b12014c727..000000000000 --- a/modules/identity-provider-oidc-supported.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -[id="identity-provider-oidc-supported_{context}"] -= Supported OIDC providers - -Red Hat tests and supports specific OpenID Connect (OIDC) providers with {product-title}. The following OpenID Connect (OIDC) providers are tested and supported with {product-title}. Using an OIDC provider that is not on the following list might work with {product-title}, but the provider was not tested by Red Hat and therefore is not supported by Red Hat. - -* Active Directory Federation Services for Windows Server -+ -[NOTE] -==== -Currently, it is not supported to use Active Directory Federation Services for Windows Server with {product-title} when custom claims are used. -==== -* GitLab -* Google -* Keycloak -* Microsoft identity platform (Azure Active Directory v2.0) -+ -[NOTE] -==== -Currently, it is not supported to use Microsoft identity platform when group names are required to be synced. -==== -* Okta -* Ping Identity -* Red Hat Single Sign-On diff --git a/modules/identity-provider-overview.adoc b/modules/identity-provider-overview.adoc deleted file mode 100644 index d650bc4df595..000000000000 --- a/modules/identity-provider-overview.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-identity-provider.adoc -// * authentication/identity_providers/configuring-allow-all-identity-provider.adoc -// * authentication/identity_providers/configuring-deny-all-identity-provider.adoc -// * authentication/identity_providers/configuring-htpasswd-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc -// * authentication/identity_providers/configuring-ldap-identity-provider.adoc -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: CONCEPT -[id="identity-provider-overview_{context}"] -= About identity providers in {product-title} - -By default, only a `kubeadmin` user exists on your cluster. To specify an -identity provider, you must create a custom resource (CR) that describes -that identity provider and add it to the cluster. - -[NOTE] -==== -{product-title} user names containing `/`, `:`, and `%` are not supported. 
-==== diff --git a/modules/identity-provider-parameters.adoc b/modules/identity-provider-parameters.adoc deleted file mode 100644 index 98365bf21d21..000000000000 --- a/modules/identity-provider-parameters.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-identity-provider.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="identity-provider-parameters_{context}"] -= Identity provider parameters - -The following parameters are common to all identity providers: - -[cols="2a,8a",options="header"] -|=== -|Parameter | Description -|`name` | The provider name is prefixed to provider user names to form an -identity name. - -|`mappingMethod` | Defines how new identities are mapped to users when they log in. -Enter one of the following values: - -claim:: The default value. Provisions a user with the identity's preferred -user name. Fails if a user with that user name is already mapped to another -identity. -lookup:: Looks up an existing identity, user identity mapping, and user, -but does not automatically provision users or identities. This allows cluster -administrators to set up identities and users manually, or using an external -process. Using this method requires you to manually provision users. -generate:: Provisions a user with the identity's preferred user name. If a -user with the preferred user name is already mapped to an existing identity, a -unique user name is generated. For example, `myuser2`. This method should not be -used in combination with external processes that require exact matches between -{product-title} user names and identity provider user names, such as LDAP group -sync. -add:: Provisions a user with the identity's preferred user name. If a user -with that user name already exists, the identity is mapped to the existing user, -adding to any existing identity mappings for the user. Required when multiple -identity providers are configured that identify the same set of users and map to -the same user names. -|=== - -[NOTE] -When adding or changing identity providers, you can map identities from the new -provider to existing users by setting the `mappingMethod` parameter to -`add`. diff --git a/modules/identity-provider-registering-github.adoc b/modules/identity-provider-registering-github.adoc deleted file mode 100644 index a23099cfe6c6..000000000000 --- a/modules/identity-provider-registering-github.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-registering-github_{context}"] -= Registering a GitHub application - -To use GitHub or GitHub Enterprise as an identity provider, you must register -an application to use. - -.Procedure - -. Register an application on GitHub: -** For GitHub, click https://github.com/settings/profile[*Settings*] -> -https://github.com/settings/apps[*Developer settings*] -> -https://github.com/settings/developers[*OAuth Apps*] -> -https://github.com/settings/applications/new[*Register a new OAuth application*]. -** For GitHub Enterprise, go to your GitHub Enterprise home page and then click -*Settings -> Developer settings -> Register a new application*. -. Enter an application name, for example `My OpenShift Install`. -. Enter a homepage URL, such as -`\https://oauth-openshift.apps.<cluster-name>.<cluster-domain>`. -. Optional: Enter an application description. -. 
Enter the authorization callback URL, where the end of the URL contains the -identity provider `name`: -+ ----- -https://oauth-openshift.apps.<cluster-name>.<cluster-domain>/oauth2callback/<idp-provider-name> ----- -+ -For example: -+ ----- -https://oauth-openshift.apps.openshift-cluster.example.com/oauth2callback/github ----- -. Click *Register application*. GitHub provides a client ID and a client secret. -You need these values to complete the identity provider configuration. diff --git a/modules/identity-provider-request-header-CR.adoc b/modules/identity-provider-request-header-CR.adoc deleted file mode 100644 index 447b3285e143..000000000000 --- a/modules/identity-provider-request-header-CR.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-request-header-identity-provider.adoc - -[id="identity-provider-request-header-CR_{context}"] -= Sample request header CR - -The following custom resource (CR) shows the parameters and -acceptable values for a request header identity provider. - -.Request header CR - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - identityProviders: - - name: requestheaderidp <1> - mappingMethod: claim <2> - type: RequestHeader - requestHeader: - challengeURL: "https://www.example.com/challenging-proxy/oauth/authorize?${query}" <3> - loginURL: "https://www.example.com/login-proxy/oauth/authorize?${query}" <4> - ca: <5> - name: ca-config-map - clientCommonNames: <6> - - my-auth-proxy - headers: <7> - - X-Remote-User - - SSO-User - emailHeaders: <8> - - X-Remote-User-Email - nameHeaders: <9> - - X-Remote-User-Display-Name - preferredUsernameHeaders: <10> - - X-Remote-User-Login ----- -<1> This provider name is prefixed to the user name in the request header to -form an identity name. -<2> Controls how mappings are established between this provider's identities and `User` objects. -<3> Optional: URL to redirect unauthenticated `/oauth/authorize` requests to, -that will authenticate browser-based clients and then proxy their request to -`https://_<namespace_route>_/oauth/authorize`. -The URL that proxies to `https://_<namespace_route>_/oauth/authorize` must end with `/authorize` (with no trailing slash), -and also proxy subpaths, in order for OAuth approval flows to work properly. -`${url}` is replaced with the current URL, escaped to be safe in a query parameter. -`${query}` is replaced with the current query string. -If this attribute is not defined, then `loginURL` must be used. -<4> Optional: URL to redirect unauthenticated `/oauth/authorize` requests to, -that will authenticate clients which expect `WWW-Authenticate` challenges, and -then proxy them to `https://_<namespace_route>_/oauth/authorize`. -`${url}` is replaced with the current URL, escaped to be safe in a query parameter. -`${query}` is replaced with the current query string. -If this attribute is not defined, then `challengeURL` must be used. -<5> Reference to an {product-title} `ConfigMap` object containing a PEM-encoded -certificate bundle. Used as a trust anchor to validate the TLS -certificates presented by the remote server. -+ -[IMPORTANT] -==== -As of {product-title} 4.1, the `ca` field is required for this identity -provider. This means that your proxy must support mutual TLS. -==== -<6> Optional: list of common names (`cn`). 
If set, a valid client certificate with -a Common Name (`cn`) in the specified list must be presented before the request headers -are checked for user names. If empty, any Common Name is allowed. Can only be used in combination -with `ca`. -<7> Header names to check, in order, for the user identity. The first header containing -a value is used as the identity. Required, case-insensitive. -<8> Header names to check, in order, for an email address. The first header containing -a value is used as the email address. Optional, case-insensitive. -<9> Header names to check, in order, for a display name. The first header containing -a value is used as the display name. Optional, case-insensitive. -<10> Header names to check, in order, for a preferred user name, if different than the immutable -identity determined from the headers specified in `headers`. The first header containing -a value is used as the preferred user name when provisioning. Optional, case-insensitive. diff --git a/modules/identity-provider-secret-tls.adoc b/modules/identity-provider-secret-tls.adoc deleted file mode 100644 index 6b22d24e83ff..000000000000 --- a/modules/identity-provider-secret-tls.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc -// * authentication/identity_providers/configuring-keystone-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-secret-tls_{context}"] -= Creating the secret - -Identity providers use {product-title} `Secret` objects in the `openshift-config` namespace to contain the client secret, client certificates, and keys. - -.Procedure - -* Create a `Secret` object that contains the key and certificate by using the following command: -+ -[source,terminal] ----- -$ oc create secret tls <secret_name> --key=key.pem --cert=cert.pem -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <secret_name> - namespace: openshift-config -type: kubernetes.io/tls -data: - tls.crt: <base64_encoded_cert> - tls.key: <base64_encoded_key> ----- -==== diff --git a/modules/identity-provider-secret.adoc b/modules/identity-provider-secret.adoc deleted file mode 100644 index 5b8554b453ef..000000000000 --- a/modules/identity-provider-secret.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/identity_providers/configuring-github-identity-provider.adoc -// * authentication/identity_providers/configuring-gitlab-identity-provider.adoc -// * authentication/identity_providers/configuring-google-identity-provider.adoc -// * authentication/identity_providers/configuring-oidc-identity-provider.adoc - -:_content-type: PROCEDURE -[id="identity-provider-creating-secret_{context}"] -= Creating the secret - -Identity providers use {product-title} `Secret` objects in the `openshift-config` namespace to contain the client secret, client certificates, and keys. 
- -.Procedure - -* Create a `Secret` object containing a string by using the following command: -+ -[source,terminal] ----- -$ oc create secret generic <secret_name> --from-literal=clientSecret=<secret> -n openshift-config ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the secret: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <secret_name> - namespace: openshift-config -type: Opaque -data: - clientSecret: <base64_encoded_client_secret> ----- -==== - -* You can define a `Secret` object containing the contents of a file, such as a certificate file, by using the following command: -+ -[source,terminal] ----- -$ oc create secret generic <secret_name> --from-file=<path_to_file> -n openshift-config ----- diff --git a/modules/idle-idling-applications.adoc b/modules/idle-idling-applications.adoc deleted file mode 100644 index 8620ccee24a2..000000000000 --- a/modules/idle-idling-applications.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/idling-applications.adoc - -:_content-type: PROCEDURE -[id="idle-idling-applications_{context}"] -= Idling applications - -Idling an application involves finding the scalable resources (deployment -configurations, replication controllers, and others) associated with a service. -Idling an application finds the service and marks it as idled, scaling down the -resources to zero replicas. - -You can use the `oc idle` command to idle a single service, or use the -`--resource-names-file` option to idle multiple services. - -[id="idle-idling-applications-single_{context}"] -== Idling a single service - -.Procedure - -. To idle a single service, run: -+ -[source,terminal] ----- -$ oc idle <service> ----- - -[id="idle-idling-applications-multiple_{context}"] -== Idling multiple services - -Idling multiple services is helpful if an application spans across a set of -services within a project, or when idling multiple services in conjunction with -a script to idle multiple applications in bulk within the same project. - -.Procedure - -. Create a file containing a list of the services, each on their own line. - -. Idle the services using the `--resource-names-file` option: -+ -[source,terminal] ----- -$ oc idle --resource-names-file <filename> ----- - -[NOTE] -==== -The `idle` command is limited to a single project. For idling applications across -a cluster, run the `idle` command for each project individually. -==== diff --git a/modules/idle-unidling-applications.adoc b/modules/idle-unidling-applications.adoc deleted file mode 100644 index fbe6b94c2c7a..000000000000 --- a/modules/idle-unidling-applications.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/idling-applications.adoc - -:_content-type: PROCEDURE -[id="idle-unidling-applications_{context}"] -= Unidling applications - -Application services become active again when they receive network traffic and -are scaled back up their previous state. This includes both traffic to the -services and traffic passing through routes. - -Applications can also be manually unidled by scaling up the resources. - -.Procedure - -. To scale up a DeploymentConfig, run: -+ -[source,terminal] ----- -$ oc scale --replicas=1 dc <dc_name> ----- - -[NOTE] -==== -Automatic unidling by a router is currently only supported by the default -HAProxy router. -==== -[NOTE] -==== -Services do not support automatic unidling if you configure Kuryr-Kubernetes as an SDN. 
-==== diff --git a/modules/ignition-config-viewing.adoc b/modules/ignition-config-viewing.adoc deleted file mode 100644 index a7588e3f44f9..000000000000 --- a/modules/ignition-config-viewing.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * architecture/architecture_rhcos.adoc - -[id="ignition-config-viewing_{context}"] -= Viewing Ignition configuration files - -To see the Ignition config file used to deploy the bootstrap machine, run the -following command: - -[source,terminal] ----- -$ openshift-install create ignition-configs --dir $HOME/testconfig ----- - -After you answer a few questions, the `bootstrap.ign`, `master.ign`, and -`worker.ign` files appear in the directory you entered. - -To see the contents of the `bootstrap.ign` file, pipe it through the `jq` filter. -Here's a snippet from that file: - -[source,terminal] ----- -$ cat $HOME/testconfig/bootstrap.ign | jq -{ - "ignition": { - "version": "3.2.0" - }, - "passwd": { - "users": [ - { - "name": "core", - "sshAuthorizedKeys": [ - "ssh-rsa AAAAB3NzaC1yc...." - ] - } - ] - }, - "storage": { - "files": [ - { - "overwrite": false, - "path": "/etc/motd", - "user": { - "name": "root" - }, - "append": [ - { - "source": "data:text/plain;charset=utf-8;base64,VGhpcyBpcyB0aGUgYm9vdHN0cmFwIG5vZGU7IGl0IHdpbGwgYmUgZGVzdHJveWVkIHdoZW4gdGhlIG1hc3RlciBpcyBmdWxseSB1cC4KClRoZSBwcmltYXJ5IHNlcnZpY2VzIGFyZSByZWxlYXNlLWltYWdlLnNlcnZpY2UgZm9sbG93ZWQgYnkgYm9vdGt1YmUuc2VydmljZS4gVG8gd2F0Y2ggdGhlaXIgc3RhdHVzLCBydW4gZS5nLgoKICBqb3VybmFsY3RsIC1iIC1mIC11IHJlbGVhc2UtaW1hZ2Uuc2VydmljZSAtdSBib290a3ViZS5zZXJ2aWNlCg==" - } - ], - "mode": 420 - }, -... ----- - -To decode the contents of a file listed in the `bootstrap.ign` file, pipe the -base64-encoded data string representing the contents of that file to the `base64 --d` command. Here's an example using the contents of the `/etc/motd` file added to -the bootstrap machine from the output shown above: - -[source,terminal] ----- -$ echo VGhpcyBpcyB0aGUgYm9vdHN0cmFwIG5vZGU7IGl0IHdpbGwgYmUgZGVzdHJveWVkIHdoZW4gdGhlIG1hc3RlciBpcyBmdWxseSB1cC4KClRoZSBwcmltYXJ5IHNlcnZpY2VzIGFyZSByZWxlYXNlLWltYWdlLnNlcnZpY2UgZm9sbG93ZWQgYnkgYm9vdGt1YmUuc2VydmljZS4gVG8gd2F0Y2ggdGhlaXIgc3RhdHVzLCBydW4gZS5nLgoKICBqb3VybmFsY3RsIC1iIC1mIC11IHJlbGVhc2UtaW1hZ2Uuc2VydmljZSAtdSBib290a3ViZS5zZXJ2aWNlCg== | base64 --decode ----- - -.Example output -[source,terminal] ----- -This is the bootstrap node; it will be destroyed when the master is fully up. - -The primary services are release-image.service followed by bootkube.service. To watch their status, run e.g. - - journalctl -b -f -u release-image.service -u bootkube.service ----- - -Repeat those commands on the `master.ign` and `worker.ign` files to see the source -of Ignition config files for each of those machine types.  You should see a line -like the following for the `worker.ign`, identifying how it gets its Ignition -config from the bootstrap machine: - -[source,terminal] ----- -"source": "https://api.myign.develcluster.example.com:22623/config/worker", ----- - -Here are a few things you can learn from the `bootstrap.ign` file: + - -* Format: The format of the file is defined in the -https://coreos.github.io/ignition/configuration-v3_2/[Ignition config spec]. -Files of the same format are used later by the MCO to merge changes into a -machine's configuration. 
-* Contents: Because the bootstrap machine serves the Ignition configs for other -machines, both master and worker machine Ignition config information is stored in the -`bootstrap.ign`, along with the bootstrap machine's configuration. -* Size: The file is more than 1300 lines long, with path to various types of resources. -* The content of each file that will be copied to the machine is actually encoded -into data URLs, which tends to make the content a bit clumsy to read. (Use the - `jq` and `base64` commands shown previously to make the content more readable.) -* Configuration: The different sections of the Ignition config file are generally - meant to contain files that are just dropped into a machine's file system, rather - than commands to modify existing files. For example, instead of having a section - on NFS that configures that service, you would just add an NFS configuration - file, which would then be started by the init process when the system comes up. -* users: A user named `core` is created, with your SSH key assigned to that user. -This allows you to log in to the cluster with that user name and your -credentials. -* storage: The storage section identifies files that are added to each machine. A -few notable files include `/root/.docker/config.json` (which provides credentials - your cluster needs to pull from container image registries) and a bunch of - manifest files in `/opt/openshift/manifests` that are used to configure your cluster. -* systemd: The `systemd` section holds content used to create `systemd` unit files. -Those files are used to start up services at boot time, as well as manage those -services on running systems. -* Primitives: Ignition also exposes low-level primitives that other tools can -build on. diff --git a/modules/images-about.adoc b/modules/images-about.adoc deleted file mode 100644 index c7626887d077..000000000000 --- a/modules/images-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-about_{context}"] -= Images - -Containers in {product-title} are based on OCI- or Docker-formatted container _images_. An image is a binary that includes all of the requirements for running a single container, as well as metadata describing its needs and capabilities. - -You can think of it as a packaging technology. Containers only have access to resources defined in the image unless you give the container additional access when creating it. By deploying the same image in multiple containers across multiple hosts and load balancing between them, {product-title} can provide redundancy and horizontal scaling for a service packaged into an image. - -You can use the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/#using_podman_to_work_with_containers[podman] or `docker` CLI directly to build images, but {product-title} also supplies builder images that assist with creating new images by adding your code or configuration to existing images. - -Because applications develop over time, a single image name can actually refer to many different versions of the same image. Each different image is referred to uniquely by its hash, a long hexadecimal number such as `fd44297e2ddb050ec4f...`, which is usually shortened to 12 characters, such as `fd44297e2ddb`. 
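As a minimal illustration of name-versus-hash addressing (the registry, repository, and digest below are placeholders, not values from this document), the same image can be pulled either by a floating tag or pinned to an exact version by its digest:

[source,terminal]
----
$ podman pull quay.io/<namespace>/<image>:latest
$ podman pull quay.io/<namespace>/<image>@sha256:<digest>
----

The tag form can resolve to different images over time, while the digest form always retrieves the exact image whose content hashes to that value.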
diff --git a/modules/images-add-tags-to-imagestreams.adoc b/modules/images-add-tags-to-imagestreams.adoc deleted file mode 100644 index 1c1c3ca0d5f3..000000000000 --- a/modules/images-add-tags-to-imagestreams.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -:_content-type: PROCEDURE -[id="images-add-tags-to-imagestreams_{context}"] -= Adding tags to image streams - -An image stream in {product-title} comprises zero or more container images identified by tags. - -There are different types of tags available. The default behavior uses a `permanent` tag, which points to a specific image in time. If the `permanent` tag is in use and the source changes, the tag does not change for the destination. - -A `tracking` tag means the destination tag's metadata is updated during the import of the source tag. - -.Procedure - -* You can add tags to an image stream using the `oc tag` command: -+ -[source,terminal] ----- -$ oc tag <source> <destination> ----- -+ -For example, to configure the `ruby` image stream `static-2.0` tag to always refer to the current image for the `ruby` image stream `2.0` tag: -+ -[source,terminal] ----- -$ oc tag ruby:2.0 ruby:static-2.0 ----- -+ -This creates a new image stream tag named `static-2.0` in the `ruby` image stream. The new tag directly references the image id that the `ruby:2.0` image stream tag pointed to at the time `oc tag` was run, and the image it points to never changes. - -* To ensure the destination tag is updated when the source tag changes, use the `--alias=true` flag: -+ -[source,terminal] ----- -$ oc tag --alias=true <source> <destination> ----- - -[NOTE] -==== -Use a tracking tag for creating permanent aliases, for example, `latest` or `stable`. The tag only works correctly within a single image stream. Trying to create a cross-image stream alias produces an error. -==== - -* You can also add the `--scheduled=true` flag to have the destination tag be -refreshed, or re-imported, periodically. The period is configured globally at -the system level. - -* The `--reference` flag creates an image stream tag that is not imported. The tag points to the source location, permanently. -+ -If you want to instruct {product-title} to always fetch the tagged image from the integrated registry, use `--reference-policy=local`. The registry uses the pull-through feature to serve the image to the client. By default, the image blobs are mirrored locally by the registry. As a result, they can be pulled more quickly the next time they are needed. The flag also allows for pulling from insecure registries without a need to supply `--insecure-registry` to the container runtime as long as the image stream has an insecure annotation or the tag has an insecure import policy. 
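For example, using placeholder image and image stream names, the `--scheduled` and `--reference-policy` flags described above can be applied as follows; this is a sketch, not a required workflow:

[source,terminal]
----
$ oc tag docker.io/<namespace>/<image>:latest <image_stream>:latest --scheduled=true
$ oc tag docker.io/<namespace>/<image>:latest <image_stream>:latest --reference-policy=local
----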
diff --git a/modules/images-allow-pods-to-reference-images-across-projects.adoc b/modules/images-allow-pods-to-reference-images-across-projects.adoc deleted file mode 100644 index 258a61227950..000000000000 --- a/modules/images-allow-pods-to-reference-images-across-projects.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/using-image-pull-secrets - -:_content-type: PROCEDURE -[id="images-allow-pods-to-reference-images-across-projects_{context}"] -= Allowing pods to reference images across projects - -When using the {product-registry}, to allow pods in `project-a` to reference images in `project-b`, a service account in `project-a` must be bound to the `system:image-puller` role in `project-b`. - -[NOTE] -==== -When you create a pod service account or a namespace, wait until the service account is provisioned with a docker pull secret; if you create a pod before its service account is fully provisioned, the pod fails to access the {product-registry}. -==== - -.Procedure - -. To allow pods in `project-a` to reference images in `project-b`, bind a service account in `project-a` to the `system:image-puller` role in `project-b`: -+ -[source,terminal] ----- -$ oc policy add-role-to-user \ - system:image-puller system:serviceaccount:project-a:default \ - --namespace=project-b ----- -+ -After adding that role, the pods in `project-a` that reference the default service account are able to pull images from `project-b`. - -. To allow access for any service account in `project-a`, use the group: -+ -[source,terminal] ----- -$ oc policy add-role-to-group \ - system:image-puller system:serviceaccounts:project-a \ - --namespace=project-b ----- diff --git a/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc b/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc deleted file mode 100644 index 8b148c76bf92..000000000000 --- a/modules/images-allow-pods-to-reference-images-from-secure-registries.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/using-image-pull-secrets -// * openshift_images/managing-image-streams.adoc - -:_content-type: PROCEDURE -[id="images-allow-pods-to-reference-images-from-secure-registries_{context}"] -= Allowing pods to reference images from other secured registries - -The `.dockercfg` `$HOME/.docker/config.json` file for Docker clients is a Docker credentials file that stores your authentication information if you have previously logged into a secured or insecure registry. - -To pull a secured container image that is not from {product-registry}, you must create a pull secret from your Docker credentials and add it to your service account. - -The Docker credentials file and the associated pull secret can contain multiple references to the same registry, each with its own set of credentials. 
- -.Example `config.json` file -[source,json] ----- -{ - "auths":{ - "cloud.openshift.com":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io/repository-main":{ - "auth":"b3Blb=", - "email":"you@example.com" - } - } -} ----- - -.Example pull secret -[source,yaml] ----- -apiVersion: v1 -data: - .dockerconfigjson: ewogICAiYXV0aHMiOnsKICAgICAgIm0iOnsKICAgICAgIsKICAgICAgICAgImF1dGgiOiJiM0JsYj0iLAogICAgICAgICAiZW1haWwiOiJ5b3VAZXhhbXBsZS5jb20iCiAgICAgIH0KICAgfQp9Cg== -kind: Secret -metadata: - creationTimestamp: "2021-09-09T19:10:11Z" - name: pull-secret - namespace: default - resourceVersion: "37676" - uid: e2851531-01bc-48ba-878c-de96cfe31020 -type: Opaque ----- - -.Procedure - -* If you already have a `.dockercfg` file for the secured registry, you can create a secret from that file by running: -+ -[source,terminal] ----- -$ oc create secret generic <pull_secret_name> \ - --from-file=.dockercfg=<path/to/.dockercfg> \ - --type=kubernetes.io/dockercfg ----- - -* Or if you have a `$HOME/.docker/config.json` file: -+ -[source,terminal] ----- -$ oc create secret generic <pull_secret_name> \ - --from-file=.dockerconfigjson=<path/to/.docker/config.json> \ - --type=kubernetes.io/dockerconfigjson ----- - -* If you do not already have a Docker credentials file for the secured registry, you can create a secret by running: -+ -[source,terminal] ----- -$ oc create secret docker-registry <pull_secret_name> \ - --docker-server=<registry_server> \ - --docker-username=<user_name> \ - --docker-password=<password> \ - --docker-email=<email> ----- - -* To use a secret for pulling images for pods, you must add the secret to your service account. The name of the service account in this example should match the name of the service account the pod uses. The default service account is `default`: -+ -[source,terminal] ----- -$ oc secrets link default <pull_secret_name> --for=pull ----- diff --git a/modules/images-cluster-sample-imagestream-import.adoc b/modules/images-cluster-sample-imagestream-import.adoc deleted file mode 100644 index 14277214d97a..000000000000 --- a/modules/images-cluster-sample-imagestream-import.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="images-cluster-sample-imagestream-import_{context}"] -= Configuring periodic importing of Cluster Sample Operator image stream tags - -You can ensure that you always have access to the latest versions of the Cluster Sample Operator images by periodically importing the image stream tags when new versions become available. - -.Procedure - -. Fetch all the imagestreams in the `openshift` namespace by running the following command: -+ -[source,terminal] ----- -oc get imagestreams -nopenshift ----- - -. Fetch the tags for every imagestream in the `openshift` namespace by running the following command: -+ -[source, terminal] ----- -$ oc get is <image-stream-name> -o jsonpath="{range .spec.tags[*]}{.name}{'\t'}{.from.name}{'\n'}{end}" -nopenshift ----- -+ -For example: -+ -[source, terminal] ----- -$ oc get is ubi8-openjdk-17 -o jsonpath="{range .spec.tags[*]}{.name}{'\t'}{.from.name}{'\n'}{end}" -nopenshift ----- -+ -.Example output -[source, terminal] ----- -1.11 registry.access.redhat.com/ubi8/openjdk-17:1.11 -1.12 registry.access.redhat.com/ubi8/openjdk-17:1.12 ----- - -. 
Schedule periodic importing of images for each tag present in the image stream by running the following command: -+ -[source,terminal] ----- -$ oc tag <repository/image> <image-stream-name:tag> --scheduled -nopenshift ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag registry.access.redhat.com/ubi8/openjdk-17:1.11 ubi8-openjdk-17:1.11 --scheduled -nopenshift -$ oc tag registry.access.redhat.com/ubi8/openjdk-17:1.12 ubi8-openjdk-17:1.12 --scheduled -nopenshift ----- -+ -This command causes {product-title} to periodically update this particular image stream tag. This period is a cluster-wide setting set to 15 minutes by default. - -. Verify the scheduling status of the periodic import by running the following command: -+ -[source,terminal] ----- -oc get imagestream <image-stream-name> -o jsonpath="{range .spec.tags[*]}Tag: {.name}{'\t'}Scheduled: {.importPolicy.scheduled}{'\n'}{end}" -nopenshift ----- -+ -For example: -+ -[source,terminal] ----- -oc get imagestream ubi8-openjdk-17 -o jsonpath="{range .spec.tags[*]}Tag: {.name}{'\t'}Scheduled: {.importPolicy.scheduled}{'\n'}{end}" -nopenshift ----- -+ -.Example output -[source,terminal] ----- -Tag: 1.11 Scheduled: true -Tag: 1.12 Scheduled: true ----- \ No newline at end of file diff --git a/modules/images-configuration-allowed.adoc b/modules/images-configuration-allowed.adoc deleted file mode 100644 index 97d12ea64aec..000000000000 --- a/modules/images-configuration-allowed.adoc +++ /dev/null @@ -1,180 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-allowed_{context}"] -= Adding specific registries - -You can add a list of registries, and optionally an individual repository within a registry, that are permitted for image pull and push actions by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `allowedRegistries` parameter, the container runtime searches only those registries. Registries not in the list are blocked. - -[WARNING] -==== -When the `allowedRegistries` parameter is defined, all registries, including the `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, add the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -.Procedure - -. 
Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with an allowed list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - allowedRegistries: <2> - - example.com - - quay.io - - registry.redhat.io - - reg1.io/myrepo/myapp:latest - - image-registry.openshift-image-registry.svc:5000 -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify registries, and optionally a repository in that registry, to use for image pull and push actions. All other registries are blocked. -+ -[NOTE] -==== -Either the `allowedRegistries` parameter or the `blockedRegistries` parameter can be set, but not both. -==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, the allowed registries list is used to update the image signature policy in the `/host/etc/containers/policy.json` file on each node. - -. To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/policy.json ----- -+ -The following policy indicates that only images from the example.com, quay.io, and registry.redhat.io registries are permitted for image pulls and pushes: -+ -.Example image signature policy file -[%collapsible] -==== -[source,terminal] ----- -{ - "default":[ - { - "type":"reject" - } - ], - "transports":{ - "atomic":{ - "example.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "image-registry.openshift-image-registry.svc:5000":[ - { - "type":"insecureAcceptAnything" - } - ], - "insecure.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "quay.io":[ - { - "type":"insecureAcceptAnything" - } - ], - "reg4.io/myrepo/myapp:latest":[ - { - "type":"insecureAcceptAnything" - } - ], - "registry.redhat.io":[ - { - "type":"insecureAcceptAnything" - } - ] - }, - "docker":{ - "example.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "image-registry.openshift-image-registry.svc:5000":[ - { - "type":"insecureAcceptAnything" - } - ], - "insecure.com":[ - { - "type":"insecureAcceptAnything" - } - ], - "quay.io":[ - { - "type":"insecureAcceptAnything" - } - ], - "reg4.io/myrepo/myapp:latest":[ - { - "type":"insecureAcceptAnything" - } - ], - "registry.redhat.io":[ - { - "type":"insecureAcceptAnything" - } - ] - }, - "docker-daemon":{ - "":[ - { - "type":"insecureAcceptAnything" - } - ] - } - } -} ----- -==== - -[NOTE] -==== -If your cluster uses the `registrySources.insecureRegistries` parameter, ensure that any insecure registries are included in the allowed list. 
- -For example: - -[source,yml] ----- -spec: - registrySources: - insecureRegistries: - - insecure.com - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - insecure.com - - image-registry.openshift-image-registry.svc:5000 ----- -==== diff --git a/modules/images-configuration-blocked-payload.adoc b/modules/images-configuration-blocked-payload.adoc deleted file mode 100644 index e1e231c089d5..000000000000 --- a/modules/images-configuration-blocked-payload.adoc +++ /dev/null @@ -1,70 +0,0 @@ -//Modules included in the following assemblies -// -// * openshift_images/image-configuration.adoc - -:_content-type: PROCEDURE -[id="images-configuration-blocked-payload"] - -= Blocking a payload registry - -In a mirroring configuration, you can block upstream payload registries in a disconnected environment using a `ImageContentSourcePolicy` (ICSP) object. The following example procedure demonstrates how to block the `quay.io/openshift-payload` payload registry. - -.Procedure -. Create the mirror configuration using an `ImageContentSourcePolicy` (ICSP) object to mirror the payload to a registry in your instance. The following example ICSP file mirrors the payload `internal-mirror.io/openshift-payload`: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1alpha1 -kind: ImageContentSourcePolicy -metadata: - name: my-icsp -spec: - repositoryDigestMirrors: - - mirrors: - - internal-mirror.io/openshift-payload - source: quay.io/openshift-payload ----- -. After the object deploys onto your nodes, verify that the mirror configuration is set by checking the `/etc/containers/registries.conf` file: -+ -.Example output -[source,terminal] ----- -[[registry]] - prefix = "" - location = "quay.io/openshift-payload" - mirror-by-digest-only = true - -[[registry.mirror]] - location = "internal-mirror.io/openshift-payload" ----- -. Use the following command to edit the `image.config.openshift.io` custom resource file: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io cluster ----- -. To block the payload registry, add the following configuration to the `image.config.openshift.io` custom resource file: -+ -[source,yaml] ----- -spec: - registrySource: - blockedRegistries: - - quay.io/openshift-payload ----- - -.Verification -* Verify that the upstream payload registry is blocked by checking the `/etc/containers/registries.conf` file on the node. -+ -.Example output -[source,terminal] ----- -[[registry]] - prefix = "" - location = "quay.io/openshift-payload" - blocked = true - mirror-by-digest-only = true - -[[registry.mirror]] - location = "internal-mirror.io/openshift-payload" ----- diff --git a/modules/images-configuration-blocked.adoc b/modules/images-configuration-blocked.adoc deleted file mode 100644 index dfe4a3699464..000000000000 --- a/modules/images-configuration-blocked.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-blocked_{context}"] -= Blocking specific registries - -You can block any registry, and optionally an individual repository within a registry, by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. 
- -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `blockedRegistries` parameter, the container runtime does not search those registries. All other registries are allowed. - -[WARNING] -==== -To prevent pod failure, do not add the `registry.redhat.io` and `quay.io` registries to the `blockedRegistries` list, as they are required by payload images within your environment. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with a blocked list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - blockedRegistries: <2> - - untrusted.com - - reg1.io/myrepo/myapp:latest -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify registries, and optionally a repository in that registry, that should not be used for image pull and push actions. All other registries are allowed. -+ -[NOTE] -==== -Either the `blockedRegistries` registry or the `allowedRegistries` registry can be set, but not both. -==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, changes to the blocked registries appear in the `/etc/containers/registries.conf` file on each node. - -. To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf ----- -+ -The following example indicates that images from the `untrusted.com` registry are prevented for image pulls and pushes: -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "untrusted.com" - blocked = true ----- diff --git a/modules/images-configuration-cas.adoc b/modules/images-configuration-cas.adoc deleted file mode 100644 index ff93fd4a9b9e..000000000000 --- a/modules/images-configuration-cas.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * registry/configuring-registry-operator.adoc -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-cas_{context}"] -= Configuring additional trust stores for image registry access - -The `image.config.openshift.io/cluster` custom resource can contain a reference to a config map that contains additional certificate authorities to be trusted during image registry access. 
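-
-To confirm which config map, if any, the cluster currently references, you can inspect the `additionalTrustedCA` field directly. The following command is a quick check only; it prints the config map name, or nothing if no additional trust store is configured:
-
-[source,terminal]
-----
-$ oc get image.config.openshift.io/cluster -o jsonpath='{.spec.additionalTrustedCA.name}'
-----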
- -.Prerequisites -* The certificate authorities (CA) must be PEM-encoded. - -.Procedure - -You can create a config map in the `openshift-config` namespace and use its name in `AdditionalTrustedCA` in the `image.config.openshift.io` custom resource to provide additional CAs that should be trusted when contacting external registries. - -The config map key is the hostname of a registry with the port for which this CA is to be trusted, and the PEM certificate content is the value, for each additional registry CA to trust. - -.Image registry CA config map example -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-registry-ca -data: - registry.example.com: | - -----BEGIN CERTIFICATE----- - ... - -----END CERTIFICATE----- - registry-with-port.example.com..5000: | <1> - -----BEGIN CERTIFICATE----- - ... - -----END CERTIFICATE----- ----- -<1> If the registry has the port, such as `registry-with-port.example.com:5000`, `:` should be replaced with `..`. - -You can configure additional CAs with the following procedure. - -. To configure an additional CA: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file=<external_registry_address>=ca.crt -n openshift-config ----- -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io cluster ----- -+ -[source,yaml] ----- -spec: - additionalTrustedCA: - name: registry-config ----- diff --git a/modules/images-configuration-file.adoc b/modules/images-configuration-file.adoc deleted file mode 100644 index 35996661089a..000000000000 --- a/modules/images-configuration-file.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-file_{context}"] -= Configuring image registry settings - -You can configure image registry settings by editing the `image.config.openshift.io/cluster` custom resource (CR). -When changes to the registry are applied to the `image.config.openshift.io/cluster` CR, the Machine Config Operator (MCO) performs the following sequential actions: - -. Cordons the node -. Applies changes by restarting CRI-O -. Uncordons the node -+ -[NOTE] -==== -The MCO does not restart nodes when it detects changes. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` custom resource: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image <1> -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - allowedRegistriesForImport: <2> - - domainName: quay.io - insecure: false - additionalTrustedCA: <3> - name: myconfigmap - registrySources: <4> - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - image-registry.openshift-image-registry.svc:5000 - - reg1.io/myrepo/myapp:latest - insecureRegistries: - - insecure.com -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> `Image`: Holds cluster-wide information about how to handle images. The canonical, and only valid name is `cluster`. 
-<2> `allowedRegistriesForImport`: Limits the container image registries from which normal users can import images. Set this list to the registries that you trust to contain valid images, and that you want applications to be able to import from. Users with permission to create images or `ImageStreamMappings` from the API are not affected by this policy. Typically only cluster administrators have the appropriate permissions.
-<3> `additionalTrustedCA`: A reference to a config map containing additional certificate authorities (CA) that are trusted during image stream import, pod image pull, `openshift-image-registry` pullthrough, and builds. The namespace for this config map is `openshift-config`. The format of the config map is to use the registry hostname as the key, and the PEM certificate as the value, for each additional registry CA to trust.
-<4> `registrySources`: Contains configuration that determines whether the container runtime allows or blocks individual registries when accessing images for builds and pods. Either the `allowedRegistries` parameter or the `blockedRegistries` parameter can be set, but not both. You can also define whether or not to allow access to insecure registries or to registries that allow image short names. This example uses the `allowedRegistries` parameter, which defines the registries that are allowed to be used. The insecure registry `insecure.com` is also allowed. The `registrySources` parameter does not contain configuration for the internal cluster registry.
-+
-[NOTE]
-====
-When the `allowedRegistries` parameter is defined, all registries, including the `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, you must add the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. Do not add the `registry.redhat.io` and `quay.io` registries to the `blockedRegistries` list.
-
-When using the `allowedRegistries`, `blockedRegistries`, or `insecureRegistries` parameter, you can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`.
-
-Insecure external registries should be avoided to reduce possible security risks.
-====
-+
-.
To check that the changes are applied, list your nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-137-182.us-east-2.compute.internal Ready,SchedulingDisabled worker 65m v1.26.0 -ip-10-0-139-120.us-east-2.compute.internal Ready,SchedulingDisabled control-plane 74m v1.26.0 -ip-10-0-176-102.us-east-2.compute.internal Ready control-plane 75m v1.26.0 -ip-10-0-188-96.us-east-2.compute.internal Ready worker 65m v1.26.0 -ip-10-0-200-59.us-east-2.compute.internal Ready worker 63m v1.26.0 -ip-10-0-223-123.us-east-2.compute.internal Ready control-plane 73m v1.26.0 ----- diff --git a/modules/images-configuration-insecure.adoc b/modules/images-configuration-insecure.adoc deleted file mode 100644 index a8d07d0c62f5..000000000000 --- a/modules/images-configuration-insecure.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-insecure_{context}"] -= Allowing insecure registries - -You can add insecure registries, and optionally an individual repository within a registry, by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -Registries that do not use valid SSL certificates or do not require HTTPS connections are considered insecure. - -[WARNING] -==== -Insecure external registries should be avoided to reduce possible security risks. -==== - -.Procedure - -. Edit the `image.config.openshift.io/cluster` CR: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR with an insecure registries list: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - registrySources: <1> - insecureRegistries: <2> - - insecure.com - - reg4.io/myrepo/myapp:latest - allowedRegistries: - - example.com - - quay.io - - registry.redhat.io - - insecure.com <3> - - reg4.io/myrepo/myapp:latest - - image-registry.openshift-image-registry.svc:5000 -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Contains configurations that determine how the container runtime should treat individual registries when accessing images for builds and pods. It does not contain configuration for the internal cluster registry. -<2> Specify an insecure registry. You can specify a repository in that registry. -<3> Ensure that any insecure registries are included in the `allowedRegistries` list. -+ -[NOTE] -==== -When the `allowedRegistries` parameter is defined, all registries, including the registry.redhat.io and quay.io registries and the default {product-registry}, are blocked unless explicitly listed. If you use the parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. 
-==== -+ -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` CR for any changes to the registries, then drains and uncordons the nodes when it detects changes. After the nodes return to the `Ready` state, changes to the insecure and blocked registries appear in the `/etc/containers/registries.conf` file on each node. - -. To check that the registries have been added to the policy file, use the following command on a node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf ----- -+ -The following example indicates that images from the `insecure.com` registry is insecure and is allowed for image pulls and pushes. -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] - -[[registry]] - prefix = "" - location = "insecure.com" - insecure = true ----- diff --git a/modules/images-configuration-parameters.adoc b/modules/images-configuration-parameters.adoc deleted file mode 100644 index d0354231367c..000000000000 --- a/modules/images-configuration-parameters.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -[id="images-configuration-parameters_{context}"] -= Image controller configuration parameters - -The `image.config.openshift.io/cluster` resource holds cluster-wide information about how to handle images. The canonical, and only valid name is `cluster`. Its `spec` offers the following configuration parameters. - -[NOTE] -==== -Parameters such as `DisableScheduledImport`, `MaxImagesBulkImportedPerRepository`, `MaxScheduledImportsPerMinute`, `ScheduledImageImportMinimumIntervalSeconds`, `InternalRegistryHostname` are not configurable. -==== - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`allowedRegistriesForImport` -|Limits the container image registries from which normal users can import images. Set this list to the registries that you trust to contain valid images, and that you want applications to be able to import from. Users with permission to create images or `ImageStreamMappings` from the API are not affected by this policy. Typically only cluster administrators have the appropriate permissions. - -Every element of this list contains a location of the registry specified by the registry domain name. - -`domainName`: Specifies a domain name for the registry. If the registry uses a non-standard `80` or `443` port, the port should be included in the domain name as well. - -`insecure`: Insecure indicates whether the registry is secure or insecure. By default, if not otherwise specified, the registry is assumed to be secure. - -|`additionalTrustedCA` -|A reference to a config map containing additional CAs that should be trusted during `image stream import`, `pod image pull`, `openshift-image-registry pullthrough`, and builds. - -The namespace for this config map is `openshift-config`. The format of the config map is to use the registry hostname as the key, and the PEM-encoded certificate as the value, for each additional registry CA to trust. - -|`externalRegistryHostnames` -|Provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in `publicDockerImageRepository` field in image streams. The value must be in `hostname[:port]` format. 
- -|`registrySources` -|Contains configuration that determines how the container runtime should treat individual registries when accessing images for builds and -pods. For instance, whether or not to allow insecure access. It does not contain configuration for the internal cluster registry. - -`insecureRegistries`: Registries which do not have a valid TLS certificate or only support HTTP connections. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. - -`blockedRegistries`: Registries for which image pull and push actions are denied. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. All other registries are allowed. - -`allowedRegistries`: Registries for which image pull and push actions are allowed. To specify all subdomains, add the asterisk (`\*`) wildcard character as a prefix to the domain name. For example, `*.example.com`. You can specify an individual repository within a registry. For example: `reg1.io/myrepo/myapp:latest`. All other registries are blocked. - -`containerRuntimeSearchRegistries`: Registries for which image pull and push actions are allowed using image short names. All other registries are blocked. - -Either `blockedRegistries` or `allowedRegistries` can be set, but not both. - -|=== - -[WARNING] -==== -When the `allowedRegistries` parameter is defined, all registries, including `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. When using the parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -The `status` field of the `image.config.openshift.io/cluster` resource holds observed values from the cluster. - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`internalRegistryHostname` -|Set by the Image Registry Operator, which controls the `internalRegistryHostname`. It sets the hostname for the default {product-registry}. The value must be in `hostname[:port]` format. For backward compatibility, you can still use the `OPENSHIFT_DEFAULT_REGISTRY` environment variable, but this setting overrides the environment variable. - -|`externalRegistryHostnames` -|Set by the Image Registry Operator, provides the external hostnames for the image registry when it is exposed externally. The first value is used in `publicDockerImageRepository` field in image streams. The values must be in `hostname[:port]` format. 
- -|=== diff --git a/modules/images-configuration-registry-mirror-convert.adoc b/modules/images-configuration-registry-mirror-convert.adoc deleted file mode 100644 index 08afe23e850c..000000000000 --- a/modules/images-configuration-registry-mirror-convert.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc -// * updating/updating-restricted-network-cluster/restricted-network-update.adoc - -:_content-type: PROCEDURE -[id="images-configuration-registry-mirror-convert_{context}"] -= Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring - -Using an `ImageContentSourcePolicy` (ICSP) object to configure repository mirroring is a deprecated feature. This functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. - -ICSP objects are being replaced by `ImageDigestMirrorSet` and `ImageTagMirrorSet` objects to configure repository mirroring. If you have existing YAML files that you used to create `ImageContentSourcePolicy` objects, you can use the `oc adm migrate icsp` command to convert those files to an `ImageDigestMirrorSet` YAML file. The command updates the API to the current version, changes the `kind` value to `ImageDigestMirrorSet`, and changes `spec.repositoryDigestMirrors` to `spec.imageDigestMirrors`. The rest of the file is not changed. - -For more information about `ImageDigestMirrorSet` or `ImageTagMirrorSet` objects, see "Configuring image registry repository mirroring" in the previous section. - -.Prerequisites - -* Ensure that you have access to the cluster as a user with the `cluster-admin` role. - -* Ensure that you have `ImageContentSourcePolicy` objects on your cluster. - -.Procedure - -. Use the following command to convert one or more `ImageContentSourcePolicy` YAML files to an `ImageDigestMirrorSet` YAML file: -+ -[source,terminal] ----- -$ oc adm migrate icsp <file_name>.yaml <file_name>.yaml <file_name>.yaml --dest-dir <path_to_the_directory> ----- -+ --- -where: - -`<file_name>`:: Specifies the name of the source `ImageContentSourcePolicy` YAML. You can list multiple file names. -`--dest-dir`:: Optional: Specifies a directory for the output `ImageDigestMirrorSet` YAML. If unset, the file is written to the current directory. --- -+ -For example, the following command converts the `icsp.yaml` and `icsp-2.yaml` file and saves the new YAML files to the `idms-files` directory. -+ -[source,terminal] ----- -$ oc adm migrate icsp icsp.yaml icsp-2.yaml --dest-dir idms-files ----- -+ -.Example output -[source,terminal] ----- -wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi8repo.5911620242173376087.yaml -wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi9repo.6456931852378115011.yaml ----- - -. Create the CR object by running the following command: -+ -[source,terminal] ----- -$ oc create -f <path_to_the_directory>/<file-name>.yaml ----- -+ --- -where: - -`<path_to_the_directory>`:: Specifies the path to the directory, if you used the `--dest-dir` flag. -`<file_name>`:: Specifies the name of the `ImageDigestMirrorSet` YAML. 
--- - diff --git a/modules/images-configuration-registry-mirror.adoc b/modules/images-configuration-registry-mirror.adoc deleted file mode 100644 index 9ef3a53e7a7c..000000000000 --- a/modules/images-configuration-registry-mirror.adoc +++ /dev/null @@ -1,299 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc -// * updating/updating-restricted-network-cluster/restricted-network-update.adoc - -:_content-type: PROCEDURE -[id="images-configuration-registry-mirror_{context}"] -= Configuring image registry repository mirroring - -Setting up container registry repository mirroring enables you to perform the following tasks: - -* Configure your {product-title} cluster to redirect requests to pull images from a repository on a source image registry and have it resolved by a repository on a mirrored image registry. -* Identify multiple mirrored repositories for each target repository, to make sure that if one mirror is down, another can be used. - -Repository mirroring in {product-title} includes the following attributes: - -* Image pulls are resilient to registry downtimes. -* Clusters in disconnected environments can pull images from critical locations, such as quay.io, and have registries behind a company firewall provide the requested images. -* A particular order of registries is tried when an image pull request is made, with the permanent registry typically being the last one tried. -* The mirror information you enter is added to the `/etc/containers/registries.conf` file on every node in the {product-title} cluster. -* When a node makes a request for an image from the source repository, it tries each mirrored repository in turn until it finds the requested content. If all mirrors fail, the cluster tries the source repository. If successful, the image is pulled to the node. - -Setting up repository mirroring can be done in the following ways: - -* At {product-title} installation: -+ -By pulling container images needed by {product-title} and then bringing those images behind your company's firewall, you can install {product-title} into a datacenter that is in a disconnected environment. - -* After {product-title} installation: -+ -If you did not configure mirroring during {product-title} installation, you can do so post-installation by using one of the following custom resource (CR) objects: -+ --- -** `ImageDigestMirrorSet`. This CR allows you to pull images from a mirrored registry by using digest specifications. -+ -** `ImageTagMirrorSet`. This CR allows you to pull images from a mirrored registry by using image tags. --- -+ -[IMPORTANT] -==== -Using an `ImageContentSourcePolicy` (ICSP) object to configure repository mirroring is a deprecated feature. Deprecated functionality is still included in {product-title} and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. If you have existing YAML files that you used to create `ImageContentSourcePolicy` objects, you can use the `oc adm migrate icsp` command to convert those files to an `ImageDigestMirrorSet` YAML file. For more information, see "Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring" in the following section. -==== - -Both of these custom resource objects identify the following information: --- -* The source of the container image repository you want to mirror. 
-* A separate entry for each mirror repository you want to offer the content -requested from the source repository. --- - -[NOTE] -==== -If your cluster uses an `ImageDigestMirrorSet` or `ImageTagMirrorSet` object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. -==== - -The following procedure creates a post-installation mirror configuration, where you create an `ImageDigestMirrorSet` object. - -.Prerequisites -* Ensure that you have access to the cluster as a user with the `cluster-admin` role. - -* Ensure that there are no `ImageContentSourcePolicy` objects on your cluster. For example, you can use the following command: -+ -[source, terminal] ----- -$ oc get ImageContentSourcePolicy ----- -+ -.Example output -[source, terminal] ----- -No resources found ----- - -.Procedure - -. Configure mirrored repositories, by either: -+ -* Setting up a mirrored repository with Red Hat Quay, as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/repo-mirroring-in-red-hat-quay[Red Hat Quay Repository Mirroring]. Using Red Hat Quay allows you to copy images from one repository to another and also automatically sync those repositories repeatedly over time. -* Using a tool such as `skopeo` to copy images manually from the source directory to the mirrored repository. -+ -For example, after installing the skopeo RPM package on a Red Hat Enterprise Linux (RHEL) 7 or RHEL 8 system, use the `skopeo` command as shown in this example: -+ -[source,terminal] ----- -$ skopeo copy \ -docker://registry.access.redhat.com/ubi9/ubi-minimal:latest@sha256:5cf... \ -docker://example.io/example/ubi-minimal ----- -+ -In this example, you have a container image registry that is named `example.io` with an image repository named `example` to which you want to copy the `ubi9/ubi-minimal` image from `registry.access.redhat.com`. After you create the registry, you can configure your {product-title} cluster to redirect requests made of the source repository to the mirrored repository. - -. Log in to your {product-title} cluster. - -. Create an `ImageDigestMirrorSet` or `ImageTagMirrorSet` CR, as needed, replacing the source and mirrors with your own registry and repository pairs and images: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 <1> -kind: ImageDigestMirrorSet <2> -metadata: - name: ubi9repo -spec: - imageDigestMirrors: <3> - - mirrors: - - example.io/example/ubi-minimal <4> - - example.com/example/ubi-minimal <5> - source: registry.access.redhat.com/ubi9/ubi-minimal <6> - mirrorSourcePolicy: AllowContactingSource <7> - - mirrors: - - mirror.example.com/redhat - source: registry.redhat.io/openshift4 <8> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.com - source: registry.redhat.io <9> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net/image - source: registry.example.com/example/myimage <10> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net - source: registry.example.com/example <11> - mirrorSourcePolicy: AllowContactingSource - - mirrors: - - mirror.example.net/registry-example-com - source: registry.example.com <12> - mirrorSourcePolicy: AllowContactingSource ----- -<1> Indicates the API to use with this CR. This must be `config.openshift.io/v1`. -<2> Indicates the kind of object according to the pull type: -** `ImageDigestMirrorSet`: Pulls a digest reference image. 
-** `ImageTagMirrorSet`: Pulls a tag reference image. -<3> Indicates the type of image pull method, either: -** `imageDigestMirrors`: Use for an `ImageDigestMirrorSet` CR. -** `imageTagMirrors`: Use for an `ImageTagMirrorSet` CR. -<4> Indicates the name of the mirrored image registry and repository. -<5> Optional: Indicates a secondary mirror repository for each target repository. If one mirror is down, the target repository can use another mirror. -<6> Indicates the registry and repository source, which is the repository that is referred to in image pull specifications. -<7> Optional: Indicates the fallback policy if the image pull fails: -** `AllowContactingSource`: Allows continued attempts to pull the image from the source repository. This is the default. -** `NeverContactSource`: Prevents continued attempts to pull the image from the source repository. -<8> Optional: Indicates a namespace inside a registry, which allows you to use any image in that namespace. If you use a registry domain as a source, the object is applied to all repositories from the registry. -<9> Optional: Indicates a registry, which allows you to use any image in that registry. If you specify a registry name, the object is applied to all repositories from a source registry to a mirror registry. -<10> Pulls the image `registry.example.com/example/myimage@sha256:...` from the mirror `mirror.example.net/image@sha256:..`. -<11> Pulls the image `registry.example.com/example/image@sha256:...` in the source registry namespace from the mirror `mirror.example.net/image@sha256:...`. -<12> Pulls the image `registry.example.com/myimage@sha256` from the mirror registry `example.net/registry-example-com/myimage@sha256:...`. The `ImageContentSourcePolicy` resource is applied to all repositories from a source registry to a mirror registry `mirror.example.net/registry-example-com`. - -. Create the new object: -+ -[source,terminal] ----- -$ oc create -f registryrepomirror.yaml ----- -+ -After the object is created, the Machine Config Operator (MCO) cordons the nodes as the new settings are deployed to each node. The MCO restarts the nodes for an `ImageTagMirrorSet` object only. The MCO does not restart the nodes for `ImageDigestMirrorSet` objects. When the nodes are uncordoned, the cluster starts using the mirrored repository for requests to the source repository. - -. To check that the mirrored configuration settings are applied, do the following on one of the nodes. - -.. List your nodes: -+ -[source,terminal] ----- -$ oc get node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-137-44.ec2.internal Ready worker 7m v1.26.0 -ip-10-0-138-148.ec2.internal Ready master 11m v1.26.0 -ip-10-0-139-122.ec2.internal Ready master 11m v1.26.0 -ip-10-0-147-35.ec2.internal Ready worker 7m v1.26.0 -ip-10-0-153-12.ec2.internal Ready worker 7m v1.26.0 -ip-10-0-154-10.ec2.internal Ready master 11m v1.26.0 ----- - -.. Start the debugging process to access the node: -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-147-35.ec2.internal ----- -+ -.Example output -[source,terminal] ----- -Starting pod/ip-10-0-147-35ec2internal-debug ... -To use host binaries, run `chroot /host` ----- - -.. Change your root directory to `/host`: -+ -[source,terminal] ----- -sh-4.2# chroot /host ----- - -.. 
Check the `/etc/containers/registries.conf` file to make sure -the changes were made: -+ -[source,terminal] ----- -sh-4.2# cat /etc/containers/registries.conf ----- -+ -The following output represents a `registries.conf` file where an `ImageDigestMirrorSet` object and an `ImageTagMirrorSet` object were applied. The final two entries are marked `digest-only` and `tag-only` respectively. -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] -short-name-mode = "" - -[[registry]] - prefix = "" - location = "registry.access.redhat.com/ubi9/ubi-minimal" <1> - - [[registry.mirror]] - location = "example.io/example/ubi-minimal" <2> - pull-from-mirror = "digest-only" <3> - - [[registry.mirror]] - location = "example.com/example/ubi-minimal" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com" - - [[registry.mirror]] - location = "mirror.example.net/registry-example-com" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com/example" - - [[registry.mirror]] - location = "mirror.example.net" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.example.com/example/myimage" - - [[registry.mirror]] - location = "mirror.example.net/image" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.redhat.io" - - [[registry.mirror]] - location = "mirror.example.com" - pull-from-mirror = "digest-only" - -[[registry]] - prefix = "" - location = "registry.redhat.io/openshift4" - - [[registry.mirror]] - location = "mirror.example.com/redhat" - pull-from-mirror = "digest-only" -[[registry]] - prefix = "" - location = "registry.access.redhat.com/ubi9/ubi-minimal" - blocked = true <4> - - [[registry.mirror]] - location = "example.io/example/ubi-minimal-tag" - pull-from-mirror = "tag-only" <5> ----- -<1> Indicates the repository that is referred to in a pull spec. -<2> Indicates the mirror for that repository. -<3> Indicates that the image pull from the mirror is a digest reference image. -<4> Indicates that the `NeverContactSource` parameter is set for this repository. -<5> Indicates that the image pull from the mirror is a tag reference image. - -.. Pull an image to the node from the source and check if it is resolved by the mirror. -+ -[source,terminal] ----- -sh-4.2# podman pull --log-level=debug registry.access.redhat.com/ubi9/ubi-minimal@sha256:5cf... ----- - -.Troubleshooting repository mirroring - -If the repository mirroring procedure does not work as described, use the following information about how repository mirroring works to help troubleshoot the problem. - -* The first working mirror is used to supply the pulled image. -* The main registry is only used if no other mirror works. -* From the system context, the `Insecure` flags are used as fallback. -* The format of the `/etc/containers/registries.conf` file has changed recently. It is now version 2 and in TOML format. -* You cannot add the same repository to both an `ImageDigestMirrorSet` and an `ImageTagMirrorSet` object. 
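-
-When troubleshooting, it can also help to confirm that the mirror configuration was accepted and fully rolled out before testing pulls. The following checks are a suggested starting point rather than part of the formal procedure:
-
-[source,terminal]
-----
-$ oc get imagedigestmirrorset
-$ oc get machineconfigpool
-----
-
-The first command lists the mirror set objects that are defined on the cluster. The second command shows whether the Machine Config Operator has finished applying the change to each pool; wait until the pools report `UPDATED` as `True` before testing image pulls.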
- diff --git a/modules/images-configuration-shortname.adoc b/modules/images-configuration-shortname.adoc deleted file mode 100644 index a2b7b0f87274..000000000000 --- a/modules/images-configuration-shortname.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/image-configuration.adoc -// * post_installation_configuration/preparing-for-users.adoc - -:_content-type: PROCEDURE -[id="images-configuration-shortname_{context}"] -= Adding registries that allow image short names - -You can add registries to search for an image short name by editing the `image.config.openshift.io/cluster` custom resource (CR). {product-title} applies the changes to this CR to all nodes in the cluster. - -An image short name enables you to search for images without including the fully qualified domain name in the pull spec. For example, you could use `rhel7/etcd` instead of `registry.access.redhat.com/rhe7/etcd`. - -You might use short names in situations where using the full path is not practical. For example, if your cluster references multiple internal registries whose DNS changes frequently, you would need to update the fully qualified domain names in your pull specs with each change. In this case, using an image short name might be beneficial. - -When pulling or pushing images, the container runtime searches the registries listed under the `registrySources` parameter in the `image.config.openshift.io/cluster` CR. If you created a list of registries under the `containerRuntimeSearchRegistries` parameter, when pulling an image with a short name, the container runtime searches those registries. - -[WARNING] -==== -Using image short names with public registries is strongly discouraged because the image might not deploy if the public registry requires authentication. Use fully-qualified image names with public registries. - -Red Hat internal or private registries typically support the use of image short names. - -If you list public registries under the `containerRuntimeSearchRegistries` parameter, you expose your credentials to all the registries on the list and you risk network and registry attacks. - -You cannot list multiple public registries under the `containerRuntimeSearchRegistries` parameter if each public registry requires different credentials and a cluster does not list the public registry in the global pull secret. - -For a public registry that requires authentication, you can use an image short name only if the registry has its credentials stored in the global pull secret. -//// -Potentially add the last line to the Ignoring image registry repository mirroring section. -//// -==== - -The Machine Config Operator (MCO) watches the `image.config.openshift.io/cluster` resource for any changes to the registries. When the MCO detects a change, it drains the nodes, applies the change, and uncordons the nodes. After the nodes return to the `Ready` state, if the `containerRuntimeSearchRegistries` parameter is added, the MCO creates a file in the `/etc/containers/registries.conf.d` directory on each node with the listed registries. The file overrides the default list of unqualified search registries in the `/host/etc/containers/registries.conf` file. There is no way to fall back to the default list of unqualified search registries. - -The `containerRuntimeSearchRegistries` parameter works only with the Podman and CRI-O container engines. The registries in the list can be used only in pod specs, not in builds and image streams. - -.Procedure - -. 
Edit the `image.config.openshift.io/cluster` custom resource: -+ -[source,terminal] ----- -$ oc edit image.config.openshift.io/cluster ----- -+ -The following is an example `image.config.openshift.io/cluster` CR: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Image -metadata: - annotations: - release.openshift.io/create-only: "true" - creationTimestamp: "2019-05-17T13:44:26Z" - generation: 1 - name: cluster - resourceVersion: "8302" - selfLink: /apis/config.openshift.io/v1/images/cluster - uid: e34555da-78a9-11e9-b92b-06d6c7da38dc -spec: - allowedRegistriesForImport: - - domainName: quay.io - insecure: false - additionalTrustedCA: - name: myconfigmap - registrySources: - containerRuntimeSearchRegistries: <1> - - reg1.io - - reg2.io - - reg3.io - allowedRegistries: <2> - - example.com - - quay.io - - registry.redhat.io - - reg1.io - - reg2.io - - reg3.io - - image-registry.openshift-image-registry.svc:5000 -... -status: - internalRegistryHostname: image-registry.openshift-image-registry.svc:5000 ----- -<1> Specify registries to use with image short names. You should use image short names with only internal or private registries to reduce possible security risks. -<2> Ensure that any registries listed under `containerRuntimeSearchRegistries` are included in the `allowedRegistries` list. -+ -[NOTE] -==== -When the `allowedRegistries` parameter is defined, all registries, including the `registry.redhat.io` and `quay.io` registries and the default {product-registry}, are blocked unless explicitly listed. If you use this parameter, to prevent pod failure, add all registries including the `registry.redhat.io` and `quay.io` registries and the `internalRegistryHostname` to the `allowedRegistries` list, as they are required by payload images within your environment. For disconnected clusters, mirror registries should also be added. -==== - -. To check that the registries have been added, when a node returns to the `Ready` state, use the following command on the node: -+ -[source,terminal] ----- -$ cat /host/etc/containers/registries.conf.d/01-image-searchRegistries.conf ----- -+ -.Example output -[source,terminal] ----- -unqualified-search-registries = ['reg1.io', 'reg2.io', 'reg3.io'] ----- - diff --git a/modules/images-container-repository-about.adoc b/modules/images-container-repository-about.adoc deleted file mode 100644 index b4441b12c56e..000000000000 --- a/modules/images-container-repository-about.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-container-repository-about_{context}"] -= Image repository - -An image repository is a collection of related container images and tags identifying them. For example, the {product-title} Jenkins images are in the repository: - -[source,text] ----- -docker.io/openshift/jenkins-2-centos7 ----- diff --git a/modules/images-create-guide-general.adoc b/modules/images-create-guide-general.adoc deleted file mode 100644 index 89c34156a1d5..000000000000 --- a/modules/images-create-guide-general.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guide-general_{context}"] -= General container image guidelines - -The following guidelines apply when creating a container image in general, and are independent of whether the images are used on {product-title}. 
- -[discrete] -== Reuse images - -Wherever possible, base your image on an appropriate upstream image using the `FROM` statement. This ensures your image can easily pick up security fixes from an upstream image when it is updated, rather than you having to update your dependencies directly. - -In addition, use tags in the `FROM` instruction, for example, `rhel:rhel7`, to make it clear to users exactly which version of an image your image is based on. Using a tag other than `latest` ensures your image is not subjected to breaking changes that might go into the `latest` version of an upstream image. - -[discrete] -== Maintain compatibility within tags - -When tagging your own images, try to maintain backwards compatibility within a tag. For example, if you provide an image named `foo` and it currently includes version `1.0`, you might provide a tag of `foo:v1`. When you update the image, as long as it continues to be compatible with the original image, you can continue to tag the new image `foo:v1`, and downstream consumers of this tag are able to get updates without being broken. - -If you later release an incompatible update, then switch to a new tag, for example `foo:v2`. This allows downstream consumers to move up to the new version at will, but not be inadvertently broken by the new incompatible image. Any downstream consumer using `foo:latest` takes on the risk of any incompatible changes being introduced. - -[discrete] -== Avoid multiple processes - -Do not start multiple services, such as a database and `SSHD`, inside one container. This is not necessary because containers are lightweight and can be easily linked together for orchestrating multiple processes. {product-title} allows you to easily colocate and co-manage related images by grouping them into a single pod. - -This colocation ensures the containers share a network namespace and storage for communication. Updates are also less disruptive as each image can be updated less frequently and independently. Signal handling flows are also clearer with a single process as you do not have to manage routing signals to spawned processes. - -[discrete] -== Use `exec` in wrapper scripts - -Many images use wrapper scripts to do some setup before starting a process for the software being run. If your image uses such a script, that script uses `exec` so that the script's process is replaced by your software. If you do not use `exec`, then signals sent by your container runtime go to your wrapper script instead of your software's process. This is not what you want. - -If you have a wrapper script that starts a process for some server. You start your container, for example, using `podman run -i`, which runs the wrapper script, which in turn starts your process. If you want to close your container with `CTRL+C`. If your wrapper script used `exec` to start the server process, `podman` sends SIGINT to the server process, and everything works as you expect. If you did not use `exec` in your wrapper script, `podman` sends SIGINT to the process for the wrapper script and your process keeps running like nothing happened. - -Also note that your process runs as `PID 1` when running in a container. This means that if your main process terminates, the entire container is stopped, canceling any child processes you launched from your `PID 1` process. - -//// -See the http://blog.phusion.nl/2015/01/20/docker-and-the-pid-1-zombie-reaping-problem/["Docker and the `PID 1` zombie reaping problem"] blog article for additional implications. 
-Also see the https://felipec.wordpress.com/2013/11/04/init/["Demystifying the init system (PID 1)"] blog article for a deep dive on PID 1 and `init` -systems. -//// - -[discrete] -== Clean temporary files - -Remove all temporary files you create during the build process. This also includes any files added with the `ADD` command. For example, run the `yum clean` command after performing `yum install` operations. - -You can prevent the `yum` cache from ending up in an image layer by creating your `RUN` statement as follows: - -[source,terminal] ----- -RUN yum -y install mypackage && yum -y install myotherpackage && yum clean all -y ----- - -Note that if you instead write: - -[source,terminal] ----- -RUN yum -y install mypackage -RUN yum -y install myotherpackage && yum clean all -y ----- - -Then the first `yum` invocation leaves extra files in that layer, and these files cannot be removed when the `yum clean` operation is run later. The extra files are not visible in the final image, but they are present in the underlying layers. - -The current container build process does not allow a command run in a later layer to shrink the space used by the image when something was removed in an earlier layer. However, this may change in the future. This means that if you perform an `rm` command in a later layer, although the files are hidden it does not reduce the overall size of the image to be downloaded. Therefore, as with the `yum clean` example, it is best to remove files in the same command that created them, where possible, so they do not end up written to a layer. - -In addition, performing multiple commands in a single `RUN` statement reduces the number of layers in your image, which improves download and extraction time. - -[discrete] -== Place instructions in the proper order - -The container builder reads the `Dockerfile` and runs the instructions from top to bottom. Every instruction that is successfully executed creates a layer which can be reused the next time this or another image is built. It is very important to place instructions that rarely change at the top of your `Dockerfile`. Doing so ensures the next builds of the same image are very fast because the cache is not invalidated by upper layer changes. - -For example, if you are working on a `Dockerfile` that contains an `ADD` command to install a file you are iterating on, and a `RUN` command to `yum install` a package, it is best to put the `ADD` command last: - -[source,terminal] ----- -FROM foo -RUN yum -y install mypackage && yum clean all -y -ADD myfile /test/myfile ----- - -This way each time you edit `myfile` and rerun `podman build` or `docker build`, the system reuses the cached layer for the `yum` command and only generates the new layer for the `ADD` operation. - -If instead you wrote the `Dockerfile` as: - -[source,terminal] ----- -FROM foo -ADD myfile /test/myfile -RUN yum -y install mypackage && yum clean all -y ----- - -Then each time you changed `myfile` and reran `podman build` or `docker build`, the `ADD` operation would invalidate the `RUN` layer cache, so the `yum` operation must be rerun as well. - -[discrete] -== Mark important ports - -The EXPOSE instruction makes a port in the container available to the host system and other containers. 
While it is possible to specify that a port should be exposed with a `podman run` invocation, using the EXPOSE instruction in a `Dockerfile` makes it easier for both humans and software to use your image by explicitly declaring the ports your software needs to run: - -* Exposed ports show up under `podman ps` associated with containers created from your image. -* Exposed ports are present in the metadata for your image returned by `podman inspect`. -* Exposed ports are linked when you link one container to another. - -[discrete] -== Set environment variables - -It is good practice to set environment variables with the `ENV` instruction. One example is to set the version of your project. This makes it easy for people to find the version without looking at the `Dockerfile`. Another example is advertising a path on the system that could be used by another process, such as `JAVA_HOME`. - -[discrete] -== Avoid default passwords - -Avoid setting default passwords. Many people extend the image and forget to remove or change the default password. This can lead to security issues if a user in production is assigned a well-known password. Passwords are configurable using an environment variable instead. - -If you do choose to set a default password, ensure that an appropriate warning message is displayed when the container is started. The message should inform the user of the value of the default password and explain how to change it, such as what environment variable to set. - -[discrete] -== Avoid sshd - -It is best to avoid running `sshd` in your image. You can use the `podman exec` or `docker exec` command to access containers that are running on the local host. Alternatively, you can use the `oc exec` command or the `oc rsh` command to access containers that are running on the {product-title} cluster. Installing and running `sshd` in your image opens up additional vectors for attack and requirements for security patching. - -[discrete] -== Use volumes for persistent data - -Images use a link:https://docs.docker.com/reference/builder/#volume[volume] for persistent data. This way {product-title} mounts the network storage to the node running the container, and if the container moves to a new node the storage is reattached to that node. By using the volume for all persistent storage needs, the content is preserved even if the container is restarted or moved. If your image writes data to arbitrary locations within the container, that content could not be preserved. - -All data that needs to be preserved even after the container is destroyed must be written to a volume. Container engines support a `readonly` flag for containers, which can be used to strictly enforce good practices about not writing data to ephemeral storage in a container. Designing your image around that capability now makes it easier to take advantage of it later. - -Explicitly defining volumes in your `Dockerfile` makes it easy for consumers of the image to understand what volumes they must define when running your image. - -See the link:https://kubernetes.io/docs/concepts/storage/volumes/[Kubernetes -documentation] for more information on how volumes are used in {product-title}. - -//// -For more information on how Volumes are used in {product-title}, see https://kubernetes.io/docs/concepts/storage/volumes[this documentation]. 
(NOTE to docs team: this link should really go to something in the openshift docs, once we have it) -//// - -[NOTE] -==== -Even with persistent volumes, each instance of your image has its own volume, and the filesystem is not shared between instances. This means the volume cannot be used to share state in a cluster. -==== - -//// -[role="_additional-resources"] -.Additional resources - -* Docker documentation - https://docs.docker.com/articles/dockerfile_best-practices/[Best practices for writing Dockerfiles] - -* Project Atomic documentation - http://www.projectatomic.io/docs/docker-image-author-guidance/[Guidance for Container Image Authors] -//// diff --git a/modules/images-create-guide-openshift.adoc b/modules/images-create-guide-openshift.adoc deleted file mode 100644 index 281ad8162bd5..000000000000 --- a/modules/images-create-guide-openshift.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guide-openshift_{context}"] -= {product-title}-specific guidelines - -The following are guidelines that apply when creating container images specifically -for use on {product-title}. -ifdef::openshift-online[] - -[id="privileges-and-volume-builds_{context}"] -== Privileges and volume builds - -Container images cannot be built using the `VOLUME` directive in the `DOCKERFILE`. Images using a read/write file system must use persistent volumes or `emptyDir` volumes instead of local storage. Instead of specifying a volume in the Dockerfile, specify a directory for local storage and mount either a persistent volume or `emptyDir` volume to that directory when deploying the pod. -endif::[] - -[id="enable-images-for-source-to-image_{context}"] -== Enable images for source-to-image (S2I) - -For images that are intended to run application code provided by a third party, such as a Ruby image designed to run Ruby code provided by a developer, you can enable your image to work with the link:https://github.com/openshift/source-to-image[Source-to-Image (S2I)] build tool. S2I is a framework that makes it easy to write images that take application source code as an input and produce a new image that runs the assembled application as output. - - -[id="use-uid_{context}"] -== Support arbitrary user ids - -By default, {product-title} runs containers using an arbitrarily assigned user ID. This provides additional security against processes escaping the container due to a container engine vulnerability and thereby achieving escalated permissions on the host node. - -For an image to support running as an arbitrary user, directories and files that are written to by processes in the image must be owned by the root group and be read/writable by that group. Files to be executed must also have group execute permissions. - -Adding the following to your Dockerfile sets the directory and file permissions to allow users in the root group to access them in the built image: - -[source,terminal] ----- -RUN chgrp -R 0 /some/directory && \ - chmod -R g=u /some/directory ----- - -Because the container user is always a member of the root group, the container user can read and write these files. - -[WARNING] -==== -Care must be taken when altering the directories and file permissions of sensitive areas of a container, which is no different than to a normal system. - -If applied to sensitive areas, such as `/etc/passwd`, this can allow the modification of such files by unintended users potentially exposing the container or host. 
CRI-O supports the insertion of arbitrary user IDs into the container's `/etc/passwd`, so changing permissions is never required. -==== - -In addition, the processes running in the container must not listen on privileged ports, ports below 1024, since they are not running as a privileged user. - -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -[IMPORTANT] -==== -If your S2I image does not include a `USER` declaration with a numeric user, your builds fail by default. To allow images that use either named users or the root `0` user to build in {product-title}, you can add the project's builder service account, `system:serviceaccount:<your-project>:builder`, to the `anyuid` security context constraint (SCC). Alternatively, you can allow all images to run as any user. -==== -endif::[] - -[id="use-services_{context}"] -== Use services for inter-image communication - -For cases where your image needs to communicate with a service provided by another image, such as a web front end image that needs to access a database image to store and retrieve data, your image consumes an {product-title} service. Services provide a static endpoint for access which does not change as containers are stopped, started, or moved. In addition, services provide load balancing for requests. - -//// -For more information see https://kubernetes.io/docs/concepts/services-networking/service/[this documentation]. (NOTE to docs team: this link should really go to something in the openshift docs once we have it) -//// - -[id="provide-common-libraries_{context}"] -== Provide common libraries - -For images that are intended to run application code provided by a third party, ensure that your image contains commonly used libraries for your platform. In particular, provide database drivers for common databases used with your platform. For example, provide JDBC drivers for MySQL and PostgreSQL if you are creating a Java framework image. Doing so prevents the need for common dependencies to be downloaded during application assembly time, speeding up application image builds. It also simplifies the work required by application developers to ensure all of their dependencies are met. - -[id="use-env-vars_{context}"] -== Use environment variables for configuration - -Users of your image are able to configure it without having to create a downstream image based on your image. This means that the runtime configuration is handled using environment variables. For a simple configuration, the running process can consume the environment variables directly. For a more complicated configuration or for runtimes which do not support this, configure the runtime by defining a template configuration file that is processed during startup. During this processing, values supplied using environment variables can be substituted into the configuration file or used to make decisions about what options to set in the configuration file. - -It is also possible and recommended to pass secrets such as certificates and keys into the container using environment variables. This ensures that the secret values do not end up committed in an image and leaked into a container image registry. - -Providing environment variables allows consumers of your image to customize behavior, such as database settings, passwords, and performance tuning, without having to introduce a new layer on top of your image. Instead, they can simply define environment variable values when defining a pod and change those settings without rebuilding the image. 
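As an illustration of the pattern described above, the following minimal pod sketch shows how a consumer of your image could supply both a plain setting and a secret-backed value through environment variables. The image reference, variable names, and secret name are hypothetical placeholders, not part of any shipped image:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: example-app
spec:
  containers:
  - name: app
    image: quay.io/example/app:latest # hypothetical image reference
    env:
    - name: DB_HOST # plain configuration value read by the application at startup
      value: mariadb.example.svc
    - name: DB_PASSWORD # secret value injected as an environment variable
      valueFrom:
        secretKeyRef:
          name: example-db-secret
          key: password
----

Because these values live in the pod definition rather than in an image layer, consumers can change them without rebuilding the image.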
- -For extremely complex scenarios, configuration can also be supplied using volumes that would be mounted into the container at runtime. However, if you elect to do it this way you must ensure that your image provides clear error messages on startup when the necessary volume or configuration is not present. - -This topic is related to the Using Services for Inter-image Communication topic in that configuration like datasources are defined in terms of environment variables that provide the service endpoint information. This allows an application to dynamically consume a datasource service that is defined in the {product-title} environment without modifying the application image. - -In addition, tuning is done by inspecting the `cgroups` settings for the container. This allows the image to tune itself to the available memory, CPU, and other resources. For example, Java-based images tune their heap based on the `cgroup` maximum memory parameter to ensure they do not exceed the limits and get an out-of-memory error. - -//// -See the following references for more on how to manage `cgroup` quotas -in containers: - -- Blog article - https://goldmann.pl/blog/2014/09/11/resource-management-in-docker[Resource management in Docker] -- Docker documentation - https://docs.docker.com/engine/admin/runmetrics/[Runtime Metrics] -- Blog article - http://fabiokung.com/2014/03/13/memory-inside-linux-containers[Memory inside Linux containers] -//// - -[id="set-image-metadata_{context}"] -== Set image metadata - -Defining image metadata helps {product-title} better consume your container images, allowing {product-title} to create a better experience for developers using your image. For example, you can add metadata to provide helpful descriptions of your image, or offer suggestions on other images that are needed. - -[id="clustering_{context}"] -== Clustering - -You must fully understand what it means to run multiple instances of your image. In the simplest case, the load balancing function of a service handles routing traffic to all instances of your image. However, many frameworks must share information to perform leader election or failover state; for example, in session replication. - -Consider how your instances accomplish this communication when running in {product-title}. Although pods can communicate directly with each other, their IP addresses change anytime the pod starts, stops, or is moved. Therefore, it is important for your clustering scheme to be dynamic. - -[id="logging_{context}"] -== Logging - -It is best to send all logging to standard out. {product-title} collects standard out from containers and sends it to the centralized logging service where it can be viewed. If you must separate log content, prefix the output with an appropriate keyword, which makes it possible to filter the messages. - -If your image logs to a file, users must use manual operations to enter the running container and retrieve or view the log file. - -[id="liveness-and-readiness-probes_{context}"] -== Liveness and readiness probes - -Document example liveness and readiness probes that can be used with your image. These probes allow users to deploy your image with confidence that traffic is not be routed to the container until it is prepared to handle it, and that the container is restarted if the process gets into an unhealthy state. - -[id="templates_{context}"] -== Templates - -Consider providing an example template with your image. 
A template gives users an easy way to quickly get your image deployed with a working configuration. Your template must include the liveness and readiness probes you documented with the image, for completeness. - -//// -[role="_additional-resources"] -.Additional resources - -* link:https://docs.docker.com/engine/docker-overview/[Docker basics] -* link:https://docs.docker.com/engine/reference/builder/[Dockerfile reference] -* link:http://www.projectatomic.io/docs/docker-image-author-guidance[Project Atomic Guidance for Container Image Authors] -//// diff --git a/modules/images-create-guidelines.adoc b/modules/images-create-guidelines.adoc deleted file mode 100644 index 09725001eb7c..000000000000 --- a/modules/images-create-guidelines.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-guidelines_{context}"] -= Learning container best practices - -When creating container images to run on {product-title} there are a number of best practices to consider as an image author to ensure a good experience for consumers of those images. Because images are intended to be immutable and used as-is, the following guidelines help ensure that your images are highly consumable and easy to use on {product-title}. diff --git a/modules/images-create-metadata-define.adoc b/modules/images-create-metadata-define.adoc deleted file mode 100644 index 27b38648b81b..000000000000 --- a/modules/images-create-metadata-define.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-metadata-define_{context}"] -= Defining image metadata - -You can use the `LABEL` instruction in a `Dockerfile` to define image -metadata. Labels are similar to environment variables in that they are key value -pairs attached to an image or a container. Labels are different from environment -variable in that they are not visible to the running application and they can -also be used for fast look-up of images and containers. - -link:https://docs.docker.com/engine/reference/builder/#label[Docker -documentation] for more information on the `LABEL` instruction. - -The label names should typically be namespaced. The namespace should be set -accordingly to reflect the project that is going to pick up the labels and use -them. For {product-title} the namespace should be set to `io.openshift` and -for Kubernetes the namespace is `io.k8s`. - -See the https://docs.docker.com/engine/userguide/labels-custom-metadata[Docker custom -metadata] documentation for details about the format. - -.Supported Metadata -[cols="3a,8a",options="header"] -|=== - -|Variable |Description - -|`io.openshift.tags` -|This label contains a list of tags represented as list of comma-separated -string values. The tags are the way to categorize the container images into broad -areas of functionality. Tags help UI and generation tools to suggest relevant -container images during the application creation process. - ----- -LABEL io.openshift.tags mongodb,mongodb24,nosql ----- - -|`io.openshift.wants` -|Specifies a list of tags that the generation tools and the UI might use to -provide relevant suggestions if you do not have the container images with given tags -already. For example, if the container image wants `mysql` and `redis` and you -do not have the container image with `redis` tag, then UI might suggest you to add -this image into your deployment. 
- ----- -LABEL io.openshift.wants mongodb,redis ----- - -|`io.k8s.description` -|This label can be used to give the container image consumers more detailed -information about the service or functionality this image provides. The UI can -then use this description together with the container image name to provide more -human friendly information to end users. - ----- -LABEL io.k8s.description The MySQL 5.5 Server with master-slave replication support ----- - -|`io.openshift.non-scalable` -|An image might use this variable to suggest that it does not support scaling. -The UI will then communicate this to consumers of that image. Being not-scalable -basically means that the value of `replicas` should initially not be set higher -than 1. - ----- -LABEL io.openshift.non-scalable true ----- - -|`io.openshift.min-memory` and `io.openshift.min-cpu` -|This label suggests how much resources the container image might need to -work properly. The UI might warn the user that deploying this container image may -exceed their user quota. The values must be compatible with -Kubernetes quantity. - ----- -LABEL io.openshift.min-memory 16Gi -LABEL io.openshift.min-cpu 4 ----- - -|=== diff --git a/modules/images-create-metadata.adoc b/modules/images-create-metadata.adoc deleted file mode 100644 index 5279bda0d52c..000000000000 --- a/modules/images-create-metadata.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -[id="images-create-metadata_{context}"] -= Including metadata in images - -Defining image metadata helps {product-title} better consume your container images, allowing {product-title} to create a better experience for developers using your image. For example, you can add metadata to provide helpful descriptions of your image, or offer suggestions on other images that may also be needed. - -This topic only defines the metadata needed by the current set of use cases. Additional metadata or use cases may be added in the future. - -== Defining image metadata -You can use the `LABEL` instruction in a `Dockerfile` to define image metadata. Labels are similar to environment variables in that they are key value pairs attached to an image or a container. Labels are different from environment variable in that they are not visible to the running application and they can also be used for fast look-up of images and containers. - -link:https://docs.docker.com/engine/reference/builder/#label[Docker -documentation] for more information on the `LABEL` instruction. - -The label names are typically namespaced. The namespace is set accordingly to reflect the project that is going to pick up the labels and use them. For {product-title} the namespace is set to `io.openshift` and for Kubernetes the namespace is `io.k8s`. - -See the https://docs.docker.com/engine/userguide/labels-custom-metadata[Docker custom metadata] documentation for details about the format. - -.Supported Metadata -[cols="3a,8a",options="header"] -|=== - -|Variable |Description - -|`io.openshift.tags` -|This label contains a list of tags represented as a list of comma-separated string values. The tags are the way to categorize the container images into broad areas of functionality. Tags help UI and generation tools to suggest relevant container images during the application creation process. 
- ----- -LABEL io.openshift.tags mongodb,mongodb24,nosql ----- - -|`io.openshift.wants` -|Specifies a list of tags that the generation tools and the UI uses to provide relevant suggestions if you do not have the container images with specified tags already. For example, if the container image wants `mysql` and `redis` and you do not have the container image with `redis` tag, then UI can suggest you to add this image into your deployment. - ----- -LABEL io.openshift.wants mongodb,redis ----- - -|`io.k8s.description` -|This label can be used to give the container image consumers more detailed information about the service or functionality this image provides. The UI can then use this description together with the container image name to provide more human friendly information to end users. - ----- -LABEL io.k8s.description The MySQL 5.5 Server with master-slave replication support ----- - -|`io.openshift.non-scalable` -|An image can use this variable to suggest that it does not support scaling. The UI then communicates this to consumers of that image. Being not-scalable means that the value of `replicas` should initially not be set higher than `1`. - ----- -LABEL io.openshift.non-scalable true ----- - -|`io.openshift.min-memory` and `io.openshift.min-cpu` -|This label suggests how much resources the container image needs to work properly. The UI can warn the user that deploying this container image may exceed their user quota. The values must be compatible with Kubernetes quantity. - ----- -LABEL io.openshift.min-memory 16Gi -LABEL io.openshift.min-cpu 4 ----- - -|=== diff --git a/modules/images-create-s2i-build.adoc b/modules/images-create-s2i-build.adoc deleted file mode 100644 index c90b36ee958c..000000000000 --- a/modules/images-create-s2i-build.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -// * openshift_images/create-images.adoc - -:_content-type: CONCEPT -[id="images-create-s2i-build_{context}"] -= Understanding the source-to-image build process - -The build process consists of the following three fundamental elements, which are combined into a final container image: - -* Sources -* Source-to-image (S2I) scripts -* Builder image - -S2I generates a Dockerfile with the builder image as the first `FROM` instruction. The Dockerfile generated by S2I is then passed to Buildah. diff --git a/modules/images-create-s2i-scripts.adoc b/modules/images-create-s2i-scripts.adoc deleted file mode 100644 index 9ee9af33150a..000000000000 --- a/modules/images-create-s2i-scripts.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -//* builds/build-strategies.adoc -// * openshift_images/create-images.adoc - -[id="images-create-s2i-scripts_{context}"] -= How to write source-to-image scripts - -You can write source-to-image (S2I) scripts in any programming language, as long as the scripts are executable inside the builder image. S2I supports multiple options providing `assemble`/`run`/`save-artifacts` scripts. All of these locations are checked on each build in the following order: - -. A script specified in the build configuration. -. A script found in the application source `.s2i/bin` directory. -. A script found at the default image URL with the `io.openshift.s2i.scripts-url` label. 
- -Both the `io.openshift.s2i.scripts-url` label specified in the image and the script specified in a build configuration can take one of the following forms: - -* `image:///path_to_scripts_dir`: absolute path inside the image to a directory where the S2I scripts are located. -* `$$file:///path_to_scripts_dir$$`: relative or absolute path to a directory on the host where the S2I scripts are located. -* `http(s)://path_to_scripts_dir`: URL to a directory where the S2I scripts are located. - -.S2I scripts -[cols="3a,8a",options="header"] -|=== - -|Script |Description - -|`assemble` -|The `assemble` script builds the application artifacts from a source and places them into appropriate directories inside the image. This script is required. The workflow for this script is: - -. Optional: Restore build artifacts. If you want to support incremental builds, make sure to define `save-artifacts` as well. -. Place the application source in the desired location. -. Build the application artifacts. -. Install the artifacts into locations appropriate for them to run. - -|`run` -|The `run` script executes your application. This script is required. - -|`save-artifacts` -|The `save-artifacts` script gathers all dependencies that can speed up the build processes that follow. This script is optional. For example: - -* For Ruby, `gems` installed by Bundler. -* For Java, `.m2` contents. - -These dependencies are gathered into a `tar` file and streamed to the standard output. - -|`usage` -|The `usage` script allows you to inform the user how to properly use your image. This script is optional. - -|`test/run` -|The `test/run` script allows you to create a process to check if the image is working correctly. This script is optional. The proposed flow of that process is: - -. Build the image. -. Run the image to verify the `usage` script. -. Run `s2i build` to verify the `assemble` script. -. Optional: Run `s2i build` again to verify the `save-artifacts` and `assemble` scripts save and restore artifacts functionality. -. Run the image to verify the test application is working. - -[NOTE] -==== -The suggested location to put the test application built by your `test/run` script is the `test/test-app` directory in your image repository. -==== -|=== - -*Example S2I scripts* - -The following example S2I scripts are written in Bash. Each example assumes its `tar` contents are unpacked into the `/tmp/s2i` directory. - -.`assemble` script: -[source,bash] ----- -#!/bin/bash - -# restore build artifacts -if [ "$(ls /tmp/s2i/artifacts/ 2>/dev/null)" ]; then - mv /tmp/s2i/artifacts/* $HOME/. 
-fi - -# move the application source -mv /tmp/s2i/src $HOME/src - -# build application artifacts -pushd ${HOME} -make all - -# install the artifacts -make install -popd ----- - -.`run` script: -[source,bash] ----- -#!/bin/bash - -# run the application -/opt/application/run.sh ----- - - -.`save-artifacts` script: -[source,bash] ----- -#!/bin/bash - -pushd ${HOME} -if [ -d deps ]; then - # all deps contents to tar stream - tar cf - deps -fi -popd ----- - -.`usage` script: -[source,bash] ----- -#!/bin/bash - -# inform the user how to use the image -cat <<EOF -This is a S2I sample builder image, to use it, install -https://github.com/openshift/source-to-image -EOF ----- - -[role="_additional-resources"] -.Additional resources -* link:https://blog.openshift.com/create-s2i-builder-image/[S2I Image Creation Tutorial] - -//// -* See the link:https://docs.docker.com/engine/reference/builder/#onbuild[Docker -documentation] for more information on `ONBUILD`. -//// diff --git a/modules/images-create-s2i.adoc b/modules/images-create-s2i.adoc deleted file mode 100644 index 401652085c01..000000000000 --- a/modules/images-create-s2i.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-strategies.adoc -// * openshift_images/create-images.adoc - -[id="images-create-s2i_{context}"] -= Creating images from source code with source-to-image - -Source-to-image (S2I) is a framework that makes it easy to write images that take application source code as an input and produce a new image that runs the assembled application as output. - -The main advantage of using S2I for building reproducible container images is the ease of use for developers. As a builder image author, you must understand two basic concepts in order for your images to provide the best S2I performance, the build process and S2I scripts. diff --git a/modules/images-custom.adoc b/modules/images-custom.adoc deleted file mode 100644 index c0f0d833fa96..000000000000 --- a/modules/images-custom.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * builds/build-strategies.adoc - -[id="images-custom_{context}"] -= Using custom builder images - -{product-title}'s custom build strategy enables you to define a specific builder image responsible for the entire build process. When you need a build to produce individual artifacts such as packages, JARs, WARs, installable ZIPs, or base images, use a custom builder image using the custom build strategy. - -A custom builder image is a plain container image embedded with build process logic, which is used for building artifacts such as RPMs or base container images. - -Additionally, the custom builder allows implementing any extended build process, such as a CI/CD flow that runs unit or integration tests. - -[id="images-custom-builder-image-ref_{context}"] -== Custom builder image - -Upon invocation, a custom builder image receives the following environment variables with the information needed to proceed with the build: - -.Custom Builder Environment Variables -[cols="1,3",options="header"] -|=== - -|Variable Name |Description - -|`BUILD` -|The entire serialized JSON of the `Build` object definition. If you must use a specific API version for serialization, you can set the `buildAPIVersion` parameter in the custom strategy specification of the build configuration. - -|`SOURCE_REPOSITORY` -|The URL of a Git repository with source to be built. - -|`SOURCE_URI` -|Uses the same value as `SOURCE_REPOSITORY`. 
Either can be used. - -|`SOURCE_CONTEXT_DIR` -|Specifies the subdirectory of the Git repository to be used when building. Only present if defined. - -|`SOURCE_REF` -|The Git reference to be built. - -|`ORIGIN_VERSION` -|The version of the {product-title} master that created this build object. - -|`OUTPUT_REGISTRY` -|The container image registry to push the image to. - -|`OUTPUT_IMAGE` -|The container image tag name for the image being built. - -|`PUSH_DOCKERCFG_PATH` -|The path to the container registry credentials for running a `podman push` operation. - -|=== - -[id="images-custom-builder-flow_{context}"] -== Custom builder workflow - -Although custom builder image authors have flexibility in defining the build process, your builder image must adhere to the following required steps necessary for running a build inside of {product-title}: - -. The `Build` object definition contains all the necessary information about input parameters for the build. -. Run the build process. -. If your build produces an image, push it to the output location of the build if it is defined. Other output locations can be passed with environment variables. diff --git a/modules/images-getting-info-about-imagestreams.adoc b/modules/images-getting-info-about-imagestreams.adoc deleted file mode 100644 index cb2f7d9262bb..000000000000 --- a/modules/images-getting-info-about-imagestreams.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-getting-info-about-imagestreams_{context}"] -= Getting information about image streams - -You can get general information about the image stream and detailed information about all the tags it is pointing to. - -.Procedure - -* To get general information about the image stream and detailed information about all the tags it is pointing to, enter the following command: -+ -[source,terminal] ----- -$ oc describe is/<image-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe is/python ----- -+ -.Example output -[source,terminal] ----- -Name: python -Namespace: default -Created: About a minute ago -Labels: <none> -Annotations: openshift.io/image.dockerRepositoryCheck=2017-10-02T17:05:11Z -Docker Pull Spec: docker-registry.default.svc:5000/default/python -Image Lookup: local=false -Unique Images: 1 -Tags: 1 - -3.5 - tagged from centos/python-35-centos7 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - About a minute ago ----- - -* To get all of the information available about a particular image stream tag, enter the following command: -+ -[source,terminal] ----- -$ oc describe istag/<image-stream>:<tag-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe istag/python:latest ----- -+ -.Example output -[source,terminal] ----- -Image Name: sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Docker Image: centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Name: sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 -Created: 2 minutes ago -Image Size: 251.2 MB (first layer 2.898 MB, last binary layer 72.26 MB) -Image Created: 2 weeks ago -Author: <none> -Arch: amd64 -Entrypoint: container-entrypoint -Command: /bin/sh -c $STI_SCRIPTS_PATH/usage -Working Dir: /opt/app-root/src -User: 1001 -Exposes Ports: 8080/tcp -Docker Labels: build-date=20170801 ----- -+ -[NOTE] -==== -More information is output than shown. 
-==== - -* Enter the following command to discover which architecture or operating system that an image stream tag supports: -+ -[source,terminal] ----- -$ oc get istag <image-stream-tag> -ojsonpath="{range .image.dockerImageManifests[*]}{.os}/{.architecture}{'\n'}{end}" ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get istag busybox:latest -ojsonpath="{range .image.dockerImageManifests[*]}{.os}/{.architecture}{'\n'}{end}" ----- -+ -.Example output -[source,terminal] ----- -linux/amd64 -linux/arm -linux/arm64 -linux/386 -linux/mips64le -linux/ppc64le -linux/riscv64 -linux/s390x ----- \ No newline at end of file diff --git a/modules/images-id.adoc b/modules/images-id.adoc deleted file mode 100644 index b78a5cf67c01..000000000000 --- a/modules/images-id.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-id_{context}"] -= Image IDs - -An image ID is a SHA (Secure Hash Algorithm) code that can be used to pull an image. A SHA image ID cannot change. A specific SHA identifier always references the exact same container image content. For example: - -[source,text] ----- -docker.io/openshift/jenkins-2-centos7@sha256:ab312bda324 ----- diff --git a/modules/images-image-pull-policy-overview.adoc b/modules/images-image-pull-policy-overview.adoc deleted file mode 100644 index e17c0746dc30..000000000000 --- a/modules/images-image-pull-policy-overview.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-pull-policy - -:_content-type: CONCEPT -[id="images-image-pull-policy-overview_{context}"] -= Image pull policy overview - -When {product-title} creates containers, it uses the container `imagePullPolicy` to determine if the image should be pulled prior to starting the container. There are three possible values for `imagePullPolicy`: - -.`imagePullPolicy` values -[width="50%",options="header"] -|=== -|Value |Description - -|`Always` -|Always pull the image. - -|`IfNotPresent` -|Only pull the image if it does not already exist on the node. - -|`Never` -|Never pull the image. -|=== - - -If a container `imagePullPolicy` parameter is not specified, {product-title} sets it based on the image tag: - -. If the tag is `latest`, {product-title} defaults `imagePullPolicy` to `Always`. -. Otherwise, {product-title} defaults `imagePullPolicy` to `IfNotPresent`. diff --git a/modules/images-image-registry-about.adoc b/modules/images-image-registry-about.adoc deleted file mode 100644 index 9df7272c9ff6..000000000000 --- a/modules/images-image-registry-about.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.adoc - -[id="images-image-registry-about_{context}"] -= Image registry - -An image registry is a content server that can store and serve container images. For example: - -[source,text] ----- -registry.redhat.io ----- - -A registry contains a collection of one or more image repositories, which contain one or more tagged images. Red Hat provides a registry at `registry.redhat.io` for subscribers. {product-title} can also supply its own {product-registry} for managing custom container images. 
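To relate the `imagePullPolicy` values described above to a concrete manifest, the following minimal pod sketch sets the policy explicitly. The image reference is a hypothetical placeholder; if `imagePullPolicy` were omitted here, the non-`latest` tag would default to `IfNotPresent`:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: pull-policy-example
spec:
  containers:
  - name: example
    image: quay.io/example/app:1.2 # hypothetical image; a non-latest tag defaults to IfNotPresent
    imagePullPolicy: Always # explicitly pull the image every time the container starts
----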
diff --git a/modules/images-imagestream-adding-tags.adoc b/modules/images-imagestream-adding-tags.adoc deleted file mode 100644 index 5965b854d45b..000000000000 --- a/modules/images-imagestream-adding-tags.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-adding-tags_{context}"] -= Adding tags to an image stream - -You can add additional tags to image streams. - -.Procedure - -* Add a tag that points to one of the existing tags by using the `oc tag`command: -+ -[source,terminal] ----- -$ oc tag <image-name:tag1> <image-name:tag2> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag python:3.5 python:latest ----- -+ -.Example output -[source,terminal] ----- -Tag python:latest set to python@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25. ----- - -* Confirm the image stream has two tags, one, `3.5`, pointing at the external container image and another tag, `latest`, pointing to the same image because it was created based on the first tag. -+ -[source,terminal] ----- -$ oc describe is/python ----- -+ -.Example output -[source,terminal] ----- -Name: python -Namespace: default -Created: 5 minutes ago -Labels: <none> -Annotations: openshift.io/image.dockerRepositoryCheck=2017-10-02T17:05:11Z -Docker Pull Spec: docker-registry.default.svc:5000/default/python -Image Lookup: local=false -Unique Images: 1 -Tags: 2 - -latest - tagged from python@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - About a minute ago - -3.5 - tagged from centos/python-35-centos7 - - * centos/python-35-centos7@sha256:49c18358df82f4577386404991c51a9559f243e0b1bdc366df25 - 5 minutes ago ----- diff --git a/modules/images-imagestream-configure.adoc b/modules/images-imagestream-configure.adoc deleted file mode 100644 index d401be2713c0..000000000000 --- a/modules/images-imagestream-configure.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-configure_{context}"] -= Configuring image streams - -An `ImageStream` object file contains the following elements. - -[id="image-stream-object-definition_{context}"] -.Imagestream object definition - -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - annotations: - openshift.io/generated-by: OpenShiftNewApp - labels: - app: ruby-sample-build - template: application-template-stibuild - name: origin-ruby-sample <1> - namespace: test -spec: {} -status: - dockerImageRepository: 172.30.56.218:5000/test/origin-ruby-sample <2> - tags: - - items: - - created: 2017-09-02T10:15:09Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d <3> - generation: 2 - image: sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 <4> - - created: 2017-09-01T13:40:11Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - generation: 1 - image: sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - tag: latest <5> ----- - -<1> The name of the image stream. -<2> Docker repository path where new images can be pushed to add or update them in this image stream. 
<3> The SHA identifier that this image stream tag currently references. Resources that reference this image stream tag use this identifier. -<4> The SHA identifier that this image stream tag previously referenced. Can be used to roll back to an older image. -<5> The image stream tag name. diff --git a/modules/images-imagestream-external-image-tags.adoc b/modules/images-imagestream-external-image-tags.adoc deleted file mode 100644 index 6cd16d53c01b..000000000000 --- a/modules/images-imagestream-external-image-tags.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-external-image-tags_{context}"] -= Adding tags for an external image - -You can add tags for external images. - -.Procedure - -* Add tags pointing to internal or external images by using the `oc tag` command for all tag-related operations: -+ -[source,terminal] ----- -$ oc tag <repository/image> <image-name:tag> ----- -+ -For example, this command maps the `docker.io/python:3.6.0` image to the `3.6` tag in the `python` image stream. -+ -[source,terminal] ----- -$ oc tag docker.io/python:3.6.0 python:3.6 ----- -+ -.Example output -[source,terminal] ----- -Tag python:3.6 set to docker.io/python:3.6.0. ----- -+ -If the external image is secured, you must create a secret with credentials for accessing that registry. diff --git a/modules/images-imagestream-image.adoc b/modules/images-imagestream-image.adoc deleted file mode 100644 index 4edd0f74a7b5..000000000000 --- a/modules/images-imagestream-image.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-image_{context}"] -= Image stream images - -An image stream image allows you to retrieve a specific container image from a particular image stream where it is tagged. An image stream image is an API resource object that pulls together some metadata about a particular image SHA identifier. diff --git a/modules/images-imagestream-import-images-private-registry.adoc b/modules/images-imagestream-import-images-private-registry.adoc deleted file mode 100644 index 052da8c0cc8b..000000000000 --- a/modules/images-imagestream-import-images-private-registry.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// * assembly/openshift_images/managing-image-streams.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import-images-private-registry_{context}"] -= Importing images and image streams from private registries - -An image stream can be configured to import tag and image metadata from private image registries that require authentication. This procedure applies if you change the registry that the Cluster Samples Operator uses to pull content from to something other than link:https://registry.redhat.io[registry.redhat.io]. - -[NOTE] -==== -When importing from insecure or secure registries, the registry URL defined in the secret must include the `:80` port suffix or the secret is not used when attempting to import from the registry. -==== - -.Procedure - -. Create a `secret` object to store your credentials by entering the following command: -+ -[source,terminal] ----- -$ oc create secret generic <secret_name> --from-file=.dockerconfigjson=<file_absolute_path> --type=kubernetes.io/dockerconfigjson ----- -+ -.
After the secret is configured, create the new image stream or enter the `oc import-image` command: -+ -[source,terminal] ----- -$ oc import-image <imagestreamtag> --from=<image> --confirm ----- -+ -During the import process, {product-title} picks up the secrets and provides them to the remote party. diff --git a/modules/images-imagestream-import-import-mode.adoc b/modules/images-imagestream-import-import-mode.adoc deleted file mode 100644 index 13824e32806c..000000000000 --- a/modules/images-imagestream-import-import-mode.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import-import-mode_{context}"] -= Working with manifest lists - -You can import a single sub-manifest, or all manifests, of a manifest list when using `oc import-image` or `oc tag` CLI commands by adding the `--import-mode` flag. - -Refer to the commands below to create an image stream that includes a single sub-manifest or multi-architecture images. - -.Procedure - -* Create an image stream that includes multi-architecture images, and sets the import mode to `PreserveOriginal`, by entering the following command: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> \ ---import-mode='PreserveOriginal' --reference-policy=local --confirm ----- -+ -.Example output -+ -[source,terminal] ----- ---- -Arch: <none> -Manifests: linux/amd64 sha256:6e325b86566fafd3c4683a05a219c30c421fbccbf8d87ab9d20d4ec1131c3451 - linux/arm64 sha256:d8fad562ffa75b96212c4a6dc81faf327d67714ed85475bf642729703a2b5bf6 - linux/ppc64le sha256:7b7e25338e40d8bdeb1b28e37fef5e64f0afd412530b257f5b02b30851f416e1 ---- ----- - -* Alternatively, enter the following command to import an image with the `Legacy` import mode, which discards manifest lists and imports a single sub-manifest: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> \ ---import-mode='Legacy' --confirm ----- -+ -[NOTE] -==== -The `--import-mode=` default value is `Legacy`. Excluding this value, or failing to specify either `Legacy` or `PreserveOriginal`, imports a single sub-manifest. An invalid import mode returns the following error: `error: valid ImportMode values are Legacy or PreserveOriginal`. -==== - -[discrete] -[id="images-imagestream-import-import-mode-limitations"] -== Limitations - -Working with manifest lists has the following limitations: - -* In some cases, users might want to use sub-manifests directly. When `oc adm prune images` is run, or the `CronJob` pruner runs, they cannot detect when a sub-manifest list is used. As a result, an administrator using `oc adm prune images`, or the `CronJob` pruner, might delete entire manifest lists, including sub-manifests. -+ -To avoid this limitation, you can use the manifest list by tag or by digest instead. 
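After an import with `--import-mode='PreserveOriginal'`, one way to confirm that the sub-manifests were preserved is to inspect the image stream tag with the same `dockerImageManifests` query shown earlier in this guide; the image stream tag name is a placeholder:

[source,terminal]
----
$ oc get istag <multiarch-image-stream-tag> -ojsonpath="{range .image.dockerImageManifests[*]}{.os}/{.architecture}{'\n'}{end}"
----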
diff --git a/modules/images-imagestream-import.adoc b/modules/images-imagestream-import.adoc deleted file mode 100644 index 5e3519586319..000000000000 --- a/modules/images-imagestream-import.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-import_{context}"] -= Configuring periodic importing of image stream tags - -When working with an external container image registry, to periodically re-import an image, for example to get the latest security updates, you can use the `--scheduled` flag. - -.Procedure - -. Schedule importing images: -+ -[source,terminal] ----- -$ oc tag <repository/image> <image-name:tag> --scheduled ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag docker.io/python:3.6.0 python:3.6 --scheduled ----- -+ -.Example output -[source,terminal] ----- -Tag python:3.6 set to import docker.io/python:3.6.0 periodically. ----- -+ -This command causes {product-title} to periodically update this particular image stream tag. This period is a cluster-wide setting set to 15 minutes by default. - -. To remove the periodic check, re-run the above command but omit the `--scheduled` flag. This resets the behavior to the default. -+ -[source,terminal] ----- -$ oc tag <repository/image> <image-name:tag> ----- \ No newline at end of file diff --git a/modules/images-imagestream-mapping.adoc b/modules/images-imagestream-mapping.adoc deleted file mode 100644 index 305bcde104f2..000000000000 --- a/modules/images-imagestream-mapping.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-mapping_{context}"] -= Image stream mapping - -When the integrated registry receives a new image, it creates and sends an image stream mapping to {product-title}, providing the image's project, name, tag, and image metadata. - -[NOTE] -==== -Configuring image stream mappings is an advanced feature. -==== - -This information is used to create a new image, if it does not already exist, and to tag the image into the image stream. {product-title} stores complete metadata about each image, such as commands, entry point, and environment variables. Images in {product-title} are immutable and the maximum name length is 63 characters.
- -The following image stream mapping example results in an image being tagged as `test/origin-ruby-sample:latest`: - -.Image stream mapping object definition - -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStreamMapping -metadata: - creationTimestamp: null - name: origin-ruby-sample - namespace: test -tag: latest -image: - dockerImageLayers: - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:ee1dd2cb6df21971f4af6de0f1d7782b81fb63156801cfde2bb47b4247c23c29 - size: 196634330 - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef - size: 0 - - name: sha256:ca062656bff07f18bff46be00f40cfbb069687ec124ac0aa038fd676cfaea092 - size: 177723024 - - name: sha256:63d529c59c92843c395befd065de516ee9ed4995549f8218eac6ff088bfa6b6e - size: 55679776 - - name: sha256:92114219a04977b5563d7dff71ec4caa3a37a15b266ce42ee8f43dba9798c966 - size: 11939149 - dockerImageMetadata: - Architecture: amd64 - Config: - Cmd: - - /usr/libexec/s2i/run - Entrypoint: - - container-entrypoint - Env: - - RACK_ENV=production - - OPENSHIFT_BUILD_NAMESPACE=test - - OPENSHIFT_BUILD_SOURCE=https://github.com/openshift/ruby-hello-world.git - - EXAMPLE=sample-app - - OPENSHIFT_BUILD_NAME=ruby-sample-build-1 - - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - STI_SCRIPTS_URL=image:///usr/libexec/s2i - - STI_SCRIPTS_PATH=/usr/libexec/s2i - - HOME=/opt/app-root/src - - BASH_ENV=/opt/app-root/etc/scl_enable - - ENV=/opt/app-root/etc/scl_enable - - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - - RUBY_VERSION=2.2 - ExposedPorts: - 8080/tcp: {} - Labels: - build-date: 2015-12-23 - io.k8s.description: Platform for building and running Ruby 2.2 applications - io.k8s.display-name: 172.30.56.218:5000/test/origin-ruby-sample:latest - io.openshift.build.commit.author: Ben Parees <bparees@users.noreply.github.com> - io.openshift.build.commit.date: Wed Jan 20 10:14:27 2016 -0500 - io.openshift.build.commit.id: 00cadc392d39d5ef9117cbc8a31db0889eedd442 - io.openshift.build.commit.message: 'Merge pull request #51 from php-coder/fix_url_and_sti' - io.openshift.build.commit.ref: master - io.openshift.build.image: centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e - io.openshift.build.source-location: https://github.com/openshift/ruby-hello-world.git - io.openshift.builder-base-version: 8d95148 - io.openshift.builder-version: 8847438ba06307f86ac877465eadc835201241df - io.openshift.s2i.scripts-url: image:///usr/libexec/s2i - io.openshift.tags: builder,ruby,ruby22 - io.s2i.scripts-url: image:///usr/libexec/s2i - license: GPLv2 - name: CentOS Base Image - vendor: CentOS - User: "1001" - WorkingDir: /opt/app-root/src - Container: 86e9a4a3c760271671ab913616c51c9f3cea846ca524bf07c04a6f6c9e103a76 - ContainerConfig: - AttachStdout: true - Cmd: - - /bin/sh - - -c - - tar -C /tmp -xf - && /usr/libexec/s2i/assemble - Entrypoint: - - container-entrypoint - Env: - - RACK_ENV=production - - OPENSHIFT_BUILD_NAME=ruby-sample-build-1 - - OPENSHIFT_BUILD_NAMESPACE=test - - OPENSHIFT_BUILD_SOURCE=https://github.com/openshift/ruby-hello-world.git - - EXAMPLE=sample-app - - PATH=/opt/app-root/src/bin:/opt/app-root/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - - STI_SCRIPTS_URL=image:///usr/libexec/s2i - - STI_SCRIPTS_PATH=/usr/libexec/s2i - - 
HOME=/opt/app-root/src - - BASH_ENV=/opt/app-root/etc/scl_enable - - ENV=/opt/app-root/etc/scl_enable - - PROMPT_COMMAND=. /opt/app-root/etc/scl_enable - - RUBY_VERSION=2.2 - ExposedPorts: - 8080/tcp: {} - Hostname: ruby-sample-build-1-build - Image: centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e - OpenStdin: true - StdinOnce: true - User: "1001" - WorkingDir: /opt/app-root/src - Created: 2016-01-29T13:40:00Z - DockerVersion: 1.8.2.fc21 - Id: 9d7fd5e2d15495802028c569d544329f4286dcd1c9c085ff5699218dbaa69b43 - Parent: 57b08d979c86f4500dc8cad639c9518744c8dd39447c055a3517dc9c18d6fccd - Size: 441976279 - apiVersion: "1.0" - kind: DockerImage - dockerImageMetadataVersion: "1.0" - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d ----- diff --git a/modules/images-imagestream-periodic-import-list.adoc b/modules/images-imagestream-periodic-import-list.adoc deleted file mode 100644 index c736f6c4405d..000000000000 --- a/modules/images-imagestream-periodic-import-list.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-periodic-import-list_{context}"] -= Configuring periodic importing of manifest lists - -To periodically re-import a manifest list, you can use the `--scheduled` flag. - -.Procedure - -* Set the image stream to periodically update the manifest list by entering the following command: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> \ ---import-mode='PreserveOriginal' --scheduled=true ----- \ No newline at end of file diff --git a/modules/images-imagestream-remove-tag.adoc b/modules/images-imagestream-remove-tag.adoc deleted file mode 100644 index 7a7e94d53003..000000000000 --- a/modules/images-imagestream-remove-tag.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-remove-tag_{context}"] -= Removing image stream tags - -You can remove old tags from an image stream. - -.Procedure - -* Remove old tags from an image stream: -+ -[source,terminal] ----- -$ oc tag -d <image-name:tag> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc tag -d python:3.5 ----- -+ -.Example output -[source,terminal] ----- -Deleted tag default/python:3.5. 
---- diff --git a/modules/images-imagestream-specify-architecture.adoc b/modules/images-imagestream-specify-architecture.adoc deleted file mode 100644 index bc3d6dbe1dad..000000000000 --- a/modules/images-imagestream-specify-architecture.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// * assembly/openshift_images/managing-image-streams.adoc - -:_content-type: CONCEPT -[id="images-imagestream-specify-architecture_{context}"] -= Specifying architecture for --import-mode - -You can swap your imported image stream between multi-architecture and single architecture by excluding or including the `--import-mode=` flag. - -.Procedure - -* Run the following command to update your image stream from multi-architecture to single architecture by excluding the `--import-mode=` flag: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> ----- - -* Run the following command to update your image stream from single-architecture to multi-architecture: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> \ ---import-mode='PreserveOriginal' ----- \ No newline at end of file diff --git a/modules/images-imagestream-ssl-import-list.adoc b/modules/images-imagestream-ssl-import-list.adoc deleted file mode 100644 index 2546dd13f834..000000000000 --- a/modules/images-imagestream-ssl-import-list.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-ssl-import-list_{context}"] -= Configuring SSL/TLS when importing manifest lists - -To configure SSL/TLS when importing a manifest list, you can use the `--insecure` flag. - -.Procedure - -* Set `--insecure=true` so that importing a manifest list skips SSL/TLS verification. For example: -+ -[source,terminal] ----- -$ oc import-image <multiarch-image-stream-tag> --from=<registry>/<project_name>/<image-name> \ ---import-mode='PreserveOriginal' --insecure=true ----- \ No newline at end of file diff --git a/modules/images-imagestream-tag.adoc b/modules/images-imagestream-tag.adoc deleted file mode 100644 index e2a7387ade13..000000000000 --- a/modules/images-imagestream-tag.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-tag_{context}"] -= Image stream tags - -An image stream tag is a named pointer to an image in an image stream. An image stream tag is similar to a container image tag. diff --git a/modules/images-imagestream-trigger.adoc b/modules/images-imagestream-trigger.adoc deleted file mode 100644 index 256dadcf7f39..000000000000 --- a/modules/images-imagestream-trigger.adoc +++ /dev/null @@ -1,7 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc - -[id="images-imagestream-trigger_{context}"] -= Image stream triggers - -An image stream trigger causes a specific action when an image stream tag changes. For example, importing can cause the value of the tag to change, which causes a trigger to fire when there are deployments, builds, or other resources listening for those changes.
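As a sketch of how a resource can listen for such a change, the following hypothetical example places the `image.openshift.io/triggers` annotation on a Kubernetes `Deployment` so that the named container image is updated whenever the referenced image stream tag changes. All names are placeholders, and the annotation value is only a minimal illustration of the trigger format:

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example
  annotations:
    image.openshift.io/triggers: |-
      [{"from":{"kind":"ImageStreamTag","name":"example:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"example\")].image"}]
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: example
        image: example:latest # rewritten automatically when the image stream tag changes
----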
diff --git a/modules/images-imagestream-update-tag.adoc b/modules/images-imagestream-update-tag.adoc deleted file mode 100644 index 4ec866a23446..000000000000 --- a/modules/images-imagestream-update-tag.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -:_content-type: PROCEDURE -[id="images-imagestream-update-tag_{context}"] -= Updating image stream tags - -You can update a tag to reflect another tag in an image stream. - -.Procedure - -* Update a tag: -+ -[source,terminal] ----- -$ oc tag <image-name:tag> <image-name:latest> ----- -+ -For example, the following updates the `latest` tag to reflect the `3.6` tag in an image stream: -+ -[source,terminal] ----- -$ oc tag python:3.6 python:latest ----- -+ -.Example output -[source,terminal] ----- -Tag python:latest set to python@sha256:438208801c4806548460b27bd1fbcb7bb188273d13871ab43f. ----- diff --git a/modules/images-imagestream-use.adoc b/modules/images-imagestream-use.adoc deleted file mode 100644 index 853a45d1b5fb..000000000000 --- a/modules/images-imagestream-use.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.aodc -// * openshift_images/image-streams-managing.adoc - -[id="images-imagestream-use_{context}"] -= Why use imagestreams - -An image stream and its associated tags provide an abstraction for referencing container images from within {product-title}. The image stream and its tags allow you to see what images are available and ensure that you are using the specific image you need even if the image in the repository changes. - -Image streams do not contain actual image data, but present a single virtual view of related images, similar to an image repository. - -You can configure builds and deployments to watch an image stream for notifications when new images are added and react by performing a build or deployment, respectively. - -For example, if a deployment is using a certain image and a new version of that image is created, a deployment could be automatically performed to pick up the new version of the image. - -However, if the image stream tag used by the deployment or build is not updated, then even if the container image in the container image registry is updated, the build or deployment continues using the previous, presumably known good -image. - -The source images can be stored in any of the following: - -* {product-title}'s integrated registry. -* An external registry, for example registry.redhat.io or quay.io. -* Other image streams in the {product-title} cluster. - -When you define an object that references an image stream tag, such as a build or deployment configuration, you point to an image stream tag and not the repository. When you build or deploy your application, {product-title} queries the repository using the image stream tag to locate the associated ID of the image and uses that exact image. - -The image stream metadata is stored in the etcd instance along with other cluster information. - -Using image streams has several significant benefits: - -* You can tag, rollback a tag, and quickly deal with images, without having to re-push using the command line. - -* You can trigger builds and deployments when a new image is pushed to the registry. Also, {product-title} has generic triggers for other resources, such as Kubernetes objects. - -* You can mark a tag for periodic re-import. 
If the source image has changed, that change is picked up and reflected in the image stream, which triggers the build or deployment flow, depending upon the build or deployment configuration. - -* You can share images using fine-grained access control and quickly distribute images across your teams. - -* If the source image changes, the image stream tag still points to a known-good version of the image, ensuring that your application do not break unexpectedly. - -* You can configure security around who can view and use the images through permissions on the image stream objects. - -* Users that lack permission to read or list images on the cluster level can still retrieve the images tagged in a project using image streams. diff --git a/modules/images-managing-images-enabling-imagestreams-kube.adoc b/modules/images-managing-images-enabling-imagestreams-kube.adoc deleted file mode 100644 index ba537f178b16..000000000000 --- a/modules/images-managing-images-enabling-imagestreams-kube.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/managing-images/using-imagestreams-with-kube-resources.adoc - - -:_content-type: PROCEDURE -[id="images-managing-images-enabling-imagestreams-kube_{context}"] -= Enabling image streams with Kubernetes resources - -When using image streams with Kubernetes resources, you can only reference image streams that reside in the same project as the resource. The image stream reference must consist of a single segment value, for example `ruby:2.5`, where `ruby` is the name of an image stream that has a tag named `2.5` and resides in the same project as the resource making the reference. - -[NOTE] -==== -This feature can not be used in the `default` namespace, nor in any `openshift-` or `kube-` namespace. -==== - -There are two ways to enable image streams with Kubernetes resources: - -* Enabling image stream resolution on a specific resource. This allows only this resource to use the image stream name in the image field. -* Enabling image stream resolution on an image stream. This allows all resources pointing to this image stream to use it in the image field. - -.Procedure - -You can use `oc set image-lookup` to enable image stream resolution on a specific resource or image stream resolution on an image stream. - -. To allow all resources to reference the image stream named `mysql`, enter the following command: -+ -[source,terminal] ----- -$ oc set image-lookup mysql ----- -+ -This sets the `Imagestream.spec.lookupPolicy.local` field to true. -+ -.Imagestream with image lookup enabled -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - annotations: - openshift.io/display-name: mysql - name: mysql - namespace: myproject -spec: - lookupPolicy: - local: true ----- -+ -When enabled, the behavior is enabled for all tags within the image stream. -+ -. Then you can query the image streams and see if the option is set: -+ -[source,terminal] ----- -$ oc set image-lookup imagestream --list ----- - -You can enable image lookup on a specific resource. - -* To allow the Kubernetes deployment named `mysql` to use image streams, run the following command: -+ -[source,terminal] ----- -$ oc set image-lookup deploy/mysql ----- -+ -This sets the `alpha.image.policy.openshift.io/resolve-names` annotation -on the deployment. 
-+ -.Deployment with image lookup enabled -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mysql - namespace: myproject -spec: - replicas: 1 - template: - metadata: - annotations: - alpha.image.policy.openshift.io/resolve-names: '*' - spec: - containers: - - image: mysql:latest - imagePullPolicy: Always - name: mysql ----- - -You can disable image lookup. - -* To disable image lookup, pass `--enabled=false`: -+ -[source,terminal] ----- -$ oc set image-lookup deploy/mysql --enabled=false ----- diff --git a/modules/images-managing-overview.adoc b/modules/images-managing-overview.adoc deleted file mode 100644 index 7de251d923fe..000000000000 --- a/modules/images-managing-overview.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift-images/managing-images.adoc - -:_content-type: CONCEPT -[id="images-managing-overview_{context}"] -= Images overview - -An image stream comprises any number of container images identified by tags. It presents a single virtual view of related images, similar to a container image repository. - -By watching an image stream, builds and deployments can receive notifications when new images are added or modified and react by performing a build or deployment, respectively. diff --git a/modules/images-other-jenkins-agent-env-var.adoc b/modules/images-other-jenkins-agent-env-var.adoc deleted file mode 100644 index 37dafbad9e2e..000000000000 --- a/modules/images-other-jenkins-agent-env-var.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-env-var_{context}"] -= Jenkins agent environment variables - -Each Jenkins agent container can be configured with the following environment variables. - -[options="header"] -|=== -| Variable | Definition | Example values and settings - -|`JAVA_MAX_HEAP_PARAM`, -`CONTAINER_HEAP_PERCENT`, -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` -|These values control the maximum heap size of the Jenkins JVM. If `JAVA_MAX_HEAP_PARAM` is set, its value takes precedence. Otherwise, the maximum heap size is dynamically calculated as `CONTAINER_HEAP_PERCENT` of the container memory limit, optionally capped at `JENKINS_MAX_HEAP_UPPER_BOUND_MB` MiB. - -By default, the maximum heap size of the Jenkins JVM is set to 50% of the container memory limit with no cap. -|`JAVA_MAX_HEAP_PARAM` example setting: `-Xmx512m` - -`CONTAINER_HEAP_PERCENT` default: `0.5`, or 50% - -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` example setting: `512 MiB` - -|`JAVA_INITIAL_HEAP_PARAM`, -`CONTAINER_INITIAL_PERCENT` -|These values control the initial heap size of the Jenkins JVM. If `JAVA_INITIAL_HEAP_PARAM` is set, its value takes precedence. Otherwise, the initial heap size is dynamically calculated as `CONTAINER_INITIAL_PERCENT` of the dynamically calculated maximum heap size. - -By default, the JVM sets the initial heap size. -|`JAVA_INITIAL_HEAP_PARAM` example setting: `-Xms32m` - -`CONTAINER_INITIAL_PERCENT` example setting: `0.1`, or 10% - -|`CONTAINER_CORE_LIMIT` -|If set, specifies an integer number of cores used for sizing numbers of internal -JVM threads. -|Example setting: `2` - -|`JAVA_TOOL_OPTIONS` -|Specifies options to apply to all JVMs running in this container. It is not recommended to override this value. 
-|Default: `-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true` - -|`JAVA_GC_OPTS` -|Specifies Jenkins JVM garbage collection parameters. It is not recommended to override this value. -|Default: `-XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90` - -|`JENKINS_JAVA_OVERRIDES` -|Specifies additional options for the Jenkins JVM. These options are appended to all other options, including the Java options above, and can be used to override any of them, if necessary. Separate each additional option with a space and if any option contains space characters, escape them with a backslash. -|Example settings: `-Dfoo -Dbar`; `-Dfoo=first\ value -Dbar=second\ value` - -|`USE_JAVA_VERSION` -|Specifies the version of Java version to use to run the agent in its container. The container base image has two versions of java installed: `java-11` and `java-1.8.0`. If you extend the container base image, you can specify any alternative version of java using its associated suffix. -|The default value is `java-11`. - -Example setting: `java-1.8.0` - -|=== diff --git a/modules/images-other-jenkins-agent-gradle.adoc b/modules/images-other-jenkins-agent-gradle.adoc deleted file mode 100644 index 03e94695f8b3..000000000000 --- a/modules/images-other-jenkins-agent-gradle.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-gradle_{context}"] -= Jenkins agent Gradle builds - -Hosting Gradle builds in the Jenkins agent on {product-title} presents additional complications because in addition to the Jenkins JNLP agent and Gradle JVMs, Gradle spawns a third JVM to run tests if they are specified. - - -The following settings are suggested as a starting point for running Gradle builds in a memory constrained Jenkins agent on {product-title}. You can modify these settings as required. - -* Ensure the long-lived Gradle daemon is disabled by adding `org.gradle.daemon=false` to the `gradle.properties` file. -* Disable parallel build execution by ensuring `org.gradle.parallel=true` is not set in the `gradle.properties` file and that `--parallel` is not set as a command line argument. -* To prevent Java compilations running out-of-process, set `java { options.fork = false }` in the `build.gradle` file. -* Disable multiple additional test processes by ensuring `test { maxParallelForks = 1 }` is set in the `build.gradle` file. -* Override the Gradle JVM memory parameters by the `GRADLE_OPTS`, `JAVA_OPTS` or `JAVA_TOOL_OPTIONS` environment variables. -* Set the maximum heap size and JVM arguments for any Gradle test JVM by defining the `maxHeapSize` and `jvmArgs` settings in `build.gradle`, or through the `-Dorg.gradle.jvmargs` command line argument. diff --git a/modules/images-other-jenkins-agent-images.adoc b/modules/images-other-jenkins-agent-images.adoc deleted file mode 100644 index 33bb1b484a59..000000000000 --- a/modules/images-other-jenkins-agent-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-images_{context}"] -= Jenkins agent images - -The {product-title} Jenkins agent images are available on link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io]. 
- -Jenkins images are available through the Red Hat Registry: - -[source,terminal] ----- -$ docker pull registry.redhat.io/ocp-tools-4/jenkins-rhel8:<image_tag> ----- - -[source,terminal] ----- -$ docker pull registry.redhat.io/ocp-tools-4/jenkins-agent-base-rhel8:<image_tag> ----- - -To use these images, you can either access them directly from link:https://quay.io[Quay.io] or link:https://registry.redhat.io[registry.redhat.io] or push them into your {product-title} container image registry. diff --git a/modules/images-other-jenkins-agent-memory.adoc b/modules/images-other-jenkins-agent-memory.adoc deleted file mode 100644 index e3bff2646037..000000000000 --- a/modules/images-other-jenkins-agent-memory.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-agent-memory_{context}"] -= Jenkins agent memory requirements - -A JVM is used in all Jenkins agents to host the Jenkins JNLP agent as well as to run any Java applications such as `javac`, Maven, or Gradle. - -By default, the Jenkins JNLP agent JVM uses 50% of the container memory limit for its heap. This value can be modified by the `CONTAINER_HEAP_PERCENT` environment variable. It can also be capped at an upper limit or overridden entirely. - -By default, any other processes run in the Jenkins agent container, such as shell scripts or `oc` commands run from pipelines, cannot use more than the remaining 50% memory limit without provoking an OOM kill. - -By default, each further JVM process that runs in a Jenkins agent container uses up to 25% of the container memory limit for its heap. It might be necessary to tune this limit for many build workloads. diff --git a/modules/images-other-jenkins-agent-pod-retention.adoc b/modules/images-other-jenkins-agent-pod-retention.adoc deleted file mode 100644 index 74e36a839b96..000000000000 --- a/modules/images-other-jenkins-agent-pod-retention.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins-agent.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-agent-pod-retention_{context}"] -= Jenkins agent pod retention - -Jenkins agent pods, are deleted by default after the build completes or is stopped. This behavior can be changed by the Kubernetes plugin pod retention setting. Pod retention can be set for all Jenkins builds, with overrides for each pod template. The following behaviors are supported: - -* `Always` keeps the build pod regardless of build result. -* `Default` uses the plugin value, which is the pod template only. -* `Never` always deletes the pod. -* `On Failure` keeps the pod if it fails during the build. - -You can override pod retention in the pipeline Jenkinsfile: - -[source,groovy] ----- -podTemplate(label: "mypod", - cloud: "openshift", - inheritFrom: "maven", - podRetention: onFailure(), <1> - containers: [ - ... - ]) { - node("mypod") { - ... - } -} ----- -<1> Allowed values for `podRetention` are `never()`, `onFailure()`, `always()`, and `default()`. - -[WARNING] -==== -Pods that are kept might continue to run and count against resource quotas. 
-==== diff --git a/modules/images-other-jenkins-auth.adoc b/modules/images-other-jenkins-auth.adoc deleted file mode 100644 index 67592c60a648..000000000000 --- a/modules/images-other-jenkins-auth.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-auth_{context}"] -= Jenkins authentication - -Jenkins authentication is used by default if the image is run directly, without using a template. - -The first time Jenkins starts, the configuration is created along with the administrator user and password. The default user credentials are `admin` and `password`. Configure the default password by setting the `JENKINS_PASSWORD` environment variable when using, and only when using, standard Jenkins authentication. - -.Procedure - -* Create a Jenkins application that uses standard Jenkins authentication: -+ -[source,terminal] ----- -$ oc new-app -e \ - JENKINS_PASSWORD=<password> \ - ocp-tools-4/jenkins-rhel8 ----- diff --git a/modules/images-other-jenkins-config-kubernetes.adoc b/modules/images-other-jenkins-config-kubernetes.adoc deleted file mode 100644 index faad540c5695..000000000000 --- a/modules/images-other-jenkins-config-kubernetes.adoc +++ /dev/null @@ -1,171 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-config-kubernetes_{context}"] -= Configuring the Jenkins Kubernetes plugin - -The OpenShift Jenkins image includes the pre-installed link:https://wiki.jenkins-ci.org/display/JENKINS/Kubernetes+Plugin[Kubernetes plugin for Jenkins] so that Jenkins agents can be dynamically provisioned on multiple container hosts using Kubernetes and {product-title}. - -To use the Kubernetes plugin, {product-title} provides an OpenShift Agent Base image that is suitable for use as a Jenkins agent. - -[IMPORTANT] -==== -{product-title} 4.11 moves the OpenShift Jenkins and OpenShift Agent Base images to the `ocp-tools-4` repository at `registry.redhat.io` so that Red Hat can produce and update the images outside the {product-title} lifecycle. Previously, these images were in the {product-title} install payload and the `openshift4` repository at `registry.redhat.io`. - -The OpenShift Jenkins Maven and NodeJS Agent images were removed from the {product-title} 4.11 payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -For more information, see the "Important changes to OpenShift Jenkins images" link in the following "Additional resources" section. -==== - -The Maven and Node.js agent images are automatically configured as Kubernetes pod template images within the {product-title} Jenkins image configuration for the Kubernetes plugin. That configuration includes labels for each image that you can apply to any of your Jenkins jobs under their `Restrict where this project can be run` setting. If the label is applied, jobs run under an {product-title} pod running the respective agent image. 
- -[IMPORTANT] -==== -In {product-title} 4.10 and later, the recommended pattern for running Jenkins agents using the Kubernetes plugin is to use pod templates with both `jnlp` and `sidecar` containers. The `jnlp` container uses the {product-title} Jenkins Base agent image to facilitate launching a separate pod for your build. The `sidecar` container image has the tools needed to build in a particular language within the separate pod that was launched. Many container images from the Red Hat Container Catalog are referenced in the sample image streams in the `openshift` namespace. The {product-title} Jenkins image has a pod template named `java-build` with sidecar containers that demonstrate this approach. This pod template uses the latest Java version provided by the `java` image stream in the `openshift` namespace. -==== - -The Jenkins image also provides auto-discovery and auto-configuration of additional agent images for the Kubernetes plugin. - -With the {product-title} sync plugin, on Jenkins startup, the Jenkins image searches within the project it is running, or the projects listed in the plugin's configuration, for the following items: - -* Image streams with the `role` label set to `jenkins-agent`. -* Image stream tags with the `role` annotation set to `jenkins-agent`. -* Config maps with the `role` label set to `jenkins-agent`. - -When the Jenkins image finds an image stream with the appropriate label, or an image stream tag with the appropriate annotation, it generates the corresponding Kubernetes plugin configuration. This way, you can assign your Jenkins jobs to run in a pod running the container image provided by the image stream. - -The name and image references of the image stream, or image stream tag, are mapped to the name and image fields in the Kubernetes plugin pod template. You can control the label field of the Kubernetes plugin pod template by setting an annotation on the image stream, or image stream tag object, with the key `agent-label`. Otherwise, the name is used as the label. - -[NOTE] -==== -Do not log in to the Jenkins console and change the pod template configuration. If you do so after the pod template is created, and the {product-title} Sync plugin detects that the image associated with the image stream or image stream tag has changed, it replaces the pod template and overwrites those configuration changes. You cannot merge a new configuration with the existing configuration. - -Consider the config map approach if you have more complex configuration needs. -==== - -When it finds a config map with the appropriate label, the Jenkins image assumes that any values in the key-value data payload of the config map contain Extensible Markup Language (XML) consistent with the configuration format for Jenkins and the Kubernetes plugin pod templates. One key advantage of config maps over image streams and image stream tags is that you can control all the Kubernetes plugin pod template parameters. 
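-
-Before the sync plugin can discover such an object, the `role=jenkins-agent` label must be present on it. The following is a minimal sketch, not part of the original module; the object names are placeholders, and the objects must reside in a project that the sync plugin watches:
-
-[source,terminal]
-----
-$ oc label imagestream <image_stream_name> role=jenkins-agent
-
-$ oc label configmap <config_map_name> role=jenkins-agent
-----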
- -.Sample config map for `jenkins-agent` -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: jenkins-agent - labels: - role: jenkins-agent -data: - template1: |- - <org.csanchez.jenkins.plugins.kubernetes.PodTemplate> - <inheritFrom></inheritFrom> - <name>template1</name> - <instanceCap>2147483647</instanceCap> - <idleMinutes>0</idleMinutes> - <label>template1</label> - <serviceAccount>jenkins</serviceAccount> - <nodeSelector></nodeSelector> - <volumes/> - <containers> - <org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - <name>jnlp</name> - <image>openshift/jenkins-agent-maven-35-centos7:v3.10</image> - <privileged>false</privileged> - <alwaysPullImage>true</alwaysPullImage> - <workingDir>/tmp</workingDir> - <command></command> - <args>${computer.jnlpmac} ${computer.name}</args> - <ttyEnabled>false</ttyEnabled> - <resourceRequestCpu></resourceRequestCpu> - <resourceRequestMemory></resourceRequestMemory> - <resourceLimitCpu></resourceLimitCpu> - <resourceLimitMemory></resourceLimitMemory> - <envVars/> - </org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - </containers> - <envVars/> - <annotations/> - <imagePullSecrets/> - <nodeProperties/> - </org.csanchez.jenkins.plugins.kubernetes.PodTemplate> ----- - -The following example shows two containers that reference image streams in the `openshift` namespace. One container handles the JNLP contract for launching Pods as Jenkins Agents. The other container uses an image with tools for building code in a particular coding language: - -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: jenkins-agent - labels: - role: jenkins-agent -data: - template2: |- - <org.csanchez.jenkins.plugins.kubernetes.PodTemplate> - <inheritFrom></inheritFrom> - <name>template2</name> - <instanceCap>2147483647</instanceCap> - <idleMinutes>0</idleMinutes> - <label>template2</label> - <serviceAccount>jenkins</serviceAccount> - <nodeSelector></nodeSelector> - <volumes/> - <containers> - <org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - <name>jnlp</name> - <image>image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest</image> - <privileged>false</privileged> - <alwaysPullImage>true</alwaysPullImage> - <workingDir>/home/jenkins/agent</workingDir> - <command></command> - <args>\$(JENKINS_SECRET) \$(JENKINS_NAME)</args> - <ttyEnabled>false</ttyEnabled> - <resourceRequestCpu></resourceRequestCpu> - <resourceRequestMemory></resourceRequestMemory> - <resourceLimitCpu></resourceLimitCpu> - <resourceLimitMemory></resourceLimitMemory> - <envVars/> - </org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - <org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - <name>java</name> - <image>image-registry.openshift-image-registry.svc:5000/openshift/java:latest</image> - <privileged>false</privileged> - <alwaysPullImage>true</alwaysPullImage> - <workingDir>/home/jenkins/agent</workingDir> - <command>cat</command> - <args></args> - <ttyEnabled>true</ttyEnabled> - <resourceRequestCpu></resourceRequestCpu> - <resourceRequestMemory></resourceRequestMemory> - <resourceLimitCpu></resourceLimitCpu> - <resourceLimitMemory></resourceLimitMemory> - <envVars/> - </org.csanchez.jenkins.plugins.kubernetes.ContainerTemplate> - </containers> - <envVars/> - <annotations/> - <imagePullSecrets/> - <nodeProperties/> - </org.csanchez.jenkins.plugins.kubernetes.PodTemplate> ----- - - -[NOTE] -==== -Do not log in to the Jenkins console and change the pod template configuration. 
If you do so after the pod template is created, and the {product-title} Sync plugin detects that the image associated with the image stream or image stream tag has changed, it replaces the pod template and overwrites those configuration changes. You cannot merge a new configuration with the existing configuration. - -Consider the config map approach if you have more complex configuration needs. -==== - -After it is installed, the {product-title} Sync plugin monitors the API server of {product-title} for updates to image streams, image stream tags, and config maps and adjusts the configuration of the Kubernetes plugin. - -The following rules apply: - -* Removing the label or annotation from the config map, image stream, or image stream tag deletes any existing `PodTemplate` from the configuration of the Kubernetes plugin. -* If those objects are removed, the corresponding configuration is removed from the Kubernetes plugin. -* If you create appropriately labeled or annotated `ConfigMap`, `ImageStream`, or `ImageStreamTag` objects, or add labels after their initial creation, this results in the creation of a `PodTemplate` in the Kubernetes-plugin configuration. -* In the case of the `PodTemplate` by config map form, changes to the config map data for the `PodTemplate` are applied to the `PodTemplate` settings in the Kubernetes plugin configuration. The changes also override any changes that were made to the `PodTemplate` through the Jenkins UI between changes to the config map. - -To use a container image as a Jenkins agent, the image must run the agent as an entry point. For more details, see the official https://wiki.jenkins-ci.org/display/JENKINS/Distributed+builds#Distributedbuilds-Launchslaveagentheadlessly[Jenkins documentation]. diff --git a/modules/images-other-jenkins-create-service.adoc b/modules/images-other-jenkins-create-service.adoc deleted file mode 100644 index ba9c53fcadbc..000000000000 --- a/modules/images-other-jenkins-create-service.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-create-service_{context}"] -= Creating a Jenkins service from a template - -Templates provide parameter fields to define all the environment variables with predefined default values. {product-title} provides templates to make creating a new Jenkins service easy. The Jenkins templates should be registered in the default `openshift` project by your cluster administrator during the initial cluster setup. - -The two available templates both define deployment configuration and a service. The templates differ in their storage strategy, which affects whether the Jenkins content persists across a pod restart. - -[NOTE] -==== -A pod might be restarted when it is moved to another node or when an update of the deployment configuration triggers a redeployment. -==== - -* `jenkins-ephemeral` uses ephemeral storage. On pod restart, all data is lost. This template is only useful for development or testing. - -* `jenkins-persistent` uses a Persistent Volume (PV) store. Data survives a pod restart. - -To use a PV store, the cluster administrator must define a PV pool in the {product-title} deployment. - -After you select which template you want, you must instantiate the template to be able to use Jenkins. - -.Procedure - -. 
Create a new Jenkins application using one of the following methods: -** A PV: -+ -[source,terminal] ----- -$ oc new-app jenkins-persistent ----- - -** Or an `emptyDir` type volume where configuration does not persist across pod restarts: -+ -[source,terminal] ----- -$ oc new-app jenkins-ephemeral ----- - -With both templates, you can run `oc describe` on them to see all the parameters available for overriding. - -For example: - -[source,terminal] ----- -$ oc describe jenkins-ephemeral ----- diff --git a/modules/images-other-jenkins-cross-project.adoc b/modules/images-other-jenkins-cross-project.adoc deleted file mode 100644 index 0aa0d3736718..000000000000 --- a/modules/images-other-jenkins-cross-project.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: PROCEDURE -[id="images-other-jenkins-cross-project_{context}"] -= Providing Jenkins cross project access - -If you are going to run Jenkins somewhere other than your same project, you must provide an access token to Jenkins to access your project. - -.Procedure - -. Identify the secret for the service account that has appropriate permissions to access the project Jenkins must access: -+ -[source,terminal] ----- -$ oc describe serviceaccount jenkins ----- -+ -.Example output -[source,terminal] ----- -Name: default -Labels: <none> -Secrets: { jenkins-token-uyswp } - { jenkins-dockercfg-xcr3d } -Tokens: jenkins-token-izv1u - jenkins-token-uyswp ----- -+ -In this case the secret is named `jenkins-token-uyswp`. - -. Retrieve the token from the secret: -+ -[source,terminal] ----- -$ oc describe secret <secret name from above> ----- -+ -.Example output -[source,terminal] ----- -Name: jenkins-token-uyswp -Labels: <none> -Annotations: kubernetes.io/service-account.name=jenkins,kubernetes.io/service-account.uid=32f5b661-2a8f-11e5-9528-3c970e3bf0b7 -Type: kubernetes.io/service-account-token -Data -==== -ca.crt: 1066 bytes -token: eyJhbGc..<content cut>....wRA ----- -+ -The token parameter contains the token value Jenkins requires to access the project. diff --git a/modules/images-other-jenkins-customize-s2i.adoc b/modules/images-other-jenkins-customize-s2i.adoc deleted file mode 100644 index b9421af42602..000000000000 --- a/modules/images-other-jenkins-customize-s2i.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-customize-s2i_{context}"] -= Customizing the Jenkins image through source-to-image - -To customize the official {product-title} Jenkins image, you can use the image as a source-to-image (S2I) builder. - -You can use S2I to copy your custom Jenkins jobs definitions, add additional plugins, or replace the provided `config.xml` file with your own, custom, configuration. - -To include your modifications in the Jenkins image, you must have a Git repository with the following directory structure: - -`plugins`:: -This directory contains those binary Jenkins plugins you want to copy into Jenkins. - -`plugins.txt`:: -This file lists the plugins you want to install using the following syntax: - ----- -pluginId:pluginVersion ----- - -`configuration/jobs`:: -This directory contains the Jenkins job definitions. - -`configuration/config.xml`:: -This file contains your custom Jenkins configuration. 
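-
-As an illustration only, a customization repository that uses all of these locations might be laid out as follows; the plugin file and job name are hypothetical:
-
-----
-.
-├── plugins
-│   └── example-plugin.hpi
-├── plugins.txt
-└── configuration
-    ├── config.xml
-    └── jobs
-        └── example-job
-            └── config.xml
-----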
- -The contents of the `configuration/` directory is copied to the `/var/lib/jenkins/` directory, so you can also include additional files, such as `credentials.xml`, there. - -.Sample build configuration customizes the Jenkins image in {product-title} -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: custom-jenkins-build -spec: - source: <1> - git: - uri: https://github.com/custom/repository - type: Git - strategy: <2> - sourceStrategy: - from: - kind: ImageStreamTag - name: jenkins:2 - namespace: openshift - type: Source - output: <3> - to: - kind: ImageStreamTag - name: custom-jenkins:latest ----- - -<1> The `source` parameter defines the source Git repository with the layout described above. -<2> The `strategy` parameter defines the original Jenkins image to use as a source image for the build. -<3> The `output` parameter defines the resulting, customized Jenkins image that you can use in deployment configurations instead of the official Jenkins image. diff --git a/modules/images-other-jenkins-env-var.adoc b/modules/images-other-jenkins-env-var.adoc deleted file mode 100644 index 8e1569691b4e..000000000000 --- a/modules/images-other-jenkins-env-var.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: REFERENCE -[id="images-other-jenkins-env-var_{context}"] -= Jenkins environment variables - -The Jenkins server can be configured with the following environment variables: - -[options="header"] -|=== -| Variable | Definition | Example values and settings - -|`OPENSHIFT_ENABLE_OAUTH` -|Determines whether the {product-title} Login plugin manages authentication when logging in to Jenkins. To enable, set to `true`. -|Default: `false` - -|`JENKINS_PASSWORD` -|The password for the `admin` user when using standard Jenkins authentication. Not applicable when `OPENSHIFT_ENABLE_OAUTH` is set to `true`. -|Default: `password` - -|`JAVA_MAX_HEAP_PARAM`, -`CONTAINER_HEAP_PERCENT`, -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` -|These values control the maximum heap size of the Jenkins JVM. If -`JAVA_MAX_HEAP_PARAM` is set, its value takes precedence. Otherwise, the maximum heap size is dynamically calculated as `CONTAINER_HEAP_PERCENT` of the container memory limit, optionally capped at `JENKINS_MAX_HEAP_UPPER_BOUND_MB` MiB. - -By default, the maximum heap size of the Jenkins JVM is set to 50% of the container memory limit with no cap. -|`JAVA_MAX_HEAP_PARAM` example setting: `-Xmx512m` - -`CONTAINER_HEAP_PERCENT` default: `0.5`, or 50% - -`JENKINS_MAX_HEAP_UPPER_BOUND_MB` example setting: `512 MiB` - -|`JAVA_INITIAL_HEAP_PARAM`, -`CONTAINER_INITIAL_PERCENT` -|These values control the initial heap size of the Jenkins JVM. If `JAVA_INITIAL_HEAP_PARAM` is set, its value takes precedence. Otherwise, the initial heap size is dynamically calculated as `CONTAINER_INITIAL_PERCENT` of the dynamically calculated maximum heap size. - -By default, the JVM sets the initial heap size. -|`JAVA_INITIAL_HEAP_PARAM` example setting: `-Xms32m` - -`CONTAINER_INITIAL_PERCENT` example setting: `0.1`, or 10% - -|`CONTAINER_CORE_LIMIT` -|If set, specifies an integer number of cores used for sizing numbers of internal JVM threads. -|Example setting: `2` - -|`JAVA_TOOL_OPTIONS` -|Specifies options to apply to all JVMs running in this container. It is not recommended to override this value. 
-|Default: `-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true` - -|`JAVA_GC_OPTS` -|Specifies Jenkins JVM garbage collection parameters. It is not recommended to override this value. -|Default: `-XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90` - -|`JENKINS_JAVA_OVERRIDES` -|Specifies additional options for the Jenkins JVM. These options are appended to all other options, including the Java options above, and may be used to override any of them if necessary. Separate each additional option with a space; if any option contains space characters, escape them with a backslash. -|Example settings: `-Dfoo -Dbar`; `-Dfoo=first\ value -Dbar=second\ value`. - -|`JENKINS_OPTS` -|Specifies arguments to Jenkins. -| - -|`INSTALL_PLUGINS` -|Specifies additional Jenkins plugins to install when the container is first run or when `OVERRIDE_PV_PLUGINS_WITH_IMAGE_PLUGINS` is set to `true`. Plugins are specified as a comma-delimited list of name:version pairs. -|Example setting: `git:3.7.0,subversion:2.10.2`. - -|`OPENSHIFT_PERMISSIONS_POLL_INTERVAL` -|Specifies the interval in milliseconds that the {product-title} Login plugin polls {product-title} for the permissions that are associated with each user that is defined in Jenkins. -|Default: `300000` - 5 minutes - -|`OVERRIDE_PV_CONFIG_WITH_IMAGE_CONFIG` -|When running this image with an {product-title} persistent volume (PV) for the Jenkins configuration directory, the transfer of configuration from the image to the PV is performed only the first time the image starts because the PV is assigned when the persistent volume claim (PVC) is created. If you create a custom image that extends this image and updates the configuration in the custom image after the initial startup, the configuration is not copied over unless you set this environment variable to `true`. -|Default: `false` - -|`OVERRIDE_PV_PLUGINS_WITH_IMAGE_PLUGINS` -|When running this image with an {product-title} PV for the Jenkins configuration directory, the transfer of plugins from the image to the PV is performed only the first time the image starts because the PV is assigned when the PVC is created. If you create a custom image that extends this image and updates plugins in the custom image after the initial startup, the plugins are not copied over unless you set this environment variable to `true`. -|Default: `false` - -|`ENABLE_FATAL_ERROR_LOG_FILE` -|When running this image with an {product-title} PVC for the Jenkins configuration directory, this environment variable allows the fatal error log file to persist when a fatal error occurs. The fatal error file is saved at `/var/lib/jenkins/logs`. -|Default: `false` - -|`AGENT_BASE_IMAGE` -|Setting this value overrides the image used for the `jnlp` container in the sample Kubernetes plugin pod templates provided with this image. Otherwise, the image from the `jenkins-agent-base-rhel8:latest` image stream tag in the `openshift` namespace is used. -|Default: -`image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest` - -|`JAVA_BUILDER_IMAGE` -|Setting this value overrides the image used for the `java-builder` container in the `java-builder` sample Kubernetes plugin pod templates provided with this image. Otherwise, the image from the `java:latest` image stream tag in the `openshift` namespace is used. 
-|Default: -`image-registry.openshift-image-registry.svc:5000/openshift/java:latest` - -|`JAVA_FIPS_OPTIONS` -|Setting this value controls how the JVM operates when running on a FIPS node. For more information, see link:https://access.redhat.com/documentation/en-us/openjdk/11/html-single/configuring_openjdk_11_on_rhel_with_fips/index#config-fips-in-openjdk[Configure OpenJDK 11 in FIPS mode]. -|Default: `-Dcom.redhat.fips=false` - -|=== diff --git a/modules/images-other-jenkins-kubernetes-plugin.adoc b/modules/images-other-jenkins-kubernetes-plugin.adoc deleted file mode 100644 index 843eb4d9af87..000000000000 --- a/modules/images-other-jenkins-kubernetes-plugin.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-kubernetes-plugin_{context}"] -= Using the Jenkins Kubernetes plugin - -In the following example, the `openshift-jee-sample` `BuildConfig` object causes a Jenkins Maven agent pod to be dynamically provisioned. The pod clones some Java source code, builds a WAR file, and causes a second `BuildConfig`, `openshift-jee-sample-docker` to run. The second `BuildConfig` layers the new WAR file into a container image. - -[IMPORTANT] -==== -{product-title} 4.11 removed the OpenShift Jenkins Maven and NodeJS Agent images from its payload. Red Hat no longer produces these images, and they are not available from the `ocp-tools-4` repository at `registry.redhat.io`. Red Hat maintains the 4.10 and earlier versions of these images for any significant bug fixes or security CVEs, following the link:https://access.redhat.com/support/policy/updates/openshift[{product-title} lifecycle policy]. - -For more information, see the "Important changes to OpenShift Jenkins images" link in the following "Additional resources" section. -==== - -.Sample `BuildConfig` that uses the Jenkins Kubernetes plugin -[source,yaml] ----- -kind: List -apiVersion: v1 -items: -- kind: ImageStream - apiVersion: image.openshift.io/v1 - metadata: - name: openshift-jee-sample -- kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: openshift-jee-sample-docker - spec: - strategy: - type: Docker - source: - type: Docker - dockerfile: |- - FROM openshift/wildfly-101-centos7:latest - COPY ROOT.war /wildfly/standalone/deployments/ROOT.war - CMD $STI_SCRIPTS_PATH/run - binary: - asFile: ROOT.war - output: - to: - kind: ImageStreamTag - name: openshift-jee-sample:latest -- kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: openshift-jee-sample - spec: - strategy: - type: JenkinsPipeline - jenkinsPipelineStrategy: - jenkinsfile: |- - node("maven") { - sh "git clone https://github.com/openshift/openshift-jee-sample.git ." - sh "mvn -B -Popenshift package" - sh "oc start-build -F openshift-jee-sample-docker --from-file=target/ROOT.war" - } - triggers: - - type: ConfigChange ----- - -It is also possible to override the specification of the dynamically created Jenkins agent pod. The following is a modification to the preceding example, which overrides the container memory and specifies an environment variable. 
- -.Sample `BuildConfig` that uses the Jenkins Kubernetes plugin, specifying memory limit and environment variable -[source,yaml] ----- -kind: BuildConfig -apiVersion: build.openshift.io/v1 -metadata: - name: openshift-jee-sample -spec: - strategy: - type: JenkinsPipeline - jenkinsPipelineStrategy: - jenkinsfile: |- - podTemplate(label: "mypod", <1> - cloud: "openshift", <2> - inheritFrom: "maven", <3> - containers: [ - containerTemplate(name: "jnlp", <4> - image: "openshift/jenkins-agent-maven-35-centos7:v3.10", <5> - resourceRequestMemory: "512Mi", <6> - resourceLimitMemory: "512Mi", <7> - envVars: [ - envVar(key: "CONTAINER_HEAP_PERCENT", value: "0.25") <8> - ]) - ]) { - node("mypod") { <9> - sh "git clone https://github.com/openshift/openshift-jee-sample.git ." - sh "mvn -B -Popenshift package" - sh "oc start-build -F openshift-jee-sample-docker --from-file=target/ROOT.war" - } - } - triggers: - - type: ConfigChange ----- -<1> A new pod template called `mypod` is defined dynamically. The new pod template name is referenced in the node stanza. -<2> The `cloud` value must be set to `openshift`. -<3> The new pod template can inherit its configuration from an existing pod template. In this case, inherited from the Maven pod template that is pre-defined by {product-title}. -<4> This example overrides values in the pre-existing container, and must be specified by name. All Jenkins agent images shipped with {product-title} use the Container name `jnlp`. -<5> Specify the container image name again. This is a known issue. -<6> A memory request of `512 Mi` is specified. -<7> A memory limit of `512 Mi` is specified. -<8> An environment variable `CONTAINER_HEAP_PERCENT`, with value `0.25`, is specified. -<9> The node stanza references the name of the defined pod template. - -// Writer, remove or update jenkins-agent-maven reference in 4.12 - -By default, the pod is deleted when the build completes. This behavior can be modified with the plugin or within a pipeline Jenkinsfile. - -Upstream Jenkins has more recently introduced a YAML declarative format for defining a `podTemplate` pipeline DSL in-line with your pipelines. An example of this format, using the sample `java-builder` pod template that is defined in the {product-title} Jenkins image: - -[source,yaml] ----- -def nodeLabel = 'java-buidler' - -pipeline { - agent { - kubernetes { - cloud 'openshift' - label nodeLabel - yaml """ -apiVersion: v1 -kind: Pod -metadata: - labels: - worker: ${nodeLabel} -spec: - containers: - - name: jnlp - image: image-registry.openshift-image-registry.svc:5000/openshift/jenkins-agent-base-rhel8:latest - args: ['\$(JENKINS_SECRET)', '\$(JENKINS_NAME)'] - - name: java - image: image-registry.openshift-image-registry.svc:5000/openshift/java:latest - command: - - cat - tty: true -""" - } - } - - options { - timeout(time: 20, unit: 'MINUTES') - } - - stages { - stage('Build App') { - steps { - container("java") { - sh "mvn --version" - } - } - } - } -} ----- diff --git a/modules/images-other-jenkins-memory.adoc b/modules/images-other-jenkins-memory.adoc deleted file mode 100644 index d12411d290c5..000000000000 --- a/modules/images-other-jenkins-memory.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-memory_{context}"] -= Jenkins memory requirements - -When deployed by the provided Jenkins Ephemeral or Jenkins Persistent templates, the default memory limit is `1 Gi`. 
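-
-For example, you can raise that limit at instantiation time by overriding the template's `MEMORY_LIMIT` parameter, as described later in this section. This is a minimal sketch; the `2Gi` value is only illustrative:
-
-[source,terminal]
-----
-$ oc new-app jenkins-persistent -p MEMORY_LIMIT=2Gi
-----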
-
-By default, all other processes that run in the Jenkins container cannot use more than a total of `512 MiB` of memory. If they require more memory, the container halts. It is therefore highly recommended that pipelines run external commands in an agent container wherever possible.
-
-If `Project` quotas allow for it, see the recommendations in the Jenkins documentation for how much memory a Jenkins master requires. Those recommendations prescribe allocating even more memory for the Jenkins master.
-
-It is recommended to specify memory request and limit values on agent containers created by the Jenkins Kubernetes plugin. Admin users can set default values on a per-agent image basis through the Jenkins configuration. The memory request and limit parameters can also be overridden on a per-container basis.
-
-You can increase the amount of memory available to Jenkins by overriding the `MEMORY_LIMIT` parameter when instantiating the Jenkins Ephemeral or Jenkins Persistent template.
diff --git a/modules/images-other-jenkins-oauth-auth.adoc b/modules/images-other-jenkins-oauth-auth.adoc
deleted file mode 100644
index 493d59fca046..000000000000
--- a/modules/images-other-jenkins-oauth-auth.adoc
+++ /dev/null
@@ -1,42 +0,0 @@
-// Module included in the following assemblies:
-//
-// * cicd/jenkins/images-other-jenkins.adoc
-
-:_content-type: CONCEPT
-[id="images-other-jenkins-oauth-auth_{context}"]
-= {product-title} OAuth authentication
-
-OAuth authentication is activated by configuring options on the *Configure Global Security* panel in the Jenkins UI, or by setting the `OPENSHIFT_ENABLE_OAUTH` environment variable on the Jenkins *Deployment configuration* to anything other than `false`. This activates the {product-title} Login plugin, which retrieves the configuration information from pod data or by interacting with the {product-title} API server.
-
-Valid credentials are controlled by the {product-title} identity provider.
-
-Jenkins supports both browser and non-browser access.
-
-Valid users are automatically added to the Jenkins authorization matrix when they log in, where {product-title} roles dictate the specific Jenkins permissions that users have. The roles used by default are the predefined `admin`, `edit`, and `view`. The login plugin executes self-SAR requests against those roles in the project or namespace that Jenkins is running in.
-
-Users with the `admin` role have the traditional Jenkins administrative user permissions. Users with the `edit` or `view` role have progressively fewer permissions.
-
-The default {product-title} `admin`, `edit`, and `view` roles, and the Jenkins permissions that those roles are assigned in the Jenkins instance, are configurable.
-
-When running Jenkins in an {product-title} pod, the login plugin looks for a config map named `openshift-jenkins-login-plugin-config` in the namespace that Jenkins is running in.
-
-If this plugin finds and can read in that config map, you can define how {product-title} roles map to Jenkins permissions. Specifically:
-
- * The login plugin treats the key-value pairs in the config map as mappings of Jenkins permissions to {product-title} roles.
- * The key is the Jenkins permission group short ID and the Jenkins permission short ID, with those two separated by a hyphen character.
- * If you want to add the `Overall Jenkins Administer` permission to an {product-title} role, the key should be `Overall-Administer`.
- * To get a sense of which permission groups and permissions IDs are available, go to the matrix authorization page in the Jenkins console and IDs for the groups and individual permissions in the table they provide. - * The value of the key and value pair is the list of {product-title} roles the permission should apply to, with each role separated by a comma. - * If you want to add the `Overall Jenkins Administer` permission to both the default `admin` and `edit` roles, as well as a new Jenkins role you have created, the value for the key `Overall-Administer` would be `admin,edit,jenkins`. - - -[NOTE] -==== -The `admin` user that is pre-populated in the {product-title} Jenkins image with administrative privileges is not given those privileges when {product-title} OAuth is used. To grant these permissions the {product-title} cluster administrator must explicitly define that user in the {product-title} identity provider and assigns the `admin` role to the user. -==== - -Jenkins users' permissions that are stored can be changed after the users are initially established. The {product-title} Login plugin polls the {product-title} API server for permissions and updates the permissions stored in Jenkins for each user with the permissions retrieved from {product-title}. If the Jenkins UI is used to update permissions for a Jenkins user, the permission changes are overwritten the next time the plugin polls {product-title}. - -You can control how often the polling occurs with the `OPENSHIFT_PERMISSIONS_POLL_INTERVAL` environment variable. The default polling interval is five minutes. - -The easiest way to create a new Jenkins service using OAuth authentication is to use a template. diff --git a/modules/images-other-jenkins-permissions.adoc b/modules/images-other-jenkins-permissions.adoc deleted file mode 100644 index a28686174b32..000000000000 --- a/modules/images-other-jenkins-permissions.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * cicd/jenkins/images-other-jenkins.adoc - -:_content-type: CONCEPT -[id="images-other-jenkins-permissions_{context}"] -= Jenkins permissions - -If in the config map the `<serviceAccount>` element of the pod template XML is the {product-title} service account used for the resulting pod, the service account credentials are mounted into the pod. The permissions are associated with the service account and control which operations against the {product-title} master are allowed from the pod. - -Consider the following scenario with service accounts used for the pod, which is launched by the Kubernetes Plugin that runs in the {product-title} Jenkins image. - -If you use the example template for Jenkins that is provided by {product-title}, the `jenkins` service account is defined with the `edit` role for the project Jenkins runs in, and the master Jenkins pod has that service account mounted. - -The two default Maven and NodeJS pod templates that are injected into the Jenkins configuration are also set to use the same service account as the Jenkins master. - -* Any pod templates that are automatically discovered by the {product-title} sync plugin because their image streams or image stream tags have the required label or annotations are configured to use the Jenkins master service account as their service account. -* For the other ways you can provide a pod template definition into Jenkins and the Kubernetes plugin, you have to explicitly specify the service account to use. 
Those other ways include the Jenkins console, the `podTemplate` pipeline DSL that is provided by the Kubernetes plugin, or labeling a config map whose data is the XML configuration for a pod template.
-* If you do not specify a value for the service account, the `default` service account is used.
-* Ensure that whatever service account is used has the necessary permissions and roles defined within {product-title} to manipulate the projects that you intend to work with from within the pod.
diff --git a/modules/images-pulling-from-private-registries.adoc b/modules/images-pulling-from-private-registries.adoc
deleted file mode 100644
index dc0b673a009c..000000000000
--- a/modules/images-pulling-from-private-registries.adoc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Module included in the following assemblies:
-// * openshift_images/using-image-pull-secrets
-
-:_content-type: PROCEDURE
-[id="images-pulling-from-private-registries_{context}"]
-= Pulling from private registries with delegated authentication
-
-A private registry can delegate authentication to a separate service. In these cases, image pull secrets must be defined for both the authentication and registry endpoints.
-
-.Procedure
-
-. Create a secret for the delegated authentication server:
-+
-[source,terminal]
-----
-$ oc create secret docker-registry \
- --docker-server=sso.redhat.com \
- --docker-username=developer@example.com \
- --docker-password=******** \
- --docker-email=unused \
- redhat-connect-sso
-
-secret/redhat-connect-sso
-----
-+
-. Create a secret for the private registry:
-+
-[source,terminal]
-----
-$ oc create secret docker-registry \
- --docker-server=privateregistry.example.com \
- --docker-username=developer@example.com \
- --docker-password=******** \
- --docker-email=unused \
- private-registry
-
-secret/private-registry
-----
diff --git a/modules/images-referencing-images-imagestreams.adoc b/modules/images-referencing-images-imagestreams.adoc
deleted file mode 100644
index c222a57ba9e8..000000000000
--- a/modules/images-referencing-images-imagestreams.adoc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Module included in the following assemblies:
-// * openshift_images/tagging-images
-
-:_content-type: PROCEDURE
-[id="images-referencing-images-imagestreams_{context}"]
-= Referencing images in imagestreams
-
-You can use tags to reference images in image streams using the following reference types.
-
-.Imagestream reference types
-[width="50%",options="header"]
-|===
-|Reference type |Description
-
-|`ImageStreamTag`
-|An `ImageStreamTag` is used to reference or retrieve an image for a given image stream and tag.
-
-|`ImageStreamImage`
-|An `ImageStreamImage` is used to reference or retrieve an image for a given image stream and image `sha` ID.
-
-|`DockerImage`
-|A `DockerImage` is used to reference or retrieve an image for a given external registry. It uses the standard Docker `pull specification` for its name.
-|===
-
-When viewing example image stream definitions, you might notice that they contain definitions of `ImageStreamTag` and references to `DockerImage`, but nothing related to `ImageStreamImage`.
-
-This is because the `ImageStreamImage` objects are automatically created in {product-title} when you import or tag an image into the image stream. You should never have to explicitly define an `ImageStreamImage` object in any image stream definition that you use to create image streams.
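-
-For example, importing an image from an external registry into an image stream is enough to create the backing `ImageStreamImage` object. The following is a minimal sketch with placeholder values:
-
-[source,terminal]
-----
-$ oc import-image <image_stream_name>:<tag> --from=<external_registry>/<repository>:<tag> --confirm
-----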
- -.Procedure - -* To reference an image for a given image stream and tag, use `ImageStreamTag`: -+ ----- -<image_stream_name>:<tag> ----- - -* To reference an image for a given image stream and image `sha` ID, use `ImageStreamImage`: -+ ----- -<image_stream_name>@<id> ----- -+ -The `<id>` is an immutable identifier for a specific image, also called a -digest. - -* To reference or retrieve an image for a given external registry, use `DockerImage`: -+ ----- -openshift/ruby-20-centos7:2.0 ----- -+ -[NOTE] -==== -When no tag is specified, it is assumed the `latest` tag is used. -==== -+ -You can also reference a third-party registry: -+ ----- -registry.redhat.io/rhel7:latest ----- -+ -Or an image with a digest: -+ ----- -centos/ruby-22-centos7@sha256:3a335d7d8a452970c5b4054ad7118ff134b3a6b50a2bb6d0c07c746e8986b28e ----- diff --git a/modules/images-remove-tag-imagestream.adoc b/modules/images-remove-tag-imagestream.adoc deleted file mode 100644 index a97922e3e970..000000000000 --- a/modules/images-remove-tag-imagestream.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -:_content-type: PROCEDURE -[id="images-remove-tag-imagestream_{context}"] -= Removing tags from image streams - -You can remove tags from an image stream. - -.Procedure - -* To remove a tag completely from an image stream run: -+ -[source,terminal] ----- -$ oc delete istag/ruby:latest ----- -+ -or: -+ -[source,terminal] ----- -$ oc tag -d ruby:latest ----- diff --git a/modules/images-s2i-build-process-overview.adoc b/modules/images-s2i-build-process-overview.adoc deleted file mode 100644 index 006552f1558d..000000000000 --- a/modules/images-s2i-build-process-overview.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/using-images-source-to-image.adoc - -:_content-type: CONCEPT -[id="images-s2i-build-process-overview_{context}"] -= Source-to-image build process overview - -Source-to-image (S2I) produces ready-to-run images by injecting source code into a container that prepares that source code to be run. It performs the following steps: - -. Runs the `FROM <builder image>` command -. Copies the source code to a defined location in the builder image -. Runs the assemble script in the builder image -. Sets the run script in the builder image as the default command - -Buildah then creates the container image. diff --git a/modules/images-samples-operator-deprecated-image-stream.adoc b/modules/images-samples-operator-deprecated-image-stream.adoc deleted file mode 100644 index a721d87db974..000000000000 --- a/modules/images-samples-operator-deprecated-image-stream.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/configuring-samples-operator.adoc - - -:_content-type: PROCEDURE -[id="images-samples-operator-deprecated-image-stream_{context}"] -= Removing deprecated image stream tags from the Cluster Samples Operator - -The Cluster Samples Operator leaves deprecated image stream tags in an image stream because users can have deployments that use the deprecated image stream tags. - -You can remove deprecated image stream tags by editing the image stream with the `oc tag` command. - -[NOTE] -==== -Deprecated image stream tags that the samples providers have removed from their image streams are not included on initial installations. -==== - -.Prerequisites - -* You installed the `oc` CLI. 
- -.Procedure - -* Remove deprecated image stream tags by editing the image stream with the `oc tag` command. -+ -[source,terminal] ----- -$ oc tag -d <image_stream_name:tag> ----- -+ -.Example output -[source,terminal] ----- -Deleted tag default/<image_stream_name:tag>. ----- - -//Similar procedure in images-imagestreams-remove-tag.adoc diff --git a/modules/images-tag.adoc b/modules/images-tag.adoc deleted file mode 100644 index c6156c9fbb96..000000000000 --- a/modules/images-tag.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/images-understand.adoc -// * openshift_images/tagging-images.adoc - -[id="images-tag_{context}"] -= Image tags - -An image tag is a label applied to a container image in a repository that distinguishes a specific image from other images in an image stream. Typically, the tag represents a version number of some sort. For example, here `:v3.11.59-2` is the tag: - -[source,text] ----- -registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.11.59-2 ----- - -You can add additional tags to an image. For example, an image might be assigned the tags `:v3.11.59-2` and `:latest`. - -{product-title} provides the `oc tag` command, which is similar to the `docker tag` command, but operates on image streams instead of directly on images. diff --git a/modules/images-tagging-conventions.adoc b/modules/images-tagging-conventions.adoc deleted file mode 100644 index b346a415d53c..000000000000 --- a/modules/images-tagging-conventions.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/tagging-images - -[id="images-tagging-conventions_{context}"] -= Image tag conventions - -Images evolve over time and their tags reflect this. Generally, an image tag always points to the latest image built. - -If there is too much information embedded in a tag name, like `v2.0.1-may-2019`, the tag points to just one revision of an image and is never updated. Using default image pruning options, such an image is never removed. -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -In very large clusters, the schema of creating new tags for every revised image could eventually fill up the etcd datastore with excess tag metadata for images that are long outdated. -endif::[] - -If the tag is named `v2.0`, image revisions are more likely. This results in longer tag history and, therefore, the image pruner is more likely to remove old and unused images. - -Although tag naming convention is up to you, here are a few examples in the format `<image_name>:<image_tag>`: - -.Image tag naming conventions -[width="50%",options="header"] -|=== -|Description |Example - -|Revision -|`myimage:v2.0.1` - -|Architecture -|`myimage:v2.0-x86_64` - -|Base image -|`myimage:v1.2-centos7` - -|Latest (potentially unstable) -|`myimage:latest` - -|Latest stable -|`myimage:stable` -|=== - -If you require dates in tag names, periodically inspect old and unsupported images and `istags` and remove them. Otherwise, you can experience increasing resource usage caused by retaining old images. 
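-
-For example, you can list the image stream tags in a project and delete the ones that are outdated. This is a minimal clean-up sketch; the project, image stream, and tag names are placeholders:
-
-[source,terminal]
-----
-$ oc get istag -n <project>
-
-$ oc tag -d <image_stream_name>:<dated_tag>
-----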
diff --git a/modules/images-test-s2i.adoc b/modules/images-test-s2i.adoc deleted file mode 100644 index 447fde8ce773..000000000000 --- a/modules/images-test-s2i.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/create-images.adoc - -:_content-type: CONCEPT -[id="images-test-s2i_{context}"] -= About testing source-to-image images - -As an Source-to-Image (S2I) builder image author, you can test your S2I image -locally and use the {product-title} build system for automated testing and -continuous integration. - -S2I requires the -`assemble` and `run` scripts to be present to successfully run -the S2I build. Providing the `save-artifacts` script reuses the build -artifacts, and providing the `usage` script ensures that usage information is -printed to console when someone runs the container image outside of the S2I. - -The goal of testing an S2I image is to make sure that all of these described -commands work properly, even if the base container image has changed or the tooling -used by the commands was updated. - -[id="images-test-s2i-testing-requirements_{context}"] -== Understanding testing requirements - -The standard location for the `test` script is `test/run`. This script is -invoked by the {product-title} S2I image builder and it could be a simple Bash -script or a static Go binary. - -The `test/run` script performs the S2I build, so you must have the S2I binary -available in your `$PATH`. If required, follow the installation instructions -in the -https://github.com/openshift/source-to-image/blob/master/README.md#installation[S2I -README]. - -S2I combines the application source code and builder image, so to test -it you need a sample application source to verify that the source successfully -transforms into a runnable container image. The sample application should be simple, -but it should exercise the crucial steps of `assemble` and `run` scripts. - -[id="images-test-s2i-generating-scripts-and-tools_{context}"] -== Generating scripts and tools - -The S2I tooling comes with powerful generation tools to speed up the process of -creating a new S2I image. The `s2i create` command produces all the necessary S2I -scripts and testing tools along with the `Makefile`: - -[source,termnal] ----- -$ s2i create _<image name>_ _<destination directory>_ ----- - -The generated `test/run` script must be adjusted to be -useful, but it provides a good starting point to begin developing. - -[NOTE] -==== -The `test/run` script produced by the `s2i create` command requires that the sample application sources are inside the `test/test-app` directory. -==== -ifndef::openshift-online[] -[id="images-test-s21-testing-locally_{context}"] -== Testing locally -The easiest way to run the S2I image tests locally is to use the generated -`Makefile`. - -If you did not use the `s2i create` command, you can copy the -following `Makefile` template and replace the `IMAGE_NAME` parameter with -your image name. - -.Sample `Makefile` ----- -IMAGE_NAME = openshift/ruby-20-centos7 -CONTAINER_ENGINE := $(shell command -v podman 2> /dev/null | echo docker) - -build: - ${CONTAINER_ENGINE} build -t $(IMAGE_NAME) . - -.PHONY: test -test: - ${CONTAINER_ENGINE} build -t $(IMAGE_NAME)-candidate . - IMAGE_NAME=$(IMAGE_NAME)-candidate test/run ----- - -[id="images-test-s21-basic-testing-workflow_{context}"] -== Basic testing workflow - -The `test` script assumes you have already built the image you want to -test. If required, first build the S2I image. 
Run one of the following commands: - -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman build -t <builder_image_name> ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker build -t <builder_image_name> ----- - -The following steps describe the default workflow to test S2I image builders: - -. Verify the `usage` script is working: -+ -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman run <builder_image_name> . ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker run <builder_image_name> . ----- - -. Build the image: -+ -[source,terminal] -[options="nowrap"] ----- -$ s2i build file:///path-to-sample-app _<BUILDER_IMAGE_NAME>_ _<OUTPUT_APPLICATION_IMAGE_NAME>_ ----- - -. Optional: if you support `save-artifacts`, run step 2 once again to -verify that saving and restoring artifacts works properly. - -. Run the container: -+ -* If you use Podman, run the following command: -+ -[source,terminal] ----- -$ podman run <output_application_image_name> ----- - -* If you use Docker, run the following command: -+ -[source,terminal] ----- -$ docker run <output_application_image_name> ----- - -. Verify the container is running and the application is responding. - -Running these steps is generally enough to tell if the builder image is -working as expected. - - -[id="images-test-s21-using-openshift-for-building-the-image_{context}"] -== Using {product-title} for building the image - -Once you have a `Dockerfile` and the other artifacts that make up your new -S2I builder image, you can put them in a git repository and use {product-title} -to build and push the image. Define a Docker build that points -to your repository. - -If your {product-title} instance is hosted on a public IP address, the build can -be triggered each time you push into your S2I builder image GitHub repository. - -You can also use the `ImageChangeTrigger` to trigger a rebuild of your applications that are -based on the S2I builder image you updated. -endif::openshift-online[] diff --git a/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc b/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc deleted file mode 100644 index 246cb30bd97e..000000000000 --- a/modules/images-triggering-updates-imagestream-changes-kubernetes-about.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/triggering-updates-on-imagestream-changes.adoc - - -[id="images-triggering-updates-imagestream-changes-kubernetes-about_{context}"] -= Triggering Kubernetes resources - -Kubernetes resources do not have fields for triggering, unlike deployment and build configurations, which include as part of their API definition a set of fields for controlling triggers. Instead, you can use annotations in {product-title} to request triggering. - -The annotation is defined as follows: - -[source,yaml] ----- -Key: image.openshift.io/triggers -Value: -[ - { - "from": { - "kind": "ImageStreamTag", <1> - "name": "example:latest", <2> - "namespace": "myapp" <3> - }, - "fieldPath": "spec.template.spec.containers[?(@.name==\"web\")].image", <4> - "paused": false <5> - }, - ... -] ----- -<1> Required: `kind` is the resource to trigger from must be `ImageStreamTag`. -<2> Required: `name` must be the name of an image stream tag. -<3> Optional: `namespace` defaults to the namespace of the object. 
-<4> Required: `fieldPath` is the JSON path to change. This field is limited and accepts only a JSON path expression that precisely matches a container by ID or index. For pods, the JSON path is "spec.containers[?(@.name='web')].image". -<5> Optional: `paused` is whether or not the trigger is paused, and the default value is `false`. Set `paused` to `true` to temporarily disable this trigger. - -When one of the core Kubernetes resources contains both a pod template and this annotation, {product-title} attempts to update the object by using the image currently associated with the image stream tag that is referenced by trigger. The update is performed against the `fieldPath` specified. - -Examples of core Kubernetes resources that can contain both a pod template and annotation include: - -* `CronJobs` -* `Deployments` -* `StatefulSets` -* `DaemonSets` -* `Jobs` -* `ReplicationControllers` -* `Pods` diff --git a/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc b/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc deleted file mode 100644 index 03e76d91a1e3..000000000000 --- a/modules/images-triggering-updates-imagestream-changes-kubernetes-cli.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/triggering-updates-on-imagestream-changes.adoc - - -:_content-type: PROCEDURE -[id="images-triggering-updates-imagestream-changes-kubernetes-cli_{context}"] -= Setting the image trigger on Kubernetes resources - -When adding an image trigger to deployments, you can use the `oc set triggers` command. For example, the sample command in this procedure adds an image change trigger to the deployment named `example` so that when the `example:latest` image stream tag is updated, the `web` container inside the deployment updates with the new image value. This command sets the correct `image.openshift.io/triggers` annotation on the deployment resource. - -.Procedure - -* Trigger Kubernetes resources by entering the `oc set triggers` command: -+ -[source,terminal] ----- -$ oc set triggers deploy/example --from-image=example:latest -c web ----- - -Unless the deployment is paused, this pod template update automatically causes a deployment to occur with the new image value. diff --git a/modules/images-update-global-pull-secret.adoc b/modules/images-update-global-pull-secret.adoc deleted file mode 100644 index ab32a54f219e..000000000000 --- a/modules/images-update-global-pull-secret.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/managing_images/using-image-pull-secrets.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * updating/updating-restricted-network-cluster/restricted-network-update-osus.adoc -// * support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// -// Not included, but linked to from: -// * operators/admin/olm-managing-custom-catalogs.adoc - -ifeval::["{context}" == "using-image-pull-secrets"] -:image-pull-secrets: -endif::[] - -:_content-type: PROCEDURE -[id="images-update-global-pull-secret_{context}"] -= Updating the global cluster pull secret - -You can update the global pull secret for your cluster by either replacing the current pull secret or appending a new pull secret. 
- -ifndef::image-pull-secrets[] -The procedure is required when users use a separate registry to store images than the registry used during installation. -endif::image-pull-secrets[] - -ifdef::image-pull-secrets[] -[IMPORTANT] -==== -To transfer your cluster to another owner, you must first initiate the transfer in {cluster-manager-url}, and then update the pull secret on the cluster. Updating a cluster's pull secret without initiating the transfer in {cluster-manager} causes the cluster to stop reporting Telemetry metrics in {cluster-manager}. - -For more information link:https://access.redhat.com/documentation/en-us/openshift_cluster_manager/2021/html/managing_clusters/assembly-managing-clusters#transferring-cluster-ownership_assembly-managing-clusters[about transferring cluster ownership], see "Transferring cluster ownership" in the {cluster-manager-first} documentation. -==== -endif::image-pull-secrets[] - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure -. Optional: To append a new pull secret to the existing pull secret, complete the following steps: - -.. Enter the following command to download the pull secret: -+ -[source,terminal] ----- -$ oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' ><pull_secret_location> <1> ----- -<1> Provide the path to the pull secret file. - -.. Enter the following command to add the new pull secret: -+ -[source,terminal] ----- -$ oc registry login --registry="<registry>" \ <1> ---auth-basic="<username>:<password>" \ <2> ---to=<pull_secret_location> <3> ----- -<1> Provide the new registry. You can include multiple repositories within the same registry, for example: `--registry="<registry/my-namespace/my-repository>"`. -<2> Provide the credentials of the new registry. -<3> Provide the path to the pull secret file. -+ -Alternatively, you can perform a manual update to the pull secret file. - -. Enter the following command to update the global pull secret for your cluster: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=<pull_secret_location> <1> ----- -<1> Provide the path to the new pull secret file. -+ -This update is rolled out to all nodes, which can take some time depending on the size of your cluster. -+ -[NOTE] -==== -As of {product-title} 4.7.4, changes to the global pull secret no longer trigger a node drain or reboot. -==== -//Also referred to as the cluster-wide pull secret. - - -ifeval::["{context}" == "using-image-pull-secrets"] -:!image-pull-secrets: -endif::[] diff --git a/modules/images-using-customizing-s2i-images-scripts-embedded.adoc b/modules/images-using-customizing-s2i-images-scripts-embedded.adoc deleted file mode 100644 index 4c32e6efc5a7..000000000000 --- a/modules/images-using-customizing-s2i-images-scripts-embedded.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * openshift_images/using_images/customizing-s2i-images.adoc - -:_content-type: PROCEDURE -[id="images-using-customizing-s2i-images-scripts-embedded_{context}"] -= Invoking scripts embedded in an image - -Builder images provide their own version of the source-to-image (S2I) scripts that cover the most common use-cases. If these scripts do not fulfill your needs, S2I provides a way of overriding them by adding custom ones in the `.s2i/bin` directory. However, by doing this, you are completely replacing the standard scripts. 
In some cases, replacing the scripts is acceptable, but, in other scenarios, you can run a few commands before or after the scripts while retaining the logic of the script provided in the image. To reuse the standard scripts, you can create a wrapper script that runs custom logic and delegates further work to the default scripts in the image. - -.Procedure - -. Look at the value of the `io.openshift.s2i.scripts-url` label to determine the location of the scripts inside of the builder image: -+ -[source,terminal] ----- -$ podman inspect --format='{{ index .Config.Labels "io.openshift.s2i.scripts-url" }}' wildfly/wildfly-centos7 ----- -+ -.Example output -[source,terminal] ----- -image:///usr/libexec/s2i ----- -+ -You inspected the `wildfly/wildfly-centos7` builder image and found out that the scripts are in the `/usr/libexec/s2i` directory. -+ -. Create a script that includes an invocation of one of the standard scripts wrapped in other commands: -+ -.`.s2i/bin/assemble` script -[source,bash] ----- -#!/bin/bash -echo "Before assembling" - -/usr/libexec/s2i/assemble -rc=$? - -if [ $rc -eq 0 ]; then - echo "After successful assembling" -else - echo "After failed assembling" -fi - -exit $rc ----- -+ -This example shows a custom assemble script that prints the message, runs the standard assemble script from the image, and prints another message depending on the exit code of the assemble script. -+ -[IMPORTANT] -==== -When wrapping the run script, you must use `exec` for invoking it to ensure signals are handled properly. The use of `exec` also precludes the ability to run additional commands after invoking the default image run script. -==== -+ -.`.s2i/bin/run` script -[source,bash] ----- -#!/bin/bash -echo "Before running application" -exec /usr/libexec/s2i/run ----- diff --git a/modules/images-using-imagestream-change-triggers.adoc b/modules/images-using-imagestream-change-triggers.adoc deleted file mode 100644 index 3f83a2b8bf47..000000000000 --- a/modules/images-using-imagestream-change-triggers.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-change-triggers_{context}"] -= Image stream change triggers - -Image stream triggers allow your builds and deployments to be automatically -invoked when a new version of an upstream image is available. - -//from FAQ - -For example, builds and deployments can be automatically started when an image -stream tag is modified. This is achieved by monitoring that particular image -stream tag and notifying the build or deployment when a change is detected. diff --git a/modules/images-using-imagestream-images.adoc b/modules/images-using-imagestream-images.adoc deleted file mode 100644 index 11c807396ece..000000000000 --- a/modules/images-using-imagestream-images.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-images_{context}"] -= Image stream images - -An image stream image points from within an image stream to a particular image ID. - -Image stream images allow you to retrieve metadata about an image from a particular image stream where it is tagged. - -Image stream image objects are automatically created in {product-title} whenever you import or tag an image into the image stream. You should never have to explicitly define an image stream image object in any image stream definition that you use to create image streams. 
- -The image stream image consists of the image stream name and image ID from the repository, delimited by an `@` sign: - ----- -<image-stream-name>@<image-id> ----- - -To refer to the image in the `ImageStream` object example, the image stream image looks like: - ----- -origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d ----- diff --git a/modules/images-using-imagestream-tags.adoc b/modules/images-using-imagestream-tags.adoc deleted file mode 100644 index a0226e7b35e2..000000000000 --- a/modules/images-using-imagestream-tags.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-managing.adoc - -[id="images-using-imagestream-tags_{context}"] -= Image stream tags - -An image stream tag is a named pointer to an image in an image stream. It is abbreviated as `istag`. An image stream tag is used to reference or retrieve an image for a given image stream and tag. - -Image stream tags can reference any local or externally managed image. It contains a history of images represented as a stack of all images the tag ever pointed to. Whenever a new or existing image is tagged under particular image stream tag, it is placed at the first position in the history stack. The image previously occupying the top position is available at the second position. This allows for easy rollbacks to make tags point to historical images again. - -The following image stream tag is from an `ImageStream` object: - -.Image stream tag with two images in its history - -[source,yaml] ----- - tags: - - items: - - created: 2017-09-02T10:15:09Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - generation: 2 - image: sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - - created: 2017-09-01T13:40:11Z - dockerImageReference: 172.30.56.218:5000/test/origin-ruby-sample@sha256:909de62d1f609a717ec433cc25ca5cf00941545c83a01fb31527771e1fab3fc5 - generation: 1 - image: sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d - tag: latest ----- - -Image stream tags can be permanent tags or tracking tags. - -* Permanent tags are version-specific tags that point to a particular version of an image, such as Python 3.5. - -* Tracking tags are reference tags that follow another image stream tag and can be updated to change which image they follow, like a symlink. These new levels are not guaranteed to be backwards-compatible. -+ -For example, the `latest` image stream tags that ship with {product-title} are tracking tags. This means consumers of the `latest` image stream tag are updated to the newest level of the framework provided by the image when a new level becomes available. A `latest` image stream tag to `v3.10` can be changed to `v3.11` at any time. It is important to be aware that these `latest` image stream tags behave differently than the Docker `latest` tag. The `latest` image stream tag, in this case, does not point to the latest image in the Docker repository. It points to another image stream tag, which might not be the latest version of an image. For example, if the `latest` image stream tag points to `v3.10` of an image, when the `3.11` version is released, the `latest` tag is not automatically updated to `v3.11`, and remains at `v3.10` until it is manually updated to point to a `v3.11` image stream tag. 
-+ -[NOTE] -==== -Tracking tags are limited to a single image stream and cannot reference other -image streams. -==== - -You can create your own image stream tags for your own needs. - -The image stream tag is composed of the name of the image stream and a tag, -separated by a colon: - ----- -<imagestream name>:<tag> ----- - -For example, to refer to the -`sha256:47463d94eb5c049b2d23b03a9530bf944f8f967a0fe79147dd6b9135bf7dd13d` image -in the `ImageStream` object example earlier, the image stream tag -would be: - ----- -origin-ruby-sample:latest ----- diff --git a/modules/impersonation-project-creation.adoc b/modules/impersonation-project-creation.adoc deleted file mode 100644 index 8dce08473d60..000000000000 --- a/modules/impersonation-project-creation.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/creating-project-other-user.adoc - -:_content-type: PROCEDURE -[id="impersonation-project-creation_{context}"] -= Impersonating a user when you create a project - -You can impersonate a different user when you create a project request. Because -`system:authenticated:oauth` is the only bootstrap group that can -create project requests, you must impersonate that group. - -.Procedure - -* To create a project request on behalf of a different user: -+ -[source,terminal] ----- -$ oc new-project <project> --as=<user> \ - --as-group=system:authenticated --as-group=system:authenticated:oauth ----- diff --git a/modules/impersonation-system-admin-group.adoc b/modules/impersonation-system-admin-group.adoc deleted file mode 100644 index 8d47b654141b..000000000000 --- a/modules/impersonation-system-admin-group.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * users_and_roles/impersonating-system-admin.adoc - -:_content-type: PROCEDURE -[id="impersonation-system-admin-group_{context}"] -= Impersonating the system:admin group - - -When a `system:admin` user is granted cluster administration permissions through a group, you must include the -`--as=<user> --as-group=<group1> --as-group=<group2>` parameters in the command to impersonate the associated groups. - -.Procedure - -* To grant a user permission to impersonate a `system:admin` by impersonating the associated cluster administration groups, -run the following command: -+ -[source,terminal] ----- -$ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --as=<user> \ ---as-group=<group1> --as-group=<group2> ----- diff --git a/modules/impersonation-system-admin-user.adoc b/modules/impersonation-system-admin-user.adoc deleted file mode 100644 index 86bd63c3e363..000000000000 --- a/modules/impersonation-system-admin-user.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * users_and_roles/impersonating-system-admin.adoc - -:_content-type: PROCEDURE -[id="impersonation-system-admin-user_{context}"] -= Impersonating the system:admin user - -You can grant a user permission to impersonate `system:admin`, which grants them -cluster administrator permissions. 
- -.Procedure - -* To grant a user permission to impersonate `system:admin`, run the following command: -+ -[source,terminal] ----- -$ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --user=<username> ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to grant permission to impersonate `system:admin`: - -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: <any_valid_name> -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: sudoer -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: <username> ----- -==== diff --git a/modules/importing-manifest-list-through-imagestreamimport.adoc b/modules/importing-manifest-list-through-imagestreamimport.adoc deleted file mode 100644 index f18ebf11ae0b..000000000000 --- a/modules/importing-manifest-list-through-imagestreamimport.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * openshift_images/image-streams-manage.adoc - -:_content-type: PROCEDURE -[id="importing-manifest-list-through-imagestreamimport_{context}"] -= Importing a manifest list through ImageStreamImport - - -You can use the `ImageStreamImport` resource to find and import image manifests from other container image registries into the cluster. Individual images or an entire image repository can be imported. - -Use the following procedure to import a manifest list through the `ImageStreamImport` object with the `importMode` value. - -.Procedure - -. Create an `ImageStreamImport` YAML file and set the `importMode` parameter to `PreserveOriginal` on the tags that you will import as a manifest list: -+ -[source,yaml] ----- -apiVersion: image.openshift.io/v1 -kind: ImageStreamImport -metadata: - name: app - namespace: myapp -spec: - import: true - images: - - from: - kind: DockerImage - name: <registry>/<project_name>/<image_name> - to: - name: latest - referencePolicy: - type: Source - importPolicy: - importMode: "PreserveOriginal" ----- - -. Create the `ImageStreamImport` by running the following command: -+ -[source,terminal] ----- -$ oc create -f <your_imagestreamimport.yaml> ----- - diff --git a/modules/importmode-configuration-fields.adoc b/modules/importmode-configuration-fields.adoc deleted file mode 100644 index 078d52eeb64b..000000000000 --- a/modules/importmode-configuration-fields.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// * assembly/openshift_images/managing-image-streams.adoc - -:_content-type: CONCEPT -[id="importmode-configuration-fields_{context}"] -= Configuration fields for --import-mode - -The following table describes the options available for the `--import-mode=` flag: - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -| *Legacy* | The default option for `--import-mode`. When specified, the manifest list is discarded, and a single sub-manifest is imported. The platform is chosen in the following order of priority: - -. Tag annotations -. Control plane architecture -. Linux/AMD64 -. The first manifest in the list - -| *PreserveOriginal* | When specified, the original manifest is preserved. For manifest lists, the manifest list and all of its sub-manifests are imported. 
- -|=== \ No newline at end of file diff --git a/modules/infrastructure-components.adoc b/modules/infrastructure-components.adoc deleted file mode 100644 index 2f1a2d60040b..000000000000 --- a/modules/infrastructure-components.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * nodes-nodes-creating-infrastructure-nodes.adoc - -[id="infrastructure-components_{context}"] -= {product-title} infrastructure components - -The following infrastructure workloads do not incur {product-title} worker subscriptions: - -* Kubernetes and {product-title} control plane services that run on masters -* The default router -* The integrated container image registry -* The HAProxy-based Ingress Controller -* The cluster metrics collection, or monitoring service, including components for monitoring user-defined projects -* Cluster aggregated logging -* Service brokers -* Red Hat Quay -* {rh-storage-first} -* Red Hat Advanced Cluster Manager -* Red Hat Advanced Cluster Security for Kubernetes -* Red Hat OpenShift GitOps -* Red Hat OpenShift Pipelines - -// Updated the list to match the list under "Red Hat OpenShift control plane and infrastructure nodes" in https://www.redhat.com/en/resources/openshift-subscription-sizing-guide - -Any node that runs any other container, pod, or component is a worker node that your subscription must cover. diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc deleted file mode 100644 index 184b1cb05099..000000000000 --- a/modules/infrastructure-moving-logging.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * logging/cluster-logging-moving.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-logging_{context}"] -= Moving OpenShift Logging resources - -You can configure the Cluster Logging Operator to deploy the pods for {logging} components, such as Elasticsearch and Kibana, to different nodes. You cannot move the Cluster Logging Operator pod from its installed location. - -For example, you can move the Elasticsearch pods to a separate node because of high CPU, memory, and disk requirements. - -.Prerequisites - -* The Red Hat OpenShift Logging and Elasticsearch Operators must be installed. These features are not installed by default. - -.Procedure - -. Edit the `ClusterLogging` custom resource (CR) in the `openshift-logging` project: -+ -[source,terminal] ----- -$ oc edit ClusterLogging instance ----- -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -... 
- -spec: - collection: - logs: - fluentd: - resources: null - type: fluentd - logStore: - elasticsearch: - nodeCount: 3 - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - redundancyPolicy: SingleRedundancy - resources: - limits: - cpu: 500m - memory: 16Gi - requests: - cpu: 500m - memory: 16Gi - storage: {} - type: elasticsearch - managementState: Managed - visualization: - kibana: - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved - proxy: - resources: null - replicas: 1 - resources: null - type: kibana - -... ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -.Verification - -To verify that a component has moved, you can use the `oc get pod -o wide` command. - -For example: - -* You want to move the Kibana pod from the `ip-10-0-147-79.us-east-2.compute.internal` node: -+ -[source,terminal] ----- -$ oc get pod kibana-5b8bdf44f9-ccpq9 -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -kibana-5b8bdf44f9-ccpq9 2/2 Running 0 27s 10.129.2.18 ip-10-0-147-79.us-east-2.compute.internal <none> <none> ----- - -* You want to move the Kibana pod to the `ip-10-0-139-48.us-east-2.compute.internal` node, a dedicated infrastructure node: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-133-216.us-east-2.compute.internal Ready master 60m v1.26.0 -ip-10-0-139-146.us-east-2.compute.internal Ready master 60m v1.26.0 -ip-10-0-139-192.us-east-2.compute.internal Ready worker 51m v1.26.0 -ip-10-0-139-241.us-east-2.compute.internal Ready worker 51m v1.26.0 -ip-10-0-147-79.us-east-2.compute.internal Ready worker 51m v1.26.0 -ip-10-0-152-241.us-east-2.compute.internal Ready master 60m v1.26.0 -ip-10-0-139-48.us-east-2.compute.internal Ready infra 51m v1.26.0 ----- -+ -Note that the node has a `node-role.kubernetes.io/infra: ''` label: -+ -[source,terminal] ----- -$ oc get node ip-10-0-139-48.us-east-2.compute.internal -o yaml ----- -+ -.Example output -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: ip-10-0-139-48.us-east-2.compute.internal - selfLink: /api/v1/nodes/ip-10-0-139-48.us-east-2.compute.internal - uid: 62038aa9-661f-41d7-ba93-b5f1b6ef8751 - resourceVersion: '39083' - creationTimestamp: '2020-04-13T19:07:55Z' - labels: - node-role.kubernetes.io/infra: '' -... ----- - -* To move the Kibana pod, edit the `ClusterLogging` CR to add a node selector: -+ -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogging - -... - -spec: - -... - - visualization: - kibana: - nodeSelector: <1> - node-role.kubernetes.io/infra: '' - proxy: - resources: null - replicas: 1 - resources: null - type: kibana ----- -<1> Add a node selector to match the label in the node specification. 
- -* After you save the CR, the current Kibana pod is terminated and new pod is deployed: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-84d98649c4-zb9g7 1/1 Running 0 29m -elasticsearch-cdm-hwv01pf7-1-56588f554f-kpmlg 2/2 Running 0 28m -elasticsearch-cdm-hwv01pf7-2-84c877d75d-75wqj 2/2 Running 0 28m -elasticsearch-cdm-hwv01pf7-3-f5d95b87b-4nx78 2/2 Running 0 28m -fluentd-42dzz 1/1 Running 0 28m -fluentd-d74rq 1/1 Running 0 28m -fluentd-m5vr9 1/1 Running 0 28m -fluentd-nkxl7 1/1 Running 0 28m -fluentd-pdvqb 1/1 Running 0 28m -fluentd-tflh6 1/1 Running 0 28m -kibana-5b8bdf44f9-ccpq9 2/2 Terminating 0 4m11s -kibana-7d85dcffc8-bfpfp 2/2 Running 0 33s ----- - -* The new pod is on the `ip-10-0-139-48.us-east-2.compute.internal` node: -+ -[source,terminal] ----- -$ oc get pod kibana-7d85dcffc8-bfpfp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -kibana-7d85dcffc8-bfpfp 2/2 Running 0 43s 10.131.0.22 ip-10-0-139-48.us-east-2.compute.internal <none> <none> ----- - -* After a few moments, the original Kibana pod is removed. -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -cluster-logging-operator-84d98649c4-zb9g7 1/1 Running 0 30m -elasticsearch-cdm-hwv01pf7-1-56588f554f-kpmlg 2/2 Running 0 29m -elasticsearch-cdm-hwv01pf7-2-84c877d75d-75wqj 2/2 Running 0 29m -elasticsearch-cdm-hwv01pf7-3-f5d95b87b-4nx78 2/2 Running 0 29m -fluentd-42dzz 1/1 Running 0 29m -fluentd-d74rq 1/1 Running 0 29m -fluentd-m5vr9 1/1 Running 0 29m -fluentd-nkxl7 1/1 Running 0 29m -fluentd-pdvqb 1/1 Running 0 29m -fluentd-tflh6 1/1 Running 0 29m -kibana-7d85dcffc8-bfpfp 2/2 Running 0 62s ----- diff --git a/modules/infrastructure-moving-monitoring.adoc b/modules/infrastructure-moving-monitoring.adoc deleted file mode 100644 index 13273b9b8e9a..000000000000 --- a/modules/infrastructure-moving-monitoring.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-monitoring_{context}"] -= Moving the monitoring solution - -The monitoring stack includes multiple components, including Prometheus, Thanos Querier, and Alertmanager. -The Cluster Monitoring Operator manages this stack. To redeploy the monitoring stack to infrastructure nodes, you can create and apply a custom config map. - -.Procedure - -. 
Edit the `cluster-monitoring-config` config map and change the `nodeSelector` to use the `infra` label: -+ -[source,terminal] ----- -$ oc edit configmap cluster-monitoring-config -n openshift-monitoring ----- -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: |+ - alertmanagerMain: - nodeSelector: <1> - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - prometheusK8s: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - prometheusOperator: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - k8sPrometheusAdapter: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - kubeStateMetrics: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - telemeterClient: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - openshiftStateMetrics: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute - thanosQuerier: - nodeSelector: - node-role.kubernetes.io/infra: "" - tolerations: - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoSchedule - - key: node-role.kubernetes.io/infra - value: reserved - effect: NoExecute ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -. Watch the monitoring pods move to the new machines: -+ -[source,terminal] ----- -$ watch 'oc get pod -n openshift-monitoring -o wide' ----- - -. If a component has not moved to the `infra` node, delete the pod with this component: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-monitoring <pod> ----- -+ -The component from the deleted pod is re-created on the `infra` node. 
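As an additional check, you can list only the monitoring pods that are scheduled on a particular infrastructure node. The following is a minimal verification sketch; `<infra_node_name>` is a placeholder for one of your nodes labeled `node-role.kubernetes.io/infra`:

[source,terminal]
----
$ oc get pods -n openshift-monitoring -o wide --field-selector spec.nodeName=<infra_node_name>
----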
diff --git a/modules/infrastructure-moving-registry.adoc b/modules/infrastructure-moving-registry.adoc deleted file mode 100644 index 9d70bb2badeb..000000000000 --- a/modules/infrastructure-moving-registry.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-registry_{context}"] -= Moving the default registry - -You configure the registry Operator to deploy its pods to different nodes. - -.Prerequisites - -* Configure additional compute machine sets in your {product-title} cluster. - -.Procedure - -. View the `config/instance` object: -+ -[source,terminal] ----- -$ oc get configs.imageregistry.operator.openshift.io/cluster -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: imageregistry.operator.openshift.io/v1 -kind: Config -metadata: - creationTimestamp: 2019-02-05T13:52:05Z - finalizers: - - imageregistry.operator.openshift.io/finalizer - generation: 1 - name: cluster - resourceVersion: "56174" - selfLink: /apis/imageregistry.operator.openshift.io/v1/configs/cluster - uid: 36fd3724-294d-11e9-a524-12ffeee2931b -spec: - httpSecret: d9a012ccd117b1e6616ceccb2c3bb66a5fed1b5e481623 - logging: 2 - managementState: Managed - proxy: {} - replicas: 1 - requests: - read: {} - write: {} - storage: - s3: - bucket: image-registry-us-east-1-c92e88cad85b48ec8b312344dff03c82-392c - region: us-east-1 -status: -... ----- - -. Edit the `config/instance` object: -+ -[source,terminal] ----- -$ oc edit configs.imageregistry.operator.openshift.io/cluster ----- -+ -[source,yaml] ----- -spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - namespaces: - - openshift-image-registry - topologyKey: kubernetes.io/hostname - weight: 100 - logLevel: Normal - managementState: Managed - nodeSelector: <1> - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node. If you added a taint to the infrasructure node, also add a matching toleration. - -. Verify the registry pod has been moved to the infrastructure node. -+ -.. Run the following command to identify the node where the registry pod is located: -+ -[source,terminal] ----- -$ oc get pods -o wide -n openshift-image-registry ----- -+ -.. Confirm the node has the label you specified: -+ -[source,terminal] ----- -$ oc describe node <node_name> ----- -+ -Review the command output and confirm that `node-role.kubernetes.io/infra` is in the `LABELS` list. diff --git a/modules/infrastructure-moving-router.adoc b/modules/infrastructure-moving-router.adoc deleted file mode 100644 index 83be616110f5..000000000000 --- a/modules/infrastructure-moving-router.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc - -:_content-type: PROCEDURE -[id="infrastructure-moving-router_{context}"] -= Moving the router - -You can deploy the router pod to a different compute machine set. By default, the pod is deployed to a worker node. 
- -.Prerequisites - -* Configure additional compute machine sets in your {product-title} cluster. - -.Procedure - -. View the `IngressController` custom resource for the router Operator: -+ -[source,terminal] ----- -$ oc get ingresscontroller default -n openshift-ingress-operator -o yaml ----- -+ -The command output resembles the following text: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - creationTimestamp: 2019-04-18T12:35:39Z - finalizers: - - ingresscontroller.operator.openshift.io/finalizer-ingresscontroller - generation: 1 - name: default - namespace: openshift-ingress-operator - resourceVersion: "11341" - selfLink: /apis/operator.openshift.io/v1/namespaces/openshift-ingress-operator/ingresscontrollers/default - uid: 79509e05-61d6-11e9-bc55-02ce4781844a -spec: {} -status: - availableReplicas: 2 - conditions: - - lastTransitionTime: 2019-04-18T12:36:15Z - status: "True" - type: Available - domain: apps.<cluster>.example.com - endpointPublishingStrategy: - type: LoadBalancerService - selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller=default ----- - -. Edit the `ingresscontroller` resource and change the `nodeSelector` to use the `infra` label: -+ -[source,terminal] ----- -$ oc edit ingresscontroller default -n openshift-ingress-operator ----- -+ -[source,yaml] ----- - spec: - nodePlacement: - nodeSelector: <1> - matchLabels: - node-role.kubernetes.io/infra: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/infra - value: reserved - - effect: NoExecute - key: node-role.kubernetes.io/infra - value: reserved ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use `<key>: <value>` pairs, based on the value specified for the node. If you added a taint to the infrastructure node, also add a matching toleration. - -. Confirm that the router pod is running on the `infra` node. -.. View the list of router pods and note the node name of the running pod: -+ -[source,terminal] ----- -$ oc get pod -n openshift-ingress -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -router-default-86798b4b5d-bdlvd 1/1 Running 0 28s 10.130.2.4 ip-10-0-217-226.ec2.internal <none> <none> -router-default-955d875f4-255g8 0/1 Terminating 0 19h 10.129.2.4 ip-10-0-148-172.ec2.internal <none> <none> ----- -+ -In this example, the running pod is on the `ip-10-0-217-226.ec2.internal` node. - -.. View the node status of the running pod: -+ -[source,terminal] ----- -$ oc get node <node_name> <1> ----- -<1> Specify the `<node_name>` that you obtained from the pod list. -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-217-226.ec2.internal Ready infra,worker 17h v1.26.0 ----- -+ -Because the role list includes `infra`, the pod is running on the correct node. 
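If you prefer a non-interactive change over `oc edit`, the same `spec.nodePlacement` settings shown in the `IngressController` example above can be applied with `oc patch`. The following is a sketch of an equivalent merge patch against the default Ingress Controller:

[source,terminal]
----
$ oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge \
  --patch '{"spec":{"nodePlacement":{"nodeSelector":{"matchLabels":{"node-role.kubernetes.io/infra":""}},"tolerations":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/infra","value":"reserved"},{"effect":"NoExecute","key":"node-role.kubernetes.io/infra","value":"reserved"}]}}}'
----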
diff --git a/modules/infrastructure-node-sizing.adoc b/modules/infrastructure-node-sizing.adoc deleted file mode 100644 index b36e798bab5b..000000000000 --- a/modules/infrastructure-node-sizing.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc - -[id="infrastructure-node-sizing_{context}"] -= Infrastructure node sizing - -_Infrastructure nodes_ are nodes that are labeled to run pieces of the {product-title} environment. The infrastructure node resource requirements depend on the cluster age, nodes, and objects in the cluster, as these factors can lead to an increase in the number of metrics or time series in Prometheus. The following infrastructure node size recommendations are based on the results observed in cluster-density testing detailed in the *Control plane node sizing* section, where the monitoring stack and the default ingress-controller were moved to these nodes. - -[options="header",cols="4*"] -|=== -| Number of worker nodes |Cluster density, or number of namespaces |CPU cores |Memory (GB) - -| 27 -| 500 -| 4 -| 24 - -| 120 -| 1000 -| 8 -| 48 - -| 252 -| 4000 -| 16 -| 128 - -| 501 -| 4000 -| 32 -| 128 - -|=== - -In general, three infrastructure nodes are recommended per cluster. - -[IMPORTANT] -==== -These sizing recommendations should be used as a guideline. Prometheus is a highly memory intensive application; the resource usage depends on various factors including the number of nodes, objects, the Prometheus metrics scraping interval, metrics or time series, and the age of the cluster. In addition, the router resource usage can also be affected by the number of routes and the amount/type of inbound requests. - -These recommendations apply only to infrastructure nodes hosting Monitoring, Ingress and Registry infrastructure components installed during cluster creation. -==== - -[NOTE] -==== -In {product-title} {product-version}, half of a CPU core (500 millicore) is now reserved by the system by default compared to {product-title} 3.11 and previous versions. This influences the stated sizing recommendations. -==== diff --git a/modules/ingress-liveness-readiness-startup-probes.adoc b/modules/ingress-liveness-readiness-startup-probes.adoc deleted file mode 100644 index 1459013e1e9b..000000000000 --- a/modules/ingress-liveness-readiness-startup-probes.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// * scalability_and_performance/optimization/routing-optimization.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: REFERENCE -[id="ingress-liveness-readiness-startup-probes_{context}"] -= Configuring Ingress Controller liveness, readiness, and startup probes - -Cluster administrators can configure the timeout values for the kubelet's liveness, readiness, and startup probes for router deployments that are managed by the {product-title} Ingress Controller (router). The liveness and readiness probes of the router use the default timeout value -of 1 second, which is too brief when networking or runtime performance is severely degraded. Probe timeouts can cause unwanted router restarts that interrupt application connections. The ability to set larger timeout values can reduce the risk of unnecessary and unwanted restarts. - -You can update the `timeoutSeconds` value on the `livenessProbe`, `readinessProbe`, and `startupProbe` parameters of the router container. 
- -[cols="3a,8a",options="header"] -|=== - |Parameter |Description - - |`livenessProbe` - |The `livenessProbe` reports to the kubelet whether a pod is dead and needs to be restarted. - - |`readinessProbe` - |The `readinessProbe` reports whether a pod is healthy or unhealthy. When the readiness probe reports an unhealthy pod, then the kubelet marks the pod as not ready to accept traffic. Subsequently, the endpoints for that pod are marked as not ready, and this status propagates to the kube-proxy. On cloud platforms with a configured load balancer, the kube-proxy communicates to the cloud load-balancer not to send traffic to the node with that pod. - - |`startupProbe` - |The `startupProbe` gives the router pod up to 2 minutes to initialize before the kubelet begins sending the router liveness and readiness probes. This initialization time can prevent routers with many routes or endpoints from prematurely restarting. -|=== - - -[IMPORTANT] -==== -The timeout configuration option is an advanced tuning technique that can be used to work around issues. However, these issues should eventually be diagnosed and possibly a support case or https://issues.redhat.com/secure/CreateIssueDetails!init.jspa?pid=12332330&summary=Summary&issuetype=1&priority=10200&versions=12385624[Jira issue] opened for any issues that causes probes to time out. -==== - -The following example demonstrates how you can directly patch the default router deployment to set a 5-second timeout for the liveness and readiness probes: - - -[source, terminal] ----- -$ oc -n openshift-ingress patch deploy/router-default --type=strategic --patch='{"spec":{"template":{"spec":{"containers":[{"name":"router","livenessProbe":{"timeoutSeconds":5},"readinessProbe":{"timeoutSeconds":5}}]}}}}' ----- - -.Verification -[source, terminal] ----- -$ oc -n openshift-ingress describe deploy/router-default | grep -e Liveness: -e Readiness: - Liveness: http-get http://:1936/healthz delay=0s timeout=5s period=10s #success=1 #failure=3 - Readiness: http-get http://:1936/healthz/ready delay=0s timeout=5s period=10s #success=1 #failure=3 ----- diff --git a/modules/ingress-operator.adoc b/modules/ingress-operator.adoc deleted file mode 100644 index 595f04371018..000000000000 --- a/modules/ingress-operator.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="ingress-operator_{context}"] -= Ingress Operator - -[discrete] -== Purpose - -The Ingress Operator configures and manages the {product-title} router. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-ingress-operator[openshift-ingress-operator] - -[discrete] -== CRDs - -* `clusteringresses.ingress.openshift.io` -** Scope: Namespaced -** CR: `clusteringresses` -** Validation: No - -[discrete] -== Configuration objects - -* Cluster config -** Type Name: `clusteringresses.ingress.openshift.io` -** Instance Name: `default` -** View Command: -+ -[source,terminal] ----- -$ oc get clusteringresses.ingress.openshift.io -n openshift-ingress-operator default -o yaml ----- - -[discrete] -== Notes - -The Ingress Operator sets up the router in the `openshift-ingress` project and creates the deployment for the router: - -[source,terminal] ----- -$ oc get deployment -n openshift-ingress ----- - -The Ingress Operator uses the `clusterNetwork[].cidr` from the `network/cluster` status to determine what mode (IPv4, IPv6, or dual stack) the managed Ingress Controller (router) should operate in. 
For example, if `clusterNetwork` contains only a v6 `cidr`, then the Ingress Controller operates in IPv6-only mode. - -In the following example, Ingress Controllers managed by the Ingress Operator will run in IPv4-only mode because only one cluster network exists and the network is an IPv4 `cidr`: - -[source,terminal] ----- -$ oc get network/cluster -o jsonpath='{.status.clusterNetwork[*]}' ----- - -.Example output -[source,terminal] ----- -map[cidr:10.128.0.0/14 hostPrefix:23] ----- diff --git a/modules/insights-operator-about.adoc b/modules/insights-operator-about.adoc deleted file mode 100644 index c98ba34c3b4e..000000000000 --- a/modules/insights-operator-about.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/about-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/about-remote-health-monitoring.adoc - -:_content-type: CONCEPT -[id="insights-operator-about_{context}"] -= About the Insights Operator - -The Insights Operator periodically gathers configuration and component failure status and, by default, reports that data every two hours to Red Hat. This information enables Red Hat to assess configuration and deeper failure data than is reported through Telemetry. - -Users of {product-title} can display the report of each cluster in the {insights-advisor-url} service on {hybrid-console}. If any issues have been identified, Insights provides further details and, if available, steps on how to solve a problem. - -The Insights Operator does not collect identifying information, such as user names, passwords, or certificates. See link:https://console.redhat.com/security/insights[Red Hat Insights Data & Application Security] for information about Red Hat Insights data collection and controls. - -Red Hat uses all connected cluster information to: - -* Identify potential cluster issues and provide a solution and preventive actions in the {insights-advisor-url} service on {hybrid-console} -* Improve {product-title} by providing aggregated and critical information to product and support teams -* Make {product-title} more intuitive diff --git a/modules/insights-operator-advisor-overview.adoc b/modules/insights-operator-advisor-overview.adoc deleted file mode 100644 index 30fec852260f..000000000000 --- a/modules/insights-operator-advisor-overview.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: CONCEPT -[id="insights-operator-advisor-overview_{context}"] -= About Red Hat Insights Advisor for {product-title} - -You can use Insights Advisor to assess and monitor the health of your {product-title} clusters. Whether you are concerned about individual clusters, or with your whole infrastructure, it is important to be aware of your exposure to issues that can affect service availability, fault tolerance, performance, or security. - -Insights repeatedly analyzes the data that Insights Operator sends using a database of _recommendations_, which are sets of conditions that can leave your {product-title} clusters at risk. Your data is then uploaded to the Insights Advisor service on Red Hat Hybrid Cloud Console where you can perform the following actions: - -* See clusters impacted by a specific recommendation. 
-* Use robust filtering capabilities to refine your results to those recommendations. -* Learn more about individual recommendations, details about the risks they present, and get resolutions tailored to your individual clusters. -* Share results with other stakeholders. diff --git a/modules/insights-operator-advisor-recommendations.adoc b/modules/insights-operator-advisor-recommendations.adoc deleted file mode 100644 index 1c2f1a6088cf..000000000000 --- a/modules/insights-operator-advisor-recommendations.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc -// * sd_support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.adoc - -:_content-type: CONCEPT -[id="insights-operator-advisor-recommendations_{context}"] -= Understanding Insights Advisor recommendations - -Insights Advisor bundles information about various cluster states and component configurations that can negatively affect the service availability, fault tolerance, performance, or security of your clusters. This information set is called a recommendation in Insights Advisor and includes the following information: - -* *Name:* A concise description of the recommendation -* *Added:* When the recommendation was published to the Insights Advisor archive -* *Category:* Whether the issue has the potential to negatively affect service availability, fault tolerance, performance, or security -* *Total risk:* A value derived from the _likelihood_ that the condition will negatively affect your infrastructure, and the _impact_ on operations if that were to happen -* *Clusters:* A list of clusters on which a recommendation is detected -* *Description:* A brief synopsis of the issue, including how it affects your clusters -* *Link to associated topics:* More information from Red Hat about the issue diff --git a/modules/insights-operator-configuring-sca.adoc b/modules/insights-operator-configuring-sca.adoc deleted file mode 100644 index 66dc1f5f163e..000000000000 --- a/modules/insights-operator-configuring-sca.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/insights-operator-simple-access.adoc -// * sd_support/remote_health_monitoring/insights-operator-simple-access.adoc - - -:_content-type: PROCEDURE -[id="insights-operator-configuring-sca_{context}"] -= Configuring simple content access import interval - -You can configure how often the Insights Operator imports the simple content access entitlements using the `support` secret in the `openshift-config` namespace. The entitlement import normally occurs every eight hours, but you can shorten this interval if you update your simple content access configuration in Red Hat Subscription Management. - -This procedure describes how to update the import interval to one hour. - -.Prerequisites - -* You are logged in to the {product-title} web console as `cluster-admin`. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-config* project. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Create a key named `scaInterval` with a value of `1h`, and click *Save*. -+ -[NOTE] -==== -The interval `1h` can also be entered as `60m` for 60 minutes. 
-==== diff --git a/modules/insights-operator-configuring.adoc b/modules/insights-operator-configuring.adoc deleted file mode 100644 index 258d044fa211..000000000000 --- a/modules/insights-operator-configuring.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - - -:_content-type: PROCEDURE -[id="insights-operator-configuring-sca_{context}"] -= Configuring Insights Operator - -You can configure Insights Operator to meet the needs of your organization. The Insights Operator is configured using a combination of the default configurations in the `pod.yaml` file in the Insights Operator `Config` directory and the configurations stored in the `support` secret in the `openshift-config` namespace. The `support` secret does not exist by default and must be created when adding custom configurations for the first time. Configurations in the `support` secret override the defaults set in the `pod.yaml` file. - -The table below describes the available configuration attributes: - -.Insights Operator configurable attributes -[options="header"] -|==== -|Attribute name|Description|Value type|Default value -|`username`|Specifies username for basic authentication with `console.redhat.com` (overrides the default `pull-secret` token authentication when set)|String|Not set -|`password`|Specifies password for basic authentication with `console.redhat.com` (overrides the default `pull-secret` token authentication when set)|String|Not set -|`enableGlobalObfuscation`|Enables the global obfuscation of IP addresses and the cluster domain name|Boolean|`false` -|`scaInterval`|Specifies the frequency of the simple content access entitlements download|Time interval|`8h` -|`scaPullDisabled`|Disables the simple content access entitlements download|Boolean|`false` -|`clusterTransferInterval`|Specifies how often Insights Operator checks OpenShift Cluster Manager for available cluster transfers|Time interval|`24h` -|`disableInsightsAlerts`|Disables Insights Operator alerts to the cluster Prometheus instance|Boolean|`False` -|==== - -This procedure describes how to set custom Insights Operator configurations. - -[IMPORTANT] -==== -Red Hat recommends you consult Red Hat Support before making changes to the default Insights Operator configuration. -==== - -.Prerequisites - -* You are logged in to the {product-title} web console as a user with `cluster-admin` role. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. On the *Secrets* page, select *All Projects* from the *Project* list, and then set *Show default projects* to on. -. Select the *openshift-config* project from the *Project* list. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab} for the secret, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Enter an attribute name with an appropriate value (see table above), and click *Save*. -. Repeat the above steps for any additional configurations. 
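If you prefer to work from the command line, the same attributes can be set by creating or updating the `support` secret directly with `oc`. The following commands are an illustrative sketch rather than part of the documented procedure; they assume you are logged in as a user with the `cluster-admin` role and use the `scaInterval` attribute from the table above as an example.

[source,terminal]
----
# Create the support secret with a custom attribute if it does not already exist
$ oc create secret generic support -n openshift-config --from-literal=scaInterval=1h

# If the secret already exists, add or update the attribute in place
$ oc set data secret/support -n openshift-config scaInterval=1h
----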
diff --git a/modules/insights-operator-disabling-sca.adoc b/modules/insights-operator-disabling-sca.adoc deleted file mode 100644 index 8f6a91e2df09..000000000000 --- a/modules/insights-operator-disabling-sca.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/insights-operator-simple-access.adoc -// * sd_support/remote_health_monitoring/insights-operator-simple-access.adoc - - -:_content-type: PROCEDURE -[id="insights-operator-disabling-sca_{context}"] -= Disabling simple content access import - -You can disable the importing of simple content access entitlements using the `support` secret in the `openshift-config` namespace. - -.Prerequisites - -* You are logged in to the {product-title} web console as `cluster-admin`. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-config* project. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Create a key named `scaPullDisabled` with a value of `true`, and click *Save*. -+ -The simple content access entitlement import is now disabled. -+ -[NOTE] -==== -To enable the simple content access import again, edit the `support` secret and delete the `scaPullDisabled` key. -==== diff --git a/modules/insights-operator-downloading-archive.adoc b/modules/insights-operator-downloading-archive.adoc deleted file mode 100644 index 81df8ba614c2..000000000000 --- a/modules/insights-operator-downloading-archive.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - -:_content-type: PROCEDURE -[id="insights-operator-downloading-archive_{context}"] -= Downloading your Insights Operator archive - -Insights Operator stores gathered data in an archive located in the `openshift-insights` namespace of your cluster. You can download and review the data that is gathered by the Insights Operator. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Find the name of the running pod for the Insights Operator: -+ -[source,terminal] ----- -$ oc get pods --namespace=openshift-insights -o custom-columns=:metadata.name --no-headers --field-selector=status.phase=Running ----- - -. Copy the recent data archives collected by the Insights Operator: -+ -[source,terminal] ----- -$ oc cp openshift-insights/<insights_operator_pod_name>:/var/lib/insights-operator ./insights-data <1> ----- -<1> Replace `<insights_operator_pod_name>` with the pod name output from the preceding command. - -The recent Insights Operator archives are now available in the `insights-data` directory. 
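To take a quick look at what was copied, you can list the archives and unpack the most recent one locally. This is an illustrative sketch, not part of the procedure; it assumes that the files under `insights-data` are gzipped tar archives, which is how the Insights Operator stores its gathered data.

[source,terminal]
----
# List the copied archives, newest first
$ ls -lt insights-data

# Unpack the most recent archive into a separate directory for review
$ latest=$(ls -t insights-data/*.tar.gz | head -n 1)
$ mkdir -p insights-archive && tar xzf "$latest" -C insights-archive
----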
diff --git a/modules/insights-operator-enable-obfuscation.adoc b/modules/insights-operator-enable-obfuscation.adoc deleted file mode 100644 index b095cb44ae56..000000000000 --- a/modules/insights-operator-enable-obfuscation.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc -// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc - - - -:_content-type: PROCEDURE -[id="insights-operator-enable-obfuscation_{context}"] -= Enabling Insights Operator data obfuscation - -You can enable obfuscation to mask sensitive and identifiable IPv4 addresses and cluster base domains that the Insights Operator sends to link:https://console.redhat.com[console.redhat.com]. - -[WARNING] -==== -Although this feature is available, Red Hat recommends keeping obfuscation disabled for a more effective support experience. -==== - -Obfuscation assigns non-identifying values to cluster IPv4 addresses, and uses a translation table that is retained in memory to change IP addresses to their obfuscated versions throughout the Insights Operator archive before uploading the data to link:https://console.redhat.com[console.redhat.com]. - -For cluster base domains, obfuscation changes the base domain to a hardcoded substring. For example, `cluster-api.openshift.example.com` becomes `cluster-api.<CLUSTER_BASE_DOMAIN>`. - -The following procedure enables obfuscation using the `support` secret in the `openshift-config` namespace. - -.Prerequisites - -* You are logged in to the {product-title} web console as `cluster-admin`. - -.Procedure - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-config* project. -. Search for the *support* secret using the *Search by name* field. If it does not exist, click *Create* -> *Key/value secret* to create it. -. Click the *Options* menu {kebab}, and then click *Edit Secret*. -. Click *Add Key/Value*. -. Create a key named `enableGlobalObfuscation` with a value of `true`, and click *Save*. -. Navigate to *Workloads* -> *Pods* -. Select the `openshift-insights` project. -. Find the `insights-operator` pod. -. To restart the `insights-operator` pod, click the *Options* menu {kebab}, and then click *Delete Pod*. - -.Verification - -. Navigate to *Workloads* -> *Secrets*. -. Select the *openshift-insights* project. -. Search for the *obfuscation-translation-table* secret using the *Search by name* field. - -If the `obfuscation-translation-table` secret exists, then obfuscation is enabled and working. - -Alternatively, you can inspect `/insights-operator/gathers.json` in your Insights Operator archive for the value `"is_global_obfuscation_enabled": true`. diff --git a/modules/insights-operator-gather-duration.adoc b/modules/insights-operator-gather-duration.adoc deleted file mode 100644 index 9159f06550b5..000000000000 --- a/modules/insights-operator-gather-duration.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/using-insights-operator.adoc - -:_content-type: PROCEDURE -[id="insights-operator-gather-duration_{context}"] -= Viewing Insights Operator gather durations - -You can view the time it takes for the Insights Operator to gather the information contained in the archive. This helps you to understand Insights Operator resource usage and issues with Insights Advisor. - - -.Prerequisites - -* A recent copy of your Insights Operator archive. 
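In addition to the procedure that follows, you can summarize the durations from the command line. The following one-liner is a sketch only; it assumes that `jq` is installed, that the archive has been extracted, and that you run the command from the top-level directory of the extracted archive. The filter looks for any objects that carry a `duration_in_ms` field rather than assuming a particular layout for `gathers.json`.

[source,terminal]
----
# Print the duration and name of every gather operation found in gathers.json
$ jq -r '.. | objects | select(has("duration_in_ms")) | "\(.duration_in_ms) ms\t\(.name)"' insights-operator/gathers.json
----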
- -.Procedure - -. From your archive, open `/insights-operator/gathers.json`. -+ -The file contains a list of Insights Operator gather operations: -+ -[source,json] ----- - { - "name": "clusterconfig/authentication", - "duration_in_ms": 730, <1> - "records_count": 1, - "errors": null, - "panic": null - } ----- -+ -<1> `duration_in_ms` is the amount of time in milliseconds for each gather operation. - -. Inspect each gather operation for abnormalities. diff --git a/modules/insights-operator-manual-upload.adoc b/modules/insights-operator-manual-upload.adoc deleted file mode 100644 index b705a2c1b211..000000000000 --- a/modules/insights-operator-manual-upload.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc -// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc - - - -:_content-type: PROCEDURE -[id="insights-operator-manual-upload_{context}"] -= Uploading an Insights Operator archive - -You can manually upload an Insights Operator archive to link:https://console.redhat.com[console.redhat.com] to diagnose potential issues. - -.Prerequisites - -* You are logged in to {product-title} as `cluster-admin`. -* You have a workstation with unrestricted internet access. -* You have created a copy of the Insights Operator archive. - -.Procedure - -. Download the `dockerconfig.json` file: -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --to=. ----- -. Copy your `"cloud.openshift.com"` `"auth"` token from the `dockerconfig.json` file: -+ -[source,json,subs="+quotes"] ----- -{ - "auths": { - "cloud.openshift.com": { - "auth": "_<your_token>_", - "email": "asd@redhat.com" - } -} ----- - - -. Upload the archive to link:https://console.redhat.com[console.redhat.com]: -+ -[source,terminal,subs="+quotes"] ----- -$ curl -v -H "User-Agent: insights-operator/one10time200gather184a34f6a168926d93c330 cluster/_<cluster_id>_" -H "Authorization: Bearer _<your_token>_" -F "upload=@_<path_to_archive>_; type=application/vnd.redhat.openshift.periodic+tar" https://console.redhat.com/api/ingress/v1/upload ----- -where `_<cluster_id>_` is your cluster ID, `_<your_token>_` is the token from your pull secret, and `_<path_to_archive>_` is the path to the Insights Operator archive. -+ -If the operation is successful, the command returns a `"request_id"` and `"account_number"`: -+ -.Example output -+ -[source,terminal] ----- -* Connection #0 to host console.redhat.com left intact -{"request_id":"393a7cf1093e434ea8dd4ab3eb28884c","upload":{"account_number":"6274079"}}% ----- - -.Verification steps - -. Log in to link:https://console.redhat.com/openshift[]. - -. Click the *Clusters* menu in the left pane. - -. To display the details of the cluster, click the cluster name. - -. Open the *Insights Advisor* tab of the cluster. -+ -If the upload was successful, the tab displays one of the following: -+ -* *Your cluster passed all recommendations*, if Insights Advisor did not identify any issues. - -* A list of issues that Insights Advisor has detected, prioritized by risk (low, moderate, important, and critical). 
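The token handling in the upload step can also be scripted. The following sketch is not part of the documented procedure; it assumes that the pull secret was extracted with `oc extract` as shown, which writes a file named `.dockerconfigjson` to the current directory, and that `jq` is available on the workstation.

[source,terminal]
----
# Read the cloud.openshift.com token and the cluster ID into shell variables
$ TOKEN=$(jq -r '.auths["cloud.openshift.com"].auth' .dockerconfigjson)
$ CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}')

# Upload the archive with the same endpoint and headers as the manual command
$ curl -v -H "User-Agent: insights-operator/one10time200gather184a34f6a168926d93c330 cluster/$CLUSTER_ID" \
    -H "Authorization: Bearer $TOKEN" \
    -F "upload=@<path_to_archive>; type=application/vnd.redhat.openshift.periodic+tar" \
    https://console.redhat.com/api/ingress/v1/upload
----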
diff --git a/modules/insights-operator-new-pull-secret-disabled.adoc b/modules/insights-operator-new-pull-secret-disabled.adoc deleted file mode 100644 index f1078cb7a4c5..000000000000 --- a/modules/insights-operator-new-pull-secret-disabled.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/opting-out-of-remote-health-reporting.adoc - -:_content-type: PROCEDURE -[id="insights-operator-new-pull-secret_{context}"] -= Modifying the global cluster pull secret to disable remote health reporting - -You can modify your existing global cluster pull secret to disable remote health reporting. This disables both Telemetry and the Insights Operator. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Download the global cluster pull secret to your local file system. -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --to=. ----- - -. In a text editor, edit the `.dockerconfigjson` file that was downloaded. - -. Remove the `cloud.openshift.com` JSON entry, for example: -+ -[source,json] ----- -"cloud.openshift.com":{"auth":"<hash>","email":"<email_address>"} ----- - -. Save the file. - -You can now update your cluster to use this modified pull secret. diff --git a/modules/insights-operator-new-pull-secret-enable.adoc b/modules/insights-operator-new-pull-secret-enable.adoc deleted file mode 100644 index f563ef1c40e0..000000000000 --- a/modules/insights-operator-new-pull-secret-enable.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/enabling-remote-health-reporting.adoc -// * sd_support/remote_health_monitoring/enabling-remote-health-reporting.adoc - -:_content-type: PROCEDURE -[id="insights-operator-new-pull-secret-enable_{context}"] -= Modifying your global cluster pull secret to enable remote health reporting - -You can modify your existing global cluster pull secret to enable remote health reporting. If you have previously disabled remote health monitoring, you must first download a new pull secret with your `console.openshift.com` access token from {cluster-manager-first}. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Access to {cluster-manager}. - -.Procedure - -. Navigate to link:https://console.redhat.com/openshift/downloads[https://console.redhat.com/openshift/downloads]. -. From *Tokens* -> *Pull Secret*, click *Download*. -+ -The file `pull-secret.txt` containing your `cloud.openshift.com` access token in JSON format downloads: -+ -[source,json,subs="+quotes"] ----- -{ - "auths": { - "cloud.openshift.com": { - "auth": "_<your_token>_", - "email": "_<email_address>_" - } -} ----- - -. Download the global cluster pull secret to your local file system. -+ -[source,terminal] ----- -$ oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' > pull-secret ----- -. Make a backup copy of your pull secret. -+ -[source,terminal] ----- -$ cp pull-secret pull-secret-backup ----- -. Open the `pull-secret` file in a text editor. -. Append the `cloud.openshift.com` JSON entry from `pull-secret.txt` into `auths`. -. Save the file. -. Update the secret in your cluster. 
-+ -[source,terminal] ----- -oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=pull-secret ----- - -It may take several minutes for the secret to update and your cluster to begin reporting. - -.Verification - -. Navigate to the {product-title} Web Console Overview page. -. *Insights* in the *Status* tile reports the number of issues found. diff --git a/modules/insights-operator-one-time-gather.adoc b/modules/insights-operator-one-time-gather.adoc deleted file mode 100644 index b70d565d461b..000000000000 --- a/modules/insights-operator-one-time-gather.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc -// * sd_support/remote_health_monitoring/remote-health-reporting-from-restricted-network.adoc - - - -:_content-type: PROCEDURE -[id="insights-operator-one-time-gather_{context}"] -= Running an Insights Operator gather operation - -You must run a gather operation to create an Insights Operator archive. - -.Prerequisites - -* You are logged in to {product-title} as `cluster-admin`. - -.Procedure - -. Create a file named `gather-job.yaml` using this template: -+ -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.13/docs/gather-job.yaml[] ----- -. Copy your `insights-operator` image version: -+ -[source,terminal] ----- -$ oc get -n openshift-insights deployment insights-operator -o yaml ----- -. Paste your image version in `gather-job.yaml`: -+ -[source,yaml,subs="+quotes"] ----- -initContainers: - - name: insights-operator - image: _<your_insights_operator_image_version>_ - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: ----- -. Create the gather job: -+ -[source,terminal] ----- -$ oc apply -n openshift-insights -f gather-job.yaml ----- -. Find the name of the job pod: -+ -[source,terminal] ----- -$ oc describe -n openshift-insights job/insights-operator-job ----- -+ -.Example output -[source,terminal,subs="+quotes"] ----- -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulCreate 7m18s job-controller Created pod: insights-operator-job-_<your_job>_ ----- -where `insights-operator-job-_<your_job>_` is the name of the pod. - -. Verify that the operation has finished: -+ -[source,terminal,subs="+quotes"] ----- -$ oc logs -n openshift-insights insights-operator-job-_<your_job>_ insights-operator ----- -+ -.Example output -[source,terminal] ----- -I0407 11:55:38.192084 1 diskrecorder.go:34] Wrote 108 records to disk in 33ms ----- -. Save the created archive: -+ -[source,terminal,subs="+quotes"] ----- -$ oc cp openshift-insights/insights-operator-job-_<your_job>_:/var/lib/insights-operator ./insights-data ----- -. 
Clean up the job: -+ -[source,terminal] ----- -$ oc delete -n openshift-insights job insights-operator-job ----- diff --git a/modules/insights-operator-showing-data-collected-from-the-cluster.adoc b/modules/insights-operator-showing-data-collected-from-the-cluster.adoc deleted file mode 100644 index 64dbaf590eda..000000000000 --- a/modules/insights-operator-showing-data-collected-from-the-cluster.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc -// * support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc - -:_content-type: PROCEDURE -[id="insights-operator-showing-data-collected-from-the-cluster_{context}"] -= Showing data collected by the Insights Operator - -You can review the data that is collected by the Insights Operator. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Find the name of the currently running pod for the Insights Operator: -+ -[source,terminal] ----- -$ INSIGHTS_OPERATOR_POD=$(oc get pods --namespace=openshift-insights -o custom-columns=:metadata.name --no-headers --field-selector=status.phase=Running) ----- - -. Copy the recent data archives collected by the Insights Operator: -+ -[source,terminal] ----- -$ oc cp openshift-insights/$INSIGHTS_OPERATOR_POD:/var/lib/insights-operator ./insights-data ----- - -The recent Insights Operator archives are now available in the `insights-data` directory. diff --git a/modules/insights-operator-what-information-is-collected.adoc b/modules/insights-operator-what-information-is-collected.adoc deleted file mode 100644 index 72e0cbcbf9a4..000000000000 --- a/modules/insights-operator-what-information-is-collected.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * support/remote_health_monitoring/about-remote-health-monitoring.adoc -// * sd_support/remote_health_monitoring/about-remote-health-monitoring.adoc - -[id="insights-operator-what-information-is-collected_{context}"] -= Information collected by the Insights Operator - -The following information is collected by the Insights Operator: - -* General information about your cluster and its components to identify issues that are specific to your {product-title} version and environment -* Configuration files, such as the image registry configuration, of your cluster to determine incorrect settings and issues that are specific to parameters you set -* Errors that occur in the cluster components -* Progress information of running updates, and the status of any component upgrades -* Details of the platform that {product-title} is deployed on, such as Amazon Web Services, and the region that the cluster is located in -ifndef::openshift-dedicated[] -* Cluster workload information transformed into discreet Secure Hash Algorithm (SHA) values, which allows Red Hat to assess workloads for security and version vulnerabilities without disclosing sensitive details -endif::openshift-dedicated[] -* If an Operator reports an issue, information is collected about core {product-title} pods in the `openshift-*` and `kube-*` projects. This includes state, resource, security context, volume information, and more. 
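To see these categories in a real archive, you can inspect the file paths inside a downloaded archive. This is an illustrative sketch that assumes you have already copied the archives into the `insights-data` directory, as described in "Showing data collected by the Insights Operator", and that the files are gzipped tar archives.

[source,terminal]
----
# List the top-level data categories gathered in the newest archive
$ tar tzf "$(ls -t insights-data/*.tar.gz | head -n 1)" | cut -d/ -f1-2 | sort -u
----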
diff --git a/modules/insights-operator.adoc b/modules/insights-operator.adoc deleted file mode 100644 index c6b730da4cf7..000000000000 --- a/modules/insights-operator.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * installing/cluster-capabilities.adoc - -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -ifeval::["{context}" == "cluster-operators-ref"] -:operator-ref: -endif::[] - -:_content-type: REFERENCE -[id="insights-operator_{context}"] -ifdef::operator-ref[= Insights Operator] -ifdef::cluster-caps[= Insights capability] - -ifdef::operator-ref[] - -[NOTE] -==== -The Insights Operator is an optional cluster capability that can be disabled by cluster administrators during installation. For more information about optional cluster capabilities, see "Cluster capabilities" in _Installing_. -==== - -endif::operator-ref[] - -[discrete] -== Purpose - -ifdef::cluster-caps[] - -The Insights Operator provides the features for the `Insights` capability. - -endif::cluster-caps[] - -The Insights Operator gathers {product-title} configuration data and sends it to Red Hat. The data is used to produce proactive insights recommendations about potential issues that a cluster might be exposed to. These insights are communicated to cluster administrators through Insights Advisor on link:https://console.redhat.com/[console.redhat.com]. - -ifdef::operator-ref[] - -[discrete] -== Project - -link:https://github.com/openshift/insights-operator[insights-operator] - -[discrete] -== Configuration - -No configuration is required. - -endif::operator-ref[] - -[discrete] -== Notes - -Insights Operator complements {product-title} Telemetry. - -ifeval::["{context}" == "cluster-operators-ref"] -:!operator-ref: -endif::[] - -ifeval::["{context}" == "cluster-caps"] -:!cluster-caps: -endif::[] \ No newline at end of file diff --git a/modules/inspecting-pod-and-container-logs.adoc b/modules/inspecting-pod-and-container-logs.adoc deleted file mode 100644 index 011170bf8e26..000000000000 --- a/modules/inspecting-pod-and-container-logs.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/investigating-pod-issues.adoc - -:_content-type: PROCEDURE -[id="inspecting-pod-and-container-logs_{context}"] -= Inspecting pod and container logs - -You can inspect pod and container logs for warnings and error messages related to explicit pod failures. Depending on policy and exit code, pod and container logs remain available after pods have been terminated. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your API service is still functional. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Query logs for a specific pod: -+ -[source,terminal] ----- -$ oc logs <pod_name> ----- - -. Query logs for a specific container within a pod: -+ -[source,terminal] ----- -$ oc logs <pod_name> -c <container_name> ----- -+ -Logs retrieved using the preceding `oc logs` commands are composed of messages sent to stdout within pods or containers. - -. Inspect logs contained in `/var/log/` within a pod. -.. List log files and subdirectories contained in `/var/log` within a pod: -+ -[source,terminal] ----- -$ oc exec <pod_name> ls -alh /var/log ----- -+ -.. Query a specific log file contained in `/var/log` within a pod: -+ -[source,terminal] ----- -$ oc exec <pod_name> cat /var/log/<path_to_log> ----- -.. 
List log files and subdirectories contained in `/var/log` within a specific container: -+ -[source,terminal] ----- -$ oc exec <pod_name> -c <container_name> ls /var/log ----- -+ -.. Query a specific log file contained in `/var/log` within a specific container: -+ -[source,terminal] ----- -$ oc exec <pod_name> -c <container_name> cat /var/log/<path_to_log> ----- diff --git a/modules/install-booting-from-an-iso-over-http-redfish.adoc b/modules/install-booting-from-an-iso-over-http-redfish.adoc deleted file mode 100644 index 80d732fc82a2..000000000000 --- a/modules/install-booting-from-an-iso-over-http-redfish.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-booting-from-an-iso-over-http-redfish_{context}"] -= Booting from an HTTP-hosted ISO image using the Redfish API - -You can provision hosts in your network using ISOs that you install using the Redfish Baseboard Management Controller (BMC) API. - -.Prerequisites - -. Download the installation {op-system-first} ISO. - -.Procedure - -. Copy the ISO file to an HTTP server accessible in your network. - -. Boot the host from the hosted ISO file, for example: - -.. Call the redfish API to set the hosted ISO as the `VirtualMedia` boot media by running the following command: -+ -[source,terminal] ----- -$ curl -k -u <bmc_username>:<bmc_password> -d '{"Image":"<hosted_iso_file>", "Inserted": true}' -H "Content-Type: application/json" -X POST <host_bmc_address>/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia ----- -+ -Where: -+ --- -<bmc_username>:<bmc_password>:: Is the username and password for the target host BMC. -<hosted_iso_file>:: Is the URL for the hosted installation ISO, for example: `http://webserver.example.com/rhcos-live-minimal.iso`. The ISO must be accessible from the target host machine. -<host_bmc_address>:: Is the BMC IP address of the target host machine. --- - -.. Set the host to boot from the `VirtualMedia` device by running the following command: -+ -[source,terminal] ----- -$ curl -k -u <bmc_username>:<bmc_password> -X PATCH -H 'Content-Type: application/json' -d '{"Boot": {"BootSourceOverrideTarget": "Cd", "BootSourceOverrideMode": "UEFI", "BootSourceOverrideEnabled": "Once"}}' <host_bmc_address>/redfish/v1/Systems/System.Embedded.1 ----- - -.. Reboot the host: -+ -[source,terminal] ----- -$ curl -k -u <bmc_username>:<bmc_password> -d '{"ResetType": "ForceRestart"}' -H 'Content-type: application/json' -X POST <host_bmc_address>/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset ----- - -.. Optional: If the host is powered off, you can boot it using the `{"ResetType": "On"}` switch. 
Run the following command: -+ -[source,terminal] ----- -$ curl -k -u <bmc_username>:<bmc_password> -d '{"ResetType": "On"}' -H 'Content-type: application/json' -X POST <host_bmc_address>/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset ----- diff --git a/modules/install-creating-install-config-aws-local-zones.adoc b/modules/install-creating-install-config-aws-local-zones.adoc deleted file mode 100644 index 4ea327ea350c..000000000000 --- a/modules/install-creating-install-config-aws-local-zones.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="install-creating-install-config-aws-local-zones_{context}"] -= Modifying an installation configuration file to use AWS Local Zones subnets - -Modify an `install-config.yaml` file to include AWS Local Zones subnets. - -.Prerequisites - -* You created subnets by using the procedure "Creating a subnet in AWS Local Zones". -* You created an `install-config.yaml` file by using the procedure "Creating the installation configuration file". - -.Procedure - -* Add the VPC and Local Zone subnets as the values of the `platform.aws.subnets` property. As an example: -+ -[source,yaml] ----- -... -platform: - aws: - region: us-west-2 - subnets: <1> - - publicSubnetId-1 - - publicSubnetId-2 - - publicSubnetId-3 - - privateSubnetId-1 - - privateSubnetId-2 - - privateSubnetId-3 - - publicSubnetId-LocalZone-1 -... ----- -<1> List of subnets created in the Availability and Local Zones. \ No newline at end of file diff --git a/modules/install-ibm-cloud-configuring-the-install-config-file.adoc b/modules/install-ibm-cloud-configuring-the-install-config-file.adoc deleted file mode 100644 index da7769187be2..000000000000 --- a/modules/install-ibm-cloud-configuring-the-install-config-file.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="configuring-the-install-config-file_{context}"] -= Configuring the install-config.yaml file - -The `install-config.yaml` file requires some additional details. Most of the information is teaching the installer and the resulting cluster enough about the available IBM Cloud® hardware so that it is able to fully manage it. The material difference between installing on bare metal and installing on IBM Cloud is that you must explicitly set the privilege level for IPMI in the BMC section of the `install-config.yaml` file. - -.Procedure - -. Configure `install-config.yaml`. Change the appropriate variables to match the environment, including `pullSecret` and `sshKey`. 
-+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: <domain> -metadata: - name: <cluster_name> -networking: - machineNetwork: - - cidr: <public-cidr> - networkType: OVNKubernetes -compute: -- name: worker - replicas: 2 -controlPlane: - name: master - replicas: 3 - platform: - baremetal: {} -platform: - baremetal: - apiVIP: <api_ip> - ingressVIP: <wildcard_ip> - provisioningNetworkInterface: <NIC1> - provisioningNetworkCIDR: <CIDR> - hosts: - - name: openshift-master-0 - role: master - bmc: - address: ipmi://10.196.130.145?privilegelevel=OPERATOR <1> - username: root - password: <password> - bootMACAddress: 00:e0:ed:6a:ca:b4 <2> - rootDeviceHints: - deviceName: "/dev/sda" - - name: openshift-worker-0 - role: worker - bmc: - address: ipmi://<out-of-band-ip>?privilegelevel=OPERATOR <1> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> <2> - rootDeviceHints: - deviceName: "/dev/sda" -pullSecret: '<pull_secret>' -sshKey: '<ssh_pub_key>' ----- -+ -<1> The `bmc.address` provides a `privilegelevel` configuration setting with the value set to `OPERATOR`. This is required for IBM Cloud. -<2> Add the MAC address of the private `provisioning` network NIC for the corresponding node. -+ -[NOTE] -==== -You can use the `ibmcloud` command-line utility to retrieve the password. - -[source,terminal] ----- -$ ibmcloud sl hardware detail <id> --output JSON | \ - jq '"(.networkManagementIpAddress) (.remoteManagementAccounts[0].password)"' ----- - -Replace `<id>` with the ID of the node. -==== - -. Create a directory to store the cluster configuration: -+ -[source,terminal] ----- -$ mkdir ~/clusterconfigs ----- - -. Copy the `install-config.yaml` file into the directory: -+ -[source,terminal] ----- -$ cp install-config.yaml ~/clusterconfig ----- - -. Ensure all bare metal nodes are powered off prior to installing the {product-title} cluster: -+ -[source,terminal] ----- -$ ipmitool -I lanplus -U <user> -P <password> -H <management_server_ip> power off ----- - -. Remove old bootstrap resources if any are left over from a previous deployment attempt: -+ -[source,bash] ----- -for i in $(sudo virsh list | tail -n +3 | grep bootstrap | awk {'print $2'}); -do - sudo virsh destroy $i; - sudo virsh undefine $i; - sudo virsh vol-delete $i --pool $i; - sudo virsh vol-delete $i.ign --pool $i; - sudo virsh pool-destroy $i; - sudo virsh pool-undefine $i; -done ----- diff --git a/modules/install-ibm-cloud-configuring-the-public-subnet.adoc b/modules/install-ibm-cloud-configuring-the-public-subnet.adoc deleted file mode 100644 index cd16ed93358a..000000000000 --- a/modules/install-ibm-cloud-configuring-the-public-subnet.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="configuring-the-public-subnet_{context}"] -= Configuring the public subnet - -All of the {product-title} cluster nodes must be on the public subnet. IBM Cloud® does not provide a DHCP server on the subnet. Set it up separately on the provisioner node. - -You must reset the BASH variables defined when preparing the provisioner node. Rebooting the provisioner node after preparing it will delete the BASH variables previously set. - -.Procedure - -. Install `dnsmasq`: -+ -[source,terminal] ----- -$ sudo dnf install dnsmasq ----- - -. Open the `dnsmasq` configuration file: -+ -[source,terminal] ----- -$ sudo vi /etc/dnsmasq.conf ----- - -. 
Add the following configuration to the `dnsmasq` configuration file: -+ -[source,text] ----- -interface=baremetal -except-interface=lo -bind-dynamic -log-dhcp - -dhcp-range=<ip_addr>,<ip_addr>,<pub_cidr> <1> -dhcp-option=baremetal,121,0.0.0.0/0,<pub_gateway>,<prvn_priv_ip>,<prvn_pub_ip> <2> - -dhcp-hostsfile=/var/lib/dnsmasq/dnsmasq.hostsfile ----- -+ -<1> Set the DHCP range. Replace both instances of `<ip_addr>` with one unused IP address from the public subnet so that the `dhcp-range` for the `baremetal` network begins and ends with the same the IP address. Replace `<pub_cidr>` with the CIDR of the public subnet. -+ -<2> Set the DHCP option. Replace `<pub_gateway>` with the IP address of the gateway for the `baremetal` network. Replace `<prvn_priv_ip>` with the IP address of the provisioner node's private IP address on the `provisioning` network. Replace `<prvn_pub_ip>` with the IP address of the provisioner node's public IP address on the `baremetal` network. -+ -To retrieve the value for `<pub_cidr>`, execute: -+ -[source,terminal] ----- -$ ibmcloud sl subnet detail <publicsubnetid> --output JSON | jq .cidr ----- -+ -Replace `<publicsubnetid>` with the ID of the public subnet. -+ -To retrieve the value for `<pub_gateway>`, execute: -+ -[source,terminal] ----- -$ ibmcloud sl subnet detail <publicsubnetid> --output JSON | jq .gateway -r ----- -+ -Replace `<publicsubnetid>` with the ID of the public subnet. -+ -To retrieve the value for `<prvn_priv_ip>`, execute: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail <id> --output JSON | \ - jq .primaryBackendIpAddress -r ----- -+ -Replace `<id>` with the ID of the provisioner node. -+ -To retrieve the value for `<prvn_pub_ip>`, execute: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail <id> --output JSON | jq .primaryIpAddress -r ----- -+ -Replace `<id>` with the ID of the provisioner node. - -. Obtain the list of hardware for the cluster: -+ -[source,terminal] ----- -$ ibmcloud sl hardware list ----- - -. Obtain the MAC addresses and IP addresses for each node: -+ -[source,terminal] ----- -$ ibmcloud sl hardware detail <id> --output JSON | \ - jq '.networkComponents[] | \ - "\(.primaryIpAddress) \(.macAddress)"' | grep -v null ----- -+ -Replace `<id>` with the ID of the node. -+ -.Example output -[source,terminal] ----- -"10.196.130.144 00:e0:ed:6a:ca:b4" -"141.125.65.215 00:e0:ed:6a:ca:b5" ----- -+ -Make a note of the MAC address and IP address of the public network. Make a separate note of the MAC address of the private network, which you will use later in the `install-config.yaml` file. Repeat this procedure for each node until you have all the public MAC and IP addresses for the public `baremetal` network, and the MAC addresses of the private `provisioning` network. - -. Add the MAC and IP address pair of the public `baremetal` network for each node into the `dnsmasq.hostsfile` file: -+ -[source,terminal] ----- -$ sudo vim /var/lib/dnsmasq/dnsmasq.hostsfile ----- -+ -.Example input -[source,text] ----- -00:e0:ed:6a:ca:b5,141.125.65.215,master-0 -<mac>,<ip>,master-1 -<mac>,<ip>,master-2 -<mac>,<ip>,worker-0 -<mac>,<ip>,worker-1 -... ----- -+ -Replace `<mac>,<ip>` with the public MAC address and public IP address of the corresponding node name. - -. Start `dnsmasq`: -+ -[source,terminal] ----- -$ sudo systemctl start dnsmasq ----- - -. Enable `dnsmasq` so that it starts when booting the node: -+ -[source,terminal] ----- -$ sudo systemctl enable dnsmasq ----- - -. 
Verify `dnsmasq` is running: -+ -[source,terminal] ----- -$ sudo systemctl status dnsmasq ----- -+ -.Example output -[source,terminal] ----- -● dnsmasq.service - DNS caching server. -Loaded: loaded (/usr/lib/systemd/system/dnsmasq.service; enabled; vendor preset: disabled) -Active: active (running) since Tue 2021-10-05 05:04:14 CDT; 49s ago -Main PID: 3101 (dnsmasq) -Tasks: 1 (limit: 204038) -Memory: 732.0K -CGroup: /system.slice/dnsmasq.service -└─3101 /usr/sbin/dnsmasq -k ----- - -. Open ports `53` and `67` with UDP protocol: -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port 53/udp --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port 67/udp --permanent ----- - -. Add `provisioning` to the external zone with masquerade: -+ -[source,terminal] ----- -$ sudo firewall-cmd --change-zone=provisioning --zone=external --permanent ----- -+ -This step ensures network address translation for IPMI calls to the management subnet. - -. Reload the `firewalld` configuration: -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- diff --git a/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc b/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc deleted file mode 100644 index dbdf3943b497..000000000000 --- a/modules/install-ibm-cloud-preparing-the-provisioner-node.adoc +++ /dev/null @@ -1,273 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -:_content-type: PROCEDURE -[id="preparing-the-provisioner-node-for-openshift-install-on-ibm-cloud_{context}"] -= Preparing the provisioner node for {product-title} installation on IBM Cloud - -Perform the following steps to prepare the provisioner node. - -.Procedure - -. Log in to the provisioner node via `ssh`. - -. Create a non-root user (`kni`) and provide that user with `sudo` privileges: -+ -[source,terminal] ----- -# useradd kni ----- -+ -[source,terminal] ----- -# passwd kni ----- -+ -[source,terminal] ----- -# echo "kni ALL=(root) NOPASSWD:ALL" | tee -a /etc/sudoers.d/kni ----- -+ -[source,terminal] ----- -# chmod 0440 /etc/sudoers.d/kni ----- - -. Create an `ssh` key for the new user: -+ -[source,terminal] ----- -# su - kni -c "ssh-keygen -f /home/kni/.ssh/id_rsa -N ''" ----- - -. Log in as the new user on the provisioner node: -+ -[source,terminal] ----- -# su - kni ----- - -. Use Red Hat Subscription Manager to register the provisioner node: -+ -[source,terminal] ----- -$ sudo subscription-manager register --username=<user> --password=<pass> --auto-attach ----- -+ -[source,terminal] ----- -$ sudo subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms \ - --enable=rhel-8-for-x86_64-baseos-rpms ----- -+ -[NOTE] -==== -For more information about Red Hat Subscription Manager, see link:https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html-single/rhsm/index[Using and Configuring Red Hat Subscription Manager]. -==== - -. Install the following packages: -+ -[source,terminal] ----- -$ sudo dnf install -y libvirt qemu-kvm mkisofs python3-devel jq ipmitool ----- - -. Modify the user to add the `libvirt` group to the newly created user: -+ -[source,terminal] ----- -$ sudo usermod --append --groups libvirt kni ----- - -. Start `firewalld`: -+ -[source,terminal] ----- -$ sudo systemctl start firewalld ----- - -. Enable `firewalld`: -+ -[source,terminal] ----- -$ sudo systemctl enable firewalld ----- - -. 
Start the `http` service: -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone=public --add-service=http --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -. Start and enable the `libvirtd` service: -+ -[source,terminal] ----- -$ sudo systemctl enable libvirtd --now ----- - -. Set the ID of the provisioner node: -+ -[source,terminal] ----- -$ PRVN_HOST_ID=<ID> ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl hardware list ----- - -. Set the ID of the public subnet: -+ -[source,terminal] ----- -$ PUBLICSUBNETID=<ID> ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl subnet list ----- - -. Set the ID of the private subnet: -+ -[source,terminal] ----- -$ PRIVSUBNETID=<ID> ----- -+ -You can view the ID with the following `ibmcloud` command: -+ -[source,terminal] ----- -$ ibmcloud sl subnet list ----- - -. Set the provisioner node public IP address: -+ -[source,terminal] ----- -$ PRVN_PUB_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | jq .primaryIpAddress -r) ----- - -. Set the CIDR for the public network: -+ -[source,terminal] ----- -$ PUBLICCIDR=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .cidr) ----- - -. Set the IP address and CIDR for the public network: -+ -[source,terminal] ----- -$ PUB_IP_CIDR=$PRVN_PUB_IP/$PUBLICCIDR ----- - -. Set the gateway for the public network: -+ -[source,terminal] ----- -$ PUB_GATEWAY=$(ibmcloud sl subnet detail $PUBLICSUBNETID --output JSON | jq .gateway -r) ----- - -. Set the private IP address of the provisioner node: -+ -[source,terminal] ----- -$ PRVN_PRIV_IP=$(ibmcloud sl hardware detail $PRVN_HOST_ID --output JSON | \ - jq .primaryBackendIpAddress -r) ----- - -. Set the CIDR for the private network: -+ -[source,terminal] ----- -$ PRIVCIDR=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .cidr) ----- - -. Set the IP address and CIDR for the private network: -+ -[source,terminal] ----- -$ PRIV_IP_CIDR=$PRVN_PRIV_IP/$PRIVCIDR ----- - -. Set the gateway for the private network: -+ -[source,terminal] ----- -$ PRIV_GATEWAY=$(ibmcloud sl subnet detail $PRIVSUBNETID --output JSON | jq .gateway -r) ----- - -. Set up the bridges for the `baremetal` and `provisioning` networks: -+ -[source,terminal] ----- -$ sudo nohup bash -c " - nmcli --get-values UUID con show | xargs -n 1 nmcli con delete - nmcli connection add ifname provisioning type bridge con-name provisioning - nmcli con add type bridge-slave ifname eth1 master provisioning - nmcli connection add ifname baremetal type bridge con-name baremetal - nmcli con add type bridge-slave ifname eth2 master baremetal - nmcli connection modify baremetal ipv4.addresses $PUB_IP_CIDR ipv4.method manual ipv4.gateway $PUB_GATEWAY - nmcli connection modify provisioning ipv4.addresses 172.22.0.1/24,$PRIV_IP_CIDR ipv4.method manual - nmcli connection modify provisioning +ipv4.routes \"10.0.0.0/8 $PRIV_GATEWAY\" - nmcli con down baremetal - nmcli con up baremetal - nmcli con down provisioning - nmcli con up provisioning - init 6 -" ----- -+ -[NOTE] -==== -For `eth1` and `eth2`, substitute the appropriate interface name, as needed. -==== - -. If required, SSH back into the `provisioner` node: -+ -[source,terminal] ----- -# ssh kni@provisioner.<cluster-name>.<domain> ----- - -. 
Verify the connection bridges have been properly created: -+ -[source,terminal] ----- -$ sudo nmcli con show ----- -+ -.Example output -[source,terminal] ----- -NAME UUID TYPE DEVICE -baremetal 4d5133a5-8351-4bb9-bfd4-3af264801530 bridge baremetal -provisioning 43942805-017f-4d7d-a2c2-7cb3324482ed bridge provisioning -virbr0 d9bca40f-eee1-410b-8879-a2d4bb0465e7 bridge virbr0 -bridge-slave-eth1 76a8ed50-c7e5-4999-b4f6-6d9014dd0812 ethernet eth1 -bridge-slave-eth2 f31c3353-54b7-48de-893a-02d2b34c4736 ethernet eth2 ----- - -. Create a `pull-secret.txt` file: -+ -[source,terminal] ----- -$ vim pull-secret.txt ----- -+ -In a web browser, navigate to link:https://console.redhat.com/openshift/install/metal/user-provisioned[Install on Bare Metal with user-provisioned infrastructure]. In step 1, click **Download pull secret**. Paste the contents into the `pull-secret.txt` file and save the contents in the `kni` user's home directory. diff --git a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc b/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc deleted file mode 100644 index 078aebf0790e..000000000000 --- a/modules/install-ibm-cloud-setting-up-ibm-cloud-infrastructure.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// This is included in the following assemblies: -// -// installing_ibm_cloud/install-ibm-cloud-installing-on-ibm-cloud.adoc - -[id="setting-up-ibm-cloud-infrastructure_{context}"] -= Setting up IBM Cloud infrastructure - -To deploy an {product-title} cluster on IBM Cloud®, you must first provision the IBM Cloud nodes. - -[IMPORTANT] -==== -Red Hat supports IPMI and PXE on the `provisioning` network only. Red Hat has not tested Red Fish, virtual media, or other complementary technologies such as Secure Boot on IBM Cloud deployments. The `provisioning` network is required. -==== - -You can customize IBM Cloud nodes using the IBM Cloud API. When creating IBM Cloud nodes, you must consider the following requirements. - -[discrete] -== Use one data center per cluster - -All nodes in the {product-title} cluster must run in the same IBM Cloud data center. - -[discrete] -== Create public and private VLANs - -Create all nodes with a single public VLAN and a single private VLAN. - -[discrete] -== Ensure subnets have sufficient IP addresses - -IBM Cloud public VLAN subnets use a `/28` prefix by default, which provides 16 IP addresses. That is sufficient for a cluster consisting of three control plane nodes, four worker nodes, and two IP addresses for the API VIP and Ingress VIP on the `baremetal` network. For larger clusters, you might need a smaller prefix. - -IBM Cloud private VLAN subnets use a `/26` prefix by default, which provides 64 IP addresses. IBM Cloud will use private network IP addresses to access the Baseboard Management Controller (BMC) of each node. {product-title} creates an additional subnet for the `provisioning` network. Network traffic for the `provisioning` network subnet routes through the private VLAN. For larger clusters, you might need a smaller prefix. - -.IP addresses per prefix -[options="header"] -|==== -|IP addresses |Prefix -|32| `/27` -|64| `/26` -|128| `/25` -|256| `/24` -|==== - -[discrete] -== Configuring NICs - -{product-title} deploys with two networks: - -- `provisioning`: The `provisioning` network is a non-routable network used for provisioning the underlying operating system on each node that is a part of the {product-title} cluster. - -- `baremetal`: The `baremetal` network is a routable network. 
You can use any NIC order to interface with the `baremetal` network, provided it is not the NIC specified in the `provisioningNetworkInterface` configuration setting or the NIC associated to a node's `bootMACAddress` configuration setting for the `provisioning` network. - -While the cluster nodes can contain more than two NICs, the installation process only focuses on the first two NICs. For example: - -[options="header"] -|=== -|NIC |Network |VLAN -| NIC1 | `provisioning` | <provisioning_vlan> -| NIC2 | `baremetal` | <baremetal_vlan> -|=== - -In the previous example, NIC1 on all control plane and worker nodes connects to the non-routable network (`provisioning`) that is only used for the installation of the {product-title} cluster. NIC2 on all control plane and worker nodes connects to the routable `baremetal` network. - -[options="header"] -|=== -|PXE |Boot order -| NIC1 PXE-enabled `provisioning` network | 1 -| NIC2 `baremetal` network. | 2 -|=== - -[NOTE] -==== -Ensure PXE is enabled on the NIC used for the `provisioning` network and is disabled on all other NICs. -==== - -[discrete] -== Configuring canonical names - -Clients access the {product-title} cluster nodes over the `baremetal` network. Configure IBM Cloud subdomains or subzones where the canonical name extension is the cluster name. - ----- -<cluster_name>.<domain> ----- - -For example: - ----- -test-cluster.example.com ----- - -[discrete] -== Creating DNS entries - -You must create DNS `A` record entries resolving to unused IP addresses on the public subnet for the following: - -[width="100%", options="header"] -|===== -| Usage | Host Name | IP -| API | api.<cluster_name>.<domain> | <ip> -| Ingress LB (apps) | *.apps.<cluster_name>.<domain> | <ip> -|===== - -Control plane and worker nodes already have DNS entries after provisioning. - -The following table provides an example of fully qualified domain names. The API and Nameserver addresses begin with canonical name extensions. The host names of the control plane and worker nodes are examples, so you can use any host naming convention you prefer. - -[width="100%", options="header"] -|===== -| Usage | Host Name | IP -| API | api.<cluster_name>.<domain> | <ip> -| Ingress LB (apps) | *.apps.<cluster_name>.<domain> | <ip> -| Provisioner node | provisioner.<cluster_name>.<domain> | <ip> -| Master-0 | openshift-master-0.<cluster_name>.<domain> | <ip> -| Master-1 | openshift-master-1.<cluster_name>.<domain> | <ip> -| Master-2 | openshift-master-2.<cluster_name>.<domain> | <ip> -| Worker-0 | openshift-worker-0.<cluster_name>.<domain> | <ip> -| Worker-1 | openshift-worker-1.<cluster_name>.<domain> | <ip> -| Worker-n | openshift-worker-n.<cluster_name>.<domain> | <ip> -|===== - -{product-title} includes functionality that uses cluster membership information to generate `A` records. This resolves the node names to their IP addresses. After the nodes are registered with the API, the cluster can disperse node information without using CoreDNS-mDNS. This eliminates the network traffic associated with multicast DNS. - -[IMPORTANT] -==== -After provisioning the IBM Cloud nodes, you must create a DNS entry for the `api.<cluster_name>.<domain>` domain name on the external DNS because removing CoreDNS causes the local entry to disappear. Failure to create a DNS record for the `api.<cluster_name>.<domain>` domain name in the external DNS server prevents worker nodes from joining the cluster. 
-==== - -[discrete] -== Network Time Protocol (NTP) - -Each {product-title} node in the cluster must have access to an NTP server. {product-title} nodes use NTP to synchronize their clocks. For example, cluster nodes use SSL certificates that require validation, which might fail if the date and time between the nodes are not in sync. - -[IMPORTANT] -==== -Define a consistent clock date and time format in each cluster node's BIOS settings, or installation might fail. -==== - -[discrete] -== Configure a DHCP server - -IBM Cloud does not run DHCP on the public or private VLANs. After provisioning IBM Cloud nodes, you must set up a DHCP server for the public VLAN, which corresponds to {product-title}'s `baremetal` network. - -[NOTE] -==== -The IP addresses allocated to each node do not need to match the IP addresses allocated by the IBM Cloud provisioning system. -==== - -See the "Configuring the public subnet" section for details. - -[discrete] -== Ensure BMC access privileges - -The "Remote management" page for each node on the dashboard contains the node's intelligent platform management interface (IPMI) credentials. The default IPMI privileges prevent the user from making certain boot target changes. You must change the privilege level to `OPERATOR` so that Ironic can make those changes. - -In the `install-config.yaml` file, add the `privilegelevel` parameter to the URLs used to configure each BMC. See the "Configuring the install-config.yaml file" section for additional details. For example: - -[source,yaml] ----- -ipmi://<IP>:<port>?privilegelevel=OPERATOR ----- - -Alternatively, contact IBM Cloud support and request that they increase the IPMI privileges to `ADMINISTRATOR` for each node. - -[discrete] -== Create bare metal servers - -Create bare metal servers in the link:https://cloud.ibm.com[IBM Cloud dashboard] by navigating to *Create resource* -> *Bare Metal Server*. - -Alternatively, you can create bare metal servers with the `ibmcloud` CLI utility. For example: - -[source,terminal] ----- -$ ibmcloud sl hardware create --hostname <SERVERNAME> \ - --domain <DOMAIN> \ - --size <SIZE> \ - --os <OS-TYPE> \ - --datacenter <DC-NAME> \ - --port-speed <SPEED> \ - --billing <BILLING> ----- - -See link:https://cloud.ibm.com/docs/cli?topic=cli-install-ibmcloud-cli[Installing the stand-alone IBM Cloud CLI] for details on installing the IBM Cloud CLI. - -[NOTE] -==== -IBM Cloud servers might take 3-5 hours to become available. -==== diff --git a/modules/install-openshift-common-terms.adoc b/modules/install-openshift-common-terms.adoc deleted file mode 100644 index 427185853c4f..000000000000 --- a/modules/install-openshift-common-terms.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/index.adoc - -:_content-type: REFERENCE -[id="install-openshift-common-terms_{context}"] -= Glossary of common terms for {product-title} installing - -This glossary defines common terms that are used in the installation content. These terms help you understand installation effectively. - -{ai-full}:: -An installer hosted at link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[console.redhat.com] that provides a web user interface or a RESTful API for creating a cluster configuration. The {ai-full} generates a discovery image. Cluster machines boot with the discovery image, which installs {op-system} and an agent. 
Together, the {ai-full} and agent provide pre-installation validation and installation for the cluster. - -Agent-based installer:: -An installer similar to the {ai-full}, but you must download the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. The agent-based installer is ideal for air-gapped/restricted networks. - -Bootstrap node:: -A temporary machine that runs a minimal Kubernetes configuration to deploy the {product-title} control plane. - -Control plane:: -A container orchestration layer that exposes the API and interfaces to define, deploy, and manage the lifecycle of containers. Also known as control plane machines. - -Compute node:: -Nodes that are responsible for executing workloads for cluster users. Also known as worker nodes. - -Disconnected installation:: -There are situations where parts of a data center might not have access to the internet, even through proxy servers. You can still install the {product-title} in these environments, but you must download the required software and images and make them available to the disconnected environment. - -The {product-title} installation program:: -A program that provisions the infrastructure and deploys a cluster. - -Installer-provisioned infrastructure:: -The installation program deploys and configures the infrastructure that the cluster runs on. - -Ignition config files:: -A file that Ignition uses to configure {op-system-first} during operating system initialization. The installation program generates different Ignition config files to initialize bootstrap, control plane, and worker nodes. - -Kubernetes manifests:: -Specifications of a Kubernetes API object in a JSON or YAML format. A configuration file can include deployments, config maps, secrets, daemonsets etc. - -Kubelet:: -A **primary node** agent that runs on each node in the cluster to ensure that containers are running in a pod. - -Load balancers:: -A load balancer serves as the single point of contact for clients. Load balancers for the API distribute incoming traffic across control plane nodes. - -Machine Config Operator:: -An Operator that manages and applies configuration and updates of the base operating system and container runtime, including everything between the kernel and kubelet for the nodes in the cluster. - -Operators:: -The preferred method of packaging, deploying, and managing a Kubernetes application in an {product-title} cluster. An operator takes human operational knowledge and encodes it into software that is easily packaged and shared with customers. - -User-provisioned infrastructure:: -You can install {product-title} on infrastructure that you provide. You can use the installation program to generate the assets required to provision the cluster infrastructure, create the cluster infrastructure, and then deploy the cluster to the infrastructure that you provided. diff --git a/modules/install-sno-about-installing-on-a-single-node.adoc b/modules/install-sno-about-installing-on-a-single-node.adoc deleted file mode 100644 index aba00569f8d6..000000000000 --- a/modules/install-sno-about-installing-on-a-single-node.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-preparing-to-install-sno.adoc - -:_content-type: CONCEPT -[id="install-sno-about-installing-on-a-single-node_{context}"] -= About OpenShift on a single node - -You can create a single-node cluster with standard installation methods. 
{product-title} on a single node is a specialized installation that requires the creation of a special ignition configuration ISO. The primary use case is for edge computing workloads, including intermittent connectivity, portable clouds, and 5G radio access networks (RAN) close to a base station. The major tradeoff with an installation on a single node is the lack of high availability. - -[IMPORTANT] -==== -The use of OpenShiftSDN with {sno} is not supported. OVN-Kubernetes is the default network plugin for {sno} deployments. -==== diff --git a/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc b/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc deleted file mode 100644 index 5a507f007ae5..000000000000 --- a/modules/install-sno-generating-the-discovery-iso-with-the-assisted-installer.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-sno-generating-the-discovery-iso-with-the-assisted-installer_{context}"] -= Generating the discovery ISO with the Assisted Installer - -Installing {product-title} on a single node requires a discovery ISO, which the Assisted Installer can generate. - -.Procedure - -. On the administration host, open a browser and navigate to link:https://console.redhat.com/openshift/assisted-installer/clusters[{cluster-manager-first}]. - -. Click *Create Cluster* to create a new cluster. - -. In the *Cluster name* field, enter a name for the cluster. - -. In the *Base domain* field, enter a base domain. For example: -+ ----- -example.com ----- -+ -All DNS records must be subdomains of this base domain and include the cluster name, for example: -+ ----- -<cluster-name>.example.com ----- -+ -[NOTE] -==== -You cannot change the base domain or cluster name after cluster installation. -==== - -. Select *Install single node OpenShift (SNO)* and complete the rest of the wizard steps. Download the discovery ISO. - -. Make a note of the discovery ISO URL for installing with virtual media. - -[NOTE] -===== -If you enable {VirtProductName} during this process, you must have a second local storage device of at least 50GiB for your virtual machines. -===== diff --git a/modules/install-sno-generating-the-install-iso-manually.adoc b/modules/install-sno-generating-the-install-iso-manually.adoc deleted file mode 100644 index b57ba3120096..000000000000 --- a/modules/install-sno-generating-the-install-iso-manually.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="generating-the-install-iso-manually_{context}"] -= Generating the installation ISO with coreos-installer - -Installing {product-title} on a single node requires an installation ISO, which you can generate with the following procedure. - -.Prerequisites - -* Install `podman`. - -.Procedure - -. Set the {product-title} version: -+ -[source,terminal] ----- -$ OCP_VERSION=<ocp_version> <1> ----- -+ -<1> Replace `<ocp_version>` with the current version, for example, `latest-{product-version}` - -. Set the host architecture: -+ -[source,terminal] ----- -$ ARCH=<architecture> <1> ----- -<1> Replace `<architecture>` with the target host architecture, for example, `aarch64` or `x86_64`. - -. 
Download the {product-title} client (`oc`) and make it available for use by entering the following commands: -+ -[source,terminal] ----- -$ curl -k https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OCP_VERSION/openshift-client-linux.tar.gz -o oc.tar.gz ----- -+ -[source,terminal] ----- -$ tar zxf oc.tar.gz ----- -+ -[source,terminal] ----- -$ chmod +x oc ----- - -. Download the {product-title} installer and make it available for use by entering the following commands: -+ -[source,terminal] ----- -$ curl -k https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OCP_VERSION/openshift-install-linux.tar.gz -o openshift-install-linux.tar.gz ----- -+ -[source,terminal] ----- -$ tar zxvf openshift-install-linux.tar.gz ----- -+ -[source,terminal] ----- -$ chmod +x openshift-install ----- - -. Retrieve the {op-system} ISO URL by running the following command: -+ -[source,terminal] ----- -$ ISO_URL=$(./openshift-install coreos print-stream-json | grep location | grep $ARCH | grep iso | cut -d\" -f4) ----- - -. Download the {op-system} ISO: -+ -[source,terminal] ----- -$ curl -L $ISO_URL -o rhcos-live.iso ----- - -. Prepare the `install-config.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: <domain> <1> -compute: -- name: worker - replicas: 0 <2> -controlPlane: - name: master - replicas: 1 <3> -metadata: - name: <name> <4> -networking: <5> - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 <6> - networkType: OVNKubernetes - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -bootstrapInPlace: - installationDisk: /dev/disk/by-id/<disk_id> <7> -pullSecret: '<pull_secret>' <8> -sshKey: | - <ssh_key> <9> ----- -<1> Add the cluster domain name. -<2> Set the `compute` replicas to `0`. This makes the control plane node schedulable. -<3> Set the `controlPlane` replicas to `1`. In conjunction with the previous `compute` setting, this setting ensures the cluster runs on a single node. -<4> Set the `metadata` name to the cluster name. -<5> Set the `networking` details. OVN-Kubernetes is the only allowed network plugin type for single-node clusters. -<6> Set the `cidr` value to match the subnet of the {sno} cluster. -<7> Set the path to the installation disk drive, for example, `/dev/disk/by-id/wwn-0x64cd98f04fde100024684cf3034da5c2`. -<8> Copy the {cluster-manager-url-pull} and add the contents to this configuration setting. -<9> Add the public SSH key from the administration host so that you can log in to the cluster after installation. - -. Generate {product-title} assets by running the following commands: -+ -[source,terminal] ----- -$ mkdir ocp ----- -+ -[source,terminal] ----- -$ cp install-config.yaml ocp ----- -+ -[source,terminal] ----- -$ ./openshift-install --dir=ocp create single-node-ignition-config ----- - -. 
Embed the ignition data into the {op-system} ISO by running the following commands: -+ -[source,terminal] ----- -$ alias coreos-installer='podman run --privileged --pull always --rm \ - -v /dev:/dev -v /run/udev:/run/udev -v $PWD:/data \ - -w /data quay.io/coreos/coreos-installer:release' ----- -+ -[source,terminal] ----- -$ coreos-installer iso ignition embed -fi ocp/bootstrap-in-place-for-live-iso.ign rhcos-live.iso ----- diff --git a/modules/install-sno-installing-with-the-assisted-installer.adoc b/modules/install-sno-installing-with-the-assisted-installer.adoc deleted file mode 100644 index ee83eca56a83..000000000000 --- a/modules/install-sno-installing-with-the-assisted-installer.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="install-sno-installing-with-the-assisted-installer_{context}"] -= Installing {sno} with the Assisted Installer - -Use the Assisted Installer to install the single-node cluster. - -.Procedure - -. Attach the {op-system} discovery ISO to the target host. - -. Configure the boot drive order in the server BIOS settings to boot from the attached discovery ISO and then reboot the server. - -. On the administration host, return to the browser. Wait for the host to appear in the list of discovered hosts. If necessary, reload the link:https://console.redhat.com/openshift/assisted-installer/clusters[*Assisted Clusters*] page and select the cluster name. - -. Complete the install wizard steps. Add networking details, including a subnet from the available subnets. Add the SSH public key if necessary. - -. Monitor the installation's progress. Watch the cluster events. After the installation process finishes writing the operating system image to the server's hard disk, the server restarts. - -. Remove the discovery ISO, and reset the server to boot from the installation drive. -+ -The server restarts several times automatically, deploying the control plane. diff --git a/modules/install-sno-installing-with-usb-media.adoc b/modules/install-sno-installing-with-usb-media.adoc deleted file mode 100644 index 30da15ddf754..000000000000 --- a/modules/install-sno-installing-with-usb-media.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-installing-sno.adoc - -:_content-type: PROCEDURE -[id="installing-with-usb-media_{context}"] -= Creating a bootable ISO image on a USB drive - -You can install software using a bootable USB drive that contains an ISO image. Booting the server with the USB drive prepares the server for the software installation. - -.Procedure - -. On the administration host, insert a USB drive into a USB port. - -. Create a bootable USB drive, for example: -+ -[source,terminal] ----- -# dd if=<path_to_iso> of=<path_to_usb> status=progress ----- -+ -where: -+ --- -<path_to_iso>:: is the relative path to the downloaded ISO file, for example, `rhcos-live.iso`. -<path_to_usb>:: is the location of the connected USB drive, for example, `/dev/sdb`. --- -+ -After the ISO is copied to the USB drive, you can use the USB drive to install software on the server. 
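A minimal, illustrative sketch of these steps with concrete values follows. It assumes that the ISO generated earlier is named `rhcos-live.iso`, that the USB drive appears as `/dev/sdb` on the administration host, and that the `coreos-installer` alias from the earlier procedure is available; verify the device name with `lsblk` before writing, because `dd` overwrites the target device.

[source,terminal]
----
$ coreos-installer iso ignition show rhcos-live.iso
----

If the Ignition config was embedded successfully, this command prints the bootstrap-in-place Ignition JSON. You can then write the ISO to the USB drive:

[source,terminal]
----
# dd if=rhcos-live.iso of=/dev/sdb bs=4M status=progress conv=fsync
----

The `bs=4M` and `conv=fsync` options are optional: they speed up the copy and flush the data to the drive before `dd` returns.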
diff --git a/modules/install-sno-monitoring-the-installation-manually.adoc b/modules/install-sno-monitoring-the-installation-manually.adoc
deleted file mode 100644
index 7efb12a995a8..000000000000
--- a/modules/install-sno-monitoring-the-installation-manually.adoc
+++ /dev/null
@@ -1,45 +0,0 @@
-// This is included in the following assemblies:
-//
-// installing_sno/install-sno-installing-sno.adoc
-
-:_content-type: PROCEDURE
-[id="install-sno-monitoring-the-installation-manually_{context}"]
-= Monitoring the cluster installation using openshift-install
-
-Use `openshift-install` to monitor the progress of the single-node cluster installation.
-
-.Procedure
-
-. Attach the modified {op-system} installation ISO to the target host.
-
-. Configure the boot drive order in the server BIOS settings to boot from the attached installation ISO, and then reboot the server.
-
-. On the administration host, monitor the installation by running the following command:
-+
-[source,terminal]
-----
-$ ./openshift-install --dir=ocp wait-for install-complete
-----
-+
-The server restarts several times while deploying the control plane.
-
-.Verification
-
-* After the installation is complete, check the environment by running the following command:
-+
-[source,terminal]
-----
-$ export KUBECONFIG=ocp/auth/kubeconfig
-----
-+
-[source,terminal]
-----
-$ oc get nodes
-----
-+
-.Example output
-[source,terminal]
-----
-NAME                        STATUS   ROLES           AGE   VERSION
-control-plane.example.com   Ready    master,worker   10m   v1.26.0
-----
diff --git a/modules/install-sno-requirements-for-installing-on-a-single-node.adoc b/modules/install-sno-requirements-for-installing-on-a-single-node.adoc
deleted file mode 100644
index a2ffd5562b0c..000000000000
--- a/modules/install-sno-requirements-for-installing-on-a-single-node.adoc
+++ /dev/null
@@ -1,48 +0,0 @@
-// This is included in the following assemblies:
-//
-// installing_sno/install-sno-preparing-to-install-sno.adoc
-:_content-type: CONCEPT
-
-[id="install-sno-requirements-for-installing-on-a-single-node_{context}"]
-= Requirements for installing OpenShift on a single node
-
-Installing {product-title} on a single node alleviates some of the requirements for high availability and large-scale clusters. However, you must address the following requirements:
-
-* *Administration host:* You must have a computer to prepare the ISO, to create the USB boot drive, and to monitor the installation.
-
-* *CPU architecture:* Installing {product-title} on a single node supports `x86_64` and `arm64` CPU architectures.
-
-* *Supported platforms:* Installing {product-title} on a single node is supported on bare metal, vSphere, AWS, Red Hat OpenStack, and Red Hat Virtualization platforms. In all cases, you must specify the `platform.none: {}` parameter in the `install-config.yaml` configuration file.
-
-* *Production-grade server:* Installing {product-title} on a single node requires a server with sufficient resources to run {product-title} services and a production workload.
-+
-.Minimum resource requirements
-[options="header"]
-|====
-|Profile|vCPU|Memory|Storage
-|Minimum|8 vCPU cores|16GB of RAM|120GB
-|====
-+
-[NOTE]
-====
-* One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio:
-+
-(threads per core × cores) × sockets = vCPUs
-
-* Adding Operators during the installation process might increase the minimum resource requirements.
-====
-+
-The server must have a Baseboard Management Controller (BMC) when booting with virtual media.
-
-* *Networking:* The server must have access to the internet or access to a local registry if it is not connected to a routable network. The server must have a DHCP reservation or a static IP address for the Kubernetes API, ingress route, and cluster node domain names. You must configure the DNS to resolve the IP address to each of the following fully qualified domain names (FQDN):
-+
-.Required DNS records
-[options="header"]
-|====
-|Usage|FQDN|Description
-|Kubernetes API|`api.<cluster_name>.<base_domain>`| Add a DNS A/AAAA or CNAME record. This record must be resolvable by clients external to the cluster.
-|Internal API|`api-int.<cluster_name>.<base_domain>`| Add a DNS A/AAAA or CNAME record when creating the ISO manually. This record must be resolvable by nodes within the cluster.
-|Ingress route|`*.apps.<cluster_name>.<base_domain>`| Add a wildcard DNS A/AAAA or CNAME record that targets the node. This record must be resolvable by clients external to the cluster.
-|====
-+
-Without persistent IP addresses, communications between the `apiserver` and `etcd` might fail.
diff --git a/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc b/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc
deleted file mode 100644
index 69b6038d43ca..000000000000
--- a/modules/install-sno_additional-requirements-for-installing-on-a-single-node-on-aws.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-// This module is included in the following assemblies:
-//
-// installing/installing_sno/install-sno-preparing-to-install-sno.adoc
-
-:_content-type: CONCEPT
-[id="additional-requirements-for-installing-on-a-single-node-on-aws_{context}"]
-= Additional requirements for installing on a single node on AWS
-
-The AWS documentation for installer-provisioned installation is written for a high availability cluster that consists of three control plane nodes. When referring to the AWS documentation, consider the differences between the requirements for a {sno} cluster and a high availability cluster.
-
-* The required machines that the AWS documentation lists for cluster installation include a temporary bootstrap machine, three control plane machines, and at least two compute machines. For a {sno} cluster, you require only a temporary bootstrap machine and one AWS instance for the control plane node, and no worker nodes.
-
-* The minimum resource requirements that the AWS documentation lists for cluster installation specify a control plane node with 4 vCPUs and 100GB of storage. For a {sno} cluster, you must have a minimum of 8 vCPU cores and 120GB of storage.
-
-* The `controlPlane.replicas` setting in the `install-config.yaml` file must be set to `1`.
-
-* The `compute.replicas` setting in the `install-config.yaml` file must be set to `0`.
-This makes the control plane node schedulable.
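The following fragment is an illustrative sketch, not a complete configuration, of how these replica settings might appear in an `install-config.yaml` file for {sno} on AWS. The cluster name, base domain, and region are placeholder values, and required fields such as `pullSecret` and `sshKey` are omitted for brevity.

[source,yaml]
----
apiVersion: v1
baseDomain: example.com
metadata:
  name: sno-aws-cluster
compute:
- name: worker
  replicas: 0          # no separate worker nodes; the control plane node runs workloads
controlPlane:
  name: master
  replicas: 1          # a single control plane node
platform:
  aws:
    region: us-east-2  # placeholder region
----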
diff --git a/modules/installation-about-custom-azure-vnet.adoc b/modules/installation-about-custom-azure-vnet.adoc deleted file mode 100644 index 00c4bf45794d..000000000000 --- a/modules/installation-about-custom-azure-vnet.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - -ifeval::["{context}" == "installing-azure-government-region"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure-private: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-custom-azure-vnet_{context}"] -= About reusing a VNet for your {product-title} cluster - -In {product-title} {product-version}, you can deploy a cluster into an existing Azure Virtual Network (VNet) in Microsoft Azure. If you do, you must also use existing subnets within the VNet and routing rules. - -By deploying {product-title} into an existing Azure VNet, you might be able to avoid service limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VNet. - -[id="installation-about-custom-azure-vnet-requirements_{context}"] -== Requirements for using your VNet - -When you deploy a cluster by using an existing VNet, you must perform additional network configuration before you install the cluster. In installer-provisioned infrastructure clusters, the installer usually creates the following components, but it does not create them when you install into an existing VNet: - -* Subnets -* Route tables -* VNets -* Network Security Groups - -include::snippets/custom-dns-server.adoc[] - -If you use a custom VNet, you must correctly configure it and its subnets for the installation program and the cluster to use. The installation program cannot subdivide network ranges for the cluster to use, set route tables for the subnets, or set VNet options like DHCP, so you must do so before you install the cluster. - -The cluster must be able to access the resource group that contains the existing VNet and subnets. While all of the resources that the cluster creates are placed in a separate resource group that it creates, some network resources are used from a separate group. Some cluster Operators must be able to access resources in both resource groups. For example, the Machine API controller attaches NICS for the virtual machines that it creates to subnets from the networking resource group. - -Your VNet must meet the following characteristics: - -* The VNet's CIDR block must contain the `Networking.MachineCIDR` range, which is the IP address pool for cluster machines. -* The VNet and its subnets must belong to the same resource group, and the subnets must be configured to use Azure-assigned DHCP IP addresses instead of static IP addresses. - -You must provide two subnets within your VNet, one for the control plane machines and one for the compute machines. Because Azure distributes machines in different availability zones within the region that you specify, your cluster will have high availability by default. 
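As an illustration only, the following `install-config.yaml` fragment sketches how an existing VNet and its two subnets are typically referenced under the `platform.azure` stanza. The resource group, VNet, and subnet names are placeholders, and the rest of the required configuration is omitted.

[source,yaml]
----
platform:
  azure:
    region: centralus                           # placeholder region
    networkResourceGroupName: example-vnet-rg   # resource group that contains the existing VNet
    virtualNetwork: example-vnet                # name of the existing VNet
    controlPlaneSubnet: example-control-subnet  # existing subnet for the control plane machines
    computeSubnet: example-compute-subnet       # existing subnet for the compute machines
----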
- -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the specified subnets exist. -* There are two private subnets, one for the control plane machines and one for the compute machines. -* The subnet CIDRs belong to the machine CIDR that you specified. Machines are not provisioned in availability zones that you do not provide private subnets for. -ifdef::azure[] -If required, the installation program creates public load balancers that manage the control plane and worker nodes, and Azure allocates a public IP address to them. -endif::[] - -[NOTE] -==== -If you destroy a cluster that uses an existing VNet, the VNet is not deleted. -==== - -[id="installation-about-custom-azure-vnet-nsg-requirements_{context}"] -=== Network security group requirements - -The network security groups for the subnets that host the compute and control plane machines require specific access to ensure that the cluster communication is correct. You must create rules to allow access to the required cluster communication ports. - -[IMPORTANT] -==== -The network security group rules must be in place before you install the cluster. If you attempt to install a cluster without the required access, the installation program cannot reach the Azure APIs, and installation fails. -==== - -.Required ports -[options="header",cols="1,3,1,1"] -|=== - -|Port -|Description -|Control plane -|Compute - -|`80` -|Allows HTTP traffic -| -|x - -|`443` -|Allows HTTPS traffic -| -|x - -|`6443` -|Allows communication to the control plane machines -|x -| - -|`22623` -|Allows internal communication to the machine config server for provisioning machines -|x -| -|=== - -include::snippets/mcs-endpoint-limitation.adoc[] - -Because cluster components do not modify the user-provided network security groups, which the Kubernetes controllers update, a pseudo-network security group is created for the Kubernetes controller to modify without impacting the rest of the environment. - -.Additional resources - -* xref:../../networking/openshift_sdn/about-openshift-sdn.adoc#about-openshift-sdn[About the OpenShift SDN network plugin] - - -[id="installation-about-custom-azure-permissions_{context}"] -== Division of permissions - -Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, storage, and load balancers, but not networking-related components such as VNets, subnet, or ingress rules. - -The Azure credentials that you use when you create your cluster do not need the networking permissions that are required to make VNets and core networking components within the VNet, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage accounts, and nodes. - -[id="installation-about-custom-azure-vnet-isolation_{context}"] -== Isolation between clusters - -Because the cluster is unable to modify network security groups in an existing subnet, there is no way to isolate clusters from each other on the VNet. 
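As an illustration of the network security group requirements described earlier in this module, the following sketch shows how one of the required inbound rules might be created with the Azure CLI. The resource group and network security group names are placeholders, and in practice you might manage these rules through ARM templates or other tooling instead.

[source,terminal]
----
$ az network nsg rule create \
    --resource-group <network_resource_group> \
    --nsg-name <control_plane_nsg> \
    --name allow-openshift-api \
    --priority 100 \
    --direction Inbound \
    --access Allow \
    --protocol Tcp \
    --destination-port-ranges 6443
----

This example corresponds to the `6443` row of the required ports table: it allows external clients to reach the Kubernetes API on the control plane subnet.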
-//// -These are some of the details from the AWS version, and if any of them are relevant to Azure, they can be included. -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VNet. -* ICMP ingress is allowed to entire network. -* TCP 22 ingress (SSH) is allowed to the entire network. -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. -//// diff --git a/modules/installation-about-custom-gcp-vpc.adoc b/modules/installation-about-custom-gcp-vpc.adoc deleted file mode 100644 index 380524acd4ee..000000000000 --- a/modules/installation-about-custom-gcp-vpc.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/ - -:_content-type: CONCEPT -[id="installation-about-custom-gcp-vpc_{context}"] -= About using a custom VPC - -In {product-title} {product-version}, you can deploy a cluster into an existing VPC in Google Cloud Platform (GCP). If you do, you must also use existing subnets within the VPC and routing rules. - -By deploying {product-title} into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself. - -[id="installation-about-custom-gcp-vpcs-requirements_{context}"] -== Requirements for using your VPC - -The installation program will no longer create the following components: - -* VPC -* Subnets -* Cloud router -* Cloud NAT -* NAT IP addresses - -If you use a custom VPC, you must correctly configure it and its subnets for the installation program and the cluster to use. The installation program cannot subdivide network ranges for the cluster to use, set route tables for the subnets, or set VPC options like DHCP, so you must do so before you install the cluster. - -Your VPC and subnets must meet the following characteristics: - -* The VPC must be in the same GCP project that you deploy the {product-title} cluster to. -* To allow access to the internet from the control plane and compute machines, you must configure cloud NAT on the subnets to allow egress to it. These machines do not have a public address. Even if you do not require access to the internet, you must allow egress to the VPC network to obtain the installation program and images. Because multiple cloud NATs cannot be configured on the shared subnets, the installation program cannot configure it. - -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the subnets that you specify exist and belong to the VPC that you specified. -* The subnet CIDRs belong to the machine CIDR. -* You must provide a subnet to deploy the cluster control plane and compute machines to. You can use the same subnet for both machine types. - -If you destroy a cluster that uses an existing VPC, the VPC is not deleted. - -[id="installation-about-custom-gcp-permissions_{context}"] -== Division of permissions - -Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. 
This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or Ingress rules. - -The GCP credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage, and nodes. - -[id="installation-about-custom-gcp-vpcs-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is preserved by firewall rules that reference the machines in your cluster by the cluster's infrastructure ID. Only traffic within the cluster is allowed. - -If you deploy multiple clusters to the same VPC, the following components might share access between clusters: - -* The API, which is globally available with an external publishing strategy or available throughout the network in an internal publishing strategy -* Debugging tools, such as ports on VM instances that are open to the machine CIDR for SSH and ICMP access diff --git a/modules/installation-about-mirror-registry.adoc b/modules/installation-about-mirror-registry.adoc deleted file mode 100644 index 4418f3edac1a..000000000000 --- a/modules/installation-about-mirror-registry.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * scalability_and_performance/ztp-deploying-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:oc-mirror: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:oc-mirror: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-mirror-registry_{context}"] -= About the mirror registry - -ifndef::oc-mirror[] -You can mirror the images that are required for {product-title} installation and subsequent product updates to a container mirror registry such as Red Hat Quay, JFrog Artifactory, Sonatype Nexus Repository, or Harbor. If you do not have access to a large-scale container registry, you can use the _mirror registry for Red Hat OpenShift_, a small-scale container registry included with {product-title} subscriptions. - -You can use any container registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2], such as Red Hat Quay, the _mirror registry for Red Hat OpenShift_, Artifactory, Sonatype Nexus Repository, or Harbor. Regardless of your chosen registry, the procedure to mirror content from Red Hat hosted sites on the internet to an isolated image registry is the same. After you mirror the content, you configure each cluster to retrieve this content from your mirror registry. 
-endif::[] -ifdef::oc-mirror[] -You can mirror the images that are required for {product-title} installation and subsequent product updates to a container mirror registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2], such as Red Hat Quay. If you do not have access to a large-scale container registry, you can use the _mirror registry for Red Hat OpenShift_, which is a small-scale container registry included with {product-title} subscriptions. - -Regardless of your chosen registry, the procedure to mirror content from Red Hat hosted sites on the internet to an isolated image registry is the same. After you mirror the content, you configure each cluster to retrieve this content from your mirror registry. -endif::[] - -[IMPORTANT] -==== -The {product-registry} cannot be used as the target registry because it does not support pushing without a tag, which is required during the mirroring process. -==== - -If choosing a container registry that is not the _mirror registry for Red Hat OpenShift_, it must be reachable by every machine in the clusters that you provision. If the registry is unreachable, installation, updating, or normal operations such as workload relocation might fail. For that reason, you must run mirror registries in a highly available way, and the mirror registries must at least match the production availability of your {product-title} clusters. - -When you populate your mirror registry with {product-title} images, you can follow two scenarios. If you have a host that can access both the internet and your mirror registry, but not your cluster nodes, you can directly mirror the content from that machine. This process is referred to as _connected mirroring_. If you have no such host, you must mirror the images to a file system and then bring that host or removable media into your restricted environment. This process is referred to as _disconnected mirroring_. - -For mirrored registries, to view the source of pulled images, you must review the `Trying to access` log entry in the CRI-O logs. Other methods to view the image pull source, such as using the `crictl images` command on a node, show the non-mirrored image name, even though the image is pulled from the mirrored location. - -[NOTE] -==== -Red Hat does not test third party registries with {product-title}. 
-==== - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:!oc-mirror: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!oc-mirror: -endif::[] \ No newline at end of file diff --git a/modules/installation-about-restricted-network.adoc b/modules/installation-about-restricted-network.adoc deleted file mode 100644 index 91bf70b2e5ad..000000000000 --- a/modules/installation-about-restricted-network.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing-rhv-restricted-network.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:ipi: -endif::[] - -:_content-type: CONCEPT -[id="installation-about-restricted-networks_{context}"] -= About installations in restricted networks - -In {product-title} {product-version}, you can perform an installation that does not -require an active connection to the internet to obtain software components. Restricted network installations can be completed using installer-provisioned infrastructure or user-provisioned infrastructure, depending on the cloud platform to which you are installing the cluster. - -ifndef::ibm-power[] -If you choose to perform a restricted network installation on a cloud platform, you -still require access to its cloud APIs. Some cloud functions, like -Amazon Web Service's Route 53 DNS and IAM services, require internet access. -//behind a proxy -Depending on your network, you might require less internet -access for an installation on bare metal hardware, Nutanix, or on VMware vSphere. -endif::ibm-power[] - -To complete a restricted network installation, you must create a registry that -mirrors the contents of the {product-registry} and contains the -installation media. 
You can create this registry on a mirror host, which can -access both the internet and your closed network, or by using other methods -that meet your restrictions. - -ifndef::ipi[] -[IMPORTANT] -==== -Because of the complexity of the configuration for user-provisioned installations, consider completing a standard user-provisioned infrastructure installation before you attempt a restricted network installation using user-provisioned infrastructure. Completing this test installation might make it easier to isolate and troubleshoot any issues that might arise during your installation in a restricted network. -==== -endif::ipi[] - -[id="installation-restricted-network-limits_{context}"] -== Additional limits - -Clusters in restricted networks have the following additional limitations and restrictions: - -* The `ClusterVersion` status includes an `Unable to retrieve available updates` -error. -//* The authentication Operator might randomly fail. -* By default, you cannot use the contents of the Developer Catalog because - you cannot access the required image stream tags. -//* The `TelemeterClientDown` and `Watchdog` alerts from the monitoring Operator always display. - -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!ipi: -endif::[] diff --git a/modules/installation-adding-nutanix-root-certificates.adoc b/modules/installation-adding-nutanix-root-certificates.adoc deleted file mode 100644 index d225d2834406..000000000000 --- a/modules/installation-adding-nutanix-root-certificates.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: PROCEDURE -[id="installation-adding-nutanix-root-certificates_{context}"] -= Adding Nutanix root CA certificates to your system trust - -Because the installation program requires access to the Prism Central API, you must add your Nutanix trusted root CA certificates to your system trust before you install an {product-title} cluster. - -.Procedure - -. From the Prism Central web console, download the Nutanix root CA certificates. -. Extract the compressed file that contains the Nutanix root CA certificates. -. Add the files for your operating system to the system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# cp certs/lin/* /etc/pki/ca-trust/source/anchors ----- - -. Update your system trust. 
For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# update-ca-trust extract ----- diff --git a/modules/installation-adding-registry-pull-secret.adoc b/modules/installation-adding-registry-pull-secret.adoc deleted file mode 100644 index d8459ad3388f..000000000000 --- a/modules/installation-adding-registry-pull-secret.adoc +++ /dev/null @@ -1,203 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:restricted: -:update-oc-mirror: -endif::[] - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:restricted: -endif::[] - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:restricted: -:oc-mirror: -endif::[] - -:_content-type: PROCEDURE -[id="installation-adding-registry-pull-secret_{context}"] -= Configuring credentials that allow images to be mirrored - -Create a container image registry credentials file that allows mirroring -images from Red Hat to your mirror. - -ifdef::restricted[] -[WARNING] -==== -Do not use this image registry credentials file as the pull secret when you install a cluster. If you provide this file when you install cluster, all of the machines in the cluster will have write access to your mirror registry. -==== -endif::restricted[] - -ifdef::restricted[] -[WARNING] -==== -This process requires that you have write access to a container image registry on the mirror registry and adds the credentials to a registry pull secret. -==== - -endif::restricted[] - -.Prerequisites - -* You configured a mirror registry to use in your disconnected environment. -ifdef::restricted[] -* You identified an image repository location on your mirror registry to mirror images into. -* You provisioned a mirror registry account that allows images to be uploaded to that image repository. -endif::restricted[] - -.Procedure - -Complete the following steps on the installation host: - -ifndef::openshift-origin[] -. Download your `registry.redhat.io` {cluster-manager-url-pull}. - -. Make a copy of your pull secret in JSON format: -+ -[source,terminal] ----- -$ cat ./pull-secret | jq . > <path>/<pull_secret_file_in_json> <1> ----- -<1> Specify the path to the folder to store the pull secret in and a name for the JSON file that you create. -+ -The contents of the file resemble the following example: -+ -[source,json] ----- -{ - "auths": { - "cloud.openshift.com": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "quay.io": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "registry.connect.redhat.com": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - }, - "registry.redhat.io": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - } - } -} ----- -// An additional step for following this procedure when using oc-mirror as part of the disconnected install process. -ifdef::oc-mirror[] -. Save the file either as `~/.docker/config.json` or `$XDG_RUNTIME_DIR/containers/auth.json`. 
-endif::[] -// Similar to the additional step above, except it is framed as optional because it is included in a disconnected update page (where users may or may not use oc-mirror for their process) -ifdef::update-oc-mirror[] -. Optional: If using the oc-mirror plugin, save the file either as `~/.docker/config.json` or `$XDG_RUNTIME_DIR/containers/auth.json`. -endif::[] -endif::[] - -. Generate the base64-encoded user name and password or token for your mirror registry: -+ -[source,terminal] ----- -$ echo -n '<user_name>:<password>' | base64 -w0 <1> -BGVtbYk3ZHAtqXs= ----- -<1> For `<user_name>` and `<password>`, specify the user name and password that you configured for your registry. - -ifndef::openshift-origin[] -. Edit the JSON -endif::[] -ifdef::openshift-origin[] -. Create a `.json` -endif::[] -file and add a section that describes your registry to it: -+ -[source,json] ----- -ifndef::openshift-origin[] - "auths": { - "<mirror_registry>": { <1> - "auth": "<credentials>", <2> - "email": "you@example.com" - } - }, -endif::[] -ifdef::openshift-origin[] -{ - "auths": { - "<mirror_registry>": { <1> - "auth": "<credentials>", <2> - "email": "you@example.com" - } - } -} -endif::[] ----- -<1> For `<mirror_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example, -`registry.example.com` or `registry.example.com:8443` -<2> For `<credentials>`, specify the base64-encoded user name and password for -the mirror registry. -+ -ifndef::openshift-origin[] -The file resembles the following example: -+ -[source,json] ----- -{ - "auths": { - "registry.example.com": { - "auth": "BGVtbYk3ZHAtqXs=", - "email": "you@example.com" - }, - "cloud.openshift.com": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "quay.io": { - "auth": "b3BlbnNo...", - "email": "you@example.com" - }, - "registry.connect.redhat.com": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - }, - "registry.redhat.io": { - "auth": "NTE3Njg5Nj...", - "email": "you@example.com" - } - } -} ----- -endif::[] - -//// -This is not currently working as intended. -. Log in to your registry by using the following command: -+ -[source,terminal] ----- -$ oc registry login --to ./pull-secret.json --registry "<registry_host_and_port>" --auth-basic=<username>:<password> ----- -+ -Provide both the registry details and a valid user name and password for the registry. 
-//// - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:!restricted: -endif::[] - -ifeval::["{context}" == "mirroring-ocp-image-repository"] -:!restricted: -:!update-oc-mirror: -endif::[] - -ifeval::["{context}" == "installing-mirroring-disconnected"] -:!restricted: -:!oc-mirror: -endif::[] diff --git a/modules/installation-adding-vcenter-root-certificates.adoc b/modules/installation-adding-vcenter-root-certificates.adoc deleted file mode 100644 index e38ed94989dd..000000000000 --- a/modules/installation-adding-vcenter-root-certificates.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc - -:_content-type: PROCEDURE -[id="installation-adding-vcenter-root-certificates_{context}"] -= Adding vCenter root CA certificates to your system trust - -Because the installation program requires access to your vCenter's API, you must add your vCenter's trusted root CA certificates to your system trust before you install an {product-title} cluster. - -.Procedure - -. From the vCenter home page, download the vCenter's root CA certificates. Click *Download trusted root CA certificates* in the vSphere Web Services SDK section. The `<vCenter>/certs/download.zip` file downloads. - -. Extract the compressed file that contains the vCenter root CA certificates. The contents of the compressed file resemble the following file structure: -+ ----- -certs -├── lin -│ ├── 108f4d17.0 -│ ├── 108f4d17.r1 -│ ├── 7e757f6a.0 -│ ├── 8e4f8471.0 -│ └── 8e4f8471.r0 -├── mac -│ ├── 108f4d17.0 -│ ├── 108f4d17.r1 -│ ├── 7e757f6a.0 -│ ├── 8e4f8471.0 -│ └── 8e4f8471.r0 -└── win - ├── 108f4d17.0.crt - ├── 108f4d17.r1.crl - ├── 7e757f6a.0.crt - ├── 8e4f8471.0.crt - └── 8e4f8471.r0.crl - -3 directories, 15 files ----- - -. Add the files for your operating system to the system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# cp certs/lin/* /etc/pki/ca-trust/source/anchors ----- - -. Update your system trust. For example, on a Fedora operating system, run the following command: -+ -[source,terminal] ----- -# update-ca-trust extract ----- diff --git a/modules/installation-alibaba-config-yaml.adoc b/modules/installation-alibaba-config-yaml.adoc deleted file mode 100644 index c3fb7aa75be1..000000000000 --- a/modules/installation-alibaba-config-yaml.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-customizations.adoc - -:_content-type: REFERENCE -[id="installation-alibaba-config-yaml_{context}"] -= Sample customized install-config.yaml file for Alibaba Cloud - -You can customize the installation configuration file (`install-config.yaml`) to specify more details about -your cluster's platform or modify the values of the required -parameters. 
- -[source,yaml] ----- -apiVersion: v1 -baseDomain: alicloud-dev.devcluster.openshift.com -credentialsMode: Manual -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: {} - replicas: 3 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: test-cluster <1> - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <2> - serviceNetwork: - - 172.30.0.0/16 -platform: - alibabacloud: - defaultMachinePlatform: <3> - instanceType: ecs.g6.xlarge - systemDiskCategory: cloud_efficiency - systemDiskSize: 200 - region: ap-southeast-1 <4> - resourceGroupID: rg-acfnw6j3hyai <5> - vpcID: vpc-0xifdjerdibmaqvtjob2b <8> - vswitchIDs: <8> - - vsw-0xi8ycgwc8wv5rhviwdq5 - - vsw-0xiy6v3z2tedv009b4pz2 -publish: External -pullSecret: '{"auths": {"cloud.openshift.com": {"auth": ... }' <6> -sshKey: | - ssh-rsa AAAA... <7> ----- -<1> Required. The installation program prompts you for a cluster name. -<2> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<3> Optional. Specify parameters for machine pools that do not define their own platform configuration. -<4> Required. The installation program prompts you for the region to deploy the cluster to. -<5> Optional. Specify an existing resource group where the cluster should be installed. -<6> Required. The installation program prompts you for the pull secret. -<7> Optional. The installation program prompts you for the SSH key value that you use to access the machines in your cluster. -<8> Optional. These are example vswitchID values. diff --git a/modules/installation-alibaba-dns.adoc b/modules/installation-alibaba-dns.adoc deleted file mode 100644 index 4af3415ddf36..000000000000 --- a/modules/installation-alibaba-dns.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-account.adoc - -:_content-type: PROCEDURE -[id="installation-alibaba-dns_{context}"] -= Registering and Configuring Alibaba Cloud Domain - -To install {product-title}, the Alibaba Cloud account you use must have a dedicated public hosted zone in your account. This zone must be authoritative for the domain. This service provides cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. You can transfer an existing domain and registrar or obtain a new one through Alibaba Cloud or another source. -+ -[NOTE] -==== -If you purchase a new domain through Alibaba Cloud, it takes time for the relevant DNS changes to propagate. For more information about purchasing domains through Alibaba Cloud, see link:https://www.alibabacloud.com/domain[Alibaba Cloud domains]. -==== - -. If you are using an existing domain and registrar, migrate its DNS to Alibaba Cloud. See link:https://www.alibabacloud.com/help/en/doc-detail/42479.htm[Domain name transfer] -in the Alibaba Cloud documentation. - -. Configure DNS for your domain. This includes: -* link:https://partners-intl.aliyun.com/help/en/doc-detail/54068.htm?spm=a2c63.p38356.0.0.427d2054k5gZOr#task-1830383[Registering a generic domain name]. 
-* link:https://partners-intl.aliyun.com/help/en/doc-detail/108953.htm?spm=a2c63.p38356.0.0.3c62433fjUrdZG#section-qyn-s41-ygb[Completing real-name verification for your domain name]. -* link:https://account.alibabacloud.com/login/login.htm[Applying for an Internet Content Provider (ICP) filing]. -* link:https://www.alibabacloud.com/product/dns/pricing?spm=a3c0i.23458820.2359477120.2.36ca7d3fe0b5KL[Enabling domain name resolution]. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, such as `clusters.openshiftcorp.com`. - -. If you are using a subdomain, follow the procedures of your company to add its delegation records to the parent domain. - -//// -.Question - -Can Alibaba provide a link(s) to their doc on how to complete each task under step 3 in their doc? Could not find content in their help. -//// diff --git a/modules/installation-alibaba-regions.adoc b/modules/installation-alibaba-regions.adoc deleted file mode 100644 index 0667c6b4e912..000000000000 --- a/modules/installation-alibaba-regions.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-account.adoc - -:_content-type: REFERENCE -[id="installation-alibaba-regions_{context}"] -= Supported Alibaba regions - -You can deploy an {product-title} cluster to the regions listed in the link:https://www.alibabacloud.com/help/en/doc-detail/188196.htm[Alibaba _Regions and zones_ documentation]. - -//// -Answer from Gaurav Singh (PM) - -All of the regions (in mainland china and outside mainland china ) listed in this doc https://www.alibabacloud.com/help/doc-detail/188196.htm[Alibaba doc] will be shown as option to the customer to deploy openshift . We might need to test all of them. 
-//// diff --git a/modules/installation-approve-csrs.adoc b/modules/installation-approve-csrs.adoc deleted file mode 100644 index b5a8835ed88a..000000000000 --- a/modules/installation-approve-csrs.adoc +++ /dev/null @@ -1,199 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-restricted-networks.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * machine_management/adding-rhel-compute.adoc -// * machine_management/more-rhel-compute.adoc -// * machine_management/user_provisioned/adding-aws-compute-user-infra.adoc -// * machine_management/user_provisioned/adding-bare-metal-compute-user-infra.adoc -// * machine_management/user_provisioned/adding-vsphere-compute-user-infra.adoc -// * post_installation_configuration/node-tasks.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-power.adoc - - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] - -:_content-type: PROCEDURE -[id="installation-approve-csrs_{context}"] -= Approving the certificate signing requests for your machines - -When you add machines to a cluster, two pending certificate signing requests (CSRs) are generated for each machine that you added. You must confirm that these CSRs are approved or, if necessary, approve them yourself. The client requests must be approved first, followed by the server requests. - -.Prerequisites - -* You added machines to your cluster. - -.Procedure - -. Confirm that the cluster recognizes the machines: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0 Ready master 63m v1.26.0 -master-1 Ready master 63m v1.26.0 -master-2 Ready master 64m v1.26.0 ----- -+ -The output lists all of the machines that you created. -+ -[NOTE] -==== -The preceding output might not include the compute nodes, also known as worker nodes, until some CSRs are approved. -==== - -. Review the pending CSRs and ensure that you see the client requests with the `Pending` or `Approved` status for each machine that you added to the cluster: -+ -ifndef::ibm-z,ibm-z-kvm[] -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output -[source,terminal] ----- -NAME AGE REQUESTOR CONDITION -csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -... ----- -+ -In this example, two machines are joining the cluster. 
You might see more approved CSRs in the list. -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -[source,terminal] ----- -$ oc get csr ----- -+ -[source,terminal] -.Example output ----- -NAME AGE REQUESTOR CONDITION -csr-mddf5 20m system:node:master-01.example.com Approved,Issued -csr-z5rln 16m system:node:worker-21.example.com Approved,Issued ----- -endif::ibm-z,ibm-z-kvm[] - -. If the CSRs were not approved, after all of the pending CSRs for the machines you added are in `Pending` status, approve the CSRs for your cluster machines: -+ -[NOTE] -==== -Because the CSRs rotate automatically, approve your CSRs within an hour of adding the machines to the cluster. If you do not approve them within an hour, the certificates will rotate, and more than two certificates will be present for each node. You must approve all of these certificates. After the client CSR is approved, the Kubelet creates a secondary CSR for the serving certificate, which requires manual approval. Then, subsequent serving certificate renewal requests are automatically approved by the `machine-approver` if the Kubelet requests a new certificate with identical parameters. -==== -+ -[NOTE] -==== -For clusters running on platforms that are not machine API enabled, such as bare metal and other user-provisioned infrastructure, you must implement a method of automatically approving the kubelet serving certificate requests (CSRs). If a request is not approved, then the `oc exec`, `oc rsh`, and `oc logs` commands cannot succeed, because a serving certificate is required when the API server connects to the kubelet. Any operation that contacts the Kubelet endpoint requires this certificate approval to be in place. The method must watch for new CSRs, confirm that the CSR was submitted by the `node-bootstrapper` service account in the `system:node` or `system:admin` groups, and confirm the identity of the node. -==== - -** To approve them individually, run the following command for each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -** To approve all pending CSRs, run the following command: -+ -[source,terminal] ----- -$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve ----- -+ -[NOTE] -==== -Some Operators might not become available until some CSRs are approved. -==== - -. Now that your client requests are approved, you must review the server requests for each machine that you added to the cluster: -+ -[source,terminal] ----- -$ oc get csr ----- -+ -.Example output -[source,terminal] ----- -NAME AGE REQUESTOR CONDITION -csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending -csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending -... ----- - -. If the remaining CSRs are not approved, and are in the `Pending` status, approve the CSRs for your cluster machines: - -** To approve them individually, run the following command for each valid CSR: -+ -[source,terminal] ----- -$ oc adm certificate approve <csr_name> <1> ----- -<1> `<csr_name>` is the name of a CSR from the list of current CSRs. - -** To approve all pending CSRs, run the following command: -+ -[source,terminal] ----- -$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs oc adm certificate approve ----- - -. 
After all client and server CSRs have been approved, the machines have the `Ready` status. Verify this by running the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0 Ready master 73m v1.26.0 -master-1 Ready master 73m v1.26.0 -master-2 Ready master 74m v1.26.0 -worker-0 Ready worker 11m v1.26.0 -worker-1 Ready worker 11m v1.26.0 ----- -+ -[NOTE] -==== -It can take a few minutes after approval of the server CSRs for the machines to transition to the `Ready` status. -==== - -.Additional information -* For more information on CSRs, see link:https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/[Certificate Signing Requests]. - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] diff --git a/modules/installation-arm-bootstrap.adoc b/modules/installation-arm-bootstrap.adoc deleted file mode 100644 index 0840532634cd..000000000000 --- a/modules/installation-arm-bootstrap.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-bootstrap_{context}"] -= ARM template for the bootstrap machine - -You can use the following Azure Resource Manager (ARM) template to deploy the -bootstrap machine that you need for your {product-title} cluster: - -.`04_bootstrap.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/04_bootstrap.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/04_bootstrap.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-control-plane.adoc b/modules/installation-arm-control-plane.adoc deleted file mode 100644 index 59b8127da2c0..000000000000 --- a/modules/installation-arm-control-plane.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-control-plane_{context}"] -= ARM template for control plane machines - -You can use the following Azure Resource Manager (ARM) template to deploy the -control plane machines that you need for your {product-title} cluster: - -.`05_masters.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/05_masters.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/05_masters.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-dns.adoc b/modules/installation-arm-dns.adoc deleted file mode 100644 index f15f4cd08ab0..000000000000 --- a/modules/installation-arm-dns.adoc +++ /dev/null @@ 
-1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-dns_{context}"] -= ARM template for the network and load balancers - -You can use the following Azure Resource Manager (ARM) template to deploy the -networking objects and load balancers that you need for your {product-title} -cluster: - -.`03_infra.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/03_infra.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/03_infra.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-image-storage.adoc b/modules/installation-arm-image-storage.adoc deleted file mode 100644 index b22d620d6c42..000000000000 --- a/modules/installation-arm-image-storage.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-image-storage_{context}"] -= ARM template for image storage - -You can use the following Azure Resource Manager (ARM) template to deploy the -stored {op-system-first} image that you need for your {product-title} cluster: - -.`02_storage.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/02_storage.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/02_storage.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-vnet.adoc b/modules/installation-arm-vnet.adoc deleted file mode 100644 index 8c1f64237b4c..000000000000 --- a/modules/installation-arm-vnet.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-vnet_{context}"] -= ARM template for the VNet - -You can use the following Azure Resource Manager (ARM) template to deploy the -VNet that you need for your {product-title} cluster: - -.`01_vnet.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/01_vnet.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/01_vnet.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-arm-worker.adoc b/modules/installation-arm-worker.adoc deleted file mode 100644 index 6586d94624ff..000000000000 --- 
a/modules/installation-arm-worker.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -[id="installation-arm-worker_{context}"] -= ARM template for worker machines - -You can use the following Azure Resource Manager (ARM) template to deploy the -worker machines that you need for your {product-title} cluster: - -.`06_workers.json` ARM template -[%collapsible] -==== -[source,json] ----- -ifndef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azure/06_workers.json[] -endif::ash[] -ifdef::ash[] -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/azurestack/06_workers.json[] -endif::ash[] ----- -==== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-aws-about-government-region.adoc b/modules/installation-aws-about-government-region.adoc deleted file mode 100644 index 03ca50398f50..000000000000 --- a/modules/installation-aws-about-government-region.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc - -ifeval::["{context}" == "installing-aws-government-region"] -:aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] - -[id="installation-aws-about-gov-secret-region_{context}"] -ifdef::aws-gov[] -= AWS government regions -endif::aws-gov[] -ifdef::aws-secret[] -= AWS secret regions -endif::aws-secret[] - -ifdef::aws-gov[] -{product-title} supports deploying a cluster to an link:https://aws.amazon.com/govcloud-us[AWS GovCloud (US)] region. -endif::aws-gov[] - -ifdef::aws-gov[] -The following AWS GovCloud partitions are supported: - -* `us-gov-east-1` -* `us-gov-west-1` -endif::aws-gov[] - -ifdef::aws-secret[] -The following AWS secret partitions are supported: - -* `us-isob-east-1` (SC2S) -* `us-iso-east-1` (C2S) - -[NOTE] -==== -The maximum supported MTU in an AWS SC2S and C2S Regions is not the same as -AWS commercial. For more information about configuring MTU during installation, -see the _Cluster Network Operator configuration object_ section in _Installing -a cluster on AWS with network customizations_ -==== -endif::aws-secret[] - -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] diff --git a/modules/installation-aws-access-analyzer.adoc b/modules/installation-aws-access-analyzer.adoc deleted file mode 100644 index 2ec78ad2588d..000000000000 --- a/modules/installation-aws-access-analyzer.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: PROCEDURE -[id="create-custom-permissions-for-iam-instance-profiles_{context}"] -= Using AWS IAM Analyzer to create policy templates - -The minimal set of permissions that the control plane and compute instance profiles require depends on how the cluster is configured for its daily operation. 
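The policy-template workflow described below relies on AWS CloudTrail having recorded the cluster's API activity. As a quick preliminary check, you can confirm that a trail exists and is currently logging. This is a minimal sketch that assumes the AWS CLI is configured for your account; `<trail_name>` is a placeholder:

[source,terminal]
----
$ aws cloudtrail describe-trails --query 'trailList[].Name'
$ aws cloudtrail get-trail-status --name <trail_name>
----

The `IsLogging` field in the `get-trail-status` output indicates whether the trail is recording events.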
- -One way to determine which permissions the cluster instances require is to use the AWS Identity and Access Management Access Analyzer (IAM Access Analyzer) to create a policy template: - -* A policy template contains the permissions the cluster has used over a specified period of time. -* You can then use the template to create policies with fine-grained permissions. - -.Procedure - -The overall process is as follows: - -. Ensure that CloudTrail is enabled. CloudTrail records all of the actions and events in your AWS account, including the API calls that are required to create a policy template. For more information, see the AWS documentation for https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-getting-started.html[working with CloudTrail]. -. Create an instance profile for control plane instances and an instance profile for compute instances. Be sure to assign each role a permissive policy, such as PowerUserAccess. For more information, see the AWS documentation for -https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html[creating instance profile roles]. -. Install the cluster in a development environment and configure it as required. Be sure to deploy all of the applications that the cluster will host in a production environment. -. Test the cluster thoroughly. Testing the cluster ensures that all of the required API calls are logged. -. Use the IAM Access Analyzer to create a policy template for each instance profile. For more information, see the AWS documentation for https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-policy-generation.html[generating policies based on the CloudTrail logs]. -. Create and add a fine-grained policy to each instance profile. -. Remove the permissive policy from each instance profile. -. Deploy a production cluster using the existing instance profiles with the new policies. - -[NOTE] -==== -You can add https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html[IAM Conditions] to your policy to make it more restrictive and compliant with your organization's security requirements. -==== diff --git a/modules/installation-aws-add-iam-roles.adoc b/modules/installation-aws-add-iam-roles.adoc deleted file mode 100644 index 0e7ce88aa415..000000000000 --- a/modules/installation-aws-add-iam-roles.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="specify-an-existing-iam-role_{context}"] -= Specifying an existing IAM role - -Instead of allowing the installation program to create IAM instance profiles with the default permissions, you can use the `install-config.yaml` file to specify an existing IAM role for control plane and compute instances. - -.Prerequisites - -* You have an existing `install-config.yaml` file. - -.Procedure - -. Update `compute.platform.aws.iamRole` with an existing role for the compute machines. -+ -.Sample `install-config.yaml` file with an IAM role for compute instances -[source,yaml] ----- -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - iamRole: ExampleRole ----- -. Update `controlPlane.platform.aws.iamRole` with an existing role for the control plane machines. -+ -.Sample `install-config.yaml` file with an IAM role for control plane instances [source,yaml] ----- -controlPlane: - hyperthreading: Enabled - name: master - platform: - aws: - iamRole: ExampleRole ----- -. 
Save the file and reference it when installing the {product-title} cluster. diff --git a/modules/installation-aws-add-local-zone-locations.adoc b/modules/installation-aws-add-local-zone-locations.adoc deleted file mode 100644 index 691e858b509c..000000000000 --- a/modules/installation-aws-add-local-zone-locations.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="installation-aws-add-local-zone-locations_{context}"] -= Opting into AWS Local Zones - -If you plan to create the subnets in AWS Local Zones, you must opt in to each zone group separately. - -.Prerequisites - -* You have installed the AWS CLI. -* You have determined into which region you will deploy your {product-title} cluster. - -.Procedure - -. Export a variable to contain the name of the region in which you plan to deploy your {product-title} cluster by running the following command: -+ -[source,terminal] ----- -$ export CLUSTER_REGION="<region_name>" <1> ----- -<1> For `<region_name>`, specify a valid AWS region name, such as `us-east-1`. - -. List the zones that are available in your region by running the following command: -+ -[source,terminal] ----- -$ aws --region ${CLUSTER_REGION} ec2 describe-availability-zones \ - --query 'AvailabilityZones[].[{ZoneName: ZoneName, GroupName: GroupName, Status: OptInStatus}]' \ - --filters Name=zone-type,Values=local-zone \ - --all-availability-zones ----- -+ -Depending on the region, the list of available zones can be long. The command will return the following fields: -+ -`ZoneName`:: The name of the Local Zone. -`GroupName`:: The group that the zone is part of. You need to save this name to opt in. -`Status`:: The status of the Local Zone group. If the status is `not-opted-in`, you must opt in the `GroupName` by running the commands that follow. - -. Export a variable to contain the name of the Local Zone to host your VPC by running the following command: -+ -[source,terminal] ----- -$ export ZONE_GROUP_NAME="<value_of_GroupName>" <1> ----- -+ -where: - -<value_of_GroupName>:: Specifies the name of the group of the Local Zone you want to create subnets on. For example, specify `us-east-1-nyc-1` to use the zone `us-east-1-nyc-1a`, US East (New York). - -. Opt in to the zone group on your AWS account by running the following command: -+ -[source,terminal] ----- -$ aws ec2 modify-availability-zone-group \ - --group-name "${ZONE_GROUP_NAME}" \ - --opt-in-status opted-in ----- diff --git a/modules/installation-aws-ami-stream-metadata.adoc b/modules/installation-aws-ami-stream-metadata.adoc deleted file mode 100644 index f08a0b2202f8..000000000000 --- a/modules/installation-aws-ami-stream-metadata.adoc +++ /dev/null @@ -1,61 +0,0 @@ -//TODO: Add the module include to the following assemblies -//TODO: Create related modules for OpenStack (QCOW2) and Bare Metal (ISO) - -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-ami-stream-metadata_{context}"] -= Accessing {op-system} AMIs with stream metadata - -In {product-title}, _stream metadata_ provides standardized metadata about {op-system} in the JSON format and injects the metadata into the cluster. 
Stream metadata is a stable format that supports multiple architectures and is intended to be self-documenting for maintaining automation. - -You can use the `coreos print-stream-json` sub-command of `openshift-install` to access information about the boot images in the stream metadata format. This command provides a method for printing stream metadata in a scriptable, machine-readable format. - -For user-provisioned installations, the `openshift-install` binary contains references to the version of {op-system} boot images that are tested for use with {product-title}, such as the AWS AMI. - -.Procedure - -To parse the stream metadata, use one of the following methods: - -* From a Go program, use the official `stream-metadata-go` library at https://github.com/coreos/stream-metadata-go. You can also view example code in the library. - -* From another programming language, such as Python or Ruby, use the JSON library of your preferred programming language. - -* From a command-line utility that handles JSON data, such as `jq`: - -** Print the current `x86_64` -ifndef::openshift-origin[] -or `aarch64` -endif::openshift-origin[] -AMI for an AWS region, such as `us-west-1`: -+ -.For x86_64 -[source,terminal] ----- -$ openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.images.aws.regions["us-west-1"].image' ----- -+ -.Example output -[source,terminal] ----- -ami-0d3e625f84626bbda ----- -+ -ifndef::openshift-origin[] -.For aarch64 -[source,terminal] ----- -$ openshift-install coreos print-stream-json | jq -r '.architectures.aarch64.images.aws.regions["us-west-1"].image' ----- -+ -.Example output -[source,terminal] ----- -ami-0af1d3b7fa5be2131 ----- -+ -endif::openshift-origin[] -The output of this command is the AWS AMI ID for your designated architecture and the `us-west-1` region. The AMI must belong to the same region as the cluster. diff --git a/modules/installation-aws-arm-tested-machine-types.adoc b/modules/installation-aws-arm-tested-machine-types.adoc deleted file mode 100644 index 31e27b899e0c..000000000000 --- a/modules/installation-aws-arm-tested-machine-types.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-china.adoc -// installing/installing_aws/installing-aws-customizations.adoc -// installing/installing_aws/installing-aws-government-region.adoc -// installing/installing_aws/installing-aws-network-customizations.adoc -// installing/installing_aws/installing-aws-private.adoc -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-aws-vpc.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-arm-tested-machine-types_{context}"] -= Tested instance types for AWS on 64-bit ARM infrastructures - -The following Amazon Web Services (AWS) 64-bit ARM instance types have been tested with {product-title}. - -[NOTE] -==== -Use the machine types included in the following charts for your AWS ARM instances. If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". 
-==== - -.Machine types based on 64-bit ARM architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/aws/tested_instance_types_aarch64.md[] -==== diff --git a/modules/installation-aws-config-yaml.adoc b/modules/installation-aws-config-yaml.adoc deleted file mode 100644 index 1b9401aee4bc..000000000000 --- a/modules/installation-aws-config-yaml.adoc +++ /dev/null @@ -1,467 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -ifeval::["{context}" == "installing-aws-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" != "installing-aws-network-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:vpc: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:vpc: -:private: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:vpc: -:private: -:gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:vpc: -:private: -:secret: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:vpc: -:private: -:china: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws-outposts: -endif::[] - -:_content-type: REFERENCE -[id="installation-aws-config-yaml_{context}"] -= Sample customized install-config.yaml file for AWS - -You can customize the installation configuration file (`install-config.yaml`) to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -ifndef::china,gov,secret[] -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program and modify it. -==== -endif::china,gov,secret[] - -ifdef::china,gov,secret[] -[IMPORTANT] -==== -This sample YAML file is provided for reference only. Use it as a resource to enter parameter values into the installation configuration file that you created manually. 
-==== -endif::china,gov,secret[] - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -credentialsMode: Mint <2> -controlPlane: <3> <4> - hyperthreading: Enabled <5> - name: master -ifndef::aws-outposts[] - platform: - aws: -ifndef::openshift-origin[] - lbType: NLB -endif::openshift-origin[] - zones: -ifdef::china[] - - cn-north-1a - - cn-north-1b -endif::china[] -ifdef::gov[] - - us-gov-west-1a - - us-gov-west-1b -endif::gov[] -ifdef::secret[] - - us-iso-east-1a - - us-iso-east-1b -endif::secret[] -ifndef::gov,china,secret[] - - us-west-2a - - us-west-2b -endif::gov,china,secret[] - rootVolume: - iops: 4000 - size: 500 - type: io1 <6> - metadataService: - authentication: Optional <7> - type: m6i.xlarge -endif::aws-outposts[] -ifdef::aws-outposts[] - platform: {} -endif::aws-outposts[] - replicas: 3 -compute: <3> -- hyperthreading: Enabled <5> - name: worker - platform: - aws: -ifndef::aws-outposts[] - rootVolume: - iops: 2000 - size: 500 - type: io1 <6> - metadataService: - authentication: Optional <7> - type: c5.4xlarge - zones: -ifdef::china[] - - cn-north-1a -endif::china[] -ifdef::gov[] - - us-gov-west-1c -endif::gov[] -ifdef::secret[] - - us-iso-east-1a - - us-iso-east-1b -endif::secret[] -ifndef::gov,china,secret[] - - us-west-2c -endif::gov,china,secret[] -endif::aws-outposts[] -ifdef::aws-outposts[] - type: m5.large <6> - zones: - - us-east-1a <7> - rootVolume: - type: gp2 <8> - size: 120 -endif::aws-outposts[] - replicas: 3 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <3> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 -ifndef::aws-outposts[] - networkType: OVNKubernetes <8> -endif::aws-outposts[] -ifdef::aws-outposts[] - networkType: OVNKubernetes <9> -endif::aws-outposts[] - serviceNetwork: - - 172.30.0.0/16 -platform: - aws: -ifndef::gov,china,secret[] - region: us-west-2 <1> - propagateUserTags: true <3> -endif::gov,china,secret[] -ifdef::china[] - region: cn-north-1 <1> - propagateUserTags: true <3> -endif::china[] -ifdef::gov[] - region: us-gov-west-1 <1> - propagateUserTags: true <3> -endif::gov[] -ifdef::secret[] - region: us-iso-east-1 <1> - propagateUserTags: true <3> -endif::secret[] - userTags: - adminContact: jdoe - costCenter: 7536 -ifdef::vpc,restricted[] - subnets: <9> - - subnet-1 - - subnet-2 - - subnet-3 -endif::vpc,restricted[] -ifdef::aws-outposts[] - subnets: <10> - - subnet-1 - - subnet-2 - - subnet-3 -endif::aws-outposts[] -ifdef::vpc,restricted[] -ifndef::secret,china[] - amiID: ami-96c6f8f7 <10> -endif::secret,china[] -ifdef::secret,china[] - amiID: ami-96c6f8f7 <1> <10> -endif::secret,china[] - serviceEndpoints: <11> - - name: ec2 -ifndef::china[] - url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com -endif::china[] -ifdef::china[] - url: https://vpce-id.ec2.cn-north-1.vpce.amazonaws.com.cn -endif::china[] - hostedZone: Z3URY6TWQ91KVV <12> -endif::vpc,restricted[] -ifndef::vpc,restricted,aws-outposts[] - amiID: ami-96c6f8f7 <9> - serviceEndpoints: <10> - - name: ec2 - url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com -endif::vpc,restricted,aws-outposts[] -ifdef::vpc,restricted[] -ifndef::openshift-origin[] -fips: false <13> -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... 
<13> -endif::openshift-origin[] -endif::vpc,restricted[] -ifndef::vpc,restricted[] -ifndef::openshift-origin,aws-outposts[] -fips: false <11> -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin,aws-outposts[] -ifdef::openshift-origin,aws-outposts[] -sshKey: ssh-ed25519 AAAA... <11> -endif::openshift-origin,aws-outposts[] -endif::vpc,restricted[] -ifdef::private[] -ifndef::openshift-origin[] -publish: Internal <15> -endif::openshift-origin[] -endif::private[] -ifndef::restricted[] -pullSecret: '{"auths": ...}' <1> -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <14> -endif::openshift-origin[] -endif::restricted[] -ifdef::secret[] -ifndef::openshift-origin[] -additionalTrustBundle: | <16> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -endif::openshift-origin[] -endif::secret[] -ifdef::private[] -ifdef::openshift-origin[] -publish: Internal <14> -endif::openshift-origin[] -endif::private[] -ifdef::secret[] -ifdef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -endif::openshift-origin[] -endif::secret[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <16> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -imageContentSources: <17> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -imageContentSources: <16> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -endif::restricted[] ----- -ifndef::gov,secret,china[] -<1> Required. The installation program prompts you for this value. -endif::gov,secret,china[] -ifdef::gov,secret,china[] -<1> Required. -endif::gov,secret,china[] -<2> Optional: Add this parameter to force the Cloud Credential Operator (CCO) to use the specified mode, instead of having the CCO dynamically try to determine the capabilities of the credentials. For details about CCO modes, see the _Cloud Credential Operator_ entry in the _Red Hat Operators reference_ content. -<3> If you do not provide these parameters and values, the installation program -provides the default value. -<4> The `controlPlane` section is a single mapping, but the `compute` section is a -sequence of mappings. To meet the requirements of the different data structures, -the first line of the `compute` section must begin with a hyphen, `-`, and the -first line of the `controlPlane` section must not. Only one control plane pool is used. -<5> Whether to enable or disable simultaneous multithreading, or -`hyperthreading`. By default, simultaneous multithreading is enabled -to increase the performance of your machines' cores. You can disable it by -setting the parameter value to `Disabled`. 
If you disable simultaneous -multithreading in some cluster machines, you must disable it in all cluster -machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. Use larger -instance types, such as `m4.2xlarge` or `m5.2xlarge`, for your machines if you -disable simultaneous multithreading. -==== -ifndef::aws-outposts[] -<6> To configure faster storage for etcd, especially for larger clusters, set the storage type as `io1` and set `iops` to `2000`. -<7> Whether to require the link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html[Amazon EC2 Instance Metadata Service v2] (IMDSv2). To require IMDSv2, set the parameter value to `Required`. To allow the use of both IMDSv1 and IMDSv2, set the parameter value to `Optional`. If no value is specified, both IMDSv1 and IMDSv2 are allowed. -+ -[NOTE] -==== -The IMDS configuration for control plane machines that is set during cluster installation can only be changed by using the AWS CLI. The IMDS configuration for compute machines can be changed by using compute machine sets. -==== -<8> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -endif::aws-outposts[] -ifdef::aws-outposts[] -<6> For compute instances running in an AWS Outpost instance, specify a supported instance type in the AWS Outpost instance. -<7> For compute instances running in AWS Outpost instance, specify the Availability Zone where the Outpost instance is located. -<8> For compute instances running in AWS Outpost instance, specify volume type gp2, to avoid using gp3 volume type which is not supported. -<9> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<10> If you provide your own VPC, specify subnets for each availability zone that your cluster uses. -endif::aws-outposts[] -ifdef::vpc,restricted[] -<9> If you provide your own VPC, specify subnets for each availability zone that your cluster uses. -<10> The ID of the AMI used to boot machines for the cluster. If set, the AMI -must belong to the same region as the cluster. -<11> The AWS service endpoints. Custom endpoints are required when installing to -an unknown AWS region. The endpoint URL must use the `https` protocol and the -host must trust the certificate. -<12> The ID of your existing Route 53 private hosted zone. Providing an existing hosted zone requires that you supply your own VPC and the hosted zone is already associated with the VPC prior to installing your cluster. If undefined, the installation program creates a new hosted zone. -ifndef::openshift-origin[] -<13> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -endif::vpc,restricted[] -ifndef::vpc,restricted,aws-outposts[] -<9> The ID of the AMI used to boot machines for the cluster. If set, the AMI must belong to the same region as the cluster. -<10> The AWS service endpoints. Custom endpoints are required when installing to an unknown AWS region. The endpoint URL must use the `https` protocol and the host must trust the certificate. -ifndef::openshift-origin[] -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<11> You can optionally provide the `sshKey` value that you use to access the -machines in your cluster. -endif::openshift-origin[] -endif::vpc,restricted,aws-outposts[] -ifdef::aws-outposts[] -<11> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::aws-outposts[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::private[] -ifndef::openshift-origin[] -<15> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::private[] -ifdef::secret[] -ifndef::openshift-origin[] -<16> The custom CA certificate. This is required when deploying to the SC2S or C2S Regions because the AWS API requires a custom CA trust bundle. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> The custom CA certificate. This is required when deploying to the SC2S or C2S Regions because the AWS API requires a custom CA trust bundle. -endif::openshift-origin[] -endif::secret[] -ifdef::restricted[] -ifndef::openshift-origin[] -<15> For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -<16> Provide the contents of the certificate file that you used for your mirror registry. -<17> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. 
-<15> Provide the contents of the certificate file that you used for your mirror registry. -<16> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -endif::restricted[] - -ifeval::["{context}" == "installing-aws-network-customizations"] -:!with-networking: -endif::[] -ifeval::["{context}" != "installing-aws-network-customizations"] -:!without-networking: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!vpc: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!vpc: -:!private: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!vpc: -:!private: -:!gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!vpc: -:!private: -:!secret: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:!vpc: -:!private: -:!china: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws-outposts: -endif::[] diff --git a/modules/installation-aws-delete-cluster.adoc b/modules/installation-aws-delete-cluster.adoc deleted file mode 100644 index 1fc057646abc..000000000000 --- a/modules/installation-aws-delete-cluster.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/uninstalling-cluster-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-delete-cluster"] -= Deleting a cluster with a configured AWS Local Zone infrastructure - -After you install a cluster on Amazon Web Services (AWS) into an existing Virtual Private Cloud (VPC), and you set subnets for each Local Zone location, you can delete the cluster and any AWS resources associated with it. - -The example in the procedure assumes that you created a VPC and its subnets by using a CloudFormation template. - -.Prerequisites - -* You know the name of the CloudFormation stacks, `<local_zone_stack_name>` and `<vpc_stack_name>`, that were used during the creation of the network. You need the name of the stack to delete the cluster. -* You have access rights to the directory that contains the installation files that were created by the installation program. -* Your account includes a policy that provides you with permissions to delete the CloudFormation stack. - -.Procedure - -. Change to the directory that contains the stored installation program, and delete the cluster by using the `destroy cluster` command: -+ -[source,terminal] ----- -$ ./openshift-install destroy cluster --dir <installation_directory> \//<1> - --log-level=debug <2> ----- -<1> For `<installation_directory>`, specify the directory that stored any files created by the installation program. -<2> To view different log details, specify `error`, `info`, or `warn` instead of `debug`. - -. Delete the CloudFormation stack for the Local Zone subnet: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name <local_zone_stack_name> ----- - -. Delete the stack of resources that represent the VPC: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name <vpc_stack_name> ----- - -.Verification - -* Check that you removed the stack resources by issuing the following commands in the AWS CLI. The AWS CLI outputs that no template component exists. 
-+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <local_zone_stack_name> ----- -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <vpc_stack_name> ----- \ No newline at end of file diff --git a/modules/installation-aws-editing-manifests.adoc b/modules/installation-aws-editing-manifests.adoc deleted file mode 100644 index 44c36e1ed291..000000000000 --- a/modules/installation-aws-editing-manifests.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -:_content-type: PROCEDURE -[id="installation-aws-creating-manifests_{context}"] -= Generating manifest files - -Use the installation program to generate a set of manifest files in the assets directory. Manifest files are required to specify the AWS Outposts subnets to use for worker machines, and to specify settings required by the network provider. - -If you plan to reuse the `install-config.yaml` file, create a backup file before you generate the manifest files. - -.Procedure - -. Optional: Create a backup copy of the `install-config.yaml` file: -+ -[source,terminal] ----- -$ cp install-config.yaml install-config.yaml.backup ----- - -. Generate a set of manifests in your assets directory: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_-_directory> ----- -+ -This command displays the following messages. -+ -.Example output -[source,terminal] ----- -INFO Consuming Install Config from target directory -INFO Manifests created in: <installation_directory>/manifests and <installation_directory>/openshift ----- -+ -The command generates the following manifest files: -+ -.Example output -[source,terminal] ----- -$ tree -. -├── manifests -│  ├── cluster-config.yaml -│  ├── cluster-dns-02-config.yml -│  ├── cluster-infrastructure-02-config.yml -│  ├── cluster-ingress-02-config.yml -│  ├── cluster-network-01-crd.yml -│  ├── cluster-network-02-config.yml -│  ├── cluster-proxy-01-config.yaml -│  ├── cluster-scheduler-02-config.yml -│  ├── cvo-overrides.yaml -│  ├── kube-cloud-config.yaml -│  ├── kube-system-configmap-root-ca.yaml -│  ├── machine-config-server-tls-secret.yaml -│  └── openshift-config-secret-pull-secret.yaml -└── openshift - ├── 99_cloud-creds-secret.yaml - ├── 99_kubeadmin-password-secret.yaml - ├── 99_openshift-cluster-api_master-machines-0.yaml - ├── 99_openshift-cluster-api_master-machines-1.yaml - ├── 99_openshift-cluster-api_master-machines-2.yaml - ├── 99_openshift-cluster-api_master-user-data-secret.yaml - ├── 99_openshift-cluster-api_worker-machineset-0.yaml - ├── 99_openshift-cluster-api_worker-user-data-secret.yaml - ├── 99_openshift-machineconfig_99-master-ssh.yaml - ├── 99_openshift-machineconfig_99-worker-ssh.yaml - ├── 99_role-cloud-creds-secret-reader.yaml - └── openshift-install-manifests.yaml - ----- - -[id="installation-aws-editing-manifests_{context}"] -== Modifying manifest files - -[NOTE] -==== -The AWS Outposts environments has the following limitations which require manual modification in the manifest generated files: - -* The maximum transmission unit (MTU) of a network connection is the size, in bytes, of the largest permissible packet that can be passed over the connection. The Outpost service link supports a maximum packet size of 1300 bytes. 
For more information about the service link, see link:https://docs.aws.amazon.com/outposts/latest/userguide/region-connectivity.html[Outpost connectivity to AWS Regions] - -You will find more information about how to change these values below. -==== - -* Use Outpost Subnet for workers `machineset` -+ -Modify the following file: -<installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-0.yaml -Find the subnet ID and replace it with the ID of the private subnet created in the Outpost. As a result, all the worker machines will be created in the Outpost. - -* Specify MTU value for the Network Provider -+ -Outpost service links support a maximum packet size of 1300 bytes. It's required to modify the MTU of the Network Provider to follow this requirement. -Create a new file under manifests directory, named cluster-network-03-config.yml -+ -If OpenShift SDN network provider is used, set the MTU value to 1250 -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - openshiftSDNConfig: - mtu: 1250 ----- -+ -If OVN-Kubernetes network provider is used, set the MTU value to 1200 -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - mtu: 1200 ----- diff --git a/modules/installation-aws-iam-policies-about.adoc b/modules/installation-aws-iam-policies-about.adoc deleted file mode 100644 index e604cf5ec274..000000000000 --- a/modules/installation-aws-iam-policies-about.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: CONCEPT -[id="iam-policies-and-aws-authentication_{context}"] -= IAM Policies and AWS authentication - -By default, the installation program creates instance profiles for the bootstrap, control plane, and compute instances with the necessary permissions for the cluster to operate. - -However, you can create your own IAM roles and specify them as part of the installation process. You might need to specify your own roles to deploy the cluster or to manage the cluster after installation. For example: - -* Your organization's security policies require that you use a more restrictive set of permissions to install the cluster. -* After the installation, the cluster is configured with an Operator that requires access to additional services. - -If you choose to specify your own IAM roles, you can take the following steps: - -* Begin with the default policies and adapt as required. For more information, see "Default permissions for IAM instance profiles". -* Use the AWS Identity and Access Management Access Analyzer (IAM Access Analyzer) to create a policy template that is based on the cluster's activity. For more information see, "Using AWS IAM Analyzer to create policy templates". diff --git a/modules/installation-aws-iam-user.adoc b/modules/installation-aws-iam-user.adoc deleted file mode 100644 index ebd7cc101fff..000000000000 --- a/modules/installation-aws-iam-user.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="installation-aws-iam-user_{context}"] -= Creating an IAM user - -Each Amazon Web Services (AWS) account contains a root user account that is -based on the email address you used to create the account. 
This is a -highly-privileged account, and it is recommended to use it for only initial -account and billing configuration, creating an initial set of users, and -securing the account. - -Before you install {product-title}, create a secondary IAM -administrative user. As you complete the -link:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html[Creating an IAM User in Your AWS Account] -procedure in the AWS documentation, set the following options: - -.Procedure - -. Specify the IAM user name and select `Programmatic access`. - -. Attach the `AdministratorAccess` policy to ensure that the account has -sufficient permission to create the cluster. This policy provides the cluster -with the ability to grant credentials to each {product-title} component. The -cluster grants the components only the credentials that they require. -+ -[NOTE] -==== -While it is possible to create a policy that grants the all of the required -AWS permissions and attach it to the user, this is not the preferred option. -The cluster will not have the ability to grant additional credentials to -individual components, so the same credentials are used by all components. -==== - -. Optional: Add metadata to the user by attaching tags. - -. Confirm that the user name that you specified is granted the -`AdministratorAccess` policy. - -. Record the access key ID and secret access key values. You must use these -values when you configure your local machine to run the installation program. -+ -[IMPORTANT] -==== -You cannot use a temporary session token that you generated while using a -multi-factor authentication device to authenticate to AWS when you deploy a -cluster. The cluster continues to use your current AWS credentials to -create AWS resources for the entire life of the cluster, so you must -use key-based, long-lived credentials. -==== diff --git a/modules/installation-aws-limits.adoc b/modules/installation-aws-limits.adoc deleted file mode 100644 index 6f256194f916..000000000000 --- a/modules/installation-aws-limits.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-limits_{context}"] -= AWS account limits - -The {product-title} cluster uses a number of Amazon Web Services (AWS) -components, and the default -link:https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html[Service Limits] -affect your ability to install {product-title} clusters. If you use certain -cluster configurations, deploy your cluster in certain AWS regions, or -run multiple clusters from your account, you might need -to request additional resources for your AWS account. - -The following table summarizes the AWS components whose limits can impact your -ability to install and run {product-title} clusters. - -[cols="2a,3a,3a,8a",options="header"] -|=== -|Component |Number of clusters available by default| Default AWS limit |Description - -|Instance Limits -|Varies -|Varies -|By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane nodes -* Three worker nodes - -These instance type counts are within a new account's default limit. To deploy -more worker nodes, enable autoscaling, deploy large workloads, or use a -different instance type, review your account limits to ensure that your cluster -can deploy the machines that you need. 
- -In most regions, the worker machines use an `m6i.large` instance -and the bootstrap and control plane machines use `m6i.xlarge` instances. In some regions, including -all regions that do not support these instance types, `m5.large` and `m5.xlarge` -instances are used instead. - -|Elastic IPs (EIPs) -|0 to 1 -|5 EIPs per account -|To provision the cluster in a highly available configuration, the installation program -creates a public and private subnet for each -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zone within a region]. -Each private subnet requires a -link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT Gateway], -and each NAT gateway requires a separate -link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html[elastic IP]. -Review the -link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to -determine how many availability zones are in each region. To take advantage of -the default high availability, install the cluster in a region with at least -three availability zones. To install a cluster in a region with more than five -availability zones, you must increase the EIP limit. -[IMPORTANT] -==== -To use the `us-east-1` region, you must increase the EIP limit for your account. -==== - -|Virtual Private Clouds (VPCs) -|5 -|5 VPCs per region -|Each cluster creates its own VPC. - -|Elastic Load Balancing (ELB/NLB) -|3 -|20 per region -|By default, each cluster creates internal and external network load balancers for the master -API server and a single classic elastic load balancer for the router. Deploying -more Kubernetes `Service` objects with type `LoadBalancer` will create additional -link:https://aws.amazon.com/elasticloadbalancing/[load balancers]. - - -|NAT Gateways -|5 -|5 per availability zone -|The cluster deploys one NAT gateway in each availability zone. - -|Elastic Network Interfaces (ENIs) -|At least 12 -|350 per region -|The default installation creates 21 ENIs and an ENI for each availability zone -in your region. For example, the `us-east-1` region contains six availability -zones, so a cluster that is deployed in that zone uses 27 ENIs. Review the -link:https://aws.amazon.com/about-aws/global-infrastructure/[AWS region map] to -determine how many availability zones are in each region. - -Additional ENIs are created for additional machines and elastic load balancers -that are created by cluster usage and deployed workloads. - -|VPC Gateway -|20 -|20 per account -|Each cluster creates a single VPC Gateway for S3 access. - - -|S3 buckets -|99 -|100 buckets per account -|Because the installation process creates a temporary bucket and the registry -component in each cluster creates a bucket, you can create only 99 -{product-title} clusters per AWS account. - -|Security Groups -|250 -|2,500 per account -|Each cluster creates 10 distinct security groups. 
-|=== diff --git a/modules/installation-aws-marketplace-subscribe.adoc b/modules/installation-aws-marketplace-subscribe.adoc deleted file mode 100644 index aba198cb16bf..000000000000 --- a/modules/installation-aws-marketplace-subscribe.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc - -ifeval::["{context}" == "installing-aws-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:upi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-marketplace-subscribe_{context}"] -= Obtaining an AWS Marketplace image -If you are deploying an {product-title} cluster using an AWS Marketplace image, you must first subscribe through AWS. Subscribing to the offer provides you with the AMI ID that the installation program uses to deploy worker nodes. - -.Prerequisites - -* You have an AWS account to purchase the offer. This account does not have to be the same account that is used to install the cluster. - -.Procedure - -. Complete the {product-title} subscription from the link:https://aws.amazon.com/marketplace/fulfillment?productId=59ead7de-2540-4653-a8b0-fa7926d5c845[AWS Marketplace]. -ifdef::ipi[] -. Record the AMI ID for your specific region. As part of the installation process, you must update the `install-config.yaml` file with this value before deploying the cluster. -endif::ipi[] -ifdef::upi[] -. Record the AMI ID for your specific region. If you use the CloudFormation template to deploy your worker nodes, you must update the `worker0.type.properties.ImageID` parameter with this value. -endif::upi[] - -ifdef::ipi[] -.Sample `install-config.yaml` file with AWS Marketplace worker nodes - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - amiID: ami-06c4d345f7c207239 <1> - type: m5.4xlarge - replicas: 3 -metadata: - name: test-cluster -platform: - aws: - region: us-east-2 <2> -sshKey: ssh-ed25519 AAAA... -pullSecret: '{"auths": ...}' ----- -<1> The AMI ID from your AWS Marketplace subscription. -<2> Your AMI ID is associated with a specific AWS region. When creating the installation configuration file, ensure that you select the same AWS region that you specified when configuring your subscription. 
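To confirm that the AMI ID from your subscription resolves in the region that you selected, you can query it with the AWS CLI. This is a minimal sketch with placeholder values, not part of the original module:

[source,terminal]
----
$ aws ec2 describe-images --image-ids <ami_id> --region us-east-2 --query 'Images[].[ImageId,Name]' --output table
----

If the AMI cannot be found in that region, the command typically returns an `InvalidAMIID.NotFound` error instead of the image details.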
-endif::ipi[] - -ifeval::["{context}" == "installing-aws-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!upi: -endif::[] diff --git a/modules/installation-aws-marketplace.adoc b/modules/installation-aws-marketplace.adoc deleted file mode 100644 index 295c20428b48..000000000000 --- a/modules/installation-aws-marketplace.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: CONCEPT -[id="installation-aws-marketplace_{context}"] -= Supported AWS Marketplace regions - -Installing an {product-title} cluster using an AWS Marketplace image is available to customers who purchase the offer in North America. - -While the offer must be purchased in North America, you can deploy the cluster to any of the following supported partitions: - -* Public -* GovCloud - -[NOTE] -==== -Deploying a {product-title} cluster using an AWS Marketplace image is not supported for the AWS secret regions or China regions. -==== diff --git a/modules/installation-aws-permissions-iam-roles.adoc b/modules/installation-aws-permissions-iam-roles.adoc deleted file mode 100644 index 478df830636f..000000000000 --- a/modules/installation-aws-permissions-iam-roles.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-permissions-iam-roles_{context}"] -= Default permissions for IAM instance profiles - -By default, the installation program creates IAM instance profiles for the bootstrap, control plane, and worker instances with the necessary permissions for the cluster to operate. 
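To inspect the permissions that were actually attached to a cluster's instance profiles, and compare them with the defaults listed below, you can query the role that backs an instance profile with the AWS CLI. This is a minimal sketch with placeholder role and policy names, provided for illustration only:

[source,terminal]
----
$ aws iam list-instance-profiles --query 'InstanceProfiles[].InstanceProfileName'
$ aws iam list-role-policies --role-name <role_name>
$ aws iam get-role-policy --role-name <role_name> --policy-name <policy_name>
----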
- -The following lists specify the default permissions for control plane and compute machines: - -.Default IAM role permissions for control plane instance profiles -[%collapsible] -==== -* `ec2:AttachVolume` -* `ec2:AuthorizeSecurityGroupIngress` -* `ec2:CreateSecurityGroup` -* `ec2:CreateTags` -* `ec2:CreateVolume` -* `ec2:DeleteSecurityGroup` -* `ec2:DeleteVolume` -* `ec2:Describe*` -* `ec2:DetachVolume` -* `ec2:ModifyInstanceAttribute` -* `ec2:ModifyVolume` -* `ec2:RevokeSecurityGroupIngress` -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:AttachLoadBalancerToSubnets` -* `elasticloadbalancing:ApplySecurityGroupsToLoadBalancer` -* `elasticloadbalancing:CreateListener` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateLoadBalancerPolicy` -* `elasticloadbalancing:CreateLoadBalancerListeners` -* `elasticloadbalancing:CreateTargetGroup` -* `elasticloadbalancing:ConfigureHealthCheck` -* `elasticloadbalancing:DeleteListener` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeleteLoadBalancerListeners` -* `elasticloadbalancing:DeleteTargetGroup` -* `elasticloadbalancing:DeregisterInstancesFromLoadBalancer` -* `elasticloadbalancing:DeregisterTargets` -* `elasticloadbalancing:Describe*` -* `elasticloadbalancing:DetachLoadBalancerFromSubnets` -* `elasticloadbalancing:ModifyListener` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:ModifyTargetGroup` -* `elasticloadbalancing:ModifyTargetGroupAttributes` -* `elasticloadbalancing:RegisterInstancesWithLoadBalancer` -* `elasticloadbalancing:RegisterTargets` -* `elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer` -* `elasticloadbalancing:SetLoadBalancerPoliciesOfListener` -* `kms:DescribeKey` -==== - -.Default IAM role permissions for compute instance profiles -[%collapsible] -==== -* `ec2:DescribeInstances` -* `ec2:DescribeRegions` -==== diff --git a/modules/installation-aws-permissions.adoc b/modules/installation-aws-permissions.adoc deleted file mode 100644 index 8e0baa5d11a8..000000000000 --- a/modules/installation-aws-permissions.adoc +++ /dev/null @@ -1,293 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-account.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-permissions_{context}"] -= Required AWS permissions for the IAM user - -[NOTE] -==== -Your IAM user must have the permission `tag:GetResources` in the region `us-east-1` to delete the base cluster resources. As part of the AWS API requirement, the {product-title} installation program performs various actions in this region. -==== - -When you attach the `AdministratorAccess` policy to the IAM user that you create in Amazon Web Services (AWS), -you grant that user all of the required permissions. 
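For example, a minimal sketch of attaching that managed policy with the AWS CLI, assuming a hypothetical IAM user named `openshift-installer`, might look like the following. The `simulate-principal-policy` check is optional and is most useful when you scope the user more narrowly than `AdministratorAccess`.

[source,terminal]
----
# Attach the AWS managed AdministratorAccess policy to the installer user.
$ aws iam attach-user-policy \
  --user-name openshift-installer \
  --policy-arn arn:aws:iam::aws:policy/AdministratorAccess

# Optionally, spot-check whether a specific action is allowed for the user.
$ aws iam simulate-principal-policy \
  --policy-source-arn arn:aws:iam::<account_id>:user/openshift-installer \
  --action-names ec2:RunInstances \
  --query 'EvaluationResults[0].EvalDecision'
----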
To deploy all components of an {product-title} -cluster, the IAM user requires the following permissions: - -.Required EC2 permissions for installation -[%collapsible] -==== -* `ec2:AuthorizeSecurityGroupEgress` -* `ec2:AuthorizeSecurityGroupIngress` -* `ec2:CopyImage` -* `ec2:CreateNetworkInterface` -* `ec2:AttachNetworkInterface` -* `ec2:CreateSecurityGroup` -* `ec2:CreateTags` -* `ec2:CreateVolume` -* `ec2:DeleteSecurityGroup` -* `ec2:DeleteSnapshot` -* `ec2:DeleteTags` -* `ec2:DeregisterImage` -* `ec2:DescribeAccountAttributes` -* `ec2:DescribeAddresses` -* `ec2:DescribeAvailabilityZones` -* `ec2:DescribeDhcpOptions` -* `ec2:DescribeImages` -* `ec2:DescribeInstanceAttribute` -* `ec2:DescribeInstanceCreditSpecifications` -* `ec2:DescribeInstances` -* `ec2:DescribeInstanceTypes` -* `ec2:DescribeInternetGateways` -* `ec2:DescribeKeyPairs` -* `ec2:DescribeNatGateways` -* `ec2:DescribeNetworkAcls` -* `ec2:DescribeNetworkInterfaces` -* `ec2:DescribePrefixLists` -* `ec2:DescribeRegions` -* `ec2:DescribeRouteTables` -* `ec2:DescribeSecurityGroups` -* `ec2:DescribeSubnets` -* `ec2:DescribeTags` -* `ec2:DescribeVolumes` -* `ec2:DescribeVpcAttribute` -* `ec2:DescribeVpcClassicLink` -* `ec2:DescribeVpcClassicLinkDnsSupport` -* `ec2:DescribeVpcEndpoints` -* `ec2:DescribeVpcs` -* `ec2:GetEbsDefaultKmsKeyId` -* `ec2:ModifyInstanceAttribute` -* `ec2:ModifyNetworkInterfaceAttribute` -* `ec2:RevokeSecurityGroupEgress` -* `ec2:RevokeSecurityGroupIngress` -* `ec2:RunInstances` -* `ec2:TerminateInstances` -==== - -.Required permissions for creating network resources during installation -[%collapsible] -==== -* `ec2:AllocateAddress` -* `ec2:AssociateAddress` -* `ec2:AssociateDhcpOptions` -* `ec2:AssociateRouteTable` -* `ec2:AttachInternetGateway` -* `ec2:CreateDhcpOptions` -* `ec2:CreateInternetGateway` -* `ec2:CreateNatGateway` -* `ec2:CreateRoute` -* `ec2:CreateRouteTable` -* `ec2:CreateSubnet` -* `ec2:CreateVpc` -* `ec2:CreateVpcEndpoint` -* `ec2:ModifySubnetAttribute` -* `ec2:ModifyVpcAttribute` - -[NOTE] -===== -If you use an existing VPC, your account does not require these permissions for creating network resources. 
-===== -==== - -.Required Elastic Load Balancing permissions (ELB) for installation -[%collapsible] -==== -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:ApplySecurityGroupsToLoadBalancer` -* `elasticloadbalancing:AttachLoadBalancerToSubnets` -* `elasticloadbalancing:ConfigureHealthCheck` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateLoadBalancerListeners` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeregisterInstancesFromLoadBalancer` -* `elasticloadbalancing:DescribeInstanceHealth` -* `elasticloadbalancing:DescribeLoadBalancerAttributes` -* `elasticloadbalancing:DescribeLoadBalancers` -* `elasticloadbalancing:DescribeTags` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:RegisterInstancesWithLoadBalancer` -* `elasticloadbalancing:SetLoadBalancerPoliciesOfListener` -==== - -.Required Elastic Load Balancing permissions (ELBv2) for installation -[%collapsible] -==== -* `elasticloadbalancing:AddTags` -* `elasticloadbalancing:CreateListener` -* `elasticloadbalancing:CreateLoadBalancer` -* `elasticloadbalancing:CreateTargetGroup` -* `elasticloadbalancing:DeleteLoadBalancer` -* `elasticloadbalancing:DeregisterTargets` -* `elasticloadbalancing:DescribeListeners` -* `elasticloadbalancing:DescribeLoadBalancerAttributes` -* `elasticloadbalancing:DescribeLoadBalancers` -* `elasticloadbalancing:DescribeTargetGroupAttributes` -* `elasticloadbalancing:DescribeTargetHealth` -* `elasticloadbalancing:ModifyLoadBalancerAttributes` -* `elasticloadbalancing:ModifyTargetGroup` -* `elasticloadbalancing:ModifyTargetGroupAttributes` -* `elasticloadbalancing:RegisterTargets` -==== - -.Required IAM permissions for installation -[%collapsible] -==== -* `iam:AddRoleToInstanceProfile` -* `iam:CreateInstanceProfile` -* `iam:CreateRole` -* `iam:DeleteInstanceProfile` -* `iam:DeleteRole` -* `iam:DeleteRolePolicy` -* `iam:GetInstanceProfile` -* `iam:GetRole` -* `iam:GetRolePolicy` -* `iam:GetUser` -* `iam:ListInstanceProfilesForRole` -* `iam:ListRoles` -* `iam:ListUsers` -* `iam:PassRole` -* `iam:PutRolePolicy` -* `iam:RemoveRoleFromInstanceProfile` -* `iam:SimulatePrincipalPolicy` -* `iam:TagRole` - -[NOTE] -===== -If you have not created a load balancer in your AWS account, the IAM user also requires the `iam:CreateServiceLinkedRole` permission. 
-===== -==== - -.Required Route 53 permissions for installation -[%collapsible] -==== -* `route53:ChangeResourceRecordSets` -* `route53:ChangeTagsForResource` -* `route53:CreateHostedZone` -* `route53:DeleteHostedZone` -* `route53:GetChange` -* `route53:GetHostedZone` -* `route53:ListHostedZones` -* `route53:ListHostedZonesByName` -* `route53:ListResourceRecordSets` -* `route53:ListTagsForResource` -* `route53:UpdateHostedZoneComment` -==== - -.Required S3 permissions for installation -[%collapsible] -==== -* `s3:CreateBucket` -* `s3:DeleteBucket` -* `s3:GetAccelerateConfiguration` -* `s3:GetBucketAcl` -* `s3:GetBucketCors` -* `s3:GetBucketLocation` -* `s3:GetBucketLogging` -* `s3:GetBucketPolicy` -* `s3:GetBucketObjectLockConfiguration` -* `s3:GetBucketReplication` -* `s3:GetBucketRequestPayment` -* `s3:GetBucketTagging` -* `s3:GetBucketVersioning` -* `s3:GetBucketWebsite` -* `s3:GetEncryptionConfiguration` -* `s3:GetLifecycleConfiguration` -* `s3:GetReplicationConfiguration` -* `s3:ListBucket` -* `s3:PutBucketAcl` -* `s3:PutBucketTagging` -* `s3:PutEncryptionConfiguration` -==== - -.S3 permissions that cluster Operators require -[%collapsible] -==== -* `s3:DeleteObject` -* `s3:GetObject` -* `s3:GetObjectAcl` -* `s3:GetObjectTagging` -* `s3:GetObjectVersion` -* `s3:PutObject` -* `s3:PutObjectAcl` -* `s3:PutObjectTagging` -==== - -.Required permissions to delete base cluster resources -[%collapsible] -==== -* `autoscaling:DescribeAutoScalingGroups` -* `ec2:DeletePlacementGroup` -* `ec2:DeleteNetworkInterface` -* `ec2:DeleteVolume` -* `elasticloadbalancing:DeleteTargetGroup` -* `elasticloadbalancing:DescribeTargetGroups` -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:ListAttachedRolePolicies` -* `iam:ListInstanceProfiles` -* `iam:ListRolePolicies` -* `iam:ListUserPolicies` -* `s3:DeleteObject` -* `s3:ListBucketVersions` -* `tag:GetResources` -==== - -.Required permissions to delete network resources -[%collapsible] -==== -* `ec2:DeleteDhcpOptions` -* `ec2:DeleteInternetGateway` -* `ec2:DeleteNatGateway` -* `ec2:DeleteRoute` -* `ec2:DeleteRouteTable` -* `ec2:DeleteSubnet` -* `ec2:DeleteVpc` -* `ec2:DeleteVpcEndpoints` -* `ec2:DetachInternetGateway` -* `ec2:DisassociateRouteTable` -* `ec2:ReleaseAddress` -* `ec2:ReplaceRouteTableAssociation` - -[NOTE] -===== -If you use an existing VPC, your account does not require these permissions to delete network resources. Instead, your account only requires the `tag:UntagResources` permission to delete network resources. -===== -==== - -.Required permissions to delete a cluster with shared instance roles -[%collapsible] -==== -* `iam:UntagRole` -==== - -.Additional IAM and S3 permissions that are required to create manifests -[%collapsible] -==== -* `iam:DeleteAccessKey` -* `iam:DeleteUser` -* `iam:DeleteUserPolicy` -* `iam:GetUserPolicy` -* `iam:ListAccessKeys` -* `iam:PutUserPolicy` -* `iam:TagUser` -* `s3:PutBucketPublicAccessBlock` -* `s3:GetBucketPublicAccessBlock` -* `s3:PutLifecycleConfiguration` -* `s3:HeadBucket` -* `s3:ListBucketMultipartUploads` -* `s3:AbortMultipartUpload` - -[NOTE] -===== -If you are managing your cloud provider credentials with mint mode, the IAM user also requires the `iam:CreateAccessKey` and `iam:CreateUser` permissions. 
-===== -==== - -.Optional permissions for instance and quota checks for installation -[%collapsible] -==== -* `ec2:DescribeInstanceTypeOfferings` -* `servicequotas:ListAWSDefaultServiceQuotas` -==== diff --git a/modules/installation-aws-regions-with-no-ami.adoc b/modules/installation-aws-regions-with-no-ami.adoc deleted file mode 100644 index d465fc432b86..000000000000 --- a/modules/installation-aws-regions-with-no-ami.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] - -[id="installation-aws-regions-with-no-ami_{context}"] -ifndef::aws-china,aws-secret[] -= AWS regions without a published {op-system} AMI -endif::aws-china,aws-secret[] - -ifdef::aws-china,aws-secret[] -= Installation requirements -endif::aws-china,aws-secret[] - -ifndef::aws-china,aws-secret[] -You can deploy an {product-title} cluster to Amazon Web Services (AWS) regions -without native support for a {op-system-first} Amazon Machine Image (AMI) or the -AWS software development kit (SDK). If a -published AMI is not available for an AWS region, you can upload a custom AMI -prior to installing the cluster. - -If you are deploying to a region not supported by the AWS SDK -and you do not specify a custom AMI, the installation program -copies the `us-east-1` AMI to the user account automatically. Then the -installation program creates the control plane machines with encrypted EBS -volumes using the default or user-specified Key Management Service (KMS) key. -This allows the AMI to follow the same process workflow as published {op-system} -AMIs. - -A region without native support for an {op-system} AMI is not available to -select from the terminal during cluster creation because it is not published. -However, you can install to this region by configuring the custom AMI in the -`install-config.yaml` file. -endif::aws-china,aws-secret[] - -ifdef::aws-china,aws-secret[] -ifdef::aws-china[Red Hat does not publish a {op-system-first} Amazon Machine Image (AMI) for the AWS China regions.] -ifdef::aws-secret[Red Hat does not publish a {op-system-first} Amazon Machine Image (AMI) for the AWS Secret and Top Secret Regions.] - -Before you can install the cluster, you must: - -* Upload a custom {op-system} AMI. -* Manually create the installation configuration file (`install-config.yaml`). -* Specify the AWS region, and the accompanying custom AMI, in the installation configuration file. - -You cannot use the {product-title} installation program to create the installation configuration file. The installer does not list an AWS region without native support for an {op-system} AMI. - -ifdef::aws-secret[] -[IMPORTANT] -==== -You must also define a custom CA certificate in the `additionalTrustBundle` field of the `install-config.yaml` file because the AWS API requires a custom CA trust bundle. To allow the installation program to access the AWS API, the CA certificates must also be defined on the machine that runs the installation program.
You must add the CA bundle to the trust store on the machine, use the `AWS_CA_BUNDLE` environment variable, or define the CA bundle in the link:https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-ca_bundle.html[`ca_bundle`] field of the AWS config file. -==== -endif::aws-secret[] - -endif::aws-china,aws-secret[] - -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] diff --git a/modules/installation-aws-regions.adoc b/modules/installation-aws-regions.adoc deleted file mode 100644 index b00ea7fadb89..000000000000 --- a/modules/installation-aws-regions.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -[id="installation-aws-regions_{context}"] -= Supported AWS regions - -You can deploy an {product-title} cluster to the following regions. - -[NOTE] -==== -Your IAM user must have the permission `tag:GetResources` in the region `us-east-1` to delete the base cluster resources. As part of the AWS API requirement, the {product-title} installation program performs various actions in this region. -==== - -[id="installation-aws-public_{context}"] -== AWS public regions - -The following AWS public regions are supported: - -* `af-south-1` (Cape Town) -* `ap-east-1` (Hong Kong) -* `ap-northeast-1` (Tokyo) -* `ap-northeast-2` (Seoul) -* `ap-northeast-3` (Osaka) -* `ap-south-1` (Mumbai) -* `ap-south-2` (Hyderabad) -* `ap-southeast-1` (Singapore) -* `ap-southeast-2` (Sydney) -* `ap-southeast-3` (Jakarta) -* `ap-southeast-4` (Melbourne) -* `ca-central-1` (Central) -* `eu-central-1` (Frankfurt) -* `eu-central-2` (Zurich) -* `eu-north-1` (Stockholm) -* `eu-south-1` (Milan) -* `eu-south-2` (Spain) -* `eu-west-1` (Ireland) -* `eu-west-2` (London) -* `eu-west-3` (Paris) -* `me-central-1` (UAE) -* `me-south-1` (Bahrain) -* `sa-east-1` (São Paulo) -* `us-east-1` (N. Virginia) -* `us-east-2` (Ohio) -* `us-west-1` (N. California) -* `us-west-2` (Oregon) - -[id="installation-aws-govcloud_{context}"] -== AWS GovCloud regions - -The following AWS GovCloud regions are supported: - -* `us-gov-west-1` -* `us-gov-east-1` - -[id="installation-aws-c2s_{context}"] -== AWS SC2S and C2S secret regions - -The following AWS secret regions are supported: - -* `us-isob-east-1` Secret Commercial Cloud Services (SC2S) -* `us-iso-east-1` Commercial Cloud Services (C2S) - -[id="installation-aws-china_{context}"] -== AWS China regions - -The following AWS China regions are supported: - -* `cn-north-1` (Beijing) -* `cn-northwest-1` (Ningxia) diff --git a/modules/installation-aws-route53.adoc b/modules/installation-aws-route53.adoc deleted file mode 100644 index 14c632a66268..000000000000 --- a/modules/installation-aws-route53.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-account.adoc - -:_content-type: PROCEDURE -[id="installation-aws-route53_{context}"] -= Configuring Route 53 - -To install {product-title}, the Amazon Web Services (AWS) account you use must -have a dedicated public hosted zone in your Route 53 service. This zone must be -authoritative for the domain. The Route 53 service provides -cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. 
You can transfer an existing domain and -registrar or obtain a new one through AWS or another source. -+ -[NOTE] -==== -If you purchase a new domain through AWS, it takes time for the relevant DNS -changes to propagate. For more information about purchasing domains -through AWS, see -link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar.html[Registering Domain Names Using Amazon Route 53] -in the AWS documentation. -==== - -. If you are using an existing domain and registrar, migrate its DNS to AWS. See -link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html[Making Amazon Route 53 the DNS Service for an Existing Domain] -in the AWS documentation. - -. Create a public hosted zone for your domain or subdomain. See -link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html[Creating a Public Hosted Zone] -in the AWS documentation. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, -such as `clusters.openshiftcorp.com`. - -. Extract the new authoritative name servers from the hosted zone records. See -link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/GetInfoAboutHostedZone.html[Getting the Name Servers for a Public Hosted Zone] -in the AWS documentation. - -. Update the registrar records for the AWS Route 53 name servers that your domain -uses. For example, if you registered your domain to a Route 53 service in a -different account, see the following topic in the AWS documentation: -link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-name-servers-glue-records.html#domain-name-servers-glue-records-procedure[Adding or Changing Name Servers or Glue Records]. - -. If you are using a subdomain, add its delegation records to the parent domain. This gives Amazon Route 53 responsibility for the subdomain. Follow the delegation procedure outlined by the DNS provider of the parent domain. See link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html[Creating a subdomain that uses Amazon Route 53 as the DNS service without migrating the parent domain] in the AWS documentation for an example high-level procedure. diff --git a/modules/installation-aws-tested-machine-types.adoc b/modules/installation-aws-tested-machine-types.adoc deleted file mode 100644 index aa79d2c49733..000000000000 --- a/modules/installation-aws-tested-machine-types.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-china.adoc -// installing/installing_aws/installing-aws-customizations.adoc -// installing/installing_aws/installing-aws-government-region.adoc -// installing/installing_aws/installing-aws-network-customizations.adoc -// installing/installing_aws/installing-aws-private.adoc -// installing/installing_aws/installing-aws-user-infra.adoc -// installing/installing_aws/installing-aws-vpc.adoc -// installing/installing_aws/installing-restricted-networks-aws.adoc -// installing-aws-localzone - -ifeval::["{context}" == "installing-aws-localzone"] -:localzone: -endif::[] - -[id="installation-aws-tested-machine-types_{context}"] -= Tested instance types for AWS - -The following Amazon Web Services (AWS) instance types have been tested with -ifndef::localzone[] -{product-title}. -endif::localzone[] -ifdef::localzone[] -{product-title} for use with AWS Local Zones. -endif::localzone[] - -[NOTE] -==== -Use the machine types included in the following charts for your AWS instances.
If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". -==== - -ifndef::localzone[] -.Machine types based on 64-bit x86 architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/aws/tested_instance_types_x86_64.md[] -==== -endif::localzone[] -ifdef::localzone[] -.Machine types based on 64-bit x86 architecture for AWS Local Zones -[%collapsible] -==== -* `c5.*` -* `c5d.*` -* `m6i.*` -* `m5.*` -* `r5.*` -* `t3.*` -==== -endif::localzone[] - -ifeval::["{context}" == "installing-aws-localzone"] -:!localzone: -endif::[] \ No newline at end of file diff --git a/modules/installation-aws-upload-custom-rhcos-ami.adoc b/modules/installation-aws-upload-custom-rhcos-ami.adoc deleted file mode 100644 index ad94e78fd777..000000000000 --- a/modules/installation-aws-upload-custom-rhcos-ami.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-china.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:aws-gov: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-upload-custom-rhcos-ami_{context}"] -= Uploading a custom {op-system} AMI in AWS - -If you are deploying to a custom Amazon Web Services (AWS) region, you must -upload a custom {op-system-first} Amazon Machine Image (AMI) that belongs to -that region. - -.Prerequisites - -* You configured an AWS account. -* You created an Amazon S3 bucket with the required IAM -link:https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role[service role]. -* You uploaded your {op-system} VMDK file to Amazon S3. -ifdef::openshift-enterprise,openshift-webscale[] -The {op-system} VMDK file must be the highest version that is less than or equal to the {product-title} version you are installing. -endif::[] -* You downloaded the AWS CLI and installed it on your computer. See -link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer]. - -.Procedure - -. Export your AWS profile as an environment variable: -+ -[source,terminal] ----- -$ export AWS_PROFILE=<aws_profile> <1> ----- -ifdef::aws-gov[<1> The AWS profile name that holds your AWS credentials, like `govcloud`.] -ifdef::aws-china[<1> The AWS profile name that holds your AWS credentials, like `beijingadmin`.] - -. Export the region to associate with your custom AMI as an environment -variable: -+ -[source,terminal] ----- -$ export AWS_DEFAULT_REGION=<aws_region> <1> ----- -ifdef::aws-gov[<1> The AWS region, like `us-gov-east-1`.] -ifdef::aws-china[<1> The AWS region, like `cn-north-1`.] - -. Export the version of {op-system} you uploaded to Amazon S3 as an environment -variable: -+ -[source,terminal] ----- -$ export RHCOS_VERSION=<version> <1> ----- -<1> The {op-system} VMDK version, like `4.13.0`. - -. Export the Amazon S3 bucket name as an environment variable: -+ -[source,terminal] ----- -$ export VMIMPORT_BUCKET_NAME=<s3_bucket_name> ----- - -. 
Create the `containers.json` file and define your {op-system} VMDK file: -+ -[source,terminal] ----- -$ cat <<EOF > containers.json -{ - "Description": "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64", - "Format": "vmdk", - "UserBucket": { - "S3Bucket": "${VMIMPORT_BUCKET_NAME}", - "S3Key": "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64.vmdk" - } -} -EOF ----- - -. Import the {op-system} disk as an Amazon EBS snapshot: -+ -[source,terminal] ----- -$ aws ec2 import-snapshot --region ${AWS_DEFAULT_REGION} \ - --description "<description>" \ <1> - --disk-container "file://<file_path>/containers.json" <2> ----- -<1> The description of your {op-system} disk being imported, like -`rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64`. -<2> The file path to the JSON file describing your {op-system} disk. The JSON -file should contain your Amazon S3 bucket name and key. - -. Check the status of the image import: -+ -[source,terminal] ----- -$ watch -n 5 aws ec2 describe-import-snapshot-tasks --region ${AWS_DEFAULT_REGION} ----- -+ -.Example output -[source,terminal] ----- -{ - "ImportSnapshotTasks": [ - { - "Description": "rhcos-4.7.0-x86_64-aws.x86_64", - "ImportTaskId": "import-snap-fh6i8uil", - "SnapshotTaskDetail": { - "Description": "rhcos-4.7.0-x86_64-aws.x86_64", - "DiskImageSize": 819056640.0, - "Format": "VMDK", - "SnapshotId": "snap-06331325870076318", - "Status": "completed", - "UserBucket": { - "S3Bucket": "external-images", - "S3Key": "rhcos-4.7.0-x86_64-aws.x86_64.vmdk" - } - } - } - ] -} ----- -+ -Copy the `SnapshotId` to register the image. - -. Create a custom {op-system} AMI from the {op-system} snapshot: -+ -[source,terminal] ----- -$ aws ec2 register-image \ - --region ${AWS_DEFAULT_REGION} \ - --architecture x86_64 \ <1> - --description "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64" \ <2> - --ena-support \ - --name "rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64" \ <3> - --virtualization-type hvm \ - --root-device-name '/dev/xvda' \ - --block-device-mappings 'DeviceName=/dev/xvda,Ebs={DeleteOnTermination=true,SnapshotId=<snapshot_ID>}' <4> ----- -<1> The {op-system} VMDK architecture type, like `x86_64`, -ifndef::openshift-origin[] -`aarch64`, -endif::openshift-origin[] -`s390x`, or `ppc64le`. -<2> The `Description` from the imported snapshot. -<3> The name of the {op-system} AMI. -<4> The `SnapshotID` from the imported snapshot. - -To learn more about these APIs, see the AWS documentation for -link:https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html[importing snapshots] -and link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot[creating EBS-backed AMIs]. 
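As an optional check, you can confirm that the new {op-system} AMI is registered in your account before you reference it in the `install-config.yaml` file. This sketch reuses the environment variables that you exported earlier in this procedure:

[source,terminal]
----
# List AMIs owned by this account whose name matches the custom RHCOS image.
$ aws ec2 describe-images \
  --region ${AWS_DEFAULT_REGION} \
  --owners self \
  --filters "Name=name,Values=rhcos-${RHCOS_VERSION}-x86_64-aws.x86_64" \
  --query 'Images[].{ImageId:ImageId,Name:Name,State:State}'
----

An AMI that is reported in the `available` state is ready to use as the custom AMI for your cluster.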
- -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-gov: -endif::[] diff --git a/modules/installation-aws-user-infra-bootstrap.adoc b/modules/installation-aws-user-infra-bootstrap.adoc deleted file mode 100644 index 47123f354285..000000000000 --- a/modules/installation-aws-user-infra-bootstrap.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-bootstrap_{context}"] -= Initializing the bootstrap sequence on AWS with user-provisioned infrastructure - -After you create all of the required infrastructure in Amazon Web Services (AWS), -you can start the bootstrap sequence that initializes the {product-title} control plane. - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. -* You created the bootstrap machine. -* You created the control plane machines. -* You created the worker nodes. - -.Procedure - -. Change to the directory that contains the installation program and start the bootstrap process that initializes the {product-title} control plane: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete --dir <installation_directory> \ <1> - --log-level=info <2> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 20m0s for the Kubernetes API at https://api.mycluster.example.com:6443... -INFO API v1.26.0 up -INFO Waiting up to 30m0s for bootstrapping to complete... -INFO It is now safe to remove the bootstrap resources -INFO Time elapsed: 1s ----- -+ -If the command exits without a `FATAL` warning, your {product-title} control plane -has initialized. -+ -[NOTE] -==== -After the control plane initializes, it sets up the compute nodes and installs additional services in the form of Operators. -==== diff --git a/modules/installation-aws-user-infra-delete-bootstrap.adoc b/modules/installation-aws-user-infra-delete-bootstrap.adoc deleted file mode 100644 index ba730b6a57e5..000000000000 --- a/modules/installation-aws-user-infra-delete-bootstrap.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-delete-bootstrap_{context}"] -= Deleting the bootstrap resources - -After you complete the initial Operator configuration for the cluster, remove the bootstrap resources from Amazon Web Services (AWS). - -.Prerequisites - -* You completed the initial Operator configuration for your cluster. - -.Procedure - -. Delete the bootstrap resources. 
If you used the CloudFormation template, -link:https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-console-delete-stack.html[delete its stack]: -** Delete the stack by using the AWS CLI: -+ -[source,terminal] ----- -$ aws cloudformation delete-stack --stack-name <name> <1> ----- -<1> `<name>` is the name of your bootstrap stack. -** Delete the stack by using the link:https://console.aws.amazon.com/cloudformation/[AWS CloudFormation console]. diff --git a/modules/installation-aws-user-infra-installation.adoc b/modules/installation-aws-user-infra-installation.adoc deleted file mode 100644 index 819e7661640b..000000000000 --- a/modules/installation-aws-user-infra-installation.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -endif::[] -ifdef::openshift-origin[] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-aws-user-infra-installation_{context}"] -= Completing an AWS installation on user-provisioned infrastructure - -After you start the {product-title} installation on Amazon Web Service (AWS) -user-provisioned infrastructure, monitor the deployment to completion. - -.Prerequisites - -* You removed the bootstrap node for an {product-title} cluster on user-provisioned AWS infrastructure. -* You installed the `oc` CLI. - -.Procedure - -ifdef::restricted[] -. From the directory that contains the installation program, complete -endif::restricted[] -ifndef::restricted[] -* From the directory that contains the installation program, complete -endif::restricted[] -the cluster installation: -+ -[source,terminal] ----- -$ ./openshift-install --dir <installation_directory> wait-for install-complete <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 40m0s for the cluster at https://api.mycluster.example.com:6443 to initialize... -INFO Waiting up to 10m0s for the openshift-console route to be created... -INFO Install complete! -INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' -INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com -INFO Login to the console with user: "kubeadmin", and password: "4vYBz-Fe5en-ymBEc-Wt6NL" -INFO Time elapsed: 1s ----- -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. 
By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -ifdef::restricted[] -. Register your cluster on the link:https://console.redhat.com/openshift/register[Cluster registration] page. -endif::restricted[] - - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] diff --git a/modules/installation-aws-user-infra-requirements.adoc b/modules/installation-aws-user-infra-requirements.adoc deleted file mode 100644 index 4f98ac7b8f4a..000000000000 --- a/modules/installation-aws-user-infra-requirements.adoc +++ /dev/null @@ -1,564 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-user-infra-requirements_{context}"] -= Required AWS infrastructure components - -To install {product-title} on user-provisioned infrastructure in Amazon Web Services (AWS), you must manually create both the machines and their supporting infrastructure. - -For more information about the integration testing for different platforms, see the link:https://access.redhat.com/articles/4128421[OpenShift Container Platform 4.x Tested Integrations] page. - -By using the provided CloudFormation templates, you can create stacks of AWS resources that represent the following components: - -* An AWS Virtual Private Cloud (VPC) -* Networking and load balancing components -* Security groups and roles -* An {product-title} bootstrap node -* {product-title} control plane nodes -* An {product-title} compute node - -Alternatively, you can manually create the components or you can reuse existing infrastructure that meets the cluster requirements. Review the CloudFormation templates for more details about how the components interrelate. - -[id="installation-aws-user-infra-other-infrastructure_{context}"] -== Other infrastructure components - -* A VPC -* DNS entries -* Load balancers (classic or network) and listeners -* A public and a private Route 53 zone -* Security groups -* IAM roles -* S3 buckets - -If you are working in a disconnected environment, you are unable to reach the public IP addresses for EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: - -[discrete] -[id="create-vpc-endpoints_{context}"] -=== Option 1: Create VPC endpoints - -Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: - -* `ec2.<aws_region>.amazonaws.com` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` - -With this option, network traffic remains private between your VPC and the required AWS services. - -[discrete] -[id="create-proxy-without-vpc-endpoints_{context}"] -=== Option 2: Create a proxy without VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. - -[discrete] -[id="create-proxy-with-vpc-endpoints_{context}"] -=== Option 3: Create a proxy with VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. 
Name the endpoints as follows: - -* `ec2.<aws_region>.amazonaws.com` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` - -When configuring the proxy in the `install-config.yaml` file, add these endpoints to the `noProxy` field. With this option, the proxy prevents the cluster from accessing the internet directly. However, network traffic remains private between your VPC and the required AWS services. - -.Required VPC components - -You must provide a suitable VPC and subnets that allow communication to your -machines. - -[cols="2a,7a,3a,3a",options="header"] -|=== - -|Component -|AWS type -2+|Description - -|VPC -|* `AWS::EC2::VPC` -* `AWS::EC2::VPCEndpoint` -2+|You must provide a public VPC for the cluster to use. The VPC uses an -endpoint that references the route tables for each subnet to improve communication with the registry that is hosted in S3. - -|Public subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::SubnetNetworkAclAssociation` -2+|Your VPC must have public subnets for between 1 and 3 availability zones -and associate them with appropriate Ingress rules. - -|Internet gateway -| -* `AWS::EC2::InternetGateway` -* `AWS::EC2::VPCGatewayAttachment` -* `AWS::EC2::RouteTable` -* `AWS::EC2::Route` -* `AWS::EC2::SubnetRouteTableAssociation` -* `AWS::EC2::NatGateway` -* `AWS::EC2::EIP` -2+|You must have a public internet gateway, with public routes, attached to the -VPC. In the provided templates, each public subnet has a NAT gateway with an EIP address. These NAT gateways allow cluster resources, like private subnet instances, to reach the internet and are not required for some restricted network or proxy scenarios. - -.7+|Network access control -.7+| * `AWS::EC2::NetworkAcl` -* `AWS::EC2::NetworkAclEntry` -2+|You must allow the VPC to access the following ports: -h|Port -h|Reason - -|`80` -|Inbound HTTP traffic - -|`443` -|Inbound HTTPS traffic - -|`22` -|Inbound SSH traffic - -|`1024` - `65535` -|Inbound ephemeral traffic - -|`0` - `65535` -|Outbound ephemeral traffic - - -|Private subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::RouteTable` -* `AWS::EC2::SubnetRouteTableAssociation` -2+|Your VPC can have private subnets. The provided CloudFormation templates -can create private subnets for between 1 and 3 availability zones. -If you use private subnets, you must provide appropriate routes and tables -for them. - -|=== - - -.Required DNS and load balancing components - -Your DNS and load balancer configuration needs to use a public hosted zone and -can use a private hosted zone similar to the one that the installation program -uses if it provisions the cluster's infrastructure. You must -create a DNS entry that resolves to your load balancer. An entry for -`api.<cluster_name>.<domain>` must point to the external load balancer, and an -entry for `api-int.<cluster_name>.<domain>` must point to the internal load -balancer. - -The cluster also requires load balancers and listeners for port 6443, which are -required for the Kubernetes API and its extensions, and port 22623, which are -required for the Ignition config files for new machines. The targets will be the -control plane nodes. Port 6443 must be accessible to both clients external to the -cluster and nodes within the cluster. Port 22623 must be accessible to nodes -within the cluster. - - -[cols="2a,2a,8a",options="header"] -|=== - -|Component -|AWS type -|Description - -|DNS -|`AWS::Route53::HostedZone` -|The hosted zone for your internal DNS. 
- -|Public load balancer -|`AWS::ElasticLoadBalancingV2::LoadBalancer` -|The load balancer for your public subnets. - -|External API server record -|`AWS::Route53::RecordSetGroup` -|Alias records for the external API server. - -|External listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 6443 for the external load balancer. - -|External target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the external load balancer. - -|Private load balancer -|`AWS::ElasticLoadBalancingV2::LoadBalancer` -|The load balancer for your private subnets. - -|Internal API server record -|`AWS::Route53::RecordSetGroup` -|Alias records for the internal API server. - -|Internal listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 22623 for the internal load balancer. - -|Internal target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the internal load balancer. - -|Internal listener -|`AWS::ElasticLoadBalancingV2::Listener` -|A listener on port 6443 for the internal load balancer. - -|Internal target group -|`AWS::ElasticLoadBalancingV2::TargetGroup` -|The target group for the internal load balancer. - -|=== - -.Security groups - -The control plane and worker machines require access to the following ports: - -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Group -|Type -|IP Protocol -|Port range - - -.4+|`MasterSecurityGroup` -.4+|`AWS::EC2::SecurityGroup` -|`icmp` -|`0` - -|`tcp` -|`22` - -|`tcp` -|`6443` - -|`tcp` -|`22623` - -.2+|`WorkerSecurityGroup` -.2+|`AWS::EC2::SecurityGroup` -|`icmp` -|`0` - -|`tcp` -|`22` - - -.2+|`BootstrapSecurityGroup` -.2+|`AWS::EC2::SecurityGroup` - -|`tcp` -|`22` - -|`tcp` -|`19531` - -|=== - -.Control plane Ingress - -The control plane machines require the following Ingress groups. Each Ingress group is -a `AWS::EC2::SecurityGroupIngress` resource. 
- -[cols="2a,5a,2a,2a",options="header"] -|=== - -|Ingress group -|Description -|IP protocol -|Port range - - -|`MasterIngressEtcd` -|etcd -|`tcp` -|`2379`- `2380` - -|`MasterIngressVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`MasterIngressWorkerVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`MasterIngressInternal` -|Internal cluster communication and Kubernetes proxy metrics -|`tcp` -|`9000` - `9999` - -|`MasterIngressWorkerInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`MasterIngressKube` -|Kubernetes kubelet, scheduler and controller manager -|`tcp` -|`10250` - `10259` - -|`MasterIngressWorkerKube` -|Kubernetes kubelet, scheduler and controller manager -|`tcp` -|`10250` - `10259` - -|`MasterIngressIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`MasterIngressWorkerIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`MasterIngressGeneve` -|Geneve packets -|`udp` -|`6081` - -|`MasterIngressWorkerGeneve` -|Geneve packets -|`udp` -|`6081` - -|`MasterIngressIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`MasterIngressWorkerIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`MasterIngressIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`MasterIngressWorkerIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`MasterIngressIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`MasterIngressWorkerIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`MasterIngressInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`MasterIngressWorkerInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`MasterIngressIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|`MasterIngressWorkerIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|=== - - -.Worker Ingress - -The worker machines require the following Ingress groups. Each Ingress group is -a `AWS::EC2::SecurityGroupIngress` resource. 
- -[cols="2a,5a,2a,2a",options="header"] -|=== - -|Ingress group -|Description -|IP protocol -|Port range - - -|`WorkerIngressVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`WorkerIngressWorkerVxlan` -|Vxlan packets -|`udp` -|`4789` - -|`WorkerIngressInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`WorkerIngressWorkerInternal` -|Internal cluster communication -|`tcp` -|`9000` - `9999` - -|`WorkerIngressKube` -|Kubernetes kubelet, scheduler, and controller manager -|`tcp` -|`10250` - -|`WorkerIngressWorkerKube` -|Kubernetes kubelet, scheduler, and controller manager -|`tcp` -|`10250` - -|`WorkerIngressIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`WorkerIngressWorkerIngressServices` -|Kubernetes Ingress services -|`tcp` -|`30000` - `32767` - -|`WorkerIngressGeneve` -|Geneve packets -|`udp` -|`6081` - -|`WorkerIngressMasterGeneve` -|Geneve packets -|`udp` -|`6081` - -|`WorkerIngressIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`WorkerIngressMasterIpsecIke` -|IPsec IKE packets -|`udp` -|`500` - -|`WorkerIngressIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`WorkerIngressMasterIpsecNat` -|IPsec NAT-T packets -|`udp` -|`4500` - -|`WorkerIngressIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`WorkerIngressMasterIpsecEsp` -|IPsec ESP packets -|`50` -|`All` - -|`WorkerIngressInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`WorkerIngressMasterInternalUDP` -|Internal cluster communication -|`udp` -|`9000` - `9999` - -|`WorkerIngressIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|`WorkerIngressMasterIngressServicesUDP` -|Kubernetes Ingress services -|`udp` -|`30000` - `32767` - -|=== - - -.Roles and instance profiles - -You must grant the machines permissions in AWS. The provided CloudFormation -templates grant the machines `Allow` permissions for the following `AWS::IAM::Role` objects -and provide a `AWS::IAM::InstanceProfile` for each set of roles. If you do -not use the templates, you can grant the machines the following broad permissions -or the following individual permissions. - -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Role -|Effect -|Action -|Resource - -.4+|Master -|`Allow` -|`ec2:*` -|`*` - -|`Allow` -|`elasticloadbalancing:*` -|`*` - -|`Allow` -|`iam:PassRole` -|`*` - -|`Allow` -|`s3:GetObject` -|`*` - -|Worker -|`Allow` -|`ec2:Describe*` -|`*` - - -.3+|Bootstrap -|`Allow` -|`ec2:Describe*` -|`*` - -|`Allow` -|`ec2:AttachVolume` -|`*` - -|`Allow` -|`ec2:DetachVolume` -|`*` - -|`Allow` -|`s3:GetObject` -|`*` - -|=== - -[id="installation-aws-user-infra-cluster-machines_{context}"] -== Cluster machines - -You need `AWS::EC2::Instance` objects for the following machines: - -* A bootstrap machine. This machine is required during installation, but you can remove it after your cluster deploys. -* Three control plane machines. The control plane machines are not governed by a control plane machine set. -* Compute machines. You must create at least two compute machines, which are also known as worker machines, during installation. These machines are not governed by a compute machine set. - -//// -You can also create and control them by using a MachineSet after your -control plane initializes and you can access the cluster API by using the `oc` -command line interface. 
-//// diff --git a/modules/installation-aws-user-infra-rhcos-ami.adoc b/modules/installation-aws-user-infra-rhcos-ami.adoc deleted file mode 100644 index bdaa1f3dd69c..000000000000 --- a/modules/installation-aws-user-infra-rhcos-ami.adoc +++ /dev/null @@ -1,210 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-aws-user-infra-rhcos-ami_{context}"] -= {op-system} AMIs for the AWS infrastructure - -Red Hat provides {op-system-first} AMIs that are valid for the various AWS regions and instance architectures that you can manually specify for your {product-title} nodes. - -[NOTE] -==== -By importing your own AMI, you can also install to regions that do not have a published {op-system} AMI. -==== - -ifndef::openshift-origin[] -.x86_64 {op-system} AMIs - -[cols="2a,2a",options="header"] -|=== - -|AWS zone -|AWS AMI - -|`af-south-1` -|`ami-052b3e6b060b5595d` - -|`ap-east-1` -|`ami-09c502968481ee218` - -|`ap-northeast-1` -|`ami-06b1dbe049e3c1d23` - -|`ap-northeast-2` -|`ami-08add6eb5aa1c8639` - -|`ap-northeast-3` -|`ami-0af4dfc64506fe20e` - -|`ap-south-1` -|`ami-09b1532dd3d63fdc0` - -|`ap-south-2` -|`ami-0a915cedf8558e600` - -|`ap-southeast-1` -|`ami-0c914fd7a50130c9e` - -|`ap-southeast-2` -|`ami-04b54199f4be0ec9d` - -|`ap-southeast-3` -|`ami-0be3ee78b9a3fdf07` - -|`ap-southeast-4` -|`ami-00a44d7d5054bb5f8` - -|`ca-central-1` -|`ami-0bb1fd49820ea09ae` - -|`eu-central-1` -|`ami-03d9cb166a11c9b8a` - -|`eu-central-2` -|`ami-089865c640f876630` - -|`eu-north-1` -|`ami-0e94d896e72eeae0d` - -|`eu-south-1` -|`ami-04df4e2850dce0721` - -|`eu-south-2` -|`ami-0d80de3a5ba722545` - -|`eu-west-1` -|`ami-066f2d86026ef97a8` - -|`eu-west-2` -|`ami-0f1c0b26b1c99499d` - -|`eu-west-3` -|`ami-0f639505a9c74d9a2` - -|`me-central-1` -|`ami-0fbb2ece8478f1402` - -|`me-south-1` -|`ami-01507551558853852` - -|`sa-east-1` -|`ami-097132aa0da53c981` - -|`us-east-1` -|`ami-0624891c612b5eaa0` - -|`us-east-2` -|`ami-0dc6c4d1bd5161f13` - -|`us-gov-east-1` -|`ami-0bab20368b3b9b861` - -|`us-gov-west-1` -|`ami-0fe8299f8e808e720` - -|`us-west-1` -|`ami-0c03b7e5954f10f9b` - -|`us-west-2` -|`ami-0f4cdfd74e4a3fc29` - -|=== - -.aarch64 {op-system} AMIs - -[cols="2a,2a",options="header"] -|=== - -|AWS zone -|AWS AMI - -|`af-south-1` -|`ami-0d684ca7c09e6f5fc` - -|`ap-east-1` -|`ami-01b0e1c24d180fe5d` - -|`ap-northeast-1` -|`ami-06439c626e2663888` - -|`ap-northeast-2` -|`ami-0a19d3bed3a2854e3` - -|`ap-northeast-3` -|`ami-08b8fa76fd46b5c58` - -|`ap-south-1` -|`ami-0ec6463b788929a6a` - -|`ap-south-2` -|`ami-0f5077b6d7e1b10a5` - -|`ap-southeast-1` -|`ami-081a6c6a24e2ee453` - -|`ap-southeast-2` -|`ami-0a70049ac02157a02` - -|`ap-southeast-3` -|`ami-065fd6311a9d7e6a6` - -|`ap-southeast-4` -|`ami-0105993dc2508c4f4` - -|`ca-central-1` -|`ami-04582d73d5aad9a85` - -|`eu-central-1` -|`ami-0f72c8b59213f628e` - -|`eu-central-2` -|`ami-0647f43516c31119c` - -|`eu-north-1` -|`ami-0d155ca6a531f5f72` - -|`eu-south-1` -|`ami-02f8d2794a663dbd0` - -|`eu-south-2` -|`ami-0427659985f520cae` - -|`eu-west-1` -|`ami-04e9944a8f9761c3e` - -|`eu-west-2` -|`ami-09c701f11d9a7b167` - -|`eu-west-3` -|`ami-02cd8181243610e0d` - -|`me-central-1` -|`ami-03008d03f133e6ec0` - -|`me-south-1` -|`ami-096bc3b4ec0faad76` - -|`sa-east-1` -|`ami-01f9b5a4f7b8c50a1` - -|`us-east-1` -|`ami-09ea6f8f7845792e1` - -|`us-east-2` -|`ami-039cdb2bf3b5178da` - -|`us-gov-east-1` -|`ami-0fed54a5ab75baed0` - -|`us-gov-west-1` 
-|`ami-0fc5be5af4bb1d79f` - -|`us-west-1` -|`ami-018e5407337da1062` - -|`us-west-2` -|`ami-0c0c67ef81b80e8eb` - -|=== -endif::openshift-origin[] diff --git a/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc b/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc deleted file mode 100644 index 9408672fb476..000000000000 --- a/modules/installation-aws_con_connecting-the-vpc-to-the-on-premise-network.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc - -:_content-type: CONCEPT -[id="connecting-the-vpc-to-the-on-premise-network_{context}"] -= Connecting the VPC to the on-premise network - -To expand the {product-title} cluster deployed on AWS with on-premise bare metal nodes, you must establish network connectivity between them. You will need to configure the networking using a virtual private network or AWS Direct Connect between the AWS VPC and your on-premise network. This allows traffic to flow between the on-premise nodes and the AWS nodes. - -Additionally, you need to ensure secure access to the Baseboard Management Controllers (BMCs) of the bare metal nodes. When expanding the cluster with the Baremetal Operator, access to the BMCs is required for remotely managing and monitoring the hardware of your on-premise nodes. - -To securely access the BMCs, you can create a separate, secure network segment or use a dedicated VPN connection specifically for BMC access. This way, you can isolate the BMC traffic from other network traffic, reducing the risk of unauthorized access or potential vulnerabilities. - -[WARNING] -==== -Misconfiguration of the network connection between the AWS and on-premise environments can expose the on-premise network and bare-metal nodes to the internet. That is a significant security risk, which might result in an attacker having full access to the exposed machines, and through them to the private network in these environments. -==== diff --git a/modules/installation-aws_con_installing-sno-on-aws.adoc b/modules/installation-aws_con_installing-sno-on-aws.adoc deleted file mode 100644 index a09e50c3bd5c..000000000000 --- a/modules/installation-aws_con_installing-sno-on-aws.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_sno/install-sno-installing-sno.adoc - -:_content-type: CONCEPT -[id="installing-sno-on-aws_{context}"] -= Installing {sno} on AWS - -Installing a single node cluster on AWS requires installer-provisioned installation using the "Installing a cluster on AWS with customizations" procedure. diff --git a/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc b/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc deleted file mode 100644 index bcf43736d62c..000000000000 --- a/modules/installation-aws_proc_creating-firewall-rules-for-port-6183.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_aws/installing-aws-expanding-a-cluster-with-on-premise-bare-metal-nodes.adoc - -:_content-type: PROCEDURE -[id="creating-firewall-rules-for-port-6183_{context}"] -= Creating firewall rules for port 6183 - -Port `6183` is open by default on the control plane. 
However, you must create a firewall rule for the VPC connection and for the on-premise network for the bare metal nodes to allow inbound and outbound traffic on that port. - -.Procedure - -. Modify the AWS VPC security group to open port `6183`: - -.. Navigate to the Amazon VPC console in the AWS Management Console. -.. In the left navigation pane, click on **Security Groups**. -.. Find and select the security group associated with the {product-title} cluster. -.. In the **Inbound rules** tab, click **Edit inbound rules**. -.. Click **Add rule** and select **Custom TCP Rule** as the rule type. -.. In the **Port range** field, enter `6183`. -.. In the **Source** field, specify the CIDR block for the on-premise network or the security group ID of the peered VPC (if you have VPC peering) to allow traffic only from the desired sources. -.. Click **Save rules**. - -. Modify the AWS VPC network access control lists to open port `6183`: - -.. In the Amazon VPC console, click on **Network ACLs** in the left navigation pane. -.. Find and select the network ACL associated with your {product-title} cluster's VPC. -.. In the **Inbound rules** tab, click **Edit inbound rules**. -.. Click **Add rule** and enter a rule number in the **Rule #** field. Choose a number that doesn't conflict with existing rules. -.. Select `TCP` as the protocol. -.. In the **Port range** field, enter `6183`. -.. In the **Source** field, specify the CIDR block for the on-premise network to allow traffic only from the desired sources. -.. Click **Save** to save the new rule. -.. Repeat the same process for the **Outbound rules** tab to allow outbound traffic on port `6183`. - -. Modify the on-premise network to allow traffic on port `6183`: - -.. Execute the following command to identify the zone you want to modify: -+ -[source,terminal] ----- -$ sudo firewall-cmd --list-all-zones ----- - -.. To open port `6183` for TCP traffic in the desired zone execute the following command: -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone=<zone> --add-port=6183/tcp --permanent ----- -+ -Replace `<zone>` with the appropriate zone name. - -.. Reload `firewalld` to apply the new rule: -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- \ No newline at end of file diff --git a/modules/installation-azure-about-government-region.adoc b/modules/installation-azure-about-government-region.adoc deleted file mode 100644 index 0ef5078be7f0..000000000000 --- a/modules/installation-azure-about-government-region.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-government-region.adoc - -[id="installation-azure-about-government-region_{context}"] -= Azure government regions - -{product-title} supports deploying a cluster to -link:https://docs.microsoft.com/en-us/azure/azure-government/documentation-government-welcome[Microsoft Azure Government (MAG)] -regions. MAG is specifically designed for US government agencies at the federal, -state, and local level, as well as contractors, educational institutions, and -other US customers that must run sensitive workloads on Azure. MAG is composed -of government-only data center regions, all granted an -link:https://docs.microsoft.com/en-us/microsoft-365/compliance/offering-dod-disa-l2-l4-l5?view=o365-worldwide#dod-impact-level-5-provisional-authorization[Impact Level 5 Provisional Authorization]. 
- -Installing to a MAG region requires manually configuring the Azure Government -dedicated cloud instance and region in the `install-config.yaml` file. You must -also update your service principal to reference the appropriate government -environment. - -[NOTE] -==== -The Azure government region cannot be selected using the guided terminal prompts -from the installation program. You must define the region manually in the -`install-config.yaml` file. Remember to also set the dedicated cloud instance, -like `AzureUSGovernmentCloud`, based on the region specified. -==== diff --git a/modules/installation-azure-arm-tested-machine-types.adoc b/modules/installation-azure-arm-tested-machine-types.adoc deleted file mode 100644 index d11dace44f89..000000000000 --- a/modules/installation-azure-arm-tested-machine-types.adoc +++ /dev/null @@ -1,20 +0,0 @@ - -// Module included in the following assemblies: -// -// installing/installing_azure/installing-azure-customizations.adoc -// installing/installing_azure/installing-azure-government-region.adoc -// installing/installing_azure/installing-azure-network-customizations.adoc -// installing/installing_azure/installing-azure-private.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_azure/installing-azure-vnet.adoc - -[id="installation-azure-arm-tested-machine-types_{context}"] -= Tested instance types for Azure on 64-bit ARM infrastructures - -The following Microsoft Azure ARM64 instance types have been tested with {product-title}. - -.Machine types based on 64-bit ARM architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/azure/tested_instance_types_aarch64.md[] -==== \ No newline at end of file diff --git a/modules/installation-azure-config-yaml.adoc b/modules/installation-azure-config-yaml.adoc deleted file mode 100644 index 66f7aeae24d2..000000000000 --- a/modules/installation-azure-config-yaml.adoc +++ /dev/null @@ -1,302 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - -ifeval::["{context}" == "installing-azure-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" != "installing-azure-network-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:vnet: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:private: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:gov: -endif::[] - -[id="installation-azure-config-yaml_{context}"] -= Sample customized install-config.yaml file for Azure - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. 
-==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> - hyperthreading: Enabled <3> <4> - name: master - platform: - azure: - encryptionAtHost: true - ultraSSDCapability: Enabled - osDisk: - diskSizeGB: 1024 <5> - diskType: Premium_LRS - diskEncryptionSet: - resourceGroup: disk_encryption_set_resource_group - name: disk_encryption_set_name - subscriptionId: secondary_subscription_id - type: Standard_D8s_v3 - replicas: 3 -compute: <2> -- hyperthreading: Enabled <3> - name: worker - platform: - azure: - ultraSSDCapability: Enabled - type: Standard_D2s_v3 - encryptionAtHost: true - osDisk: - diskSizeGB: 512 <5> - diskType: Standard_LRS - diskEncryptionSet: - resourceGroup: disk_encryption_set_resource_group - name: disk_encryption_set_name - subscriptionId: secondary_subscription_id - zones: <6> - - "1" - - "2" - - "3" - replicas: 5 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <2> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <7> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - defaultMachinePlatform: - ultraSSDCapability: Enabled - baseDomainResourceGroupName: resource_group <8> -ifndef::gov[] - region: centralus <1> -endif::gov[] -ifdef::gov[] - region: usgovvirginia -endif::gov[] - resourceGroupName: existing_resource_group <9> -ifdef::vnet,private,gov[] - networkResourceGroupName: vnet_resource_group <10> - virtualNetwork: vnet <11> - controlPlaneSubnet: control_plane_subnet <12> - computeSubnet: compute_subnet <13> -endif::vnet,private,gov[] -ifndef::private,gov[] - outboundType: Loadbalancer -endif::private,gov[] -ifdef::private,gov[] - outboundType: UserDefinedRouting <14> -endif::private,gov[] -ifndef::gov[] - cloudName: AzurePublicCloud -endif::gov[] -ifdef::gov[] - cloudName: AzureUSGovernmentCloud <15> -endif::gov[] -pullSecret: '{"auths": ...}' <1> -ifdef::vnet[] -ifndef::openshift-origin[] -fips: false <14> -sshKey: ssh-ed25519 AAAA... <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -endif::vnet[] -ifdef::private[] -ifndef::openshift-origin[] -fips: false <15> -sshKey: ssh-ed25519 AAAA... <16> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <15> -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -fips: false <16> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <17> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <16> -endif::openshift-origin[] -endif::gov[] -ifndef::vnet,private,gov[] -ifndef::openshift-origin[] -fips: false <10> -sshKey: ssh-ed25519 AAAA... <11> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <10> -endif::openshift-origin[] -endif::vnet,private,gov[] -ifdef::private[] -ifndef::openshift-origin[] -publish: Internal <17> -endif::openshift-origin[] -ifdef::openshift-origin[] -publish: Internal <16> -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -publish: Internal <18> -endif::openshift-origin[] -ifdef::openshift-origin[] -publish: Internal <17> -endif::openshift-origin[] -endif::gov[] ----- -ifndef::gov[] -<1> Required. The installation program prompts you for this value. -endif::gov[] -ifdef::gov[] -<1> Required. 
-endif::gov[] -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger virtual machine types, such as `Standard_D8s_v3`, for your machines if you disable simultaneous multithreading. -==== -<5> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -//To configure faster storage for etcd, especially for larger clusters, set the -//storage type as `io1` and set `iops` to `2000`. -<6> Specify a list of zones to deploy your machines to. For high availability, specify at least two zones. -<7> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<8> Specify the name of the resource group that contains the DNS zone for your base domain. -<9> Specify the name of an already existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -ifdef::vnet,private,gov[] -<10> If you use an existing VNet, specify the name of the resource group that contains it. -<11> If you use an existing VNet, specify its name. -<12> If you use an existing VNet, specify the name of the subnet to host the control plane machines. -<13> If you use an existing VNet, specify the name of the subnet to host the compute machines. -endif::vnet,private,gov[] -ifdef::private,gov[] -<14> You can customize your own outbound routing. Configuring user-defined routing prevents exposing external endpoints in your cluster. User-defined routing for egress requires deploying your cluster to an existing VNet. -endif::private,gov[] -ifdef::gov[] -<15> Specify the name of the Azure cloud environment to deploy your cluster to. Set `AzureUSGovernmentCloud` to deploy to a Microsoft Azure Government (MAG) region. The default value is `AzurePublicCloud`. -endif::gov[] -ifdef::vnet[] -ifndef::openshift-origin[] -<14> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<15> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -endif::vnet[] -ifdef::private[] -ifndef::openshift-origin[] -<15> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<16> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -<16> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<17> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<16> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::gov[] -ifndef::vnet,private,gov[] -ifndef::openshift-origin[] -<10> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<11> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<10> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::vnet,private,gov[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::private[] -ifndef::openshift-origin[] -<17> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -ifdef::openshift-origin[] -<16> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::private[] -ifdef::gov[] -ifndef::openshift-origin[] -<18> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. 
-endif::openshift-origin[] -ifdef::openshift-origin[] -<17> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::gov[] - -ifeval::["{context}" == "installing-azure-network-customizations"] -:!with-networking: -endif::[] -ifeval::["{context}" != "installing-azure-network-customizations"] -:!without-networking: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!vnet: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!private: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!gov: -endif::[] diff --git a/modules/installation-azure-create-dns-zones.adoc b/modules/installation-azure-create-dns-zones.adoc deleted file mode 100644 index e8a84aab60d2..000000000000 --- a/modules/installation-azure-create-dns-zones.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-dns-zones_{context}"] -= Example for creating DNS zones - -DNS records are required for clusters that use user-provisioned infrastructure. -You should choose the DNS strategy that fits your scenario. - -ifndef::ash[] -For this example, link:https://docs.microsoft.com/en-us/azure/dns/dns-overview[Azure's DNS solution] -is used, so you will create a new public DNS zone for external (internet) -visibility and a private DNS zone for internal cluster resolution. -endif::ash[] -ifdef::ash[] -For this example, link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-integrate-dns?view=azs-2102[Azure Stack Hub's datacenter DNS integration] is used, so you will create a DNS zone. -endif::ash[] - -ifndef::ash[] -[NOTE] -==== -The public DNS zone is not required to exist in the same resource group as the -cluster deployment and might already exist in your organization for the desired base domain. If that is the case, you can skip creating the public DNS zone; be sure the installation config you generated earlier reflects that scenario. -==== -endif::ash[] - -ifdef::ash[] -[NOTE] -==== -The DNS zone is not required to exist in the same resource group as the -cluster deployment and might already exist in your organization for the desired base domain. If that is the case, you can skip creating the DNS zone; be sure the installation config you generated earlier reflects that scenario. -==== -endif::ash[] - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -.Procedure - -ifndef::ash[] -. Create the new public DNS zone in the resource group exported in the -`BASE_DOMAIN_RESOURCE_GROUP` environment variable: -endif::ash[] -ifdef::ash[] -* Create the new DNS zone in the resource group exported in the -`BASE_DOMAIN_RESOURCE_GROUP` environment variable: -endif::ash[] -+ -[source,terminal] ----- -$ az network dns zone create -g ${BASE_DOMAIN_RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} ----- -+ -ifndef::ash[You can skip this step if you are using a public DNS zone that already exists.] -ifdef::ash[You can skip this step if you are using a DNS zone that already exists.] - -ifndef::ash[] -. 
Create the private DNS zone in the same resource group as the rest of this -deployment: -+ -[source,terminal] ----- -$ az network private-dns zone create -g ${RESOURCE_GROUP} -n ${CLUSTER_NAME}.${BASE_DOMAIN} ----- -endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-azure-create-ingress-dns-records.adoc b/modules/installation-azure-create-ingress-dns-records.adoc deleted file mode 100644 index 7c126498561a..000000000000 --- a/modules/installation-azure-create-ingress-dns-records.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-ingress-dns-records_{context}"] -= Adding the Ingress DNS records - -If you removed the DNS Zone configuration when creating Kubernetes manifests and -generating Ignition configs, you must manually create DNS records that point at -the Ingress load balancer. You can create either a wildcard -`*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other -records per your requirements. - -.Prerequisites - -* You deployed an {product-title} cluster on Microsoft {cp} by using infrastructure that you provisioned. -* Install the OpenShift CLI (`oc`). -* Install or update the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-yum?view=azure-cli-latest[Azure CLI]. - -.Procedure - -. Confirm the Ingress router has created a load balancer and populated the -`EXTERNAL-IP` field: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20 ----- - -. Export the Ingress router IP as a variable: -+ -[source,terminal] ----- -$ export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'` ----- -ifndef::ash[] -. Add a `*.apps` record to the public DNS zone. - -.. If you are adding this cluster to a new public zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- - -.. If you are adding this cluster to an already existing public zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -endif::ash[] -ifdef::ash[] -. Add a `*.apps` record to the DNS zone. - -.. If you are adding this cluster to a new DNS zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -.. If you are adding this cluster to an already existing DNS zone, run: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a ${PUBLIC_IP_ROUTER} --ttl 300 ----- -endif::ash[] - -ifndef::ash[] -. 
Add a `*.apps` record to the private DNS zone: -.. Create a `*.apps` record by using the following command: -+ -[source,terminal] ----- -$ az network private-dns record-set a create -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps --ttl 300 ----- -.. Add the `*.apps` record to the private DNS zone by using the following command: -+ -[source,terminal] ----- -$ az network private-dns record-set a add-record -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a ${PUBLIC_IP_ROUTER} ----- -endif::ash[] - -If you prefer to add explicit domains instead of using a wildcard, you can -create entries for each of the cluster's current routes: - -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- - -.Example output -[source,terminal] ----- -oauth-openshift.apps.cluster.basedomain.com -console-openshift-console.apps.cluster.basedomain.com -downloads-openshift-console.apps.cluster.basedomain.com -alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com -prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com ----- - -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-create-resource-group-and-identity.adoc b/modules/installation-azure-create-resource-group-and-identity.adoc deleted file mode 100644 index 00b7ce116ce1..000000000000 --- a/modules/installation-azure-create-resource-group-and-identity.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-create-resource-group-and-identity_{context}"] -= Creating the Azure resource group - -ifdef::azure[] -You must create a Microsoft Azure link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group] and an identity for that resource group. These are both used during the installation of your {product-title} cluster on Azure. -endif::azure[] -ifdef::ash[] -You must create a Microsoft Azure link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group]. This is used during the installation of your {product-title} cluster on Azure Stack Hub. -endif::ash[] - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -.Procedure - -ifdef::azure[] -. Create the resource group in a supported Azure region: -endif::azure[] -ifdef::ash[] -* Create the resource group in a supported Azure region: -endif::ash[] -+ -[source,terminal] ----- -$ az group create --name ${RESOURCE_GROUP} --location ${AZURE_REGION} ----- - -ifdef::azure[] -. Create an Azure identity for the resource group: -+ -[source,terminal] ----- -$ az identity create -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity ----- -+ -This is used to grant the required access to Operators in your cluster. For -example, this allows the Ingress Operator to create a public IP and its load -balancer. You must assign the Azure identity to a role. - -. 
Grant the Contributor role to the Azure identity: - -.. Export the following variables required by the Azure role assignment: -+ -[source,terminal] ----- -$ export PRINCIPAL_ID=`az identity show -g ${RESOURCE_GROUP} -n ${INFRA_ID}-identity --query principalId --out tsv` ----- -+ -[source,terminal] ----- -$ export RESOURCE_GROUP_ID=`az group show -g ${RESOURCE_GROUP} --query id --out tsv` ----- - -.. Assign the Contributor role to the identity: -+ -[source,terminal] ----- -$ az role assignment create --assignee "${PRINCIPAL_ID}" --role 'Contributor' --scope "${RESOURCE_GROUP_ID}" ----- -+ -[NOTE] -==== -If you want to assign a custom role with all the required permissions to the identity, run the following command: -[source,terminal] ----- -$ az role assignment create --assignee "${PRINCIPAL_ID}" --role <custom_role> \ <1> ---scope "${RESOURCE_GROUP_ID}" ----- -<1> Specifies the custom role name. -==== -endif::azure[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] diff --git a/modules/installation-azure-finalizing-encryption.adoc deleted file mode 100644 index 84bfe847e7eb..000000000000 --- a/modules/installation-azure-finalizing-encryption.adoc +++ /dev/null @@ -1,155 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc - - -ifeval::["{context}" == "installing-azure-customizations"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure-public: -endif::[] - -:_content-type: PROCEDURE -[id="finalizing-encryption_{context}"] -= Finalizing user-managed encryption after installation -If you installed {product-title} using a user-managed encryption key, you can complete the installation by creating a new storage class and granting write permissions to the Azure cluster resource group. - -.Procedure - -. Obtain the identity of the cluster resource group used by the installer: -.. If you specified an existing resource group in `install-config.yaml`, obtain its Azure identity by running the following command: -+ -[source,terminal] ----- -$ az identity list --resource-group "<existing_resource_group>" ----- -.. If you did not specify an existing resource group in `install-config.yaml`, locate the resource group that the installer created, and then obtain its Azure identity by running the following commands: -+ -[source,terminal] ----- -$ az group list ----- -+ -[source,terminal] ----- -$ az identity list --resource-group "<installer_created_resource_group>" ----- -+ -.
Grant a role assignment to the cluster resource group so that it can write to the Disk Encryption Set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --role "<privileged_role>" \// <1> - --assignee "<resource_group_identity>" <2> ----- -<1> Specifies an Azure role that has read/write permissions to the disk encryption set. You can use the `Owner` role or a custom role with the necessary permissions. -<2> Specifies the identity of the cluster resource group. -+ -. Obtain the `id` of the disk encryption set you created prior to installation by running the following command: -+ -[source,terminal] ----- -$ az disk-encryption-set show -n <disk_encryption_set_name> \// <1> - --resource-group <resource_group_name> <2> ----- -<1> Specifies the name of the disk encryption set. -<2> Specifies the resource group that contains the disk encryption set. -The `id` is in the format of `"/subscriptions/.../resourceGroups/.../providers/Microsoft.Compute/diskEncryptionSets/..."`. -+ -. Obtain the identity of the cluster service principal by running the following command: -+ -[source,terminal] ----- -$ az identity show -g <cluster_resource_group> \// <1> - -n <cluster_service_principal_name> \// <2> - --query principalId --out tsv ----- -<1> Specifies the name of the cluster resource group created by the installation program. -<2> Specifies the name of the cluster service principal created by the installation program. -The identity is in the format of `12345678-1234-1234-1234-1234567890`. -ifdef::azure-gov[] -. Create a role assignment that grants the cluster service principal `Contributor` privileges to the disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --assignee <cluster_service_principal_id> \// <1> - --role 'Contributor' \// - --scope <disk_encryption_set_id> \// <2> ----- -<1> Specifies the ID of the cluster service principal obtained in the previous step. -<2> Specifies the ID of the disk encryption set. -endif::azure-gov[] -ifdef::azure-public[] -. Create a role assignment that grants the cluster service principal necessary privileges to the disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --assignee <cluster_service_principal_id> \// <1> - --role <privileged_role> \// <2> - --scope <disk_encryption_set_id> \// <3> ----- -<1> Specifies the ID of the cluster service principal obtained in the previous step. -<2> Specifies the Azure role name. You can use the `Contributor` role or a custom role with the necessary permissions. -<3> Specifies the ID of the disk encryption set. -endif::azure-public[] -+ -. Create a storage class that uses the user-managed disk encryption set: -.. Save the following storage class definition to a file, for example `storage-class-definition.yaml`: -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: managed-premium -provisioner: kubernetes.io/azure-disk -parameters: - skuname: Premium_LRS - kind: Managed - diskEncryptionSetID: "<disk_encryption_set_ID>" <1> - resourceGroup: "<resource_group_name>" <2> -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: WaitForFirstConsumer ----- -<1> Specifies the ID of the disk encryption set that you created in the prerequisite steps, for example `"/subscriptions/xxxxxx-xxxxx-xxxxx/resourceGroups/test-encryption/providers/Microsoft.Compute/diskEncryptionSets/disk-encryption-set-xxxxxx"`. 
-<2> Specifies the name of the resource group used by the installer. This is the same resource group from the first step. -.. Create the storage class `managed-premium` from the file you created by running the following command: -+ -[source,terminal] ----- -$ oc create -f storage-class-definition.yaml ----- -. Select the `managed-premium` storage class when you create persistent volumes to use encrypted storage. - - - -ifeval::["{context}" == "installing-azure-customizations"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure-public: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure-public: -endif::[] \ No newline at end of file diff --git a/modules/installation-azure-increasing-limits.adoc b/modules/installation-azure-increasing-limits.adoc deleted file mode 100644 index 2e60107fdd1a..000000000000 --- a/modules/installation-azure-increasing-limits.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-azure-increasing-limits_{context}"] -= Increasing Azure account limits - -To increase an account limit, file a support request on the Azure portal. -[NOTE] -==== -You can increase only one type of quota per support request. -==== - -.Procedure - -. From the Azure portal, click *Help + support* in the lower left corner. - -. Click *New support request* and then select the required values: -.. From the *Issue type* list, select *Service and subscription limits (quotas)*. -.. From the *Subscription* list, select the subscription to modify. -.. From the *Quota type* list, select the quota to increase. For example, select -*Compute-VM (cores-vCPUs) subscription limit increases* to increase the number -of vCPUs, which is required to install a cluster. -.. Click *Next: Solutions*. - -. On the *Problem Details* page, provide the required information for your quota -increase: -.. Click *Provide details* and provide the required details in the *Quota details* window. -.. In the SUPPORT METHOD and CONTACT INFO sections, provide the issue severity -and your contact details. - -. Click *Next: Review + create* and then click *Create*. 
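As a minimal sketch, assuming the Azure CLI is installed and logged in to the subscription in question, current consumption against the regional compute quotas can be reviewed before filing the support request; `<region>` is a placeholder for the target Azure region.

[source,terminal]
----
$ az vm list-usage --location <region> --output table
----

Compare the reported vCPU usage and limits with the requirements described in the account limits that follow.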
diff --git a/modules/installation-azure-limits.adoc b/modules/installation-azure-limits.adoc deleted file mode 100644 index 4a85205a3bc4..000000000000 --- a/modules/installation-azure-limits.adoc +++ /dev/null @@ -1,232 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -:upi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:ash: -:upi: -:cp: Azure Stack Hub -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -:upi: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:cp: Azure -endif::[] - -:_content-type: REFERENCE -[id="installation-azure-limits_{context}"] -= {cp} account limits - -ifndef::ash[] -The {product-title} cluster uses a number of Microsoft {cp} components, and the default link:https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits[Azure subscription and service limits, quotas, and constraints] affect your ability to install {product-title} clusters. - -[IMPORTANT] -==== -Default limits vary by offer category types, such as Free Trial and Pay-As-You-Go, and by series, such as Dv2, F, and G. For example, the default for Enterprise Agreement subscriptions is 350 cores. - -Check the limits for your subscription type and if necessary, increase quota limits for your account before you install a default -cluster on Azure. -==== -endif::ash[] -ifdef::ash[] -The {product-title} cluster uses a number of Microsoft Azure Stack Hub components, and the default link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-quota-types?view=azs-2102[Quota types in Azure Stack Hub] affect your ability to install {product-title} clusters. -endif::ash[] - -The following table summarizes the {cp} components whose limits can impact your -ability to install and run {product-title} clusters. - -ifndef::ash[] -[cols="2a,3a,3a,8a",options="header"] -|=== -|Component |Number of components required by default| Default {cp} limit |Description -endif::ash[] -ifdef::ash[] -[cols="2a,3a,8a",options="header"] -|=== -|Component |Number of components required by default |Description -endif::ash[] - -|vCPU -ifndef::ash[] -ifndef::upi[] -|44 -endif::upi[] -ifdef::upi[] -|40 -endif::upi[] -|20 per region -ifndef::upi[] -|A default cluster requires 44 vCPUs, so you must increase the account limit. -endif::upi[] -ifdef::upi[] -|A default cluster requires 40 vCPUs, so you must increase the account limit. -endif::upi[] - -By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane machines -* Three compute machines - -ifndef::upi[] -Because the bootstrap and control plane machines use `Standard_D8s_v3` virtual -machines, which use 8 vCPUs, and the compute machines use `Standard_D4s_v3` -virtual machines, which use 4 vCPUs, a default cluster requires 44 vCPUs. -The bootstrap node VM, which uses 8 vCPUs, is used only during installation. 
-endif::upi[] -ifdef::upi[] -Because the bootstrap machine uses `Standard_D4s_v3` machines, which use 4 vCPUs, -the control plane machines use `Standard_D8s_v3` virtual -machines, which use 8 vCPUs, and the worker machines use `Standard_D4s_v3` -virtual machines, which use 4 vCPUs, a default cluster requires 40 vCPUs. -The bootstrap node VM, which uses 4 vCPUs, is used only during installation. -endif::upi[] -endif::ash[] -ifdef::ash[] -|56 -|A default cluster requires 56 vCPUs, so you must increase the account limit. - -By default, each cluster creates the following instances: - -* One bootstrap machine, which is removed after installation -* Three control plane machines -* Three compute machines - -Because the bootstrap, control plane, and worker machines use `Standard_DS4_v2` virtual machines, which use 8 vCPUs, a default cluster requires 56 vCPUs. The bootstrap node VM is used only during installation. -endif::ash[] - -To deploy more worker nodes, enable autoscaling, deploy large workloads, or use -a different instance type, you must further increase the vCPU limit for your -account to ensure that your cluster can deploy the machines that you require. - -ifndef::ash[] -By default, the installation program distributes control plane and compute machines across -link:https://azure.microsoft.com/en-us/global-infrastructure/availability-zones/[all availability zones] -within -link:https://azure.microsoft.com/en-us/global-infrastructure/regions[a region]. -To ensure high availability for your cluster, select a region with at least -three availability zones. If your region contains fewer than three availability -zones, the installation program places more than one control plane machine in the -available zones. -endif::ash[] - -ifndef::ash[] -|OS Disk -|7 -| -|Each cluster machine must have a minimum of 100 GB of storage and 300 IOPS. While these are the minimum supported values, faster storage is recommended for production clusters and clusters with intensive workloads. For more information about optimizing storage for performance, see the page titled "Optimizing storage" in the "Scalability and performance" section. -endif::ash[] - -|VNet -| 1 -ifndef::ash[] -| 1000 per region -endif::ash[] -| Each default cluster requires one Virtual Network (VNet), which contains two -subnets. - -|Network interfaces -|7 -ifndef::ash[] -|65,536 per region -endif::ash[] -|Each default cluster requires seven network interfaces. If you create more -machines or your deployed workloads create load balancers, your cluster uses -more network interfaces. - -|Network security groups -|2 -ifndef::ash[] -|5000 -endif::ash[] -| Each cluster creates network security groups for each subnet in the VNet. 
-The default cluster creates network -security groups for the control plane and for the compute node subnets: - -[horizontal] - `controlplane`:: Allows the control plane machines to be reached on port 6443 - from anywhere -`node`:: Allows worker nodes to be reached from the internet on ports 80 and 443 - -|Network load balancers -| 3 -ifndef::ash[] -| 1000 per region -endif::ash[] -|Each cluster creates the following -link:https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-overview[load balancers]: - -[horizontal] -`default`:: Public IP address that load balances requests to ports 80 and 443 across worker machines -`internal`:: Private IP address that load balances requests to ports 6443 and 22623 across control plane machines -`external`:: Public IP address that load balances requests to port 6443 across control plane machines - -If your applications create more Kubernetes `LoadBalancer` service objects, -your cluster uses more load balancers. - -|Public IP addresses -ifndef::ash[] -|3 -| -|Each of the two public load balancers uses a public IP address. The bootstrap -machine also uses a public IP address so that you can SSH into the -machine to troubleshoot issues during installation. The IP address for the -bootstrap node is used only during installation. -endif::ash[] -ifdef::ash[] -|2 -|The public load balancer uses a public IP address. The bootstrap -machine also uses a public IP address so that you can SSH into the -machine to troubleshoot issues during installation. The IP address for the -bootstrap node is used only during installation. -endif::ash[] - -|Private IP addresses -|7 -ifndef::ash[] -| -endif::ash[] -|The internal load balancer, each of the three control plane machines, and each -of the three worker machines each use a private IP address. - -ifndef::ash[] -|Spot VM vCPUs (optional) -|0 - -If you configure spot VMs, your cluster must have two spot VM vCPUs for every compute node. -|20 per region -|This is an optional component. To use spot VMs, you must increase the Azure default limit to at least twice the number of compute nodes in your cluster. -[NOTE] -==== -Using spot VMs for control plane nodes is not recommended. 
-==== -endif::ash[] -|=== - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:!ash: -:!cp: Azure Stack Hub -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -:!upi: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:!cp: Azure -endif::[] diff --git a/modules/installation-azure-marketplace-subscribe.adoc b/modules/installation-azure-marketplace-subscribe.adoc deleted file mode 100644 index bacdb4b01f84..000000000000 --- a/modules/installation-azure-marketplace-subscribe.adoc +++ /dev/null @@ -1,221 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-azure-customizations.adoc -// * installing/installing_aws/installing-azure-user-infra.adoc -// * machine_management/creating-machineset-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "installing-azure-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:upi: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:mapi: -endif::[] - -//mpytlak: The procedure differs depending on whether this module is used in an IPI or UPI assembly. -//jrouth: Also some variations for when it appears in the machine management content (`mapi`). - -:_content-type: PROCEDURE -[id="installation-azure-marketplace-subscribe_{context}"] -= Selecting an Azure Marketplace image -ifndef::mapi[] -If you are deploying an {product-title} cluster using the Azure Marketplace offering, you must first obtain the Azure Marketplace image. The installation program uses this image to deploy worker nodes. When obtaining your image, consider the following: -endif::mapi[] -ifdef::mapi[] -You can create a machine set running on Azure that deploys machines that use the Azure Marketplace offering. To use this offering, you must first obtain the Azure Marketplace image. When obtaining your image, consider the following: -endif::mapi[] - -* While the images are the same, the Azure Marketplace publisher is different depending on your region. If you are located in North America, specify `redhat` as the publisher. If you are located in EMEA, specify `redhat-limited` as the publisher. -* The offer includes a `rh-ocp-worker` SKU and a `rh-ocp-worker-gen1` SKU. The `rh-ocp-worker` SKU represents a Hyper-V generation version 2 VM image. The default instance types used in {product-title} are version 2 compatible. If you plan to use an instance type that is only version 1 compatible, use the image associated with the `rh-ocp-worker-gen1` SKU. The `rh-ocp-worker-gen1` SKU represents a Hyper-V version 1 VM image. -//What happens with control plane machines? "worker" SKU seems incorrect - -[IMPORTANT] -==== -Installing images with the Azure marketplace is not supported on clusters with 64-bit ARM instances. -==== - -.Prerequisites - -* You have installed the Azure CLI client `(az)`. -* Your Azure account is entitled for the offer and you have logged into this account with the Azure CLI client. - -.Procedure - -. 
Display all of the available {product-title} images by running one of the following commands: -+ --- -** North America: -+ -[source,terminal] ----- -$ az vm image list --all --offer rh-ocp-worker --publisher redhat -o table ----- -+ -.Example output -[source,terminal] ----- -Offer Publisher Sku Urn Version -------------- -------------- ------------------ -------------------------------------------------------------- -------------- -rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocpworker:4.8.2021122100 4.8.2021122100 -rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table ----- -+ -.Example output -[source,terminal] ----- -Offer Publisher Sku Urn Version -------------- -------------- ------------------ -------------------------------------------------------------- -------------- -rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.8.2021122100 4.8.2021122100 -rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 ----- --- -+ -[NOTE] -==== -Regardless of the version of {product-title} that you install, the correct version of the Azure Marketplace image to use is 4.8. If required, your VMs are automatically upgraded as part of the installation process. -==== -. Inspect the image for your offer by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> ----- -. Review the terms of the offer by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> ----- -. Accept the terms of the offering by running one of the following commands: -** North America: -+ -[source,terminal] ----- -$ az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> ----- -** EMEA: -+ -[source,terminal] ----- -$ az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> ----- -ifdef::ipi[] -. Record the image details of your offer. You must update the `compute` section in the `install-config.yaml` file with values for `publisher`, `offer`, `sku`, and `version` before deploying the cluster. -endif::ipi[] -ifdef::upi[] -. Record the image details of your offer. If you use the Azure Resource Manager (ARM) template to deploy your worker nodes: -+ -.. Update `storageProfile.imageReference` by deleting the `id` parameter and adding the `offer`, `publisher`, `sku`, and `version` parameters by using the values from your offer. -.. Specify a `plan` for the virtual machines (VMs). -+ -.Example `06_workers.json` ARM template with an updated `storageProfile.imageReference` object and a specified `plan` -+ -[source,json,subs="none"] ----- -... - "plan" : { - "name": "rh-ocp-worker", - "product": "rh-ocp-worker", - "publisher": "redhat" - }, - "dependsOn" : [ - "[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]" - ], - "properties" : { -... 
- "storageProfile": { - "imageReference": { - "offer": "rh-ocp-worker", - "publisher": "redhat", - "sku": "rh-ocp-worker", - "version": "4.8.2021122100" - } - ... - } -... - } ----- - -endif::upi[] -ifdef::mapi[] -. Record the image details of your offer, specifically the values for `publisher`, `offer`, `sku`, and `version`. -endif::mapi[] - -ifdef::ipi[] -.Sample `install-config.yaml` file with the Azure Marketplace worker nodes - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- hyperthreading: Enabled - name: worker - platform: - azure: - type: Standard_D4s_v5 - osImage: - publisher: redhat - offer: rh-ocp-worker - sku: rh-ocp-worker - version: 4.8.2021122100 - replicas: 3 ----- -endif::ipi[] -ifdef::mapi[] -. Add the following parameters to the `providerSpec` section of your machine set YAML file using the image details for your offer: -+ -.Sample `providerSpec` image values for Azure Marketplace machines -[source,yaml] ----- -providerSpec: - value: - image: - offer: rh-ocp-worker - publisher: redhat - resourceID: "" - sku: rh-ocp-worker - type: MarketplaceWithPlan - version: 4.8.2021122100 ----- -//offer also has "worker" -endif::mapi[] - -ifeval::["{context}" == "installing-azure-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!upi: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:!mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!mapi: -endif::[] \ No newline at end of file diff --git a/modules/installation-azure-marketplace.adoc b/modules/installation-azure-marketplace.adoc deleted file mode 100644 index b7608a754331..000000000000 --- a/modules/installation-azure-marketplace.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-azure-account.adoc - -:_content-type: CONCEPT -[id="installation-azure-marketplace_{context}"] -= Supported Azure Marketplace regions - -Installing a cluster using the Azure Marketplace image is available to customers who purchase the offer in North America and EMEA. - -While the offer must be purchased in North America or EMEA, you can deploy the cluster to any of the Azure public partitions that {product-title} supports. - -[NOTE] -==== -Deploying a cluster using the Azure Marketplace image is not supported for the Azure Government regions. -==== diff --git a/modules/installation-azure-network-config.adoc b/modules/installation-azure-network-config.adoc deleted file mode 100644 index e23deb945e48..000000000000 --- a/modules/installation-azure-network-config.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-azure-network-config_{context}"] -= Configuring a public DNS zone in Azure - -To install {product-title}, the Microsoft Azure account you use must -have a dedicated public hosted DNS zone in your account. This zone must be -authoritative for the domain. This service provides -cluster DNS resolution and name lookup for external connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. You can transfer an -existing domain and registrar or obtain a new one through Azure or another source. 
-+ -[NOTE] -==== -For more information about purchasing domains through Azure, see -link:https://docs.microsoft.com/en-us/azure/app-service/manage-custom-dns-buy-domain[Buy a custom domain name for Azure App Service] -in the Azure documentation. -==== - -. If you are using an existing domain and registrar, migrate its DNS to Azure. See -link:https://docs.microsoft.com/en-us/azure/app-service/manage-custom-dns-migrate-domain[Migrate an active DNS name to Azure App Service] -in the Azure documentation. - -. Configure DNS for your domain. Follow the steps in the -link:https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns[Tutorial: Host your domain in Azure DNS] -in the Azure documentation to create a public hosted zone for your domain or -subdomain, extract the new authoritative name servers, and update the registrar -records for the name servers that your domain uses. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, -such as `clusters.openshiftcorp.com`. - -. If you use a subdomain, follow your company's procedures to add its delegation -records to the parent domain. diff --git a/modules/installation-azure-permissions.adoc b/modules/installation-azure-permissions.adoc deleted file mode 100644 index 7d23ada530a6..000000000000 --- a/modules/installation-azure-permissions.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc - -[id="installation-azure-permissions_{context}"] -= Required Azure roles - -{product-title} needs a service principal so it can manage Microsoft Azure resources. Before you can create a service principal, your Azure account subscription must have the following roles: - -* `User Access Administrator` -* `Contributor` - -To set roles on the Azure portal, see the link:https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal[Manage access to Azure resources using RBAC and the Azure portal] in the Azure documentation. \ No newline at end of file diff --git a/modules/installation-azure-preparing-diskencryptionsets.adoc b/modules/installation-azure-preparing-diskencryptionsets.adoc deleted file mode 100644 index 858b35021320..000000000000 --- a/modules/installation-azure-preparing-diskencryptionsets.adoc +++ /dev/null @@ -1,132 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_azure/enabling-disk-encryption-sets-azure.adoc - -:_content-type: PROCEDURE -[id="preparing-disk-encryption-sets"] -= Preparing an Azure Disk Encryption Set -The {product-title} installer can use an existing Disk Encryption Set with a user-managed key. To enable this feature, you can create a Disk Encryption Set in Azure and provide the key to the installer. - -.Procedure - -. Set the following environment variables for the Azure resource group by running the following command: -+ -[source,terminal] ----- -$ export RESOURCEGROUP="<resource_group>" \// <1> - LOCATION="<location>" <2> ----- -<1> Specifies the name of the Azure resource group where you will create the Disk Encryption Set and encryption key. To avoid losing access to your keys after destroying the cluster, you should create the Disk Encryption Set in a different resource group than the resource group where you install the cluster. -<2> Specifies the Azure location where you will create the resource group. -+ -. 
Set the following environment variables for the Azure Key Vault and Disk Encryption Set by running the following command: -+ -[source,terminal] ----- -$ export KEYVAULT_NAME="<keyvault_name>" \// <1> - KEYVAULT_KEY_NAME="<keyvault_key_name>" \// <2> - DISK_ENCRYPTION_SET_NAME="<disk_encryption_set_name>" <3> ----- -<1> Specifies the name of the Azure Key Vault you will create. -<2> Specifies the name of the encryption key you will create. -<3> Specifies the name of the disk encryption set you will create. -+ -. Set the environment variable for the ID of your Azure Service Principal by running the following command: -+ -[source,terminal] ----- -$ export CLUSTER_SP_ID="<service_principal_id>" <1> ----- -<1> Specifies the ID of the service principal you will use for this installation. -+ -. Enable host-level encryption in Azure by running the following commands: -+ -[source,terminal] ----- -$ az feature register --namespace "Microsoft.Compute" --name "EncryptionAtHost" ----- -+ -[source,terminal] ----- -$ az feature show --namespace Microsoft.Compute --name EncryptionAtHost ----- -+ -[source,terminal] ----- -$ az provider register -n Microsoft.Compute ----- -+ -. Create an Azure Resource Group to hold the disk encryption set and associated resources by running the following command: -+ -[source,terminal] ----- -$ az group create --name $RESOURCEGROUP --location $LOCATION ----- -+ -. Create an Azure key vault by running the following command: -+ -[source,terminal] ----- -$ az keyvault create -n $KEYVAULT_NAME -g $RESOURCEGROUP -l $LOCATION \ - --enable-purge-protection true --enable-soft-delete true ----- -+ -. Create an encryption key in the key vault by running the following command: -+ -[source,terminal] ----- -$ az keyvault key create --vault-name $KEYVAULT_NAME -n $KEYVAULT_KEY_NAME \ - --protection software ----- -+ -. Capture the ID of the key vault by running the following command: -+ -[source,terminal] ----- -$ KEYVAULT_ID=$(az keyvault show --name $KEYVAULT_NAME --query "[id]" -o tsv) ----- -+ -. Capture the key URL in the key vault by running the following command: -+ -[source,terminal] ----- -$ KEYVAULT_KEY_URL=$(az keyvault key show --vault-name $KEYVAULT_NAME --name \ - $KEYVAULT_KEY_NAME --query "[key.kid]" -o tsv) ----- -+ -. Create a disk encryption set by running the following command: -+ -[source,terminal] ----- -$ az disk-encryption-set create -n $DISK_ENCRYPTION_SET_NAME -l $LOCATION -g \ - $RESOURCEGROUP --source-vault $KEYVAULT_ID --key-url $KEYVAULT_KEY_URL ----- -+ -. Grant the DiskEncryptionSet resource access to the key vault by running the following commands: -+ -[source,terminal] ----- -$ DES_IDENTITY=$(az disk-encryption-set show -n $DISK_ENCRYPTION_SET_NAME -g \ - $RESOURCEGROUP --query "[identity.principalId]" -o tsv) ----- -+ -[source,terminal] ----- -$ az keyvault set-policy -n $KEYVAULT_NAME -g $RESOURCEGROUP --object-id \ - $DES_IDENTITY --key-permissions wrapkey unwrapkey get ----- -+ -. Grant the Azure Service Principal permission to read the DiskEncryptionSet by running the following commands: -+ -[source,terminal] ----- -$ DES_RESOURCE_ID=$(az disk-encryption-set show -n $DISK_ENCRYPTION_SET_NAME -g \ - $RESOURCEGROUP --query "[id]" -o tsv) ----- -+ -[source,terminal] ----- -$ az role assignment create --assignee $CLUSTER_SP_ID --role "<reader_role>" \// <1> - --scope $DES_RESOURCE_ID -o jsonc ----- -<1> Specifies an Azure role with read permissions to the disk encryption set. You can use the `Owner` role or a custom role with the necessary permissions. 
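As a minimal sketch that mirrors the sample customized `install-config.yaml` file shown earlier in this changeset, the disk encryption set created by the preceding procedure is referenced from a machine pool's `osDisk.diskEncryptionSet` fields. The placeholder values are assumptions; substitute the resource group, disk encryption set name, and subscription ID used above.

[source,yaml]
----
controlPlane:
  name: master
  platform:
    azure:
      encryptionAtHost: true
      osDisk:
        diskSizeGB: 1024
        diskType: Premium_LRS
        diskEncryptionSet:
          resourceGroup: <resource_group>  # resource group that holds the disk encryption set, kept separate from the cluster resource group
          name: <disk_encryption_set_name>  # disk encryption set created in the preceding procedure
          subscriptionId: <subscription_id>  # subscription that owns the disk encryption set
----

The `compute` machine pool accepts the same `osDisk.diskEncryptionSet` block, as the full sample file shows.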
diff --git a/modules/installation-azure-regions.adoc b/modules/installation-azure-regions.adoc deleted file mode 100644 index 64d1540d3a7a..000000000000 --- a/modules/installation-azure-regions.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -[id="installation-azure-regions_{context}"] -= Supported Azure regions - -The installation program dynamically generates the list of available Microsoft Azure regions based on your subscription. - -[discrete] -== Supported Azure public regions - -* `australiacentral` (Australia Central) -* `australiaeast` (Australia East) -* `australiasoutheast` (Australia South East) -* `brazilsouth` (Brazil South) -* `canadacentral` (Canada Central) -* `canadaeast` (Canada East) -* `centralindia` (Central India) -* `centralus` (Central US) -* `eastasia` (East Asia) -* `eastus` (East US) -* `eastus2` (East US 2) -* `francecentral` (France Central) -//* francesouth (France South) -* `germanywestcentral` (Germany West Central) -* `japaneast` (Japan East) -* `japanwest` (Japan West) -* `koreacentral` (Korea Central) -* `koreasouth` (Korea South) -* `northcentralus` (North Central US) -* `northeurope` (North Europe) -* `norwayeast` (Norway East) -* `qatarcentral` (Qatar Central) -* `southafricanorth` (South Africa North) -//* southafricawest (South Africa West) -* `southcentralus` (South Central US) -* `southeastasia` (Southeast Asia) -* `southindia` (South India) -* `swedencentral` (Sweden Central) -* `switzerlandnorth` (Switzerland North) -//* uaecentral (UAE Central) -* `uaenorth` (UAE North) -* `uksouth` (UK South) -* `ukwest` (UK West) -* `westcentralus` (West Central US) -* `westeurope` (West Europe) -* `westindia` (West India) -* `westus` (West US) -* `westus2` (West US 2) -* `westus3` (West US 3) - -[discrete] -== Supported Azure Government regions - -Support for the following Microsoft Azure Government (MAG) regions was added in {product-title} version 4.6: - -* `usgovtexas` (US Gov Texas) -* `usgovvirginia` (US Gov Virginia) -//* usdodcentral (US DoD Central) -//* usdodeast (US DoD East) -//* usgovarizona (US Gov Arizona) -//* usgoviowa (US Gov Iowa) - -You can reference all available MAG regions in the link:https://azure.microsoft.com/en-us/global-infrastructure/geographies/#geographies[Azure documentation]. Other provided MAG regions are expected to work with {product-title}, but have not been tested. 
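The lists above reflect the regions that were documented at the time of writing. If you want to confirm which regions are actually exposed to your own subscription and cloud environment before installing, one possible check, not part of the original module, is to query them with the Azure CLI:

[source,terminal]
----
$ az account list-locations --query "[].name" -o tsv
----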
diff --git a/modules/installation-azure-service-principal.adoc b/modules/installation-azure-service-principal.adoc deleted file mode 100644 index 195a9b309e2f..000000000000 --- a/modules/installation-azure-service-principal.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:upi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-service-principal_{context}"] -= Creating a service principal - -Because {product-title} and its installation program create Microsoft Azure resources by using the Azure Resource Manager, you must create a service principal to represent it. - -.Prerequisites - -* Install or update the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-yum?view=azure-cli-latest[Azure CLI]. -* Your Azure account has the required roles for the subscription that you use. -ifdef::ipi[] -* If you want to use a custom role, you have created a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the required permissions listed in the _Required Azure permissions for installer-provisioned infrastructure_ section. -endif::ipi[] -ifdef::upi[] -* If you want to use a custom role, you have created a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the required permissions listed in the _Required Azure permissions for user-provisioned infrastructure_ section. -endif::upi[] - -.Procedure - -ifdef::ash[] -. Register your environment: -+ -[source,terminal] ----- -$ az cloud register -n AzureStackCloud --endpoint-resource-manager <endpoint> <1> ----- -<1> Specify the Azure Resource Manager endpoint, \`https://management.<region>.<fqdn>/`. -+ -See the link:https://docs.microsoft.com/en-us/azure-stack/mdc/azure-stack-version-profiles-azurecli-2-tzl#connect-to-azure-stack-hub[Microsoft documentation] for details. - -. Set the active environment: -+ -[source,terminal] ----- -$ az cloud set -n AzureStackCloud ----- - -. Update your environment configuration to use the specific API version for Azure Stack Hub: -+ -[source,terminal] ----- -$ az cloud update --profile 2019-03-01-hybrid ----- -endif::ash[] - -. Log in to the Azure CLI: -+ -[source,terminal] ----- -$ az login ----- -ifdef::ash[] -+ -If you are in a multitenant environment, you must also supply the tenant ID. -endif::ash[] - -. If your Azure account uses subscriptions, ensure that you are using the right -subscription: - -.. 
View the list of available accounts and record the `tenantId` value for the -subscription you want to use for your cluster: -+ -[source,terminal] ----- -$ az account list --refresh ----- -+ -.Example output -[source,terminal] ----- -[ - { -ifndef::ash[] - "cloudName": "AzureCloud", -endif::[] -ifdef::ash[] - "cloudName": "AzureStackCloud", -endif::[] - "id": "9bab1460-96d5-40b3-a78e-17b15e978a80", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee", - "user": { - "name": "you@example.com", - "type": "user" - } - } -] ----- - -.. View your active account details and confirm that the `tenantId` value matches -the subscription you want to use: -+ -[source,terminal] ----- -$ az account show ----- -+ -.Example output -[source,terminal] ----- -{ -ifndef::ash[] - "environmentName": "AzureCloud", -endif::[] -ifdef::ash[] - "environmentName": "AzureStackCloud", -endif::[] - "id": "9bab1460-96d5-40b3-a78e-17b15e978a80", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee", <1> - "user": { - "name": "you@example.com", - "type": "user" - } -} ----- -<1> Ensure that the value of the `tenantId` parameter corresponds to the subscription that you want to use. - -.. If you are not using the right subscription, change the active subscription: -+ -[source,terminal] ----- -$ az account set -s <subscription_id> <1> ----- -<1> Specify the subscription ID. - -.. Verify the subscription ID update: -+ -[source,terminal] ----- -$ az account show ----- -+ -.Example output -[source,terminal] ----- -{ -ifndef::ash[] - "environmentName": "AzureCloud", -endif::[] -ifdef::ash[] - "environmentName": "AzureStackCloud", -endif::[] - "id": "33212d16-bdf6-45cb-b038-f6565b61edda", - "isDefault": true, - "name": "Subscription Name", - "state": "Enabled", - "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee", - "user": { - "name": "you@example.com", - "type": "user" - } -} ----- - -. Record the `tenantId` and `id` parameter values from the output. You need these values during the {product-title} installation. - -ifdef::ash[] -. Create the service principal for your account: -+ -[source,terminal] ----- -$ az ad sp create-for-rbac --role Contributor --name <service_principal> \ <1> - --scopes /subscriptions/<subscription_id> \ <2> - --years <years> <3> ----- -<1> Specify the service principal name. -<2> Specify the subscription ID. -<3> Specify the number of years. By default, a service principal expires in one year. By using the `--years` option you can extend the validity of your service principal. -+ -.Example output -[source,terminal] ----- -Creating 'Contributor' role assignment under scope '/subscriptions/<subscription_id>' -The output includes credentials that you must protect. Be sure that you do not -include these credentials in your code or check the credentials into your source -control. For more information, see https://aka.ms/azadsp-cli -{ - "appId": "ac461d78-bf4b-4387-ad16-7e32e328aec6", - "displayName": "<service_principal>", - "password": "00000000-0000-0000-0000-000000000000", - "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee" -} ----- -endif::ash[] - -ifndef::ash[] -. Create the service principal for your account: -+ -[source,terminal] ----- -$ az ad sp create-for-rbac --role <role_name> \// <1> - --name <service_principal> \// <2> - --scopes /subscriptions/<subscription_id> <3> ----- -<1> Defines the role name.
You can use the `Contributor` role, or you can specify a custom role which contains the necessary permissions. -<2> Defines the service principal name. -<3> Specifies the subscription ID. -+ -.Example output -[source,terminal] ----- -Creating 'Contributor' role assignment under scope '/subscriptions/<subscription_id>' -The output includes credentials that you must protect. Be sure that you do not -include these credentials in your code or check the credentials into your source -control. For more information, see https://aka.ms/azadsp-cli -{ - "appId": "ac461d78-bf4b-4387-ad16-7e32e328aec6", - "displayName": "<service_principal>", - "password": "00000000-0000-0000-0000-000000000000", - "tenantId": "8049c7e9-c3de-762d-a54e-dc3f6be6a7ee" -} ----- -endif::ash[] - -. Record the values of the `appId` and `password` parameters from the previous -output. You need these values during {product-title} installation. - -ifndef::ash[] -. If you applied the `Contributor` role to your service principal, assign the `User Access Administrator` role by running the following command: -+ -[source,terminal] ----- -$ az role assignment create --role "User Access Administrator" \ - --assignee-object-id $(az ad sp show --id <appId> --query id -o tsv) <1> ----- -<1> Specify the `appId` parameter value for your service principal. -endif::ash[] - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-account"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-account"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!upi: -endif::[] \ No newline at end of file diff --git a/modules/installation-azure-stack-hub-config-yaml.adoc b/modules/installation-azure-stack-hub-config-yaml.adoc deleted file mode 100644 index a4717e2b1223..000000000000 --- a/modules/installation-azure-stack-hub-config-yaml.adoc +++ /dev/null @@ -1,220 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash-default: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash-network: -endif::[] - -[id="installation-azure-stack-hub-config-yaml_{context}"] -= Sample customized install-config.yaml file for Azure Stack Hub - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. Use it as a resource to enter parameter values into the installation configuration file that you created manually.
-==== - -ifdef::ash[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: <1> - name: master - platform: - azure: - osDisk: - diskSizeGB: 1024 <2> - diskType: premium_LRS - replicas: 3 -compute: <1> -- name: worker - platform: - azure: - osDisk: - diskSizeGB: 512 <2> - diskType: premium_LRS - replicas: 0 -metadata: - name: test-cluster <3> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <4> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - armEndpoint: azurestack_arm_endpoint <5> - baseDomainResourceGroupName: resource_group <6> - region: azure_stack_local_region <7> - resourceGroupName: existing_resource_group <8> - outboundType: Loadbalancer - cloudName: AzureStackCloud <9> -pullSecret: '{"auths": ...}' <10> -ifndef::openshift-origin[] -fips: false <11> -additionalTrustBundle: | <12> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <11> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin[] ----- -<1> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<2> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -<3> Specify the name of the cluster. -<4> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<5> Specify the Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -<6> Specify the name of the resource group that contains the DNS zone for your base domain. -<7> Specify the name of your Azure Stack Hub local region. -<8> Specify the name of an already existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -<9> Specify the Azure Stack Hub environment as your target platform. -<10> Specify the pull secret required to authenticate your cluster. -ifndef::openshift-origin[] -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<12> If your Azure Stack Hub environment uses an internal certificate authority (CA), add the necessary certificate bundle in `.pem` format. -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<11> If your Azure Stack Hub environment uses an internal certificate authority (CA), add the necessary certificate bundle in `.pem` format. -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::ash[] - -ifdef::ash-default,ash-network[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -credentialsMode: Manual -controlPlane: <2> <3> - name: master - platform: - azure: - osDisk: - diskSizeGB: 1024 <4> - diskType: premium_LRS - replicas: 3 -compute: <2> -- name: worker - platform: - azure: - osDisk: - diskSizeGB: 512 <4> - diskType: premium_LRS - replicas: 3 -metadata: - name: test-cluster <1> <5> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - armEndpoint: azurestack_arm_endpoint <1> <7> - baseDomainResourceGroupName: resource_group <1> <8> - region: azure_stack_local_region <1> <9> - resourceGroupName: existing_resource_group <10> - outboundType: Loadbalancer - cloudName: AzureStackCloud <1> - clusterOSImage: https://vhdsa.blob.example.example.com/vhd/rhcos-410.84.202112040202-0-azurestack.x86_64.vhd <1> <11> -pullSecret: '{"auths": ...}' <1> <12> -ifndef::openshift-origin[] -fips: false <13> -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -ifndef::openshift-origin[] -additionalTrustBundle: | <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <14> -endif::openshift-origin[] - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- ----- -<1> Required. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<4> You can specify the size of the disk to use in GB. Minimum recommendation for control plane nodes is 1024 GB. -<5> The name of the cluster. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<7> The Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -<8> The name of the resource group that contains the DNS zone for your base domain. -<9> The name of your Azure Stack Hub local region. -<10> The name of an existing resource group to install your cluster to. If undefined, a new resource group is created for the cluster. -<11> The URL of a storage blob in the Azure Stack environment that contains an {op-system} VHD. -<12> The pull secret required to authenticate your cluster. -ifndef::openshift-origin[] -<13> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead.
-+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifndef::openshift-origin[] -<15> If the Azure Stack Hub environment is using an internal Certificate Authority (CA), adding the CA certificate is required. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> If the Azure Stack Hub environment is using an internal Certificate Authority (CA), adding the CA certificate is required. -endif::openshift-origin[] - -endif::ash-default,ash-network[] - -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash-default: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash-network: -endif::[] diff --git a/modules/installation-azure-stack-hub-network-config.adoc b/modules/installation-azure-stack-hub-network-config.adoc deleted file mode 100644 index fe2129ed4101..000000000000 --- a/modules/installation-azure-stack-hub-network-config.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-account.adoc - -[id="installation-azure-stack-hub-network-config_{context}"] -= Configuring a DNS zone in Azure Stack Hub - -To successfully install {product-title} on Azure Stack Hub, you must create DNS records in an Azure Stack Hub DNS zone. The DNS zone must be authoritative for the domain. To delegate a registrar's DNS zone to Azure Stack Hub, see Microsoft's documentation for link:https://docs.microsoft.com/en-us/azure-stack/operator/azure-stack-integrate-dns?view=azs-2102[Azure Stack Hub datacenter DNS integration]. diff --git a/modules/installation-azure-stack-hub-permissions.adoc b/modules/installation-azure-stack-hub-permissions.adoc deleted file mode 100644 index fd442632162d..000000000000 --- a/modules/installation-azure-stack-hub-permissions.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -[id="installation-azure-stack-hub-permissions_{context}"] -= Required Azure Stack Hub roles - -Your Microsoft Azure Stack Hub account must have the following roles for the subscription that you use: - -* `Owner` - -To set roles on the Azure portal, see the link:https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-manage-permissions?view=azs-2102[Manage access to resources in Azure Stack Hub with role-based access control] in the Microsoft documentation. 
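As an informal check that is not part of the documented steps, you can list the role assignments for your account with the Azure CLI and confirm that `Owner` is present for the subscription. In this sketch, `<user_or_sp_object_id>` is a placeholder for your user or service principal object ID:

[source,terminal]
----
$ az role assignment list --assignee <user_or_sp_object_id> \
    --query "[].roleDefinitionName" -o tsv
----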
diff --git a/modules/installation-azure-tested-machine-types.adoc b/modules/installation-azure-tested-machine-types.adoc deleted file mode 100644 index d0ead0fbd221..000000000000 --- a/modules/installation-azure-tested-machine-types.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_azure/installing-azure-customizations.adoc -// installing/installing_azure/installing-azure-government-region.adoc -// installing/installing_azure/installing-azure-network-customizations.adoc -// installing/installing_azure/installing-azure-private.adoc -// installing/installing_azure/installing-azure-user-infra.adoc -// installing/installing_azure/installing-azure-vnet.adoc - -[id="installation-azure-tested-machine-types_{context}"] -= Tested instance types for Azure - -The following Microsoft Azure instance types have been tested with {product-title}. - -.Machine types based on 64-bit x86 architecture -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/azure/tested_instance_types_x86_64.md[] -==== diff --git a/modules/installation-azure-user-defined-routing.adoc b/modules/installation-azure-user-defined-routing.adoc deleted file mode 100644 index f06596a66d59..000000000000 --- a/modules/installation-azure-user-defined-routing.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-private.adoc - -[id="installation-azure-user-defined-routing_{context}"] -= User-defined outbound routing - -In {product-title}, you can choose your own outbound routing for a cluster to -connect to the internet. This allows you to skip the creation of public IP -addresses and the public load balancer. - -You can configure user-defined routing by modifying parameters in the -`install-config.yaml` file before installing your cluster. A pre-existing VNet -is required to use outbound routing when installing a cluster; the installation -program is not responsible for configuring this. - -When configuring a cluster to use user-defined routing, the installation program -does not create the following resources: - -* Outbound rules for access to the internet. -* Public IPs for the public load balancer. -* Kubernetes Service object to add the cluster machines to the public load -balancer for outbound requests. - -You must ensure the following items are available before setting user-defined -routing: - -* Egress to the internet is possible to pull container images, unless using an -{product-registry} mirror. -* The cluster can access Azure APIs. -* Various allowlist endpoints are configured. You can reference these endpoints -in the _Configuring your firewall_ section. - -There are several pre-existing networking setups that are supported for internet -access using user-defined routing. - -[discrete] -== Private cluster with network address translation - -You can use link:https://docs.microsoft.com/en-us/azure/virtual-network/nat-overview[Azure VNET network address translation (NAT)] -to provide outbound internet access for the subnets in your cluster. You can -reference -link:https://docs.microsoft.com/en-us/azure/virtual-network/quickstart-create-nat-gateway-cli[Create a NAT gateway using Azure CLI] -in the Azure documentation for configuration instructions. - -When using a VNet setup with Azure NAT and user-defined routing configured, you -can create a private cluster with no public endpoints. 
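The linked Azure documentation walks through the full NAT gateway setup. As a rough sketch only, using placeholder resource names rather than values from this procedure, the Azure CLI flow for attaching a NAT gateway to an existing subnet looks approximately like this:

[source,terminal]
----
$ az network public-ip create -g <resource_group> -n <nat_public_ip> --sku Standard

$ az network nat gateway create -g <resource_group> -n <nat_gateway> \
    --public-ip-addresses <nat_public_ip>

$ az network vnet subnet update -g <resource_group> --vnet-name <vnet_name> \
    -n <subnet_name> --nat-gateway <nat_gateway>
----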
- -[discrete] -== Private cluster with Azure Firewall - -You can use Azure Firewall to provide outbound routing for the VNet used to -install the cluster. You can learn more about -link:https://docs.microsoft.com/en-us/azure/aks/egress-outboundtype#deploy-a-cluster-with-outbound-type-of-udr-and-azure-firewall[providing user-defined routing with Azure Firewall] -in the Azure documentation. - -When using a VNet setup with Azure Firewall and user-defined routing configured, -you can create a private cluster with no public endpoints. - -[discrete] -== Private cluster with a proxy configuration - -You can use a proxy with user-defined routing to allow egress to the internet. -You must ensure that cluster Operators do not access Azure APIs using a -proxy; Operators must have access to Azure APIs outside of the proxy. - -When using the default route table for subnets, with `0.0.0.0/0` populated -automatically by Azure, all Azure API requests are routed over Azure's internal -network even though the IP addresses are public. As long as the Network Security -Group rules allow egress to Azure API endpoints, proxies with user-defined -routing configured allow you to create private clusters with no public -endpoints. - -[discrete] -== Private cluster with no internet access - -You can install a private network that restricts all access to the internet, except the Azure API. This is accomplished by mirroring the release image registry locally. Your cluster must have access to the following: - -* An {product-registry} mirror that allows for pulling container images -* Access to Azure APIs - -With these requirements available, you can use user-defined routing to create -private clusters with no public endpoints. diff --git a/modules/installation-azure-user-infra-completing.adoc b/modules/installation-azure-user-infra-completing.adoc deleted file mode 100644 index 2cc4fb5a7edd..000000000000 --- a/modules/installation-azure-user-infra-completing.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-completing_{context}"] -= Completing an {cp} installation on user-provisioned infrastructure - -After you start the {product-title} installation on Microsoft {cp} -user-provisioned infrastructure, you can monitor the cluster events until the -cluster is ready. - -.Prerequisites - -* Deploy the bootstrap machine for an {product-title} cluster on user-provisioned {cp} infrastructure. -* Install the `oc` CLI and log in. - -.Procedure - -* Complete the cluster installation: -+ -[source,terminal] ----- -$ ./openshift-install --dir <installation_directory> wait-for install-complete <1> ----- -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the cluster to initialize... ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. 
If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-user-infra-deploying-rhcos.adoc b/modules/installation-azure-user-infra-deploying-rhcos.adoc deleted file mode 100644 index 7dcb6ecc7f7a..000000000000 --- a/modules/installation-azure-user-infra-deploying-rhcos.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-deploying-rhcos_{context}"] -= Deploying the {op-system} cluster image for the {cp} infrastructure - -You must use a valid {op-system-first} image for Microsoft {cp} for your -{product-title} nodes. - -.Prerequisites - -* Configure an Azure account. - -* Generate the Ignition config files for your cluster. - -* Store the {op-system} virtual hard disk (VHD) cluster image in an Azure storage container. - -* Store the bootstrap Ignition config file in an Azure storage container. - -.Procedure - -. Copy the template from the *ARM template for image storage* section of -this topic and save it as `02_storage.json` in your cluster's installation directory. This template -describes the image storage that your cluster requires. - -. Export the {op-system} VHD blob URL as a variable: -+ -[source,terminal] ----- -$ export VHD_BLOB_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c vhd -n "rhcos.vhd" -o tsv` ----- - -. Deploy the cluster image: -+ -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/02_storage.json" \ - --parameters vhdBlobURL="${VHD_BLOB_URL}" \ <1> - --parameters baseName="${INFRA_ID}" \ <2> - --parameters storageAccount="${CLUSTER_NAME}sa" \ <3> - --parameters architecture="<architecture>" <4> ----- -<1> The blob URL of the {op-system} VHD to be used to create master and worker machines. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -<3> The name of your Azure storage account. -<4> Specify the system architecture. Valid values are `x64` (default) or `Arm64`. 
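Before moving on, you might want to confirm that the image deployment finished successfully. One possible check, which is not part of the original procedure, is to review the provisioning state of the deployments in the resource group:

[source,terminal]
----
$ az deployment group list -g ${RESOURCE_GROUP} \
    --query "[].{name:name, state:properties.provisioningState}" -o table
----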
- -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-azure-user-infra-uploading-rhcos.adoc b/modules/installation-azure-user-infra-uploading-rhcos.adoc deleted file mode 100644 index b244c84d2f99..000000000000 --- a/modules/installation-azure-user-infra-uploading-rhcos.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash-ipi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash-ipi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-uploading-rhcos_{context}"] -ifndef::ash-ipi[] -= Uploading the {op-system} cluster image and bootstrap Ignition config file -endif::ash-ipi[] -ifdef::ash-ipi[] -= Uploading the {op-system} cluster image -endif::ash-ipi[] - -ifndef::ash-ipi[] -The Azure client does not support deployments based on files existing locally. You -must copy and store the {op-system} virtual hard disk (VHD) cluster image and bootstrap Ignition config file in a storage container so they are accessible during deployment. -endif::ash-ipi[] - -ifdef::ash-ipi[] -You must download the {op-system} virtual hard disk (VHD) cluster image and upload it to your Azure Stack Hub environment so that it is accessible during deployment. -endif::ash-ipi[] - -.Prerequisites - -* Configure an Azure account. -ifndef::ash-ipi[] -* Generate the Ignition config files for your cluster. -endif::ash-ipi[] - -.Procedure - -ifndef::ash-ipi[] -. Create an Azure storage account to store the VHD cluster image: -+ -[source,terminal] ----- -$ az storage account create -g ${RESOURCE_GROUP} --location ${AZURE_REGION} --name ${CLUSTER_NAME}sa --kind Storage --sku Standard_LRS ----- -+ -[WARNING] -==== -The Azure storage account name must be between 3 and 24 characters in length and -use numbers and lower-case letters only. If your `CLUSTER_NAME` variable does -not follow these restrictions, you must manually define the Azure storage -account name. For more information on Azure storage account name restrictions, -see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/error-storage-account-name[Resolve errors for storage account names] -in the Azure documentation. -==== - -. Export the storage account key as an environment variable: -+ -[source,terminal] ----- -$ export ACCOUNT_KEY=`az storage account keys list -g ${RESOURCE_GROUP} --account-name ${CLUSTER_NAME}sa --query "[0].value" -o tsv` ----- - -. 
Export the URL of the {op-system} VHD to an environment variable: -+ -ifdef::azure[] -[source,terminal] ----- -$ export VHD_URL=`openshift-install coreos print-stream-json | jq -r '.architectures.<architecture>."rhel-coreos-extensions"."azure-disk".url'` ----- -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ export COMPRESSED_VHD_URL=$(openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.azurestack.formats."vhd.gz".disk.location') ----- -endif::ash[] -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must specify an image with the highest version that is -less than or equal to the {product-title} version that you install. Use the image version -that matches your {product-title} version if it is available. -==== - -. Create the storage container for the VHD: -+ -[source,terminal] ----- -$ az storage container create --name vhd --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} ----- -ifdef::ash[] -. Download the compressed {op-system} VHD file locally: -+ -[source,terminal] ----- -$ curl -O -L ${COMPRESSED_VHD_URL} ----- - -. Decompress the VHD file. -+ -[NOTE] -==== -The decompressed VHD file is approximately 16 GB, so be sure that your host system has 16 GB of free space available. You can delete the VHD file after you upload it. -==== -endif::ash[] - -. Copy the local VHD to a blob: -+ -ifdef::azure[] -[source,terminal] ----- -$ az storage blob copy start --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} --destination-blob "rhcos.vhd" --destination-container vhd --source-uri "${VHD_URL}" ----- -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c vhd -n "rhcos.vhd" -f rhcos-<rhcos_version>-azurestack.x86_64.vhd ----- -endif::ash[] - -. Create a blob storage container and upload the generated `bootstrap.ign` file: -+ -[source,terminal] ----- -$ az storage container create --name files --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} ----- -+ -[source,terminal] ----- -$ az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -c "files" -f "<installation_directory>/bootstrap.ign" -n "bootstrap.ign" ----- -endif::ash-ipi[] - -ifdef::ash-ipi[] -. Obtain the {op-system} VHD cluster image: -.. Export the URL of the {op-system} VHD to an environment variable. -+ -[source,terminal] ----- -$ export COMPRESSED_VHD_URL=$(openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.azurestack.formats."vhd.gz".disk.location') ----- -.. Download the compressed {op-system} VHD file locally. -+ -[source,terminal] ----- -$ curl -O -L ${COMPRESSED_VHD_URL} ----- -. Decompress the VHD file. -+ -[NOTE] -==== -The decompressed VHD file is approximately 16 GB, so be sure that your host system has 16 GB of free space available. The VHD file can be deleted once you have uploaded it. -==== -. Upload the local VHD to the Azure Stack Hub environment, making sure that the blob is publicly available. For example, you can upload the VHD to a blob using the `az` cli or the web portal. 
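For example, an upload with the Azure CLI might look like the following sketch, which mirrors the `az storage blob upload` command shown earlier in this module. The storage account name, account key, and container name are placeholders that depend on your Azure Stack Hub environment:

[source,terminal]
----
$ az storage blob upload --account-name <storage_account> --account-key <account_key> \
    -c <container> -n "rhcos.vhd" -f rhcos-<rhcos_version>-azurestack.x86_64.vhd
----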
-endif::ash-ipi[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash-ipi: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash-ipi: -endif::[] diff --git a/modules/installation-azure-user-infra-wait-for-bootstrap.adoc b/modules/installation-azure-user-infra-wait-for-bootstrap.adoc deleted file mode 100644 index c30d1e24036d..000000000000 --- a/modules/installation-azure-user-infra-wait-for-bootstrap.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-azure-user-infra-wait-for-bootstrap_{context}"] -= Wait for bootstrap completion and remove bootstrap resources in {cp} - -After you create all of the required infrastructure in Microsoft {cp}, wait for -the bootstrap process to complete on the machines that you provisioned by using -the Ignition config files that you generated with the installation program. - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. -* Create and configure networking and load balancers in {cp}. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. - -.Procedure - -. Change to the directory that contains the installation program and run the -following command: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete --dir <installation_directory> \ <1> - --log-level info <2> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -+ -If the command exits without a `FATAL` warning, your production control plane -has initialized. - -. Delete the bootstrap resources: -+ -[source,terminal] ----- -$ az network nsg rule delete -g ${RESOURCE_GROUP} --nsg-name ${INFRA_ID}-nsg --name bootstrap_ssh_in -$ az vm stop -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap -$ az vm deallocate -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap -$ az vm delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap --yes -$ az disk delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap_OSDisk --no-wait --yes -$ az network nic delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-nic --no-wait -$ az storage blob delete --account-key ${ACCOUNT_KEY} --account-name ${CLUSTER_NAME}sa --container-name files --name bootstrap.ign -$ az network public-ip delete -g ${RESOURCE_GROUP} --name ${INFRA_ID}-bootstrap-ssh-pip ----- - -[NOTE] -==== -If you do not delete the bootstrap server, installation may not succeed due to API traffic being routed to the bootstrap server. 
-==== - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-bare-metal-agent-installer-config-yaml.adoc b/modules/installation-bare-metal-agent-installer-config-yaml.adoc deleted file mode 100644 index 20cd5025e86b..000000000000 --- a/modules/installation-bare-metal-agent-installer-config-yaml.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: - -//* installing-with-agent/installing-with-agent.adoc -// Re-used content from Sample install-config.yaml file for bare metal without conditionals - -:_content-type: CONCEPT -[id="installation-bare-metal-agent-installer-config-yaml_{context}"] -= Sample install-config.yaml file for bare metal - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- name: worker - replicas: 0 <3> -controlPlane: <2> - name: master - replicas: 1 <4> -metadata: - name: sno-cluster <5> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <6> - hostPrefix: 23 <7> - networkType: OVNKubernetes <8> - serviceNetwork: <9> - - 172.30.0.0/16 -platform: - none: {} <10> -fips: false <11> -pullSecret: '{"auths": ...}' <12> -sshKey: 'ssh-ed25519 AAAA...' <13> ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<3> This parameter controls the number of compute machines that the Agent-based installation waits to discover before triggering the installation process. It is the number of compute machines that must be booted with the generated ISO. - -+ -[NOTE] -==== -If you are installing a three-node cluster, do not deploy any compute machines when you install the {op-system-first} machines. -==== -+ -<4> The number of control plane machines that you add to the cluster. Because the cluster uses these values as the number of etcd endpoints in the cluster, the value must match the number of control plane machines that you deploy. -<5> The cluster name that you specified in your DNS records. -<6> A block of IP addresses from which pod IP addresses are allocated. This block must not overlap with existing physical networks. These IP addresses are used for the pod network. If you need to access the pods from an external network, you must configure load balancers and routers to manage the traffic. -+ -[NOTE] -==== -Class E CIDR range is reserved for a future use. To use the Class E CIDR range, you must ensure your networking environment accepts the IP addresses within the Class E CIDR range. -==== -+ -<7> The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `/23` subnet out of the given `cidr`, which allows for 510 (2^(32 - 23) - 2) pod IP addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. 
-<8> The cluster network plugin to install. The supported values are `OVNKubernetes` (default value) and `OpenShiftSDN`. -<9> The IP address pool to use for service IP addresses. You can enter only one IP address pool. This block must not overlap with existing physical networks. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. -<10> You must set the platform to `none` for a single-node cluster. You can set the platform to either `vsphere` or `baremetal` for multi-node clusters. -+ -[NOTE] -==== -If you set the platform to `vsphere` or `baremetal`, you can configure IP address endpoints for cluster nodes in three ways: - -* IPv4 -* IPv6 -* IPv4 and IPv6 in parallel (dual-stack) - -.Example of dual-stack networking -[source,yaml] ----- -networking: - clusterNetwork: - - cidr: 172.21.0.0/16 - hostPrefix: 23 - - cidr: fd02::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 192.168.11.0/16 - - cidr: 2001:DB8::/32 - serviceNetwork: - - 172.22.0.0/16 - - fd03::/112 - networkType: OVNKubernetes -platform: - baremetal: - apiVIPs: - - 192.168.11.3 - - 2001:DB8::4 - ingressVIPs: - - 192.168.11.4 - - 2001:DB8::5 ----- -==== -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== - -<12> This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -<13> The SSH public key for the `core` user in {op-system-first}. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. 
-==== diff --git a/modules/installation-bare-metal-config-yaml.adoc b/modules/installation-bare-metal-config-yaml.adoc deleted file mode 100644 index 74c75bcbc5ed..000000000000 --- a/modules/installation-bare-metal-config-yaml.adoc +++ /dev/null @@ -1,347 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing-rhv-restricted-network.adoc - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:restricted: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:agnostic: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:rhv: -endif::[] - -:_content-type: CONCEPT -// Assumption is that attribute once outside ifdef works for several level one headings. -[id="installation-bare-metal-config-yaml_{context}"] -ifndef::ibm-z,ibm-z-kvm,ibm-power,agnostic,rhv[] -= Sample install-config.yaml file for bare metal -endif::ibm-z,ibm-z-kvm,ibm-power,agnostic,rhv[] -ifdef::ibm-z,ibm-z-kvm[] -= Sample install-config.yaml file for {ibmzProductName} -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-power[] -= Sample install-config.yaml file for {ibmpowerProductName} -endif::ibm-power[] -ifdef::agnostic[] -= Sample install-config.yaml file for other platforms -endif::agnostic[] -ifdef::rhv[] -= Sample install-config.yaml file for RHV -endif::rhv[] - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. 
- -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- hyperthreading: Enabled <3> - name: worker - replicas: 0 <4> -ifeval::["{context}" == "installing-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-power"] - architecture: ppc64le -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] - architecture : ppc64le -endif::[] -controlPlane: <2> - hyperthreading: Enabled <3> - name: master - replicas: 3 <5> -ifeval::["{context}" == "installing-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] - architecture: s390x -endif::[] -ifeval::["{context}" == "installing-ibm-power"] - architecture: ppc64le -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] - architecture: ppc64le -endif::[] -metadata: - name: test <6> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <7> - hostPrefix: 23 <8> - networkType: OVNKubernetes <9> - serviceNetwork: <10> - - 172.30.0.0/16 -platform: - none: {} <11> -ifndef::openshift-origin[] -fips: false <12> -endif::openshift-origin[] -ifndef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths": ...}' <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths": ...}' <12> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <13> -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <12> -endif::openshift-origin[] -ifndef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <14> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' 
<13> -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <15> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <16> -- mirrors: -ifdef::ibm-z,ibm-z-kvm[] - - <local_repository>/ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_repository>/ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -ifndef::ibm-z,ibm-z-kvm[] - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <14> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <15> -- mirrors: -ifdef::ibm-z,ibm-z-kvm[] - - <local_repository>/ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_repository>/ocp4/openshift4 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -ifndef::ibm-z,ibm-z-kvm[] - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::ibm-z,ibm-z-kvm[] -endif::openshift-origin[] -endif::restricted[] ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<3> Specifies whether to enable or disable simultaneous multithreading (SMT), or hyperthreading. By default, SMT is enabled to increase the performance of the cores in your machines. You can disable it by setting the parameter value to `Disabled`. If you disable SMT, you must disable it in all cluster machines; this includes both control plane and compute machines. -ifndef::ibm-z,ibm-z-kvm[] -+ -[NOTE] -==== -Simultaneous multithreading (SMT) is enabled by default. If SMT is not enabled in your BIOS settings, the `hyperthreading` parameter has no effect. -==== -+ -[IMPORTANT] -==== -If you disable `hyperthreading`, whether in the BIOS or in the `install-config.yaml` file, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -+ -[NOTE] -==== -Simultaneous multithreading (SMT) is enabled by default. If SMT is not available on your {product-title} nodes, the `hyperthreading` parameter has no effect. -==== -+ -[IMPORTANT] -==== -If you disable `hyperthreading`, whether on your {product-title} nodes or in the `install-config.yaml` file, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -endif::ibm-z,ibm-z-kvm[] -<4> You must set this value to `0` when you install {product-title} on user-provisioned infrastructure. 
In installer-provisioned installations, the parameter controls the number of compute machines that the cluster creates and manages for you. In user-provisioned installations, you must manually deploy the compute machines before you finish installing the cluster. -+ -[NOTE] -==== -If you are installing a three-node cluster, do not deploy any compute machines when you install the {op-system-first} machines. -==== -+ -<5> The number of control plane machines that you add to the cluster. Because the cluster uses these values as the number of etcd endpoints in the cluster, the value must match the number of control plane machines that you deploy. -<6> The cluster name that you specified in your DNS records. -<7> A block of IP addresses from which pod IP addresses are allocated. This block must not overlap with existing physical networks. These IP addresses are used for the pod network. If you need to access the pods from an external network, you must configure load balancers and routers to manage the traffic. -+ -[NOTE] -==== -Class E CIDR range is reserved for a future use. To use the Class E CIDR range, you must ensure your networking environment accepts the IP addresses within the Class E CIDR range. -==== -+ -<8> The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23`, then each node is assigned a `/23` subnet out of the given `cidr`, which allows for 510 (2^(32 - 23) - 2) pod IP addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. -<9> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<10> The IP address pool to use for service IP addresses. You can enter only one IP address pool. This block must not overlap with existing physical networks. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. -<11> You must set the platform to `none`. You cannot provide additional platform configuration variables for -ifndef::ibm-z,ibm-z-kvm,ibm-power,rhv[your platform.] -ifdef::ibm-z,ibm-z-kvm[{ibmzProductName} infrastructure.] -ifdef::ibm-power[{ibmpowerProductName} infrastructure.] -ifdef::rhv[RHV infrastructure.] -+ -[IMPORTANT] -==== -Clusters that are installed with the platform type `none` are unable to use some features, such as managing compute machines with the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that would normally support the feature. This parameter cannot be changed after installation. -==== -ifndef::openshift-origin[] -<12> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on `x86_64`, `ppc64le`, and `s390x` architectures. -==== -endif::openshift-origin[] -ifndef::restricted[] -ifndef::openshift-origin[] -<13> The {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. 
-endif::openshift-origin[] -ifdef::openshift-origin[] -<12> The {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -<13> For `<local_registry>`, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, `registry.example.com` or `registry.example.com:5000`. For `<credentials>`, specify the base64-encoded user name and password for your mirror registry. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> For `<local_registry>`, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, `registry.example.com` or `registry.example.com:5000`. For `<credentials>`, specify the base64-encoded user name and password for your mirror registry. -endif::openshift-origin[] -endif::restricted[] -ifndef::openshift-origin[] -<14> The SSH public key for the `core` user in {op-system-first}. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> The SSH public key for the `core` user in {op-system-first}. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::restricted[] -ifndef::ibm-z,ibm-z-kvm[] -ifndef::openshift-origin[] -<15> Provide the contents of the certificate file that you used for your mirror registry. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> Provide the contents of the certificate file that you used for your mirror registry. -endif::openshift-origin[] -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -<15> Add the `additionalTrustBundle` parameter and value. The value must be the contents of the certificate file that you used for your mirror registry. The certificate file can be an existing, trusted certificate authority or the self-signed certificate that you generated for the mirror registry. -endif::ibm-z,ibm-z-kvm[] -ifndef::openshift-origin[] -<16> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> Provide the `imageContentSources` section from the output of the command to mirror the repository. 
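The base64-encoded value that `<credentials>` refers to can usually be produced with a standard shell utility, as in the following sketch; the user name and password are placeholders for your own mirror registry credentials:

[source,terminal]
----
$ echo -n '<user_name>:<password>' | base64 -w0 <1>
----
<1> Replace `<user_name>` and `<password>` with the credentials for your mirror registry. The `-n` flag keeps a trailing newline out of the encoded value.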
-endif::openshift-origin[] -endif::restricted[] - - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:!agnostic: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:!rhv: -endif::[] diff --git a/modules/installation-bootstrap-gather.adoc b/modules/installation-bootstrap-gather.adoc deleted file mode 100644 index 68c03e91fe09..000000000000 --- a/modules/installation-bootstrap-gather.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-troubleshooting.adoc -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="installation-bootstrap-gather_{context}"] -= Gathering logs from a failed installation - -If you gave an SSH key to your installation program, you can gather data about -your failed installation. - -[NOTE] -==== -You use a different command to gather logs about an unsuccessful installation -than to gather logs from a running cluster. If you must gather logs from a -running cluster, use the `oc adm must-gather` command. -==== - -.Prerequisites - -* Your {product-title} installation failed before the bootstrap process finished. The bootstrap node is running and accessible through SSH. -* The `ssh-agent` process is active on your computer, and you provided the same SSH key to both the `ssh-agent` process and the installation program. -* If you tried to install a cluster on infrastructure that you provisioned, you must have the fully qualified domain names of the bootstrap and control plane nodes. - -.Procedure - -. Generate the commands that are required to obtain the installation logs from -the bootstrap and control plane machines: -+ --- -** If you used installer-provisioned infrastructure, change to the directory that contains the installation program and run the following command: -+ -[source,terminal] ----- -$ ./openshift-install gather bootstrap --dir <installation_directory> <1> ----- -<1> `installation_directory` is the directory you specified when you ran `./openshift-install create cluster`. This directory contains the {product-title} -definition files that the installation program creates. -+ -For installer-provisioned infrastructure, the installation program stores -information about the cluster, so you do not specify the hostnames or IP -addresses. - -** If you used infrastructure that you provisioned yourself, change to the directory that contains the installation program and run the following -command: -+ -[source,terminal] ----- -$ ./openshift-install gather bootstrap --dir <installation_directory> \ <1> - --bootstrap <bootstrap_address> \ <2> - --master <master_1_address> \ <3> - --master <master_2_address> \ <3> - --master <master_3_address>" <3> ----- -<1> For `installation_directory`, specify the same directory you specified when you ran `./openshift-install create cluster`. 
This directory contains the {product-title} -definition files that the installation program creates. -<2> `<bootstrap_address>` is the fully qualified domain name or IP address of -the cluster's bootstrap machine. -<3> For each control plane, or master, machine in your cluster, replace `<master_*_address>` with its fully qualified domain name or IP address. -+ -[NOTE] -==== -A default cluster contains three control plane machines. List all of your control plane machines as shown, no matter how many your cluster uses. -==== --- -+ -.Example output -[source,terminal] ----- -INFO Pulling debug logs from the bootstrap machine -INFO Bootstrap gather logs captured here "<installation_directory>/log-bundle-<timestamp>.tar.gz" ----- -+ -If you open a Red Hat support case about your installation failure, include -the compressed logs in the case. diff --git a/modules/installation-cis-ibm-cloud.adoc b/modules/installation-cis-ibm-cloud.adoc deleted file mode 100644 index b4ee1501fe71..000000000000 --- a/modules/installation-cis-ibm-cloud.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc -// installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc - -:_content-type: PROCEDURE -[id="installation-cis-ibm-cloud_{context}"] -= Using IBM Cloud Internet Services for DNS resolution - -The installation program uses IBM Cloud Internet Services (CIS) to configure cluster DNS resolution and provide name lookup for a public cluster. - -[NOTE] -==== -This offering does not support IPv6, so dual stack or IPv6 environments are not possible. -==== - -You must create a domain zone in CIS in the same account as your cluster. You must also ensure the zone is authoritative for the domain. You can do this using a root domain or subdomain. - -.Prerequisites - -* You have installed the link:https://www.ibm.com/cloud/cli[IBM Cloud CLI]. -* You have an existing domain and registrar. For more information, see the IBM link:https://cloud.ibm.com/docs/dns?topic=dns-getting-started[documentation]. - -.Procedure - -. Create a CIS instance to use with your cluster: - -.. Install the CIS plugin: -+ -[source,terminal] ----- -$ ibmcloud plugin install cis ----- - -.. Create the CIS instance: -+ -[source,terminal] ----- -$ ibmcloud cis instance-create <instance_name> standard <1> ----- -<1> At a minimum, a `Standard` plan is required for CIS to manage the cluster subdomain and its DNS records. - -. Connect an existing domain to your CIS instance: - -.. Set the context instance for CIS: -+ -[source,terminal] ----- -$ ibmcloud cis instance-set <instance_crn> <1> ----- -<1> The instance cloud resource name. - -.. Add the domain for CIS: -+ -[source,terminal] ----- -$ ibmcloud cis domain-add <domain_name> <1> ----- -<1> The fully qualified domain name. You can use either the root domain or subdomain value as the domain name, depending on which you plan to configure. -+ -[NOTE] -==== -A root domain uses the form `openshiftcorp.com`. A subdomain uses the form `clusters.openshiftcorp.com`. -==== - -. Open the link:https://cloud.ibm.com/catalog/services/internet-services[CIS web console], navigate to the *Overview* page, and note your CIS name servers. These name servers will be used in the next step. - -. Configure the name servers for your domains or subdomains at the domain's registrar or DNS provider. 
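Taken together, the CIS setup in this procedure reduces to a short sequence of commands. The following recap uses placeholder values for the instance name and instance CRN and a sample subdomain for illustration only:

[source,terminal]
----
$ ibmcloud plugin install cis
$ ibmcloud cis instance-create <instance_name> standard
$ ibmcloud cis instance-set <instance_crn>
$ ibmcloud cis domain-add clusters.openshiftcorp.com
----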
For more information, see the IBM Cloud link:https://cloud.ibm.com/docs/cis?topic=cis-getting-started#configure-your-name-servers-with-the-registrar-or-existing-dns-provider[documentation]. \ No newline at end of file diff --git a/modules/installation-cloudformation-bootstrap.adoc b/modules/installation-cloudformation-bootstrap.adoc deleted file mode 100644 index 2ee56936ca8b..000000000000 --- a/modules/installation-cloudformation-bootstrap.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-bootstrap_{context}"] -= CloudFormation template for the bootstrap machine - -You can use the following CloudFormation template to deploy the bootstrap machine that you need for your {product-title} cluster. - -.CloudFormation template for the bootstrap machine -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/04_cluster_bootstrap.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-control-plane.adoc b/modules/installation-cloudformation-control-plane.adoc deleted file mode 100644 index c5cd60b6ca57..000000000000 --- a/modules/installation-cloudformation-control-plane.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-control-plane_{context}"] -= CloudFormation template for control plane machines - -You can use the following CloudFormation template to deploy the control plane -machines that you need for your {product-title} cluster. - -.CloudFormation template for control plane machines -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/05_cluster_master_nodes.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-dns.adoc b/modules/installation-cloudformation-dns.adoc deleted file mode 100644 index fc1b7e3558bb..000000000000 --- a/modules/installation-cloudformation-dns.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-dns_{context}"] -= CloudFormation template for the network and load balancers - -You can use the following CloudFormation template to deploy the networking -objects and load balancers that you need for your {product-title} cluster. - -.CloudFormation template for the network and load balancers -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/02_cluster_infra.yaml[] ----- -==== - -[IMPORTANT] -==== -If you are deploying your cluster to an AWS government or secret region, you must update the `InternalApiServerRecord` to use `CNAME` records. Records of type `ALIAS` are not supported for AWS government regions. 
For example: - -[source,yaml] ----- -Type: CNAME -TTL: 10 -ResourceRecords: -- !GetAtt IntApiElb.DNSName ----- -==== diff --git a/modules/installation-cloudformation-security.adoc b/modules/installation-cloudformation-security.adoc deleted file mode 100644 index 3dc650bfb6e0..000000000000 --- a/modules/installation-cloudformation-security.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-security_{context}"] -= CloudFormation template for security objects - -You can use the following CloudFormation template to deploy the security objects -that you need for your {product-title} cluster. - -.CloudFormation template for security objects -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/03_cluster_security.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-subnet-localzone.adoc b/modules/installation-cloudformation-subnet-localzone.adoc deleted file mode 100644 index 47503db35a56..000000000000 --- a/modules/installation-cloudformation-subnet-localzone.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: REFERENCE -[id="installation-cloudformation-subnet-localzone_{context}"] -= CloudFormation template for the subnet that uses AWS Local Zones - -You can use the following CloudFormation template to deploy the subnet that -you need for your {product-title} cluster that uses AWS Local Zones. - -.CloudFormation template for the subnet -[%collapsible] -==== -[source,yaml] ----- -# CloudFormation template used to create Local Zone subnets and dependencies -AWSTemplateFormatVersion: 2010-09-09 -Description: Template for create Public Local Zone subnets - -Parameters: - VpcId: - Description: VPC Id - Type: String - ZoneName: - Description: Local Zone Name (Example us-east-1-nyc-1a) - Type: String - SubnetName: - Description: Local Zone Name (Example cluster-public-us-east-1-nyc-1a) - Type: String - PublicRouteTableId: - Description: Public Route Table ID to associate the Local Zone subnet - Type: String - PublicSubnetCidr: - AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-4]))$ - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-24. - Default: 10.0.128.0/20 - Description: CIDR block for Public Subnet - Type: String - -Resources: - PublicSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VpcId - CidrBlock: !Ref PublicSubnetCidr - AvailabilityZone: !Ref ZoneName - Tags: - - Key: Name - Value: !Ref SubnetName - - Key: kubernetes.io/cluster/unmanaged - Value: "true" - - PublicSubnetRouteTableAssociation: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet - RouteTableId: !Ref PublicRouteTableId - -Outputs: - PublicSubnetIds: - Description: Subnet IDs of the public subnets. 
- Value: - !Join ["", [!Ref PublicSubnet]] ----- -==== diff --git a/modules/installation-cloudformation-vpc-localzone.adoc b/modules/installation-cloudformation-vpc-localzone.adoc deleted file mode 100644 index e242a8f0f46d..000000000000 --- a/modules/installation-cloudformation-vpc-localzone.adoc +++ /dev/null @@ -1,309 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: REFERENCE -[id="installation-cloudformation-vpc-localzone_{context}"] -= CloudFormation template for the VPC - -You can use the following CloudFormation template to deploy the VPC that -you need for your {product-title} cluster. - -.CloudFormation template for the VPC -[%collapsible] -==== -[source,yaml] ----- -AWSTemplateFormatVersion: 2010-09-09 -Description: Template for Best Practice VPC with 1-3 AZs - -Parameters: - VpcCidr: - AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-4]))$ - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-24. - Default: 10.0.0.0/16 - Description: CIDR block for VPC. - Type: String - AvailabilityZoneCount: - ConstraintDescription: "The number of availability zones. (Min: 1, Max: 3)" - MinValue: 1 - MaxValue: 3 - Default: 1 - Description: "How many AZs to create VPC subnets for. (Min: 1, Max: 3)" - Type: Number - SubnetBits: - ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/19-27. - MinValue: 5 - MaxValue: 13 - Default: 12 - Description: "Size of each subnet to create within the availability zones. (Min: 5 = /27, Max: 13 = /19)" - Type: Number - -Metadata: - AWS::CloudFormation::Interface: - ParameterGroups: - - Label: - default: "Network Configuration" - Parameters: - - VpcCidr - - SubnetBits - - Label: - default: "Availability Zones" - Parameters: - - AvailabilityZoneCount - ParameterLabels: - AvailabilityZoneCount: - default: "Availability Zone Count" - VpcCidr: - default: "VPC CIDR" - SubnetBits: - default: "Bits Per Subnet" - -Conditions: - DoAz3: !Equals [3, !Ref AvailabilityZoneCount] - DoAz2: !Or [!Equals [2, !Ref AvailabilityZoneCount], Condition: DoAz3] - -Resources: - VPC: - Type: "AWS::EC2::VPC" - Properties: - EnableDnsSupport: "true" - EnableDnsHostnames: "true" - CidrBlock: !Ref VpcCidr - PublicSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [0, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 0 - - Fn::GetAZs: !Ref "AWS::Region" - PublicSubnet2: - Type: "AWS::EC2::Subnet" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [1, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 1 - - Fn::GetAZs: !Ref "AWS::Region" - PublicSubnet3: - Type: "AWS::EC2::Subnet" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [2, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 2 - - Fn::GetAZs: !Ref "AWS::Region" - InternetGateway: - Type: "AWS::EC2::InternetGateway" - GatewayToInternet: - Type: "AWS::EC2::VPCGatewayAttachment" - Properties: - VpcId: !Ref VPC - InternetGatewayId: !Ref InternetGateway - PublicRouteTable: - Type: "AWS::EC2::RouteTable" - Properties: - VpcId: !Ref VPC - PublicRoute: - Type: "AWS::EC2::Route" - DependsOn: GatewayToInternet - Properties: - RouteTableId: !Ref PublicRouteTable - DestinationCidrBlock: 0.0.0.0/0 - GatewayId: !Ref InternetGateway - PublicSubnetRouteTableAssociation: - Type: 
"AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet - RouteTableId: !Ref PublicRouteTable - PublicSubnetRouteTableAssociation2: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz2 - Properties: - SubnetId: !Ref PublicSubnet2 - RouteTableId: !Ref PublicRouteTable - PublicSubnetRouteTableAssociation3: - Condition: DoAz3 - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PublicSubnet3 - RouteTableId: !Ref PublicRouteTable - PrivateSubnet: - Type: "AWS::EC2::Subnet" - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [3, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 0 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable: - Type: "AWS::EC2::RouteTable" - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Properties: - SubnetId: !Ref PrivateSubnet - RouteTableId: !Ref PrivateRouteTable - NAT: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Properties: - AllocationId: - "Fn::GetAtt": - - EIP - - AllocationId - SubnetId: !Ref PublicSubnet - EIP: - Type: "AWS::EC2::EIP" - Properties: - Domain: vpc - Route: - Type: "AWS::EC2::Route" - Properties: - RouteTableId: - Ref: PrivateRouteTable - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT - PrivateSubnet2: - Type: "AWS::EC2::Subnet" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [4, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 1 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable2: - Type: "AWS::EC2::RouteTable" - Condition: DoAz2 - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation2: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz2 - Properties: - SubnetId: !Ref PrivateSubnet2 - RouteTableId: !Ref PrivateRouteTable2 - NAT2: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Condition: DoAz2 - Properties: - AllocationId: - "Fn::GetAtt": - - EIP2 - - AllocationId - SubnetId: !Ref PublicSubnet2 - EIP2: - Type: "AWS::EC2::EIP" - Condition: DoAz2 - Properties: - Domain: vpc - Route2: - Type: "AWS::EC2::Route" - Condition: DoAz2 - Properties: - RouteTableId: - Ref: PrivateRouteTable2 - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT2 - PrivateSubnet3: - Type: "AWS::EC2::Subnet" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - CidrBlock: !Select [5, !Cidr [!Ref VpcCidr, 6, !Ref SubnetBits]] - AvailabilityZone: !Select - - 2 - - Fn::GetAZs: !Ref "AWS::Region" - PrivateRouteTable3: - Type: "AWS::EC2::RouteTable" - Condition: DoAz3 - Properties: - VpcId: !Ref VPC - PrivateSubnetRouteTableAssociation3: - Type: "AWS::EC2::SubnetRouteTableAssociation" - Condition: DoAz3 - Properties: - SubnetId: !Ref PrivateSubnet3 - RouteTableId: !Ref PrivateRouteTable3 - NAT3: - DependsOn: - - GatewayToInternet - Type: "AWS::EC2::NatGateway" - Condition: DoAz3 - Properties: - AllocationId: - "Fn::GetAtt": - - EIP3 - - AllocationId - SubnetId: !Ref PublicSubnet3 - EIP3: - Type: "AWS::EC2::EIP" - Condition: DoAz3 - Properties: - Domain: vpc - Route3: - Type: "AWS::EC2::Route" - Condition: DoAz3 - Properties: - RouteTableId: - Ref: PrivateRouteTable3 - DestinationCidrBlock: 0.0.0.0/0 - NatGatewayId: - Ref: NAT3 - S3Endpoint: - Type: AWS::EC2::VPCEndpoint - Properties: - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: '*' - Action: - - '*' - Resource: - - '*' - RouteTableIds: - - !Ref PublicRouteTable - - !Ref PrivateRouteTable 
- - !If [DoAz2, !Ref PrivateRouteTable2, !Ref "AWS::NoValue"] - - !If [DoAz3, !Ref PrivateRouteTable3, !Ref "AWS::NoValue"] - ServiceName: !Join - - '' - - - com.amazonaws. - - !Ref 'AWS::Region' - - .s3 - VpcId: !Ref VPC - -Outputs: - VpcId: - Description: ID of the new VPC. - Value: !Ref VPC - PublicSubnetIds: - Description: Subnet IDs of the public subnets. - Value: - !Join [ - ",", - [!Ref PublicSubnet, !If [DoAz2, !Ref PublicSubnet2, !Ref "AWS::NoValue"], !If [DoAz3, !Ref PublicSubnet3, !Ref "AWS::NoValue"]] - ] - PrivateSubnetIds: - Description: Subnet IDs of the private subnets. - Value: - !Join [ - ",", - [!Ref PrivateSubnet, !If [DoAz2, !Ref PrivateSubnet2, !Ref "AWS::NoValue"], !If [DoAz3, !Ref PrivateSubnet3, !Ref "AWS::NoValue"]] - ] - PublicRouteTableId: - Description: Public Route table ID - Value: !Ref PublicRouteTable ----- -==== diff --git a/modules/installation-cloudformation-vpc.adoc b/modules/installation-cloudformation-vpc.adoc deleted file mode 100644 index 9cc2bb3920c3..000000000000 --- a/modules/installation-cloudformation-vpc.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-vpc_{context}"] -= CloudFormation template for the VPC - -You can use the following CloudFormation template to deploy the VPC that -you need for your {product-title} cluster. - -.CloudFormation template for the VPC -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/01_vpc.yaml[] ----- -==== diff --git a/modules/installation-cloudformation-worker.adoc b/modules/installation-cloudformation-worker.adoc deleted file mode 100644 index 91d8186ed923..000000000000 --- a/modules/installation-cloudformation-worker.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -[id="installation-cloudformation-worker_{context}"] -= CloudFormation template for worker machines - -You can use the following CloudFormation template to deploy the worker machines -that you need for your {product-title} cluster. - -.CloudFormation template for worker machines -[%collapsible] -==== -[source,yaml] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/aws/cloudformation/06_cluster_worker_node.yaml[] ----- -==== diff --git a/modules/installation-common-issues.adoc b/modules/installation-common-issues.adoc deleted file mode 100644 index e537e06043d4..000000000000 --- a/modules/installation-common-issues.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing/installing-troubleshooting.adoc - -[id="installation-common-issues_{context}"] -= Troubleshooting common issues with installing on {rh-virtualization-first} - -Here are some common issues you might encounter, along with proposed causes and solutions. - -[id="cpu-load-increases-and-nodes-go-into-a-not-ready-state_{context}"] -== CPU load increases and nodes go into a `Not Ready` state - -* *Symptom*: CPU load increases significantly and nodes start going into a `Not Ready` state. -* *Cause*: The storage domain latency might be too high, especially for control plane nodes. 
-* *Solution*: -+ -Make the nodes ready again by restarting the kubelet service: -+ -[source,terminal] ----- -$ systemctl restart kubelet ----- -+ -Inspect the {product-title} metrics service, which automatically gathers and reports on some valuable data such as the etcd disk sync duration. If the cluster is operational, use this data to help determine whether storage latency or throughput is the root issue. If so, consider using a storage resource that has lower latency and higher throughput. -+ -To get raw metrics, enter the following command as kubeadmin or user with cluster-admin privileges: -+ -[source,terminal] ----- -$ oc get --insecure-skip-tls-verify --server=https://localhost:<port> --raw=/metrics ----- -+ -To learn more, see https://access.redhat.com/articles/3793621[Exploring Application Endpoints for the purposes of Debugging with OpenShift 4.x] - -[id="trouble-connecting-the-rhv-cluster-api_{context}"] -== Trouble connecting the {product-title} cluster API - -* *Symptom*: The installation program completes but the {product-title} cluster API is not available. The bootstrap virtual machine remains up after the bootstrap process is complete. When you enter the following command, the response will time out. -+ -[source,terminal] ----- -$ oc login -u kubeadmin -p *** <apiurl> ----- - -* *Cause*: The bootstrap VM was not deleted by the installation program and has not released the cluster's API IP address. -* *Solution*: Use the `wait-for` subcommand to be notified when the bootstrap process is complete: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete ----- -+ -When the bootstrap process is complete, delete the bootstrap virtual machine: -+ -[source,terminal] ----- -$ ./openshift-install destroy bootstrap ----- diff --git a/modules/installation-complete-user-infra.adoc b/modules/installation-complete-user-infra.adoc deleted file mode 100644 index 972139fda40f..000000000000 --- a/modules/installation-complete-user-infra.adoc +++ /dev/null @@ -1,198 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifdef::openshift-origin[] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:restricted: -endif::[] -:_content-type: PROCEDURE -[id="installation-complete-user-infra_{context}"] -= Completing installation on user-provisioned infrastructure - -After you complete the Operator configuration, you can finish installing the -cluster on infrastructure that you provide. 
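If you are not sure whether the control plane has finished initializing, one way to check before you begin is to let the installation program wait for the bootstrap stage to report completion. The following is a sketch that assumes your installation directory is still available:

[source,terminal]
----
$ ./openshift-install --dir <installation_directory> wait-for bootstrap-complete --log-level=info <1>
----
<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in.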
- -.Prerequisites - -* Your control plane has initialized. -* You have completed the initial Operator configuration. - -.Procedure - -. Confirm that all the cluster components are online with the following command: -+ -[source,terminal] ----- -$ watch -n5 oc get clusteroperators ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.13.0 True False False 19m -baremetal 4.13.0 True False False 37m -cloud-credential 4.13.0 True False False 40m -cluster-autoscaler 4.13.0 True False False 37m -config-operator 4.13.0 True False False 38m -console 4.13.0 True False False 26m -csi-snapshot-controller 4.13.0 True False False 37m -dns 4.13.0 True False False 37m -etcd 4.13.0 True False False 36m -image-registry 4.13.0 True False False 31m -ingress 4.13.0 True False False 30m -insights 4.13.0 True False False 31m -kube-apiserver 4.13.0 True False False 26m -kube-controller-manager 4.13.0 True False False 36m -kube-scheduler 4.13.0 True False False 36m -kube-storage-version-migrator 4.13.0 True False False 37m -machine-api 4.13.0 True False False 29m -machine-approver 4.13.0 True False False 37m -machine-config 4.13.0 True False False 36m -marketplace 4.13.0 True False False 37m -monitoring 4.13.0 True False False 29m -network 4.13.0 True False False 38m -node-tuning 4.13.0 True False False 37m -openshift-apiserver 4.13.0 True False False 32m -openshift-controller-manager 4.13.0 True False False 30m -openshift-samples 4.13.0 True False False 32m -operator-lifecycle-manager 4.13.0 True False False 37m -operator-lifecycle-manager-catalog 4.13.0 True False False 37m -operator-lifecycle-manager-packageserver 4.13.0 True False False 32m -service-ca 4.13.0 True False False 38m -storage 4.13.0 True False False 37m ----- -+ -Alternatively, the following command notifies you when all of the clusters are available. It also retrieves and displays credentials: -+ -[source,terminal] ----- -$ ./openshift-install --dir <installation_directory> wait-for install-complete <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the cluster to initialize... ----- -+ -The command succeeds when the Cluster Version Operator finishes deploying the -{product-title} cluster from Kubernetes API server. -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -. Confirm that the Kubernetes API server is communicating with the pods. -.. 
To view a list of all pods, use the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -openshift-apiserver-operator openshift-apiserver-operator-85cb746d55-zqhs8 1/1 Running 1 9m -openshift-apiserver apiserver-67b9g 1/1 Running 0 3m -openshift-apiserver apiserver-ljcmx 1/1 Running 0 1m -openshift-apiserver apiserver-z25h4 1/1 Running 0 2m -openshift-authentication-operator authentication-operator-69d5d8bf84-vh2n8 1/1 Running 0 5m -... ----- - -.. View the logs for a pod that is listed in the output of the previous command -by using the following command: -+ -[source,terminal] ----- -$ oc logs <pod_name> -n <namespace> <1> ----- -<1> Specify the pod name and namespace, as shown in the output of the previous -command. -+ -If the pod logs display, the Kubernetes API server can communicate with the -cluster machines. - -ifndef::ibm-power[] -. For an installation with Fibre Channel Protocol (FCP), additional steps are required to enable multipathing. Do not enable multipathing during installation. -endif::ibm-power[] -ifdef::ibm-power[] -. Additional steps are required to enable multipathing. Do not enable multipathing during installation. -endif::ibm-power[] -+ -See "Enabling multipathing with kernel arguments on {op-system}" in the _Post-installation machine configuration tasks_ documentation for more information. - -ifdef::restricted[] -. Register your cluster on the link:https://console.redhat.com/openshift/register[Cluster registration] page. -endif::restricted[] - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] diff --git a/modules/installation-configuration-parameters.adoc b/modules/installation-configuration-parameters.adoc deleted file mode 100644 index f1efe09ff57b..000000000000 --- a/modules/installation-configuration-parameters.adoc +++ /dev/null @@ -1,2504 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-default.adoc -// * installing/installing_aws/installing-alibaba-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba_installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * 
installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-shared-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/intalling-ibm-cloud-private.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-customizations.adoc -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc -// * installing/installing_vsphere/installation-config-parameters-vsphere.adoc - -ifeval::["{context}" == "installing-alibaba-customizations"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:alibabacloud: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -//Starting in 4.10, aws on arm64 
is only supported for installation on custom, network custom, private clusters and VPC . This attribute excludes arm64 content from installing on gov regions. When government regions are supported on arm64, change `aws-govcloud` to `aws`. -ifeval::["{context}" == "installing-aws-government-region"] -:aws-govcloud: -endif::[] -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC. This attribute excludes arm64 content from installing on secret regions. When secret regions are supported on arm64, change `aws-secret` to `aws`. -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:bare: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:bare: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:bare: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-cloud: -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-cloud: -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -:rhv: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp: -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z: -endif::[] -ifeval::["{context}" == 
"installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:nutanix: -endif::[] -ifeval::["{context}" == "installation-config-parameters-vsphere"] -:vsphere: -endif::[] - - -:_content-type: CONCEPT -[id="installation-configuration-parameters_{context}"] -ifndef::vsphere[] -= Installation configuration parameters -endif::vsphere[] - -// Managing headings is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// This accommodates the existing IA of the installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, the conditions can be removed and the following heading can be used. -ifdef::vsphere[] -= Available installation configuration parameters for {platform} -endif::vsphere[] - -// If install-config.yaml is generated by openshift-install -// The addition of providers beyond bare,ibm-power,ibm-z,ash is necessary as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493 -// This accommodates the existing IA of the installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, content between lines 277-292 can be completely removed. -ifndef::bare,ibm-power,ibm-z,ash,vsphere[] -Before you deploy an {product-title} cluster, you provide parameter values to describe your account on the cloud platform that hosts your cluster and optionally customize your cluster's platform. When you create the `install-config.yaml` installation configuration file, you provide values for the required parameters through the command line. If you customize your cluster, you can modify the `install-config.yaml` file to provide more details about the platform. -endif::bare,ibm-power,ibm-z,ash,vsphere[] - -// If the user manually creates install-config.yaml -ifdef::bare,ibm-power,ibm-power-vs,ibm-z,ash[] -Before you deploy an {product-title} cluster, you provide a customized `install-config.yaml` installation configuration file that describes the details for your environment. -endif::bare,ibm-power,ibm-power-vs,ibm-z,ash[] - -// A condition for this note is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// This accommodates the existing content for installation assemblies, while the improvement is implemented. -// As part of the updates for the last provider, this note can be removed from the module -ifndef::vsphere[] -[NOTE] -==== -After installation, you cannot modify these parameters in the `install-config.yaml` file. 
-==== -endif::vsphere[] - -// This condition is required as part of the effort for https://issues.redhat.com/browse/OSDOCS-6493. -// As part of the update for each provider, this content applies to the net new provider-specific installation configuration parameter assembly. -// As part of the updates for the last provider, the conditions can be completely removed. -ifdef::vsphere[] -The following tables specify the required, optional, and {platform}-specific installation configuration parameters that you can set as part of the installation process. - -[NOTE] -==== -After installation, you cannot modify these parameters in the `install-config.yaml` file. -==== -endif::vsphere[] - -[id="installation-configuration-parameters-required_{context}"] -== Required configuration parameters - -Required installation configuration parameters are described in the following table: - -.Required parameters -[cols=".^2,.^3,.^5a",options="header"] -|==== -|Parameter|Description|Values - -|`apiVersion` -|The API version for the `install-config.yaml` content. The current version is `v1`. The installation program may also support older API versions. -|String - -|`baseDomain` -|The base domain of your cloud provider. The base domain is used to create routes to your {product-title} cluster components. The full DNS name for your cluster is a combination of the `baseDomain` and `metadata.name` parameter values that uses the `<metadata.name>.<baseDomain>` format. -|A fully-qualified domain or subdomain name, such as `example.com`. - -|`metadata` -|Kubernetes resource `ObjectMeta`, from which only the `name` parameter is consumed. -|Object - -|`metadata.name` -|The name of the cluster. DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`. -ifndef::bare,nutanix,vsphere[] -|String of lowercase letters, hyphens (`-`), and periods (`.`), such as `dev`. -endif::bare,nutanix,vsphere[] -ifdef::bare,nutanix,vsphere[] -|String of lowercase letters and hyphens (`-`), such as `dev`. -endif::bare,nutanix,vsphere[] -ifdef::osp[] -The string must be 14 characters or fewer long. -endif::osp[] - -|`platform` -|The configuration for the specific platform upon which to perform the installation: `alibabacloud`, `aws`, `baremetal`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `ovirt`, `powervs`, `vsphere`, or `{}`. For additional information about `platform.<platform>` parameters, consult the table for your specific platform that follows. -|Object - -ifndef::openshift-origin[] -|`pullSecret` -|Get a {cluster-manager-url-pull} to authenticate downloading container images for {product-title} components from services such as Quay.io. -| -[source,json] ----- -{ - "auths":{ - "cloud.openshift.com":{ - "auth":"b3Blb=", - "email":"you@example.com" - }, - "quay.io":{ - "auth":"b3Blb=", - "email":"you@example.com" - } - } -} ----- -endif::[] - -|==== - -[id="installation-configuration-parameters-network_{context}"] -== Network configuration parameters - -You can customize your installation configuration based on the requirements of your existing network infrastructure. For example, you can expand the IP address block for the cluster network or provide different IP address blocks than the defaults. - -// OSDOCS-1640 - IPv4/IPv6 dual-stack bare metal only -// But only for installer-provisioned -// https://bugzilla.redhat.com/show_bug.cgi?id=2020416 -// Once BM UPI supports dual-stack, uncomment all the following conditionals and blocks - -ifndef::bare,vsphere[] -Only IPv4 addresses are supported. 
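For orientation, the following sketch shows how the required parameters from the table above might look at the top of an `install-config.yaml` file. All values are placeholders, the pull secret is abbreviated, and platform-specific settings are omitted:

[source,yaml]
----
apiVersion: v1
baseDomain: example.com    # cluster DNS records become subdomains of <metadata.name>.example.com
metadata:
  name: dev                # for example, dev
platform: {}               # or a provider-specific block for your cloud platform
pullSecret: '{"auths": {"cloud.openshift.com": {"auth": "b3Blb=", "email": "you@example.com"}}}'
----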
-endif::[] - -ifdef::bare,vsphere[] -* If you use the {openshift-networking} OVN-Kubernetes network plugin, both IPv4 and IPv6 address families are supported. - -* If you use the {openshift-networking} OpenShift SDN network plugin, only the IPv4 address family is supported. - -ifdef::ibm-cloud[] -[NOTE] -==== -IBM Cloud VPC does not support IPv6 address families. -==== -endif::ibm-cloud[] - -ifdef::vsphere[] -[NOTE] -==== -On VMware vSphere, dual-stack networking must specify IPv4 as the primary address family. - -The following additional limitations apply to dual-stack networking: - -* Nodes report only their IPv6 IP address in `node.status.addresses` -* Nodes with only a single NIC are supported -* Pods configured for host networking report only their IPv6 addresses in `pod.status.IP` -==== -endif::vsphere[] - -If you configure your cluster to use both IP address families, review the following requirements: - -* Both IP families must use the same network interface for the default gateway. - -* Both IP families must have the default gateway. - -* You must specify IPv4 and IPv6 addresses in the same order for all network configuration parameters. For example, in the following configuration IPv4 addresses are listed before IPv6 addresses. - -[source,yaml] ----- -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - - cidr: fd00:10:128::/56 - hostPrefix: 64 - serviceNetwork: - - 172.30.0.0/16 - - fd00:172:16::/112 ----- -endif::[] - -[NOTE] -==== -Globalnet is not supported with {rh-storage-first} disaster recovery solutions. For regional disaster recovery scenarios, ensure that you use a nonoverlapping range of private IP addresses for the cluster and service networks in each cluster. -==== - -.Network parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`networking` -|The configuration for the cluster network. -|Object - -[NOTE] -==== -You cannot modify parameters specified by the `networking` object after installation. -==== - -|`networking.networkType` -|The {openshift-networking} network plugin to install. -| -ifdef::openshift-origin[] -Either `OpenShiftSDN` or `OVNKubernetes`. The default value is `OVNKubernetes`. -endif::openshift-origin[] -ifndef::openshift-origin[] -ifndef::ibm-power-vs[] -Either `OpenShiftSDN` or `OVNKubernetes`. `OpenShiftSDN` is a CNI plugin for all-Linux networks. `OVNKubernetes` is a CNI plugin for Linux networks and hybrid networks that contain both Linux and Windows servers. The default value is `OVNKubernetes`. -endif::ibm-power-vs[] -ifdef::ibm-power-vs[] -The default value is `OVNKubernetes`. -endif::ibm-power-vs[] -endif::openshift-origin[] - -|`networking.clusterNetwork` -| -The IP address blocks for pods. - -The default value is `10.128.0.0/14` with a host prefix of `/23`. - -If you specify multiple IP address blocks, the blocks must not overlap. -|An array of objects. For example: - -[source,yaml] ----- -ifndef::bare[] -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 -endif::bare[] -ifdef::bare[] -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - - cidr: fd01::/48 - hostPrefix: 64 -endif::bare[] ----- - -|`networking.clusterNetwork.cidr` -| -Required if you use `networking.clusterNetwork`. An IP address block. - -ifndef::bare[] -An IPv4 network. -endif::bare[] - -ifdef::bare[] -If you use the OpenShift SDN network plugin, specify an IPv4 network. If you use the OVN-Kubernetes network plugin, you can specify IPv4 and IPv6 networks. 
-endif::bare[] -| -An IP address block in Classless Inter-Domain Routing (CIDR) notation. -The prefix length for an IPv4 block is between `0` and `32`. -ifdef::bare[] -The prefix length for an IPv6 block is between `0` and `128`. For example, `10.128.0.0/14` or `fd01::/48`. -endif::bare[] - -|`networking.clusterNetwork.hostPrefix` -|The subnet prefix length to assign to each individual node. For example, if `hostPrefix` is set to `23` then each node is assigned a `/23` subnet out of the given `cidr`. A `hostPrefix` value of `23` provides 510 (2^(32 - 23) - 2) pod IP addresses. -| -A subnet prefix. - -ifndef::bare[] -The default value is `23`. -endif::bare[] - -ifdef::bare[] -For an IPv4 network the default value is `23`. -For an IPv6 network the default value is `64`. The default value is also the minimum value for IPv6. -endif::bare[] - -|`networking.serviceNetwork` -| -The IP address block for services. The default value is `172.30.0.0/16`. - -The OpenShift SDN and OVN-Kubernetes network plugins support only a single IP address block for the service network. - -ifdef::bare[] -If you use the OVN-Kubernetes network plugin, you can specify an IP address block for both of the IPv4 and IPv6 address families. -endif::bare[] - -| -An array with an IP address block in CIDR format. For example: - -[source,yaml] ----- -ifndef::bare[] -networking: - serviceNetwork: - - 172.30.0.0/16 -endif::bare[] -ifdef::bare[] -networking: - serviceNetwork: - - 172.30.0.0/16 - - fd02::/112 -endif::bare[] ----- - -|`networking.machineNetwork` -| -The IP address blocks for machines. - -ifndef::ibm-power-vs[] -If you specify multiple IP address blocks, the blocks must not overlap. -endif::ibm-power-vs[] - -ifdef::ibm-z,ibm-power[] -If you specify multiple IP kernel arguments, the `machineNetwork.cidr` value must be the CIDR of the primary network. -endif::ibm-z,ibm-power[] -|An array of objects. For example: - -[source,yaml] ----- -networking: - machineNetwork: - - cidr: 10.0.0.0/16 ----- - -|`networking.machineNetwork.cidr` -| -Required if you use `networking.machineNetwork`. An IP address block. The default value is `10.0.0.0/16` for all platforms other than libvirt and {ibmpowerProductName} Virtual Server. For libvirt, the default value is `192.168.126.0/24`. For {ibmpowerProductName} Virtual Server, the default value is `192.168.0.0/24`. -ifdef::ibm-cloud-vpc[] -The CIDR must contain the subnets defined in `platform.ibmcloud.controlPlaneSubnets` and `platform.ibmcloud.computeSubnets`. -endif::ibm-cloud-vpc[] -| -An IP network block in CIDR notation. - -ifndef::bare,ibm-power-vs[] -For example, `10.0.0.0/16`. -endif::bare,ibm-power-vs[] -ifdef::bare[] -For example, `10.0.0.0/16` or `fd00::/48`. -endif::bare[] -ifdef::ibm-power-vs[] -For example, `192.168.0.0/24`. -endif::ibm-power-vs[] - -[NOTE] -==== -Set the `networking.machineNetwork` to match the CIDR that the preferred NIC resides in. -==== - -|==== - -[id="installation-configuration-parameters-optional_{context}"] -== Optional configuration parameters - -Optional installation configuration parameters are described in the following table: - -.Optional parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`additionalTrustBundle` -|A PEM-encoded X.509 certificate bundle that is added to the nodes' trusted certificate store. This trust bundle may also be used when a proxy has been configured. -|String - -|`capabilities` -|Controls the installation of optional core cluster components. 
You can reduce the footprint of your {product-title} cluster by disabling optional components. For more information, see the "Cluster capabilities" page in _Installing_.
-|String array
-
-|`capabilities.baselineCapabilitySet`
-|Selects an initial set of optional capabilities to enable. Valid values are `None`, `v4.11`, `v4.12`, and `vCurrent`. The default value is `vCurrent`.
-|String
-
-|`capabilities.additionalEnabledCapabilities`
-|Extends the set of optional capabilities beyond what you specify in `baselineCapabilitySet`. You may specify multiple capabilities in this parameter.
-|String array
-
-|`cpuPartitioningMode`
-|Enables workload partitioning, which isolates {product-title} services, cluster management workloads, and infrastructure pods to run on a reserved set of CPUs. Workload partitioning can only be enabled during installation and cannot be disabled after installation. While this field enables workload partitioning, it does not configure workloads to use specific CPUs. For more information, see the _Workload partitioning_ page in the _Scalability and Performance_ section.
-|`None` or `AllNodes`. `None` is the default value.
-
-|`compute`
-|The configuration for the machines that comprise the compute nodes.
-|Array of `MachinePool` objects.
-ifdef::rhv[]
-For details, see the "Additional RHV parameters for machine pools" table.
-endif::rhv[]
-
-ifndef::openshift-origin[]
-
-ifndef::aws,bare,ibm-power,ibm-z,azure,ibm-power-vs[]
-|`compute.architecture`
-|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default).
-|String
-endif::aws,bare,ibm-power,ibm-z,azure,ibm-power-vs[]
-
-ifdef::aws,bare,azure[]
-|`compute.architecture`
-|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` and `arm64`. See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability.
-|String
-endif::aws,bare,azure[]
-
-ifdef::ibm-z[]
-|`compute.architecture`
-|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `s390x` (the default).
-|String
-endif::ibm-z[]
-
-ifdef::ibm-power,ibm-power-vs[]
-|`compute.architecture`
-|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `ppc64le` (the default).
-|String
-endif::ibm-power,ibm-power-vs[]
-endif::openshift-origin[]
-
-ifdef::openshift-origin[]
-|`compute.architecture`
-|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default).
-ifdef::aws[]
-See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability.
-endif::aws[]
-|String
-endif::openshift-origin[]
-
-|`compute.hyperthreading`
-|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on compute machines.
By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. -==== -|`Enabled` or `Disabled` - -|`compute.name` -|Required if you use `compute`. The name of the machine pool. -|`worker` - -|`compute.platform` -|Required if you use `compute`. Use this parameter to specify the cloud provider to host the worker machines. This parameter value must match the `controlPlane.platform` parameter value. -|`alibabacloud`, `aws`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `ovirt`, `powervs`, `vsphere`, or `{}` - -|`compute.replicas` -|The number of compute machines, which are also known as worker machines, to provision. -|A positive integer greater than or equal to `2`. The default value is `3`. - -|`featureSet` -|Enables the cluster for a feature set. A feature set is a collection of {product-title} features that are not enabled by default. For more information about enabling a feature set during installation, see "Enabling features using feature gates". -|String. The name of the feature set to enable, such as `TechPreviewNoUpgrade`. - -|`controlPlane` -|The configuration for the machines that comprise the control plane. -|Array of `MachinePool` objects. -ifdef::rhv[] -For details, see the "Additional RHV parameters for machine pools" table. -endif::rhv[] - -ifndef::openshift-origin[] -ifndef::aws,bare,ibm-z,ibm-power,azure,ibm-power-vs[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` (the default). -|String -endif::aws,bare,ibm-z,ibm-power,azure,ibm-power-vs[] - -ifdef::aws,bare,azure[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64` and `arm64`. See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. -|String -endif::aws,bare,azure[] - -ifdef::ibm-z[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `s390x` (the default). -|String -endif::ibm-z[] - -ifdef::ibm-power,ibm-power-vs[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture. Valid values are `ppc64le` (the default). -|String -endif::ibm-power,ibm-power-vs[] -endif::openshift-origin[] - -ifdef::openshift-origin[] -|`controlPlane.architecture` -|Determines the instruction set architecture of the machines in the pool. Currently, clusters with varied architectures are not supported. All pools must specify the same architecture. Valid values are `amd64`. -ifdef::aws[] -See _Supported installation methods for different platforms_ in _Installing_ documentation for information about instance availability. 
-endif::aws[] -|String -endif::openshift-origin[] - -|`controlPlane.hyperthreading` -|Whether to enable or disable simultaneous multithreading, or `hyperthreading`, on control plane machines. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. -==== -|`Enabled` or `Disabled` - -|`controlPlane.name` -|Required if you use `controlPlane`. The name of the machine pool. -|`master` - -|`controlPlane.platform` -|Required if you use `controlPlane`. Use this parameter to specify the cloud provider that hosts the control plane machines. This parameter value must match the `compute.platform` parameter value. -|`alibabacloud`, `aws`, `azure`, `gcp`, `ibmcloud`, `nutanix`, `openstack`, `ovirt`, `powervs`, `vsphere`, or `{}` - -|`controlPlane.replicas` -|The number of control plane machines to provision. -|The only supported value is `3`, which is the default value. - -|`credentialsMode` -|The Cloud Credential Operator (CCO) mode. If no mode is specified, the CCO dynamically tries to determine the capabilities of the provided credentials, with a preference for mint mode on the platforms where multiple modes are supported. -ifdef::gcp[If you are installing on GCP into a shared virtual private cloud (VPC), `credentialsMode` must be set to `Passthrough`.] -[NOTE] -==== -Not all CCO modes are supported for all cloud providers. For more information about CCO modes, see the _Cloud Credential Operator_ entry in the _Cluster Operators reference_ content. -==== -[NOTE] -==== -If your AWS account has service control policies (SCP) enabled, you must configure the `credentialsMode` parameter to `Mint`, `Passthrough` or `Manual`. -==== -|`Mint`, `Passthrough`, `Manual` or an empty string (`""`). -ifndef::openshift-origin,ibm-power-vs[] -|`fips` -|Enable or disable FIPS mode. The default is `false` (disabled). If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on `x86_64`, `ppc64le`, and `s390x` architectures. -==== -[NOTE] -==== -If you are using Azure File storage, you cannot enable FIPS mode. -==== -|`false` or `true` -endif::openshift-origin,ibm-power-vs[] -|`imageContentSources` -|Sources and repositories for the release-image content. -|Array of objects. Includes a `source` and, optionally, `mirrors`, as described in the following rows of this table. - -|`imageContentSources.source` -|Required if you use `imageContentSources`. Specify the repository that users refer to, for example, in image pull specifications. -|String - -|`imageContentSources.mirrors` -|Specify one or more repositories that may also contain the same images. -|Array of strings - -ifndef::openshift-origin[] -ifdef::aws[] -|`platform.aws.lbType` -|Required to set the NLB load balancer type in AWS. Valid values are `Classic` or `NLB`. If no value is specified, the installation program defaults to `Classic`. The installation program sets the value provided here in the ingress cluster configuration object. If you do not specify a load balancer type for other Ingress Controllers, they use the type set in this parameter. -|`Classic` or `NLB`. 
The default value is `Classic`. -endif::aws[] -endif::openshift-origin[] - -|`publish` -|How to publish or expose the user-facing endpoints of your cluster, such as the Kubernetes API, OpenShift routes. -| -ifdef::aws,aws-govcloud,aws-secret,azure,gcp,ibm-cloud[] -`Internal` or `External`. To deploy a private cluster, which cannot be accessed from the internet, set `publish` to `Internal`. The default value is `External`. -endif::[] -ifndef::aws,aws-govcloud,aws-secret,azure,gcp,ibm-cloud[] -`Internal` or `External`. The default value is `External`. - -Setting this field to `Internal` is not supported on non-cloud platforms. -ifndef::ibm-power-vs[] -ifeval::[{product-version} <= 4.7] -[IMPORTANT] -==== -If the value of the field is set to `Internal`, the cluster will become non-functional. For more information, refer to link:https://bugzilla.redhat.com/show_bug.cgi?id=1953035[BZ#1953035]. -==== -endif::[] -endif::ibm-power-vs[] -endif::[] - -|`sshKey` -| The SSH key to authenticate access to your cluster machines. -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -a|For example, `sshKey: ssh-ed25519 AAAA..`. - -|==== - -ifdef::aws,aws-govcloud,aws-secret[] -[id="installation-configuration-parameters-optional-aws_{context}"] -== Optional AWS configuration parameters - -Optional AWS configuration parameters are described in the following table: - -.Optional AWS parameters -[cols=".^2,.^3,.^5a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.aws.amiID` -|The AWS AMI used to boot compute machines for the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`compute.platform.aws.iamRole` -|A pre-existing AWS IAM role applied to the compute machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role. -|The name of a valid AWS IAM role. - -|`compute.platform.aws.rootVolume.iops` -|The Input/Output Operations Per Second (IOPS) that is reserved for the root volume. -|Integer, for example `4000`. - -|`compute.platform.aws.rootVolume.size` -|The size in GiB of the root volume. -|Integer, for example `500`. - -|`compute.platform.aws.rootVolume.type` -|The type of the root volume. -|Valid link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html[AWS EBS volume type], -such as `io1`. - -|`compute.platform.aws.rootVolume.kmsKeyARN` -|The Amazon Resource Name (key ARN) of a KMS key. This is required to encrypt operating system volumes of worker nodes with a specific KMS key. -|Valid link:https://docs.aws.amazon.com/kms/latest/developerguide/find-cmk-id-arn.html[key ID or the key ARN]. - -|`compute.platform.aws.type` -|The EC2 instance type for the compute machines. -|Valid AWS instance type, such as `m4.2xlarge`. See the *Supported AWS machine types* table that follows. -//add an xref when possible. - -|`compute.platform.aws.zones` -|The availability zones where the installation program creates machines for the compute machine pool. If you provide your own VPC, you must provide a subnet in that availability zone. 
-|A list of valid AWS availability zones, such as `us-east-1c`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`compute.aws.region` -|The AWS region that the installation program creates compute resources in. -|Any valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS region], such as `us-east-1`. You can use the AWS CLI to access the regions available based on your selected instance type. For example: -[source,terminal] ----- -aws ec2 describe-instance-type-offerings --filters Name=instance-type,Values=c7g.xlarge ----- -ifndef::openshift-origin[] -[IMPORTANT] -==== -When running on ARM based AWS instances, ensure that you enter a region where AWS Graviton processors are available. See link:https://aws.amazon.com/ec2/graviton/#Global_availability[Global availability] map in the AWS documentation. Currently, AWS Graviton3 processors are only available in some regions. -==== -endif::openshift-origin[] - - -|`controlPlane.platform.aws.amiID` -|The AWS AMI used to boot control plane machines for the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`controlPlane.platform.aws.iamRole` -|A pre-existing AWS IAM role applied to the control plane machine pool instance profiles. You can use these fields to match naming schemes and include predefined permissions boundaries for your IAM roles. If undefined, the installation program creates a new IAM role. -|The name of a valid AWS IAM role. - -|`controlPlane.platform.aws.rootVolume.kmsKeyARN` -|The Amazon Resource Name (key ARN) of a KMS key. This is required to encrypt operating system volumes of control plane nodes with a specific KMS key. -|Valid link:https://docs.aws.amazon.com/kms/latest/developerguide/find-cmk-id-arn.html[key ID and the key ARN]. - -|`controlPlane.platform.aws.type` -|The EC2 instance type for the control plane machines. -|Valid AWS instance type, such as `m6i.xlarge`. See the *Supported AWS machine types* table that follows. -//add an xref when possible - -|`controlPlane.platform.aws.zones` -|The availability zones where the installation program creates machines for the -control plane machine pool. -|A list of valid AWS availability zones, such as `us-east-1c`, in a link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`controlPlane.aws.region` -|The AWS region that the installation program creates control plane resources in. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS region], such as `us-east-1`. - -|`platform.aws.amiID` -|The AWS AMI used to boot all machines for the cluster. If set, the AMI must -belong to the same region as the cluster. This is required for regions that require a custom {op-system} AMI. -|Any published or custom {op-system} AMI that belongs to the set AWS region. See _{op-system} AMIs for AWS infrastructure_ for available AMI IDs. - -|`platform.aws.hostedZone` -|An existing Route 53 private hosted zone for the cluster. You can only use a pre-existing hosted zone when also supplying your own VPC. The hosted zone must already be associated with the user-provided VPC before installation. Also, the domain of the hosted zone must be the cluster domain or a parent of the cluster domain. If undefined, the installation program creates a new hosted zone. -|String, for example `Z3URY6TWQ91KVV`. 
- -|`platform.aws.serviceEndpoints.name` -|The AWS service endpoint name. Custom endpoints are only required for cases -where alternative AWS endpoints, like FIPS, must be used. Custom API endpoints -can be specified for EC2, S3, IAM, Elastic Load Balancing, Tagging, Route 53, -and STS AWS services. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS service endpoint] name. - -|`platform.aws.serviceEndpoints.url` -|The AWS service endpoint URL. The URL must use the `https` protocol and the -host must trust the certificate. -|Valid link:https://docs.aws.amazon.com/general/latest/gr/rande.html[AWS service endpoint] URL. - -|`platform.aws.userTags` -|A map of keys and values that the installation program adds as tags to all resources that it creates. -|Any valid YAML map, such as key value pairs in the `<key>: <value>` format. For more information about AWS tags, see link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Tagging Your Amazon EC2 Resources] in the AWS documentation. - -[NOTE] -==== -You can add up to 25 user defined tags during installation. The remaining 25 tags are reserved for {product-title}. -==== - -|`platform.aws.propagateUserTags` -| A flag that directs in-cluster Operators to include the specified user tags in the tags of the AWS resources that the Operators create. -| Boolean values, for example `true` or `false`. - - -|`platform.aws.subnets` -|If you provide the VPC instead of allowing the installation program to create the VPC for you, specify the subnet for the cluster to use. The subnet must be part of the same `machineNetwork[].cidr` ranges that you specify. - -For a standard cluster, specify a public and a private subnet for each availability zone. - -For a private cluster, specify a private subnet for each availability zone. - -For clusters that use AWS Local Zones, you must add AWS Local Zone subnets to this list to ensure edge machine pool creation. -|Valid subnet IDs. - -|==== -endif::aws,aws-govcloud,aws-secret[] - -ifdef::osp[] -[id="installation-configuration-parameters-additional-osp_{context}"] -== Additional {rh-openstack-first} configuration parameters - -Additional {rh-openstack} configuration parameters are described in the following table: - -.Additional {rh-openstack} parameters -[cols=".^2m,.^3a,^5a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.openstack.rootVolume.size` -|For compute machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage. -|Integer, for example `30`. - -|`compute.platform.openstack.rootVolume.type` -|For compute machines, the root volume's type. -|String, for example `performance`. - -|`controlPlane.platform.openstack.rootVolume.size` -|For control plane machines, the size in gigabytes of the root volume. If you do not set this value, machines use ephemeral storage. -|Integer, for example `30`. - -|`controlPlane.platform.openstack.rootVolume.type` -|For control plane machines, the root volume's type. -|String, for example `performance`. - -|`platform.openstack.cloud` -|The name of the {rh-openstack} cloud to use from the list of clouds in the -`clouds.yaml` file. -|String, for example `MyCloud`. - -|`platform.openstack.externalNetwork` -|The {rh-openstack} external network name to be used for installation. -|String, for example `external`. - -|`platform.openstack.computeFlavor` -|The {rh-openstack} flavor to use for control plane and compute machines. - -This property is deprecated. 
To use a flavor as the default for all machine pools, add it as the value of the `type` key in the `platform.openstack.defaultMachinePlatform` property. You can also set a flavor value for each machine pool individually. - -|String, for example `m1.xlarge`. -|==== - -[id="installation-configuration-parameters-optional-osp_{context}"] -== Optional {rh-openstack} configuration parameters - -Optional {rh-openstack} configuration parameters are described in the following table: - -.Optional {rh-openstack} parameters -[%header, cols=".^2,.^3,.^5a"] -|==== -|Parameter|Description|Values - -|`compute.platform.openstack.additionalNetworkIDs` -|Additional networks that are associated with compute machines. Allowed address pairs are not created for additional networks. -|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`. - -|`compute.platform.openstack.additionalSecurityGroupIDs` -|Additional security groups that are associated with compute machines. -|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`. - -|`compute.platform.openstack.zones` -|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. If this parameter is not set, the installation program relies on the default settings for Nova that the {rh-openstack} administrator configured. - -On clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property. -|A list of strings. For example, `["zone-1", "zone-2"]`. - -|`compute.platform.openstack.rootVolume.zones` -|For compute machines, the availability zone to install root volumes on. If you do not set a value for this parameter, the installation program selects the default availability zone. -|A list of strings, for example `["zone-1", "zone-2"]`. - -|`compute.platform.openstack.serverGroupPolicy` -|Server group policy to apply to the group that will contain the compute machines in the pool. You cannot change server group policies or affiliations after creation. Supported options include `anti-affinity`, `soft-affinity`, and `soft-anti-affinity`. The default value is `soft-anti-affinity`. - -An `affinity` policy prevents migrations and therefore affects {rh-openstack} upgrades. The `affinity` policy is not supported. - -If you use a strict `anti-affinity` policy, an additional {rh-openstack} host is required during instance migration. -|A server group policy to apply to the machine pool. For example, `soft-affinity`. - -|`controlPlane.platform.openstack.additionalNetworkIDs` -|Additional networks that are associated with control plane machines. Allowed address pairs are not created for additional networks. - -Additional networks that are attached to a control plane machine are also attached to the bootstrap node. -|A list of one or more UUIDs as strings. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`. - -|`controlPlane.platform.openstack.additionalSecurityGroupIDs` -|Additional security groups that are associated with control plane machines. -|A list of one or more UUIDs as strings. For example, `7ee219f3-d2e9-48a1-96c2-e7429f1b0da7`. - -|`controlPlane.platform.openstack.zones` -|{rh-openstack} Compute (Nova) availability zones (AZs) to install machines on. 
If this parameter is not set, the installation program relies on the default settings for Nova that the {rh-openstack} administrator configured. - -On clusters that use Kuryr, {rh-openstack} Octavia does not support availability zones. Load balancers and, if you are using the Amphora provider driver, {product-title} services that rely on Amphora VMs, are not created according to the value of this property. -|A list of strings. For example, `["zone-1", "zone-2"]`. - -|`controlPlane.platform.openstack.rootVolume.zones` -|For control plane machines, the availability zone to install root volumes on. If you do not set this value, the installation program selects the default availability zone. -|A list of strings, for example `["zone-1", "zone-2"]`. - -|`controlPlane.platform.openstack.serverGroupPolicy` -|Server group policy to apply to the group that will contain the control plane machines in the pool. You cannot change server group policies or affiliations after creation. Supported options include `anti-affinity`, `soft-affinity`, and `soft-anti-affinity`. The default value is `soft-anti-affinity`. - -An `affinity` policy prevents migrations, and therefore affects {rh-openstack} upgrades. The `affinity` policy is not supported. - -If you use a strict `anti-affinity` policy, an additional {rh-openstack} host is required during instance migration. -|A server group policy to apply to the machine pool. For example, `soft-affinity`. - -|`platform.openstack.clusterOSImage` -|The location from which the installation program downloads the {op-system} image. - -You must set this parameter to perform an installation in a restricted network. -|An HTTP or HTTPS URL, optionally with an SHA-256 checksum. - -For example, `\http://mirror.example.com/images/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d`. -The value can also be the name of an existing Glance image, for example `my-rhcos`. - -|`platform.openstack.clusterOSImageProperties` -|Properties to add to the installer-uploaded ClusterOSImage in Glance. This property is ignored if `platform.openstack.clusterOSImage` is set to an existing Glance image. - -You can use this property to exceed the default persistent volume (PV) limit for {rh-openstack} of 26 PVs per node. To exceed the limit, set the `hw_scsi_model` property value to `virtio-scsi` and the `hw_disk_bus` value to `scsi`. - -You can also use this property to enable the QEMU guest agent by including the `hw_qemu_guest_agent` property with a value of `yes`. -|A list of key-value string pairs. For example, `["hw_scsi_model": "virtio-scsi", "hw_disk_bus": "scsi"]`. - -|`platform.openstack.defaultMachinePlatform` -|The default machine pool platform configuration. -| -[source,json] ----- -{ - "type": "ml.large", - "rootVolume": { - "size": 30, - "type": "performance" - } -} ----- - -|`platform.openstack.ingressFloatingIP` -|An existing floating IP address to associate with the Ingress port. To use this property, you must also define the `platform.openstack.externalNetwork` property. -|An IP address, for example `128.0.0.1`. - -|`platform.openstack.apiFloatingIP` -|An existing floating IP address to associate with the API load balancer. To use this property, you must also define the `platform.openstack.externalNetwork` property. -|An IP address, for example `128.0.0.1`. - -|`platform.openstack.externalDNS` -|IP addresses for external DNS servers that cluster instances use for DNS resolution. 
-|A list of IP addresses as strings. For example, `["8.8.8.8", "192.168.1.12"]`.
-
-|`platform.openstack.loadbalancer`
-|Whether or not to use the default, internal load balancer. If the value is set to `UserManaged`, this default load balancer is disabled so that you can deploy a cluster that uses an external, user-managed load balancer. If the parameter is not set, or if the value is `OpenShiftManagedDefault`, the cluster uses the default load balancer.
-|`UserManaged` or `OpenShiftManagedDefault`.
-
-|`platform.openstack.machinesSubnet`
-|The UUID of a {rh-openstack} subnet that the cluster's nodes use. Nodes and virtual IP (VIP) ports are created on this subnet.
-
-The first item in `networking.machineNetwork` must match the value of `machinesSubnet`.
-
-If you deploy to a custom subnet, you cannot specify an external DNS server to the {product-title} installer. Instead, link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/command_line_interface_reference/subnet[add DNS to the subnet in {rh-openstack}].
-
-|A UUID as a string. For example, `fa806b2f-ac49-4bce-b9db-124bc64209bf`.
-|====
-
-[id="installation-configuration-parameters-failure-domains-osp_{context}"]
-== {rh-openstack} parameters for failure domains
-
-:FeatureName: {rh-openstack} failure domains
-[IMPORTANT]
-====
-[subs="attributes+"]
-{FeatureName} is a Technology Preview feature only. Technology Preview features
-are not supported with Red Hat production service level agreements (SLAs) and
-might not be functionally complete. Red Hat does not recommend using them
-in production. These features provide early access to upcoming product
-features, enabling customers to test functionality and provide feedback during
-the development process.
-
-For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope].
-====
-// Undefine {FeatureName} attribute, so that any mistakes are easily spotted
-:!FeatureName:
-
-{rh-openstack-first} deployments do not have a single implementation of failure domains. Instead, availability zones are defined individually for each service, such as the compute service, Nova; the networking service, Neutron; and the storage service, Cinder.
-
-Beginning with {product-title} 4.13, there is a unified definition of failure domains for {rh-openstack} deployments that covers all supported availability zone types. You can use failure domains to control related aspects of Nova, Neutron, and Cinder configurations from a single place.
-
-In {rh-openstack}, a port describes a network connection and maps to an interface inside a compute machine. A port also:
-
-* Is defined by a network or by one or more subnets
-* Connects a machine to one or more subnets
-
-Failure domains group the services of your deployment by using ports. If you use failure domains, each machine connects to:
-
-* The `portTarget` object with the ID `control-plane` while that object exists.
-* All non-control-plane `portTarget` objects within its own failure domain.
-* All networks in the machine pool's `additionalNetworkIDs` list.
-
-To configure failure domains for a machine pool, edit availability zone and port target parameters under `controlPlane.platform.openstack.failureDomains`.
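-
-The following minimal sketch shows one possible shape of a failure domain entry in the `install-config.yaml` file. The availability zone and network names are hypothetical placeholders, and the individual fields are described in the table that follows; validate the exact syntax against your installation program version:
-
-[source,yaml]
-----
-controlPlane:
-  platform:
-    openstack:
-      failureDomains:
-      - computeAvailabilityZone: nova-az-0    # hypothetical Nova availability zone
-        storageAvailabilityZone: cinder-az-0  # hypothetical Cinder availability zone
-        portTargets:
-        - id: control-plane                   # selected as the first network for machines
-          network:
-            name: my-network-1                # hypothetical network name; a UUID under "id" can be used instead
-----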
- -.{rh-openstack} parameters for failure domains -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.openstack.failuredomains.computeAvailabilityZone` -|An availability zone for the server. If not specified, the cluster default is used. -|The name of the availability zone. For example, `nova-1`. - -|`platform.openstack.failuredomains.storageAvailabilityZone` -|An availability zone for the root volume. If not specified, the cluster default is used. -|The name of the availability zone. For example, `cinder-1`. - -|`platform.openstack.failuredomains.portTargets` -|A list of `portTarget` objects, each of which defines a network connection to attach to machines within a failure domain. -|A list of `portTarget` objects. - -|`platform.openstack.failuredomains.portTargets.portTarget.id` -|The ID of an individual port target. To select that port target as the first network for machines, set the value of this parameter to `control-plane`. If this parameter has a different value, it is ignored. -|`control-plane` or an arbitrary string. - -|`platform.openstack.failuredomains.portTargets.portTarget.network` -|Required. The name or ID of the network to attach to machines in the failure domain. -a|A `network` object that contains either a name or UUID. For example: - -[source,yaml] ----- -network: - id: 8db6a48e-375b-4caa-b20b-5b9a7218bfe6 ----- - -or: - -[source,yaml] ----- -network: - name: my-network-1 ----- - -|`platform.openstack.failuredomains.portTargets.portTarget.fixedIPs` -|Subnets to allocate fixed IP addresses to. These subnets must exist within the same network as the port. -|A list of `subnet` objects. -|==== - -NOTE: You cannot combine zone fields and failure domains. If you want to use failure domains, the `controlPlane.zone` and `controlPlane.rootVolume.zone` fields must be left unset. -endif::osp[] - -ifdef::azure[] -[id="installation-configuration-parameters-additional-azure_{context}"] -== Additional Azure configuration parameters - -Additional Azure configuration parameters are described in the following table: - -.Additional Azure parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.azure.encryptionAtHost` -|Enables host-level encryption for compute machines. You can enable this encryption alongside user-managed server-side encryption. This feature encrypts temporary, ephemeral, cached and un-managed disks on the VM host. This is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`compute.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`compute.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS`, `premium_LRS`, or `standardSSD_LRS`. The default is `premium_LRS`. - -|`compute.platform.azure.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on compute nodes. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`compute.platform.azure.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. This resource group should be different from the resource group where you install the cluster to avoid deleting your Azure encryption key when the cluster is destroyed. 
This value is only necessary if you intend to install the cluster with user-managed disk encryption. -|String, for example `production_encryption_resource_group`. - -|`compute.platform.azure.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example `production_disk_encryption_set`. - -|`compute.platform.azure.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt compute machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`compute.platform.azure.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. If instance type of compute machines support `Accelerated` networking, by default, the installer enables `Accelerated` networking, otherwise the default networking type is `Basic`. -|`Accelerated` or `Basic`. - -|`compute.platform.azure.type` -|Defines the Azure instance type for compute machines. -|String - -|`compute.platform.azure.zones` -|The availability zones where the installation program creates compute machines. -|String list - -|`controlPlane.platform.azure.type` -|Defines the Azure instance type for control plane machines. -|String - -|`controlPlane.platform.azure.zones` -|The availability zones where the installation program creates control plane machines. -|String list - -|`platform.azure.defaultMachinePlatform.encryptionAtHost` -|Enables host-level encryption for compute machines. You can enable this encryption alongside user-managed server-side encryption. This feature encrypts temporary, ephemeral, cached, and un-managed disks on the VM host. This parameter is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example, `production_disk_encryption_set`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. To avoid deleting your Azure encryption key when the cluster is destroyed, this resource group must be different from the resource group where you install the cluster. This value is necessary only if you intend to install the cluster with user-managed disk encryption. -|String, for example, `production_encryption_resource_group`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt compute machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS` or `standardSSD_LRS`. The default is `premium_LRS`. - -|`platform.azure.defaultMachinePlatform.type` -|The Azure instance type for control plane and compute machines. -|The Azure instance type. 
- -|`platform.azure.defaultMachinePlatform.zones` -|The availability zones where the installation program creates compute and control plane machines. -|String list. - -|`controlPlane.platform.azure.encryptionAtHost` -|Enables host-level encryption for control plane machines. You can enable this encryption alongside user-managed server-side encryption. This feature encrypts temporary, ephemeral, cached and un-managed disks on the VM host. This is not a prerequisite for user-managed server-side encryption. -|`true` or `false`. The default is `false`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.resourceGroup` -|The name of the Azure resource group that contains the disk encryption set from the installation prerequisites. This resource group should be different from the resource group where you install the cluster to avoid deleting your Azure encryption key when the cluster is destroyed. This value is only necessary if you intend to install the cluster with user-managed disk encryption. -|String, for example `production_encryption_resource_group`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.name` -|The name of the disk encryption set that contains the encryption key from the installation prerequisites. -|String, for example `production_disk_encryption_set`. - -|`controlPlane.platform.azure.osDisk.diskEncryptionSet.subscriptionId` -|Defines the Azure subscription of the disk encryption set where the disk encryption set resides. This secondary disk encryption set is used to encrypt control plane machines. -|String, in the format `00000000-0000-0000-0000-000000000000`. - -|`controlPlane.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `1024`. - -|`controlPlane.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS` or `standardSSD_LRS`. The default is `premium_LRS`. - -|`controlPlane.platform.azure.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on control plane machines. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`controlPlane.platform.azure.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. If instance type of control plane machines support `Accelerated` networking, by default, the installer enables `Accelerated` networking, otherwise the default networking type is `Basic`. -|`Accelerated` or `Basic`. - -|`platform.azure.baseDomainResourceGroupName` -|The name of the resource group that contains the DNS zone for your base domain. -|String, for example `production_cluster`. - -|`platform.azure.resourceGroupName` -| The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster by using the installation program deletes this resource group. -|String, for example `existing_resource_group`. 
- -|`platform.azure.outboundType` -|The outbound routing strategy used to connect your cluster to the internet. If -you are using user-defined routing, you must have pre-existing networking -available where the outbound routing has already been configured prior to -installing a cluster. The installation program is not responsible for -configuring user-defined routing. -|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`. - -|`platform.azure.region` -|The name of the Azure region that hosts your cluster. -|Any valid region name, such as `centralus`. - -|`platform.azure.zone` -|List of availability zones to place machines in. For high availability, specify -at least two zones. -|List of zones, for example `["1", "2", "3"]`. - -|`platform.azure.defaultMachinePlatform.ultraSSDCapability` -|Enables the use of Azure ultra disks for persistent storage on control plane and compute machines. This requires that your Azure region and zone have ultra disks available. -|`Enabled`, `Disabled`. The default is `Disabled`. - -|`platform.azure.networkResourceGroupName` -|The name of the resource group that contains the existing VNet that you want to deploy your cluster to. This name cannot be the same as the `platform.azure.baseDomainResourceGroupName`. -|String. - -|`platform.azure.virtualNetwork` -|The name of the existing VNet that you want to deploy your cluster to. -|String. - -|`platform.azure.controlPlaneSubnet` -|The name of the existing subnet in your VNet that you want to deploy your control plane machines to. -|Valid CIDR, for example `10.0.0.0/16`. - -|`platform.azure.computeSubnet` -|The name of the existing subnet in your VNet that you want to deploy your compute machines to. -|Valid CIDR, for example `10.0.0.0/16`. - -|`platform.azure.cloudName` -|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the default value `AzurePublicCloud` is used. -|Any valid cloud environment, such as `AzurePublicCloud` or `AzureUSGovernmentCloud`. - -|`platform.azure.defaultMachinePlatform.vmNetworkingType` -|Enables accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, improving its networking performance. -|`Accelerated` or `Basic`. If instance type of control plane and compute machines support `Accelerated` networking, by default, the installer enables `Accelerated` networking, otherwise the default networking type is `Basic`. - -|==== - -[NOTE] -==== -You cannot customize -link:https://azure.microsoft.com/en-us/global-infrastructure/availability-zones/[Azure Availability Zones] -or -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-using-tags[Use tags to organize your Azure resources] -with an Azure cluster. -==== -endif::azure[] - -ifdef::gcp[] -[id="installation-configuration-parameters-additional-gcp_{context}"] -== Additional Google Cloud Platform (GCP) configuration parameters - -Additional GCP configuration parameters are described in the following table: - -.Additional GCP parameters -[cols=".^1,.^6a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.gcp.network` -|The name of the existing Virtual Private Cloud (VPC) where you want to deploy your cluster. If you want to deploy your cluster into a shared VPC, you must set `platform.gcp.networkProjectID` with the name of the GCP project that contains the shared VPC. -|String. - -|`platform.gcp.networkProjectID` -|Optional. 
The name of the GCP project that contains the shared VPC where you want to deploy your cluster. -|String. - -|`platform.gcp.projectID` -|The name of the GCP project where the installation program installs the cluster. -|String. - -|`platform.gcp.region` -|The name of the GCP region that hosts your cluster. -|Any valid region name, such as `us-central1`. - -|`platform.gcp.controlPlaneSubnet` -|The name of the existing subnet where you want to deploy your control plane machines. -|The subnet name. - -|`platform.gcp.computeSubnet` -|The name of the existing subnet where you want to deploy your compute machines. -|The subnet name. - -|`platform.gcp.licenses` -|A list of license URLs that must be applied to the compute images. -[IMPORTANT] -==== -The `licenses` parameter is a deprecated field and nested virtualization is enabled by default. It is not recommended to use this field. -==== -|Any license available with the link:https://cloud.google.com/compute/docs/reference/rest/v1/licenses/list[license API], such as the license to enable link:https://cloud.google.com/compute/docs/instances/nested-virtualization/overview[nested virtualization]. You cannot use this parameter with a mechanism that generates pre-built images. Using a license URL forces the installation program to copy the source image before use. - -|`platform.gcp.defaultMachinePlatform.zones` -|The availability zones where the installation program creates machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`platform.gcp.defaultMachinePlatform.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). -|Any size between 16 GB and 65536 GB. - -|`platform.gcp.defaultMachinePlatform.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type]. -|Either the default `pd-ssd` or the `pd-standard` disk type. The control plane nodes must be the `pd-ssd` disk type. Compute nodes can be either type. - -|`platform.gcp.defaultMachinePlatform.tags` -|Optional. Additional network tags to add to the control plane and compute machines. -|One or more strings, for example `network-tag1`. - -|`platform.gcp.defaultMachinePlatform.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane and compute machines. -|The GCP machine type, for example `n1-standard-4`. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for machine disk encryption. -|The encryption key name. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.keyRing` -|The name of the Key Management Service (KMS) key ring to which the KMS key belongs. -|The KMS key ring name. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.location` -|The link:https://cloud.google.com/kms/docs/locations[GCP location] in which the KMS key ring exists. -|The GCP location. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKey.projectID` -|The ID of the project in which the KMS key ring exists. This value defaults to the value of the `platform.gcp.projectID` parameter if it is not set. -|The GCP project ID. - -|`platform.gcp.defaultMachinePlatform.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for control plane and compute machines. 
If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `<service_account_name>@<project_id>.iam.gserviceaccount.com`. - -|`platform.gcp.defaultMachinePlatform.secureBoot` -|Whether to enable Shielded VM secure boot for all machines in the cluster. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`platform.gcp.defaultMachinePlatform.confidentialCompute` -|Whether to use Confidential VMs for all machines in the cluster. Confidential VMs provide encryption for data during processing. For more information on Confidential computing, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`platform.gcp.defaultMachinePlatform.onHostMaintenance` -|Specifies the behavior of all VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for control plane machine disk encryption. -|The encryption key name. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing` -|For control plane machines, the name of the KMS key ring to which the KMS key belongs. -|The KMS key ring name. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.location` -|For control plane machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -|The GCP location for the key ring. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKey.projectID` -|For control plane machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set. -|The GCP project ID. - -|`controlPlane.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for control plane machines. If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `<service_account_name>@<project_id>.iam.gserviceaccount.com`. - -|`controlPlane.platform.gcp.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). This value applies to control plane machines. -|Any integer between 16 and 65536. - -|`controlPlane.platform.gcp.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for control plane machines. -|Control plane machines must use the `pd-ssd` disk type, which is the default. - -|`controlPlane.platform.gcp.tags` -|Optional. Additional network tags to add to the control plane machines. 
If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.tags` parameter for control plane machines. -|One or more strings, for example `control-plane-tag1`. - -|`controlPlane.platform.gcp.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -|The GCP machine type, for example `n1-standard-4`. - -|`controlPlane.platform.gcp.zones` -|The availability zones where the installation program creates control plane machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`controlPlane.platform.gcp.secureBoot` -|Whether to enable Shielded VM secure boot for control plane machines. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`controlPlane.platform.gcp.confidentialCompute` -|Whether to enable Confidential VMs for control plane machines. Confidential VMs provide encryption for data while it is being processed. For more information on Confidential VMs, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential Computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`controlPlane.platform.gcp.onHostMaintenance` -|Specifies the behavior of control plane VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.name` -|The name of the customer managed encryption key to be used for compute machine disk encryption. -|The encryption key name. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.keyRing` -|For compute machines, the name of the KMS key ring to which the KMS key belongs. -|The KMS key ring name. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.location` -|For compute machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -|The GCP location for the key ring. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKey.projectID` -|For compute machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set. -|The GCP project ID. - -|`compute.platform.gcp.osDisk.encryptionKey.kmsKeyServiceAccount` -|The GCP service account used for the encryption request for compute machines. If this value is not set, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -|The GCP service account email, for example `<service_account_name>@<project_id>.iam.gserviceaccount.com`. - -|`compute.platform.gcp.osDisk.diskSizeGB` -|The size of the disk in gigabytes (GB). This value applies to compute machines. 
-|Any integer between 16 and 65536. - -|`compute.platform.gcp.osDisk.diskType` -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for compute machines. -|Either the default `pd-ssd` or the `pd-standard` disk type. - -|`compute.platform.gcp.tags` -|Optional. Additional network tags to add to the compute machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.tags` parameter for compute machines. -|One or more strings, for example `compute-network-tag1`. - -|`compute.platform.gcp.type` -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for compute machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -|The GCP machine type, for example `n1-standard-4`. - -|`compute.platform.gcp.zones` -|The availability zones where the installation program creates compute machines. -|A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a -link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. - -|`compute.platform.gcp.secureBoot` -|Whether to enable Shielded VM secure boot for compute machines. Shielded VMs have additional security protocols such as secure boot, firmware and integrity monitoring, and rootkit protection. For more information on Shielded VMs, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`compute.platform.gcp.confidentialCompute` -|Whether to enable Confidential VMs for compute machines. Confidential VMs provide encryption for data while it is being processed. For more information on Confidential VMs, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential Computing]. -|`Enabled` or `Disabled`. The default value is `Disabled`. - -|`compute.platform.gcp.onHostMaintenance` -|Specifies the behavior of compute VMs during a host maintenance event, such as a software or hardware update. For Confidential VMs, this parameter must be set to `Terminate`. Confidential VMs do not support live VM migration. -|`Terminate` or `Migrate`. The default value is `Migrate`. - -|==== - -endif::gcp[] -ifdef::ibm-cloud[] -[id="installation-configuration-parameters-additional-ibm-cloud_{context}"] -== Additional IBM Cloud VPC configuration parameters - -Additional IBM Cloud VPC configuration parameters are described in the following table: - -.Additional IBM Cloud VPC parameters -[cols=".^1,.^6a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.ibmcloud.resourceGroupName` -|The name of an existing resource group. -By default, an installer-provisioned VPC and cluster resources are placed in this resource group. When not specified, the installation program creates the resource group for the cluster. -If you are deploying the cluster into an existing VPC, the installer-provisioned cluster resources are placed in this resource group. When not specified, the installation program creates the resource group for the cluster. The VPC resources that you have provisioned must exist in a resource group that you specify using the `networkResourceGroupName` parameter. -In either case, this resource group must only be used for a single cluster installation, as the cluster components assume ownership of all of the resources in the resource group. [^1^] -|String, for example `existing_resource_group`. 
-
-|`platform.ibmcloud.networkResourceGroupName`
-|The name of an existing resource group. This resource group contains the existing VPC and subnets to which the cluster will be deployed. This parameter is required when deploying the cluster to a VPC that you have provisioned.
-|String, for example `existing_network_resource_group`.
-
-|`platform.ibmcloud.dedicatedHosts.profile`
-|The new dedicated host to create. If you specify a value for `platform.ibmcloud.dedicatedHosts.name`, this parameter is not required.
-|Valid IBM Cloud VPC dedicated host profile, such as `cx2-host-152x304`. [^2^]
-
-|`platform.ibmcloud.dedicatedHosts.name`
-|An existing dedicated host. If you specify a value for `platform.ibmcloud.dedicatedHosts.profile`, this parameter is not required.
-|String, for example `my-dedicated-host-name`.
-
-|`platform.ibmcloud.type`
-|The instance type for all IBM Cloud VPC machines.
-|Valid IBM Cloud VPC instance type, such as `bx2-8x32`. [^2^]
-
-|`platform.ibmcloud.vpcName`
-| The name of the existing VPC that you want to deploy your cluster to.
-| String.
-
-|`platform.ibmcloud.controlPlaneSubnets`
-| The names of the existing subnets in your VPC that you want to deploy your control plane machines to. Specify a subnet for each availability zone.
-| String array.
-
-|`platform.ibmcloud.computeSubnets`
-| The names of the existing subnets in your VPC that you want to deploy your compute machines to. Specify a subnet for each availability zone. Subnet IDs are not supported.
-| String array.
-
-|====
-[.small]
---
-1. Whether you define an existing resource group, or if the installer creates one, determines how the resource group is treated when the cluster is uninstalled. If you define a resource group, the installer removes all of the installer-provisioned resources, but leaves the resource group alone; if a resource group is created as part of the installation, the installer removes all of the installer-provisioned resources and the resource group.
-2. To determine which profile best meets your needs, see https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui[Instance Profiles] in the IBM documentation.
---
-endif::ibm-cloud[]
-
-ifdef::ibm-power-vs[]
-[id="installation-configuration-parameters-additional-ibm-cloud_{context}"]
-== Additional {ibmpowerProductName} Virtual Server configuration parameters
-
-Additional {ibmpowerProductName} Virtual Server configuration parameters are described in the following table:
-
-.Additional {ibmpowerProductName} Virtual Server parameters
-[cols=".^1,.^6a,.^3a",options="header"]
-|====
-|Parameter|Description|Values
-
-|`platform.powervs.userID`
-|The UserID is the login for the user's IBM Cloud account.
-|String. For example `existing_user_id`.
-
-|`platform.powervs.powervsResourceGroup`
-|The PowerVSResourceGroup is the resource group in which {ibmpowerProductName} Virtual Server resources are created. If using an existing VPC, the existing VPC and subnets should be in this resource group.
-|String. For example `existing_resource_group`.
-
-|`platform.powervs.region`
-|Specifies the IBM Cloud colo region where the cluster will be created.
-|String. For example `existing_region`.
-
-|`platform.powervs.zone`
-|Specifies the IBM Cloud colo zone where the cluster will be created.
-|String. For example `existing_zone`.
-
-|`platform.powervs.serviceInstanceID`
-|The ServiceInstanceID is the ID of the Power IaaS instance created from the IBM Cloud Catalog.
-|String. For example `existing_service_instance_ID`.
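-
-For reference, a minimal sketch that combines the `platform.powervs` account and location parameters above. The values are the placeholder examples from this table, not real identifiers:
-
-[source,yaml]
-----
-platform:
-  powervs:
-    userID: existing_user_id
-    powervsResourceGroup: existing_resource_group
-    region: existing_region
-    zone: existing_zone
-    serviceInstanceID: existing_service_instance_ID
-----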
-
-|`platform.powervs.vpcRegion`
-|Specifies the IBM Cloud region in which to create VPC resources.
-|String. For example `existing_vpc_region`.
-
-|`platform.powervs.vpcSubnets`
-|Specifies existing subnets (by name) where cluster resources will be created.
-|String. For example `powervs_region_example_subnet`.
-
-|`platform.powervs.vpcName`
-|Specifies the IBM Cloud VPC name.
-|String. For example `existing_vpcName`.
-
-|`platform.powervs.cloudConnectionName`
-|The CloudConnectionName is the name of an existing PowerVS Cloud connection.
-|String. For example `existing_cloudConnectionName`.
-
-|`platform.powervs.clusterOSImage`
-|The ClusterOSImage is a pre-created {ibmpowerProductName} Virtual Server boot image that overrides the default image for cluster nodes.
-|String. For example `existing_cluster_os_image`.
-
-|`platform.powervs.defaultMachinePlatform`
-|The DefaultMachinePlatform is the default configuration used when installing on {ibmpowerProductName} Virtual Server for machine pools that do not define their own platform configuration.
-|String. For example `existing_machine_platform`.
-
-//|`platform.ibmcloud.dedicatedHosts.profile`
-//|The new dedicated host to create. If you specify a value for `platform.ibmcloud.dedicatedHosts.name`, this parameter is not required.
-//|Valid IBM Cloud VPC dedicated host profile, such as `cx2-host-152x304`. [^2^]
-
-//|`platform.ibmcloud.dedicatedHosts.name`
-//|An existing dedicated host. If you specify a value for `platform.ibmcloud.dedicatedHosts.profile`, this parameter is not required.
-//|String, for example `my-dedicated-host-name`.
-
-//|`platform.ibmcloud.type`
-//|The instance type for all IBM Cloud VPC machines.
-//|Valid IBM Cloud VPC instance type, such as `bx2-8x32`. [^2^]
-
-|`platform.powervs.memoryGiB`
-|The size of a virtual machine's memory, in GiB.
-|An integer number of GiB that is at least 2 and no more than 64, depending on the machine type.
-
-|`platform.powervs.procType`
-|The ProcType defines the processor sharing model for the instance.
-|The valid values are `Capped`, `Dedicated`, and `Shared`.
-
-|`platform.powervs.processors`
-|The Processors defines the processing units for the instance.
-|The number of processors must be from 0.5 to 32 cores, in increments of 0.25.
-
-|`platform.powervs.sysType`
-|The SysType defines the system type for the instance.
-|The system type must be either `e980` or `s922`.
-
-|====
-[.small]
---
-1. Whether you define an existing resource group, or if the installer creates one, determines how the resource group is treated when the cluster is uninstalled. If you define a resource group, the installer removes all of the installer-provisioned resources, but leaves the resource group alone; if a resource group is created as part of the installation, the installer removes all of the installer-provisioned resources and the resource group.
-2. To determine which profile best meets your needs, see https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui[Instance Profiles] in the IBM documentation.
--- -endif::ibm-power-vs[] - -ifdef::rhv[] -[id="installation-configuration-parameters-additional-rhv_{context}"] -== Additional {rh-virtualization-first} configuration parameters - -Additional {rh-virtualization} configuration parameters are described in the following table: - -[id="additional-virt-parameters-for-clusters_{context}"] -.Additional {rh-virtualization-first} parameters for clusters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.ovirt.ovirt_cluster_id` -|Required. The Cluster where the VMs will be created. -|String. For example: `68833f9f-e89c-4891-b768-e2ba0815b76b` - -|`platform.ovirt.ovirt_storage_domain_id` -|Required. The Storage Domain ID where the VM disks will be created. -|String. For example: `ed7b0f4e-0e96-492a-8fff-279213ee1468` - -|`platform.ovirt.ovirt_network_name` -|Required. The network name where the VM nics will be created. -|String. For example: `ocpcluster` - -|`platform.ovirt.vnicProfileID` -|Required. The vNIC profile ID of the VM network interfaces. This can be inferred if the cluster network has a single profile. -|String. For example: `3fa86930-0be5-4052-b667-b79f0a729692` - -|`platform.ovirt.api_vips` -|Required. An IP address on the machine network that will be assigned to the API virtual IP (VIP). You can access the OpenShift API at this endpoint. For dual-stack networks, assign up to two IP addresses. The primary IP address must be from the IPv4 network. - -[NOTE] -==== -In {product-title} 4.12 and later, the `api_vip` configuration setting is deprecated. Instead, use a list format to enter a value in the `api_vips` configuration setting. The order of the list indicates the primary and secondary VIP address for each service. -==== - -|String. Example: `10.46.8.230` - -|`platform.ovirt.ingress_vips` -|Required. An IP address on the machine network that will be assigned to the Ingress virtual IP (VIP). For dual-stack networks, assign up to two IP addresses. The primary IP address must be from the IPv4 network. - -[NOTE] -==== -In {product-title} 4.12 and later, the `ingress_vip` configuration setting is deprecated. Instead, use a list format to enter a value in the `ingress_vips` configuration setting. The order of the list indicates the primary and secondary VIP address for each service. -==== - -|String. Example: `10.46.8.232` - -|`platform.ovirt.affinityGroups` -|Optional. A list of affinity groups to create during the installation process. -|List of objects. - -|`platform.ovirt.affinityGroups.description` -|Required if you include `platform.ovirt.affinityGroups`. A description of the affinity group. -|String. Example: `AffinityGroup for spreading each compute machine to a different host` - -|`platform.ovirt.affinityGroups.enforcing` -|Required if you include `platform.ovirt.affinityGroups`. When set to `true`, {rh-virtualization} does not provision any machines if not enough hardware nodes are available. When set to `false`, {rh-virtualization} does provision machines even if not enough hardware nodes are available, resulting in multiple virtual machines being hosted on the same physical machine. - -|String. Example: `true` - -|`platform.ovirt.affinityGroups.name` -|Required if you include `platform.ovirt.affinityGroups`. The name of the affinity group. -|String. Example: `compute` - -|`platform.ovirt.affinityGroups.priority` -|Required if you include `platform.ovirt.affinityGroups`. The priority given to an affinity group when `platform.ovirt.affinityGroups.enforcing = false`. 
{rh-virtualization} applies affinity groups in the order of priority, where a greater number takes precedence over a lesser one. If multiple affinity groups have the same priority, the order in which they are applied is not guaranteed. -|Integer. Example: `3` -|==== - -[id="installation-configuration-parameters-additional-machine_{context}"] -== Additional {rh-virtualization} parameters for machine pools - -Additional {rh-virtualization} configuration parameters for machine pools are described in the following table: - -.Additional {rh-virtualization} parameters for machine pools -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`<machine-pool>.platform.ovirt.cpu` -|Optional. Defines the CPU of the VM. -|Object - -|`<machine-pool>.platform.ovirt.cpu.cores` -|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of cores. Total virtual CPUs (vCPUs) is cores * sockets. -|Integer - -|`<machine-pool>.platform.ovirt.cpu.sockets` -|Required if you use `<machine-pool>.platform.ovirt.cpu`. The number of sockets per core. Total virtual CPUs (vCPUs) is cores * sockets. -|Integer - -|`<machine-pool>.platform.ovirt.memoryMB` -|Optional. Memory of the VM in MiB. -|Integer - -|`<machine-pool>.platform.ovirt.osDisk` -|Optional. Defines the first and bootable disk of the VM. -|String - -|`<machine-pool>.platform.ovirt.osDisk.sizeGB` -|Required if you use `<machine-pool>.platform.ovirt.osDisk`. Size of the disk in GiB. -|Number - -|`<machine-pool>.platform.ovirt.vmType` -|Optional. The VM workload type, such as `high-performance`, `server`, or `desktop`. By default, control plane nodes use `high-performance`, and worker nodes use `server`. For details, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Virtual_Machine_General_settings_explained[Explanation of Settings in the New Virtual Machine and Edit Virtual Machine Windows] and link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Configuring_High_Performance_Virtual_Machines_Templates_and_Pools[Configuring High Performance Virtual Machines, Templates, and Pools] in the _Virtual Machine Management Guide_. -[NOTE] -==== -`high_performance` improves performance on the VM, but there are limitations. For example, you cannot access the VM with a graphical console. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Configuring_High_Performance_Virtual_Machines_Templates_and_Pools[Configuring High Performance Virtual Machines, Templates, and Pools] in the _Virtual Machine Management Guide_. -==== -|String - -|`<machine-pool>.platform.ovirt.affinityGroupsNames` -|Optional. A list of affinity group names that should be applied to the virtual machines. The affinity groups must exist in {rh-virtualization}, or be created during installation as described in _Additional {rh-virtualization} parameters for clusters_ in this topic. This entry can be empty. -// xref:../../installing/installing_rhv/installing-rhv-customizations.adoc#additional-virt-parameters-for-clusters[Additional {rh-virtualization} parameters for clusters]. This entry can be empty. -//xref:../../additional-virt-parameters-for-clusters[Additional {rh-virtualization} parameters for clusters]. This entry can be empty. 
- -.Example with two affinity groups - -This example defines two affinity groups, named `compute` and `clusterWideNonEnforcing`: - -[source,yaml] ----- -<machine-pool>: - platform: - ovirt: - affinityGroupNames: - - compute - - clusterWideNonEnforcing ----- - -This example defines no affinity groups: - -[source,yaml] ----- -<machine-pool>: - platform: - ovirt: - affinityGroupNames: [] ----- -|String -|`<machine-pool>.platform.ovirt.AutoPinningPolicy` -| Optional. AutoPinningPolicy defines the policy to automatically set the CPU and NUMA settings, including pinning to the host for the instance. When the field is omitted, the default is `none`. Supported values: `none`, `resize_and_pin`. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Setting_NUMA_Nodes[Setting NUMA Nodes] in the _Virtual Machine Management Guide_. - -|String -|`<machine-pool>.platform.ovirt.hugepages` -|Optional. Hugepages is the size in KiB for defining hugepages in a VM. Supported values: `2048` or `1048576`. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Configuring_Huge_Pages[Configuring Huge Pages] in the _Virtual Machine Management Guide_. - -|Integer - -|==== - -[NOTE] -==== -You can replace `<machine-pool>` with `controlPlane` or `compute`. -==== - -endif::rhv[] - -ifdef::vsphere[] -[id="installation-configuration-parameters-additional-vsphere_{context}"] -== Additional VMware vSphere configuration parameters - -Additional VMware vSphere configuration parameters are described in the following table: - -.Additional VMware vSphere cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.apiVIPs` -|Virtual IP (VIP) addresses that you configured for control plane API access. -a|Multiple IP addresses - -|`platform.vsphere.diskType` -|Optional. The disk provisioning method. This value defaults to the vSphere default storage policy if not set. -|Valid values are `thin`, `thick`, or `eagerZeroedThick`. - -|`platform.vsphere.failureDomains` -|Establishes the relationships between a region and zone. You define a failure domain by using vCenter objects, such as a `datastore` object. A failure domain defines the vCenter location for {product-title} cluster nodes. -|String - -|`platform.vsphere.failureDomains.topology.networks` -|Lists any network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -|String - -|`platform.vsphere.failureDomains.region` -|If you define multiple failure domains for your cluster, you must attach the tag to each vCenter datacenter. To define a region, use a tag from the `openshift-region` tag category. For a single vSphere datacenter environment, you do not need to attach a tag, but you must enter an alphanumeric value, such as `datacenter`, for the parameter. -|String - -|`platform.vsphere.failureDomains.zone` -|If you define multiple failure domains for your cluster, you must attach the tag to each vCenter cluster. To define a zone, use a tag from the `openshift-zone` tag category. For a single vSphere datacenter environment, you do not need to attach a tag, but you must enter an alphanumeric value, such as `cluster`, for the parameter. -|`String` - -|`platform.vsphere.ingressVIPs` -|Virtual IP (VIP) addresses that you configured for cluster Ingress. 
-|Multiple IP addresses - -|`platform.vsphere` -| Describes your account on the cloud platform that hosts your cluster. You can use the parameter to customize the platform. When providing additional configuration settings for compute and control plane machines in the machine pool, the parameter is optional. You can only specify one vCenter server for your {product-title} cluster. -|String - -|`platform.vsphere.vcenters` -|Lists any fully-qualified hostname or IP address of a vCenter server. -|String - -|`platform.vsphere.vcenters.datacenters` -|Lists and defines the datacenters where {product-title} virtual machines (VMs) operate. The list of datacenters must match the list of datacenters specified in the `failureDomains` field. -|String -|==== - - -[id="deprecated-parameters-vsphere_{context}"] -== Deprecated VMware vSphere configuration parameters - -In {product-title} 4.13, the following vSphere configuration parameters are deprecated. You can continue to use these parameters, but the installation program does not automatically specify these parameters in the `install-config.yaml` file. - -The following table lists each deprecated vSphere configuration parameter: - -.Deprecated VMware vSphere cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.apiVIP` -|The virtual IP (VIP) address that you configured for control plane API access. -a|An IP address, for example `128.0.0.1`. - -[NOTE] -==== -In {product-title} 4.12 and later, the `apiVIP` configuration setting is deprecated. Instead, use a `List` format to enter a value in the `apiVIPs` configuration setting. -==== - -|`platform.vsphere.cluster` -|The vCenter cluster to install the {product-title} cluster in. -|String - -|`platform.vsphere.datacenter` -|Defines the datacenter where {product-title} virtual machines (VMs) operate. -|String - -|`platform.vsphere.defaultDatastore` -|The name of the default datastore to use for provisioning volumes. -|String - -|`platform.vsphere.folder` -|Optional. The absolute path of an existing folder where the installation program creates the virtual machines. If you do not provide this value, the installation program creates a folder that is named with the infrastructure ID in the data center virtual machine folder. -|String, for example, `/<datacenter_name>/vm/<folder_name>/<subfolder_name>`. - -|`platform.vsphere.ingressVIP` -|Virtual IP (VIP) addresses that you configured for cluster Ingress. -a|An IP address, for example `128.0.0.1`. - -[NOTE] -==== -In {product-title} 4.12 and later, the `ingressVIP` configuration setting is deprecated. Instead, use a `List` format to enter a value in the `ingressVIPs` configuration setting. -==== - -|`platform.vsphere.network` -|The network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -|String - -|`platform.vsphere.password` -|The password for the vCenter user name. -|String - -|`platform.vsphere.resourcePool` -|Optional. The absolute path of an existing resource pool where the installation program creates the virtual machines. If you do not specify a value, the installation program installs the resources in the root of the cluster under `/<datacenter_name>/host/<cluster_name>/Resources`. -|String, for example, `/<datacenter_name>/host/<cluster_name>/Resources/<resource_pool_name>/<optional_nested_resource_pool_name>`. - -|`platform.vsphere.username` -|The user name to use to connect to the vCenter instance with. 
This user must have at least -the roles and privileges that are required for -link:https://github.com/vmware-archive/vsphere-storage-for-kubernetes/blob/master/documentation/vcp-roles.md[static or dynamic persistent volume provisioning] -in vSphere. -|String - -|`platform.vsphere.vCenter` -|The fully-qualified hostname or IP address of a vCenter server. -|String -|==== - - -[id="installation-configuration-parameters-optional-vsphere_{context}"] -== Optional VMware vSphere machine pool configuration parameters - -Optional VMware vSphere machine pool configuration parameters are described in the following table: - -.Optional VMware vSphere machine pool parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`platform.vsphere.clusterOSImage` -|The location from which the installation program downloads the {op-system} image. You must set this parameter to perform an installation in a restricted network. -|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, `\https://mirror.openshift.com/images/rhcos-<version>-vmware.<architecture>.ova`. - -|`platform.vsphere.osDisk.diskSizeGB` -|The size of the disk in gigabytes. -|Integer - -|`platform.vsphere.cpus` -|The total number of virtual processor cores to assign a virtual machine. The value of `platform.vsphere.cpus` must be a multiple of `platform.vsphere.coresPerSocket` value. -|Integer - -|`platform.vsphere.coresPerSocket` -|The number of cores per socket in a virtual machine. The number of virtual sockets on the virtual machine is `platform.vsphere.cpus`/`platform.vsphere.coresPerSocket`. The default value for control plane nodes and worker nodes is `4` and `2`, respectively. -|Integer - -|`platform.vsphere.memoryMB` -|The size of a virtual machine's memory in megabytes. -|Integer -|==== -endif::vsphere[] - -ifdef::ash[] -[id="installation-configuration-parameters-additional-azure-stack-hub_{context}"] -== Additional Azure Stack Hub configuration parameters - -Additional Azure configuration parameters are described in the following table: - -.Additional Azure Stack Hub parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`compute.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS`, `premium_LRS`, or `standardSSD_LRS`. The default is `premium_LRS`. - -|`compute.platform.azure.type` -|Defines the azure instance type for compute machines. -|String - -|`controlPlane.platform.azure.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `1024`. - -|`controlPlane.platform.azure.osDisk.diskType` -|Defines the type of disk. -|`premium_LRS` or `standardSSD_LRS`. The default is `premium_LRS`. - -|`controlPlane.platform.azure.type` -|Defines the azure instance type for control plane machines. -|String - -|`platform.azure.defaultMachinePlatform.osDisk.diskSizeGB` -|The Azure disk size for the VM. -|Integer that represents the size of the disk in GB. The default is `128`. - -|`platform.azure.defaultMachinePlatform.osDisk.diskType` -|Defines the type of disk. -|`standard_LRS`, `premium_LRS`, or `standardSSD_LRS`. The default is `premium_LRS`. - -|`platform.azure.defaultMachinePlatform.type` -|The Azure instance type for control plane and compute machines. -|The Azure instance type. 
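-
-For illustration, a hedged sketch of the `defaultMachinePlatform` parameters described above. The instance type shown is a placeholder; use a type that is available in your Azure Stack Hub environment. The disk values are the defaults listed in this table:
-
-[source,yaml]
-----
-platform:
-  azure:
-    defaultMachinePlatform:
-      type: Standard_DS4_v2
-      osDisk:
-        diskSizeGB: 128
-        diskType: premium_LRS
-----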
- -|`platform.azure.armEndpoint` -|The URL of the Azure Resource Manager endpoint that your Azure Stack Hub operator provides. -|String - -|`platform.azure.baseDomainResourceGroupName` -|The name of the resource group that contains the DNS zone for your base domain. -|String, for example `production_cluster`. - -|`platform.azure.region` -|The name of your Azure Stack Hub local region. -|String - -|`platform.azure.resourceGroupName` -|The name of an already existing resource group to install your cluster to. This resource group must be empty and only used for this specific cluster; the cluster components assume ownership of all resources in the resource group. If you limit the service principal scope of the installation program to this resource group, you must ensure all other resources used by the installation program in your environment have the necessary permissions, such as the public DNS zone and virtual network. Destroying the cluster by using the installation program deletes this resource group. -|String, for example `existing_resource_group`. - -|`platform.azure.outboundType` -|The outbound routing strategy used to connect your cluster to the internet. If -you are using user-defined routing, you must have pre-existing networking -available where the outbound routing has already been configured prior to -installing a cluster. The installation program is not responsible for -configuring user-defined routing. -|`LoadBalancer` or `UserDefinedRouting`. The default is `LoadBalancer`. - -|`platform.azure.cloudName` -|The name of the Azure cloud environment that is used to configure the Azure SDK with the appropriate Azure API endpoints. -|`AzureStackCloud` - -|`clusterOSImage` -|The URL of a storage blob in the Azure Stack environment that contains an {op-system} VHD. -|String, for example, \https://vhdsa.blob.example.example.com/vhd/rhcos-410.84.202112040202-0-azurestack.x86_64.vhd - -|==== -endif::ash[] - -ifdef::alibabacloud[] -//From: https://github.com/openshift/installer/blob/master/data/data/install.openshift.io_installconfigs.yaml#L20; https://github.com/openshift/openshift-docs/pull/40651/files#r792388476 - -[id="installation-configuration-parameters-additional-alibaba_{context}"] -== Additional Alibaba Cloud configuration parameters - -Additional Alibaba Cloud configuration parameters are described in the following table. The `alibabacloud` parameters are the configuration used when installing on Alibaba Cloud. The `defaultMachinePlatform` parameters are the default configuration used when installing on Alibaba Cloud for machine pools that do not define their own platform configuration. - -These parameters apply to both compute machines and control plane machines where specified. - -[NOTE] -==== -If defined, the parameters `compute.platform.alibabacloud` and `controlPlane.platform.alibabacloud` will overwrite `platform.alibabacloud.defaultMachinePlatform` settings for compute machines and control plane machines respectively. -==== - -.Optional {alibaba} parameters -[cols=".^2,.^3,.^5a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.alibabacloud.imageID` -|The imageID used to create the ECS instance. ImageID must belong to the same region as the cluster. -|String. - -|`compute.platform.alibabacloud.instanceType` -|InstanceType defines the ECS instance type. Example: `ecs.g6.large` -|String. - -|`compute.platform.alibabacloud.systemDiskCategory` -|Defines the category of the system disk. Examples: `cloud_efficiency`,`cloud_essd` -|String. 
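-
-For illustration, a hedged sketch of a compute machine pool that sets the machine pool parameters in this table, assuming the usual `compute` stanza with a pool named `worker`. The values are the examples from this table:
-
-[source,yaml]
-----
-compute:
-- name: worker
-  platform:
-    alibabacloud:
-      instanceType: ecs.g6.large
-      systemDiskCategory: cloud_essd
-      zones:
-      - cn-hangzhou-h
-      - cn-hangzhou-j
-----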
- -|`compute.platform.alibabacloud.systemDisksize` -|Defines the size of the system disk in gibibytes (GiB). -|Integer. - -|`compute.platform.alibabacloud.zones` -|The list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j` -|String list. - -|`controlPlane.platform.alibabacloud.imageID` -|The imageID used to create the ECS instance. ImageID must belong to the same region as the cluster. -|String. - -|`controlPlane.platform.alibabacloud.instanceType` -|InstanceType defines the ECS instance type. Example: `ecs.g6.xlarge` -|String. - -|`controlPlane.platform.alibabacloud.systemDiskCategory` -|Defines the category of the system disk. Examples: `cloud_efficiency`,`cloud_essd` -|String. - -|`controlPlane.platform.alibabacloud.systemDisksize` -|Defines the size of the system disk in gibibytes (GiB). -|Integer. - -|`controlPlane.platform.alibabacloud.zones` -|The list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j` -|String list. - -|`platform.alibabacloud.region` -|Required. The Alibaba Cloud region where the cluster will be created. -|String. - -|`platform.alibabacloud.resourceGroupID` -|The ID of an already existing resource group where the cluster will be installed. If empty, the installation program will create a new resource group for the cluster. -|String. - -|`platform.alibabacloud.tags` -|Additional keys and values to apply to all Alibaba Cloud resources created for the cluster. -|Object. - -|`platform.alibabacloud.vpcID` -|The ID of an already existing VPC where the cluster should be installed. If empty, the installation program will create a new VPC for the cluster. -|String. - -|`platform.alibabacloud.vswitchIDs` -|The ID list of already existing VSwitches where cluster resources will be created. The existing VSwitches can only be used when also using existing VPC. If empty, the installation program will create new VSwitches for the cluster. -|String list. - -|`platform.alibabacloud.defaultMachinePlatform.imageID` -|For both compute machines and control plane machines, the image ID that should be used to create ECS instance. If set, the image ID should belong to the same region as the cluster. -|String. - -|`platform.alibabacloud.defaultMachinePlatform.instanceType` -|For both compute machines and control plane machines, the ECS instance type used to create the ECS instance. Example: `ecs.g6.xlarge` -|String. - -|`platform.alibabacloud.defaultMachinePlatform.systemDiskCategory` -|For both compute machines and control plane machines, the category of the system disk. Examples: `cloud_efficiency`, `cloud_essd`. -|String, for example "", `cloud_efficiency`, `cloud_essd`. - -|`platform.alibabacloud.defaultMachinePlatform.systemDiskSize` -|For both compute machines and control plane machines, the size of the system disk in gibibytes (GiB). The minimum is `120`. -|Integer. - -|`platform.alibabacloud.defaultMachinePlatform.zones` -|For both compute machines and control plane machines, the list of availability zones that can be used. Examples: `cn-hangzhou-h`, `cn-hangzhou-j` -|String list. - -|`platform.alibabacloud.privateZoneID` -|The ID of an existing private zone into which to add DNS records for the cluster's internal API. An existing private zone can only be used when also using existing VPC. The private zone must be associated with the VPC containing the subnets. Leave the private zone unset to have the installation program create the private zone on your behalf. -|String. 
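-
-For illustration, a hedged sketch of the platform-level parameters for installing into an existing VPC. The region is an example only, and the resource, VPC, vSwitch, and private zone IDs are placeholders:
-
-[source,yaml]
-----
-platform:
-  alibabacloud:
-    region: cn-hangzhou
-    resourceGroupID: rg-example1234567890
-    vpcID: vpc-example1234567890
-    vswitchIDs:
-    - vsw-example-zone-h
-    - vsw-example-zone-j
-    privateZoneID: pvtz-example1234567890
-----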
- -|==== - -endif::alibabacloud[] - -ifdef::nutanix[] -[id="installation-configuration-parameters-additional-vsphere_{context}"] -== Additional Nutanix configuration parameters - -Additional Nutanix configuration parameters are described in the following table: - -.Additional Nutanix cluster parameters -[cols=".^2,.^3a,.^3a",options="header"] -|==== -|Parameter|Description|Values - -|`compute.platform.nutanix.categories.key` -|The name of a prism category key to apply to compute VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -|String - -|`compute.platform.nutanix.categories.value` -|The value of a prism category key-value pair to apply to compute VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central. -|String - -|`compute.platform.nutanix.project.type` -|The type of identifier you use to select a project for compute VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview]. -|`name` or `uuid` - -|`compute.platform.nutanix.project.name` or `compute.platform.nutanix.project.uuid` -|The name or UUID of a project with which compute VMs are associated. This parameter must be accompanied by the `type` parameter. -|String - -|`compute.platform.nutanix.bootType` -|The boot type that the compute machines use. You must use the `Legacy` boot type in {product-title} {product-version}. For more information on boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. -|`Legacy`, `SecureBoot` or `UEFI`. The default is `Legacy`. - -|`controlPlane.platform.nutanix.categories.key` -|The name of a prism category key to apply to control plane VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -|String - -|`controlPlane.platform.nutanix.categories.value` -|The value of a prism category key-value pair to apply to control plane VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central. -|String - -|`controlPlane.platform.nutanix.project.type` -|The type of identifier you use to select a project for control plane VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview]. -|`name` or `uuid` - -|`controlPlane.platform.nutanix.project.name` or `controlPlane.platform.nutanix.project.uuid` -|The name or UUID of a project with which control plane VMs are associated. This parameter must be accompanied by the `type` parameter. 
-|String
-
-|`platform.nutanix.defaultMachinePlatform.categories.key`
-|The name of a prism category key to apply to all VMs. This parameter must be accompanied by the `value` parameter, and both `key` and `value` parameters must exist in Prism Central. For more information on categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management].
-|String
-
-|`platform.nutanix.defaultMachinePlatform.categories.value`
-|The value of a prism category key-value pair to apply to all VMs. This parameter must be accompanied by the `key` parameter, and both `key` and `value` parameters must exist in Prism Central.
-|String
-
-|`platform.nutanix.defaultMachinePlatform.project.type`
-|The type of identifier you use to select a project for all VMs. Projects define logical groups of user roles for managing permissions, networks, and other parameters. For more information on projects, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_9:ssp-app-mgmt-project-env-c.html[Projects Overview].
-|`name` or `uuid`.
-
-|`platform.nutanix.defaultMachinePlatform.project.name` or `platform.nutanix.defaultMachinePlatform.project.uuid`
-|The name or UUID of a project with which all VMs are associated. This parameter must be accompanied by the `type` parameter.
-|String
-
-|`platform.nutanix.defaultMachinePlatform.bootType`
-|The boot type for all machines. You must use the `Legacy` boot type in {product-title} {product-version}. For more information on boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment].
-|`Legacy`, `SecureBoot`, or `UEFI`. The default is `Legacy`.
-
-|`platform.nutanix.apiVIP`
-|The virtual IP (VIP) address that you configured for control plane API access.
-|IP address
-
-|`platform.nutanix.ingressVIP`
-|The virtual IP (VIP) address that you configured for cluster ingress.
-|IP address
-
-|`platform.nutanix.prismCentral.endpoint.address`
-|The Prism Central domain name or IP address.
-|String
-
-|`platform.nutanix.prismCentral.endpoint.port`
-|The port that is used to log in to Prism Central.
-|String
-
-|`platform.nutanix.prismCentral.password`
-|The password for the Prism Central user name.
-|String
-
-|`platform.nutanix.prismCentral.username`
-|The user name that is used to log in to Prism Central.
-|String
-
-|`platform.nutanix.prismElements.endpoint.address`
-|The Prism Element domain name or IP address. [^1^]
-|String
-
-|`platform.nutanix.prismElements.endpoint.port`
-|The port that is used to log in to Prism Element.
-|String
-
-|`platform.nutanix.prismElements.uuid`
-|The universally unique identifier (UUID) for Prism Element.
-|String
-
-|`platform.nutanix.subnetUUIDs`
-|The UUID of the Prism Element network that contains the virtual IP addresses and DNS records that you configured. [^2^]
-|String
-
-|`platform.nutanix.clusterOSImage`
-|Optional: By default, the installation program downloads and installs the {op-system-first} image. If Prism Central does not have internet access, you can override the default behavior by hosting the {op-system} image on any HTTP server and pointing the installation program to the image.
-|An HTTP or HTTPS URL, optionally with a SHA-256 checksum. For example, \http://example.com/images/rhcos-47.83.202103221318-0-nutanix.x86_64.qcow2
-|====
-[.small]
---
-1.
The `prismElements` section holds a list of Prism Elements (clusters). A Prism Element encompasses all of the Nutanix resources, for example virtual machines and subnets, that are used to host the {product-title} cluster. Only a single Prism Element is supported. -2. Only one subnet per {product-title} cluster is supported. --- -endif::nutanix[] - -ifdef::bare[] -:!bare: -endif::bare[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!alibabacloud: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-govcloud: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-cloud: -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-cloud: -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -:!rhv: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp: -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == 
"installing-restricted-networks-ibm-z-kvm"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:!nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!nutanix: -endif::[] -ifeval::["{context}" == "installation-config-parameters-vsphere"] -:!vsphere: -endif::[] -:!platform: diff --git a/modules/installation-configure-proxy.adoc b/modules/installation-configure-proxy.adoc deleted file mode 100644 index 71a1838b8a9b..000000000000 --- a/modules/installation-configure-proxy.adoc +++ /dev/null @@ -1,303 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_aws/installing_aws-customizations.adoc -// * installing/installing_aws/installing_aws-network-customizations.adoc -// * installing/installing_aws/installing_aws-private.adoc -// * installing/installing_aws/installing_aws-vpc.adoc -// * installing/installing_aws/installing_aws-china.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * 
installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * networking/configuring-a-custom-pki.adoc -// * installing/installing-rhv-restricted-network.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws: -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == 
"installing-restricted-networks-installer-provisioned-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:kuryr: -endif::[] - -:_content-type: PROCEDURE -[id="installation-configure-proxy_{context}"] -= Configuring the cluster-wide proxy during installation - -Production environments can deny direct access to the internet and instead have -an HTTP or HTTPS proxy available. You can configure a new {product-title} -cluster to use a proxy by configuring the proxy settings in the -`install-config.yaml` file. - -ifdef::bare-metal[] -[NOTE] -==== -For bare metal installations, if you do not assign node IP addresses from the -range that is specified in the `networking.machineNetwork[].cidr` field in the -`install-config.yaml` file, you must include them in the `proxy.noProxy` field. -==== -endif::bare-metal[] - -ifdef::kuryr[] -[NOTE] -==== -Kuryr installations default to HTTP proxies. -==== -endif::kuryr[] - -.Prerequisites - -ifdef::kuryr[] - -* For Kuryr installations on restricted networks that use the `Proxy` object, the proxy must be able to reply to the router that the cluster uses. To add a static route for the proxy configuration, from a command line as the root user, enter: -+ -[source,terminal] ----- -$ ip route add <cluster_network_cidr> via <installer_subnet_gateway> ----- - -* The restricted subnet must have a gateway that is defined and available to be linked to the `Router` resource that Kuryr creates. - -endif::kuryr[] -* You have an existing `install-config.yaml` file. -// TODO: xref (../../installing/install_config/configuring-firewall.adoc#configuring-firewall) -* You reviewed the sites that your cluster requires access to and determined whether any of them need to bypass the proxy. By default, all cluster egress traffic is proxied, including calls to hosting cloud provider APIs. You added sites to the `Proxy` object's `spec.noProxy` field to bypass the proxy if necessary. -+ -[NOTE] -==== -The `Proxy` object `status.noProxy` field is populated with the values of the `networking.machineNetwork[].cidr`, `networking.clusterNetwork[].cidr`, and `networking.serviceNetwork[]` fields from your installation configuration. - -For installations on Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, and {rh-openstack-first}, the `Proxy` object `status.noProxy` field is also populated with the instance metadata endpoint (`169.254.169.254`). -==== - -.Procedure - -. Edit your `install-config.yaml` file and add the proxy settings. For example: -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: my.domain.com -proxy: - httpProxy: http://<username>:<pswd>@<ip>:<port> <1> - httpsProxy: https://<username>:<pswd>@<ip>:<port> <2> -ifndef::aws[] - noProxy: example.com <3> -endif::aws[] -ifdef::aws[] - noProxy: ec2.<aws_region>.amazonaws.com,elasticloadbalancing.<aws_region>.amazonaws.com,s3.<aws_region>.amazonaws.com <3> -endif::aws[] -additionalTrustBundle: | <4> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> <5> ----- -<1> A proxy URL to use for creating HTTP connections outside the cluster. The -URL scheme must be `http`. -<2> A proxy URL to use for creating HTTPS connections outside the cluster. -<3> A comma-separated list of destination domain names, IP addresses, or other network CIDRs to exclude from proxying. 
Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass the proxy for all destinations. -ifdef::aws[] -If you have added the Amazon `EC2`,`Elastic Load Balancing`, and `S3` VPC endpoints to your VPC, you must add these endpoints to the `noProxy` field. -endif::aws[] -ifdef::vsphere[] -You must include vCenter's IP address and the IP range that you use for its machines. -endif::vsphere[] -<4> If provided, the installation program generates a config map that is named `user-ca-bundle` in -the `openshift-config` namespace to hold the additional CA -certificates. If you provide `additionalTrustBundle` and at least one proxy setting, the `Proxy` object is configured to reference the `user-ca-bundle` config map in the `trustedCA` field. The Cluster Network -Operator then creates a `trusted-ca-bundle` config map that merges the contents specified for the `trustedCA` parameter -with the {op-system} trust bundle. The `additionalTrustBundle` field is required unless -the proxy's identity certificate is signed by an authority from the {op-system} trust -bundle. -<5> Optional: The policy to determine the configuration of the `Proxy` object to reference the `user-ca-bundle` config map in the `trustedCA` field. The allowed values are `Proxyonly` and `Always`. Use `Proxyonly` to reference the `user-ca-bundle` config map only when `http/https` proxy is configured. Use `Always` to always reference the `user-ca-bundle` config map. The default value is `Proxyonly`. -+ -[NOTE] -==== -The installation program does not support the proxy `readinessEndpoints` field. -==== -+ -[NOTE] -==== -If the installer times out, restart and then complete the deployment by using the `wait-for` command of the installer. For example: - -[source,terminal] ----- -$ ./openshift-install wait-for install-complete --log-level debug ----- -==== - -. Save the file and reference it when installing {product-title}. - -The installation program creates a cluster-wide proxy that is named `cluster` that uses the proxy -settings in the provided `install-config.yaml` file. If no proxy settings are -provided, a `cluster` `Proxy` object is still created, but it will have a nil -`spec`. - -[NOTE] -==== -Only the `Proxy` object named `cluster` is supported, and no additional -proxies can be created. 
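-
-For reference, an illustrative sketch of the `cluster` `Proxy` object that results when proxy settings and an additional trust bundle are provided. The endpoint values mirror the placeholders used earlier in this procedure:
-
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: Proxy
-metadata:
-  name: cluster
-spec:
-  httpProxy: http://<username>:<pswd>@<ip>:<port>
-  httpsProxy: https://<username>:<pswd>@<ip>:<port>
-  noProxy: example.com
-  trustedCA:
-    name: user-ca-bundle
-----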
-==== - -ifeval::["{context}" == "installing-aws-china-region"] -:!aws: -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!kuryr: -endif::[] diff --git a/modules/installation-create-ingress-dns-records.adoc b/modules/installation-create-ingress-dns-records.adoc deleted file mode 100644 index 552e814f512e..000000000000 --- a/modules/installation-create-ingress-dns-records.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-create-ingress-dns-records_{context}"] -= Creating the Ingress DNS Records - -If you removed the DNS Zone configuration, manually create DNS records that point to the Ingress load balancer. -You can create either a wildcard record or specific records. While the following procedure uses A records, you can use other record types that you require, such as CNAME or alias. - -.Prerequisites - -* You deployed an {product-title} cluster on Amazon Web Services (AWS) that uses infrastructure that you provisioned. -* You installed the OpenShift CLI (`oc`). -* You installed the `jq` package. -* You downloaded the AWS CLI and installed it on your computer. See -link:https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html[Install the AWS CLI Using the Bundled Installer (Linux, macOS, or Unix)]. - -.Procedure - -. Determine the routes to create. -** To create a wildcard record, use `*.apps.<cluster_name>.<domain_name>`, where `<cluster_name>` is your cluster name, and `<domain_name>` is the Route 53 base domain for your {product-title} cluster. 
-** To create specific records, you must create a record for each route that your cluster uses, as shown in the output of the following command: -+ -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- -+ -.Example output -[source,terminal] ----- -oauth-openshift.apps.<cluster_name>.<domain_name> -console-openshift-console.apps.<cluster_name>.<domain_name> -downloads-openshift-console.apps.<cluster_name>.<domain_name> -alertmanager-main-openshift-monitoring.apps.<cluster_name>.<domain_name> -prometheus-k8s-openshift-monitoring.apps.<cluster_name>.<domain_name> ----- - -. Retrieve the Ingress Operator load balancer status and note the value of the external IP address that it uses, which is shown in the `EXTERNAL-IP` column: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.62.215 ab3...28.us-east-2.elb.amazonaws.com 80:31499/TCP,443:30693/TCP 5m ----- - -. Locate the hosted zone ID for the load balancer: -+ -[source,terminal] ----- -$ aws elb describe-load-balancers | jq -r '.LoadBalancerDescriptions[] | select(.DNSName == "<external_ip>").CanonicalHostedZoneNameID' <1> ----- -<1> For `<external_ip>`, specify the value of the external IP address of the Ingress Operator load balancer that you obtained. -+ -.Example output -[source,terminal] ----- -Z3AADJGX6KTTL2 ----- - -+ -The output of this command is the load balancer hosted zone ID. - -. Obtain the public hosted zone ID for your cluster's domain: -+ -[source,terminal] ----- -$ aws route53 list-hosted-zones-by-name \ - --dns-name "<domain_name>" \ <1> - --query 'HostedZones[? Config.PrivateZone != `true` && Name == `<domain_name>.`].Id' <1> - --output text ----- -<1> For `<domain_name>`, specify the Route 53 base domain for your {product-title} cluster. -+ -.Example output -[source,terminal] ----- -/hostedzone/Z3URY6TWQ91KVV ----- -+ -The public hosted zone ID for your domain is shown in the command output. In this example, it is `Z3URY6TWQ91KVV`. - -. Add the alias records to your private zone: -+ -[source,terminal] ----- -$ aws route53 change-resource-record-sets --hosted-zone-id "<private_hosted_zone_id>" --change-batch '{ <1> -> "Changes": [ -> { -> "Action": "CREATE", -> "ResourceRecordSet": { -> "Name": "\\052.apps.<cluster_domain>", <2> -> "Type": "A", -> "AliasTarget":{ -> "HostedZoneId": "<hosted_zone_id>", <3> -> "DNSName": "<external_ip>.", <4> -> "EvaluateTargetHealth": false -> } -> } -> } -> ] -> }' ----- -<1> For `<private_hosted_zone_id>`, specify the value from the output of the CloudFormation template for DNS and load balancing. -<2> For `<cluster_domain>`, specify the domain or subdomain that you use with your {product-title} cluster. -<3> For `<hosted_zone_id>`, specify the public hosted zone ID for the load balancer that you obtained. -<4> For `<external_ip>`, specify the value of the external IP address of the Ingress Operator load balancer. Ensure that you include the trailing period (`.`) in this parameter value. - -. 
Add the records to your public zone: -+ -[source,terminal] ----- -$ aws route53 change-resource-record-sets --hosted-zone-id "<public_hosted_zone_id>"" --change-batch '{ <1> -> "Changes": [ -> { -> "Action": "CREATE", -> "ResourceRecordSet": { -> "Name": "\\052.apps.<cluster_domain>", <2> -> "Type": "A", -> "AliasTarget":{ -> "HostedZoneId": "<hosted_zone_id>", <3> -> "DNSName": "<external_ip>.", <4> -> "EvaluateTargetHealth": false -> } -> } -> } -> ] -> }' ----- -<1> For `<public_hosted_zone_id>`, specify the public hosted zone for your domain. -<2> For `<cluster_domain>`, specify the domain or subdomain that you use with your {product-title} cluster. -<3> For `<hosted_zone_id>`, specify the public hosted zone ID for the load balancer that you obtained. -<4> For `<external_ip>`, specify the value of the external IP address of the Ingress Operator load balancer. Ensure that you include the trailing period (`.`) in this parameter value. diff --git a/modules/installation-creating-aws-bootstrap.adoc b/modules/installation-creating-aws-bootstrap.adoc deleted file mode 100644 index d6765d2a70af..000000000000 --- a/modules/installation-creating-aws-bootstrap.adoc +++ /dev/null @@ -1,217 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-bootstrap_{context}"] -= Creating the bootstrap node in AWS - -You must create the bootstrap node in Amazon Web Services (AWS) to use during {product-title} cluster initialization. You do this by: - -* Providing a location to serve the `bootstrap.ign` Ignition config file to your cluster. This file is located in your installation directory. The provided CloudFormation Template assumes that the Ignition config files for your cluster are served from an S3 bucket. If you choose to serve the files from another location, you must modify the templates. -* Using the provided CloudFormation template and a custom parameter file to create a stack of AWS resources. The stack represents the bootstrap node that your {product-title} installation requires. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your bootstrap -node, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. - -.Procedure - -. Create the bucket by running the following command: -+ -[source,terminal] ----- -$ aws s3 mb s3://<cluster-name>-infra <1> ----- -<1> `<cluster-name>-infra` is the bucket name. When creating the `install-config.yaml` file, replace `<cluster-name>` with the name specified for the cluster. -+ -You must use a presigned URL for your S3 bucket, instead of the `s3://` schema, if you are: -** Deploying to a region that has endpoints that differ from the AWS SDK. -** Deploying a proxy. -** Providing your own custom endpoints. - -. 
Upload the `bootstrap.ign` Ignition config file to the bucket by running the following command: -+ -[source,terminal] ----- -$ aws s3 cp <installation_directory>/bootstrap.ign s3://<cluster-name>-infra/bootstrap.ign <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. - -. Verify that the file uploaded by running the following command: -+ -[source,terminal] ----- -$ aws s3 ls s3://<cluster-name>-infra/ ----- -+ -.Example output -[source,terminal] ----- -2019-04-03 16:15:16 314878 bootstrap.ign ----- -+ -[NOTE] -==== -The bootstrap Ignition config file does contain secrets, like X.509 keys. The following steps provide basic security for the S3 bucket. To provide additional security, you can enable an S3 bucket policy to allow only certain users, such as the OpenShift IAM user, to access objects that the bucket contains. You can avoid S3 entirely and serve your bootstrap Ignition config file from any address that the bootstrap machine can reach. -==== - -. Create a JSON file that contains the parameter values that the template requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "InfrastructureName", <1> - "ParameterValue": "mycluster-<random_string>" <2> - }, - { - "ParameterKey": "RhcosAmi", <3> - "ParameterValue": "ami-<random_string>" <4> - }, - { - "ParameterKey": "AllowedBootstrapSshCidr", <5> - "ParameterValue": "0.0.0.0/0" <6> - }, - { - "ParameterKey": "PublicSubnet", <7> - "ParameterValue": "subnet-<random_string>" <8> - }, - { - "ParameterKey": "MasterSecurityGroupId", <9> - "ParameterValue": "sg-<random_string>" <10> - }, - { - "ParameterKey": "VpcId", <11> - "ParameterValue": "vpc-<random_string>" <12> - }, - { - "ParameterKey": "BootstrapIgnitionLocation", <13> - "ParameterValue": "s3://<bucket_name>/bootstrap.ign" <14> - }, - { - "ParameterKey": "AutoRegisterELB", <15> - "ParameterValue": "yes" <16> - }, - { - "ParameterKey": "RegisterNlbIpTargetsLambdaArn", <17> - "ParameterValue": "arn:aws:lambda:<aws_region>:<account_number>:function:<dns_stack_name>-RegisterNlbIpTargets-<random_string>" <18> - }, - { - "ParameterKey": "ExternalApiTargetGroupArn", <19> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Exter-<random_string>" <20> - }, - { - "ParameterKey": "InternalApiTargetGroupArn", <21> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Inter-<random_string>" <22> - }, - { - "ParameterKey": "InternalServiceTargetGroupArn", <23> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Inter-<random_string>" <24> - } -] - ----- -<1> The name for your cluster infrastructure that is encoded in your Ignition -config files for the cluster. -<2> Specify the infrastructure name that you extracted from the Ignition config -file metadata, which has the format `<cluster-name>-<random-string>`. -<3> Current {op-system-first} AMI to use for the bootstrap node based on your selected architecture. -<4> Specify a valid `AWS::EC2::Image::Id` value. -<5> CIDR block to allow SSH access to the bootstrap node. -<6> Specify a CIDR block in the format `x.x.x.x/16-24`. -<7> The public subnet that is associated with your VPC to launch the bootstrap -node into. -<8> Specify the `PublicSubnetIds` value from the output of the CloudFormation -template for the VPC. 
-<9> The master security group ID (for registering temporary rules) -<10> Specify the `MasterSecurityGroupId` value from the output of the -CloudFormation template for the security group and roles. -<11> The VPC created resources will belong to. -<12> Specify the `VpcId` value from the output of the CloudFormation template -for the VPC. -<13> Location to fetch bootstrap Ignition config file from. -<14> Specify the S3 bucket and file name in the form -`s3://<bucket_name>/bootstrap.ign`. -<15> Whether or not to register a network load balancer (NLB). -<16> Specify `yes` or `no`. If you specify `yes`, you must provide a Lambda -Amazon Resource Name (ARN) value. -<17> The ARN for NLB IP target registration lambda group. -<18> Specify the `RegisterNlbIpTargetsLambda` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. -<19> The ARN for external API load balancer target group. -<20> Specify the `ExternalApiTargetGroupArn` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. -<21> The ARN for internal API load balancer target group. -<22> Specify the `InternalApiTargetGroupArn` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. -<23> The ARN for internal service load balancer target group. -<24> Specify the `InternalServiceTargetGroupArn` value from the output of the -CloudFormation template for DNS and load balancing. Use `arn:aws-us-gov` if -deploying the cluster to an AWS GovCloud region. - -. Copy the template from the *CloudFormation template for the bootstrap machine* -section of this topic and save it as a YAML file on your computer. This template -describes the bootstrap machine that your cluster requires. - -. Optional: If you are deploying the cluster with a proxy, you must update the ignition in the template to add the `ignition.config.proxy` fields. Additionally, If you have added the Amazon EC2, Elastic Load Balancing, and S3 VPC endpoints to your VPC, you must add these endpoints to the `noProxy` field. - -. Launch the CloudFormation template to create a stack of AWS resources that represent the bootstrap node: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml <2> - --parameters file://<parameters>.json <3> - --capabilities CAPABILITY_NAMED_IAM <4> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-bootstrap`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -<4> You must explicitly declare the `CAPABILITY_NAMED_IAM` capability because the provided template creates some `AWS::IAM::Role` and `AWS::IAM::InstanceProfile` resources. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-bootstrap/12944486-2add-11eb-9dee-12dace8e3a83 ----- - -. 
Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- -+ -After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values -for the following parameters. You must provide these parameter values to -the other CloudFormation templates that you run to create your cluster: -[horizontal] -`BootstrapInstanceId`:: The bootstrap Instance ID. -`BootstrapPublicIp`:: The bootstrap node public IP address. -`BootstrapPrivateIp`:: The bootstrap node private IP address. diff --git a/modules/installation-creating-aws-control-plane.adoc b/modules/installation-creating-aws-control-plane.adoc deleted file mode 100644 index ed6e2b23df4d..000000000000 --- a/modules/installation-creating-aws-control-plane.adoc +++ /dev/null @@ -1,222 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-control-plane_{context}"] -= Creating the control plane machines in AWS - -You must create the control plane machines in Amazon Web Services (AWS) that your cluster will use. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources that represent the control plane nodes. - -[IMPORTANT] -==== -The CloudFormation template creates a stack that represents three control plane nodes. -==== - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your control plane -nodes, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. -* You created the bootstrap machine. - -.Procedure - -. 
Create a JSON file that contains the parameter values that the template -requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "InfrastructureName", <1> - "ParameterValue": "mycluster-<random_string>" <2> - }, - { - "ParameterKey": "RhcosAmi", <3> - "ParameterValue": "ami-<random_string>" <4> - }, - { - "ParameterKey": "AutoRegisterDNS", <5> - "ParameterValue": "yes" <6> - }, - { - "ParameterKey": "PrivateHostedZoneId", <7> - "ParameterValue": "<random_string>" <8> - }, - { - "ParameterKey": "PrivateHostedZoneName", <9> - "ParameterValue": "mycluster.example.com" <10> - }, - { - "ParameterKey": "Master0Subnet", <11> - "ParameterValue": "subnet-<random_string>" <12> - }, - { - "ParameterKey": "Master1Subnet", <11> - "ParameterValue": "subnet-<random_string>" <12> - }, - { - "ParameterKey": "Master2Subnet", <11> - "ParameterValue": "subnet-<random_string>" <12> - }, - { - "ParameterKey": "MasterSecurityGroupId", <13> - "ParameterValue": "sg-<random_string>" <14> - }, - { - "ParameterKey": "IgnitionLocation", <15> - "ParameterValue": "https://api-int.<cluster_name>.<domain_name>:22623/config/master" <16> - }, - { - "ParameterKey": "CertificateAuthorities", <17> - "ParameterValue": "data:text/plain;charset=utf-8;base64,ABC...xYz==" <18> - }, - { - "ParameterKey": "MasterInstanceProfileName", <19> - "ParameterValue": "<roles_stack>-MasterInstanceProfile-<random_string>" <20> - }, - { - "ParameterKey": "MasterInstanceType", <21> - "ParameterValue": "" <22> - }, - { - "ParameterKey": "AutoRegisterELB", <23> - "ParameterValue": "yes" <24> - }, - { - "ParameterKey": "RegisterNlbIpTargetsLambdaArn", <25> - "ParameterValue": "arn:aws:lambda:<aws_region>:<account_number>:function:<dns_stack_name>-RegisterNlbIpTargets-<random_string>" <26> - }, - { - "ParameterKey": "ExternalApiTargetGroupArn", <27> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Exter-<random_string>" <28> - }, - { - "ParameterKey": "InternalApiTargetGroupArn", <29> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Inter-<random_string>" <30> - }, - { - "ParameterKey": "InternalServiceTargetGroupArn", <31> - "ParameterValue": "arn:aws:elasticloadbalancing:<aws_region>:<account_number>:targetgroup/<dns_stack_name>-Inter-<random_string>" <32> - } -] ----- -<1> The name for your cluster infrastructure that is encoded in your Ignition -config files for the cluster. -<2> Specify the infrastructure name that you extracted from the Ignition config -file metadata, which has the format `<cluster-name>-<random-string>`. -<3> Current {op-system-first} AMI to use for the control plane machines based on your selected architecture. -<4> Specify an `AWS::EC2::Image::Id` value. -<5> Whether or not to perform DNS etcd registration. -<6> Specify `yes` or `no`. If you specify `yes`, you must provide hosted zone -information. -<7> The Route 53 private zone ID to register the etcd targets with. -<8> Specify the `PrivateHostedZoneId` value from the output of the -CloudFormation template for DNS and load balancing. -<9> The Route 53 zone to register the targets with. -<10> Specify `<cluster_name>.<domain_name>` where `<domain_name>` is the Route 53 -base domain that you used when you generated `install-config.yaml` file for the -cluster. Do not include the trailing period (.) that is -displayed in the AWS console. -<11> A subnet, preferably private, to launch the control plane machines on. 
-<12> Specify a subnet from the `PrivateSubnets` value from the output of the -CloudFormation template for DNS and load balancing. -<13> The master security group ID to associate with control plane nodes. -<14> Specify the `MasterSecurityGroupId` value from the output of the -CloudFormation template for the security group and roles. -<15> The location to fetch control plane Ignition config file from. -<16> Specify the generated Ignition config file location, -`https://api-int.<cluster_name>.<domain_name>:22623/config/master`. -<17> The base64 encoded certificate authority string to use. -<18> Specify the value from the `master.ign` file that is in the installation -directory. This value is the long string with the format -`data:text/plain;charset=utf-8;base64,ABC...xYz==`. -<19> The IAM profile to associate with control plane nodes. -<20> Specify the `MasterInstanceProfile` parameter value from the output of -the CloudFormation template for the security group and roles. -<21> The type of AWS instance to use for the control plane machines based on your selected architecture. -<22> The instance type value corresponds to the minimum resource requirements for -control plane machines. For example `m6i.xlarge` is a type for AMD64 -ifndef::openshift-origin[] -and `m6g.xlarge` is a type for ARM64. -endif::openshift-origin[] -<23> Whether or not to register a network load balancer (NLB). -<24> Specify `yes` or `no`. If you specify `yes`, you must provide a Lambda -Amazon Resource Name (ARN) value. -<25> The ARN for NLB IP target registration lambda group. -<26> Specify the `RegisterNlbIpTargetsLambda` value from the output of the CloudFormation template for DNS -and load balancing. Use `arn:aws-us-gov` if deploying the cluster to an AWS -GovCloud region. -<27> The ARN for external API load balancer target group. -<28> Specify the `ExternalApiTargetGroupArn` value from the output of the CloudFormation template for DNS -and load balancing. Use `arn:aws-us-gov` if deploying the cluster to an AWS -GovCloud region. -<29> The ARN for internal API load balancer target group. -<30> Specify the `InternalApiTargetGroupArn` value from the output of the CloudFormation template for DNS -and load balancing. Use `arn:aws-us-gov` if deploying the cluster to an AWS -GovCloud region. -<31> The ARN for internal service load balancer target group. -<32> Specify the `InternalServiceTargetGroupArn` value from the output of the CloudFormation template for DNS -and load balancing. Use `arn:aws-us-gov` if deploying the cluster to an AWS -GovCloud region. - -. Copy the template from the *CloudFormation template for control plane machines* -section of this topic and save it as a YAML file on your computer. This template -describes the control plane machines that your cluster requires. - -. If you specified an `m5` instance type as the value for `MasterInstanceType`, -add that instance type to the `MasterInstanceType.AllowedValues` parameter -in the CloudFormation template. - -. Launch the CloudFormation template to create a stack of AWS resources that represent the control plane nodes: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml <2> - --parameters file://<parameters>.json <3> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-control-plane`. -You need the name of this stack if you remove the cluster. 
-<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-control-plane/21c7e2b0-2ee2-11eb-c6f6-0aa34627df4b ----- -+ -[NOTE] -==== -The CloudFormation template creates a stack that represents three control plane nodes. -==== - -. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- diff --git a/modules/installation-creating-aws-dns.adoc b/modules/installation-creating-aws-dns.adoc deleted file mode 100644 index 3c1efca6f7f3..000000000000 --- a/modules/installation-creating-aws-dns.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-dns_{context}"] -= Creating networking and load balancing components in AWS - -You must configure networking and classic or network load balancing in Amazon Web Services (AWS) that your {product-title} cluster can use. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources. The stack represents the networking and load balancing components that your {product-title} cluster requires. The template also creates a hosted zone and subnet tags. - -You can run the template multiple times within a single Virtual Private Cloud (VPC). - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your AWS -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. - -.Procedure - -. Obtain the hosted zone ID for the Route 53 base domain that you specified in the -`install-config.yaml` file for your cluster. You can obtain details about your hosted zone by running the following command: -+ -[source,terminal] ----- -$ aws route53 list-hosted-zones-by-name --dns-name <route53_domain> <1> ----- -<1> For the `<route53_domain>`, specify the Route 53 base domain that you used -when you generated the `install-config.yaml` file for the cluster. -+ -.Example output -[source,terminal] ----- -mycluster.example.com. False 100 -HOSTEDZONES 65F8F38E-2268-B835-E15C-AB55336FCBFA /hostedzone/Z21IXYZABCZ2A4 mycluster.example.com. 10 ----- -+ -In the example output, the hosted zone ID is `Z21IXYZABCZ2A4`. - -. 
Create a JSON file that contains the parameter values that the template -requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "ClusterName", <1> - "ParameterValue": "mycluster" <2> - }, - { - "ParameterKey": "InfrastructureName", <3> - "ParameterValue": "mycluster-<random_string>" <4> - }, - { - "ParameterKey": "HostedZoneId", <5> - "ParameterValue": "<random_string>" <6> - }, - { - "ParameterKey": "HostedZoneName", <7> - "ParameterValue": "example.com" <8> - }, - { - "ParameterKey": "PublicSubnets", <9> - "ParameterValue": "subnet-<random_string>" <10> - }, - { - "ParameterKey": "PrivateSubnets", <11> - "ParameterValue": "subnet-<random_string>" <12> - }, - { - "ParameterKey": "VpcId", <13> - "ParameterValue": "vpc-<random_string>" <14> - } -] ----- -<1> A short, representative cluster name to use for hostnames, etc. -<2> Specify the cluster name that you used when you generated the -`install-config.yaml` file for the cluster. -<3> The name for your cluster infrastructure that is encoded in your Ignition -config files for the cluster. -<4> Specify the infrastructure name that you extracted from the Ignition config -file metadata, which has the format `<cluster-name>-<random-string>`. -<5> The Route 53 public zone ID to register the targets with. -<6> Specify the Route 53 public zone ID, which as a format similar to -`Z21IXYZABCZ2A4`. You can obtain this value from the AWS console. -<7> The Route 53 zone to register the targets with. -<8> Specify the Route 53 base domain that you used when you generated the -`install-config.yaml` file for the cluster. Do not include the trailing period -(.) that is displayed in the AWS console. -<9> The public subnets that you created for your VPC. -<10> Specify the `PublicSubnetIds` value from the output of the CloudFormation -template for the VPC. -<11> The private subnets that you created for your VPC. -<12> Specify the `PrivateSubnetIds` value from the output of the CloudFormation -template for the VPC. -<13> The VPC that you created for the cluster. -<14> Specify the `VpcId` value from the output of the CloudFormation template -for the VPC. - -. Copy the template from the *CloudFormation template for the network and load balancers* -section of this topic and save it as a YAML file on your computer. This template -describes the networking and load balancing objects that your cluster requires. -+ -[IMPORTANT] -==== -If you are deploying your cluster to an AWS government or secret region, you must update the `InternalApiServerRecord` in the CloudFormation template to use `CNAME` records. Records of type `ALIAS` are not supported for AWS government regions. -==== - -. Launch the CloudFormation template to create a stack of AWS resources that provide the networking and load balancing components: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml <2> - --parameters file://<parameters>.json <3> - --capabilities CAPABILITY_NAMED_IAM <4> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-dns`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. 
-<4> You must explicitly declare the `CAPABILITY_NAMED_IAM` capability because the provided template creates some `AWS::IAM::Role` resources. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-dns/cd3e5de0-2fd4-11eb-5cf0-12be5c33a183 ----- - -. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- -+ -After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values -for the following parameters. You must provide these parameter values to -the other CloudFormation templates that you run to create your cluster: -[horizontal] -`PrivateHostedZoneId`:: Hosted zone ID for the private DNS. -`ExternalApiLoadBalancerName`:: Full name of the external API load balancer. -`InternalApiLoadBalancerName`:: Full name of the internal API load balancer. -`ApiServerDnsName`:: Full hostname of the API server. -`RegisterNlbIpTargetsLambda`:: Lambda ARN useful to help register/deregister IP -targets for these load balancers. -`ExternalApiTargetGroupArn`:: ARN of external API target group. -`InternalApiTargetGroupArn`:: ARN of internal API target group. -`InternalServiceTargetGroupArn`:: ARN of internal service target group. diff --git a/modules/installation-creating-aws-security.adoc b/modules/installation-creating-aws-security.adoc deleted file mode 100644 index 64dc0c527619..000000000000 --- a/modules/installation-creating-aws-security.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-security_{context}"] -= Creating security group and roles in AWS - -You must create security groups and roles in Amazon Web Services (AWS) for your {product-title} cluster to use. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources. The stack represents the security groups and roles that your {product-title} cluster requires. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your AWS -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. - -.Procedure - -. Create a JSON file that contains the parameter values that the template -requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "InfrastructureName", <1> - "ParameterValue": "mycluster-<random_string>" <2> - }, - { - "ParameterKey": "VpcCidr", <3> - "ParameterValue": "10.0.0.0/16" <4> - }, - { - "ParameterKey": "PrivateSubnets", <5> - "ParameterValue": "subnet-<random_string>" <6> - }, - { - "ParameterKey": "VpcId", <7> - "ParameterValue": "vpc-<random_string>" <8> - } -] ----- -<1> The name for your cluster infrastructure that is encoded in your Ignition -config files for the cluster. -<2> Specify the infrastructure name that you extracted from the Ignition config -file metadata, which has the format `<cluster-name>-<random-string>`. 
-<3> The CIDR block for the VPC. -<4> Specify the CIDR block parameter that you used for the VPC that you defined -in the form `x.x.x.x/16-24`. -<5> The private subnets that you created for your VPC. -<6> Specify the `PrivateSubnetIds` value from the output of the CloudFormation -template for the VPC. -<7> The VPC that you created for the cluster. -<8> Specify the `VpcId` value from the output of the CloudFormation template for -the VPC. - -. Copy the template from the *CloudFormation template for security objects* -section of this topic and save it as a YAML file on your computer. This template -describes the security groups and roles that your cluster requires. - -. Launch the CloudFormation template to create a stack of AWS resources that represent the security groups and roles: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml <2> - --parameters file://<parameters>.json <3> - --capabilities CAPABILITY_NAMED_IAM <4> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-sec`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -<4> You must explicitly declare the `CAPABILITY_NAMED_IAM` capability because the provided template creates some `AWS::IAM::Role` and `AWS::IAM::InstanceProfile` resources. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-sec/03bd4210-2ed7-11eb-6d7a-13fc0b61e9db ----- - -. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- -+ -After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values -for the following parameters. You must provide these parameter values to -the other CloudFormation templates that you run to create your cluster: -[horizontal] -`MasterSecurityGroupId`:: Master Security Group ID -`WorkerSecurityGroupId`:: Worker Security Group ID -`MasterInstanceProfile`:: Master IAM Instance Profile -`WorkerInstanceProfile`:: Worker IAM Instance Profile diff --git a/modules/installation-creating-aws-subnet-localzone.adoc b/modules/installation-creating-aws-subnet-localzone.adoc deleted file mode 100644 index 9546ed8e3d8d..000000000000 --- a/modules/installation-creating-aws-subnet-localzone.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-subnet-localzone_{context}"] -= Creating a subnet in AWS Local Zones - -You must create a subnet in AWS Local Zones before you configure a worker machineset for your {product-title} cluster. - -You must repeat the following process for each Local Zone you want to deploy worker nodes to. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources that represent the subnet. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your AWS -infrastructure, you must review the provided information and manually create -the infrastructure. 
If your cluster does not initialize correctly, you might
-have to contact Red Hat support with your installation logs.
-====
-
-.Prerequisites
-
-* You configured an AWS account.
-* You added your AWS keys and region to your local AWS profile by running `aws configure`.
-* You opted in to the Local Zone group.
-
-.Procedure
-
-. Create a JSON file that contains the parameter values that the template
-requires:
-+
-[source,json]
-----
-[
-  {
-    "ParameterKey": "VpcId",
-    "ParameterValue": "<value_of_VpcId>" <1>
-  },
-  {
-    "ParameterKey": "PublicRouteTableId",
-    "ParameterValue": "<value_of_PublicRouteTableId>" <2>
-  },
-  {
-    "ParameterKey": "ZoneName",
-    "ParameterValue": "<value_of_ZoneName>" <3>
-  },
-  {
-    "ParameterKey": "SubnetName",
-    "ParameterValue": "<value_of_SubnetName>"
-  },
-  {
-    "ParameterKey": "PublicSubnetCidr",
-    "ParameterValue": "10.0.192.0/20" <4>
-  }
-]
-----
-<1> Specify the VPC ID, which is the `VpcId` value in the output of the CloudFormation template for the VPC. You can also query this value directly with the AWS CLI, as shown in the example at the end of this procedure.
-<2> Specify the route table ID, which is the `PublicRouteTableId` value in the output of the CloudFormation template for the VPC.
-<3> Specify the AWS Local Zone name, which is the value of the `ZoneName` field in the `AvailabilityZones` object that you retrieve in the section "Opting into AWS Local Zones".
-<4> Specify a CIDR block that is used to create the Local Zone subnet. This block must be part of the VPC CIDR block `VpcCidr`.
-
-. Copy the template from the *CloudFormation template for the subnet*
-section of this topic and save it as a YAML file on your computer. This template
-describes the subnet that your cluster requires.
-
-. Launch the CloudFormation template to create a stack of AWS resources that represent the subnet by running the following command:
-+
-[IMPORTANT]
-====
-You must enter the command on a single line.
-====
-+
-[source,terminal]
-----
-$ aws cloudformation create-stack --stack-name <subnet_stack_name> \ <1>
-     --template-body file://<template>.yaml \ <2>
-     --parameters file://<parameters>.json <3>
-----
-<1> `<subnet_stack_name>` is the name for the CloudFormation stack, such as `cluster-lz-<local_zone_shortname>`.
-You need the name of this stack if you remove the cluster.
-<2> `<template>` is the relative path to and name of the CloudFormation template
-YAML file that you saved.
-<3> `<parameters>` is the relative path to and name of the CloudFormation
-parameters JSON file.
-+
-.Example output
-[source,terminal]
-----
-arn:aws:cloudformation:us-east-1:123456789012:stack/<subnet_stack_name>/dbedae40-2fd3-11eb-820e-12a48460849f
-----
-
-. Confirm that the template components exist by running the following command:
-+
-[source,terminal]
-----
-$ aws cloudformation describe-stacks --stack-name <subnet_stack_name>
-----
-+
-After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values
-for the following parameters. You must provide these parameter values to
-the other CloudFormation templates that you run to create your cluster:
-[horizontal]
-`PublicSubnetIds`:: The IDs of the new public subnets.
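-[TIP]
-====
-The parameter file in this procedure references output values from CloudFormation stacks that you created earlier. As an alternative to copying a value from the AWS console, you can read a single stack output with the AWS CLI. The following command is only a sketch: it assumes that the VPC stack from the previous procedure is named `cluster-vpc`, which is the example name used for that stack in this topic. Replace the stack name and the `OutputKey` value to match your environment.
-
-[source,terminal]
-----
-$ aws cloudformation describe-stacks --stack-name cluster-vpc \
-    --query "Stacks[0].Outputs[?OutputKey=='VpcId'].OutputValue" \
-    --output text
-----
-
-For example, querying `PublicRouteTableId` instead of `VpcId` returns the route table ID that this procedure's parameter file requires.
-====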
diff --git a/modules/installation-creating-aws-vpc-localzone.adoc b/modules/installation-creating-aws-vpc-localzone.adoc deleted file mode 100644 index 1cca351d8450..000000000000 --- a/modules/installation-creating-aws-vpc-localzone.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-vpc-localzone_{context}"] -= Creating a VPC that uses AWS Local Zones - -You must create a Virtual Private Cloud (VPC), and subnets for each Local Zone location, in Amazon Web Services (AWS) for your {product-title} -cluster to extend worker nodes to the edge locations. You can further customize the VPC to meet your requirements, including -VPN, route tables, and add new Local Zone subnets that are not included at initial deployment. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources that represent the VPC. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your AWS -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You opted in to the AWS Local Zones on your AWS account. - -.Procedure - -. Create a JSON file that contains the parameter values that the template -requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "VpcCidr", <1> - "ParameterValue": "10.0.0.0/16" <2> - }, - { - "ParameterKey": "AvailabilityZoneCount", <3> - "ParameterValue": "3" <4> - }, - { - "ParameterKey": "SubnetBits", <5> - "ParameterValue": "12" <6> - } -] ----- -<1> The CIDR block for the VPC. -<2> Specify a CIDR block in the format `x.x.x.x/16-24`. -<3> The number of availability zones to deploy the VPC in. -<4> Specify an integer between `1` and `3`. -<5> The size of each subnet in each availability zone. -<6> Specify an integer between `5` and `13`, where `5` is `/27` and `13` is `/19`. - -. Copy the template from the *CloudFormation template for the VPC* -section of this topic and save it as a YAML file on your computer. This template -describes the VPC that your cluster requires. - -. Launch the CloudFormation template to create a stack of AWS resources that represent the VPC by running the following command: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> \ <1> - --template-body file://<template>.yaml \ <2> - --parameters file://<parameters>.json <3> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-vpc`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:123456789012:stack/cluster-vpc/dbedae40-2fd3-11eb-820e-12a48460849f ----- - -. 
Confirm that the template components exist by running the following command: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- -+ -After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values -for the following parameters. You must provide these parameter values to -the other CloudFormation templates that you run to create your cluster: -[horizontal] -`VpcId`:: The ID of your VPC. -`PublicSubnetIds`:: The IDs of the new public subnets. -`PrivateSubnetIds`:: The IDs of the new private subnets. -`PublicRouteTableId`:: The ID of the new public route table ID. - diff --git a/modules/installation-creating-aws-vpc.adoc b/modules/installation-creating-aws-vpc.adoc deleted file mode 100644 index 80a2481d0698..000000000000 --- a/modules/installation-creating-aws-vpc.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -:_content-type: PROCEDURE -[id="installation-creating-aws-vpc_{context}"] -= Creating a VPC in AWS - -You must create a Virtual Private Cloud (VPC) in Amazon Web Services (AWS) for your {product-title} -cluster to use. You can customize the VPC to meet your requirements, including -VPN and route tables. - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources that represent the VPC. - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your AWS -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. - -.Procedure - -. Create a JSON file that contains the parameter values that the template -requires: -+ -[source,json] ----- -[ - { - "ParameterKey": "VpcCidr", <1> - "ParameterValue": "10.0.0.0/16" <2> - }, - { - "ParameterKey": "AvailabilityZoneCount", <3> - "ParameterValue": "1" <4> - }, - { - "ParameterKey": "SubnetBits", <5> - "ParameterValue": "12" <6> - } -] ----- -<1> The CIDR block for the VPC. -<2> Specify a CIDR block in the format `x.x.x.x/16-24`. -<3> The number of availability zones to deploy the VPC in. -<4> Specify an integer between `1` and `3`. -<5> The size of each subnet in each availability zone. -<6> Specify an integer between `5` and `13`, where `5` is `/27` and `13` is `/19`. - -. Copy the template from the *CloudFormation template for the VPC* -section of this topic and save it as a YAML file on your computer. This template -describes the VPC that your cluster requires. - -. Launch the CloudFormation template to create a stack of AWS resources that represent the VPC: -+ -[IMPORTANT] -==== -You must enter the command on a single line. -==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml <2> - --parameters file://<parameters>.json <3> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-vpc`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. 
-<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-vpc/dbedae40-2fd3-11eb-820e-12a48460849f ----- - -. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- -+ -After the `StackStatus` displays `CREATE_COMPLETE`, the output displays values -for the following parameters. You must provide these parameter values to -the other CloudFormation templates that you run to create your cluster: -[horizontal] -`VpcId`:: The ID of your VPC. -`PublicSubnetIds`:: The IDs of the new public subnets. -`PrivateSubnetIds`:: The IDs of the new private subnets. diff --git a/modules/installation-creating-aws-worker.adoc b/modules/installation-creating-aws-worker.adoc deleted file mode 100644 index 33b53d0b67a0..000000000000 --- a/modules/installation-creating-aws-worker.adoc +++ /dev/null @@ -1,182 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -ifeval::["{context}" == "installing-aws-user-infra"] -:three-node-cluster: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-aws-worker_{context}"] -= Creating the worker nodes in AWS - -//// -If you do not plan to automatically create worker nodes by using a MachineSet, -//// - -You can create worker nodes in Amazon Web Services (AWS) for your cluster to use. - -ifdef::three-node-cluster[] -[NOTE] -==== -If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. -==== -endif::three-node-cluster[] - -You can use the provided CloudFormation template and a custom parameter file to create a stack of AWS resources that represent a worker node. - -[IMPORTANT] -==== -The CloudFormation template creates a stack that represents one worker node. -You must create a stack for each worker node. -==== - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your worker -nodes, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* You configured an AWS account. -* You added your AWS keys and region to your local AWS profile by running `aws configure`. -* You generated the Ignition config files for your cluster. -* You created and configured a VPC and associated subnets in AWS. -* You created and configured DNS, load balancers, and listeners in AWS. -* You created the security groups and roles required for your cluster in AWS. -* You created the bootstrap machine. -* You created the control plane machines. - - - -.Procedure - -. 
Create a JSON file that contains the parameter values that the CloudFormation
-template requires:
-+
-[source,json]
-----
-[
-  {
-    "ParameterKey": "InfrastructureName", <1>
-    "ParameterValue": "mycluster-<random_string>" <2>
-  },
-  {
-    "ParameterKey": "RhcosAmi", <3>
-    "ParameterValue": "ami-<random_string>" <4>
-  },
-  {
-    "ParameterKey": "Subnet", <5>
-    "ParameterValue": "subnet-<random_string>" <6>
-  },
-  {
-    "ParameterKey": "WorkerSecurityGroupId", <7>
-    "ParameterValue": "sg-<random_string>" <8>
-  },
-  {
-    "ParameterKey": "IgnitionLocation", <9>
-    "ParameterValue": "https://api-int.<cluster_name>.<domain_name>:22623/config/worker" <10>
-  },
-  {
-    "ParameterKey": "CertificateAuthorities", <11>
-    "ParameterValue": "" <12>
-  },
-  {
-    "ParameterKey": "WorkerInstanceProfileName", <13>
-    "ParameterValue": "" <14>
-  },
-  {
-    "ParameterKey": "WorkerInstanceType", <15>
-    "ParameterValue": "" <16>
-  }
-]
-----
-<1> The name for your cluster infrastructure that is encoded in your Ignition
-config files for the cluster.
-<2> Specify the infrastructure name that you extracted from the Ignition config
-file metadata, which has the format `<cluster-name>-<random-string>`.
-<3> Current {op-system-first} AMI to use for the worker nodes based on your selected architecture.
-<4> Specify an `AWS::EC2::Image::Id` value.
-<5> A subnet, preferably private, to launch the worker nodes on.
-<6> Specify a subnet from the `PrivateSubnets` value from the output of the
-CloudFormation template for DNS and load balancing.
-<7> The worker security group ID to associate with worker nodes.
-<8> Specify the `WorkerSecurityGroupId` value from the output of the
-CloudFormation template for the security group and roles.
-<9> The location to fetch the worker Ignition config file from.
-<10> Specify the generated Ignition config location,
-`https://api-int.<cluster_name>.<domain_name>:22623/config/worker`.
-<11> Base64 encoded certificate authority string to use.
-<12> Specify the value from the `worker.ign` file that is in the installation
-directory. This value is the long string with the format
-`data:text/plain;charset=utf-8;base64,ABC...xYz==`.
-<13> The IAM profile to associate with worker nodes.
-<14> Specify the `WorkerInstanceProfile` parameter value from the output of
-the CloudFormation template for the security group and roles.
-<15> The type of AWS instance to use for the compute machines based on your selected architecture.
-<16> The instance type value corresponds to the minimum resource requirements
-for compute machines. For example, `m6i.large` is a type for AMD64
-ifndef::openshift-origin[]
-and `m6g.large` is a type for ARM64.
-endif::openshift-origin[]
-
-. Copy the template from the *CloudFormation template for worker machines*
-section of this topic and save it as a YAML file on your computer. This template
-describes the worker machines that your cluster requires.
-
-. Optional: If you specified an `m5` instance type as the value for `WorkerInstanceType`, add that instance type to the `WorkerInstanceType.AllowedValues` parameter in the CloudFormation template.
-
-. Optional: If you are deploying with an AWS Marketplace image, update the `Worker0.type.properties.ImageID` parameter with the AMI ID that you obtained from your subscription.
-
-. Use the CloudFormation template to create a stack of AWS resources that represent a worker node:
-+
-[IMPORTANT]
-====
-You must enter the command on a single line.
-==== -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> <1> - --template-body file://<template>.yaml \ <2> - --parameters file://<parameters>.json <3> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-worker-1`. -You need the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template -YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation -parameters JSON file. -+ -.Example output -[source,terminal] ----- -arn:aws:cloudformation:us-east-1:269333783861:stack/cluster-worker-1/729ee301-1c2a-11eb-348f-sd9888c65b59 ----- -+ -[NOTE] -==== -The CloudFormation template creates a stack that represents one worker node. -==== - -. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- - -. Continue to create worker stacks until you have created enough worker machines for your cluster. You can create additional worker stacks by referencing the same template and parameter files and specifying a different stack name. -+ -[IMPORTANT] -==== -You must create at least two worker machines, so you must create at least -two stacks that use this CloudFormation template. -==== - -ifeval::["{context}" == "installing-aws-user-infra"] -:!three-node-cluster: -endif::[] diff --git a/modules/installation-creating-azure-bootstrap.adoc b/modules/installation-creating-azure-bootstrap.adoc deleted file mode 100644 index 7a121da80e98..000000000000 --- a/modules/installation-creating-azure-bootstrap.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-azure-bootstrap_{context}"] -= Creating the bootstrap machine in {cp} - -You must create the bootstrap machine in Microsoft {cp} to use during -{product-title} cluster initialization. One way to create this machine is to -modify the provided Azure Resource Manager (ARM) template. - -[NOTE] -==== -If you do not use the provided ARM template to create your bootstrap machine, -you must review the provided information and manually create the infrastructure. -If your cluster does not initialize correctly, you might have to contact Red Hat -support with your installation logs. -==== - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. -* Create and configure networking and load balancers in {cp}. -* Create control plane and compute roles. - -.Procedure - -. Copy the template from the *ARM template for the bootstrap machine* section of -this topic and save it as `04_bootstrap.json` in your cluster's installation directory. This template -describes the bootstrap machine that your cluster requires. - -. 
Export the bootstrap URL variable: -+ -[source,terminal] ----- -$ bootstrap_url_expiry=`date -u -d "10 hours" '+%Y-%m-%dT%H:%MZ'` ----- -+ -[source,terminal] ----- -$ export BOOTSTRAP_URL=`az storage blob generate-sas -c 'files' -n 'bootstrap.ign' --https-only --full-uri --permissions r --expiry $bootstrap_url_expiry --account-name ${CLUSTER_NAME}sa --account-key ${ACCOUNT_KEY} -o tsv` ----- - -. Export the bootstrap ignition variable: -ifdef::azure[] -+ -[source,terminal] ----- -$ export BOOTSTRAP_IGNITION=`jq -rcnM --arg v "3.2.0" --arg url ${BOOTSTRAP_URL} '{ignition:{version:$v,config:{replace:{source:$url}}}}' | base64 | tr -d '\n'` ----- -endif::azure[] -ifdef::ash[] -.. If your environment uses a public certificate authority (CA), run this command: -+ -[source,terminal] ----- -$ export BOOTSTRAP_IGNITION=`jq -rcnM --arg v "3.2.0" --arg url ${BOOTSTRAP_URL} '{ignition:{version:$v,config:{replace:{source:$url}}}}' | base64 | tr -d '\n'` ----- - -.. If your environment uses an internal CA, you must add your PEM encoded bundle to the bootstrap ignition stub so that your bootstrap virtual machine can pull the bootstrap ignition from the storage account. Run the following commands, which assume your CA is in a file called `CA.pem`: -+ -[source,terminal] ----- -$ export CA="data:text/plain;charset=utf-8;base64,$(cat CA.pem |base64 |tr -d '\n')" ----- -+ -[source,terminal] ----- -$ export BOOTSTRAP_IGNITION=`jq -rcnM --arg v "3.2.0" --arg url "$BOOTSTRAP_URL" --arg cert "$CA" '{ignition:{version:$v,security:{tls:{certificateAuthorities:[{source:$cert}]}},config:{replace:{source:$url}}}}' | base64 | tr -d '\n'` ----- -endif::ash[] - -. Create the deployment by using the `az` CLI: -+ -ifdef::azure[] -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/04_bootstrap.json" \ - --parameters bootstrapIgnition="${BOOTSTRAP_IGNITION}" \ <1> - --parameters baseName="${INFRA_ID}" <2> ----- -<1> The bootstrap Ignition content for the bootstrap cluster. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ az deployment group create --verbose -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/04_bootstrap.json" \ - --parameters bootstrapIgnition="${BOOTSTRAP_IGNITION}" \ <1> - --parameters baseName="${INFRA_ID}" \ <2> - --parameters diagnosticsStorageAccountName="${CLUSTER_NAME}sa" <3> ----- -<1> The bootstrap Ignition content for the bootstrap cluster. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -<3> The name of the storage account for your cluster. 
-endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-creating-azure-control-plane.adoc b/modules/installation-creating-azure-control-plane.adoc deleted file mode 100644 index 341b2bdb3476..000000000000 --- a/modules/installation-creating-azure-control-plane.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-azure-control-plane_{context}"] -= Creating the control plane machines in {cp} - -You must create the control plane machines in Microsoft {cp} for your cluster -to use. One way to create these machines is to modify the provided Azure -Resource Manager (ARM) template. - -[NOTE] -==== -If you do not use the provided ARM template to create your control plane -machines, you must review the provided information and manually create the -infrastructure. If your cluster does not initialize correctly, you might have to -contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. -* Create and configure networking and load balancers in {cp}. -* Create control plane and compute roles. -* Create the bootstrap machine. - -.Procedure - -. Copy the template from the *ARM template for control plane machines* -section of this topic and save it as `05_masters.json` in your cluster's installation directory. -This template describes the control plane machines that your cluster requires. - -. Export the following variable needed by the control plane machine deployment: -+ -[source,terminal] ----- -$ export MASTER_IGNITION=`cat <installation_directory>/master.ign | base64 | tr -d '\n'` ----- - -. Create the deployment by using the `az` CLI: -+ -ifdef::azure[] -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/05_masters.json" \ - --parameters masterIgnition="${MASTER_IGNITION}" \ <1> - --parameters baseName="${INFRA_ID}" <2> ----- -<1> The Ignition content for the control plane nodes. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -endif::azure[] -ifdef::ash[] -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/05_masters.json" \ - --parameters masterIgnition="${MASTER_IGNITION}" \ <1> - --parameters baseName="${INFRA_ID}" \ <2> - --parameters diagnosticsStorageAccountName="${CLUSTER_NAME}sa" <3> ----- -<1> The Ignition content for the control plane nodes (also known as the master nodes). -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -<3> The name of the storage account for your cluster. 
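-+
-Optionally, you can list the control plane VMs to confirm that all three machines were created. This check assumes the naming convention of the provided template, in which the control plane VM names contain `master`:
-+
-[source,terminal]
-----
-$ az vm list -g ${RESOURCE_GROUP} --query "[?contains(name, 'master')].name" -o tsv
-----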
-endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-creating-azure-dns.adoc b/modules/installation-creating-azure-dns.adoc deleted file mode 100644 index c71b671ec7c7..000000000000 --- a/modules/installation-creating-azure-dns.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-azure-dns_{context}"] -= Creating networking and load balancing components in {cp} - -You must configure networking and load balancing in Microsoft {cp} for your -{product-title} cluster to use. One way to create these components is -to modify the provided Azure Resource Manager (ARM) template. - -ifdef::ash[] -Load balancing requires the following DNS records: - -* An `api` DNS record for the API public load balancer in the DNS zone. -* An `api-int` DNS record for the API internal load balancer in the DNS zone. -endif::ash[] - -[NOTE] -==== -If you do not use the provided ARM template to create your {cp} infrastructure, -you must review the provided information and manually create the infrastructure. -If your cluster does not initialize correctly, you might have to contact Red Hat -support with your installation logs. -==== - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. - -.Procedure - -. Copy the template from the *ARM template for the network and load balancers* -section of this topic and save it as `03_infra.json` in your cluster's installation directory. This -template describes the networking and load balancing objects that your cluster -requires. - -. Create the deployment by using the `az` CLI: -+ -ifdef::azure[] -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/03_infra.json" \ - --parameters privateDNSZoneName="${CLUSTER_NAME}.${BASE_DOMAIN}" \ <1> - --parameters baseName="${INFRA_ID}"<2> ----- -<1> The name of the private DNS zone. -<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -endif::azure[] - -ifdef::ash[] -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/03_infra.json" \ - --parameters baseName="${INFRA_ID}"<1> ----- -<1> The base name to be used in resource names; this is usually the cluster's infrastructure ID. -endif::ash[] - -ifdef::azure[] -. Create an `api` DNS record in the public zone for the API public load -balancer. The `${BASE_DOMAIN_RESOURCE_GROUP}` variable must point to the -resource group where the public DNS zone exists. -endif::azure[] - -ifdef::ash[] -. Create an `api` DNS record and an `api-int` DNS record. When creating the API DNS records, the `${BASE_DOMAIN_RESOURCE_GROUP}` variable must point to the resource group where the DNS zone exists. -endif::ash[] - -.. 
Export the following variable: -+ -[source,terminal] ----- -$ export PUBLIC_IP=`az network public-ip list -g ${RESOURCE_GROUP} --query "[?name=='${INFRA_ID}-master-pip'] | [0].ipAddress" -o tsv` ----- -ifdef::ash[] -.. Export the following variable: -+ -[source,terminal] ----- -$ export PRIVATE_IP=`az network lb frontend-ip show -g "$RESOURCE_GROUP" --lb-name "${INFRA_ID}-internal" -n internal-lb-ip --query "privateIpAddress" -o tsv` ----- -endif::ash[] - -ifdef::azure[] -.. Create the `api` DNS record in a new public zone: -endif::azure[] -ifdef::ash[] -.. Create the `api` DNS record in a new DNS zone: -endif::ash[] -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n api -a ${PUBLIC_IP} --ttl 60 ----- -+ -ifdef::azure[] -If you are adding the cluster to an existing public zone, you can create the `api` DNS record in it instead: -endif::azure[] -ifdef::ash[] -If you are adding the cluster to an existing DNS zone, you can create the `api` DNS record in it instead: -endif::ash[] -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n api.${CLUSTER_NAME} -a ${PUBLIC_IP} --ttl 60 ----- - -ifdef::ash[] -.. Create the `api-int` DNS record in a new DNS zone: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z "${CLUSTER_NAME}.${BASE_DOMAIN}" -n api-int -a ${PRIVATE_IP} --ttl 60 ----- -+ -If you are adding the cluster to an existing DNS zone, you can create the `api-int` DNS -record in it instead: -+ -[source,terminal] ----- -$ az network dns record-set a add-record -g ${BASE_DOMAIN_RESOURCE_GROUP} -z ${BASE_DOMAIN} -n api-int.${CLUSTER_NAME} -a ${PRIVATE_IP} --ttl 60 ----- -endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-creating-azure-vnet.adoc b/modules/installation-creating-azure-vnet.adoc deleted file mode 100644 index 337d6cf596a8..000000000000 --- a/modules/installation-creating-azure-vnet.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-azure-vnet_{context}"] -= Creating a VNet in {cp} - -You must create a virtual network (VNet) in Microsoft {cp} for your -{product-title} cluster to use. You can customize the VNet to meet your -requirements. One way to create the VNet is to modify the provided Azure -Resource Manager (ARM) template. - -[NOTE] -==== -If you do not use the provided ARM template to create your {cp} infrastructure, -you must review the provided information and manually create the infrastructure. -If your cluster does not initialize correctly, you might have to contact Red Hat -support with your installation logs. -==== - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. - -.Procedure - -. Copy the template from the *ARM template for the VNet* section of this topic -and save it as `01_vnet.json` in your cluster's installation directory. 
This template describes the -VNet that your cluster requires. - -. Create the deployment by using the `az` CLI: -+ -[source,terminal] ----- -$ az deployment group create -g ${RESOURCE_GROUP} \ - --template-file "<installation_directory>/01_vnet.json" \ - --parameters baseName="${INFRA_ID}"<1> ----- -<1> The base name to be used in resource names; this is usually the cluster's infrastructure ID. - -ifndef::ash[] -. Link the VNet template to the private DNS zone: -+ -[source,terminal] ----- -$ az network private-dns link vnet create -g ${RESOURCE_GROUP} -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n ${INFRA_ID}-network-link -v "${INFRA_ID}-vnet" -e false ----- -endif::ash[] - -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!cp: Azure Stack Hub -endif::[] diff --git a/modules/installation-creating-azure-worker.adoc b/modules/installation-creating-azure-worker.adoc deleted file mode 100644 index 407cf4bcc6c0..000000000000 --- a/modules/installation-creating-azure-worker.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:cp: Azure -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:cp: Azure Stack Hub -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-azure-worker_{context}"] -= Creating additional worker machines in {cp} - -You can create worker machines in Microsoft {cp} for your cluster -to use by launching individual instances discretely or by automated processes -outside the cluster, such as auto scaling groups. You can also take advantage of -the built-in cluster scaling mechanisms and the machine API in {product-title}. - -ifdef::three-node-cluster[] -[NOTE] -==== -If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. -==== -endif::three-node-cluster[] - -In this example, you manually launch one instance by using the Azure Resource -Manager (ARM) template. Additional instances can be launched by including -additional resources of type `06_workers.json` in the file. - -[NOTE] -==== -If you do not use the provided ARM template to create your worker machines, you -must review the provided information and manually create the infrastructure. If -your cluster does not initialize correctly, you might have to contact Red Hat -support with your installation logs. -==== - -.Prerequisites - -* Configure an Azure account. -* Generate the Ignition config files for your cluster. -* Create and configure a VNet and associated subnets in {cp}. -* Create and configure networking and load balancers in {cp}. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. - -.Procedure - -. Copy the template from the *ARM template for worker machines* -section of this topic and save it as `06_workers.json` in your cluster's installation directory. This -template describes the worker machines that your cluster requires. - -. Export the following variable needed by the worker machine deployment: -+ -[source,terminal] ----- -$ export WORKER_IGNITION=`cat <installation_directory>/worker.ign | base64 | tr -d '\n'` ----- - -. 
Create the deployment by using the `az` CLI:
-+
-ifdef::azure[]
-[source,terminal]
-----
-$ az deployment group create -g ${RESOURCE_GROUP} \
-  --template-file "<installation_directory>/06_workers.json" \
-  --parameters workerIgnition="${WORKER_IGNITION}" \ <1>
-  --parameters baseName="${INFRA_ID}" <2>
-----
-<1> The Ignition content for the worker nodes.
-<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID.
-endif::azure[]
-ifdef::ash[]
-[source,terminal]
-----
-$ az deployment group create -g ${RESOURCE_GROUP} \
-  --template-file "<installation_directory>/06_workers.json" \
-  --parameters workerIgnition="${WORKER_IGNITION}" \ <1>
-  --parameters baseName="${INFRA_ID}" \ <2>
-  --parameters diagnosticsStorageAccountName="${CLUSTER_NAME}sa" <3>
-----
-<1> The Ignition content for the worker nodes.
-<2> The base name to be used in resource names; this is usually the cluster's infrastructure ID.
-<3> The name of the storage account for your cluster.
-endif::ash[]
-
-ifeval::["{context}" == "installing-azure-user-infra"]
-:!azure:
-:!cp: Azure
-:!three-node-cluster:
-endif::[]
-ifeval::["{context}" == "installing-azure-stack-hub-user-infra"]
-:!ash:
-:!cp: Azure Stack Hub
-endif::[]
diff --git a/modules/installation-creating-gcp-bootstrap.adoc b/modules/installation-creating-gcp-bootstrap.adoc
deleted file mode 100644
index 7ba569b0e4a9..000000000000
--- a/modules/installation-creating-gcp-bootstrap.adoc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_gcp/installing-gcp-user-infra.adoc
-// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc
-// * installing/installing_gcp/installing-restricted-networks-gcp.adoc
-
-ifeval::["{context}" == "installing-gcp-user-infra-vpc"]
-:shared-vpc:
-endif::[]
-
-:_content-type: PROCEDURE
-[id="installation-creating-gcp-bootstrap_{context}"]
-= Creating the bootstrap machine in GCP
-
-You must create the bootstrap machine in Google Cloud Platform (GCP) to use during
-{product-title} cluster initialization. One way to create this machine is
-to modify the provided Deployment Manager template.
-
-[NOTE]
-====
-If you do not use the provided Deployment Manager template to create your bootstrap
-machine, you must review the provided information and manually create
-the infrastructure. If your cluster does not initialize correctly, you might
-have to contact Red Hat support with your installation logs.
-====
-
-.Prerequisites
-
-* Configure a GCP account.
-* Generate the Ignition config files for your cluster.
-* Create and configure a VPC and associated subnets in GCP.
-* Create and configure networking and load balancers in GCP.
-* Create control plane and compute roles.
-* Ensure pyOpenSSL is installed.
-
-.Procedure
-
-. Copy the template from the *Deployment Manager template for the bootstrap machine*
-section of this topic and save it as `04_bootstrap.py` on your computer. This
-template describes the bootstrap machine that your cluster requires.
-
-. Export the location of the {op-system-first} image that the installation program requires:
-+
-[source,terminal]
-----
-$ export CLUSTER_IMAGE=(`gcloud compute images describe ${INFRA_ID}-rhcos-image --format json | jq -r .selfLink`)
-----
-
-. 
Create a bucket and upload the `bootstrap.ign` file: -+ -[source,terminal] ----- -$ gsutil mb gs://${INFRA_ID}-bootstrap-ignition ----- -+ -[source,terminal] ----- -$ gsutil cp <installation_directory>/bootstrap.ign gs://${INFRA_ID}-bootstrap-ignition/ ----- - -. Create a signed URL for the bootstrap instance to use to access the Ignition -config. Export the URL from the output as a variable: -+ -[source,terminal] ----- -$ export BOOTSTRAP_IGN=`gsutil signurl -d 1h service-account-key.json gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign | grep "^gs:" | awk '{print $5}'` ----- - -. Create a `04_bootstrap.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >04_bootstrap.yaml -imports: -- path: 04_bootstrap.py - -resources: -- name: cluster-bootstrap - type: 04_bootstrap.py - properties: - infra_id: '${INFRA_ID}' <1> - region: '${REGION}' <2> - zone: '${ZONE_0}' <3> - - cluster_network: '${CLUSTER_NETWORK}' <4> - control_subnet: '${CONTROL_SUBNET}' <5> - image: '${CLUSTER_IMAGE}' <6> - machine_type: 'n1-standard-4' <7> - root_volume_size: '128' <8> - - bootstrap_ign: '${BOOTSTRAP_IGN}' <9> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `region` is the region to deploy the cluster into, for example `us-central1`. -<3> `zone` is the zone to deploy the bootstrap instance into, for example `us-central1-b`. -<4> `cluster_network` is the `selfLink` URL to the cluster network. -<5> `control_subnet` is the `selfLink` URL to the control subnet. -<6> `image` is the `selfLink` URL to the {op-system} image. -<7> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<8> `root_volume_size` is the boot disk size for the bootstrap machine. -<9> `bootstrap_ign` is the URL output when creating a signed URL. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-bootstrap --config 04_bootstrap.yaml ----- - -ifndef::shared-vpc[] -. The templates do not manage load balancer membership due to limitations of Deployment -Manager, so you must add the bootstrap machine manually. - -.. Add the bootstrap instance to the internal load balancer instance group: -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances \ - ${INFRA_ID}-bootstrap-ig --zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap ----- - -.. Add the bootstrap instance group to the internal load balancer backend service: -+ -[source,terminal] ----- -$ gcloud compute backend-services add-backend \ - ${INFRA_ID}-api-internal-backend-service --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-ig --instance-group-zone=${ZONE_0} ----- -endif::shared-vpc[] - -ifdef::shared-vpc[] -. Add the bootstrap instance to the internal load balancer instance group: -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-bootstrap-ig --zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap ----- - -. 
Add the bootstrap instance group to the internal load balancer backend service: -+ -[source,terminal] ----- -$ gcloud compute backend-services add-backend ${INFRA_ID}-api-internal-backend-service --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-ig --instance-group-zone=${ZONE_0} ----- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-control-plane.adoc b/modules/installation-creating-gcp-control-plane.adoc deleted file mode 100644 index 99311ea97fc0..000000000000 --- a/modules/installation-creating-gcp-control-plane.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-control-plane_{context}"] -= Creating the control plane machines in GCP - -You must create the control plane machines in Google Cloud Platform (GCP) for -your cluster to use. One way to create these machines is to modify the -provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your -control plane machines, you must review the provided information and manually -create the infrastructure. If your cluster does not initialize correctly, you -might have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. -* Create and configure networking and load balancers in GCP. -* Create control plane and compute roles. -* Create the bootstrap machine. - -.Procedure - -. Copy the template from the *Deployment Manager template for control plane machines* -section of this topic and save it as `05_control_plane.py` on your computer. -This template describes the control plane machines that your cluster requires. - -. Export the following variable required by the resource definition: -+ -[source,terminal] ----- -$ export MASTER_IGNITION=`cat <installation_directory>/master.ign` ----- - -. Create a `05_control_plane.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >05_control_plane.yaml -imports: -- path: 05_control_plane.py - -resources: -- name: cluster-control-plane - type: 05_control_plane.py - properties: - infra_id: '${INFRA_ID}' <1> - zones: <2> - - '${ZONE_0}' - - '${ZONE_1}' - - '${ZONE_2}' - - control_subnet: '${CONTROL_SUBNET}' <3> - image: '${CLUSTER_IMAGE}' <4> - machine_type: 'n1-standard-4' <5> - root_volume_size: '128' - service_account_email: '${MASTER_SERVICE_ACCOUNT}' <6> - - ignition: '${MASTER_IGNITION}' <7> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `zones` are the zones to deploy the control plane instances into, for example `us-central1-a`, `us-central1-b`, and `us-central1-c`. -<3> `control_subnet` is the `selfLink` URL to the control subnet. -<4> `image` is the `selfLink` URL to the {op-system} image. -<5> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<6> `service_account_email` is the email address for the master service account that you created. 
-<7> `ignition` is the contents of the `master.ign` file. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-control-plane --config 05_control_plane.yaml ----- - -. The templates do not manage load balancer membership due to limitations of Deployment -Manager, so you must add the control plane machines manually. -** Run the following commands to add the control plane machines to the appropriate instance groups: -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_0}-ig --zone=${ZONE_0} --instances=${INFRA_ID}-master-0 ----- -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_1}-ig --zone=${ZONE_1} --instances=${INFRA_ID}-master-1 ----- -+ -[source,terminal] ----- -$ gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_2}-ig --zone=${ZONE_2} --instances=${INFRA_ID}-master-2 ----- - -** For an external cluster, you must also run the following commands to add the control plane machines to the target pools: -+ -[source,terminal] ----- -$ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-master-0 ----- -+ -[source,terminal] ----- -$ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_1}" --instances=${INFRA_ID}-master-1 ----- -+ -[source,terminal] ----- -$ gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_2}" --instances=${INFRA_ID}-master-2 ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-dns.adoc b/modules/installation-creating-gcp-dns.adoc deleted file mode 100644 index 394a446b79e8..000000000000 --- a/modules/installation-creating-gcp-dns.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -[id="installation-creating-gcp-dns_{context}"] -= Creating networking and load balancing components in GCP - -You must configure networking and load balancing in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. Copy the template from the *Deployment Manager template for the network and load balancers* -section of this topic and save it as `02_infra.py` on your computer. This -template describes the networking and load balancing objects that your cluster -requires. - -. 
Export the following variable required by the resource definition: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ export CLUSTER_NETWORK=`gcloud compute networks describe ${INFRA_ID}-network --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink` ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ export CLUSTER_NETWORK=`gcloud compute networks describe ${HOST_PROJECT_NETWORK} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink` ----- -+ -Where `<network_name>` is the name of the network that hosts the shared VPC. -endif::shared-vpc[] - -. Create a `02_infra.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >02_infra.yaml -imports: -- path: 02_infra.py - -resources: -- name: cluster-infra - type: 02_infra.py - properties: - infra_id: '${INFRA_ID}' <1> - region: '${REGION}' <2> - - cluster_domain: '${CLUSTER_NAME}.${BASE_DOMAIN}' <3> - cluster_network: '${CLUSTER_NETWORK}' <4> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `region` is the region to deploy the cluster into, for example `us-central1`. -<3> `cluster_domain` is the domain for the cluster, for example `openshift.example.com`. -<4> `cluster_network` is the `selfLink` URL to the cluster network. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-infra --config 02_infra.yaml ----- - -. The templates do not create DNS entries due to limitations of Deployment -Manager, so you must create them manually: - -.. Export the following variable: -+ -[source,terminal] ----- -$ export CLUSTER_IP=`gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address` ----- - -.. Add external DNS entries: -+ -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} ----- - -.. Add internal DNS entries: -+ -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api-int.${CLUSTER_NAME}.${BASE_DOMAIN}. 
--ttl 60 --type A --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-firewall-rules-vpc.adoc b/modules/installation-creating-gcp-firewall-rules-vpc.adoc deleted file mode 100644 index 4a6ce5e9e974..000000000000 --- a/modules/installation-creating-gcp-firewall-rules-vpc.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-firewall-rules-vpc_{context}"] -= Creating firewall rules in GCP - -You must create firewall rules in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. Copy the template from the -*Deployment Manager template for firewall rules* -section of this topic and save it as `03_firewall.py` on your computer. This -template describes the security groups that your cluster requires. - -. Create a `03_firewall.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >03_firewall.yaml -imports: -- path: 03_firewall.py - -resources: -- name: cluster-firewall - type: 03_firewall.py - properties: - allowed_external_cidr: '0.0.0.0/0' <1> - infra_id: '${INFRA_ID}' <2> - cluster_network: '${CLUSTER_NETWORK}' <3> - network_cidr: '${NETWORK_CIDR}' <4> -EOF ----- -<1> `allowed_external_cidr` is the CIDR range that can access the cluster API and SSH to the bootstrap host. For an internal cluster, set this value to `${NETWORK_CIDR}`. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `cluster_network` is the `selfLink` URL to the cluster network. -<4> `network_cidr` is the CIDR of the VPC network, for example `10.0.0.0/16`. - -. 
Create the deployment by using the `gcloud` CLI: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-firewall --config 03_firewall.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-firewall --config 03_firewall.yaml ----- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-iam-shared-vpc.adoc b/modules/installation-creating-gcp-iam-shared-vpc.adoc deleted file mode 100644 index 4e6994b72a64..000000000000 --- a/modules/installation-creating-gcp-iam-shared-vpc.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-iam-shared-vpc_{context}"] -= Creating IAM roles in GCP - -You must create IAM roles in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. Copy the template from the -*Deployment Manager template for IAM roles* -section of this topic and save it as `03_iam.py` on your computer. This -template describes the IAM roles that your cluster requires. - -. Create a `03_iam.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >03_iam.yaml -imports: -- path: 03_iam.py -resources: -- name: cluster-iam - type: 03_iam.py - properties: - infra_id: '${INFRA_ID}' <1> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-iam --config 03_iam.yaml ----- - -. Export the variable for the master service account: -+ -[source,terminal] ----- -$ export MASTER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) ----- - -. Export the variable for the worker service account: -+ -[source,terminal] ----- -$ export WORKER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) ----- - -ifndef::shared-vpc[] -. Export the variable for the subnet that hosts the compute machines: -+ -[source,terminal] ----- -$ export COMPUTE_SUBNET=(`gcloud compute networks subnets describe ${INFRA_ID}-worker-subnet --region=${REGION} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] - -ifdef::shared-vpc[] -. 
Assign the permissions that the installation program requires to the service accounts for the subnets that host the control plane and compute subnets: - -.. Grant the `networkViewer` role of the project that hosts your shared VPC to the master service account: -+ -[source,terminal] ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} projects add-iam-policy-binding ${HOST_PROJECT} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkViewer" ----- - -.. Grant the `networkUser` role to the master service account for the control plane subnet: -+ -[source,terminal] ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_CONTROL_SUBNET}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ----- - -.. Grant the `networkUser` role to the worker service account for the control plane subnet: -+ -[source,terminal] ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_CONTROL_SUBNET}" --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ----- - -.. Grant the `networkUser` role to the master service account for the compute subnet: -+ -[source,terminal] ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_COMPUTE_SUBNET}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ----- - -.. Grant the `networkUser` role to the worker service account for the compute subnet: -+ -[source,terminal] ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute networks subnets add-iam-policy-binding "${HOST_PROJECT_COMPUTE_SUBNET}" --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.networkUser" --region ${REGION} ----- -endif::shared-vpc[] - -. The templates do not create the policy bindings due to limitations of Deployment -Manager, so you must create them manually: -+ -[source,terminal] ----- -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.instanceAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.securityAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/iam.serviceAccountUser" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/storage.admin" - -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.viewer" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/storage.admin" ----- - -. 
Create a service account key and store it locally for later use: -+ -[source,terminal] ----- -$ gcloud iam service-accounts keys create service-account-key.json --iam-account=${MASTER_SERVICE_ACCOUNT} ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-lb.adoc b/modules/installation-creating-gcp-lb.adoc deleted file mode 100644 index c21fd4bca419..000000000000 --- a/modules/installation-creating-gcp-lb.adoc +++ /dev/null @@ -1,149 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-lb_{context}"] -= Creating load balancers in GCP - -You must configure load balancers in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. Copy the template from the *Deployment Manager template for the internal load balancer* -section of this topic and save it as `02_lb_int.py` on your computer. This -template describes the internal load balancing objects that your cluster -requires. - -. For an external cluster, also copy the template from the *Deployment Manager template for the external load balancer* -section of this topic and save it as `02_lb_ext.py` on your computer. This -template describes the external load balancing objects that your cluster -requires. - -. Export the variables that the deployment template uses: - -.. Export the cluster network location: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ export CLUSTER_NETWORK=(`gcloud compute networks describe ${HOST_PROJECT_NETWORK} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ export CLUSTER_NETWORK=(`gcloud compute networks describe ${INFRA_ID}-network --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] - -.. Export the control plane subnet location: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ export CONTROL_SUBNET=(`gcloud compute networks subnets describe ${HOST_PROJECT_CONTROL_SUBNET} --region=${REGION} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ export CONTROL_SUBNET=(`gcloud compute networks subnets describe ${INFRA_ID}-master-subnet --region=${REGION} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] - -.. 
Export the three zones that the cluster uses: -+ -[source,terminal] ----- -$ export ZONE_0=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[0] | cut -d "/" -f9`) ----- -+ -[source,terminal] ----- -$ export ZONE_1=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[1] | cut -d "/" -f9`) ----- -+ -[source,terminal] ----- -$ export ZONE_2=(`gcloud compute regions describe ${REGION} --format=json | jq -r .zones[2] | cut -d "/" -f9`) ----- - -. Create a `02_infra.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >02_infra.yaml -imports: -- path: 02_lb_ext.py -- path: 02_lb_int.py <1> -resources: -- name: cluster-lb-ext <1> - type: 02_lb_ext.py - properties: - infra_id: '${INFRA_ID}' <2> - region: '${REGION}' <3> -- name: cluster-lb-int - type: 02_lb_int.py - properties: - cluster_network: '${CLUSTER_NETWORK}' - control_subnet: '${CONTROL_SUBNET}' <4> - infra_id: '${INFRA_ID}' - region: '${REGION}' - zones: <5> - - '${ZONE_0}' - - '${ZONE_1}' - - '${ZONE_2}' -EOF ----- -<1> Required only when deploying an external cluster. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `region` is the region to deploy the cluster into, for example `us-central1`. -<4> `control_subnet` is the URI to the control subnet. -<5> `zones` are the zones to deploy the control plane instances into, like `us-east1-b`, `us-east1-c`, and `us-east1-d`. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-infra --config 02_infra.yaml ----- - -. Export the cluster IP address: -+ -[source,terminal] ----- -$ export CLUSTER_IP=(`gcloud compute addresses describe ${INFRA_ID}-cluster-ip --region=${REGION} --format json | jq -r .address`) ----- - -. For an external cluster, also export the cluster public IP address: -+ -[source,terminal] ----- -$ export CLUSTER_PUBLIC_IP=(`gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address`) ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-private-dns.adoc b/modules/installation-creating-gcp-private-dns.adoc deleted file mode 100644 index 1bd2f2e060bb..000000000000 --- a/modules/installation-creating-gcp-private-dns.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-private-dns_{context}"] -= Creating a private DNS zone in GCP - -You must configure a private DNS zone in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create this component is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. 
Copy the template from the *Deployment Manager template for the private DNS* -section of this topic and save it as `02_dns.py` on your computer. This -template describes the private DNS objects that your cluster -requires. - -. Create a `02_dns.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >02_dns.yaml -imports: -- path: 02_dns.py - -resources: -- name: cluster-dns - type: 02_dns.py - properties: - infra_id: '${INFRA_ID}' <1> - cluster_domain: '${CLUSTER_NAME}.${BASE_DOMAIN}' <2> - cluster_network: '${CLUSTER_NETWORK}' <3> -EOF ----- -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<2> `cluster_domain` is the domain for the cluster, for example `openshift.example.com`. -<3> `cluster_network` is the `selfLink` URL to the cluster network. - -. Create the deployment by using the `gcloud` CLI: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-dns --config 02_dns.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-dns --config 02_dns.yaml ----- -endif::shared-vpc[] - -. The templates do not create DNS entries due to limitations of Deployment -Manager, so you must create them manually: - -.. Add the internal DNS entries: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api-int.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction add ${CLUSTER_IP} --name api-int.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone ----- -endif::shared-vpc[] - -.. For an external cluster, also add the external DNS entries: -+ -ifdef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} dns record-sets transaction add ${CLUSTER_PUBLIC_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. 
--ttl 60 --type A --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} ----- -endif::shared-vpc[] -ifndef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction add ${CLUSTER_PUBLIC_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} ----- -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-security.adoc b/modules/installation-creating-gcp-security.adoc deleted file mode 100644 index 06e94492c178..000000000000 --- a/modules/installation-creating-gcp-security.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-creating-gcp-security_{context}"] -= Creating firewall rules and IAM roles in GCP - -You must create security groups and roles in Google Cloud Platform (GCP) for your -{product-title} cluster to use. One way to create these components is -to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. - -.Procedure - -. Copy the template from the *Deployment Manager template for firewall rules and IAM roles* -section of this topic and save it as `03_security.py` on your computer. This -template describes the security groups and roles that your cluster requires. - -. Export the following variables required by the resource definition: -+ -[source,terminal] ----- -$ export MASTER_NAT_IP=`gcloud compute addresses describe ${INFRA_ID}-master-nat-ip --region ${REGION} --format json | jq -r .address` -$ export WORKER_NAT_IP=`gcloud compute addresses describe ${INFRA_ID}-worker-nat-ip --region ${REGION} --format json | jq -r .address` ----- - -. Create a `03_security.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >03_security.yaml -imports: -- path: 03_security.py - -resources: -- name: cluster-security - type: 03_security.py - properties: - allowed_external_cidr: '0.0.0.0/0' <1> - infra_id: '${INFRA_ID}' <2> - region: '${REGION}' <3> - cluster_network: '${CLUSTER_NETWORK}' <4> - network_cidr: '${NETWORK_CIDR}' <5> - master_nat_ip: '${MASTER_NAT_IP}' <6> - worker_nat_ip: '${WORKER_NAT_IP}' <7> -EOF ----- -<1> `allowed_external_cidr` is the CIDR range that can access the cluster API and SSH to the bootstrap host. For an internal cluster, set this value to `${NETWORK_CIDR}`. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `region` is the region to deploy the cluster into, for example `us-central1`. -<4> `cluster_network` is the `selfLink` URL to the cluster network. 
-<5> `network_cidr` is the CIDR of the VPC network, for example `10.0.0.0/16`. -<6> `master_nat_ip` is the IP address of the master NAT, for example `34.94.100.1`. -<7> `worker_nat_ip` is the IP address of the worker NAT, for example `34.94.200.1`. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-security --config 03_security.yaml ----- - -. Export the variable for the master service account: -+ -[source,terminal] ----- -$ export MASTER_SERVICE_ACCOUNT=`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email'` ----- - -. Export the variable for the worker service account: -+ -[source,terminal] ----- -$ export WORKER_SERVICE_ACCOUNT=`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email'` ----- - -. The templates do not create the policy bindings due to limitations of Deployment -Manager, so you must create them manually: -+ -[source,terminal] ----- -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.instanceAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.securityAdmin" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/iam.serviceAccountUser" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/storage.admin" - -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.viewer" -$ gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/storage.admin" ----- - -. Create a service account key and store it locally for later use: -+ -[source,terminal] ----- -$ gcloud iam service-accounts keys create service-account-key.json --iam-account=${MASTER_SERVICE_ACCOUNT} ----- diff --git a/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc b/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc deleted file mode 100644 index b794bf7f9272..000000000000 --- a/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_content-type: PROCEDURE -[id="installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules_{context}"] -= Creating cluster-wide firewall rules for a shared VPC in GCP - -You can create cluster-wide firewall rules to allow the access that the {product-title} cluster requires. - -[WARNING] -==== -If you do not choose to create firewall rules based on cluster events, you must create cluster-wide firewall rules. -==== - -.Prerequisites - -* You exported the variables that the Deployment Manager templates require to deploy your cluster. -* You created the networking and load balancing components in GCP that your cluster requires. - -.Procedure - -. Add a single firewall rule to allow the Google Cloud Engine health checks to access all of the services. 
This rule enables the ingress load balancers to determine the health status of their instances. -+ -[source,terminal] ----- -$ gcloud compute firewall-rules create --allow='tcp:30000-32767,udp:30000-32767' --network="${CLUSTER_NETWORK}" --source-ranges='130.211.0.0/22,35.191.0.0/16,209.85.152.0/22,209.85.204.0/22' --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress-hc --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} ----- - -. Add a single firewall rule to allow access to all cluster services: -+ --- -** For an external cluster: -+ -[source,terminal] ----- -$ gcloud compute firewall-rules create --allow='tcp:80,tcp:443' --network="${CLUSTER_NETWORK}" --source-ranges="0.0.0.0/0" --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} ----- -** For a private cluster: -+ -[source,terminal] ----- -$ gcloud compute firewall-rules create --allow='tcp:80,tcp:443' --network="${CLUSTER_NETWORK}" --source-ranges=${NETWORK_CIDR} --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} ----- --- -+ -Because this rule only allows traffic on TCP ports `80` and `443`, ensure that you add all the ports that your services use. diff --git a/modules/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc b/modules/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc deleted file mode 100644 index 3861e982e9bb..000000000000 --- a/modules/installation-creating-gcp-shared-vpc-ingress-firewall-rules.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="installation-creating-gcp-shared-vpc-ingress-firewall-rules_{context}"] -= Creating ingress firewall rules for a shared VPC in GCP - -You must create ingress firewall rules to allow the access that the {product-title} cluster requires. - -.Prerequisites - -* You exported the variables that the Deployment Manager templates require to deploy your cluster. -* You created the networking and load balancing components in GCP that your cluster requires. 
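-
-The commands in this procedure expand several previously exported variables, such as `${HOST_PROJECT}`, `${HOST_PROJECT_ACCOUNT}`, `${CLUSTER_NETWORK}`, `${NETWORK_CIDR}`, and `${INFRA_ID}`, directly into `gcloud` arguments. Optionally, before you create the rules, you can confirm that none of these variables are empty; the following minimal check uses Bash parameter expansion to fail on the first unset or empty variable:
-
-[source,terminal]
-----
-$ for v in HOST_PROJECT HOST_PROJECT_ACCOUNT CLUSTER_NETWORK NETWORK_CIDR INFRA_ID; do printf '%s=%s\n' "$v" "${!v:?$v is not set}"; done
-----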
- -.Procedure - -* Add ingress firewall rules: -** For an external cluster: -+ ----- -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute firewall-rules create --allow='tcp:30000-32767,udp:30000-32767' --network="${CLUSTER_NETWORK}" --source-ranges='130.211.0.0/22,35.191.0.0/16,209.85.152.0/22,209.85.204.0/22' --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress-hc - -$ gcloud --account=${HOST_PROJECT_ACCOUNT} --project=${HOST_PROJECT} compute firewall-rules create --allow='tcp:80,tcp:443' --network="${CLUSTER_NETWORK}" --source-ranges="0.0.0.0/0" --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress ----- - -** For an internal cluster: -+ ----- -$ gcloud compute firewall-rules create --allow='tcp:30000-32767,udp:30000-32767' --network="${CLUSTER_NETWORK}" --source-ranges='130.211.0.0/22,35.191.0.0/16,209.85.152.0/22,209.85.204.0/22' --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress-hc - -$ gcloud compute firewall-rules create --allow='tcp:80,tcp:443' --network="${CLUSTER_NETWORK}" --source-ranges="${NETWORK_CIDR}" --target-tags="${INFRA_ID}-master,${INFRA_ID}-worker" ${INFRA_ID}-ingress ----- diff --git a/modules/installation-creating-gcp-vpc.adoc b/modules/installation-creating-gcp-vpc.adoc deleted file mode 100644 index 3e40ab12d4a9..000000000000 --- a/modules/installation-creating-gcp-vpc.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-vpc_{context}"] -= Creating a VPC in GCP - -You must create a VPC in Google Cloud Platform (GCP) for your {product-title} -cluster to use. You can customize the VPC to meet your requirements. One way to -create the VPC is to modify the provided Deployment Manager template. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your GCP -infrastructure, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -ifndef::shared-vpc[] -* Generate the Ignition config files for your cluster. -endif::shared-vpc[] - -.Procedure - -. Copy the template from the *Deployment Manager template for the VPC* -section of this topic and save it as `01_vpc.py` on your computer. This template -describes the VPC that your cluster requires. - -ifdef::shared-vpc[] -. Export the following variables required by the resource definition: - -.. Export the control plane CIDR: -+ -[source,terminal] ----- -$ export MASTER_SUBNET_CIDR='10.0.0.0/17' ----- - -.. Export the compute CIDR: -+ -[source,terminal] ----- -$ export WORKER_SUBNET_CIDR='10.0.128.0/17' ----- - -.. Export the region to deploy the VPC network and cluster to: -+ -[source,terminal] ----- -$ export REGION='<region>' ----- - -. Export the variable for the ID of the project that hosts the shared VPC: -+ -[source,terminal] ----- -$ export HOST_PROJECT=<host_project> ----- - -. 
Export the variable for the email of the service account that belongs to host project: -+ -[source,terminal] ----- -$ export HOST_PROJECT_ACCOUNT=<host_service_account_email> ----- -endif::shared-vpc[] - -. Create a `01_vpc.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >01_vpc.yaml -imports: -- path: 01_vpc.py - -resources: -- name: cluster-vpc - type: 01_vpc.py - properties: -ifndef::shared-vpc[] - infra_id: '${INFRA_ID}' <1> -endif::shared-vpc[] -ifdef::shared-vpc[] - infra_id: '<prefix>' <1> -endif::shared-vpc[] - region: '${REGION}' <2> - master_subnet_cidr: '${MASTER_SUBNET_CIDR}' <3> - worker_subnet_cidr: '${WORKER_SUBNET_CIDR}' <4> -EOF ----- -ifndef::shared-vpc[] -<1> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -endif::shared-vpc[] -ifdef::shared-vpc[] -<1> `infra_id` is the prefix of the network name. -endif::shared-vpc[] -<2> `region` is the region to deploy the cluster into, for example `us-central1`. -<3> `master_subnet_cidr` is the CIDR for the master subnet, for example `10.0.0.0/17`. -<4> `worker_subnet_cidr` is the CIDR for the worker subnet, for example `10.0.128.0/17`. - -. Create the deployment by using the `gcloud` CLI: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-vpc --config 01_vpc.yaml ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ gcloud deployment-manager deployments create <vpc_deployment_name> --config 01_vpc.yaml --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} <1> ----- -<1> For `<vpc_deployment_name>`, specify the name of the VPC to deploy. - -. Export the VPC variable that other components require: -.. Export the name of the host project network: -+ -[source,terminal] ----- -$ export HOST_PROJECT_NETWORK=<vpc_network> ----- -.. Export the name of the host project control plane subnet: -+ -[source,terminal] ----- -$ export HOST_PROJECT_CONTROL_SUBNET=<control_plane_subnet> ----- -.. Export the name of the host project compute subnet: -+ -[source,terminal] ----- -$ export HOST_PROJECT_COMPUTE_SUBNET=<compute_subnet> ----- - -. Set up the shared VPC. See link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#setting_up[Setting up Shared VPC] in the GCP documentation. -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-gcp-worker.adoc b/modules/installation-creating-gcp-worker.adoc deleted file mode 100644 index 8ee13f0c22ef..000000000000 --- a/modules/installation-creating-gcp-worker.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra"] -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-gcp-worker_{context}"] -= Creating additional worker machines in GCP - -You can create worker machines in Google Cloud Platform (GCP) for your cluster -to use by launching individual instances discretely or by automated processes -outside the cluster, such as auto scaling groups. You can also take advantage of -the built-in cluster scaling mechanisms and the machine API in {product-title}. 
- -ifdef::three-node-cluster[] -[NOTE] -==== -If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. -==== -endif::three-node-cluster[] - -In this example, you manually launch one instance by using the Deployment -Manager template. Additional instances can be launched by including additional -resources of type `06_worker.py` in the file. - -[NOTE] -==== -If you do not use the provided Deployment Manager template to create your worker -machines, you must review the provided information and manually create -the infrastructure. If your cluster does not initialize correctly, you might -have to contact Red Hat support with your installation logs. -==== - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. -* Create and configure networking and load balancers in GCP. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. - -.Procedure - -. Copy the template from the *Deployment Manager template for worker machines* -section of this topic and save it as `06_worker.py` on your computer. This -template describes the worker machines that your cluster requires. - -. Export the variables that the resource definition uses. -.. Export the subnet that hosts the compute machines: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ export COMPUTE_SUBNET=(`gcloud compute networks subnets describe ${INFRA_ID}-worker-subnet --region=${REGION} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ export COMPUTE_SUBNET=(`gcloud compute networks subnets describe ${HOST_PROJECT_COMPUTE_SUBNET} --region=${REGION} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} --format json | jq -r .selfLink`) ----- -endif::shared-vpc[] - -.. Export the email address for your service account: -+ -[source,terminal] ----- -$ export WORKER_SERVICE_ACCOUNT=(`gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email'`) ----- - -.. Export the location of the compute machine Ignition config file: -+ -[source,terminal] ----- -$ export WORKER_IGNITION=`cat <installation_directory>/worker.ign` ----- - -. Create a `06_worker.yaml` resource definition file: -+ -[source,terminal] ----- -$ cat <<EOF >06_worker.yaml -imports: -- path: 06_worker.py - -resources: -- name: 'worker-0' <1> - type: 06_worker.py - properties: - infra_id: '${INFRA_ID}' <2> - zone: '${ZONE_0}' <3> - compute_subnet: '${COMPUTE_SUBNET}' <4> - image: '${CLUSTER_IMAGE}' <5> - machine_type: 'n1-standard-4' <6> - root_volume_size: '128' - service_account_email: '${WORKER_SERVICE_ACCOUNT}' <7> - ignition: '${WORKER_IGNITION}' <8> -- name: 'worker-1' - type: 06_worker.py - properties: - infra_id: '${INFRA_ID}' <2> - zone: '${ZONE_1}' <3> - compute_subnet: '${COMPUTE_SUBNET}' <4> - image: '${CLUSTER_IMAGE}' <5> - machine_type: 'n1-standard-4' <6> - root_volume_size: '128' - service_account_email: '${WORKER_SERVICE_ACCOUNT}' <7> - ignition: '${WORKER_IGNITION}' <8> -EOF ----- -<1> `name` is the name of the worker machine, for example `worker-0`. -<2> `infra_id` is the `INFRA_ID` infrastructure name from the extraction step. -<3> `zone` is the zone to deploy the worker machine into, for example `us-central1-a`. -<4> `compute_subnet` is the `selfLink` URL to the compute subnet. 
-<5> `image` is the `selfLink` URL to the {op-system} image. ^1^ -<6> `machine_type` is the machine type of the instance, for example `n1-standard-4`. -<7> `service_account_email` is the email address for the worker service account that you created. -<8> `ignition` is the contents of the `worker.ign` file. - -. Optional: If you want to launch additional instances, include additional -resources of type `06_worker.py` in your `06_worker.yaml` resource definition -file. - -. Create the deployment by using the `gcloud` CLI: -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments create ${INFRA_ID}-worker --config 06_worker.yaml ----- - -[.small] --- -1. To use a GCP Marketplace image, specify the offer to use: -** {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736` -** {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736` -** {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736` --- - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-creating-image-restricted.adoc b/modules/installation-creating-image-restricted.adoc deleted file mode 100644 index 42b9ac663ef3..000000000000 --- a/modules/installation-creating-image-restricted.adoc +++ /dev/null @@ -1,87 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc - -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="installation-creating-image-restricted_{context}"] -= Creating the {op-system} image for restricted network installations - -Download the {op-system-first} image to install {product-title} on a restricted network -ifdef::osp[{rh-openstack-first}] -ifdef::vsphere[VMware vSphere] -environment. - -.Prerequisites - -* Obtain the {product-title} installation program. For a restricted network installation, the program is on your mirror registry host. - -.Procedure - -. Log in to the Red Hat Customer Portal's https://access.redhat.com/downloads/content/290[Product Downloads page]. - -. Under *Version*, select the most recent release of {product-title} {product-version} for RHEL 8. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal to -the {product-title} version that you install. Use the image versions that match -your {product-title} version if they are available. -==== - -ifdef::osp[] -. Download the *{op-system-first} - OpenStack Image (QCOW)* image. -endif::osp[] -ifdef::vsphere[] -. Download the *{op-system-first} - vSphere* image. -endif::vsphere[] - -ifdef::osp[] -. Decompress the image. -+ -[NOTE] -==== -You must decompress the image before the cluster can use it. The name of the downloaded file might not contain a compression extension, like `.gz` or `.tgz`. 
To find out if or how the file is compressed, in a command line, enter: - ----- -$ file <name_of_downloaded_file> ----- - -==== - -. Upload the image that you decompressed to a location that is accessible from the bastion server, like Glance. For example: -+ ----- -$ openstack image create --file rhcos-44.81.202003110027-0-openstack.x86_64.qcow2 --disk-format qcow2 rhcos-${RHCOS_VERSION} ----- -+ -[IMPORTANT] -==== -Depending on your {rh-openstack} environment, you might be able to upload the image in either link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/15/html/instances_and_images_guide/index[`.raw` or `.qcow2` formats]. If you use Ceph, you must use the `.raw` format. -==== -+ -[WARNING] -==== -If the installation program finds multiple images with the same name, it chooses one of them at random. To avoid this behavior, create unique names for resources in {rh-openstack}. -==== -endif::osp[] -ifdef::vsphere[] -. Upload the image you downloaded to a location that is accessible from the bastion server. -endif::vsphere[] - -The image is now available for a restricted installation. Note the image name or location for use in {product-title} deployment. - -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/installation-creating-mirror-registry.adoc b/modules/installation-creating-mirror-registry.adoc deleted file mode 100644 index 707f373d01cb..000000000000 --- a/modules/installation-creating-mirror-registry.adoc +++ /dev/null @@ -1,175 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * openshift_images/samples-operator-alt-registry.adoc - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:restricted: -endif::[] - -[id="installation-creating-mirror-registry_{context}"] -= Creating a mirror registry - -Create a registry to host the mirrored content that you require for installing -{product-title}. - -[IMPORTANT] -==== -Deploying a disconnected registry host based on the `docker.io/library/registry:2` API for {product-title} is not officially supported by Red Hat. You can create a mirror host based on the `docker.io/library/registry:2` API with the following unsupported procedure. -==== - -ifdef::restricted[] -For installation in a restricted network, you can place the mirror -registry on a host that can be accessed from both the your network and -the internet. If you do not have access to such a host, use the -method that best fits your restrictions to bring the contents of the -mirror registry into your restricted network. -endif::restricted[] - -[NOTE] -==== -The following procedure creates a simple registry that stores data in the -`/opt/registry` folder and runs in a `podman` container. You can use a different -registry solution, such as -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#repo-mirroring-in-red-hat-quay[Red Hat Quay]. -Review the following procedure to ensure that your registry functions -correctly. -==== - -.Prerequisites - -* You have a Red Hat Enterprise Linux (RHEL) server on your network to use -as the registry host. -* The registry host can access the internet. - -.Procedure - -ifdef::restricted[] -On the mirror host, take the following actions: -endif::restricted[] - -. 
Install the required packages: -+ -[source,terminal] ----- -# yum -y install podman httpd-tools ----- -+ -The `podman` package provides the container package that you run the registry -in. The `httpd-tools` package provides the `htpasswd` utility, which -you use to create users. - -. Create folders for the registry: -+ -[source,terminal] ----- -# mkdir -p /opt/registry/{auth,certs,data} ----- -+ -These folders are mounted inside the registry container. - -. Provide a certificate for the registry. If you do not have an existing, trusted -certificate authority, you can generate a self-signed certificate: -+ -[source,terminal] ----- -$ cd /opt/registry/certs ----- -+ -[source,terminal] ----- -# openssl req -newkey rsa:4096 -nodes -sha256 -keyout domain.key -x509 -days 365 -out domain.crt ----- -+ -At the prompts, provide the required values for the certificate: -[horizontal] -Country Name (2 letter code):: Specify the two-letter ISO country code for your location. -See the link:https://www.iso.org/iso-3166-country-codes.html[ISO 3166 country codes] -standard. -State or Province Name (full name):: Enter the full name of your state or province. -Locality Name (eg, city):: Enter the name of your city. -Organization Name (eg, company):: Enter your company name. -Organizational Unit Name (eg, section):: Enter your department name. -Common Name (eg, your name or your server's hostname):: Enter the hostname for -the registry host. Ensure that your hostname is in DNS and that it resolves to -the expected IP address. -Email Address:: Enter your email address. -For more information, see the -link:https://www.openssl.org/docs/man1.1.1/man1/req.html[req] description in the -OpenSSL documentation. - -. Generate a user name and a password for your registry that uses the `bcrpt` format: -+ -[source,terminal] ----- -# htpasswd -bBc /opt/registry/auth/htpasswd <user_name> <password> <1> ----- -<1> Replace `<user_name>` and `<password>` with a user name and a password. - -. Create the `mirror-registry` container to host your registry: -+ -[source,terminal] ----- -# podman run --name mirror-registry -p <local_registry_host_port>:5000 \ <1> - -v /opt/registry/data:/var/lib/registry:z \ - -v /opt/registry/auth:/auth:z \ - -e "REGISTRY_AUTH=htpasswd" \ - -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ - -v /opt/registry/certs:/certs:z \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ - -e REGISTRY_COMPATIBILITY_SCHEMA1_ENABLED=true \ - -d docker.io/library/registry:2 ----- -<1> For `<local_registry_host_port>`, specify the port that your mirror registry -uses to serve content. - -. Open the required ports for your registry: -+ -[source,terminal] ----- -# firewall-cmd --add-port=<local_registry_host_port>/tcp --zone=internal --permanent <1> -# firewall-cmd --add-port=<local_registry_host_port>/tcp --zone=public --permanent <1> -# firewall-cmd --reload ----- -<1> For `<local_registry_host_port>`, specify the port that your mirror registry -uses to serve content. - -. Add the self-signed certificate to your list of trusted certificates: -+ -[source,terminal] ----- -# cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ -# update-ca-trust ----- -+ -You must trust your certificate to log in to your registry during the mirror process. - -. 
Confirm that the registry is available: -+ -[source,terminal] ----- -$ curl -u <user_name>:<password> -k https://<local_registry_host_name>:<local_registry_host_port>/v2/_catalog <1> - -{"repositories":[]} ----- -<1> For `<user_name>` and `<password>`, specify the user name and password -for your registry. For `<local_registry_host_name>`, specify the registry domain name -that you specified in your certificate, such as `registry.example.com`. For -`<local_registry_host_port>`, specify the port that your mirror registry uses to -serve content. -+ -If the command output displays an empty repository, your registry is available. - -//// -. To stop the registry:: -+ ----- -# podman stop mirror-registry ----- -//// - -ifeval::["{context}" == "installing-mirroring-installation-images"] -:!restricted: -endif::[] diff --git a/modules/installation-custom-alibaba-vpc.adoc b/modules/installation-custom-alibaba-vpc.adoc deleted file mode 100644 index 8549403209ad..000000000000 --- a/modules/installation-custom-alibaba-vpc.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-vpc.adoc - -:_content-type: CONCEPT -[id="installation-custom-alibaba-vpc_{context}"] -= Using a custom VPC - -In {product-title} {product-version}, you can deploy a cluster into existing subnets in an existing Virtual Private Cloud (VPC) in the Alibaba Cloud Platform. By deploying {product-title} into an existing Alibaba VPC, you can avoid limit constraints in new accounts and more easily adhere to your organization's operational constraints. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. You must configure networking using vSwitches. - -[id="installation-custom-alibaba-vpc-requirements_{context}"] -== Requirements for using your VPC - -The union of the VPC CIDR block and the machine network CIDR must be non-empty. The vSwitches must be within the machine network. - -The installation program does not create the following components: - -* VPC -* vSwitches -* Route table -* NAT gateway - -include::snippets/custom-dns-server.adoc[] - -[id="installation-custom-alibaba-vpc-validation_{context}"] -== VPC validation - -To ensure that the vSwitches you provide are suitable, the installation program confirms the following data: - -* All the vSwitches that you specify must exist. -* You have provided one or more vSwitches for control plane machines and compute machines. -* The vSwitches' CIDRs belong to the machine CIDR that you specified. - -[id="installation-about-custom-alibaba-permissions_{context}"] -== Division of permissions - -Some individuals can create different resources in your cloud than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components, such as VPCs or vSwitches. - -[id="installation-custom-alibaba-vpc-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} into an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VPC. - -* ICMP ingress is allowed to the entire network. - -* TCP 22 ingress (SSH) is allowed to the entire network. - -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. - -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. 
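The custom VPC modules in this group (Alibaba Cloud above, and the AWS, GCP, and IBM Cloud variants that follow) all describe the same validation rule: every existing subnet or vSwitch CIDR that you pass to the installation program must belong to the machine network CIDR. A minimal pre-flight sketch of that check, using only Python's standard `ipaddress` module, follows; the CIDR values are placeholders and the script is not part of the installation program.

[source,python]
----
# check_cidrs.py: a minimal sketch of the subnet-to-machine-CIDR containment check
# described in the custom VPC modules. The values below are placeholders.
import ipaddress

# Networking.MachineCIDR from install-config.yaml (placeholder value).
machine_cidr = ipaddress.ip_network("10.0.0.0/16")

# CIDRs of the existing subnets or vSwitches that you plan to reuse (placeholders).
subnet_cidrs = [
    ipaddress.ip_network("10.0.0.0/17"),    # control plane subnet
    ipaddress.ip_network("10.0.128.0/17"),  # compute subnet
]

for subnet in subnet_cidrs:
    # Each subnet must fall entirely inside the machine network CIDR.
    if not subnet.subnet_of(machine_cidr):
        raise SystemExit(f"{subnet} is not contained in the machine CIDR {machine_cidr}")

print("All subnet CIDRs fall within the machine network CIDR.")
----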
diff --git a/modules/installation-custom-aws-vpc.adoc b/modules/installation-custom-aws-vpc.adoc deleted file mode 100644 index ff4978815a26..000000000000 --- a/modules/installation-custom-aws-vpc.adoc +++ /dev/null @@ -1,302 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:public: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws-outposts: -endif::[] - -:_content-type: CONCEPT -[id="installation-custom-aws-vpc_{context}"] -= About using a custom VPC - -ifndef::aws-outposts[] -In {product-title} {product-version}, you can deploy a cluster into existing subnets in an existing Amazon Virtual Private Cloud (VPC) in Amazon Web Services (AWS). By deploying {product-title} into an existing AWS VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. -endif::aws-outposts[] -ifdef::aws-outposts[] -{product-title} {product-version} installer cannot automatically deploy AWS Subnets on AWS Outposts, so you will need to manually configure the VPC. Therefore, you have to deploy the cluster into existing subnets in an existing Amazon Virtual Private Cloud (VPC) in Amazon Web Services (AWS). In addition, by deploying {product-title} into an existing AWS VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company’s guidelines set. -endif::aws-outposts[] - -Because the installation program cannot know what other components are also in your existing subnets, it cannot choose subnet CIDRs and so forth on your behalf. You must configure networking for the subnets that you install your cluster to yourself. - -[id="installation-custom-aws-vpc-requirements_{context}"] -== Requirements for using your VPC - -The installation program no longer creates the following components: - -* Internet gateways -* NAT gateways -* Subnets -* Route tables -* VPCs -* VPC DHCP options -* VPC endpoints - -include::snippets/custom-dns-server.adoc[] - -If you use a custom VPC, you must correctly configure it and its subnets for the installation program and the cluster to use. See link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_wizard.html[Amazon VPC console wizard configurations] and link:https://docs.aws.amazon.com/vpc/latest/userguide/working-with-vpcs.html[Work with VPCs and subnets] in the AWS documentation for more information on creating and managing an AWS VPC. - -The installation program cannot: - -* Subdivide network ranges for the cluster to use. -* Set route tables for the subnets. -* Set VPC options like DHCP. - -You must complete these tasks before you install the cluster. 
See link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Networking.html[VPC networking components] and link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html[Route tables for your VPC] for more information on configuring networking in an AWS VPC. - -Your VPC must meet the following characteristics: - -ifdef::public[] -* Create a public and private subnet for each availability zone that your cluster uses. Each availability zone can contain no more than one public and one private subnet. For an example of this type of configuration, see link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html[VPC with public and private subnets (NAT)] in the AWS documentation. -+ -Record each subnet ID. Completing the installation requires that you enter these values in the `platform` section of the `install-config.yaml` file. See link:https://docs.aws.amazon.com/managedservices/latest/userguide/find-subnet.html[Finding a subnet ID] in the AWS documentation. -* The VPC's CIDR block must contain the `Networking.MachineCIDR` range, which is the IP address pool for cluster machines. The subnet CIDR blocks must belong to the machine CIDR that you specify. -* The VPC must have a public internet gateway attached to it. For each availability zone: -** The public subnet requires a route to the internet gateway. -** The public subnet requires a NAT gateway with an EIP address. -** The private subnet requires a route to the NAT gateway in public subnet. -endif::public[] -ifdef::aws-outposts[] -[NOTE] -==== -To allow the creation of {product-title} with remote workers in the AWS Outposts, you must create at least one private subnet in the AWS Outpost instance for the workload instances creation and one private subnet in an AWS region for the control plane instances creation. If you specify more than one private subnet in the region, the control plane instances will be distributed across these subnets. You will also need to create a public subnet in each of the availability zones used for private subnets, including the Outpost private subnet, as Network Load Balancers will be created in the AWS region for the API server and Ingress network as part of the cluster installation. It is possible to create an AWS region private subnet in the same Availability zone as an Outpost private subnet. -==== -* Create a public and private subnet in the AWS Region for each availability zone that your control plane uses. Each availability zone can contain no more than one public and one private subnet in the AWS region. For an example of this type of configuration, see link:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html[VPC with public and private subnets (NAT)] in the AWS documentation. -+ -To create a private subnet in the AWS Outposts, you need to first ensure that the Outpost instance is located in the desired availability zone. Then, you can create the private subnet within that availability zone within the Outpost instance, by adding the Outpost ARN. Make sure there is another public subnet in the AWS Region created in the same availability zone. -+ -Record each subnet ID. Completing the installation requires that you enter all the subnets IDs, created in the AWS Region, in the `platform` section of the `install-config.yaml` file and changing the workers `machineset` to use the private subnet ID created in the Outpost. See link:https://docs.aws.amazon.com/managedservices/latest/userguide/find-subnet.html[Finding a subnet ID] in the AWS documentation. 
-+ -[IMPORTANT] -==== -In case you need to create a public subnet in the AWS Outposts, verify that this subnet is not used for the Network or Classic LoadBalancer, otherwise the LoadBalancer creation fails. To achieve that, the `kubernetes.io/cluster/.*-outposts: owned` special tag must be included in the subnet. -==== -* The VPC's CIDR block must contain the `Networking.MachineCIDR` range, which is the IP address pool for cluster machines. The subnet CIDR blocks must belong to the machine CIDR that you specify. -* The VPC must have a public internet gateway attached to it. For each availability zone: -** The public subnet requires a route to the internet gateway. -** The public subnet requires a NAT gateway with an EIP address. -** The private subnet requires a route to the NAT gateway in public subnet. - -+ -[NOTE] -==== -To access your local cluster over your local network, the VPC must be associated with your Outpost's local gateway route table. For more information, see link:https://docs.aws.amazon.com/outposts/latest/userguide/outposts-local-gateways.html#vpc-associations[VPC associations] in the AWS Outposts User Guide. -==== -endif::aws-outposts[] -* The VPC must not use the `kubernetes.io/cluster/.*: owned`, `Name`, and `openshift.io/cluster` tags. -+ -The installation program modifies your subnets to add the `kubernetes.io/cluster/.*: shared` tag, so your subnets must have at least one free tag slot available for it. See link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions[Tag Restrictions] in the AWS documentation to confirm that the installation program can add a tag to each subnet that you specify. You cannot use a `Name` tag, because it overlaps with the EC2 `Name` field and the installation fails. -* You must enable the `enableDnsSupport` and `enableDnsHostnames` attributes in your VPC, so that the cluster can use the Route 53 zones that are attached to the VPC to resolve cluster's internal DNS records. See link:https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-support[DNS Support in Your VPC] in the AWS documentation. -+ -If you prefer to use your own Route 53 hosted private zone, you must associate the existing hosted zone with your VPC prior to installing a cluster. You can define your hosted zone using the `platform.aws.hostedZone` field in the `install-config.yaml` file. - -ifndef::aws-secret,aws-outposts[] -If you are working in a disconnected environment, you are unable to reach the public IP addresses for EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: -endif::aws-secret,aws-outposts[] - -ifdef::aws-secret[] -A cluster in an SC2S or C2S Region is unable to reach the public IP addresses for the EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: -endif::aws-secret[] - -[discrete] -[id="create-vpc-endpoints_{context}"] -=== Option 1: Create VPC endpoints - -Create a VPC endpoint and attach it to the subnets that the clusters are using. 
Name the endpoints as follows: - -ifndef::aws-china,aws-secret[] -* `ec2.<aws_region>.amazonaws.com` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` -endif::aws-china,aws-secret[] - -ifdef::aws-china[] -* `ec2.<aws_region>.amazonaws.com.cn` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` -endif::aws-china[] - -ifdef::aws-secret[] -SC2S:: -** `elasticloadbalancing.<aws_region>.sc2s.sgov.gov` -** `ec2.<aws_region>.sc2s.sgov.gov` -** `s3.<aws_region>.sc2s.sgov.gov` -C2S:: -** `elasticloadbalancing.<aws_region>.c2s.ic.gov` -** `ec2.<aws_region>.c2s.ic.gov` -** `s3.<aws_region>.c2s.ic.gov` -endif::aws-secret[] - -With this option, network traffic remains private between your VPC and the required AWS services. - -[discrete] -[id="create-proxy-without-vpc-endpoints_{context}"] -=== Option 2: Create a proxy without VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. - -[discrete] -[id="create-proxy-with-vpc-endpoints_{context}"] -=== Option 3: Create a proxy with VPC endpoints -As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: - -ifndef::aws-china,aws-secret[] -* `ec2.<aws_region>.amazonaws.com` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` -endif::aws-china,aws-secret[] - -ifdef::aws-china[] -* `ec2.<aws_region>.amazonaws.com.cn` -* `elasticloadbalancing.<aws_region>.amazonaws.com` -* `s3.<aws_region>.amazonaws.com` -endif::aws-china[] - -ifdef::aws-secret[] -SC2S:: -** `elasticloadbalancing.<aws_region>.sc2s.sgov.gov` -** `ec2.<aws_region>.sc2s.sgov.gov` -** `s3.<aws_region>.sc2s.sgov.gov` -C2S:: -** `elasticloadbalancing.<aws_region>.c2s.ic.gov` -** `ec2.<aws_region>.c2s.ic.gov` -** `s3.<aws_region>.c2s.ic.gov` -endif::aws-secret[] - -When configuring the proxy in the `install-config.yaml` file, add these endpoints to the `noProxy` field. With this option, the proxy prevents the cluster from accessing the internet directly. However, network traffic remains private between your VPC and the required AWS services. - -.Required VPC components - -You must provide a suitable VPC and subnets that allow communication to your -machines. - -[cols="2a,7a,3a,3a",options="header"] -|=== - -|Component -|AWS type -2+|Description - -|VPC -|* `AWS::EC2::VPC` -* `AWS::EC2::VPCEndpoint` -2+|You must provide a public VPC for the cluster to use. The VPC uses an -endpoint that references the route tables for each subnet to improve communication with the registry that is hosted in S3. - -|Public subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::SubnetNetworkAclAssociation` -2+|Your VPC must have public subnets for between 1 and 3 availability zones -and associate them with appropriate Ingress rules. - -|Internet gateway -| -* `AWS::EC2::InternetGateway` -* `AWS::EC2::VPCGatewayAttachment` -* `AWS::EC2::RouteTable` -* `AWS::EC2::Route` -* `AWS::EC2::SubnetRouteTableAssociation` -* `AWS::EC2::NatGateway` -* `AWS::EC2::EIP` -2+|You must have a public internet gateway, with public routes, attached to the -VPC. In the provided templates, each public subnet has a NAT gateway with an EIP address. 
These NAT gateways allow cluster resources, like private subnet instances, to reach the internet and are not required for some restricted network or proxy scenarios. - -.7+|Network access control -.7+| * `AWS::EC2::NetworkAcl` -* `AWS::EC2::NetworkAclEntry` -2+|You must allow the VPC to access the following ports: -h|Port -h|Reason - -|`80` -|Inbound HTTP traffic - -|`443` -|Inbound HTTPS traffic - -|`22` -|Inbound SSH traffic - -|`1024` - `65535` -|Inbound ephemeral traffic - -|`0` - `65535` -|Outbound ephemeral traffic - - -|Private subnets -|* `AWS::EC2::Subnet` -* `AWS::EC2::RouteTable` -* `AWS::EC2::SubnetRouteTableAssociation` -2+|Your VPC can have private subnets. The provided CloudFormation templates -can create private subnets for between 1 and 3 availability zones. -ifdef::aws-outposts[] -To enable remote workers running in the Outpost, the VPC must include a private subnet located within the Outpost instance, in addition to the private subnets located within the corresponding AWS region. -endif::aws-outposts[] -If you use private subnets, you must provide appropriate routes and tables -for them. - -|=== - -[id="installation-custom-aws-vpc-validation_{context}"] -== VPC validation - -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the subnets that you specify exist. -* You provide private subnets. -* The subnet CIDRs belong to the machine CIDR that you specified. -ifndef::aws-outposts[] -* You provide subnets for each availability zone. Each availability zone contains no more than one public and one private subnet. If you use a private cluster, provide only a private subnet for each availability zone. Otherwise, provide exactly one public and private subnet for each availability zone. -endif::aws-outposts[] -ifdef::aws-outposts[] -* You provide subnets for each availability zone. Each availability zone contains exactly one public and one private subnet in the AWS region (not created in the Outpost instance). The availability zone in which the Outpost instance is installed should include one aditional private subnet in the Outpost instance. -endif::aws-outposts[] -* You provide a public subnet for each private subnet availability zone. Machines are not provisioned in availability zones that you do not provide private subnets for. - -If you destroy a cluster that uses an existing VPC, the VPC is not deleted. When you remove the {product-title} cluster from a VPC, the `kubernetes.io/cluster/.*: shared` tag is removed from the subnets that it used. - -[id="installation-about-custom-aws-permissions_{context}"] -== Division of permissions - -Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resource in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or ingress rules. - -The AWS credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. 
You still need permission to make the application resources that the machines within the cluster require, such as ELBs, security groups, S3 buckets, and nodes. - -[id="installation-custom-aws-vpc-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VPC. -* ICMP ingress is allowed from the entire network. -* TCP 22 ingress (SSH) is allowed to the entire network. -//You can restrict ingress to the control plane and compute security groups by either adding the security groups to an SSH bastion instance or altering rules to allow the bastion. -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. -//This should be restricted to the control plane and compute security groups, instead of the current by-VPC-CIDR logic to avoid leaking sensitive Ignition configs to non-cluster entities sharing the VPC. - -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!public: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws-outposts: -endif::[] diff --git a/modules/installation-custom-gcp-vpc.adoc b/modules/installation-custom-gcp-vpc.adoc deleted file mode 100644 index 4e935dd2c622..000000000000 --- a/modules/installation-custom-gcp-vpc.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-vpc.adoc - -:_content-type: CONCEPT -[id="installation-custom-gcp-vpc_{context}"] -= About using a custom VPC - -In {product-title} {product-version}, you can deploy a cluster into existing subnets in an existing Virtual Private Cloud (VPC) in Google Cloud Platform (GCP). By deploying {product-title} into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. You must configure networking for the subnets. - -[id="installation-custom-gcp-vpc-requirements_{context}"] -== Requirements for using your VPC - -The union of the VPC CIDR block and the machine network CIDR must be non-empty. The subnets must be within the machine network. - -The installation program does not create the following components: - -* NAT gateways -* Subnets -* Route tables -* VPC network - -include::snippets/custom-dns-server.adoc[] - -[id="installation-custom-gcp-vpc-validation_{context}"] -== VPC validation - -To ensure that the subnets that you provide are suitable, the installation program confirms the following data: - -* All the subnets that you specify exist. -* You provide one subnet for control-plane machines and one subnet for compute machines. -* The subnet's CIDRs belong to the machine CIDR that you specified. - -[id="installation-about-custom-gcp-permissions_{context}"] -== Division of permissions - -Some individuals can create different resource in your clouds than others. 
For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or ingress rules. - -[id="installation-custom-gcp-vpc-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VPC. - -* ICMP ingress is allowed to the entire network. - -* TCP 22 ingress (SSH) is allowed to the entire network. - -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. - -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. diff --git a/modules/installation-custom-ibm-cloud-vpc.adoc b/modules/installation-custom-ibm-cloud-vpc.adoc deleted file mode 100644 index 3c60e3bebb25..000000000000 --- a/modules/installation-custom-ibm-cloud-vpc.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc - -:_content-type: CONCEPT -[id="installation-custom-ibm-cloud-vpc_{context}"] -= About using a custom VPC - -In {product-title} {product-version}, you can deploy a cluster into the subnets of an existing IBM Virtual Private Cloud (VPC). Deploying {product-title} into an existing VPC can help you avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. - -Because the installation program cannot know what other components are in your existing subnets, it cannot choose subnet CIDRs and so forth. You must configure networking for the subnets to which you will install the cluster. - -[id="installation-custom-ibm-cloud-vpc-requirements_{context}"] -== Requirements for using your VPC - -You must correctly configure the existing VPC and its subnets before you install the cluster. The installation program does not create the following components: - -* NAT gateways -* Subnets -* Route tables -* VPC network - -The installation program cannot: - -* Subdivide network ranges for the cluster to use -* Set route tables for the subnets -* Set VPC options like DHCP - -include::snippets/custom-dns-server.adoc[] - -[id="installation-custom-ibm-cloud-vpc-validation_{context}"] -== VPC validation - -The VPC and all of the subnets must be in an existing resource group. The cluster is deployed to this resource group. - -As part of the installation, specify the following in the `install-config.yaml` file: - -* The name of the resource group -* The name of VPC -* The subnets for control plane machines and compute machines - -To ensure that the subnets that you provide are suitable, the installation program confirms the following: - -* All of the subnets that you specify exist. -* For each availability zone in the region, you specify: -** One subnet for control plane machines. -** One subnet for compute machines. -* The machine CIDR that you specified contains the subnets for the compute machines and control plane machines. - -[NOTE] -==== -Subnet IDs are not supported. 
-==== - -[id="installation-custom-ibm-cloud-vpc-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* You can install multiple {product-title} clusters in the same VPC. - -* ICMP ingress is allowed to the entire network. - -* TCP port 22 ingress (SSH) is allowed to the entire network. - -* Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. - -* Control plane TCP 22623 ingress (MCS) is allowed to the entire network. diff --git a/modules/installation-custom-ibm-power-vs.adoc b/modules/installation-custom-ibm-power-vs.adoc deleted file mode 100644 index d4a249fdebe9..000000000000 --- a/modules/installation-custom-ibm-power-vs.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc - -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:private: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-powervs-vpc: -endif::[] - -:_content-type: CONCEPT -ifndef::private[] -[id="installation-custom-ibm-powervs-vpc_{context}"] -= About using a custom VPC - -ifdef::ibm-powervs-vpc[] -In {product-title} {product-version}, you can deploy a cluster using an existing IBM Virtual Private Cloud (VPC). - -Because the installation program cannot know what other components are in your existing subnets, it cannot choose subnet CIDRs and so forth. You must configure networking for the subnets to which you will install the cluster. -endif::ibm-powervs-vpc[] -ifdef::restricted[] -In {product-title} {product-version}, you can deploy a cluster into the subnets of an existing IBM Virtual Private Cloud (VPC). -endif::restricted[] -endif::private[] - -[id="installation-custom-ibm-power-vs-requirements_{context}"] -ifdef::private[] -= Requirements for using your VPC -endif::private[] -ifdef::ibm-powervs-vpc,restricted[] -== Requirements for using your VPC -endif::ibm-powervs-vpc,restricted[] - -You must correctly configure the existing VPC and its subnets before you install the cluster. The installation program does not create a VPC or VPC subnet in this scenario. - -The installation program cannot: - -* Subdivide network ranges for the cluster to use -* Set route tables for the subnets -* Set VPC options like DHCP - -include::snippets/custom-dns-server.adoc[] - -[id="installation-custom-ibm-power-vs-validation_{context}"] -== VPC validation - -The VPC and all of the subnets must be in an existing resource group. The cluster is deployed to this resource group. - -As part of the installation, specify the following in the `install-config.yaml` file: - -* The name of the resource group -* The name of VPC -* The name of the VPC subnet - -To ensure that the subnets that you provide are suitable, the installation program confirms that all of the subnets you specify exists. - -[NOTE] -==== -Subnet IDs are not supported. 
-==== - -[id="installation-custom-ibm-power-vs-isolation_{context}"] -== Isolation between clusters - -If you deploy {product-title} to an existing network, the isolation of cluster services is reduced in the following ways: - -* ICMP Ingress is allowed to the entire network. - -* TCP port 22 Ingress (SSH) is allowed to the entire network. - -* Control plane TCP 6443 Ingress (Kubernetes API) is allowed to the entire network. - -* Control plane TCP 22623 Ingress (MCS) is allowed to the entire network. - -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!private: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-powervs-vpc: -endif::[] \ No newline at end of file diff --git a/modules/installation-deployment-manager-bootstrap.adoc b/modules/installation-deployment-manager-bootstrap.adoc deleted file mode 100644 index 476ee1141437..000000000000 --- a/modules/installation-deployment-manager-bootstrap.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-bootstrap_{context}"] -= Deployment Manager template for the bootstrap machine - -You can use the following Deployment Manager template to deploy the bootstrap -machine that you need for your {product-title} cluster: - -.`04_bootstrap.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/04_bootstrap.py[] ----- -==== diff --git a/modules/installation-deployment-manager-control-plane.adoc b/modules/installation-deployment-manager-control-plane.adoc deleted file mode 100644 index 83f14c0f1760..000000000000 --- a/modules/installation-deployment-manager-control-plane.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-control-plane_{context}"] -= Deployment Manager template for control plane machines - -You can use the following Deployment Manager template to deploy the control -plane machines that you need for your {product-title} cluster: - -.`05_control_plane.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/05_control_plane.py[] ----- -==== diff --git a/modules/installation-deployment-manager-dns.adoc b/modules/installation-deployment-manager-dns.adoc deleted file mode 100644 index 78e333aa7ab4..000000000000 --- a/modules/installation-deployment-manager-dns.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-dns_{context}"] -= Deployment Manager template for the network and load balancers - -You can use the following Deployment Manager template to deploy the networking -objects and load balancers that you need for your {product-title} cluster: - -.`02_infra.py` Deployment Manager template -[source,python] ----- -def GenerateConfig(context): - - resources = [{ - 'name': 
context.properties['infra_id'] + '-cluster-public-ip', - 'type': 'compute.v1.address', - 'properties': { - 'region': context.properties['region'] - } - }, { - 'name': context.properties['infra_id'] + '-api-http-health-check', - 'type': 'compute.v1.httpHealthCheck', - 'properties': { - 'port': 6080, - 'requestPath': '/readyz' - } - }, { - 'name': context.properties['infra_id'] + '-api-target-pool', - 'type': 'compute.v1.targetPool', - 'properties': { - 'region': context.properties['region'], - 'healthChecks': ['$(ref.' + context.properties['infra_id'] + '-api-http-health-check.selfLink)'], - 'instances': [] - } - }, { - 'name': context.properties['infra_id'] + '-api-forwarding-rule', - 'type': 'compute.v1.forwardingRule', - 'properties': { - 'region': context.properties['region'], - 'IPAddress': '$(ref.' + context.properties['infra_id'] + '-cluster-public-ip.selfLink)', - 'target': '$(ref.' + context.properties['infra_id'] + '-api-target-pool.selfLink)', - 'portRange': '6443' - } - }, { - 'name': context.properties['infra_id'] + '-ign-http-health-check', - 'type': 'compute.v1.httpHealthCheck', - 'properties': { - 'port': 22624, - 'requestPath': '/healthz' - } - }, { - 'name': context.properties['infra_id'] + '-ign-target-pool', - 'type': 'compute.v1.targetPool', - 'properties': { - 'region': context.properties['region'], - 'healthChecks': ['$(ref.' + context.properties['infra_id'] + '-ign-http-health-check.selfLink)'], - 'instances': [] - } - }, { - 'name': context.properties['infra_id'] + '-ign-forwarding-rule', - 'type': 'compute.v1.forwardingRule', - 'properties': { - 'region': context.properties['region'], - 'IPAddress': '$(ref.' + context.properties['infra_id'] + '-cluster-public-ip.selfLink)', - 'target': '$(ref.' + context.properties['infra_id'] + '-ign-target-pool.selfLink)', - 'portRange': '22623' - } - }, { - 'name': context.properties['infra_id'] + '-private-zone', - 'type': 'dns.v1.managedZone', - 'properties': { - 'description': '', - 'dnsName': context.properties['cluster_domain'] + '.', - 'visibility': 'private', - 'privateVisibilityConfig': { - 'networks': [{ - 'networkUrl': context.properties['cluster_network'] - }] - } - } - }] - return {'resources': resources} ----- diff --git a/modules/installation-deployment-manager-ext-lb.adoc b/modules/installation-deployment-manager-ext-lb.adoc deleted file mode 100644 index cc7209d31ef3..000000000000 --- a/modules/installation-deployment-manager-ext-lb.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="installation-deployment-manager-ext-lb_{context}"] -= Deployment Manager template for the external load balancer - -You can use the following Deployment Manager template to deploy the external load balancer that you need for your {product-title} cluster: - -.`02_lb_ext.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/02_lb_ext.py[] ----- -==== diff --git a/modules/installation-deployment-manager-firewall-rules.adoc b/modules/installation-deployment-manager-firewall-rules.adoc deleted file mode 100644 index 0c818440439b..000000000000 --- a/modules/installation-deployment-manager-firewall-rules.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-shared-vpc.adoc - 
-[id="installation-deployment-manager-firewall-rules_{context}"] -= Deployment Manager template for firewall rules - -You can use the following Deployment Manager template to deploy the firewall rues that you need for your {product-title} cluster: - -.`03_firewall.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/03_firewall.py[] ----- -==== diff --git a/modules/installation-deployment-manager-iam-shared-vpc.adoc b/modules/installation-deployment-manager-iam-shared-vpc.adoc deleted file mode 100644 index 037ffccc4f27..000000000000 --- a/modules/installation-deployment-manager-iam-shared-vpc.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-shared-vpc.adoc - -[id="installation-deployment-manager-iam-shared-vpc_{context}"] -= Deployment Manager template for IAM roles - -You can use the following Deployment Manager template to deploy the IAM roles that you need for your {product-title} cluster: - -.`03_iam.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/03_iam.py[] ----- -==== diff --git a/modules/installation-deployment-manager-int-lb.adoc b/modules/installation-deployment-manager-int-lb.adoc deleted file mode 100644 index e14074135740..000000000000 --- a/modules/installation-deployment-manager-int-lb.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="installation-deployment-manager-int-lb_{context}"] -= Deployment Manager template for the internal load balancer - -You can use the following Deployment Manager template to deploy the internal load balancer that you need for your {product-title} cluster: - -.`02_lb_int.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/02_lb_int.py[] ----- -==== - -You will need this template in addition to the `02_lb_ext.py` template when you create an external cluster. 
diff --git a/modules/installation-deployment-manager-private-dns.adoc b/modules/installation-deployment-manager-private-dns.adoc deleted file mode 100644 index 846a9253025b..000000000000 --- a/modules/installation-deployment-manager-private-dns.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="installation-deployment-manager-private-dns_{context}"] -= Deployment Manager template for the private DNS - -You can use the following Deployment Manager template to deploy the private DNS that you need for your {product-title} cluster: - -.`02_dns.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/02_dns.py[] ----- -==== diff --git a/modules/installation-deployment-manager-security.adoc b/modules/installation-deployment-manager-security.adoc deleted file mode 100644 index a518b06e468c..000000000000 --- a/modules/installation-deployment-manager-security.adoc +++ /dev/null @@ -1,154 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-security_{context}"] -= Deployment Manager template for firewall rules and IAM roles - -You can use the following Deployment Manager template to deploy the security -objects that you need for your {product-title} cluster: - -.`03_security.py` Deployment Manager template -[source,python] ----- -def GenerateConfig(context): - - resources = [{ - 'name': context.properties['infra_id'] + '-api', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'tcp', - 'ports': ['6443'] - }], - 'sourceRanges': ['0.0.0.0/0'], - 'targetTags': [context.properties['infra_id'] + '-master'] - } - }, { - 'name': context.properties['infra_id'] + '-mcs', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'tcp', - 'ports': ['22623'] - }], - 'sourceRanges': [ - context.properties['network_cidr'], - context.properties['master_nat_ip'], - context.properties['worker_nat_ip'] - ], - 'targetTags': [context.properties['infra_id'] + '-master'] - } - }, { - 'name': context.properties['infra_id'] + '-health-checks', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'tcp', - 'ports': ['6080', '22624'] - }], - 'sourceRanges': ['35.191.0.0/16', '209.85.152.0/22', '209.85.204.0/22'], - 'targetTags': [context.properties['infra_id'] + '-master'] - } - }, { - 'name': context.properties['infra_id'] + '-etcd', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'tcp', - 'ports': ['2379-2380'] - }], - 'sourceTags': [context.properties['infra_id'] + '-master'], - 'targetTags': [context.properties['infra_id'] + '-master'] - } - }, { - 'name': context.properties['infra_id'] + '-control-plane', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'tcp', - 'ports': ['10257'] - },{ - 'IPProtocol': 'tcp', - 'ports': ['10259'] - }], - 'sourceTags': [ - context.properties['infra_id'] + '-master', - 
context.properties['infra_id'] + '-worker' - ], - 'targetTags': [context.properties['infra_id'] + '-master'] - } - }, { - 'name': context.properties['infra_id'] + '-internal-network', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'icmp' - },{ - 'IPProtocol': 'tcp', - 'ports': ['22'] - }], - 'sourceRanges': [context.properties['network_cidr']], - 'targetTags': [ - context.properties['infra_id'] + '-master', - context.properties['infra_id'] + '-worker' - ] - } - }, { - 'name': context.properties['infra_id'] + '-internal-cluster', - 'type': 'compute.v1.firewall', - 'properties': { - 'network': context.properties['cluster_network'], - 'allowed': [{ - 'IPProtocol': 'udp', - 'ports': ['4789', '6081'] - },{ - 'IPProtocol': 'tcp', - 'ports': ['9000-9999'] - },{ - 'IPProtocol': 'udp', - 'ports': ['9000-9999'] - },{ - 'IPProtocol': 'tcp', - 'ports': ['10250'] - },{ - 'IPProtocol': 'tcp', - 'ports': ['30000-32767'] - },{ - 'IPProtocol': 'udp', - 'ports': ['30000-32767'] - }], - 'sourceTags': [ - context.properties['infra_id'] + '-master', - context.properties['infra_id'] + '-worker' - ], - 'targetTags': [ - context.properties['infra_id'] + '-master', - context.properties['infra_id'] + '-worker' - ] - } - }, { - 'name': context.properties['infra_id'] + '-master-node-sa', - 'type': 'iam.v1.serviceAccount', - 'properties': { - 'accountId': context.properties['infra_id'] + '-m', - 'displayName': context.properties['infra_id'] + '-master-node' - } - }, { - 'name': context.properties['infra_id'] + '-worker-node-sa', - 'type': 'iam.v1.serviceAccount', - 'properties': { - 'accountId': context.properties['infra_id'] + '-w', - 'displayName': context.properties['infra_id'] + '-worker-node' - } - }] - - return {'resources': resources} ----- diff --git a/modules/installation-deployment-manager-vpc.adoc b/modules/installation-deployment-manager-vpc.adoc deleted file mode 100644 index 67da87e8f3d9..000000000000 --- a/modules/installation-deployment-manager-vpc.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-vpc_{context}"] -= Deployment Manager template for the VPC - -You can use the following Deployment Manager template to deploy the VPC that -you need for your {product-title} cluster: - -.`01_vpc.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- -include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/01_vpc.py[] ----- -==== diff --git a/modules/installation-deployment-manager-worker.adoc b/modules/installation-deployment-manager-worker.adoc deleted file mode 100644 index 4622ad2cf1c5..000000000000 --- a/modules/installation-deployment-manager-worker.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-deployment-manager-worker_{context}"] -= Deployment Manager template for worker machines - -You can use the following Deployment Manager template to deploy the worker machines -that you need for your {product-title} cluster: - -.`06_worker.py` Deployment Manager template -[%collapsible] -==== -[source,python] ----- 
-include::https://raw.githubusercontent.com/openshift/installer/release-4.13/upi/gcp/06_worker.py[] ----- -==== diff --git a/modules/installation-disk-partitioning-upi-templates.adoc b/modules/installation-disk-partitioning-upi-templates.adoc deleted file mode 100644 index 2a8d332cebef..000000000000 --- a/modules/installation-disk-partitioning-upi-templates.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -// Similar content to what is in this module is also present in modules/installation-disk-partitioning.adoc. <-- This module is in use with the following vSphere assemblies: -// * installing-vsphere.adoc -// * installing-vsphere-network-customizations.adoc -// * installing-restricted-networks-vsphere.adoc - -// Similar content to what is in this module is also present in modules/installation-user-infra-machines-advanced.adoc. <-- This module is in use with the following bare metal assemblies: -// * installing-bare-metal-network-customizations.adoc -// * installing-bare-metal.adoc -// * installing-restricted-networks-bare-metal.adoc - -:_content-type: PROCEDURE -[id="installation-disk-partitioning-upi-templates_{context}"] -= Optional: Creating a separate `/var` partition -It is recommended that disk partitioning for {product-title} be left to the installer. However, there are cases where you might want to create separate partitions in a part of the filesystem that you expect to grow. - -{product-title} supports the addition of a single partition to attach storage to either the `/var` partition or a subdirectory of `/var`. For example: - -* `/var/lib/containers`: Holds container-related content that can grow as more images and containers are added to a system. -* `/var/lib/etcd`: Holds data that you might want to keep separate for purposes such as performance optimization of etcd storage. -* `/var`: Holds data that you might want to keep separate for purposes such as auditing. - -Storing the contents of a `/var` directory separately makes it easier to grow storage for those areas as needed and reinstall {product-title} at a later date and keep that data intact. With this method, you will not have to pull all your containers again, nor will you have to copy massive log files when you update systems. - -Because `/var` must be in place before a fresh installation of {op-system-first}, the following procedure sets up the separate `/var` partition by creating a machine config manifest that is inserted during the `openshift-install` preparation phases of an {product-title} installation. - -[IMPORTANT] -==== -If you follow the steps to create a separate `/var` partition in this procedure, it is not necessary to create the Kubernetes manifest and Ignition config files again as described later in this section. -==== - -.Procedure - -. Create a directory to hold the {product-title} installation files: -+ -[source,terminal] ----- -$ mkdir $HOME/clusterconfig ----- - -. Run `openshift-install` to create a set of files in the `manifest` and `openshift` subdirectories. 
Answer the system questions as you are prompted: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir $HOME/clusterconfig ----- -+ -.Example output -+ -[source,terminal] ----- -? SSH Public Key ... -INFO Credentials loaded from the "myprofile" profile in file "/home/myuser/.aws/credentials" -INFO Consuming Install Config from target directory -INFO Manifests created in: $HOME/clusterconfig/manifests and $HOME/clusterconfig/openshift ----- - -. Optional: Confirm that the installation program created manifests in the `clusterconfig/openshift` directory: -+ -[source,terminal] ----- -$ ls $HOME/clusterconfig/openshift/ ----- -+ -.Example output -+ -[source,terminal] ----- -99_kubeadmin-password-secret.yaml -99_openshift-cluster-api_master-machines-0.yaml -99_openshift-cluster-api_master-machines-1.yaml -99_openshift-cluster-api_master-machines-2.yaml -... ----- - -. Create a Butane config that configures the additional partition. For example, name the file `$HOME/clusterconfig/98-var-partition.bu`, change the disk device name to the name of the storage device on the `worker` systems, and set the storage size as appropriate. This example places the `/var` directory on a separate partition: -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 98-var-partition -storage: - disks: - - device: /dev/<device_name> <1> - partitions: - - label: var - start_mib: <partition_start_offset> <2> - size_mib: <partition_size> <3> - filesystems: - - device: /dev/disk/by-partlabel/var - path: /var - format: xfs - mount_options: [defaults, prjquota] <4> - with_mount_unit: true ----- -+ -<1> The storage device name of the disk that you want to partition. -<2> When adding a data partition to the boot disk, a minimum value of 25000 MiB (Mebibytes) is recommended. The root file system is automatically resized to fill all available space up to the specified offset. If no value is specified, or if the specified value is smaller than the recommended minimum, the resulting root file system will be too small, and future reinstalls of {op-system} might overwrite the beginning of the data partition. -<3> The size of the data partition in mebibytes. -<4> The `prjquota` mount option must be enabled for filesystems used for container storage. -+ -[NOTE] -==== -When creating a separate `/var` partition, you cannot use different instance types for worker nodes, if the different instance types do not have the same device name. -==== - -. Create a manifest from the Butane config and save it to the `clusterconfig/openshift` directory. For example, run the following command: -+ -[source,terminal] ----- -$ butane $HOME/clusterconfig/98-var-partition.bu -o $HOME/clusterconfig/openshift/98-var-partition.yaml ----- - -. Run `openshift-install` again to create Ignition configs from a set of files in the `manifest` and `openshift` subdirectories: -+ -[source,terminal] ----- -$ openshift-install create ignition-configs --dir $HOME/clusterconfig -$ ls $HOME/clusterconfig/ -auth bootstrap.ign master.ign metadata.json worker.ign ----- - -Now you can use the Ignition config files as input to the installation procedures to install {op-system-first} systems. 
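To make the Butane step above more concrete, the following sketch fills in the placeholders with one plausible set of values. The device name, offset, and partition size are assumptions chosen only for illustration; adjust them to match the disks on your own worker machines.

[source,yaml]
----
# Hypothetical filled-in 98-var-partition.bu. The /dev/sda device name, the
# 25000 MiB offset, and the 100000 MiB partition size are assumed values.
variant: openshift
version: 4.13.0
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 98-var-partition
storage:
  disks:
  - device: /dev/sda          # assumed boot disk on the worker nodes
    partitions:
    - label: var
      start_mib: 25000        # recommended minimum offset for the root file system
      size_mib: 100000        # size of the /var data partition in mebibytes
  filesystems:
  - device: /dev/disk/by-partlabel/var
    path: /var
    format: xfs
    mount_options: [defaults, prjquota]
    with_mount_unit: true
----

Converting this file with `butane` and saving the output to the `clusterconfig/openshift` directory follows the same commands that the procedure already shows.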
diff --git a/modules/installation-disk-partitioning.adoc b/modules/installation-disk-partitioning.adoc deleted file mode 100644 index 495b00237b14..000000000000 --- a/modules/installation-disk-partitioning.adoc +++ /dev/null @@ -1,134 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc - -// This content was sourced from the bare metal RHCOS assembly file `modules/installation-user-infra-machines-advanced.adoc` under the `== Disk partitioning` subheader. "Disk partioning" content in the bare metal assembly is not modularized, so anything in this vsphere module should be checked against that file for consistency until such time that the large bare metal assembly can be modularized. - -:_content-type: PROCEDURE -[id="installation-disk-partitioning_{context}"] -= Disk partitioning - -In most cases, data partitions are originally created by installing {op-system}, rather than by installing another operating system. In such cases, the {product-title} installer should be allowed to configure your disk partitions. - -However, there are two cases where you might want to intervene to override the default partitioning when installing an -{product-title} node: - -* Create separate partitions: For greenfield installations on an empty -disk, you might want to add separate storage to a partition. This is -officially supported for making `/var` or a subdirectory of `/var`, such as `/var/lib/etcd`, a separate partition, but not both. -+ -[IMPORTANT] -==== -For disk sizes larger than 100GB, and especially disk sizes larger than 1TB, create a separate `/var` partition. See "Creating a separate `/var` partition" and this link:https://access.redhat.com/solutions/5587281[Red Hat Knowledgebase article] for more information. -==== -+ -[IMPORTANT] -==== -Kubernetes supports only two file system partitions. If you add more than one partition to the original configuration, Kubernetes cannot monitor all of them. -==== -* Retain existing partitions: For a brownfield installation where you are reinstalling {product-title} on an existing node and want to retain data partitions installed from your previous operating system, there are both boot arguments and options to `coreos-installer` that allow you to retain existing data partitions. - -[discrete] -= Creating a separate `/var` partition -In general, disk partitioning for {product-title} should be left to the -installer. However, there are cases where you might want to create separate partitions in a part of the filesystem that you expect to grow. - -{product-title} supports the addition of a single partition to attach -storage to either the `/var` partition or a subdirectory of `/var`. -For example: - -* `/var/lib/containers`: Holds container-related content that can grow -as more images and containers are added to a system. -* `/var/lib/etcd`: Holds data that you might want to keep separate for purposes such as performance optimization of etcd storage. -* `/var`: Holds data that you might want to keep separate for purposes such as auditing. -+ -[IMPORTANT] -==== -For disk sizes larger than 100GB, and especially larger than 1TB, create a separate `/var` partition. 
-==== - -Storing the contents of a `/var` directory separately makes it easier to grow storage for those areas as needed and reinstall {product-title} at a later date and keep that data intact. With this method, you will not have to pull all your containers again, nor will you have to copy massive log files when you update systems. - -Because `/var` must be in place before a fresh installation of -{op-system-first}, the following procedure sets up the separate `/var` partition -by creating a machine config manifest that is inserted during the `openshift-install` -preparation phases of an {product-title} installation. - -.Procedure - -. Create a directory to hold the {product-title} installation files: -+ -[source,terminal] ----- -$ mkdir $HOME/clusterconfig ----- - -. Run `openshift-install` to create a set of files in the `manifest` and -`openshift` subdirectories. Answer the system questions as you are prompted: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir $HOME/clusterconfig -? SSH Public Key ... -$ ls $HOME/clusterconfig/openshift/ -99_kubeadmin-password-secret.yaml -99_openshift-cluster-api_master-machines-0.yaml -99_openshift-cluster-api_master-machines-1.yaml -99_openshift-cluster-api_master-machines-2.yaml -... ----- - -. Create a Butane config that configures the additional partition. For example, name the file `$HOME/clusterconfig/98-var-partition.bu`, change the disk device name to the name of the storage device on the `worker` systems, and set the storage size as appropriate. This example places the `/var` directory on a separate partition: -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 98-var-partition -storage: - disks: - - device: /dev/<device_name> <1> - partitions: - - label: var - start_mib: <partition_start_offset> <2> - size_mib: <partition_size> <3> - filesystems: - - device: /dev/disk/by-partlabel/var - path: /var - format: xfs - mount_options: [defaults, prjquota] <4> - with_mount_unit: true ----- -+ -<1> The storage device name of the disk that you want to partition. -<2> When adding a data partition to the boot disk, a minimum value of 25000 mebibytes is recommended. The root file system is automatically resized to fill all available space up to the specified offset. If no value is specified, or if the specified value is smaller than the recommended minimum, the resulting root file system will be too small, and future reinstalls of {op-system} might overwrite the beginning of the data partition. -<3> The size of the data partition in mebibytes. -<4> The `prjquota` mount option must be enabled for filesystems used for container storage. -+ -[NOTE] -==== -When creating a separate `/var` partition, you cannot use different instance types for worker nodes, if the different instance types do not have the same device name. -==== - -. Create a manifest from the Butane config and save it to the `clusterconfig/openshift` directory. For example, run the following command: -+ -[source,terminal] ----- -$ butane $HOME/clusterconfig/98-var-partition.bu -o $HOME/clusterconfig/openshift/98-var-partition.yaml ----- - -. 
Run `openshift-install` again to create Ignition configs from a set of files in the `manifest` and `openshift` subdirectories: -+ -[source,terminal] ----- -$ openshift-install create ignition-configs --dir $HOME/clusterconfig -$ ls $HOME/clusterconfig/ -auth bootstrap.ign master.ign metadata.json worker.ign ----- - -Now you can use the Ignition config files as input to the vSphere installation procedures to install {op-system-first} systems. diff --git a/modules/installation-dns-ibm-cloud.adoc b/modules/installation-dns-ibm-cloud.adoc deleted file mode 100644 index 14f96b32e101..000000000000 --- a/modules/installation-dns-ibm-cloud.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc - -:_content-type: PROCEDURE -[id="installation-dns-ibm-cloud_{context}"] -= Using IBM Cloud DNS Services for DNS resolution - -The installation program uses IBM Cloud DNS Services to configure cluster DNS resolution and provide name lookup for a private cluster. - -You configure DNS resolution by creating a DNS services instance for the cluster, and then adding a DNS zone to the DNS Services instance. Ensure that the zone is authoritative for the domain. You can do this using a root domain or subdomain. - -[NOTE] -==== -IBM Cloud VPC does not support IPv6, so dual stack or IPv6 environments are not possible. -==== - -.Prerequisites - -* You have installed the link:https://www.ibm.com/cloud/cli[IBM Cloud CLI]. -* You have an existing domain and registrar. For more information, see the IBM link:https://cloud.ibm.com/docs/dns?topic=dns-getting-started[documentation]. - -.Procedure - -. Create a DNS Services instance to use with your cluster: - -.. Install the DNS Services plugin by running the following command: -+ -[source,terminal] ----- -$ ibmcloud plugin install cloud-dns-services ----- - -.. Create the DNS Services instance by running the following command: -+ -[source,terminal] ----- -$ ibmcloud dns instance-create <instance-name> standard-dns <1> ----- -<1> At a minimum, a `Standard` plan is required for DNS Services to manage the cluster subdomain and its DNS records. - -. Create a DNS zone for the DNS Services instance: - -.. Set the target operating DNS Services instance by running the following command: -+ -[source,terminal] ----- -$ ibmcloud dns instance-target <instance-name> ----- - -.. Add the DNS zone to the DNS Services instance by running the following command: -+ -[source,terminal] ----- -$ ibmcloud dns zone-create <zone-name> <1> ----- -<1> The fully qualified zone name. You can use either the root domain or subdomain value as the zone name, depending on which you plan to configure. A root domain uses the form `openshiftcorp.com`. A subdomain uses the form `clusters.openshiftcorp.com`. - -. Record the name of the DNS zone you have created. As part of the installation process, you must update the `install-config.yaml` file before deploying the cluster. Use the name of the DNS zone as the value for the `baseDomain` parameter. - -[NOTE] -==== -You do not have to manage permitted networks or configure an "A" DNS resource record. As required, the installation program configures these resources automatically. 
-==== diff --git a/modules/installation-dns-user-infra.adoc b/modules/installation-dns-user-infra.adoc deleted file mode 100644 index 8d4b3644f7fa..000000000000 --- a/modules/installation-dns-user-infra.adoc +++ /dev/null @@ -1,234 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] - -:prewrap!: - -:_content-type: CONCEPT -[id="installation-dns-user-infra_{context}"] -= User-provisioned DNS requirements - -In {product-title} deployments, DNS name resolution is required for the following components: - -* The Kubernetes API -* The {product-title} application wildcard -* The bootstrap, control plane, and compute machines - -Reverse DNS resolution is also required for the Kubernetes API, the bootstrap machine, the control plane machines, and the compute machines. - -DNS A/AAAA or CNAME records are used for name resolution and PTR records are used for reverse name resolution. The reverse records are important because {op-system-first} uses the reverse records to set the hostnames for all the nodes, unless the hostnames are provided by DHCP. Additionally, the reverse records are used to generate the certificate signing requests (CSR) that {product-title} needs to operate. - -ifndef::ibm-z,ibm-z-kvm[] -[NOTE] -==== -It is recommended to use a DHCP server to provide the hostnames to each cluster node. See the _DHCP recommendations for user-provisioned infrastructure_ section for more information. -==== -endif::ibm-z,ibm-z-kvm[] - -The following DNS records are required for a user-provisioned {product-title} cluster and they must be in place before installation. In each record, `<cluster_name>` is the cluster name and `<base_domain>` is the base domain that you specify in the `install-config.yaml` file. A complete DNS record takes the form: `<component>.<cluster_name>.<base_domain>.`. - -.Required DNS records -[cols="1a,3a,5a",options="header"] -|=== - -|Component -|Record -|Description - -.2+a|Kubernetes API -|`api.<cluster_name>.<base_domain>.` -|A DNS A/AAAA or CNAME record, and a DNS PTR record, to identify the API load balancer. 
These records must be resolvable by both clients external to the cluster and from all the nodes within the cluster. - -|`api-int.<cluster_name>.<base_domain>.` -|A DNS A/AAAA or CNAME record, and a DNS PTR record, to internally identify the API load balancer. These records must be resolvable from all the nodes within the cluster. -[IMPORTANT] -==== -The API server must be able to resolve the worker nodes by the hostnames -that are recorded in Kubernetes. If the API server cannot resolve the node -names, then proxied API calls can fail, and you cannot retrieve logs from pods. -==== - -|Routes -|`*.apps.<cluster_name>.<base_domain>.` -|A wildcard DNS A/AAAA or CNAME record that refers to the application ingress load balancer. The application ingress load balancer targets the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. These records must be resolvable by both clients external to the cluster and from all the nodes within the cluster. - -For example, `console-openshift-console.apps.<cluster_name>.<base_domain>` is used as a wildcard route to the {product-title} console. - -|Bootstrap machine -|`bootstrap.<cluster_name>.<base_domain>.` -|A DNS A/AAAA or CNAME record, and a DNS PTR record, to identify the bootstrap -machine. These records must be resolvable by the nodes within the cluster. - -|Control plane machines -|`<master><n>.<cluster_name>.<base_domain>.` -|DNS A/AAAA or CNAME records and DNS PTR records to identify each machine -for the control plane nodes. These records must be resolvable by the nodes within the cluster. - -|Compute machines -|`<worker><n>.<cluster_name>.<base_domain>.` -|DNS A/AAAA or CNAME records and DNS PTR records to identify each machine -for the worker nodes. These records must be resolvable by the nodes within the cluster. - -|=== - -[NOTE] -==== -In {product-title} 4.4 and later, you do not need to specify etcd host and SRV records in your DNS configuration. -==== - -[TIP] -==== -You can use the `dig` command to verify name and reverse name resolution. See the section on _Validating DNS resolution for user-provisioned infrastructure_ for detailed validation steps. -==== - -[id="installation-dns-user-infra-example_{context}"] -== Example DNS configuration for user-provisioned clusters - -This section provides A and PTR record configuration samples that meet the DNS requirements for deploying {product-title} on user-provisioned infrastructure. The samples are not meant to provide advice for choosing one DNS solution over another. - -In the examples, the cluster name is `ocp4` and the base domain is `example.com`. - -.Example DNS A record configuration for a user-provisioned cluster - -The following example is a BIND zone file that shows sample A records for name resolution in a user-provisioned cluster. - -.Sample DNS zone database -[%collapsible] -==== -[source,text] ----- -$TTL 1W -@ IN SOA ns1.example.com. root ( - 2019070700 ; serial - 3H ; refresh (3 hours) - 30M ; retry (30 minutes) - 2W ; expiry (2 weeks) - 1W ) ; minimum (1 week) - IN NS ns1.example.com. - IN MX 10 smtp.example.com. -; -; -ns1.example.com. IN A 192.168.1.5 -smtp.example.com. IN A 192.168.1.5 -; -helper.example.com. IN A 192.168.1.5 -helper.ocp4.example.com. IN A 192.168.1.5 -; -api.ocp4.example.com. IN A 192.168.1.5 <1> -api-int.ocp4.example.com. IN A 192.168.1.5 <2> -; -*.apps.ocp4.example.com. IN A 192.168.1.5 <3> -; -bootstrap.ocp4.example.com. IN A 192.168.1.96 <4> -; -master0.ocp4.example.com. 
IN A 192.168.1.97 <5> -master1.ocp4.example.com. IN A 192.168.1.98 <5> -master2.ocp4.example.com. IN A 192.168.1.99 <5> -; -worker0.ocp4.example.com. IN A 192.168.1.11 <6> -worker1.ocp4.example.com. IN A 192.168.1.7 <6> -; -;EOF ----- - -<1> Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer. -<2> Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer and is used for internal cluster communications. -<3> Provides name resolution for the wildcard routes. The record refers to the IP address of the application ingress load balancer. The application ingress load balancer targets the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. -+ -[NOTE] -===== -In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. -===== -+ -<4> Provides name resolution for the bootstrap machine. -<5> Provides name resolution for the control plane machines. -<6> Provides name resolution for the compute machines. -==== - -.Example DNS PTR record configuration for a user-provisioned cluster - -The following example BIND zone file shows sample PTR records for reverse name resolution in a user-provisioned cluster. - -.Sample DNS zone database for reverse records -[%collapsible] -==== -[source,text] ----- -$TTL 1W -@ IN SOA ns1.example.com. root ( - 2019070700 ; serial - 3H ; refresh (3 hours) - 30M ; retry (30 minutes) - 2W ; expiry (2 weeks) - 1W ) ; minimum (1 week) - IN NS ns1.example.com. -; -5.1.168.192.in-addr.arpa. IN PTR api.ocp4.example.com. <1> -5.1.168.192.in-addr.arpa. IN PTR api-int.ocp4.example.com. <2> -; -96.1.168.192.in-addr.arpa. IN PTR bootstrap.ocp4.example.com. <3> -; -97.1.168.192.in-addr.arpa. IN PTR master0.ocp4.example.com. <4> -98.1.168.192.in-addr.arpa. IN PTR master1.ocp4.example.com. <4> -99.1.168.192.in-addr.arpa. IN PTR master2.ocp4.example.com. <4> -; -11.1.168.192.in-addr.arpa. IN PTR worker0.ocp4.example.com. <5> -7.1.168.192.in-addr.arpa. IN PTR worker1.ocp4.example.com. <5> -; -;EOF ----- - -<1> Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer. -<2> Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer and is used for internal cluster communications. -<3> Provides reverse DNS resolution for the bootstrap machine. -<4> Provides reverse DNS resolution for the control plane machines. -<5> Provides reverse DNS resolution for the compute machines. -==== - -[NOTE] -==== -A PTR record is not required for the {product-title} application wildcard. 
-==== - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] diff --git a/modules/installation-extend-edge-nodes-aws-local-zones.adoc b/modules/installation-extend-edge-nodes-aws-local-zones.adoc deleted file mode 100644 index 6ac1ddd43f9d..000000000000 --- a/modules/installation-extend-edge-nodes-aws-local-zones.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc - -ifeval::["{context}" == "installing-aws-localzone"] -:localzone: -endif::[] - -:_content-type: PROCEDURE -[id="installation-extend-edge-nodes-aws-local-zones_{context}"] -= Creating user workloads in AWS Local Zones -After you create an Amazon Web Service (AWS) Local Zone environment, and you deploy your cluster, you can use edge worker nodes to create user workloads in Local Zone subnets. - -After the `openshift-installer` creates the cluster, the installation program automatically specifies a taint effect of `NoSchedule` to each edge worker node. This means that a scheduler does not add a new pod, or deployment, to a node if the pod does not match the specified tolerations for a taint. You can modify the taint for better control over how each node creates a workload in each Local Zone subnet. - -The `openshift-installer` creates the compute machine set manifests file with `node-role.kubernetes.io/edge` and `node-role.kubernetes.io/worker` labels applied to each edge worker node that is located in a Local Zone subnet. - -.Prerequisites - -* You have access to the OpenShift CLI (`oc`). -* You deployed your cluster in a Virtual Private Cloud (VPC) with defined Local Zone subnets. -* You ensured that the compute machine set for the edge workers on Local Zone subnets specifies the taints for `node-role.kubernetes.io/edge`. - -.Procedure - -. Create a `deployment` resource YAML file for an example application to be deployed in the edge worker node that operates in a Local Zone subnet. Ensure that you specify the correct tolerations that match the taints for the edge worker node. 
-+ -.Example of a configured `deployment` resource for an edge worker node that operates in a Local Zone subnet -[source,yaml] ----- -kind: Namespace -apiVersion: v1 -metadata: - name: <local_zone_application_namespace> ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: <pvc_name> - namespace: <local_zone_application_namespace> -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: gp2-csi <1> - volumeMode: Filesystem ---- -apiVersion: apps/v1 -kind: Deployment <2> -metadata: - name: <local_zone_application> <3> - namespace: <local_zone_application_namespace> <4> -spec: - selector: - matchLabels: - app: <local_zone_application> - replicas: 1 - template: - metadata: - labels: - app: <local_zone_application> - zone-group: ${ZONE_GROUP_NAME} <5> - spec: - securityContext: - seccompProfile: - type: RuntimeDefault - nodeSelector: <6> - machine.openshift.io/zone-group: ${ZONE_GROUP_NAME} - tolerations: <7> - - key: "node-role.kubernetes.io/edge" - operator: "Equal" - value: "" - effect: "NoSchedule" - containers: - - image: openshift/origin-node - command: - - "/bin/socat" - args: - - TCP4-LISTEN:8080,reuseaddr,fork - - EXEC:'/bin/bash -c \"printf \\\"HTTP/1.0 200 OK\r\n\r\n\\\"; sed -e \\\"/^\r/q\\\"\"' - imagePullPolicy: Always - name: echoserver - ports: - - containerPort: 8080 - volumeMounts: - - mountPath: "/mnt/storage" - name: data - volumes: - - name: data - persistentVolumeClaim: - claimName: <pvc_name> ----- -<1> `storageClassName`: For the Local Zone configuration, you must specify `gp2-csi`. -<2> `kind`: Defines the `deployment` resource. -<3> `name`: Specifies the name of your Local Zone application. For example, `local-zone-demo-app-nyc-1`. -<4> `namespace:` Defines the namespace for the AWS Local Zone where you want to run the user workload. For example: `local-zone-app-nyc-1a`. -<5> `zone-group`: Defines the group to where a zone belongs. For example, `us-east-1-iah-1`. -<6> `nodeSelector`: Targets edge worker nodes that match the specified labels. -<7> `tolerations`: Sets the values that match with the `taints` defined on the `MachineSet` manifest for the Local Zone node. - -. Create a `service` resource YAML file for the node. This resource exposes a pod from a targeted edge worker node to services that run inside your Local Zone network. -+ -.Example of a configured `service` resource for an edge worker node that operates in a Local Zone subnet -[source,yaml] ----- -apiVersion: v1 -kind: Service <1> -metadata: - name: <local_zone_application> - namespace: <local_zone_application_namespace> -spec: - ports: - - port: 80 - targetPort: 8080 - protocol: TCP - type: NodePort - selector: <2> - app: <local_zone_application> ----- -<1> `kind`: Defines the `service` resource. -<2> `selector:` Specifies the label type applied to managed pods. 
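The deployment in this module tolerates the `node-role.kubernetes.io/edge` taint, but the taint itself is only referenced as being defined on the compute machine set. The following excerpt is a hedged sketch of what that taint block might look like on an edge machine set; the machine set name and the label shown are assumptions, not content from this module.

[source,yaml]
----
# Hypothetical excerpt from an edge compute machine set manifest. Only the
# fields relevant to the edge role label and the taint are shown; the name
# and all provider-specific fields are placeholders.
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  name: <infrastructure_id>-edge-<local_zone>   # placeholder machine set name
  namespace: openshift-machine-api
spec:
  template:
    spec:
      metadata:
        labels:
          node-role.kubernetes.io/edge: ""      # edge role label applied to the node
      taints:
      - key: node-role.kubernetes.io/edge       # taint matched by the deployment's tolerations
        value: ""
        effect: NoSchedule
----

Keeping the taint on the machine set and the matching toleration on the workload is what restricts general-purpose pods to regular worker nodes while still letting the example deployment schedule onto the Local Zone edge nodes.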
diff --git a/modules/installation-extracting-infraid.adoc b/modules/installation-extracting-infraid.adoc deleted file mode 100644 index 30157a8297c7..000000000000 --- a/modules/installation-extracting-infraid.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc - -ifeval::["{context}" == "installing-aws-user-infra"] -:cp-first: Amazon Web Services -:cp: AWS -:cp-template: CloudFormation -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:cp-first: Amazon Web Services -:cp: AWS -:cp-template: CloudFormation -:aws: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:cp-first: Microsoft Azure -:cp: Azure -:cp-template-first: Azure Resource Manager -:cp-template: ARM -:azure: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:cp-first: VMware vSphere -:cp: vSphere -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:cp-first: VMware vSphere -:cp: vSphere -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:cp-first: VMware vSphere -:cp: vSphere -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="installation-extracting-infraid_{context}"] -= Extracting the infrastructure name - -ifdef::aws,gcp[] -The Ignition config files contain a unique cluster identifier that you can use to -uniquely identify your cluster in {cp-first} ({cp}). The infrastructure name is also used to locate the appropriate {cp} resources during an {product-title} installation. The provided {cp-template} -templates contain references to this infrastructure name, so you must extract -it. -endif::aws,gcp[] - -ifdef::azure[] -The Ignition config files contain a unique cluster identifier that you can use to -uniquely identify your cluster in {cp-first}. The provided {cp-template-first} ({cp-template}) -templates contain references to this infrastructure name, so you must extract -it. -endif::azure[] - -ifdef::vsphere[] -The Ignition config files contain a unique cluster identifier that you can use to -uniquely identify your cluster in {cp-first}. If you plan to use the cluster identifier as the name of your virtual machine folder, you must extract it. -endif::vsphere[] - -.Prerequisites - -* You obtained the {product-title} installation program and the pull secret for your cluster. -* You generated the Ignition config files for your cluster. -* You installed the `jq` package. 
- -.Procedure - -* To extract and view the infrastructure name from the Ignition config file -metadata, run the following command: -+ -[source,terminal] ----- -$ jq -r .infraID <installation_directory>/metadata.json <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the -installation files in. -+ -.Example output -[source,terminal] ----- -openshift-vw9j6 <1> ----- -<1> The output of this command is your cluster name and a random string. - -ifeval::["{context}" == "installing-aws-user-infra"] -:!cp-first: -:!cp: -:!cp-template: -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!cp-first: -:!cp: -:!cp-template: -:!aws: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp-first: -:!cp: -:!cp-template-first: -:!cp-template: -:!azure: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:!cp-first: -:!cp: -:!cp-template: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform -:!cp: GCP -:!cp-template: Deployment Manager -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!cp-first: -:!cp: -:!cp-template: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!cp-first: VMware vSphere -:!cp: vSphere -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:!cp-first: VMware vSphere -:!cp: vSphere -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:!cp-first: VMware vSphere -:!cp: vSphere -:!vsphere: -endif::[] diff --git a/modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc b/modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc deleted file mode 100644 index 3c3a97bb56f8..000000000000 --- a/modules/installation-full-ibm-z-kvm-user-infra-machines-iso.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-iso-ibm-z-kvm-full_{context}"] -= Full installation on a new QCOW2 disk image - -Complete the following steps to create the machines in a full installation on a new QEMU copy-on-write (QCOW2) disk image. - -.Prerequisites - -* At least one LPAR running on {op-system-base} 8.6 or later with KVM, referred to as {op-system-base} KVM host in this procedure. -* The KVM/QEMU hypervisor is installed on the {op-system-base} KVM host. -* A domain name server (DNS) that can perform hostname and reverse lookup for the nodes. -* An HTTP or HTTPS server is set up. - -.Procedure - -. Obtain the {op-system-base} kernel, initramfs, and rootfs files from the link:https://access.redhat.com/downloads/content/290[Product Downloads] page on the Red Hat Customer Portal or from the link:https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/[{op-system} image mirror] page. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal -to the {product-title} version that you install. Only use the appropriate {op-system} QCOW2 image described in the following procedure. -==== -+ -The file names contain the {product-title} version number. 
They resemble the following examples: - -** kernel: `rhcos-<version>-live-kernel-<architecture>` -** initramfs: `rhcos-<version>-live-initramfs.<architecture>.img` -** rootfs: `rhcos-<version>-live-rootfs.<architecture>.img` -+ -. Move the downloaded {op-system-base} live kernel, initramfs, and rootfs as well as the Ignition files to an HTTP or HTTPS server before you launch `virt-install`. -+ -[NOTE] -==== -The Ignition files are generated by the {product-title} installer. -==== -. Create the new KVM guest nodes using the {op-system-base} kernel, initramfs, and Ignition files, the new disk image, and adjusted parm line arguments. -** For `--location`, specify the location of the kernel/initrd on the HTTP or HTTPS server. -** For `coreos.inst.ignition_url=`, specify the Ignition file for the machine role. Use `bootstrap.ign`, `master.ign`, or `worker.ign`. Only HTTP and HTTPS protocols are supported. -** For `coreos.live.rootfs_url=`, specify the matching rootfs artifact for the kernel and initramfs you are booting. Only HTTP and HTTPS protocols are supported. -+ -[source,terminal] ----- -$ virt-install \ - --connect qemu:///system \ - --name {vn_name} \ - --vcpus {vcpus} \ - --memory {memory_mb} \ - --disk {vn_name}.qcow2,size={image_size| default(10,true)} \ - --network network={virt_network_parm} \ - --boot hd \ - --location {media_location},kernel={rhcos_kernel},initrd={rhcos_initrd} \ - --extra-args "rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url={rhcos_liveos} ip={ip}::{default_gateway}:{subnet_mask_length}:{vn_name}:enc1:none:{MTU} nameserver={dns} coreos.inst.ignition_url={rhcos_ign}" \ - --noautoconsole \ - --wait ----- diff --git a/modules/installation-gcp-config-yaml.adoc b/modules/installation-gcp-config-yaml.adoc deleted file mode 100644 index 5230bb78b9f2..000000000000 --- a/modules/installation-gcp-config-yaml.adoc +++ /dev/null @@ -1,284 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc - -ifeval::["{context}" == "installing-gcp-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" != "installing-gcp-network-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:vpc: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:private: -:vpc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:restricted: -endif::[] - -[id="installation-gcp-config-yaml_{context}"] -= Sample customized install-config.yaml file for GCP - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. 
-==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> <3> - hyperthreading: Enabled <4> - name: master - platform: - gcp: - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - osDisk: - diskType: pd-ssd - diskSizeGB: 1024 - encryptionKey: <5> - kmsKey: - name: worker-key - keyRing: test-machine-keys - location: global - projectID: project-id - tags: <6> - - control-plane-tag1 - - control-plane-tag2 - replicas: 3 -compute: <2> <3> -- hyperthreading: Enabled <4> - name: worker - platform: - gcp: - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - osDisk: - diskType: pd-standard - diskSizeGB: 128 - encryptionKey: <5> - kmsKey: - name: worker-key - keyRing: test-machine-keys - location: global - projectID: project-id - tags: <6> - - compute-tag1 - - compute-tag2 - replicas: 3 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <2> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <7> - serviceNetwork: - - 172.30.0.0/16 -platform: - gcp: - projectID: openshift-production <1> - region: us-central1 <1> - defaultMachinePlatform: - tags: <6> - - global-tag1 - - global-tag2 -ifdef::vpc,restricted[] - network: existing_vpc <8> - controlPlaneSubnet: control_plane_subnet <9> - computeSubnet: compute_subnet <10> -endif::vpc,restricted[] -ifndef::restricted[] -pullSecret: '{"auths": ...}' <1> -endif::restricted[] -ifdef::restricted[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <11> -endif::restricted[] -ifndef::vpc,restricted[] -ifndef::openshift-origin[] -fips: false <8> -sshKey: ssh-ed25519 AAAA... <9> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <8> -endif::openshift-origin[] -endif::vpc,restricted[] -ifdef::vpc[] -ifndef::openshift-origin[] -fips: false <11> -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <11> -endif::openshift-origin[] -endif::vpc[] -ifdef::restricted[] -ifndef::openshift-origin[] -fips: false <12> -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin[] -endif::restricted[] -ifdef::private[] -ifndef::openshift-origin[] -publish: Internal <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -publish: Internal <12> -endif::openshift-origin[] -endif::private[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <14> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -imageContentSources: <15> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <13> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -imageContentSources: <14> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -endif::restricted[] ----- -<1> Required. 
The installation program prompts you for this value. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> Optional: The custom encryption key section to encrypt both virtual machines and persistent volumes. Your default compute service account must have the permissions granted to use your KMS key and have the correct IAM role assigned. The default service account name follows the `service-<project_number>@compute-system.iam.gserviceaccount.com` pattern. For more information about granting the correct permissions for your service account, see "Machine management" -> "Creating compute machine sets" -> "Creating a compute machine set on GCP". -<6> Optional: A set of network tags to apply to the control plane or compute machine sets. The `platform.gcp.defaultMachinePlatform.tags` parameter will apply to both control plane and compute machines. If the `compute.platform.gcp.tags` or `controlPlane.platform.gcp.tags` parameters are set, they override the `platform.gcp.defaultMachinePlatform.tags` parameter. -<7> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -ifdef::vpc,restricted[] -<8> Specify the name of an existing VPC. -<9> Specify the name of the existing subnet to deploy the control plane machines to. The subnet must belong to the VPC that you specified. -<10> Specify the name of the existing subnet to deploy the compute machines to. The subnet must belong to the VPC that you specified. -endif::vpc,restricted[] -ifdef::restricted[] -<11> For `<local_registry>`, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, `registry.example.com` or `registry.example.com:5000`. For `<credentials>`, specify the base64-encoded user name and password for your mirror registry. -endif::restricted[] -ifdef::vpc[] -ifndef::openshift-origin[] -<11> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[] -ifdef::openshift-origin[] -<11> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::vpc[] -ifdef::restricted[] -ifndef::openshift-origin[] -<12> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<13> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::restricted[] -ifndef::vpc,restricted[] -ifndef::openshift-origin[] -<8> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<9> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<8> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -endif::vpc,restricted[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::private[] -ifndef::openshift-origin[] -<13> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -endif::openshift-origin[] -endif::private[] -ifdef::restricted[] -ifndef::openshift-origin[] -<14> Provide the contents of the certificate file that you used for your mirror registry. -<15> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<13> Provide the contents of the certificate file that you used for your mirror registry. -<14> Provide the `imageContentSources` section from the output of the command to mirror the repository. 
-endif::openshift-origin[] -endif::restricted[] - -ifeval::["{context}" == "installing-gcp-network-customizations"] -:!with-networking: -endif::[] -ifeval::["{context}" != "installing-gcp-network-customizations"] -:!without-networking: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:!vpc: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:!private: -:!vpc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!restricted: -endif::[] diff --git a/modules/installation-gcp-dns.adoc b/modules/installation-gcp-dns.adoc deleted file mode 100644 index 4e010cdab36a..000000000000 --- a/modules/installation-gcp-dns.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:user-infra-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-gcp-dns_{context}"] -= Configuring DNS for GCP - -To install {product-title}, the Google Cloud Platform (GCP) account you use must -have a dedicated public hosted zone -ifndef::user-infra-vpc[] -in the same project that you host the {product-title} cluster. -endif::user-infra-vpc[] -ifdef::user-infra-vpc[] -in the project that hosts the shared VPC that you install the cluster into. -endif::user-infra-vpc[] -This zone must be authoritative for the domain. The -DNS service provides cluster DNS resolution and name lookup for external -connections to the cluster. - -.Procedure - -. Identify your domain, or subdomain, and registrar. You can transfer an existing domain and -registrar or obtain a new one through GCP or another source. -+ -[NOTE] -==== -If you purchase a new domain, it can take time for the relevant DNS -changes to propagate. For more information about purchasing domains -through Google, see link:https://domains.google/[Google Domains]. -==== - -. Create a public hosted zone for your domain or subdomain in your GCP project. See -link:https://cloud.google.com/dns/zones/#creating_public_zones[Creating public zones] -in the GCP documentation. -+ -Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, -such as `clusters.openshiftcorp.com`. - -. Extract the new authoritative name servers from the hosted zone records. See -link:https://cloud.google.com/dns/docs/update-name-servers#look_up_your_name_servers[Look up your Cloud DNS name servers] -in the GCP documentation. -+ -You typically have four name servers. - -. Update the registrar records for the name servers that your domain -uses. For example, if you registered your domain to Google Domains, see the -following topic in the Google Domains Help: -link:https://support.google.com/domains/answer/3290309?hl=en[How to switch to custom name servers]. - -. If you migrated your root domain to Google Cloud DNS, migrate your DNS records. See link:https://cloud.google.com/dns/docs/migrating[Migrating to Cloud DNS] in the GCP documentation. - -. If you use a subdomain, follow your company's procedures to add its delegation records to the parent domain. This process might include a request to your company's IT department or the division that controls the root domain and DNS services for your company. 
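For reference, the public zone creation and name server lookup described in the preceding steps can also be performed with the `gcloud` CLI. This is an illustrative sketch only; the zone name `example-zone` and the domain `openshiftcorp.com` are placeholder values, not values taken from this module:

[source,terminal]
----
$ gcloud dns managed-zones create example-zone \
    --description="Public zone for the cluster base domain" \
    --dns-name=openshiftcorp.com. \
    --visibility=public
----

[source,terminal]
----
$ gcloud dns managed-zones describe example-zone --format="value(nameServers)"
----

The second command prints the authoritative name servers that you then configure at your domain registrar.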
- -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!user-infra-vpc: -endif::[] diff --git a/modules/installation-gcp-enabling-api-services.adoc b/modules/installation-gcp-enabling-api-services.adoc deleted file mode 100644 index 713323cc3bd9..000000000000 --- a/modules/installation-gcp-enabling-api-services.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra"] -:template: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:template: -endif::[] -ifeval::["{context}" == "installing-gcp-restricted-networks"] -:template: -endif::[] - -:_content-type: PROCEDURE -[id="installation-gcp-enabling-api-services_{context}"] -= Enabling API services in GCP - -Your Google Cloud Platform (GCP) project requires access to several API services -to complete {product-title} installation. - -.Prerequisites - -* You created a project to host your cluster. - -.Procedure - -* Enable the following required API services in the project that hosts your -cluster. You may also enable optional API services which are not required for installation. See -link:https://cloud.google.com/service-usage/docs/enable-disable#enabling[Enabling services] -in the GCP documentation. -+ -.Required API services -[cols="2a,3a",options="header"] -|=== -|API service |Console service name - -|Compute Engine API -|`compute.googleapis.com` - -|Cloud Resource Manager API -|`cloudresourcemanager.googleapis.com` - -|Google DNS API -|`dns.googleapis.com` - -|IAM Service Account Credentials API -|`iamcredentials.googleapis.com` - -|Identity and Access Management (IAM) API -|`iam.googleapis.com` - -|Service Usage API -|`serviceusage.googleapis.com` - -|=== -+ -.Optional API services -[cols="2a,3a",options="header"] -|=== -|API service |Console service name - -ifdef::template[] -|Cloud Deployment Manager V2 API -|`deploymentmanager.googleapis.com` -endif::template[] - -|Google Cloud APIs -|`cloudapis.googleapis.com` - -|Service Management API -|`servicemanagement.googleapis.com` - -|Google Cloud Storage JSON API -|`storage-api.googleapis.com` - -|Cloud Storage -|`storage-component.googleapis.com` - -|=== - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!template: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!template: -endif::[] -ifeval::["{context}" == "installing-gcp-restricted-networks"] -:!template: -endif::[] diff --git a/modules/installation-gcp-enabling-confidential-vms.adoc b/modules/installation-gcp-enabling-confidential-vms.adoc deleted file mode 100644 index 970bc305d36e..000000000000 --- a/modules/installation-gcp-enabling-confidential-vms.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: 
PROCEDURE -[id="installation-gcp-enabling-confidential-vms_{context}"] -= Enabling Confidential VMs - -You can use Confidential VMs when installing your cluster. Confidential VMs encrypt data while it is being processed. For more information, see Google's documentation on link:https://cloud.google.com/confidential-computing[Confidential Computing]. You can enable Confidential VMs and Shielded VMs at the same time, although they are not dependent on each other. - -:FeatureName: Confidential Computing -include::snippets/technology-preview.adoc[] - -[IMPORTANT] -==== -Due to a known issue, you cannot use persistent volume storage on a cluster with Confidential VMs. For more information, see link:https://issues.redhat.com/browse/OCPBUGS-7582[OCPBUGS-7582]. -==== - -.Prerequisites -* You have created an `install-config.yaml` file. - -.Procedure - -* Use a text editor to edit the `install-config.yaml` file prior to deploying your cluster and add one of the following stanzas: -.. To use confidential VMs for only control plane machines: -+ -[source,yaml] ----- -controlPlane: - platform: - gcp: - confidentialCompute: Enabled <1> - type: n2d-standard-8 <2> - onHostMaintenance: Terminate <3> ----- -<1> Enable confidential VMs. -<2> Specify a machine type that supports Confidential VMs. Confidential VMs require the N2D or C2D series of machine types. For more information on supported machine types, see link:https://cloud.google.com/compute/confidential-vm/docs/os-and-machine-type#machine-type[Supported operating systems and machine types]. -<3> Specify the behavior of the VM during a host maintenance event, such as a hardware or software update. For a machine that uses Confidential VM, this value must be set to `Terminate`, which stops the VM. Confidential VMs do not support live VM migration. -+ -.. To use confidential VMs for only compute machines: -+ -[source,yaml] ----- -compute: -- platform: - gcp: - confidentialCompute: Enabled - type: n2d-standard-8 - onHostMaintenance: Terminate ----- -+ -.. To use confidential VMs for all machines: -+ -[source,yaml] ----- -platform: - gcp: - defaultMachinePlatform: - confidentialCompute: Enabled - type: n2d-standard-8 - onHostMaintenance: Terminate ----- \ No newline at end of file diff --git a/modules/installation-gcp-enabling-shielded-vms.adoc b/modules/installation-gcp-enabling-shielded-vms.adoc deleted file mode 100644 index 33f24019ed73..000000000000 --- a/modules/installation-gcp-enabling-shielded-vms.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-enabling-shielded-vms_{context}"] -= Enabling Shielded VMs -You can use Shielded VMs when installing your cluster. Shielded VMs have extra security features including secure boot, firmware and integrity monitoring, and rootkit detection. For more information, see Google's documentation on link:https://cloud.google.com/shielded-vm[Shielded VMs]. 
- -.Prerequisites -* You have created an `install-config.yaml` file. - -.Procedure - -* Use a text editor to edit the `install-config.yaml` file prior to deploying your cluster and add one of the following stanzas: -.. To use shielded VMs for only control plane machines: -+ -[source,yaml] ----- -controlPlane: - platform: - gcp: - secureBoot: Enabled ----- -+ -.. To use shielded VMs for only compute machines: -+ -[source,yaml] ----- -compute: -- platform: - gcp: - secureBoot: Enabled ----- -+ -.. To use shielded VMs for all machines: -+ -[source,yaml] ----- -platform: - gcp: - defaultMachinePlatform: - secureBoot: Enabled ----- \ No newline at end of file diff --git a/modules/installation-gcp-install-cli.adoc b/modules/installation-gcp-install-cli.adoc deleted file mode 100644 index f664cf38056c..000000000000 --- a/modules/installation-gcp-install-cli.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-install-cli_{context}"] -= Installing and configuring CLI tools for GCP - -To install {product-title} on Google Cloud Platform (GCP) using user-provisioned -infrastructure, you must install and configure the CLI tools for GCP. - -.Prerequisites - -* You created a project to host your cluster. -* You created a service account and granted it the required permissions. - -.Procedure - -. Install the following binaries in `$PATH`: -+ --- -* `gcloud` -* `gsutil` --- -+ -See link:https://cloud.google.com/sdk/docs/#install_the_latest_cloud_tools_version_cloudsdk_current_version[Install the latest Cloud SDK version] -in the GCP documentation. - -. Authenticate using the `gcloud` tool with your configured service account. -+ -See link:https://cloud.google.com/sdk/docs/authorizing#authorizing_with_a_service_account[Authorizing with a service account] in the GCP documentation. diff --git a/modules/installation-gcp-limits.adoc b/modules/installation-gcp-limits.adoc deleted file mode 100644 index 87899091a34b..000000000000 --- a/modules/installation-gcp-limits.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra"] -:template: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:template: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:template: -endif::[] - -[id="installation-gcp-limits_{context}"] -= GCP account limits - -The {product-title} cluster uses a number of Google Cloud Platform (GCP) -components, but the default -link:https://cloud.google.com/docs/quota[Quotas] -do not affect your ability to install a default {product-title} cluster. - -A default cluster, which contains three compute and three control plane machines, -uses the following resources. Note that some resources are required only during -the bootstrap process and are removed after the cluster deploys. 
- -.GCP resources used in a default cluster - -[cols="2a,2a,2a,2a,2a",options="header"] -|=== -|Service -|Component -|Location -|Total resources required -|Resources removed after bootstrap - -ifeval::["{context}" == "installing-gcp-account"] -|Service account |IAM |Global |6 |1 -|Firewall rules |Compute |Global |11 |1 -|Forwarding rules |Compute |Global |2 |0 -|In-use global IP addresses |Compute |Global |4 |1 -|Health checks |Compute |Global |3 |0 -|Images |Compute |Global |1 |0 -|Networks |Compute |Global |2 |0 -|Static IP addresses |Compute |Region |4 |1 -|Routers |Compute |Global |1 |0 -|Routes |Compute |Global |2 |0 -|Subnetworks |Compute |Global |2 |0 -|Target pools |Compute |Global |3 |0 -|CPUs |Compute |Region |28 |4 -|Persistent disk SSD (GB) |Compute |Region |896 |128 -endif::[] - -ifdef::template[] -|Service account |IAM |Global |6 |1 -|Firewall rules |Networking |Global |11 |1 -|Forwarding rules |Compute |Global |2 |0 -// |In-use IP addresses global |Networking |Global |4 |1 -|Health checks |Compute |Global |2 |0 -|Images |Compute |Global |1 |0 -|Networks |Networking |Global |1 |0 -// |Static IP addresses |Compute |Region |4 |1 -|Routers |Networking |Global |1 |0 -|Routes |Networking |Global |2 |0 -|Subnetworks |Compute |Global |2 |0 -|Target pools |Networking |Global |2 |0 -// |CPUs |Compute |Region |28 |4 -// |Persistent Disk SSD (GB) |Compute |Region |896 |128 -endif::template[] -|=== - -[NOTE] -==== -If any of the quotas are insufficient during installation, the installation program displays an error that states both which quota was exceeded and the region. -==== - -Be sure to consider your actual cluster size, planned cluster growth, and any usage from other clusters that are associated with your account. The CPU, static IP addresses, and persistent disk SSD (storage) quotas are the ones that are most likely to be insufficient. - -If you plan to deploy your cluster in one of the following regions, you will exceed the maximum storage quota and are likely to exceed the CPU quota limit: - -* `asia-east2` -* `asia-northeast2` -* `asia-south1` -* `australia-southeast1` -* `europe-north1` -* `europe-west2` -* `europe-west3` -* `europe-west6` -* `northamerica-northeast1` -* `southamerica-east1` -* `us-west2` - -You can increase resource quotas from the link:https://console.cloud.google.com/iam-admin/quotas[GCP console], but you might need to file a support ticket. Be sure to plan your cluster size early so that you can allow time to resolve the support ticket before you install your {product-title} cluster. - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!template: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!template: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!template: -endif::[] diff --git a/modules/installation-gcp-marketplace.adoc b/modules/installation-gcp-marketplace.adoc deleted file mode 100644 index 6f4424c250f7..000000000000 --- a/modules/installation-gcp-marketplace.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-customizations.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-marketplace_{context}"] -= Using a GCP Marketplace image -If you want to deploy an {product-title} cluster using a GCP Marketplace image, you must create the manifests and edit the compute machine set definitions to specify the GCP Marketplace image. 
-
-.Prerequisites
-
-* You have the {product-title} installation program and the pull secret for your cluster.
-
-.Procedure
-
-. Generate the installation manifests by running the following command:
-+
-[source,terminal]
-----
-$ openshift-install create manifests --dir <installation_dir>
-----
-
-. Locate the following files:
-
-** `<installation_dir>/openshift/99_openshift-cluster-api_worker-machineset-0.yaml`
-** `<installation_dir>/openshift/99_openshift-cluster-api_worker-machineset-1.yaml`
-** `<installation_dir>/openshift/99_openshift-cluster-api_worker-machineset-2.yaml`
-
-. In each file, edit the `.spec.template.spec.providerSpec.value.disks[0].image` property to reference the offer to use:
-+
-{product-title}:: `projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736`
-{opp}:: `projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736`
-{oke}:: `projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736`
-
-.Example compute machine set with the GCP Marketplace image
-[source,yaml]
-----
-deletionProtection: false
-disks:
-- autoDelete: true
-  boot: true
-  image: projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736
-  labels: null
-  sizeGb: 128
-  type: pd-ssd
-kind: GCPMachineProviderSpec
-machineType: n2-standard-4
-----
diff --git a/modules/installation-gcp-permissions.adoc b/modules/installation-gcp-permissions.adoc
deleted file mode 100644
index c17d0a43e57f..000000000000
--- a/modules/installation-gcp-permissions.adoc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_gcp/installing-gcp-account.adoc
-// * installing/installing_gcp/installing-gcp-user-infra.adoc
-// * installing/installing_gcp/installing-restricted-networks-gcp.adoc
-
-ifeval::["{context}" == "installing-gcp-user-infra"]
-:template:
-endif::[]
-ifeval::["{context}" == "installing-restricted-networks-gcp"]
-:template:
-endif::[]
-ifeval::["{context}" == "installing-gcp-user-infra-vpc"]
-:template:
-endif::[]
-
-[id="installation-gcp-permissions_{context}"]
-= Required GCP roles
-
-When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. If your organization's security policies require a more restrictive set of permissions, you can create a service account with the following permissions.
If you deploy your cluster into an existing virtual private cloud (VPC), the service account does not require certain networking permissions, which are noted in the following lists: - -.Required roles for the installation program -* Compute Admin -* IAM Security Admin -* Service Account Admin -* Service Account Key Admin -* Service Account User -* Storage Admin - -.Required roles for creating network resources during installation -* DNS Administrator - -.Required roles for using passthrough credentials mode -* Compute Load Balancer Admin -* IAM Role Viewer - -ifdef::template[] -.Required roles for user-provisioned GCP infrastructure -* Deployment Manager Editor -endif::template[] - -The roles are applied to the service accounts that the control plane and compute machines use: - -.GCP service account permissions -[cols="2a,2a",options="header"] -|=== -|Account -|Roles -.5+|Control Plane -|`roles/compute.instanceAdmin` -|`roles/compute.networkAdmin` -|`roles/compute.securityAdmin` -|`roles/storage.admin` -|`roles/iam.serviceAccountUser` -.2+|Compute -|`roles/compute.viewer` -|`roles/storage.admin` -|=== - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!template: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!template: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!template: -endif::[] \ No newline at end of file diff --git a/modules/installation-gcp-project.adoc b/modules/installation-gcp-project.adoc deleted file mode 100644 index 04bc1e381128..000000000000 --- a/modules/installation-gcp-project.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-project_{context}"] -= Creating a GCP project - -To install {product-title}, you must create a project in your Google Cloud Platform (GCP) account to host the cluster. - -.Procedure - -* Create a project to host your {product-title} cluster. See -link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the GCP documentation. -+ -[IMPORTANT] -==== -Your GCP project must use the Premium Network Service Tier if you are using installer-provisioned infrastructure. The Standard Network Service Tier is not supported for clusters installed using the installation program. The installation program configures internal load balancing for the `api-int.<cluster_name>.<base_domain>` URL; the Premium Tier is required for internal load balancing. 
-==== diff --git a/modules/installation-gcp-regions.adoc b/modules/installation-gcp-regions.adoc deleted file mode 100644 index 64b4f33877b1..000000000000 --- a/modules/installation-gcp-regions.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-gcp-regions_{context}"] -= Supported GCP regions - -You can deploy an {product-title} cluster to the following Google Cloud Platform (GCP) -regions: - -* `asia-east1` (Changhua County, Taiwan) -* `asia-east2` (Hong Kong) -* `asia-northeast1` (Tokyo, Japan) -* `asia-northeast2` (Osaka, Japan) -* `asia-northeast3` (Seoul, South Korea) -* `asia-south1` (Mumbai, India) -* `asia-south2` (Delhi, India) -* `asia-southeast1` (Jurong West, Singapore) -* `asia-southeast2` (Jakarta, Indonesia) -* `australia-southeast1` (Sydney, Australia) -* `australia-southeast2` (Melbourne, Australia) -* `europe-central2` (Warsaw, Poland) -* `europe-north1` (Hamina, Finland) -* `europe-southwest1` (Madrid, Spain) -* `europe-west1` (St. Ghislain, Belgium) -* `europe-west2` (London, England, UK) -* `europe-west3` (Frankfurt, Germany) -* `europe-west4` (Eemshaven, Netherlands) -* `europe-west6` (Zürich, Switzerland) -* `europe-west8` (Milan, Italy) -* `europe-west9` (Paris, France) -* `europe-west12` (Turin, Italy) -* `me-west1` (Tel Aviv, Israel) -* `northamerica-northeast1` (Montréal, Québec, Canada) -* `northamerica-northeast2` (Toronto, Ontario, Canada) -* `southamerica-east1` (São Paulo, Brazil) -* `southamerica-west1` (Santiago, Chile) -* `us-central1` (Council Bluffs, Iowa, USA) -* `us-east1` (Moncks Corner, South Carolina, USA) -* `us-east4` (Ashburn, Northern Virginia, USA) -* `us-east5` (Columbus, Ohio) -* `us-south1` (Dallas, Texas) -* `us-west1` (The Dalles, Oregon, USA) -* `us-west2` (Los Angeles, California, USA) -* `us-west3` (Salt Lake City, Utah, USA) -* `us-west4` (Las Vegas, Nevada, USA) diff --git a/modules/installation-gcp-service-account.adoc b/modules/installation-gcp-service-account.adoc deleted file mode 100644 index c8e82cb1fc44..000000000000 --- a/modules/installation-gcp-service-account.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-service-account_{context}"] -= Creating a service account in GCP - -{product-title} requires a Google Cloud Platform (GCP) service account that provides authentication and authorization to access data in the Google APIs. If you do not have an existing IAM service account that contains the required roles in your project, you must create one. - -.Prerequisites - -* You created a project to host your cluster. - -.Procedure - -. Create a service account in the project that you use to host your -{product-title} cluster. See -link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[Creating a service account] -in the GCP documentation. - -. Grant the service account the appropriate permissions. You can either -grant the individual permissions that follow or assign the `Owner` role to it. 
-See link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource[Granting roles to a service account for specific resources]. -+ -[NOTE] -==== -While making the service account an owner of the project is the easiest way to gain the required permissions, it means that service account has complete control over the project. You must determine if the risk that comes from offering that power is acceptable. -==== - -. You can create the service account key in JSON format, or attach the service account to a GCP virtual machine. -See link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Creating service account keys] and link:https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances[Creating and enabling service accounts for instances] in the GCP documentation. -+ -You must have a service account key or a virtual machine with an attached service account to create the cluster. -+ -[NOTE] -==== -If you use a virtual machine with an attached service account to create your cluster, you must set `credentialsMode: Manual` in the `install-config.yaml` file before installation. -==== \ No newline at end of file diff --git a/modules/installation-gcp-shared-vpc-config.adoc b/modules/installation-gcp-shared-vpc-config.adoc deleted file mode 100644 index 0dcd9c7bd465..000000000000 --- a/modules/installation-gcp-shared-vpc-config.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// This file is referenced in the following assembly: -// installing/installing_gcp/installing-gcp-shared-vpc.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-shared-vpc-config_{context}"] -= Sample customized install-config.yaml file for shared VPC installation -There are several configuration parameters which are required to install {product-title} on GCP using a shared VPC. The following is a sample `install-config.yaml` file which demonstrates these fields. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must modify this file with the correct values for your environment and cluster. -==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -credentialsMode: Passthrough <1> -metadata: - name: cluster_name -platform: - gcp: - computeSubnet: shared-vpc-subnet-1 <2> - controlPlaneSubnet: shared-vpc-subnet-2 <3> - network: shared-vpc <4> - networkProjectID: host-project-name <5> - projectID: service-project-name <6> - region: us-east1 - defaultMachinePlatform: - tags: <7> - - global-tag1 -controlPlane: - name: master - platform: - gcp: - tags: <7> - - control-plane-tag1 - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - replicas: 3 -compute: -- name: worker - platform: - gcp: - tags: <7> - - compute-tag1 - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - replicas: 3 -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... <8> ----- -<1> `credentialsMode` must be set to `Passthrough` to allow the cluster to use the provided GCP service account after cluster creation. See the "Prerequisites" section for the required GCP permissions that your service account must have. -<2> The name of the subnet in the shared VPC for compute machines to use. -<3> The name of the subnet in the shared VPC for control plane machines to use. -<4> The name of the shared VPC. 
-<5> The name of the host project where the shared VPC exists. -<6> The name of the GCP project where you want to install the cluster. -<7> Optional. One or more network tags to apply to compute machines, control plane machines, or all machines. -<8> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. \ No newline at end of file diff --git a/modules/installation-gcp-shared-vpc-ingress.adoc b/modules/installation-gcp-shared-vpc-ingress.adoc deleted file mode 100644 index b6048ac391de..000000000000 --- a/modules/installation-gcp-shared-vpc-ingress.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// File included in the following assemblies: -// * installation/installing_gcp/installing-gcp-shared-vpc.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-shared-vpc-ingress_{context}"] -= Optional: Adding Ingress DNS records for shared VPC installations -If the public DNS zone exists in a host project outside the project where you installed your cluster, you must manually create DNS records that point at the Ingress load balancer. You can create either a wildcard `*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other records per your requirements. - -.Prerequisites -* You completed the installation of {product-title} on GCP into a shared VPC. -* Your public DNS zone exists in a host project separate from the service project that contains your cluster. - -.Procedure -. Verify that the Ingress router has created a load balancer and populated the `EXTERNAL-IP` field by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.18.154 35.233.157.184 80:32288/TCP,443:31215/TCP 98 ----- -. Record the external IP address of the router by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}' ----- -. Add a record to your GCP public zone with the router's external IP address and the name `*.apps.<cluster_name>.<cluster_domain>`. You can use the `gcloud` command line utility or the GCP web console. -. To add manual records instead of a wildcard record, create entries for each of the cluster's current routes. 
You can gather these routes by running the following command: -+ -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- -+ -.Example output -[source,terminal] ----- -oauth-openshift.apps.your.cluster.domain.example.com -console-openshift-console.apps.your.cluster.domain.example.com -downloads-openshift-console.apps.your.cluster.domain.example.com -alertmanager-main-openshift-monitoring.apps.your.cluster.domain.example.com -prometheus-k8s-openshift-monitoring.apps.your.cluster.domain.example.com ----- diff --git a/modules/installation-gcp-tested-machine-types.adoc b/modules/installation-gcp-tested-machine-types.adoc deleted file mode 100644 index 7909a3c131ec..000000000000 --- a/modules/installation-gcp-tested-machine-types.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_gcp/installing-gcp-customizations.adoc -// installing/installing_gcp/installing-gcp-network-customizations.adoc -// installing/installing_gcp/installing-gcp-private.adoc -// installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// installing/installing_gcp/installing-gcp-user-infra.adoc -// installing/installing_gcp/installing-gcp-vpc.adoc -// installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// installing/installing_gcp/installing-restricted-networks-gcp.adoc - -[id="installation-gcp-tested-machine-types_{context}"] -= Tested instance types for GCP - -The following Google Cloud Platform instance types have been tested with {product-title}. - -.Machine series -[%collapsible] -==== -include::https://raw.githubusercontent.com/openshift/installer/master/docs/user/gcp/tested_instance_types.md[] -==== diff --git a/modules/installation-gcp-user-infra-adding-ingress.adoc b/modules/installation-gcp-user-infra-adding-ingress.adoc deleted file mode 100644 index 40773d2cd6b5..000000000000 --- a/modules/installation-gcp-user-infra-adding-ingress.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-gcp-user-infra-adding-ingress_{context}"] -ifndef::shared-vpc[] -= Optional: Adding the ingress DNS records -endif::shared-vpc[] -ifdef::shared-vpc[] -= Adding the ingress DNS records -endif::shared-vpc[] - -ifndef::shared-vpc[] -If you removed the DNS zone configuration when creating Kubernetes manifests and generating Ignition configs, you must manually create DNS records that point at the ingress load balancer. You can create either a wildcard `*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other records per your requirements. -endif::[] -ifdef::shared-vpc[] -DNS zone configuration is removed when creating Kubernetes manifests and generating Ignition configs. You must manually create DNS records that point at the ingress load balancer. You can create either a wildcard -`*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other records per your requirements. -endif::[] - -.Prerequisites - -* Configure a GCP account. -* Remove the DNS Zone configuration when creating Kubernetes manifests and -generating Ignition configs. 
-* Create and configure a VPC and associated subnets in GCP. -* Create and configure networking and load balancers in GCP. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. -* Create the worker machines. - -.Procedure - -. Wait for the Ingress router to create a load balancer and populate the `EXTERNAL-IP` field: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get service router-default ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-default LoadBalancer 172.30.18.154 35.233.157.184 80:32288/TCP,443:31215/TCP 98 ----- - -. Add the A record to your zones: -** To use A records: -... Export the variable for the router IP address: -+ -[source,terminal] ----- -$ export ROUTER_IP=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'` ----- -... Add the A record to the private zones: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 300 --type A --zone ${INFRA_ID}-private-zone -$ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 300 --type A --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} ----- -endif::shared-vpc[] -... For an external cluster, also add the A record to the public zones: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 300 --type A --zone ${BASE_DOMAIN_ZONE_NAME} -$ gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} ----- -endif::shared-vpc[] -ifdef::shared-vpc[] -[source,terminal] ----- -$ if [ -f transaction.yaml ]; then rm transaction.yaml; fi -$ gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. 
--ttl 300 --type A --zone ${BASE_DOMAIN_ZONE_NAME} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} -$ gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} --project ${HOST_PROJECT} --account ${HOST_PROJECT_ACCOUNT} ----- -endif::shared-vpc[] - -** To add explicit domains instead of using a wildcard, -create entries for each of the cluster's current routes: -+ -[source,terminal] ----- -$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes ----- -+ -.Example output -[source,terminal] ----- -oauth-openshift.apps.your.cluster.domain.example.com -console-openshift-console.apps.your.cluster.domain.example.com -downloads-openshift-console.apps.your.cluster.domain.example.com -alertmanager-main-openshift-monitoring.apps.your.cluster.domain.example.com -prometheus-k8s-openshift-monitoring.apps.your.cluster.domain.example.com ----- - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!shared-vpc: -endif::[] diff --git a/modules/installation-gcp-user-infra-completing.adoc b/modules/installation-gcp-user-infra-completing.adoc deleted file mode 100644 index e96f6b9466ae..000000000000 --- a/modules/installation-gcp-user-infra-completing.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-user-infra-installation_{context}"] -= Completing a GCP installation on user-provisioned infrastructure - -After you start the {product-title} installation on Google Cloud Platform (GCP) -user-provisioned infrastructure, you can monitor the cluster events until the -cluster is ready. - -.Prerequisites - -* Deploy the bootstrap machine for an {product-title} cluster on user-provisioned GCP infrastructure. -* Install the `oc` CLI and log in. - -.Procedure - -. Complete the cluster installation: -+ -[source,terminal] ----- -$ ./openshift-install --dir <installation_directory> wait-for install-complete <1> ----- -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the cluster to initialize... ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -+ -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -. Observe the running state of your cluster. -+ --- -.. 
Run the following command to view the current cluster version and status: -+ -[source,terminal] ----- -$ oc get clusterversion ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version False True 24m Working towards 4.5.4: 99% complete ----- - -.. Run the following command to view the Operators managed on the control plane by -the Cluster Version Operator (CVO): -+ -[source,terminal] ----- -$ oc get clusteroperators ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.5.4 True False False 7m56s -cloud-credential 4.5.4 True False False 31m -cluster-autoscaler 4.5.4 True False False 16m -console 4.5.4 True False False 10m -csi-snapshot-controller 4.5.4 True False False 16m -dns 4.5.4 True False False 22m -etcd 4.5.4 False False False 25s -image-registry 4.5.4 True False False 16m -ingress 4.5.4 True False False 16m -insights 4.5.4 True False False 17m -kube-apiserver 4.5.4 True False False 19m -kube-controller-manager 4.5.4 True False False 20m -kube-scheduler 4.5.4 True False False 20m -kube-storage-version-migrator 4.5.4 True False False 16m -machine-api 4.5.4 True False False 22m -machine-config 4.5.4 True False False 22m -marketplace 4.5.4 True False False 16m -monitoring 4.5.4 True False False 10m -network 4.5.4 True False False 23m -node-tuning 4.5.4 True False False 23m -openshift-apiserver 4.5.4 True False False 17m -openshift-controller-manager 4.5.4 True False False 15m -openshift-samples 4.5.4 True False False 16m -operator-lifecycle-manager 4.5.4 True False False 22m -operator-lifecycle-manager-catalog 4.5.4 True False False 22m -operator-lifecycle-manager-packageserver 4.5.4 True False False 18m -service-ca 4.5.4 True False False 23m -service-catalog-apiserver 4.5.4 True False False 23m -service-catalog-controller-manager 4.5.4 True False False 23m -storage 4.5.4 True False False 17m ----- - -.. Run the following command to view your cluster pods: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system etcd-member-ip-10-0-3-111.us-east-2.compute.internal 1/1 Running 0 35m -kube-system etcd-member-ip-10-0-3-239.us-east-2.compute.internal 1/1 Running 0 37m -kube-system etcd-member-ip-10-0-3-24.us-east-2.compute.internal 1/1 Running 0 35m -openshift-apiserver-operator openshift-apiserver-operator-6d6674f4f4-h7t2t 1/1 Running 1 37m -openshift-apiserver apiserver-fm48r 1/1 Running 0 30m -openshift-apiserver apiserver-fxkvv 1/1 Running 0 29m -openshift-apiserver apiserver-q85nm 1/1 Running 0 29m -... -openshift-service-ca-operator openshift-service-ca-operator-66ff6dc6cd-9r257 1/1 Running 0 37m -openshift-service-ca apiservice-cabundle-injector-695b6bcbc-cl5hm 1/1 Running 0 35m -openshift-service-ca configmap-cabundle-injector-8498544d7-25qn6 1/1 Running 0 35m -openshift-service-ca service-serving-cert-signer-6445fc9c6-wqdqn 1/1 Running 0 35m -openshift-service-catalog-apiserver-operator openshift-service-catalog-apiserver-operator-549f44668b-b5q2w 1/1 Running 0 32m -openshift-service-catalog-controller-manager-operator openshift-service-catalog-controller-manager-operator-b78cr2lnm 1/1 Running 0 31m ----- --- -+ -When the current cluster version is `AVAILABLE`, the installation is complete. 
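As an additional verification that is not part of the original procedure, you can also confirm that every node has joined the cluster and reports a `Ready` status:

[source,terminal]
----
$ oc get nodes
----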
diff --git a/modules/installation-gcp-user-infra-config-host-project-vpc.adoc b/modules/installation-gcp-user-infra-config-host-project-vpc.adoc deleted file mode 100644 index 05b7728db5e4..000000000000 --- a/modules/installation-gcp-user-infra-config-host-project-vpc.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-user-infra-config-host-project-vpc_{context}"] -= Configuring the GCP project that hosts your shared VPC network - -If you use a shared Virtual Private Cloud (VPC) to host your {product-title} cluster in Google Cloud Platform (GCP), you must configure the project that hosts it. - -[NOTE] -==== -If you already have a project that hosts the shared VPC network, review this section to ensure that the project meets all of the requirements to install an {product-title} cluster. -==== - -.Procedure - -. Create a project to host the shared VPC for your {product-title} cluster. See -link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the GCP documentation. - -. Create a service account in the project that hosts your shared VPC. See -link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[Creating a service account] -in the GCP documentation. - -. Grant the service account the appropriate permissions. You can either -grant the individual permissions that follow or assign the `Owner` role to it. -See link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource[Granting roles to a service account for specific resources]. -+ -[NOTE] -==== -While making the service account an owner of the project is the easiest way to gain the required permissions, it means that service account has complete control over the project. You must determine if the risk that comes from offering that power is acceptable. - -The service account for the project that hosts the shared VPC network requires the following roles: - -* Compute Network User -* Compute Security Admin -* Deployment Manager Editor -* DNS Administrator -* Security Admin -* Network Management Admin -==== diff --git a/modules/installation-gcp-user-infra-rhcos.adoc b/modules/installation-gcp-user-infra-rhcos.adoc deleted file mode 100644 index bf0eb3c9b92e..000000000000 --- a/modules/installation-gcp-user-infra-rhcos.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-user-infra-rhcos_{context}"] -= Creating the {op-system} cluster image for the GCP infrastructure - -You must use a valid {op-system-first} image for Google Cloud Platform (GCP) for -your {product-title} nodes. - -.Procedure - -ifndef::openshift-origin[] -. Obtain the {op-system} image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/[{op-system} image mirror] page. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download an image with the highest version that is -less than or equal to the {product-title} version that you install. Use the image version -that matches your {product-title} version if it is available. 
-==== -+ -The file name contains the {product-title} version number in the format -`rhcos-<version>-<arch>-gcp.<arch>.tar.gz`. -endif::openshift-origin[] -ifdef::openshift-origin[] -. Obtain the {op-system} image from the -link:https://getfedora.org/en/coreos/download?tab=cloud_operators&stream=stable[{op-system} Downloads] page -endif::openshift-origin[] - -. Create the Google storage bucket: -+ -[source,terminal] ----- -$ gsutil mb gs://<bucket_name> ----- - -. Upload the {op-system} image to the Google storage bucket: -+ -[source,terminal] ----- -$ gsutil cp <downloaded_image_file_path>/rhcos-<version>-x86_64-gcp.x86_64.tar.gz gs://<bucket_name> ----- - -. Export the uploaded {op-system} image location as a variable: -+ -[source,terminal] ----- -$ export IMAGE_SOURCE=gs://<bucket_name>/rhcos-<version>-x86_64-gcp.x86_64.tar.gz ----- - -. Create the cluster image: -+ -[source,terminal] ----- -$ gcloud compute images create "${INFRA_ID}-rhcos-image" \ - --source-uri="${IMAGE_SOURCE}" ----- diff --git a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc b/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc deleted file mode 100644 index e2eca17b69d8..000000000000 --- a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="installation-gcp-user-infra-shared-vpc-config-yaml_{context}"] -= Sample customized `install-config.yaml` file for GCP - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. -==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> - hyperthreading: Enabled <3> <4> - name: master - platform: - gcp: - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - tags: <5> - - control-plane-tag1 - - control-plane-tag2 - replicas: 3 -compute: <2> -- hyperthreading: Enabled <3> - name: worker - platform: - gcp: - type: n2-standard-4 - zones: - - us-central1-a - - us-central1-c - tags: <5> - - compute-tag1 - - compute-tag2 - replicas: 0 -metadata: - name: test-cluster -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - gcp: - defaultMachinePlatform: - tags: <5> - - global-tag1 - - global-tag2 - projectID: openshift-production <7> - region: us-central1 <8> -pullSecret: '{"auths": ...}' -ifndef::openshift-origin[] -fips: false <9> -sshKey: ssh-ed25519 AAAA... <10> -publish: Internal <11> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <9> -publish: Internal <10> -endif::openshift-origin[] ----- -<1> Specify the public DNS on the host project. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. 
Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<4> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> Optional: A set of network tags to apply to the control plane or compute machine sets. The `platform.gcp.defaultMachinePlatform.tags` parameter applies to both control plane and compute machines. If the `compute.platform.gcp.tags` or `controlPlane.platform.gcp.tags` parameters are set, they override the `platform.gcp.defaultMachinePlatform.tags` parameter. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<7> Specify the main project where the VM instances reside. -<8> Specify the region that your VPC network is in. -ifndef::openshift-origin[] -<9> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<10> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<9> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifndef::openshift-origin[] -<11> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -To use a shared VPC in a cluster that uses infrastructure that you provision, you must set `publish` to `Internal`. The installation program will no longer be able to access the public DNS zone for the base domain in the host project. -endif::openshift-origin[] -ifdef::openshift-origin[] -<10> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster, which cannot be accessed from the internet. The default value is `External`. -To use a shared VPC in a cluster that uses infrastructure that you provision, you must set `publish` to `Internal`. The installation program will no longer be able to access the public DNS zone for the base domain in the host project. 
-endif::openshift-origin[] diff --git a/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc b/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc deleted file mode 100644 index 07d5f2439e11..000000000000 --- a/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -:_content-type: PROCEDURE -[id="installation-gcp-user-infra-wait-for-bootstrap_{context}"] -= Wait for bootstrap completion and remove bootstrap resources in GCP - -After you create all of the required infrastructure in Google Cloud Platform -(GCP), wait for the bootstrap process to complete on the machines that you -provisioned by using the Ignition config files that you generated with the -installation program. - -.Prerequisites - -* Configure a GCP account. -* Generate the Ignition config files for your cluster. -* Create and configure a VPC and associated subnets in GCP. -* Create and configure networking and load balancers in GCP. -* Create control plane and compute roles. -* Create the bootstrap machine. -* Create the control plane machines. - -.Procedure - -. Change to the directory that contains the installation program and run the -following command: -+ -[source,terminal] ----- -$ ./openshift-install wait-for bootstrap-complete --dir <installation_directory> \ <1> - --log-level info <2> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -+ -If the command exits without a `FATAL` warning, your production control plane -has initialized. - -. Delete the bootstrap resources: -+ -[source,terminal] ----- -$ gcloud compute backend-services remove-backend ${INFRA_ID}-api-internal-backend-service --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-ig --instance-group-zone=${ZONE_0} ----- -+ -[source,terminal] ----- -$ gsutil rm gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign ----- -+ -[source,terminal] ----- -$ gsutil rb gs://${INFRA_ID}-bootstrap-ignition ----- -+ -[source,terminal] ----- -$ gcloud deployment-manager deployments delete ${INFRA_ID}-bootstrap ----- diff --git a/modules/installation-generate-aws-user-infra-install-config.adoc b/modules/installation-generate-aws-user-infra-install-config.adoc deleted file mode 100644 index b81cf578cecc..000000000000 --- a/modules/installation-generate-aws-user-infra-install-config.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc - -ifeval::["{context}" == "installing-aws-user-infra"] -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:localzone: -endif::[] - -:_content-type: PROCEDURE -[id="installation-generate-aws-user-infra-install-config_{context}"] -= Creating the installation configuration file - -Generate and customize the installation configuration file that the -installation program needs to deploy your cluster. 
- -.Prerequisites - -* You obtained the {product-title} installation program -ifndef::localzone[] -for user-provisioned infrastructure -endif::localzone[] -and the pull secret for your cluster. -ifdef::restricted[] -For a restricted network installation, these files are on your mirror host. -endif::restricted[] -* You checked that you are deploying your cluster to a region with an accompanying {op-system-first} AMI published by Red Hat. If you are deploying to a region that requires a custom AMI, such as an AWS GovCloud region, you must create the `install-config.yaml` file manually. - -.Procedure - -. Create the `install-config.yaml` file. -.. Change to the directory that contains the installation program and run the following command: -+ -[source,terminal] ----- -$ ./openshift-install create install-config --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the directory name to store the -files that the installation program creates. -+ -[IMPORTANT] -==== -Specify an empty directory. Some installation assets, like bootstrap X.509 -certificates have short expiration intervals, so you must not reuse an -installation directory. If you want to reuse individual files from another -cluster installation, you can copy them into your directory. However, the file -names for the installation assets might change between releases. Use caution -when copying installation files from an earlier {product-title} version. -==== -.. At the prompts, provide the configuration details for your cloud: -... Optional: Select an SSH key to use to access your cluster machines. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -... Select *aws* as the platform to target. -... If you do not have an AWS profile stored on your computer, enter the AWS -access key ID and secret access key for the user that you configured to run the -installation program. -+ -[NOTE] -==== -The AWS access key ID and secret access key are stored in `~/.aws/credentials` in the home directory of the current user on the installation host. You are prompted for the credentials by the installation program if the credentials for the exported profile are not present in the file. Any credentials that you provide to the installation program are stored in the file. -==== -... Select the AWS region to deploy the cluster to. -ifdef::localzone[] -The region that you specify must be the same region that contains the Local Zone that you opted into for your AWS account. -endif::localzone[] -... Select the base domain for the Route 53 service that you configured for your cluster. -... Enter a descriptive name for your cluster. -... Paste the {cluster-manager-url-pull}. -ifdef::openshift-origin[] -This field is optional. -endif::[] - -ifdef::restricted[] -. Edit the `install-config.yaml` file to give the additional information that -is required for an installation in a restricted network. -.. Update the `pullSecret` value to contain the authentication information for -your registry: -+ -[source,yaml] ----- -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' ----- -+ -For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. 
For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -.. Add the `additionalTrustBundle` parameter and value. The value must be the contents of the certificate file that you used for your mirror registry. The certificate file can be an existing, trusted certificate authority or the self-signed certificate that you generated for the mirror registry. -+ -[source,yaml] ----- -additionalTrustBundle: | - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- ----- -.. Add the image content resources: -+ -[source,yaml] ----- -imageContentSources: -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- -+ -Use the `imageContentSources` section from the output of the command to mirror the repository or the values that you used when you mirrored the content from the media that you brought into your restricted network. - -.. Optional: Set the publishing strategy to `Internal`: -+ -[source,yaml] ----- -publish: Internal ----- -+ -By setting this option, you create an internal Ingress Controller and a private load balancer. -endif::restricted[] - -ifdef::three-node-cluster[] -. If you are installing a three-node cluster, modify the `install-config.yaml` file by setting the `compute.replicas` parameter to `0`. This ensures that the cluster's control planes are schedulable. For more information, see "Installing a three-node cluster on AWS". -endif::three-node-cluster[] - -. Optional: Back up the `install-config.yaml` file. -+ -[IMPORTANT] -==== -The `install-config.yaml` file is consumed during the installation process. If -you want to reuse the file, you must back it up now. -==== - -ifeval::["{context}" == "installing-aws-user-infra"] -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:!localzone: -endif::[] diff --git a/modules/installation-generate-ignition-configs.adoc b/modules/installation-generate-ignition-configs.adoc deleted file mode 100644 index 112d7d050528..000000000000 --- a/modules/installation-generate-ignition-configs.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-generate-ignition-configs_{context}"] -= Creating the Ignition config files - -Because you must manually start the cluster machines, you must generate the -Ignition config files that the cluster needs to make its machines. - -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. 
The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -.Prerequisites - -* Obtain the {product-title} installation program and the pull secret for your cluster. -ifdef::restricted[] -For a restricted network installation, these files are on your mirror host. -endif::restricted[] - -.Procedure - -* Obtain the Ignition config files: -+ -[source,terminal] ----- -$ ./openshift-install create ignition-configs --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the directory name to store the -files that the installation program creates. -+ -[IMPORTANT] -==== -If you created an `install-config.yaml` file, specify the directory that contains -it. Otherwise, specify an empty directory. Some installation assets, like -bootstrap X.509 certificates have short expiration intervals, so you must not -reuse an installation directory. If you want to reuse individual files from another -cluster installation, you can copy them into your directory. However, the file -names for the installation assets might change between releases. Use caution -when copying installation files from an earlier {product-title} version. -==== -+ -The following files are generated -in the directory: -+ ----- -. -├── auth -│ ├── kubeadmin-password -│ └── kubeconfig -├── bootstrap.ign -├── master.ign -├── metadata.json -└── worker.ign ----- - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] diff --git a/modules/installation-getting-debug-information.adoc b/modules/installation-getting-debug-information.adoc deleted file mode 100644 index 78ac7814d936..000000000000 --- a/modules/installation-getting-debug-information.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-troubleshooting.adoc - -[id="installing-getting-debug-information_{context}"] -= Getting debug information from the installation program - -You can use any of the following actions to get debug information from the installation program. - -* Look at debug messages from a past installation in the hidden `.openshift_install.log` file. For example, enter: -+ -[source,terminal] ----- -$ cat ~/<installation_directory>/.openshift_install.log <1> ----- -<1> For `installation_directory`, specify the same directory you specified when you ran `./openshift-install create cluster`. - -* Change to the directory that contains the installation program and re-run it with `--log-level=debug`: -+ -[source,terminal] ----- -$ ./openshift-install create cluster --dir <installation_directory> --log-level debug <1> ----- -<1> For `installation_directory`, specify the same directory you specified when you ran `./openshift-install create cluster`. 
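If the hidden log file is long, it can help to filter it for failure-level messages first. The following pattern is only an illustrative example and assumes the default log location shown above:

[source,terminal]
----
$ grep -iE 'error|fatal' ~/<installation_directory>/.openshift_install.log
----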
diff --git a/modules/installation-ibm-cloud-config-yaml.adoc b/modules/installation-ibm-cloud-config-yaml.adoc deleted file mode 100644 index 1f056ce0efbb..000000000000 --- a/modules/installation-ibm-cloud-config-yaml.adoc +++ /dev/null @@ -1,298 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc - -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:with-networking: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:without-networking: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:private: -endif::[] - -:_content-type: REFERENCE -[id="installation-ibm-cloud-config-yaml_{context}"] -= Sample customized install-config.yaml file for IBM Cloud VPC - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and then modify it. -==== - -ifdef::with-networking,without-networking[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> <3> - hyperthreading: Enabled <4> - name: master - platform: - ibmcloud: {} - replicas: 3 -compute: <2> <3> -- hyperthreading: Enabled <4> - name: worker - platform: - ibmcloud: {} - replicas: 3 -metadata: - name: test-cluster <1> -ifdef::without-networking[] -networking: -endif::[] -ifdef::with-networking[] -networking: <2> -endif::[] - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <5> - serviceNetwork: - - 172.30.0.0/16 -platform: - ibmcloud: - region: us-south <1> -credentialsMode: Manual -publish: External -pullSecret: '{"auths": ...}' <1> -ifndef::openshift-origin[] -fips: false <6> -sshKey: ssh-ed25519 AAAA... <7> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <6> -endif::openshift-origin[] ----- -<1> Required. The installation program prompts you for this value. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Enables or disables simultaneous multithreading, also known as Hyper-Threading. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. 
Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -ifndef::openshift-origin[] -<6> Enables or disables FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<7> Optional: provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<6> Optional: provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::with-networking,without-networking[] - -ifdef::vpc[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> <3> - hyperthreading: Enabled <4> - name: master - platform: - ibmcloud: {} - replicas: 3 -compute: <2> <3> -- hyperthreading: Enabled <4> - name: worker - platform: - ibmcloud: {} - replicas: 3 -metadata: - name: test-cluster <1> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <5> - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - ibmcloud: - region: eu-gb <1> - resourceGroupName: eu-gb-example-network-rg <7> - networkResourceGroupName: eu-gb-example-existing-network-rg <8> - vpcName: eu-gb-example-network-1 <9> - controlPlaneSubnets: <10> - - eu-gb-example-network-1-cp-eu-gb-1 - - eu-gb-example-network-1-cp-eu-gb-2 - - eu-gb-example-network-1-cp-eu-gb-3 - computeSubnets: <11> - - eu-gb-example-network-1-compute-eu-gb-1 - - eu-gb-example-network-1-compute-eu-gb-2 - - eu-gb-example-network-1-compute-eu-gb-3 -credentialsMode: Manual -publish: External -pullSecret: '{"auths": ...}' <1> -ifndef::openshift-origin[] -fips: false <12> -sshKey: ssh-ed25519 AAAA... <13> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <12> -endif::openshift-origin[] ----- -<1> Required. The installation program prompts you for this value. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Enables or disables simultaneous multithreading, also known as Hyper-Threading. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. 
-+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> The machine CIDR must contain the subnets for the compute machines and control plane machines. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<7> The name of an existing resource group. All installer-provisioned cluster resources are deployed to this resource group. If undefined, a new resource group is created for the cluster. -<8> Specify the name of the resource group that contains the existing virtual private cloud (VPC). The existing VPC and subnets should be in this resource group. The cluster will be installed to this VPC. -<9> Specify the name of an existing VPC. -<10> Specify the name of the existing subnets to which to deploy the control plane machines. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -<11> Specify the name of the existing subnets to which to deploy the compute machines. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -ifndef::openshift-origin[] -<12> Enables or disables FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<13> Optional: provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<12> Optional: provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::vpc[] - -ifdef::private[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> <3> - hyperthreading: Enabled <4> - name: master - platform: - ibmcloud: {} - replicas: 3 -compute: <2> <3> -- hyperthreading: Enabled <4> - name: worker - platform: - ibmcloud: {} - replicas: 3 -metadata: - name: test-cluster <1> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <5> - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 <6> - networkType: OVNKubernetes <7> - serviceNetwork: - - 172.30.0.0/16 -platform: - ibmcloud: - region: eu-gb <1> - resourceGroupName: eu-gb-example-network-rg <8> - networkResourceGroupName: eu-gb-example-existing-network-rg <9> - vpcName: eu-gb-example-network-1 <10> - controlPlaneSubnets: <11> - - eu-gb-example-network-1-cp-eu-gb-1 - - eu-gb-example-network-1-cp-eu-gb-2 - - eu-gb-example-network-1-cp-eu-gb-3 - computeSubnets: <12> - - eu-gb-example-network-1-compute-eu-gb-1 - - eu-gb-example-network-1-compute-eu-gb-2 - - eu-gb-example-network-1-compute-eu-gb-3 -credentialsMode: Manual -publish: Internal <13> -pullSecret: '{"auths": ...}' <1> -ifndef::openshift-origin[] -fips: false <14> -sshKey: ssh-ed25519 AAAA... 
<15> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <14> -endif::openshift-origin[] ----- -<1> Required. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Enables or disables simultaneous multithreading, also known as Hyper-Threading. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> The machine CIDR must contain the subnets for the compute machines and control plane machines. -<6> The CIDR must contain the subnets defined in `platform.ibmcloud.controlPlaneSubnets` and `platform.ibmcloud.computeSubnets`. -<7> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<8> The name of an existing resource group. All installer-provisioned cluster resources are deployed to this resource group. If undefined, a new resource group is created for the cluster. -<9> Specify the name of the resource group that contains the existing virtual private cloud (VPC). The existing VPC and subnets should be in this resource group. The cluster will be installed to this VPC. -<10> Specify the name of an existing VPC. -<11> Specify the name of the existing subnets to which to deploy the control plane machines. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -<12> Specify the name of the existing subnets to which to deploy the compute machines. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -<13> How to publish the user-facing endpoints of your cluster. Set `publish` to `Internal` to deploy a private cluster. The default value is `External`. -ifndef::openshift-origin[] -<14> Enables or disables FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<15> Optional: provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. 
-endif::openshift-origin[]
-+
-[NOTE]
-====
-For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses.
-====
-endif::private[]
-
-
-ifeval::["{context}" == "installing-ibm-cloud-network-customizations"]
-:!with-networking:
-endif::[]
-ifeval::["{context}" == "installing-ibm-cloud-customizations"]
-:!without-networking:
-endif::[]
-ifeval::["{context}" == "installing-ibm-cloud-vpc"]
-:!vpc:
-endif::[]
-ifeval::["{context}" == "installing-ibm-cloud-private"]
-:!private:
-endif::[]
diff --git a/modules/installation-ibm-cloud-creating-api-key.adoc
deleted file mode 100644
index f77f544984fc..000000000000
--- a/modules/installation-ibm-cloud-creating-api-key.adoc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Module included in the following assemblies:
-//
-// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc
-// installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc
-
-:_content-type: PROCEDURE
-[id="installation-ibm-cloud-creating-api-key_{context}"]
-= Creating an API key
-
-You must create a user API key or a service ID API key for your IBM Cloud account.
-
-.Prerequisites
-
-* You have assigned the required access policies to your IBM Cloud account.
-* You have attached your IAM access policies to an access group, or other appropriate resource.
-
-.Procedure
-
-* Create an API key, depending on how you defined your IAM access policies.
-+
-For example, if you assigned your access policies to a user, you must create a link:https://cloud.ibm.com/docs/account?topic=account-userapikey[user API key]. If you assigned your access policies to a service ID, you must create a link:https://cloud.ibm.com/docs/account?topic=account-serviceidapikeys[service ID API key]. If your access policies are assigned to an access group, you can use either API key type. For more information on IBM Cloud VPC API keys, see link:https://cloud.ibm.com/docs/account?topic=account-manapikey&interface=ui[Understanding API keys].
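For example, if you use the IBM Cloud CLI (`ibmcloud`) and are logged in to the target account, a user API key can be created with a command similar to the following. The key name, description, and output file are placeholders, not required values:

[source,terminal]
----
$ ibmcloud iam api-key-create ocp-install-key \
    -d "API key for OpenShift installation" \
    --file ocp-install-key.json
----

If you assigned your access policies to a service ID, the `ibmcloud iam service-api-key-create` command creates the equivalent service ID API key. In either case, store the generated key securely; it is written only at creation time.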
diff --git a/modules/installation-ibm-cloud-export-variables.adoc b/modules/installation-ibm-cloud-export-variables.adoc deleted file mode 100644 index f93c390548f8..000000000000 --- a/modules/installation-ibm-cloud-export-variables.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc - -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-power-vs: -endif::[] - -:_content-type: PROCEDURE -[id="installation-ibm-cloud-export-variables_{context}"] -= Exporting the API key - -You must set the API key you created as a global variable; the installation program ingests the variable during startup to set the API key. - -.Prerequisites - -* You have created either a user API key or service ID API key for your IBM Cloud account. - -.Procedure - -* Export your API key for your account as a global variable: -ifdef::ibm-vpc[] -+ -[source,terminal] ----- -$ export IC_API_KEY=<api_key> ----- -endif::ibm-vpc[] -ifdef::ibm-power-vs[] -+ -[source,terminal] ----- -$ export IBMCLOUD_API_KEY=<api_key> ----- -endif::ibm-power-vs[] - -[IMPORTANT] -==== -You must set the variable name exactly as specified; the installation program expects the variable name to be present during startup. 
-==== - -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-power-vs: -endif::[] diff --git a/modules/installation-ibm-cloud-iam-policies-api-key.adoc b/modules/installation-ibm-cloud-iam-policies-api-key.adoc deleted file mode 100644 index 5c2222e02e5f..000000000000 --- a/modules/installation-ibm-cloud-iam-policies-api-key.adoc +++ /dev/null @@ -1,158 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc -// installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc - -ifeval::["{context}" == "installing-ibm-cloud-account"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-account-power-vs"] -:ibm-power-vs: -endif::[] - -:_content-type: CONCEPT -[id="installation-ibm-cloud-iam-policies-api-key_{context}"] -= IBM Cloud VPC IAM Policies and API Key - -To install {product-title} into your IBM Cloud account, the installation program requires an IAM API key, which provides authentication and authorization to access IBM Cloud service APIs. You can use an existing IAM API key that contains the required policies or create a new one. - -For an IBM Cloud IAM overview, see the IBM Cloud link:https://cloud.ibm.com/docs/account?topic=account-iamoverview[documentation]. - -ifdef::ibm-vpc[] -[id="required-access-policies-ibm-cloud_{context}"] -== Required access policies - -You must assign the required access policies to your IBM Cloud account. - -.Required access policies -[cols="1,2,2,2,3",options="header"] -|=== -|Service type |Service |Access policy scope |Platform access |Service access - -|Account management -|IAM Identity Service -|All resources or a subset of resources ^[1]^ -|Editor, Operator, Viewer, Administrator -|Service ID creator - -|Account management ^[2]^ -|Identity and Access Management -|All resources -|Editor, Operator, Viewer, Administrator -| - -|IAM services -|Cloud Object Storage -|All resources or a subset of resources ^[1]^ -|Editor, Operator, Viewer, Administrator -|Reader, Writer, Manager, Content Reader, Object Reader, Object Writer - -|IAM services -|Internet Services ^3^ -|All resources or a subset of resources ^[1]^ -|Editor, Operator, Viewer, Administrator -|Reader, Writer, Manager - -|IAM services -|DNS Services ^3^ -|All resources or a subset of resources ^[1]^ -|Editor, Operator, Viewer, Administrator -|Reader, Writer, Manager - - -|IAM services -|VPC Infrastructure Services -|All resources or a subset of resources ^[1]^ -|Editor, Operator, Viewer, Administrator -|Reader, Writer, Manager -|=== -[.small] --- -1. The policy access scope should be set based on how granular you want to assign access. The scope can be set to *All resources* or *Resources based on selected attributes*. -2. Optional: This access policy is only required if you want the installation program to create a resource group. 
For more information about resource groups, see the IBM link:https://cloud.ibm.com/docs/account?topic=account-rgs[documentation].
-3. Only one service is required. The service that is required depends on the type of cluster that you are installing. If you are installing a public cluster, `Internet Services` is required. If you are installing a private cluster, `DNS Services` is required.
---
-//TODO: IBM confirmed current values in the table above. They hope to provide more guidance on possibly scoping down the permissions (related to resource group actions).
-endif::ibm-vpc[]
-
-ifdef::ibm-power-vs[]
-[id="pre-requisite-permissions-ibm-cloud_{context}"]
-== Pre-requisite permissions
-
-.Pre-requisite permissions
-[cols="1,2",options="header"]
-|===
-|Role |Access
-
-|Viewer, Operator, Editor, Administrator, Reader, Writer, Manager
-|Internet Services service in <resource_group> resource group
-
-|Viewer, Operator, Editor, Administrator, User API key creator, Service ID creator
-|IAM Identity Service service
-
-|Viewer, Operator, Administrator, Editor, Reader, Writer, Manager, Console Administrator
-|VPC Infrastructure Services service in <resource_group> resource group
-
-|Viewer
-|Resource Group: Access to view the resource group itself. The resource type should equal `Resource group`, with a value of <your_resource_group_name>.
-|===
-
-[id="cluster-creation-permissions-ibm-cloud_{context}"]
-== Cluster-creation permissions
-
-.Cluster-creation permissions
-[cols="1,2",options="header"]
-|===
-|Role |Access
-
-|Viewer
-|<resource_group> (Resource Group Created for Your Team)
-
-|Viewer, Operator, Editor, Reader, Writer, Manager
-|All services in Default resource group
-
-|Viewer, Reader
-|Internet Services service
-
-|Viewer, Operator, Reader, Writer, Manager, Content Reader, Object Reader, Object Writer, Editor
-|Cloud Object Storage service
-
-|Viewer
-|Default resource group: The resource type should equal `Resource group`, with a value of `Default`. If your account administrator changed your account's default resource group to something other than Default, use that value instead.
-
-|Viewer, Operator, Editor, Reader, Manager
-|Power Systems Virtual Server service in <resource_group> resource group
-
-|Viewer, Operator, Editor, Reader, Writer, Manager, Administrator
-|Internet Services service in <resource_group> resource group: CIS functional scope string equals reliability
-
-|Viewer, Operator, Editor
-|Direct Link service
-
-|Viewer, Operator, Editor, Administrator, Reader, Writer, Manager, Console Administrator
-|VPC Infrastructure Services service in <resource_group> resource group
-|===
-endif::ibm-power-vs[]
-
-[id="access-policy-assignment-ibm-cloud_{context}"]
-== Access policy assignment
-
-ifdef::ibm-vpc[]
-In IBM Cloud VPC IAM, access policies can be attached to different subjects:
-endif::ibm-vpc[]
-ifdef::ibm-power-vs[]
-In IBM Cloud IAM, access policies can be attached to different subjects:
-endif::ibm-power-vs[]
-
-* Access group (Recommended)
-* Service ID
-* User
-
-The recommended method is to define IAM access policies in an link:https://cloud.ibm.com/docs/account?topic=account-groups[access group]. This helps organize all the access required for {product-title} and enables you to onboard users and service IDs to this group. You can also assign access to link:https://cloud.ibm.com/docs/account?topic=account-assign-access-resources[users and service IDs] directly, if desired.
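As an illustrative sketch only, with the group name, description, and roles as placeholders, the access-group approach might look like the following when you use the IBM Cloud CLI:

[source,terminal]
----
$ ibmcloud iam access-group-create ocp-install-group -d "Access for OpenShift installation"

$ ibmcloud iam access-group-policy-create ocp-install-group \
    --roles Viewer,Operator,Editor,Administrator \
    --service-name is
----

In this sketch, `is` is the service name for VPC Infrastructure Services. Repeat the policy command with the roles and services that the preceding tables require, and then add your users or service IDs to the group.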
- -ifeval::["{context}" == "installing-ibm-cloud-account"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-account-power-vs"] -:!ibm-power-vs: -endif::[] \ No newline at end of file diff --git a/modules/installation-ibm-cloud-regions.adoc b/modules/installation-ibm-cloud-regions.adoc deleted file mode 100644 index 733507b03831..000000000000 --- a/modules/installation-ibm-cloud-regions.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_ibm_cloud_public/installing-ibm-cloud-account.adoc -// installing/installing_ibm_powervs/installing-ibm-cloud-account-power-vs.adoc - -ifeval::["{context}" == "installing-ibm-cloud-account"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-account-power-vs"] -:ibm-power-vs: -endif::[] - -:_content-type: REFERENCE -ifdef::ibm-vpc[] -[id="installation-ibm-cloud-regions_{context}"] -= Supported IBM Cloud VPC regions -endif::ibm-vpc[] -ifdef::ibm-power-vs[] -[id="installation-ibm-power-vs-regions_{context}"] -= Supported {ibmpowerProductName} Virtual Server regions and zones -endif::ibm-power-vs[] - -You can deploy an {product-title} cluster to the following regions: - -ifdef::ibm-vpc[] -//Not listed for openshift-install: br-sao, in-che, kr-seo - -* `au-syd` (Sydney, Australia) -* `br-sao` (Sao Paulo, Brazil) -* `ca-tor` (Toronto, Canada) -* `eu-de` (Frankfurt, Germany) -* `eu-gb` (London, United Kingdom) -* `jp-osa` (Osaka, Japan) -* `jp-tok` (Tokyo, Japan) -* `us-east` (Washington DC, United States) -* `us-south` (Dallas, United States) -endif::ibm-vpc[] -ifdef::ibm-power-vs[] - -* `dal` (Dallas, USA) -** `dal12` -* `us-east` (Washington DC, USA) -** `us-east` -* `eu-de` (Frankfurt, Germany) -** `eu-de-1` -** `eu-de-2` -* `lon` (London, UK) -** `lon04` -** `lon06` -* `osa` (Osaka, Japan) -** `osa21` -* `sao` (Sao Paulo, Brazil) -** `sao01` -* `syd` (Sydney, Australia) -** `syd04` -* `tok` (Tokyo, Japan) -** `tok04` -* `tor` (Toronto, Canada) -** `tor01` - -You might optionally specify the IBM Cloud VPC region in which the installer will create any VPC components. 
Supported regions in IBM Cloud are: - -* `us-south` -* `eu-de` -* `eu-gb` -* `jp-osa` -* `au-syd` -* `br-sao` -* `ca-tor` -* `jp-tok` -endif::ibm-power-vs[] - -ifeval::["{context}" == "installing-ibm-cloud-account"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-account-power-vs"] -:!ibm-power-vs: -endif::[] \ No newline at end of file diff --git a/modules/installation-ibm-power-vs-config-yaml.adoc b/modules/installation-ibm-power-vs-config-yaml.adoc deleted file mode 100644 index dfd06d641235..000000000000 --- a/modules/installation-ibm-power-vs-config-yaml.adoc +++ /dev/null @@ -1,354 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_power/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_power/installing-ibm-powervs-vpc.adoc - -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:private: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-powervs-vpc: -endif::[] - -:_content-type: REFERENCE -[id="installation-ibm-power-vs-config-yaml_{context}"] -= Sample customized install-config.yaml file for {ibmpowerProductName} Virtual Server - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. -==== - -ifdef::ibm-power-vs[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: <1> <2> -- architecture: ppc64le - hyperthreading: Enabled <3> - name: worker - platform: {} - replicas: 3 -controlPlane: <1> <2> - architecture: ppc64le - hyperthreading: Enabled <3> - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: example-cluster-name -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.0.0/24 - networkType: OVNKubernetes <4> - serviceNetwork: - - 172.30.0.0/16 -platform: - powervs: - userID: ibm-user-id - region: powervs-region - zone: powervs-zone - powervsResourceGroup: "ibmcloud-resource-group" <5> - serviceInstanceID: "powervs-region-service-instance-id" -vpcRegion : vpc-region -publish: External -pullSecret: '{"auths": ...}' <6> -sshKey: ssh-ed25519 AAAA... <7> ----- -<1> If you do not provide these parameters and values, the installation program provides the default value. -<2> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. 
By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -//ifndef::openshift-origin[] -//<5> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -//+ -//[IMPORTANT] -//==== -//The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -//==== -//<6> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -//endif::openshift-origin[] -<4> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<5> The name of an existing resource group. -<6> Required. The installation program prompts you for this value. -ifdef::openshift-origin[] -<7> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::ibm-power-vs[] - -ifdef::private[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: <1> <2> -- architecture: ppc64le - hyperthreading: Enabled <3> - name: worker - platform: {} - replicas: 3 -controlPlane: <1> <2> - architecture: ppc64le - hyperthreading: Enabled <3> - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: example-private-cluster-name -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <4> - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.0.0/24 - networkType: OVNKubernetes <5> - serviceNetwork: - - 172.30.0.0/16 -platform: - powervs: - userID: ibm-user-id - powervsResourceGroup: "ibmcloud-resource-group" - region: powervs-region - vpcName: name-of-existing-vpc <6> - cloudConnectionName: powervs-region-example-cloud-con-priv - vpcSubnets: - - powervs-region-example-subnet-1 - vpcRegion : vpc-region - zone: powervs-zone - serviceInstanceID: "powervs-region-service-instance-id" -publish: Internal <7> -pullSecret: '{"auths": ...}' <8> -sshKey: ssh-ed25519 AAAA... <9> ----- -<1> If you do not provide these parameters and values, the installation program provides the default value. -<2> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Both sections currently define a single machine pool. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. 
If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -<4> The machine CIDR must contain the subnets for the compute machines and control plane machines. -<5> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<6> Specify the name of an existing VPC. -<7> How to publish the user-facing endpoints of your cluster. Set publish to `Internal` to deploy a private cluster. -<8> Required. The installation program prompts you for this value. -<9> Provide the `sshKey` value that you use to access the machines in your cluster. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -//ifndef::openshift-origin[] -//<5> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -//+ -//[IMPORTANT] -//==== -//The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -//==== -//<6> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -//endif::openshift-origin[] - -ifdef::openshift-origin[] -<5> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::private[] - -ifdef::ibm-powervs-vpc[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: <1> <2> -- architecture: ppc64le - hyperthreading: Enabled <3> - name: worker - platform: {} - replicas: 3 -controlPlane: <1> <2> - architecture: ppc64le - hyperthreading: Enabled <3> - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: example-cluster-existing-vpc -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <4> - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.0.0/24 - networkType: OVNKubernetes <5> - serviceNetwork: - - 172.30.0.0/16 -platform: - powervs: - userID: ibm-user-id - powervsResourceGroup: "ibmcloud-resource-group" - region: powervs-region - vpcRegion : vpc-region - vpcName: name-of-existing-vpc <6> - vpcSubnets: <7> - - powervs-region-example-subnet-1 - zone: powervs-zone - serviceInstanceID: "powervs-region-service-instance-id" -credentialsMode: Manual -publish: External <8> -pullSecret: '{"auths": ...}' <9> -fips: false -sshKey: ssh-ed25519 AAAA... <10> ----- -<1> If you do not provide these parameters and values, the installation program provides the default value. -<2> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Both sections currently define a single machine pool. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. 
By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -<4> The machine CIDR must contain the subnets for the compute machines and control plane machines. -<5> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<6> Specify the name of an existing VPC. -<7> Specify the name of the existing VPC subnet. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -<8> How to publish the user-facing endpoints of your cluster. -<9> Required. The installation program prompts you for this value. -<10> Provide the `sshKey` value that you use to access the machines in your cluster. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -//ifndef::openshift-origin[] -//<5> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -//+ -//[IMPORTANT] -//==== -//The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -//==== -//<6> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -//endif::openshift-origin[] - -ifdef::openshift-origin[] -<5> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::ibm-powervs-vpc[] - -ifdef::restricted[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -controlPlane: <2> <3> - hyperthreading: Enabled <4> - name: master - platform: - replicas: 3 -compute: <2> <3> -- hyperthreading: Enabled <4> - name: worker - platform: - ibmcloud: {} - replicas: 3 -metadata: - name: example-restricted-cluster-name <1> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 <5> - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 <6> - networkType: OVNKubernetes <7> - serviceNetwork: - - 192.168.0.0/24 -platform: - powervs: - userid: ibm-user-id - powervsResourceGroup: "ibmcloud-resource-group" <8> - region: "powervs-region" - vpcRegion: "vpc-region" - vpcName: name-of-existing-vpc <9> - vpcSubnets: <10> - - name-of-existing-vpc-subnet - zone: "powervs-zone" - serviceInstanceID: "service-instance-id" -publish: Internal -credentialsMode: Manual -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <11> -sshKey: ssh-ed25519 AAAA... 
<12> -additionalTrustBundle: | <13> - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA_CERT> - -----END CERTIFICATE----- -imageContentSources: <14> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- -<1> Required. -<2> If you do not provide these parameters and values, the installation program provides the default value. -<3> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<4> Enables or disables simultaneous multithreading, also known as Hyper-Threading. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. -==== -<5> The machine CIDR must contain the subnets for the compute machines and control plane machines. -<6> The CIDR must contain the subnets defined in `platform.ibmcloud.controlPlaneSubnets` and `platform.ibmcloud.computeSubnets`. -<7> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<8> The name of an existing resource group. The existing VPC and subnets should be in this resource group. The cluster is deployed to this resource group. -<9> Specify the name of an existing VPC. -<10> Specify the name of the existing VPC subnet. The subnets must belong to the VPC that you specified. Specify a subnet for each availability zone in the region. -<11> For `<local_registry>`, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, registry.example.com or registry.example.com:5000. For `<credentials>`, specify the base64-encoded user name and password for your mirror registry. -<12> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. -<13> Provide the contents of the certificate file that you used for your mirror registry. -<14> Provide the `imageContentSources` section from the output of the command to mirror the repository. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. 
-==== -endif::restricted[] - -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!private: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-powervs-vpc: -endif::[] diff --git a/modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc b/modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc deleted file mode 100644 index 8f5e08fb281a..000000000000 --- a/modules/installation-ibm-z-kvm-user-infra-installing-rhcos.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc - -:_content-type: CONCEPT -[id="installation-ibm-z-kvm-user-infra-installing-rhcos_{context}"] -= Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on {ibmzProductName} infrastructure that you provision, you must install {op-system-first} as {op-system-base-full} guest virtual machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -You can perform a fast-track installation of {op-system} that uses a prepackaged QEMU copy-on-write (QCOW2) disk image. Alternatively, you can perform a full installation on a new QCOW2 disk image. - -To add further security to your system, you can optionally install {op-system} using IBM Secure Execution before proceeding to the fast-track installation. diff --git a/modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc b/modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc deleted file mode 100644 index 6afe796fd9cd..000000000000 --- a/modules/installation-ibm-z-kvm-user-infra-machines-iso.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-iso-ibm-z_kvm_{context}"] -= Fast-track installation by using a prepackaged QCOW2 disk image - -Complete the following steps to create the machines in a fast-track installation of {op-system-first}, importing a prepackaged {op-system-first} QEMU copy-on-write (QCOW2) disk image. - -.Prerequisites - -* At least one LPAR running on {op-system-base} 8.6 or later with KVM, referred to as {op-system-base} KVM host in this procedure. -* The KVM/QEMU hypervisor is installed on the {op-system-base} KVM host. -* A domain name server (DNS) that can perform hostname and reverse lookup for the nodes. -* A DHCP server that provides IP addresses. - -.Procedure - -. Obtain the {op-system-base} QEMU copy-on-write (QCOW2) disk image file from the link:https://access.redhat.com/downloads/content/290[Product Downloads] page on the Red Hat Customer Portal or from the link:https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/[{op-system} image mirror] page. 
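+
For example, one way to download the image directly from the mirror with `curl`; this is only a sketch, and the file name here is a placeholder, so check the mirror page for the exact name of the release that you need:
+
[source,terminal]
----
$ curl -LO https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/rhcos-<version>-qemu.s390x.qcow2.gz
----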
-+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal -to the {product-title} version that you install. Only use the appropriate {op-system} QCOW2 image described in the following procedure. -==== -+ -. Download the QCOW2 disk image and Ignition files to a common directory on the {op-system-base} KVM host. -+ -For example: `/var/lib/libvirt/images` -+ -[NOTE] -==== -The Ignition files are generated by the {product-title} installer. -==== -. Create a new disk image with the QCOW2 disk image backing file for each KVM guest node. -+ -[source,terminal] ----- -$ qemu-img create -f qcow2 -F qcow2 -b /var/lib/libvirt/images/{source_rhcos_qemu} /var/lib/libvirt/images/{vmname}.qcow2 {size} ----- -+ -. Create the new KVM guest nodes using the Ignition file and the new disk image. -+ -[source,terminal] ----- -$ virt-install --noautoconsole \ - --connect qemu:///system \ - --name {vn_name} \ - --memory {memory} \ - --vcpus {vcpus} \ - --disk {disk} \ - --import \ - --network network={network},mac={mac} \ - --disk path={ign_file},format=raw,readonly=on,serial=ignition,startup_policy=optional <1> ----- -<1> If IBM Secure Execution is enabled, replace `serial=ignition` with `serial=ignition_crypted`. diff --git a/modules/installation-ibm-z-troubleshooting-and-debugging.adoc b/modules/installation-ibm-z-troubleshooting-and-debugging.adoc deleted file mode 100644 index e56d9d1699ed..000000000000 --- a/modules/installation-ibm-z-troubleshooting-and-debugging.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z.adoc - -:_content-type: PROCEDURE -[id="installation-ibm-z-troubleshooting-and-debugging_{context}"] -= Collecting debugging information - -You can gather debugging information that might help you to troubleshoot and -debug certain issues with an {product-title} installation on {ibmzProductName}. - -.Prerequisites - -* The `oc` CLI tool installed. - -.Procedure - -. Log in to the cluster: -+ ----- -$ oc login -u <username> ----- - -. On the node you want to gather hardware information about, start a debugging -container: -+ ----- -$ oc debug node/<nodename> ----- - -. Change to the */host* file system and start `toolbox`: -+ ----- -$ chroot /host -$ toolbox ----- - -. Collect the `dbginfo` data: -+ ----- -$ dbginfo.sh ----- - -. You can then retrieve the data, for example, using `scp`. diff --git a/modules/installation-ibm-z-user-infra-machines-iso.adoc b/modules/installation-ibm-z-user-infra-machines-iso.adoc deleted file mode 100644 index 5ef3578456ec..000000000000 --- a/modules/installation-ibm-z-user-infra-machines-iso.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-iso-ibm-z_{context}"] -= Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on {ibmzProductName} infrastructure that you provision, you must install {op-system-first} on z/VM guest virtual machines. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. 
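For example, a minimal way to serve the generated Ignition files over HTTP while you test the procedure; this is only a sketch, any HTTP or HTTPS server that the guest virtual machines can reach works, and the port is an arbitrary choice:

[source,terminal]
----
$ cd <installation_directory>
$ python3 -m http.server 8080
----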
If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} z/VM guest virtual machines have rebooted. - -Complete the following steps to create the machines. - -.Prerequisites - -* An HTTP or HTTPS server running on your provisioning machine that is accessible to the machines you create. - -.Procedure - -. Log in to Linux on your provisioning machine. - -. Obtain the {op-system-first} kernel, initramfs, and rootfs files from the link:https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/[{op-system} image mirror]. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal -to the {product-title} version that you install. Only use the appropriate kernel, initramfs, and rootfs artifacts described in the following procedure. -==== -+ -The file names contain the {product-title} version number. They resemble the following examples: - -* kernel: `rhcos-<version>-live-kernel-<architecture>` -* initramfs: `rhcos-<version>-live-initramfs.<architecture>.img` -* rootfs: `rhcos-<version>-live-rootfs.<architecture>.img` -+ -[NOTE] -==== -The rootfs image is the same for FCP and DASD. -==== -+ -. Create parameter files. The following parameters are specific for a particular virtual machine: -** For `ip=`, specify the following seven entries: -... The IP address for the machine. -... An empty string. -... The gateway. -... The netmask. -... The machine host and domain name in the form `hostname.domainname`. Omit this value to let {op-system} decide. -... The network interface name. Omit this value to let {op-system} decide. -... If you use static IP addresses, specify `none`. -** For `coreos.inst.ignition_url=`, specify the Ignition file for the machine role. Use `bootstrap.ign`, `master.ign`, or `worker.ign`. Only HTTP and HTTPS protocols are supported. -** For `coreos.live.rootfs_url=`, specify the matching rootfs artifact for the kernel and initramfs you are booting. Only HTTP and HTTPS protocols are supported. - -** For installations on DASD-type disks, complete the following tasks: -... For `coreos.inst.install_dev=`, specify `/dev/dasda`. -... Use `rd.dasd=` to specify the DASD where {op-system} is to be installed. -... Leave all other parameters unchanged. -+ -Example parameter file, `bootstrap-0.parm`, for the bootstrap machine: -+ -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -coreos.inst.install_dev=/dev/dasda \ -coreos.live.rootfs_url=http://cl1.provide.example.com:8080/assets/rhcos-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://cl1.provide.example.com:8080/ignition/bootstrap.ign \ -ip=172.18.78.2::172.18.78.1:255.255.255.0:::none nameserver=172.18.78.1 \ -rd.znet=qeth,0.0.bdf0,0.0.bdf1,0.0.bdf2,layer2=1,portno=0 \ -zfcp.allow_lun_scan=0 \ -rd.dasd=0.0.3490 ----- -+ -Write all options in the parameter file as a single line and make sure you have no newline characters. - -** For installations on FCP-type disks, complete the following tasks: -... Use `rd.zfcp=<adapter>,<wwpn>,<lun>` to specify the FCP disk where {op-system} is to be installed. For multipathing repeat this step for each additional path. -+ -[NOTE] -==== -When you install with multiple paths, you must enable multipathing directly after the installation, not at a later point in time, as this can cause problems. -==== -... 
Set the install device as: `coreos.inst.install_dev=/dev/sda`. -+ -[NOTE] -==== -If additional LUNs are configured with NPIV, FCP requires `zfcp.allow_lun_scan=0`. If you must enable `zfcp.allow_lun_scan=1` because you use a CSI driver, for example, you must configure your NPIV so that each node cannot access the boot partition of another node. -==== -... Leave all other parameters unchanged. -+ -[IMPORTANT] -==== -Additional post-installation steps are required to fully enable multipathing. For more information, see “Enabling multipathing with kernel arguments on {op-system}" in _Post-installation machine configuration tasks_. -==== -// Add xref once it's allowed. -+ -The following is an example parameter file `worker-1.parm` for a worker node with multipathing: -+ -[source,terminal] ----- -rd.neednet=1 \ -console=ttysclp0 \ -coreos.inst.install_dev=/dev/sda \ -coreos.live.rootfs_url=http://cl1.provide.example.com:8080/assets/rhcos-live-rootfs.s390x.img \ -coreos.inst.ignition_url=http://cl1.provide.example.com:8080/ignition/worker.ign \ -ip=172.18.78.2::172.18.78.1:255.255.255.0:::none nameserver=172.18.78.1 \ -rd.znet=qeth,0.0.bdf0,0.0.bdf1,0.0.bdf2,layer2=1,portno=0 \ -zfcp.allow_lun_scan=0 \ -rd.zfcp=0.0.1987,0x50050763070bc5e3,0x4008400B00000000 \ -rd.zfcp=0.0.19C7,0x50050763070bc5e3,0x4008400B00000000 \ -rd.zfcp=0.0.1987,0x50050763071bc5e3,0x4008400B00000000 \ -rd.zfcp=0.0.19C7,0x50050763071bc5e3,0x4008400B00000000 ----- -+ -Write all options in the parameter file as a single line and make sure you have no newline characters. - -. Transfer the initramfs, kernel, parameter files, and {op-system} images to z/VM, for example with FTP. For details about how to transfer the files with FTP and boot from the virtual reader, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/installation_guide/sect-installing-zvm-s390[Installing under Z/VM]. -. Punch the files to the virtual reader of the z/VM guest virtual machine that is to become your bootstrap node. -+ -See link:https://www.ibm.com/docs/en/zvm/7.1?topic=commands-punch[PUNCH] in IBM Documentation. -+ -[TIP] -==== -You can use the CP PUNCH command or, if you use Linux, the **vmur** command to transfer files between two z/VM guest virtual machines. -==== -+ -. Log in to CMS on the bootstrap machine. -. IPL the bootstrap machine from the reader: -+ ----- -$ ipl c ----- -+ -See link:https://www.ibm.com/docs/en/zvm/7.1?topic=commands-ipl[IPL] in IBM Documentation. -+ -. Repeat this procedure for the other machines in the cluster. diff --git a/modules/installation-identify-supported-aws-outposts-instance-types.adoc b/modules/installation-identify-supported-aws-outposts-instance-types.adoc deleted file mode 100644 index 0e184d710a29..000000000000 --- a/modules/installation-identify-supported-aws-outposts-instance-types.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -:_content-type: PROCEDURE -[id="installation-identify-supported-aws-outposts-instance-types_{context}"] -= Identifying your AWS Outposts instance types - -AWS Outposts rack catalog includes options supporting the latest generation Intel powered EC2 instance types with or without local instance storage. -Identify which instance types are configured in your AWS Outpost instance. 
As part of the installation process, you must update the `install-config.yaml` file with the instance type that the installation program will use to deploy worker nodes. - -.Procedure - -Use the AWS CLI to get the list of supported instance types by running the following command: -[source,terminal] ----- -$ aws outposts get-outpost-instance-types --outpost-id <outpost_id> <1> ----- -<1> For `<outpost_id>`, specify the Outpost ID that is used in the AWS account for the worker instances. - -+ -[IMPORTANT] -==== -When you purchase capacity for your AWS Outpost instance, you specify an EC2 capacity layout that each server provides. Each server supports a single family of instance types. A layout can offer a single instance type or multiple instance types. Dedicated Hosts allow you to alter what you chose for that initial layout. If you allocate a host to support a single instance type for the entire capacity, you can only start a single instance type from that host. -==== - -The supported instance types in AWS Outposts might change. For more information, see the link:https://aws.amazon.com/outposts/rack/features/#Compute_and_storage[Compute and Storage] page in the AWS Outposts documentation. diff --git a/modules/installation-images-samples-disconnected-mirroring-assist.adoc b/modules/installation-images-samples-disconnected-mirroring-assist.adoc deleted file mode 100644 index f01833243456..000000000000 --- a/modules/installation-images-samples-disconnected-mirroring-assist.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * openshift_images/configuring-samples-operator.adoc - -[id="installation-images-samples-disconnected-mirroring-assist_{context}"] -= Cluster Samples Operator assistance for mirroring - -During installation, {product-title} creates a config map named `imagestreamtag-to-image` in the `openshift-cluster-samples-operator` namespace. The `imagestreamtag-to-image` config map contains an entry, the populating image, for each image stream tag. - -The format of the key for each entry in the data field in the config map is `<image_stream_name>_<image_stream_tag_name>`. - -During a disconnected installation of {product-title}, the status of the Cluster Samples Operator is set to `Removed`. If you choose to change it to `Managed`, it installs samples. -[NOTE] -==== -The use of samples in a network-restricted or disconnected environment might require access to services that are external to your network. Example services include GitHub, Maven Central, npm, RubyGems, and PyPI. You might need to take additional steps to allow the Cluster Samples Operator's objects to reach the services that they require. -==== - -You can use this config map as a reference for which images need to be mirrored for your image streams to import. - -* While the Cluster Samples Operator is set to `Removed`, you can create your mirrored registry, or determine which existing mirrored registry you want to use. -* Mirror the samples you want to the mirrored registry using the new config map as your guide. -* Add any of the image streams you did not mirror to the `skippedImagestreams` list of the Cluster Samples Operator configuration object. -* Set `samplesRegistry` of the Cluster Samples Operator configuration object to the mirrored registry.
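+
For example, a sketch of this step using `oc patch`; the registry host name is a placeholder, and the command assumes the default configuration object, which is named `cluster`:
+
[source,terminal]
----
$ oc patch configs.samples.operator.openshift.io cluster --type merge \
    --patch '{"spec":{"samplesRegistry":"<mirror_host_name>:5000"}}'
----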
-* Then set the Cluster Samples Operator to `Managed` to install the image streams you have mirrored. diff --git a/modules/installation-infrastructure-user-infra.adoc b/modules/installation-infrastructure-user-infra.adoc deleted file mode 100644 index 4122df6c5b8e..000000000000 --- a/modules/installation-infrastructure-user-infra.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-power.adoc - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] - -:_content-type: PROCEDURE -[id="installation-infrastructure-user-infra_{context}"] -= Preparing the user-provisioned infrastructure - -Before you install {product-title} on user-provisioned infrastructure, you must prepare the underlying infrastructure. - -This section provides details about the high-level steps required to set up your cluster infrastructure in preparation for an {product-title} installation. This includes configuring IP networking and network connectivity for your cluster nodes, -ifdef::ibm-z[] -preparing a web server for the Ignition files, -endif::ibm-z[] -enabling the required ports through your firewall, and setting up the required DNS and load balancing infrastructure. - -After preparation, your cluster infrastructure must meet the requirements outlined in the _Requirements for a cluster with user-provisioned infrastructure_ section. - -.Prerequisites - -* You have reviewed the link:https://access.redhat.com/articles/4128421[{product-title} 4.x Tested Integrations] page. -* You have reviewed the infrastructure requirements detailed in the _Requirements for a cluster with user-provisioned infrastructure_ section. - -.Procedure - -ifdef::ibm-z[] -. Set up static IP addresses. -. Set up an HTTP or HTTPS server to provide Ignition files to the cluster nodes. -endif::ibm-z[] -ifndef::ibm-z[] -. If you are using DHCP to provide the IP networking configuration to your cluster nodes, configure your DHCP service. -.. Add persistent IP addresses for the nodes to your DHCP server configuration. In your configuration, match the MAC address of the relevant network interface to the intended IP address for each node. -.. When you use DHCP to configure IP addressing for the cluster machines, the machines also obtain the DNS server information through DHCP. Define the persistent DNS server address that is used by the cluster nodes through your DHCP server configuration. 
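+
For example, a sketch of a single host entry for an ISC DHCP server that covers the MAC-to-IP mapping, the DNS server address, and the host name; every value shown is a placeholder:
+
[source,text]
----
host control-plane-0 {
  hardware ethernet 52:54:00:aa:bb:cc;
  fixed-address 192.168.1.10;
  option host-name "control-plane-0.ocp4.example.com";
  option domain-name-servers 192.168.1.1;
}
----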
-+ -[NOTE] -==== -If you are not using a DHCP service, you must provide the IP networking configuration and the address of the DNS server to the nodes at {op-system} install time. These can be passed as boot arguments if you are installing from an ISO image. See the _Installing {op-system} and starting the {product-title} bootstrap process_ section for more information about static IP provisioning and advanced networking options. -==== -+ -.. Define the hostnames of your cluster nodes in your DHCP server configuration. See the _Setting the cluster node hostnames through DHCP_ section for details about hostname considerations. -+ -[NOTE] -==== -If you are not using a DHCP service, the cluster nodes obtain their hostname through a reverse DNS lookup. -==== -endif::ibm-z[] -ifdef::ibm-z-kvm[] -. Choose to perform either a fast-track installation of {op-system-first} or a full installation of {op-system-first}. For the full installation, you must set up an HTTP or HTTPS server to provide Ignition files and install images to the cluster nodes. For the fast-track installation, an HTTP or HTTPS server is not required; however, a DHCP server is required. See the sections "Fast-track installation: Creating {op-system-first} machines" and "Full installation: Creating {op-system-first} machines". -endif::ibm-z-kvm[] - -. Ensure that your network infrastructure provides the required network connectivity between the cluster components. See the _Networking requirements for user-provisioned infrastructure_ section for details about the requirements. - -. Configure your firewall to enable the ports required for the {product-title} cluster components to communicate. See the _Networking requirements for user-provisioned infrastructure_ section for details about the ports that are required. -+ -[IMPORTANT] -==== -By default, port `1936` is accessible for an {product-title} cluster, because each control plane node needs access to this port. - -Avoid using the Ingress load balancer to expose this port, because doing so might result in the exposure of sensitive information, such as statistics and metrics, related to Ingress Controllers. -==== - -. Set up the required DNS infrastructure for your cluster. -.. Configure DNS name resolution for the Kubernetes API, the application wildcard, the bootstrap machine, the control plane machines, and the compute machines. -.. Configure reverse DNS resolution for the Kubernetes API, the bootstrap machine, the control plane machines, and the compute machines. -+ -See the _User-provisioned DNS requirements_ section for more information about the {product-title} DNS requirements. - -. Validate your DNS configuration. -.. From your installation node, run DNS lookups against the record names of the Kubernetes API, the wildcard routes, and the cluster nodes. Validate that the IP addresses in the responses correspond to the correct components. -.. From your installation node, run reverse DNS lookups against the IP addresses of the load balancer and the cluster nodes. Validate that the record names in the responses correspond to the correct components. -+ -See the _Validating DNS resolution for user-provisioned infrastructure_ section for detailed DNS validation steps. - -. Provision the required API and application ingress load balancing infrastructure. See the _Load balancing requirements for user-provisioned infrastructure_ section for more information about the requirements.
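+
For example, a heavily trimmed HAProxy sketch of the API frontend only; the addresses and server names are placeholders, and the ingress frontends for ports 443 and 80 and the machine config server on port 22623 follow the same pattern:
+
[source,text]
----
frontend api
    bind *:6443
    mode tcp
    default_backend api
backend api
    mode tcp
    balance roundrobin
    server bootstrap 192.168.1.5:6443 check
    server control-plane-0 192.168.1.10:6443 check
----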
- -[NOTE] -==== -Some load balancing solutions require the DNS name resolution for the cluster nodes to be in place before the load balancing is initialized. -==== - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] diff --git a/modules/installation-initializing-manual.adoc b/modules/installation-initializing-manual.adoc deleted file mode 100644 index a704b6476aeb..000000000000 --- a/modules/installation-initializing-manual.adoc +++ /dev/null @@ -1,261 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-shared-vpc.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc - -ifeval::["{context}" == "installing-azure-government-region"] -:azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:aws-private: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure-private: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:gcp-private: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:gcp-shared: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash-default: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash-network: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-cloud-private: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power-vs-private: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:three-node-cluster: -endif::[] - -:_content-type: PROCEDURE 
-[id="installation-initializing-manual_{context}"] -= Manually creating the installation configuration file - -ifndef::aws-china,aws-gov,aws-secret,azure-gov,ash,aws-private,azure-private,gcp-private,gcp-shared,ash-default,ash-network,ibm-cloud-private,ibm-power-vs-private[] -For user-provisioned installations of {product-title}, you manually generate your installation configuration file. -endif::aws-china,aws-gov,aws-secret,azure-gov,ash,aws-private,azure-private,gcp-private,gcp-shared,ash-default,ash-network,ibm-cloud-private,ibm-power-vs-private[] -ifdef::aws-china,aws-gov,aws-secret[] -Installing the cluster requires that you manually generate the installation configuration file. -//Made this update as part of feedback in PR3961. tl;dr Simply state you have to create the config file, instead of creating a number of conditions to explain why. -endif::aws-china,aws-gov,aws-secret[] -ifdef::azure-gov[] -When installing {product-title} on Microsoft Azure into a government region, you -must manually generate your installation configuration file. -endif::azure-gov[] -ifdef::aws-private,azure-private,gcp-private,ibm-cloud-private,ibm-power-vs-private[] -When installing a private {product-title} cluster, you must manually generate the installation configuration file. -endif::aws-private,azure-private,gcp-private,ibm-cloud-private,ibm-power-vs-private[] -ifdef::ash-default,ash-network[] -When installing {product-title} on Microsoft Azure Stack Hub, you must manually create your installation configuration file. -endif::ash-default,ash-network[] -ifdef::gcp-shared[] -You must manually create your installation configuration file when installing {product-title} on GCP into a shared VPC using installer-provisioned infrastructure. -endif::gcp-shared[] - -.Prerequisites - -ifdef::aws-china,aws-secret[] -* You have uploaded a custom RHCOS AMI. -endif::aws-china,aws-secret[] -* You have an SSH public key on your local machine to provide to the installation program. The key will be used for SSH authentication onto your cluster nodes for debugging and disaster recovery. -* You have obtained the {product-title} installation program and the pull secret for your -cluster. -ifdef::restricted[] -* Obtain the `imageContentSources` section from the output of the command to -mirror the repository. -* Obtain the contents of the certificate for your mirror registry. -endif::restricted[] - -.Procedure - -. Create an installation directory to store your required installation assets in: -+ -[source,terminal] ----- -$ mkdir <installation_directory> ----- -+ -[IMPORTANT] -==== -You must create a directory. Some installation assets, like bootstrap X.509 -certificates have short expiration intervals, so you must not reuse an -installation directory. If you want to reuse individual files from another -cluster installation, you can copy them into your directory. However, the file -names for the installation assets might change between releases. Use caution -when copying installation files from an earlier {product-title} version. -==== - -. Customize the sample `install-config.yaml` file template that is provided and save -it in the `<installation_directory>`. -+ -[NOTE] -==== -You must name this configuration file `install-config.yaml`. -==== -ifdef::restricted[] -** Unless you use a registry that {op-system} trusts by default, such as -`docker.io`, you must provide the contents of the certificate for your mirror -repository in the `additionalTrustBundle` section. 
In most cases, you must -provide the certificate for your mirror. -** You must include the `imageContentSources` section from the output of the command to -mirror the repository. -endif::restricted[] -+ - -ifndef::aws-china,aws-gov,aws-secret,azure-gov,ash,ash-default,ash-network,gcp-shared,ibm-cloud-private,ibm-power-vs-private[] -[NOTE] -==== -For some platform types, you can alternatively run `./openshift-install create install-config --dir <installation_directory>` to generate an `install-config.yaml` file. You can provide details about your cluster configuration at the prompts. -==== -endif::aws-china,aws-gov,aws-secret,azure-gov,ash,ash-default,ash-network,gcp-shared,ibm-cloud-private,ibm-power-vs-private[] -ifdef::ash[] -+ -Make the following modifications for Azure Stack Hub: - -.. Set the `replicas` parameter to `0` for the `compute` pool: -+ -[source,yaml] ----- -compute: -- hyperthreading: Enabled - name: worker - platform: {} - replicas: 0 <1> ----- -<1> Set to `0`. -+ -The compute machines will be provisioned manually later. - -.. Update the `platform.azure` section of the `install-config.yaml` file to configure your Azure Stack Hub configuration: -+ -[source,yaml] ----- -platform: - azure: - armEndpoint: <azurestack_arm_endpoint> <1> - baseDomainResourceGroupName: <resource_group> <2> - cloudName: AzureStackCloud <3> - region: <azurestack_region> <4> ----- -<1> Specify the Azure Resource Manager endpoint of your Azure Stack Hub environment, like `\https://management.local.azurestack.external`. -<2> Specify the name of the resource group that contains the DNS zone for your base domain. -<3> Specify the Azure Stack Hub environment, which is used to configure the Azure SDK with the appropriate Azure API endpoints. -<4> Specify the name of your Azure Stack Hub region. -endif::ash[] - -ifdef::ash-default,ash-network[] -+ -Make the following modifications: - -.. Specify the required installation parameters. - -.. Update the `platform.azure` section to specify the parameters that are specific to Azure Stack Hub. - -.. Optional: Update one or more of the default configuration parameters to customize the installation. -+ -For more information about the parameters, see "Installation configuration parameters". -endif::ash-default,ash-network[] - -ifdef::three-node-cluster[] -. If you are installing a three-node cluster, modify the `install-config.yaml` file by setting the `compute.replicas` parameter to `0`. This ensures that the cluster's control planes are schedulable. For more information, see "Installing a three-node cluster on {platform}". -endif::three-node-cluster[] - -. Back up the `install-config.yaml` file so that you can use it to install -multiple clusters. -+ -[IMPORTANT] -==== -The `install-config.yaml` file is consumed during the next step of the -installation process. You must back it up now. 
-==== - -ifeval::["{context}" == "installing-azure-government-region"] -:!azure-gov: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-gov: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!aws-private: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure-private: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:!gcp-private: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!gcp-shared: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash-default: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash-network: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-cloud-private: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power-vs-private: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:!three-node-cluster: -endif::[] -:!platform: diff --git a/modules/installation-initializing.adoc b/modules/installation-initializing.adoc deleted file mode 100644 index 651f4ef9eebc..000000000000 --- a/modules/installation-initializing.adoc +++ /dev/null @@ -1,818 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-alibaba-default.adoc -// * installing/installing_aws/installing-alibaba-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_aws/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-shared-vpc.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * 
installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_nutanix/configuring-iam-nutanix.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -// * installing/installing_gcp/installing-openstack-installer-restricted.adoc -// Consider also adding the installation-configuration-parameters.adoc module. -//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE - -ifeval::["{context}" == "installing-alibaba-default"] -:alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:alibabacloud-custom: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:alibabacloud-vpc: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:aws: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:aws: -:restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:aws: -:aws-outposts: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:azure: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:gcp: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:gcp: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:gcp: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:gcp: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power-vs: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-cloud: -endif::[] -ifeval::["{context}" 
== "installing-ibm-cloud-private"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp: -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp: -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp: -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp: -:osp-user: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -:rhv: -endif::[] -ifeval::["{context}" == "installing-rhv-default"] -:rhv: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:vsphere: -:restricted: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:nutanix: -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-initializing_{context}"] -= Creating the installation configuration file - -You can customize the {product-title} cluster you install on -ifdef::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -Alibaba Cloud. -endif::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -ifdef::aws[] -Amazon Web Services (AWS). -endif::aws[] -ifdef::azure[] -Microsoft Azure. -endif::azure[] -ifdef::gcp[] -Google Cloud Platform (GCP). -endif::gcp[] -ifdef::ibm-cloud[] -IBM Cloud. -endif::ibm-cloud[] -ifdef::osp[] -{rh-openstack-first}. -endif::osp[] -ifdef::vsphere[] -VMware vSphere. -endif::vsphere[] -ifdef::rhv[] -{rh-virtualization-first}. -endif::rhv[] -ifdef::nutanix[] -Nutanix. -endif::nutanix[] - -.Prerequisites - -* Obtain the {product-title} installation program and the pull secret for your cluster. -ifdef::restricted[] -For a restricted network installation, these files are on your mirror host. -ifndef::nutanix[] -* Have the `imageContentSources` values that were generated during mirror registry creation. -endif::nutanix[] -ifdef::nutanix+restricted[] -* Have the `imageContentSourcePolicy.yaml` file that was created when you mirrored your registry. -* Have the location of the {op-system-first} image you download. -endif::nutanix+restricted[] -* Obtain the contents of the certificate for your mirror registry. -ifndef::aws,gcp[] -* Retrieve a {op-system-first} image and upload it to an accessible location. -endif::aws,gcp[] -endif::restricted[] -ifndef::nutanix[] -* Obtain service principal permissions at the subscription level. -endif::nutanix[] -ifdef::nutanix[] -* Verify that you have met the Nutanix networking requirements. For more information, see "Preparing to install on Nutanix". -endif::nutanix[] - -.Procedure - -. Create the `install-config.yaml` file. -+ -.. 
Change to the directory that contains the installation program and run the following command: -+ -[source,terminal] ----- -$ ./openshift-install create install-config --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the directory name to store the -files that the installation program creates. -+ -When specifying the directory: -* Verify that the directory has the `execute` permission. This permission is required to run Terraform binaries under the installation directory. -* Use an empty directory. Some installation assets, such as bootstrap X.509 certificates, have short expiration intervals, therefore you must not reuse an installation directory. If you want to reuse individual files from another cluster installation, you can copy them into your directory. However, the file names for the installation assets might change between releases. Use caution when copying installation files from an earlier {product-title} version. - -ifndef::rhv[] -.. At the prompts, provide the configuration details for your cloud: -... Optional: Select an SSH key to use to access your cluster machines. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::rhv[] -ifdef::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -... Select *alibabacloud* as the platform to target. -... Select the region to deploy the cluster to. -... Select the base domain to deploy the cluster to. The base domain corresponds to the public DNS zone that you created for your cluster. -... Provide a descriptive name for your cluster. -endif::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -ifdef::aws[] -... Select *AWS* as the platform to target. -... If you do not have an Amazon Web Services (AWS) profile stored on your computer, enter the AWS -access key ID and secret access key for the user that you configured to run the -installation program. -... Select the AWS region to deploy the cluster to. -... Select the base domain for the Route 53 service that you configured for your cluster. -endif::aws[] -ifdef::azure[] -... Select *azure* as the platform to target. -... If you do not have a Microsoft Azure profile stored on your computer, specify the -following Azure parameter values for your subscription and service principal: -**** *azure subscription id*: The subscription ID to use for the cluster. -Specify the `id` value in your account output. -**** *azure tenant id*: The tenant ID. Specify the `tenantId` value in your -account output. -**** *azure service principal client id*: The value of the `appId` parameter -for the service principal. -**** *azure service principal client secret*: The value of the `password` -parameter for the service principal. -... Select the region to deploy the cluster to. -... Select the base domain to deploy the cluster to. The base domain corresponds -to the Azure DNS Zone that you created for your cluster. -endif::azure[] -ifdef::gcp[] -... Select *gcp* as the platform to target. -... If you have not configured the service account key for your GCP account on -your computer, you must obtain it from GCP and paste the contents of the file -or enter the absolute path to the file. -... Select the project ID to provision the cluster in. The default value is -specified by the service account that you configured. -... Select the region to deploy the cluster to. -... Select the base domain to deploy the cluster to. 
The base domain corresponds -to the public DNS zone that you created for your cluster. -endif::gcp[] -ifdef::ibm-cloud[] -... Select *ibmcloud* as the platform to target. -... Select the region to deploy the cluster to. -... Select the base domain to deploy the cluster to. The base domain corresponds -to the public DNS zone that you created for your cluster. -endif::ibm-cloud[] -ifdef::ibm-power-vs[] -... Select *powervs* as the platform to target. -... Select the region to deploy the cluster to. -... Select the zone to deploy the cluster to. -... Select the base domain to deploy the cluster to. The base domain corresponds -to the public DNS zone that you created for your cluster. -endif::ibm-power-vs[] -ifdef::osp[] -... Select *openstack* as the platform to target. -... Specify the {rh-openstack-first} external network name to use for installing the cluster. -... Specify the floating IP address to use for external access to the OpenShift API. -... Specify a {rh-openstack} flavor with at least 16 GB RAM to use for control plane nodes -and 8 GB RAM for compute nodes. -... Select the base domain to deploy the cluster to. All DNS records will be -sub-domains of this base and will also include the cluster name. -endif::osp[] -ifdef::vsphere[] -... Select *vsphere* as the platform to target. -... Specify the name of your vCenter instance. -... Specify the user name and password for the vCenter account that has the required permissions to create the cluster. -+ -The installation program connects to your vCenter instance. -... Select the data center in your vCenter instance to connect to. -+ -[NOTE] -==== -After you create the installation configuration file, you can modify the file to create a multiple vSphere datacenters environment. This means that you can deploy an {product-title} cluster to multiple vSphere datacenters that run in a single VMware vCenter. For more information about creating this environment, see the section named _VMware vSphere region and zone enablement_. -==== - -... Select the default vCenter datastore to use. -... Select the vCenter cluster to install the {product-title} cluster in. The installation program uses the root resource pool of the vSphere cluster as the default resource pool. -... Select the network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -... Enter the virtual IP address that you configured for control plane API access. -... Enter the virtual IP address that you configured for cluster ingress. -... Enter the base domain. This base domain must be the same one that you used in the DNS records that you configured. -endif::vsphere[] -ifdef::nutanix[] -... Select *nutanix* as the platform to target. -... Enter the Prism Central domain name or IP address. -... Enter the port that is used to log into Prism Central. -... Enter the credentials that are used to log into Prism Central. -+ -The installation program connects to Prism Central. -... Select the Prism Element that will manage the {product-title} cluster. -... Select the network subnet to use. -... Enter the virtual IP address that you configured for control plane API access. -... Enter the virtual IP address that you configured for cluster ingress. -... Enter the base domain. This base domain must be the same one that you configured in the DNS records. -endif::nutanix[] -ifndef::osp[] -ifndef::rhv,alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -... Enter a descriptive name for your cluster. 
-ifdef::vsphere,nutanix[] -The cluster name you enter must match the cluster name you specified when configuring the DNS records. -endif::vsphere,nutanix[] -endif::rhv,alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -endif::osp[] -ifdef::osp[] -... Enter a name for your cluster. The name must be 14 or fewer characters long. -endif::osp[] -ifdef::azure[] -+ -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to -resource name restrictions, and you cannot create resources that use certain -terms. For a list of terms that Azure restricts, see -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] -in the Azure documentation. -==== -endif::azure[] -ifdef::rhv[] -.. Respond to the installation program prompts. -... For `SSH Public Key`, select a password-less public key, such as `~/.ssh/id_rsa.pub`. This key authenticates connections with the new {product-title} cluster. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, select an SSH key that your `ssh-agent` process uses. -==== -... For `Platform`, select `ovirt`. -... For `Enter oVirt's API endpoint URL`, enter the URL of the {rh-virtualization} API using this format: -+ -[source,terminal] ----- -https://<engine-fqdn>/ovirt-engine/api <1> ----- -<1> For `<engine-fqdn>`, specify the fully qualified domain name of the {rh-virtualization} environment. -+ -For example: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ curl -k -u ocpadmin@internal:pw123 \ -https://rhv-env.virtlab.example.com/ovirt-engine/api ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ curl -k -u admin@internal:pw123 \ -https://ovirtlab.example.com/ovirt-engine/api ----- -endif::openshift-origin[] -+ -... For `Is the oVirt CA trusted locally?`, enter `Yes`, because you have already set up a CA certificate. Otherwise, enter `No`. - -... For `oVirt's CA bundle`, if you entered `Yes` for the preceding question, copy the certificate content from `/etc/pki/ca-trust/source/anchors/ca.pem` and paste it here. Then, press `Enter` twice. Otherwise, if you entered `No` for the preceding question, this question does not appear. -... For `oVirt engine username`, enter the user name and profile of the {rh-virtualization} administrator using this format: -+ -[source,terminal] ----- -<username>@<profile> <1> ----- -<1> For `<username>`, specify the user name of an {rh-virtualization} administrator. For `<profile>`, specify the login profile, which you can get by going to the {rh-virtualization} Administration Portal login page and reviewing the *Profile* dropdown list. Together, the user name and profile should look similar to this example: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -ocpadmin@internal ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -admin@internal ----- -endif::openshift-origin[] -+ -... For `oVirt engine password`, enter the {rh-virtualization} admin password. -... For `oVirt cluster`, select the cluster for installing {product-title}. -... For `oVirt storage domain`, select the storage domain for installing {product-title}. -... For `oVirt network`, select a virtual network that has access to the {rh-virtualization} {rh-virtualization-engine-name} REST API. -... 
For `Internal API Virtual IP`, enter the static IP address you set aside for the cluster's REST API. -... For `Ingress virtual IP`, enter the static IP address you reserved for the wildcard apps domain. -... For `Base Domain`, enter the base domain of the {product-title} cluster. If this cluster is exposed to the outside world, this must be a valid domain recognized by DNS infrastructure. For example, enter: `virtlab.example.com` -... For `Cluster Name`, enter the name of the cluster. For example, `my-cluster`. Use the cluster name from the externally registered and resolvable DNS entries that you created for the {product-title} REST API and apps domain names. The installation program also gives this name to the cluster in the {rh-virtualization} environment. -... For `Pull Secret`, copy the pull secret from the `pull-secret.txt` file you downloaded earlier and paste it here. You can also get a copy of the same {cluster-manager-url-pull}. -endif::rhv[] -ifndef::rhv[] -... Paste the {cluster-manager-url-pull}. -ifdef::openshift-origin[] -This field is optional. -endif::[] -endif::rhv[] - -ifdef::aws-outposts[] -. Modify the `install-config.yaml` file. The AWS Outposts installation has the following limitations, which require manual modification of the `install-config.yaml` file: - -* Unlike AWS Regions, which offer near-infinite scale, AWS Outposts are limited by their provisioned capacity, EC2 family and generations, configured instance sizes, and availability of compute capacity that is not already consumed by other workloads. Therefore, when creating a new {product-title} cluster, you must provide the supported instance type in the `compute.platform.aws.type` section in the configuration file. -* When deploying a {product-title} cluster with remote workers running in AWS Outposts, only one Availability Zone can be used for the compute instances: the Availability Zone in which the Outpost instance was created. Therefore, when creating a new {product-title} cluster, it is recommended to provide the relevant Availability Zone in the `compute.platform.aws.zones` section of the configuration file to limit the compute instances to this Availability Zone. -* Amazon Elastic Block Store (EBS) gp3 volumes are not supported by the AWS Outposts service. This volume type is the default type used by the {product-title} cluster. Therefore, when creating a new {product-title} cluster, you must change the volume type in the `compute.platform.aws.rootVolume.type` section to `gp2`. -You can find more information about how to change these values below. -endif::aws-outposts[] - -ifndef::restricted,alibabacloud-default,alibabacloud-custom,alibabacloud-vpc,nutanix,aws-outposts[] -. Modify the `install-config.yaml` file. You can find more information about -the available parameters in the "Installation configuration parameters" section. -endif::restricted,alibabacloud-default,alibabacloud-custom,alibabacloud-vpc,nutanix,aws-outposts[] -ifdef::three-node-cluster[] -+ -[NOTE] -==== -If you are installing a three-node cluster, be sure to set the `compute.replicas` parameter to `0`. This ensures that the cluster's control planes are schedulable. For more information, see "Installing a three-node cluster on {platform}". -==== -endif::three-node-cluster[] - -ifdef::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] -. Installing the cluster into Alibaba Cloud requires that the Cloud Credential Operator (CCO) operate in manual mode.
Modify the `install-config.yaml` file to set the `credentialsMode` parameter to `Manual`: -+ -.Example install-config.yaml configuration file with `credentialsMode` set to `Manual` -[source,yaml] ----- -apiVersion: v1 -baseDomain: cluster1.example.com -credentialsMode: Manual <1> -compute: -- architecture: amd64 - hyperthreading: Enabled - ... ----- -<1> Add this line to set the `credentialsMode` to `Manual`. -endif::alibabacloud-default,alibabacloud-custom,alibabacloud-vpc[] - -ifdef::alibabacloud-custom,alibabacloud-vpc[] -. Modify the `install-config.yaml` file. You can find more information about -the available parameters in the "Installation configuration parameters" section. -endif::alibabacloud-custom,alibabacloud-vpc[] - -ifndef::restricted[] - -ifdef::rhv[] -+ -[NOTE] -==== -If you have any intermediate CA certificates on the {rh-virtualization-engine-name}, verify that the certificates appear in the `ovirt-config.yaml` file and the `install-config.yaml` file. If they do not appear, add them as follows: - -. In the `~/.ovirt/ovirt-config.yaml` file: -+ -[source,yaml] ----- -[ovirt_ca_bundle]: | - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA> - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - <INTERMEDIATE_CA> - -----END CERTIFICATE----- ----- -. In the `install-config.yaml` file: -+ -[source,yaml] ----- -[additionalTrustBundle]: | - -----BEGIN CERTIFICATE----- - <MY_TRUSTED_CA> - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - <INTERMEDIATE_CA> - -----END CERTIFICATE----- ----- -==== -endif::rhv[] -endif::restricted[] - -ifdef::osp+restricted[] -. In the `install-config.yaml` file, set the value of `platform.openstack.clusterOSImage` to the image location or name. For example: -+ -[source,yaml] ----- -platform: - openstack: - clusterOSImage: http://mirror.example.com/images/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d ----- -endif::osp+restricted[] -ifdef::vsphere+restricted[] -. In the `install-config.yaml` file, set the value of `platform.vsphere.clusterOSImage` to the image location or name. For example: -+ -[source,yaml] ----- -platform: - vsphere: - clusterOSImage: http://mirror.example.com/images/rhcos-43.81.201912131630.0-vmware.x86_64.ova?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d ----- -endif::vsphere+restricted[] -ifdef::nutanix+restricted[] -. In the `install-config.yaml` file, set the value of `platform.nutanix.clusterOSImage` to the image location or name. For example: -+ -[source,yaml] ----- -platform: - nutanix: - clusterOSImage: http://mirror.example.com/images/rhcos-47.83.202103221318-0-nutanix.x86_64.qcow2 ----- -endif::nutanix+restricted[] -ifdef::restricted[] -. Edit the `install-config.yaml` file to give the additional information that -is required for an installation in a restricted network. -.. Update the `pullSecret` value to contain the authentication information for -your registry: -+ -[source,yaml] ----- -pullSecret: '{"auths":{"<mirror_host_name>:5000": {"auth": "<credentials>","email": "you@example.com"}}}' ----- -+ -For `<mirror_host_name>`, specify the registry domain name -that you specified in the certificate for your mirror registry, and for -`<credentials>`, specify the base64-encoded user name and password for -your mirror registry. -.. Add the `additionalTrustBundle` parameter and value. 
-+ -[source,yaml] ----- -additionalTrustBundle: | - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- ----- -+ -The value must be the contents of the certificate file that you used for your mirror registry. The certificate file can be an existing, trusted certificate authority, or the self-signed certificate that you generated for the mirror registry. - -ifdef::aws+restricted[] -.. Define the subnets for the VPC to install the cluster in: -+ -[source,yaml] ----- -subnets: -- subnet-1 -- subnet-2 -- subnet-3 ----- -endif::aws+restricted[] -ifdef::gcp+restricted[] -.. Define the network and subnets for the VPC to install the cluster in under the parent `platform.gcp` field: -+ -[source,yaml] ----- -network: <existing_vpc> -controlPlaneSubnet: <control_plane_subnet> -computeSubnet: <compute_subnet> ----- -+ -For `platform.gcp.network`, specify the name for the existing Google VPC. For `platform.gcp.controlPlaneSubnet` and `platform.gcp.computeSubnet`, specify the existing subnets to deploy the control plane machines and compute machines, respectively. -endif::gcp+restricted[] -ifdef::ibm-power-vs+restricted[] -.. Define the network and subnets for the VPC to install the cluster in under the parent `platform.ibmcloud` field: -+ -[source,yaml] ----- -vpcName: <existing_vpc> -vpcSubnets: <vpcSubnet> ----- -+ -For `platform.powervs.vpcName`, specify the name for the existing IBM Cloud VPC. For `platform.powervs.vpcSubnets`, specify the existing subnets. -endif::ibm-power-vs+restricted[] - -.. Add the image content resources, which resemble the following YAML excerpt: -+ -[source,yaml] ----- -imageContentSources: -- mirrors: - - <mirror_host_name>:5000/<repo_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <mirror_host_name>:5000/<repo_name>/release - source: registry.redhat.io/ocp/release ----- -+ -ifndef::nutanix[] -For these values, use the `imageContentSources` that you recorded during mirror registry creation. -endif::nutanix[] -ifdef::nutanix[] -For these values, use the `imageContentSourcePolicy.yaml` file that was created when you mirrored the registry. -endif::nutanix[] - -ifndef::nutanix[] -. Make any other modifications to the `install-config.yaml` file that you require. You can find more information about -the available parameters in the *Installation configuration parameters* section. -endif::nutanix[] -endif::restricted[] - -ifdef::nutanix[] -. Optional: Update one or more of the default configuration parameters in the `install.config.yaml` file to customize the installation. -+ -For more information about the parameters, see "Installation configuration parameters". -+ -[NOTE] -==== -If you are installing a three-node cluster, be sure to set the `compute.replicas` parameter to `0`. This ensures that cluster's control planes are schedulable. For more information, see "Installing a three-node cluster on {platform}". -==== -endif::nutanix[] - -. Back up the `install-config.yaml` file so that you can use -it to install multiple clusters. -+ -[IMPORTANT] -==== -The `install-config.yaml` file is consumed during the installation process. If -you want to reuse the file, you must back it up now. -==== - -ifdef::osp-user[You now have the file `install-config.yaml` in the directory that you specified.] 
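-
-ifdef::aws-outposts[]
-The following minimal sketch shows how the three AWS Outposts modifications described earlier in this procedure can fit together in the `compute` section of the `install-config.yaml` file. The `m5.xlarge` instance type and the `us-east-1a` Availability Zone are placeholder values only, and the remaining fields show typical defaults: substitute an instance type that is provisioned on your Outpost and the Availability Zone in which the Outpost was created.
-
-.Sketch of the compute settings for a cluster with remote workers on AWS Outposts
-[source,yaml]
-----
-compute:
-- architecture: amd64
-  hyperthreading: Enabled
-  name: worker
-  platform:
-    aws:
-      type: m5.xlarge <1>
-      zones:
-      - us-east-1a <2>
-      rootVolume:
-        type: gp2 <3>
-  replicas: 3
-----
-<1> Specify an instance type that your Outpost provides. The `m5.xlarge` value is a placeholder, not a recommendation.
-<2> Specify only the Availability Zone in which the Outpost instance was created.
-<3> Set the root volume type to `gp2`, because the AWS Outposts service does not support gp3 volumes.
-endif::aws-outposts[]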
- -ifeval::["{context}" == "installing-alibaba-default"] -:!alibabacloud-default: -endif::[] -ifeval::["{context}" == "installing-alibaba-customizations"] -:!alibabacloud-custom: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!alibabacloud-vpc: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!aws: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!aws: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!aws: -:!aws-outposts: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:!azure: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:!gcp: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:!gcp: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!gcp: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!gcp: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power-vs: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!osp: -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp: -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp: -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp: -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -:!rhv: -endif::[] -ifeval::["{context}" == "installing-rhv-default"] -:!rhv: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!vsphere: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!vsphere: -:!restricted: -endif::[] 
-ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:!nutanix: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!nutanix: -:!restricted: -endif::[] -:!platform: diff --git a/modules/installation-installer-provisioned-vsphere-config-yaml.adoc b/modules/installation-installer-provisioned-vsphere-config-yaml.adoc deleted file mode 100644 index b627d7dc4ed4..000000000000 --- a/modules/installation-installer-provisioned-vsphere-config-yaml.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc - -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:network: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:restricted: -endif::[] - -[id="installation-installer-provisioned-vsphere-config-yaml_{context}"] -= Sample install-config.yaml file for an installer-provisioned VMware vSphere cluster - -You can customize the `install-config.yaml` file to specify more details about -your {product-title} cluster's platform or modify the values of the required -parameters. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- architecture: amd64 - hyperthreading: Enabled <3> - name: <worker_node> - platform: {} - replicas: 3 -controlPlane: <2> -- architecture: amd64 - hyperthreading: Enabled <3> - name: <parent_node> - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: test <4> -ifdef::network[] -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <9> - serviceNetwork: - - 172.30.0.0/16 -endif::network[] -platform: - vsphere: <5> - apiVIPs: - - 10.0.0.1 - failureDomains: <6> - - name: <failure_domain_name> - region: <default_region_name> - server: <fully_qualified_domain_name> - topology: - computeCluster: "/<datacenter>/host/<cluster>" - datacenter: <datacenter> - datastore: "/<datacenter>/datastore/<datastore>" - networks: - - <VM_Network_name> - resourcePool: "/<datacenter>/host/<cluster>/Resources/<resourcePool>" <7> - folder: "/<datacenter_name>/vm/<folder_name>/<subfolder_name>" - zone: <default_zone_name> - ingressVIPs: - - 10.0.0.2 - vcenters: - - datacenters: - - <datacenter> - password: <password> - port: 443 - server: <fully_qualified_domain_name> - user: administrator@vsphere.local - diskType: thin <8> -ifdef::restricted[] - clusterOSImage: http://mirror.example.com/images/rhcos-47.83.202103221318-0-vmware.x86_64.ova <9> -endif::restricted[] -ifndef::openshift-origin[] -fips: false -endif::openshift-origin[] -ifndef::restricted[] -pullSecret: '{"auths": ...}' -endif::restricted[] -ifdef::restricted[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <10> -endif::restricted[] -sshKey: 'ssh-ed25519 AAAA...' 
-ifdef::restricted[] -additionalTrustBundle: | <11> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <12> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::restricted[] ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled -to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Your machines must use at least 8 CPUs and 32 GB of RAM if you disable simultaneous multithreading. -==== -<4> The cluster name that you specified in your DNS records. -<5> Optional parameter for providing additional configuration for the machine pool parameters for the compute and control plane machines. -<6> Establishes the relationships between a region and zone. You define a failure domain by using vCenter objects, such as a `datastore` object. A failure domain defines the vCenter location for {product-title} cluster nodes. -<7> Optional parameter for providing an existing resource pool for machine creation. If you do not specify a value, the installation program uses the root resource pool of the vSphere cluster. -<8> The vSphere disk provisioning method. -ifdef::network[] -<9> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -endif::network[] -ifdef::restricted[] -<9> The location of the {op-system-first} image that is accessible from the bastion server. -<10> For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -<11> Provide the contents of the certificate file that you used for your mirror registry. -<12> Provide the `imageContentSources` section from the output of the command to mirror the repository. -endif::restricted[] - -[NOTE] -==== -In {product-title} 4.12 and later, the `apiVIP` and `ingressVIP` configuration settings are deprecated. Instead, use a list format to enter values in the `apiVIPs` and `ingressVIPs` configuration settings. 
-==== - -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!network: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!restricted: -endif::[] diff --git a/modules/installation-installing-bare-metal.adoc b/modules/installation-installing-bare-metal.adoc deleted file mode 100644 index eae9ea10c0b3..000000000000 --- a/modules/installation-installing-bare-metal.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc - -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-installing-bare-metal_{context}"] -= Waiting for the bootstrap process to complete - -The {product-title} bootstrap process begins after the cluster nodes first boot into the persistent {op-system} environment that has been installed to disk. The configuration information provided through the Ignition config files is used to initialize the bootstrap process and install {product-title} on the machines. You must wait for the bootstrap process to complete. - -.Prerequisites - -* You have created the Ignition config files for your cluster. -* You have configured suitable network, DNS and load balancing infrastructure. -* You have obtained the installation program and generated the Ignition config files for your cluster. -* You installed {op-system} on your cluster machines and provided the Ignition config files that the {product-title} installation program generated. -ifndef::restricted[] -* Your machines have direct internet access or have an HTTP or HTTPS proxy available. -endif::restricted[] - -.Procedure - -. Monitor the bootstrap process: -+ -[source,terminal] ----- -$ ./openshift-install --dir <installation_directory> wait-for bootstrap-complete \ <1> - --log-level=info <2> ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. -<2> To view different installation details, specify `warn`, `debug`, or `error` instead of `info`. -+ -.Example output -[source,terminal] ----- -INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443... -INFO API v1.26.0 up -INFO Waiting up to 30m0s for bootstrapping to complete... -INFO It is now safe to remove the bootstrap resources ----- -+ -The command succeeds when the Kubernetes API server signals that it has been -bootstrapped on the control plane machines. - -. After the bootstrap process is complete, remove the bootstrap machine from the -load balancer. 
-+ -[IMPORTANT] -==== -You must remove the bootstrap machine from the load balancer at this point. You -can also remove or reformat the bootstrap machine itself. -==== - -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] diff --git a/modules/installation-launching-installer.adoc b/modules/installation-launching-installer.adoc deleted file mode 100644 index efe4de49864c..000000000000 --- a/modules/installation-launching-installer.adoc +++ /dev/null @@ -1,763 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-default.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-gcp-shared-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-ibm-cloud-customizations.adoc -// * installing/installing_gcp/installing-ibm-cloud-vpc.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * 
installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing-nutanix-installer-provisioned.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// If you use this module in any other assembly, you must update the ifeval -// statements. - -ifeval::["{context}" == "installing-alibaba-customizations"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-network-customizations"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-default"] -:no-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:custom-config: -:aws: -endif::[] -ifeval::["{context}" == "installing-azure-default"] -:no-config: -:azure: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-default"] -:no-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:custom-config: -:gcp: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:custom-config: -:azure: -:single-step: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:custom-config: -:azure: -:single-step: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:custom-config: -:azure: -:single-step: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:custom-config: -:azure: -:single-step: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:custom-config: -:azure: -:single-step: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:custom-config: -:ash: -:single-step: -endif::[] -ifeval::["{context}" == 
"installing-azure-stack-hub-network-customizations"] -:custom-config: -:ash: -:single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp: -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp: -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp: -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer"] -:osp: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -:custom-config: -:rhv: -:single-step: -endif::[] -ifeval::["{context}" == "installing-rhv-default"] -:no-config: -:rhv: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned"] -:no-config: -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:custom-config: -:vsphere: -:single-step: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:custom-config: -:vsphere: -:single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:custom-config: -:vsphere: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:custom-config: -:ibm-cloud: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:custom-config: -:ibm-cloud: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:custom-config: -:ibm-cloud: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:custom-config: -:ibm-cloud: -:single-step: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:custom-config: -:nutanix: -:single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:custom-config: -:nutanix: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:custom-config: -:single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:custom-config: -:single-step: -endif::[] - -:_content-type: PROCEDURE -[id="installation-launching-installer_{context}"] -= Deploying the cluster - -You can install {product-title} on a compatible cloud platform. - -[IMPORTANT] -==== -You can run the `create cluster` command of the installation program only once, during initial installation. -==== - -.Prerequisites - -ifndef::osp,rhv,vsphere,nutanix[* Configure an account with the cloud platform that hosts your cluster.] - -ifdef::rhv[* Open the `ovirt-imageio` port to the {rh-virtualization-engine-name} from the machine running the installer. By default, the port is `54322`.] - -* Obtain the {product-title} installation program and the pull secret for your -cluster. - -* Verify the cloud provider account on your host has the correct permissions to deploy the cluster. An account with incorrect permissions causes the installation process to fail with an error message that displays the missing permissions. - -.Procedure - -ifdef::gcp[] -. 
Remove any existing GCP credentials that do not use the service account key -for the GCP account that you configured for your cluster and that are stored in the -following locations: -** The `GOOGLE_CREDENTIALS`, `GOOGLE_CLOUD_KEYFILE_JSON`, or `GCLOUD_KEYFILE_JSON` -environment variables -** The `~/.gcp/osServiceAccount.json` file -** The `gcloud cli` default credentials -endif::gcp[] - -ifdef::aws,gcp,no-config[] -. Change to the directory that contains the installation program and initialize the cluster deployment: -endif::aws,gcp,no-config[] -ifdef::single-step[] -* Change to the directory that contains the installation program and initialize the cluster deployment: -endif::single-step[] -+ -[source,terminal] ----- -$ ./openshift-install create cluster --dir <installation_directory> \ <1> - --log-level=info <2> ----- -<1> For `<installation_directory>`, specify the -ifdef::custom-config[] -location of your customized `./install-config.yaml` file. -endif::custom-config[] -ifdef::no-config[] -directory name to store the files that the installation program creates. -endif::no-config[] -<2> To view different installation details, specify `warn`, `debug`, or -`error` instead of `info`. -ifdef::no-config[] -+ -When specifying the directory: -* Verify that the directory has the `execute` permission. This permission is required to run Terraform binaries under the installation directory. -* Use an empty directory. Some installation assets, such as bootstrap X.509 certificates, have short expiration intervals, therefore you must not reuse an installation directory. If you want to reuse individual files from another cluster installation, you can copy them into your directory. However, the file names for the installation assets might change between releases. Use caution when copying installation files from an earlier {product-title} version. - -ifndef::rhv[] -. Provide values at the prompts: - -.. Optional: Select an SSH key to use to access your cluster machines. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -ifdef::aws[] -.. Select *aws* as the platform to target. -.. If you do not have an Amazon Web Services (AWS) profile stored on your computer, enter the AWS access key ID and secret access key for the user that you configured to run the -installation program. -+ -[NOTE] -==== -The AWS access key ID and secret access key are stored in `~/.aws/credentials` in the home directory of the current user on the installation host. You are prompted for the credentials by the installation program if the credentials for the exported profile are not present in the file. Any credentials that you provide to the installation program are stored in the file. -==== -.. Select the AWS region to deploy the cluster to. -.. Select the base domain for the Route 53 service that you configured for your cluster. -endif::aws[] -ifdef::azure,ash[] -.. Select *azure* as the platform to target. -.. If the installation program cannot locate the `osServicePrincipal.json` configuration file, which contains Microsoft Azure profile information, in the `~/.azure/` directory on your computer, the installer prompts you to specify the following Azure parameter values for your subscription and service principal. -*** *azure subscription id*: The subscription ID to use for the cluster. -Specify the `id` value in your account output. -*** *azure tenant id*: The tenant ID. 
Specify the `tenantId` value in your -account output. -*** *azure service principal client id*: The value of the `appId` parameter -for the service principal. -*** *azure service principal client secret*: The value of the `password` -parameter for the service principal. -+ -[IMPORTANT] -==== -After you enter values for the previously listed parameters, the installation program creates a `osServicePrincipal.json` configuration file and stores this file in -the `~/.azure/` directory on your computer. These actions ensure that the installation program can load the profile when it is creating an {product-title} cluster on the target platform. -==== -.. Select the region to deploy the cluster to. -.. Select the base domain to deploy the cluster to. The base domain corresponds -to the Azure DNS Zone that you created for your cluster. -endif::azure,ash[] -ifdef::gcp[] -.. Select *gcp* as the platform to target. -.. If you have not configured the service account key for your GCP account on -your host, you must obtain it from GCP and paste the contents of the file -or enter the absolute path to the file. -.. Select the project ID to provision the cluster in. The default value is -specified by the service account that you configured. -.. Select the region to deploy the cluster to. -.. Select the base domain to deploy the cluster to. The base domain corresponds -to the public DNS zone that you created for your cluster. -endif::gcp[] -ifdef::ibm-cloud[] -.. test -endif::ibm-cloud[] -ifdef::osp[] -.. Select *openstack* as the platform to target. -.. Specify the {rh-openstack-first} external network name to use for installing the cluster. -.. Specify the Floating IP address to use for external access to the OpenShift API. -.. Specify the {rh-openstack} flavor with at least 16 GB RAM to use for control plane -and compute nodes. -.. Select the base domain to deploy the cluster to. All DNS records will be -sub-domains of this base and will also include the cluster name. -endif::osp[] -ifdef::vsphere[] -.. Select *vsphere* as the platform to target. -.. Specify the name of your vCenter instance. -.. Specify the user name and password for the vCenter account that has the required permissions to create the cluster. -+ -The installation program connects to your vCenter instance. -.. Select the data center in your vCenter instance to connect to. -.. Select the default vCenter datastore to use. -+ -[NOTE] -==== -Datastore and cluster names cannot exceed 60 characters; therefore, ensure the combined string length does not exceed the 60 character limit. -==== -.. Select the vCenter cluster to install the {product-title} cluster in. The installation program uses the root resource pool of the vSphere cluster as the default resource pool. -.. Select the network in the vCenter instance that contains the virtual IP addresses and DNS records that you configured. -.. Enter the virtual IP address that you configured for control plane API access. -.. Enter the virtual IP address that you configured for cluster ingress. -.. Enter the base domain. This base domain must be the same one that you used in the DNS records that you configured. -endif::vsphere[] -.. Enter a descriptive name for your cluster. -ifdef::vsphere[] -The cluster name must be the same one that you used in the DNS records that you configured. -+ -[NOTE] -==== -Datastore and cluster names cannot exceed 60 characters; therefore, ensure the combined string length does not exceed the 60 character limit. 
-==== -endif::vsphere[] -ifdef::azure[] -+ -[IMPORTANT] -==== -All Azure resources that are available through public endpoints are subject to -resource name restrictions, and you cannot create resources that use certain -terms. For a list of terms that Azure restricts, see -link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-reserved-resource-name[Resolve reserved resource name errors] -in the Azure documentation. -==== -endif::azure[] -ifdef::gcp[] -If you provide a name that is longer -than 6 characters, only the first 6 characters will be used in the infrastructure -ID that is generated from the cluster name. -endif::gcp[] -ifndef::openshift-origin[] -.. Paste the {cluster-manager-url-pull}. -endif::openshift-origin[] -ifdef::openshift-origin[] -.. Paste the {cluster-manager-url-pull}. -* If you do not have a {cluster-manager-url-pull}, you can paste the pull secret another private registry. -* If you do not need the cluster to pull images from a private registry, you can paste `{"auths":{"fake":{"auth":"aWQ6cGFzcwo="}}}` as the pull secret. -endif::openshift-origin[] -endif::rhv[] -ifdef::rhv[] -. Respond to the installation program prompts. - -.. Optional: For `SSH Public Key`, select a password-less public key, such as `~/.ssh/id_rsa.pub`. This key authenticates connections with the new {product-title} cluster. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, select an SSH key that your `ssh-agent` process uses. -==== -.. For `Platform`, select `ovirt`. -.. For `Engine FQDN[:PORT]`, enter the fully qualified domain name (FQDN) of the {rh-virtualization} environment. -+ -For example: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -rhv-env.virtlab.example.com:443 ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ curl -k -u admin@internal:pw123 \ -https://ovirtlab.example.com/ovirt-engine/api ----- -endif::openshift-origin[] -+ -.. The installation program automatically generates a CA certificate. For `Would you like to use the above certificate to connect to the {rh-virtualization-engine-name}?`, answer `y` or `N`. If you answer `N`, you must install {product-title} in insecure mode. -//TODO: Add this sentence with xref after it's OK to add xrefs: For information about insecure mode, see xref:installing-rhv-insecure-mode_installing-rhv-default[]. -.. For `Engine username`, enter the user name and profile of the {rh-virtualization} administrator using this format: -+ -[source,terminal] ----- -<username>@<profile> <1> ----- -+ -<1> For `<username>`, specify the user name of an {rh-virtualization} administrator. For `<profile>`, specify the login profile, which you can get by going to the {rh-virtualization} Administration Portal login page and reviewing the *Profile* dropdown list. For example: `admin@internal`. -+ -.. For `Engine password`, enter the {rh-virtualization} admin password. -.. For `Cluster`, select the {rh-virtualization} cluster for installing {product-title}. -.. For `Storage domain`, select the storage domain for installing {product-title}. -.. For `Network`, select a virtual network that has access to the {rh-virtualization} {rh-virtualization-engine-name} REST API. -.. For `Internal API Virtual IP`, enter the static IP address you set aside for the cluster's REST API. -.. For `Ingress virtual IP`, enter the static IP address you reserved for the wildcard apps domain. -.. 
For `Base Domain`, enter the base domain of the {product-title} cluster. If this cluster is exposed to the outside world, this must be a valid domain recognized by DNS infrastructure. For example, enter: `virtlab.example.com` -.. For `Cluster Name`, enter the name of the cluster. For example, `my-cluster`. Use cluster name from the externally registered/resolvable DNS entries you created for the {product-title} REST API and apps domain names. The installation program also gives this name to the cluster in the {rh-virtualization} environment. -.. For `Pull Secret`, copy the pull secret from the `pull-secret.txt` file you downloaded earlier and paste it here. You can also get a copy of the same {cluster-manager-url-pull}. -endif::rhv[] - -endif::no-config[] - -ifdef::aws[] -. Optional: Remove or disable the `AdministratorAccess` policy from the IAM -account that you used to install the cluster. -+ -[NOTE] -==== -The elevated permissions provided by the `AdministratorAccess` policy are required only during installation. -==== -endif::aws[] - -ifdef::gcp[] -. Optional: You can reduce the number of permissions for the service account that you used to install the cluster. -** If you assigned the `Owner` role to your service account, you can remove that role and replace it with the `Viewer` role. -** If you included the `Service Account Key Admin` role, -you can remove it. -endif::gcp[] - -.Verification -When the cluster deployment completes successfully: - -* The terminal displays directions for accessing your cluster, including a link to the web console and credentials for the `kubeadmin` user. -* Credential information also outputs to `<installation_directory>/.openshift_install.log`. - -[IMPORTANT] -==== -Do not delete the installation program or the files that the installation program creates. Both are required to delete the cluster. -==== - -.Example output -[source,terminal] ----- -... -INFO Install complete! -INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' -INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com -INFO Login to the console with user: "kubeadmin", and password: "4vYBz-Ee6gm-ymBZj-Wt5AL" -INFO Time elapsed: 36m22s ----- - -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. 
-==== - -ifeval::["{context}" == "installing-alibaba-customizations"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-default"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-network-customizations"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-alibaba-vpc"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-aws-private"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-customizations"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-network-customizations"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-vpc"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws-installer-provisioned"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-default"] -:!no-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-outposts-remote-workers"] -:!custom-config: -:!aws: -endif::[] -ifeval::["{context}" == "installing-azure-default"] -:!no-config: -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!custom-config: -:!azure: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-gcp-customizations"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-default"] -:!no-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:!custom-config: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-azure-customizations"] -:!custom-config: -:!azure: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!custom-config: -:!azure: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!custom-config: -:!azure: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!custom-config: -:!azure: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!custom-config: -:!ash: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!custom-config: -:!ash: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp: -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp: -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp: -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-openstack-installer"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] 
-:!custom-config: -:!rhv: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-rhv-default"] -:!no-config: -:!rhv: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned"] -:!no-config: -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!custom-config: -:!vsphere: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!custom-config: -:!vsphere: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!custom-config: -:!vsphere: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!custom-config: -:!ibm-cloud: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!custom-config: -:!ibm-cloud: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!custom-config: -:!ibm-cloud: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!custom-config: -:!ibm-cloud: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:!custom-config: -:!nutanix: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!custom-config: -:!nutanix: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!custom-config: -:!single-step: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!custom-config: -:!single-step: -endif::[] diff --git a/modules/installation-load-balancing-user-infra.adoc b/modules/installation-load-balancing-user-infra.adoc deleted file mode 100644 index cd0a9e6deb0a..000000000000 --- a/modules/installation-load-balancing-user-infra.adoc +++ /dev/null @@ -1,291 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-power.adoc -// * installing/installing-rhv-restricted-network.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -endif::[] - -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:vsphere: -endif::[] - 
-ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:user-managed-lb: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:user-managed-lb: -endif::[] - -:_content-type: CONCEPT -[id="installation-load-balancing-user-infra_{context}"] -= Load balancing requirements for user-provisioned infrastructure - -ifndef::user-managed-lb[] -Before you install {product-title}, you must provision the API and application Ingress load balancing infrastructure. In production scenarios, you can deploy the API and application Ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. -endif::user-managed-lb[] - -ifdef::user-managed-lb[] -[IMPORTANT] -==== -[subs="attributes+"] -Deployment with User-Managed Load Balancers is a Technology Preview feature only. Technology Preview features -are not supported with Red Hat production service level agreements (SLAs) and -might not be functionally complete. Red Hat does not recommend using them -in production. These features provide early access to upcoming product -features, enabling customers to test functionality and provide feedback during -the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. -==== - -Before you install {product-title}, you can provision your own API and application ingress load balancing infrastructure to use in place of the default, internal load balancing solution. In production scenarios, you can deploy the API and application Ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. -endif::user-managed-lb[] - -[NOTE] -==== -If you want to deploy the API and application Ingress load balancers with a {op-system-base-full} instance, you must purchase the {op-system-base} subscription separately. -==== - -The load balancing infrastructure must meet the following requirements: - -. *API load balancer*: Provides a common endpoint for users, both human and machine, to interact with and configure the platform. Configure the following conditions: -+ --- - ** Layer 4 load balancing only. This can be referred to as Raw TCP, SSL Passthrough, or SSL Bridge mode. If you use SSL Bridge mode, you must enable Server Name Indication (SNI) for the API routes. - ** A stateless load balancing algorithm. The options vary based on the load balancer implementation. --- -+ -[IMPORTANT] -==== -Do not configure session persistence for an API load balancer. -==== -+ -Configure the following ports on both the front and back of the load balancers: -+ -.API load balancer -[cols="2,5,^2,^2,2",options="header"] -|=== - -|Port -|Back-end machines (pool members) -|Internal -|External -|Description - -|`6443` -|Bootstrap and control plane. You remove the bootstrap machine from the load -balancer after the bootstrap machine initializes the cluster control plane. You -must configure the `/readyz` endpoint for the API server health check probe. 
-|X -|X -|Kubernetes API server - -|`22623` -|Bootstrap and control plane. You remove the bootstrap machine from the load -balancer after the bootstrap machine initializes the cluster control plane. -|X -| -|Machine config server - -|=== -+ -[NOTE] -==== -The load balancer must be configured to take a maximum of 30 seconds from the -time the API server turns off the `/readyz` endpoint to the removal of the API -server instance from the pool. Within the time frame after `/readyz` returns an -error or becomes healthy, the endpoint must have been removed or added. Probing -every 5 or 10 seconds, with two successful requests to become healthy and three -to become unhealthy, are well-tested values. -==== -+ -. *Application Ingress load balancer*: Provides an ingress point for application traffic flowing in from outside the cluster. A working configuration for the Ingress router is required for an {product-title} cluster. -+ -Configure the following conditions: -+ --- - ** Layer 4 load balancing only. This can be referred to as Raw TCP, SSL Passthrough, or SSL Bridge mode. If you use SSL Bridge mode, you must enable Server Name Indication (SNI) for the ingress routes. - ** A connection-based or session-based persistence is recommended, based on the options available and types of applications that will be hosted on the platform. --- -+ -[TIP] -==== -If the true IP address of the client can be seen by the application Ingress load balancer, enabling source IP-based session persistence can improve performance for applications that use end-to-end TLS encryption. -==== -+ -Configure the following ports on both the front and back of the load balancers: -+ -.Application Ingress load balancer -[cols="2,5,^2,^2,2",options="header"] -|=== - -|Port -|Back-end machines (pool members) -|Internal -|External -|Description - -|`443` -|The machines that run the Ingress Controller pods, compute, or worker, by default. -|X -|X -|HTTPS traffic - -|`80` -|The machines that run the Ingress Controller pods, compute, or worker, by default. -|X -|X -|HTTP traffic - -|=== -+ -[NOTE] -==== -If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application Ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. -==== - -[id="installation-load-balancing-user-infra-example_{context}"] -ifndef::user-managed-lb[] -== Example load balancer configuration for user-provisioned clusters - -This section provides an example API and application Ingress load balancer configuration that meets the load balancing requirements for user-provisioned clusters. The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another. -endif::user-managed-lb[] - -ifdef::user-managed-lb[] -== Example load balancer configuration for clusters that are deployed with user-managed load balancers - -This section provides an example API and application Ingress load balancer configuration that meets the load balancing requirements for clusters that are deployed with user-managed load balancers. The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another. 
-endif::user-managed-lb[] - -In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. - -[NOTE] -==== -If you are using HAProxy as a load balancer and SELinux is set to `enforcing`, you must ensure that the HAProxy service can bind to the configured TCP port by running `setsebool -P haproxy_connect_any=1`. -==== - -.Sample API and application Ingress load balancer configuration -[%collapsible] -==== -[source,text] ----- -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -listen api-server-6443 <1> - bind *:6443 - mode tcp - server bootstrap bootstrap.ocp4.example.com:6443 check inter 1s backup <2> - server master0 master0.ocp4.example.com:6443 check inter 1s - server master1 master1.ocp4.example.com:6443 check inter 1s - server master2 master2.ocp4.example.com:6443 check inter 1s -listen machine-config-server-22623 <3> - bind *:22623 - mode tcp - server bootstrap bootstrap.ocp4.example.com:22623 check inter 1s backup <2> - server master0 master0.ocp4.example.com:22623 check inter 1s - server master1 master1.ocp4.example.com:22623 check inter 1s - server master2 master2.ocp4.example.com:22623 check inter 1s -listen ingress-router-443 <4> - bind *:443 - mode tcp - balance source - server worker0 worker0.ocp4.example.com:443 check inter 1s - server worker1 worker1.ocp4.example.com:443 check inter 1s -listen ingress-router-80 <5> - bind *:80 - mode tcp - balance source - server worker0 worker0.ocp4.example.com:80 check inter 1s - server worker1 worker1.ocp4.example.com:80 check inter 1s ----- - -<1> Port `6443` handles the Kubernetes API traffic and points to the control plane machines. -<2> The bootstrap entries must be in place before the {product-title} cluster installation and they must be removed after the bootstrap process is complete. -<3> Port `22623` handles the machine config server traffic and points to the control plane machines. -<4> Port `443` handles the HTTPS traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. -<5> Port `80` handles the HTTP traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. -+ -[NOTE] -===== -If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application Ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. -===== -==== - -[TIP] -==== -If you are using HAProxy as a load balancer, you can check that the `haproxy` process is listening on ports `6443`, `22623`, `443`, and `80` by running `netstat -nltupe` on the HAProxy node. 
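For example, you can narrow the output to the relevant listener ports. The following filter is an illustrative sketch and assumes GNU `grep` is available on the HAProxy node:

[source,terminal]
----
$ netstat -nltupe | grep -E ':(80|443|6443|22623)[[:space:]]'
----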
-==== - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!user-managed-lb: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!user-managed-lb: -endif::[] \ No newline at end of file diff --git a/modules/installation-local-registry-pull-secret.adoc b/modules/installation-local-registry-pull-secret.adoc deleted file mode 100644 index 45c07ff50f4b..000000000000 --- a/modules/installation-local-registry-pull-secret.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * openshift_images/samples-operator-alt-registry.adoc - -[id="installation-local-registry-pull-secret_{context}"] -= Creating a pull secret for your mirror registry - -In a restricted network, you create a pull secret that contains only -the information for your registry. - -.Prerequisites - -* You configured a mirror registry to use in your restricted network and have its domain name and port as well as credentials for it. - -.Procedure - -. On the mirror host, generate the pull secret for your registry: -+ ----- -$ podman login --authfile ~/pullsecret_config.json <local_registry_host_name>:<local_registry_host_port> <1> ----- -<1> For `<local_registry_host_name>`, specify the registry domain name -for your mirror registry, such as `registry.example.com`. For -`<local_registry_host_port>`, specify the port that your mirror registry uses to -serve content. -+ -Provide your credentials for the mirror registry at the prompts. - -. View the pull secret that you created: -+ ----- -# cat ~/pullsecret_config.json - -{ "auths": { "<local_registry_host_name>:<local_registry_host_port>": { "auth": "ZHVtbXk6ZHVtbXk=" } } } ----- diff --git a/modules/installation-localzone-generate-k8s-manifest.adoc b/modules/installation-localzone-generate-k8s-manifest.adoc deleted file mode 100644 index 7f8504e39b9f..000000000000 --- a/modules/installation-localzone-generate-k8s-manifest.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="installation-localzone-generate-k8s-manifest_{context}"] -= Creating the Kubernetes manifest files - -Because you must modify some cluster definition files and manually start the cluster machines, you must generate the Kubernetes manifest files that the cluster needs to configure the machines. - -.Prerequisites - -* You obtained the {product-title} installation program. -* You created the `install-config.yaml` installation configuration file. -* You installed the `jq` package. - -.Procedure - -. Change to the directory that contains the {product-title} installation program and generate the Kubernetes manifests for the cluster by running the following command: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -+ -<1> For `<installation_directory>`, specify the installation directory that -contains the `install-config.yaml` file you created. - -. 
Set the default Maximum Transmission Unit (MTU) according to the network plugin:
-+
-[IMPORTANT]
-====
-Generally, the Maximum Transmission Unit (MTU) between an Amazon EC2 instance in a Local Zone and an Amazon EC2 instance in the Region is 1300. See link:https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html[How Local Zones work] in the AWS documentation.
-The cluster network MTU must always be less than the EC2 MTU to account for the overhead. The specific overhead is determined by your network plugin, for example:
-
-- OVN-Kubernetes: `100 bytes`
-- OpenShift SDN: `50 bytes`
-The network plugin might provide additional features, such as IPsec, that further decrease the MTU. Check the documentation for additional information.
-
-====
-
-.. If you are using the `OVN-Kubernetes` network plugin, enter the following command:
-+
-[source,terminal]
-----
-$ cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml
-apiVersion: operator.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  defaultNetwork:
-    ovnKubernetesConfig:
-      mtu: 1200
-EOF
-----
-
-.. If you are using the `OpenShift SDN` network plugin, enter the following command:
-+
-[source,terminal]
-----
-$ cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml
-apiVersion: operator.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  defaultNetwork:
-    openshiftSDNConfig:
-      mtu: 1250
-EOF
-----
-
-. Create the machine set manifests for the worker nodes in your Local Zone.
-.. Export a local variable that contains the name of the Local Zone that you opted your AWS account into by running the following command:
-+
-[source,terminal]
-----
-$ export LZ_ZONE_NAME="<local_zone_name>" <1>
-----
-<1> For `<local_zone_name>`, specify the Local Zone that you opted your AWS account into, such as `us-east-1-nyc-1a`.
-
-.. Review the instance types for the location that you will deploy to by running the following command:
-+
-[source,terminal]
-----
-$ aws ec2 describe-instance-type-offerings \
-  --location-type availability-zone \
-  --filters Name=location,Values=${LZ_ZONE_NAME} \
-  --region <region> <1>
-----
-<1> For `<region>`, specify the name of the region that you will deploy to, such as `us-east-1`.
-
-.. Export a variable to define the instance type for the worker machines to deploy on the Local Zone subnet by running the following command:
-+
-[source,terminal]
-----
-$ export INSTANCE_TYPE="<instance_type>" <1>
-----
-<1> Set `<instance_type>` to a tested instance type, such as `c5d.2xlarge`.
-
-.. Store the AMI ID as a local variable by running the following command:
-+
-[source,terminal]
-----
-$ export AMI_ID=$(grep ami <installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-0.yaml \
-  | tail -n1 | awk '{print$2}')
-----
-
-.. Store the subnet ID as a local variable by running the following command:
-+
-[source,terminal]
-----
-$ export SUBNET_ID=$(aws cloudformation describe-stacks --stack-name "<subnet_stack_name>" \ <1>
-  | jq -r '.Stacks[0].Outputs[0].OutputValue')
-----
-<1> For `<subnet_stack_name>`, specify the name of the subnet stack that you created.
-
-.. Store the cluster ID as a local variable by running the following command:
-+
-[source,terminal]
-----
-$ export CLUSTER_ID="$(awk '/infrastructureName: / {print $2}' <installation_directory>/manifests/cluster-infrastructure-02-config.yml)"
-----
-
-..
Create the worker manifest file for the Local Zone that your VPC uses by running the following command: -+ -[source,terminal] ----- -$ cat <<EOF > <installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-nyc1.yaml -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: ${CLUSTER_ID} - name: ${CLUSTER_ID}-edge-${LZ_ZONE_NAME} - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: ${CLUSTER_ID} - machine.openshift.io/cluster-api-machineset: ${CLUSTER_ID}-edge-${LZ_ZONE_NAME} - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: ${CLUSTER_ID} - machine.openshift.io/cluster-api-machine-role: edge - machine.openshift.io/cluster-api-machine-type: edge - machine.openshift.io/cluster-api-machineset: ${CLUSTER_ID}-edge-${LZ_ZONE_NAME} - spec: - metadata: - labels: - zone_type: local-zone - zone_group: ${LZ_ZONE_NAME:0:-1} - node-role.kubernetes.io/edge: "" - taints: - - key: node-role.kubernetes.io/edge - effect: NoSchedule - providerSpec: - value: - ami: - id: ${AMI_ID} - apiVersion: machine.openshift.io/v1beta1 - blockDevices: - - ebs: - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: ${CLUSTER_ID}-worker-profile - instanceType: ${INSTANCE_TYPE} - kind: AWSMachineProviderConfig - placement: - availabilityZone: ${LZ_ZONE_NAME} - region: ${CLUSTER_REGION} - securityGroups: - - filters: - - name: tag:Name - values: - - ${CLUSTER_ID}-worker-sg - subnet: - id: ${SUBNET_ID} - publicIp: true - tags: - - name: kubernetes.io/cluster/${CLUSTER_ID} - value: owned - userDataSecret: - name: worker-user-data -EOF ----- diff --git a/modules/installation-machine-requirements.adoc b/modules/installation-machine-requirements.adoc deleted file mode 100644 index f05497049622..000000000000 --- a/modules/installation-machine-requirements.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc - -ifeval::["{context}" == "installing-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:bare-metal: -endif::[] -ifeval::["{context}" == 
"installing-restricted-networks-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] - -:_content-type: REFERENCE -[id="installation-machine-requirements_{context}"] -= Required machines for cluster installation - -The smallest {product-title} clusters require the following hosts: - -.Minimum required hosts -[options="header"] -|=== - -|Hosts |Description - -|One temporary bootstrap machine -|The cluster requires the bootstrap machine to deploy the {product-title} cluster -on the three control plane machines. You can remove the bootstrap machine after -you install the cluster. - -|Three control plane machines -|The control plane machines run the Kubernetes and {product-title} services that form the control plane. - -|At least two compute machines, which are also known as worker machines. -|The workloads requested by {product-title} users run on the compute machines. - -|=== - -ifdef::bare-metal[] -[NOTE] -==== -As an exception, you can run zero compute machines in a bare metal cluster that consists of three control plane machines only. This provides smaller, more resource efficient clusters for cluster administrators and developers to use for testing, development, and production. Running one compute machine is not supported. -==== -endif::bare-metal[] - -[IMPORTANT] -==== -ifdef::ibm-z[] -To improve high availability of your cluster, distribute the control plane machines over different z/VM instances on at least two physical machines. -endif::ibm-z[] -ifndef::ibm-z[] -To maintain high availability of your cluster, use separate physical hosts for -these cluster machines. -endif::ibm-z[] -==== - -ifndef::ibm-z,ibm-power[] -The bootstrap and control plane machines must use {op-system-first} as the operating system. However, the compute machines can choose between {op-system-first}, {op-system-base-full} 8.6, {op-system-base} 8.7, or {op-system-base} 8.8. -endif::ibm-z,ibm-power[] -ifdef::ibm-z,ibm-power[] -The bootstrap, control plane, and compute machines must use {op-system-first} as the operating system. -endif::ibm-z,ibm-power[] - -ifndef::openshift-origin[] -Note that {op-system} is based on {op-system-base-full} 9.2 and inherits all of its hardware certifications and requirements. -endif::[] -See link:https://access.redhat.com/articles/rhel-limits[Red Hat Enterprise Linux technology capabilities and limits]. 
- -ifeval::["{context}" == "installing-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] diff --git a/modules/installation-minimum-resource-requirements.adoc b/modules/installation-minimum-resource-requirements.adoc deleted file mode 100644 index 4a58774321e5..000000000000 --- a/modules/installation-minimum-resource-requirements.adoc +++ /dev/null @@ -1,278 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-china.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws-installer-provisioned.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * 
installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc - -ifeval::["{context}" == "installing-azure-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:bare-metal: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-cloud-vpc: -endif::[] - -:_content-type: CONCEPT -[id="installation-minimum-resource-requirements_{context}"] -= Minimum resource requirements for cluster installation - -Each cluster machine must meet the following minimum requirements: - -.Minimum resource requirements -[cols="2,2,2,2,2,2",options="header"] -|=== - -|Machine -|Operating System -ifndef::bare-metal[] -ifndef::ibm-cloud-vpc[] -|vCPU ^[1]^ -endif::ibm-cloud-vpc[] -ifdef::ibm-cloud-vpc[] -|vCPU -endif::ibm-cloud-vpc[] -|Virtual RAM -endif::bare-metal[] -ifdef::bare-metal[] -|CPU ^[1]^ -|RAM -endif::bare-metal[] -|Storage -ifndef::ibm-z,ibm-cloud-vpc[] -|IOPS ^[2]^ -endif::ibm-z,ibm-cloud-vpc[] -ifdef::ibm-z,ibm-cloud-vpc[] -|IOPS -endif::ibm-z,ibm-cloud-vpc[] - -|Bootstrap -|{op-system} -ifdef::ibm-power[|2] -ifndef::ibm-power[|4] -|16 GB -|100 GB -ifndef::ibm-z[] -|300 -endif::ibm-z[] -ifdef::ibm-z[] -|N/A -endif::ibm-z[] - - -|Control plane -|{op-system} -ifdef::ibm-power[|2] -ifndef::ibm-power[|4] -|16 GB -|100 GB -ifndef::ibm-z[] -|300 -endif::ibm-z[] -ifdef::ibm-z[] -|N/A -endif::ibm-z[] - -ifndef::openshift-origin[] -|Compute -ifdef::ibm-z,ibm-power,ibm-cloud-vpc[|{op-system}] -ifndef::ibm-z,ibm-power,ibm-cloud-vpc[|{op-system}, {op-system-base} 8.6, {op-system-base} 8.7, or {op-system-base} 8.8 ^[3]^] -|2 -|8 GB -|100 GB -ifndef::ibm-z[] -|300 -endif::ibm-z[] -ifdef::ibm-z[] -|N/A -endif::ibm-z[] -endif::openshift-origin[] - -ifdef::openshift-origin[] -|Compute -|{op-system} -|2 -|8 GB -|100 GB -ifndef::ibm-z[] -|300 
-endif::ibm-z[] -ifdef::ibm-z[] -|N/A -endif::ibm-z[] -endif::openshift-origin[] -|=== -[.small] --- -ifdef::ibm-z[] -1. One physical core (IFL) provides two logical cores (threads) when SMT-2 is enabled. The hypervisor can provide two or more vCPUs. -endif::ibm-z[] -ifdef::bare-metal[] -1. One CPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: (threads per core × cores) × sockets = CPUs. -endif::bare-metal[] -ifndef::ibm-z,bare-metal,ibm-cloud-vpc[] -1. One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: (threads per core × cores) × sockets = vCPUs. -endif::ibm-z,bare-metal,ibm-cloud-vpc[] -ifndef::ibm-z,ibm-power,ibm-cloud-vpc[] -2. {product-title} and Kubernetes are sensitive to disk performance, and faster storage is recommended, particularly for etcd on the control plane nodes which require a 10 ms p99 fsync duration. Note that on many cloud platforms, storage size and IOPS scale together, so you might need to over-allocate storage volume to obtain sufficient performance. -3. As with all user-provisioned installations, if you choose to use {op-system-base} compute machines in your cluster, you take responsibility for all operating system life cycle management and maintenance, including performing system updates, applying patches, and completing all other required tasks. Use of {op-system-base} 7 compute machines is deprecated and has been removed in {product-title} 4.10 and later. -endif::ibm-z,ibm-power,ibm-cloud-vpc[] -ifdef::ibm-power[] -2. {product-title} and Kubernetes are sensitive to disk performance, and faster storage is recommended, particularly for etcd on the control plane nodes. Note that on many cloud platforms, storage size and IOPS scale together, so you might need to over-allocate storage volume to obtain sufficient performance. -endif::ibm-power[] --- - -ifdef::azure[] -[IMPORTANT] -==== -You are required to use Azure virtual machines that have the `premiumIO` parameter set to `true`. -==== -endif::azure[] - -If an instance type for your platform meets the minimum requirements for cluster machines, it is supported to use in {product-title}. 
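Because etcd is especially sensitive to write latency, you might want to validate a candidate control plane disk before installation with a synchronous write test. The following `fio` invocation is a minimal sketch only; the target directory, file size, and block size are illustrative values rather than requirements from this document. Look for a reported `fdatasync` 99th percentile of roughly 10 ms or less:

[source,terminal]
----
$ fio --rw=write --ioengine=sync --fdatasync=1 \
    --directory=/var/lib/etcd-test --size=22m --bs=2300 --name=etcd-perf
----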
- -ifeval::["{context}" == "installing-azure-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-government-region"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-network-customizations"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-private"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-vnet"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!bare-metal: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-cloud-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-cloud-vpc: -endif::[] diff --git a/modules/installation-mirror-repository.adoc b/modules/installation-mirror-repository.adoc deleted file mode 100644 index 73d209f0a199..000000000000 --- a/modules/installation-mirror-repository.adoc +++ /dev/null @@ -1,254 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-restricted-networks-preparations.adoc -// * openshift_images/samples-operator-alt-registry.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-mirror-repository_{context}"] -= Mirroring the {product-title} image repository - -Mirror the {product-title} image repository to your registry to use during cluster installation or upgrade. - -.Prerequisites - -* Your mirror host has access to the internet. -* You configured a mirror registry to use in your restricted network and -can access the certificate and credentials that you configured. -ifndef::openshift-origin[] -* You downloaded the {cluster-manager-url-pull} and modified it to include authentication to your mirror repository. -endif::[] -ifdef::openshift-origin[] -* You have created a pull secret for your mirror repository. -endif::[] - -* If you use self-signed certificates, you have specified a Subject Alternative Name in the certificates. - -.Procedure - -Complete the following steps on the mirror host: - -. Review the -link:https://access.redhat.com/downloads/content/290/[{product-title} downloads page] -to determine the version of {product-title} that you want to install and determine the corresponding tag on the link:https://quay.io/repository/openshift-release-dev/ocp-release?tab=tags[Repository Tags] page. - -. Set the required environment variables: -.. 
Export the release version: -+ -[source,terminal] ----- -$ OCP_RELEASE=<release_version> ----- -+ -For `<release_version>`, specify the tag that corresponds to the version of {product-title} to -install, such as `4.5.4`. - -.. Export the local registry name and host port: -+ -[source,terminal] ----- -$ LOCAL_REGISTRY='<local_registry_host_name>:<local_registry_host_port>' ----- -+ -For `<local_registry_host_name>`, specify the registry domain name for your mirror -repository, and for `<local_registry_host_port>`, specify the port that it -serves content on. - -.. Export the local repository name: -+ -[source,terminal] ----- -$ LOCAL_REPOSITORY='<local_repository_name>' ----- -+ -For `<local_repository_name>`, specify the name of the repository to create in your -registry, such as `ocp4/openshift4`. - -.. Export the name of the repository to mirror: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ PRODUCT_REPO='openshift-release-dev' ----- -+ -For a production release, you must specify `openshift-release-dev`. -endif::[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ PRODUCT_REPO='openshift' ----- -endif::[] - -.. Export the path to your registry pull secret: -+ -[source,terminal] ----- -$ LOCAL_SECRET_JSON='<path_to_pull_secret>' ----- -+ -For `<path_to_pull_secret>`, specify the absolute path to and file name of the pull secret for your mirror registry that you created. - -.. Export the release mirror: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ RELEASE_NAME="ocp-release" ----- -+ -For a production release, you must specify `ocp-release`. -endif::[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ RELEASE_NAME="okd" ----- -endif::[] - -ifndef::openshift-origin[] -.. Export the type of architecture for your cluster: -+ -[source,terminal] ----- -$ ARCHITECTURE=<cluster_architecture> <1> ----- -<1> Specify the architecture of the cluster, such as `x86_64`, `aarch64`, `s390x`, or `ppc64le`. - -endif::[] - -.. Export the path to the directory to host the mirrored images: -+ -[source,terminal] ----- -$ REMOVABLE_MEDIA_PATH=<path> <1> ----- -<1> Specify the full path, including the initial forward slash (/) character. - -. Mirror the version images to the mirror registry: -** If your mirror host does not have internet access, take the following actions: -... Connect the removable media to a system that is connected to the internet. -... Review the images and configuration manifests to mirror: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE} --dry-run ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE} --dry-run ----- -endif::[] - -... Record the entire `imageContentSources` section from the output of the previous -command. The information about your mirrors is unique to your mirrored repository, and you must add the `imageContentSources` section to the `install-config.yaml` file during installation. -... 
Mirror the images to a directory on the removable media: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} --to-dir=${REMOVABLE_MEDIA_PATH}/mirror quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} --to-dir=${REMOVABLE_MEDIA_PATH}/mirror quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} ----- -endif::[] - -... Take the media to the restricted network environment and upload the images to the local container registry. -+ -[source,terminal] ----- -$ oc image mirror -a ${LOCAL_SECRET_JSON} --from-dir=${REMOVABLE_MEDIA_PATH}/mirror "file://openshift/release:${OCP_RELEASE}*" ${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} <1> ----- -+ -<1> For `REMOVABLE_MEDIA_PATH`, you must use the same path that you specified when you mirrored the images. -+ -[IMPORTANT] -==== -Running `oc image mirror` might result in the following error: `error: unable to retrieve source image`. This error occurs when image indexes include references to images that no longer exist on the image registry. Image indexes might retain older references to allow users running those images an upgrade path to newer points on the upgrade graph. As a temporary workaround, you can use the `--skip-missing` option to bypass the error and continue downloading the image index. For more information, see link:https://access.redhat.com/solutions/6975305[Service Mesh Operator mirroring failed]. -==== - -** If the local container registry is connected to the mirror host, take the following actions: -... Directly push the release images to the local registry by using following command: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE} ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE} ----- -endif::[] -+ -This command pulls the release information as a digest, and its output includes -the `imageContentSources` data that you require when you install your cluster. - -... Record the entire `imageContentSources` section from the output of the previous -command. The information about your mirrors is unique to your mirrored repository, and you must add the `imageContentSources` section to the `install-config.yaml` file during installation. -+ -[NOTE] -==== -The image name gets patched to Quay.io during the mirroring process, and the podman images will show Quay.io in the registry on the bootstrap virtual machine. -==== - -. 
To create the installation program that is based on the content that you -mirrored, extract it and pin it to the release: -** If your mirror host does not have internet access, run the following command: -+ -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --icsp-file=<file> \ --command=openshift-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}" ----- -** If the local container registry is connected to the mirror host, run the following command: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}" ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE}" ----- -endif::[] -+ -[IMPORTANT] -==== -To ensure that you use the correct images for the version of {product-title} -that you selected, you must extract the installation program from the mirrored -content. - -You must perform this step on a machine with an active internet connection. -==== -+ -. For clusters using installer-provisioned infrastructure, run the following command: -+ -[source,terminal] ----- -$ openshift-install ----- diff --git a/modules/installation-network-user-infra.adoc b/modules/installation-network-user-infra.adoc deleted file mode 100644 index eb5802f0f64d..000000000000 --- a/modules/installation-network-user-infra.adoc +++ /dev/null @@ -1,328 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-power.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-power.adoc -// * installing/installing-rhv-restricted-network.adoc -// * installing/installing-rhv-user-infra.adoc - -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z-restricted: -:restricted: -endif::[] 
-ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:azure: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:gcp: -:restricted: -endif::[] -ifeval::["{context}" == "installing-rhv-user-infra"] -:rhv: -endif::[] -ifeval::["{context}" == "installing-rhv-restricted-network"] -:rhv: -endif::[] - - -:_content-type: CONCEPT -[id="installation-network-user-infra_{context}"] -= Networking requirements for user-provisioned infrastructure - -All the {op-system-first} machines require networking to be configured in `initramfs` during boot -to fetch their Ignition config files. - -ifndef::azure,gcp[] -ifdef::ibm-z[] -During the initial boot, the machines require an HTTP or HTTPS server to -establish a network connection to download their Ignition config files. - -The machines are configured with static IP addresses. No DHCP server is required. Ensure that the machines have persistent IP addresses and hostnames. -endif::ibm-z[] -ifndef::ibm-z[] -During the initial boot, the machines require an IP address configuration that is set either through a DHCP server or statically by providing the required boot options. After a network connection is established, the machines download their Ignition config files from an HTTP or HTTPS server. The Ignition config files are then used to set the exact state of each machine. The Machine Config Operator completes more changes to the machines, such as the application of new certificates or keys, after installation. - -It is recommended to use a DHCP server for long-term management of the cluster machines. Ensure that the DHCP server is configured to provide persistent IP addresses, DNS server information, and hostnames to the cluster machines. - -[NOTE] -==== -If a DHCP service is not available for your user-provisioned infrastructure, you can instead provide the IP networking configuration and the address of the DNS server to the nodes at {op-system} install time. These can be passed as boot arguments if you are installing from an ISO image. See the _Installing {op-system} and starting the {product-title} bootstrap process_ section for more information about static IP provisioning and advanced networking options. -==== -endif::ibm-z[] - -The Kubernetes API server must be able to resolve the node names of the cluster -machines. If the API servers and worker nodes are in different zones, you can -configure a default DNS search zone to allow the API server to resolve the -node names. Another supported approach is to always refer to hosts by their -fully-qualified domain names in both the node objects and all DNS requests. -endif::azure,gcp[] - -ifdef::rhv[] -.Firewall - -Configure your firewall so your cluster has access to required sites. 
- -See also: - -ifndef::openshift-origin[] -* link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/planning_and_prerequisites_guide/index#RHV-manager-firewall-requirements_RHV_planning[Red Hat Virtualization Manager firewall requirements] -* link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/planning_and_prerequisites_guide#host-firewall-requirements_RHV_planning[Host firewall requirements] -endif::[] -ifdef::openshift-origin[] -* link:https://ovirt.org/documentation/installing_ovirt_as_a_self-hosted_engine_using_the_command_line/index.html#RHV-manager-firewall-requirements_SHE_cli_deploy[oVirt Engine firewall requirements] -* link:https://ovirt.org/documentation/installing_ovirt_as_a_self-hosted_engine_using_the_command_line/index.html#host-firewall-requirements_SHE_cli_deploy[Host firewall requirements] -endif::[] - -ifeval::["{context}" == "installing-rhv-user-infra"] -.Load balancers - -Configure one or preferably two layer-4 load balancers: - -* Provide load balancing for ports `6443` and `22623` on the control plane and bootstrap machines. Port `6443` provides access to the Kubernetes API server and must be reachable both internally and externally. Port `22623` must be accessible to nodes within the cluster. - -* Provide load balancing for port `443` and `80` for machines that run the Ingress router, which are usually compute nodes in the default configuration. Both ports must be accessible from within and outside the cluster. -endif::[] - -.DNS - -Configure infrastructure-provided DNS to allow the correct resolution of the main components and services. If you use only one load balancer, these DNS records can point to the same IP address. - -* Create DNS records for `api.<cluster_name>.<base_domain>` (internal and external resolution) and `api-int.<cluster_name>.<base_domain>` (internal resolution) that point to the load balancer for the control plane machines. - -* Create a DNS record for `*.apps.<cluster_name>.<base_domain>` that points to the load balancer for the Ingress router. For example, ports `443` and `80` of the compute machines. -endif::rhv[] - -ifndef::ibm-z,azure[] -[id="installation-host-names-dhcp-user-infra_{context}"] -== Setting the cluster node hostnames through DHCP - -On {op-system-first} machines, the hostname is set through NetworkManager. By default, the machines obtain their hostname through DHCP. If the hostname is not provided by DHCP, set statically through kernel arguments, or another method, it is obtained through a reverse DNS lookup. Reverse DNS lookup occurs after the network has been initialized on a node and can take time to resolve. Other system services can start prior to this and detect the hostname as `localhost` or similar. You can avoid this by using DHCP to provide the hostname for each cluster node. - -Additionally, setting the hostnames through DHCP can bypass any manual DNS record name configuration errors in environments that have a DNS split-horizon implementation. -endif::ibm-z,azure[] - -[id="installation-network-connectivity-user-infra_{context}"] -== Network connectivity requirements - -You must configure the network connectivity between machines to allow {product-title} cluster -components to communicate. Each machine must be able to resolve the hostnames -of all other machines in the cluster. - -This section provides details about the ports that are required. 
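After the cluster machines are running, you can spot-check that a required port is reachable from another machine in the cluster. The following commands are an illustrative sketch only; the host names are placeholders, and the `nc` utility is assumed to be installed:

[source,terminal]
----
$ nc -vz master0.ocp4.example.com 6443
$ nc -vz master0.ocp4.example.com 22623
$ nc -vz worker0.ocp4.example.com 443
----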
- -ifndef::restricted,origin[] -[IMPORTANT] -==== -In connected {product-title} environments, all nodes are required to have internet access to pull images -for platform containers and provide telemetry data to Red Hat. -==== -ifeval::["{context}" == "installing-rhv-restricted-network"] -:!rhv: -endif::[] -ifeval::["{context}" == "installing-rhv-user-infra"] -:!rhv: -endif::[] -endif::restricted,origin[] - -ifdef::ibm-z-kvm[] -[NOTE] -==== -The {op-system-base} KVM host must be configured to use bridged networking in libvirt or MacVTap to connect the network to the virtual machines. The virtual machines must have access to the network, which is attached to the {op-system-base} KVM host. Virtual Networks, for example network address translation (NAT), within KVM are not a supported configuration. -==== -endif::ibm-z-kvm[] - -.Ports used for all-machine to all-machine communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|ICMP -|N/A -|Network reachability tests - -.4+|TCP -|`1936` -|Metrics - -|`9000`-`9999` -|Host level services, including the node exporter on ports `9100`-`9101` and -the Cluster Version Operator on port `9099`. - -|`10250`-`10259` -|The default ports that Kubernetes reserves - -|`10256` -|openshift-sdn - -.5+|UDP -|`4789` -|VXLAN - -|`6081` -|Geneve - -|`9000`-`9999` -|Host level services, including the node exporter on ports `9100`-`9101`. - -|`500` -|IPsec IKE packets - -|`4500` -|IPsec NAT-T packets - -|TCP/UDP -|`30000`-`32767` -|Kubernetes node port - -|ESP -|N/A -|IPsec Encapsulating Security Payload (ESP) - -|=== - -.Ports used for all-machine to control plane communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|TCP -|`6443` -|Kubernetes API - -|=== - -.Ports used for control plane machine to control plane machine communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|TCP -|`2379`-`2380` -|etcd server and peer ports - -|=== - -ifdef::vsphere[] -[discrete] -== Ethernet adaptor hardware address requirements - -When provisioning VMs for the cluster, the ethernet interfaces configured for -each VM must use a MAC address from the VMware Organizationally Unique -Identifier (OUI) allocation ranges: - -* `00:05:69:00:00:00` to `00:05:69:FF:FF:FF` -* `00:0c:29:00:00:00` to `00:0c:29:FF:FF:FF` -* `00:1c:14:00:00:00` to `00:1c:14:FF:FF:FF` -* `00:50:56:00:00:00` to `00:50:56:3F:FF:FF` - -If a MAC address outside the VMware OUI is used, the cluster installation will -not succeed. -endif::vsphere[] - -ifdef::vsphere[] -:!vsphere: -endif::[] - -ifndef::azure,gcp[] -[discrete] -== NTP configuration for user-provisioned infrastructure - -{product-title} clusters are configured to use a public Network Time Protocol (NTP) server by default. If you want to use a local enterprise NTP server, or if your cluster is being deployed in a disconnected network, you can configure the cluster to use a specific time server. For more information, see the documentation for _Configuring chrony time service_. - -ifndef::ibm-z,ibm-z-restricted[] -If a DHCP server provides NTP server information, the chrony time service on the {op-system-first} machines read the information and can sync the clock with the NTP servers. 
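As an illustrative verification only, and not part of the original module, you can inspect which time sources chrony selected on a running {op-system} node, for example:

[source,terminal]
----
$ oc debug node/<node_name> -- chroot /host chronyc sources -v
----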
-endif::ibm-z,ibm-z-restricted[] -endif::azure,gcp[] - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z-restricted: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!gcp: -:!restricted: -endif::[] diff --git a/modules/installation-nutanix-config-yaml.adoc b/modules/installation-nutanix-config-yaml.adoc deleted file mode 100644 index 716b652f598b..000000000000 --- a/modules/installation-nutanix-config-yaml.adoc +++ /dev/null @@ -1,299 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/configuring-iam-nutanix.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:default: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:restricted: -endif::[] - -:_content-type: REFERENCE -[id="installation-nutanix-config-yaml_{context}"] -= Sample customized install-config.yaml file for Nutanix - -You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. - -[IMPORTANT] -==== -This sample YAML file is provided for reference only. You must obtain your `install-config.yaml` file by using the installation program and modify it. 
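For example, a typical workflow, shown here only as a sketch, is to generate the initial file with the installation program and then edit it before you create the cluster:

[source,terminal]
----
$ ./openshift-install create install-config --dir <installation_directory>
----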
-==== - -ifdef::default[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- hyperthreading: Enabled <3> - name: worker - replicas: 3 - platform: - nutanix: <4> - cpus: 2 - coresPerSocket: 2 - memoryMiB: 8196 - osDisk: - diskSizeGiB: 120 - categories: <5> - - key: <category_key_name> - value: <category_value> -controlPlane: <2> - hyperthreading: Enabled <3> - name: master - replicas: 3 - platform: - nutanix: <4> - cpus: 4 - coresPerSocket: 2 - memoryMiB: 16384 - osDisk: - diskSizeGiB: 120 - categories: <5> - - key: <category_key_name> - value: <category_value> -metadata: - creationTimestamp: null - name: test-cluster <1> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - nutanix: - apiVIP: 10.40.142.7 <1> - defaultMachinePlatform: - bootType: Legacy - categories: <5> - - key: <category_key_name> - value: <category_value> - project: <7> - type: name - name: <project_name> - ingressVIP: 10.40.142.8 <1> - prismCentral: - endpoint: - address: your.prismcentral.domainname <1> - port: 9440 <1> - password: samplepassword <1> - username: sampleadmin <1> - prismElements: - - endpoint: - address: your.prismelement.domainname - port: 9440 - uuid: 0005b0f1-8f43-a0f2-02b7-3cecef193712 - subnetUUIDs: - - c7938dc6-7659-453e-a688-e26020c68e43 - clusterOSImage: http://example.com/images/rhcos-47.83.202103221318-0-nutanix.x86_64.qcow2 <8> -credentialsMode: Manual -publish: External -pullSecret: '{"auths": ...}' <1> -ifndef::openshift-origin[] -fips: false <9> -sshKey: ssh-ed25519 AAAA... <10> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <9> -endif::openshift-origin[] ----- -<1> Required. The installation program prompts you for this value. -<2> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -<4> Optional: Provide additional configuration for the machine pool parameters for the compute and control plane machines. -<5> Optional: Provide one or more pairs of a prism category key and a prism category value. These category key-value pairs must exist in Prism Central. You can provide separate categories to compute machines, control plane machines, or all machines. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -ifndef::openshift-origin[] -<7> Optional: Specify a project with which VMs are associated. 
Specify either `name` or `uuid` for the project type, and then provide the corresponding UUID or project name. You can associate projects to compute machines, control plane machines, or all machines. -<8> Optional: By default, the installation program downloads and installs the {op-system-first} image. If Prism Central does not have internet access, you can override the default behavior by hosting the {op-system} image on any HTTP server and pointing the installation program to the image. -<9> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<10> Optional: You can provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -ifdef::openshift-origin[] -<7> Optional: By default, the installation program downloads and installs the {op-system-first} image. If Prism Central does not have internet access, you can override the default behavior by hosting the {op-system} image on any HTTP server and pointing the installation program to the image. -<8> Optional: You can provide the `sshKey` value that you use to access the machines in your cluster. -endif::openshift-origin[] -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::default[] - -ifdef::restricted[] -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- hyperthreading: Enabled <3> - name: worker - replicas: 3 - platform: - nutanix: <4> - cpus: 2 - coresPerSocket: 2 - memoryMiB: 8196 - osDisk: - diskSizeGiB: 120 - categories: <5> - - key: <category_key_name> - value: <category_value> -controlPlane: <2> - hyperthreading: Enabled <3> - name: master - replicas: 3 - platform: - nutanix: <4> - cpus: 4 - coresPerSocket: 2 - memoryMiB: 16384 - osDisk: - diskSizeGiB: 120 - categories: <5> - - key: <category_key_name> - value: <category_value> -metadata: - creationTimestamp: null - name: test-cluster <1> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <6> - serviceNetwork: - - 172.30.0.0/16 -platform: - nutanix: - apiVIP: 10.40.142.7 <1> - ingressVIP: 10.40.142.8 <1> - defaultMachinePlatform: - bootType: Legacy - categories: <5> - - key: <category_key_name> - value: <category_value> - project: <7> - type: name - name: <project_name> - prismCentral: - endpoint: - address: your.prismcentral.domainname <1> - port: 9440 <1> - password: samplepassword <1> - username: sampleadmin <1> - prismElements: - - endpoint: - address: your.prismelement.domainname - port: 9440 - uuid: 0005b0f1-8f43-a0f2-02b7-3cecef193712 - subnetUUIDs: - - c7938dc6-7659-453e-a688-e26020c68e43 - clusterOSImage: http://example.com/images/rhcos-47.83.202103221318-0-nutanix.x86_64.qcow2 <8> -credentialsMode: Manual -publish: External -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <9> -ifndef::openshift-origin[] -fips: false <10> -sshKey: ssh-ed25519 AAAA... 
<11> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: ssh-ed25519 AAAA... <10> -endif::openshift-origin[] -ifndef::openshift-origin[] -additionalTrustBundle: | <12> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <13> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <11> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <12> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] ----- -<1> Required. The installation program prompts you for this value. -<2> The `controlPlane` section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`, and the first line of the `controlPlane` section must not. Although both sections currently define a single machine pool, it is possible that future versions of {product-title} will support defining multiple compute pools during installation. Only one control plane pool is used. -<3> Whether to enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. -==== -<4> Optional: Provide additional configuration for the machine pool parameters for the compute and control plane machines. -<5> Optional: Provide one or more pairs of a prism category key and a prism category value. These category key-value pairs must exist in Prism Central. You can provide separate categories to compute machines, control plane machines, or all machines. -<6> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<7> Optional: Specify a project with which VMs are associated. Specify either `name` or `uuid` for the project type, and then provide the corresponding UUID or project name. You can associate projects to compute machines, control plane machines, or all machines. -<8> Optional: By default, the installation program downloads and installs the {op-system-first} image. If Prism Central does not have internet access, you can override the default behavior by hosting the {op-system} image on any HTTP server or Nutanix Objects and pointing the installation program to the image. -<9> For `<local_registry>`, specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example `registry.example.com` or `registry.example.com:5000`. 
For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -ifndef::openshift-origin[] -<10> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated or Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -<11> Optional: You can provide the `sshKey` value that you use to access the machines in your cluster. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::openshift-origin[] -ifdef::openshift-origin[] -<10> Optional: You can provide the `sshKey` value that you use to access the machines in your cluster. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::openshift-origin[] -ifndef::openshift-origin[] -<12> Provide the contents of the certificate file that you used for your mirror registry. -<13> Provide these values from the `metadata.name: release-0` section of the `imageContentSourcePolicy.yaml` file that was created when you mirrored the registry. -endif::openshift-origin[] -ifdef::openshift-origin[] -<11> Provide the contents of the certificate file that you used for your mirror registry. -<12> Provide these values from the `metadata.name: release-0` section of the `imageContentSourcePolicy.yaml` file that was created when you mirrored the registry. -endif::openshift-origin[] -endif::restricted[] - -ifeval::["{context}" == "installing-nutanix-installer-provisioned"] -:!default: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-nutanix-installer-provisioned"] -:!restricted: -endif::[] diff --git a/modules/installation-nutanix-download-rhcos.adoc b/modules/installation-nutanix-download-rhcos.adoc deleted file mode 100644 index 8e8088fa2a48..000000000000 --- a/modules/installation-nutanix-download-rhcos.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: PROCEDURE -[id="installation-nutanix-download-rhcos_{context}"] -= Downloading the RHCOS cluster image - -Prism Central requires access to the {op-system-first} image to install the cluster. You can use the installation program to locate and download the {op-system} image and make it available through an internal HTTP server or Nutanix Objects. - -.Prerequisites - -* Obtain the {product-title} installation program and the pull secret for your cluster. For a restricted network installation, these files are on your mirror host. - -.Procedure - -. Change to the directory that contains the installation program and run the following command: -+ -[source,terminal] ----- -$ ./openshift-install coreos print-stream-json ----- - -. Use the output of the command to find the location of the Nutanix image, and click the link to download it. 
-+ -.Example output -[source, terminal] ----- -"nutanix": { - "release": "411.86.202210041459-0", - "formats": { - "qcow2": { - "disk": { - "location": "https://rhcos.mirror.openshift.com/art/storage/releases/rhcos-4.11/411.86.202210041459-0/x86_64/rhcos-411.86.202210041459-0-nutanix.x86_64.qcow2", - "sha256": "42e227cac6f11ac37ee8a2f9528bb3665146566890577fd55f9b950949e5a54b" ----- - -. Make the image available through an internal HTTP server or Nutanix Objects. - -. Note the location of the downloaded image. You update the `platform` section in the installation configuration file (`install-config.yaml`) with the image's location before deploying the cluster. - -.Snippet of an `install-config.yaml` file that specifies the {op-system} image - -[source,yaml] ----- -platform: - nutanix: - clusterOSImage: http://example.com/images/rhcos-411.86.202210041459-0-nutanix.x86_64.qcow2 ----- diff --git a/modules/installation-nutanix-infrastructure.adoc b/modules/installation-nutanix-infrastructure.adoc deleted file mode 100644 index cf97a0bf75cf..000000000000 --- a/modules/installation-nutanix-infrastructure.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/preparing-to-install-nutanix.adoc - -:_content-type: CONCEPT -[id="installation-nutanix-infrastructure_{context}"] -= Nutanix version requirements - -You must install the {product-title} cluster to a Nutanix environment that meets the following requirements. - - -.Version requirements for Nutanix virtual environments -[cols=2, options="header"] -|=== -|Component |Required version -|Nutanix AOS | 5.20.4+ or 6.5.1+ -|Prism Central | 2022.4+ -|=== diff --git a/modules/installation-nutanix-installer-infra-reqs.adoc b/modules/installation-nutanix-installer-infra-reqs.adoc deleted file mode 100644 index d378ac009ca7..000000000000 --- a/modules/installation-nutanix-installer-infra-reqs.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/preparing-to-install-on-nutanix.adoc - -:_content-type: CONCEPT -[id="installation-nutanix-installer-infra-reqs_{context}"] -= Environment requirements - -Before you install an {product-title} cluster, review the following Nutanix AOS environment requirements. - -[id="installation-nutanix-installer-infra-reqs-account_{context}"] -== Required account privileges - -Installing a cluster to Nutanix requires an account with administrative privileges to read and create the required resources. - -[id="installation-nutanix-installer-infra-reqs-limits_{context}"] -== Cluster limits - -Available resources vary between clusters. The number of possible clusters within a Nutanix environment is limited primarily by available storage space and any limitations associated with the resources that the cluster creates, and resources that you require to deploy the cluster, such a IP addresses and networks. - -[id="installation-nutanix-installer-infra-reqs-resources_{context}"] -== Cluster resources - -A minimum of 800 GB of storage is required to use a standard cluster. - -When you deploy a {product-title} cluster that uses installer-provisioned infrastructure, the installation program must be able to create several resources in your Nutanix instance. Although these resources use 856 GB of storage, the bootstrap node is destroyed as part of the installation process. 
- -A standard {product-title} installation creates the following resources: - -* 1 label -* Virtual machines: -** 1 disk image -** 1 temporary bootstrap node -** 3 control plane nodes -** 3 compute machines - -[id="installation-nutanix-installer-infra-requirements-networking_{context}"] -== Networking requirements - -You must use AHV IP Address Management (IPAM) for the network and ensure that it is configured to provide persistent IP addresses to the cluster machines. Additionally, create the following networking resources before you install the {product-title} cluster: - -* IP addresses -* DNS records - -[NOTE] -==== -It is recommended that each {product-title} node in the cluster have access to a Network Time Protocol (NTP) server that is discoverable via DHCP. Installation is possible without an NTP server. However, an NTP server prevents errors typically associated with asynchronous server clocks. -==== - -[id="installation-nutanix-installer-infra-reqs-_{context}"] -=== Required IP Addresses -An installer-provisioned installation requires two static virtual IP (VIP) addresses: - -* A VIP address for the API is required. This address is used to access the cluster API. -* A VIP address for ingress is required. This address is used for cluster ingress traffic. - -You specify these IP addresses when you install the {product-title} cluster. - -[id="installation-nutanix-installer-infra-reqs-dns-records_{context}"] -=== DNS records -You must create DNS records for two static IP addresses in the appropriate DNS server for the Nutanix instance that hosts your {product-title} cluster. In each record, `<cluster_name>` is the cluster name and `<base_domain>` is the cluster base domain that you specify when you install the cluster. - -A complete DNS record takes the form: `<component>.<cluster_name>.<base_domain>.`. - -.Required DNS records -[cols="1a,5a,3a",options="header"] -|=== - -|Component -|Record -|Description - -|API VIP -|`api.<cluster_name>.<base_domain>.` -|This DNS A/AAAA or CNAME record must point to the load balancer -for the control plane machines. This record must be resolvable by both clients -external to the cluster and from all the nodes within the cluster. - -|Ingress VIP -|`*.apps.<cluster_name>.<base_domain>.` -|A wildcard DNS A/AAAA or CNAME record that points to the load balancer that targets the -machines that run the Ingress router pods, which are the worker nodes by -default. This record must be resolvable by both clients external to the cluster -and from all the nodes within the cluster. 
-|=== diff --git a/modules/installation-obtaining-installer.adoc b/modules/installation-obtaining-installer.adoc deleted file mode 100644 index 57ec0cdd6673..000000000000 --- a/modules/installation-obtaining-installer.adoc +++ /dev/null @@ -1,184 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-default.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc -// * installing/installing_azure/installing-azure-customizations.adoc -// * installing/installing_azure/installing-azure-default.adoc -// * installing/installing_azure/installing-azure-government-region.adoc -// * installing/installing_azure/installing-azure-private.adoc -// * installing/installing_azure/installing-azure-vnet.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-default.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc - - 
-ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:private: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="installation-obtaining-installer_{context}"] -= Obtaining the installation program - -Before you install {product-title}, download the installation file on -ifdef::restricted[] -the mirror host. -endif::restricted[] -ifndef::restricted[] -ifdef::ibm-z,ibm-z-kvm[ your provisioning machine.] -ifndef::ibm-z,ibm-z-kvm,private[ the host you are using for installation.] -ifdef::private[] -a bastion host on your cloud network or a machine that has access to the to the network through a VPN. - -For more information about private cluster installation requirements, see "Private clusters". -endif::private[] -endif::restricted[] -//mpytlak: Added "private" in the context of a review for the IBM Cloud VPC private work. In an effort to keep updates to other platforms separate, I will open a doc story for each platform that supports a private install. - -.Prerequisites - -ifdef::ibm-z,ibm-z-kvm,private,vsphere[] -* You have a machine that runs Linux, for example Red Hat Enterprise Linux 8, with 500 MB of local disk space. -endif::ibm-z,ibm-z-kvm,private,vsphere[] -ifdef::vsphere[] -+ -[IMPORTANT] -==== -If you attempt to run the installation program on macOS, a known issue related to the `golang` compiler causes the installation of the {product-title} cluster to fail. For more information about this issue, see the section named "Known Issues" in the _{product-title} {product-version} release notes_ document. -==== -endif::vsphere[] -ifndef::ibm-z,ibm-z-kvm,private,vsphere[* You have a computer that runs Linux or macOS, with 500 MB of local disk space.] - -.Procedure - -ifndef::openshift-origin[] -. Access the link:https://console.redhat.com/openshift/install[Infrastructure Provider] page on the {cluster-manager} site. If you have a Red Hat account, log in with your credentials. If you do not, create an account. -ifndef::ash[] -. Select your infrastructure provider. -endif::ash[] -ifdef::ash[] -. Select *Azure* as the cloud provider. -endif::ash[] -. Navigate to the page for your installation type, download the installation program that corresponds with your host operating system and architecture, and place the file in the directory where you will store the installation configuration files. -endif::[] -ifdef::openshift-origin[] -. Download installer from https://github.com/openshift/okd/releases -endif::[] - -+ -[IMPORTANT] -==== -The installation program creates several files on the computer that you use to install your cluster. You must keep the installation program and the files that the installation program creates after you finish installing the cluster. Both files are required to delete the cluster. 
-==== -+ -[IMPORTANT] -==== -Deleting the files created by the installation program does not remove your cluster, even if the cluster failed during installation. To remove your cluster, complete the {product-title} uninstallation procedures for your specific cloud provider. -==== - -. Extract the installation program. For example, on a computer that uses a Linux -operating system, run the following command: -+ -[source,terminal] ----- -$ tar -xvf openshift-install-linux.tar.gz ----- - -. Download your installation {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -ifdef::openshift-origin[] -+ -Using a {cluster-manager-url-pull} is not required. You can use a pull secret for another private registry. Or, if you do not need the cluster to pull images from a private registry, you can use `{"auths":{"fake":{"auth":"aWQ6cGFzcwo="}}}` as the pull secret when prompted during the installation. -+ -If you do not use the {cluster-manager-url-pull}: -+ -* Red Hat Operators are not available. -* The Telemetry and Insights operators do not send data to Red Hat. -* Content from the link:https://catalog.redhat.com/software/containers/explore[Red Hat Ecosystem Catalog Container images] registry, such as image streams and Operators, are not available. -endif::openshift-origin[] - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!private: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vsphere: -endif::[] diff --git a/modules/installation-openshift-local.adoc b/modules/installation-openshift-local.adoc deleted file mode 100644 index 3b963b16e417..000000000000 --- a/modules/installation-openshift-local.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/openshift-overview.adoc -// * installing/index.adoc - -:_content-type: CONCEPT -[id="installation-openshift-local_{context}"] -= OpenShift Local overview - -OpenShift Local supports rapid application development to get started building {product-title} clusters. OpenShift Local is designed to run on a local computer to simplify setup and testing, and to emulate the cloud development environment locally with all of the tools needed to develop container-based applications. - -Regardless of the programming language you use, OpenShift Local hosts your application and brings a minimal, preconfigured Red Hat {product-title} cluster to your local PC without the need for a server-based infrastructure. - -On a hosted environment, OpenShift Local can create microservices, convert them into images, and run them in Kubernetes-hosted containers directly on your laptop or desktop running Linux, macOS, or Windows 10 or later. 
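
For illustration only (this example is not part of the original module), a typical OpenShift Local workflow on a workstation might look like the following, assuming that the `crc` command-line tool that ships with OpenShift Local is downloaded and available on your `PATH`:

[source,terminal]
----
$ crc setup <1>
$ crc start <2>
----
<1> Prepares the host, for example the virtualization and networking prerequisites.
<2> Creates and starts the minimal, preconfigured local cluster. When the cluster is ready, the tool prints the web console URL and login credentials.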
- -For more information about OpenShift Local, see link:https://developers.redhat.com/products/openshift-local/overview[Red Hat OpenShift Local Overview]. \ No newline at end of file diff --git a/modules/installation-openstack-nfv-requirements.adoc b/modules/installation-openstack-nfv-requirements.adoc deleted file mode 100644 index d072ab75f0da..000000000000 --- a/modules/installation-openstack-nfv-requirements.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-nfv-preparing - -:_content-type: REFERENCE -[id="installation-openstack-nfv-requirements_{context}"] -= Requirements for clusters on {rh-openstack} that use either SR-IOV or OVS-DPDK - -If you use SR-IOV or OVS-DPDK with your deployment, you must meet the following requirements: - -* {rh-openstack} compute nodes must use a flavor that supports huge pages. \ No newline at end of file diff --git a/modules/installation-openstack-ovs-dpdk-performance-profile.adoc b/modules/installation-openstack-ovs-dpdk-performance-profile.adoc deleted file mode 100644 index a699d078c9ea..000000000000 --- a/modules/installation-openstack-ovs-dpdk-performance-profile.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/cnf-create-performance-profiles.adoc -// TODO: ^^^ at the moment - -:_content-type: REFERENCE -[id="installation-openstack-ovs-dpdk-performance-profile_{context}"] -= A performance profile template for clusters that use OVS-DPDK on OpenStack - -To maximize machine performance in a cluster that uses Open vSwitch with the Data Plane Development Kit (OVS-DPDK) on {rh-openstack-first}, you can use a performance profile. - -You can use the following performance profile template to create a profile for your deployment. - -.A performance profile template for clusters that use OVS-DPDK -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: cnf-performanceprofile -spec: - additionalKernelArgs: - - nmi_watchdog=0 - - audit=0 - - mce=off - - processor.max_cstate=1 - - idle=poll - - intel_idle.max_cstate=0 - - default_hugepagesz=1GB - - hugepagesz=1G - - intel_iommu=on - cpu: - isolated: <CPU_ISOLATED> - reserved: <CPU_RESERVED> - hugepages: - defaultHugepagesSize: 1G - pages: - - count: <HUGEPAGES_COUNT> - node: 0 - size: 1G - nodeSelector: - node-role.kubernetes.io/worker: '' - realTimeKernel: - enabled: false - globallyDisableIrqLoadBalancing: true ----- - -Insert values that are appropriate for your configuration for the `CPU_ISOLATED`, `CPU_RESERVED`, and `HUGEPAGES_COUNT` keys. - -To learn how to create and use performance profiles, see the "Creating a performance profile" page in the "Scalability and performance" section of the {product-title} documentation. 
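
For illustration only, the following fragment shows one way to fill in the placeholder keys, assuming a hypothetical worker node with 20 CPUs where two CPUs are reserved for housekeeping and sixteen 1 GB huge pages are allocated on NUMA node 0. Size these values for your own hardware and workload.

[source,yaml]
----
  cpu:
    isolated: "2-19" # assumed: CPUs dedicated to DPDK and application threads
    reserved: "0-1" # assumed: CPUs reserved for housekeeping and kernel threads
  hugepages:
    defaultHugepagesSize: 1G
    pages:
    - count: 16 # assumed: number of 1 GB huge pages on NUMA node 0
      node: 0
      size: 1G
----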
\ No newline at end of file diff --git a/modules/installation-openstack-ovs-dpdk-requirements.adoc b/modules/installation-openstack-ovs-dpdk-requirements.adoc deleted file mode 100644 index 9e18e27e6188..000000000000 --- a/modules/installation-openstack-ovs-dpdk-requirements.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-nfv-preparing - -:_content-type: REFERENCE -[id="installation-openstack-ovs-dpdk-requirements_{context}"] -= Requirements for clusters on {rh-openstack} that use OVS-DPDK - -To use Open vSwitch with the Data Plane Development Kit (OVS-DPDK) with your deployment, you must meet the following requirements: - -* Plan your {rh-openstack-first} OVS-DPDK deployment by referring to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/assembly_ovsdpdk_parameters[Planning your OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. - -* Configure your {rh-openstack} OVS-DPDK deployment according to link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/network_functions_virtualization_planning_and_configuration_guide/part-dpdk-configure[Configuring an OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. diff --git a/modules/installation-openstack-sr-iov-requirements.adoc b/modules/installation-openstack-sr-iov-requirements.adoc deleted file mode 100644 index 7aef3cd75c16..000000000000 --- a/modules/installation-openstack-sr-iov-requirements.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-nfv-preparing - -:_content-type: REFERENCE -[id="installation-openstack-sr-iov-requirements_{context}"] -= Requirements for clusters on {rh-openstack} that use SR-IOV - -To use single-root I/O virtualization (SR-IOV) with your deployment, you must meet the following requirements: - -* link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html-single/network_functions_virtualization_planning_and_configuration_guide/index#assembly_sriov_parameters[Plan your {rh-openstack-first} SR-IOV deployment]. - -* {product-title} must support the NICs that you use. For a list of supported NICs, see "About Single Root I/O Virtualization (SR-IOV) hardware networks" in the "Hardware networks" subsection of the "Networking" documentation. - -* For each node that will have an attached SR-IOV NIC, your {rh-openstack} cluster must have: - - ** One instance from the {rh-openstack} quota - ** One port attached to the machines subnet - ** One port for each SR-IOV Virtual Function - ** A flavor with at least 16 GB memory, 4 vCPUs, and 25 GB storage space - -* SR-IOV deployments often employ performance optimizations, such as dedicated or isolated CPUs. For maximum performance, configure your underlying {rh-openstack} deployment to use these optimizations, and then run {product-title} compute machines on the optimized infrastructure. 
-** For more information about configuring performant {rh-openstack} compute nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html-single/configuring_the_compute_service_for_instance_creation/configuring-compute-nodes-for-performance#configuring-compute-nodes-for-performance[Configuring Compute nodes for performance]. diff --git a/modules/installation-operators-config.adoc b/modules/installation-operators-config.adoc deleted file mode 100644 index 79cd36cb1995..000000000000 --- a/modules/installation-operators-config.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc - -:_content-type: PROCEDURE -[id="installation-operators-config_{context}"] -= Initial Operator configuration - -After the control plane initializes, you must immediately configure some -Operators so that they all become available. - -.Prerequisites - -* Your control plane has initialized. - -.Procedure - -. Watch the cluster components come online: -+ -[source,terminal] ----- -$ watch -n5 oc get clusteroperators ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.13.0 True False False 19m -baremetal 4.13.0 True False False 37m -cloud-credential 4.13.0 True False False 40m -cluster-autoscaler 4.13.0 True False False 37m -config-operator 4.13.0 True False False 38m -console 4.13.0 True False False 26m -csi-snapshot-controller 4.13.0 True False False 37m -dns 4.13.0 True False False 37m -etcd 4.13.0 True False False 36m -image-registry 4.13.0 True False False 31m -ingress 4.13.0 True False False 30m -insights 4.13.0 True False False 31m -kube-apiserver 4.13.0 True False False 26m -kube-controller-manager 4.13.0 True False False 36m -kube-scheduler 4.13.0 True False False 36m -kube-storage-version-migrator 4.13.0 True False False 37m -machine-api 4.13.0 True False False 29m -machine-approver 4.13.0 True False False 37m -machine-config 4.13.0 True False False 36m -marketplace 4.13.0 True False False 37m -monitoring 4.13.0 True False False 29m -network 4.13.0 True False False 38m -node-tuning 4.13.0 True False False 37m -openshift-apiserver 4.13.0 True False False 32m -openshift-controller-manager 4.13.0 True False False 30m -openshift-samples 4.13.0 True False False 32m -operator-lifecycle-manager 4.13.0 True False False 37m -operator-lifecycle-manager-catalog 4.13.0 True False False 37m -operator-lifecycle-manager-packageserver 4.13.0 True False False 32m -service-ca 4.13.0 True False False 38m -storage 4.13.0 True False False 37m ----- -. Configure the Operators that are not available. 
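
The following example is illustrative and is not part of the original procedure. To see why a particular Operator has not yet reported `Available`, describe its `ClusterOperator` resource and review the conditions in the output. The `image-registry` Operator is used here as a hypothetical example because it commonly requires storage configuration on user-provisioned infrastructure:

[source,terminal]
----
$ oc describe clusteroperator image-registry
----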
diff --git a/modules/installation-osp-about-kuryr.adoc b/modules/installation-osp-about-kuryr.adoc deleted file mode 100644 index 218e06cfc793..000000000000 --- a/modules/installation-osp-about-kuryr.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -:_content-type: CONCEPT -[id="installation-osp-about-kuryr_{context}"] -= About Kuryr SDN - -:FeatureName: Kuryr -include::snippets/deprecated-feature.adoc[] - -link:https://docs.openstack.org/kuryr-kubernetes/latest/[Kuryr] is a container -network interface (CNI) plugin solution that uses the -link:https://docs.openstack.org/neutron/latest/[Neutron] and -link:https://docs.openstack.org/octavia/latest/[Octavia] {rh-openstack-first} services -to provide networking for pods and Services. - -Kuryr and {product-title} integration is primarily designed for -{product-title} clusters running on {rh-openstack} VMs. Kuryr improves the -network performance by plugging {product-title} pods into {rh-openstack} SDN. -In addition, it provides interconnectivity between pods and -{rh-openstack} virtual instances. - -Kuryr components are installed as pods in {product-title} using the -`openshift-kuryr` namespace: - -* `kuryr-controller` - a single service instance installed on a `master` node. -This is modeled in {product-title} as a `Deployment` object. -* `kuryr-cni` - a container installing and configuring Kuryr as a CNI driver on -each {product-title} node. This is modeled in {product-title} as a `DaemonSet` object. - -The Kuryr controller watches the {product-title} API server for pod, service, and -namespace create, update, and delete events. It maps the {product-title} API -calls to corresponding objects in Neutron and Octavia. This means that every -network solution that implements the Neutron trunk port functionality can be -used to back {product-title} via Kuryr. This includes open source solutions -such as Open vSwitch (OVS) and Open Virtual Network (OVN) as well as -Neutron-compatible commercial SDNs. - -Kuryr is recommended for {product-title} deployments on encapsulated {rh-openstack} tenant -networks to avoid double encapsulation, such as running an encapsulated -{product-title} SDN over an {rh-openstack} network. - -If you use provider networks or tenant VLANs, you do not need to use Kuryr to -avoid double encapsulation. The performance benefit is negligible. Depending on -your configuration, though, using Kuryr to avoid having two overlays might still -be beneficial. - -Kuryr is not recommended in deployments where all of the following criteria are true: - -* The {rh-openstack} version is less than 16. -* The deployment uses UDP services, or a large number of TCP services on few hypervisors. - -or - -* The `ovn-octavia` Octavia driver is disabled. -* The deployment uses a large number of TCP services on few hypervisors. 
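
As an illustrative check that is not part of the original module, you can confirm that the Kuryr components described above are running by listing the pods in the `openshift-kuryr` namespace. Expect a `kuryr-controller` pod from the Deployment and one `kuryr-cni` pod per node from the DaemonSet:

[source,terminal]
----
$ oc get pods -n openshift-kuryr
----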
\ No newline at end of file diff --git a/modules/installation-osp-accessing-api-floating.adoc b/modules/installation-osp-accessing-api-floating.adoc deleted file mode 100644 index 313d938c0425..000000000000 --- a/modules/installation-osp-accessing-api-floating.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -ifeval::["{context}" == "installing-openstack-user"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp-user: -endif::[] - -:_content-type: PROCEDURE -[id="installation-osp-accessing-api-floating_{context}"] -= Enabling access with floating IP addresses - -Create floating IP (FIP) addresses for external access to the {product-title} -ifdef::osp-user[API, cluster applications, and the bootstrap process.] -ifndef::osp-user[API and cluster applications.] - -.Procedure - -. Using the {rh-openstack-first} CLI, create the API FIP: -+ -[source,terminal] ----- -$ openstack floating ip create --description "API <cluster_name>.<base_domain>" <external_network> ----- - -. Using the {rh-openstack-first} CLI, create the apps, or Ingress, FIP: -+ -[source,terminal] ----- -$ openstack floating ip create --description "Ingress <cluster_name>.<base_domain>" <external_network> ----- - -ifdef::osp-user[] -. By using the {rh-openstack-first} CLI, create the bootstrap FIP: -+ -[source,terminal] ----- -$ openstack floating ip create --description "bootstrap machine" <external_network> ----- -endif::osp-user[] - -. Add records that follow these patterns to your DNS server for the API and Ingress FIPs: -+ -[source,dns] ----- -api.<cluster_name>.<base_domain>. IN A <API_FIP> -*.apps.<cluster_name>.<base_domain>. IN A <apps_FIP> ----- -+ -[NOTE] -==== -If you do not control the DNS server, you can access the cluster by adding the cluster domain names such as the following to your `/etc/hosts` file: - -* `<api_floating_ip> api.<cluster_name>.<base_domain>` -* `<application_floating_ip> grafana-openshift-monitoring.apps.<cluster_name>.<base_domain>` -* `<application_floating_ip> prometheus-k8s-openshift-monitoring.apps.<cluster_name>.<base_domain>` -* `<application_floating_ip> oauth-openshift.apps.<cluster_name>.<base_domain>` -* `<application_floating_ip> console-openshift-console.apps.<cluster_name>.<base_domain>` -* `application_floating_ip integrated-oauth-server-openshift-authentication.apps.<cluster_name>.<base_domain>` - -The cluster domain names in the `/etc/hosts` file grant access to the web console and the monitoring interface of your cluster locally. You can also use the `kubectl` or `oc`. You can access the user applications by using the additional entries pointing to the <application_floating_ip>. This action makes the API and applications accessible to only you, which is not suitable for production deployment, but does allow installation for development and testing. -==== - -. 
Add the FIPs to the -ifdef::osp-user[`inventory.yaml`] -ifndef::osp-user[`install-config.yaml`] -file as the values of the following -ifdef::osp-user[variables:] -ifndef::osp-user[parameters:] - -ifdef::osp-user[] -* `os_api_fip` -* `os_bootstrap_fip` -* `os_ingress_fip` -endif::osp-user[] - -ifndef::osp-user[] -* `platform.openstack.ingressFloatingIP` -* `platform.openstack.apiFloatingIP` -endif::osp-user[] - -If you use these values, you must also enter an external network as the value of the -ifdef::osp-user[`os_external_network` variable in the `inventory.yaml` file.] -ifndef::osp-user[`platform.openstack.externalNetwork` parameter in the `install-config.yaml` file.] - -[TIP] -==== -You can make {product-title} resources available outside of the cluster by assigning a floating IP address and updating your firewall configuration. -==== - -ifeval::["{context}" == "installing-openstack-user"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp-user: -endif::[] diff --git a/modules/installation-osp-accessing-api-no-floating.adoc b/modules/installation-osp-accessing-api-no-floating.adoc deleted file mode 100644 index 7654adeba22d..000000000000 --- a/modules/installation-osp-accessing-api-no-floating.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp-ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp-kuryr: -:osp-ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp-kuryr: -:osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp-upi: -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:osp-ipi: -:osp-restricted: -endif::[] - -[id="installation-osp-accessing-api-no-floating_{context}"] -= Completing installation without floating IP addresses - -You can install {product-title} on {rh-openstack-first} without providing floating IP addresses. - -In the -ifdef::osp-ipi[`install-config.yaml`] -ifdef::osp-upi[`inventory.yaml`] -file, do not define the following -ifdef::osp-ipi[parameters:] -ifdef::osp-upi[variables:] - -ifdef::osp-ipi[] -* `platform.openstack.ingressFloatingIP` -* `platform.openstack.apiFloatingIP` - -If you cannot provide an external network, you can also leave `platform.openstack.externalNetwork` blank. If you do not provide a value for `platform.openstack.externalNetwork`, a router is not created for you, and, without additional action, the installer will fail to retrieve an image from Glance. You must configure external connectivity on your own. 
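
A minimal sketch of the resulting `platform` stanza, using placeholder values that mirror the sample `install-config.yaml` file elsewhere in this documentation and assuming that no external network is available, might look like the following. This fragment is illustrative only:

[source,yaml]
----
platform:
  openstack:
    cloud: mycloud # placeholder clouds.yaml entry
    computeFlavor: m1.xlarge # placeholder flavor
    externalNetwork: "" # left blank; no router is created and you must provide external connectivity
    # apiFloatingIP and ingressFloatingIP are intentionally not set
----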
-endif::osp-ipi[] - -ifdef::osp-upi[] -* `os_api_fip` -* `os_bootstrap_fip` -* `os_ingress_fip` - -If you cannot provide an external network, you can also leave `os_external_network` blank. If you do not provide a value for `os_external_network`, a router is not created for you, and, without additional action, the installer will fail to retrieve an image from Glance. Later in the installation process, when you create network resources, you must configure external connectivity on your own. -endif::osp-upi[] - -If you run the installer -ifdef::osp-upi[with the `wait-for` command] -from a system that cannot reach the cluster API due to a lack of floating IP addresses or name resolution, installation fails. To prevent installation failure in these cases, you can use a proxy network or run the installer from a system that is on the same network as your machines. - -[NOTE] -==== -You can enable name resolution by creating DNS records for the API and Ingress ports. For example: - -[source,dns] ----- -api.<cluster_name>.<base_domain>. IN A <api_port_IP> -*.apps.<cluster_name>.<base_domain>. IN A <ingress_port_IP> ----- - -If you do not control the DNS server, you can add the record to your `/etc/hosts` file. This action makes the API accessible to only you, which is not suitable for production deployment but does allow installation for development and testing. -==== - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp-ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp-kuryr: -:!osp-ipi: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp-kuryr: -:!osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp-upi: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp-upi: -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-restricted"] -:!osp-ipi: -:!osp-restricted: -endif::[] diff --git a/modules/installation-osp-accessing-api.adoc b/modules/installation-osp-accessing-api.adoc deleted file mode 100644 index 22de3d2cddb0..000000000000 --- a/modules/installation-osp-accessing-api.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// -// Stub module. To be used with other FIP OSP modules only. - -[id="installation-osp-accessing-api_{context}"] -= Enabling access to the environment - -At deployment, all {product-title} machines are created in a {rh-openstack-first}-tenant network. Therefore, they are not accessible directly in most {rh-openstack} deployments. - -You can configure {product-title} API and application access by using floating IP addresses (FIPs) during installation. You can also complete an installation without configuring FIPs, but the installer will not configure a way to reach the API or applications externally. 
diff --git a/modules/installation-osp-api-octavia.adoc b/modules/installation-osp-api-octavia.adoc deleted file mode 100644 index 8d04a884c1ee..000000000000 --- a/modules/installation-osp-api-octavia.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/load-balancing-openstack.adoc - -[id="installation-osp-api-octavia_{context}"] -= Scaling clusters for application traffic by using Octavia - -{product-title} clusters that run on {rh-openstack-first} can use the Octavia load balancing service to distribute traffic across multiple virtual machines (VMs) or floating IP addresses. This feature mitigates the bottleneck that single machines or addresses create. - -If your cluster uses Kuryr, the Cluster Network Operator created an internal Octavia load balancer at deployment. You can use this load balancer for application network scaling. - -If your cluster does not use Kuryr, you must create your own Octavia load balancer to use it for application network scaling. \ No newline at end of file diff --git a/modules/installation-osp-api-scaling.adoc b/modules/installation-osp-api-scaling.adoc deleted file mode 100644 index dcbe18fbceb6..000000000000 --- a/modules/installation-osp-api-scaling.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/load-balancing-openstack.adoc - -:_content-type: PROCEDURE -[id="installation-osp-api-scaling_{context}"] -= Scaling clusters by using Octavia - -If you want to use multiple API load balancers, or if your cluster does not use Kuryr, create an Octavia load balancer and then configure your cluster to use it. - -.Prerequisites - -* Octavia is available on your {rh-openstack-first} deployment. - -.Procedure - -. From a command line, create an Octavia load balancer that uses the Amphora driver: -+ -[source,terminal] ----- -$ openstack loadbalancer create --name API_OCP_CLUSTER --vip-subnet-id <id_of_worker_vms_subnet> ----- -+ -You can use a name of your choice instead of `API_OCP_CLUSTER`. - -. After the load balancer becomes active, create listeners: -+ -[source,terminal] ----- -$ openstack loadbalancer listener create --name API_OCP_CLUSTER_6443 --protocol HTTPS--protocol-port 6443 API_OCP_CLUSTER ----- -+ -[NOTE] -==== -To view the status of the load balancer, enter `openstack loadbalancer list`. -==== - -. Create a pool that uses the round robin algorithm and has session persistence enabled: -+ -[source,terminal] ----- -$ openstack loadbalancer pool create --name API_OCP_CLUSTER_pool_6443 --lb-algorithm ROUND_ROBIN --session-persistence type=<source_IP_address> --listener API_OCP_CLUSTER_6443 --protocol HTTPS ----- - -. To ensure that control plane machines are available, create a health monitor: -+ -[source,terminal] ----- -$ openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP API_OCP_CLUSTER_pool_6443 ----- - -. Add the control plane machines as members of the load balancer pool: -+ -[source,terminal] ----- -$ for SERVER in $(MASTER-0-IP MASTER-1-IP MASTER-2-IP) -do - openstack loadbalancer member create --address $SERVER --protocol-port 6443 API_OCP_CLUSTER_pool_6443 -done ----- - -. Optional: To reuse the cluster API floating IP address, unset it: -+ -[source,terminal] ----- -$ openstack floating ip unset $API_FIP ----- - -. 
Add either the unset `API_FIP` or a new address to the created load balancer VIP: -+ -[source,terminal] ----- -$ openstack floating ip set --port $(openstack loadbalancer show -c <vip_port_id> -f value API_OCP_CLUSTER) $API_FIP ----- - -Your cluster now uses Octavia for load balancing. - -[NOTE] -==== -If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora virtual machine (VM). - -You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck. -==== diff --git a/modules/installation-osp-balancing-external-loads.adoc b/modules/installation-osp-balancing-external-loads.adoc deleted file mode 100644 index 36b26703c599..000000000000 --- a/modules/installation-osp-balancing-external-loads.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-load-balancing.adoc - -[id="installation-osp-balancing-external-loads_{context}"] -= Configuring an external load balancer - -Configure an external load balancer in {rh-openstack-first} to use your own load balancer, resolve external networking needs, or scale beyond what the default {product-title} load balancer can provide. - -The load balancer serves ports 6443, 443, and 80 to any users of the system. Port 22623 serves Ignition startup configurations to the {product-title} machines and must not be reachable from outside the cluster. - -.Prerequisites - -* Access to a {rh-openstack} administrator's account -* The https://docs.openstack.org/python-openstackclient/latest/[{rh-openstack} client] installed on the target environment - -.Procedure - -. Using the {rh-openstack} CLI, add floating IP addresses to all of the control plane machines: -+ -[source,terminal] ----- -$ openstack floating ip create --port master-port-0 <public network> ----- -+ -[source,terminal] ----- -$ openstack floating ip create --port master-port-1 <public network> ----- -+ -[source,terminal] ----- -$ openstack floating ip create --port master-port-2 <public network> ----- - -. View the new floating IPs: -+ -[source,terminal] ----- -$ openstack server list ----- - -. Incorporate the listed floating IP addresses into the load balancer configuration to allow access the cluster via port 6443. -+ -.A HAProxy configuration for port 6443 -[source,txt] ----- -listen <cluster name>-api-6443 - bind 0.0.0.0:6443 - mode tcp - balance roundrobin - server <cluster name>-master-2 <floating ip>:6443 check - server <cluster name>-master-0 <floating ip>:6443 check - server <cluster name>-master-1 <floating ip>:6443 check ----- - -. Repeat the previous three steps for ports 443 and 80. - -. Enable network access from the load balancer network to the control plane machines on ports 6443, 443, and 80: -+ -[source,terminal] ----- -$ openstack security group rule create master --remote-ip <load balancer CIDR> --ingress --protocol tcp --dst-port 6443 ----- -+ -[source,terminal] ----- -$ openstack security group rule create master --remote-ip <load balancer CIDR> --ingress --protocol tcp --dst-port 443 ----- -+ -[source,terminal] ----- -$ openstack security group rule create master --remote-ip <load balancer CIDR> --ingress --protocol tcp --dst-port 80 ----- - -[TIP] -You can also specify a particular IP address with `/32`. - -. 
Update the DNS entry for `api.<cluster name>.<base domain>` to point to the new load balancer: -+ -[source,txt] ----- -<load balancer ip> api.<cluster-name>.<base domain> ----- -+ -The external load balancer is now available. - -. Verify the load balancer's functionality by using the following curl command: -+ -[source,terminal] ----- -$ curl https://<loadbalancer-ip>:6443/version --insecure ----- -+ -The output resembles the following example: -+ -[source,json] ----- -{ - "major": "1", - "minor": "11+", - "gitVersion": "v1.11.0+ad103ed", - "gitCommit": "ad103ed", - "gitTreeState": "clean", - "buildDate": "2019-01-09T06:44:10Z", - "goVersion": "go1.10.3", - "compiler": "gc", - "platform": "linux/amd64" -} ----- - -. Optional: Verify that the Ignition configuration files are available only from -within the cluster by running a curl command on port 22623 from outside the cluster: -+ -[source,terminal] ----- -$ curl https://<loadbalancer ip>:22623/config/master --insecure ----- -+ -The command fails. diff --git a/modules/installation-osp-bootstrap-machine.adoc b/modules/installation-osp-bootstrap-machine.adoc deleted file mode 100644 index 32581e3744a8..000000000000 --- a/modules/installation-osp-bootstrap-machine.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-bootstrap-machine_{context}"] -= Bootstrap machine - -During installation, a bootstrap machine is temporarily provisioned to stand up the -control plane. After the production control plane is ready, the bootstrap -machine is deprovisioned. - -The bootstrap machine requires: - -* An instance from the {rh-openstack} quota -* A port from the {rh-openstack} quota -* A flavor with at least 16 GB memory and 4 vCPUs -* At least 100 GB storage space from the {rh-openstack} quota diff --git a/modules/installation-osp-config-yaml.adoc b/modules/installation-osp-config-yaml.adoc deleted file mode 100644 index f9de1a04f30a..000000000000 --- a/modules/installation-osp-config-yaml.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-config-yaml_{context}"] -= Sample customized `install-config.yaml` file for {rh-openstack} - -This sample `install-config.yaml` demonstrates all of the possible {rh-openstack-first} -customization options. - -[IMPORTANT] -This sample file is provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: - name: master - platform: {} - replicas: 3 -compute: -- name: worker - platform: - openstack: - type: ml.large - replicas: 3 -metadata: - name: example -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - serviceNetwork: - - 172.30.0.0/16 - networkType: OVNKubernetes -platform: - openstack: - cloud: mycloud - externalNetwork: external - computeFlavor: m1.xlarge - apiFloatingIP: 128.0.0.1 -ifndef::openshift-origin[] -fips: false -endif::openshift-origin[] -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... 
----- diff --git a/modules/installation-osp-configuring-api-floating-ip.adoc b/modules/installation-osp-configuring-api-floating-ip.adoc deleted file mode 100644 index 93ee17222636..000000000000 --- a/modules/installation-osp-configuring-api-floating-ip.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="installation-osp-configuring-api-floating-ip_{context}"] -= Configuring application access with floating IP addresses - -After you install {product-title}, configure {rh-openstack-first} to allow application network traffic. - -[NOTE] -==== -You do not need to perform this procedure if you provided values for `platform.openstack.apiFloatingIP` and `platform.openstack.ingressFloatingIP` in the `install-config.yaml` file, or `os_api_fip` and `os_ingress_fip` in the `inventory.yaml` playbook, during installation. The floating IP addresses are already set. -==== - -.Prerequisites - -* {product-title} cluster must be installed -* Floating IP addresses are enabled as described in the {product-title} on {rh-openstack} installation documentation. - -.Procedure - -After you install the {product-title} cluster, attach a floating IP address to the ingress port: - -. Show the port: -+ -[source,terminal] ----- -$ openstack port show <cluster_name>-<cluster_ID>-ingress-port ----- - -. Attach the port to the IP address: -+ -[source,terminal] ----- -$ openstack floating ip set --port <ingress_port_ID> <apps_FIP> ----- - -. Add a wildcard `A` record for `*apps.` to your DNS file: -+ -[source,dns] ----- -*.apps.<cluster_name>.<base_domain> IN A <apps_FIP> ----- - -[NOTE] -==== -If you do not control the DNS server but want to enable application access for non-production purposes, you can add these hostnames to `/etc/hosts`: - -[source,dns] ----- -<apps_FIP> console-openshift-console.apps.<cluster name>.<base domain> -<apps_FIP> integrated-oauth-server-openshift-authentication.apps.<cluster name>.<base domain> -<apps_FIP> oauth-openshift.apps.<cluster name>.<base domain> -<apps_FIP> prometheus-k8s-openshift-monitoring.apps.<cluster name>.<base domain> -<apps_FIP> <app name>.apps.<cluster name>.<base domain> ----- -==== diff --git a/modules/installation-osp-configuring-sr-iov.adoc b/modules/installation-osp-configuring-sr-iov.adoc deleted file mode 100644 index d189e4ff37f2..000000000000 --- a/modules/installation-osp-configuring-sr-iov.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-configuring-sr-iov_{context}"] -= Creating SR-IOV networks for compute machines - -If your {rh-openstack-first} deployment supports link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html-single/network_functions_virtualization_planning_and_configuration_guide/index#assembly_sriov_parameters[single root I/O virtualization (SR-IOV)], you can provision SR-IOV networks that compute machines run on. - -[NOTE] -==== -The following instructions entail creating an external flat network and an external, VLAN-based network that can be attached to a compute machine. Depending on your {rh-openstack} deployment, other network types might be required. -==== - -.Prerequisites - -* Your cluster supports SR-IOV. 
-+ -[NOTE] -==== -If you are unsure about what your cluster supports, review the {product-title} SR-IOV hardware networks documentation. -==== - -* You created radio and uplink provider networks as part of your {rh-openstack} deployment. The names `radio` and `uplink` are used in all example commands to represent these networks. - -.Procedure - -. On a command line, create a radio {rh-openstack} network: -+ -[source,terminal] ----- -$ openstack network create radio --provider-physical-network radio --provider-network-type flat --external ----- - -. Create an uplink {rh-openstack} network: -+ -[source,terminal] ----- -$ openstack network create uplink --provider-physical-network uplink --provider-network-type vlan --external ----- - -. Create a subnet for the radio network: -+ -[source,terminal] ----- -$ openstack subnet create --network radio --subnet-range <radio_network_subnet_range> radio ----- - -. Create a subnet for the uplink network: -+ -[source,terminal] ----- -$ openstack subnet create --network uplink --subnet-range <uplink_network_subnet_range> uplink ----- - -// . Create a port that allows machines to connect to your cluster and each other: -// + -// [source,terminal] -// ---- -// $ openstack port os_port_worker_0 --network <infrastructure_id>-network --security-group <infrastructure_id>-worker --fixed-ip subnet=<infrastructure_id>-nodes,ip-address=<fixed_IP_address> --allowed-address ip-address=<infrastructure_ID>-ingress-port -// ---- - -// . Create a port for SR-IOV traffic: -// + -// [source,terminal] -// ---- -// $ openstack port create radio_port --vnic-type direct --network radio --fixed-ip subnet=radio,ip-address=<fixed_IP_address> --tag=radio --disable-port-security -// ---- - -// . Create an {rh-openstack} server instance that uses the two ports you created as NICs: -// + -// [source,terminal] -// ---- -// $ openstack server create --image <infrastructure_id>-rhcos --flavor ocp --user-data <ocp project>/build-artifacts/worker.ign --nic port-id=<os_port_worker_0 ID> --nic port-id=<radio_port_ID> --config-drive true worker-<worker_ID>.<cluster_name>.<cluster_domain> -// ---- diff --git a/modules/installation-osp-control-compute-machines.adoc b/modules/installation-osp-control-compute-machines.adoc deleted file mode 100644 index 3bf6e8297515..000000000000 --- a/modules/installation-osp-control-compute-machines.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-sr-iov.adoc - -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp-sr-iov: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-sr-iov"] -:osp-sr-iov: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-ovs-dpdk"] -:osp-sr-iov: -endif::[] - -[id="installation-osp-control-machines_{context}"] -= Control plane machines - -By default, the {product-title} installation process creates three control -plane machines. - -Each machine requires: - -* An instance from the {rh-openstack} quota -* A port from the {rh-openstack} quota -* A flavor with at least 16 GB memory and 4 vCPUs -* At least 100 GB storage space from the {rh-openstack} quota - -[id="installation-osp-compute-machines_{context}"] -= Compute machines - -By default, the {product-title} installation process creates three compute -machines. 
- -Each machine requires: - -* An instance from the {rh-openstack} quota -* A port from the {rh-openstack} quota -* A flavor with at least 8 GB memory and 2 vCPUs -* At least 100 GB storage space from the {rh-openstack} quota - -[TIP] -==== -Compute machines host the applications that you run on {product-title}; aim to -run as many as you can. -==== - -ifdef::osp-sr-iov[] -Additionally, for clusters that use single-root input/output virtualization (SR-IOV), {rh-openstack} compute nodes require a flavor that supports link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/configuring_the_compute_service_for_instance_creation/assembly_configuring-compute-nodes-for-performance_compute-performance#proc_configuring-huge-pages-on-compute-nodes_compute-performance[huge pages]. - -[IMPORTANT] -==== -SR-IOV deployments often employ performance optimizations, such as dedicated or isolated CPUs. For maximum performance, configure your underlying {rh-openstack} deployment to use these optimizations, and then run {product-title} compute machines on the optimized infrastructure. -==== - -[role="_additional-resources"] -.Additional resources - -* For more information about configuring performant {rh-openstack} compute nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html-single/configuring_the_compute_service_for_instance_creation/configuring-compute-nodes-for-performance#configuring-compute-nodes-for-performance[Configuring Compute nodes for performance]. -endif::osp-sr-iov[] - -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp-sr-iov: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-sr-iov"] -:!osp-sr-iov: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-ovs-dpdk"] -:!osp-sr-iov: -endif::[] diff --git a/modules/installation-osp-converting-ignition-resources.adoc b/modules/installation-osp-converting-ignition-resources.adoc deleted file mode 100644 index 28887b106ae7..000000000000 --- a/modules/installation-osp-converting-ignition-resources.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-converting-ignition-resources_{context}"] -= Preparing the bootstrap Ignition files - -The {product-title} installation process relies on bootstrap machines that are created from a bootstrap Ignition configuration file. - -Edit the file and upload it. Then, create a secondary bootstrap Ignition configuration file that -{rh-openstack-first} uses to download the primary file. - -.Prerequisites - -* You have the bootstrap Ignition file that the installer program generates, `bootstrap.ign`. -* The infrastructure ID from the installer's metadata file is set as an environment variable (`$INFRA_ID`). -** If the variable is not set, see *Creating the Kubernetes manifest and Ignition config files*. -* You have an HTTP(S)-accessible way to store the bootstrap Ignition file. -** The documented procedure uses the {rh-openstack} image service (Glance), but you can also use the {rh-openstack} storage service (Swift), Amazon S3, an internal HTTP server, or an ad hoc Nova server. - -.Procedure - -. Run the following Python script. 
The script modifies the bootstrap Ignition file to set the hostname and, if available, CA certificate file when it runs: -+ -[source,python] ----- -import base64 -import json -import os - -with open('bootstrap.ign', 'r') as f: - ignition = json.load(f) - -files = ignition['storage'].get('files', []) - -infra_id = os.environ.get('INFRA_ID', 'openshift').encode() -hostname_b64 = base64.standard_b64encode(infra_id + b'-bootstrap\n').decode().strip() -files.append( -{ - 'path': '/etc/hostname', - 'mode': 420, - 'contents': { - 'source': 'data:text/plain;charset=utf-8;base64,' + hostname_b64 - } -}) - -ca_cert_path = os.environ.get('OS_CACERT', '') -if ca_cert_path: - with open(ca_cert_path, 'r') as f: - ca_cert = f.read().encode() - ca_cert_b64 = base64.standard_b64encode(ca_cert).decode().strip() - - files.append( - { - 'path': '/opt/openshift/tls/cloud-ca-cert.pem', - 'mode': 420, - 'contents': { - 'source': 'data:text/plain;charset=utf-8;base64,' + ca_cert_b64 - } - }) - -ignition['storage']['files'] = files; - -with open('bootstrap.ign', 'w') as f: - json.dump(ignition, f) ----- - -. Using the {rh-openstack} CLI, create an image that uses the bootstrap Ignition file: -+ -[source,terminal] ----- -$ openstack image create --disk-format=raw --container-format=bare --file bootstrap.ign <image_name> ----- - -. Get the image's details: -+ -[source,terminal] ----- -$ openstack image show <image_name> ----- -+ -Make a note of the `file` value; it follows the pattern `v2/images/<image_ID>/file`. -+ -[NOTE] -Verify that the image you created is active. - -. Retrieve the image service's public address: -+ -[source,terminal] ----- -$ openstack catalog show image ----- - -. Combine the public address with the image `file` value and save the result as the storage location. The location follows the pattern `<image_service_public_URL>/v2/images/<image_ID>/file`. - -. Generate an auth token and save the token ID: -+ -[source,terminal] ----- -$ openstack token issue -c id -f value ----- - -. Insert the following content into a file called `$INFRA_ID-bootstrap-ignition.json` and edit the placeholders to match your own values: -+ -[source,json] ----- -{ - "ignition": { - "config": { - "merge": [{ - "source": "<storage_url>", <1> - "httpHeaders": [{ - "name": "X-Auth-Token", <2> - "value": "<token_ID>" <3> - }] - }] - }, - "security": { - "tls": { - "certificateAuthorities": [{ - "source": "data:text/plain;charset=utf-8;base64,<base64_encoded_certificate>" <4> - }] - } - }, - "version": "3.2.0" - } -} ----- -<1> Replace the value of `ignition.config.merge.source` with the bootstrap Ignition file storage URL. -<2> Set `name` in `httpHeaders` to `"X-Auth-Token"`. -<3> Set `value` in `httpHeaders` to your token's ID. -<4> If the bootstrap Ignition file server uses a self-signed certificate, include the base64-encoded certificate. - -. Save the secondary Ignition config file. - -The bootstrap Ignition data will be passed to {rh-openstack} during installation. - -[WARNING] -The bootstrap Ignition file contains sensitive information, like `clouds.yaml` credentials. Ensure that you store it in a secure place, and delete it after you complete the installation process. - -// . If you are using Swift: -// .. Using the Swift CLI, create a container: -// + -// ---- -// $ swift post <container_name> -// ---- -// -// .. Upload the bootstrap Ignition file to the container: -// + -// ---- -// $ swift upload <container_name> bootstrap.ign -// ---- -// -// .. 
Set the container to be read-accessible: -// + -// ---- -// $ swift post <container_name> --read-acl ".r:*,.rlistings" -// ---- -// -// .. Retrieve the storage URL: -// + -// ---- -// $ swift stat -v -// ---- -// ** The URL should follow this format: `<storage_URL>/<container_name>/bootstrap.ign` -// May need to bring this back. diff --git a/modules/installation-osp-creating-bootstrap-machine.adoc b/modules/installation-osp-creating-bootstrap-machine.adoc deleted file mode 100644 index de630603955c..000000000000 --- a/modules/installation-osp-creating-bootstrap-machine.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-bootstrap-machine_{context}"] -= Creating the bootstrap machine on {rh-openstack} - -Create a bootstrap machine and give it the network access it needs to run on {rh-openstack-first}. Red Hat provides an Ansible playbook that you run to simplify this process. - -.Prerequisites -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". -* The `inventory.yaml`, `common.yaml`, and `bootstrap.yaml` Ansible playbooks are in a common directory. -* The `metadata.json` file that the installation program created is in the same directory as the Ansible playbooks. - -.Procedure - -. On a command line, change the working directory to the location of the playbooks. - -. On a command line, run the `bootstrap.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml bootstrap.yaml ----- - -. After the bootstrap server is active, view the logs to verify that the Ignition files were received: -+ -[source,terminal] ----- -$ openstack console log show "$INFRA_ID-bootstrap" ----- diff --git a/modules/installation-osp-creating-compute-machines.adoc b/modules/installation-osp-creating-compute-machines.adoc deleted file mode 100644 index 784442e4a486..000000000000 --- a/modules/installation-osp-creating-compute-machines.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-compute-machines_{context}"] -= Creating compute machines on {rh-openstack} - -After standing up the control plane, create compute machines. Red Hat provides an Ansible playbook that you run to simplify this process. - -.Prerequisites -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". -* The `inventory.yaml`, `common.yaml`, and `compute-nodes.yaml` Ansible playbooks are in a common directory. -* The `metadata.json` file that the installation program created is in the same directory as the Ansible playbooks. -* The control plane is active. - -.Procedure - -. On a command line, change the working directory to the location of the playbooks. - -. On a command line, run the playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml compute-nodes.yaml ----- - -.Next steps - -* Approve the certificate signing requests for the machines. 
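Approving those certificate signing requests is documented in its own procedure. Purely as an illustration, and not as part of this module, pending requests for the new compute machines can typically be listed and approved with the `oc` CLI:

[source,terminal]
----
$ oc get csr <1>
$ oc adm certificate approve <csr_name> <2>
----
<1> List the certificate signing requests; requests for new nodes report a `Pending` condition.
<2> Approve each pending request by name, and repeat until the new machines appear in the output of `oc get nodes`.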
diff --git a/modules/installation-osp-creating-control-plane-ignition.adoc b/modules/installation-osp-creating-control-plane-ignition.adoc deleted file mode 100644 index f97291eb97ec..000000000000 --- a/modules/installation-osp-creating-control-plane-ignition.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-control-plane-ignition_{context}"] -= Creating control plane Ignition config files on {rh-openstack} - -Installing {product-title} on {rh-openstack-first} on your own infrastructure requires control plane Ignition config files. You must create multiple config files. - -[NOTE] -As with the bootstrap Ignition configuration, you must explicitly define a hostname for each control plane machine. - -.Prerequisites - -* The infrastructure ID from the installation program's metadata file is set as an environment variable (`$INFRA_ID`). -** If the variable is not set, see "Creating the Kubernetes manifest and Ignition config files". - -.Procedure - -* On a command line, run the following Python script: -+ -[source,terminal] ----- -$ for index in $(seq 0 2); do - MASTER_HOSTNAME="$INFRA_ID-master-$index\n" - python -c "import base64, json, sys; -ignition = json.load(sys.stdin); -storage = ignition.get('storage', {}); -files = storage.get('files', []); -files.append({'path': '/etc/hostname', 'mode': 420, 'contents': {'source': 'data:text/plain;charset=utf-8;base64,' + base64.standard_b64encode(b'$MASTER_HOSTNAME').decode().strip(), 'verification': {}}, 'filesystem': 'root'}); -storage['files'] = files; -ignition['storage'] = storage -json.dump(ignition, sys.stdout)" <master.ign >"$INFRA_ID-master-$index-ignition.json" -done ----- -+ -You now have three control plane Ignition files: `<INFRA_ID>-master-0-ignition.json`, `<INFRA_ID>-master-1-ignition.json`, -and `<INFRA_ID>-master-2-ignition.json`. \ No newline at end of file diff --git a/modules/installation-osp-creating-control-plane.adoc b/modules/installation-osp-creating-control-plane.adoc deleted file mode 100644 index 05682083e3d1..000000000000 --- a/modules/installation-osp-creating-control-plane.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-control-plane_{context}"] -= Creating the control plane machines on {rh-openstack} - -Create three control plane machines by using the Ignition config files that you generated. Red Hat provides an Ansible playbook that you run to simplify this process. - -.Prerequisites - -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". -* The infrastructure ID from the installation program's metadata file is set as an environment variable (`$INFRA_ID`). -* The `inventory.yaml`, `common.yaml`, and `control-plane.yaml` Ansible playbooks are in a common directory. -* You have the three Ignition files that were created in "Creating control plane Ignition config files". - -.Procedure - -. On a command line, change the working directory to the location of the playbooks. - -. If the control plane Ignition config files aren't already in your working directory, copy them into it. - -. 
On a command line, run the `control-plane.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml control-plane.yaml ----- - -. Run the following command to monitor the bootstrapping process: -+ -[source,terminal] ----- -$ openshift-install wait-for bootstrap-complete ----- -+ -You will see messages that confirm that the control plane machines are running and have joined the cluster: -+ -[source,terminal] ----- -INFO API v1.26.0 up -INFO Waiting up to 30m0s for bootstrapping to complete... -... -INFO It is now safe to remove the bootstrap resources ----- diff --git a/modules/installation-osp-creating-image.adoc b/modules/installation-osp-creating-image.adoc deleted file mode 100644 index ac4d934ed17c..000000000000 --- a/modules/installation-osp-creating-image.adoc +++ /dev/null @@ -1,57 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-image_{context}"] -= Creating the {op-system-first} image - -The {product-title} installation program requires that a {op-system-first} image be present in the {rh-openstack-first} cluster. Retrieve the latest {op-system} image, then upload it using the {rh-openstack} CLI. - -.Prerequisites - -* The {rh-openstack} CLI is installed. - -.Procedure - -. Log in to the Red Hat Customer Portal's https://access.redhat.com/downloads/content/290[Product Downloads page]. - -. Under *Version*, select the most recent release of {product-title} {product-version} for {op-system-base-full} 8. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal to -the {product-title} version that you install. Use the image versions that match -your {product-title} version if they are available. -==== - -. Download the _{op-system-first} - OpenStack Image (QCOW)_. - -. Decompress the image. -+ -[NOTE] -==== -You must decompress the {rh-openstack} image before the cluster can use it. The name of the downloaded file might not contain a compression extension, like `.gz` or `.tgz`. To find out if or how the file is compressed, in a command line, enter: - -[source,terminal] ----- -$ file <name_of_downloaded_file> ----- - -==== - -. From the image that you downloaded, create an image that is named `rhcos` in your cluster by using the {rh-openstack} CLI: -+ -[source,terminal] ----- -$ openstack image create --container-format=bare --disk-format=qcow2 --file rhcos-${RHCOS_VERSION}-openstack.qcow2 rhcos ----- -+ -[IMPORTANT] -Depending on your {rh-openstack} environment, you might be able to upload the image in either link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/15/html/instances_and_images_guide/index[`.raw` or `.qcow2` formats]. If you use Ceph, you must use the `.raw` format. -+ -[WARNING] -If the installation program finds multiple images with the same name, it chooses one of them at random. To avoid this behavior, create unique names for resources in {rh-openstack}. - -After you upload the image to {rh-openstack}, it is usable in the installation process. 
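As an optional check that is not part of the original procedure, you can confirm with the {rh-openstack} CLI that the image was registered correctly before you continue:

[source,terminal]
----
$ openstack image show rhcos -c status -c disk_format
----

The `status` field reports `active` when the image is ready for use.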
diff --git a/modules/installation-osp-creating-network-resources.adoc b/modules/installation-osp-creating-network-resources.adoc deleted file mode 100644 index 559e59da284c..000000000000 --- a/modules/installation-osp-creating-network-resources.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-creating-network-resources_{context}"] -= Creating network resources on {rh-openstack} - -Create the network resources that an {product-title} on {rh-openstack-first} installation on your own infrastructure requires. To save time, run supplied Ansible playbooks that generate security groups, networks, subnets, routers, and ports. - -.Prerequisites - -* Python 3 is installed on your machine. -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". - -.Procedure - -. Optional: Add an external network value to the `inventory.yaml` playbook: -+ -.Example external network value in the `inventory.yaml` Ansible playbook -[source,yaml] ----- -... - # The public network providing connectivity to the cluster. If not - # provided, the cluster external connectivity must be provided in another - # way. - - # Required for os_api_fip, os_ingress_fip, os_bootstrap_fip. - os_external_network: 'external' -... ----- -+ -[IMPORTANT] -==== -If you did not provide a value for `os_external_network` in the `inventory.yaml` file, you must ensure that VMs can access Glance and an external connection yourself. -==== - -. Optional: Add external network and floating IP (FIP) address values to the `inventory.yaml` playbook: -+ -.Example FIP values in the `inventory.yaml` Ansible playbook -[source,yaml] ----- -... - # OpenShift API floating IP address. If this value is non-empty, the - # corresponding floating IP will be attached to the Control Plane to - # serve the OpenShift API. - os_api_fip: '203.0.113.23' - - # OpenShift Ingress floating IP address. If this value is non-empty, the - # corresponding floating IP will be attached to the worker nodes to serve - # the applications. - os_ingress_fip: '203.0.113.19' - - # If this value is non-empty, the corresponding floating IP will be - # attached to the bootstrap machine. This is needed for collecting logs - # in case of install failure. - os_bootstrap_fip: '203.0.113.20' ----- -+ -[IMPORTANT] -==== -If you do not define values for `os_api_fip` and `os_ingress_fip`, you must perform post-installation network configuration. - -If you do not define a value for `os_bootstrap_fip`, the installer cannot download debugging information from failed installations. - -See "Enabling access to the environment" for more information. -==== - -. On a command line, create security groups by running the `security-groups.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml security-groups.yaml ----- - -. On a command line, create a network, subnet, and router by running the `network.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml network.yaml ----- - -. 
Optional: If you want to control the default resolvers that Nova servers use, run the {rh-openstack} CLI command: -+ -[source,terminal] ----- -$ openstack subnet set --dns-nameserver <server_1> --dns-nameserver <server_2> "$INFRA_ID-nodes" ----- \ No newline at end of file diff --git a/modules/installation-osp-creating-sr-iov-compute-machines.adoc deleted file mode 100644 index 8b91e68e6ac8..000000000000 --- a/modules/installation-osp-creating-sr-iov-compute-machines.adoc +++ /dev/null @@ -1,196 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user-sr-iov.adoc -// * installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc -// -// TODO: Get https://github.com/shiftstack/SRIOV-Compute-Nodes-Ansible-Automation into a supported -// repo, associate playbooks with individual releases, and then embed here. - -:_content-type: PROCEDURE -[id="installation-osp-creating-sr-iov-compute-machines_{context}"] -= Creating compute machines that run on SR-IOV networks - -After standing up the control plane, create compute machines that run on the SR-IOV networks that you created in "Creating SR-IOV networks for compute machines". - - -.Prerequisites -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". -* The `metadata.json` file that the installation program created is in the same directory as the Ansible playbooks. -* The control plane is active. -* You created `radio` and `uplink` SR-IOV networks as described in "Creating SR-IOV networks for compute machines". - -.Procedure - -. On a command line, change the working directory to the location of the `inventory.yaml` and `common.yaml` files. - -. Add the `radio` and `uplink` networks to the end of the `inventory.yaml` file by using the `additionalNetworks` parameter: -+ -[source,yaml] ----- -.... -# If this value is non-empty, the corresponding floating IP will be -# attached to the bootstrap machine. This is needed for collecting logs -# in case of install failure. - os_bootstrap_fip: '203.0.113.20' - - additionalNetworks: - - id: radio - count: 4 <1> - type: direct - port_security_enabled: no - - id: uplink - count: 4 <1> - type: direct - port_security_enabled: no ----- -<1> The `count` parameter defines the number of SR-IOV virtual functions (VFs) to attach to each worker node. In this case, each network has four VFs. - -. 
Replace the content of the `compute-nodes.yaml` file with the following text: -+ -.`compute-nodes.yaml` -[%collapsible] -==== -[source,yaml] ----- -- import_playbook: common.yaml - -- hosts: all - gather_facts: no - - vars: - worker_list: [] - port_name_list: [] - nic_list: [] - - tasks: - # Create the SDN/primary port for each worker node - - name: 'Create the Compute ports' - os_port: - name: "{{ item.1 }}-{{ item.0 }}" - network: "{{ os_network }}" - security_groups: - - "{{ os_sg_worker }}" - allowed_address_pairs: - - ip_address: "{{ os_ingressVIP }}" - with_indexed_items: "{{ [os_port_worker] * os_compute_nodes_number }}" - register: ports - - # Tag each SDN/primary port with cluster name - - name: 'Set Compute ports tag' - command: - cmd: "openstack port set --tag {{ cluster_id_tag }} {{ item.1 }}-{{ item.0 }}" - with_indexed_items: "{{ [os_port_worker] * os_compute_nodes_number }}" - - - name: 'List the Compute Trunks' - command: - cmd: "openstack network trunk list" - when: os_networking_type == "Kuryr" - register: compute_trunks - - - name: 'Create the Compute trunks' - command: - cmd: "openstack network trunk create --parent-port {{ item.1.id }} {{ os_compute_trunk_name }}-{{ item.0 }}" - with_indexed_items: "{{ ports.results }}" - when: - - os_networking_type == "Kuryr" - - "os_compute_trunk_name|string not in compute_trunks.stdout" - - - name: ‘Call additional-port processing’ - include_tasks: additional-ports.yaml - - # Create additional ports in OpenStack - - name: ‘Create additionalNetworks ports’ - os_port: - name: "{{ item.0 }}-{{ item.1.name }}" - vnic_type: "{{ item.1.type }}" - network: "{{ item.1.uuid }}" - port_security_enabled: "{{ item.1.port_security_enabled|default(omit) }}" - no_security_groups: "{{ 'true' if item.1.security_groups is not defined else omit }}" - security_groups: "{{ item.1.security_groups | default(omit) }}" - with_nested: - - "{{ worker_list }}" - - "{{ port_name_list }}" - - # Tag the ports with the cluster info - - name: 'Set additionalNetworks ports tag' - command: - cmd: "openstack port set --tag {{ cluster_id_tag }} {{ item.0 }}-{{ item.1.name }}" - with_nested: - - "{{ worker_list }}" - - "{{ port_name_list }}" - - # Build the nic list to use for server create - - name: Build nic list - set_fact: - nic_list: "{{ nic_list | default([]) + [ item.name ] }}" - with_items: "{{ port_name_list }}" - - # Create the servers - - name: 'Create the Compute servers' - vars: - worker_nics: "{{ [ item.1 ] | product(nic_list) | map('join','-') | map('regex_replace', '(.*)', 'port-name=\\1') | list }}" - os_server: - name: "{{ item.1 }}" - image: "{{ os_image_rhcos }}" - flavor: "{{ os_flavor_worker }}" - auto_ip: no - userdata: "{{ lookup('file', 'worker.ign') | string }}" - security_groups: [] - nics: "{{ [ 'port-name=' + os_port_worker + '-' + item.0|string ] + worker_nics }}" - config_drive: yes - with_indexed_items: "{{ worker_list }}" - ----- -==== - -. 
Insert the following content into a local file that is called `additional-ports.yaml`: -+ -.`additional-ports.yaml` -[%collapsible] -==== -[source,yaml] ----- -# Build a list of worker nodes with indexes -- name: ‘Build worker list’ - set_fact: - worker_list: "{{ worker_list | default([]) + [ item.1 + '-' + item.0 | string ] }}" - with_indexed_items: "{{ [ os_compute_server_name ] * os_compute_nodes_number }}" - -# Ensure that each network specified in additionalNetworks exists -- name: ‘Verify additionalNetworks’ - os_networks_info: - name: "{{ item.id }}" - with_items: "{{ additionalNetworks }}" - register: network_info - -# Expand additionalNetworks by the count parameter in each network definition -- name: ‘Build port and port index list for additionalNetworks’ - set_fact: - port_list: "{{ port_list | default([]) + [ { - 'net_name' : item.1.id, - 'uuid' : network_info.results[item.0].openstack_networks[0].id, - 'type' : item.1.type|default('normal'), - 'security_groups' : item.1.security_groups|default(omit), - 'port_security_enabled' : item.1.port_security_enabled|default(omit) - } ] * item.1.count|default(1) }}" - index_list: "{{ index_list | default([]) + range(item.1.count|default(1)) | list }}" - with_indexed_items: "{{ additionalNetworks }}" - -# Calculate and save the name of the port -# The format of the name is cluster_name-worker-workerID-networkUUID(partial)-count -# i.e. fdp-nz995-worker-1-99bcd111-1 -- name: ‘Calculate port name’ - set_fact: - port_name_list: "{{ port_name_list | default([]) + [ item.1 | combine( {'name' : item.1.uuid | regex_search('([^-]+)') + '-' + index_list[item.0]|string } ) ] }}" - with_indexed_items: "{{ port_list }}" - when: port_list is defined ----- -==== - -. On a command line, run the `compute-nodes.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml compute-nodes.yaml ----- diff --git a/modules/installation-osp-custom-subnet.adoc b/modules/installation-osp-custom-subnet.adoc deleted file mode 100644 index c026151efb4a..000000000000 --- a/modules/installation-osp-custom-subnet.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// - -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -[id="installation-osp-custom-subnet_{context}"] -= Custom subnets in {rh-openstack} deployments - -Optionally, you can deploy a cluster on a {rh-openstack-first} subnet of your choice. The subnet's GUID is passed as the value of `platform.openstack.machinesSubnet` in the `install-config.yaml` file. - -This subnet is used as the cluster's primary subnet. By default, nodes and ports are created on it. You can create nodes and ports on a different {rh-openstack} subnet by setting the value of the `platform.openstack.machinesSubnet` property to the subnet's UUID. - -Before you run the {product-title} installer with a custom subnet, verify that your configuration meets the following requirements: - -* The subnet that is used by `platform.openstack.machinesSubnet` has DHCP enabled. -* The CIDR of `platform.openstack.machinesSubnet` matches the CIDR of `networking.machineNetwork`. -* The installation program user has permission to create ports on this network, including ports with fixed IP addresses. 
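For example, you can check the first two requirements with the {rh-openstack} CLI before you run the installer. This is a sketch only; `<subnet_UUID>` is a placeholder for the subnet that you plan to reference:

[source,terminal]
----
$ openstack subnet show <subnet_UUID> -c cidr -c enable_dhcp
----

The `enable_dhcp` field must report `True`, and the `cidr` value must match the CIDR of `networking.machineNetwork` in your `install-config.yaml` file.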
- -Clusters that use custom subnets have the following limitations: - -* If you plan to install a cluster that uses floating IP addresses, the `platform.openstack.machinesSubnet` subnet must be attached to a router that is connected to the `externalNetwork` network. - -* If the `platform.openstack.machinesSubnet` value is set in the `install-config.yaml` file, the installation program does not create a private network or subnet for your {rh-openstack} machines. - -* You cannot use the `platform.openstack.externalDNS` property at the same time as a custom subnet. To add DNS to a cluster that uses a custom subnet, configure DNS on the {rh-openstack} network. - -[NOTE] -==== -By default, the API VIP takes x.x.x.5 and the Ingress VIP takes x.x.x.7 from your network's CIDR block. To override these default values, -set values for `platform.openstack.apiVIPs` and `platform.openstack.ingressVIPs` that are outside of the DHCP allocation pool. -==== - -[IMPORTANT] -==== -The CIDR ranges for networks are not adjustable after cluster installation. Red Hat does not provide direct guidance on determining the range during cluster installation because it requires careful consideration of the number of created pods per namespace. -==== diff --git a/modules/installation-osp-default-deployment.adoc b/modules/installation-osp-default-deployment.adoc deleted file mode 100644 index fcdb0230f831..000000000000 --- a/modules/installation-osp-default-deployment.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -[id="installation-osp-default-deployment_{context}"] -= Resource guidelines for installing {product-title} on {rh-openstack} - -To support an {product-title} installation, your {rh-openstack-first} quota must meet the following requirements: - -.Recommended resources for a default {product-title} cluster on {rh-openstack} -[options="header"] -|====================================== -|Resource | Value -|Floating IP addresses | 3 -|Ports | 15 -|Routers | 1 -|Subnets | 1 -|RAM | 88 GB -|vCPUs | 22 -|Volume storage | 275 GB -|Instances | 7 -|Security groups | 3 -|Security group rules | 60 -|Server groups | 2 - plus 1 for each additional availability zone in each machine pool -|====================================== - -A cluster might function with fewer than recommended resources, but its performance is not guaranteed. - -[IMPORTANT] -==== -If {rh-openstack} object storage (Swift) is available and operated by a user account with the `swiftoperator` role, it is used as the default backend for the {product-title} image registry. In this case, the volume storage requirement is 175 GB. Swift space requirements vary depending on the size of the image registry. -==== - -[NOTE] -By default, your security group and security group rule quotas might be low. If you encounter problems, run `openstack quota set --secgroups 3 --secgroup-rules 60 <project>` as an administrator to increase them. - -An {product-title} deployment comprises control plane machines, compute machines, and a bootstrap machine. 
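Before you install, you might compare these recommendations against your current project quota. A minimal sketch, assuming `<project>` is your {rh-openstack} project name:

[source,terminal]
----
$ openstack quota show <project>
----

If any of the reported limits fall short of the values in the table, ask an administrator to raise them, for example with the `openstack quota set` command that is mentioned in the preceding note.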
\ No newline at end of file diff --git a/modules/installation-osp-default-kuryr-deployment.adoc b/modules/installation-osp-default-kuryr-deployment.adoc deleted file mode 100644 index 30694dce284e..000000000000 --- a/modules/installation-osp-default-kuryr-deployment.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-default-kuryr-deployment_{context}"] -= Resource guidelines for installing {product-title} on {rh-openstack} with Kuryr - -When using Kuryr SDN, the pods, services, namespaces, and network policies are -using resources from the {rh-openstack} quota; this increases the minimum -requirements. Kuryr also has some additional requirements on top of what a -default install requires. - -Use the following quota to satisfy a default cluster's minimum requirements: - -.Recommended resources for a default {product-title} cluster on {rh-openstack} with Kuryr - -[options="header"] -|============================================================================================== -|Resource | Value -|Floating IP addresses | 3 - plus the expected number of Services of LoadBalancer type -|Ports | 1500 - 1 needed per Pod -|Routers | 1 -|Subnets | 250 - 1 needed per Namespace/Project -|Networks | 250 - 1 needed per Namespace/Project -|RAM | 112 GB -|vCPUs | 28 -|Volume storage | 275 GB -|Instances | 7 -|Security groups | 250 - 1 needed per Service and per NetworkPolicy -|Security group rules | 1000 -|Server groups | 2 - plus 1 for each additional availability zone in each machine pool -|Load balancers | 100 - 1 needed per Service -|Load balancer listeners | 500 - 1 needed per Service-exposed port -|Load balancer pools | 500 - 1 needed per Service-exposed port -|============================================================================================== - -A cluster might function with fewer than recommended resources, but its performance is not guaranteed. - -[IMPORTANT] -==== -If {rh-openstack} object storage (Swift) is available and operated by a user account with the `swiftoperator` role, it is used as the default backend for the {product-title} image registry. In this case, the volume storage requirement is 175 GB. Swift space requirements vary depending on the size of the image registry. -==== - -[IMPORTANT] -==== -If you are using {rh-openstack-first} version 16 with the Amphora driver rather than the OVN Octavia driver, security groups are associated with service accounts instead of user projects. -==== - -Take the following notes into consideration when setting resources: - -* The number of ports that are required is larger than the number of pods. Kuryr -uses ports pools to have pre-created ports ready to be used by pods and speed up -the pods' booting time. - -* Each network policy is mapped into an {rh-openstack} security group, and -depending on the `NetworkPolicy` spec, one or more rules are added to the -security group. - -* Each service is mapped to an {rh-openstack} load balancer. Consider this requirement - when estimating the number of security groups required for the quota. -+ -If you are using -{rh-openstack} version 15 or earlier, or the `ovn-octavia driver`, each load balancer -has a security group with the user project. - -* The quota does not account for load balancer resources (such as VM -resources), but you must consider these resources when you decide the -{rh-openstack} deployment's size. 
The default installation will have more than -50 load balancers; the clusters must be able to accommodate them. -+ -If you are using {rh-openstack} version 16 with the OVN Octavia driver enabled, only one load balancer -VM is generated; services are load balanced through OVN flows. - -An {product-title} deployment comprises control plane machines, compute -machines, and a bootstrap machine. - -To enable Kuryr SDN, your environment must meet the following requirements: - -* Run {rh-openstack} 13+. -* Have Overcloud with Octavia. -* Use Neutron Trunk ports extension. -* Use `openvswitch` firewall driver if ML2/OVS Neutron driver is used instead -of `ovs-hybrid`. diff --git a/modules/installation-osp-deleting-bootstrap-resources.adoc b/modules/installation-osp-deleting-bootstrap-resources.adoc deleted file mode 100644 index 83bf50ad565f..000000000000 --- a/modules/installation-osp-deleting-bootstrap-resources.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-deleting-bootstrap-resources_{context}"] -= Deleting bootstrap resources from {rh-openstack} - -Delete the bootstrap resources that you no longer need. - -.Prerequisites -* You downloaded the modules in "Downloading playbook dependencies". -* You downloaded the playbooks in "Downloading the installation playbooks". -* The `inventory.yaml`, `common.yaml`, and `down-bootstrap.yaml` Ansible playbooks are in a common directory. -* The control plane machines are running. -** If you do not know the status of the machines, see "Verifying cluster status". - -.Procedure - -. On a command line, change the working directory to the location of the playbooks. - -. On a command line, run the `down-bootstrap.yaml` playbook: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml down-bootstrap.yaml ----- - -The bootstrap port, server, and floating IP address are deleted. - -[WARNING] -If you did not disable the bootstrap Ignition file URL earlier, do so now. \ No newline at end of file diff --git a/modules/installation-osp-deploying-bare-metal-machines.adoc b/modules/installation-osp-deploying-bare-metal-machines.adoc deleted file mode 100644 index dacb9ba8f464..000000000000 --- a/modules/installation-osp-deploying-bare-metal-machines.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp-ipi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-osp-deploying-bare-metal-machines_{context}"] -= Deploying a cluster with bare metal machines - -If you want your cluster to use bare metal machines, modify the -ifdef::osp-ipi[`install-config.yaml`] -ifndef::osp-ipi[`inventory.yaml`] -file. Your cluster can have both control plane and compute machines running on bare metal, or just compute machines. - -Bare-metal compute machines are not supported on clusters that use Kuryr. - -[NOTE] -==== -Be sure that your `install-config.yaml` file reflects whether the {rh-openstack} network that you use for bare metal workers supports floating IP addresses or not. 
- -==== - -.Prerequisites - -* The {rh-openstack} link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/bare_metal_provisioning/index[Bare Metal service (Ironic)] is enabled and accessible via the {rh-openstack} Compute API. - -* Bare metal is available as link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/bare_metal_provisioning/configuring-the-bare-metal-provisioning-service-after-deployment#creating-the-bare-metal-flavor_bare-metal-post-deployment[a {rh-openstack} flavor]. - -* The {rh-openstack} network supports both VM and bare metal server attachment. - -* Your network configuration does not rely on a provider network. Provider networks are not supported. - -* If you want to deploy the machines on a pre-existing network, a {rh-openstack} subnet is provisioned. - -* If you want to deploy the machines on an installer-provisioned network, the {rh-openstack} Bare Metal service (Ironic) is able to listen for and interact with Preboot eXecution Environment (PXE) boot machines that run on tenant networks. - -ifdef::osp-ipi[] -* You created an `install-config.yaml` file as part of the {product-title} installation process. -endif::osp-ipi[] - -ifndef::osp-ipi[] -* You created an `inventory.yaml` file as part of the {product-title} installation process. -endif::osp-ipi[] - -.Procedure - -ifdef::osp-ipi[] -. In the `install-config.yaml` file, edit the flavors for machines: -.. If you want to use bare-metal control plane machines, change the value of `controlPlane.platform.openstack.type` to a bare metal flavor. -.. Change the value of `compute.platform.openstack.type` to a bare metal flavor. -.. If you want to deploy your machines on a pre-existing network, change the value of `platform.openstack.machinesSubnet` to the {rh-openstack} subnet UUID of the network. Control plane and compute machines must use the same subnet. -+ -.An example bare metal `install-config.yaml` file -[source,yaml] ----- -controlPlane: - platform: - openstack: - type: <bare_metal_control_plane_flavor> <1> -... - -compute: - - architecture: amd64 - hyperthreading: Enabled - name: worker - platform: - openstack: - type: <bare_metal_compute_flavor> <2> - replicas: 3 -... - -platform: - openstack: - machinesSubnet: <subnet_UUID> <3> -... ----- -<1> If you want to have bare-metal control plane machines, change this value to a bare metal flavor. -<2> Change this value to a bare metal flavor to use for compute machines. -<3> If you want to use a pre-existing network, change this value to the UUID of the {rh-openstack} subnet. - - -Use the updated `install-config.yaml` file to complete the installation process. -The compute machines that are created during deployment use the flavor that you -added to the file. -endif::osp-ipi[] - -ifndef::osp-ipi[] -. In the `inventory.yaml` file, edit the flavors for machines: -.. If you want to use bare-metal control plane machines, change the value of `os_flavor_master` to a bare metal flavor. -.. Change the value of `os_flavor_worker` to a bare metal flavor. -+ -.An example bare metal `inventory.yaml` file -[source,yaml] ----- -all: - hosts: - localhost: - ansible_connection: local - ansible_python_interpreter: "{{ansible_playbook_python}}" - - # User-provided values - os_subnet_range: '10.0.0.0/16' - os_flavor_master: 'my-bare-metal-flavor' <1> - os_flavor_worker: 'my-bare-metal-flavor' <2> - os_image_rhcos: 'rhcos' - os_external_network: 'external' -... 
----- -<1> If you want to have bare-metal control plane machines, change this value to a bare metal flavor. -<2> Change this value to a bare metal flavor to use for compute machines. - -Use the updated `inventory.yaml` file to complete the installation process. -Machines that are created during deployment use the flavor that you -added to the file. -endif::osp-ipi[] - -[NOTE] -==== -The installer may time out while waiting for bare metal machines to boot. - -If the installer times out, restart and then complete the deployment by using the `wait-for` command of the installer. For example: - -[source,terminal] ----- -$ ./openshift-install wait-for install-complete --log-level debug ----- -==== - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp-ipi: -endif::[] diff --git a/modules/installation-osp-deploying-provider-networks-installer.adoc b/modules/installation-osp-deploying-provider-networks-installer.adoc deleted file mode 100644 index ea3d7e679b9c..000000000000 --- a/modules/installation-osp-deploying-provider-networks-installer.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-deploying-provider-networks-installer_{context}"] -= Deploying a cluster that has a primary interface on a provider network - -You can deploy an {product-title} cluster that has its primary network interface on an {rh-openstack-first} provider network. - -.Prerequisites - -* Your {rh-openstack-first} deployment is configured as described by "{rh-openstack} provider network requirements for cluster installation". - -.Procedure - -. In a text editor, open the `install-config.yaml` file. -. Set the value of the `platform.openstack.apiVIPs` property to the IP address for the API VIP. -. Set the value of the `platform.openstack.ingressVIPs` property to the IP address for the Ingress VIP. -. Set the value of the `platform.openstack.machinesSubnet` property to the UUID of the provider network subnet. -. Set the value of the `networking.machineNetwork.cidr` property to the CIDR block of the provider network subnet. - -[IMPORTANT] -==== -The `platform.openstack.apiVIPs` and `platform.openstack.ingressVIPs` properties must both be unassigned IP addresses from the `networking.machineNetwork.cidr` block. -==== - -.Section of an installation configuration file for a cluster that relies on a {rh-openstack} provider network -[source,yaml] ----- - ... - platform: - openstack: - apiVIPs: <1> - - 192.0.2.13 - ingressVIPs: <1> - - 192.0.2.23 - machinesSubnet: fa806b2f-ac49-4bce-b9db-124bc64209bf - # ... - networking: - machineNetwork: - - cidr: 192.0.2.0/24 ----- - -<1> In {product-title} 4.12 and later, the `apiVIP` and `ingressVIP` configuration settings are deprecated. Instead, use a list format to enter values in the `apiVIPs` and `ingressVIPs` configuration settings. - -[WARNING] -==== -You cannot set the `platform.openstack.externalNetwork` or `platform.openstack.externalDNS` parameters while using a provider network for the primary network interface. -==== - -When you deploy the cluster, the installer uses the `install-config.yaml` file to deploy the cluster on the provider network. 
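As an illustrative pre-flight check that is not part of the original procedure, where `<provider_subnet_UUID>` is a placeholder, you can confirm that the VIP addresses you chose fit the provider subnet:

[source,terminal]
----
$ openstack subnet show <provider_subnet_UUID> -c cidr -c allocation_pools
----

The `apiVIPs` and `ingressVIPs` values must be unassigned addresses within the reported `cidr` range, which in turn must match `networking.machineNetwork.cidr` in the `install-config.yaml` file.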
diff --git a/modules/installation-osp-describing-cloud-parameters.adoc b/modules/installation-osp-describing-cloud-parameters.adoc deleted file mode 100644 index cee1f24af9ac..000000000000 --- a/modules/installation-osp-describing-cloud-parameters.adoc +++ /dev/null @@ -1,78 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-describing-cloud-parameters_{context}"] -= Defining parameters for the installation program - -The {product-title} installation program relies on a file that is called `clouds.yaml`. The file describes {rh-openstack-first} configuration parameters, including the project name, log in information, and authorization service URLs. - -.Procedure - -. Create the `clouds.yaml` file: - -** If your {rh-openstack} distribution includes the Horizon web UI, generate a `clouds.yaml` file in it. -+ -[IMPORTANT] -==== -Remember to add a password to the `auth` field. You can also keep secrets in link:https://docs.openstack.org/os-client-config/latest/user/configuration.html#splitting-secrets[a separate file] from `clouds.yaml`. -==== - -** If your {rh-openstack} distribution does not include the Horizon web UI, or you do not want to use Horizon, create the file yourself. For detailed information about `clouds.yaml`, see https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html#config-files[Config files] in the {rh-openstack} documentation. -+ -[source,yaml] ----- -clouds: - shiftstack: - auth: - auth_url: http://10.10.14.42:5000/v3 - project_name: shiftstack - username: shiftstack_user - password: XXX - user_domain_name: Default - project_domain_name: Default - dev-env: - region_name: RegionOne - auth: - username: 'devuser' - password: XXX - project_name: 'devonly' - auth_url: 'https://10.10.14.22:5001/v2.0' ----- - -. If your {rh-openstack} installation uses self-signed certificate authority (CA) certificates for endpoint authentication: -.. Copy the certificate authority file to your machine. -.. Add the `cacerts` key to the `clouds.yaml` file. The value must be an absolute, non-root-accessible path to the CA certificate: -+ -[source,yaml] ----- -clouds: - shiftstack: - ... - cacert: "/etc/pki/ca-trust/source/anchors/ca.crt.pem" ----- -+ -[TIP] -==== -After you run the installer with a custom CA certificate, you can update the certificate by editing the value of the `ca-cert.pem` key in the `cloud-provider-config` keymap. On a command line, run: -[source,terminal] ----- -$ oc edit configmap -n openshift-config cloud-provider-config ----- -==== - -. Place the `clouds.yaml` file in one of the following locations: -.. The value of the `OS_CLIENT_CONFIG_FILE` environment variable -.. The current directory -.. A Unix-specific user configuration directory, for example `~/.config/openstack/clouds.yaml` -.. A Unix-specific site configuration directory, for example `/etc/openstack/clouds.yaml` -+ -The installation program searches for `clouds.yaml` in that order. -//// -[TIP] -To set up an isolated development environment, you can use a bare metal host that runs CentOS 7. See https://github.com/shiftstack-dev-tools/ocp-doit[OpenShift Installer OpenStack Dev Scripts] for details. 
-//// diff --git a/modules/installation-osp-downloading-modules.adoc b/modules/installation-osp-downloading-modules.adoc deleted file mode 100644 index d463e7422725..000000000000 --- a/modules/installation-osp-downloading-modules.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_openstack/installing-openstack-installer-user.adoc -// * installing/installing_openstack/installing-openstack-installer-user-kuryr.adoc -// * installing/installing_openstack/uninstalling-openstack-user.adoc -// -//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE - -ifeval::["{context}" == "installing-openstack-user"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp-user: -endif::[] -ifeval::["{context}" == "uninstalling-openstack-user"] -:osp-user-uninstall: -endif::[] - -:_content-type: PROCEDURE -[id="installation-osp-downloading-modules_{context}"] -= Downloading playbook dependencies - -ifdef::osp-user[] -The Ansible playbooks that simplify the installation process on user-provisioned -infrastructure require several Python modules. On the machine where you will run the installer, -add the modules' repositories and then download them. -endif::osp-user[] - -ifdef::osp-user-uninstall[] -The Ansible playbooks that simplify the removal process on user-provisioned -infrastructure require several Python modules. On the machine where you will run the process, -add the modules' repositories and then download them. -endif::osp-user-uninstall[] - -[NOTE] -These instructions assume that you are using {op-system-base-full} 8. - -.Prerequisites - -* Python 3 is installed on your machine. - -.Procedure - -. On a command line, add the repositories: - -.. Register with Red Hat Subscription Manager: -+ -[source,terminal] ----- -$ sudo subscription-manager register # If not done already ----- - -.. Pull the latest subscription data: -+ -[source,terminal] ----- -$ sudo subscription-manager attach --pool=$YOUR_POOLID # If not done already ----- - -.. Disable the current repositories: -+ -[source,terminal] ----- -$ sudo subscription-manager repos --disable=* # If not done already ----- - -.. Add the required repositories: -+ -[source,terminal] ----- -$ sudo subscription-manager repos \ - --enable=rhel-8-for-x86_64-baseos-rpms \ - --enable=openstack-16-tools-for-rhel-8-x86_64-rpms \ - --enable=ansible-2.9-for-rhel-8-x86_64-rpms \ - --enable=rhel-8-for-x86_64-appstream-rpms ----- - -ifdef::osp-user[] -. Install the modules: -+ -[source,terminal] ----- -$ sudo yum install python3-openstackclient ansible python3-openstacksdk python3-netaddr ----- -endif::osp-user[] - -ifdef::osp-user-uninstall[] -. Install the modules: -+ -[source,terminal] ----- -$ sudo yum install python3-openstackclient ansible python3-openstacksdk ----- -endif::osp-user-uninstall[] - -. 
Ensure that the `python` command points to `python3`: -+ -[source,terminal] ----- -$ sudo alternatives --set python /usr/bin/python3 ----- - -ifeval::["{context}" == "installing-openstack-user"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp-user: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-openstack"] -:!osp-user-uninstall: -endif::[] \ No newline at end of file diff --git a/modules/installation-osp-downloading-playbooks.adoc b/modules/installation-osp-downloading-playbooks.adoc deleted file mode 100644 index 616ce5dbaa8f..000000000000 --- a/modules/installation-osp-downloading-playbooks.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-downloading-playbooks_{context}"] -= Downloading the installation playbooks - -Download Ansible playbooks that you can use to install {product-title} on your own {rh-openstack-first} infrastructure. - -.Prerequisites - -* The curl command-line tool is available on your machine. - -.Procedure - -* To download the playbooks to your working directory, run the following script from a command line: -+ -[source,terminal,subs=attributes+] ----- -$ xargs -n 1 curl -O <<< ' - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/bootstrap.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/common.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/compute-nodes.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/control-plane.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/inventory.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/network.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/security-groups.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-bootstrap.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-compute-nodes.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-control-plane.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-load-balancers.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-network.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-security-groups.yaml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/openstack/down-containers.yaml' ----- - -The playbooks are downloaded to your machine. - -[IMPORTANT] -==== -During the installation process, you can modify the playbooks to configure your deployment. - -Retain all playbooks for the life of your cluster. You must have the playbooks to remove your {product-title} cluster from {rh-openstack}. 
-==== - -[IMPORTANT] -==== -You must match any edits you make in the `bootstrap.yaml`, `compute-nodes.yaml`, `control-plane.yaml`, `network.yaml`, and `security-groups.yaml` files to the corresponding playbooks that are prefixed with `down-`. For example, edits to the `bootstrap.yaml` file must be reflected in the `down-bootstrap.yaml` file, too. If you do not edit both files, the supported cluster removal process will fail. -==== diff --git a/modules/installation-osp-dpdk-binding-vfio-pci.adoc b/modules/installation-osp-dpdk-binding-vfio-pci.adoc deleted file mode 100644 index edf73d787c51..000000000000 --- a/modules/installation-osp-dpdk-binding-vfio-pci.adoc +++ /dev/null @@ -1,193 +0,0 @@ -:_content-type: PROCEDURE -[id="installation-osp-dpdk-binding-vfio-pci_{context}"] -= Binding the vfio-pci kernel driver to NICs - -Compute machines that connect to a virtual function I/O (VFIO) network require the `vfio-pci` kernel driver to be bound to the ports that are attached to a configured network. Create a compute machine set for workers that attach to this VFIO network. - -.Procedure - -. From a command line, retrieve VFIO network UUIDs: -+ -[source,terminal] ----- -$ openstack network show <VFIO_network_name> -f value -c id ----- - -. Create a compute machine set on your cluster from the following template: -+ -[%collapsible] -==== -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 99-vhostuser-bind -spec: - config: - ignition: - version: 2.2.0 - systemd: - units: - - name: vhostuser-bind.service - enabled: true - contents: | - [Unit] - Description=Vhostuser Interface vfio-pci Bind - Wants=network-online.target - After=network-online.target ignition-firstboot-complete.service - [Service] - Type=oneshot - EnvironmentFile=/etc/vhostuser-bind.conf - ExecStart=/usr/local/bin/vhostuser $ARG - [Install] - WantedBy=multi-user.target - storage: - files: - - contents: - inline: vfio-pci - filesystem: root - mode: 0644 - path: /etc/modules-load.d/vfio-pci.conf - - contents: - inline: | - #!/bin/bash - set -e - if [[ "$#" -lt 1 ]]; then - echo "Nework ID not provided, nothing to do" - exit - fi - - source /etc/vhostuser-bind.conf - - NW_DATA="/var/config/openstack/latest/network_data.json" - if [ ! -f ${NW_DATA} ]; then - echo "Network data file not found, trying to download it from nova metadata" - if ! curl http://169.254.169.254/openstack/latest/network_data.json > /tmp/network_data.json; then - echo "Failed to download network data file" - exit 1 - fi - NW_DATA="/tmp/network_data.json" - fi - function parseNetwork() { - local nwid=$1 - local pcis=() - echo "Network ID is $nwid" - links=$(jq '.networks[] | select(.network_id == "'$nwid'") | .link' $NW_DATA) - if [ ${#links} -gt 0 ]; then - for link in $links; do - echo "Link Name: $link" - mac=$(jq -r '.links[] | select(.id == '$link') | .ethernet_mac_address' $NW_DATA) - if [ -n $mac ]; then - pci=$(bindDriver $mac) - pci_ret=$? - if [[ "$pci_ret" -eq 0 ]]; then - echo "$pci bind succesful" - fi - fi - done - fi - } - - function bindDriver() { - local mac=$1 - for file in /sys/class/net/*; do - dev_mac=$(cat $file/address) - if [[ "$mac" == "$dev_mac" ]]; then - name=${file##*\/} - bus_str=$(ethtool -i $name | grep bus) - dev_t=${bus_str#*:} - dev=${dev_t#[[:space:]]} - - echo $dev - - devlink="/sys/bus/pci/devices/$dev" - syspath=$(realpath "$devlink") - if [ ! 
-f "$syspath/driver/unbind" ]; then - echo "File $syspath/driver/unbind not found" - return 1 - fi - if ! echo "$dev">"$syspath/driver/unbind"; then - return 1 - fi - - if [ ! -f "$syspath/driver_override" ]; then - echo "File $syspath/driver_override not found" - return 1 - fi - if ! echo "vfio-pci">"$syspath/driver_override"; then - return 1 - fi - - if [ ! -f "/sys/bus/pci/drivers/vfio-pci/bind" ]; then - echo "File /sys/bus/pci/drivers/vfio-pci/bind not found" - return 1 - fi - if ! echo "$dev">"/sys/bus/pci/drivers/vfio-pci/bind"; then - return 1 - fi - return 0 - fi - done - return 1 - } - - for nwid in "$@"; do - parseNetwork $nwid - done - filesystem: root - mode: 0744 - path: /usr/local/bin/vhostuser - - contents: - inline: | - ARG="be22563c-041e-44a0-9cbd-aa391b439a39,ec200105-fb85-4181-a6af-35816da6baf7" <1> - filesystem: root - mode: 0644 - path: /etc/vhostuser-bind.conf ----- -<1> Replace this value with a comma-separated list of VFIO network UUIDs. -==== -+ -On boot for machines that are part of this set, the MAC addresses of ports are translated into PCI bus IDs. The `vfio-pci` module is bound to any port that is assocated with a network that is identified by the {rh-openstack} network ID. - -.Verification - -. On a compute node, from a command line, retrieve the name of the node by entering: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. Create a shell to debug the node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Change the root directory for the current running process: -+ -[source,terminal] ----- -$ chroot /host ----- - -. Enter the following command to list the kernel drivers that are handling each device on your machine: -+ -[source,terminal] ----- -$ lspci -k ----- -+ -.Example output -[source,terminal] ----- -00:07.0 Ethernet controller: Red Hat, Inc. Virtio network device -Subsystem: Red Hat, Inc. Device 0001 -Kernel driver in use: vfio-pci ----- -+ -In the output of the command, VFIO ethernet controllers use the `vfio-pci` kernel driver. \ No newline at end of file diff --git a/modules/installation-osp-dpdk-exposing-host-interface.adoc b/modules/installation-osp-dpdk-exposing-host-interface.adoc deleted file mode 100644 index 5010e75cf410..000000000000 --- a/modules/installation-osp-dpdk-exposing-host-interface.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_content-type: PROCEDURE -[id="installation-osp-dpdk-exposing-host-interface_{context}"] -= Exposing the host-device interface to the pod - -You can use the Container Network Interface (CNI) plugin to expose an interface that is on the host to the pod. The plugin moves the interface from the namespace of the host network to the namespace of the pod. The pod then has direct control of the interface. 
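The `host-device` attachment in the following procedure identifies the interface by its PCI address. If you do not already know that address, one way to look it up is from a debug shell on the node. The command below is a sketch only; the `-D` flag prints the PCI domain so that the address matches the `pciBusId` format, and the grep pattern and output depend on your hardware:

[source,terminal]
----
$ lspci -D | grep -i ethernet
----

.Example output
[source,terminal]
----
0000:00:04.0 Ethernet controller: Red Hat, Inc. Virtio network device
----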
- -.Procedure - -* Create an additional network attachment with the host-device CNI plugin by using the following object as an example: -+ -[source,yaml] ----- - apiVersion: k8s.cni.cncf.io/v1 - kind: NetworkAttachmentDefinition - metadata: - name: vhostuser1 - namespace: default - spec: - config: '{ "cniVersion": "0.3.1", "name": "hostonly", "type": "host-device", "pciBusId": "0000:00:04.0", "ipam": { } }' ----- - -.Verification - -* From a command line, run the following command to see if networks are created in the namespace: -+ -[source,terminal] ----- -$ oc -n <your_cnf_namespace> get net-attach-def ----- \ No newline at end of file diff --git a/modules/installation-osp-emptying-worker-pools.adoc b/modules/installation-osp-emptying-worker-pools.adoc deleted file mode 100644 index d1f38d77a2b3..000000000000 --- a/modules/installation-osp-emptying-worker-pools.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-emptying-worker-pools_{context}"] -= Emptying compute machine pools - -To proceed with an installation that uses your own infrastructure, set the number of compute machines in the installation configuration file to zero. Later, you create these machines manually. - -.Prerequisites - -* You have the `install-config.yaml` file that was generated by the {product-title} installation program. - -.Procedure - -. On a command line, browse to the directory that contains `install-config.yaml`. - -. From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: - -** To set the value by using a script, run: -+ -[source,terminal] ----- -$ python -c ' -import yaml; -path = "install-config.yaml"; -data = yaml.safe_load(open(path)); -data["compute"][0]["replicas"] = 0; -open(path, "w").write(yaml.dump(data, default_flow_style=False))' ----- - -** To set the value manually, open the file and set the value of `compute.<first entry>.replicas` to `0`. diff --git a/modules/installation-osp-enabling-swift.adoc b/modules/installation-osp-enabling-swift.adoc deleted file mode 100644 index de016f677928..000000000000 --- a/modules/installation-osp-enabling-swift.adoc +++ /dev/null @@ -1,44 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-enabling-swift_{context}"] -= Enabling Swift on {rh-openstack} - -Swift is operated by a user account with the `swiftoperator` role. Add the role to an account before you run the installation program. - -[IMPORTANT] -==== -If link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html-single/storage_guide/index#ch-manage-containers[the {rh-openstack-first} object storage service], commonly known as Swift, is available, {product-title} uses it as the image registry storage. If it is unavailable, the installation program relies on the {rh-openstack} block storage service, commonly known as Cinder. - -If Swift is present and you want to use it, you must enable access to it. If it is not present, or if you do not want to use it, skip this section. 
-==== - -[IMPORTANT] -==== -{rh-openstack} 17 sets the `rgw_max_attr_size` parameter of Ceph RGW to 256 characters. This setting causes issues with uploading container images to the {product-title} registry. You must set the value of `rgw_max_attr_size` to at least 1024 characters. - -Before installation, check if your {rh-openstack} deployment is affected by this problem. If it is, reconfigure Ceph RGW. -==== - -.Prerequisites - -* You have a {rh-openstack} administrator account on the target environment. -* The Swift service is installed. -* On link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html-single/deploying_an_overcloud_with_containerized_red_hat_ceph/index#ceph-rgw[Ceph RGW], the `account in url` option is enabled. - -.Procedure - -To enable Swift on {rh-openstack}: - -. As an administrator in the {rh-openstack} CLI, add the `swiftoperator` role to the account that will access Swift: -+ -[source,terminal] ----- -$ openstack role add --user <user> --project <project> swiftoperator ----- - -Your {rh-openstack} deployment can now use Swift for the image registry. diff --git a/modules/installation-osp-external-lb-config.adoc b/modules/installation-osp-external-lb-config.adoc deleted file mode 100644 index 0ad440b3a7c2..000000000000 --- a/modules/installation-osp-external-lb-config.adoc +++ /dev/null @@ -1,48 +0,0 @@ -:_content-type: REFERENCE -[id="install-osp-external-lb-config_{context}"] -= Installation configuration for a cluster on OpenStack with a user-managed load balancer - -:FeatureName: Deployment on OpenStack with User-Managed Load Balancers -include::snippets/technology-preview.adoc[] - -The following example `install-config.yaml` file demonstrates how to configure a cluster that uses an external, user-managed load balancer rather than the default internal load balancer. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: mydomain.test -compute: -- name: worker - platform: - openstack: - type: m1.xlarge - replicas: 3 -controlPlane: - name: master - platform: - openstack: - type: m1.xlarge - replicas: 3 -metadata: - name: mycluster -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.10.0/24 -platform: - openstack: - cloud: mycloud - machinesSubnet: 8586bf1a-cc3c-4d40-bdf6-c243decc603a <1> - apiVIPs: - - 192.168.10.5 - ingressVIPs: - - 192.168.10.7 - loadBalancer: - type: UserManaged <2> -featureSet: TechPreviewNoUpgrade <3> ----- -<1> Regardless of which load balancer you use, the load balancer is deployed to this subnet. -<2> The `UserManaged` value indicates that you are using an user-managed load balancer. -<3> Because user-managed load balancers are in Technology Preview, you must include the `TechPreviewNoUpgrade` value to deploy a cluster that uses a user-managed load balancer. 
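If you also manage the DNS records for the cluster, you can confirm that the API name that is derived from the sample values above resolves to an address that your load balancer serves. This check is a sketch only and uses the `metadata.name` and `baseDomain` values from the sample file:

[source,terminal]
----
$ dig +short api.mycluster.mydomain.test
----

The command prints the address that the record resolves to; it should be an address that is reachable through your user-managed load balancer.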
\ No newline at end of file diff --git a/modules/installation-osp-failure-domains-config.adoc b/modules/installation-osp-failure-domains-config.adoc deleted file mode 100644 index 94c1a52afc2e..000000000000 --- a/modules/installation-osp-failure-domains-config.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-*.adoc - -:_content-type: PROCEDURE -[id="installation-osp-failure-domains-config_{context}"] -= Example installation configuration section that uses failure domains - -:FeatureName: {rh-openstack} failure domains -include::snippets/technology-preview.adoc[] - -The following section of an `install-config.yaml` file demonstrates the use of failure domains in a cluster to deploy on {rh-openstack-first}: - -[source,yaml] ----- -# ... -controlPlane: - name: master - platform: - openstack: - type: m1.large - failureDomains: - - computeAvailabilityZone: 'nova-1' - storageAvailabilityZone: 'cinder-1' - portTargets: - - id: control-plane - network: - id: 8db6a48e-375b-4caa-b20b-5b9a7218bfe6 - - computeAvailabilityZone: 'nova-2' - storageAvailabilityZone: 'cinder-2' - portTargets: - - id: control-plane - network: - id: 39a7b82a-a8a4-45a4-ba5a-288569a6edd1 - - computeAvailabilityZone: 'nova-3' - storageAvailabilityZone: 'cinder-3' - portTargets: - - id: control-plane - network: - id: 8e4b4e0d-3865-4a9b-a769-559270271242 -featureSet: TechPreviewNoUpgrade -# ... ----- \ No newline at end of file diff --git a/modules/installation-osp-fixing-subnet.adoc b/modules/installation-osp-fixing-subnet.adoc deleted file mode 100644 index ab0935039a07..000000000000 --- a/modules/installation-osp-fixing-subnet.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_openstack/installing-openstack-installer-user.adoc -// -//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE - -:_content-type: PROCEDURE -[id="installation-osp-fixing-subnet_{context}"] -= Setting a custom subnet for machines - -The IP range that the installation program uses by default might not match the Neutron subnet that you create when you install {product-title}. If necessary, update the CIDR value for new machines by editing the installation configuration file. - -.Prerequisites - -* You have the `install-config.yaml` file that was generated by the {product-title} installation program. - -.Procedure - -. On a command line, browse to the directory that contains `install-config.yaml`. - -. From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: - -** To set the value by using a script, run: -+ -[source,terminal] ----- -$ python -c ' -import yaml; -path = "install-config.yaml"; -data = yaml.safe_load(open(path)); -data["networking"]["machineNetwork"] = [{"cidr": "192.168.0.0/18"}]; <1> -open(path, "w").write(yaml.dump(data, default_flow_style=False))' ----- -<1> Insert a value that matches your intended Neutron subnet, e.g. `192.0.2.0/24`. - -** To set the value manually, open the file and set the value of `networking.machineCIDR` to something that matches your intended Neutron subnet. 
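After you set the value with either method, you can print it back as a quick check. This one-liner assumes Python 3 and the PyYAML module, the same tools that the editing script in this procedure uses:

[source,terminal]
----
$ python -c '
import yaml;
path = "install-config.yaml";
print(yaml.safe_load(open(path))["networking"]["machineNetwork"])'
----

The command prints the machine network entries, for example `[{'cidr': '192.168.0.0/18'}]`.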
diff --git a/modules/installation-osp-kuryr-api-scaling.adoc b/modules/installation-osp-kuryr-api-scaling.adoc deleted file mode 100644 index 3032874d7838..000000000000 --- a/modules/installation-osp-kuryr-api-scaling.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/load-balancing-openstack.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-api-scaling_{context}"] -= Scaling clusters that use Kuryr by using Octavia - -:FeatureName: Kuryr -include::snippets/deprecated-feature.adoc[] - -If your cluster uses Kuryr, associate the API floating IP address of your cluster with the pre-existing Octavia load balancer. - -.Prerequisites - -* Your {product-title} cluster uses Kuryr. - -* Octavia is available on your {rh-openstack-first} deployment. - -.Procedure - -. Optional: From a command line, to reuse the cluster API floating IP address, unset it: -+ -[source,terminal] ----- -$ openstack floating ip unset $API_FIP ----- - -. Add either the unset `API_FIP` or a new address to the created load balancer VIP: -+ -[source,terminal] ----- -$ openstack floating ip set --port $(openstack loadbalancer show -c <vip_port_id> -f value ${OCP_CLUSTER}-kuryr-api-loadbalancer) $API_FIP ----- - -Your cluster now uses Octavia for load balancing. - -[NOTE] -==== -If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora virtual machine (VM). - -You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck. -==== \ No newline at end of file diff --git a/modules/installation-osp-kuryr-config-yaml.adoc b/modules/installation-osp-kuryr-config-yaml.adoc deleted file mode 100644 index b883e747fb28..000000000000 --- a/modules/installation-osp-kuryr-config-yaml.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-kuryr-config-yaml_{context}"] -= Sample customized `install-config.yaml` file for {rh-openstack} with Kuryr - -To deploy with Kuryr SDN instead of the default OVN-Kubernetes network plugin, you must modify the `install-config.yaml` file to include `Kuryr` as the desired `networking.networkType`. -This sample `install-config.yaml` demonstrates all of the possible -{rh-openstack-first} customization options. - -[IMPORTANT] -==== -This sample file is provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program. -==== - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: - name: master - platform: {} - replicas: 3 -compute: -- name: worker - platform: - openstack: - type: ml.large - replicas: 3 -metadata: - name: example -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - serviceNetwork: - - 172.30.0.0/16 <1> - networkType: Kuryr <2> -platform: - openstack: - cloud: mycloud - externalNetwork: external - computeFlavor: m1.xlarge - apiFloatingIP: 128.0.0.1 - trunkSupport: true <3> - octaviaSupport: true <3> -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... ----- -<1> The Amphora Octavia driver creates two ports per load balancer. As a -result, the service subnet that the installer creates is twice the size of the -CIDR that is specified as the value of the `serviceNetwork` property. The larger range is -required to prevent IP address conflicts. -<2> The cluster network plugin to install. 
The supported values are `Kuryr`, `OVNKubernetes`, and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<3> Both `trunkSupport` and `octaviaSupport` are automatically discovered by the -installer, so there is no need to set them. But if your environment does not -meet both requirements, Kuryr SDN will not properly work. Trunks are needed -to connect the pods to the {rh-openstack} network and Octavia is required to create the -{product-title} services. diff --git a/modules/installation-osp-kuryr-increase-quota.adoc b/modules/installation-osp-kuryr-increase-quota.adoc deleted file mode 100644 index 4aeff7fbac7a..000000000000 --- a/modules/installation-osp-kuryr-increase-quota.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-increase-quota_{context}"] -= Increasing quota - -When using Kuryr SDN, you must increase quotas to satisfy the {rh-openstack-first} -resources used by pods, services, namespaces, and network policies. - -.Procedure - -* Increase the quotas for a project by running the following command: -+ -[source,terminal] ----- -$ sudo openstack quota set --secgroups 250 --secgroup-rules 1000 --ports 1500 --subnets 250 --networks 250 <project> ----- diff --git a/modules/installation-osp-kuryr-ingress-scaling.adoc b/modules/installation-osp-kuryr-ingress-scaling.adoc deleted file mode 100644 index 0888b7b40265..000000000000 --- a/modules/installation-osp-kuryr-ingress-scaling.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/load-balancing-openstack.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-octavia-scale_{context}"] -= Scaling for ingress traffic by using {rh-openstack} Octavia - -:FeatureName: Kuryr -include::snippets/deprecated-feature.adoc[] - -You can use Octavia load balancers to scale Ingress controllers on clusters that use Kuryr. - -.Prerequisites - -* Your {product-title} cluster uses Kuryr. - -* Octavia is available on your {rh-openstack} deployment. - -.Procedure - -. To copy the current internal router service, on a command line, enter: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get svc router-internal-default -o yaml > external_router.yaml ----- - -. In the file `external_router.yaml`, change the values of `metadata.name` and `spec.type` to -`LoadBalancer`. -+ -[source,yaml] -.Example router file ----- -apiVersion: v1 -kind: Service -metadata: - labels: - ingresscontroller.operator.openshift.io/owning-ingresscontroller: default - name: router-external-default <1> - namespace: openshift-ingress -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - - name: https - port: 443 - protocol: TCP - targetPort: https - - name: metrics - port: 1936 - protocol: TCP - targetPort: 1936 - selector: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - sessionAffinity: None - type: LoadBalancer <2> ----- -<1> Ensure that this value is descriptive, like `router-external-default`. -<2> Ensure that this value is `LoadBalancer`. - -[NOTE] -==== -You can delete timestamps and other information that is irrelevant to load balancing. -==== - -. From a command line, create a service from the `external_router.yaml` file: -+ -[source,terminal] ----- -$ oc apply -f external_router.yaml ----- - -. 
Verify that the external IP address of the service is the same as the one that is associated with the load balancer: -.. On a command line, retrieve the external IP address of the service: -+ -[source,terminal] ----- -$ oc -n openshift-ingress get svc ----- -+ -[source,terminal] -.Example output ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -router-external-default LoadBalancer 172.30.235.33 10.46.22.161 80:30112/TCP,443:32359/TCP,1936:30317/TCP 3m38s -router-internal-default ClusterIP 172.30.115.123 <none> 80/TCP,443/TCP,1936/TCP 22h ----- - -.. Retrieve the IP address of the load balancer: -+ -[source,terminal] ----- -$ openstack loadbalancer list | grep router-external ----- -+ -.Example output -[source,terminal] ----- -| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia | ----- - -.. Verify that the addresses you retrieved in the previous steps are associated with each other in the floating IP list: -+ -[source,terminal] ----- -$ openstack floating ip list | grep 172.30.235.33 ----- -+ -.Example output -[source,terminal] ----- -| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c | ----- - -You can now use the value of `EXTERNAL-IP` as the new Ingress address. - - -[NOTE] -==== -If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora virtual machine (VM). - -You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck. -==== diff --git a/modules/installation-osp-kuryr-known-limitations.adoc b/modules/installation-osp-kuryr-known-limitations.adoc deleted file mode 100644 index 64bba5bc87b1..000000000000 --- a/modules/installation-osp-kuryr-known-limitations.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-kuryr-known-limitations_{context}"] -= Known limitations of installing with Kuryr - -Using {product-title} with Kuryr SDN has several known limitations. - -[discrete] -[id="openstack-general-limitations_{context}"] -== {rh-openstack} general limitations - -Using {product-title} with Kuryr SDN has several limitations that apply to all versions and environments: - -* `Service` objects with the `NodePort` type are not supported. - -* Clusters that use the OVN Octavia provider driver support `Service` objects for which the `.spec.selector` property is unspecified only if the `.subsets.addresses` property of the `Endpoints` object includes the subnet of the nodes or pods. - -* If the subnet on which machines are created is not connected to a router, or if the subnet is connected, but the router has no external gateway set, Kuryr cannot create floating IPs for `Service` objects with type `LoadBalancer`. - -* Configuring the `sessionAffinity=ClientIP` property on `Service` objects does not have an effect. Kuryr does not support this setting. - -[discrete] -[id="openstack-version-limitations_{context}"] -== {rh-openstack} version limitations - -Using {product-title} with Kuryr SDN has several limitations that depend on the {rh-openstack} version. - -* {rh-openstack} versions before 16 use -the default Octavia load balancer driver (Amphora). This driver requires that one -Amphora load balancer VM is deployed per {product-title} service. 
Creating too many -services can cause you to run out of resources. -+ -Deployments of later versions of {rh-openstack} that have the OVN Octavia driver disabled also -use the Amphora driver. They are subject to the same resource concerns as earlier versions of {rh-openstack}. - -* Kuryr SDN does not support automatic unidling by a service. - -[discrete] -[id="openstack-upgrade-limitations_{context}"] -== {rh-openstack} upgrade limitations - -As a result of the {rh-openstack} upgrade process, the Octavia API might be changed, and upgrades to the Amphora images that are used for load balancers might be required. - -You can address API changes on an individual basis. - -If the Amphora image is upgraded, the {rh-openstack} operator can handle existing load balancer VMs in two ways: - -* Upgrade each VM by triggering a link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/networking_guide/sec-octavia#update-running-amphora-instances[load balancer failover]. - -* Leave responsibility for upgrading the VMs to users. - -If the operator takes the first option, there might be short downtimes during failovers. - -If the operator takes the second option, the existing load balancers will not support upgraded Octavia -API features, like UDP listeners. In this case, users must recreate their Services to use these features. diff --git a/modules/installation-osp-kuryr-neutron-configuration.adoc b/modules/installation-osp-kuryr-neutron-configuration.adoc deleted file mode 100644 index 4341f276ffd7..000000000000 --- a/modules/installation-osp-kuryr-neutron-configuration.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -[id="installation-osp-kuryr-neutron-configuration_{context}"] -= Configuring Neutron - -Kuryr CNI leverages the Neutron Trunks extension to plug containers into the -{rh-openstack-first} SDN, so you must use the `trunks` extension for Kuryr to properly work. - -In addition, if you leverage the default ML2/OVS Neutron driver, the firewall -must be set to `openvswitch` instead of `ovs_hybrid` so that security groups are -enforced on trunk subports and Kuryr can properly handle network policies. diff --git a/modules/installation-osp-kuryr-octavia-configuration.adoc b/modules/installation-osp-kuryr-octavia-configuration.adoc deleted file mode 100644 index 9447b8834c2b..000000000000 --- a/modules/installation-osp-kuryr-octavia-configuration.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-octavia-configuration_{context}"] -= Configuring Octavia - -Kuryr SDN uses {rh-openstack-first}'s Octavia LBaaS to implement {product-title} services. Thus, -you must install and configure Octavia components in {rh-openstack} -to use Kuryr SDN. - -To enable Octavia, you must include the Octavia service during the installation -of the {rh-openstack} Overcloud, or upgrade the Octavia service if the Overcloud -already exists. The following steps for enabling Octavia apply to both a clean -install of the Overcloud or an Overcloud update. 
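Whether you need a clean deployment or an update depends on whether the Octavia service is already present in your Overcloud. One quick way to check, assuming that you have sourced your Overcloud credentials, is to search the service catalog; if the command returns no output, Octavia is not yet deployed:

[source,terminal]
----
$ openstack service list | grep -i octavia
----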
- -[NOTE] -==== -The following steps only capture the key pieces required during the -https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html/director_installation_and_usage/[deployment of {rh-openstack}] -when dealing with Octavia. It is also important to note that -https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html/director_installation_and_usage/configuring-a-container-image-source#registry-methods[registry methods] -vary. - -This example uses the local registry method. -==== - -.Procedure - -. If you are using the local registry, create a template to upload the images to -the registry. For example: -+ -[source,terminal] ----- -(undercloud) $ openstack overcloud container image prepare \ --e /usr/share/openstack-tripleo-heat-templates/environments/services-docker/octavia.yaml \ ---namespace=registry.access.redhat.com/rhosp13 \ ---push-destination=<local-ip-from-undercloud.conf>:8787 \ ---prefix=openstack- \ ---tag-from-label {version}-{product-version} \ ---output-env-file=/home/stack/templates/overcloud_images.yaml \ ---output-images-file /home/stack/local_registry_images.yaml ----- - -. Verify that the `local_registry_images.yaml` file contains the Octavia images. -For example: -+ -[source,yaml] ----- -... -- imagename: registry.access.redhat.com/rhosp13/openstack-octavia-api:13.0-43 - push_destination: <local-ip-from-undercloud.conf>:8787 -- imagename: registry.access.redhat.com/rhosp13/openstack-octavia-health-manager:13.0-45 - push_destination: <local-ip-from-undercloud.conf>:8787 -- imagename: registry.access.redhat.com/rhosp13/openstack-octavia-housekeeping:13.0-45 - push_destination: <local-ip-from-undercloud.conf>:8787 -- imagename: registry.access.redhat.com/rhosp13/openstack-octavia-worker:13.0-44 - push_destination: <local-ip-from-undercloud.conf>:8787 ----- -+ -[NOTE] -==== -The Octavia container versions vary depending upon the specific -{rh-openstack} release installed. -==== - -. Pull the container images from `registry.redhat.io` to the Undercloud node: -+ -[source,terminal] ----- -(undercloud) $ sudo openstack overcloud container image upload \ - --config-file /home/stack/local_registry_images.yaml \ - --verbose ----- -+ -This may take some time depending on the speed of your network and Undercloud -disk. - -. Install or update your Overcloud environment with Octavia: -+ -[source,terminal] ----- -$ openstack overcloud deploy --templates \ - -e /usr/share/openstack-tripleo-heat-templates/environments/services-docker/octavia.yaml \ - -e octavia_timeouts.yaml ----- -+ -[NOTE] -==== -This command only includes the files associated with Octavia; it varies based on -your specific installation of {rh-openstack}. See the {rh-openstack} -documentation for further information. For more information on customizing your -Octavia installation, see -https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html-single/networking_guide/#planning_your_octavia_deployment[installation -of Octavia using Director]. -==== -+ -[NOTE] -==== -When leveraging Kuryr SDN, the Overcloud installation requires the Neutron `trunk` extension. This is available by default on director deployments. -Use the `openvswitch` firewall instead of the default `ovs-hybrid` when the Neutron -backend is ML2/OVS. There is no need for modifications if the backend is -ML2/OVN. -==== - -[id="installation-osp-kuryr-octavia-driver_{context}"] -== The Octavia OVN Driver - -Octavia supports multiple provider drivers through the Octavia API. 
- -To see all available Octavia provider drivers, on a command line, enter: -[source,terminal] ----- -$ openstack loadbalancer provider list ----- - -.Example output -[source,terminal] ----- -+---------+-------------------------------------------------+ -| name    | description                                     | -+---------+-------------------------------------------------+ -| amphora | The Octavia Amphora driver.                     | -| octavia | Deprecated alias of the Octavia Amphora driver. | -| ovn     | Octavia OVN driver.                             | -+---------+-------------------------------------------------+ ----- - -Beginning with {rh-openstack} version 16, the Octavia OVN provider driver (`ovn`) is supported on -{product-title} on {rh-openstack} deployments. - -`ovn` is an integration driver for the load balancing -that Octavia and OVN provide. It supports basic load balancing capabilities, -and is based on OpenFlow rules. The driver is automatically enabled -in Octavia by Director on deployments that use OVN Neutron ML2. - -The Amphora provider driver is the default driver. If `ovn` is enabled, however, Kuryr uses it. - -If Kuryr uses `ovn` instead of Amphora, it offers the following benefits: - -* Decreased resource requirements. Kuryr does not require a load balancer VM for each service. -* Reduced network latency. -* Increased service creation speed by using OpenFlow rules instead of a VM for each service. -* Distributed load balancing actions across all nodes instead of centralized on Amphora VMs. diff --git a/modules/installation-osp-kuryr-octavia-upgrade.adoc b/modules/installation-osp-kuryr-octavia-upgrade.adoc deleted file mode 100644 index 87a8a3e218b6..000000000000 --- a/modules/installation-osp-kuryr-octavia-upgrade.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/load-balancing-openstack.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-octavia-configure_{context}"] -= Using the Octavia OVN load balancer provider driver with Kuryr SDN - -:FeatureName: Kuryr -include::snippets/deprecated-feature.adoc[] - -If your {product-title} cluster uses Kuryr and was installed on a {rh-openstack-first} 13 cloud -that was later upgraded to {rh-openstack} 16, you can configure it to use the Octavia OVN provider driver. - -[IMPORTANT] -==== -Kuryr replaces existing load balancers after you change provider drivers. This process -results in some downtime. -==== - -.Prerequisites - -* Install the {rh-openstack} CLI, `openstack`. - -* Install the {product-title} CLI, `oc`. - -* Verify that the Octavia OVN driver on {rh-openstack} is enabled. -+ -[TIP] -==== -To view a list of available Octavia drivers, on a command line, enter `openstack loadbalancer provider list`. - -The `ovn` driver is displayed in the command's output. -==== - -.Procedure - -To change from the Octavia Amphora provider driver to Octavia OVN: - -. Open the `kuryr-config` ConfigMap. On a command line, enter: -+ -[source,terminal] ----- -$ oc -n openshift-kuryr edit cm kuryr-config ----- - -. In the ConfigMap, delete the line that contains `kuryr-octavia-provider: default`. For example: -+ -[source,yaml] ----- -... -kind: ConfigMap -metadata: - annotations: - networkoperator.openshift.io/kuryr-octavia-provider: default <1> -... ----- -<1> Delete this line. The cluster will regenerate it with `ovn` as the value. -+ -Wait for the Cluster Network Operator to detect the modification and to redeploy the `kuryr-controller` and `kuryr-cni` pods. 
This process might take several minutes. - -. Verify that the `kuryr-config` ConfigMap annotation is present with `ovn` as its value. On a command line, enter: -+ -[source,terminal] ----- -$ oc -n openshift-kuryr edit cm kuryr-config ----- -+ -The `ovn` provider value is displayed in the output: -+ -[source,yaml] ----- -... -kind: ConfigMap -metadata: - annotations: - networkoperator.openshift.io/kuryr-octavia-provider: ovn -... ----- - -. Verify that {rh-openstack} recreated its load balancers. - -.. On a command line, enter: -+ -[source,terminal] ----- -$ openstack loadbalancer list | grep amphora ----- -+ -A single Amphora load balancer is displayed. For example: -+ -[source,terminal] ----- -a4db683b-2b7b-4988-a582-c39daaad7981 | ostest-7mbj6-kuryr-api-loadbalancer | 84c99c906edd475ba19478a9a6690efd | 172.30.0.1 | ACTIVE | amphora ----- - -.. Search for `ovn` load balancers by entering: -+ -[source,terminal] ----- -$ openstack loadbalancer list | grep ovn ----- -+ -The remaining load balancers of the `ovn` type are displayed. For example: -+ -[source,terminal] ----- -2dffe783-98ae-4048-98d0-32aa684664cc | openshift-apiserver-operator/metrics | 84c99c906edd475ba19478a9a6690efd | 172.30.167.119 | ACTIVE | ovn -0b1b2193-251f-4243-af39-2f99b29d18c5 | openshift-etcd/etcd | 84c99c906edd475ba19478a9a6690efd | 172.30.143.226 | ACTIVE | ovn -f05b07fc-01b7-4673-bd4d-adaa4391458e | openshift-dns-operator/metrics | 84c99c906edd475ba19478a9a6690efd | 172.30.152.27 | ACTIVE | ovn ----- diff --git a/modules/installation-osp-kuryr-port-pools.adoc b/modules/installation-osp-kuryr-port-pools.adoc deleted file mode 100644 index 3264dac0d7d4..000000000000 --- a/modules/installation-osp-kuryr-port-pools.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * post_installation_configuration/network-configuration.adoc - -[id="installation-osp-kuryr-port-pools_{context}"] -= Kuryr ports pools - -A Kuryr ports pool maintains a number of ports on standby for pod creation. - -Keeping ports on standby minimizes pod creation time. Without ports pools, Kuryr must explicitly request port creation or deletion whenever a pod is created or deleted. - -The Neutron ports that Kuryr uses are created in subnets that are tied to namespaces. These pod ports are also added as subports to the primary port of {product-title} cluster nodes. - -Because Kuryr keeps each namespace in a separate subnet, a separate ports pool is maintained for each namespace-worker pair. - -Prior to installing a cluster, you can set the following parameters in the `cluster-network-03-config.yml` manifest file to configure ports pool behavior: - -* The `enablePortPoolsPrepopulation` parameter controls pool prepopulation, which forces Kuryr to add Neutron ports to the pools when the first pod that is configured to use the dedicated network for pods is created in a namespace. The default value is `false`. -* The `poolMinPorts` parameter is the minimum number of free ports that are kept in the pool. The default value is `1`. -* The `poolMaxPorts` parameter is the maximum number of free ports that are kept in the pool. A value of `0` disables that upper bound. This is the default setting. -+ -If your OpenStack port quota is low, or you have a limited number of IP addresses on the pod network, consider setting this option to ensure that unneeded ports are deleted. 
-* The `poolBatchPorts` parameter defines the maximum number of Neutron ports that can be created at once. The default value is `3`. diff --git a/modules/installation-osp-kuryr-settings-active.adoc b/modules/installation-osp-kuryr-settings-active.adoc deleted file mode 100644 index 09afe176207e..000000000000 --- a/modules/installation-osp-kuryr-settings-active.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-settings-active_{context}"] -= Adjusting Kuryr ports pool settings in active deployments on {rh-openstack} - -You can use a custom resource (CR) to configure how Kuryr manages {rh-openstack-first} Neutron ports to control the speed and efficiency of pod creation on a deployed cluster. - -.Procedure - -. From a command line, open the Cluster Network Operator (CNO) CR for editing: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. Edit the settings to meet your requirements. The following file is provided as an example: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - serviceNetwork: - - 172.30.0.0/16 - defaultNetwork: - type: Kuryr - kuryrConfig: - enablePortPoolsPrepopulation: false <1> - poolMinPorts: 1 <2> - poolBatchPorts: 3 <3> - poolMaxPorts: 5 <4> ----- -<1> Set `enablePortPoolsPrepopulation` to `true` to make Kuryr create Neutron ports when the first pod that is configured to use the dedicated network for pods is created in a namespace. This setting raises the Neutron ports quota but can reduce the time that is required to spawn pods. The default value is `false`. -<2> Kuryr creates new ports for a pool if the number of free ports in that pool is lower than the value of `poolMinPorts`. The default value is `1`. -<3> `poolBatchPorts` controls the number of new ports that are created if the number of free ports is lower than the value of `poolMinPorts`. The default value is `3`. -<4> If the number of free ports in a pool is higher than the value of `poolMaxPorts`, Kuryr deletes them until the number matches that value. Setting the value to `0` disables this upper bound, preventing pools from shrinking. The default value is `0`. - -. Save your changes and quit the text editor to commit your changes. - -[IMPORTANT] -==== -Modifying these options on a running cluster forces the kuryr-controller and kuryr-cni pods to restart. As a result, the creation of new pods and services will be delayed. -==== diff --git a/modules/installation-osp-kuryr-settings-installing.adoc b/modules/installation-osp-kuryr-settings-installing.adoc deleted file mode 100644 index 8e4ec3bbfb54..000000000000 --- a/modules/installation-osp-kuryr-settings-installing.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="installation-osp-kuryr-settings-installing_{context}"] -= Adjusting Kuryr ports pools during installation - -During installation, you can configure how Kuryr manages {rh-openstack-first} Neutron ports to control the speed and efficiency of pod creation. - -.Prerequisites - -* Create and modify the `install-config.yaml` file. - -.Procedure - -. 
From a command line, create the manifest files: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the name of the directory that -contains the `install-config.yaml` file for your cluster. - -. Create a file that is named `cluster-network-03-config.yml` in the -`<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -$ touch <installation_directory>/manifests/cluster-network-03-config.yml <1> ----- -<1> For `<installation_directory>`, specify the directory name that contains the -`manifests/` directory for your cluster. -+ -After creating the file, several network configuration files are in the -`manifests/` directory, as shown: -+ -[source,terminal] ----- -$ ls <installation_directory>/manifests/cluster-network-* ----- -+ -.Example output -[source,terminal] ----- -cluster-network-01-crd.yml -cluster-network-02-config.yml -cluster-network-03-config.yml ----- - -. Open the `cluster-network-03-config.yml` file in an editor, and enter a custom resource (CR) that describes the Cluster Network Operator configuration that you want: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. Edit the settings to meet your requirements. The following file is provided as an example: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - serviceNetwork: - - 172.30.0.0/16 - defaultNetwork: - type: Kuryr - kuryrConfig: - enablePortPoolsPrepopulation: false <1> - poolMinPorts: 1 <2> - poolBatchPorts: 3 <3> - poolMaxPorts: 5 <4> - openstackServiceNetwork: 172.30.0.0/15 <5> ----- -<1> Set `enablePortPoolsPrepopulation` to `true` to make Kuryr create new Neutron ports when the first pod on the network for pods is created in a namespace. This setting raises the Neutron ports quota but can reduce the time that is required to spawn pods. The default value is `false`. -<2> Kuryr creates new ports for a pool if the number of free ports in that pool is lower than the value of `poolMinPorts`. The default value is `1`. -<3> `poolBatchPorts` controls the number of new ports that are created if the number of free ports is lower than the value of `poolMinPorts`. The default value is `3`. -<4> If the number of free ports in a pool is higher than the value of `poolMaxPorts`, Kuryr deletes them until the number matches that value. Setting this value to `0` disables this upper bound, preventing pools from shrinking. The default value is `0`. -<5> The `openStackServiceNetwork` parameter defines the CIDR range of the network from which IP addresses are allocated to {rh-openstack} Octavia's LoadBalancers. -+ -If this parameter is used with the Amphora driver, Octavia takes two IP addresses from this network for each load balancer: one for OpenShift and the other for VRRP connections. Because these IP addresses are managed by {product-title} and Neutron respectively, they must come from different pools. -Therefore, the value of `openStackServiceNetwork` must be at least twice the size of the value of `serviceNetwork`, and the value of `serviceNetwork` must overlap entirely with the range that is defined by `openStackServiceNetwork`. -+ -The CNO verifies that VRRP IP addresses that are taken from the range that is defined by this parameter do not overlap with the range that is defined by the `serviceNetwork` parameter. 
-+ -If this parameter is not set, the CNO uses an expanded value of `serviceNetwork` that is determined by decrementing the prefix size by 1. - -. Save the `cluster-network-03-config.yml` file, and exit the text editor. - -. Optional: Back up the `manifests/cluster-network-03-config.yml` file. The installation program deletes the `manifests/` directory while creating the cluster. diff --git a/modules/installation-osp-modifying-networktype.adoc b/modules/installation-osp-modifying-networktype.adoc deleted file mode 100644 index d879f93ba359..000000000000 --- a/modules/installation-osp-modifying-networktype.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_openstack/installing-openstack-installer-user-kuryr.adoc -// -//YOU MUST SET AN IFEVAL FOR EACH NEW MODULE - -:_content-type: PROCEDURE -[id="installation-osp-modifying-networktype_{context}"] -= Modifying the network type - -By default, the installation program selects the `OpenShiftSDN` network type. To use Kuryr instead, change the value in the installation configuration file that the program generated. - -.Prerequisites - -* You have the file `install-config.yaml` that was generated by the {product-title} installation program - -.Procedure - -. In a command prompt, browse to the directory that contains `install-config.yaml`. - -. From that directory, either run a script to edit the `install-config.yaml` file or update the file manually: - -** To set the value by using a script, run: -+ -[source,terminal] ----- -$ python -c ' -import yaml; -path = "install-config.yaml"; -data = yaml.safe_load(open(path)); -data["networking"]["networkType"] = "Kuryr"; -open(path, "w").write(yaml.dump(data, default_flow_style=False))' ----- - -** To set the value manually, open the file and set `networking.networkType` to `"Kuryr"`. diff --git a/modules/installation-osp-provider-network-preparation.adoc b/modules/installation-osp-provider-network-preparation.adoc deleted file mode 100644 index dbcfdde97f0c..000000000000 --- a/modules/installation-osp-provider-network-preparation.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -[id="installation-osp-provider-network-preparation_{context}"] -= {rh-openstack} provider network requirements for cluster installation - -Before you install an {product-title} cluster, your {rh-openstack-first} deployment and provider network must meet a number of conditions: - -* The link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/networking_guide/networking-overview_rhosp-network#install-networking_network-overview[{rh-openstack} networking service (Neutron) is enabled] and accessible through the {rh-openstack} networking API. -* The {rh-openstack} networking service has the link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/networking_guide/config-allowed-address-pairs_rhosp-network#overview-allow-addr-pairs_config-allowed-address-pairs[port security and allowed address pairs extensions enabled]. -* The provider network can be shared with other tenants. 
-+ -[TIP] -==== -Use the `openstack network create` command with the `--share` flag to create a network that can be shared. -==== -* The {rh-openstack} project that you use to install the cluster must own the provider network, as well as an appropriate subnet. -+ -[TIP] -==== -To create a network for a project that is named "openshift," enter the following command:: -[source,terminal] ----- -$ openstack network create --project openshift ----- - -To create a subnet for a project that is named "openshift," enter the following command:: -[source,terminal] ----- -$ openstack subnet create --project openshift ----- - -To learn more about creating networks on {rh-openstack}, read link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/networking_guide/networking-overview_rhosp-network#tenant-provider-networks_network-overview[the provider networks documentation]. -==== -+ -If the cluster is owned by the `admin` user, you must run the installer as that user to create ports on the network. -+ -[IMPORTANT] -==== -Provider networks must be owned by the {rh-openstack} project that is used to create the cluster. If they are not, the {rh-openstack} Compute service (Nova) cannot request a port from that network. -==== - -* Verify that the provider network can reach the {rh-openstack} metadata service IP address, which is `169.254.169.254` by default. -+ -Depending on your {rh-openstack} SDN and networking service configuration, you might need to provide the route when you create the subnet. For example: -+ -[source,terminal] ----- -$ openstack subnet create --dhcp --host-route destination=169.254.169.254/32,gateway=192.0.2.2 ... ----- - -* Optional: To secure the network, create link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/networking_guide/config-rbac-policies_rhosp-network#proc_create-rbac-policies_config-rbac-policies[role-based access control (RBAC)] rules that limit network access to a single project. diff --git a/modules/installation-osp-provider-networks.adoc b/modules/installation-osp-provider-networks.adoc deleted file mode 100644 index 6a269966f31e..000000000000 --- a/modules/installation-osp-provider-networks.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - -[id="installation-osp-provider-networks_{context}"] -= Cluster deployment on {rh-openstack} provider networks - -You can deploy your {product-title} clusters on {rh-openstack-first} with a primary network interface on a provider network. Provider networks are commonly used to give projects direct access to a public network that can be used to reach the internet. You can also share provider networks among projects as part of the network creation process. - -{rh-openstack} provider networks map directly to an existing physical network in the data center. A {rh-openstack} administrator must create them. - -In the following example, {product-title} workloads are connected to a data center by using a provider network: - -image::openshift-on-openstack-provider-network.png[A diagram that depicts four OpenShift workloads on OpenStack. Each workload is connected by its NIC to an external data center by using a provider network.] 
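As a sketch only, an {rh-openstack} administrator might create a shareable flat provider network with a command like the following. The physical network name `datacentre` and the network name `provider-net` are placeholder values; use the names that match your data center configuration:

[source,terminal]
----
$ openstack network create \
    --share \
    --external \
    --provider-network-type flat \
    --provider-physical-network datacentre \
    provider-net
----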
- -{product-title} clusters that are installed on provider networks do not require tenant networks or floating IP addresses. The installer does not create these resources during installation. - -Example provider network types include flat (untagged) and VLAN (802.1Q tagged). - -[NOTE] -==== -A cluster can support as many provider network connections as the network type allows. For example, VLAN networks typically support up to 4096 connections. -==== - -You can learn more about provider and tenant networks in link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/networking_guide/networking-overview_rhosp-network#tenant-provider-networks_network-overview[the {rh-openstack} documentation]. diff --git a/modules/installation-osp-restricted-config-yaml.adoc b/modules/installation-osp-restricted-config-yaml.adoc deleted file mode 100644 index 5ded857e4669..000000000000 --- a/modules/installation-osp-restricted-config-yaml.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc - -[id="installation-osp-restricted-config-yaml_{context}"] -= Sample customized `install-config.yaml` file for restricted OpenStack installations - -This sample `install-config.yaml` demonstrates all of the possible {rh-openstack-first} -customization options. - -[IMPORTANT] -==== -This sample file is provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program. -==== - -[source, yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: - name: master - platform: {} - replicas: 3 -compute: -- name: worker - platform: - openstack: - type: ml.large - replicas: 3 -metadata: - name: example -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - serviceNetwork: - - 172.30.0.0/16 - networkType: OVNKubernetes -platform: - openstack: - region: region1 - cloud: mycloud - externalNetwork: external - computeFlavor: m1.xlarge - apiFloatingIP: 128.0.0.1 -ifndef::openshift-origin[] -fips: false -endif::openshift-origin[] -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... 
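# The additionalTrustBundle and imageContentSources stanzas below are what make
# this a restricted network installation: the trust bundle carries the
# certificate for your mirror registry, and imageContentSources redirects
# release image pulls to that mirror.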
-additionalTrustBundle: | - - -----BEGIN CERTIFICATE----- - - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - - -----END CERTIFICATE----- - -imageContentSources: -- mirrors: - - <mirror_registry>/<repo_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <mirror_registry>/<repo_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev ----- diff --git a/modules/installation-osp-setting-cloud-provider-options.adoc b/modules/installation-osp-setting-cloud-provider-options.adoc deleted file mode 100644 index a6705de95db9..000000000000 --- a/modules/installation-osp-setting-cloud-provider-options.adoc +++ /dev/null @@ -1,85 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-setting-cloud-provider-options_{context}"] -= Setting OpenStack Cloud Controller Manager options - -Optionally, you can edit the OpenStack Cloud Controller Manager (CCM) configuration for your cluster. This configuration controls how {product-title} interacts with {rh-openstack-first}. - -For a complete list of configuration parameters, see the "OpenStack Cloud Controller Manager reference guide" page in the "Installing on OpenStack" documentation. - -.Procedure - -. If you have not already generated manifest files for your cluster, generate them by running the following command: -+ -[source,terminal] ----- -$ openshift-install --dir <destination_directory> create manifests ----- - -. In a text editor, open the cloud-provider configuration manifest file. For example: -+ -[source,terminal] ----- -$ vi openshift/manifests/cloud-provider-config.yaml ----- - -. Modify the options according to the CCM reference guide. -+ -Configuring Octavia for load balancing is a common case for clusters that do not use Kuryr. For example: -+ -[source,text] ----- -#... -[LoadBalancer] -use-octavia=true <1> -lb-provider = "amphora" <2> -floating-network-id="d3deb660-4190-40a3-91f1-37326fe6ec4a" <3> -create-monitor = True <4> -monitor-delay = 10s <5> -monitor-timeout = 10s <6> -monitor-max-retries = 1 <7> -#... ----- -<1> This property enables Octavia integration. -<2> This property sets the Octavia provider that your load balancer uses. It accepts `"ovn"` or `"amphora"` as values. If you choose to use OVN, you must also set `lb-method` to `SOURCE_IP_PORT`. -<3> This property is required if you want to use multiple external networks with your cluster. The cloud provider creates floating IP addresses on the network that is specified here. -<4> This property controls whether the cloud provider creates health monitors for Octavia load balancers. Set the value to `True` to create health monitors. As of {rh-openstack} 16.1 and 16.2, this feature is only available for the Amphora provider. -<5> This property sets the frequency with which endpoints are monitored. The value must be in the `time.ParseDuration()` format. This property is required if the value of the `create-monitor` property is `True`. -<6> This property sets the time that monitoring requests are open before timing out. The value must be in the `time.ParseDuration()` format. This property is required if the value of the `create-monitor` property is `True`. 
-<7> This property defines how many successful monitoring requests are required before a load balancer is marked as online. The value must be an integer. This property is required if the value of the `create-monitor` property is `True`. - -+ -[IMPORTANT] -==== -Prior to saving your changes, verify that the file is structured correctly. Clusters might fail if properties are not placed in the appropriate section. -==== -+ -[IMPORTANT] -==== -You must set the value of the `create-monitor` property to `True` if you use services that have the value of the `.spec.externalTrafficPolicy` property set to `Local`. The OVN Octavia provider in {rh-openstack} 16.1 and 16.2 does not support health monitors. Therefore, services that have `ETP` parameter values set to `Local` might not respond when the `lb-provider` value is set to `"ovn"`. -==== -+ -[IMPORTANT] -==== -For installations that use Kuryr, Kuryr handles relevant services. There is no need to configure Octavia load balancing in the cloud provider. -==== - -. Save the changes to the file and proceed with installation. -+ -[TIP] -==== -You can update your cloud provider configuration after you run the installer. On a command line, run: - -[source,terminal] ----- -$ oc edit configmap -n openshift-config cloud-provider-config ----- - -After you save your changes, your cluster will take some time to reconfigure itself. The process is complete if none of your nodes have a `SchedulingDisabled` status. -==== \ No newline at end of file diff --git a/modules/installation-osp-setting-worker-affinity.adoc b/modules/installation-osp-setting-worker-affinity.adoc deleted file mode 100644 index ee1e906e718d..000000000000 --- a/modules/installation-osp-setting-worker-affinity.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - - -:_content-type: PROCEDURE -[id="installation-osp-setting-worker-affinity_{context}"] -= Setting compute machine affinity - -Optionally, you can set the affinity policy for compute machines during installation. By default, both compute and control plane machines are created with a `soft-anti-affinity` policy. - -You can also create compute machine sets that use particular {rh-openstack} server groups after installation. - -[TIP] -==== -You can learn more about link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/configuring_the_compute_service_for_instance_creation/assembly_configuring-instance-scheduling-and-placement_scheduling-and-placement[{rh-openstack} instance scheduling and placement] in the {rh-openstack} documentation. -==== - -.Prerequisites - -* Create the `install-config.yaml` file and complete any modifications to it. - -.Procedure - -. Using the {rh-openstack} command-line interface, create a server group for your compute machines. 
For example: -+ -[source,terminal] ----- -$ openstack \ - --os-compute-api-version=2.15 \ - server group create \ - --policy anti-affinity \ - my-openshift-worker-group ----- -+ -For more information, see the link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/15/html/command_line_interface_reference/server#server_group_create[`server group create` command documentation]. - -. Change to the directory that contains the installation program and create the manifests: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> ----- -+ -where: -+ -`installation_directory` :: Specifies the name of the directory that contains the `install-config.yaml` file for your cluster. - -. Open `manifests/99_openshift-cluster-api_worker-machineset-0.yaml`, the `MachineSet` definition file. - -. Add the property `serverGroupID` to the definition beneath the `spec.template.spec.providerSpec.value` property. For example: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_ID> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - name: <infrastructure_ID>-<node_role> - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_ID> - machine.openshift.io/cluster-api-machineset: <infrastructure_ID>-<node_role> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_ID> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - machine.openshift.io/cluster-api-machineset: <infrastructure_ID>-<node_role> - spec: - providerSpec: - value: - apiVersion: openstackproviderconfig.openshift.io/v1alpha1 - cloudName: openstack - cloudsSecret: - name: openstack-cloud-credentials - namespace: openshift-machine-api - flavor: <nova_flavor> - image: <glance_image_name_or_location> - serverGroupID: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee <1> - kind: OpenstackProviderSpec - networks: - - filter: {} - subnets: - - filter: - name: <subnet_name> - tags: openshiftClusterID=<infrastructure_ID> - securityGroups: - - filter: {} - name: <infrastructure_ID>-<node_role> - serverMetadata: - Name: <infrastructure_ID>-<node_role> - openshiftClusterID: <infrastructure_ID> - tags: - - openshiftClusterID=<infrastructure_ID> - trunk: true - userDataSecret: - name: <node_role>-user-data - availabilityZone: <optional_openstack_availability_zone> ----- -<1> Add the UUID of your server group here. - -. Optional: Back up the `manifests/99_openshift-cluster-api_worker-machineset-0.yaml` file. The installation program deletes the `manifests/` directory when creating the cluster. - -When you install the cluster, the installer uses the `MachineSet` definition that you modified to create compute machines within your {rh-openstack} server group. 
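-
-After installation, you can optionally confirm that the compute machines were placed in the server group. The following check is a minimal sketch that assumes the `my-openshift-worker-group` name from the earlier example:
-
-[source,terminal]
-----
-$ openstack server group show my-openshift-worker-group
-----
-
-The output includes a `members` field that lists the instance UUIDs in the group; the compute machines that the installer created from the modified `MachineSet` definition should appear there.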
diff --git a/modules/installation-osp-verifying-cluster-status.adoc b/modules/installation-osp-verifying-cluster-status.adoc deleted file mode 100644 index dc3ca66c2a74..000000000000 --- a/modules/installation-osp-verifying-cluster-status.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-installer-restricted.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -ifeval::["{context}" == "installing-rhv-user-infra"] -:rhv-user-infra: -endif::[] - -:_content-type: PROCEDURE -[id="installation-osp-verifying-cluster-status_{context}"] -= Verifying cluster status - -You can verify your {product-title} cluster's status during or after installation. - -.Procedure - -. In the cluster environment, export the administrator's kubeconfig file: -+ -ifdef::rhv-user-infra[] -[source,terminal] ----- -$ export KUBECONFIG=$ASSETS_DIR/auth/kubeconfig ----- -endif::rhv-user-infra[] -ifndef::rhv-user-infra[] -[source,terminal] ----- -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. -endif::rhv-user-infra[] -+ -The `kubeconfig` file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. - -. View the control plane and compute machines created after a deployment: -+ -[source,terminal] ----- -$ oc get nodes ----- - -. View your cluster's version: -+ -[source,terminal] ----- -$ oc get clusterversion ----- - -. View your Operators' status: -+ -[source,terminal] ----- -$ oc get clusteroperator ----- - -. View all running pods in the cluster: -+ -[source,terminal] ----- -$ oc get pods -A ----- - -ifeval::["{context}" == "installing-rhv-customizations"] -:!rhv-user-infra: -endif::[] diff --git a/modules/installation-osp-verifying-external-network.adoc b/modules/installation-osp-verifying-external-network.adoc deleted file mode 100644 index 3757237fde51..000000000000 --- a/modules/installation-osp-verifying-external-network.adoc +++ /dev/null @@ -1,112 +0,0 @@ -//Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-installer.adoc -// * installing/installing_openstack/installing-openstack-installer-custom.adoc -// * installing/installing_openstack/installing-openstack-installer-kuryr.adoc -// * installing/installing_openstack/installing-openstack-user.adoc -// -// DNS resolution KI -ifeval::["{context}" == "installing-openstack-installer-custom"] -:osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp-user: -endif::[] - -:_content-type: PROCEDURE -[id="installation-osp-verifying-external-network_{context}"] -= Verifying external network access - -The {product-title} installation process requires external network access. 
You must provide an external network value to it, or deployment fails. Before you begin the process, verify that a network with the external router type exists in {rh-openstack-first}. - -.Prerequisites - -* On {rh-openstack}, the `NeutronDhcpAgentDnsmasqDnsServers` parameter must be configured to allow DHCP agents to forward instances' DNS queries. One way to set this parameter is to: -.. link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html/advanced_overcloud_customization/sect-understanding_heat_templates#sect-Environment_Files[Create a new environment file] in the template directory. -.. Provide link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html-single/overcloud_parameters/index#networking-neutron-parameters[parameter values] in the file. For example: -+ -.Sample `neutron-dhcp-agent-dnsmasq-dns-servers.yaml` file - -[source,yaml] ----- -parameter_defaults: - NeutronDhcpAgentDnsmasqDnsServers: ['<DNS_server_address_1>','<DNS_server_address_2'] ----- -.. link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html/advanced_overcloud_customization/sect-understanding_heat_templates#sect-Including_Environment_Files_in_Overcloud_Creation[Include the environment file] in your Overcloud deploy command. For example: -+ -[source,terminal] ----- -$ openstack overcloud deploy --templates -e neutron-dhcp-agent-dnsmasq-dns-servers.yaml ... ----- - -.Procedure - -. Using the {rh-openstack} CLI, verify the name and ID of the 'External' network: -+ -[source,terminal] ----- -$ openstack network list --long -c ID -c Name -c "Router Type" ----- -+ -.Example output -[source,terminal] ----- -+--------------------------------------+----------------+-------------+ -| ID | Name | Router Type | -+--------------------------------------+----------------+-------------+ -| 148a8023-62a7-4672-b018-003462f8d7dc | public_network | External | -+--------------------------------------+----------------+-------------+ ----- - -A network with an external router type appears in the network list. If at least one does not, see link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/director_installation_and_usage/performing-overcloud-post-installation-tasks#creating-a-default-floating-ip-network[Creating a default floating IP network] and link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/director_installation_and_usage/performing-overcloud-post-installation-tasks#creating-a-default-provider-network[Creating a default provider network]. - -ifdef::osp-custom,osp-kuryr[] -[IMPORTANT] -==== -If the external network's CIDR range overlaps one of the default network ranges, you must change the matching network ranges in the `install-config.yaml` file before you start the installation process. - -The default network ranges are: -[options="header"] -|==== -|Network |Range - -|`machineNetwork` -|10.0.0.0/16 - -|`serviceNetwork` -|172.30.0.0/16 - -|`clusterNetwork` -|10.128.0.0/14 -|==== -==== -endif::osp-custom,osp-kuryr[] - -ifdef::osp-custom,osp-kuryr[] -[WARNING] -If the installation program finds multiple networks with the same name, it sets one of them at random. To avoid this behavior, create unique names for resources in {rh-openstack}. -endif::osp-custom,osp-kuryr[] - -[NOTE] -==== -If the Neutron trunk service plugin is enabled, a trunk port is created by default. For more information, see https://wiki.openstack.org/wiki/Neutron/TrunkPort[Neutron trunk port]. 
-==== - -ifeval::["{context}" == "installing-openstack-installer-custom"] -:!osp-custom: -endif::[] -ifeval::["{context}" == "installing-openstack-installer-kuryr"] -:!osp-kuryr: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!osp-user: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp-user: -endif::[] diff --git a/modules/installation-osp-verifying-installation.adoc b/modules/installation-osp-verifying-installation.adoc deleted file mode 100644 index 78d3ad18f565..000000000000 --- a/modules/installation-osp-verifying-installation.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-osp-verifying-installation_{context}"] -= Verifying a successful installation - -Verify that the {product-title} installation is complete. - -.Prerequisites - -* You have the installation program (`openshift-install`) - - -.Procedure - -* On a command line, enter: -+ -[source,terminal] ----- -$ openshift-install --log-level debug wait-for install-complete ----- - -The program outputs the console URL, as well as the administrator's login information. diff --git a/modules/installation-overview.adoc b/modules/installation-overview.adoc deleted file mode 100644 index 4ad461198eee..000000000000 --- a/modules/installation-overview.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/index.adoc -// * architecture/architecture-installation.adoc - -:_content-type: CONCEPT -[id="installation-overview_{context}"] -= About {product-title} installation - -The {product-title} installation program offers four methods for deploying a cluster: - -* *Interactive*: You can deploy a cluster with the web-based link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. This is the recommended approach for clusters with networks connected to the internet. The {ai-full} is the easiest way to install {product-title}, it provides smart defaults, and it performs pre-flight validations before installing the cluster. It also provides a RESTful API for automation and advanced configuration scenarios. - -* *Local Agent-based*: You can deploy a cluster locally with the agent-based installer for air-gapped or restricted networks. It provides many of the benefits of the {ai-full}, but you must download and configure the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. Configuration is done with a commandline interface. This approach is ideal for air-gapped or restricted networks. - -* *Automated*: You can deploy a cluster on installer-provisioned infrastructure and the cluster it maintains. The installer uses each cluster host's baseboard management controller (BMC) for provisioning. You can deploy clusters with both connected or air-gapped or restricted networks. - -* *Full control*: You can deploy a cluster on infrastructure that you prepare and maintain, which provides maximum customizability. You can deploy clusters with both connected or air-gapped or restricted networks. - -The clusters have the following characteristics: - -* Highly available infrastructure with no single points of failure is available by default. -* Administrators maintain control over what updates are applied and when. 
- -[id="about-the-installation-program"] -== About the installation program - -You can use the installation program to deploy each type of cluster. The installation program generates main assets such as Ignition config files for the bootstrap, control plane (master), and worker machines. You can start an {product-title} cluster with these three configurations and correctly configured infrastructure. - -The {product-title} installation program uses a set of targets and dependencies to manage cluster installations. The installation program has a set of targets that it must achieve, and each target has a set of dependencies. Because each target is only concerned with its own dependencies, the installation program can act to achieve multiple targets in parallel with the ultimate target being a running cluster. The installation program recognizes and uses existing components instead of running commands to create them again because the program meets dependencies. - -.{product-title} installation targets and dependencies -image::targets-and-dependencies.png[{product-title} installation targets and dependencies] - - -[id="about-rhcos"] -== About {op-system-first} - -Post-installation, each cluster machine uses {op-system-first} as the operating system. {op-system} is the immutable container host version of {op-system-base-full} and features a {op-system-base} kernel with SELinux enabled by default. It includes the `kubelet`, which is the Kubernetes node agent, and the CRI-O container runtime, which is optimized for Kubernetes. - -Every control plane machine in an {product-title} {product-version} cluster must use {op-system}, which includes a critical first-boot provisioning tool called Ignition. This tool enables the cluster to configure the machines. Operating system updates are delivered as a bootable container image, using **OSTree** as a backend, that is deployed across the cluster by the Machine Config Operator. Actual operating system changes are made in-place on each machine as an atomic operation by using **rpm-ostree**. Together, these technologies enable {product-title} to manage the operating system like it manages any other application on the cluster, by in-place upgrades that keep the entire platform up-to-date. These in-place updates can reduce the burden on operations teams. - -If you use {op-system} as the operating system for all cluster machines, the cluster manages all aspects of its components and machines, including the operating system. Because of this, only the installation program and the Machine Config Operator can change machines. The installation program uses Ignition config files to set the exact state of each machine, and the Machine Config Operator completes more changes to the machines, such as the application of new certificates or keys, after installation. 
diff --git a/modules/installation-performing-disconnected-mirror-without-registry.adoc b/modules/installation-performing-disconnected-mirror-without-registry.adoc deleted file mode 100644 index 1b956603c07e..000000000000 --- a/modules/installation-performing-disconnected-mirror-without-registry.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_restricted_networks/installing-restricted-networks-preparations.adoc - -[id="installation-performing-disconnected-mirror-without-registry"] -= Performing a mirror to disk for use in disconnected environments with a non-production mirror registry - -If a production mirror registry is not available, you can configure a simple mirror registry by using the disconnected procedure to serve container images that you downloaded to disk. - -.Procedure - -. Determine the IP of your host within the restricted network as `<private_ip>`. - -. On a local host, copy the required `imageContentSources` and `ImageContentSourcePolicy`: -.. Make the mirror directory and change to it: -+ ----- -$ mkdir <mirror_dir> ; cd <mirror_dir> ----- - -.. Mirror the images: -+ ----- -$ oc adm release mirror <product_version> --to file://openshift/release ----- - -. From within the restricted network, start an image mirror server on port 5000 on all interfaces on the host: -.. Change to the mirror directory: -+ ----- -$ cd MIRROR_DIR ----- - -.. Serve the images for the installation program to use: -+ ----- -$ oc image serve ----- - -This registry does not perform authentication and does not require TLS to guarantee integrity of the provided images. diff --git a/modules/installation-performing-disconnected-mirror.adoc b/modules/installation-performing-disconnected-mirror.adoc deleted file mode 100644 index f87630457d58..000000000000 --- a/modules/installation-performing-disconnected-mirror.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_restricted_networks/installing-restricted-networks-preparations.adoc - -[id="installation-performing-disconnected-mirror"] -= Mirroring the {product-title} image registry contents to disk for use in disconnected environments - -When you mirror images to disk, you download images as files. Then, you move your -mirror host, which can be a laptop or a removable media device, like a -USB drive, into the restricted network and complete the mirror -procedure. - -.Procedure - -. On a local host, test the mirror process: -+ ----- -$ oc adm release mirror <product_version> --to <mirror_repository> --dry-run ----- - -. On a local host, copy the required `imageContentSources` and `ImageContentSourcePolicy`: -.. Make the mirror directory and change to it: -+ ----- -$ mkdir <mirror_dir> ; cd <mirror_dir> ----- - -.. Mirror the images: -+ ----- -$ oc adm release mirror <product_version> --to file://openshift/release ----- - -. 
From within the restricted network, mirror the images to your restricted mirror repository: -+ ----- -$ oc adm release mirror file://openshift/release:<product_version>* --to <mirror_repository> ----- diff --git a/modules/installation-preparing-restricted-cluster-to-gather-support-data.adoc b/modules/installation-preparing-restricted-cluster-to-gather-support-data.adoc deleted file mode 100644 index 759162b1e787..000000000000 --- a/modules/installation-preparing-restricted-cluster-to-gather-support-data.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="installation-preparing-restricted-cluster-to-gather-support-data_{context}"] -= Preparing your cluster to gather support data - -Clusters using a restricted network must import the default must-gather image to gather debugging data for Red Hat support. The must-gather image is not imported by default, and clusters on a restricted network do not have access to the internet to pull the latest image from a remote repository. - -.Procedure - -. If you have not added your mirror registry's trusted CA to your cluster's image configuration object as part of the Cluster Samples Operator configuration, perform the following steps: -.. Create the cluster's image configuration object: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file=${MIRROR_ADDR_HOSTNAME}..5000=$path/ca.crt -n openshift-config ----- - -.. Add the required trusted CAs for the mirror in the cluster's image -configuration object: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}' --type=merge ----- - -. Import the default must-gather image from your installation payload: -+ -[source,terminal] ----- -$ oc import-image is/must-gather -n openshift ----- - -When running the `oc adm must-gather` command, use the `--image` flag and point to the payload image, as in the following example: -[source,terminal] ----- -$ oc adm must-gather --image=$(oc adm release info --image-for must-gather) ----- diff --git a/modules/installation-prereq-aws-private-cluster.adoc b/modules/installation-prereq-aws-private-cluster.adoc deleted file mode 100644 index 570d0bd4ba2a..000000000000 --- a/modules/installation-prereq-aws-private-cluster.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc - -[id="installation-prereq-aws-private-cluster_{context}"] -= Installation requirements - -Before you can install the cluster, you must: - -* Provide an existing private AWS VPC and subnets to host the cluster. -+ -Public zones are not supported in Route 53 in AWS GovCloud. As a result, clusters must be private when you deploy to an AWS government region. -* Manually create the installation configuration file (`install-config.yaml`). 
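-
-The following excerpt is a minimal sketch of the fields that make such a manually created `install-config.yaml` file private; the base domain, region, and subnet IDs are placeholder assumptions, not values taken from this procedure:
-
-[source,yaml]
-----
-apiVersion: v1
-baseDomain: example.com
-metadata:
-  name: govcloud-cluster
-platform:
-  aws:
-    region: us-gov-west-1
-    subnets:
-    - subnet-0123456789abcdef0
-    - subnet-0fedcba9876543210
-publish: Internal
-pullSecret: '{"auths": ...}'
-sshKey: ssh-ed25519 AAAA...
-----
-
-Setting `publish` to `Internal` keeps the cluster endpoints private, and listing existing subnet IDs under `platform.aws.subnets` deploys the cluster into the VPC that you provide.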
diff --git a/modules/installation-process.adoc b/modules/installation-process.adoc deleted file mode 100644 index a042ba7f90bb..000000000000 --- a/modules/installation-process.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/index.adoc -// * architecture/architecture-installation.adoc - -[id="installation-process_{context}"] -= Installation process - -Except for the {ai-full}, when you install an {product-title} cluster, you download the installation program from -ifndef::openshift-origin[] -the appropriate link:https://console.redhat.com/openshift/install[Infrastructure Provider] page on the {cluster-manager} site. This site manages: - -* REST API for accounts -* Registry tokens, which are the pull secrets that you use to obtain the required components -* Cluster registration, which associates the cluster identity to your Red Hat account to facilitate the gathering of usage metrics -endif::[] -ifdef::openshift-origin[] -https://github.com/openshift/okd/releases. -endif::[] - -In {product-title} {product-version}, the installation program is a Go binary file that performs a series of file transformations on a set of assets. The way you interact with the installation program differs depending on your installation type. - -* To deploy a cluster with the {ai-full}, you configure the cluster settings using the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}]. There is no installer to download and configure. After you complete the configuration, you download a discovery ISO and boot cluster machines with that image. You can install clusters with the {ai-full} on Nutanix, vSphere, and bare metal with full integration, and other platforms without integration. If you install on bare metal, you must provide all of the cluster infrastructure and resources, including the networking, load balancing, storage, and individual cluster machines. - -* To deploy clusters with the agent-based installer, you download the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. Then, you configure the cluster and generate a discovery image. You boot cluster machines with the discovery image, which installs an agent that communicates with the installation program and handles the provisioning for you instead of you interacting with the installation program or setting up a provisioner machine yourself. You must provide all of the cluster infrastructure and resources, including the networking, load balancing, storage, and individual cluster machines. This approach is ideal for air-gapped or restricted network environments. - -* For clusters with installer-provisioned infrastructure, you delegate the infrastructure bootstrapping and provisioning to the installation program instead of doing it yourself. The installation program creates all of the networking, machines, and operating systems that are required to support the cluster, except if you install on bare metal. If you install on bare metal, you must provide all of the cluster infrastructure and resources, including the bootstrap machine, networking, load balancing, storage, and individual cluster machines. 
- -* If you provision and manage the infrastructure for your cluster, you must provide all of the cluster infrastructure and resources, including the bootstrap machine, networking, load balancing, storage, and individual cluster machines. - -The installer uses three sets of files during installation: an installation configuration file that is named `install-config.yaml`, Kubernetes manifests, and Ignition config files for your machine types. - -[IMPORTANT] -==== -It is possible to modify Kubernetes and the Ignition config files that control the underlying {op-system} operating system during installation. However, no validation is available to confirm the suitability of any modifications that you make to these objects. If you modify these objects, you might render your cluster non-functional. Because of this risk, modifying Kubernetes and Ignition config files is not supported unless you are following documented procedures or are instructed to do so by Red Hat support. -==== - -The installation configuration file is transformed into Kubernetes manifests, and then the manifests are wrapped into Ignition config files. The installation program uses these Ignition config files to create the cluster. - -The installation configuration files are all pruned when you run the installation program, so be sure to back up all configuration files that you want to use again. - -[IMPORTANT] -==== -You cannot modify the parameters that you set during installation, but you can modify many cluster attributes after installation. -==== - -[discrete] -== The installation process with the {ai-full} - -Installation with the link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform/2022/html-single/assisted_installer_for_openshift_container_platform/index[{ai-full}] involves creating a cluster configuration interactively using the web-based user interface or using the RESTful API. The {ai-full} user interface prompts you for required values and provides reasonable default values for the remaining parameters, unless you change them in the user interface or with the API. The {ai-full} generates a discovery image, which you download and use to boot the cluster machines. The image installs {op-system} and an agent, and the agent handles the provisioning for you. You can install {product-title} with the {ai-full} and full integration on Nutanix, vSphere, and bare metal, and on other platforms without integration. - -{product-title} manages all aspects of the cluster, including the operating system itself. Each machine boots with a configuration that references resources hosted in the cluster that it joins. This configuration allows the cluster to manage itself as updates are applied. - -If possible, use this feature to avoid having to download and configure the agent-based installer. - -[discrete] -== The installation process with agent-based infrastructure - -Agent-based installation is similar to using the {ai-full}, except that you download and install the link:https://console.redhat.com/openshift/install/metal/agent-based[agent-based installer] first. Agent-based installation is recommended when you want all the convenience of the {ai-full}, but you need to install with an air-gapped or disconnected network. - -If possible, use this feature to avoid having to create a provisioner machine with a bootstrap VM and provision and maintain the cluster infrastructure. 
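-
-Returning to the three sets of files described earlier, the transformation from installation configuration to Ignition assets can also be stepped through explicitly by using the installation program's staged targets. This is a sketch that assumes an installation directory named `mycluster`; most installations run only the final `create cluster` target:
-
-[source,terminal]
-----
-$ openshift-install create install-config --dir mycluster
-$ openshift-install create manifests --dir mycluster
-$ openshift-install create ignition-configs --dir mycluster
-----
-
-Each target consumes the assets that the previous target produced, and running a later target generates any missing earlier assets for you.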
- -[discrete] -== The installation process with installer-provisioned infrastructure - -The default installation type uses installer-provisioned infrastructure. By default, the installation program acts as an installation wizard, prompting you for values that it cannot determine on its own and providing reasonable default values for the remaining parameters. You can also customize the installation process to support advanced infrastructure scenarios. The installation program provisions the underlying infrastructure for the cluster. - -You can install either a standard cluster or a customized cluster. With a standard cluster, you provide minimum details that are required to install the cluster. With a customized cluster, you can specify more details about the platform, such as the number of machines that the control plane uses, the type of virtual machine that the cluster deploys, or the CIDR range for the Kubernetes service network. - -If possible, use this feature to avoid having to provision and maintain the cluster infrastructure. In all other environments, you use the installation program to generate the assets that you require to provision your cluster infrastructure. - -With installer-provisioned infrastructure clusters, {product-title} manages all aspects of the cluster, including the operating system itself. Each machine boots with a configuration that references resources hosted in the cluster that it joins. This configuration allows the cluster to manage itself as updates are applied. - -[discrete] -== The installation process with user-provisioned infrastructure - -You can also install {product-title} on infrastructure that you provide. You use the installation program to generate the assets that you require to provision the cluster infrastructure, create the cluster infrastructure, and then deploy the cluster to the infrastructure that you provided. - -If you do not use infrastructure that the installation program provisioned, you must manage and maintain the cluster resources yourself, including: - -* The underlying infrastructure for the control plane and compute machines that make up the cluster -* Load balancers -* Cluster networking, including the DNS records and required subnets -* Storage for the cluster infrastructure and applications - -If your cluster uses user-provisioned infrastructure, you have the option of adding {op-system-base} compute machines to your cluster. - -[discrete] -== Installation process details - -Because each machine in the cluster requires information about the cluster when it is provisioned, {product-title} uses a temporary _bootstrap_ machine during initial configuration to provide the required information to the permanent control plane. It boots by using an Ignition config file that describes how to create the cluster. The bootstrap machine creates the control plane machines that make up the control plane. The control plane machines then create the compute machines, which are also known as worker machines. 
The following figure illustrates this process: - -ifndef::openshift-origin[] -.Creating the bootstrap, control plane, and compute machines -image::create-nodes.png[Creating bootstrap, control plane, and compute machines] -endif::openshift-origin[] -ifdef::openshift-origin[] -.Creating the bootstrap, control plane, and compute machines -image::150_OpenShift_VMware_on_AWS_1021_installer_FCOS.png[Creating bootstrap, control plane, and compute machines] -endif::openshift-origin[] - -After the cluster machines initialize, the bootstrap machine is destroyed. All clusters use the bootstrap process to initialize the cluster, but if you provision the infrastructure for your cluster, you must complete many of the steps manually. - -[IMPORTANT] -==== -* The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. - -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -Bootstrapping a cluster involves the following steps: - -. The bootstrap machine boots and starts hosting the remote resources required for the control plane machines to boot. (Requires manual intervention if you provision the infrastructure) -. The bootstrap machine starts a single-node etcd cluster and a temporary Kubernetes control plane. -. The control plane machines fetch the remote resources from the bootstrap machine and finish booting. (Requires manual intervention if you provision the infrastructure) -. The temporary control plane schedules the production control plane to the production control plane machines. -. The Cluster Version Operator (CVO) comes online and installs the etcd Operator. The etcd Operator scales up etcd on all control plane nodes. -. The temporary control plane shuts down and passes control to the production control plane. -. The bootstrap machine injects {product-title} components into the production control plane. -. The installation program shuts down the bootstrap machine. (Requires manual intervention if you provision the infrastructure) -. The control plane sets up the compute nodes. -. The control plane installs additional services in the form of a set of Operators. - -The result of this bootstrapping process is a running {product-title} cluster. The cluster then downloads and configures remaining components needed for the day-to-day operation, including the creation of compute machines in supported environments. 
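-
-If you must recover kubelet certificates as described in the preceding note, the pending certificate signing requests can be listed and approved from a command line. This is a brief sketch; see _Recovering from expired control plane certificates_ for the full procedure:
-
-[source,terminal]
-----
-$ oc get csr
-$ oc adm certificate approve <csr_name>
-----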
diff --git a/modules/installation-registry-osp-creating-custom-pvc.adoc b/modules/installation-registry-osp-creating-custom-pvc.adoc deleted file mode 100644 index ee79c983f4aa..000000000000 --- a/modules/installation-registry-osp-creating-custom-pvc.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * registry/configuring_registry_storage/configuring-registry-storage.adoc - -:_content-type: PROCEDURE -[id="installation-registry-osp-creating-custom-pvc_{context}"] -= Configuring an image registry with custom storage on clusters that run on {rh-openstack} - -After you install a cluster on {rh-openstack-first}, you can use a Cinder volume that is in a specific availability zone for registry storage. - -.Procedure - -. Create a YAML file that specifies the storage class and availability zone to use. For example: -+ -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: custom-csi-storageclass -provisioner: cinder.csi.openstack.org -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -parameters: - availability: <availability_zone_name> ----- -+ -[NOTE] -==== -{product-title} does not verify the existence of the availability zone you choose. Verify the name of the availability zone before you apply the configuration. -==== - -. From a command line, apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f <storage_class_file_name> ----- -+ -.Example output -[source,terminal] ----- -storageclass.storage.k8s.io/custom-csi-storageclass created ----- - -. Create a YAML file that specifies a persistent volume claim (PVC) that uses your storage class and the `openshift-image-registry` namespace. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: csi-pvc-imageregistry - namespace: openshift-image-registry <1> - annotations: - imageregistry.openshift.io: "true" -spec: - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - resources: - requests: - storage: 100Gi <2> - storageClassName: <your_custom_storage_class> <3> ----- -<1> Enter the namespace `openshift-image-registry`. This namespace allows the Cluster Image Registry Operator to consume the PVC. -<2> Optional: Adjust the volume size. -<3> Enter the name of the storage class that you created. - -. From a command line, apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f <pvc_file_name> ----- -+ -.Example output -[source,terminal] ----- -persistentvolumeclaim/csi-pvc-imageregistry created ----- - -. Replace the original persistent volume claim in the image registry configuration with the new claim: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io/cluster --type 'json' -p='[{"op": "replace", "path": "/spec/storage/pvc/claim", "value": "csi-pvc-imageregistry"}]' ----- -+ -.Example output -[source,terminal] ----- -config.imageregistry.operator.openshift.io/cluster patched ----- -+ -Over the next several minutes, the configuration is updated. - -.Verification - -To confirm that the registry is using the resources that you defined: - -. Verify that the PVC claim value is identical to the name that you provided in your PVC definition: -+ -[source,terminal] ----- -$ oc get configs.imageregistry.operator.openshift.io/cluster -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -status: - ... - managementState: Managed - pvc: - claim: csi-pvc-imageregistry -... ----- - -. 
Verify that the status of the PVC is `Bound`: -+ -[source,terminal] ----- -$ oc get pvc -n openshift-image-registry csi-pvc-imageregistry ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -csi-pvc-imageregistry Bound pvc-72a8f9c9-f462-11e8-b6b6-fa163e18b7b5 100Gi RWO custom-csi-storageclass 11m ----- diff --git a/modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc b/modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc deleted file mode 100644 index c007bf0eb6ad..000000000000 --- a/modules/installation-registry-storage-block-recreate-rollout-bare-metal.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_baremetal/installing-bare-metal-network-customizations.adoc -// * installing/installing_baremetal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc - -:_content-type: PROCEDURE -[id="installation-registry-storage-block-recreate-rollout-bare-metal_{context}"] -= Configuring block registry storage - -To allow the image registry to use block storage types during upgrades as a cluster administrator, you can use the `Recreate` rollout strategy. - -[IMPORTANT] -==== -Block storage volumes, or block persistent volumes, are supported but not recommended for use with the image -registry on production clusters. An installation where the registry is -configured on block storage is not highly available because the registry cannot -have more than one replica. - -If you choose to use a block storage volume with the image registry, you must use a filesystem Persistent Volume Claim (PVC). -==== - -.Procedure - -. To set the image registry storage as a block storage type, patch the registry so that it uses the `Recreate` rollout strategy and runs with only one (`1`) replica: -+ -[source,terminal] ----- -$ oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{"spec":{"rolloutStrategy":"Recreate","replicas":1}}' ----- -+ -. Provision the PV for the block storage device, and create a PVC for that volume. The requested block volume uses the ReadWriteOnce (RWO) access mode. -+ -. Edit the registry configuration so that it references the correct PVC. 
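-
-For example, you can set the reference by editing the Image Registry Operator configuration:
-
-[source,terminal]
-----
-$ oc edit config.imageregistry.operator.openshift.io -o yaml
-----
-
-Then update the `storage` stanza to name the claim. The claim name below is a placeholder for the PVC that you created in the previous step:
-
-[source,yaml]
-----
-storage:
-  pvc:
-    claim: <your_pvc_name>
-----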
diff --git a/modules/installation-registry-storage-block-recreate-rollout.adoc b/modules/installation-registry-storage-block-recreate-rollout.adoc deleted file mode 100644 index 93c9581fe82b..000000000000 --- a/modules/installation-registry-storage-block-recreate-rollout.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc - -:_content-type: PROCEDURE -[id="installation-registry-storage-block-recreate-rollout_{context}"] -= Configuring block registry storage for VMware vSphere - -To allow the image registry to use block storage types such as vSphere Virtual Machine Disk (VMDK) during upgrades as a cluster administrator, you can use the `Recreate` rollout strategy. - -[IMPORTANT] -==== -Block storage volumes are supported but not recommended for use with image -registry on production clusters. An installation where the registry is -configured on block storage is not highly available because the registry cannot -have more than one replica. -==== - -.Procedure - -. To set the image registry storage as a block storage type, patch the registry so that it uses the `Recreate` rollout strategy and runs with only `1` replica: -+ -[source,terminal] ----- -$ oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{"spec":{"rolloutStrategy":"Recreate","replicas":1}}' ----- -+ -. Provision the PV for the block storage device, and create a PVC for that volume. The requested block volume uses the ReadWriteOnce (RWO) access mode. -.. Create a `pvc.yaml` file with the following contents to define a VMware vSphere `PersistentVolumeClaim` object: -+ -[source,yaml] ----- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: image-registry-storage <1> - namespace: openshift-image-registry <2> -spec: - accessModes: - - ReadWriteOnce <3> - resources: - requests: - storage: 100Gi <4> ----- -<1> A unique name that represents the `PersistentVolumeClaim` object. -<2> The namespace for the `PersistentVolumeClaim` object, which is `openshift-image-registry`. -<3> The access mode of the persistent volume claim. With `ReadWriteOnce`, the volume can be mounted with read and write permissions by a single node. -<4> The size of the persistent volume claim. - -.. Create the `PersistentVolumeClaim` object from the file: -+ -[source,terminal] ----- -$ oc create -f pvc.yaml -n openshift-image-registry ----- - -+ -. Edit the registry configuration so that it references the correct PVC: -+ -[source,terminal] ----- -$ oc edit config.imageregistry.operator.openshift.io -o yaml ----- -+ -.Example output -[source,yaml] ----- -storage: - pvc: - claim: <1> ----- -<1> Creating a custom PVC allows you to leave the `claim` field blank for the default automatic creation of an `image-registry-storage` PVC. 
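-
-After the configuration change, you can optionally confirm that the registry rolls out with the single replica and that its Operator reports a healthy state. A quick check might look like the following:
-
-[source,terminal]
-----
-$ oc get pods -n openshift-image-registry
-$ oc get clusteroperator image-registry
-----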
diff --git a/modules/installation-registry-storage-config.adoc b/modules/installation-registry-storage-config.adoc deleted file mode 100644 index 6f2b23df9e66..000000000000 --- a/modules/installation-registry-storage-config.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc -// * registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc - -ifeval::["{context}" == "installing-aws-user-infra"] -:aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:aws: -endif::[] - -:_content-type: CONCEPT -[id="installation-registry-storage-config_{context}"] -= Image registry storage configuration - -ifdef::aws[] -Amazon Web Services provides default storage, which means the Image Registry -Operator is available after installation. However, if the Registry Operator -cannot create an S3 bucket and automatically configure storage, you must -manually configure registry storage. -endif::aws[] -ifndef::aws[] -The Image Registry Operator is not initially available for platforms that do -not provide default storage. After installation, you must configure your -registry to use storage so that the Registry Operator is made available. -endif::aws[] - -Instructions are shown for configuring a persistent volume, which is required for production clusters. Where applicable, instructions are shown for configuring an empty directory as the storage location, which is available for only non-production clusters. - -Additional instructions are provided for allowing the image registry to use block storage types by using the `Recreate` rollout strategy during upgrades. 
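-
-One optional way to see whether storage has been configured yet is to inspect the Image Registry Operator configuration resource directly and review its `spec.storage` and `spec.managementState` fields:
-
-[source,terminal]
-----
-$ oc get configs.imageregistry.operator.openshift.io/cluster -o yaml
-----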
- -ifeval::["{context}" == "installing-aws-user-infra"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!aws: -endif::[] diff --git a/modules/installation-registry-storage-non-production.adoc b/modules/installation-registry-storage-non-production.adoc deleted file mode 100644 index 983d096b9217..000000000000 --- a/modules/installation-registry-storage-non-production.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc - -:_content-type: PROCEDURE -[id="installation-registry-storage-non-production_{context}"] -= Configuring storage for the image registry in non-production clusters - -You must configure storage for the Image Registry Operator. For non-production -clusters, you can set the image registry to an empty directory. If you do so, -all images are lost if you restart the registry. - -.Procedure - -* To set the image registry storage to an empty directory: -+ -[source,terminal] ----- -$ oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}' ----- -+ -[WARNING] -==== -Configure this option for only non-production clusters. -==== -+ -If you run this command before the Image Registry Operator initializes its -components, the `oc patch` command fails with the following error: -+ -[source,terminal] ----- -Error from server (NotFound): configs.imageregistry.operator.openshift.io "cluster" not found ----- -+ -Wait a few minutes and run the command again. diff --git a/modules/installation-requirements-user-infra-ibm-z-kvm.adoc b/modules/installation-requirements-user-infra-ibm-z-kvm.adoc deleted file mode 100644 index 229b88f6aa71..000000000000 --- a/modules/installation-requirements-user-infra-ibm-z-kvm.adoc +++ /dev/null @@ -1,195 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc - - -:_content-type: CONCEPT -[id="installation-requirements-user-infra_{context}"] -= Machine requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -One or more KVM host machines based on {op-system-base} 8.6 or later. Each {op-system-base} KVM host machine must have libvirt installed and running. The virtual machines are provisioned under each {op-system-base} KVM host machine. - - -[id="machine-requirements_{context}"] -== Required machines - -The smallest {product-title} clusters require the following hosts: - -.Minimum required hosts -[options="header"] -|=== -|Hosts |Description - -|One temporary bootstrap machine -|The cluster requires the bootstrap machine to deploy the {product-title} cluster -on the three control plane machines. You can remove the bootstrap machine after -you install the cluster. 
-|Three control plane machines -|The control plane machines run the Kubernetes and {product-title} services that form the control plane. - -|At least two compute machines, which are also known as worker machines. -|The workloads requested by {product-title} users run on the compute machines. - -|=== - -[IMPORTANT] -==== -To improve high availability of your cluster, distribute the control plane machines over different {op-system-base} instances on at least two physical machines. -==== - -The bootstrap, control plane, and compute machines must use {op-system-first} as the operating system. - -See link:https://access.redhat.com/articles/rhel-limits[Red Hat Enterprise Linux technology capabilities and limits]. - -[id="network-connectivity_{context}"] -== Network connectivity requirements - -The {product-title} installer creates the Ignition files, which are necessary for all the {op-system-first} virtual machines. The automated installation of {product-title} is performed by the bootstrap machine. It starts the installation of {product-title} on each node, starts the Kubernetes cluster, and then finishes. During this bootstrap, the virtual machine must have an established network connection either through a Dynamic Host Configuration Protocol (DHCP) server or static IP address. - -[id="ibm-z-network-connectivity_{context}"] -== {ibmzProductName} network connectivity requirements - -To install on {ibmzProductName} under {op-system-base} KVM, you need: - -* A {op-system-base} KVM host configured with an OSA or RoCE network adapter. -* Either a {op-system-base} KVM host that is configured to use bridged networking in libvirt or MacVTap to connect the network to the guests. -+ -See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/configuring_and_managing_virtualization/index#types-of-virtual-machine-network-connections_configuring-virtual-machine-network-connections[Types of virtual network connections]. - -[id="host-machine-resource-requirements_{context}"] -== Host machine resource requirements -The {op-system-base} KVM host in your environment must meet the following requirements to host the virtual machines that you plan for the {product-title} environment. See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_virtualization/getting-started-with-virtualization-in-rhel-8_configuring-and-managing-virtualization[Getting started with virtualization]. - -You can install {product-title} version {product-version} on the following IBM hardware: - -* IBM z16 (all models), IBM z15 (all models), IBM z14 (all models) -* {linuxoneProductName} 4 (all models), {linuxoneProductName} III (all models), {linuxoneProductName} Emperor II, {linuxoneProductName} Rockhopper II - -[id="minimum-ibm-z-system-requirements_{context}"] -== Minimum {ibmzProductName} system environment - -[discrete] -=== Hardware requirements - -* The equivalent of six Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. -* At least one network connection to both connect to the `LoadBalancer` service and to serve data for traffic outside the cluster. - -[NOTE] -==== -You can use dedicated or shared IFLs to assign sufficient compute resources. Resource sharing is one of the key strengths of {ibmzProductName}. However, you must adjust capacity correctly on each hypervisor layer and ensure sufficient resources for every {product-title} cluster. 
-==== - -[IMPORTANT] -==== -Since the overall performance of the cluster can be impacted, the LPARs that are used to set up the {product-title} clusters must provide sufficient compute capacity. In this context, LPAR weight management, entitlements, and CPU shares on the hypervisor level play an important role. -==== - -[discrete] -=== Operating system requirements -* One LPAR running on {op-system-base} 8.6 or later with KVM, which is managed by libvirt - -On your {op-system-base} KVM host, set up: - -* Three guest virtual machines for {product-title} control plane machines -* Two guest virtual machines for {product-title} compute machines -* One guest virtual machine for the temporary {product-title} bootstrap machine - -[id="minimum-resource-requirements_{context}"] -== Minimum resource requirements - -Each cluster virtual machine must meet the following minimum requirements: - -[cols="2,2,2,2,2,2",options="header"] -|=== - -|Virtual Machine -|Operating System -|vCPU ^[1]^ -|Virtual RAM -|Storage -|IOPS - -|Bootstrap -|{op-system} -|4 -|16 GB -|100 GB -|N/A - -|Control plane -|{op-system} -|4 -|16 GB -|100 GB -|N/A - -|Compute -|{op-system} -|2 -|8 GB -|100 GB -|N/A - -|=== -[.small] --- -1. One physical core (IFL) provides two logical cores (threads) when SMT-2 is enabled. The hypervisor can provide two or more vCPUs. --- - -[id="preferred-ibm-z-system-requirements_{context}"] -== Preferred {ibmzProductName} system environment - -[discrete] -=== Hardware requirements - -* Three LPARS that each have the equivalent of six IFLs, which are SMT2 enabled, for each cluster. -* Two network connections to both connect to the `LoadBalancer` service and to serve data for traffic outside the cluster. - -[discrete] -=== Operating system requirements - -* For high availability, two or three LPARs running on {op-system-base} 8.6 or later with KVM, which are managed by libvirt. - -On your {op-system-base} KVM host, set up: - -* Three guest virtual machines for {product-title} control plane machines, distributed across the {op-system-base} KVM host machines. -* At least six guest virtual machines for {product-title} compute machines, distributed across the {op-system-base} KVM host machines. -* One guest virtual machine for the temporary {product-title} bootstrap machine. -* To ensure the availability of integral components in an overcommitted environment, increase the priority of the control plane by using `cpu_shares`. Do the same for infrastructure nodes, if they exist. See link:https://www.ibm.com/docs/en/linux-on-systems?topic=domain-schedinfo[schedinfo] in IBM Documentation. 
- -[id="preferred-resource-requirements_{context}"] -== Preferred resource requirements - -The preferred requirements for each cluster virtual machine are: - -[cols="2,2,2,2,2",options="header"] -|=== - -|Virtual Machine -|Operating System -|vCPU -|Virtual RAM -|Storage - -|Bootstrap -|{op-system} -|4 -|16 GB -|120 GB - -|Control plane -|{op-system} -|8 -|16 GB -|120 GB - -|Compute -|{op-system} -|6 -|8 GB -|120 GB - -|=== diff --git a/modules/installation-restricted-network-samples.adoc b/modules/installation-restricted-network-samples.adoc deleted file mode 100644 index bde1f1f54228..000000000000 --- a/modules/installation-restricted-network-samples.adoc +++ /dev/null @@ -1,113 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc -// * openshift_images/samples-operator-alt-registry.adoc - -ifeval::["{context}" == "post-install-cluster-tasks"] -:restrictednetwork: -endif::[] - -ifeval::["{context}" == "samples-operator-alt-registry"] -:samplesoperatoraltreg: -endif::[] - -:_content-type: PROCEDURE -[id="installation-restricted-network-samples_{context}"] -= Using Cluster Samples Operator image streams with alternate or mirrored registries - -Most image streams in the `openshift` namespace managed by the Cluster Samples Operator -point to images located in the Red Hat registry at link:https://registry.redhat.io[registry.redhat.io]. -ifdef::restrictednetwork[] -Mirroring -will not apply to these image streams. -endif::[] - -[NOTE] -==== -The `cli`, `installer`, `must-gather`, and `tests` image streams, while -part of the install payload, are not managed by the Cluster Samples Operator. These are -not addressed in this procedure. -==== - -[IMPORTANT] -==== -The Cluster Samples Operator must be set to `Managed` in a disconnected environment. To install the image streams, you have a mirrored registry. -==== - -.Prerequisites -* Access to the cluster as a user with the `cluster-admin` role. -* Create a pull secret for your mirror registry. - -.Procedure - -. Access the images of a specific image stream to mirror, for example: -+ -[source,terminal] ----- -$ oc get is <imagestream> -n openshift -o json | jq .spec.tags[].from.name | grep registry.redhat.io ----- -+ -. Mirror images from link:https://registry.redhat.io[registry.redhat.io] associated with any image streams you need -ifdef::restrictednetwork[] -in the restricted network environment into one of the defined mirrors, for example: -endif::[] -ifdef::configsamplesoperator[] -into your defined preferred registry, for example: -endif::[] -+ -[source,terminal] ----- -$ oc image mirror registry.redhat.io/rhscl/ruby-25-rhel7:latest ${MIRROR_ADDR}/rhscl/ruby-25-rhel7:latest ----- - -. Create the cluster's image configuration object: -+ -[source,terminal] ----- -$ oc create configmap registry-config --from-file=${MIRROR_ADDR_HOSTNAME}..5000=$path/ca.crt -n openshift-config ----- - -. Add the required trusted CAs for the mirror in the cluster's image -configuration object: -+ -[source,terminal] ----- -$ oc patch image.config.openshift.io/cluster --patch '{"spec":{"additionalTrustedCA":{"name":"registry-config"}}}' --type=merge ----- - -. 
Update the `samplesRegistry` field in the Cluster Samples Operator configuration object -to contain the `hostname` portion of the mirror location defined in the mirror -configuration: -+ -[source,terminal] ----- -$ oc edit configs.samples.operator.openshift.io -n openshift-cluster-samples-operator ----- -+ -[NOTE] -==== -This is required because the image stream import process does not use the mirror or search mechanism at this time. -==== -+ -. Add any image streams that are not mirrored into the `skippedImagestreams` field -of the Cluster Samples Operator configuration object. Or if you do not want to support -any of the sample image streams, set the Cluster Samples Operator to `Removed` in the -Cluster Samples Operator configuration object. -+ -[NOTE] -==== -The Cluster Samples Operator issues alerts if image stream imports are failing but the Cluster Samples Operator is either periodically retrying or does not appear to be retrying them. -==== -+ -Many of the templates in the `openshift` namespace -reference the image streams. So using `Removed` to purge both the image streams -and templates will eliminate the possibility of attempts to use them if they -are not functional because of any missing image streams. - -ifeval::["{context}" == "post-install-cluster-tasks"] -:!restrictednetwork: -endif::[] - -ifeval::["{context}" == "samples-operator-alt-registry"] -:!samplesoperatoraltreg: -endif::[] diff --git a/modules/installation-rhv-about-inventory-yml.adoc b/modules/installation-rhv-about-inventory-yml.adoc deleted file mode 100644 index afb61642bcda..000000000000 --- a/modules/installation-rhv-about-inventory-yml.adoc +++ /dev/null @@ -1,187 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -[id="installation-rhv-about-inventory-yml_{context}"] -= The inventory.yml file - -You use the `inventory.yml` file to define and create elements of the {product-title} cluster you are installing. This includes elements such as the {op-system-first} image, virtual machine templates, bootstrap machine, control plane nodes, and worker nodes. You also use `inventory.yml` to destroy the cluster. - -The following `inventory.yml` example shows you the parameters and their default values. The quantities and numbers in these default values meet the requirements for running a production {product-title} cluster in a {rh-virtualization} environment. 
- -.Example `inventory.yml` file -[source,yaml] ----- ---- -all: - vars: - - ovirt_cluster: "Default" - ocp: - assets_dir: "{{ lookup('env', 'ASSETS_DIR') }}" - ovirt_config_path: "{{ lookup('env', 'HOME') }}/.ovirt/ovirt-config.yaml" - - # --- - # {op-system} section - # --- - rhcos: - image_url: "https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/latest/rhcos-openstack.x86_64.qcow2.gz" - local_cmp_image_path: "/tmp/rhcos.qcow2.gz" - local_image_path: "/tmp/rhcos.qcow2" - - # --- - # Profiles section - # --- - control_plane: - cluster: "{{ ovirt_cluster }}" - memory: 16GiB - sockets: 4 - cores: 1 - template: rhcos_tpl - operating_system: "rhcos_x64" - type: high_performance - graphical_console: - headless_mode: false - protocol: - - spice - - vnc - disks: - - size: 120GiB - name: os - interface: virtio_scsi - storage_domain: depot_nvme - nics: - - name: nic1 - network: lab - profile: lab - - compute: - cluster: "{{ ovirt_cluster }}" - memory: 16GiB - sockets: 4 - cores: 1 - template: worker_rhcos_tpl - operating_system: "rhcos_x64" - type: high_performance - graphical_console: - headless_mode: false - protocol: - - spice - - vnc - disks: - - size: 120GiB - name: os - interface: virtio_scsi - storage_domain: depot_nvme - nics: - - name: nic1 - network: lab - profile: lab - - # --- - # Virtual machines section - # --- - vms: - - name: "{{ metadata.infraID }}-bootstrap" - ocp_type: bootstrap - profile: "{{ control_plane }}" - type: server - - name: "{{ metadata.infraID }}-master0" - ocp_type: master - profile: "{{ control_plane }}" - - name: "{{ metadata.infraID }}-master1" - ocp_type: master - profile: "{{ control_plane }}" - - name: "{{ metadata.infraID }}-master2" - ocp_type: master - profile: "{{ control_plane }}" - - name: "{{ metadata.infraID }}-worker0" - ocp_type: worker - profile: "{{ compute }}" - - name: "{{ metadata.infraID }}-worker1" - ocp_type: worker - profile: "{{ compute }}" - - name: "{{ metadata.infraID }}-worker2" - ocp_type: worker - profile: "{{ compute }}" ----- - -[IMPORTANT] -==== -Enter values for parameters whose descriptions begin with "Enter." Otherwise, you can use the default value or replace it with a new value. -==== - -.General section - -* `ovirt_cluster`: Enter the name of an existing {rh-virtualization} cluster in which to install the {product-title} cluster. -* `ocp.assets_dir`: The path of a directory the `openshift-install` installation program creates to store the files that it generates. -* `ocp.ovirt_config_path`: The path of the `ovirt-config.yaml` file the installation program generates, for example, `./wrk/install-config.yaml`. This file contains the credentials required to interact with the REST API of the {rh-virtualization-engine-name}. - -.{op-system-first} section - -* `image_url`: Enter the URL of the {op-system} image you specified for download. -* `local_cmp_image_path`: The path of a local download directory for the compressed {op-system} image. -* `local_image_path`: The path of a local directory for the extracted {op-system} image. - -.Profiles section - -This section consists of two profiles: - -* `control_plane`: The profile of the bootstrap and control plane nodes. -* `compute`: The profile of workers nodes in the compute plane. - -These profiles have the following parameters. The default values of the parameters meet the minimum requirements for running a production cluster. You can increase or customize these values to meet your workload requirements. 
- -* `cluster`: The value gets the cluster name from `ovirt_cluster` in the General Section. -* `memory`: The amount of memory, in GB, for the virtual machine. -* `sockets`: The number of sockets for the virtual machine. -* `cores`: The number of cores for the virtual machine. -* `template`: The name of the virtual machine template. If plan to install multiple clusters, and these clusters use templates that contain different specifications, prepend the template name with the ID of the cluster. -* `operating_system`: The type of guest operating system in the virtual machine. With oVirt/{rh-virtualization} version 4.4, this value must be `rhcos_x64` so the value of `Ignition script` can be passed to the VM. -* `type`: Enter `server` as the type of the virtual machine. -+ -[IMPORTANT] -==== -You must change the value of the `type` parameter from `high_performance` to `server`. -==== -* `disks`: The disk specifications. The `control_plane` and `compute` nodes can have different storage domains. -* `size`: The minimum disk size. -* `name`: Enter the name of a disk connected to the target cluster in {rh-virtualization}. -* `interface`: Enter the interface type of the disk you specified. -* `storage_domain`: Enter the storage domain of the disk you specified. -* `nics`: Enter the `name` and `network` the virtual machines use. You can also specify the virtual network interface profile. By default, NICs obtain their MAC addresses from the oVirt/{rh-virtualization} MAC pool. - -.Virtual machines section - -This final section, `vms`, defines the virtual machines you plan to create and deploy in the cluster. By default, it provides the minimum number of control plane and worker nodes for a production environment. - -`vms` contains three required elements: - -* `name`: The name of the virtual machine. In this case, `metadata.infraID` prepends the virtual machine name with the infrastructure ID from the `metadata.yml` file. -* `ocp_type`: The role of the virtual machine in the {product-title} cluster. Possible values are `bootstrap`, `master`, `worker`. -* `profile`: The name of the profile from which each virtual machine inherits specifications. Possible values in this example are `control_plane` or `compute`. -+ -You can override the value a virtual machine inherits from its profile. To do this, you add the name of the profile attribute to the virtual machine in `inventory.yml` and assign it an overriding value. To see an example of this, examine the `name: "{{ metadata.infraID }}-bootstrap"` virtual machine in the preceding `inventory.yml` example: It has a `type` attribute whose value, `server`, overrides the value of the `type` attribute this virtual machine would otherwise inherit from the `control_plane` profile. - -// TBD https://issues.redhat.com/browse/OCPRHV-414 -// Consider documenting *additional* optional attributes in https://github.com/oVirt/ovirt-ansible-vm-infra that aren't already covered here. Hypothetically, it seems like a user could add these attributes to a profile and then want to override them in the inventory.yml. - -// TBD - Consider adding a topic on how related to: Configure DHCP to assign permanent IP addresses to the virtual machines, consider using the `mac_address` attribute to assign a fixed MAC address to each virtual machine. However, avoid using the same MAC address if you are deploying more than one cluster. We should consider creating a new topic to document this/these scenario(s). 
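As described above, you can override an attribute that a virtual machine would otherwise inherit from its profile by adding that attribute directly to the virtual machine's entry in `inventory.yml`. The following is an illustrative sketch only; the `memory: 32GiB` override and the choice of `worker0` are arbitrary examples, not required settings:

[source,yaml]
----
  vms:
    - name: "{{ metadata.infraID }}-worker0"
      ocp_type: worker
      profile: "{{ compute }}"
      # Overrides the 16GiB value inherited from the compute profile in this example
      memory: 32GiB
----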
- -.Metadata variables - -For virtual machines, `metadata.infraID` prepends the name of the virtual machine with the infrastructure ID from the `metadata.json` file you create when you build the Ignition files. - -The playbooks use the following code to read `infraID` from the specific file located in the `ocp.assets_dir`. - -[source,yaml] ----- ---- -- name: include metadata.json vars - include_vars: - file: "{{ ocp.assets_dir }}/metadata.json" - name: metadata - - ... ----- diff --git a/modules/installation-rhv-building-ignition-files.adoc b/modules/installation-rhv-building-ignition-files.adoc deleted file mode 100644 index c8621709f68b..000000000000 --- a/modules/installation-rhv-building-ignition-files.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-building-ignition-files_{context}"] -= Building the Ignition files - -To build the Ignition files from the manifest files you just generated and modified, you run the installation program. This action creates a {op-system-first} machine, `initramfs`, which fetches the Ignition files and performs the configurations needed to create a node. - -In addition to the Ignition files, the installation program generates the following: - -* An `auth` directory that contains the admin credentials for connecting to the cluster with the `oc` and `kubectl` utilities. -* A `metadata.json` file that contains information such as the {product-title} cluster name, cluster ID, and infrastructure ID for the current installation. - -The Ansible playbooks for this installation process use the value of `infraID` as a prefix for the virtual machines they create. This prevents naming conflicts when there are multiple installations in the same oVirt/{rh-virtualization} cluster. - -[NOTE] -==== -Certificates in Ignition configuration files expire after 24 hours. Complete the cluster installation and keep the cluster running in a non-degraded state for 24 hours so that the first certificate rotation can finish. -==== - -.Procedure - -. To build the Ignition files, enter: -+ -[source,terminal] ----- -$ openshift-install create ignition-configs --dir $ASSETS_DIR ----- -+ -.Example output -[source,terminal] ----- -$ tree -. -└── wrk - ├── auth - │ ├── kubeadmin-password - │ └── kubeconfig - ├── bootstrap.ign - ├── master.ign - ├── metadata.json - └── worker.ign ----- diff --git a/modules/installation-rhv-creating-bootstrap-machine.adoc b/modules/installation-rhv-creating-bootstrap-machine.adoc deleted file mode 100644 index 072b53762542..000000000000 --- a/modules/installation-rhv-creating-bootstrap-machine.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-creating-bootstrap-machine_{context}"] -= Creating the bootstrap machine - -You create a bootstrap machine by running the `bootstrap.yml` playbook. This playbook starts the bootstrap virtual machine, and passes it the `bootstrap.ign` Ignition file from the assets directory. The bootstrap node configures itself so it can serve Ignition files to the control plane nodes. 
- -To monitor the bootstrap process, you use the console in the {rh-virtualization} Administration Portal or connect to the virtual machine by using SSH. - -.Procedure - -. Create the bootstrap machine: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml bootstrap.yml ----- - -. Connect to the bootstrap machine using a console in the Administration Portal or SSH. Replace `<bootstrap_ip>` with the bootstrap node IP address. To use SSH, enter: -+ -[source,terminal] ----- -$ ssh core@<boostrap.ip> ----- - -. Collect `bootkube.service` journald unit logs for the release image service from the bootstrap node: -+ -[source,terminal] ----- -[core@ocp4-lk6b4-bootstrap ~]$ journalctl -b -f -u release-image.service -u bootkube.service ----- -+ -[NOTE] -==== -The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on control plane nodes. After etcd has started on each control plane node and the nodes have joined the cluster, the errors should stop. -==== diff --git a/modules/installation-rhv-creating-control-plane-nodes.adoc b/modules/installation-rhv-creating-control-plane-nodes.adoc deleted file mode 100644 index 11494a4ea7de..000000000000 --- a/modules/installation-rhv-creating-control-plane-nodes.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-creating-control-plane-nodes_{context}"] -= Creating the control plane nodes - -You create the control plane nodes by running the `masters.yml` playbook. This playbook passes the `master.ign` Ignition file to each of the virtual machines. The Ignition file contains a directive for the control plane node to get the Ignition from a URL such as `https://api-int.ocp4.example.org:22623/config/master`. The port number in this URL is managed by the load balancer, and is accessible only inside the cluster. - -.Procedure - -. Create the control plane nodes: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml masters.yml ----- - -. While the playbook creates your control plane, monitor the bootstrapping process: -+ -[source,terminal] ----- -$ openshift-install wait-for bootstrap-complete --dir $ASSETS_DIR ----- -+ -.Example output -[source,terminal] ----- -INFO API v1.26.0 up -INFO Waiting up to 40m0s for bootstrapping to complete... ----- - -. When all the pods on the control plane nodes and etcd are up and running, the installation program displays the following output. 
-+ -.Example output -[source,terminal] ----- -INFO It is now safe to remove the bootstrap resources ----- diff --git a/modules/installation-rhv-creating-install-config-file.adoc b/modules/installation-rhv-creating-install-config-file.adoc deleted file mode 100644 index d4c9d3aa6948..000000000000 --- a/modules/installation-rhv-creating-install-config-file.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-creating-install-config-file_{context}"] -= Creating the install config file - -You create an installation configuration file by running the installation program, `openshift-install`, and responding to its prompts with information you specified or gathered earlier. - -When you finish responding to the prompts, the installation program creates an initial version of the `install-config.yaml` file in the assets directory you specified earlier, for example, `./wrk/install-config.yaml` - -The installation program also creates a file, `$HOME/.ovirt/ovirt-config.yaml`, that contains all the connection parameters that are required to reach the {rh-virtualization-engine-name} and use its REST API. - -**NOTE:** -The installation process does not use values you supply for some parameters, such as `Internal API virtual IP` and `Ingress virtual IP`, because you have already configured them in your infrastructure DNS. - -It also uses the values you supply for parameters in `inventory.yml`, like the ones for `oVirt cluster`, `oVirt storage`, and `oVirt network`. And uses a script to remove or replace these same values from `install-config.yaml` with the previously mentioned `virtual IPs`. -//For details, see xref:set-platform-to-none[]. - -.Procedure - -. Run the installation program: -+ -[source,terminal] ----- -$ openshift-install create install-config --dir $ASSETS_DIR ----- - -. Respond to the installation program's prompts with information about your system. -+ -ifndef::openshift-origin[] -.Example output -[source,terminal] ----- -? SSH Public Key /home/user/.ssh/id_dsa.pub -? Platform <ovirt> -? Engine FQDN[:PORT] [? for help] <engine.fqdn> -? Enter ovirt-engine username <ocpadmin@internal> -? Enter password <******> -? oVirt cluster <cluster> -? oVirt storage <storage> -? oVirt network <net> -? Internal API virtual IP <172.16.0.252> -? Ingress virtual IP <172.16.0.251> -? Base Domain <example.org> -? Cluster Name <ocp4> -? Pull Secret [? for help] <********> ----- -endif::openshift-origin[] -ifndef::openshift-origin[] -.Example output -[source,terminal] ----- -? SSH Public Key /home/user/.ssh/id_dsa.pub -? Platform <ovirt> -? Engine FQDN[:PORT] [? for help] <engine.fqdn> -? Enter ovirt-engine username <ocpadmin@internal> -? Enter password <******> -? oVirt cluster <cluster> -? oVirt storage <storage> -? oVirt network <net> -? Internal API virtual IP <172.16.0.252> -? Ingress virtual IP <172.16.0.251> -? Base Domain <example.org> -? Cluster Name <ocp4> -? Pull Secret [? for help] <********> ----- -endif::openshift-origin[] - -For `Internal API virtual IP` and `Ingress virtual IP`, supply the IP addresses you specified when you configured the DNS service. 
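Before you continue, you can optionally confirm that the DNS records you created resolve to the virtual IP addresses you supplied. A quick check, assuming the example cluster name, base domain, and addresses shown above; substitute your own values:

[source,terminal]
----
# Should return the Internal API virtual IP, for example 172.16.0.252
$ dig +short api.ocp4.example.org

# Should return the Ingress virtual IP, for example 172.16.0.251
$ dig +short console-openshift-console.apps.ocp4.example.org
----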
- -Together, the values you enter for the `oVirt cluster` and `Base Domain` prompts form the FQDN portion of URLs for the REST API and any applications you create, such as `\https://api.ocp4.example.org:6443/` and `\https://console-openshift-console.apps.ocp4.example.org`. - -You can get the {cluster-manager-url-pull}. diff --git a/modules/installation-rhv-creating-templates-virtual-machines.adoc b/modules/installation-rhv-creating-templates-virtual-machines.adoc deleted file mode 100644 index dac4128acf3a..000000000000 --- a/modules/installation-rhv-creating-templates-virtual-machines.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-creating-templates-virtual-machines_{context}"] -= Creating templates and virtual machines - -After confirming the variables in the `inventory.yml`, you run the first Ansible provisioning playbook, `create-templates-and-vms.yml`. - -This playbook uses the connection parameters for the {rh-virtualization} {rh-virtualization-engine-name} from `$HOME/.ovirt/ovirt-config.yaml` and reads `metadata.json` in the assets directory. - -If a local {op-system-first} image is not already present, the playbook downloads one from the URL you specified for `image_url` in `inventory.yml`. It extracts the image and uploads it to {rh-virtualization} to create templates. - -The playbook creates a template based on the `control_plane` and `compute` profiles in the `inventory.yml` file. If these profiles have different names, it creates two templates. - -When the playbook finishes, the virtual machines it creates are stopped. You can get information from them to help configure other infrastructure elements. For example, you can get the virtual machines' MAC addresses to configure DHCP to assign permanent IP addresses to the virtual machines. - -.Procedure - - -. In `inventory.yml`, under the `control_plane` and `compute` variables, change both instances of `type: high_performance` to `type: server`. - -. Optional: If you plan to perform multiple installations to the same cluster, create different templates for each {product-title} installation. In the `inventory.yml` file, prepend the value of `template` with `infraID`. For example: -+ -[source,yaml] ----- - control_plane: - cluster: "{{ ovirt_cluster }}" - memory: 16GiB - sockets: 4 - cores: 1 - template: "{{ metadata.infraID }}-rhcos_tpl" - operating_system: "rhcos_x64" - ... ----- - -. Create the templates and virtual machines: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml create-templates-and-vms.yml ----- diff --git a/modules/installation-rhv-creating-worker-nodes-completing-installation.adoc b/modules/installation-rhv-creating-worker-nodes-completing-installation.adoc deleted file mode 100644 index d7977662fb4a..000000000000 --- a/modules/installation-rhv-creating-worker-nodes-completing-installation.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-creating-worker-nodes-completing-installation_{context}"] -= Creating the worker nodes and completing the installation - -Creating worker nodes is similar to creating control plane nodes. 
However, worker nodes workers do not automatically join the cluster. To add them to the cluster, you review and approve the workers' pending CSRs (Certificate Signing Requests). - -After approving the first requests, you continue approving CSR until all of the worker nodes are approved. When you complete this process, the worker nodes become `Ready` and can have pods scheduled to run on them. - -Finally, monitor the command line to see when the installation process completes. - -.Procedure - -. Create the worker nodes: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml workers.yml ----- - - -. To list all of the CSRs, enter: -+ -[source,terminal] ----- -$ oc get csr -A ----- -+ -Eventually, this command displays one CSR per node. For example: -+ -.Example output -[source,terminal] ----- -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-2lnxd 63m kubernetes.io/kubelet-serving system:node:ocp4-lk6b4-master0.ocp4.example.org Approved,Issued -csr-hff4q 64m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued -csr-hsn96 60m kubernetes.io/kubelet-serving system:node:ocp4-lk6b4-master2.ocp4.example.org Approved,Issued -csr-m724n 6m2s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -csr-p4dz2 60m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued -csr-t9vfj 60m kubernetes.io/kubelet-serving system:node:ocp4-lk6b4-master1.ocp4.example.org Approved,Issued -csr-tggtr 61m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued -csr-wcbrf 7m6s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending ----- - -. To filter the list and see only pending CSRs, enter: -+ -[source,terminal] ----- -$ watch "oc get csr -A | grep pending -i" ----- -+ -This command refreshes the output every two seconds and displays only pending CSRs. For example: -+ -.Example output -[source,terminal] ----- -Every 2.0s: oc get csr -A | grep pending -i - -csr-m724n 10m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -csr-wcbrf 11m kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending ----- - -. Inspect each pending request. For example: -+ -.Example output -[source,terminal] ----- -$ oc describe csr csr-m724n ----- -+ -.Example output -[source,terminal] ----- -Name: csr-m724n -Labels: <none> -Annotations: <none> -CreationTimestamp: Sun, 19 Jul 2020 15:59:37 +0200 -Requesting User: system:serviceaccount:openshift-machine-config-operator:node-bootstrapper -Signer: kubernetes.io/kube-apiserver-client-kubelet -Status: Pending -Subject: - Common Name: system:node:ocp4-lk6b4-worker1.ocp4.example.org - Serial Number: - Organization: system:nodes -Events: <none> ----- - -. If the CSR information is correct, approve the request: -+ -[source,terminal] ----- -$ oc adm certificate approve csr-m724n ----- - -. 
Wait for the installation process to finish: -+ -[source,terminal] ----- -$ openshift-install wait-for install-complete --dir $ASSETS_DIR --log-level debug ----- -+ -When the installation completes, the command line displays the URL of the {product-title} web console and the administrator user name and password. diff --git a/modules/installation-rhv-customizing-install-config-yaml.adoc b/modules/installation-rhv-customizing-install-config-yaml.adoc deleted file mode 100644 index a710a635e780..000000000000 --- a/modules/installation-rhv-customizing-install-config-yaml.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-customizing-install-config-yaml_{context}"] -= Customizing install-config.yaml - -Here, you use three Python scripts to override some of the installation program's default behaviors: - -* By default, the installation program uses the machine API to create nodes. To override this default behavior, you set the number of compute nodes to zero replicas. Later, you use Ansible playbooks to create the compute nodes. - -* By default, the installation program sets the IP range of the machine network for nodes. To override this default behavior, you set the IP range to match your infrastructure. - -* By default, the installation program sets the platform to `ovirt`. However, installing a cluster on user-provisioned infrastructure is more similar to installing a cluster on bare metal. Therefore, you delete the ovirt platform section from `install-config.yaml` and change the platform to `none`. Instead, you use `inventory.yml` to specify all of the required settings. - -[NOTE] -==== -These snippets work with Python 3 and Python 2. -==== - -// TBD - https://issues.redhat.com/browse/OCPRHV-414 -// Please discuss with engineering whether these three scripts can/should be combined into a single script. -// Also consider combining this topic with other customization topics. - -.Procedure -//TBD - Should we combine these into one script? - -. Set the number of compute nodes to zero replicas: -+ -[source,python] ----- -$ python3 -c 'import os, yaml -path = "%s/install-config.yaml" % os.environ["ASSETS_DIR"] -conf = yaml.safe_load(open(path)) -conf["compute"][0]["replicas"] = 0 -open(path, "w").write(yaml.dump(conf, default_flow_style=False))' ----- - - -. Set the IP range of the machine network. For example, to set the range to `172.16.0.0/16`, enter: -+ -[source,python] ----- -$ python3 -c 'import os, yaml -path = "%s/install-config.yaml" % os.environ["ASSETS_DIR"] -conf = yaml.safe_load(open(path)) -conf["networking"]["machineNetwork"][0]["cidr"] = "172.16.0.0/16" -open(path, "w").write(yaml.dump(conf, default_flow_style=False))' ----- - - -. Remove the `ovirt` section and change the platform to `none`: -+ -[source,python] ----- -$ python3 -c 'import os, yaml -path = "%s/install-config.yaml" % os.environ["ASSETS_DIR"] -conf = yaml.safe_load(open(path)) -platform = conf["platform"] -del platform["ovirt"] -platform["none"] = {} -open(path, "w").write(yaml.dump(conf, default_flow_style=False))' ----- -+ -[WARNING] -==== -Red Hat Virtualization does not currently support installation with user-provisioned infrastructure on the oVirt platform. 
Therefore, you must set the platform to `none`, allowing {product-title} to identify each node as a bare-metal node and the cluster as a bare-metal cluster. This is the same as xref:../../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[installing a cluster on any platform], and has the following limitations: - -. There will be no cluster provider so you must manually add each machine and there will be no node scaling capabilities. -. The oVirt CSI driver will not be installed and there will be no CSI capabilities. -==== diff --git a/modules/installation-rhv-destroying-cluster.adoc b/modules/installation-rhv-destroying-cluster.adoc deleted file mode 100644 index 4efef6a3e2b6..000000000000 --- a/modules/installation-rhv-destroying-cluster.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc - -[id="installation-rhv-destroying-cluster_{context}"] -= Destroying the cluster - -When you are finished using the cluster, you can destroy it and remove related configurations from your infrastructure. - -.Prerequisites -* You preserved the original files you used to create the cluster. - -.Procedure - -. Optional: To remove the cluster, enter: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml \ - retire-bootstrap.yml \ - retire-masters.yml \ - retire-workers.yml ----- - -. Remove any previous configurations you added to DNS, load balancers, and any other infrastructure. diff --git a/modules/installation-rhv-downloading-ansible-playbooks.adoc b/modules/installation-rhv-downloading-ansible-playbooks.adoc deleted file mode 100644 index 9356e5201624..000000000000 --- a/modules/installation-rhv-downloading-ansible-playbooks.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-downloading-ansible-playbooks_{context}"] -= Downloading the Ansible playbooks - -Download the Ansible playbooks for installing {product-title} version {product-version} on {rh-virtualization}. 
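The playbooks, the customization scripts, and the `openshift-install` commands shown later all reference the `$ASSETS_DIR` environment variable to locate the assets directory, so export it before you run them. A minimal sketch, assuming the `./wrk` assets directory used in the examples in this guide:

[source,terminal]
----
$ export ASSETS_DIR=./wrk
----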
- -.Procedure - -* On your installation machine, run the following commands: -+ -[source,terminal,subs=attributes+] ----- -$ mkdir playbooks ----- -+ -[source,terminal,subs=attributes+] ----- -$ cd playbooks ----- -+ -[source,terminal,subs=attributes+] ----- -$ xargs -n 1 curl -O <<< ' - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/bootstrap.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/common-auth.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/create-templates-and-vms.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/inventory.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/masters.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/retire-bootstrap.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/retire-masters.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/retire-workers.yml - https://raw.githubusercontent.com/openshift/installer/release-{product-version}/upi/ovirt/workers.yml' - ----- - - -.Next steps - -* After you download these Ansible playbooks, you must also create the environment variable for the assets directory and customize the `inventory.yml` file before you create an installation configuration file by running the installation program. diff --git a/modules/installation-rhv-editing-manifests.adoc b/modules/installation-rhv-editing-manifests.adoc deleted file mode 100644 index 35c5a1ecaa91..000000000000 --- a/modules/installation-rhv-editing-manifests.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-editing-mantifests_{context}"] -= Generate manifest files - -Use the installation program to generate a set of manifest files in the assets directory. - -The command to generate the manifest files displays a warning message before it consumes the `install-config.yaml` file. - -If you plan to reuse the `install-config.yaml` file, create a backup copy of it before you back it up before you generate the manifest files. - -// TBD There isn't a clear reason to generate the manifest files. Is this step necessary? It seem like normally the user only does this if they need to edit the files to customize something. Unfortunately, the lead developer on this project has left the organization. Looking at similar commands/topics in the openshift-docs, it seems like this step is only taken when the user needs to perform a specific customization. - - -.Procedure - -. Optional: Create a backup copy of the `install-config.yaml` file: -+ -[source,terminal] ----- -$ cp install-config.yaml install-config.yaml.backup ----- - -. Generate a set of manifests in your assets directory: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir $ASSETS_DIR ----- -+ -This command displays the following messages. 
-+ -.Example output -[source,terminal] ----- -INFO Consuming Install Config from target directory -WARNING Making control-plane schedulable by setting MastersSchedulable to true for Scheduler cluster settings ----- -+ -The command generates the following manifest files: -+ -.Example output -[source,terminal] ----- -$ tree -. -└── wrk - ├── manifests - │ ├── 04-openshift-machine-config-operator.yaml - │ ├── cluster-config.yaml - │ ├── cluster-dns-02-config.yml - │ ├── cluster-infrastructure-02-config.yml - │ ├── cluster-ingress-02-config.yml - │ ├── cluster-network-01-crd.yml - │ ├── cluster-network-02-config.yml - │ ├── cluster-proxy-01-config.yaml - │ ├── cluster-scheduler-02-config.yml - │ ├── cvo-overrides.yaml - │ ├── etcd-ca-bundle-configmap.yaml - │ ├── etcd-client-secret.yaml - │ ├── etcd-host-service-endpoints.yaml - │ ├── etcd-host-service.yaml - │ ├── etcd-metric-client-secret.yaml - │ ├── etcd-metric-serving-ca-configmap.yaml - │ ├── etcd-metric-signer-secret.yaml - │ ├── etcd-namespace.yaml - │ ├── etcd-service.yaml - │ ├── etcd-serving-ca-configmap.yaml - │ ├── etcd-signer-secret.yaml - │ ├── kube-cloud-config.yaml - │ ├── kube-system-configmap-root-ca.yaml - │ ├── machine-config-server-tls-secret.yaml - │ └── openshift-config-secret-pull-secret.yaml - └── openshift - ├── 99_kubeadmin-password-secret.yaml - ├── 99_openshift-cluster-api_master-user-data-secret.yaml - ├── 99_openshift-cluster-api_worker-user-data-secret.yaml - ├── 99_openshift-machineconfig_99-master-ssh.yaml - ├── 99_openshift-machineconfig_99-worker-ssh.yaml - └── openshift-install-manifests.yaml ----- - -.Next steps - -* Make control plane nodes non-schedulable. diff --git a/modules/installation-rhv-making-control-plane-nodes-non-schedulable.adoc b/modules/installation-rhv-making-control-plane-nodes-non-schedulable.adoc deleted file mode 100644 index c0caa206ce09..000000000000 --- a/modules/installation-rhv-making-control-plane-nodes-non-schedulable.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-making-control-plane-nodes-non-schedulable_{context}"] -= Making control-plane nodes non-schedulable - -// TBD - https://issues.redhat.com/browse/OCPRHV-414 -// Here's my version of the intro text from https://github.com/openshift/installer/blob/master/docs/user/ovirt/install_upi.md#set-control-plane-nodes-unschedulable . This information is confusing. Please discuss with engineering and provide a good concise explanation of why the user is doing this. - -// "Earlier, when you set the compute `replicas` to zero, it also made control-plane nodes schedulable, which is something you do not want at this stage in the process."" -// -// "NOTE: Router pods can run also on control-plane nodes but there are some Kubernetes limitations that prevent the ingress load balancer from reaching those pods."" - -Because you are manually creating and deploying the control plane machines, you must configure a manifest file to make the control plane nodes non-schedulable. - -.Procedure - -. 
To make the control plane nodes non-schedulable, enter: -+ -[source,terminal] ----- -$ python3 -c 'import os, yaml -path = "%s/manifests/cluster-scheduler-02-config.yml" % os.environ["ASSETS_DIR"] -data = yaml.safe_load(open(path)) -data["spec"]["mastersSchedulable"] = False -open(path, "w").write(yaml.dump(data, default_flow_style=False))' ----- diff --git a/modules/installation-rhv-removing-bootstrap-machine.adoc b/modules/installation-rhv-removing-bootstrap-machine.adoc deleted file mode 100644 index d43bc16e7cc4..000000000000 --- a/modules/installation-rhv-removing-bootstrap-machine.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-removing-bootstrap-machine_{context}"] -= Removing the bootstrap machine - -After the `wait-for` command shows that the bootstrap process is complete, you must remove the bootstrap virtual machine to free up compute, memory, and storage resources. Also, remove settings for the bootstrap machine from the load balancer directives. - -.Procedure - - -. To remove the bootstrap machine from the cluster, enter: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml retire-bootstrap.yml ----- - -. Remove settings for the bootstrap machine from the load balancer directives. diff --git a/modules/installation-rhv-removing-cluster-upi.adoc b/modules/installation-rhv-removing-cluster-upi.adoc deleted file mode 100644 index 34c5a70da878..000000000000 --- a/modules/installation-rhv-removing-cluster-upi.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-removing-cluster-upi_{context}"] -= Removing a cluster that uses user-provisioned infrastructure - -When you are finished using the cluster, you can remove a cluster that uses user-provisioned infrastructure from your cloud. - -.Prerequisites - -* Have the original playbook files, assets directory and files, and `$ASSETS_DIR` environment variable that you used to you install the cluster. Typically, you can achieve this by using the same computer you used when you installed the cluster. - -.Procedure - -. To remove the cluster, enter: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml \ - retire-bootstrap.yml \ - retire-masters.yml \ - retire-workers.yml ----- - -. Remove any configurations you added to DNS, load balancers, and any other infrastructure for this cluster. diff --git a/modules/installation-rhv-specifying-rhcos-image-settings.adoc b/modules/installation-rhv-specifying-rhcos-image-settings.adoc deleted file mode 100644 index 60eacb9486fd..000000000000 --- a/modules/installation-rhv-specifying-rhcos-image-settings.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installation-rhv-specifying-rhcos-image-settings_{context}"] -= Specifying the {op-system} image settings - -Update the {op-system-first} image settings of the `inventory.yml` file. Later, when you run this file one of the playbooks, it downloads a compressed {op-system-first} image from the `image_url` URL to the `local_cmp_image_path` directory. 
The playbook then uncompresses the image to the `local_image_path` directory and uses it to create oVirt/{rh-virtualization} templates. - -// TBD - https://issues.redhat.com/browse/OCPRHV-414 -// Consider combining this topic with another one after we've resolved the issue of getting the files. - -.Procedure - -ifndef::openshift-origin[] -. Locate the {op-system} image download page for the version of {product-title} you are installing, such as link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/latest/[Index of /pub/openshift-v4/dependencies/rhcos/latest/latest]. - -. From that download page, copy the URL of an OpenStack `qcow2` image, such as `\https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/latest/rhcos-openstack.x86_64.qcow2.gz`. - -. Edit the `inventory.yml` playbook you downloaded earlier. In it, paste the URL as the value for `image_url`. For example: -+ -[source,yaml] ----- -rhcos: - "https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/latest/rhcos-openstack.x86_64.qcow2.gz" ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -. Locate the {op-system} image download page, such as link:https://getfedora.org/coreos/download?tab=cloud_operators&stream=stable[Download Fedora CoreOS]. - -. From that download page, copy the URL of an OpenStack `qcow2` image, such as `\https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/34.20210611.3.0/x86_64/fedora-coreos-34.20210611.3.0-openstack.x86_64.qcow2.xz`. - -. Edit the `inventory.yml` playbook you downloaded earlier. In it, replace the `rhcos` stanza and paste the URL as the value for `image_url`. For example: -+ -[source,yaml] ----- -rhcos: - image_url: "https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/34.20210611.3.0/x86_64/fedora-coreos-34.20210611.3.0-openstack.x86_64.qcow2.xz" ----- -endif::openshift-origin[] diff --git a/modules/installation-special-config-butane-about.adoc b/modules/installation-special-config-butane-about.adoc deleted file mode 100644 index a5dedc13fd13..000000000000 --- a/modules/installation-special-config-butane-about.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -:_content-type: CONCEPT -[id="installation-special-config-butane-about_{context}"] -= About Butane - -Butane is a command-line utility that {product-title} uses to provide convenient, short-hand syntax for writing machine configs, as well as for performing additional validation of machine configs. The format of the Butane config file that Butane accepts is defined in the -https://coreos.github.io/butane/specs/[OpenShift Butane config spec]. diff --git a/modules/installation-special-config-butane-create.adoc b/modules/installation-special-config-butane-create.adoc deleted file mode 100644 index c282326e5e5d..000000000000 --- a/modules/installation-special-config-butane-create.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -:_content-type: PROCEDURE -[id="installation-special-config-butane-create_{context}"] -= Creating a MachineConfig object by using Butane - -You can use Butane to produce a `MachineConfig` object so that you can configure worker or control plane nodes at installation time or via the Machine Config Operator. - -.Prerequisites - -* You have installed the `butane` utility. - -.Procedure - -. Create a Butane config file. 
The following example creates a file named `99-worker-custom.bu` that configures the system console to show kernel debug messages and specifies custom settings for the chrony time service: -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-worker-custom - labels: - machineconfiguration.openshift.io/role: worker -openshift: - kernel_arguments: - - loglevel=7 -storage: - files: - - path: /etc/chrony.conf - mode: 0644 - overwrite: true - contents: - inline: | - pool 0.rhel.pool.ntp.org iburst - driftfile /var/lib/chrony/drift - makestep 1.0 3 - rtcsync - logdir /var/log/chrony ----- -+ -[NOTE] -==== -The `99-worker-custom.bu` file is set to create a machine config for worker nodes. To deploy on control plane nodes, change the role from `worker` to `master`. To do both, you could repeat the whole procedure using different file names for the two types of deployments. -==== - -. Create a `MachineConfig` object by giving Butane the file that you created in the previous step: -+ -[source,terminal] ----- -$ butane 99-worker-custom.bu -o ./99-worker-custom.yaml ----- -+ -A `MachineConfig` object YAML file is created for you to finish configuring your machines. -. Save the Butane config in case you need to update the `MachineConfig` object in the future. -. If the cluster is not running yet, generate manifest files and add the `MachineConfig` object YAML file to the `openshift` directory. If the cluster is already running, apply the file as follows: -+ -[source,terminal] ----- -$ oc create -f 99-worker-custom.yaml ----- diff --git a/modules/installation-special-config-butane-install.adoc b/modules/installation-special-config-butane-install.adoc deleted file mode 100644 index 19473dc10be8..000000000000 --- a/modules/installation-special-config-butane-install.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -:_content-type: PROCEDURE -[id="installation-special-config-butane-install_{context}"] -= Installing Butane - -You can install the Butane tool (`butane`) to create {product-title} machine configs from a command-line interface. You can install `butane` on Linux, Windows, or macOS by downloading the corresponding binary file. - -[TIP] -==== -Butane releases are backwards-compatible with older releases and with the Fedora CoreOS Config Transpiler (FCCT). -==== - -.Procedure - -. Navigate to the Butane image download page at https://mirror.openshift.com/pub/openshift-v4/clients/butane/. -. Get the `butane` binary: -.. For the newest version of Butane, save the latest `butane` image to your current directory: -+ -[source,terminal] ----- -$ curl https://mirror.openshift.com/pub/openshift-v4/clients/butane/latest/butane --output butane ----- -+ -.. Optional: For a specific type of architecture you are installing Butane on, such as aarch64 or ppc64le, indicate the appropriate URL. For example: -+ -[source,terminal] ----- -$ curl https://mirror.openshift.com/pub/openshift-v4/clients/butane/latest/butane-aarch64 --output butane ----- -+ -. Make the downloaded binary file executable: -+ -[source,terminal] ----- -$ chmod +x butane ----- -+ -. Move the `butane` binary file to a directory on your `PATH`. 
-+ -To check your `PATH`, open a terminal and execute the following command: -+ -[source,terminal] ----- -$ echo $PATH ----- - -.Verification steps - -* You can now use the Butane tool by running the `butane` command: -+ -[source,terminal] ----- -$ butane <butane_file> ----- diff --git a/modules/installation-special-config-butane.adoc b/modules/installation-special-config-butane.adoc deleted file mode 100644 index 548b0b400310..000000000000 --- a/modules/installation-special-config-butane.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -[id="installation-special-config-butane_{context}"] -= Creating machine configs with Butane - -Machine configs are used to configure control plane and worker machines by instructing machines how to create users and file systems, set up the network, install systemd units, and more. - -Because modifying machine configs can be difficult, you can use Butane configs to create machine configs for you, thereby making node configuration much easier. diff --git a/modules/installation-special-config-chrony.adoc b/modules/installation-special-config-chrony.adoc deleted file mode 100644 index 058b36b02bda..000000000000 --- a/modules/installation-special-config-chrony.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-special-config-chrony_{context}"] -= Configuring chrony time service - -You -ifdef::restricted[must] -ifndef::restricted[can] -set the time server and related settings used by the chrony time service (`chronyd`) -by modifying the contents of the `chrony.conf` file and passing those contents -to your nodes as a machine config. - -.Procedure - -. Create a Butane config including the contents of the `chrony.conf` file. For example, to configure chrony on worker nodes, create a `99-worker-chrony.bu` file. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-worker-chrony <1> - labels: - machineconfiguration.openshift.io/role: worker <1> -storage: - files: - - path: /etc/chrony.conf - mode: 0644 <2> - overwrite: true - contents: - inline: | - pool 0.rhel.pool.ntp.org iburst <3> - driftfile /var/lib/chrony/drift - makestep 1.0 3 - rtcsync - logdir /var/log/chrony ----- -<1> On control plane nodes, substitute `master` for `worker` in both of these locations. -<2> Specify an octal value mode for the `mode` field in the machine config file. After creating the file and applying the changes, the `mode` is converted to a decimal value. You can check the YAML file with the command `oc get mc <mc-name> -o yaml`. -<3> Specify any valid, reachable time source, such as the one provided by your DHCP server. 
-ifndef::restricted[Alternately, you can specify any of the following NTP servers: `1.rhel.pool.ntp.org`, `2.rhel.pool.ntp.org`, or `3.rhel.pool.ntp.org`.] - -. Use Butane to generate a `MachineConfig` object file, `99-worker-chrony.yaml`, containing the configuration to be delivered to the nodes: -+ -[source,terminal] ----- -$ butane 99-worker-chrony.bu -o 99-worker-chrony.yaml ----- - -. Apply the configurations in one of two ways: -+ -* If the cluster is not running yet, after you generate manifest files, add the `MachineConfig` object file to the `<installation_directory>/openshift` directory, and then continue to create the cluster. -+ -* If the cluster is already running, apply the file: -+ -[source,terminal] ----- -$ oc apply -f ./99-worker-chrony.yaml ----- - -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] diff --git a/modules/installation-special-config-kargs.adoc b/modules/installation-special-config-kargs.adoc deleted file mode 100644 index d4ab4fe5250b..000000000000 --- a/modules/installation-special-config-kargs.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-special-config.adoc - -:_content-type: PROCEDURE -[id="installation-special-config-kargs_{context}"] - -= Adding day-1 kernel arguments -Although it is often preferable to modify kernel arguments as a day-2 activity, -you might want to add kernel arguments to all master or worker nodes during initial cluster -installation. Here are some reasons you might want -to add kernel arguments during cluster installation so they take effect before -the systems first boot up: - -* You want to disable a feature, such as SELinux, so it has no impact on the systems when they first come up. - -[WARNING] -==== -Disabling SELinux on {op-system} is not supported. -==== - -* You need to do some low-level network configuration before the systems start. - -To add kernel arguments to master or worker nodes, you can create a `MachineConfig` object -and inject that object into the set of manifest files used by Ignition during -cluster setup. - -For a listing of arguments you can pass to a RHEL 8 kernel at boot time, see -link:https://www.kernel.org/doc/Documentation/admin-guide/kernel-parameters.txt[Kernel.org kernel parameters]. -It is best to only add kernel arguments with this procedure if they are needed to complete the initial -{product-title} installation. - -.Procedure - -. Change to the directory that contains the installation program and generate the Kubernetes manifests for the cluster: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> ----- - -. Decide if you want to add kernel arguments to worker or control plane nodes. - -. In the `openshift` directory, create a file (for example, -`99-openshift-machineconfig-master-kargs.yaml`) to define a `MachineConfig` -object to add the kernel settings. -This example adds a `loglevel=7` kernel argument to control plane nodes: -+ -[source,terminal] ----- -$ cat << EOF > 99-openshift-machineconfig-master-kargs.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 99-openshift-machineconfig-master-kargs -spec: - kernelArguments: - - loglevel=7 -EOF ----- -+ -You can change `master` to `worker` to add kernel arguments to worker nodes instead. 
-Create a separate YAML file to add to both master and worker nodes. - -You can now continue on to create the cluster. diff --git a/modules/installation-special-config-kmod.adoc b/modules/installation-special-config-kmod.adoc deleted file mode 100644 index 375c7dcb4ccf..000000000000 --- a/modules/installation-special-config-kmod.adoc +++ /dev/null @@ -1,439 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-special-config.adoc - -:_content-type: PROCEDURE -[id="installation-special-config-kmod_{context}"] -= Adding kernel modules to nodes - -For most common hardware, the Linux kernel includes the device driver -modules needed to use that hardware when the computer starts up. For -some hardware, however, modules are not available in Linux. Therefore, you must -find a way to provide those modules to each host computer. This -procedure describes how to do that for nodes in an {product-title} cluster. - -When a kernel module is first deployed by following these instructions, -the module is made available for the current kernel. If a new kernel -is installed, the kmods-via-containers software will rebuild and deploy -the module so a compatible version of that module is available with the -new kernel. - -The way that this feature is able to keep the module up to date on each -node is by: - -* Adding a systemd service to each node that starts at boot time to detect -if a new kernel has been installed and -* If a new kernel is detected, the -service rebuilds the module and installs it to the kernel - -For information on the software needed for this procedure, see the -link:https://github.com/kmods-via-containers/kmods-via-containers[kmods-via-containers] github site. - -A few important issues to keep in mind: - -* This procedure is Technology Preview. -* Software tools and examples are not yet available in official RPM form -and can only be obtained for now from unofficial `github.com` sites noted in the procedure. -* Third-party kernel modules you might add through these procedures are not supported by Red Hat. -* In this procedure, the software needed to build your kernel modules is -deployed in a RHEL 8 container. Keep in mind that modules are rebuilt -automatically on each node when that node gets a new kernel. For that -reason, each node needs access to a `yum` repository that contains the -kernel and related packages needed to rebuild the module. That content -is best provided with a valid RHEL subscription. - -[id="building-testing-kernel-module-container_{context}"] -== Building and testing the kernel module container - -Before deploying kernel modules to your {product-title} cluster, -you can test the process on a separate RHEL system. -Gather the kernel module's source code, the KVC framework, and the -kmod-via-containers software. Then build and test the module. To do -that on a RHEL 8 system, do the following: - -.Procedure - -. Register a RHEL 8 system: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Attach a subscription to the RHEL 8 system: -+ -[source,terminal] ----- -# subscription-manager attach --auto ----- - -. Install software that is required to build the software and container: -+ -[source,terminal] ----- -# yum install podman make git -y ----- - -. Clone the `kmod-via-containers` repository: -.. Create a folder for the repository: -+ -[source,terminal] ----- -$ mkdir kmods; cd kmods ----- - -.. 
Clone the repository: -+ -[source,terminal] ----- -$ git clone https://github.com/kmods-via-containers/kmods-via-containers ----- - -. Install a KVC framework instance on your RHEL 8 build host to test the module. -This adds a `kmods-via-container` systemd service and loads it: - -.. Change to the `kmod-via-containers` directory: -+ -[source,terminal] ----- -$ cd kmods-via-containers/ ----- - -.. Install the KVC framework instance: -+ -[source,terminal] ----- -$ sudo make install ----- - -.. Reload the systemd manager configuration: -+ -[source,terminal] ----- -$ sudo systemctl daemon-reload ----- - -. Get the kernel module source code. The source code might be used to -build a third-party module that you do not -have control over, but is supplied by others. You will need content -similar to the content shown in the `kvc-simple-kmod` example that can -be cloned to your system as follows: -+ -[source,terminal] ----- -$ cd .. ; git clone https://github.com/kmods-via-containers/kvc-simple-kmod ----- - -. Edit the configuration file, `simple-kmod.conf` file, in this example, and -change the name of the Dockerfile to `Dockerfile.rhel`: - -.. Change to the `kvc-simple-kmod` directory: -+ -[source,terminal] ----- -$ cd kvc-simple-kmod ----- - -.. Rename the Dockerfile: -+ -[source,terminal] ----- -$ cat simple-kmod.conf ----- -+ -.Example Dockerfile -[source,terminal] ----- -KMOD_CONTAINER_BUILD_CONTEXT="https://github.com/kmods-via-containers/kvc-simple-kmod.git" -KMOD_CONTAINER_BUILD_FILE=Dockerfile.rhel -KMOD_SOFTWARE_VERSION=dd1a7d4 -KMOD_NAMES="simple-kmod simple-procfs-kmod" ----- - -. Create an instance of `kmods-via-containers@.service` for your kernel module, -`simple-kmod` in this example: -+ -[source,terminal] ----- -$ sudo make install ----- - -. Enable the `kmods-via-containers@.service` instance: -+ -[source,terminal] ----- -$ sudo kmods-via-containers build simple-kmod $(uname -r) ----- - -. Enable and start the systemd service: -+ -[source,terminal] ----- -$ sudo systemctl enable kmods-via-containers@simple-kmod.service --now ----- - -.. Review the service status: -+ -[source,terminal] ----- -$ sudo systemctl status kmods-via-containers@simple-kmod.service ----- -+ -.Example output -[source,terminal] ----- -● kmods-via-containers@simple-kmod.service - Kmods Via Containers - simple-kmod - Loaded: loaded (/etc/systemd/system/kmods-via-containers@.service; - enabled; vendor preset: disabled) - Active: active (exited) since Sun 2020-01-12 23:49:49 EST; 5s ago... ----- - -. To confirm that the kernel modules are loaded, use the `lsmod` command to list the modules: -+ -[source,terminal] ----- -$ lsmod | grep simple_ ----- -+ -.Example output -[source,terminal] ----- -simple_procfs_kmod 16384 0 -simple_kmod 16384 0 ----- - -. Optional. Use other methods to check that the `simple-kmod` example is working: -** Look for a "Hello world" message in the kernel ring buffer with `dmesg`: -+ -[source,terminal] ----- -$ dmesg | grep 'Hello world' ----- -+ -.Example output -[source,terminal] ----- -[ 6420.761332] Hello world from simple_kmod. 
----- - -** Check the value of `simple-procfs-kmod` in `/proc`: -+ -[source,terminal] ----- -$ sudo cat /proc/simple-procfs-kmod ----- -+ -.Example output -[source,terminal] ----- -simple-procfs-kmod number = 0 ----- - -** Run the `spkut` command to get more information from the module: -+ -[source,terminal] ----- -$ sudo spkut 44 ----- -+ -.Example output -[source,terminal] ----- -KVC: wrapper simple-kmod for 4.18.0-147.3.1.el8_1.x86_64 -Running userspace wrapper using the kernel module container... -+ podman run -i --rm --privileged - simple-kmod-dd1a7d4:4.18.0-147.3.1.el8_1.x86_64 spkut 44 -simple-procfs-kmod number = 0 -simple-procfs-kmod number = 44 ----- - -Going forward, when the system boots this service will check if a new -kernel is running. If there is a new kernel, the service builds a new -version of the kernel module and then loads it. If the module is already -built, it will just load it. - -[id="provisioning-kernel-module-to-ocp_{context}"] -== Provisioning a kernel module to {product-title} - -Depending on whether or not you must have the kernel module in place -when {product-title} cluster first boots, you can set up the -kernel modules to be deployed in one of two ways: - -* **Provision kernel modules at cluster install time (day-1)**: -You can create the content as a `MachineConfig` object and provide it to `openshift-install` -by including it with a set of manifest files. - -* **Provision kernel modules via Machine Config Operator (day-2)**: If you can wait until the -cluster is up and running to add your kernel module, you can deploy the kernel -module software via the Machine Config Operator (MCO). - -In either case, each node needs to be able to get the kernel packages and related -software packages at the time that a new kernel is detected. There are a few ways -you can set up each node to be able to obtain that content. - -* Provide RHEL entitlements to each node. -* Get RHEL entitlements from an existing RHEL host, from the `/etc/pki/entitlement` directory -and copy them to the same location as the other files you provide -when you build your Ignition config. -* Inside the Dockerfile, add pointers to a `yum` repository containing the kernel and other packages. -This must include new kernel packages as they are needed to match newly installed kernels. - -[id="provision-kernel-modules-via-machineconfig_{context}"] -=== Provision kernel modules via a MachineConfig object - -By packaging kernel module software with a `MachineConfig` object, you can -deliver that software to worker or control plane nodes at installation time -or via the Machine Config Operator. - -.Procedure - -. Register a RHEL 8 system: -+ -[source,terminal] ----- -# subscription-manager register ----- - -. Attach a subscription to the RHEL 8 system: -+ -[source,terminal] ----- -# subscription-manager attach --auto ----- - -. Install software needed to build the software: -+ -[source,terminal] ----- -# yum install podman make git -y ----- - -. Create a directory to host the kernel module and tooling: -+ -[source,terminal] ----- -$ mkdir kmods; cd kmods ----- - -. Get the `kmods-via-containers` software: - -.. Clone the `kmods-via-containers` repository: -+ -[source,terminal] ----- -$ git clone https://github.com/kmods-via-containers/kmods-via-containers ----- - -.. Clone the `kvc-simple-kmod` repository: -+ -[source,terminal] ----- -$ git clone https://github.com/kmods-via-containers/kvc-simple-kmod ----- - -. Get your module software. In this example, `kvc-simple-kmod` is used. - -. 
Create a fakeroot directory and populate it with files that you want to -deliver via Ignition, using the repositories cloned earlier: - -.. Create the directory: -+ -[source,terminal] ----- -$ FAKEROOT=$(mktemp -d) ----- - -.. Change to the `kmod-via-containers` directory: -+ -[source,terminal] ----- -$ cd kmods-via-containers ----- - -.. Install the KVC framework instance: -+ -[source,terminal] ----- -$ make install DESTDIR=${FAKEROOT}/usr/local CONFDIR=${FAKEROOT}/etc/ ----- - -.. Change to the `kvc-simple-kmod` directory: -+ -[source,terminal] ----- -$ cd ../kvc-simple-kmod ----- - -.. Create the instance: -+ -[source,terminal] ----- -$ make install DESTDIR=${FAKEROOT}/usr/local CONFDIR=${FAKEROOT}/etc/ ----- - -. Clone the fakeroot directory, replacing any symbolic links with copies of their targets, by running the following command: -+ -[source,terminal] ----- -$ cd .. && rm -rf kmod-tree && cp -Lpr ${FAKEROOT} kmod-tree ----- - -. Create a Butane config file, `99-simple-kmod.bu`, that embeds the kernel module tree and enables the systemd service. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-simple-kmod - labels: - machineconfiguration.openshift.io/role: worker <1> -storage: - trees: - - local: kmod-tree -systemd: - units: - - name: kmods-via-containers@simple-kmod.service - enabled: true ----- -+ -<1> To deploy on control plane nodes, change `worker` to `master`. To deploy on both control plane and worker nodes, perform the remainder of these instructions once for each node type. - -. Use Butane to generate a machine config YAML file, `99-simple-kmod.yaml`, containing the files and configuration to be delivered: -+ -[source,terminal] ----- -$ butane 99-simple-kmod.bu --files-dir . -o 99-simple-kmod.yaml ----- - -. If the cluster is not up yet, generate manifest files and add this file to the -`openshift` directory. If the cluster is already running, apply the file as follows: -+ -[source,terminal] ----- -$ oc create -f 99-simple-kmod.yaml ----- -+ -Your nodes will start the `kmods-via-containers@simple-kmod.service` -service and the kernel modules will be loaded. - -. To confirm that the kernel modules are loaded, you can log in to a node -(using `oc debug node/<openshift-node>`, then `chroot /host`). -To list the modules, use the `lsmod` command: -+ -[source,terminal] ----- -$ lsmod | grep simple_ ----- -+ -.Example output -[source,terminal] ----- -simple_procfs_kmod 16384 0 -simple_kmod 16384 0 ----- diff --git a/modules/installation-special-config-raid.adoc b/modules/installation-special-config-raid.adoc deleted file mode 100644 index 6d14dbab8466..000000000000 --- a/modules/installation-special-config-raid.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -[id="installation-special-config-raid_{context}"] -== Configuring a RAID-enabled data volume - -You can enable software RAID partitioning to provide an external data volume. {product-title} supports RAID 0, RAID 1, RAID 4, RAID 5, RAID 6, and RAID 10 for data protection and fault tolerance. See "About disk mirroring" for more details. - -.Prerequisites - -* You have downloaded the {product-title} installation program on your installation node. -* You have installed Butane on your installation node. 
-+ -[NOTE] -==== -Butane is a command-line utility that {product-title} uses to provide convenient, short-hand syntax for writing machine configs, as well as for performing additional validation of machine configs. For more information, see the _Creating machine configs with Butane_ section. -==== - -.Procedure - -. Create a Butane config that configures a data volume by using software RAID. - -* To configure a data volume with RAID 1 on the same disks that are used for a mirrored boot disk, create a `$HOME/clusterconfig/raid1-storage.bu` file, for example: -+ -.RAID 1 on mirrored boot disk -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: raid1-storage - labels: - machineconfiguration.openshift.io/role: worker -boot_device: - mirror: - devices: - - /dev/sda - - /dev/sdb -storage: - disks: - - device: /dev/sda - partitions: - - label: root-1 - size_mib: 25000 <1> - - label: var-1 - - device: /dev/sdb - partitions: - - label: root-2 - size_mib: 25000 <1> - - label: var-2 - raid: - - name: md-var - level: raid1 - devices: - - /dev/disk/by-partlabel/var-1 - - /dev/disk/by-partlabel/var-2 - filesystems: - - device: /dev/md/md-var - path: /var - format: xfs - wipe_filesystem: true - with_mount_unit: true ----- -<1> When adding a data partition to the boot disk, a minimum value of 25000 mebibytes is recommended. If no value is specified, or if the specified value is smaller than the recommended minimum, the resulting root file system will be too small, and future reinstalls of {op-system} might overwrite the beginning of the data partition. - -* To configure a data volume with RAID 1 on secondary disks, create a `$HOME/clusterconfig/raid1-alt-storage.bu` file, for example: -+ -.RAID 1 on secondary disks -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: raid1-alt-storage - labels: - machineconfiguration.openshift.io/role: worker -storage: - disks: - - device: /dev/sdc - wipe_table: true - partitions: - - label: data-1 - - device: /dev/sdd - wipe_table: true - partitions: - - label: data-2 - raid: - - name: md-var-lib-containers - level: raid1 - devices: - - /dev/disk/by-partlabel/data-1 - - /dev/disk/by-partlabel/data-2 - filesystems: - - device: /dev/md/md-var-lib-containers - path: /var/lib/containers - format: xfs - wipe_filesystem: true - with_mount_unit: true ----- - -. Create a RAID manifest from the Butane config you created in the previous step and save it to the `<installation_directory>/openshift` directory. For example, to create a manifest for the compute nodes, run the following command: -+ -[source,terminal] ----- -$ butane $HOME/clusterconfig/<butane_config>.bu -o <installation_directory>/openshift/<manifest_name>.yaml <1> ----- -<1> Replace `<butane_config>` and `<manifest_name>` with the file names from the previous step. For example, `raid1-alt-storage.bu` and `raid1-alt-storage.yaml` for secondary disks. - -. Save the Butane config in case you need to update the manifest in the future. - -. Continue with the remainder of the {product-title} installation. 
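The RAID data volume procedure above ends without a verification step. The following is a minimal sketch of how you might confirm the array after the cluster is installed, reusing the same commands that the disk mirroring verification later in this section relies on. The `<node_name>` placeholder and the `/dev/md/md-var` device path are assumptions derived from the `name: md-var` field in the RAID 1 example above; adjust them to match your own Butane config.

[source,terminal]
----
$ oc debug node/<node_name>
# chroot /host
# cat /proc/mdstat
# mdadm --detail /dev/md/md-var
----

If the manifest was applied, the `cat /proc/mdstat` output is expected to list an active `raid1` array backed by both member partitions, and `mdadm --detail` shows their state, as illustrated in the mirroring verification later in this section.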
diff --git a/modules/installation-special-config-rtkernel.adoc b/modules/installation-special-config-rtkernel.adoc
deleted file mode 100644
index 1d03d79c5d01..000000000000
--- a/modules/installation-special-config-rtkernel.adoc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/install_config/installing-customizing.adoc
-
-:_content-type: PROCEDURE
-[id="installation-special-config-rtkernel_{context}"]
-
-= Adding a real-time kernel to nodes (during installation)
-
-Some {product-title} workloads require a high degree of determinism.
-While Linux is not a real-time operating system, the Linux real-time
-kernel includes a preemptive scheduler that provides the operating
-system with real-time characteristics.
-
-If your {product-title} workloads require these real-time characteristics,
-you can set up your compute (worker) and/or control plane machines to use the
-Linux real-time kernel when you first install the cluster. To do this,
-create a `MachineConfig` object and inject that object into the set of manifest
-files used by Ignition during cluster setup, as described in the following
-procedure.
-
-[NOTE]
-====
-This procedure is fully supported with bare metal installations using
-systems that are certified for Red Hat Enterprise Linux for Real Time 8.
-Real-time support in {product-title} is also limited to specific subscriptions.
-This procedure is also supported for use with Google Cloud Platform.
-====
-
-.Prerequisites
-* For a bare metal installation of {product-title}, prepare masters and workers.
-* Use {product-title} version 4.4 or later.
-
-.Procedure
-
-. Create the `install-config.yaml` file by using the installation program, or prepare it manually.
-To create it by using the installation program, run:
-+
-[source,terminal]
-----
-$ ./openshift-install create install-config --dir <installation_directory>
-----
-
-. Generate the Kubernetes manifests for the cluster:
-+
-[source,terminal]
-----
-$ ./openshift-install create manifests --dir <installation_directory>
-----
-
-. Decide if you want to add the real-time kernel to worker or control plane nodes.
-
-. In the `openshift` directory, create a file (for example,
-`99-worker-realtime.yaml`) to define a `MachineConfig` object that applies a
-real-time kernel to the selected nodes (worker nodes in this case):
-+
-[source,terminal]
-----
-$ cat << EOF > 99-worker-realtime.yaml
-apiVersion: machineconfiguration.openshift.io/v1
-kind: MachineConfig
-metadata:
-  labels:
-    machineconfiguration.openshift.io/role: "worker"
-  name: 99-worker-realtime
-spec:
-  kernelType: realtime
-EOF
-----
-+
-You can change `worker` to `master` to apply the real-time kernel to control plane nodes instead.
-To configure both master and worker nodes, create a separate YAML file for each node type.
-
-. Create the cluster. You can now continue to create the {product-title} cluster:
-+
-[source,terminal]
-----
-$ ./openshift-install create cluster --dir <installation_directory>
-----
-
-. 
Check the real-time kernel: Once the cluster comes up, log in to the cluster -and run the following commands to make sure that the real-time kernel has -replaced the regular kernel for the set of worker or control plane nodes you -configured: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-139-200.us-east-2.compute.internal Ready master 111m v1.26.0 -ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.26.0 -ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.26.0 -ip-10-0-156-255.us-east-2.compute.internal Ready master 111m v1.26.0 -ip-10-0-164-74.us-east-2.compute.internal Ready master 111m v1.26.0 -ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.26.0 ----- -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-143-147.us-east-2.compute.internal ----- -+ -.Example output -[source,terminal] ----- -Starting pod/ip-10-0-143-147us-east-2computeinternal-debug ... -To use host binaries, run `chroot /host` ----- -+ -[source,terminal] ----- -sh-4.4# uname -a ----- -+ -.Example output -[source,terminal] ----- -Linux <worker_node> 4.18.0-147.3.1.rt24.96.el8_1.x86_64 #1 SMP PREEMPT RT - Wed Nov 27 18:29:55 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux ----- -+ -The kernel name contains `rt` and text `PREEMPT RT` indicates that this is a -real-time kernel. diff --git a/modules/installation-special-config-storage.adoc b/modules/installation-special-config-storage.adoc deleted file mode 100644 index a42e636cbc98..000000000000 --- a/modules/installation-special-config-storage.adoc +++ /dev/null @@ -1,454 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/install_config/installing-customizing.adoc - -:_content-type: PROCEDURE -[id="installation-special-config-storage_{context}"] -= Encrypting and mirroring disks during installation - -During an {product-title} installation, you can enable boot disk encryption and mirroring on the cluster nodes. - -[id="installation-special-config-encrypt-disk_{context}"] -== About disk encryption - -You can enable encryption for the boot disks on the control plane and compute nodes at installation time. -{product-title} supports the Trusted Platform Module (TPM) v2 and Tang encryption modes. - -TPM v2:: This is the preferred mode. -TPM v2 stores passphrases in a secure cryptoprocessor on the server. -You can use this mode to prevent decryption of the boot disk data on a cluster node if the disk is removed from the server. -Tang:: Tang and Clevis are server and client components that enable network-bound disk encryption (NBDE). -You can bind the boot disk data on your cluster nodes to one or more Tang servers. -This prevents decryption of the data unless the nodes are on a secure network where the Tang servers are accessible. -Clevis is an automated decryption framework used to implement decryption on the client side. - -[IMPORTANT] -==== -The use of the Tang encryption mode to encrypt your disks is only supported for bare metal and vSphere installations on user-provisioned infrastructure. -==== - -In earlier versions of {op-system-first}, disk encryption was configured by specifying `/etc/clevis.json` in the Ignition config. -That file is not supported in clusters created with {product-title} 4.7 or later. -Configure disk encryption by using the following procedure. - -When the TPM v2 or Tang encryption modes are enabled, the {op-system} boot disks are encrypted using the LUKS2 format. 
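Before the full procedure, it can help to see the smallest possible shape of this configuration. The following Butane sketch enables only the TPM v2 mode described above, with no Tang servers; the metadata name and role label are placeholder values, and the complete, annotated example (including Tang servers, thresholds, and mirroring) appears later in this section.

[source,yaml]
----
variant: openshift
version: 4.13.0
metadata:
  name: worker-storage
  labels:
    machineconfiguration.openshift.io/role: worker
boot_device:
  layout: x86_64
  luks:
    tpm2: true
----

As with the later examples, you pass a file like this to `butane` to produce a `MachineConfig` manifest for the `openshift` directory.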
- -This feature: - -* Is available for installer-provisioned infrastructure, user-provisioned infrastructure, and Assisted Installer deployments - * For Assisted installer deployments: - - Each cluster can only have a single encryption method, Tang or TPM - - Encryption can be enabled on some or all nodes - - There is no Tang threshold; all servers must be valid and operational - - Encryption applies to the installation disks only, not to the workload disks -* Is supported on {op-system-first} systems only -* Sets up disk encryption during the manifest installation phase, encrypting all data written to disk, from first boot forward -* Requires no user intervention for providing passphrases -* Uses AES-256-XTS encryption, or AES-256-CBC if FIPS mode is enabled - -[id="installation-special-config-encryption-threshold_{context}"] -=== Configuring an encryption threshold - -In {product-title}, you can specify a requirement for more than one Tang server. -You can also configure the TPM v2 and Tang encryption modes simultaneously. -This enables boot disk data decryption only if the TPM secure cryptoprocessor is present and the Tang servers are accessible over a secure network. - -You can use the `threshold` attribute in your Butane configuration to define the minimum number of TPM v2 and Tang encryption conditions required for decryption to occur. -The threshold is met when the stated value is reached through any combination of the declared conditions. -For example, the `threshold` value of `2` in the following configuration can be reached by accessing the two Tang servers, or by accessing the TPM secure cryptoprocessor and one of the Tang servers: - -.Example Butane configuration for disk encryption - -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: worker-storage - labels: - machineconfiguration.openshift.io/role: worker -boot_device: - layout: x86_64 <1> - luks: - tpm2: true <2> - tang: <3> - - url: http://tang1.example.com:7500 - thumbprint: jwGN5tRFK-kF6pIX89ssF3khxxX - - url: http://tang2.example.com:7500 - thumbprint: VCJsvZFjBSIHSldw78rOrq7h2ZF - threshold: 2 <4> -openshift: - fips: true ----- -<1> Set this field to the instruction set architecture of the cluster nodes. -Some examples include, `x86_64`, `aarch64`, or `ppc64le`. -<2> Include this field if you want to use a Trusted Platform Module (TPM) to encrypt the root file system. -<3> Include this section if you want to use one or more Tang servers. -<4> Specify the minimum number of TPM v2 and Tang encryption conditions required for decryption to occur. - -[IMPORTANT] -==== -The default `threshold` value is `1`. -If you include multiple encryption conditions in your configuration but do not specify a threshold, decryption can occur if any of the conditions are met. -==== - -[NOTE] -==== -If you require TPM v2 _and_ Tang for decryption, the value of the `threshold` attribute must equal the total number of stated Tang servers plus one. -If the `threshold` value is lower, it is possible to reach the threshold value by using a single encryption mode. -For example, if you set `tpm2` to `true` and specify two Tang servers, a threshold of `2` can be met by accessing the two Tang servers, even if the TPM secure cryptoprocessor is not available. -==== - -[id="installation-special-config-mirrored-disk_{context}"] -== About disk mirroring - -During {product-title} installation on control plane and worker nodes, you can enable mirroring of the boot and other disks to two or more redundant storage devices. 
-A node continues to function after storage device failure provided one device remains available. - -Mirroring does not support replacement of a failed disk. -Reprovision the node to restore the mirror to a pristine, non-degraded state. - -[NOTE] -==== -For user-provisioned infrastructure deployments, mirroring is available only on {op-system} systems. -Support for mirroring is available on `x86_64` nodes booted with BIOS or UEFI and on `ppc64le` nodes. -==== - -[id="installation-special-config-storage-procedure_{context}"] -== Configuring disk encryption and mirroring - -You can enable and configure encryption and mirroring during an {product-title} installation. - -.Prerequisites - -* You have downloaded the {product-title} installation program on your installation node. -* You installed Butane on your installation node. -+ -[NOTE] -==== -Butane is a command-line utility that {product-title} uses to offer convenient, short-hand syntax for writing and validating machine configs. -For more information, see "Creating machine configs with Butane". -==== -+ -* You have access to a {op-system-base-full} 8 machine that can be used to generate a thumbprint of the Tang exchange key. - -.Procedure - -. If you want to use TPM v2 to encrypt your cluster, check to see if TPM v2 encryption needs to be enabled in the host firmware for each node. -This is required on most Dell systems. -Check the manual for your specific system. - -. If you want to use Tang to encrypt your cluster, follow these preparatory steps: - -.. Set up a Tang server or access an existing one. -See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening#network-bound-disk-encryption_configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption[Network-bound disk encryption] for instructions. - -.. Install the `clevis` package on a {op-system-base} 8 machine, if it is not already installed: -+ -[source,terminal] ----- -$ sudo yum install clevis ----- - -.. On the {op-system-base} 8 machine, run the following command to generate a thumbprint of the exchange key. -Replace `\http://tang.example.com:7500` with the URL of your Tang server: -+ -[source,terminal] ----- -$ clevis-encrypt-tang '{"url":"http://tang.example.com:7500"}' < /dev/null > /dev/null <1> ----- -<1> In this example, `tangd.socket` is listening on port `7500` on the Tang server. -+ -[NOTE] -==== -The `clevis-encrypt-tang` command generates a thumbprint of the exchange key. -No data passes to the encryption command during this step; `/dev/null` exists here as an input instead of plain text. -The encrypted output is also sent to `/dev/null`, because it is not required for this procedure. -==== -+ -.Example output -[source,terminal] ----- -The advertisement contains the following signing keys: - -PLjNyRdGw03zlRoGjQYMahSZGu9 <1> ----- -<1> The thumbprint of the exchange key. -+ -When the `Do you wish to trust these keys? [ynYN]` prompt displays, type `Y`. -+ -[NOTE] -==== -{op-system-base} 8 provides Clevis version 15, which uses the SHA-1 hash algorithm to generate thumbprints. -Some other distributions provide Clevis version 17 or later, which use the SHA-256 hash algorithm for thumbprints. -You must use a Clevis version that uses SHA-1 to create the thumbprint, to prevent Clevis binding issues when you install {op-system-first} on your {product-title} cluster nodes. -==== - -.. 
If the nodes are configured with static IP addressing, run `coreos-installer iso customize --dest-karg-append` or use the `coreos-installer` `--append-karg` option when installing {op-system} nodes to set the IP address of the installed system. -Append the `ip=` and other arguments needed for your network. -+ -[IMPORTANT] -==== -Some methods for configuring static IPs do not affect the initramfs after the first boot and will not work with Tang encryption. -These include the `coreos-installer` `--copy-network` option, the `coreos-installer iso customize` `--network-keyfile` option, and the `coreos-installer pxe customize` `--network-keyfile` option, as well as adding `ip=` arguments to the kernel command line of the live ISO or PXE image during installation. -Incorrect static IP configuration causes the second boot of the node to fail. -==== - -. On your installation node, change to the directory that contains the installation program and generate the Kubernetes manifests for the cluster: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -<1> Replace `<installation_directory>` with the path to the directory that you want to store the installation files in. - -. Create a Butane config that configures disk encryption, mirroring, or both. -For example, to configure storage for compute nodes, create a `$HOME/clusterconfig/worker-storage.bu` file. -+ -[source,yaml] -.Butane config example for a boot device ----- -variant: openshift -version: 4.13.0 -metadata: - name: worker-storage <1> - labels: - machineconfiguration.openshift.io/role: worker <1> -boot_device: - layout: x86_64 <2> - luks: <3> - tpm2: true <4> - tang: <5> - - url: http://tang.example.com:7500 <6> - thumbprint: PLjNyRdGw03zlRoGjQYMahSZGu9 <7> - threshold: 1 <8> - mirror: <9> - devices: <10> - - /dev/sda - - /dev/sdb -openshift: - fips: true <11> ----- -+ -<1> For control plane configurations, replace `worker` with `master` in both of these locations. -<2> Set this field to the instruction set architecture of the cluster nodes. -Some examples include, `x86_64`, `aarch64`, or `ppc64le`. -<3> Include this section if you want to encrypt the root file system. -For more details, see "About disk encryption". -<4> Include this field if you want to use a Trusted Platform Module (TPM) to encrypt the root file system. -<5> Include this section if you want to use one or more Tang servers. -<6> Specify the URL of a Tang server. -In this example, `tangd.socket` is listening on port `7500` on the Tang server. -<7> Specify the exchange key thumbprint, which was generated in a preceding step. -<8> Specify the minimum number of TPM v2 and Tang encryption conditions that must be met for decryption to occur. -The default value is `1`. -For more information about this topic, see "Configuring an encryption threshold". -<9> Include this section if you want to mirror the boot disk. -For more details, see "About disk mirroring". -<10> List all disk devices that should be included in the boot disk mirror, including the disk that {op-system} will be installed onto. -<11> Include this directive to enable FIPS mode on your cluster. -+ -[IMPORTANT] -==== -If you are configuring nodes to use both disk encryption and mirroring, both features must be configured in the same Butane config. -If you are configuring disk encryption on a node with FIPS mode enabled, you must include the `fips` directive in the same Butane config, even if FIPS mode is also enabled in a separate manifest. -==== - -. 
Create a control plane or compute node manifest from the corresponding Butane config and save it to the `<installation_directory>/openshift` directory. -For example, to create a manifest for the compute nodes, run the following command: -+ -[source,terminal] ----- -$ butane $HOME/clusterconfig/worker-storage.bu -o <installation_directory>/openshift/99-worker-storage.yaml ----- -+ -Repeat this step for each node type that requires disk encryption or mirroring. - -. Save the Butane configs in case you need to update the manifests in the future. - -. Continue with the remainder of the {product-title} installation. -+ -[TIP] -==== -You can monitor the console log on the {op-system} nodes during installation for error messages relating to disk encryption or mirroring. -==== -+ -[IMPORTANT] -==== -If you configure additional data partitions, they will not be encrypted unless encryption is explicitly requested. -==== - -.Verification - -After installing {product-title}, you can verify if boot disk encryption or mirroring is enabled on the cluster nodes. - -. From the installation host, access a cluster node by using a debug pod: -.. Start a debug pod for the node, for example: -+ -[source,terminal] ----- -$ oc debug node/compute-1 ----- -+ -.. Set `/host` as the root directory within the debug shell. -The debug pod mounts the root file system of the node in `/host` within the pod. -By changing the root directory to `/host`, you can run binaries contained in the executable paths on the node: -+ -[source,terminal] ----- -# chroot /host ----- -+ -[NOTE] -==== -{product-title} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. -Accessing cluster nodes using SSH is not recommended. -However, if the {product-title} API is not available, or `kubelet` is not properly functioning on the target node, `oc` operations will be impacted. -In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>` instead. -==== - -. If you configured boot disk encryption, verify if it is enabled: -.. From the debug shell, review the status of the root mapping on the node: -+ -[source,terminal] ----- -# cryptsetup status root ----- -+ -.Example output -[source,terminal] ----- -/dev/mapper/root is active and is in use. - type: LUKS2 <1> - cipher: aes-xts-plain64 <2> - keysize: 512 bits - key location: keyring - device: /dev/sda4 <3> - sector size: 512 - offset: 32768 sectors - size: 15683456 sectors - mode: read/write ----- -<1> The encryption format. -When the TPM v2 or Tang encryption modes are enabled, the {op-system} boot disks are encrypted using the LUKS2 format. -<2> The encryption algorithm used to encrypt the LUKS2 volume. -The `aes-cbc-essiv:sha256` cipher is used if FIPS mode is enabled. -<3> The device that contains the encrypted LUKS2 volume. -If mirroring is enabled, the value will represent a software mirror device, for example `/dev/md126`. -+ -.. List the Clevis plugins that are bound to the encrypted device: -+ -[source,terminal] ----- -# clevis luks list -d /dev/sda4 <1> ----- -<1> Specify the device that is listed in the `device` field in the output of the preceding step. -+ -.Example output -[source,terminal] ----- -1: sss '{"t":1,"pins":{"tang":[{"url":"http://tang.example.com:7500"}]}}' <1> ----- -<1> In the example output, the Tang plugin is used by the Shamir's Secret Sharing (SSS) Clevis plugin for the `/dev/sda4` device. - -. If you configured mirroring, verify if it is enabled: -.. 
From the debug shell, list the software RAID devices on the node: -+ -[source,terminal] ----- -# cat /proc/mdstat ----- -+ -.Example output -[source,terminal] ----- -Personalities : [raid1] -md126 : active raid1 sdb3[1] sda3[0] <1> - 393152 blocks super 1.0 [2/2] [UU] - -md127 : active raid1 sda4[0] sdb4[1] <2> - 51869632 blocks super 1.2 [2/2] [UU] - -unused devices: <none> ----- -<1> The `/dev/md126` software RAID mirror device uses the `/dev/sda3` and `/dev/sdb3` disk devices on the cluster node. -<2> The `/dev/md127` software RAID mirror device uses the `/dev/sda4` and `/dev/sdb4` disk devices on the cluster node. -+ -.. Review the details of each of the software RAID devices listed in the output of the preceding command. -The following example lists the details of the `/dev/md126` device: -+ -[source,terminal] ----- -# mdadm --detail /dev/md126 ----- -+ -.Example output -[source,terminal] ----- -/dev/md126: - Version : 1.0 - Creation Time : Wed Jul 7 11:07:36 2021 - Raid Level : raid1 <1> - Array Size : 393152 (383.94 MiB 402.59 MB) - Used Dev Size : 393152 (383.94 MiB 402.59 MB) - Raid Devices : 2 - Total Devices : 2 - Persistence : Superblock is persistent - - Update Time : Wed Jul 7 11:18:24 2021 - State : clean <2> - Active Devices : 2 <3> - Working Devices : 2 <3> - Failed Devices : 0 <4> - Spare Devices : 0 - -Consistency Policy : resync - - Name : any:md-boot <5> - UUID : ccfa3801:c520e0b5:2bee2755:69043055 - Events : 19 - - Number Major Minor RaidDevice State - 0 252 3 0 active sync /dev/sda3 <6> - 1 252 19 1 active sync /dev/sdb3 <6> ----- -<1> Specifies the RAID level of the device. -`raid1` indicates RAID 1 disk mirroring. -<2> Specifies the state of the RAID device. -<3> States the number of underlying disk devices that are active and working. -<4> States the number of underlying disk devices that are in a failed state. -<5> The name of the software RAID device. -<6> Provides information about the underlying disk devices used by the software RAID device. -+ -.. 
List the file systems mounted on the software RAID devices: -+ -[source,terminal] ----- -# mount | grep /dev/md ----- -+ -.Example output -[source,terminal] ----- -/dev/md127 on / type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /etc type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /usr type xfs (ro,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /sysroot type xfs (ro,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/containers/storage/overlay type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/kubelet/pods/e5054ed5-f882-4d14-b599-99c050d4e0c0/volume-subpaths/etc/tuned/1 type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/kubelet/pods/e5054ed5-f882-4d14-b599-99c050d4e0c0/volume-subpaths/etc/tuned/2 type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/kubelet/pods/e5054ed5-f882-4d14-b599-99c050d4e0c0/volume-subpaths/etc/tuned/3 type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/kubelet/pods/e5054ed5-f882-4d14-b599-99c050d4e0c0/volume-subpaths/etc/tuned/4 type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md127 on /var/lib/kubelet/pods/e5054ed5-f882-4d14-b599-99c050d4e0c0/volume-subpaths/etc/tuned/5 type xfs (rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,prjquota) -/dev/md126 on /boot type ext4 (rw,relatime,seclabel) ----- -+ -In the example output, the `/boot` file system is mounted on the `/dev/md126` software RAID device and the root file system is mounted on `/dev/md127`. - -. Repeat the verification steps for each {product-title} node type. - -[role="_additional-resources"] -.Additional resources - -* For more information about the TPM v2 and Tang encryption modes, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/configuring-automated-unlocking-of-encrypted-volumes-using-policy-based-decryption_security-hardening[Configuring automated unlocking of encrypted volumes using policy-based decryption]. \ No newline at end of file diff --git a/modules/installation-supported-aws-machine-types.adoc b/modules/installation-supported-aws-machine-types.adoc deleted file mode 100644 index dd9910e9c317..000000000000 --- a/modules/installation-supported-aws-machine-types.adoc +++ /dev/null @@ -1,542 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_aws/installing-aws-government-region.adoc -// * installing/installing_aws/installing-aws-secret-region.adoc -// * installing/installing_aws/installing-aws-customizations.adoc -// * installing/installing_aws/installing-aws-vpc.adoc -// * installing/installing_aws/installing-aws-private.adoc -// * installing/installing_aws/installing-aws-china.adoc - -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC. This attribute excludes arm64 content from installing on gov regions. 
When government regions are supported on arm64, remove this ifdevel. -ifeval::["{context}" == "installing-aws-government-region"] -:aws-govcloud: -endif::[] -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC. This attribute excludes arm64 content from installing on secret regions. When secret regions are supported on arm64, remove this ifdevel. -ifeval::["{context}" == "installing-aws-secret-region"] -:aws-secret: -endif::[] -//Starting in 4.10, aws on arm64 is only supported for installation on custom, network custom, private clusters and VPC. This attribute excludes arm64 content from installing in china regions. When china regions are supported on arm64, remove this ifdevel. -ifeval::["{context}" == "installing-aws-china-region"] -:aws-china: -endif::[] - -[id="installation-supported-aws-machine-types_{context}"] -= Supported AWS machine types - -The following Amazon Web Services (AWS) instance types are supported with {product-title}. - -.Machine types based on x86_64 architecture -[%collapsible] -==== -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Instance type -|Bootstrap -|Control plane -|Compute - -|`i3.large` -|x -| -| - -|`m4.large` -| -| -|x - -|`m4.xlarge` -| -|x -|x - -|`m4.2xlarge` -| -|x -|x - -|`m4.4xlarge` -| -|x -|x - -|`m4.10xlarge` -| -|x -|x - -|`m4.16xlarge` -| -|x -|x - -|`m5.large` -| -| -|x - -|`m5.xlarge` -| -|x -|x - -|`m5.2xlarge` -| -|x -|x - -|`m5.4xlarge` -| -|x -|x - -|`m5.8xlarge` -| -|x -|x - -|`m5.12xlarge` -| -|x -|x - -|`m5.16xlarge` -| -|x -|x - -|`m5a.large` -| -| -|x - -|`m5a.xlarge` -| -|x -|x - -|`m5a.2xlarge` -| -|x -|x - -|`m5a.4xlarge` -| -|x -|x - -|`m5a.8xlarge` -| -|x -|x - -|`m5a.12xlarge` -| -|x -|x - -|`m5a.16xlarge` -| -|x -|x - -|`m6i.large` -| -| -|x - -|`m6i.xlarge` -| -|x -|x - -|`m6i.2xlarge` -| -|x -|x - -|`m6i.4xlarge` -| -|x -|x - -|`m6i.8xlarge` -| -|x -|x - -|`m6i.12xlarge` -| -|x -|x - -|`m6i.16xlarge` -| -|x -|x - -|`c4.2xlarge` -| -|x -|x - -|`c4.4xlarge` -| -|x -|x - -|`c4.8xlarge` -| -|x -|x - -|`c5.xlarge` -| -| -|x - -|`c5.2xlarge` -| -|x -|x - -|`c5.4xlarge` -| -|x -|x - -|`c5.9xlarge` -| -|x -|x - -|`c5.12xlarge` -| -|x -|x - -|`c5.18xlarge` -| -|x -|x - -|`c5.24xlarge` -| -|x -|x - -|`c5a.xlarge` -| -| -|x - -|`c5a.2xlarge` -| -|x -|x - -|`c5a.4xlarge` -| -|x -|x - -|`c5a.8xlarge` -| -|x -|x - -|`c5a.12xlarge` -| -|x -|x - -|`c5a.16xlarge` -| -|x -|x - -|`c5a.24xlarge` -| -|x -|x - -|`r4.large` -| -| -|x - -|`r4.xlarge` -| -|x -|x - -|`r4.2xlarge` -| -|x -|x - -|`r4.4xlarge` -| -|x -|x - -|`r4.8xlarge` -| -|x -|x - -|`r4.16xlarge` -| -|x -|x - -|`r5.large` -| -| -|x - -|`r5.xlarge` -| -|x -|x - -|`r5.2xlarge` -| -|x -|x - -|`r5.4xlarge` -| -|x -|x - -|`r5.8xlarge` -| -|x -|x - -|`r5.12xlarge` -| -|x -|x - -|`r5.16xlarge` -| -|x -|x - -|`r5.24xlarge` -| -|x -|x - -|`r5a.large` -| -| -|x - -|`r5a.xlarge` -| -|x -|x - -|`r5a.2xlarge` -| -|x -|x - -|`r5a.4xlarge` -| -|x -|x - -|`r5a.8xlarge` -| -|x -|x - -|`r5a.12xlarge` -| -|x -|x - -|`r5a.16xlarge` -| -|x -|x - -|`r5a.24xlarge` -| -|x -|x - -|`t3.large` -| -| -|x - -|`t3.xlarge` -| -| -|x - -|`t3.2xlarge` -| -| -|x - -|`t3a.large` -| -| -|x - -|`t3a.xlarge` -| -| -|x - -|`t3a.2xlarge` -| -| -|x - -|=== -==== - -ifndef::aws-govcloud,aws-secret,aws-china,openshift-origin,localzone[] -.Machine types based on arm64 architecture -[%collapsible] -==== -[cols="2a,2a,2a,2a",options="header"] -|=== - -|Instance type -|Bootstrap -|Control plane -|Compute - -|`m6g.large` -|x -| -|x - -|`m6g.xlarge` -| -|x -|x - 
-|`m6g.2xlarge` -| -|x -|x - -|`m6g.4xlarge` -| -|x -|x - -|`m6g.8xlarge` -| -|x -|x - -|`m6g.12xlarge` -| -|x -|x - -|`m6g.16xlarge` -| -|x -|x - -|`c6g.large` -|x -| -| - -|`c6g.xlarge` -| -| -|x - -|`c6g.2xlarge` -| -|x -|x - -|`c6g.4xlarge` -| -|x -|x - -|`c6g.8xlarge` -| -|x -|x - -|`c6g.12xlarge` -| -|x -|x - -|`c6g.16xlarge` -| -|x -|x - -|`c7g.large` -|x -| -| - -|`c7g.xlarge` -| -|x -|x - -|`c7g.2xlarge` -| -|x -|x - -|`c7g.4xlarge` -| -|x -|x - -|`c7g.8xlarge` -| -|x -|x - -|`c7g.12xlarge` -| -|x -|x - -|`c7g.16large` -| -|x -|x - -|=== -==== -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!aws-restricted-upi: -endif::[] -ifeval::["{context}" == "installing-aws-government-region"] -:!aws-govcloud: -endif::[] -ifeval::["{context}" == "installing-aws-secret-region"] -:!aws-secret: -endif::[] -ifeval::["{context}" == "installing-aws-china-region"] -:!aws-china: -endif::[] diff --git a/modules/installation-three-node-cluster-cloud-provider.adoc b/modules/installation-three-node-cluster-cloud-provider.adoc deleted file mode 100644 index 34f68ea1b5a1..000000000000 --- a/modules/installation-three-node-cluster-cloud-provider.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_aws/installing-aws-three-node.adoc -// * installing/installing_azure/installing-azure-three-node.adoc -// * installing/installing_gcp/installing-gcp-three-node.adoc -// * installing/installing_vsphere/installing-vsphere-three-node.adoc - -ifeval::["{context}" == "installing-aws-three-node"] -:aws: -endif::[] -ifeval::["{context}" == "installing-azure-three-node"] -:azure: -endif::[] -ifeval::["{context}" == "installing-gcp-three-node"] -:gcp: -endif::[] -ifeval::["{context}" == "installing-vsphere-three-node"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-nutanix-three-node"] -:nutanix: -endif::[] - -:_content-type: PROCEDURE -[id="installation-three-node-cluster_{context}"] -= Configuring a three-node cluster - -You configure a three-node cluster by setting the number of worker nodes to `0` in the `install-config.yaml` file before deploying the cluster. Setting the number of worker nodes to `0` ensures that the control plane machines are schedulable. This allows application workloads to be scheduled to run from the control plane nodes. - -[NOTE] -==== -Because application workloads run from control plane nodes, additional subscriptions are required, as the control plane nodes are considered to be compute nodes. -==== - -.Prerequisites - -* You have an existing `install-config.yaml` file. - -.Procedure - -ifdef::nutanix[] -* Set the number of compute replicas to `0` in your `install-config.yaml` file, as shown in the following `compute` stanza: -endif::nutanix[] - -ifndef::nutanix[] -. Set the number of compute replicas to `0` in your `install-config.yaml` file, as shown in the following `compute` stanza: -endif::nutanix[] -+ -.Example `install-config.yaml` file for a three-node cluster -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- name: worker - platform: {} - replicas: 0 -# ... ----- -ifndef::vsphere,nutanix[] -. If you are deploying a cluster with user-provisioned infrastructure: -** After you create the Kubernetes manifest files, make sure that the `spec.mastersSchedulable` parameter is set to `true` in `cluster-scheduler-02-config.yml` file. You can locate this file in `<installation_directory>/manifests`. 
-ifdef::aws[] -For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on user-provisioned infrastructure in AWS by using CloudFormation templates". -endif::aws[] -ifdef::azure[] -For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on Azure using ARM templates". -endif::azure[] -ifdef::gcp[] -For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates". -endif::gcp[] -** Do not create additional worker nodes. -endif::vsphere,nutanix[] - -ifdef::vsphere[] -. If you are deploying a cluster with user-provisioned infrastructure: -** Configure your application ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. In a three-node cluster, the Ingress Controller pods run on the control plane nodes. For more information, see the "Load balancing requirements for user-provisioned infrastructure". -** After you create the Kubernetes manifest files, make sure that the `spec.mastersSchedulable` parameter is set to `true` in `cluster-scheduler-02-config.yml` file. You can locate this file in `<installation_directory>/manifests`. -ifdef::vsphere[] -For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on vSphere with user-provisioned infrastructure". -endif::vsphere[] -** Do not create additional worker nodes. -endif::vsphere[] - -ifndef::nutanix[] -.Example `cluster-scheduler-02-config.yml` file for a three-node cluster -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - creationTimestamp: null - name: cluster -spec: - mastersSchedulable: true - policy: - name: "" -status: {} ----- -endif::nutanix[] - -ifeval::["{context}" == "installing-aws-three-node"] -:!aws: -endif::[] -ifeval::["{context}" == "installing-azure-three-node"] -:!azure: -endif::[] -ifeval::["{context}" == "installing-gcp-three-node"] -:!gcp: -endif::[] -ifeval::["{context}" == "installing-vsphere-three-node"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-nutanix-three-node"] -:!nutanix: -endif::[] diff --git a/modules/installation-three-node-cluster.adoc b/modules/installation-three-node-cluster.adoc deleted file mode 100644 index cb78197d15d1..000000000000 --- a/modules/installation-three-node-cluster.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc [Eventually] -// * installing/installing_azure/installing-azure-user-infra.adoc [Eventually] -// * installing/installing_gcp/installing-gcp-user-infra.adoc [Eventually] -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc [Eventually] -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc [Eventually] -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc [Eventually] -// * installing/installing_vsphere/installing-vsphere.adoc [Eventually] -// * installing/installing_ibm_z/installing-ibm-z.adoc [Eventually] - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] 
-:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-three-node-cluster_{context}"] -= Configuring a three-node cluster - -ifndef::ibm-z,ibm-z-kvm[] -Optionally, you can deploy zero compute machines in a bare metal cluster that consists of three control plane machines only. This provides smaller, more resource efficient clusters for cluster administrators and developers to use for testing, development, and production. -endif::ibm-z,ibm-z-kvm[] -ifdef::ibm-z,ibm-z-kvm[] -Optionally, you can deploy zero compute machines in a minimal three node cluster that consists of three control plane machines only. This provides smaller, more resource efficient clusters for cluster administrators and developers to use for testing, development, and production. -endif::ibm-z,ibm-z-kvm[] - -In three-node {product-title} environments, the three control plane machines are schedulable, which means that your application workloads are scheduled to run on them. - -.Prerequisites - -* You have an existing `install-config.yaml` file. - -.Procedure - -* Ensure that the number of compute replicas is set to `0` in your `install-config.yaml` file, as shown in the following `compute` stanza: -+ -[source,yaml] ----- -compute: -- name: worker - platform: {} - replicas: 0 ----- -+ -[NOTE] -==== -You must set the value of the `replicas` parameter for the compute machines to `0` when you install {product-title} on user-provisioned infrastructure, regardless of the number of compute machines you are deploying. In installer-provisioned installations, the parameter controls the number of compute machines that the cluster creates and manages for you. This does not apply to user-provisioned installations, where the compute machines are deployed manually. -==== -ifdef::ibm-z,ibm-z-kvm[] -+ -[NOTE] -==== -The preferred resource for control plane nodes is six vCPUs and 21 GB. For three control plane nodes this is the memory + vCPU equivalent of a minimum five-node cluster. You should back the three nodes, each installed on a 120 GB disk, with three IFLs that are SMT2 enabled. The minimum tested setup is three vCPUs and 10 GB on a 120 GB disk for each control plane node. -==== -endif::ibm-z,ibm-z-kvm[] -.Next steps - -For three-node cluster installations, follow these next steps: - -* If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. See the _Load balancing requirements for user-provisioned infrastructure_ section for more information. - -* When you create the Kubernetes manifest files in the following procedure, ensure that the `mastersSchedulable` parameter in the `<installation_directory>/manifests/cluster-scheduler-02-config.yml` file is set to `true`. This enables your application workloads to run on the control plane nodes. - -* Do not deploy any compute nodes when you create the {op-system-first} machines. 
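As a quick check related to the preceding next steps, you can confirm the scheduler setting directly from the command line after you generate the manifests. This is a minimal sanity check, not part of the original procedure, and assumes only that `grep` is available on your installation host:

[source,terminal]
----
$ grep mastersSchedulable <installation_directory>/manifests/cluster-scheduler-02-config.yml
----

.Example output
[source,terminal]
----
  mastersSchedulable: true
----

If the value is `false`, edit the file and set it to `true` before you create the Ignition config files so that your application workloads can run on the control plane nodes.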
- -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -:!restricted: -endif::[] diff --git a/modules/installation-uninstall-clouds.adoc b/modules/installation-uninstall-clouds.adoc deleted file mode 100644 index 45a6f372cb3d..000000000000 --- a/modules/installation-uninstall-clouds.adoc +++ /dev/null @@ -1,176 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/uninstalling-cluster-aws.adoc -// * installing/installing_azure/uninstalling-cluster-azure.adoc -// * installing/installing_azure/uninstalling-cluster-azure-stack-hub.adoc -// * installing/installing_gcp/uninstalling-cluster-gcp.adoc -// * installing/installing_ibm_cloud_public/uninstalling-cluster-ibm-cloud.adoc -// * installing/installing_ibm_powervs/uninstalling-cluster-ibm-power-vs.adoc -// * installing/installing_osp/uninstalling-cluster-openstack.adoc -// * installing/installing_rhv/uninstalling-cluster-rhv.adoc -// * installing/installing_vsphere/uninstalling-cluster-vsphere-installer-provisioned.adoc -// * installing/installing_nutanix/uninstalling-cluster-nutanix.adoc - -ifeval::["{context}" == "uninstalling-cluster-aws"] -:aws: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:gcp: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-ibm-cloud"] -:ibm-cloud: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-ibm-power-vs"] -:ibm-power-vs: -endif::[] - -:_content-type: PROCEDURE -[id="installation-uninstall-clouds_{context}"] -= Removing a cluster that uses installer-provisioned infrastructure - -You can remove a cluster that uses installer-provisioned infrastructure from your cloud. - -ifdef::aws[] -[NOTE] -==== -If you deployed your cluster to the AWS C2S Secret Region, the installation program does not support destroying the cluster; you must manually remove the cluster resources. -==== -endif::aws[] - -[NOTE] -==== -After uninstallation, check your cloud provider for any resources not removed properly, especially with User Provisioned Infrastructure (UPI) clusters. There might be resources that the installer did not create or that the installer is unable to access. -ifdef::gcp[] -For example, some Google Cloud resources require link:https://cloud.google.com/iam/docs/overview#concepts_related_to_access_management[IAM permissions] in shared VPC host projects, or there might be unused link:https://cloud.google.com/sdk/gcloud/reference/compute/health-checks/delete[health checks that must be deleted]. -endif::gcp[] -==== - -.Prerequisites - -* You have a copy of the installation program that you used to deploy the cluster. -* You have the files that the installation program generated when you created your -cluster. -ifdef::ibm-cloud,ibm-power-vs[] -* You have configured the `ccoctl` binary. -* You have installed the IBM Cloud CLI and installed or updated the VPC infrastructure service plugin. For more information see "Prerequisites" in the link:https://cloud.ibm.com/docs/vpc?topic=vpc-infrastructure-cli-plugin-vpc-reference&interface=ui#cli-ref-prereqs[IBM Cloud VPC CLI documentation]. -endif::ibm-cloud,ibm-power-vs[] - -.Procedure -ifdef::ibm-cloud,ibm-power-vs[] -. 
If the following conditions are met, this step is required: -** The installer created a resource group as part of the installation process. -** You or one of your applications created persistent volume claims (PVCs) after the cluster was deployed. - -+ -In which case, the PVCs are not removed when uninstalling the cluster, which might prevent the resource group from being successfully removed. To prevent a failure: - -.. Log in to the IBM Cloud using the CLI. -.. To list the PVCs, run the following command: -+ -[source, terminal] ----- -$ ibmcloud is volumes --resource-group-name <infrastructure_id> ----- -+ -For more information about listing volumes, see the link:https://cloud.ibm.com/docs/vpc?topic=vpc-infrastructure-cli-plugin-vpc-reference&interface=ui#volume-cli[IBM Cloud VPC CLI documentation]. - -.. To delete the PVCs, run the following command: -+ -[source, terminal] ----- -$ ibmcloud is volume-delete --force <volume_id> ----- -+ -For more information about deleting volumes, see the link:https://cloud.ibm.com/docs/vpc?topic=vpc-infrastructure-cli-plugin-vpc-reference&interface=ui#volume-delete[IBM Cloud VPC CLI documentation]. - -. Export the API key that was created as part of the installation process. -endif::ibm-cloud,ibm-power-vs[] -ifdef::ibm-cloud[] -+ -[source,terminal] ----- -$ export IC_API_KEY=<api_key> ----- -+ -endif::ibm-cloud[] -ifdef::ibm-power-vs[] -+ -[source,terminal] ----- -$ export IBMCLOUD_API_KEY=<api_key> ----- -+ -endif::ibm-power-vs[] -ifdef::ibm-cloud,ibm-power-vs[] -[NOTE] -==== -You must set the variable name exactly as specified. The installation program expects the variable name to be present to remove the service IDs that were created when the cluster was installed. -==== -endif::ibm-cloud,ibm-power-vs[] -. From the directory that contains the installation program on the computer that you used to install the cluster, run the following command: -+ -[source,terminal] ----- -$ ./openshift-install destroy cluster \ ---dir <installation_directory> --log-level info <1> <2> ----- -<1> For `<installation_directory>`, specify the path to the directory that you -stored the installation files in. -<2> To view different details, specify `warn`, `debug`, or `error` instead of `info`. -ifndef::ibm-power-vs[] -+ -[NOTE] -==== -You must specify the directory that contains the cluster definition files for -your cluster. The installation program requires the `metadata.json` file in this -directory to delete the cluster. -==== -endif::ibm-power-vs[] -ifdef::ibm-power-vs[] -+ -[NOTE] -==== -* You must specify the directory that contains the cluster definition files for -your cluster. The installation program requires the `metadata.json` file in this -directory to delete the cluster. - -* You might have to run the `openshift-install destroy` command up to three times to ensure a proper cleanup. -==== -endif::ibm-power-vs[] - -ifdef::ibm-cloud,ibm-power-vs[] -. Remove the manual CCO credentials that were created for the cluster: -+ -[source,terminal] ----- -$ ccoctl ibmcloud delete-service-id \ - --credentials-requests-dir <path_to_credential_requests_directory> \ - --name <cluster_name> ----- -+ --- -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. -==== --- -endif::ibm-cloud,ibm-power-vs[] - -. Optional: Delete the `<installation_directory>` directory and the -{product-title} installation program. 
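After the `destroy cluster` command completes, you can check your cloud provider for anything that was not removed, as the earlier note recommends. The following sketch shows one way to do this on AWS; it is an illustration, not part of the original procedure, and assumes that the `aws` CLI and `jq` are installed, that you have not yet deleted the installation directory, and that your cluster resources carry the standard `kubernetes.io/cluster/<infra_id>` ownership tag:

[source,terminal]
----
# <infra_id> is read from the metadata.json file in your installation directory
$ export INFRA_ID=$(jq -r .infraID <installation_directory>/metadata.json)

# List any remaining resources that still carry the cluster ownership tag
$ aws resourcegroupstaggingapi get-resources \
    --tag-filters Key=kubernetes.io/cluster/${INFRA_ID},Values=owned \
    --query 'ResourceTagMappingList[].ResourceARN'
----

An empty list indicates that no tagged resources remain; review and delete manually any ARNs that are returned.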
- - -ifeval::["{context}" == "uninstalling-cluster-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-gcp"] -:!gcp: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-ibm-cloud"] -:!ibm-cloud: -endif::[] -ifeval::["{context}" == "uninstalling-cluster-ibm-power-vs"] -:!ibm-power-vs: -endif::[] - -// The above CCO credential removal for IBM Cloud is only necessary for manual mode. Future releases that support other credential methods will not require this step. diff --git a/modules/installation-uninstall-infra.adoc b/modules/installation-uninstall-infra.adoc deleted file mode 100644 index 457ad3cfcdf5..000000000000 --- a/modules/installation-uninstall-infra.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/uninstalling-openstack-user.adoc - -:_content-type: PROCEDURE -[id="installation-uninstall-infra_{context}"] -= Removing a cluster from {rh-openstack} that uses your own infrastructure - -You can remove an {product-title} cluster on {rh-openstack-first} that uses your own infrastructure. To complete the removal process quickly, run several Ansible playbooks. - -.Prerequisites - -* Python 3 is installed on your machine. -* You downloaded the modules in "Downloading playbook dependencies." -* You have the playbooks that you used to install the cluster. -* You modified the playbooks that are prefixed with `down-` to reflect any changes that you made to their corresponding installation playbooks. For example, changes to the `bootstrap.yaml` file are reflected in the `down-bootstrap.yaml` file. -* All of the playbooks are in a common directory. - -.Procedure - -. On a command line, run the playbooks that you downloaded: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yaml \ - down-bootstrap.yaml \ - down-control-plane.yaml \ - down-compute-nodes.yaml \ - down-load-balancers.yaml \ - down-network.yaml \ - down-security-groups.yaml ----- - -. Remove any DNS record changes you made for the {product-title} installation. - -{product-title} is removed from your infrastructure. \ No newline at end of file diff --git a/modules/installation-user-defined-tags-azure.adoc b/modules/installation-user-defined-tags-azure.adoc deleted file mode 100644 index 53fd0c1375f6..000000000000 --- a/modules/installation-user-defined-tags-azure.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_azure/installing-azure-customizations.adoc - -:_content-type: CONCEPT -[id="installing-azure-user-defined-tags_{context}"] -= Configuring the user-defined tags for Azure - -In {product-title}, you can use the tags for grouping resources and for managing resource access and cost. You can define the tags on the Azure resources in the `install-config.yaml` file only during {product-title} cluster creation. You cannot modify the user-defined tags after cluster creation. - -Support for user-defined tags is available only for the resources created in the Azure Public Cloud. User-defined tags are not supported for the {product-title} clusters upgraded to {product-title} 4.14. - -User-defined and {product-title} specific tags are applied only to the resources created by the {product-title} installer and its core operators such as Machine api provider azure Operator, Cluster Ingress Operator, Cluster Image Registry Operator. - -By default, {product-title} installer attaches the {product-title} tags to the Azure resources. 
These {product-title} tags are not accessible for the users. - -You can use the `.platform.azure.userTags` field in the `install-config.yaml` file to define the list of user-defined tags as shown in the following `install-config.yaml` file. - -.Sample `install-config.yaml` file -[source,yaml] ----- -additionalTrustBundlePolicy: Proxyonly <1> -apiVersion: v1 -baseDomain: catchall.azure.devcluster.openshift.com <2> -compute: <3> -- architecture: amd64 - hyperthreading: Enabled <4> - name: worker - platform: {} - replicas: 3 -controlPlane: <5> - architecture: amd64 - hyperthreading: Enabled <6> - name: master - platform: {} - replicas: 3 -metadata: - creationTimestamp: null - name: user <7> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <8> - serviceNetwork: - - 172.30.0.0/16 -platform: - azure: - baseDomainResourceGroupName: os4-common <9> - cloudName: AzurePublicCloud <10> - outboundType: Loadbalancer - region: southindia <11> - userTags: <12> - createdBy: user - environment: dev ----- -<1> Defines the trust bundle policy. -<2> Required. The `baseDomain` parameter specifies the base domain of your cloud provider. The installation program prompts you for this value. -<3> The configuration for the machines that comprise compute. The `compute` section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the `compute` section must begin with a hyphen, `-`. If you do not provide these parameters and values, the installation program provides the default value. -<4> To enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -<5> The configuration for the machines that comprise the control plane. The `controlPlane` section is a single mapping. The first line of the `controlPlane` section must not begin with a hyphen, `-`. You can use only one control plane pool. If you do not provide these parameters and values, the installation program provides the default value. -<6> To enable or disable simultaneous multithreading, or `hyperthreading`. By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to `Disabled`. If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. -<7> The installation program prompts you for this value. -<8> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<9> Specifies the resource group for the base domain of the Azure DNS zone. -<10> Specifies the name of the Azure cloud environment. You can use the `cloudName` field to configure the Azure SDK with the Azure API endpoints. If you do not provide value, the default value is Azure Public Cloud. -<11> Required. Specifies the name of the Azure region that hosts your cluster. The installation program prompts you for this value. -<12> Defines the additional keys and values that the installation program adds as tags to all Azure resources that it creates. - -The user-defined tags have the following limitations: - -* A tag key can have a maximum of 128 characters. 
-* A tag key must begin with a letter, end with a letter, number or underscore, and can contain only letters, numbers, underscores, periods, and hyphens. -* Tag keys are case-insensitive. -* Tag keys cannot be `name`. It cannot have prefixes such as `kubernetes.io`, `openshift.io`, `microsoft`, `azure`, and `windows`. -* A tag value can have a maximum of 256 characters. -* You can configure a maximum of 10 tags for resource group and resources. - -For more information about Azure tags, see link:https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/tag-resources?tabs=json[Azure user-defined tags] \ No newline at end of file diff --git a/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc b/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc deleted file mode 100644 index 826155780304..000000000000 --- a/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc - -ifeval::["{context}" == "installing-azure-user-infra"] -:cp: Azure -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:cp: Azure Stack Hub -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="installation-user-infra-exporting-common-variables-arm-templates_{context}"] -= Exporting common variables for ARM templates - -You must export a common set of variables that are used with the provided Azure -Resource Manager (ARM) templates used to assist in completing a user-provided -infrastructure install on Microsoft {cp}. - -[NOTE] -==== -Specific ARM templates can also require additional exported variables, which are -detailed in their related procedures. -==== - -.Prerequisites - -* Obtain the {product-title} installation program and the pull secret for your cluster. - -.Procedure - -. Export common variables found in the `install-config.yaml` to be used by the -provided ARM templates: -+ -[source,terminal] ----- -$ export CLUSTER_NAME=<cluster_name><1> -$ export AZURE_REGION=<azure_region><2> -$ export SSH_KEY=<ssh_key><3> -$ export BASE_DOMAIN=<base_domain><4> -$ export BASE_DOMAIN_RESOURCE_GROUP=<base_domain_resource_group><5> ----- -<1> The value of the `.metadata.name` attribute from the `install-config.yaml` file. -ifndef::ash[] -<2> The region to deploy the cluster into, for example `centralus`. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file. -endif::ash[] -ifdef::ash[] -<2> The region to deploy the cluster into. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file. -endif::ash[] -<3> The SSH RSA public key file as a string. You must enclose the SSH key in quotes since it contains spaces. This is the value of the `.sshKey` attribute from the `install-config.yaml` file. -ifndef::ash[] -<4> The base domain to deploy the cluster to. The base domain corresponds to the public DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file. -endif::ash[] -ifdef::ash[] -<4> The base domain to deploy the cluster to. The base domain corresponds to the DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file. 
-endif::ash[] -ifndef::ash[] -<5> The resource group where the public DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file. -endif::ash[] -ifdef::ash[] -<5> The resource group where the DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file. -endif::ash[] -+ -For example: -+ -[source,terminal] ----- -$ export CLUSTER_NAME=test-cluster -$ export AZURE_REGION=centralus -$ export SSH_KEY="ssh-rsa xxx/xxx/xxx= user@email.com" -$ export BASE_DOMAIN=example.com -$ export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster ----- - -. Export the kubeadmin credentials: -+ -[source,terminal] ----- -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig <1> ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. - -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!cp: -:!ash: -endif::[] diff --git a/modules/installation-user-infra-exporting-common-variables.adoc b/modules/installation-user-infra-exporting-common-variables.adoc deleted file mode 100644 index f2fd861626b6..000000000000 --- a/modules/installation-user-infra-exporting-common-variables.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform -:cp: GCP -:cp-template: Deployment Manager -:shared-vpc: -endif::[] - -:_content-type: PROCEDURE -[id="installation-user-infra-exporting-common-variables_{context}"] -= Exporting common variables for {cp-template} templates - -You must export a common set of variables that are used with the provided -{cp-template} templates used to assist in completing a user-provided -infrastructure install on {cp-first} ({cp}). - -[NOTE] -==== -Specific {cp-template} templates can also require additional exported -variables, which are detailed in their related procedures. -==== - -.Prerequisites - -* Obtain the {product-title} installation program and the pull secret for your cluster. -* Generate the Ignition config files for your cluster. -* Install the `jq` package. - -.Procedure - -. 
Export the following common variables to be used by the provided {cp-template} -templates: -+ -ifndef::shared-vpc[] -[source,terminal] ----- -$ export BASE_DOMAIN='<base_domain>' -$ export BASE_DOMAIN_ZONE_NAME='<base_domain_zone_name>' -$ export NETWORK_CIDR='10.0.0.0/16' -$ export MASTER_SUBNET_CIDR='10.0.0.0/17' -$ export WORKER_SUBNET_CIDR='10.0.128.0/17' - -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig <1> -$ export CLUSTER_NAME=`jq -r .clusterName <installation_directory>/metadata.json` -$ export INFRA_ID=`jq -r .infraID <installation_directory>/metadata.json` -$ export PROJECT_NAME=`jq -r .gcp.projectID <installation_directory>/metadata.json` -$ export REGION=`jq -r .gcp.region <installation_directory>/metadata.json` ----- -<1> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. -endif::shared-vpc[] -//you need some of these variables for the VPC, and you do that - -ifdef::shared-vpc[] -[source,terminal] ----- -$ export BASE_DOMAIN='<base_domain>' <1> -$ export BASE_DOMAIN_ZONE_NAME='<base_domain_zone_name>' <1> -$ export NETWORK_CIDR='10.0.0.0/16' - -$ export KUBECONFIG=<installation_directory>/auth/kubeconfig <2> -$ export CLUSTER_NAME=`jq -r .clusterName <installation_directory>/metadata.json` -$ export INFRA_ID=`jq -r .infraID <installation_directory>/metadata.json` -$ export PROJECT_NAME=`jq -r .gcp.projectID <installation_directory>/metadata.json` ----- -<1> Supply the values for the host project. -<2> For `<installation_directory>`, specify the path to the directory that you stored the installation files in. -endif::shared-vpc[] - -ifeval::["{context}" == "installing-gcp-user-infra"] -:!cp-first: -:!cp: -:!cp-template: -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!cp-first: -:!cp: -:!cp-template: -endif::[] - -ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:!cp-first: Google Cloud Platform -:!cp: GCP -:!cp-template: Deployment Manager -endif::[] - -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform -:!cp: GCP -:!cp-template: Deployment Manager -:!shared-vpc: -endif::[] diff --git a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc b/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc deleted file mode 100644 index d756d3fa0ee8..000000000000 --- a/modules/installation-user-infra-generate-k8s-manifest-ignition.adoc +++ /dev/null @@ -1,566 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * 
installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_openstack/installing-openstack-user.adoc - - -ifeval::["{context}" == "installing-aws-user-infra"] -:aws: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-aws"] -:aws: -:restricted: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:azure: -:azure-user-infra: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:ash: -:azure-user-infra: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -:restricted: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:baremetal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:baremetal-restricted: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:gcp: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:gcp: -:user-infra-vpc: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:gcp: -:restricted: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:osp: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -:three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:baremetal: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z: -:restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:restricted: -endif::[] - -:_content-type: PROCEDURE -[id="installation-user-infra-generate-k8s-manifest-ignition_{context}"] -= Creating the Kubernetes manifest and Ignition config files - -Because you must modify some cluster definition files and manually start the cluster machines, you must generate the Kubernetes manifest and Ignition config files that the cluster needs to configure the machines. - -The installation configuration file transforms into the Kubernetes manifests. The manifests wrap into the Ignition configuration files, which are later used to configure the cluster machines. - -[IMPORTANT] -==== -* The Ignition config files that the {product-title} installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending `node-bootstrapper` certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for _Recovering from expired control plane certificates_ for more information. 
- -* It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. -==== - -ifdef::ibm-z[] -[NOTE] -==== -The installation program that generates the manifest and Ignition files is architecture specific and can be obtained from the -link:https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/[client image mirror]. The Linux version of the installation program runs on s390x only. This installer program is also available as a Mac OS version. -==== -endif::ibm-z[] -ifdef::ibm-power[] -[NOTE] -==== -The installation program that generates the manifest and Ignition files is architecture specific and can be obtained from the -link:https://mirror.openshift.com/pub/openshift-v4/ppc64le/clients/ocp/latest/[client image mirror]. The Linux version of the installation program (without an architecture postfix) runs on ppc64le only. This installer program is also available as a Mac OS version. -==== -endif::ibm-power[] - -.Prerequisites - -* You obtained the {product-title} installation program. -ifdef::restricted,baremetal-restricted[] -For a restricted network installation, these files are on your mirror host. -endif::restricted,baremetal-restricted[] -* You created the `install-config.yaml` installation configuration file. - -.Procedure - -. Change to the directory that contains the {product-title} installation program and generate the Kubernetes manifests for the cluster: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -+ -<1> For `<installation_directory>`, specify the installation directory that -contains the `install-config.yaml` file you created. - -ifdef::aws,azure,ash,gcp[] -. Remove the Kubernetes manifest files that define the control plane machines: -+ -[source,terminal] ----- -$ rm -f <installation_directory>/openshift/99_openshift-cluster-api_master-machines-*.yaml ----- -+ -By removing these files, you prevent the cluster from automatically generating control plane machines. -endif::aws,azure,ash,gcp[] - -ifdef::aws,ash,azure,gcp[] -. Remove the Kubernetes manifest files that define the control plane machine set: -+ -[source,terminal] ----- -$ rm -f <installation_directory>/openshift/99_openshift-machine-api_master-control-plane-machine-set.yaml ----- -endif::aws,ash,azure,gcp[] - -ifdef::gcp[] -ifndef::user-infra-vpc[] -. Optional: If you do not want the cluster to provision compute machines, remove -the Kubernetes manifest files that define the worker machines: -endif::user-infra-vpc[] -endif::gcp[] -ifdef::aws,azure,ash,user-infra-vpc[] -. Remove the Kubernetes manifest files that define the worker machines: -endif::aws,azure,ash,user-infra-vpc[] -ifdef::aws,azure,ash,gcp[] -+ -[source,terminal] ----- -$ rm -f <installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-*.yaml ----- -+ -Because you create and manage the worker machines yourself, you do not need to initialize these machines. -endif::aws,azure,ash,gcp[] - -ifdef::osp,vsphere[] -. 
Remove the Kubernetes manifest files that define the control plane machines and compute machine sets: -+ -[source,terminal] ----- -$ rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml openshift/99_openshift-cluster-api_worker-machineset-*.yaml ----- -+ -Because you create and manage these resources yourself, you do not have -to initialize them. -+ -* You can preserve the compute machine set files to create compute machines by using the machine API, but you must update references to them to match your environment. -endif::osp,vsphere[] -ifdef::baremetal,baremetal-restricted,ibm-z,ibm-power,three-node-cluster[] -+ -[WARNING] -==== -If you are installing a three-node cluster, skip the following step to allow the control plane nodes to be schedulable. -==== -+ -[IMPORTANT] -==== -When you configure control plane nodes from the default unschedulable to schedulable, additional subscriptions are required. This is because control plane nodes then become compute nodes. -==== -endif::baremetal,baremetal-restricted,ibm-z,ibm-power,three-node-cluster[] - -. Check that the `mastersSchedulable` parameter in the `<installation_directory>/manifests/cluster-scheduler-02-config.yml` Kubernetes manifest file is set to `false`. This setting prevents pods from being scheduled on the control plane machines: -+ --- -.. Open the `<installation_directory>/manifests/cluster-scheduler-02-config.yml` file. -.. Locate the `mastersSchedulable` parameter and ensure that it is set to `false`. -.. Save and exit the file. --- - -ifdef::gcp,aws,azure,ash[] -ifndef::user-infra-vpc[] -. Optional: If you do not want -link:https://github.com/openshift/cluster-ingress-operator[the Ingress Operator] -to create DNS records on your behalf, remove the `privateZone` and `publicZone` -sections from the `<installation_directory>/manifests/cluster-dns-02-config.yml` DNS configuration file: -endif::user-infra-vpc[] -ifdef::user-infra-vpc[] -. Remove the `privateZone` -sections from the `<installation_directory>/manifests/cluster-dns-02-config.yml` DNS configuration file: -endif::user-infra-vpc[] -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: DNS -metadata: - creationTimestamp: null - name: cluster -spec: - baseDomain: example.openshift.com - privateZone: <1> - id: mycluster-100419-private-zone -ifndef::user-infra-vpc[] - publicZone: <1> - id: example.openshift.com -endif::user-infra-vpc[] -status: {} ----- -<1> Remove this section completely. -+ -ifndef::user-infra-vpc[] -If you do so, you must add ingress DNS records manually in a later step. -endif::user-infra-vpc[] -endif::gcp,aws,azure,ash[] - -ifdef::user-infra-vpc[] -. Configure the cloud provider for your VPC. -+ --- -.. Open the `<installation_directory>/manifests/cloud-provider-config.yaml` file. -.. Add the `network-project-id` parameter and set its value to the ID of project that hosts the shared VPC network. -.. Add the `network-name` parameter and set its value to the name of the shared VPC network that hosts the {product-title} cluster. -.. Replace the value of the `subnetwork-name` parameter with the value of the shared VPC subnet that hosts your compute machines. 
-+ --- -The contents of the `<installation_directory>/manifests/cloud-provider-config.yaml` resemble the following example: -+ -[source,yaml] ----- -config: |+ - [global] - project-id = example-project - regional = true - multizone = true - node-tags = opensh-ptzzx-master - node-tags = opensh-ptzzx-worker - node-instance-prefix = opensh-ptzzx - external-instance-groups-prefix = opensh-ptzzx - network-project-id = example-shared-vpc - network-name = example-network - subnetwork-name = example-worker-subnet ----- - -. If you deploy a cluster that is not on a private network, open the `<installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml` file and replace the value of the `scope` parameter with `External`. The contents of the file resemble the following example: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - creationTimestamp: null - name: default - namespace: openshift-ingress-operator -spec: - endpointPublishingStrategy: - loadBalancer: - scope: External - type: LoadBalancerService -status: - availableReplicas: 0 - domain: '' - selector: '' ----- - -endif::user-infra-vpc[] - -ifdef::ash[] -. Optional: If your Azure Stack Hub environment uses an internal certificate authority (CA), you must update the `.spec.trustedCA.name` field in the `<installation_directory>/manifests/cluster-proxy-01-config.yaml` file to use `user-ca-bundle`: -+ -[source,yaml] ----- -... -spec: - trustedCA: - name: user-ca-bundle -... ----- -+ -Later, you must update your bootstrap ignition to include the CA. -endif::ash[] - -ifdef::azure-user-infra[] -. When configuring Azure on user-provisioned infrastructure, you must export -some common variables defined in the manifest files to use later in the Azure -Resource Manager (ARM) templates: -.. Export the infrastructure ID by using the following command: -+ -[source,terminal] ----- -$ export INFRA_ID=<infra_id> <1> ----- -<1> The {product-title} cluster has been assigned an identifier (`INFRA_ID`) in the form of `<cluster_name>-<random_string>`. This will be used as the base name for most resources created using the provided ARM templates. This is the value of the `.status.infrastructureName` attribute from the `manifests/cluster-infrastructure-02-config.yml` file. - -.. Export the resource group by using the following command: -+ -[source,terminal] ----- -$ export RESOURCE_GROUP=<resource_group> <1> ----- -<1> All resources created in this Azure deployment exists as part of a link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups[resource group]. The resource group name is also based on the `INFRA_ID`, in the form of `<cluster_name>-<random_string>-rg`. This is the value of the `.status.platformStatus.azure.resourceGroupName` attribute from the `manifests/cluster-infrastructure-02-config.yml` file. -endif::azure-user-infra[] - -ifdef::ash[] -. Manually create your cloud credentials. - -.. From the directory that contains the installation program, obtain details of the {product-title} release image that your `openshift-install` binary is built to use: -+ -[source,terminal] ----- -$ openshift-install version ----- -+ -.Example output -[source,terminal] ----- -release image quay.io/openshift-release-dev/ocp-release:4.y.z-x86_64 ----- - -.. 
Locate all `CredentialsRequest` objects in this release image that target the cloud you are deploying on: -+ -[source,terminal] ----- -$ oc adm release extract quay.io/openshift-release-dev/ocp-release:4.y.z-x86_64 --credentials-requests --cloud=azure ----- -+ -This command creates a YAML file for each `CredentialsRequest` object. -+ -.Sample `CredentialsRequest` object -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: openshift-image-registry-azure - namespace: openshift-cloud-credential-operator -spec: - secretRef: - name: installer-cloud-credentials - namespace: openshift-image-registry - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: AzureProviderSpec - roleBindings: - - role: Contributor ----- - -.. Create YAML files for secrets in the `openshift-install` manifests directory that you generated previously. The secrets must be stored using the namespace and secret name defined in the `spec.secretRef` for each `CredentialsRequest` object. The format for the secret data varies for each cloud provider. -+ -.Sample `secrets.yaml` file: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: ${secret_name} - namespace: ${secret_namespace} -stringData: - azure_subscription_id: ${subscription_id} - azure_client_id: ${app_id} - azure_client_secret: ${client_secret} - azure_tenant_id: ${tenant_id} - azure_resource_prefix: ${cluster_name} - azure_resourcegroup: ${resource_group} - azure_region: ${azure_region} ----- - -[IMPORTANT] -==== -The release image includes `CredentialsRequest` objects for Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set. You can identify these objects by their use of the `release.openshift.io/feature-set: TechPreviewNoUpgrade` annotation. - -* If you are not using any of these features, do not create secrets for these objects. Creating secrets for Technology Preview features that you are not using can cause the installation to fail. - -* If you are using any of these features, you must create secrets for the corresponding objects. -==== - -*** To find `CredentialsRequest` objects with the `TechPreviewNoUpgrade` annotation, run the following command: -+ -[source,terminal] ----- -$ grep "release.openshift.io/feature-set" * ----- -+ -.Example output -[source,terminal] ----- -0000_30_capi-operator_00_credentials-request.yaml: release.openshift.io/feature-set: TechPreviewNoUpgrade ----- -// Right now, only the CAPI Operator is an issue, but it might make sense to update `0000_30_capi-operator_00_credentials-request.yaml` to `<tech_preview_credentials_request>.yaml` for the future. - -.. Create a `cco-configmap.yaml` file in the manifests directory with the Cloud Credential Operator (CCO) disabled: -+ -.Sample `ConfigMap` object -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: -name: cloud-credential-operator-config -namespace: openshift-cloud-credential-operator - annotations: - release.openshift.io/create-only: "true" -data: - disabled: "true" ----- -endif::ash[] - -. To create the Ignition configuration files, run the following command from the directory that contains the installation program: -+ -[source,terminal] ----- -$ ./openshift-install create ignition-configs --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the same installation directory. 
-+ -Ignition config files are created for the bootstrap, control plane, and compute nodes in the installation directory. The `kubeadmin-password` and `kubeconfig` files are created in the `./<installation_directory>/auth` directory: -+ ----- -. -├── auth -│ ├── kubeadmin-password -│ └── kubeconfig -├── bootstrap.ign -├── master.ign -├── metadata.json -└── worker.ign ----- - -ifdef::osp[] -. Export the metadata file's `infraID` key as an environment variable: -+ -[source,terminal] ----- -$ export INFRA_ID=$(jq -r .infraID metadata.json) ----- - -[TIP] -Extract the `infraID` key from `metadata.json` and use it as a prefix for all of the {rh-openstack} resources that you create. By doing so, you avoid name conflicts when making multiple deployments in the same project. -endif::osp[] - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!aws: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!aws: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!azure: -:!azure-user-infra: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!ash: -:!azure-user-infra: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:!gcp: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!gcp: -:!user-infra-vpc: -endif::[] -ifeval::["{context}" == "installing-bare-metal"] -:!baremetal: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-bare-metal"] -:!baremetal-restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!gcp: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-osp-user"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!osp: -endif::[] -ifeval::["{context}" == "installing-vsphere"] -:!vsphere: -:!three-node-cluster: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!vsphere: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-platform-agnostic"] -:!baremetal: -endif::[] -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z: -:!restricted: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -:!restricted: -endif::[] diff --git a/modules/installation-user-infra-generate.adoc b/modules/installation-user-infra-generate.adoc deleted file mode 100644 index de6a6362cb8f..000000000000 --- a/modules/installation-user-infra-generate.adoc +++ /dev/null @@ -1,154 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-user-infra.adoc -// * installing/installing_azure/installing-azure-user-infra.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-shared-vpc.adoc -// * installing/installing_aws/installing-restricted-networks-aws.adoc -// * 
installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:restricted: -:cp-first: Amazon Web Services -:cp: AWS -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:cp-first: Amazon Web Services -:cp: AWS -:aws: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:cp-first: Amazon Web Services -:cp: AWS -:localzone: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:cp-first: Microsoft Azure -:cp: Azure -:azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:cp-first: Microsoft Azure Stack Hub -:cp: Azure Stack Hub -:ash: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform -:cp: GCP -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform -:cp: GCP -:gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:cp-first: Google Cloud Platform -:cp: GCP -:gcp-shared: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform -:cp: GCP -:gcp: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:cp-first: Red Hat OpenStack Platform -:cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:cp-first: Red Hat OpenStack Platform -:cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:cp-first: Red Hat OpenStack Platform -:cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:cp-first: Red Hat OpenStack Platform -:cp: RHOSP -endif::[] - -[id="installation-user-infra-generate_{context}"] -= Creating the installation files for {cp} - -ifdef::azure[] -To install {product-title} on {cp-first} using user-provisioned infrastructure, you must generate the files that the installation program needs to deploy your cluster and modify them so that the cluster creates only the machines that it will use. You generate and customize the `install-config.yaml` file, Kubernetes manifests, and Ignition config files. You also have the option to first set up a separate `var` partition during the preparation phases of installation. -endif::azure[] -ifdef::ash[] -To install {product-title} on {cp-first} using user-provisioned infrastructure, you must generate the files that the installation program needs to deploy your cluster and modify them so that the cluster creates only the machines that it will use. You manually create the `install-config.yaml` file, and then generate and customize the Kubernetes manifests and Ignition config files. You also have the option to first set up a separate `var` partition during the preparation phases of installation. -endif::ash[] -ifdef::aws,gcp[] -To install {product-title} on {cp-first} ({cp}) using user-provisioned infrastructure, you must generate the files that the installation program needs to deploy your cluster and modify them so that the cluster creates only the machines that it will use. You generate and customize the `install-config.yaml` file, Kubernetes manifests, and Ignition config files. You also have the option to first set up a separate `var` partition during the preparation phases of installation. 
-endif::aws,gcp[] -ifdef::localzone[] -To install {product-title} on {cp-first} ({cp}) and use AWS Local Zones, you must generate the files that the installation program needs to deploy your cluster and modify them so that the cluster creates only the machines that it will use. You generate and customize the `install-config.yaml` file and configure add Local Zone subnets to it. -endif::localzone[] -ifdef::gcp-shared[] -To install {product-title} on {cp-first} ({cp}) into a shared VPC, you must generate the `install-config.yaml` file and modify it so that the cluster uses the correct VPC networks, DNS zones, and project names. -endif::gcp-shared[] - -ifeval::["{context}" == "installing-restricted-networks-aws"] -:!restricted: -:!cp-first: -:!cp: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-user-infra"] -:!cp-first: -:!cp: -:!aws: -endif::[] -ifeval::["{context}" == "installing-aws-localzone"] -:!cp-first: Amazon Web Services -:!cp: AWS -:!localzone: -endif::[] -ifeval::["{context}" == "installing-azure-user-infra"] -:!cp-first: -:!cp: -:!azure: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] -:!cp-first: Microsoft Azure Stack Hub -:!cp: Azure Stack Hub -:!ash: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra"] -:!cp-first: -:!cp: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform -:!cp: GCP -:!gcp: -endif::[] -ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!cp-first: Google Cloud Platform -:!cp: GCP -:!gcp-shared: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp"] -:!cp-first: -:!cp: -:!gcp: -endif::[] -ifeval::["{context}" == "installing-openstack-user"] -:!cp-first: Red Hat OpenStack Platform -:!cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-kuryr"] -:!cp-first: Red Hat OpenStack Platform -:!cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov"] -:!cp-first: Red Hat OpenStack Platform -:!cp: RHOSP -endif::[] -ifeval::["{context}" == "installing-openstack-user-sr-iov-kuryr"] -:!cp-first: Red Hat OpenStack Platform -:!cp: RHOSP -endif::[] diff --git a/modules/installation-user-infra-machines-advanced-console-configuration.adoc b/modules/installation-user-infra-machines-advanced-console-configuration.adoc deleted file mode 100644 index 318bec9e939f..000000000000 --- a/modules/installation-user-infra-machines-advanced-console-configuration.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc - -:_content-type: CONCEPT -[id="installation-user-infra-machines-advanced-console-configuration_{context}"] -= Default console configuration - -{op-system-first} nodes installed from an {product-title} {product-version} boot image use a default console that is meant to accomodate most virtualized and bare metal setups. Different cloud and virtualization platforms may use different default settings depending on the chosen architecture. Bare metal installations use the kernel default settings which typically means the graphical console is the primary console and the serial console is disabled. 
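If you are unsure which console settings a booted {op-system} node actually received, you can inspect its kernel command line. This is a small illustrative check, not part of the original module, and assumes shell access to the node:

[source,terminal]
----
$ grep -o 'console=[^ ]*' /proc/cmdline
----

Each `console=` entry that is printed corresponds to an enabled console; if nothing is printed, the kernel defaults described above apply.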
- -The default consoles may not match your specific hardware configuration or you might have specific needs that require you to adjust the default console. For example: - -* You want to access the emergency shell on the console for debugging purposes. -* Your cloud platform does not provide interactive access to the graphical console, but provides a serial console. -* You want to enable multiple consoles. - -Console configuration is inherited from the boot image. This means that new nodes in existing clusters are unaffected by changes to the default console. - -You can configure the console for bare metal installations in the following ways: - -* Using `coreos-installer` manually on the command line. -* Using the `coreos-installer iso customize` or `coreos-installer pxe customize` subcommands with the `--dest-console` option to create a custom image that automates the process. - -[NOTE] -==== -For advanced customization, perform console configuration using the `coreos-installer iso` or `coreos-installer pxe` subcommands, and not kernel arguments. -==== diff --git a/modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc b/modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc deleted file mode 100644 index 01300cae7fae..000000000000 --- a/modules/installation-user-infra-machines-advanced-customizing-iso-or-pxe.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc - -[id="installation-user-infra-machines-advanced-customizing-iso-or-pxe_{context}"] -= Customizing a live {op-system} ISO or PXE install -You can use the live ISO image or PXE environment to install {op-system} by injecting an Ignition config file directly into the image. This creates a customized image that you can use to provision your system. - -For an ISO image, the mechanism to do this is the `coreos-installer iso customize` subcommand, which modifies the `.iso` file with your configuration. Similarly, the mechanism for a PXE environment is the `coreos-installer pxe customize` subcommand, which creates a new `initramfs` file that includes your customizations. - -The `customize` subcommand is a general purpose tool that can embed other types of customizations as well. The following tasks are examples of some of the more common customizations: - -* Inject custom CA certificates for when corporate security policy requires their use. -* Configure network settings without the need for kernel arguments. -* Embed arbitrary pre-install and post-install scripts or binaries. 
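The procedures that follow show the `--ignition-ca` and `--network-keyfile` customizations individually. As a combined illustration only, the following sketch customizes a live ISO image with a destination Ignition config, a serial console for the installed system, and a post-install script. The `worker.ign` file is one of the Ignition config files generated earlier, `post-install.sh` is a hypothetical script of your own, and `--post-install` is the assumed flag name for embedding it:

[source,terminal]
----
$ coreos-installer iso customize rhcos-<version>-live.x86_64.iso \
    --dest-ignition worker.ign \
    --dest-console ttyS0,115200n8 \
    --post-install post-install.sh
----

Because the `customize` subcommand modifies the `.iso` file in place, keep an unmodified copy of the original image if you plan to build differently customized images for other machine roles.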
diff --git a/modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc b/modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc deleted file mode 100644 index 1a9244f7e3df..000000000000 --- a/modules/installation-user-infra-machines-advanced-customizing-live-ca-certs.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing_bare_metal/installing-bare-metal-network-customizations.adoc - -[id="installation-user-infra-machines-advanced-customizing-live-{boot}-ca-certs_{context}"] -= Modifying a live install {boot-media} to use a custom certificate authority - -You can provide certificate authority (CA) certificates to Ignition with the `--ignition-ca` flag of the `customize` subcommand. You can use the CA certificates during both the installation boot and when provisioning the installed system. - -.Procedure - -. Download the `coreos-installer` binary from the link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[`coreos-installer` image mirror] page. - -ifeval::["{boot-media}" == "ISO image"] -. Retrieve the {op-system} ISO image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and run the following command to customize the ISO image for use with a custom CA: -+ -[source,terminal] ----- -$ coreos-installer iso customize rhcos-<version>-live.x86_64.iso --ignition-ca cert.pem ----- -endif::[] -ifeval::["{boot-media}" == "PXE environment"] -. Retrieve the {op-system} `kernel`, `initramfs` and `rootfs` files from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and run the following command to create a new customized `initramfs` file for use with a custom CA: -+ -[source,terminal] ----- -$ coreos-installer pxe customize rhcos-<version>-live-initramfs.x86_64.img \ - --ignition-ca cert.pem \ - -o rhcos-<version>-custom-initramfs.x86_64.img ----- -endif::[] -+ -[IMPORTANT] -==== -The `coreos.inst.ignition_url` kernel parameter does not work with the `--ignition-ca` flag. -You must use the `--dest-ignition` flag to create a customized image for each cluster. -==== -+ -[NOTE] -==== -Custom CA certificates affect how Ignition fetches remote resources but they do not affect the certificates installed onto the system. -==== -+ -Your CA certificate is applied and affects every subsequent boot of the {boot-media}. 
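Before you embed a certificate authority, you might want to confirm that the file is a valid PEM-encoded certificate and check when it expires. The following optional check is a sketch that uses the standard `openssl` client; `cert.pem` is the same placeholder file name that is used in the preceding command.

[source,terminal]
----
$ openssl x509 -in cert.pem -noout -subject -issuer -enddate
----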
diff --git a/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc b/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc deleted file mode 100644 index 39b3d5675bd7..000000000000 --- a/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing_bare_metal/installing-bare-metal-network-customizations.adoc - -[id="installation-user-infra-machines-advanced-customizing-live-{boot}_network_keyfile_{context}"] -= Modifying a live install {boot-media} with customized network settings -You can embed a NetworkManager keyfile into the live {boot-media} and pass it through to the installed system with the `--network-keyfile` flag of the `customize` subcommand. - -.Procedure - -. Download the `coreos-installer` binary from the link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[`coreos-installer` image mirror] page. - -. Create a connection profile for a bonded interface. For example, create the `bond0.nmconnection` file in your local directory with the following content: -+ -[source,ini] ----- -[connection] -id=bond0 -type=bond -interface-name=bond0 -multi-connect=1 -permissions= - -[ethernet] -mac-address-blacklist= - -[bond] -miimon=100 -mode=active-backup - -[ipv4] -method=auto - -[ipv6] -method=auto - -[proxy] ----- -+ -. Create a connection profile for a secondary interface to add to the bond. For example, create the `bond0-proxy-em1.nmconnection` file in your local directory with the following content: -+ -[source,ini] ----- -[connection] -id=em1 -type=ethernet -interface-name=em1 -master=bond0 -multi-connect=1 -permissions= -slave-type=bond - -[ethernet] -mac-address-blacklist= ----- -+ -. Create a connection profile for a secondary interface to add to the bond. For example, create the `bond0-proxy-em2.nmconnection` file in your local directory with the following content: -+ -[source,ini] ----- -[connection] -id=em2 -type=ethernet -interface-name=em2 -master=bond0 -multi-connect=1 -permissions= -slave-type=bond - -[ethernet] -mac-address-blacklist= ----- -+ -ifeval::["{boot-media}" == "ISO image"] -. Retrieve the {op-system} ISO image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and run the following command to customize the ISO image with your configured networking: -+ -[source,terminal] ----- -$ coreos-installer iso customize rhcos-<version>-live.x86_64.iso \ - --network-keyfile bond0.nmconnection \ - --network-keyfile bond0-proxy-em1.nmconnection \ - --network-keyfile bond0-proxy-em2.nmconnection ----- -endif::[] - -ifeval::["{boot-media}" == "PXE environment"] -. 
Retrieve the {op-system} `kernel`, `initramfs` and `rootfs` files from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and run the following command to create a new customized `initramfs` file that contains your configured networking: -+ -[source,terminal] ----- -$ coreos-installer pxe customize rhcos-<version>-live-initramfs.x86_64.img \ - --network-keyfile bond0.nmconnection \ - --network-keyfile bond0-proxy-em1.nmconnection \ - --network-keyfile bond0-proxy-em2.nmconnection \ - -o rhcos-<version>-custom-initramfs.x86_64.img ----- -endif::[] -+ -Network settings are applied to the live system and are carried over to the destination system. diff --git a/modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc b/modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc deleted file mode 100644 index c0a2e3517921..000000000000 --- a/modules/installation-user-infra-machines-advanced-customizing-live-serial-console.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-advanced-customizing-live-{boot}-serial-console_{context}"] -= Modifying a live install {boot-media} to enable the serial console - -On clusters installed with {product-title} 4.12 and above, the serial console is disabled by default and all output is written to the graphical console. You can enable the serial console with the following procedure. - -.Procedure - -. Download the `coreos-installer` binary from the link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[`coreos-installer` image mirror] page. - -ifeval::["{boot-media}" == "ISO image"] -. Retrieve the {op-system} ISO image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and run the following command to customize the ISO image to enable the serial console to receive output: -+ -[source,terminal] ----- -$ coreos-installer iso customize rhcos-<version>-live.x86_64.iso \ - --dest-ignition <path> \//<1> - --dest-console tty0 \//<2> - --dest-console ttyS0,<options> \//<3> - --dest-device /dev/sda <4> ----- -+ -<1> The location of the Ignition config to install. -<2> The desired secondary console. In this case, the graphical console. Omitting this option will disable the graphical console. -<3> The desired primary console. In this case, the serial console. The `options` field defines the baud rate and other settings. A common value for this field is `115200n8`. If no options are provided, the default kernel value of `9600n8` is used. For more information on the format of this option, see the link:https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[Linux kernel serial console] documentation. -<4> The specified disk to install to. In this case, `/dev/sda`. If you omit this option, the {boot-media} automatically runs the installation program which will fail unless you also specify the `coreos.inst.install_dev` kernel argument. -+ -[NOTE] -==== -The `--dest-console` option affects the installed system and not the live ISO system. 
To modify the console for a live ISO system, use the `--live-karg-append` option and specify the console with `console=`. -==== -+ -Your customizations are applied and affect every subsequent boot of the {boot-media}. - -. Optional: To remove the {boot-media} customizations and return the image to its original state, run the following command: -+ -[source,terminal] ----- -$ coreos-installer iso reset rhcos-<version>-live.x86_64.iso ----- -+ -You can now recustomize the live {boot-media} or use it in its original state. - -endif::[] - -ifeval::["{boot-media}" == "PXE environment"] -. Retrieve the {op-system} `kernel`, `initramfs` and `rootfs` files from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and the Ignition config file, and then run the following command to create a new customized `initramfs` file that enables the serial console to receive output: -+ -[source,terminal] ----- -$ coreos-installer pxe customize rhcos-<version>-live-initramfs.x86_64.img \ - --dest-ignition <path> \//<1> - --dest-console tty0 \//<2> - --dest-console ttyS0,<options> \//<3> - --dest-device /dev/sda \//<4> - -o rhcos-<version>-custom-initramfs.x86_64.img ----- -+ -<1> The location of the Ignition config to install. -<2> The desired secondary console. In this case, the graphical console. Omitting this option will disable the graphical console. -<3> The desired primary console. In this case, the serial console. The `options` field defines the baud rate and other settings. A common value for this field is `115200n8`. If no options are provided, the default kernel value of `9600n8` is used. For more information on the format of this option, see the link:https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[Linux kernel serial console] documentation. -<4> The specified disk to install to. In this case, `/dev/sda`. If you omit this option, the {boot-media} automatically runs the installer which will fail unless you also specify the `coreos.inst.install_dev` kernel argument. -+ -Your customizations are applied and affect every subsequent boot of the {boot-media}. -endif::[] diff --git a/modules/installation-user-infra-machines-advanced-customizing-live.adoc b/modules/installation-user-infra-machines-advanced-customizing-live.adoc deleted file mode 100644 index 4e328052194f..000000000000 --- a/modules/installation-user-infra-machines-advanced-customizing-live.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing_bare_metal/installing-bare-metal-network-customizations.adoc - -[id="installation-user-infra-machines-advanced-customizing-live-{boot}_{context}"] -= Customizing a live {op-system} {boot-media} -You can customize a live {op-system} {boot-media} directly with the -ifeval::["{boot-media}" == "ISO image"] -`coreos-installer iso customize` -endif::[] -ifeval::["{boot-media}" == "PXE environment"] -`coreos-installer pxe customize` -endif::[] -subcommand. When you boot the {boot-media}, the customizations are applied automatically. - -You can use this feature to configure the {boot-media} to automatically install {op-system}. - -.Procedure - -. Download the `coreos-installer` binary from the link:https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/[`coreos-installer` image mirror] page. 
- -ifeval::["{boot-media}" == "ISO image"] -. Retrieve the {op-system} ISO image from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and the Ignition config file, and then run the following command to inject the Ignition config directly into the ISO image: -+ -[source,terminal] ----- -$ coreos-installer iso customize rhcos-<version>-live.x86_64.iso \ - --dest-ignition bootstrap.ign \ <1> - --dest-device /dev/sda <2> ----- -endif::[] - -ifeval::["{boot-media}" == "PXE environment"] -. Retrieve the {op-system} `kernel`, `initramfs` and `rootfs` files from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/[{op-system} image mirror] page and the Ignition config file, and then run the following command to create a new `initramfs` file that contains the customizations from your Ignition config: -+ -[source,terminal] ----- -$ coreos-installer pxe customize rhcos-<version>-live-initramfs.x86_64.img \ - --dest-ignition bootstrap.ign \ <1> - --dest-device /dev/sda \ <2> - -o rhcos-<version>-custom-initramfs.x86_64.img ----- -endif::[] -<1> The Ignition config file that is generated from `openshift-installer`. -<2> When you specify this option, the {boot-media} automatically runs an install. Otherwise, the image remains configured for installing, but does not do so automatically unless you specify the `coreos.inst.install_dev` kernel argument. -+ -Your customizations are applied and affect every subsequent boot of the {boot-media}. - -ifeval::["{boot-media}" == "ISO image"] -. To remove the ISO image customizations and return the image to its pristine state, run: -+ -[source,terminal] ----- -$ coreos-installer iso reset rhcos-<version>-live.x86_64.iso ----- -+ -You can now re-customize the live ISO image or use it in its pristine state. -endif::[] diff --git a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc b/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc deleted file mode 100644 index 074b3fef2a49..000000000000 --- a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-advanced-enabling-serial-console_{context}"] -= Enabling the serial console for PXE and ISO installations - -By default, the {op-system-first} serial console is disabled and all output is written to the graphical console. You can enable the serial console for an ISO installation and reconfigure the bootloader so that output is sent to both the serial console and the graphical console. - -.Procedure - -. Boot the ISO installer. - -. Run the `coreos-installer` command to install the system, adding the `--console` option once to specify the graphical console, and a second time to specify the serial console: -+ -[source,terminal] ----- -$ coreos-installer install \ - --console=tty0 \//<1> - --console=ttyS0,<options> \//<2> - --ignition-url=http://host/worker.ign /dev/sda ----- -+ -<1> The desired secondary console. In this case, the graphical console. Omitting this option will disable the graphical console. -<2> The desired primary console. In this case, the serial console.
The `options` field defines the baud rate and other settings. A common value for this field is `115200n8`. If no options are provided, the default kernel value of `9600n8` is used. For more information on the format of this option, see the link:https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[Linux kernel serial console] documentation. -+ -. Reboot into the installed system. -+ -[NOTE] -==== -A similar outcome can be obtained by using the `coreos-installer install --append-karg` option, and specifying the console with `console=`. However, this will only set the console for the kernel and not the bootloader. -==== - -To configure a PXE installation, make sure the `coreos.inst.install_dev` kernel command line option is omitted, and use the shell prompt to run `coreos-installer` manually using the above ISO installation procedure. - diff --git a/modules/installation-user-infra-machines-advanced.adoc b/modules/installation-user-infra-machines-advanced.adoc deleted file mode 100644 index e6218590621d..000000000000 --- a/modules/installation-user-infra-machines-advanced.adoc +++ /dev/null @@ -1,291 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing_bare_metal/installing-bare-metal-network-customizations.adoc - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-advanced_{context}"] -= Advanced {op-system} installation configuration - -A key benefit of manually provisioning the {op-system-first} -nodes for {product-title} is the ability to perform configuration that is not -available through the default {product-title} installation methods. -This section describes some of the configurations that you can do using -techniques that include: - -* Passing kernel arguments to the live installer -* Running `coreos-installer` manually from the live system -* Customizing a live ISO or PXE boot image - -The advanced configuration topics for manual {op-system-first} -installations detailed in this section relate to disk partitioning, networking, and using Ignition configs in different ways. - -[id="installation-user-infra-machines-advanced_network_{context}"] -== Using advanced networking options for PXE and ISO installations -Networking for {product-title} nodes uses DHCP by default to gather all -necessary configuration settings. To set up static IP addresses or configure special settings, such as bonding, you can do one of the following: - -* Pass special kernel parameters when you boot the live installer. - -* Use a machine config to copy networking files to the installed system. - -* Configure networking from a live installer shell prompt, then copy those settings to the installed system so that they take effect when the installed system first boots. - -To configure a PXE or iPXE installation, use one of the following options: - -* See the "Advanced RHCOS installation reference" tables. -* Use a machine config to copy networking files to the installed system. - -To configure an ISO installation, use the following procedure. - -.Procedure - -. Boot the ISO installer. -. From the live system shell prompt, configure networking for the live -system using available RHEL tools, such as `nmcli` or `nmtui`. -. Run the `coreos-installer` command to install the system, adding the `--copy-network` option to copy networking configuration.
For example: -+ -[source,terminal] ----- -$ sudo coreos-installer install --copy-network \ - --ignition-url=http://host/worker.ign /dev/sda ----- -+ -[IMPORTANT] -==== -The `--copy-network` option only copies networking configuration found under `/etc/NetworkManager/system-connections`. In particular, it does not copy the system hostname. -==== - -. Reboot into the installed system. - -[role="_additional-resources"] -.Additional resources - -* See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/configuring_and_managing_networking/index#getting-started-with-nmcli_configuring-and-managing-networking[Getting started with nmcli] and link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/configuring_and_managing_networking/index#getting-started-with-nmtui_configuring-and-managing-networking[Getting started with nmtui] in the {op-system-base} 8 documentation for more information about the `nmcli` and `nmtui` tools. - -[id="installation-user-infra-machines-advanced_disk_{context}"] -== Disk partitioning - -// This content is not modularized, so any updates to this "Disk partitioning" section should be checked against the module created for vSphere UPI parity in the module file named `installation-disk-partitioning.adoc` for consistency until such time as this large assembly can be modularized. - -The disk partitions are created on {product-title} cluster nodes during the {op-system-first} installation. Each {op-system} node of a particular architecture uses the same partition layout, unless the default partitioning configuration is overridden. During the {op-system} installation, the size of the root file system is increased to use the remaining available space on the target device. - -There are two cases where you might want to override the default partitioning when installing {op-system} on an {product-title} cluster node: - -* Creating separate partitions: For greenfield installations on an empty -disk, you might want to add separate storage to a partition. This is -officially supported for mounting `/var` or a subdirectory of `/var`, such as `/var/lib/etcd`, on a separate partition, but not both. -+ -[IMPORTANT] -==== -For disk sizes larger than 100GB, and especially disk sizes larger than 1TB, create a separate `/var` partition. See "Creating a separate `/var` partition" and this link:https://access.redhat.com/solutions/5587281[Red Hat Knowledgebase article] for more information. -==== -+ -[IMPORTANT] -==== -Kubernetes supports only two file system partitions. If you add more than one partition to the original configuration, Kubernetes cannot monitor all of them. -==== - -* Retaining existing partitions: For a brownfield installation where you are reinstalling {product-title} on an existing node and want to retain data partitions installed from your previous operating system, there are both boot arguments and options to `coreos-installer` that allow you to retain existing data partitions. - -[WARNING] -==== -The use of custom partitions could result in those partitions not being monitored by {product-title} or alerted on. If you are overriding the default partitioning, see link:https://access.redhat.com/articles/4766521[Understanding OpenShift File System Monitoring (eviction conditions)] for more information about how {product-title} monitors your host file systems. 
-==== - -[id="installation-user-infra-machines-advanced_vardisk_{context}"] -=== Creating a separate `/var` partition -In general, you should use the default disk partitioning that is created during the {op-system} installation. However, there are cases where you might want to create a separate partition for a directory that you expect to grow. - -{product-title} supports the addition of a single partition to attach -storage to either the `/var` directory or a subdirectory of `/var`. -For example: - -* `/var/lib/containers`: Holds container-related content that can grow -as more images and containers are added to a system. -* `/var/lib/etcd`: Holds data that you might want to keep separate for purposes such as performance optimization of etcd storage. -* `/var`: Holds data that you might want to keep separate for purposes such as auditing. -+ -[IMPORTANT] -==== -For disk sizes larger than 100GB, and especially larger than 1TB, create a separate `/var` partition. -==== - -Storing the contents of a `/var` directory separately makes it easier to grow storage for those areas as needed and reinstall {product-title} at a later date and keep that data intact. With this method, you will not have to pull all your containers again, nor will you have to copy massive log files when you update systems. - -The use of a separate partition for the `/var` directory or a subdirectory of `/var` also prevents data growth in the partitioned directory from filling up the root file system. - -The following procedure sets up a separate `/var` partition by adding a machine config manifest that is wrapped into the Ignition config file for a node type during the preparation phase of an installation. - -.Procedure - -. On your installation host, change to the directory that contains the {product-title} installation program and generate the Kubernetes manifests for the cluster: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_directory> ----- - -. Create a Butane config that configures the additional partition. For example, name the file `$HOME/clusterconfig/98-var-partition.bu`, change the disk device name to the name of the storage device on the `worker` systems, and set the storage size as appropriate. This example places the `/var` directory on a separate partition: -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 98-var-partition -storage: - disks: - - device: /dev/<device_name> <1> - partitions: - - label: var - start_mib: <partition_start_offset> <2> - size_mib: <partition_size> <3> - filesystems: - - device: /dev/disk/by-partlabel/var - path: /var - format: xfs - mount_options: [defaults, prjquota] <4> - with_mount_unit: true ----- -+ -<1> The storage device name of the disk that you want to partition. -<2> When adding a data partition to the boot disk, a minimum offset value of 25000 mebibytes is recommended. The root file system is automatically resized to fill all available space up to the specified offset. If no offset value is specified, or if the specified value is smaller than the recommended minimum, the resulting root file system will be too small, and future reinstalls of {op-system} might overwrite the beginning of the data partition. -<3> The size of the data partition in mebibytes. -<4> The `prjquota` mount option must be enabled for filesystems used for container storage. 
-+ -[NOTE] -==== -When creating a separate `/var` partition, you cannot use different instance types for compute nodes, if the different instance types do not have the same device name. -==== - -. Create a manifest from the Butane config and save it to the `clusterconfig/openshift` directory. For example, run the following command: -+ -[source,terminal] ----- -$ butane $HOME/clusterconfig/98-var-partition.bu -o $HOME/clusterconfig/openshift/98-var-partition.yaml ----- - -. Create the Ignition config files: -+ -[source,terminal] ----- -$ openshift-install create ignition-configs --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the same installation directory. -+ -Ignition config files are created for the bootstrap, control plane, and compute nodes in the installation directory: -+ ----- -. -├── auth -│ ├── kubeadmin-password -│ └── kubeconfig -├── bootstrap.ign -├── master.ign -├── metadata.json -└── worker.ign ----- -+ -The files in the `<installation_directory>/manifest` and `<installation_directory>/openshift` directories are wrapped into the Ignition config files, including the file that contains the `98-var-partition` custom `MachineConfig` object. - -.Next steps - -* You can apply the custom disk partitioning by referencing the Ignition config files during the {op-system} installations. - -[id="installation-user-infra-machines-advanced_retaindisk_{context}"] -=== Retaining existing partitions - -For an ISO installation, you can add options to the `coreos-installer` command -that cause the installer to maintain one or more existing partitions. -For a PXE installation, you can add `coreos.inst.*` options to the `APPEND` parameter to preserve partitions. - -Saved partitions might be data partitions from an existing {product-title} system. You can identify the disk partitions you want to keep either by partition label or by number. - -[NOTE] -==== -If you save existing partitions, and those partitions do not leave enough space for {op-system}, the installation will fail without damaging the saved partitions. -==== - -.Retaining existing partitions during an ISO installation - -This example preserves any partition in which the partition label begins with `data` (`data*`): - -[source,terminal] ----- -# coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ - --save-partlabel 'data*' /dev/sda ----- - -The following example illustrates running the `coreos-installer` in a way that preserves -the sixth (6) partition on the disk: - -[source,terminal] ----- -# coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ - --save-partindex 6 /dev/sda ----- - -This example preserves partitions 5 and higher: - -[source,terminal] ----- -# coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign - --save-partindex 5- /dev/sda ----- - -In the previous examples where partition saving is used, `coreos-installer` recreates the partition immediately. 
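If you are not sure which `--save-partlabel` or `--save-partindex` values apply to your disk, you can list the existing partitions from the live environment before running the installation. This sketch assumes the target disk is `/dev/sda`; adjust the device name for your hardware.

[source,terminal]
----
$ lsblk -o NAME,SIZE,TYPE,FSTYPE,LABEL,PARTLABEL /dev/sda
----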
- -.Retaining existing partitions during a PXE installation - -This `APPEND` option preserves any partition in which the partition label begins with 'data' ('data*'): - -[source,terminal] ----- -coreos.inst.save_partlabel=data* ----- - -This `APPEND` option preserves partitions 5 and higher: - -[source,terminal] ----- -coreos.inst.save_partindex=5- ----- - -This `APPEND` option preserves partition 6: - -[source,terminal] ----- -coreos.inst.save_partindex=6 ----- - -[id="installation-user-infra-machines-advanced_ignition_{context}"] -== Identifying Ignition configs -When doing an {op-system} manual installation, there are two types of Ignition configs that you can provide, with different reasons for providing each one: - -* **Permanent install Ignition config**: Every manual {op-system} installation -needs to pass one of the Ignition config files generated by `openshift-installer`, -such as `bootstrap.ign`, `master.ign` and `worker.ign`, to carry out the -installation. -+ -[IMPORTANT] -==== -It is not recommended to modify these Ignition config files directly. You can update the manifest files that are wrapped into the Ignition config files, as outlined in examples in the preceding sections. -==== -+ -For PXE installations, you pass the Ignition configs on the `APPEND` line using the -`coreos.inst.ignition_url=` option. For ISO installations, after the ISO boots to -the shell prompt, you identify the Ignition config on the `coreos-installer` -command line with the `--ignition-url=` option. In both cases, only HTTP and HTTPS -protocols are supported. -+ - -* **Live install Ignition config**: This type can be created by using the `coreos-installer` `customize` subcommand and its various options. With this method, the Ignition config passes to the live install medium, runs immediately upon booting, and performs setup tasks before or after the {op-system} system installs to disk. This method should only be used for performing tasks that must be done once and not applied again later, such as with advanced partitioning that cannot be done using a machine config. -+ -For PXE or ISO boots, you can create the Ignition config -and `APPEND` the `ignition.config.url=` option to identify the location of -the Ignition config. You also need to append `ignition.firstboot ignition.platform.id=metal` -or the `ignition.config.url` option will be ignored. diff --git a/modules/installation-user-infra-machines-iso.adoc b/modules/installation-user-infra-machines-iso.adoc deleted file mode 100644 index aa3741f0c583..000000000000 --- a/modules/installation-user-infra-machines-iso.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc - -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -endif::[] - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-iso_{context}"] -= Installing {op-system} by using an ISO image - -You can use an ISO image to install {op-system} on the machines. 
- -.Prerequisites - -* You have created the Ignition config files for your cluster. -* You have configured suitable network, DNS and load balancing infrastructure. -* You have an HTTP server that can be accessed from your computer, and from the machines that you create. -* You have reviewed the _Advanced {op-system} installation configuration_ section for different ways to configure features, such as networking and disk partitioning. - -.Procedure - -. Obtain the SHA512 digest for each of your Ignition config files. For example, you can use the following on a system running Linux to get the SHA512 digest for your `bootstrap.ign` Ignition config file: -+ -[source,terminal] ----- -$ sha512sum <installation_directory>/bootstrap.ign ----- -+ -The digests are provided to the `coreos-installer` in a later step to validate the authenticity of the Ignition config files on the cluster nodes. - -. Upload the bootstrap, control plane, and compute node Ignition config files that the installation program created to your HTTP server. Note the URLs of these files. -+ -[IMPORTANT] -==== -You can add or change configuration settings in your Ignition configs before saving them to your HTTP server. If you plan to add more compute machines to your cluster after you finish installation, do not delete these files. -==== - -. From the installation host, validate that the Ignition config files are available on the URLs. The following example gets the Ignition config file for the bootstrap node: -+ -[source,terminal] ----- -$ curl -k http://<HTTP_server>/bootstrap.ign <1> ----- -+ -.Example output -[source,terminal] ----- - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed - 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{"ignition":{"version":"3.2.0"},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa... ----- -+ -Replace `bootstrap.ign` with `master.ign` or `worker.ign` in the command to validate that the Ignition config files for the control plane and compute nodes are also available. -+ -. 
Although it is possible to obtain the {op-system} images that are required for your preferred method of installing operating system instances from the -ifdef::openshift-enterprise[] -ifndef::ibm-power[] -link:https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/[{op-system} image mirror] -endif::ibm-power[] -endif::openshift-enterprise[] -ifdef::openshift-origin[] -link:https://getfedora.org/en/coreos/download?tab=metal_virtualized&stream=stable[{op-system}] -endif::openshift-origin[] -ifdef::ibm-power[] -link:https://mirror.openshift.com/pub/openshift-v4/ppc64le/dependencies/rhcos/[{op-system} image mirror] -endif::ibm-power[] -page, the recommended way to obtain the correct version of your {op-system} images are from the output of `openshift-install` command: -+ -[source,terminal] ----- -$ openshift-install coreos print-stream-json | grep '\.iso[^.]' ----- -+ -.Example output -[source,terminal] -ifndef::openshift-origin[] ----- -"location": "<url>/art/storage/releases/rhcos-4.13-aarch64/<release>/aarch64/rhcos-<release>-live.aarch64.iso", -"location": "<url>/art/storage/releases/rhcos-4.13-ppc64le/<release>/ppc64le/rhcos-<release>-live.ppc64le.iso", -"location": "<url>/art/storage/releases/rhcos-4.13-s390x/<release>/s390x/rhcos-<release>-live.s390x.iso", -"location": "<url>/art/storage/releases/rhcos-4.13/<release>/x86_64/rhcos-<release>-live.x86_64.iso", ----- -endif::openshift-origin[] -ifdef::openshift-origin[] ----- -"location": "<url>/prod/streams/stable/builds/<release>/x86_64/fedora-coreos-<release>-live.x86_64.iso", ----- -endif::openshift-origin[] -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. You must download images with the highest version that is less than or equal to the {product-title} version that you install. Use the image versions that match your {product-title} version if they are available. Use only ISO images for this procedure. {op-system} qcow2 images are not supported for this installation type. -==== -+ -ISO file names resemble the following example: -+ -ifndef::openshift-origin[] -`rhcos-<version>-live.<architecture>.iso` -endif::openshift-origin[] -ifdef::openshift-origin[] -`fedora-coreos-<version>-live.<architecture>.iso` -endif::openshift-origin[] - -. Use the ISO to start the {op-system} installation. Use one of the following installation options: -** Burn the ISO image to a disk and boot it directly. -** Use ISO redirection by using a lights-out management (LOM) interface. - -. Boot the {op-system} ISO image without specifying any options or interrupting the live boot sequence. Wait for the installer to boot into a shell prompt in the {op-system} live environment. -+ -[NOTE] -==== -It is possible to interrupt the {op-system} installation boot process to add kernel arguments. However, for this ISO procedure you should use the `coreos-installer` command as outlined in the following steps, instead of adding kernel arguments. -==== - -. Run the `coreos-installer` command and specify the options that meet your installation requirements. 
At a minimum, you must specify the URL that points to the Ignition config file for the node type, and the device that you are installing to: -+ -[source,terminal] ----- -$ sudo coreos-installer install --ignition-url=http://<HTTP_server>/<node_type>.ign <device> --ignition-hash=sha512-<digest> <1><2> ----- -<1> You must run the `coreos-installer` command by using `sudo`, because the `core` user does not have the required root privileges to perform the installation. -<2> The `--ignition-hash` option is required when the Ignition config file is obtained through an HTTP URL to validate the authenticity of the Ignition config file on the cluster node. `<digest>` is the Ignition config file SHA512 digest obtained in a preceding step. -+ -[NOTE] -==== -If you want to provide your Ignition config files through an HTTPS server that uses TLS, you can add the internal certificate authority (CA) to the system trust store before running `coreos-installer`. -==== -+ -The following example initializes a bootstrap node installation to the `/dev/sda` device. The Ignition config file for the bootstrap node is obtained from an HTTP web server with the IP address 192.168.1.2: -+ -[source,terminal] ----- -$ sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b ----- - -. Monitor the progress of the {op-system} installation on the console of the machine. -+ -[IMPORTANT] -==== -Be sure that the installation is successful on each node before commencing with the {product-title} installation. Observing the installation process can also help to determine the cause of {op-system} installation issues that might arise. -==== - -. After {op-system} installs, you must reboot the system. During the system reboot, it applies the Ignition config file that you specified. - -. Check the console output to verify that Ignition ran. -+ -.Example command -[source,terminal] ----- -Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) -Ignition: user-provided config was applied ----- - -. Continue to create the other machines for your cluster. -+ -[IMPORTANT] -==== -You must create the bootstrap and control plane machines at this time. If the control plane machines are not made schedulable, also create at least two compute machines before you install {product-title}. -==== -+ -If the required network, DNS, and load balancer infrastructure are in place, the {product-title} bootstrap process begins automatically after the {op-system} nodes have rebooted. -+ -[NOTE] -==== -{op-system} nodes do not include a default password for the `core` user. You can access the nodes by running `ssh core@<node>.<cluster_name>.<base_domain>` as a user with access to the SSH private key that is paired to the public key that you specified in your `install_config.yaml` file. {product-title} 4 cluster nodes running {op-system} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. However, when investigating installation issues, if the {product-title} API is not available, or the kubelet is not properly functioning on a target node, SSH access might be required for debugging or disaster recovery. 
-==== - -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -endif::[] diff --git a/modules/installation-user-infra-machines-pxe.adoc b/modules/installation-user-infra-machines-pxe.adoc deleted file mode 100644 index 374099c74a91..000000000000 --- a/modules/installation-user-infra-machines-pxe.adoc +++ /dev/null @@ -1,300 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc - -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -:only-pxe: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: -:only-pxe: -endif::[] - -:_content-type: PROCEDURE -[id="installation-user-infra-machines-pxe_{context}"] -ifndef::only-pxe[] -= Installing {op-system} by using PXE or iPXE booting -endif::only-pxe[] -ifdef::only-pxe[] -= Installing {op-system} by using PXE booting -endif::only-pxe[] - -ifndef::only-pxe[] -You can use PXE or iPXE booting to install {op-system} on the machines. -endif::only-pxe[] -ifdef::only-pxe[] -You can use PXE booting to install {op-system} on the machines. -endif::only-pxe[] - -.Prerequisites - -* You have created the Ignition config files for your cluster. -* You have configured suitable network, DNS and load balancing infrastructure. -ifndef::only-pxe[] -* You have configured suitable PXE or iPXE infrastructure. -endif::only-pxe[] -ifdef::only-pxe[] -* You have configured suitable PXE infrastructure. -endif::only-pxe[] -* You have an HTTP server that can be accessed from your computer, and from the machines that you create. -* You have reviewed the _Advanced {op-system} installation configuration_ section for different ways to configure features, such as networking and disk partitioning. - -.Procedure - -. Upload the bootstrap, control plane, and compute node Ignition config files that the -installation program created to your HTTP server. Note the URLs of these files. -+ -[IMPORTANT] -==== -You can add or change configuration settings in your Ignition configs -before saving them to your HTTP server. -If you plan to add more compute machines to your cluster after you finish -installation, do not delete these files. -==== - -. From the installation host, validate that the Ignition config files are available on the URLs. The following example gets the Ignition config file for the bootstrap node: -+ -[source,terminal] ----- -$ curl -k http://<HTTP_server>/bootstrap.ign <1> ----- -+ -.Example output -[source,terminal] ----- - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed - 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{"ignition":{"version":"3.2.0"},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa... ----- -+ -Replace `bootstrap.ign` with `master.ign` or `worker.ign` in the command to validate -that the Ignition config files for the control plane and compute nodes are also available. - -. 
Although it is possible to obtain the {op-system} `kernel`, `initramfs` and `rootfs` -files that are required for your preferred method of installing operating system instances from the -ifdef::openshift-enterprise[] -ifndef::ibm-power[] -link:https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/[{op-system} image mirror] -endif::ibm-power[] -endif::openshift-enterprise[] -ifdef::openshift-origin[] -link:https://getfedora.org/en/coreos/download?tab=metal_virtualized&stream=stable[{op-system}] -endif::openshift-origin[] -ifdef::ibm-power[] -link:https://mirror.openshift.com/pub/openshift-v4/ppc64le/dependencies/rhcos/[{op-system} image mirror] -endif::ibm-power[] -page, the recommended way to obtain the correct version of your {op-system} files are -from the output of `openshift-install` command: -+ -[source,terminal] ----- -$ openshift-install coreos print-stream-json | grep -Eo '"https.*(kernel-|initramfs.|rootfs.)\w+(\.img)?"' ----- -+ -.Example output -[source,terminal] -ifndef::openshift-origin[] ----- -"<url>/art/storage/releases/rhcos-4.13-aarch64/<release>/aarch64/rhcos-<release>-live-kernel-aarch64" -"<url>/art/storage/releases/rhcos-4.13-aarch64/<release>/aarch64/rhcos-<release>-live-initramfs.aarch64.img" -"<url>/art/storage/releases/rhcos-4.13-aarch64/<release>/aarch64/rhcos-<release>-live-rootfs.aarch64.img" -"<url>/art/storage/releases/rhcos-4.13-ppc64le/49.84.202110081256-0/ppc64le/rhcos-<release>-live-kernel-ppc64le" -"<url>/art/storage/releases/rhcos-4.13-ppc64le/<release>/ppc64le/rhcos-<release>-live-initramfs.ppc64le.img" -"<url>/art/storage/releases/rhcos-4.13-ppc64le/<release>/ppc64le/rhcos-<release>-live-rootfs.ppc64le.img" -"<url>/art/storage/releases/rhcos-4.13-s390x/<release>/s390x/rhcos-<release>-live-kernel-s390x" -"<url>/art/storage/releases/rhcos-4.13-s390x/<release>/s390x/rhcos-<release>-live-initramfs.s390x.img" -"<url>/art/storage/releases/rhcos-4.13-s390x/<release>/s390x/rhcos-<release>-live-rootfs.s390x.img" -"<url>/art/storage/releases/rhcos-4.13/<release>/x86_64/rhcos-<release>-live-kernel-x86_64" -"<url>/art/storage/releases/rhcos-4.13/<release>/x86_64/rhcos-<release>-live-initramfs.x86_64.img" -"<url>/art/storage/releases/rhcos-4.13/<release>/x86_64/rhcos-<release>-live-rootfs.x86_64.img" ----- -endif::openshift-origin[] -ifdef::openshift-origin[] ----- -"<url>/prod/streams/stable/builds/<release>/x86_64/fedora-coreos-<release>-live-kernel-x86_64" -"<url>/prod/streams/stable/builds/<release>/x86_64/fedora-coreos-<release>-live-initramfs.x86_64.img" -"<url>/prod/streams/stable/builds/<release>/x86_64/fedora-coreos-<release>-live-rootfs.x86_64.img" ----- -endif::openshift-origin[] -+ -[IMPORTANT] -==== -The {op-system} artifacts might not change with every release of {product-title}. -You must download images with the highest version that is less than or equal -to the {product-title} version that you install. Only use -the appropriate `kernel`, `initramfs`, and `rootfs` artifacts described below -for this procedure. -{op-system} QCOW2 images are not supported for this installation type. -==== -+ -The file names contain the {product-title} version number. 
-They resemble the following examples: -+ -ifndef::openshift-origin[] -** `kernel`: `rhcos-<version>-live-kernel-<architecture>` -** `initramfs`: `rhcos-<version>-live-initramfs.<architecture>.img` -** `rootfs`: `rhcos-<version>-live-rootfs.<architecture>.img` -endif::openshift-origin[] -ifdef::openshift-origin[] -** `kernel`: `fedora-coreos-<version>-live-kernel-<architecture>` -** `initramfs`: `fedora-coreos-<version>-live-initramfs.<architecture>.img` -** `rootfs`: `fedora-coreos-<version>-live-rootfs.<architecture>.img` -endif::openshift-origin[] - -. Upload the `rootfs`, `kernel`, and `initramfs` files -to your HTTP server. -+ -[IMPORTANT] -==== -If you plan to add more compute machines to your cluster after you finish -installation, do not delete these files. -==== - -. Configure the network boot infrastructure so that the machines boot from their -local disks after {op-system} is installed on them. - -ifndef::only-pxe[] -. Configure PXE or iPXE installation for the {op-system} images and begin the installation. -endif::only-pxe[] -ifdef::only-pxe[] -. Configure PXE installation for the {op-system} images and begin the installation. -endif::only-pxe[] -+ -ifndef::only-pxe[] -Modify one of the following example menu entries for your environment and verify -that the image and Ignition files are properly accessible: -endif::only-pxe[] - -ifdef::only-pxe[] -Modify the following example menu entry for your environment and verify that the image and Ignition files are properly accessible: -endif::only-pxe[] -ifndef::only-pxe[] -** For PXE (`x86_64`): -endif::only-pxe[] -+ ----- -DEFAULT pxeboot -TIMEOUT 20 -PROMPT 0 -LABEL pxeboot - KERNEL http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> <1> - APPEND initrd=http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/bootstrap.ign <2> <3> ----- -<1> Specify the location of the live `kernel` file that you uploaded to your HTTP -server. -The URL must be HTTP, TFTP, or FTP; HTTPS and NFS are not supported. -<2> If you use multiple NICs, specify a single interface in the `ip` option. -For example, to use DHCP on a NIC that is named `eno1`, set `ip=eno1:dhcp`. -<3> Specify the locations of the {op-system} files that you uploaded to your -HTTP server. The `initrd` parameter value is the location of the `initramfs` file, -the `coreos.live.rootfs_url` parameter value is the location of the -`rootfs` file, and the `coreos.inst.ignition_url` parameter value is the -location of the bootstrap Ignition config file. -You can also add more kernel arguments to the `APPEND` line to configure networking -or other boot options. -+ -[NOTE] -==== -This configuration does not enable serial console access on machines with a graphical console. To configure a different console, add one or more `console=` arguments to the `APPEND` line. For example, add `console=tty0 console=ttyS0` to set the first PC serial port as the primary console and the graphical console as a secondary console. For more information, see link:https://access.redhat.com/articles/7212[How does one set up a serial terminal and/or console in Red Hat Enterprise Linux?] and "Enabling the serial console for PXE and ISO installation" in the "Advanced {op-system} installation configuration" section. 
-==== - -ifndef::only-pxe[] -** For iPXE (`x86_64` -ifndef::openshift-origin[] -+ `aarch64` -endif::openshift-origin[] -): -+ ----- -kernel http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> initrd=main coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/bootstrap.ign <1> <2> -initrd --name main http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img <3> -boot ----- -<1> Specify the locations of the {op-system} files that you uploaded to your -HTTP server. The `kernel` parameter value is the location of the `kernel` file, -the `initrd=main` argument is needed for booting on UEFI systems, -the `coreos.live.rootfs_url` parameter value is the location of the `rootfs` file, -and the `coreos.inst.ignition_url` parameter value is the -location of the bootstrap Ignition config file. -<2> If you use multiple NICs, specify a single interface in the `ip` option. -For example, to use DHCP on a NIC that is named `eno1`, set `ip=eno1:dhcp`. -<3> Specify the location of the `initramfs` file that you uploaded to your HTTP server. -+ -[NOTE] -==== -This configuration does not enable serial console access on machines with a graphical console. To configure a different console, add one or more `console=` arguments to the `kernel` line. For example, add `console=tty0 console=ttyS0` to set the first PC serial port as the primary console and the graphical console as a secondary console. For more information, see link:https://access.redhat.com/articles/7212[How does one set up a serial terminal and/or console in Red Hat Enterprise Linux?] and "Enabling the serial console for PXE and ISO installation" in the "Advanced {op-system} installation configuration" section. -==== -+ -ifndef::openshift-origin[] -[NOTE] -==== -To network boot the CoreOS `kernel` on `aarch64` architecture, you need to use a version of iPXE build with the `IMAGE_GZIP` option enabled. See link:https://ipxe.org/buildcfg/image_gzip[`IMAGE_GZIP` option in iPXE]. -==== -endif::openshift-origin[] -endif::only-pxe[] -ifndef::only-pxe,openshift-origin[] -** For PXE (with UEFI and Grub as second stage) on `aarch64`: -+ ----- -menuentry 'Install CoreOS' { - linux rhcos-<version>-live-kernel-<architecture> coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/bootstrap.ign <1> <2> - initrd rhcos-<version>-live-initramfs.<architecture>.img <3> -} ----- -<1> Specify the locations of the {op-system} files that you uploaded to your -HTTP/TFTP server. The `kernel` parameter value is the location of the `kernel` file on your TFTP server. -The `coreos.live.rootfs_url` parameter value is the location of the `rootfs` file, and the `coreos.inst.ignition_url` parameter value is the location of the bootstrap Ignition config file on your HTTP Server. -<2> If you use multiple NICs, specify a single interface in the `ip` option. -For example, to use DHCP on a NIC that is named `eno1`, set `ip=eno1:dhcp`. -<3> Specify the location of the `initramfs` file that you uploaded to your TFTP server. - -endif::only-pxe,openshift-origin[] - -. Monitor the progress of the {op-system} installation on the console of the machine. -+ -[IMPORTANT] -==== -Be sure that the installation is successful on each node before commencing with the {product-title} installation. 
Observing the installation process can also help to determine the cause of {op-system} installation issues that might arise. -==== - -. After {op-system} installs, the system reboots. During reboot, the system applies the Ignition config file that you specified. - -. Check the console output to verify that Ignition ran. -+ -.Example command -[source,terminal] ----- -Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) -Ignition: user-provided config was applied ----- - -. Continue to create the machines for your cluster. -+ -[IMPORTANT] -==== -You must create the bootstrap and control plane machines at this time. If the -control plane machines are not made schedulable, also -create at least two compute machines before you install the cluster. -==== -+ -If the required network, DNS, and load balancer infrastructure are in place, the {product-title} bootstrap process begins automatically after the {op-system} nodes have rebooted. -+ -[NOTE] -==== -{op-system} nodes do not include a default password for the `core` user. You can access the nodes by running `ssh core@<node>.<cluster_name>.<base_domain>` as a user with access to the SSH private key that is paired to the public key that you specified in your `install_config.yaml` file. {product-title} 4 cluster nodes running {op-system} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. However, when investigating installation issues, if the {product-title} API is not available, or the kubelet is not properly functioning on a target node, SSH access might be required for debugging or disaster recovery. -==== - -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -:!only-pxe: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -:!only-pxe: -endif::[] diff --git a/modules/installation-user-infra-machines-static-network.adoc b/modules/installation-user-infra-machines-static-network.adoc deleted file mode 100644 index c5038e5a4338..000000000000 --- a/modules/installation-user-infra-machines-static-network.adoc +++ /dev/null @@ -1,637 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc - -ifeval::["{context}" == "installing-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:ibm-power: 
-:restricted: -endif::[] - -:_content-type: REFERENCE -[id="installation-user-infra-machines-static-network_{context}"] -= Advanced {op-system} installation reference - -This section illustrates the networking configuration and other advanced options that allow you to modify the {op-system-first} manual installation process. The following tables describe the kernel arguments and command-line options you can use with the {op-system} live installer and the `coreos-installer` command. - -[id="installation-user-infra-machines-routing-bonding_{context}"] -ifndef::ibm-z-kvm[] -== Networking and bonding options for ISO installations -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] -== Networking options for ISO installations -endif::ibm-z-kvm[] - -If you install {op-system} from an ISO image, you can add kernel arguments manually when you boot the image to configure networking for a node. If no networking arguments are specified, DHCP is activated in the initramfs when {op-system} detects that networking is required to fetch the Ignition config file. - -[IMPORTANT] -==== -When adding networking arguments manually, you must also add the `rd.neednet=1` kernel argument to bring the network up in the initramfs. -==== -ifndef::ibm-z-kvm[] -The following information provides examples for configuring networking and bonding on your {op-system} nodes for ISO installations. The examples describe how to use the `ip=`, `nameserver=`, and `bond=` kernel arguments. - -[NOTE] -==== -Ordering is important when adding the kernel arguments: `ip=`, `nameserver=`, and then `bond=`. -==== - -The networking options are passed to the `dracut` tool during system boot. For more information about the networking options supported by `dracut`, see the `dracut.cmdline` manual page. - -endif::ibm-z-kvm[] -ifdef::ibm-z-kvm[] -The following information provides examples for configuring networking on your {op-system} nodes for ISO installations. The examples describe how to use the `ip=` and `nameserver=` kernel arguments. - -[NOTE] -==== -Ordering is important when adding the kernel arguments: `ip=` and `nameserver=`. -==== - -The networking options are passed to the `dracut` tool during system boot. For more information about the networking options supported by `dracut`, see the `dracut.cmdline` manual page. -endif::ibm-z-kvm[] - -The following examples are the networking options for ISO installation. - -[discrete] -=== Configuring DHCP or static IP addresses - -To configure an IP address, either use DHCP (`ip=dhcp`) or set an individual static IP address (`ip=<host_ip>`). If setting a static IP, you must then identify the DNS server IP address (`nameserver=<dns_ip>`) on each node. The following example sets: - -* The node's IP address to `10.10.10.2` -* The gateway address to `10.10.10.254` -* The netmask to `255.255.255.0` -* The hostname to `core0.example.com` -* The DNS server address to `4.4.4.41` -* The auto-configuration value to `none`. No auto-configuration is required when IP networking is configured statically. - -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none -nameserver=4.4.4.41 ----- - -[NOTE] -==== -When you use DHCP to configure IP addressing for the {op-system} machines, the machines also obtain the DNS server information through DHCP. For DHCP-based deployments, you can define the DNS server address that is used by the {op-system} nodes through your DHCP server configuration. 
-==== - -[discrete] -=== Configuring an IP address without a static hostname - -You can configure an IP address without assigning a static hostname. If a static hostname is not set by the user, it will be picked up and automatically set by a reverse DNS lookup. To configure an IP address without a static hostname refer to the following example: - -* The node's IP address to `10.10.10.2` -* The gateway address to `10.10.10.254` -* The netmask to `255.255.255.0` -* The DNS server address to `4.4.4.41` -* The auto-configuration value to `none`. No auto-configuration is required when IP networking is configured statically. - -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0::enp1s0:none -nameserver=4.4.4.41 ----- - -[discrete] -=== Specifying multiple network interfaces - -You can specify multiple network interfaces by setting multiple `ip=` entries. - -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none -ip=10.10.10.3::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none ----- - -[discrete] -=== Configuring default gateway and route - -Optional: You can configure routes to additional networks by setting an `rd.route=` value. - -[NOTE] -==== -When you configure one or multiple networks, one default gateway is required. If the additional network gateway is different from the primary network gateway, the default gateway must be the primary network gateway. -==== - -* Run the following command to configure the default gateway: -+ -[source,terminal] ----- -ip=::10.10.10.254:::: ----- - -* Enter the following command to configure the route for the additional network: -+ -[source,terminal] ----- -rd.route=20.20.20.0/24:20.20.20.254:enp2s0 ----- - -[discrete] -=== Disabling DHCP on a single interface - -You can disable DHCP on a single interface, such as when there are two or more network interfaces and only one interface is being used. In the example, the `enp1s0` interface has a static networking configuration and DHCP is disabled for `enp2s0`, which is not used: - -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none -ip=::::core0.example.com:enp2s0:none ----- - -[discrete] -=== Combining DHCP and static IP configurations - -You can combine DHCP and static IP configurations on systems with multiple network interfaces, for example: - -[source,terminal] ----- -ip=enp1s0:dhcp -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none ----- - -[discrete] -=== Configuring VLANs on individual interfaces - -Optional: You can configure VLANs on individual interfaces by using the `vlan=` parameter. - -* To configure a VLAN on a network interface and use a static IP address, run the following command: -+ -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0.100:none -vlan=enp2s0.100:enp2s0 ----- - -* To configure a VLAN on a network interface and to use DHCP, run the following command: -+ -[source,terminal] ----- -ip=enp2s0.100:dhcp -vlan=enp2s0.100:enp2s0 ----- - -[discrete] -=== Providing multiple DNS servers - -You can provide multiple DNS servers by adding a `nameserver=` entry for each server, for example: - -[source,terminal] ----- -nameserver=1.1.1.1 -nameserver=8.8.8.8 ----- -ifndef::ibm-z-kvm[] - - -[discrete] -=== Bonding multiple network interfaces to a single interface - -Optional: You can bond multiple network interfaces to a single interface by using the `bond=` option. 
Refer to the following examples: - -* The syntax for configuring a bonded interface is: `bond=<name>[:<network_interfaces>][:options]` -+ -`<name>` is the bonding device name (`bond0`), `<network_interfaces>` represents a comma-separated list of physical (ethernet) interfaces (`em1,em2`), -and _options_ is a comma-separated list of bonding options. Enter `modinfo bonding` to see available options. - -* When you create a bonded interface using `bond=`, you must specify how the IP address is assigned and other -information for the bonded interface. - -** To configure the bonded interface to use DHCP, set the bond's IP address to `dhcp`. For example: -+ -[source,terminal] ----- -bond=bond0:em1,em2:mode=active-backup -ip=bond0:dhcp ----- - -** To configure the bonded interface to use a static IP address, enter the specific IP address you want and related information. For example: -ifndef::ibm-z[] -+ -[source,terminal] ----- -bond=bond0:em1,em2:mode=active-backup -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none ----- -endif::ibm-z[] -ifdef::ibm-z[] - -[source,terminal] ----- -bond=bond0:em1,em2:mode=active-backup,fail_over_mac=1 -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none ----- - -Always set the `fail_over_mac=1` option in active-backup mode, to avoid problems when shared OSA/RoCE cards are used. -endif::ibm-z[] - -ifdef::ibm-z[] -[discrete] -=== Bonding multiple network interfaces to a single interface - -Optional: You can configure VLANs on bonded interfaces by using the `vlan=` parameter and to use DHCP, for example: - -[source,terminal] ----- -ip=bond0.100:dhcp -bond=bond0:em1,em2:mode=active-backup -vlan=bond0.100:bond0 ----- - -Use the following example to configure the bonded interface with a VLAN and to use a static IP address: - -[source,terminal] ----- -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0.100:none -bond=bond0:em1,em2:mode=active-backup -vlan=bond0.100:bond0 ----- -endif::ibm-z[] - -ifndef::ibm-z[] - -[id="bonding-multiple-sriov-network-interfaces-to-dual-port_{context}"] -[discrete] -=== Bonding multiple SR-IOV network interfaces to a dual port NIC interface - -:FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices -include::snippets/technology-preview.adoc[leveloffset=+1] - -Optional: You can bond multiple SR-IOV network interfaces to a dual port NIC interface by using the `bond=` option. - -On each node, you must perform the following tasks: - -. Create the SR-IOV virtual functions (VFs) following the guidance in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_virtualization/managing-virtual-devices_configuring-and-managing-virtualization#managing-sr-iov-devices_managing-virtual-devices[Managing SR-IOV devices]. Follow the procedure in the "Attaching SR-IOV networking devices to virtual machines" section. - -. Create the bond, attach the desired VFs to the bond and set the bond link state up following the guidance in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_managing_networking/configuring-network-bonding_configuring-and-managing-networking[Configuring network bonding]. Follow any of the described procedures to create the bond. - -The following examples illustrate the syntax you must use: - -* The syntax for configuring a bonded interface is `bond=<name>[:<network_interfaces>][:options]`. 
-+ -`<name>` is the bonding device name (`bond0`), `<network_interfaces>` represents the virtual functions (VFs) by their known name in the kernel and shown in the output of the `ip link` command(`eno1f0`, `eno2f0`), and _options_ is a comma-separated list of bonding options. Enter `modinfo bonding` to see available options. - -* When you create a bonded interface using `bond=`, you must specify how the IP address is assigned and other information for the bonded interface. - -** To configure the bonded interface to use DHCP, set the bond's IP address to `dhcp`. For example: -+ -[source,terminal] ----- -bond=bond0:eno1f0,eno2f0:mode=active-backup -ip=bond0:dhcp ----- - -** To configure the bonded interface to use a static IP address, enter the specific IP address you want and related information. For example: -+ -[source,terminal] ----- -bond=bond0:eno1f0,eno2f0:mode=active-backup -ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none ----- -endif::ibm-z[] - -[discrete] -=== Using network teaming - -Optional: You can use a network teaming as an alternative to bonding by using the `team=` parameter: - -* The syntax for configuring a team interface is: `team=name[:network_interfaces]` -+ -_name_ is the team device name (`team0`) and _network_interfaces_ represents a comma-separated list of physical (ethernet) interfaces (`em1, em2`). - -[NOTE] -==== -Teaming is planned to be deprecated when {op-system} switches to an upcoming version of {op-system-base}. For more information, see this https://access.redhat.com/solutions/6509691[Red Hat Knowledgebase Article]. -==== - -Use the following example to configure a network team: - -[source,terminal] ----- -team=team0:em1,em2 -ip=team0:dhcp ----- -endif::ibm-z-kvm[] - -ifndef::ibm-z,ibm-z-kvm,ibm-power[] -[id="installation-user-infra-machines-coreos-installer-options_{context}"] -== `coreos-installer` options for ISO and PXE installations - -You can install {op-system} by running `coreos-installer install <options> <device>` at the command prompt, after booting into the {op-system} live environment from an ISO image. - -The following table shows the subcommands, options, and arguments you can pass to the `coreos-installer` command. - -.`coreos-installer` subcommands, command-line options, and arguments -|=== - -2+|*coreos-installer install subcommand* - -|*_Subcommand_* |*_Description_* - -a|`$ coreos-installer install <options> <device>` -a|Embed an Ignition config in an ISO image. - -2+|*coreos-installer install subcommand options* - -|*_Option_* |*_Description_* - -a| `-u`, `--image-url <url>` -a|Specify the image URL manually. - -a| `-f`, `--image-file <path>` -a|Specify a local image file manually. Used for debugging. - -a|`-i,` `--ignition-file <path>` -a|Embed an Ignition config from a file. - -a|`-I`, `--ignition-url <URL>` -a|Embed an Ignition config from a URL. - -a|`--ignition-hash <digest>` -a|Digest `type-value` of the Ignition config. - -a|`-p`, `--platform <name>` -a|Override the Ignition platform ID for the installed system. - -a|`--console <spec>` -a|Set the kernel and bootloader console for the installed system. For more information about the format of `<spec>`, see the link:https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[Linux kernel serial console] documentation. - -a|`--append-karg <arg>...` -a|Append a default kernel argument to the installed system. - -a|`--delete-karg <arg>...` -a|Delete a default kernel argument from the installed system. 
- -a|`-n`, `--copy-network` -a|Copy the network configuration from the install environment. - -[IMPORTANT] -==== -The `--copy-network` option only copies networking configuration found under `/etc/NetworkManager/system-connections`. In particular, it does not copy the system hostname. -==== - -a|`--network-dir <path>` -a|For use with `-n`. Default is `/etc/NetworkManager/system-connections/`. - -a|`--save-partlabel <lx>..` -a|Save partitions with this label glob. - -a|`--save-partindex <id>...` -a|Save partitions with this number or range. - -a|`--insecure` -a|Skip {op-system} image signature verification. - -a|`--insecure-ignition` -a|Allow Ignition URL without HTTPS or hash. - -a|`--architecture <name>` -a|Target CPU architecture. Valid values are `x86_64` and `aarch64`. - -a|`--preserve-on-error` -a|Do not clear partition table on error. - -a|`-h`, `--help` -a|Print help information. - -2+|*coreos-installer install subcommand argument* - -|*_Argument_* |*_Description_* - -a|`<device>` -a|The destination device. - -2+|*coreos-installer ISO subcommands* - -|*_Subcommand_* |*_Description_* - -a|`$ coreos-installer iso customize <options> <ISO_image>` -a|Customize a {op-system} live ISO image. - -a|`coreos-installer iso reset <options> <ISO_image>` -|Restore a {op-system} live ISO image to default settings. - -a|`coreos-installer iso ignition remove <options> <ISO_image>` -a|Remove the embedded Ignition config from an ISO image. - -2+|*coreos-installer ISO customize subcommand options* - -|*_Option_* |*_Description_* - -a|`--dest-ignition <path>` -a|Merge the specified Ignition config file into a new configuration fragment for the destination system. - -a|`--dest-console <spec>` -a|Specify the kernel and bootloader console for the destination system. - -a|`--dest-device <path>` -a|Install and overwrite the specified destination device. - -a|`--dest-karg-append <arg>` -a|Add a kernel argument to each boot of the destination system. - -a|`--dest-karg-delete <arg>` -a|Delete a kernel argument from each boot of the destination system. - -a|`--network-keyfile <path>` -a|Configure networking by using the specified NetworkManager keyfile for live and destination systems. - -a|`--ignition-ca <path>` -a|Specify an additional TLS certificate authority to be trusted by Ignition. - -a|`--pre-install <path>` -a|Run the specified script before installation. - -a|`--post-install <path>` -a|Run the specified script after installation. - -a|`--installer-config <path>` -a|Apply the specified installer configuration file. - -a|`--live-ignition <path>` -a|Merge the specified Ignition config file into a new configuration fragment for the live environment. - -a|`--live-karg-append <arg>` -a|Add a kernel argument to each boot of the live environment. - -a|`--live-karg-delete <arg>` -a|Delete a kernel argument from each boot of the live environment. - -a|`--live-karg-replace <k=o=n>` -a|Replace a kernel argument in each boot of the live environment, in the form `key=old=new`. - -a|`-f`, `--force` -a|Overwrite an existing Ignition config. - -a|`-o`, `--output <path>` -a|Write the ISO to a new output file. - -a|`-h`, `--help` -a|Print help information. - -2+|*coreos-installer PXE subcommands* - -|*_Subcommand_* |*_Description_* - -2+|Note that not all of these options are accepted by all subcommands. - -a|`coreos-installer pxe customize <options> <path>` -a|Customize a {op-system} live PXE boot config. - -a|`coreos-installer pxe ignition wrap <options>` -a|Wrap an Ignition config in an image. 
- -a|`coreos-installer pxe ignition unwrap <options> <image_name>` -a|Show the wrapped Ignition config in an image. - -2+|*coreos-installer PXE customize subcommand options* - -|*_Option_* |*_Description_* - -2+|Note that not all of these options are accepted by all subcommands. - -a|`--dest-ignition <path>` -a|Merge the specified Ignition config file into a new configuration fragment for the destination system. - -a|`--dest-console <spec>` -a|Specify the kernel and bootloader console for the destination system. - -a|`--dest-device <path>` -a|Install and overwrite the specified destination device. - -a|`--network-keyfile <path>` -a|Configure networking by using the specified NetworkManager keyfile for live and destination systems. - -a|`--ignition-ca <path>` -a|Specify an additional TLS certificate authority to be trusted by Ignition. - -a|`--pre-install <path>` -a|Run the specified script before installation. - -a|`--post-install <path>` -a|Run the specified script after installation. - -a|`--installer-config <path>` -a|Apply the specified installer configuration file. - -a|`--live-ignition <path>` -a|Merge the specified Ignition config file into a new configuration fragment for the live environment. - -a|`-o`, `--output <path>` -a|Write the initramfs to a new output file. - -[NOTE] -==== -This option is required for PXE environments. -==== - -a|`-h`, `--help` -a|Print help information. - -|=== - -[id="installation-user-infra-machines-coreos-inst-options_{context}"] -== `coreos.inst` boot options for ISO or PXE installations - -You can automatically invoke `coreos-installer` options at boot time by passing `coreos.inst` boot arguments to the {op-system} live installer. These are provided in addition to the standard boot arguments. - -* For ISO installations, the `coreos.inst` options can be added by interrupting the automatic boot at the bootloader menu. You can interrupt the automatic boot by pressing `TAB` while the *RHEL CoreOS (Live)* menu option is highlighted. - -* For PXE or iPXE installations, the `coreos.inst` options must be added to the `APPEND` line before the {op-system} live installer is booted. - -The following table shows the {op-system} live installer `coreos.inst` boot options for ISO and PXE installations. - -.`coreos.inst` boot options -|=== -|Argument |Description - -a|`coreos.inst.install_dev` - -a|Required. The block device on the system to install to. It is recommended to use the full path, such as `/dev/sda`, although `sda` is allowed. - -a|`coreos.inst.ignition_url` - -a|Optional: The URL of the Ignition config to embed into the installed system. If no URL is specified, no Ignition config is embedded. Only HTTP and HTTPS protocols are supported. - -a|`coreos.inst.save_partlabel` - -a|Optional: Comma-separated labels of partitions to preserve during the install. Glob-style wildcards are permitted. The specified partitions do not need to exist. - -a|`coreos.inst.save_partindex` - -a|Optional: Comma-separated indexes of partitions to preserve during the install. Ranges `m-n` are permitted, and either `m` or `n` can be omitted. The specified partitions do not need to exist. - -a|`coreos.inst.insecure` - -a|Optional: Permits the OS image that is specified by `coreos.inst.image_url` to be unsigned. - -a|`coreos.inst.image_url` - -a|Optional: Download and install the specified {op-system} image. - -* This argument should not be used in production environments and is intended for debugging purposes only.
- -* While this argument can be used to install a version of {op-system} that does not match the live media, it is recommended that you instead use the media that matches the version you want to install. - -* If you are using `coreos.inst.image_url`, you must also use `coreos.inst.insecure`. This is because the bare-metal media are not GPG-signed for {product-title}. - -* Only HTTP and HTTPS protocols are supported. - -a|`coreos.inst.skip_reboot` - -a|Optional: The system will not reboot after installing. After the install finishes, you will receive a prompt that allows you to inspect what is happening during installation. This argument should not be used in production environments and is intended for debugging purposes only. - -a|`coreos.inst.platform_id` - -a| Optional: The Ignition platform ID of the platform the {op-system} image is being installed on. Default is `metal`. This option determines whether or not to request an Ignition config from the cloud provider, such as VMware. For example: `coreos.inst.platform_id=vmware`. - -a|`ignition.config.url` - -a|Optional: The URL of the Ignition config for the live boot. For example, this can be used to customize how `coreos-installer` is invoked, or to run code before or after the installation. This is different from `coreos.inst.ignition_url`, which is the Ignition config for the installed system. -|=== - -endif::ibm-z,ibm-z-kvm,ibm-power[] - -ifeval::["{context}" == "installing-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z"] -:!ibm-z: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"] -:!ibm-z-kvm: -endif::[] -ifeval::["{context}" == "installing-ibm-power"] -:!ibm-power: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power"] -:!ibm-power: -:!restricted: -endif::[] diff --git a/modules/installation-user-provisioned-validating-dns.adoc b/modules/installation-user-provisioned-validating-dns.adoc deleted file mode 100644 index 8a5c168097c5..000000000000 --- a/modules/installation-user-provisioned-validating-dns.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal.adoc -// * installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc - -:_content-type: PROCEDURE -[id="installation-user-provisioned-validating-dns_{context}"] -= Validating DNS resolution for user-provisioned infrastructure - -You can validate your DNS configuration before installing {product-title} on user-provisioned infrastructure. 
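
For example, a quick first pass over the key record names, assuming a Bash shell and the same `<nameserver_ip>`, `<cluster_name>`, and `<base_domain>` placeholders that are used in the procedure below, might look like the following sketch. Here, `test.apps` stands in for any wildcard application record:

[source,terminal]
----
$ for record in api api-int test.apps; do dig +noall +answer @<nameserver_ip> ${record}.<cluster_name>.<base_domain>; done
----

Each lookup is expected to return the IP address of the corresponding load balancer. The procedure that follows validates each record, and the matching reverse lookups, in detail.
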
- -[IMPORTANT] -==== -The validation steps detailed in this section must succeed before you install your cluster. -==== - -.Prerequisites - -* You have configured the required DNS records for your user-provisioned infrastructure. - -.Procedure - -. From your installation node, run DNS lookups against the record names of the Kubernetes API, the wildcard routes, and the cluster nodes. Validate that the IP addresses contained in the responses correspond to the correct components. - -.. Perform a lookup against the Kubernetes API record name. Check that the result points to the IP address of the API load balancer: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain> <1> ----- -+ -<1> Replace `<nameserver_ip>` with the IP address of the nameserver, `<cluster_name>` with your cluster name, and `<base_domain>` with your base domain name. -+ -.Example output -[source,terminal] ----- -api.ocp4.example.com. 0 IN A 192.168.1.5 ----- - -.. Perform a lookup against the Kubernetes internal API record name. Check that the result points to the IP address of the API load balancer: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> api-int.<cluster_name>.<base_domain> ----- -+ -.Example output -[source,terminal] ----- -api-int.ocp4.example.com. 0 IN A 192.168.1.5 ----- - -.. Test an example `*.apps.<cluster_name>.<base_domain>` DNS wildcard lookup. All of the application wildcard lookups must resolve to the IP address of the application ingress load balancer: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> random.apps.<cluster_name>.<base_domain> ----- -+ -.Example output -[source,terminal] ----- -random.apps.ocp4.example.com. 0 IN A 192.168.1.5 ----- -+ -[NOTE] -==== -In the example outputs, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. -==== -+ -You can replace `random` with another wildcard value. For example, you can query the route to the {product-title} console: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> console-openshift-console.apps.<cluster_name>.<base_domain> ----- -+ -.Example output -[source,terminal] ----- -console-openshift-console.apps.ocp4.example.com. 0 IN A 192.168.1.5 ----- - -.. Run a lookup against the bootstrap DNS record name. Check that the result points to the IP address of the bootstrap node: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> bootstrap.<cluster_name>.<base_domain> ----- -+ -.Example output -[source,terminal] ----- -bootstrap.ocp4.example.com. 0 IN A 192.168.1.96 ----- - -.. Use this method to perform lookups against the DNS record names for the control plane and compute nodes. Check that the results correspond to the IP addresses of each node. - -. From your installation node, run reverse DNS lookups against the IP addresses of the load balancer and the cluster nodes. Validate that the record names contained in the responses correspond to the correct components. - -.. Perform a reverse lookup against the IP address of the API load balancer. Check that the response includes the record names for the Kubernetes API and the Kubernetes internal API: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> -x 192.168.1.5 ----- -+ -.Example output -[source,terminal] ----- -5.1.168.192.in-addr.arpa. 
0 IN PTR api-int.ocp4.example.com. <1> -5.1.168.192.in-addr.arpa. 0 IN PTR api.ocp4.example.com. <2> ----- -+ -<1> Provides the record name for the Kubernetes internal API. -<2> Provides the record name for the Kubernetes API. -+ -[NOTE] -==== -A PTR record is not required for the {product-title} application wildcard. No validation step is needed for reverse DNS resolution against the IP address of the application ingress load balancer. -==== - -.. Perform a reverse lookup against the IP address of the bootstrap node. Check that the result points to the DNS record name of the bootstrap node: -+ -[source,terminal] ----- -$ dig +noall +answer @<nameserver_ip> -x 192.168.1.96 ----- -+ -.Example output -[source,terminal] ----- -96.1.168.192.in-addr.arpa. 0 IN PTR bootstrap.ocp4.example.com. ----- - -.. Use this method to perform reverse lookups against the IP addresses for the control plane and compute nodes. Check that the results correspond to the DNS record names of each node. diff --git a/modules/installation-using-gcp-custom-machine-types.adoc b/modules/installation-using-gcp-custom-machine-types.adoc deleted file mode 100644 index ed96036f8009..000000000000 --- a/modules/installation-using-gcp-custom-machine-types.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-private.adoc -// * installing/installing_gcp/installing-gcp-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc - -ifeval::["{context}" == "installing-gcp-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:ipi: -endif::[] - -:_content-type: PROCEDURE -[id="installation-custom-machine-types_{context}"] -= Using custom machine types -Using a custom machine type to install a {product-title} cluster is supported. - -Consider the following when using a custom machine type: - -* Similar to predefined instance types, custom machine types must meet the minimum resource requirements for control plane and compute machines. For more information, see "Minimum resource requirements for cluster installation". -* The name of the custom machine type must adhere to the following syntax: -+ --- -`custom-<number_of_cpus>-<amount_of_memory_in_mb>` - -For example, `custom-6-20480`. --- - -ifdef::ipi[] -As part of the installation process, you specify the custom machine type in the `install-config.yaml` file. 
- -.Sample `install-config.yaml` file with a custom machine type - -[source,yaml] ----- -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: - gcp: - type: custom-6-20480 - replicas: 2 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: - gcp: - type: custom-6-20480 - replicas: 3 ----- -endif::ipi[] - -ifeval::["{context}" == "installing-gcp-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-network-customizations"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-private"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-gcp-vpc"] -:!ipi: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisioned"] -:!ipi: -endif::[] diff --git a/modules/installation-vsphere-config-yaml.adoc b/modules/installation-vsphere-config-yaml.adoc deleted file mode 100644 index 890b6a414c4b..000000000000 --- a/modules/installation-vsphere-config-yaml.adoc +++ /dev/null @@ -1,234 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:restricted: -endif::[] -ifdef::openshift-origin[] -:restricted: -endif::[] - -[id="installation-vsphere-config-yaml_{context}"] -= Sample `install-config.yaml` file for VMware vSphere - -You can customize the `install-config.yaml` file to specify more details about -your {product-title} cluster's platform or modify the values of the required -parameters. - -[source,yaml] ----- -additionalTrustBundlePolicy: Proxyonly -apiVersion: v1 -baseDomain: example.com <1> -compute: <2> -- architecture: amd64 - hyperthreading: Enabled <3> - name: <worker_node> - platform: {} - replicas: 0 <4> -controlPlane: <2> - architecture: amd64 - hyperthreading: Enabled <3> - name: <parent_node> - platform: {} - replicas: 3 <5> -metadata: - creationTimestamp: null - name: test <6> -networking: ---- -platform: - vsphere: - apiVIPs: - - 10.0.0.1 - failureDomains: <7> - - name: <failure_domain_name> - region: <default_region_name> - server: <fully_qualified_domain_name> - topology: - computeCluster: "/<datacenter>/host/<cluster>" - datacenter: <datacenter> <8> - datastore: "/<datacenter>/datastore/<datastore>" - networks: - - <VM_Network_name> - resourcePool: "/<datacenter>/host/<cluster>/Resources/<resourcePool>" <9> - folder: "/<datacenter_name>/vm/<folder_name>/<subfolder_name>" <10> - zone: <default_zone_name> - ingressVIPs: - - 10.0.0.2 - vcenters: - - datacenters: - - <datacenter> - password: <password> <12> - port: 443 - server: <fully_qualified_domain_name> <11> - user: administrator@vsphere.local - diskType: thin <13> -ifndef::restricted[] -ifndef::openshift-origin[] -fips: false <14> -endif::openshift-origin[] -ifndef::openshift-origin[] -pullSecret: '{"auths": ...}' <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths": ...}' <14> -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -fips: false <14> -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <15> -endif::openshift-origin[] -ifdef::openshift-origin[] -pullSecret: '{"auths":{"<local_registry>": {"auth": "<credentials>","email": "you@example.com"}}}' <14> -endif::openshift-origin[] 
-endif::restricted[] -ifndef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <16> -endif::openshift-origin[] -ifdef::openshift-origin[] -sshKey: 'ssh-ed25519 AAAA...' <15> -endif::openshift-origin[] -ifdef::restricted[] -ifndef::openshift-origin[] -additionalTrustBundle: | <17> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <18> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -ifdef::openshift-origin[] -additionalTrustBundle: | <16> - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- -imageContentSources: <17> -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-release -- mirrors: - - <local_registry>/<local_repository_name>/release - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev -endif::openshift-origin[] -endif::restricted[] ----- -<1> The base domain of the cluster. All DNS records must be sub-domains of this -base and include the cluster name. -<2> The `controlPlane` section is a single mapping, but the compute section is a -sequence of mappings. To meet the requirements of the different data structures, -the first line of the `compute` section must begin with a hyphen, `-`, and the -first line of the `controlPlane` section must not. Both sections define a single machine pool, so only one control plane is used. {product-title} does not support defining multiple compute pools. -<3> Whether to enable or disable simultaneous multithreading, or -`hyperthreading`. By default, simultaneous multithreading is enabled -to increase the performance of your machines' cores. You can disable it by -setting the parameter value to `Disabled`. If you disable simultaneous -multithreading in some cluster machines, you must disable it in all cluster -machines. -+ -[IMPORTANT] -==== -If you disable simultaneous multithreading, ensure that your capacity planning -accounts for the dramatically decreased machine performance. -Your machines must use at least 8 CPUs and 32 GB of RAM if you disable -simultaneous multithreading. -==== -<4> You must set the value of the `replicas` parameter to `0`. This parameter -controls the number of workers that the cluster creates and manages for you, -which are functions that the cluster does not perform when you -use user-provisioned infrastructure. You must manually deploy worker -machines for the cluster to use before you finish installing {product-title}. -<5> The number of control plane machines that you add to the cluster. Because -the cluster uses this values as the number of etcd endpoints in the cluster, the -value must match the number of control plane machines that you deploy. -<6> The cluster name that you specified in your DNS records. -<7> Establishes the relationships between a region and zone. You define a failure domain by using vCenter objects, such as a `datastore` object. A failure domain defines the vCenter location for {product-title} cluster nodes. -<8> The vSphere datacenter. -<9> Optional parameter. 
For installer-provisioned infrastructure, the absolute path of an existing resource pool where the installation program creates the virtual machines, for example, `/<datacenter_name>/host/<cluster_name>/Resources/<resource_pool_name>/<optional_nested_resource_pool_name>`. If you do not specify a value, resources are installed in the root of the cluster `/example_datacenter/host/example_cluster/Resources`. -<10> Optional parameter For installer-provisioned infrastructure, the absolute path of an existing folder where the installation program creates the virtual machines, for example, `/<datacenter_name>/vm/<folder_name>/<subfolder_name>`. If you do not provide this value, the installation program creates a top-level folder in the datacenter virtual machine folder that is named with the infrastructure ID. If you are providing the infrastructure for the cluster, omit this parameter. -<11> The fully-qualified hostname or IP address of the vCenter server. -<12> The password associated with the vSphere user. -<13> The vSphere disk provisioning method. -ifndef::openshift-origin[] -<14> Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the {op-system-first} machines that {product-title} runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with {op-system} instead. -+ -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on the `x86_64` architecture. -==== -endif::openshift-origin[] -ifndef::restricted[] -ifndef::openshift-origin[] -<15> The pull secret that you obtained from {cluster-manager-url}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -<16> The public portion of the default SSH key for the `core` user in -{op-system-first}. -endif::openshift-origin[] -ifdef::openshift-origin[] -<15> You obtained the {cluster-manager-url-pull}. This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for {product-title} components. -<16> The public portion of the default SSH key for the `core` user in -{op-system-first}. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -<15> For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -<16> The public portion of the default SSH key for the `core` user in -{op-system-first}. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::openshift-origin[] -ifdef::openshift-origin[] -<14> For `<local_registry>`, specify the registry domain name, and optionally the -port, that your mirror registry uses to serve content. For example -`registry.example.com` or `registry.example.com:5000`. 
For `<credentials>`, -specify the base64-encoded user name and password for your mirror registry. -<15> The public portion of the default SSH key for the `core` user in -{op-system-first}. -+ -[NOTE] -==== -For production {product-title} clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your `ssh-agent` process uses. -==== -endif::openshift-origin[] -endif::restricted[] -ifdef::restricted[] -ifndef::openshift-origin[] -<17> Provide the contents of the certificate file that you used for your mirror -registry. -<18> Provide the `imageContentSources` section from the output of the command to -mirror the repository. -endif::openshift-origin[] -ifdef::openshift-origin[] -<16> Provide the contents of the certificate file that you used for your mirror -registry. -<17> Provide the `imageContentSources` section from the output of the command to -mirror the repository. -endif::openshift-origin[] -endif::restricted[] - -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!restricted: -endif::[] -ifdef::openshift-origin[] -:!restricted: -endif::[] diff --git a/modules/installation-vsphere-encrypted-vms.adoc b/modules/installation-vsphere-encrypted-vms.adoc deleted file mode 100644 index 639795ff923d..000000000000 --- a/modules/installation-vsphere-encrypted-vms.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// module is included in the following assemblies: -// ../installing/installing_vsphere/installing-vsphere.adoc - -:_content-type: PROCEDURE -[id="installation-vsphere-encrypted-vms_{context}"] -= Requirements for encrypting virtual machines - -You can encrypt your virtual machines prior to installing {product-title} {product-version} by meeting the following requirements. - -* You have configured a Standard key provider in vSphere. For more information, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vsan.doc/GUID-AC06B3C3-901F-402E-B25F-1EE7809D1264.html[Adding a KMS to vCenter Server]. -+ -[IMPORTANT] -==== -The Native key provider in vCenter is not supported. For more information, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-54B9FBA2-FDB1-400B-A6AE-81BF3AC9DF97.html[vSphere Native Key Provider Overview]. -==== - -* You have enabled host encryption mode on all of the ESXi hosts that are hosting the cluster. For more information, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-A9E1F016-51B3-472F-B8DE-803F6BDB70BC.html[Enabling host encryption mode]. -* You have a vSphere account which has all cryptographic privileges enabled. For more information, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-660CCB35-847F-46B3-81CA-10DDDB9D7AA9.html[Cryptographic Operations Privileges]. - -When you deploy the OVF template in the section titled "Installing RHCOS and starting the OpenShift Container Platform bootstrap process", select the option to "Encrypt this virtual machine" when you are selecting storage for the OVF template. After completing cluster installation, create a storage class that uses the encryption storage policy you used to encrypt the virtual machines. 
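
The following storage class is one possible sketch of that final step. It assumes the vSphere CSI driver (`csi.vsphere.vmware.com`), uses an arbitrary class name, and uses `<encryption_storage_policy>` as a placeholder for the storage policy that you applied when encrypting the virtual machines:

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: thin-csi-encrypted <1>
provisioner: csi.vsphere.vmware.com <2>
parameters:
  storagepolicyname: "<encryption_storage_policy>" <3>
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
----
<1> An example name; choose any valid object name.
<2> The vSphere CSI provisioner.
<3> Replace with the name of the encryption storage policy that you used to encrypt the virtual machines.

After the cluster is installed, you can apply the manifest with `oc apply -f <file>` and reference the class from persistent volume claims that require encrypted storage.
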
diff --git a/modules/installation-vsphere-infrastructure.adoc b/modules/installation-vsphere-infrastructure.adoc deleted file mode 100644 index fabb8a339116..000000000000 --- a/modules/installation-vsphere-infrastructure.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc - -[id="installation-vsphere-infrastructure_{context}"] -= VMware vSphere infrastructure requirements - -You must install the {product-title} cluster on a VMware vSphere version 7.0 Update 2 or later instance that meets the requirements for the components that you use. - -[NOTE] -==== -{product-title} version {product-version} supports VMware vSphere version 8.0. -==== - -You can host the VMware vSphere infrastructure on-premise or on a link:https://cloud.vmware.com/providers[VMware Cloud Verified provider] that meets the requirements outlined in the following table: - -.Version requirements for vSphere virtual environments -[cols=2, options="header"] -|=== -|Virtual environment product |Required version -|VMware virtual hardware | 15 or later -|vSphere ESXi hosts | 7.0 Update 2 or later -|vCenter host | 7.0 Update 2 or later -|=== - -.Minimum supported vSphere version for VMware components -|=== -|Component | Minimum supported versions |Description - -|Hypervisor -|vSphere 7.0 Update 2 and later with virtual hardware version 15 -|This version is the minimum version that {op-system-first} supports. See the link:https://access.redhat.com/ecosystem/search/#/ecosystem/Red%20Hat%20Enterprise%20Linux?sort=sortTitle%20asc&vendors=VMware&category=Server[Red Hat Enterprise Linux 8 supported hypervisors list]. - -|Storage with in-tree drivers -|vSphere 7.0 Update 2 and later -|This plugin creates vSphere storage by using the in-tree storage drivers for vSphere included in {product-title}. - -|Optional: Networking (NSX-T) -|vSphere 7.0 Update 2 and later -|vSphere 7.0 Update 2 is required for {product-title}. For more information about the compatibility of NSX and {product-title}, see the Release Notes section of VMware's link:https://docs.vmware.com/en/VMware-NSX-Container-Plugin/index.html[NSX container plugin documentation]. -|=== - -[IMPORTANT] -==== -You must ensure that the time on your ESXi hosts is synchronized before you install {product-title}. See link:https://docs.vmware.com/en/VMware-vSphere/6.7/com.vmware.vsphere.vcenterhost.doc/GUID-8756D419-A878-4AE0-9183-C6D5A91A8FB1.html[Edit Time Configuration for a Host] in the VMware documentation. 
-==== diff --git a/modules/installation-vsphere-installer-infra-requirements.adoc b/modules/installation-vsphere-installer-infra-requirements.adoc deleted file mode 100644 index 6b9a7b2ba57f..000000000000 --- a/modules/installation-vsphere-installer-infra-requirements.adoc +++ /dev/null @@ -1,496 +0,0 @@ -// Module included in the following assemblies for vSphere: -// -// * installing/installing_vsphere/installing-vsphere-installer-provisioned.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc - - - -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:restricted: -endif::[] - -ifeval::["{context}" == "installing-vsphere"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:vsphere: -endif::[] - - -[id="installation-vsphere-installer-infra-requirements_{context}"] -= vCenter requirements - -ifndef::vsphere[] -Before you install an {product-title} cluster on your vCenter that uses infrastructure that the installer provisions, you must prepare your environment. -endif::vsphere[] - -ifdef::vsphere[] -Before you install an {product-title} cluster on your vCenter that uses infrastructure that you provided, you must prepare your environment. -endif::vsphere[] - -[discrete] -[id="installation-vsphere-installer-infra-requirements-account_{context}"] -== Required vCenter account privileges - -ifndef::vsphere[] -To install an {product-title} cluster in a vCenter, the installation program requires access to an account with privileges to read and create the required resources. Using an account that has global administrative privileges is the simplest way to access all of the necessary permissions. - -If you cannot use an account with global administrative privileges, you must create roles to grant the privileges necessary for {product-title} cluster installation. While most of the privileges are always required, some are required only if you plan for the installation program to provision a folder to contain the {product-title} cluster on your vCenter instance, which is the default behavior. You must create or amend vSphere roles for the specified objects to grant the required privileges. - -An additional role is required if the installation program is to create a vSphere virtual machine folder. -endif::vsphere[] - -ifdef::vsphere[] -To install an {product-title} cluster in a vCenter, your vSphere account must include privileges for reading and creating the required resources. Using an account that has global administrative privileges is the simplest way to access all of the necessary permissions. 
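
If you script your vCenter preparation, one way to assemble such a role is with the `govc` CLI from the govmomi project. The following is a sketch only: it assumes that `govc` is installed and authenticated against your vCenter instance, the role name is arbitrary, and the privileges shown are a small subset of the vSphere API privileges listed in the tables that follow:

[source,terminal]
----
$ govc role.create openshift-installer \
    Cns.Searchable \
    InventoryService.Tagging.AttachTag \
    InventoryService.Tagging.CreateTag \
    StorageProfile.View
$ govc role.ls openshift-installer
----

You must still grant the role, and the other roles described in this section, to the installation user on each vSphere object with the propagation settings listed in the permissions table.
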
-endif::vsphere[] - - -.Roles and privileges required for installation in vSphere API -[%collapsible] -==== -[cols="3a,3a,3a",options="header"] -|=== -|vSphere object for role -|When required -|Required privileges in vSphere API - -|vSphere vCenter -|Always -| -[%hardbreaks] -`Cns.Searchable` -`InventoryService.Tagging.AttachTag` -`InventoryService.Tagging.CreateCategory` -`InventoryService.Tagging.CreateTag` -`InventoryService.Tagging.DeleteCategory` -`InventoryService.Tagging.DeleteTag` -`InventoryService.Tagging.EditCategory` -`InventoryService.Tagging.EditTag` -`Sessions.ValidateSession` -`StorageProfile.Update` -`StorageProfile.View` - -|vSphere vCenter Cluster -|If VMs will be created in the cluster root -| -[%hardbreaks] -`Host.Config.Storage` -`Resource.AssignVMToPool` -`VApp.AssignResourcePool` -`VApp.Import` -`VirtualMachine.Config.AddNewDisk` - -|vSphere vCenter Resource Pool -|If an existing resource pool is provided -| -[%hardbreaks] -`Host.Config.Storage` -`Resource.AssignVMToPool` -`VApp.AssignResourcePool` -`VApp.Import` -`VirtualMachine.Config.AddNewDisk` - -|vSphere Datastore -|Always -| -[%hardbreaks] -`Datastore.AllocateSpace` -`Datastore.Browse` -`Datastore.FileManagement` -`InventoryService.Tagging.ObjectAttachable` - -|vSphere Port Group -|Always -|`Network.Assign` - -|Virtual Machine Folder -|Always -| -[%hardbreaks] -`InventoryService.Tagging.ObjectAttachable` -`Resource.AssignVMToPool` -`VApp.Import` -`VirtualMachine.Config.AddExistingDisk` -`VirtualMachine.Config.AddNewDisk` -`VirtualMachine.Config.AddRemoveDevice` -`VirtualMachine.Config.AdvancedConfig` -`VirtualMachine.Config.Annotation` -`VirtualMachine.Config.CPUCount` -`VirtualMachine.Config.DiskExtend` -`VirtualMachine.Config.DiskLease` -`VirtualMachine.Config.EditDevice` -`VirtualMachine.Config.Memory` -`VirtualMachine.Config.RemoveDisk` -`VirtualMachine.Config.Rename` -`VirtualMachine.Config.ResetGuestInfo` -`VirtualMachine.Config.Resource` -`VirtualMachine.Config.Settings` -`VirtualMachine.Config.UpgradeVirtualHardware` -`VirtualMachine.Interact.GuestControl` -`VirtualMachine.Interact.PowerOff` -`VirtualMachine.Interact.PowerOn` -`VirtualMachine.Interact.Reset` -`VirtualMachine.Inventory.Create` -`VirtualMachine.Inventory.CreateFromExisting` -`VirtualMachine.Inventory.Delete` -`VirtualMachine.Provisioning.Clone` -`VirtualMachine.Provisioning.MarkAsTemplate` -`VirtualMachine.Provisioning.DeployTemplate` - -|vSphere vCenter Datacenter -|If the installation program creates the virtual machine folder. For UPI, `VirtualMachine.Inventory.Create` and `VirtualMachine.Inventory.Delete` privileges are optional if your cluster does not use the Machine API. 
-| -[%hardbreaks] -`InventoryService.Tagging.ObjectAttachable` -`Resource.AssignVMToPool` -`VApp.Import` -`VirtualMachine.Config.AddExistingDisk` -`VirtualMachine.Config.AddNewDisk` -`VirtualMachine.Config.AddRemoveDevice` -`VirtualMachine.Config.AdvancedConfig` -`VirtualMachine.Config.Annotation` -`VirtualMachine.Config.CPUCount` -`VirtualMachine.Config.DiskExtend` -`VirtualMachine.Config.DiskLease` -`VirtualMachine.Config.EditDevice` -`VirtualMachine.Config.Memory` -`VirtualMachine.Config.RemoveDisk` -`VirtualMachine.Config.Rename` -`VirtualMachine.Config.ResetGuestInfo` -`VirtualMachine.Config.Resource` -`VirtualMachine.Config.Settings` -`VirtualMachine.Config.UpgradeVirtualHardware` -`VirtualMachine.Interact.GuestControl` -`VirtualMachine.Interact.PowerOff` -`VirtualMachine.Interact.PowerOn` -`VirtualMachine.Interact.Reset` -`VirtualMachine.Inventory.Create` -`VirtualMachine.Inventory.CreateFromExisting` -`VirtualMachine.Inventory.Delete` -`VirtualMachine.Provisioning.Clone` -`VirtualMachine.Provisioning.DeployTemplate` -`VirtualMachine.Provisioning.MarkAsTemplate` -`Folder.Create` -`Folder.Delete` -|=== -==== - -.Roles and privileges required for installation in vCenter graphical user interface (GUI) -[%collapsible] -==== -[cols="3a,3a,3a",options="header"] -|=== -|vSphere object for role -|When required -|Required privileges in vCenter GUI - -|vSphere vCenter -|Always -| -[%hardbreaks] -`Cns.Searchable` -`"vSphere Tagging"."Assign or Unassign vSphere Tag"` -`"vSphere Tagging"."Create vSphere Tag Category"` -`"vSphere Tagging"."Create vSphere Tag"` -`vSphere Tagging"."Delete vSphere Tag Category"` -`"vSphere Tagging"."Delete vSphere Tag"` -`"vSphere Tagging"."Edit vSphere Tag Category"` -`"vSphere Tagging"."Edit vSphere Tag"` -`Sessions."Validate session"` -`"Profile-driven storage"."Profile-driven storage update"` -`"Profile-driven storage"."Profile-driven storage view"` - -|vSphere vCenter Cluster -|If VMs will be created in the cluster root -| -[%hardbreaks] -`Host.Configuration."Storage partition configuration"` -`Resource."Assign virtual machine to resource pool"` -`VApp."Assign resource pool"` -`VApp.Import` -`"Virtual machine"."Change Configuration"."Add new disk"` - -|vSphere vCenter Resource Pool -|If an existing resource pool is provided -| -[%hardbreaks] -`Host.Configuration."Storage partition configuration"` -`Resource."Assign virtual machine to resource pool"` -`VApp."Assign resource pool"` -`VApp.Import` -`"Virtual machine"."Change Configuration"."Add new disk"` - -|vSphere Datastore -|Always -| -[%hardbreaks] -`Datastore."Allocate space"` -`Datastore."Browse datastore"` -`Datastore."Low level file operations"` -`"vSphere Tagging"."Assign or Unassign vSphere Tag on Object"` - -|vSphere Port Group -|Always -|`Network."Assign network"` - -|Virtual Machine Folder -|Always -| -[%hardbreaks] -`"vSphere Tagging"."Assign or Unassign vSphere Tag on Object"` -`Resource."Assign virtual machine to resource pool"` -`VApp.Import` -`"Virtual machine"."Change Configuration"."Add existing disk"` -`"Virtual machine"."Change Configuration"."Add new disk"` -`"Virtual machine"."Change Configuration"."Add or remove device"` -`"Virtual machine"."Change Configuration"."Advanced configuration"` -`"Virtual machine"."Change Configuration"."Set annotation"` -`"Virtual machine"."Change Configuration"."Change CPU count"` -`"Virtual machine"."Change Configuration"."Extend virtual disk"` -`"Virtual machine"."Change Configuration"."Acquire disk lease"` -`"Virtual machine"."Change Configuration"."Modify 
device settings"` -`"Virtual machine"."Change Configuration"."Change Memory"` -`"Virtual machine"."Change Configuration"."Remove disk"` -`"Virtual machine"."Change Configuration".Rename` -`"Virtual machine"."Change Configuration"."Reset guest information"` -`"Virtual machine"."Change Configuration"."Change resource"` -`"Virtual machine"."Change Configuration"."Change Settings"` -`"Virtual machine"."Change Configuration"."Upgrade virtual machine compatibility"` -`"Virtual machine".Interaction."Guest operating system management by VIX API"` -`"Virtual machine".Interaction."Power off"` -`"Virtual machine".Interaction."Power on"` -`"Virtual machine".Interaction.Reset` -`"Virtual machine"."Edit Inventory"."Create new"` -`"Virtual machine"."Edit Inventory"."Create from existing"` -`"Virtual machine"."Edit Inventory"."Remove"` -`"Virtual machine".Provisioning."Clone virtual machine"` -`"Virtual machine".Provisioning."Mark as template"` -`"Virtual machine".Provisioning."Deploy template"` - -|vSphere vCenter Datacenter -|If the installation program creates the virtual machine folder. For UPI, `VirtualMachine.Inventory.Create` and `VirtualMachine.Inventory.Delete` privileges are optional if your cluster does not use the Machine API. -| -[%hardbreaks] -`"vSphere Tagging"."Assign or Unassign vSphere Tag on Object"` -`Resource."Assign virtual machine to resource pool"` -`VApp.Import` -`"Virtual machine"."Change Configuration"."Add existing disk"` -`"Virtual machine"."Change Configuration"."Add new disk"` -`"Virtual machine"."Change Configuration"."Add or remove device"` -`"Virtual machine"."Change Configuration"."Advanced configuration"` -`"Virtual machine"."Change Configuration"."Set annotation"` -`"Virtual machine"."Change Configuration"."Change CPU count"` -`"Virtual machine"."Change Configuration"."Extend virtual disk"` -`"Virtual machine"."Change Configuration"."Acquire disk lease"` -`"Virtual machine"."Change Configuration"."Modify device settings"` -`"Virtual machine"."Change Configuration"."Change Memory"` -`"Virtual machine"."Change Configuration"."Remove disk"` -`"Virtual machine"."Change Configuration".Rename` -`"Virtual machine"."Change Configuration"."Reset guest information"` -`"Virtual machine"."Change Configuration"."Change resource"` -`"Virtual machine"."Change Configuration"."Change Settings"` -`"Virtual machine"."Change Configuration"."Upgrade virtual machine compatibility"` -`"Virtual machine".Interaction."Guest operating system management by VIX API"` -`"Virtual machine".Interaction."Power off"` -`"Virtual machine".Interaction."Power on"` -`"Virtual machine".Interaction.Reset` -`"Virtual machine"."Edit Inventory"."Create new"` -`"Virtual machine"."Edit Inventory"."Create from existing"` -`"Virtual machine"."Edit Inventory"."Remove"` -`"Virtual machine".Provisioning."Clone virtual machine"` -`"Virtual machine".Provisioning."Deploy template"` -`"Virtual machine".Provisioning."Mark as template"` -`Folder."Create folder"` -`Folder."Delete folder"` -|=== -==== - - -Additionally, the user requires some `ReadOnly` permissions, and some of the roles require permission to propagate the permissions to child objects. These settings vary depending on whether or not you install the cluster into an existing folder.
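The collapsible table that follows lists the exact propagation setting for each vSphere object. As a non-authoritative illustration only, the following sketch shows how such a permission might be applied with the `govc` CLI, which is not part of this procedure; the principal, role names, and inventory paths are placeholders that you would replace with values from your environment.

[source,terminal]
----
# Hypothetical example: grant a role at the vCenter root with propagation disabled ...
$ govc permissions.set -principal openshift-install@vsphere.local \
  -role openshift-vcenter-role -propagate=false /

# ... and grant a role on an existing virtual machine folder with propagation enabled
$ govc permissions.set -principal openshift-install@vsphere.local \
  -role openshift-folder-role -propagate=true /<datacenter>/vm/<folder_name>
----

Whether propagation must be enabled depends on the vSphere object and on whether you use an existing folder or resource pool, as listed in the following table.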
- -.Required permissions and propagation settings -[%collapsible] -==== -[cols="3a,3a,3a,3a",options="header"] -|=== -|vSphere object -|When required -|Propagate to children -|Permissions required - -|vSphere vCenter -|Always -|False -|Listed required privileges - -.2+|vSphere vCenter Datacenter -|Existing folder -|False -|`ReadOnly` permission - -|Installation program creates the folder -|True -|Listed required privileges - -.2+|vSphere vCenter Cluster -|Existing resource pool -|True -|`ReadOnly` permission - -|VMs in cluster root -|True -|Listed required privileges - -|vSphere vCenter Datastore -|Always -|False -|Listed required privileges - -|vSphere Switch -|Always -|False -|`ReadOnly` permission - -|vSphere Port Group -|Always -|False -|Listed required privileges - -|vSphere vCenter Virtual Machine Folder -|Existing folder -|True -|Listed required privileges - -|vSphere vCenter Resource Pool -|Existing resource pool -|True -|Listed required privileges -|=== -==== - -For more information about creating an account with only the required privileges, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-5372F580-5C23-4E9C-8A4E-EF1B4DD9033E.html[vSphere Permissions and User Management Tasks] in the vSphere documentation. - -[discrete] -[id="installation-vsphere-installer-infra-requirements-vmotion_{context}"] -== Using {product-title} with vMotion - -If you intend to use vMotion in your vSphere environment, consider the following before installing an {product-title} cluster. - -* {product-title} generally supports compute-only vMotion. Using Storage vMotion can cause issues and is not supported. -+ --- -To help ensure the uptime of your compute and control plane nodes, it is recommended that you follow the VMware best practices for vMotion. It is also recommended to use VMware anti-affinity rules to improve the availability of {product-title} during maintenance or hardware issues. - -For more information about vMotion and anti-affinity rules, see the VMware vSphere documentation for link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-3B41119A-1276-404B-8BFB-A32409052449.html[vMotion networking requirements] and link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.resmgmt.doc/GUID-FBE46165-065C-48C2-B775-7ADA87FF9A20.html[VM anti-affinity rules]. --- -* If you are using vSphere volumes in your pods, migrating a VM across datastores either manually or through Storage vMotion causes invalid references within {product-title} persistent volume (PV) objects. These references prevent affected pods from starting up and can result in data loss. -* Similarly, {product-title} does not support selective migration of VMDKs across datastores, using datastore clusters for VM provisioning or for dynamic or static provisioning of PVs, or using a datastore that is part of a datastore cluster for dynamic or static provisioning of PVs. - -[discrete] -[id="installation-vsphere-installer-infra-requirements-resources_{context}"] -== Cluster resources - -ifndef::vsphere[] -When you deploy an {product-title} cluster that uses installer-provisioned infrastructure, the installation program must be able to create several resources in your vCenter instance.
- -A standard {product-title} installation creates the following vCenter resources: -endif::vsphere[] - -ifdef::vsphere[] -When you deploy an {product-title} cluster that uses infrastructure that you provided, you must create the following resources in your vCenter instance: -endif::vsphere[] - -* 1 Folder -* 1 Tag category -* 1 Tag -* Virtual machines: -** 1 template -** 1 temporary bootstrap node -** 3 control plane nodes -** 3 compute machines - -Although these resources use 856 GB of storage, the bootstrap node is destroyed during the cluster installation process. A minimum of 800 GB of storage is required for a standard cluster. - -If you deploy more compute machines, the {product-title} cluster will use more storage. - -[discrete] -[id="installation-vsphere-installer-infra-requirements-limits_{context}"] -== Cluster limits - -Available resources vary between clusters. The number of possible clusters within a vCenter is limited primarily by available storage space and any limitations on the number of required resources. Be sure to consider both the limits on the vCenter resources that the cluster creates and the resources that you require to deploy a cluster, such as IP addresses and networks. - -[discrete] -[id="installation-vsphere-installer-infra-requirements-networking_{context}"] -== Networking requirements - -You must use DHCP for the network and ensure that the DHCP server is configured to provide persistent IP addresses to the cluster machines. You must configure the default gateway to use the DHCP server. All nodes must be in the same VLAN. You cannot scale the cluster using a second VLAN as a Day 2 operation. -ifdef::restricted[] -The VM in your restricted network must have access to vCenter so that it can provision and manage nodes, persistent volume claims (PVCs), and other resources. -endif::restricted[] -Additionally, you must create the following networking resources before you install the {product-title} cluster: - -[NOTE] -==== -It is recommended that each {product-title} node in the cluster have access to a Network Time Protocol (NTP) server that is discoverable via DHCP. Installation is possible without an NTP server. However, asynchronous server clocks cause errors, which an NTP server prevents. -==== - -[discrete] -[id="installation-vsphere-installer-infra-requirements-_{context}"] -=== Required IP Addresses -ifndef::vsphere[] -An installer-provisioned vSphere installation requires two static IP addresses: - -* The **API** address is used to access the cluster API. -* The **Ingress** address is used for cluster ingress traffic. - -You must provide these IP addresses to the installation program when you install the {product-title} cluster. -endif::vsphere[] - -[discrete] -[id="installation-vsphere-installer-infra-requirements-dns-records_{context}"] -=== DNS records -You must create DNS records for two static IP addresses in the appropriate DNS server for the vCenter instance that hosts your {product-title} cluster. In each record, `<cluster_name>` is the cluster name and `<base_domain>` is the cluster base domain that you specify when you install the cluster. A complete DNS record takes the form: `<component>.<cluster_name>.<base_domain>.`. - -.Required DNS records -[cols="1a,5a,3a",options="header"] -|=== - -|Component -|Record -|Description - -|API VIP -|`api.<cluster_name>.<base_domain>.` -|This DNS A/AAAA or CNAME record must point to the load balancer -for the control plane machines.
This record must be resolvable by both clients -external to the cluster and from all the nodes within the cluster. - -|Ingress VIP -|`*.apps.<cluster_name>.<base_domain>.` -|A wildcard DNS A/AAAA or CNAME record that points to the load balancer that targets the -machines that run the Ingress router pods, which are the worker nodes by -default. This record must be resolvable by both clients external to the cluster -and from all the nodes within the cluster. -|=== - -ifeval::["{context}" == "installing-restricted-networks-installer-provisioned-vsphere"] -:!restricted: -endif::[] - -ifeval::["{context}" == "installing-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/installation-vsphere-installer-network-requirements.adoc b/modules/installation-vsphere-installer-network-requirements.adoc deleted file mode 100644 index e995f7d23ad6..000000000000 --- a/modules/installation-vsphere-installer-network-requirements.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-installer-provisioned-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc - -:_content-type: CONCEPT -[id="installation-vsphere-installer-network-requirements_{context}"] -= Network connectivity requirements - -You must configure the network connectivity between machines to allow {product-title} cluster components to communicate. - -Review the following details about the required network ports. - -.Ports used for all-machine to all-machine communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|ICMP -|N/A -|Network reachability tests - -.4+|TCP -|`1936` -|Metrics - -|`9000`-`9999` -|Host level services, including the node exporter on ports `9100`-`9101` and -the Cluster Version Operator on port `9099`. - -|`10250`-`10259` -|The default ports that Kubernetes reserves - -|`10256` -|openshift-sdn - -.5+|UDP -|`4789` -|virtual extensible LAN (VXLAN) - -|`6081` -|Geneve - -|`9000`-`9999` -|Host level services, including the node exporter on ports `9100`-`9101`. 
- -|`500` -|IPsec IKE packets - -|`4500` -|IPsec NAT-T packets - -|TCP/UDP -|`30000`-`32767` -|Kubernetes node port - -|ESP -|N/A -|IPsec Encapsulating Security Payload (ESP) - -|=== - -.Ports used for all-machine to control plane communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|TCP -|`6443` -|Kubernetes API - -|=== - -.Ports used for control plane machine to control plane machine communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -|TCP -|`2379`-`2380` -|etcd server and peer ports - -|=== diff --git a/modules/installation-vsphere-machines.adoc b/modules/installation-vsphere-machines.adoc deleted file mode 100644 index f3c9204335c5..000000000000 --- a/modules/installation-vsphere-machines.adoc +++ /dev/null @@ -1,193 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc - -:_content-type: PROCEDURE -[id="installation-vsphere-machines_{context}"] -= Installing {op-system} and starting the {product-title} bootstrap process - -To install {product-title} on user-provisioned infrastructure on VMware vSphere, you must install {op-system-first} on vSphere hosts. When you install {op-system}, you must provide the Ignition config file that was generated by the {product-title} installation program for the type of machine you are installing. If you have configured suitable networking, DNS, and load balancing infrastructure, the {product-title} bootstrap process begins automatically after the {op-system} machines have rebooted. - -.Prerequisites - -* You have obtained the Ignition config files for your cluster. -* You have access to an HTTP server that you can access from your computer and that the machines that you create can access. -* You have created a link:https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.vcenterhost.doc/GUID-B1018F28-3F14-4DFE-9B4B-F48BBDB72C10.html[vSphere cluster]. - -.Procedure - -. Upload the bootstrap Ignition config file, which is named `<installation_directory>/bootstrap.ign`, that the installation program created to your HTTP server. Note the URL of this file. -+ -. Save the following secondary Ignition config file for your bootstrap node to your computer as `<installation_directory>/merge-bootstrap.ign`: -+ -[source,text] ----- -{ - "ignition": { - "config": { - "merge": [ - { - "source": "<bootstrap_ignition_config_url>", <1> - "verification": {} - } - ] - }, - "timeouts": {}, - "version": "3.2.0" - }, - "networkd": {}, - "passwd": {}, - "storage": {}, - "systemd": {} -} ----- -+ -<1> Specify the URL of the bootstrap Ignition config file that you hosted. -+ -When you create the virtual machine (VM) for the bootstrap machine, you use this Ignition config file. -+ -. Locate the following Ignition config files that the installation program created: -+ -* `<installation_directory>/master.ign` -* `<installation_directory>/worker.ign` -* `<installation_directory>/merge-bootstrap.ign` -+ -. Convert the Ignition config files to Base64 encoding. Later in this procedure, you must add these files to the extra configuration parameter `guestinfo.ignition.config.data` in your VM. -+ -For example, if you use a Linux operating system, you can use the `base64` command to encode the files. 
-+ -[source,terminal] ----- -$ base64 -w0 <installation_directory>/master.ign > <installation_directory>/master.64 ----- -+ -[source,terminal] ----- -$ base64 -w0 <installation_directory>/worker.ign > <installation_directory>/worker.64 ----- -+ -[source,terminal] ----- -$ base64 -w0 <installation_directory>/merge-bootstrap.ign > <installation_directory>/merge-bootstrap.64 ----- -+ -[IMPORTANT] -==== -If you plan to add more compute machines to your cluster after you finish installation, do not delete these files. -==== - -ifndef::openshift-origin[] -. Obtain the {op-system} OVA image. Images are available from the link:https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.13/[{op-system} image mirror] page. -+ -[IMPORTANT] -==== -The {op-system} images might not change with every release of {product-title}. You must download an image with the highest version that is less than or equal to the {product-title} version that you install. Use the image version that matches your {product-title} version if it is available. -==== -+ -The filename contains the {product-title} version number in the format `rhcos-vmware.<architecture>.ova`. -endif::openshift-origin[] -ifdef::openshift-origin[] -. Obtain the {op-system} images from the link:https://getfedora.org/en/coreos/download?tab=metal_virtualized&stream=stable[{op-system} Downloads] page -endif::openshift-origin[] - -. In the vSphere Client, create a folder in your datacenter to store your VMs. -.. Click the *VMs and Templates* view. -.. Right-click the name of your datacenter. -.. Click *New Folder* -> *New VM and Template Folder*. -.. In the window that is displayed, enter the folder name. If you did not specify an existing folder in the `install-config.yaml` file, then create a folder with the same name as the infrastructure ID. You use this folder name so vCenter dynamically provisions storage in the appropriate location for its Workspace configuration. - -. In the vSphere Client, create a template for the OVA image and then clone the template as needed. -+ -[NOTE] -==== -In the following steps, you create a template and then clone the template for all of your cluster machines. You then provide the location for the Ignition config file for that cloned machine type when you provision the VMs. -==== -.. From the *Hosts and Clusters* tab, right-click your cluster name and select *Deploy OVF Template*. -.. On the *Select an OVF* tab, specify the name of the {op-system} OVA file that you downloaded. -.. On the *Select a name and folder* tab, set a *Virtual machine name* for your template, such as `Template-{op-system}`. Click the name of your vSphere cluster and select the folder you created in the previous step. -.. On the *Select a compute resource* tab, click the name of your vSphere cluster. -.. On the *Select storage* tab, configure the storage options for your VM. -*** Select *Thin Provision* or *Thick Provision*, based on your storage preferences. -*** Select the datastore that you specified in your `install-config.yaml` file. -*** If you want to encrypt your virtual machines, select *Encrypt this virtual machine*. See the section titled "Requirements for encrypting virtual machines" for more information. -.. On the *Select network* tab, specify the network that you configured for the cluster, if available. -.. When creating the OVF template, do not specify values on the *Customize template* tab or configure the template any further. -+ -[IMPORTANT] -==== -Do not start the original VM template. 
The VM template must remain off and must be cloned for new {op-system} machines. Starting the VM template configures the VM template as a VM on the platform, which prevents it from being used as a template that compute machine sets can apply configurations to. -//This admonition note also appears in `modules/installation-vsphere-machines.adoc` and `modules/windows-machineset-vsphere.adoc`. -==== - -. Optional: Update the configured virtual hardware version in the VM template, if necessary. Follow link:https://kb.vmware.com/s/article/1010675[Upgrading a virtual machine to the latest hardware version] in the VMware documentation for more information. -+ -[IMPORTANT] -==== -It is recommended that you update the hardware version of the VM template to version 15 before creating VMs from it, if necessary. Using hardware version 13 for your cluster nodes running on vSphere is now deprecated. If your imported template defaults to hardware version 13, you must ensure that your ESXi host is on 6.7U3 or later before upgrading the VM template to hardware version 15. If your vSphere version is less than 6.7U3, you can skip this upgrade step; however, a future version of {product-title} is scheduled to remove support for hardware version 13 and vSphere versions less than 6.7U3. -==== - -. After the template deploys, deploy a VM for a machine in the cluster. -.. Right-click the template name and click *Clone* -> *Clone to Virtual Machine*. -.. On the *Select a name and folder* tab, specify a name for the VM. You might include the machine type in the name, such as `control-plane-0` or `compute-1`. -+ -[NOTE] -==== -Ensure that all virtual machine names across a vSphere installation are unique. -==== -.. On the *Select a name and folder* tab, select the name of the folder that you created for the cluster. -.. On the *Select a compute resource* tab, select the name of a host in your datacenter. -+ -.. Optional: On the *Select storage* tab, customize the storage options. -.. On the *Select clone options*, select -*Customize this virtual machine's hardware*. -.. On the *Customize hardware* tab, click *VM Options* -> *Advanced*. -*** Optional: Override default DHCP networking in vSphere. To enable static IP networking: -+ -... Set your static IP configuration: -+ -[source,terminal] ----- -$ export IPCFG="ip=<ip>::<gateway>:<netmask>:<hostname>:<iface>:none nameserver=srv1 [nameserver=srv2 [nameserver=srv3 [...]]]" ----- -+ -.Example command -[source,terminal] ----- -$ export IPCFG="ip=192.168.100.101::192.168.100.254:255.255.255.0:::none nameserver=8.8.8.8" ----- - -... Set the `guestinfo.afterburn.initrd.network-kargs` property before booting a VM from an OVA in vSphere: -+ -[source,terminal] ----- -$ govc vm.change -vm "<vm_name>" -e "guestinfo.afterburn.initrd.network-kargs=${IPCFG}" ----- -+ -*** Optional: In the event of cluster performance issues, from the *Latency Sensitivity* list, select *High*. Ensure that your VM's CPU and memory reservation have the following values: -**** Memory reservation value must be equal to its configured memory size. -**** CPU reservation value must be at least the number of low latency virtual CPUs multiplied by the measured physical CPU speed. -*** Click *Edit Configuration*, and on the *Configuration Parameters* window, search the list of available parameters for steal clock accounting (`stealclock.enable`). If it is available, set its value to `TRUE`. Enabling steal clock accounting can help with troubleshooting cluster issues. -*** Click *Add Configuration Params*. 
Define the following parameter names and values: -**** `guestinfo.ignition.config.data`: Locate the base-64 encoded files that you created previously in this procedure, and paste the contents of the base64-encoded Ignition config file for this machine type. -**** `guestinfo.ignition.config.data.encoding`: Specify `base64`. -**** `disk.EnableUUID`: Specify `TRUE`. -**** `stealclock.enable`: If this parameter was not defined, add it and specify `TRUE`. -.. In the *Virtual Hardware* panel of the *Customize hardware* tab, modify the specified values as required. Ensure that the amount of RAM, CPU, and disk storage meets the minimum requirements for the -machine type. -.. Complete the configuration and power on the VM. -.. Check the console output to verify that Ignition ran. -+ -.Example command -[source,terminal] ----- -Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) -Ignition: user-provided config was applied ----- -. Create the rest of the machines for your cluster by following the preceding steps for each machine. -+ -[IMPORTANT] -==== -You must create the bootstrap and control plane machines at this time. Because some pods are deployed on compute machines by default, also create at least two compute machines before you install the cluster. -==== diff --git a/modules/installation-vsphere-regions-zones.adoc b/modules/installation-vsphere-regions-zones.adoc deleted file mode 100644 index beb68fa3d7ba..000000000000 --- a/modules/installation-vsphere-regions-zones.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -//* installing/installing-vsphere-installer-provisioned-customizations.adoc [IPI] -//* installing/installing-vsphere-installer-provisioned-network-customizations.adoc [IPI] -//* installing/installing-vsphere.adoc [UPI] -//* installing/installing-vsphere-network-customizations.adoc [UPI] -//* installing/installing-restricted-networks-installer-provisioned-vsphere.adoc [IPI] -//* installing/installing-restricted-networks-vsphere.adoc [IPI] - -:_content-type: CONCEPT -[id="installation-vsphere-regions-zones_{context}"] -= VMware vSphere region and zone enablement - -You can deploy an {product-title} cluster to multiple vSphere datacenters that run in a single VMware vCenter. Each datacenter can run multiple clusters. This configuration reduces the risk of a hardware failure or network outage that can cause your cluster to fail. - -[IMPORTANT] -==== -The VMware vSphere region and zone enablement feature requires the vSphere Container Storage Interface (CSI) driver as the default storage driver in the cluster. As a result, the feature is only available on a newly installed cluster. - -A cluster that was upgraded from a previous release defaults to using the in-tree vSphere driver, so you must enable CSI automatic migration for the cluster. You can then configure multiple regions and zones for the upgraded cluster. -==== - -The default installation configuration deploys a cluster to a single vSphere datacenter. If you want to deploy a cluster to multiple vSphere datacenters, you must create an installation configuration file that enables the region and zone feature. - -The default `install-config.yaml` file includes `vcenters` and `failureDomains` fields, where you can specify multiple vSphere datacenters and clusters for your {product-title} cluster. You can leave these fields blank if you want to install an {product-title} cluster in a vSphere environment that consists of a single datacenter.
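As a rough orientation only, the following abridged `install-config.yaml` fragment sketches how the `vcenters` and `failureDomains` fields relate to each other. The values shown, such as the vCenter server, datacenter, cluster, and datastore paths, are placeholders, and you should confirm the full schema against the installation configuration parameters for your release.

[source,yaml]
----
platform:
  vsphere:
    vcenters:
    - server: vcenter.example.com
      user: administrator@vsphere.local
      password: <password>
      datacenters:
      - <datacenter_1>
    failureDomains:
    - name: <failure_domain_name>
      region: us-east      # tag from the openshift-region tag category
      zone: us-east-1a     # tag from the openshift-zone tag category
      server: vcenter.example.com
      topology:
        datacenter: <datacenter_1>
        computeCluster: "/<datacenter_1>/host/<vsphere_cluster_1>"
        datastore: "/<datacenter_1>/datastore/<datastore_1>"
        networks:
        - <vm_network_name>
----

Each `failureDomains` entry pairs a region tag and a zone tag with the vCenter objects that host the nodes for that domain, which matches the terminology described next.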
- -The following list describes terms associated with defining zones and regions for your cluster: - -* Failure domain: Establishes the relationships between a region and zone. You define a failure domain by using vCenter objects, such as a `datastore` object. A failure domain defines the vCenter location for {product-title} cluster nodes. -* Region: Specifies a vCenter datacenter. You define a region by using a tag from the `openshift-region` tag category. -* Zone: Specifies a vCenter cluster. You define a zone by using a tag from the `openshift-zone` tag category. - -[NOTE] -==== -If you plan to specify more than one failure domain in your `install-config.yaml` file, you must create tag categories, zone tags, and region tags in advance of creating the configuration file. -==== - -You must create a vCenter tag for each vCenter datacenter, which represents a region. Additionally, you must create a vCenter tag for each cluster that runs in a datacenter, which represents a zone. After you create the tags, you must attach each tag to its respective datacenter or cluster. - -The following table outlines an example of the relationship among regions, zones, and tags for a configuration with multiple vSphere datacenters running in a single VMware vCenter. - -[cols="2,2a,4a",options="header"] -|=== -|Datacenter (region)| Cluster (zone)| Tags - -.4+|us-east - -.2+|us-east-1 -|us-east-1a -|us-east-1b -.2+|us-east-2 -|us-east-2a -|us-east-2b - -.4+|us-west -.2+|us-west-1 -|us-west-1a -|us-west-1b -.2+|us-west-2 -|us-west-2a -|us-west-2b -|=== diff --git a/modules/installing-aws-load-balancer-operator.adoc b/modules/installing-aws-load-balancer-operator.adoc deleted file mode 100644 index 0ca0f1d6dbb5..000000000000 --- a/modules/installing-aws-load-balancer-operator.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/aws_load_balancer_operator/install-aws-load-balancer-operator.adoc - -:_content-type: PROCEDURE -[id="nw-installing-aws-load-balancer-operator_{context}"] -= Installing the AWS Load Balancer Operator - -You can install the AWS Load Balancer Operator from the OperatorHub by using the {product-title} web console. - -.Prerequisites - -* You have logged in to the {product-title} web console as a user with `cluster-admin` permissions. - -* Your cluster is configured with AWS as the platform type and cloud provider. - -.Procedure - -. Navigate to *Operators* → *OperatorHub* in the {product-title} web console. -. Select the *AWS Load Balancer Operator*. You can use the *Filter by keyword* text box or use the filter list to search for the AWS Load Balancer Operator from the list of Operators. -. Select the `aws-load-balancer-operator` namespace. -. Follow the instructions to prepare the Operator for installation. -. On the *AWS Load Balancer Operator* page, click *Install*. -. On the *Install Operator* page, select the following options: -.. *Update the channel* as *stable-v1*. -.. *Installation mode* as *A specific namespace on the cluster*. -.. *Installed Namespace* as `aws-load-balancer-operator`. If the `aws-load-balancer-operator` namespace does not exist, it gets created during the Operator installation. -.. Select *Update approval* as *Automatic* or *Manual*. By default, the *Update approval* is set to *Automatic*. If you select automatic updates, the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention.
If you select manual updates, the OLM creates an update request. As a cluster administrator, you must then manually approve that update request to update the Operator updated to the new version. -.. Click *Install*. - -.Verification - -* Verify that the AWS Load Balancer Operator shows the *Status* as *Succeeded* on the Installed Operators dashboard. diff --git a/modules/installing-ocp-agent-boot.adoc b/modules/installing-ocp-agent-boot.adoc deleted file mode 100644 index b2db06ed2639..000000000000 --- a/modules/installing-ocp-agent-boot.adoc +++ /dev/null @@ -1,175 +0,0 @@ -// Module included in the following assemblies: -// -// * installing-with-agent/installing-with-agent.adoc - -:_content-type: PROCEDURE -[id="installing-ocp-agent-boot_{context}"] -= Creating and booting the agent image - -Use this procedure to boot the agent image on your machines. - -.Procedure - -. Install `nmstate` dependency by running the following command: -+ -[source,terminal] ----- -$ sudo dnf install /usr/bin/nmstatectl -y ----- - -. Place the `openshift-install` binary in a directory that is on your PATH. - -. Create a directory to store the install configuration by running the following command: -+ -[source,terminal] ----- -$ mkdir ~/<directory_name> ----- - -+ -[NOTE] -==== -This is the preferred method for the Agent-based installation. Using {ztp} manifests is optional. -==== - -. Create the `install-config.yaml` file: -+ -[source,yaml] ----- -cat << EOF > ./my-cluster/install-config.yaml -apiVersion: v1 -baseDomain: test.example.com -compute: - architecture: amd64 <1> - hyperthreading: Enabled - name: worker - replicas: 0 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - replicas: 1 -metadata: - name: sno-cluster <2> -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 192.168.111.0/16 - networkType: OVNKubernetes <3> - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -pullSecret: '<pull_secret>' <4> -sshKey: | - '<ssh_pub_key>' <5> - EOF ----- -+ -<1> Specify the system architecture, valid values are `amd64` and `arm64`. -<2> Required. Specify your cluster name. -<3> State the cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. -<4> Specify your pull secret. -<5> Specify your ssh public key. - -+ -[NOTE] -==== -If you set the platform to `vSphere` or `baremetal`, you can configure IP address endpoints for cluster nodes in three ways: - -* IPv4 -* IPv6 -* IPv4 and IPv6 in parallel (dual-stack) - -IPv6 is supported only on bare metal platforms. -==== -+ -.Example of dual-stack networking -[source,yaml] ----- -networking: - clusterNetwork: - - cidr: 172.21.0.0/16 - hostPrefix: 23 - - cidr: fd02::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 192.168.11.0/16 - - cidr: 2001:DB8::/32 - serviceNetwork: - - 172.22.0.0/16 - - fd03::/112 - networkType: OVNKubernetes -platform: - baremetal: - apiVIPs: - - 192.168.11.3 - - 2001:DB8::4 - ingressVIPs: - - 192.168.11.4 - - 2001:DB8::5 ----- - -. 
Create the `agent-config.yaml` file: -+ -[source,yaml] ----- - cat > agent-config.yaml << EOF - apiVersion: v1alpha1 - kind: AgentConfig - metadata: - name: sno-cluster - rendezvousIP: 192.168.111.80 <1> - hosts: <2> - - hostname: master-0 <3> - interfaces: - - name: eno1 - macAddress: 00:ef:44:21:e6:a5 - rootDeviceHints: <4> - deviceName: /dev/sdb - networkConfig: <5> - interfaces: - - name: eno1 - type: ethernet - state: up - mac-address: 00:ef:44:21:e6:a5 - ipv4: - enabled: true - address: - - ip: 192.168.111.80 - prefix-length: 23 - dhcp: false - dns-resolver: - config: - server: - - 192.168.111.1 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 192.168.111.2 - next-hop-interface: eno1 - table-id: 254 - EOF ----- -+ -<1> This IP address is used to determine which node performs the bootstrapping process as well as running the `assisted-service` component. -You must provide the rendezvous IP address when you do not specify at least one host's IP address in the `networkConfig` parameter. If this address is not provided, one IP address is selected from the provided hosts' `networkConfig`. -<2> Host configuration is optional. The number of hosts defined must not exceed the total number of hosts defined in the `install-config.yaml` file, which is the sum of the values of the `compute.replicas` and `controlPlane.replicas` parameters. -<3> The optional `hostname` parameter overrides the hostname obtained from either the Dynamic Host Configuration Protocol (DHCP) or a reverse DNS lookup. Each host must have a unique hostname supplied by one of these methods. -<4> The `rootDeviceHints` parameter enables provisioning of the Red Hat Enterprise Linux CoreOS (RHCOS) image to a particular device. It examines the devices in the order it discovers them, and compares the discovered values with the hint values. It uses the first discovered device that matches the hint value. -<5> Set this optional parameter to configure the network interface of a host in NMState format. - -+ -. Create the agent image by running the following command: - -+ -[source,terminal] ----- -$ openshift-install --dir <install_directory> agent create image ----- -+ -NOTE: Red Hat Enterprise Linux CoreOS (RHCOS) supports multipathing on the primary disk, allowing stronger resilience to hardware failure to achieve higher host availability. Multipathing is enabled by default in the agent ISO image, with a default `/etc/multipath.conf` configuration. - -. Boot the `agent.x86_64.iso` or `agent.aarch64.iso` image on the bare metal machines. diff --git a/modules/installing-ocp-agent-download.adoc b/modules/installing-ocp-agent-download.adoc deleted file mode 100644 index 45f41be54ca4..000000000000 --- a/modules/installing-ocp-agent-download.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * installing-with-agent/installing-with-agent.adoc - -:_content-type: PROCEDURE -[id="installing-ocp-agent-retrieve_{context}"] -= Downloading the Agent-based Installer - -.Procedure - -Use this procedure to download the Agent-based Installer and the CLI needed for your installation. - -. Log in to the {product-title} web console using your login credentials. - -. Navigate to link:https://console.redhat.com/openshift/create/datacenter[Datacenter]. - -. Click *Run Agent-based Installer locally*. - -. Select the operating system and architecture for the *OpenShift Installer* and *Command line interface*. - -. Click *Download Installer* to download and extract the install program. - -. 
You can either download or copy the pull secret by clicking on *Download pull secret* or *Copy pull secret*. - -. Click *Download command-line tools* and place the `openshift-install` binary in a directory that is on your `PATH`. diff --git a/modules/installing-ocp-agent-tui.adoc b/modules/installing-ocp-agent-tui.adoc deleted file mode 100644 index 4d21bbc7476b..000000000000 --- a/modules/installing-ocp-agent-tui.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * installing-with-agent/installing-with-agent.adoc - -:_content-type: PROCEDURE -[id="installing-ocp-agent-tui_{context}"] -= Verifying that the current installation host can pull release images - -After you boot the agent image and network services are made available to the host, the agent console application performs a pull check to verify that the current host can retrieve release images. - -If the primary pull check passes, you can quit the application to continue with the installation. If the pull check fails, the application performs additional checks, as seen in the `Additional checks` section of the TUI, to help you troubleshoot the problem. A failure for any of the additional checks is not necessarily critical as long as the primary pull check succeeds. - -If there are host network configuration issues that might cause an installation to fail, you can use the console application to make adjustments to your network configurations. - -[IMPORTANT] -==== -If the agent console application detects host network configuration issues, the installation workflow will be halted until the user manually stops the console application and signals the intention to proceed. -==== - -.Procedure - -. Wait for the agent console application to check whether or not the configured release image can be pulled from a registry. - -. If the agent console application states that the installer connectivity checks have passed, wait for the prompt to time out to continue with the installation. -+ -[NOTE] -==== -You can still choose to view or change network configuration settings even if the connectivity checks have passed. - -However, if you choose to interact with the agent console application rather than letting it time out, you must manually quit the TUI to proceed with the installation. -==== - -. If the agent console application checks have failed, which is indicated by a red icon beside the `Release image URL` pull check, use the following steps to reconfigure the host's network settings: - -.. Read the `Check Errors` section of the TUI. -This section displays error messages specific to the failed checks. -+ -image::agent-tui-home.png[The home screen of the agent console application displaying check errors, indicating a failed check] - -.. Select *Configure network* to launch the NetworkManager TUI. - -.. Select *Edit a connection* and select the connection you want to reconfigure. - -.. Edit the configuration and select *OK* to save your changes. - -.. Select *Back* to return to the main screen of the NetworkManager TUI. - -.. Select *Activate a Connection*. - -.. Select the reconfigured network to deactivate it. - -.. Select the reconfigured network again to reactivate it. - -.. Select *Back* and then select *Quit* to return to the agent console application. - -.. Wait at least five seconds for the continuous network checks to restart using the new network configuration. - -.. 
If the `Release image URL` pull check succeeds and displays a green icon beside the URL, select *Quit* to exit the agent console application and continue with the installation. diff --git a/modules/installing-ocp-agent-verify.adoc b/modules/installing-ocp-agent-verify.adoc deleted file mode 100644 index b2eda98b065b..000000000000 --- a/modules/installing-ocp-agent-verify.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * installing-with-agent/installing-with-agent.adoc - -:_content-type: PROCEDURE -[id="installing-ocp-agent-verify_{context}"] -= Tracking and verifying installation progress - -Use the following procedure to track installation progress and to verify a successful installation. - -.Procedure - -. Optional: To know when the bootstrap host (rendezvous host) reboots, run the following command: - -+ -[source,terminal] ----- -$ ./openshift-install --dir <install_directory> agent wait-for bootstrap-complete \ <1> - --log-level=info <2> ----- -<1> For `<install_directory>`, specify the path to the directory where the agent ISO was generated. -<2> To view different installation details, specify `warn`, `debug`, or `error` instead of `info`. - -+ -.Example output -[source,terminal] ----- -................................................................... -................................................................... -INFO Bootstrap configMap status is complete -INFO cluster bootstrap is complete ----- -+ -The command succeeds when the Kubernetes API server signals that it has been bootstrapped on the control plane machines. - -. To track the progress and verify successful installation, run the following command: -+ -[source,terminal] ----- -$ openshift-install --dir <install_directory> agent wait-for install-complete <1> ----- -<1> For `<install_directory>` directory, specify the path to the directory where the agent ISO was generated. - -+ -.Example output -[source,terminal] ----- -................................................................... -................................................................... -INFO Cluster is installed -INFO Install complete! -INFO To access the cluster as the system:admin user when using 'oc', run -INFO export KUBECONFIG=/home/core/installer/auth/kubeconfig -INFO Access the OpenShift web-console here: https://console-openshift-console.apps.sno-cluster.test.example.com ----- - - -[NOTE] -==== -If you are using the optional method of {ztp} manifests, you can configure IP address endpoints for cluster nodes through the `AgentClusterInstall.yaml` file in three ways: - -* IPv4 -* IPv6 -* IPv4 and IPv6 in parallel (dual-stack) - -IPv6 is supported only on bare metal platforms. 
-==== -.Example of dual-stack networking -[source,yaml] ----- -apiVIP: 192.168.11.3 -ingressVIP: 192.168.11.4 -clusterDeploymentRef: - name: mycluster -imageSetRef: - name: openshift-4.13 -networking: - clusterNetwork: - - cidr: 172.21.0.0/16 - hostPrefix: 23 - - cidr: fd02::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 192.168.11.0/16 - - cidr: 2001:DB8::/32 - serviceNetwork: - - 172.22.0.0/16 - - fd03::/112 - networkType: OVNKubernetes ----- diff --git a/modules/installing-rhv-accessing-ocp-web-console.adoc b/modules/installing-rhv-accessing-ocp-web-console.adoc deleted file mode 100644 index 35d86db327dc..000000000000 --- a/modules/installing-rhv-accessing-ocp-web-console.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-customizations.adoc - -:_content-type: PROCEDURE -[id="installing-rhv-accessing-ocp-web-console_{context}"] -= Accessing the {product-title} web console on {rh-virtualization} - -After the {product-title} cluster initializes, you can log in to the {product-title} web console. - -.Procedure -. Optional: In the {rh-virtualization-first} Administration Portal, open *Compute* -> *Cluster*. -. Verify that the installation program creates the virtual machines. -. Return to the command line where the installation program is running. When the installation program finishes, it displays the user name and temporary password for logging into the {product-title} web console. -. In a browser, open the URL of the {product-title} web console. The URL uses this format: -+ ----- -console-openshift-console.apps.<clustername>.<basedomain> <1> ----- -<1> For `<clustername>.<basedomain>`, specify the cluster name and base domain. -+ -For example: -+ ----- -console-openshift-console.apps.my-cluster.virtlab.example.com ----- diff --git a/modules/installing-rhv-example-install-config-yaml.adoc b/modules/installing-rhv-example-install-config-yaml.adoc deleted file mode 100644 index ed5c0fe71e0b..000000000000 --- a/modules/installing-rhv-example-install-config-yaml.adoc +++ /dev/null @@ -1,242 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc - -[id="installing-rhv-example-install-config-yaml_{context}"] -= Example `install-config.yaml` files for {rh-virtualization-first} - -You can customize the {product-title} cluster the installation program creates by changing the parameters and parameter values in the `install-config.yaml` file. - -The following examples are specific to installing {product-title} on {rh-virtualization}. - -`install-config.yaml` is located in `<installation_directory>`, which you specified when you ran the following command. -[source,terminal] ----- -$ ./openshift-install create install-config --dir <installation_directory> ----- - -[NOTE] -==== -* These example files are provided for reference only. You must obtain your -`install-config.yaml` file by using the installation program. -* Changing the `install-config.yaml` file can increase the resources your cluster requires. Verify that your {rh-virtualization} environment has those additional resources. Otherwise, the installation or cluster will fail. 
-==== - -[discrete] -== Example default `install-config.yaml` file - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: - ovirt: - sparse: false <1> - format: raw <2> - replicas: 3 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: - ovirt: - sparse: false <1> - format: raw <2> - replicas: 3 -metadata: - creationTimestamp: null - name: my-cluster -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - machineNetwork: - - cidr: 10.0.0.0/16 - networkType: OVNKubernetes <3> - serviceNetwork: - - 172.30.0.0/16 -platform: - ovirt: - api_vips: - - 10.0.0.10 - ingress_vips: - - 10.0.0.11 - ovirt_cluster_id: 68833f9f-e89c-4891-b768-e2ba0815b76b - ovirt_storage_domain_id: ed7b0f4e-0e96-492a-8fff-279213ee1468 - ovirt_network_name: ovirtmgmt - vnicProfileID: 3fa86930-0be5-4052-b667-b79f0a729692 -publish: External -pullSecret: '{"auths": ...}' -sshKey: ssh-ed12345 AAAA... ----- - -<1> Setting this option to `false` enables preallocation of disks. The default is `true`. Setting `sparse` to `true` with `format` set to `raw` is not available for block storage domains. The `raw` format writes the entire virtual disk to the underlying physical disk. -+ -[NOTE] -==== -Preallocating disks on file storage domains writes zeroes to the file. This might not actually preallocate disks depending on the underlying storage. -==== -<2> Can be set to `cow` or `raw`. The default is `cow`. The `cow` format is optimized for virtual machines. -<3> The cluster network plugin to install. The supported values are `OVNKubernetes` and `OpenShiftSDN`. The default value is `OVNKubernetes`. - -[NOTE] -==== -In {product-title} 4.12 and later, the `api_vip` and `ingress_vip` configuration settings are deprecated. Instead, use a list format to enter values in the `api_vips` and `ingress_vips` configuration settings. -==== - -[discrete] -== Example minimal `install-config.yaml` file - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -metadata: - name: test-cluster -platform: - ovirt: - api_vips: - - 10.46.8.230 - ingress_vips: - - 10.46.8.232 - ovirt_cluster_id: 68833f9f-e89c-4891-b768-e2ba0815b76b - ovirt_storage_domain_id: ed7b0f4e-0e96-492a-8fff-279213ee1468 - ovirt_network_name: ovirtmgmt - vnicProfileID: 3fa86930-0be5-4052-b667-b79f0a729692 -pullSecret: '{"auths": ...}' -sshKey: ssh-ed12345 AAAA... ----- - -[NOTE] -==== -In {product-title} 4.12 and later, the `api_vip` and `ingress_vip` configuration settings are deprecated. Instead, use a list format to enter values in the `api_vips` and `ingress_vips` configuration settings. -==== - -[discrete] -== Example Custom machine pools in an `install-config.yaml` file - -[source,yaml] ----- -apiVersion: v1 -baseDomain: example.com -controlPlane: - name: master - platform: - ovirt: - cpu: - cores: 4 - sockets: 2 - memoryMB: 65536 - osDisk: - sizeGB: 100 - vmType: server - replicas: 3 -compute: -- name: worker - platform: - ovirt: - cpu: - cores: 4 - sockets: 4 - memoryMB: 65536 - osDisk: - sizeGB: 200 - vmType: server - replicas: 5 -metadata: - name: test-cluster -platform: - ovirt: - api_vips: - - 10.46.8.230 - ingress_vips: - - 10.46.8.232 - ovirt_cluster_id: 68833f9f-e89c-4891-b768-e2ba0815b76b - ovirt_storage_domain_id: ed7b0f4e-0e96-492a-8fff-279213ee1468 - ovirt_network_name: ovirtmgmt - vnicProfileID: 3fa86930-0be5-4052-b667-b79f0a729692 -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... 
----- - -[NOTE] -==== -In {product-title} 4.12 and later, the `api_vip` and `ingress_vip` configuration settings are deprecated. Instead, use a list format to enter values in the `api_vips` and `ingress_vips` configuration settings. -==== - -[discrete] -== Example non-enforcing affinity group - -It is recommended to add a non-enforcing affinity group to distribute the control plane and workers, if possible, to use as much of the cluster as possible. - -[source,yaml] ----- -platform: - ovirt: - affinityGroups: - - description: AffinityGroup to place each compute machine on a separate host - enforcing: true - name: compute - priority: 3 - - description: AffinityGroup to place each control plane machine on a separate host - enforcing: true - name: controlplane - priority: 5 - - description: AffinityGroup to place worker nodes and control plane nodes on separate hosts - enforcing: false - name: openshift - priority: 5 -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: - ovirt: - affinityGroupsNames: - - compute - - openshift - replicas: 3 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: - ovirt: - affinityGroupsNames: - - controlplane - - openshift - replicas: 3 ----- - -[discrete] -== Example removing all affinity groups for a non-production lab setup - -For non-production lab setups, you must remove all affinity groups to concentrate the {product-title} cluster on the few hosts you have. - -[source,yaml] ----- -platform: - ovirt: - affinityGroups: [] -compute: -- architecture: amd64 - hyperthreading: Enabled - name: worker - platform: - ovirt: - affinityGroupsNames: [] - replicas: 3 -controlPlane: - architecture: amd64 - hyperthreading: Enabled - name: master - platform: - ovirt: - affinityGroupsNames: [] - replicas: 3 ----- diff --git a/modules/installing-rhv-insecure-mode.adoc b/modules/installing-rhv-insecure-mode.adoc deleted file mode 100644 index a62b0117c593..000000000000 --- a/modules/installing-rhv-insecure-mode.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc - -:_content-type: PROCEDURE -[id="installing-rhv-insecure-mode_{context}"] -= Installing {product-title} on {rh-virtualization} in insecure mode - -By default, the installer creates a CA certificate, prompts you for confirmation, and stores the certificate to use during installation. You do not need to create or install one manually. - -Although it is not recommended, you can override this functionality and install {product-title} without verifying a certificate by installing {product-title} on {rh-virtualization} in *insecure* mode. - -[WARNING] -==== -Installing in *insecure* mode is not recommended, because it enables a potential attacker to perform a Man-in-the-Middle attack and capture sensitive credentials on the network. -==== - -.Procedure - -. Create a file named `~/.ovirt/ovirt-config.yaml`. - -. 
Add the following content to `ovirt-config.yaml`: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -ovirt_url: https://ovirt.example.com/ovirt-engine/api <1> -ovirt_fqdn: ovirt.example.com <2> -ovirt_pem_url: "" -ovirt_username: ocpadmin@internal -ovirt_password: super-secret-password <3> -ovirt_insecure: true ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -ovirt_url: https://ovirt.example.com/ovirt-engine/api <1> -ovirt_fqdn: ovirt.example.com <2> -ovirt_pem_url: "" -ovirt_username: admin@internal -ovirt_password: super-secret-password <3> -ovirt_insecure: true ----- -endif::openshift-origin[] -<1> Specify the hostname or address of your oVirt engine. -<2> Specify the fully qualified domain name of your oVirt engine. -<3> Specify the admin password for your oVirt engine. - -. Run the installer. diff --git a/modules/installing-rhv-network-infrastructure-configuration-upi.adoc b/modules/installing-rhv-network-infrastructure-configuration-upi.adoc deleted file mode 100644 index e57c140a689d..000000000000 --- a/modules/installing-rhv-network-infrastructure-configuration-upi.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc - -[id="installing-rhv-network-infrastructure-configuration-upi_{context}"] -= Network infrastructure configuration for installing {product-title} on {rh-virtualization-first} - -Before installing {product-title}, configure your network environment to meet the following requirements. - -When they boot, virtual machines must have IP addresses to get the Ignition config files. Consider configuring DHCP to provide persistent IP addresses and hostnames to the cluster machines. -// TBD - Day 0 versus day 2? Alternatives? - -.Firewall - -Configure your firewall so your cluster has access to required sites. - -.Network connectivity - -// TBD - What do we mean by "machine" here? Where do we configure this? Can this be done at this stage in the process? -Configure your network to enable the following connections: - -* Grant every machine access to every other machine on ports `30000`-`32767`. This provides connectivity to {product-title} components. - -* Grant every machine access to reserved ports `10250`-`10259` and `9000`-`9999`. - -* Grant every machine access on ports `2379`-`2380`. This provides access to etcd, peers, and metrics on the control plane. - -* Grant every machine access to the Kubernetes API on port `6443`. - -.Load balancers - -Configure one or two (preferred) layer-4 load balancers: - -* Provide load balancing for ports `6443` and `22623` on the control plane and bootstrap machines. Port `6443` provides access to the Kubernetes API server and must be reachable both internally and externally. Port `22623` must be accessible to nodes within the cluster. - -* Provide load balancing for ports `443` and `80` for machines that run the Ingress router, which are usually worker nodes in the default configuration. Both ports must be accessible from within and outside the cluster. - -.DNS - -Configure infrastructure-provided DNS to allow the correct resolution of the main components and services. If you use only one load balancer, these DNS records can point to the same IP address. - -* Create DNS records for `api.<cluster_name>.<base_domain>` (internal and external resolution) and `api-int.<cluster_name>.<base_domain>` (internal resolution) that point to the load balancer for the control plane machines.
- -* Create a DNS record for `*.apps.<cluster_name>.<base_domain>` that points to the load balancer for the Ingress router. For example, ports `443` and `80` of the compute machines. diff --git a/modules/installing-rhv-preparing-network-environment.adoc b/modules/installing-rhv-preparing-network-environment.adoc deleted file mode 100644 index d2e6fd2b1aa8..000000000000 --- a/modules/installing-rhv-preparing-network-environment.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc - -:_content-type: PROCEDURE -[id="installing-rhv-preparing-network-environment_{context}"] -= Preparing the network environment on {rh-virtualization} - -Configure two static IP addresses for the {product-title} cluster and create DNS entries using these addresses. - -.Procedure - -. Reserve two static IP addresses -.. On the network where you plan to install {product-title}, identify two static IP addresses that are outside the DHCP lease pool. -.. Connect to a host on this network and verify that each of the IP addresses is not in use. For example, use Address Resolution Protocol (ARP) to check that none of the IP addresses have entries: -+ -[source,terminal] ----- -$ arp 10.35.1.19 ----- -+ -.Example output -[source,terminal] ----- -10.35.1.19 (10.35.1.19) -- no entry ----- - -.. Reserve two static IP addresses following the standard practices for your network environment. -.. Record these IP addresses for future reference. - -. Create DNS entries for the {product-title} REST API and apps domain names using this format: -+ -[source,dns] ----- -api.<cluster-name>.<base-domain> <ip-address> <1> -*.apps.<cluster-name>.<base-domain> <ip-address> <2> ----- -<1> For `<cluster-name>`, `<base-domain>`, and `<ip-address>`, specify the cluster name, base domain, and static IP address of your {product-title} API. -<2> Specify the cluster name, base domain, and static IP address of your {product-title} apps for Ingress and the load balancer. -+ -For example: -+ -[source,dns] ----- -api.my-cluster.virtlab.example.com 10.35.1.19 -*.apps.my-cluster.virtlab.example.com 10.35.1.20 ----- diff --git a/modules/installing-rhv-requirements.adoc b/modules/installing-rhv-requirements.adoc deleted file mode 100644 index 57ca048af5ef..000000000000 --- a/modules/installing-rhv-requirements.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc - -[id="installing-rhv-requirements_{context}"] -= Requirements for the {rh-virtualization} environment - -To install and run an {product-title} version {product-version} cluster, the {rh-virtualization} environment must meet the following requirements. - -Not meeting these requirements can cause the installation or process to fail. Additionally, not meeting these requirements can cause the {product-title} cluster to fail days or weeks after installation. - -The following requirements for CPU, memory, and storage resources are based on *default* values multiplied by the default number of virtual machines the installation program creates. 
These resources must be available *in addition to* what the {rh-virtualization} environment uses for non-{product-title} operations. - -By default, the installation program creates seven virtual machines during the installation process. First, it creates a bootstrap virtual machine to provide temporary services and a control plane while it creates the rest of the {product-title} cluster. When the installation program finishes creating the cluster, deleting the bootstrap machine frees up its resources. - -If you increase the number of virtual machines in the {rh-virtualization} environment, you must increase the resources accordingly. - -.Requirements - -* The {rh-virtualization} version is 4.4. -* The {rh-virtualization} environment has one data center whose state is *Up*. -* The {rh-virtualization} data center contains an {rh-virtualization} cluster. -* The {rh-virtualization} cluster has the following resources exclusively for the {product-title} cluster: -** Minimum 28 vCPUs: four for each of the seven virtual machines created during installation. -** 112 GiB RAM or more, including: -*** 16 GiB or more for the bootstrap machine, which provides the temporary control plane. -*** 16 GiB or more for each of the three control plane machines which provide the control plane. -*** 16 GiB or more for each of the three compute machines, which run the application workloads. -* The {rh-virtualization} storage domain must meet link:https://access.redhat.com/solutions/4770281[these etcd backend performance requirements]. -ifeval::["{context}" == "installing-rhv-default"] -* For affinity group support: -Three or more hosts in the {rh-virtualization} cluster. If necessary, you can disable affinity groups. For details, see _Example: Removing all affinity groups for a non-production lab setup_ in _Installing a cluster on {rh-virtualization} with customizations_ -endif::[] -ifeval::["{context}" == "installing-rhv-customizations"] -* For affinity group support: -+ -One physical machine per worker or control plane. Workers and control planes can be on the same physical machine. For example, if you have three workers and three control planes, you need three physical machines. If you have four workers and three control planes, you need four physical machines. - -** For hard anti-affinity (default): A minimum of three physical machines. For more than three worker nodes, one physical machine per worker or control plane. Workers and control planes can be on the same physical machine. -** For custom affinity groups: Ensure that the resources are appropriate for the affinity group rules that you define. -//// -** Production setup: For hard anti-affinity, you need a minimum of three physical machines. For more than three worker nodes, one physical machine per worker or control plane. Workers and control planes can be on the same physical machine. For example, if you have three workers and three control planes, you need three physical machines. If you have four workers and three control planes, you need four physical machines. -** Non-production setup, such as a lab: Remove all affinity groups to enable putting multiple workers or control planes on as few physical machines as possible. This setup does not guarantee redundancy so it is not appropriate for production. -//// -endif::[] -* In production environments, each virtual machine must have 120 GiB or more. Therefore, the storage domain must provide 840 GiB or more for the default {product-title} cluster. 
In resource-constrained or non-production environments, each virtual machine must have 32 GiB or more, so the storage domain must have 230 GiB or more for the default {product-title} cluster. -* To download images from the Red Hat Ecosystem Catalog during installation and update procedures, the {rh-virtualization} cluster must have access to an internet connection. The Telemetry service also needs an internet connection to simplify the subscription and entitlement process. -// TBD - What about the disconnected installation alternative? -* The {rh-virtualization} cluster must have a virtual network with access to the REST API on the {rh-virtualization} {rh-virtualization-engine-name}. Ensure that DHCP is enabled on this network, because the VMs that the installer creates obtain their IP address by using DHCP. -* A user account and group with the following least privileges for installing and managing an {product-title} cluster on the target {rh-virtualization} cluster: -** `DiskOperator` -** `DiskCreator` -** `UserTemplateBasedVm` -** `TemplateOwner` -** `TemplateCreator` -** `ClusterAdmin` on the target cluster - - -[WARNING] -==== -Apply the principle of least privilege: Avoid using an administrator account with `SuperUser` privileges on {rh-virtualization} during the installation process. The installation program saves the credentials you provide to a temporary `ovirt-config.yaml` file that might be compromised. -==== diff --git a/modules/installing-rhv-setting-up-ca-certificate.adoc b/modules/installing-rhv-setting-up-ca-certificate.adoc deleted file mode 100644 index c51b1f93722f..000000000000 --- a/modules/installing-rhv-setting-up-ca-certificate.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installing-rhv-setting-up-ca-certificate_{context}"] -= Setting up the CA certificate for {rh-virtualization} - -Download the CA certificate from the {rh-virtualization-first} Manager and set it up on the installation machine. - -You can download the certificate from a webpage on the {rh-virtualization} {rh-virtualization-engine-name} or by using a `curl` command. - -Later, you provide the certificate to the installation program. - -.Procedure - -. Use either of these two methods to download the CA certificate: -** Go to the {rh-virtualization-engine-name}'s webpage, `\https://<engine-fqdn>/ovirt-engine/`. Then, under *Downloads*, click the *CA Certificate* link. -** Run the following command: -+ -[source,terminal] ----- -$ curl -k 'https://<engine-fqdn>/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA' -o /tmp/ca.pem <1> ----- -<1> For `<engine-fqdn>`, specify the fully qualified domain name of the {rh-virtualization} {rh-virtualization-engine-name}, such as `rhv-env.virtlab.example.com`. - -. Configure the CA file to grant rootless user access to the {rh-virtualization-engine-name}. Set the CA file permissions to have an octal value of `0644` (symbolic value: `-rw-r--r--`): -+ -[source,terminal] ----- -$ sudo chmod 0644 /tmp/ca.pem ----- -. For Linux, copy the CA certificate to the directory for server certificates. Use `-p` to preserve the permissions: -+ -[source,terminal] ----- -$ sudo cp -p /tmp/ca.pem /etc/pki/ca-trust/source/anchors/ca.pem ----- -. 
Add the certificate to the certificate manager for your operating system: -** For macOS, double-click the certificate file and use the *Keychain Access* utility to add the file to the *System* keychain. -** For Linux, update the CA trust: -+ -[source,terminal] ----- -$ sudo update-ca-trust ----- -+ -[NOTE] -==== -If you use your own certificate authority, make sure the system trusts it. -==== - -[role="_additional-resources"] -.Additional resources -*To learn more, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.0/html/rest_api_guide/documents-002_authentication_and_security[Authentication and Security] in the {rh-virtualization} documentation. diff --git a/modules/installing-rhv-setting-up-installation-machine.adoc b/modules/installing-rhv-setting-up-installation-machine.adoc deleted file mode 100644 index 10f0ba4b4c3c..000000000000 --- a/modules/installing-rhv-setting-up-installation-machine.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-user-infra.adoc -// * installing/installing-rhv-restricted-network.adoc - -:_content-type: PROCEDURE -[id="installing-rhv-setting-up-installation-machine_{context}"] -= Setting up the installation machine - - -To run the binary `openshift-install` installation program and Ansible scripts, set up the {rh-virtualization} {rh-virtualization-engine-name} or an {op-system-base-full} computer with network access to the {rh-virtualization} environment and the REST API on the {rh-virtualization-engine-name}. - -// The following steps include creating an `ASSETS_DIR` environment variable, which the installation program uses to create a directory of asset files. Later, the installation process reuses this variable to locate these asset files. - -.Procedure - -. Update or install Python3 and Ansible. For example: -+ -[source,terminal] ----- -# dnf update python3 ansible ----- - -. link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html/python_sdk_guide/chap-overview#Installing_the_Software_Development_Kit[Install the `python3-ovirt-engine-sdk4` package] to get the Python Software Development Kit. - -. Install the `ovirt.image-template` Ansible role. On the {rh-virtualization} {rh-virtualization-engine-name} and other {op-system-base-full} machines, this role is distributed as the `ovirt-ansible-image-template` package. For example, enter: -+ -[source,terminal] ----- -# dnf install ovirt-ansible-image-template ----- - -. Install the `ovirt.vm-infra` Ansible role. On the {rh-virtualization} {rh-virtualization-engine-name} and other {op-system-base} machines, this role is distributed as the `ovirt-ansible-vm-infra` package. -+ -[source,terminal] ----- -# dnf install ovirt-ansible-vm-infra ----- - -. Create an environment variable and assign an absolute or relative path to it. For example, enter: -+ -[source,terminal] ----- -$ export ASSETS_DIR=./wrk ----- -+ -[NOTE] -==== -The installation program uses this variable to create a directory where it saves important installation-related files. Later, the installation process reuses this variable to locate those asset files. Avoid deleting this assets directory; it is required for uninstalling the cluster. 
-==== diff --git a/modules/installing-rhv-verifying-rhv-environment.adoc b/modules/installing-rhv-verifying-rhv-environment.adoc deleted file mode 100644 index d5bbe488c525..000000000000 --- a/modules/installing-rhv-verifying-rhv-environment.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_rhv/installing-rhv-customizations.adoc -// * installing/installing_rhv/installing-rhv-default.adoc -// * installing/installing_rhv/installing-rhv-restricted-network.adoc - - -:_content-type: PROCEDURE -[id="installing-rhv-verifying-rhv-environment_{context}"] -= Verifying the requirements for the {rh-virtualization} environment - -Verify that the {rh-virtualization} environment meets the requirements to install and run an {product-title} cluster. Not meeting these requirements can cause failures. - -[IMPORTANT] -==== -These requirements are based on the default resources the installation program uses to create control plane and compute machines. These resources include vCPUs, memory, and storage. If you change these resources or increase the number of {product-title} machines, adjust these requirements accordingly. -==== - -.Procedure - -. Check that the {rh-virtualization} version supports installation of {product-title} version {product-version}. -.. In the {rh-virtualization} Administration Portal, click the *?* help icon in the upper-right corner and select *About*. -.. In the window that opens, make a note of the **{rh-virtualization} Software Version**. -.. Confirm that the {rh-virtualization} version is 4.4. For more information about supported version combinations, see link:https://access.redhat.com/articles/5485861[Support Matrix for {product-title} on {rh-virtualization}]. - -. Inspect the data center, cluster, and storage. -.. In the {rh-virtualization} Administration Portal, click *Compute* -> *Data Centers*. -.. Confirm that the data center where you plan to install {product-title} is accessible. -.. Click the name of that data center. -.. In the data center details, on the *Storage* tab, confirm the storage domain where you plan to install {product-title} is *Active*. -.. Record the *Domain Name* for use later on. -.. Confirm *Free Space* has at least 230 GiB. -.. Confirm that the storage domain meets link:https://access.redhat.com/solutions/4770281[these etcd backend performance requirements], which you link:https://access.redhat.com/solutions/3780861[can measure by using the fio performance benchmarking tool]. -.. In the data center details, click the *Clusters* tab. -.. Find the {rh-virtualization} cluster where you plan to install {product-title}. Record the cluster name for use later on. - -. Inspect the {rh-virtualization} host resources. -.. In the {rh-virtualization} Administration Portal, click *Compute > Clusters*. -.. Click the cluster where you plan to install {product-title}. -.. In the cluster details, click the *Hosts* tab. -.. Inspect the hosts and confirm they have a combined total of at least 28 *Logical CPU Cores* available _exclusively_ for the {product-title} cluster. -.. Record the number of available *Logical CPU Cores* for use later on. -.. Confirm that these CPU cores are distributed so that each of the seven virtual machines created during installation can have four cores. -.. 
Confirm that, all together, the hosts have 112 GiB of *Max free Memory for scheduling new virtual machines* distributed to meet the requirements for each of the following {product-title} machines: -** 16 GiB required for the bootstrap machine -** 16 GiB required for each of the three control plane machines -** 16 GiB for each of the three compute machines -.. Record the amount of *Max free Memory for scheduling new virtual machines* for use later on. -+ -. Verify that the virtual network for installing {product-title} has access to the {rh-virtualization} {rh-virtualization-engine-name}'s REST API. From a virtual machine on this network, use curl to reach the {rh-virtualization} {rh-virtualization-engine-name}'s REST API: -+ -[source,terminal] ----- -$ curl -k -u <username>@<profile>:<password> \ <1> -https://<engine-fqdn>/ovirt-engine/api <2> ----- -<1> For `<username>`, specify the user name of an {rh-virtualization} account with privileges to create and manage an {product-title} cluster on {rh-virtualization}. For `<profile>`, specify the login profile, which you can get by going to the {rh-virtualization} Administration Portal login page and reviewing the *Profile* dropdown list. For `<password>`, specify the password for that user name. -<2> For `<engine-fqdn>`, specify the fully qualified domain name of the {rh-virtualization} environment. -+ -For example: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ curl -k -u ocpadmin@internal:pw123 \ -https://rhv-env.virtlab.example.com/ovirt-engine/api ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ curl -k -u admin@internal:pw123 \ -https://ovirtlab.example.com/ovirt-engine/api ----- -endif::openshift-origin[] diff --git a/modules/installing-sno-requirements-for-installing-single-node-openshift.adoc b/modules/installing-sno-requirements-for-installing-single-node-openshift.adoc deleted file mode 100644 index fe39a1513f3a..000000000000 --- a/modules/installing-sno-requirements-for-installing-single-node-openshift.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// This is included in the following assemblies: -// -// installing_sno/install-sno-preparing-to-install-sno.adoc - -[id="installing-sno-requirements-for-installing-single-node-openshift_{context}"] -= Requirements for installing OpenShift on a single node - -Installing {product-title} on a single node alleviates some of the requirements of high availability and large scale clusters. However, you must address the following requirements: - -* *Administration host:* You must have a computer to prepare the ISO, to create the USB boot drive, and to monitor the installation. - -* *Production-grade server:* Installing {product-title} on a single node requires a server with sufficient resources to run {product-title} services and a production workload. -+ -.Minimum resource requirements -[options="header"] -|==== -|Profile|vCPU|Memory|Storage -|Minimum|8 vCPU cores|32GB of RAM| 120GB -|==== -+ -[NOTE] -==== -One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or hyperthreading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: - -(threads per core × cores) × sockets = vCPUs -==== -+ -The server must have a Baseboard Management Controller (BMC) when booting with virtual media. - -* *Networking:* The server must have access to the internet or access to a local registry if it is not connected to a routable network. 
The server must have a DHCP reservation or a static IP address for the Kubernetes API, Ingress route, and cluster node domain names. You must configure the DNS to resolve the IP address to each of the following fully qualified domain names (FQDN): -+ -.Required DNS records -[options="header"] -|==== -|Usage|FQDN|Description -|Kubernetes API|`api.<cluster_name>.<base_domain>`| Add a DNS A/AAAA or CNAME record. This record must be resolvable by clients external to the cluster. -|Internal API|`api-int.<cluster_name>.<base_domain>`| Add a DNS A/AAAA or CNAME record when creating the ISO manually. This record must be resolvable by nodes within the cluster. -|Ingress route|`*.apps.<cluster_name>.<base_domain>`| Add a wildcard DNS A/AAAA or CNAME record that targets the node. This record must be resolvable by clients external to the cluster. -|Cluster node|`<hostname>.<cluster_name>.<base_domain>`| Add a DNS A/AAAA or CNAME record and DNS PTR record to identify the node. -|==== -+ -Without persistent IP addresses, communications between the `apiserver` and `etcd` might fail. diff --git a/modules/installing-with-usb-media.adoc b/modules/installing-with-usb-media.adoc deleted file mode 100644 index 6ddc31e58025..000000000000 --- a/modules/installing-with-usb-media.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// This is included in the following assemblies: -// -// installing_on_prem_assisted/assisted-installer-installation.adoc - -:_content-type: PROCEDURE -[id="installing-with-usb-media_{context}"] -= Creating an ISO image on a USB drive - -You can install software using a USB drive that contains an ISO image. Starting the server with the USB drive prepares the server for the software installation. - -.Procedure - -. On the administration host, insert a USB drive into a USB port. - -. Create an ISO image on the USB drive, for example: -+ -[source,terminal] ----- -# dd if=<path_to_iso> of=<path_to_usb> status=progress ----- -+ -where: -+ -<path_to_iso>:: is the relative path to the downloaded ISO file, for example, `rhcos-live.iso`. -<path_to_usb>:: is the location of the connected USB drive, for example, `/dev/sdb`. -+ -After the ISO is copied to the USB drive, you can use the USB drive to install software on the server. diff --git a/modules/installing-wmco-using-cli.adoc b/modules/installing-wmco-using-cli.adoc deleted file mode 100644 index 26c524e81e39..000000000000 --- a/modules/installing-wmco-using-cli.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/enabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="installing-wmco-using-cli_{context}"] -= Installing the Windows Machine Config Operator using the CLI - -You can use the OpenShift CLI (`oc`) to install the Windows Machine Config Operator (WMCO). - -[NOTE] -==== -Dual NIC is not supported on WMCO-managed Windows instances. -==== - -.Procedure - -. Create a namespace for the WMCO. - -.. Create a `Namespace` object YAML file for the WMCO. For example, `wmco-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-windows-machine-config-operator <1> - labels: - openshift.io/cluster-monitoring: "true" <2> ----- -<1> It is recommended to deploy the WMCO in the `openshift-windows-machine-config-operator` namespace. -<2> This label is required for enabling cluster monitoring for the WMCO. - -.. 
Create the namespace: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f wmco-namespace.yaml ----- - -. Create the Operator group for the WMCO. - -.. Create an `OperatorGroup` object YAML file. For example, `wmco-og.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: windows-machine-config-operator - namespace: openshift-windows-machine-config-operator -spec: - targetNamespaces: - - openshift-windows-machine-config-operator ----- - -.. Create the Operator group: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f wmco-og.yaml ----- - -. Subscribe the namespace to the WMCO. - -.. Create a `Subscription` object YAML file. For example, `wmco-sub.yaml`: -+ -[source,yaml, subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: windows-machine-config-operator - namespace: openshift-windows-machine-config-operator -spec: - channel: "stable" <1> - installPlanApproval: "Automatic" <2> - name: "windows-machine-config-operator" - source: "redhat-operators" <3> - sourceNamespace: "openshift-marketplace" <4> ----- -<1> Specify `stable` as the channel. -<2> Set an approval strategy. You can set `Automatic` or `Manual`. -<3> Specify the `redhat-operators` catalog source, which contains the `windows-machine-config-operator` package manifests. If your {product-title} is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object you created when you configured the Operator LifeCycle Manager (OLM). -<4> Namespace of the catalog source. Use `openshift-marketplace` for the default OperatorHub catalog sources. - -.. Create the subscription: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f wmco-sub.yaml ----- -+ -The WMCO is now installed to the `openshift-windows-machine-config-operator`. - -. Verify the WMCO installation: -+ -[source,terminal] ----- -$ oc get csv -n openshift-windows-machine-config-operator ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -windows-machine-config-operator.2.0.0 Windows Machine Config Operator 2.0.0 Succeeded ----- diff --git a/modules/installing-wmco-using-web-console.adoc b/modules/installing-wmco-using-web-console.adoc deleted file mode 100644 index ecedd07973dc..000000000000 --- a/modules/installing-wmco-using-web-console.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * windows_containers/enabling-windows-container-workloads.adoc - -:_content-type: PROCEDURE -[id="installing-wmco-using-web-console_{context}"] -= Installing the Windows Machine Config Operator using the web console - -You can use the {product-title} web console to install the Windows Machine Config Operator (WMCO). - -[NOTE] -==== -Dual NIC is not supported on WMCO-managed Windows instances. -==== - -.Procedure - -. From the *Administrator* perspective in the {product-title} web console, navigate to the *Operators -> OperatorHub* page. - -. Use the *Filter by keyword* box to search for `Windows Machine Config Operator` in the catalog. Click the *Windows Machine Config Operator* tile. - -. Review the information about the Operator and click *Install*. - -. On the *Install Operator* page: - -.. 
Select the *stable* channel as the *Update Channel*. The *stable* channel enables the latest stable release of the WMCO to be installed. - -.. The *Installation Mode* is preconfigured because the WMCO must be available in a single namespace only. - -.. Choose the *Installed Namespace* for the WMCO. The default Operator recommended namespace is `openshift-windows-machine-config-operator`. - -.. Click the *Enable Operator recommended cluster monitoring on the Namespace* checkbox to enable cluster monitoring for the WMCO. - -.. Select an *Approval Strategy*. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. - -//TODO add image of Installation page when official Operator is available. - -. Click *Install*. The WMCO is now listed on the *Installed Operators* page. -+ -[NOTE] -==== -The WMCO is installed automatically into the namespace you defined, like `openshift-windows-machine-config-operator`. -==== - -. Verify that the *Status* shows *Succeeded* to confirm successful installation of the WMCO. diff --git a/modules/interacting-serverless-apps-http2-gRPC-up-to-4-9.adoc b/modules/interacting-serverless-apps-http2-gRPC-up-to-4-9.adoc deleted file mode 100644 index 0fbdab288897..000000000000 --- a/modules/interacting-serverless-apps-http2-gRPC-up-to-4-9.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// serverless/knative-serving/external-ingress-routing/using-http2-gRPC.adoc - -:_content-type: PROCEDURE -[id="interacting-serverless-apps-http2-grpc-up-to-4-9_{context}"] -= Interacting with a serverless application using HTTP2 and gRPC in {product-title} 4.9 and older - -[IMPORTANT] -==== -This method needs to expose Kourier Gateway using the `LoadBalancer` service type. You can configure this by adding the following YAML to your `KnativeServing` custom resource definition (CRD): - -[source,yaml] ----- -... -spec: - ingress: - kourier: - service-type: LoadBalancer -... ----- -==== - -.Prerequisites - -* Install {ServerlessOperatorName} and Knative Serving on your cluster. -* Install the OpenShift CLI (`oc`). -* Create a Knative service. - -.Procedure - -. Find the application host. See the instructions in _Verifying your serverless application deployment_. - -. Find the ingress gateway's public address: -+ -[source,terminal] ----- -$ oc -n knative-serving-ingress get svc kourier ----- -+ -.Example output -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -kourier LoadBalancer 172.30.51.103 a83e86291bcdd11e993af02b7a65e514-33544245.us-east-1.elb.amazonaws.com 80:31380/TCP,443:31390/TCP 67m ----- -+ -The public address is surfaced in the `EXTERNAL-IP` field, and in this case is `a83e86291bcdd11e993af02b7a65e514-33544245.us-east-1.elb.amazonaws.com`. - -. Manually set the host header of your HTTP request to the application's host, but direct the request itself against the public address of the ingress gateway. -+ -[source,terminal] ----- -$ curl -H "Host: hello-default.example.com" a83e86291bcdd11e993af02b7a65e514-33544245.us-east-1.elb.amazonaws.com ----- -+ -.Example output -[source,terminal] ----- -Hello Serverless! 
----- -+ -You can also make a direct gRPC request against the ingress gateway: -+ -[source,golang] ----- -import "google.golang.org/grpc" - -grpc.Dial( - "a83e86291bcdd11e993af02b7a65e514-33544245.us-east-1.elb.amazonaws.com:80", - grpc.WithAuthority("hello-default.example.com:80"), - grpc.WithInsecure(), -) ----- -+ -[NOTE] -==== -Ensure that you append the respective port, 80 by default, to both hosts as shown in the previous example. -==== diff --git a/modules/interacting-serverless-apps-http2-gRPC.adoc b/modules/interacting-serverless-apps-http2-gRPC.adoc deleted file mode 100644 index 94dca60949d7..000000000000 --- a/modules/interacting-serverless-apps-http2-gRPC.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/knative-serving/external-ingress-routing/using-http2-gRPC.adoc - -:_content-type: PROCEDURE -[id="interacting-serverless-apps-http2-grpc_{context}"] -= Interacting with a serverless application using HTTP2 and gRPC - -[IMPORTANT] -==== -This method applies to {product-title} 4.10 and later. For older versions, see the following section. -==== - -.Prerequisites - -* Install {ServerlessOperatorName} and Knative Serving on your cluster. -* Install the OpenShift CLI (`oc`). -* Create a Knative service. -* Upgrade to {product-title} 4.10 or later. -* Enable HTTP/2 on the OpenShift Ingress controller. - -.Procedure - -. Add the `serverless.openshift.io/default-enable-http2=true` annotation to the `KnativeServing` Custom Resource: -+ -[source,terminal] ----- -$ oc annotate knativeserving <your_knative_CR> -n knative-serving serverless.openshift.io/default-enable-http2=true ----- - -. After the annotation is added, you can verify that the `appProtocol` value of the Kourier service is `h2c`: -+ -[source,terminal] ----- -$ oc get svc -n knative-serving-ingress kourier -o jsonpath="{.spec.ports[0].appProtocol}" ----- -+ -.Example output -+ -[source,terminal] ----- -h2c ----- - -. Now you can use the gRPC framework over the HTTP/2 protocol for external traffic, for example: -+ -[source,golang] ----- -import ( - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -grpc.Dial( - YOUR_URL, <1> - grpc.WithTransportCredentials(insecure.NewCredentials()), <2> -) ----- -<1> Your `ksvc` URL. -<2> The transport credentials to use. This example uses insecure credentials. diff --git a/modules/investigating-etcd-installation-issues.adoc b/modules/investigating-etcd-installation-issues.adoc deleted file mode 100644 index 36fef0831551..000000000000 --- a/modules/investigating-etcd-installation-issues.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="investigating-etcd-installation-issues_{context}"] -= Investigating etcd installation issues - -If you experience etcd issues during installation, you can check etcd pod status and collect etcd pod logs. You can also verify etcd DNS records and check DNS availability on control plane nodes. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have SSH access to your hosts. -* You have the fully qualified domain names of the control plane nodes. - -.Procedure - -. Check the status of etcd pods. -.. Review the status of pods in the `openshift-etcd` namespace: -+ -[source,terminal] ----- -$ oc get pods -n openshift-etcd ----- -+ -..
Review the status of pods in the `openshift-etcd-operator` namespace: -+ -[source,terminal] ----- -$ oc get pods -n openshift-etcd-operator ----- - -. If any of the pods listed by the previous commands are not showing a `Running` or a `Completed` status, gather diagnostic information for the pod. -.. Review events for the pod: -+ -[source,terminal] ----- -$ oc describe pod/<pod_name> -n <namespace> ----- -+ -.. Inspect the pod's logs: -+ -[source,terminal] ----- -$ oc logs pod/<pod_name> -n <namespace> ----- -+ -.. If the pod has more than one container, the preceding command will create an error, and the container names will be provided in the error message. Inspect logs for each container: -+ -[source,terminal] ----- -$ oc logs pod/<pod_name> -c <container_name> -n <namespace> ----- - -. If the API is not functional, review etcd pod and container logs on each control plane node by using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values. -.. List etcd pods on each control plane node: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl pods --name=etcd- ----- -+ -.. For any pods not showing `Ready` status, inspect pod status in detail. Replace `<pod_id>` with the pod's ID listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspectp <pod_id> ----- -+ -.. List containers related to a pod: -+ -// TODO: Once https://bugzilla.redhat.com/show_bug.cgi?id=1858239 has been resolved, replace the `grep` command below: -//[source,terminal] -//---- -//$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl ps --pod=<pod_id> -//---- -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl ps | grep '<pod_id>' ----- -+ -.. For any containers not showing `Ready` status, inspect container status in detail. Replace `<container_id>` with container IDs listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl inspect <container_id> ----- -+ -.. Review the logs for any containers not showing a `Ready` status. Replace `<container_id>` with the container IDs listed in the output of the preceding command: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl logs -f <container_id> ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== -+ -. Validate primary and secondary DNS server connectivity from control plane nodes. 
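The last step above validates DNS from the control plane nodes. The following is a minimal sketch of one way to run that check, assuming the same `<master-node>.<cluster_name>.<base_domain>` and `<dns_server>` placeholders used elsewhere in this procedure, and assuming that the node records its configured nameservers in `/etc/resolv.conf`:

[source,terminal]
----
$ ssh core@<master-node>.<cluster_name>.<base_domain> cat /etc/resolv.conf
----

For each `nameserver` entry in the output, confirm that the server answers a query for the cluster's internal API record:

[source,terminal]
----
$ ssh core@<master-node>.<cluster_name>.<base_domain> dig +short api-int.<cluster_name>.<base_domain> @<dns_server>
----

If a nameserver does not return an answer, investigate that server's reachability from the node before continuing with the etcd checks.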
diff --git a/modules/investigating-kernel-crashes.adoc deleted file mode 100644 index 435d702f9f96..000000000000 --- a/modules/investigating-kernel-crashes.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-operating-system-issues.adoc - -:_content-type: CONCEPT -[id="investigating-kernel-crashes"] -= Investigating kernel crashes - -The `kdump` service, included in the `kexec-tools` package, provides a crash-dumping mechanism. You can use this service to save the contents of a system's memory for later analysis. - -The `x86_64` architecture supports kdump in General Availability (GA) status, whereas other architectures support kdump in Technology Preview (TP) status. - -The following table provides details about the support level of kdump for different architectures. - -.Kdump support in {op-system} -[cols="1,1",width="100%",options="header"] -|=== -|Architecture |Support level - -a| -`x86_64` -| GA - -a| -`aarch64` -| TP - -a| -`s390x` -| TP - -a| -`ppc64le` -| TP -|=== - -:FeatureName: Kdump support, for the preceding three architectures in the table, -include::snippets/technology-preview.adoc[leveloffset=+1] diff --git a/modules/investigating-kubelet-api-installation-issues.adoc deleted file mode 100644 index 318b634d6586..000000000000 --- a/modules/investigating-kubelet-api-installation-issues.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="investigating-kubelet-api-installation-issues_{context}"] -= Investigating control plane node kubelet and API server issues - -To investigate control plane node kubelet and API server issues during installation, check DNS, DHCP, and load balancer functionality. Also, verify that certificates have not expired. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have SSH access to your hosts. -* You have the fully qualified domain names of the control plane nodes. - -.Procedure - -. Verify that the API server's DNS record directs the kubelet on control plane nodes to [x-]`https://api-int.<cluster_name>.<base_domain>:6443`. Ensure that the record references the load balancer. - -. Ensure that the load balancer's port 6443 definition references each control plane node. - -. Check that unique control plane node hostnames have been provided by DHCP. - -. Inspect the `kubelet.service` journald unit logs on each control plane node. -.. Retrieve the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u kubelet ----- -+ -.. If the API is not functional, review the logs using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> journalctl -b -f -u kubelet.service ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead.
However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== -+ -. Check for certificate expiration messages in the control plane node kubelet logs. -.. Retrieve the log using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u kubelet | grep -is 'x509: certificate has expired' ----- -+ -.. If the API is not functional, review the logs using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> journalctl -b -f -u kubelet.service | grep -is 'x509: certificate has expired' ----- diff --git a/modules/investigating-master-node-installation-issues.adoc b/modules/investigating-master-node-installation-issues.adoc deleted file mode 100644 index 39f69cc8cddb..000000000000 --- a/modules/investigating-master-node-installation-issues.adoc +++ /dev/null @@ -1,247 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="investigating-master-node-installation-issues_{context}"] -= Investigating control plane node installation issues - -If you experience control plane node installation issues, determine the control plane node {product-title} software defined network (SDN), and network Operator status. Collect `kubelet.service`, `crio.service` journald unit logs, and control plane node container logs for visibility into control plane node agent, CRI-O container runtime, and pod activity. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have SSH access to your hosts. -* You have the fully qualified domain names of the bootstrap and control plane nodes. -* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. -+ -[NOTE] -==== -The initial `kubeadmin` password can be found in `<install_directory>/auth/kubeadmin-password` on the installation host. -==== - -.Procedure - -. If you have access to the console for the control plane node, monitor the console until the node reaches the login prompt. During the installation, Ignition log messages are output to the console. - -. Verify Ignition file configuration. -+ -* If you are hosting Ignition configuration files by using an HTTP server. -+ -.. Verify the control plane node Ignition file URL. Replace `<http_server_fqdn>` with HTTP server's fully qualified domain name: -+ -[source,terminal] ----- -$ curl -I http://<http_server_fqdn>:<port>/master.ign <1> ----- -<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. -+ -.. To verify that the Ignition file was received by the control plane node query the HTTP server logs on the serving host. 
For example, if you are using an Apache web server to serve Ignition files: -+ -[source,terminal] ----- -$ grep -is 'master.ign' /var/log/httpd/access_log ----- -+ -If the master Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. -+ -.. If the Ignition file was not received, check that it exists on the serving host directly. Ensure that the appropriate file and web server permissions are in place. -+ -* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. -+ -.. Review the console for the control plane node to determine if the mechanism is injecting the control plane node Ignition file correctly. - -. Check the availability of the storage device assigned to the control plane node. - -. Verify that the control plane node has been assigned an IP address from the DHCP server. - -. Determine control plane node status. -.. Query control plane node status: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.. If one of the control plane nodes does not reach a `Ready` status, retrieve a detailed node description: -+ -[source,terminal] ----- -$ oc describe node <master_node> ----- -+ -[NOTE] -==== -It is not possible to run `oc` commands if an installation issue prevents the {product-title} API from running or if the kubelet is not running yet on each node: -==== -+ -. Determine {product-title} SDN status. -+ -.. Review `sdn-controller`, `sdn`, and `ovs` daemon set status, in the `openshift-sdn` namespace: -+ -[source,terminal] ----- -$ oc get daemonsets -n openshift-sdn ----- -+ -.. If those resources are listed as `Not found`, review pods in the `openshift-sdn` namespace: -+ -[source,terminal] ----- -$ oc get pods -n openshift-sdn ----- -+ -.. Review logs relating to failed {product-title} SDN pods in the `openshift-sdn` namespace: -+ -[source,terminal] ----- -$ oc logs <sdn_pod> -n openshift-sdn ----- - -. Determine cluster network configuration status. -.. Review whether the cluster's network configuration exists: -+ -[source,terminal] ----- -$ oc get network.config.openshift.io cluster -o yaml ----- -+ -.. If the installer failed to create the network configuration, generate the Kubernetes manifests again and review message output: -+ -[source,terminal] ----- -$ ./openshift-install create manifests ----- -+ -.. Review the pod status in the `openshift-network-operator` namespace to determine whether the Cluster Network Operator (CNO) is running: -+ -[source,terminal] ----- -$ oc get pods -n openshift-network-operator ----- -+ -.. Gather network Operator pod logs from the `openshift-network-operator` namespace: -+ -[source,terminal] ----- -$ oc logs pod/<network_operator_pod_name> -n openshift-network-operator ----- - -. Monitor `kubelet.service` journald unit logs on control plane nodes, after they have booted. This provides visibility into control plane node agent activity. -.. Retrieve the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u kubelet ----- -+ -.. If the API is not functional, review the logs using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> journalctl -b -f -u kubelet.service ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. 
Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== -+ -. Retrieve `crio.service` journald unit logs on control plane nodes, after they have booted. This provides visibility into control plane node CRI-O container runtime activity. -.. Retrieve the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u crio ----- -+ -.. If the API is not functional, review the logs using SSH instead: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> journalctl -b -f -u crio.service ----- - -. Collect logs from specific subdirectories under `/var/log/` on control plane nodes. -.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/openshift-apiserver/` on all control plane nodes: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=openshift-apiserver ----- -+ -.. Inspect a specific log within a `/var/log/` subdirectory. The following example outputs `/var/log/openshift-apiserver/audit.log` contents from all control plane nodes: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=openshift-apiserver/audit.log ----- -+ -.. If the API is not functional, review the logs on each node using SSH instead. The following example tails `/var/log/openshift-apiserver/audit.log`: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo tail -f /var/log/openshift-apiserver/audit.log ----- - -. Review control plane node container logs using SSH. -.. List the containers: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl ps -a ----- -+ -.. Retrieve a container's logs using `crictl`: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> sudo crictl logs -f <container_id> ----- - -. If you experience control plane node configuration issues, verify that the MCO, MCO endpoint, and DNS record are functioning. The Machine Config Operator (MCO) manages operating system configuration during the installation procedure. Also verify system clock accuracy and certificate validity. -.. Test whether the MCO endpoint is available. Replace `<cluster_name>` with appropriate values: -+ -[source,terminal] ----- -$ curl https://api-int.<cluster_name>:22623/config/master ----- -+ -.. If the endpoint is unresponsive, verify load balancer configuration. Ensure that the endpoint is configured to run on port 22623. -+ -.. Verify that the MCO endpoint's DNS record is configured and resolves to the load balancer. -... Run a DNS lookup for the defined MCO endpoint name: -+ -[source,terminal] ----- -$ dig api-int.<cluster_name> @<dns_server> ----- -+ -... Run a reverse lookup to the assigned MCO IP address on the load balancer: -+ -[source,terminal] ----- -$ dig -x <load_balancer_mco_ip_address> @<dns_server> ----- -+ -.. Verify that the MCO is functioning from the bootstrap node directly. 
Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> curl https://api-int.<cluster_name>:22623/config/master ----- -+ -.. System clock time must be synchronized between bootstrap, master, and worker nodes. Check each node's system clock reference time and time synchronization statistics: -+ -[source,terminal] ----- -$ ssh core@<node>.<cluster_name>.<base_domain> chronyc tracking ----- -+ -.. Review certificate validity: -+ -[source,terminal] ----- -$ openssl s_client -connect api-int.<cluster_name>:22623 | openssl x509 -noout -text ----- diff --git a/modules/investigating-why-windows-machine-compute-node.adoc b/modules/investigating-why-windows-machine-compute-node.adoc deleted file mode 100644 index 340bca3d04a7..000000000000 --- a/modules/investigating-why-windows-machine-compute-node.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-windows-container-workload-issues.adoc - -:_content-type: PROCEDURE -[id="investigating-why-windows-machine-compute-node_{context}"] -= Investigating why Windows Machine does not become compute node - -There are various reasons why a Windows Machine does not become a compute node. The best way to investigate this problem is to collect the Windows Machine Config Operator (WMCO) logs. - -.Prerequisites - -* You installed the Windows Machine Config Operator (WMCO) using Operator Lifecycle Manager (OLM). -* You have created a Windows compute machine set. - -.Procedure - -* Run the following command to collect the WMCO logs: -+ -[source,terminal] ----- -$ oc logs -f deployment/windows-machine-config-operator -n openshift-windows-machine-config-operator ----- diff --git a/modules/investigating-worker-node-installation-issues.adoc b/modules/investigating-worker-node-installation-issues.adoc deleted file mode 100644 index 0050ee3a5593..000000000000 --- a/modules/investigating-worker-node-installation-issues.adoc +++ /dev/null @@ -1,224 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="investigating-worker-node-installation-issues_{context}"] -= Investigating worker node installation issues - -If you experience worker node installation issues, you can review the worker node status. Collect `kubelet.service`, `crio.service` journald unit logs and the worker node container logs for visibility into the worker node agent, CRI-O container runtime and pod activity. Additionally, you can check the Ignition file and Machine API Operator functionality. If worker node post-installation configuration fails, check Machine Config Operator (MCO) and DNS functionality. You can also verify system clock synchronization between the bootstrap, master, and worker nodes, and validate certificates. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have SSH access to your hosts. -* You have the fully qualified domain names of the bootstrap and worker nodes. -* If you are hosting Ignition configuration files by using an HTTP server, you must have the HTTP server's fully qualified domain name and the port number. You must also have SSH access to the HTTP host. -+ -[NOTE] -==== -The initial `kubeadmin` password can be found in `<install_directory>/auth/kubeadmin-password` on the installation host. 
-==== - -.Procedure - -. If you have access to the worker node's console, monitor the console until the node reaches the login prompt. During the installation, Ignition log messages are output to the console. - -. Verify Ignition file configuration. -+ -* If you are hosting Ignition configuration files by using an HTTP server. -+ -.. Verify the worker node Ignition file URL. Replace `<http_server_fqdn>` with HTTP server's fully qualified domain name: -+ -[source,terminal] ----- -$ curl -I http://<http_server_fqdn>:<port>/worker.ign <1> ----- -<1> The `-I` option returns the header only. If the Ignition file is available on the specified URL, the command returns `200 OK` status. If it is not available, the command returns `404 file not found`. -+ -.. To verify that the Ignition file was received by the worker node, query the HTTP server logs on the HTTP host. For example, if you are using an Apache web server to serve Ignition files: -+ -[source,terminal] ----- -$ grep -is 'worker.ign' /var/log/httpd/access_log ----- -+ -If the worker Ignition file is received, the associated `HTTP GET` log message will include a `200 OK` success status, indicating that the request succeeded. -+ -.. If the Ignition file was not received, check that it exists on the serving host directly. Ensure that the appropriate file and web server permissions are in place. -+ -* If you are using a cloud provider mechanism to inject Ignition configuration files into hosts as part of their initial deployment. -+ -.. Review the worker node's console to determine if the mechanism is injecting the worker node Ignition file correctly. - -. Check the availability of the worker node's assigned storage device. - -. Verify that the worker node has been assigned an IP address from the DHCP server. - -. Determine worker node status. -.. Query node status: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.. Retrieve a detailed node description for any worker nodes not showing a `Ready` status: -+ -[source,terminal] ----- -$ oc describe node <worker_node> ----- -+ -[NOTE] -==== -It is not possible to run `oc` commands if an installation issue prevents the {product-title} API from running or if the kubelet is not running yet on each node. -==== -+ -. Unlike control plane nodes, worker nodes are deployed and scaled using the Machine API Operator. Check the status of the Machine API Operator. -.. Review Machine API Operator pod status: -+ -[source,terminal] ----- -$ oc get pods -n openshift-machine-api ----- -+ -.. If the Machine API Operator pod does not have a `Ready` status, detail the pod's events: -+ -[source,terminal] ----- -$ oc describe pod/<machine_api_operator_pod_name> -n openshift-machine-api ----- -+ -.. Inspect `machine-api-operator` container logs. The container runs within the `machine-api-operator` pod: -+ -[source,terminal] ----- -$ oc logs pod/<machine_api_operator_pod_name> -n openshift-machine-api -c machine-api-operator ----- -+ -.. Also inspect `kube-rbac-proxy` container logs. The container also runs within the `machine-api-operator` pod: -+ -[source,terminal] ----- -$ oc logs pod/<machine_api_operator_pod_name> -n openshift-machine-api -c kube-rbac-proxy ----- - -. Monitor `kubelet.service` journald unit logs on worker nodes, after they have booted. This provides visibility into worker node agent activity. -.. Retrieve the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=worker -u kubelet ----- -+ -.. If the API is not functional, review the logs using SSH instead. 
Replace `<worker-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<worker-node>.<cluster_name>.<base_domain> journalctl -b -f -u kubelet.service ----- -+ -[NOTE] -==== -{product-title} {product-version} cluster nodes running {op-system-first} are immutable and rely on Operators to apply cluster changes. Accessing cluster nodes by using SSH is not recommended. Before attempting to collect diagnostic data over SSH, review whether the data collected by running `oc adm must gather` and other `oc` commands is sufficient instead. However, if the {product-title} API is not available, or the kubelet is not properly functioning on the target node, `oc` operations will be impacted. In such situations, it is possible to access nodes using `ssh core@<node>.<cluster_name>.<base_domain>`. -==== -+ -. Retrieve `crio.service` journald unit logs on worker nodes, after they have booted. This provides visibility into worker node CRI-O container runtime activity. -.. Retrieve the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=worker -u crio ----- -+ -.. If the API is not functional, review the logs using SSH instead: -+ -[source,terminal] ----- -$ ssh core@<worker-node>.<cluster_name>.<base_domain> journalctl -b -f -u crio.service ----- - -. Collect logs from specific subdirectories under `/var/log/` on worker nodes. -.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/sssd/` on all worker nodes: -+ -[source,terminal] ----- -$ oc adm node-logs --role=worker --path=sssd ----- -+ -.. Inspect a specific log within a `/var/log/` subdirectory. The following example outputs `/var/log/sssd/sssd.log` contents from all worker nodes: -+ -[source,terminal] ----- -$ oc adm node-logs --role=worker --path=sssd/sssd.log ----- -+ -.. If the API is not functional, review the logs on each node using SSH instead. The following example tails `/var/log/sssd/sssd.log`: -+ -[source,terminal] ----- -$ ssh core@<worker-node>.<cluster_name>.<base_domain> sudo tail -f /var/log/sssd/sssd.log ----- - -. Review worker node container logs using SSH. -.. List the containers: -+ -[source,terminal] ----- -$ ssh core@<worker-node>.<cluster_name>.<base_domain> sudo crictl ps -a ----- -+ -.. Retrieve a container's logs using `crictl`: -+ -[source,terminal] ----- -$ ssh core@<worker-node>.<cluster_name>.<base_domain> sudo crictl logs -f <container_id> ----- - -. If you experience worker node configuration issues, verify that the MCO, MCO endpoint, and DNS record are functioning. The Machine Config Operator (MCO) manages operating system configuration during the installation procedure. Also verify system clock accuracy and certificate validity. -.. Test whether the MCO endpoint is available. Replace `<cluster_name>` with appropriate values: -+ -[source,terminal] ----- -$ curl https://api-int.<cluster_name>:22623/config/worker ----- -+ -.. If the endpoint is unresponsive, verify load balancer configuration. Ensure that the endpoint is configured to run on port 22623. -+ -.. Verify that the MCO endpoint's DNS record is configured and resolves to the load balancer. -... Run a DNS lookup for the defined MCO endpoint name: -+ -[source,terminal] ----- -$ dig api-int.<cluster_name> @<dns_server> ----- -+ -... Run a reverse lookup to the assigned MCO IP address on the load balancer: -+ -[source,terminal] ----- -$ dig -x <load_balancer_mco_ip_address> @<dns_server> ----- -+ -..
Verify that the MCO is functioning from the bootstrap node directly. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> curl https://api-int.<cluster_name>:22623/config/worker ----- -+ -.. System clock time must be synchronized between bootstrap, master, and worker nodes. Check each node's system clock reference time and time synchronization statistics: -+ -[source,terminal] ----- -$ ssh core@<node>.<cluster_name>.<base_domain> chronyc tracking ----- -+ -.. Review certificate validity: -+ -[source,terminal] ----- -$ openssl s_client -connect api-int.<cluster_name>:22623 | openssl x509 -noout -text ----- diff --git a/modules/ipi-install-additional-install-config-parameters.adoc b/modules/ipi-install-additional-install-config-parameters.adoc deleted file mode 100644 index 54e2f5c90e59..000000000000 --- a/modules/ipi-install-additional-install-config-parameters.adoc +++ /dev/null @@ -1,231 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: REFERENCE -[id="additional-install-config-parameters_{context}"] -= Additional `install-config` parameters - -See the following tables for the required parameters, the `hosts` parameter, -and the `bmc` parameter for the `install-config.yaml` file. - -[cols="2,1,5"] -[options="header"] -.Required parameters -|=== -|Parameters |Default |Description - - -| `baseDomain` -| -| The domain name for the cluster. For example, `example.com`. - -| `bootMode` -| `UEFI` -| The boot mode for a node. Options are `legacy`, `UEFI`, and `UEFISecureBoot`. If `bootMode` is not set, Ironic sets it while inspecting the node. - -| `bootstrapExternalStaticIP` -| -| The static IP address for the bootstrap VM. You must set this value when deploying a cluster with static IP addresses when there is no DHCP server on the `baremetal` network. - -| `bootstrapExternalStaticGateway` -| -| The static IP address of the gateway for the bootstrap VM. You must set this value when deploying a cluster with static IP addresses when there is no DHCP server on the `baremetal` network. - -| `sshKey` -| -| The `sshKey` configuration setting contains the key in the `~/.ssh/id_rsa.pub` file required to access the control plane nodes and worker nodes. Typically, this key is from the `provisioner` node. - -| `pullSecret` -| -| The `pullSecret` configuration setting contains a copy of the pull secret downloaded from the link:https://console.redhat.com/openshift/install/metal/user-provisioned[Install OpenShift on Bare Metal] page when preparing the provisioner node. - - -a| ----- -metadata: - name: ----- -| -|The name to be given to the {product-title} cluster. For example, `openshift`. - - -a| ----- -networking: - machineNetwork: - - cidr: ----- -| -|The public CIDR (Classless Inter-Domain Routing) of the external network. For example, `10.0.0.0/24`. - -a| ----- -compute: - - name: worker ----- -| -|The {product-title} cluster requires a name be provided for worker (or compute) nodes even if there are zero nodes. - - -a| ----- -compute: - replicas: 2 ----- -| -|Replicas sets the number of worker (or compute) nodes in the {product-title} cluster. - - -a| ----- -controlPlane: - name: master ----- -| -|The {product-title} cluster requires a name for control plane (master) nodes. 
- - -a| ----- -controlPlane: - replicas: 3 ----- -| -|Replicas sets the number of control plane (master) nodes included as part of the {product-title} cluster. - -a| `provisioningNetworkInterface` | | The name of the network interface on nodes connected to the `provisioning` network. For {product-title} 4.9 and later releases, use the `bootMACAddress` configuration setting to enable Ironic to identify the IP address of the NIC instead of using the `provisioningNetworkInterface` configuration setting to identify the name of the NIC. - - -| `defaultMachinePlatform` | | The default configuration used for machine pools without a platform configuration. - -| `apiVIPs` | a| (Optional) The virtual IP address for Kubernetes API communication. - -This setting must either be provided in the `install-config.yaml` file as a reserved IP from the MachineNetwork or pre-configured in the DNS so that the default name resolves correctly. Use the virtual IP address and not the FQDN when adding a value to the `apiVIPs` configuration setting in the `install-config.yaml` file. The primary IP address must be from the IPv4 network when using dual stack networking. If not set, the installation program uses `api.<cluster_name>.<base_domain>` to derive the IP address from the DNS. - -[NOTE] -==== -Before {product-title} 4.12, the cluster installation program only accepted an IPv4 address or an IPv6 address for the `apiVIP` configuration setting. From {product-title} 4.12 or later, the `apiVIP` configuration setting is deprecated. Instead, use a list format for the `apiVIPs` configuration setting to specify an IPv4 address, an IPv6 address or both IP address formats. -==== - - -| `disableCertificateVerification` | `False` | `redfish` and `redfish-virtualmedia` need this parameter to manage BMC addresses. The value should be `True` when using a self-signed certificate for BMC addresses. - -| `ingressVIPs` | a| (Optional) The virtual IP address for ingress traffic. - -This setting must either be provided in the `install-config.yaml` file as a reserved IP from the MachineNetwork or pre-configured in the DNS so that the default name resolves correctly. Use the virtual IP address and not the FQDN when adding a value to the `ingressVIPs` configuration setting in the `install-config.yaml` file. The primary IP address must be from the IPv4 network when using dual stack networking. If not set, the installation program uses `test.apps.<cluster_name>.<base_domain>` to derive the IP address from the DNS. - -[NOTE] -==== -Before {product-title} 4.12, the cluster installation program only accepted an IPv4 address or an IPv6 address for the `ingressVIP` configuration setting. In {product-title} 4.12 and later, the `ingressVIP` configuration setting is deprecated. Instead, use a list format for the `ingressVIPs` configuration setting to specify an IPv4 addresses, an IPv6 addresses or both IP address formats. -==== - -|=== - - -[cols="1,1,3", options="header"] -.Optional Parameters -|=== -|Parameters -|Default -|Description - -|`provisioningDHCPRange` -|`172.22.0.10,172.22.0.100` -|Defines the IP range for nodes on the `provisioning` network. - -a|`provisioningNetworkCIDR` -|`172.22.0.0/24` -|The CIDR for the network to use for provisioning. This option is required when not using the default address range on the `provisioning` network. - -|`clusterProvisioningIP` -|The third IP address of the `provisioningNetworkCIDR`. -|The IP address within the cluster where the provisioning services run. 
Defaults to the third IP address of the `provisioning` subnet. For example, `172.22.0.3`. - -|`bootstrapProvisioningIP` -|The second IP address of the `provisioningNetworkCIDR`. -|The IP address on the bootstrap VM where the provisioning services run while the installer is deploying the control plane (master) nodes. Defaults to the second IP address of the `provisioning` subnet. For example, `172.22.0.2` or `2620:52:0:1307::2`. - -| `externalBridge` -| `baremetal` -| The name of the `baremetal` bridge of the hypervisor attached to the `baremetal` network. - -| `provisioningBridge` -| `provisioning` -| The name of the `provisioning` bridge on the `provisioner` host attached to the `provisioning` network. - -|`architecture` -| -|Defines the host architecture for your cluster. Valid values are `amd64` or `arm64`. - -| `defaultMachinePlatform` -| -| The default configuration used for machine pools without a platform configuration. - -| `bootstrapOSImage` -| -| A URL to override the default operating system image for the bootstrap node. The URL must contain a SHA-256 hash of the image. For example: -`https://mirror.openshift.com/rhcos-<version>-qemu.qcow2.gz?sha256=<uncompressed_sha256>`. - -| `provisioningNetwork` -| -| The `provisioningNetwork` configuration setting determines whether the cluster uses the `provisioning` network. If it does, the configuration setting also determines if the cluster manages the network. - -`Disabled`: Set this parameter to `Disabled` to disable the requirement for a `provisioning` network. When set to `Disabled`, you must only use virtual media based provisioning, or bring up the cluster using the assisted installer. If `Disabled` and using power management, BMCs must be accessible from the `baremetal` network. If `Disabled`, you must provide two IP addresses on the `baremetal` network that are used for the provisioning services. - -`Managed`: Set this parameter to `Managed`, which is the default, to fully manage the provisioning network, including DHCP, TFTP, and so on. - -`Unmanaged`: Set this parameter to `Unmanaged` to enable the provisioning network but take care of manual configuration of DHCP. Virtual media provisioning is recommended but PXE is still available if required. - -| `httpProxy` -| -| Set this parameter to the appropriate HTTP proxy used within your environment. - -| `httpsProxy` -| -| Set this parameter to the appropriate HTTPS proxy used within your environment. - -| `noProxy` -| -| Set this parameter to the appropriate list of exclusions for proxy usage within your environment. - -|=== - -[discrete] -== Hosts - -The `hosts` parameter is a list of separate bare metal assets used to build the cluster. - -[width="100%", cols="3,2,5", options="header"] -.Hosts -|=== -|Name |Default |Description -| `name` -| -| The name of the `BareMetalHost` resource to associate with the details. For example, `openshift-master-0`. - - -| `role` -| -| The role of the bare metal node. Either `master` or `worker`. - - -| `bmc` -| -| Connection details for the baseboard management controller. See the BMC addressing section for additional details. - - -| `bootMACAddress` -| -a| The MAC address of the NIC that the host uses for the `provisioning` network. Ironic retrieves the IP address using the `bootMACAddress` configuration setting. Then, it binds to the host. - -[NOTE] -==== -You must provide a valid MAC address from the host if you disabled the `provisioning` network. 
-==== - -| `networkConfig` -| -| Set this optional parameter to configure the network interface of a host. See "(Optional) Configuring host network interfaces" for additional details. - -|=== diff --git a/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc b/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc deleted file mode 100644 index 70e64c820bad..000000000000 --- a/modules/ipi-install-bmc-addressing-for-dell-idrac.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc - -:_content-type: REFERENCE -[id='bmc-addressing-for-dell-idrac_{context}'] -= BMC addressing for Dell iDRAC - -The `address` field for each `bmc` entry is a URL for connecting to the {product-title} cluster nodes, including the type of controller in the URL scheme and its location on the network. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: <hostname> - role: <master | worker> - bmc: - address: <address> <1> - username: <user> - password: <password> ----- -<1> The `address` configuration setting specifies the protocol. - -For Dell hardware, Red Hat supports integrated Dell Remote Access Controller (iDRAC) virtual media, Redfish network boot, and IPMI. - -[discrete] -== BMC address formats for Dell iDRAC -[width="100%", cols="1,3", options="header"] -|==== -|Protocol|Address Format -|iDRAC virtual media| `idrac-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1` -|Redfish network boot|`redfish://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1` -|IPMI|`ipmi://<out-of-band-ip>` -|==== - -[IMPORTANT] -==== -Use `idrac-virtualmedia` as the protocol for Redfish virtual media. `redfish-virtualmedia` will not work on Dell hardware. Dell's `idrac-virtualmedia` uses the Redfish standard with Dell's OEM extensions. -==== - -See the following sections for additional details. - -[discrete] -== Redfish virtual media for Dell iDRAC - -For Redfish virtual media on Dell servers, use `idrac-virtualmedia://` in the `address` setting. Using `redfish-virtualmedia://` will not work. - -[NOTE] -==== -Use `idrac-virtualmedia://` as the protocol for Redfish virtual media. Using `redfish-virtualmedia://` will not work on Dell hardware, because the `idrac-virtualmedia://` protocol corresponds to the `idrac` hardware type and the Redfish protocol in Ironic. Dell's `idrac-virtualmedia://` protocol uses the Redfish standard with Dell's OEM extensions. Ironic also supports the `idrac` type with the WSMAN protocol. Therefore, you must specify `idrac-virtualmedia://` to avoid unexpected behavior when electing to use Redfish with virtual media on Dell hardware. -==== - -The following example demonstrates using iDRAC virtual media within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: idrac-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1 - username: <user> - password: <password> ----- - -While it is recommended to have a certificate of authority for the out-of-band management addresses, you must include `disableCertificateVerification: True` in the `bmc` configuration if using self-signed certificates. - -[NOTE] -==== -Ensure the {product-title} cluster nodes have *AutoAttach* enabled through the iDRAC console. The menu path is: *Configuration* -> *Virtual Media* -> *Attach Mode* -> *AutoAttach*. 
-==== - -The following example demonstrates a Redfish configuration using the `disableCertificateVerification: True` configuration parameter within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: idrac-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1 - username: <user> - password: <password> - disableCertificateVerification: True ----- - -[discrete] -== Redfish network boot for iDRAC - -To enable Redfish, use `redfish://` or `redfish+http://` to disable transport layer security (TLS). The installer requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1 - username: <user> - password: <password> ----- - -While it is recommended to have a certificate of authority for the out-of-band management addresses, you must include `disableCertificateVerification: True` in the `bmc` configuration if using self-signed certificates. The following example demonstrates a Redfish configuration using the `disableCertificateVerification: True` configuration parameter within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish://<out-of-band-ip>/redfish/v1/Systems/System.Embedded.1 - username: <user> - password: <password> - disableCertificateVerification: True ----- - -[NOTE] -==== -There is a known issue on Dell iDRAC 9 with firmware version `04.40.00.00` and all releases up to including the `5.xx` series for installer-provisioned installations on bare metal deployments. The virtual console plugin defaults to eHTML5, an enhanced version of HTML5, which causes problems with the *InsertVirtualMedia* workflow. Set the plugin to use HTML5 to avoid this issue. The menu path is *Configuration* -> *Virtual console* -> *Plug-in Type* -> *HTML5* . - -Ensure the {product-title} cluster nodes have *AutoAttach* enabled through the iDRAC console. The menu path is: *Configuration* -> *Virtual Media* -> *Attach Mode* -> *AutoAttach* . -==== diff --git a/modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc b/modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc deleted file mode 100644 index 0599b7fe6c3b..000000000000 --- a/modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc - -:_content-type: REFERENCE -[id='bmc-addressing-for-fujitsu-irmc_{context}'] -= BMC addressing for Fujitsu iRMC - -The `address` field for each `bmc` entry is a URL for connecting to the {product-title} cluster nodes, including the type of controller in the URL scheme and its location on the network. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: <hostname> - role: <master | worker> - bmc: - address: <address> <1> - username: <user> - password: <password> ----- -<1> The `address` configuration setting specifies the protocol. - -For Fujitsu hardware, Red Hat supports integrated Remote Management Controller (iRMC) and IPMI. 
- -.BMC address formats for Fujitsu iRMC -[options="header"] -|==== -|Protocol|Address Format -|iRMC| `irmc://<out-of-band-ip>` -|IPMI| `ipmi://<out-of-band-ip>` -|==== - -.iRMC - -Fujitsu nodes can use `irmc://<out-of-band-ip>` and defaults to port `443`. The following example demonstrates an iRMC configuration within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: irmc://<out-of-band-ip> - username: <user> - password: <password> ----- - -[NOTE] -==== -Currently Fujitsu supports iRMC S5 firmware version 3.05P and above for installer-provisioned installation on bare metal. -==== diff --git a/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc b/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc deleted file mode 100644 index b73aba9d4e59..000000000000 --- a/modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc - -:_content-type: REFERENCE -[id='bmc-addressing-for-hpe-ilo_{context}'] -= BMC addressing for HPE iLO - -The `address` field for each `bmc` entry is a URL for connecting to the {product-title} cluster nodes, including the type of controller in the URL scheme and its location on the network. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: <hostname> - role: <master | worker> - bmc: - address: <address> <1> - username: <user> - password: <password> ----- -<1> The `address` configuration setting specifies the protocol. - -For HPE integrated Lights Out (iLO), Red Hat supports Redfish virtual media, Redfish network boot, and IPMI. - -.BMC address formats for HPE iLO -[width="100%", cols="1,3", options="header"] -|==== -|Protocol|Address Format -|Redfish virtual media| `redfish-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/1` -|Redfish network boot| `redfish://<out-of-band-ip>/redfish/v1/Systems/1` -|IPMI| `ipmi://<out-of-band-ip>` -|==== - -See the following sections for additional details. - -[discrete] -== Redfish virtual media for HPE iLO - -To enable Redfish virtual media for HPE servers, use `redfish-virtualmedia://` in the `address` setting. The following example demonstrates using Redfish virtual media within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/1 - username: <user> - password: <password> ----- - -While it is recommended to have a certificate of authority for the out-of-band management addresses, you must include `disableCertificateVerification: True` in the `bmc` configuration if using self-signed certificates. The following example demonstrates a Redfish configuration using the `disableCertificateVerification: True` configuration parameter within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish-virtualmedia://<out-of-band-ip>/redfish/v1/Systems/1 - username: <user> - password: <password> - disableCertificateVerification: True ----- - -[NOTE] -==== -Redfish virtual media is not supported on 9th generation systems running iLO4, because Ironic does not support iLO4 with virtual media. -==== - - -[discrete] -== Redfish network boot for HPE iLO - -To enable Redfish, use `redfish://` or `redfish+http://` to disable TLS. 
The installer requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish://<out-of-band-ip>/redfish/v1/Systems/1 - username: <user> - password: <password> ----- - -While it is recommended to have a certificate of authority for the out-of-band management addresses, you must include `disableCertificateVerification: True` in the `bmc` configuration if using self-signed certificates. The following example demonstrates a Redfish configuration using the `disableCertificateVerification: True` configuration parameter within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish://<out-of-band-ip>/redfish/v1/Systems/1 - username: <user> - password: <password> - disableCertificateVerification: True ----- diff --git a/modules/ipi-install-bmc-addressing.adoc b/modules/ipi-install-bmc-addressing.adoc deleted file mode 100644 index f025e2f7b06b..000000000000 --- a/modules/ipi-install-bmc-addressing.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc - -:_content-type: REFERENCE -[id='bmc-addressing_{context}'] -= BMC addressing - -Most vendors support Baseboard Management Controller (BMC) addressing with the Intelligent Platform Management Interface (IPMI). IPMI does not encrypt communications. It is suitable for use within a data center over a secured or dedicated management network. Check with your vendor to see if they support Redfish network boot. Redfish delivers simple and secure management for converged, hybrid IT and the Software Defined Data Center (SDDC). Redfish is human readable and machine capable, and leverages common internet and web services standards to expose information directly to the modern tool chain. If your hardware does not support Redfish network boot, use IPMI. - -[discrete] -== IPMI - -Hosts using IPMI use the `ipmi://<out-of-band-ip>:<port>` address format, which defaults to port `623` if not specified. The following example demonstrates an IPMI configuration within the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - hosts: - - name: openshift-master-0 - role: master - bmc: - address: ipmi://<out-of-band-ip> - username: <user> - password: <password> ----- - -[IMPORTANT] -==== -The `provisioning` network is required when PXE booting using IPMI for BMC addressing. It is not possible to PXE boot hosts without a `provisioning` network. If you deploy without a `provisioning` network, you must use a virtual media BMC addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. See "Redfish virtual media for HPE iLO" in the "BMC addressing for HPE iLO" section or "Redfish virtual media for Dell iDRAC" in the "BMC addressing for Dell iDRAC" section for additional details. -==== - -[discrete] -== Redfish network boot - -To enable Redfish, use `redfish://` or `redfish+http://` to disable TLS. The installer requires both the hostname or the IP address and the path to the system ID. The following example demonstrates a Redfish configuration within the `install-config.yaml` file. 
-
-[source,yaml]
-----
-platform:
-  baremetal:
-    hosts:
-      - name: openshift-master-0
-        role: master
-        bmc:
-          address: redfish://<out-of-band-ip>/redfish/v1/Systems/1
-          username: <user>
-          password: <password>
-----
-
-While it is recommended to have a certificate of authority for the out-of-band management addresses, you must include `disableCertificateVerification: True` in the `bmc` configuration if using self-signed certificates. The following example demonstrates a Redfish configuration using the `disableCertificateVerification: True` configuration parameter within the `install-config.yaml` file.
-
-[source,yaml]
-----
-platform:
-  baremetal:
-    hosts:
-      - name: openshift-master-0
-        role: master
-        bmc:
-          address: redfish://<out-of-band-ip>/redfish/v1/Systems/1
-          username: <user>
-          password: <password>
-          disableCertificateVerification: True
-----
-
-[discrete]
-== Redfish APIs
-
-Several Redfish API endpoints are called on your BMC when you use the bare-metal installer-provisioned infrastructure.
-
-[IMPORTANT]
-====
-Ensure that your BMC supports all of the Redfish APIs before installation.
-====
-
-List of Redfish APIs::
-* Power on
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X POST -H'Content-Type: application/json' -H'Accept: application/json' -d '{"Action": "Reset", "ResetType": "On"}' https://$SERVER/redfish/v1/Systems/$SystemID/Actions/ComputerSystem.Reset
-----
-* Power off
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X POST -H'Content-Type: application/json' -H'Accept: application/json' -d '{"Action": "Reset", "ResetType": "ForceOff"}' https://$SERVER/redfish/v1/Systems/$SystemID/Actions/ComputerSystem.Reset
-----
-* Temporary boot using `pxe`
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X PATCH -H "Content-Type: application/json" https://$SERVER/redfish/v1/Systems/$SystemID/ -d '{"Boot": {"BootSourceOverrideTarget": "pxe", "BootSourceOverrideEnabled": "Once"}}'
-----
-* Set BIOS boot mode using `Legacy` or `UEFI`
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X PATCH -H "Content-Type: application/json" https://$SERVER/redfish/v1/Systems/$SystemID/ -d '{"Boot": {"BootSourceOverrideMode":"UEFI"}}'
-----
-
-List of redfish-virtualmedia APIs::
-* Set temporary boot device using `cd` or `dvd`
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X PATCH -H "Content-Type: application/json" https://$SERVER/redfish/v1/Systems/$SystemID/ -d '{"Boot": {"BootSourceOverrideTarget": "cd", "BootSourceOverrideEnabled": "Once"}}'
-----
-* Mount virtual media
-+
-[source,terminal]
-----
-curl -u $USER:$PASS -X PATCH -H "Content-Type: application/json" -H "If-Match: *" https://$SERVER/redfish/v1/Managers/$ManagerID/VirtualMedia/$VmediaId -d '{"Image": "https://example.com/test.iso", "TransferProtocolType": "HTTPS", "UserName": "", "Password":""}'
-----
-
-[NOTE]
-====
-The `PowerOn` and `PowerOff` commands for the Redfish APIs are the same as for the redfish-virtualmedia APIs.
-====
-
-[IMPORTANT]
-====
-`HTTPS` and `HTTP` are the only supported parameter types for `TransferProtocolTypes`.
-==== diff --git a/modules/ipi-install-configure-multiple-cluster-nodes.adoc b/modules/ipi-install-configure-multiple-cluster-nodes.adoc deleted file mode 100644 index 4401d1b2a5f8..000000000000 --- a/modules/ipi-install-configure-multiple-cluster-nodes.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: CONCEPT -[id="ipi-install-configure-multiple-cluster-nodes_{context}"] -= Configuring multiple cluster nodes - -You can simultaneously configure {product-title} cluster nodes with identical settings. Configuring multiple cluster nodes avoids adding redundant information for each node to the `install-config.yaml` file. This file contains specific parameters to apply an identical configuration to multiple nodes in the cluster. - -Compute nodes are configured separately from the controller node. However, configurations for both node types use the highlighted parameters in the `install-config.yaml` file to enable multi-node configuration. Set the `networkConfig` parameters to `BOND`, as shown in the following example: - -[source,yaml] ----- -hosts: -- name: ostest-master-0 - [...] - networkConfig: &BOND - interfaces: - - name: bond0 - type: bond - state: up - ipv4: - dhcp: true - enabled: true - link-aggregation: - mode: active-backup - port: - - enp2s0 - - enp3s0 -- name: ostest-master-1 - [...] - networkConfig: *BOND -- name: ostest-master-2 - [...] - networkConfig: *BOND ----- - -[NOTE] -==== -Configuration of multiple cluster nodes is only available for initial deployments on installer-provisioned infrastructure. -==== diff --git a/modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc b/modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc deleted file mode 100644 index e7a965141d3d..000000000000 --- a/modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-configuration-files.adoc -ifeval::["{context}" == "ipi-install-installation-workflow"] -:bare: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vSphere: -endif::[] - -:_content-type: PROCEDURE -[id='configure-network-components-to-run-on-the-control-plane_{context}'] -= Configuring network components to run on the control plane - -You can configure networking components to run exclusively on the control plane nodes. By default, {product-title} allows any node in the machine config pool to host the `ingressVIP` virtual IP address. However, some environments deploy worker nodes in separate subnets from the control plane nodes, which requires configuring the `ingressVIP` virtual IP address to run on the control plane nodes. - -ifdef::vSphere[] -[NOTE] -==== -You can scale the remote workers by creating a worker machineset in a separate subnet. -==== -endif::vSphere[] - -[IMPORTANT] -==== -When deploying remote workers in separate subnets, you must place the `ingressVIP` virtual IP address exclusively with the control plane nodes. -==== - -ifdef::bare[] -image::161_OpenShift_Baremetal_IPI_Deployment_updates_0521.png[Installer-provisioned networking] -endif::bare[] -ifdef::vSphere[] -image::325_OpenShift_vSphere_Deployment_updates_0323.png[Installer-provisioned networking] -endif::vSphere[] - -.Procedure - -. 
Change to the directory storing the `install-config.yaml` file: -+ -[source,terminal] ----- -$ cd ~/clusterconfigs ----- - -. Switch to the `manifests` subdirectory: -+ -[source,terminal] ----- -$ cd manifests ----- - -. Create a file named `cluster-network-avoid-workers-99-config.yaml`: -+ -[source,terminal] ----- -$ touch cluster-network-avoid-workers-99-config.yaml ----- - -. Open the `cluster-network-avoid-workers-99-config.yaml` file in an editor and enter a custom resource (CR) that describes the Operator configuration: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - name: 50-worker-fix-ipi-rwn - labels: - machineconfiguration.openshift.io/role: worker -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/kubernetes/manifests/keepalived.yaml - mode: 0644 - contents: - source: data:, ----- -+ -This manifest places the `ingressVIP` virtual IP address on the control plane nodes. Additionally, this manifest deploys the following processes on the control plane nodes only: -+ -* `openshift-ingress-operator` -+ -* `keepalived` - -. Save the `cluster-network-avoid-workers-99-config.yaml` file. - -. Create a `manifests/cluster-ingress-default-ingresscontroller.yaml` file: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - nodePlacement: - nodeSelector: - matchLabels: - node-role.kubernetes.io/master: "" ----- - -. Consider backing up the `manifests` directory. The installer deletes the `manifests/` directory when creating the cluster. - -. Modify the `cluster-scheduler-02-config.yml` manifest to make the control plane nodes schedulable by setting the `mastersSchedulable` field to `true`. Control plane nodes are not schedulable by default. For example: -+ ----- -$ sed -i "s;mastersSchedulable: false;mastersSchedulable: true;g" clusterconfigs/manifests/cluster-scheduler-02-config.yml ----- -+ -[NOTE] -==== -If control plane nodes are not schedulable after completing this procedure, deploying the cluster will fail. -==== - -ifeval::["{context}" == "ipi-install-installation-workflow"] -:!bare: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vSphere: -endif::[] diff --git a/modules/ipi-install-configuring-host-dual-network-interfaces-in-the-install-config.yaml-file.adoc b/modules/ipi-install-configuring-host-dual-network-interfaces-in-the-install-config.yaml-file.adoc deleted file mode 100644 index 88c0e5ed3cb1..000000000000 --- a/modules/ipi-install-configuring-host-dual-network-interfaces-in-the-install-config.yaml-file.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// This is included in the following assemblies: -// -// installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="configuring-host-dual-network-interfaces-in-the-install-config-yaml-file_{context}"] -= Optional: Configuring host network interfaces for dual port NIC - -:FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices -include::snippets/technology-preview.adoc[leveloffset=+1] - -Before installation, you can set the `networkConfig` configuration setting in the `install-config.yaml` file to configure host network interfaces using NMState to support dual port NIC. - -.Prequisites - -* Configure a `PTR` DNS record with a valid hostname for each node with a static IP address. 
-* Install the NMState CLI (`nmstate`). - -[NOTE] -==== -Errors in the YAML syntax might result in a failure to apply the network configuration. Additionally, maintaining the validated YAML syntax is useful when applying changes using Kubernetes NMState after deployment or when expanding the cluster. -==== - -.Procedure - -. Add the NMState configuration to the `networkConfig` field to hosts within the `install-config.yaml` file: -+ -[source,yaml] ----- -hosts: - - hostname: worker-1 - interfaces: - - name: eno1 - macAddress: 0c:42:a1:55:f3:06 - - name: eno2 - macAddress: 0c:42:a1:55:f3:07 - networkConfig: <1> - interfaces: <2> - - name: eno1 <3> - type: ethernet <4> - state: up - mac-address: 0c:42:a1:55:f3:06 - ipv4: - enabled: true - dhcp: false <5> - ethernet: - sr-iov: - total-vfs: 2 <6> - ipv6: - enabled: false - dhcp: false - - name: sriov:eno1:0 - type: ethernet - state: up <7> - ipv4: - enabled: false <8> - ipv6: - enabled: false - - name: sriov:eno1:1 - type: ethernet - state: down - - name: eno2 - type: ethernet - state: up - mac-address: 0c:42:a1:55:f3:07 - ipv4: - enabled: true - ethernet: - sr-iov: - total-vfs: 2 - ipv6: - enabled: false - - name: sriov:eno2:0 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: false - - name: sriov:eno2:1 - type: ethernet - state: down - - name: bond0 - type: bond - state: up - min-tx-rate: 100 <9> - max-tx-rate: 200 <10> - link-aggregation: - mode: active-backup <11> - options: - primary: sriov:eno1:0 <12> - port: - - sriov:eno1:0 - - sriov:eno2:0 - ipv4: - address: - - ip: 10.19.16.57 <13> - prefix-length: 23 - dhcp: false - enabled: true - ipv6: - enabled: false - dns-resolver: - config: - server: - - 10.11.5.160 - - 10.2.70.215 - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: 10.19.17.254 - next-hop-interface: bond0 <14> - table-id: 254 ----- -<1> The `networkConfig` field contains information about the network configuration of the host, with subfields including `interfaces`, `dns-resolver`, and `routes`. -<2> The `interfaces` field is an array of network interfaces defined for the host. -<3> The name of the interface. -<4> The type of interface. This example creates a ethernet interface. -<5> Set this to `false to disable DHCP for the physical function (PF) if it is not strictly required. -<6> Set to the number of SR-IOV virtual functions (VFs) to instantiate. -<7> Set this to `up`. -<8> Set this to `false` to disable IPv4 addressing for the VF attached to the bond. -<9> Sets a minimum transmission rate, in Mbps, for the VF. This sample value sets a rate of 100 Mbps. - * This value must be less than or equal to the maximum transmission rate. - * Intel NICs do not support the `min-tx-rate` parameter. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1772847[*BZ#1772847*]. -<10> Sets a maximum transmission rate, in Mbps, for the VF. This sample value sets a rate of 200 Mbps. -<11> Sets the desired bond mode. -<12> Sets the preferred port of the bonding interface. The primary device is the first of the bonding interfaces to be used and is not abandoned unless it fails. This setting is particularly useful when one NIC in the bonding interface is faster and, therefore, able to handle a bigger load. This setting is only valid when the bonding interface is in active-backup mode (mode 1) and balance-tlb (mode 5). -<13> Sets a static IP address for the bond interface. This is the node IP address. -<14> Sets `bond0` as the gateway for the default route. 
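As with the single-NIC host network configuration, consider validating the NMState portion of this configuration before embedding it in the `install-config.yaml` file, because the installation program does not check the NMState YAML syntax. The following is a minimal sketch that assumes you have saved the `interfaces`, `dns-resolver`, and `routes` sections to a hypothetical `bond0-sriov.yaml` file:

[source,terminal]
----
$ nmstatectl gc bond0-sriov.yaml
----

If the command reports errors, correct the YAML before adding it to the `networkConfig` field.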
-+ -[IMPORTANT] -==== -After deploying the cluster, you cannot modify the `networkConfig` configuration setting of `install-config.yaml` file to make changes to the host network interface. Use the Kubernetes NMState Operator to make changes to the host network interface after deployment. -==== diff --git a/modules/ipi-install-configuring-host-network-interfaces-in-the-install-config.yaml-file.adoc b/modules/ipi-install-configuring-host-network-interfaces-in-the-install-config.yaml-file.adoc deleted file mode 100644 index e8ef61cd91ba..000000000000 --- a/modules/ipi-install-configuring-host-network-interfaces-in-the-install-config.yaml-file.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// This is included in the following assemblies: -// -// installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="configuring-host-network-interfaces-in-the-install-config-yaml-file_{context}"] -= Optional: Configuring host network interfaces - -Before installation, you can set the `networkConfig` configuration setting in the `install-config.yaml` file to configure host network interfaces using NMState. - -The most common use case for this functionality is to specify a static IP address on the `baremetal` network, but you can also configure other networks such as a storage network. This functionality supports other NMState features such as VLAN, VXLAN, bridges, bonds, routes, MTU, and DNS resolver settings. - -.Prequisites - -* Configure a `PTR` DNS record with a valid hostname for each node with a static IP address. -* Install the NMState CLI (`nmstate`). - -.Procedure - -. Optional: Consider testing the NMState syntax with `nmstatectl gc` before including it in the `install-config.yaml` file, because the installer will not check the NMState YAML syntax. -+ -[NOTE] -==== -Errors in the YAML syntax might result in a failure to apply the network configuration. Additionally, maintaining the validated YAML syntax is useful when applying changes using Kubernetes NMState after deployment or when expanding the cluster. -==== - - -.. Create an NMState YAML file: -+ -[source,yaml] ----- -interfaces: -- name: <nic1_name> <1> - type: ethernet - state: up - ipv4: - address: - - ip: <ip_address> <1> - prefix-length: 24 - enabled: true -dns-resolver: - config: - server: - - <dns_ip_address> <1> -routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: <next_hop_ip_address> <1> - next-hop-interface: <next_hop_nic1_name> <1> ----- -+ -<1> Replace `<nic1_name>`, `<ip_address>`, `<dns_ip_address>`, `<next_hop_ip_address>` and `<next_hop_nic1_name>` with appropriate values. - -.. Test the configuration file by running the following command: -+ -[source,terminal] ----- -$ nmstatectl gc <nmstate_yaml_file> ----- -+ -Replace `<nmstate_yaml_file>` with the configuration file name. - -. 
Use the `networkConfig` configuration setting by adding the NMState configuration to hosts within the `install-config.yaml` file: -+ -[source,yaml] ----- - hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish+http://<out_of_band_ip>/redfish/v1/Systems/ - username: <user> - password: <password> - disableCertificateVerification: null - bootMACAddress: <NIC1_mac_address> - bootMode: UEFI - rootDeviceHints: - deviceName: "/dev/sda" - networkConfig: <1> - interfaces: - - name: <nic1_name> <2> - type: ethernet - state: up - ipv4: - address: - - ip: <ip_address> <2> - prefix-length: 24 - enabled: true - dns-resolver: - config: - server: - - <dns_ip_address> <2> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: <next_hop_ip_address> <2> - next-hop-interface: <next_hop_nic1_name> <2> ----- -<1> Add the NMState YAML syntax to configure the host interfaces. -<2> Replace `<nic1_name>`, `<ip_address>`, `<dns_ip_address>`, `<next_hop_ip_address>` and `<next_hop_nic1_name>` with appropriate values. -+ -[IMPORTANT] -==== -After deploying the cluster, you cannot modify the `networkConfig` configuration setting of `install-config.yaml` file to make changes to the host network interface. Use the Kubernetes NMState Operator to make changes to the host network interface after deployment. -==== diff --git a/modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc b/modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc deleted file mode 100644 index 6aa79c6131d0..000000000000 --- a/modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc - -:_content-type: PROCEDURE -[id="configuring-managed-secure-boot-in-the-install-config-file_{context}"] -= Optional: Configuring managed Secure Boot - -You can enable managed Secure Boot when deploying an installer-provisioned cluster using Redfish BMC addressing, such as `redfish`, `redfish-virtualmedia`, or `idrac-virtualmedia`. To enable managed Secure Boot, add the `bootMode` configuration setting to each node: - -[source,yaml] -.Example ----- -hosts: - - name: openshift-master-0 - role: master - bmc: - address: redfish://<out_of_band_ip> <1> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - rootDeviceHints: - deviceName: "/dev/sda" - bootMode: UEFISecureBoot <2> ----- - -<1> Ensure the `bmc.address` setting uses `redfish`, `redfish-virtualmedia`, or `idrac-virtualmedia` as the protocol. See "BMC addressing for HPE iLO" or "BMC addressing for Dell iDRAC" for additional details. - -<2> The `bootMode` setting is `UEFI` by default. Change it to `UEFISecureBoot` to enable managed Secure Boot. - -[NOTE] -==== -See "Configuring nodes" in the "Prerequisites" to ensure the nodes can support managed Secure Boot. If the nodes do not support managed Secure Boot, see "Configuring nodes for Secure Boot manually" in the "Configuring nodes" section. Configuring Secure Boot manually requires Redfish virtual media. -==== - -[NOTE] -==== -Red Hat does not support Secure Boot with IPMI, because IPMI does not provide Secure Boot management facilities. 
-==== diff --git a/modules/ipi-install-configuring-networking.adoc b/modules/ipi-install-configuring-networking.adoc deleted file mode 100644 index 04d71a4ca8a9..000000000000 --- a/modules/ipi-install-configuring-networking.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="configuring-networking_{context}"] -= Configuring networking - -Before installation, you must configure the networking on the provisioner node. Installer-provisioned clusters deploy with a `baremetal` bridge and network, and an optional `provisioning` bridge and network. - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_1.png[Configure networking] - -[NOTE] -==== -You can also configure networking from the web console. -==== - -.Procedure - -. Export the `baremetal` network NIC name: -+ -[source,terminal] ----- -$ export PUB_CONN=<baremetal_nic_name> ----- - -. Configure the `baremetal` network: -+ -[NOTE] -==== -The SSH connection might disconnect after executing these steps. -==== -+ -[source,terminal] ----- -$ sudo nohup bash -c " - nmcli con down \"$PUB_CONN\" - nmcli con delete \"$PUB_CONN\" - # RHEL 8.1 appends the word \"System\" in front of the connection, delete in case it exists - nmcli con down \"System $PUB_CONN\" - nmcli con delete \"System $PUB_CONN\" - nmcli connection add ifname baremetal type bridge con-name baremetal bridge.stp no - nmcli con add type bridge-slave ifname \"$PUB_CONN\" master baremetal - pkill dhclient;dhclient baremetal -" ----- - -. Optional: If you are deploying with a `provisioning` network, export the `provisioning` network NIC name: -+ -[source,terminal] ----- -$ export PROV_CONN=<prov_nic_name> ----- - -. Optional: If you are deploying with a `provisioning` network, configure the `provisioning` network: -+ -[source,terminal] ----- -$ sudo nohup bash -c " - nmcli con down \"$PROV_CONN\" - nmcli con delete \"$PROV_CONN\" - nmcli connection add ifname provisioning type bridge con-name provisioning - nmcli con add type bridge-slave ifname \"$PROV_CONN\" master provisioning - nmcli connection modify provisioning ipv6.addresses fd00:1101::1/64 ipv6.method manual - nmcli con down provisioning - nmcli con up provisioning -" ----- -+ -[NOTE] -==== -The ssh connection might disconnect after executing these steps. - -The IPv6 address can be any address as long as it is not routable via the `baremetal` network. - -Ensure that UEFI is enabled and UEFI PXE settings are set to the IPv6 protocol when using IPv6 addressing. -==== - -. Optional: If you are deploying with a `provisioning` network, configure the IPv4 address on the `provisioning` network connection: -+ -[source,terminal] ----- -$ nmcli connection modify provisioning ipv4.addresses 172.22.0.254/24 ipv4.method manual ----- - -. `ssh` back into the `provisioner` node (if required): -+ -[source,terminal] ----- -# ssh kni@provisioner.<cluster-name>.<domain> ----- - -. 
Verify the connection bridges have been properly created: -+ -[source,terminal] ----- -$ sudo nmcli con show ----- -+ -[source,terminal] ----- -NAME UUID TYPE DEVICE -baremetal 4d5133a5-8351-4bb9-bfd4-3af264801530 bridge baremetal -provisioning 43942805-017f-4d7d-a2c2-7cb3324482ed bridge provisioning -virbr0 d9bca40f-eee1-410b-8879-a2d4bb0465e7 bridge virbr0 -bridge-slave-eno1 76a8ed50-c7e5-4999-b4f6-6d9014dd0812 ethernet eno1 -bridge-slave-eno2 f31c3353-54b7-48de-893a-02d2b34c4736 ethernet eno2 ----- diff --git a/modules/ipi-install-configuring-nodes.adoc b/modules/ipi-install-configuring-nodes.adoc deleted file mode 100644 index 43dc661dc3e8..000000000000 --- a/modules/ipi-install-configuring-nodes.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: PROCEDURE -[id="configuring-nodes_{context}"] -= Configuring nodes - -[discrete] -== Configuring nodes when using the `provisioning` network - -Each node in the cluster requires the following configuration for proper installation. - -[WARNING] -==== -A mismatch between nodes will cause an installation failure. -==== - -While the cluster nodes can contain more than two NICs, the installation process only focuses on the first two NICs. In the following table, NIC1 is a non-routable network (`provisioning`) that is only used for the installation of the {product-title} cluster. - -[options="header"] -|=== -|NIC |Network |VLAN -| NIC1 | `provisioning` | `<provisioning_vlan>` -| NIC2 | `baremetal` | `<baremetal_vlan>` -|=== - -ifndef::openshift-origin[The {op-system-base-full} 8.x installation process on the provisioner node might vary. To install {op-system-base-full} 8.x using a local Satellite server or a PXE server, PXE-enable NIC2.] -ifdef::openshift-origin[The {op-system-first} installation process on the provisioner node might vary. To install {op-system} using a local Satellite server or a PXE server, PXE-enable NIC2.] - -[options="header"] -|=== -|PXE |Boot order -| NIC1 PXE-enabled `provisioning` network | 1 -| NIC2 `baremetal` network. PXE-enabled is optional. | 2 -|=== - -[NOTE] -==== -Ensure PXE is disabled on all other NICs. -==== - -Configure the control plane and worker nodes as follows: - -[options="header"] -|=== -|PXE | Boot order -| NIC1 PXE-enabled (provisioning network) | 1 -|=== - -[discrete] -== Configuring nodes without the `provisioning` network - -The installation process requires one NIC: - -[options="header"] -|=== -|NIC |Network |VLAN -| NICx | `baremetal` | `<baremetal_vlan>` -|=== - -NICx is a routable network (`baremetal`) that is used for the installation of the {product-title} cluster, and routable to the internet. - -[IMPORTANT] -==== -The `provisioning` network is optional, but it is required for PXE booting. If you deploy without a `provisioning` network, you must use a virtual media BMC addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. -==== - -[id="configuring-nodes-for-secure-boot_{context}"] -[discrete] -== Configuring nodes for Secure Boot manually - -Secure Boot prevents a node from booting unless it verifies the node is using only trusted software, such as UEFI firmware drivers, EFI applications, and the operating system. - -[NOTE] -==== -Red Hat only supports manually configured Secure Boot when deploying with Redfish virtual media. 
-==== - -To enable Secure Boot manually, refer to the hardware guide for the node and execute the following: - -.Procedure -. Boot the node and enter the BIOS menu. -. Set the node's boot mode to `UEFI Enabled`. -. Enable Secure Boot. - -[IMPORTANT] -==== -Red Hat does not support Secure Boot with self-generated keys. -==== diff --git a/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc b/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc deleted file mode 100644 index 586e449e88fa..000000000000 --- a/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-configuration-files -// installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="configuring-ntp-for-disconnected-clusters_{context}"] -= Optional: Configuring NTP for disconnected clusters - -//This procedure can be executed as a day 1 or day 2 operation with minor differences. -//The conditional text picks up the context and displays the appropriate alternate steps. - -{product-title} installs the `chrony` Network Time Protocol (NTP) service on the cluster nodes. -ifeval::["{context}" == "ipi-install-configuration-files"] -Use the following procedure to configure NTP servers on the control plane nodes and configure worker nodes as NTP clients of the control plane nodes before deployment. -endif::[] -ifeval::["{context}" == "ipi-install-post-installation-configuration"] -Use the following procedure to configure NTP servers on the control plane nodes and configure worker nodes as NTP clients of the control plane nodes after a successful deployment. -endif::[] - -image::152_OpenShift_Config_NTP_0421.png[Configuring NTP for disconnected clusters] - -{product-title} nodes must agree on a date and time to run properly. When worker nodes retrieve the date and time from the NTP servers on the control plane nodes, it enables the installation and operation of clusters that are not connected to a routable network and thereby do not have access to a higher stratum NTP server. - -.Procedure - -. Create a Butane config, `99-master-chrony-conf-override.bu`, including the contents of the `chrony.conf` file for the control plane nodes. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] -.Butane config example ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-master-chrony-conf-override - labels: - machineconfiguration.openshift.io/role: master -storage: - files: - - path: /etc/chrony.conf - mode: 0644 - overwrite: true - contents: - inline: | - # Use public servers from the pool.ntp.org project. - # Please consider joining the pool (https://www.pool.ntp.org/join.html). - - # The Machine Config Operator manages this file - server openshift-master-0.<cluster-name>.<domain> iburst <1> - server openshift-master-1.<cluster-name>.<domain> iburst - server openshift-master-2.<cluster-name>.<domain> iburst - - stratumweight 0 - driftfile /var/lib/chrony/drift - rtcsync - makestep 10 3 - bindcmdaddress 127.0.0.1 - bindcmdaddress ::1 - keyfile /etc/chrony.keys - commandkey 1 - generatecommandkey - noclientlog - logchange 0.5 - logdir /var/log/chrony - - # Configure the control plane nodes to serve as local NTP servers - # for all worker nodes, even if they are not in sync with an - # upstream NTP server. 
- - # Allow NTP client access from the local network. - allow all - # Serve time even if not synchronized to a time source. - local stratum 3 orphan ----- -+ -<1> You must replace `<cluster-name>` with the name of the cluster and replace `<domain>` with the fully qualified domain name. - -. Use Butane to generate a `MachineConfig` object file, `99-master-chrony-conf-override.yaml`, containing the configuration to be delivered to the control plane nodes: -+ -[source,terminal] ----- -$ butane 99-master-chrony-conf-override.bu -o 99-master-chrony-conf-override.yaml ----- - -. Create a Butane config, `99-worker-chrony-conf-override.bu`, including the contents of the `chrony.conf` file for the worker nodes that references the NTP servers on the control plane nodes. -+ -[source,yaml] -.Butane config example ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-worker-chrony-conf-override - labels: - machineconfiguration.openshift.io/role: worker -storage: - files: - - path: /etc/chrony.conf - mode: 0644 - overwrite: true - contents: - inline: | - # The Machine Config Operator manages this file. - server openshift-master-0.<cluster-name>.<domain> iburst <1> - server openshift-master-1.<cluster-name>.<domain> iburst - server openshift-master-2.<cluster-name>.<domain> iburst - - stratumweight 0 - driftfile /var/lib/chrony/drift - rtcsync - makestep 10 3 - bindcmdaddress 127.0.0.1 - bindcmdaddress ::1 - keyfile /etc/chrony.keys - commandkey 1 - generatecommandkey - noclientlog - logchange 0.5 - logdir /var/log/chrony ----- -+ -<1> You must replace `<cluster-name>` with the name of the cluster and replace `<domain>` with the fully qualified domain name. - -. Use Butane to generate a `MachineConfig` object file, `99-worker-chrony-conf-override.yaml`, containing the configuration to be delivered to the worker nodes: -+ -[source,terminal] ----- -$ butane 99-worker-chrony-conf-override.bu -o 99-worker-chrony-conf-override.yaml ----- - -ifeval::["{context}" == "ipi-install-configuration-files"] -. Copy the `99-master-chrony-conf-override.yaml` file to the `~/clusterconfigs/manifests` directory. -+ ----- -$ cp 99-master-chrony-conf-override.yaml ~/clusterconfigs/manifests ----- - -. Copy the `99-worker-chrony-conf-override.yaml` file to the `~/clusterconfigs/manifests` directory. -+ ----- -$ cp 99-worker-chrony-conf-override.yaml ~/clusterconfigs/manifests ----- -endif::[] - -ifeval::["{context}" == "ipi-install-post-installation-configuration"] -. Apply the `99-master-chrony-conf-override.yaml` policy to the control plane nodes. -+ -[source,terminal] ----- -$ oc apply -f 99-master-chrony-conf-override.yaml ----- -+ -[source,terminal] -.Example output ----- -machineconfig.machineconfiguration.openshift.io/99-master-chrony-conf-override created ----- - -. Apply the `99-worker-chrony-conf-override.yaml` policy to the worker nodes. -+ -[source,terminal] ----- -$ oc apply -f 99-worker-chrony-conf-override.yaml ----- -+ -[source,terminal] -.Example output ----- -machineconfig.machineconfiguration.openshift.io/99-worker-chrony-conf-override created ----- - -. Check the status of the applied NTP settings. 
-+
-[source,terminal]
-----
-$ oc describe machineconfigpool
-----
-endif::[]
diff --git a/modules/ipi-install-configuring-storage-on-nodes.adoc b/modules/ipi-install-configuring-storage-on-nodes.adoc
deleted file mode 100644
index bbb02a91cea4..000000000000
--- a/modules/ipi-install-configuring-storage-on-nodes.adoc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Module included in the following assemblies:
-//
-// * list of assemblies where this module is included
-// ipi-install-installation-workflow.adoc
-
-:_content-type: PROCEDURE
-[id="configuring-storage-on-nodes_{context}"]
-= Optional: Configuring storage on nodes
-
-You can make changes to operating systems on {product-title} nodes by creating `MachineConfig` objects that are managed by the Machine Config Operator (MCO).
-
-The `MachineConfig` specification includes an ignition config for configuring the machines at first boot. This config object can be used to modify files, systemd services, and other operating system features running on {product-title} machines.
-
-.Procedure
-
-Use the ignition config to configure storage on nodes. The following `MachineConfig` manifest example demonstrates how to add a partition to a device on a primary node. In this example, apply the manifest before installation to create a partition named `recovery` with a size of 16 GiB on the primary node.
-
-. Create a `custom-partitions.yaml` file and include a `MachineConfig` object that contains your partition layout:
-+
-[source,yaml]
-----
-apiVersion: machineconfiguration.openshift.io/v1
-kind: MachineConfig
-metadata:
-  labels:
-    machineconfiguration.openshift.io/role: primary
-  name: 10_primary_storage_config
-spec:
-  config:
-    ignition:
-      version: 3.2.0
-    storage:
-      disks:
-        - device: </dev/xxyN>
-          partitions:
-            - label: recovery
-              startMiB: 32768
-              sizeMiB: 16384
-      filesystems:
-        - device: /dev/disk/by-partlabel/recovery
-          label: recovery
-          format: xfs
-----
-+
-. Save and copy the `custom-partitions.yaml` file to the `clusterconfigs/openshift` directory:
-+
-[source,terminal]
-----
-$ cp ~/<MachineConfig_manifest> ~/clusterconfigs/openshift
-----
\ No newline at end of file
diff --git a/modules/ipi-install-configuring-the-bios.adoc b/modules/ipi-install-configuring-the-bios.adoc
deleted file mode 100644
index d7b119137a1c..000000000000
--- a/modules/ipi-install-configuring-the-bios.adoc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc
-
-:_content-type: PROCEDURE
-[id="configuring-the-bios_{context}"]
-= Optional: Configuring the BIOS
-
-The following procedure configures the BIOS during the installation process.
-
-.Procedure
-. Create the manifests.
-
-. Modify the `BareMetalHost` resource file corresponding to the node:
-+
-[source,terminal]
-----
-$ vim clusterconfigs/openshift/99_openshift-cluster-api_hosts-*.yaml
-----
-
-. Add the BIOS configuration to the `spec` section of the `BareMetalHost` resource:
-+
-[source,yaml]
-----
-spec:
-  firmware:
-    simultaneousMultithreadingEnabled: true
-    sriovEnabled: true
-    virtualizationEnabled: true
-----
-+
-[NOTE]
-====
-Red Hat supports the three BIOS configuration settings shown in the preceding example. Only servers with BMC type `irmc` are supported. Other types of servers are currently not supported.
-====
-
-. Create the cluster.
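-
-After the cluster deploys, one way to confirm that the `firmware` settings were picked up is to read them back from the corresponding `BareMetalHost` resource. This is a minimal check, assuming `oc` access to the cluster and that the host is registered in the `openshift-machine-api` namespace; replace `<hostname>` with the name of the host:
-
-[source,terminal]
-----
-$ oc get bmh -n openshift-machine-api <hostname> -o jsonpath='{.spec.firmware}'
-----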
diff --git a/modules/ipi-install-configuring-the-install-config-file.adoc b/modules/ipi-install-configuring-the-install-config-file.adoc deleted file mode 100644 index cfb4a4bd7d39..000000000000 --- a/modules/ipi-install-configuring-the-install-config-file.adoc +++ /dev/null @@ -1,139 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="configuring-the-install-config-file_{context}"] -= Configuring the install-config.yaml file - -The `install-config.yaml` file requires some additional details. -Most of the information teaches the installation program and the resulting cluster enough about the available hardware that it is able to fully manage it. - -[NOTE] -==== -The installation program no longer needs the `clusterOSImage` {op-system} image because the correct image is in the release payload. -==== - -. Configure `install-config.yaml`. Change the appropriate variables to match the environment, including `pullSecret` and `sshKey`: -+ -[source,yaml] ----- -apiVersion: v1 -baseDomain: <domain> -metadata: - name: <cluster_name> -networking: - machineNetwork: - - cidr: <public_cidr> - networkType: OVNKubernetes -compute: -- name: worker - replicas: 2 <1> -controlPlane: - name: master - replicas: 3 - platform: - baremetal: {} -platform: - baremetal: - apiVIPs: - - <api_ip> - ingressVIPs: - - <wildcard_ip> - provisioningNetworkCIDR: <CIDR> - bootstrapExternalStaticIP: <bootstrap_static_ip_address> <2> - bootstrapExternalStaticGateway: <bootstrap_static_gateway> <3> - hosts: - - name: openshift-master-0 - role: master - bmc: - address: ipmi://<out_of_band_ip> <4> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - rootDeviceHints: - deviceName: "/dev/disk/by-id/<disk_id>" <5> - - name: <openshift_master_1> - role: master - bmc: - address: ipmi://<out_of_band_ip> <4> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - rootDeviceHints: - deviceName: "/dev/disk/by-id/<disk_id>" <5> - - name: <openshift_master_2> - role: master - bmc: - address: ipmi://<out_of_band_ip> <4> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - rootDeviceHints: - deviceName: "/dev/disk/by-id/<disk_id>" <5> - - name: <openshift_worker_0> - role: worker - bmc: - address: ipmi://<out_of_band_ip> <4> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - - name: <openshift_worker_1> - role: worker - bmc: - address: ipmi://<out_of_band_ip> - username: <user> - password: <password> - bootMACAddress: <NIC1_mac_address> - rootDeviceHints: - deviceName: "/dev/disk/by-id/<disk_id>" <5> -pullSecret: '<pull_secret>' -sshKey: '<ssh_pub_key>' ----- -+ -<1> Scale the worker machines based on the number of worker nodes that are part of the {product-title} cluster. Valid options for the `replicas` value are `0` and integers greater than or equal to `2`. Set the number of replicas to `0` to deploy a three-node cluster, which contains only three control plane machines. A three-node cluster is a smaller, more resource-efficient cluster that can be used for testing, development, and production. You cannot install the cluster with only one worker. -<2> When deploying a cluster with static IP addresses, you must set the `bootstrapExternalStaticIP` configuration setting to specify the static IP address of the bootstrap VM when there is no DHCP server on the `baremetal` network. 
-<3> When deploying a cluster with static IP addresses, you must set the `bootstrapExternalStaticGateway` configuration setting to specify the gateway IP address for the bootstrap VM when there is no DHCP server on the `baremetal` network.
-<4> See the BMC addressing sections for more options.
-<5> Set the path to the installation disk drive, for example, `/dev/disk/by-id/wwn-0x64cd98f04fde100024684cf3034da5c2`.
-
-[NOTE]
-====
-Before {product-title} 4.12, the cluster installation program only accepted an IPv4 address or an IPv6 address for the `apiVIP` and `ingressVIP` configuration settings. In {product-title} 4.12 and later, these configuration settings are deprecated. Instead, use a list format in the `apiVIPs` and `ingressVIPs` configuration settings to specify IPv4 addresses, IPv6 addresses, or both IP address formats.
-====
-
-. Create a directory to store the cluster configuration:
-+
-[source,terminal]
-----
-$ mkdir ~/clusterconfigs
-----
-
-. Copy the `install-config.yaml` file to the new directory:
-+
-[source,terminal]
-----
-$ cp install-config.yaml ~/clusterconfigs
-----
-
-. Ensure all bare metal nodes are powered off prior to installing the {product-title} cluster:
-+
-[source,terminal]
-----
-$ ipmitool -I lanplus -U <user> -P <password> -H <management-server-ip> power off
-----
-
-. Remove old bootstrap resources if any are left over from a previous deployment attempt:
-+
-[source,bash]
-----
-for i in $(sudo virsh list | tail -n +3 | grep bootstrap | awk {'print $2'});
-do
-  sudo virsh destroy $i;
-  sudo virsh undefine $i;
-  sudo virsh vol-delete $i --pool $i;
-  sudo virsh vol-delete $i.ign --pool $i;
-  sudo virsh pool-destroy $i;
-  sudo virsh pool-undefine $i;
-done
-----
diff --git a/modules/ipi-install-configuring-the-metal3-config-file.adoc b/modules/ipi-install-configuring-the-metal3-config-file.adoc
deleted file mode 100644
index c1a1e116c224..000000000000
--- a/modules/ipi-install-configuring-the-metal3-config-file.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Module included in the following assemblies:
-//
-// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc
-
-:_content-type: PROCEDURE
-[id="configuring-the-metal3-config-file_{context}"]
-= Configuring the `metal3-config.yaml` file
-
-You must create and configure a ConfigMap `metal3-config.yaml` file.
-
-.Procedure
-
-. Create a ConfigMap `metal3-config.yaml.sample`.
-+
-----
-$ vim metal3-config.yaml.sample
-----
-+
-Provide the following contents:
-+
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: metal3-config
-  namespace: openshift-machine-api
-data:
-  cache_url: ''
-  deploy_kernel_url: http://<provisioning_ip>:6180/images/ironic-python-agent.kernel
-  deploy_ramdisk_url: http://<provisioning_ip>:6180/images/ironic-python-agent.initramfs
-  dhcp_range: 172.22.0.10,172.22.0.100
-  http_port: "6180"
-  ironic_endpoint: http://<provisioning_ip>:6385/v1/
-  ironic_inspector_endpoint: http://172.22.0.3:5050/v1/
-  provisioning_interface: <NIC1>
-  provisioning_ip: <provisioning_ip>/24
-  rhcos_image_url: ${RHCOS_URI}${RHCOS_PATH}
-----
-+
-[NOTE]
-====
-Replace `<provisioning_ip>` with an available IP on the `provisioning` network. The default is `172.22.0.3`.
-====
-
-. Create the final ConfigMap.
-+ ----- -$ export COMMIT_ID=$(./openshift-baremetal-install version | grep '^built from commit' | awk '{print $4}') -$ export RHCOS_PATH=$(curl -s -S https://raw.githubusercontent.com/openshift/installer/$COMMIT_ID/data/data/rhcos.json | jq .images.openstack.path | sed 's/"//g') -$ export RHCOS_URI=$(curl -s -S https://raw.githubusercontent.com/openshift/installer/$COMMIT_ID/data/data/rhcos.json | jq .baseURI | sed 's/"//g') -$ envsubst < metal3-config.yaml.sample > metal3-config.yaml ----- diff --git a/modules/ipi-install-configuring-the-raid.adoc b/modules/ipi-install-configuring-the-raid.adoc deleted file mode 100644 index e5cc66f3ba1d..000000000000 --- a/modules/ipi-install-configuring-the-raid.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="configuring-the-raid_{context}"] -= Optional: Configuring the RAID - -The following procedure configures a redundant array of independent disks (RAID) during the installation process. - -[NOTE] -==== -. {product-title} supports hardware RAID for baseboard management controllers (BMCs) using the iRMC protocol only. {product-title} {product-version} does not support software RAID. -. If you want to configure a hardware RAID for the node, verify that the node has a RAID controller. -==== - -.Procedure - -. Create the manifests. - -. Modify the `BareMetalHost` resource corresponding to the node: -+ -[source,terminal] ----- -$ vim clusterconfigs/openshift/99_openshift-cluster-api_hosts-*.yaml ----- -+ -[NOTE] -==== -The following example uses a hardware RAID configuration because {product-title} {product-version} does not support software RAID. -==== -+ -.. If you added a specific RAID configuration to the `spec` section, this causes the node to delete the original RAID configuration in the `preparing` phase and perform a specified configuration on the RAID. For example: -+ -[source,yaml] ----- -spec: - raid: - hardwareRAIDVolumes: - - level: "0" <1> - name: "sda" - numberOfPhysicalDisks: 1 - rotational: true - sizeGibibytes: 0 ----- -<1> `level` is a required field, and the others are optional fields. -+ -.. If you added an empty RAID configuration to the `spec` section, the empty configuration causes the node to delete the original RAID configuration during the `preparing` phase, but does not perform a new configuration. For example: -+ -[source,yaml] ----- -spec: - raid: - hardwareRAIDVolumes: [] ----- -+ -.. If you do not add a `raid` field in the `spec` section, the original RAID configuration is not deleted, and no new configuration will be performed. - -. Create the cluster. diff --git a/modules/ipi-install-creating-a-disconnected-registry.adoc b/modules/ipi-install-creating-a-disconnected-registry.adoc deleted file mode 100644 index cf9ec762b2b6..000000000000 --- a/modules/ipi-install-creating-a-disconnected-registry.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// install/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: CONCEPT -[id="ipi-install-creating-a-disconnected-registry_{context}"] -= Creating a disconnected registry - -In some cases, you might want to install an {product-title} cluster using a local copy of the installation registry. 
This could be for enhancing network efficiency because the cluster nodes are on a network that does not have access to the internet. - -A local, or mirrored, copy of the registry requires the following: - -* A certificate for the registry node. This can be a self-signed certificate. -* A web server that a container on a system will serve. -* An updated pull secret that contains the certificate and local repository information. - -[NOTE] -==== -Creating a disconnected registry on a registry node is optional. If you need to create a disconnected registry on a registry node, you must complete all of the following sub-sections. -==== diff --git a/modules/ipi-install-creating-an-rhcos-images-cache.adoc b/modules/ipi-install-creating-an-rhcos-images-cache.adoc deleted file mode 100644 index f96f6e62c9e3..000000000000 --- a/modules/ipi-install-creating-an-rhcos-images-cache.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="ipi-install-creating-an-rhcos-images-cache_{context}"] -= Optional: Creating an {op-system} images cache - -To employ image caching, you must download the {op-system-first} image used by the bootstrap VM to provision the cluster nodes. Image caching is optional, but it is especially useful when running the installation program on a network with limited bandwidth. - -[NOTE] -==== -The installation program no longer needs the `clusterOSImage` {op-system} image because the correct image is in the release payload. -==== - -If you are running the installation program on a network with limited bandwidth and the {op-system} images download takes more than 15 to 20 minutes, the installation program will timeout. Caching images on a web server will help in such scenarios. - -[WARNING] -==== -If you enable TLS for the HTTPD server, you must confirm the root certificate is signed by an authority trusted by the client and verify the trusted certificate chain between your {product-title} hub and spoke clusters and the HTTPD server. Using a server configured with an untrusted certificate prevents the images from being downloaded to the image creation service. Using untrusted HTTPS servers is not supported. -==== - -Install a container that contains the images. - -.Procedure - -. Install `podman`: -+ -[source,terminal] ----- -$ sudo dnf install -y podman ----- - -. Open firewall port `8080` to be used for {op-system} image caching: -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port=8080/tcp --zone=public --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -. Create a directory to store the `bootstraposimage`: -+ -[source,terminal] ----- -$ mkdir /home/kni/rhcos_image_cache ----- - -. Set the appropriate SELinux context for the newly created directory: -+ -[source,terminal] ----- -$ sudo semanage fcontext -a -t httpd_sys_content_t "/home/kni/rhcos_image_cache(/.*)?" ----- -+ -[source,terminal] ----- -$ sudo restorecon -Rv /home/kni/rhcos_image_cache/ ----- - -. Get the URI for the {op-system} image that the installation program will deploy on the bootstrap VM: -+ -[source,terminal] ----- -$ export RHCOS_QEMU_URI=$(/usr/local/bin/openshift-baremetal-install coreos print-stream-json | jq -r --arg ARCH "$(arch)" '.architectures[$ARCH].artifacts.qemu.formats["qcow2.gz"].disk.location') ----- - -. 
Get the name of the image that the installation program will deploy on the bootstrap VM: -+ -[source,terminal] ----- -$ export RHCOS_QEMU_NAME=${RHCOS_QEMU_URI##*/} ----- - -. Get the SHA hash for the {op-system} image that will be deployed on the bootstrap VM: -+ -[source,terminal] ----- -$ export RHCOS_QEMU_UNCOMPRESSED_SHA256=$(/usr/local/bin/openshift-baremetal-install coreos print-stream-json | jq -r --arg ARCH "$(arch)" '.architectures[$ARCH].artifacts.qemu.formats["qcow2.gz"].disk["uncompressed-sha256"]') ----- - -. Download the image and place it in the `/home/kni/rhcos_image_cache` directory: -+ -[source,terminal] ----- -$ curl -L ${RHCOS_QEMU_URI} -o /home/kni/rhcos_image_cache/${RHCOS_QEMU_NAME} ----- - -. Confirm SELinux type is of `httpd_sys_content_t` for the new file: -+ -[source,terminal] ----- -$ ls -Z /home/kni/rhcos_image_cache ----- - -. Create the pod: -+ -[source,terminal] ----- -$ podman run -d --name rhcos_image_cache \ <1> --v /home/kni/rhcos_image_cache:/var/www/html \ --p 8080:8080/tcp \ -quay.io/centos7/httpd-24-centos7:latest ----- -ifndef::upstream[] -+ -<1> Creates a caching webserver with the name `rhcos_image_cache`. This pod serves the `bootstrapOSImage` image in the `install-config.yaml` file for deployment. -endif::[] - -. Generate the `bootstrapOSImage` configuration: -+ -[source,terminal] ----- -$ export BAREMETAL_IP=$(ip addr show dev baremetal | awk '/inet /{print $2}' | cut -d"/" -f1) ----- -+ -[source,terminal] ----- -$ export BOOTSTRAP_OS_IMAGE="http://${BAREMETAL_IP}:8080/${RHCOS_QEMU_NAME}?sha256=${RHCOS_QEMU_UNCOMPRESSED_SHA256}" ----- -+ -[source,terminal] ----- -$ echo " bootstrapOSImage=${BOOTSTRAP_OS_IMAGE}" ----- - -. Add the required configuration to the `install-config.yaml` file under `platform.baremetal`: -+ -[source,terminal] ----- -platform: - baremetal: - bootstrapOSImage: <bootstrap_os_image> <1> ----- -<1> Replace `<bootstrap_os_image>` with the value of `$BOOTSTRAP_OS_IMAGE`. -+ -See the "Configuring the install-config.yaml file" section for additional details. diff --git a/modules/ipi-install-creating-the-openshift-manifests.adoc b/modules/ipi-install-creating-the-openshift-manifests.adoc deleted file mode 100644 index a6df3631755d..000000000000 --- a/modules/ipi-install-creating-the-openshift-manifests.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="creating-the-openshift-manifests_{context}"] -= Creating the {product-title} manifests - -. Create the {product-title} manifests. 
-+ -[source,terminal] ----- -$ ./openshift-baremetal-install --dir ~/clusterconfigs create manifests ----- -+ -[source,terminal] ----- -INFO Consuming Install Config from target directory -WARNING Making control-plane schedulable by setting MastersSchedulable to true for Scheduler cluster settings -WARNING Discarding the OpenShift Manifest that was provided in the target directory because its dependencies are dirty and it needs to be regenerated ----- diff --git a/modules/ipi-install-deploying-routers-on-worker-nodes.adoc b/modules/ipi-install-deploying-routers-on-worker-nodes.adoc deleted file mode 100644 index 061049a4b502..000000000000 --- a/modules/ipi-install-deploying-routers-on-worker-nodes.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="deploying-routers-on-worker-nodes_{context}"] -= Optional: Deploying routers on worker nodes - -During installation, the installer deploys router pods on worker nodes. By default, the installer installs two router pods. If a deployed cluster requires additional routers to handle external traffic loads destined for services within the {product-title} cluster, you can create a `yaml` file to set an appropriate number of router replicas. - -[IMPORTANT] -==== -Deploying a cluster with only one worker node is not supported. While modifying the router replicas will address issues with the `degraded` state when deploying with one worker, the cluster loses high availability for the ingress API, which is not suitable for production environments. -==== - -[NOTE] -==== -By default, the installer deploys two routers. If the cluster has no worker nodes, the installer deploys the two routers on the control plane nodes by default. -==== - -.Procedure - -. Create a `router-replicas.yaml` file: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: <num-of-router-pods> - endpointPublishingStrategy: - type: HostNetwork - nodePlacement: - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" ----- -+ -[NOTE] -==== -Replace `<num-of-router-pods>` with an appropriate value. If working with just one worker node, set `replicas:` to `1`. If working with more than 3 worker nodes, you can increase `replicas:` from the default value `2` as appropriate. -==== - -. 
Save and copy the `router-replicas.yaml` file to the `clusterconfigs/openshift` directory: -+ -[source,terminal] ----- -$ cp ~/router-replicas.yaml clusterconfigs/openshift/99_router-replicas.yaml ----- diff --git a/modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc b/modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc deleted file mode 100644 index 43409f23a30c..000000000000 --- a/modules/ipi-install-deploying-the-cluster-via-the-openshift-installer.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id='deploying-the-cluster-via-the-openshift-installer_{context}'] -= Deploying the cluster via the {product-title} installer - -Run the {product-title} installer: - -[source,terminal] ----- -$ ./openshift-baremetal-install --dir ~/clusterconfigs --log-level debug create cluster ----- diff --git a/modules/ipi-install-diagnosing-duplicate-mac-address.adoc b/modules/ipi-install-diagnosing-duplicate-mac-address.adoc deleted file mode 100644 index f2641ee92e1e..000000000000 --- a/modules/ipi-install-diagnosing-duplicate-mac-address.adoc +++ /dev/null @@ -1,54 +0,0 @@ -:_content-type: PROCEDURE -[id="ipi-install-diagnosing-duplicate-mac-address_{context}"] -= Diagnosing a duplicate MAC address when provisioning a new host in the cluster - -If the MAC address of an existing bare-metal node in the cluster matches the MAC address of a bare-metal host you are attempting to add to the cluster, the Bare Metal Operator associates the host with the existing node. If the host enrollment, inspection, cleaning, or other Ironic steps fail, the Bare Metal Operator retries the installation continuously. A registration error is displayed for the failed bare-metal host. - -You can diagnose a duplicate MAC address by examining the bare-metal hosts that are running in the `openshift-machine-api` namespace. - -.Prerequisites - -* Install an {product-title} cluster on bare metal. -* Install the {product-title} CLI `oc`. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -To determine whether a bare-metal host that fails provisioning has the same MAC address as an existing node, do the following: - -. Get the bare-metal hosts running in the `openshift-machine-api` namespace: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS PROVISIONING STATUS CONSUMER -openshift-master-0 OK externally provisioned openshift-zpwpq-master-0 -openshift-master-1 OK externally provisioned openshift-zpwpq-master-1 -openshift-master-2 OK externally provisioned openshift-zpwpq-master-2 -openshift-worker-0 OK provisioned openshift-zpwpq-worker-0-lv84n -openshift-worker-1 OK provisioned openshift-zpwpq-worker-0-zd8lm -openshift-worker-2 error registering ----- - -. To see more detailed information about the status of the failing host, run the following command replacing `<bare_metal_host_name>` with the name of the host: -+ -[source,terminal] ----- -$ oc get -n openshift-machine-api bmh <bare_metal_host_name> -o yaml ----- -+ -.Example output -[source,yaml] ----- -... -status: - errorCount: 12 - errorMessage: MAC address b4:96:91:1d:7c:20 conflicts with existing node openshift-worker-1 - errorType: registration error -... 
----- diff --git a/modules/ipi-install-extracting-the-openshift-installer.adoc b/modules/ipi-install-extracting-the-openshift-installer.adoc deleted file mode 100644 index 163f6743e738..000000000000 --- a/modules/ipi-install-extracting-the-openshift-installer.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="extracting-the-openshift-installer_{context}"] -= Extracting the {product-title} installer - -After retrieving the installer, the next step is to extract it. - -.Procedure - -. Set the environment variables: -+ -[source,terminal] ----- -$ export cmd=openshift-baremetal-install ----- -+ -[source,terminal] ----- -$ export pullsecret_file=~/pull-secret.txt ----- -+ -[source,terminal] ----- -$ export extract_dir=$(pwd) ----- - - -. Get the `oc` binary: -+ -[source,terminal] ----- -$ curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$VERSION/openshift-client-linux.tar.gz | tar zxvf - oc ----- - -. Extract the installer: -+ -[source,terminal] ----- -$ sudo cp oc /usr/local/bin ----- -+ -[source,terminal] ----- -$ oc adm release extract --registry-config "${pullsecret_file}" --command=$cmd --to "${extract_dir}" ${RELEASE_IMAGE} ----- -+ -[source,terminal] ----- -$ sudo cp openshift-baremetal-install /usr/local/bin ----- diff --git a/modules/ipi-install-firmware-requirements-for-installing-with-virtual-media.adoc b/modules/ipi-install-firmware-requirements-for-installing-with-virtual-media.adoc deleted file mode 100644 index 0ae303aab7e5..000000000000 --- a/modules/ipi-install-firmware-requirements-for-installing-with-virtual-media.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id='ipi-install-firmware-requirements-for-installing-with-virtual-media_{context}'] -= Firmware requirements for installing with virtual media - -The installation program for installer-provisioned {product-title} clusters validates the hardware and firmware compatibility with Redfish virtual media. The installation program does not begin installation on a node if the node firmware is not compatible. The following tables list the minimum firmware versions tested and verified to work for installer-provisioned {product-title} clusters deployed by using Redfish virtual media. - -[NOTE] -==== -Red Hat does not test every combination of firmware, hardware, or other third-party components. For further information about third-party support, see link:https://access.redhat.com/third-party-software-support[Red Hat third-party support policy]. For information about updating the firmware, see the hardware documentation for the nodes or contact the hardware vendor. -==== - -.Firmware compatibility for HP hardware with Redfish virtual media -[frame="topbot", options="header"] -[cols="1,1,1"] -|==== -| Model | Management | Firmware versions -| 10th Generation | iLO5 | 2.63 or later - -|==== - -.Firmware compatibility for Dell hardware with Redfish virtual media -[frame="topbot", options="header"] -|==== -| Model | Management | Firmware versions - -| 15th Generation | iDRAC 9 | v6.10.30.00 -| 14th Generation | iDRAC 9 | v6.10.30.00 - -| 13th Generation .2+| iDRAC 8 | v2.75.75.75 or later - -|==== - -[NOTE] -==== - -For Dell servers, ensure the {product-title} cluster nodes have *AutoAttach* enabled through the iDRAC console. 
The menu path is *Configuration* -> *Virtual Media* -> *Attach Mode* -> *AutoAttach* . With iDRAC 9 firmware version `04.40.00.00` and all releases up to including the `5.xx` series, the virtual console plugin defaults to eHTML5, an enhanced version of HTML5, which causes problems with the *InsertVirtualMedia* workflow. Set the plugin to use HTML5 to avoid this issue. The menu path is *Configuration* -> *Virtual console* -> *Plug-in Type* -> *HTML5* . -==== diff --git a/modules/ipi-install-following-the-installation.adoc b/modules/ipi-install-following-the-installation.adoc deleted file mode 100644 index 2d2f4ca4ead5..000000000000 --- a/modules/ipi-install-following-the-installation.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -//installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-following-the-installation_{context}"] -= Following the installation - -During the deployment process, you can check the installation's overall status by issuing the `tail` command to the `.openshift_install.log` log file in the install directory folder: - -[source,terminal] ----- -$ tail -f /path/to/install-dir/.openshift_install.log ----- diff --git a/modules/ipi-install-installing-rhel-on-the-provisioner-node.adoc b/modules/ipi-install-installing-rhel-on-the-provisioner-node.adoc deleted file mode 100644 index 799a1c9ac08e..000000000000 --- a/modules/ipi-install-installing-rhel-on-the-provisioner-node.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="installing-rhel-on-the-provisioner-node_{context}"] -= Installing {op-system-base} on the provisioner node - -With the configuration of the prerequisites complete, the next step is to install {op-system-base} {op-system-version} on the provisioner node. The installer uses the provisioner node as the orchestrator while installing the {product-title} cluster. For the purposes of this document, installing {op-system-base} on the provisioner node is out of scope. However, options include but are not limited to using a RHEL Satellite server, PXE, or installation media. diff --git a/modules/ipi-install-ipv6-network-requirements.adoc b/modules/ipi-install-ipv6-network-requirements.adoc deleted file mode 100644 index 65273f590ac0..000000000000 --- a/modules/ipi-install-ipv6-network-requirements.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id="ipi-install-ipv6-network-requirements_{context}"] -= IPv6 Network Requirements - -.SLAAC addressing - -If you do not plan to use SLAAC (Stateless Address AutoConfiguration) addresses on your {product-title} node, then it should be disabled for `baremetal` networks. That means that if the network equipment is configured to send SLAAC addresses when replying to Route Advertisements, you must configure the equipment so that it only sends the route and not the SLAAC address. - -.Procedure - -. Install `ndptool`. -+ -[source,terminal] ----- -$ sudo dnf install ndptool ----- - -. Turn down the `baremetal` interface on a control plane node, and then turn it back up. 
-+ -[source,terminal] ----- -$ sudo nmcli con down "Wired connection 5" && sudo nmcli con up "Wired connection 5" ----- -+ -[source,terminal] ----- -Connection 'Wired connection 5' successfully deactivated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/1983) -Connection successfully activated (D-Bus active path: /org/freedesktop/NetworkManager/ActiveConnection/2044) ----- - -. Activate the `ndptool` to check the route advertisements: -+ -[source,terminal] ----- -$ sudo ndptool monitor -t ra ----- -+ -[source,terminal] ----- -NDP payload len 80, from addr: fe80::c0a4:6464:bcb3:d657, iface: baremetal.153 - Type: RA - Hop limit: 64 - Managed address configuration: yes - Other configuration: no - Default router preference: medium - Router lifetime: 0s - Reachable time: unspecified - Retransmit time: unspecified - Source linkaddr: 1c:40:24:1b:0c:34 - Prefix: 2620:52:0:1303::/64, valid_time: 86400s, preferred_time: 14400s, on_link: yes, autonomous_addr_conf: no, router_addr: no - Route: ::/0, lifetime: 0s, preference: low ----- -+ -The `ndptool monitor` should report `Managed address configuration: yes`. - -.Network ranges and configurations - -Each environment requires different `baremetal` and `provisioning` network; and, will have a different IPv6 range for each network. Typically, only the `baremetal` network is routable. The `provisioning` network cannot be in the same broadcast domain as the `baremetal` network, because of additional services running, such as DHCP. - -.Route advertisements - -[IMPORTANT] -==== -Route advertisement must be enabled for both the `baremetal` and `provisioning` networks. -==== - -Both the `baremetal` and the `provisioning` networks must have route advertisement enabled. For the `baremetal` network, installer-provisioned installation uses the `radvd` daemon, and the `provisioning` network has route advertisement enabled in the Metal³ dnsmasq, so it requires no additional configuration. diff --git a/modules/ipi-install-mirroring-for-disconnected-registry.adoc b/modules/ipi-install-mirroring-for-disconnected-registry.adoc deleted file mode 100644 index ff216870ccce..000000000000 --- a/modules/ipi-install-mirroring-for-disconnected-registry.adoc +++ /dev/null @@ -1,246 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// install/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="ipi-install-mirroring-for-disconnected-registry_{context}"] -= Mirroring the {product-title} image repository for a disconnected registry - -Complete the following steps to mirror the {product-title} image repository for a disconnected registry. - -.Prerequisites - -* Your mirror host has access to the internet. -* You configured a mirror registry to use in your restricted network and -can access the certificate and credentials that you configured. -ifndef::openshift-origin[] -* You downloaded the {cluster-manager-url-pull} and modified it to include authentication to your mirror repository. -endif::[] -ifdef::openshift-origin[] -* You have created a pull secret for your mirror repository. -endif::[] - -.Procedure - -. Review the -link:https://access.redhat.com/downloads/content/290/[{product-title} downloads page] -to determine the version of {product-title} that you want to install and determine the corresponding tag on the link:https://quay.io/repository/openshift-release-dev/ocp-release?tab=tags[Repository Tags] page. - -. 
Set the required environment variables: -.. Export the release version: -+ -[source,terminal] ----- -$ OCP_RELEASE=<release_version> ----- -+ -For `<release_version>`, specify the tag that corresponds to the version of {product-title} to -install, such as `4.5.4`. - -.. Export the local registry name and host port: -+ -[source,terminal] ----- -$ LOCAL_REGISTRY='<local_registry_host_name>:<local_registry_host_port>' ----- -+ -For `<local_registry_host_name>`, specify the registry domain name for your mirror -repository, and for `<local_registry_host_port>`, specify the port that it -serves content on. - -.. Export the local repository name: -+ -[source,terminal] ----- -$ LOCAL_REPOSITORY='<local_repository_name>' ----- -+ -For `<local_repository_name>`, specify the name of the repository to create in your -registry, such as `ocp4/openshift4`. - -.. Export the name of the repository to mirror: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ PRODUCT_REPO='openshift-release-dev' ----- -+ -For a production release, you must specify `openshift-release-dev`. -endif::[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ PRODUCT_REPO='openshift' ----- -endif::[] - -.. Export the path to your registry pull secret: -+ -[source,terminal] ----- -$ LOCAL_SECRET_JSON='<path_to_pull_secret>' ----- -+ -For `<path_to_pull_secret>`, specify the absolute path to and file name of the pull secret for your mirror registry that you created. - -.. Export the release mirror: -+ -ifndef::openshift-origin[] -[source,terminal] ----- -$ RELEASE_NAME="ocp-release" ----- -+ -For a production release, you must specify `ocp-release`. -endif::[] -ifdef::openshift-origin[] -[source,terminal] ----- -$ RELEASE_NAME="okd" ----- -endif::[] - -ifndef::openshift-origin[] -.. Export the type of architecture for your cluster: -+ -[source,terminal] ----- -$ ARCHITECTURE=<cluster_architecture> <1> ----- -<1> Specify the architecture of the cluster, such as `x86_64`, `aarch64`, `s390x`, or `ppc64le`. - -endif::[] - -.. Export the path to the directory to host the mirrored images: -+ -[source,terminal] ----- -$ REMOVABLE_MEDIA_PATH=<path> <1> ----- -<1> Specify the full path, including the initial forward slash (/) character. - -. Mirror the version images to the mirror registry: -** If your mirror host does not have internet access, take the following actions: -... Connect the removable media to a system that is connected to the internet. -... Review the images and configuration manifests to mirror: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE} --dry-run ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE} --dry-run ----- -endif::[] - -... Record the entire `imageContentSources` section from the output of the previous -command. The information about your mirrors is unique to your mirrored repository, and you must add the `imageContentSources` section to the `install-config.yaml` file during installation. -... 
Mirror the images to a directory on the removable media: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} --to-dir=${REMOVABLE_MEDIA_PATH}/mirror quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} --to-dir=${REMOVABLE_MEDIA_PATH}/mirror quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} ----- -endif::[] - -... Take the media to the restricted network environment and upload the images to the local container registry. -+ -[source,terminal] ----- -$ oc image mirror -a ${LOCAL_SECRET_JSON} --from-dir=${REMOVABLE_MEDIA_PATH}/mirror "file://openshift/release:${OCP_RELEASE}*" ${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} <1> ----- -+ -<1> For `REMOVABLE_MEDIA_PATH`, you must use the same path that you specified when you mirrored the images. - -** If the local container registry is connected to the mirror host, take the following actions: -... Directly push the release images to the local registry by using following command: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE} ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release mirror -a ${LOCAL_SECRET_JSON} \ - --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE}-${ARCHITECTURE} \ - --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \ - --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE} ----- -endif::[] -+ -This command pulls the release information as a digest, and its output includes -the `imageContentSources` data that you require when you install your cluster. - -... Record the entire `imageContentSources` section from the output of the previous -command. The information about your mirrors is unique to your mirrored repository, and you must add the `imageContentSources` section to the `install-config.yaml` file during installation. -+ -[NOTE] -==== -The image name gets patched to Quay.io during the mirroring process, and the podman images will show Quay.io in the registry on the bootstrap virtual machine. -==== - -. To create the installation program that is based on the content that you -mirrored, extract it and pin it to the release: -** If your mirror host does not have internet access, run the following command: -+ -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-baremetal-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}" ----- -** If the local container registry is connected to the mirror host, run the following command: -+ -ifdef::openshift-origin[] -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-baremetal-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}" ----- -endif::[] -ifndef::openshift-origin[] -[source,terminal] ----- -$ oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-baremetal-install "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}-${ARCHITECTURE}" ----- -endif::[] -+ -[IMPORTANT] -==== -To ensure that you use the correct images for the version of {product-title} -that you selected, you must extract the installation program from the mirrored -content. 
- -You must perform this step on a machine with an active internet connection. - -If you are in a disconnected environment, use the `--image` flag as part of must-gather and point to the payload image. -==== -+ -. For clusters using installer-provisioned infrastructure, run the following command: -+ -[source,terminal] ----- -$ openshift-baremetal-install ----- diff --git a/modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc b/modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc deleted file mode 100644 index 5b92189d4b74..000000000000 --- a/modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-configuration-files.adoc -// installing-vsphere-installer-provisioned-network-customizations.adoc -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vSphere: -endif::[] - -:_content-type: PROCEDURE -[id='modifying-install-config-for-dual-stack-network_{context}'] -= Optional: Deploying with dual-stack networking - -For dual-stack networking in {product-title} clusters, you can configure IPv4 and IPv6 address endpoints for cluster nodes. To configure IPv4 and IPv6 address endpoints for cluster nodes, edit the `machineNetwork`, `clusterNetwork`, and `serviceNetwork` configuration settings in the `install-config.yaml` file. Each setting must have two CIDR entries each. For a cluster with the IPv4 family as the primary address family, specify the IPv4 setting first. For a cluster with the IPv6 family as the primary address family, specify the IPv6 setting first. - -[source,yaml] ----- -machineNetwork: -- cidr: {{ extcidrnet }} -- cidr: {{ extcidrnet6 }} -clusterNetwork: -- cidr: 10.128.0.0/14 - hostPrefix: 23 -- cidr: fd02::/48 - hostPrefix: 64 -serviceNetwork: -- 172.30.0.0/16 -- fd03::/112 ----- - -To provide an interface to the cluster for applications that use IPv4 and IPv6 addresses, configure IPv4 and IPv6 virtual IP (VIP) address endpoints for the Ingress VIP and API VIP services. To configure IPv4 and IPv6 address endpoints, edit the `apiVIPs` and `ingressVIPs` configuration settings in the `install-config.yaml` file . The `apiVIPs` and `ingressVIPs` configuration settings use a list format. The order of the list indicates the primary and secondary VIP address for each service. - -[source,yaml] ----- -platform: - baremetal: - apiVIPs: - - <api_ipv4> - - <api_ipv6> - ingressVIPs: - - <wildcard_ipv4> - - <wildcard_ipv6> ----- - -ifdef::vSphere[] -[IMPORTANT] -==== -You can configure dual-stack networking on a single interface only. -==== - -[NOTE] -==== -* In a vSphere cluster configured for dual-stack networking, the node custom resource object has only the IP address from the primary network listed in `Status.addresses` field. -* In the pod that uses the host networking with dual-stack connectivity, the `Status.podIP` and `Status.podIPs` fields contain only the IP address from the primary network. 
-==== -endif::vSphere[] - -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:!vSphere: -endif::[] \ No newline at end of file diff --git a/modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc b/modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc deleted file mode 100644 index 082a02d4e122..000000000000 --- a/modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-configuration-files.adoc - -:_content-type: PROCEDURE -[id='modifying-install-config-for-no-provisioning-network_{context}'] -= Optional: Deploying with no provisioning network - -To deploy an {product-title} cluster without a `provisioning` network, make the following changes to the `install-config.yaml` file. - -[source,yaml] ----- -platform: - baremetal: - apiVIPs: - - <api_VIP> - ingressVIPs: - - <ingress_VIP> - provisioningNetwork: "Disabled" <1> ----- - -<1> Add the `provisioningNetwork` configuration setting, if needed, and set it to `Disabled`. - -[IMPORTANT] -==== -The `provisioning` network is required for PXE booting. If you deploy without a `provisioning` network, you must use a virtual media BMC addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. See "Redfish virtual media for HPE iLO" in the "BMC addressing for HPE iLO" section or "Redfish virtual media for Dell iDRAC" in the "BMC addressing for Dell iDRAC" section for additional details. -==== diff --git a/modules/ipi-install-network-requirements.adoc b/modules/ipi-install-network-requirements.adoc deleted file mode 100644 index 6f7e3f8872ea..000000000000 --- a/modules/ipi-install-network-requirements.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id='network-requirements_{context}'] -= Network requirements - -Installer-provisioned installation of {product-title} involves several network requirements. First, installer-provisioned installation involves an optional non-routable `provisioning` network for provisioning the operating system on each bare metal node. Second, installer-provisioned installation involves a routable `baremetal` network. - -image::210_OpenShift_Baremetal_IPI_Deployment_updates_0122_2.png[Installer-provisioned networking] - - -[id="network-requirements-increase-mtu_{context}"] -== Increase the network MTU - -Before deploying {product-title}, increase the network maximum transmission unit (MTU) to 1500 or more. If the MTU is lower than 1500, the Ironic image that is used to boot the node might fail to communicate with the Ironic inspector pod, and inspection will fail. If this occurs, installation stops because the nodes are not available for installation. - -[id='network-requirements-config-nics_{context}'] -== Configuring NICs - -{product-title} deploys with two networks: - -- `provisioning`: The `provisioning` network is an optional non-routable network used for provisioning the underlying operating system on each node that is a part of the {product-title} cluster. The network interface for the `provisioning` network on each cluster node must have the BIOS or UEFI configured to PXE boot. 
-+ -The `provisioningNetworkInterface` configuration setting specifies the `provisioning` network NIC name on the control plane nodes, which must be identical on the control plane nodes. The `bootMACAddress` configuration setting provides a means to specify a particular NIC on each node for the `provisioning` network. -+ -The `provisioning` network is optional, but it is required for PXE booting. If you deploy without a `provisioning` network, you must use a virtual media BMC addressing option such as `redfish-virtualmedia` or `idrac-virtualmedia`. - -- `baremetal`: The `baremetal` network is a routable network. You can use any NIC to interface with the `baremetal` network provided the NIC is not configured to use the `provisioning` network. - -[IMPORTANT] -==== -When using a VLAN, each NIC must be on a separate VLAN corresponding to the appropriate network. -==== - -[id='network-requirements-dns_{context}'] -== DNS requirements - -Clients access the {product-title} cluster nodes over the `baremetal` network. A network administrator must configure a subdomain or subzone where the canonical name extension is the cluster name. - -[source,text] ----- -<cluster_name>.<base_domain> ----- - -For example: - -[source,text] ----- -test-cluster.example.com ----- - -{product-title} includes functionality that uses cluster membership information to generate A/AAAA records. This resolves the node names to their IP addresses. After the nodes are registered with the API, the cluster can disperse node information without using CoreDNS-mDNS. This eliminates the network traffic associated with multicast DNS. - -In {product-title} deployments, DNS name resolution is required for the following components: - -* The Kubernetes API -* The {product-title} application wildcard ingress API - -A/AAAA records are used for name resolution and PTR records are used for reverse name resolution. {op-system-first} uses the reverse records or DHCP to set the hostnames for all the nodes. - -Installer-provisioned installation includes functionality that uses cluster membership information to generate A/AAAA records. This resolves the node names to their IP addresses. In each record, `<cluster_name>` is the cluster name and `<base_domain>` is the base domain that you specify in the `install-config.yaml` file. A complete DNS record takes the form: `<component>.<cluster_name>.<base_domain>.`. - -.Required DNS records -[cols="1a,3a,5a",options="header"] -|=== - -|Component -|Record -|Description - -|Kubernetes API -|`api.<cluster_name>.<base_domain>.` -|An A/AAAA record and a PTR record identify the API load balancer. These records must be resolvable by both clients external to the cluster and from all the nodes within the cluster. - -|Routes -|`*.apps.<cluster_name>.<base_domain>.` -|The wildcard A/AAAA record refers to the application ingress load balancer. The application ingress load balancer targets the nodes that run the Ingress Controller pods. The Ingress Controller pods run on the worker nodes by default. These records must be resolvable by both clients external to the cluster and from all the nodes within the cluster. - -For example, `console-openshift-console.apps.<cluster_name>.<base_domain>` is used as a wildcard route to the {product-title} console. - -|=== - -[TIP] -==== -You can use the `dig` command to verify DNS resolution. 
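-
-For example, a quick spot check of the API record, a name under the wildcard ingress record, and a PTR record might look like the following; replace the placeholders, including the illustrative `test` prefix and `<node_ip>`, with values from your environment:
-
-[source,terminal]
-----
-$ dig +short api.<cluster_name>.<base_domain>
-$ dig +short test.apps.<cluster_name>.<base_domain>
-$ dig +short -x <node_ip>
-----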
-====
-
-[id='network-requirements-dhcp-reqs_{context}']
-== Dynamic Host Configuration Protocol (DHCP) requirements
-
-By default, installer-provisioned installation deploys `ironic-dnsmasq` with DHCP enabled for the `provisioning` network. No other DHCP servers should be running on the `provisioning` network when the `provisioningNetwork` configuration setting is set to `managed`, which is the default value. If you have a DHCP server running on the `provisioning` network, you must set the `provisioningNetwork` configuration setting to `unmanaged` in the `install-config.yaml` file.
-
-Network administrators must reserve IP addresses for each node in the {product-title} cluster for the `baremetal` network on an external DHCP server.
-
-[id='network-requirements-reserving-ip-addresses_{context}']
-== Reserving IP addresses for nodes with the DHCP server
-
-For the `baremetal` network, a network administrator must reserve a number of IP addresses, including:
-
-. Two unique virtual IP addresses.
-+
-- One virtual IP address for the API endpoint.
-- One virtual IP address for the wildcard ingress endpoint.
-+
-. One IP address for the provisioner node.
-. One IP address for each control plane node.
-. One IP address for each worker node, if applicable.
-
-[IMPORTANT]
-.Reserving IP addresses so they become static IP addresses
-====
-Some administrators prefer to use static IP addresses so that each node's IP address remains constant in the absence of a DHCP server. To configure static IP addresses with NMState, see "(Optional) Configuring host network interfaces" in the "Setting up the environment for an OpenShift installation" section.
-====
-
-[IMPORTANT]
-.Networking between external load balancers and control plane nodes
-====
-External load balancing services and the control plane nodes must run on the same L2 network, and on the same VLAN when using VLANs to route traffic between the load balancing services and the control plane nodes.
-====
-
-[IMPORTANT]
-====
-The storage interface requires a DHCP reservation or a static IP.
-====
-
-The following table provides an example of fully qualified domain names. The API and Nameserver addresses begin with canonical name extensions. The hostnames of the control plane and worker nodes are examples, so you can use any host naming convention you prefer.
-
-[width="100%", cols="3,5,2", options="header"]
-|=====
-| Usage | Host Name | IP
-| API | `api.<cluster_name>.<base_domain>` | `<ip>`
-| Ingress LB (apps) | `*.apps.<cluster_name>.<base_domain>` | `<ip>`
-| Provisioner node | `provisioner.<cluster_name>.<base_domain>` | `<ip>`
-| Control-plane-0 | `openshift-control-plane-0.<cluster_name>.<base_domain>` | `<ip>`
-| Control-plane-1 | `openshift-control-plane-1.<cluster_name>.<base_domain>` | `<ip>`
-| Control-plane-2 | `openshift-control-plane-2.<cluster_name>.<base_domain>` | `<ip>`
-| Worker-0 | `openshift-worker-0.<cluster_name>.<base_domain>` | `<ip>`
-| Worker-1 | `openshift-worker-1.<cluster_name>.<base_domain>` | `<ip>`
-| Worker-n | `openshift-worker-n.<cluster_name>.<base_domain>` | `<ip>`
-|=====
-
-[NOTE]
-====
-If you do not create DHCP reservations, the installer requires reverse DNS resolution to set the hostnames for the Kubernetes API node, the provisioner node, the control plane nodes, and the worker nodes.
-====
-
-[id='network-requirements-ntp_{context}']
-== Network Time Protocol (NTP)
-
-Each {product-title} node in the cluster must have access to an NTP server.
{product-title} nodes use NTP to synchronize their clocks. For example, cluster nodes use SSL certificates that require validation, which might fail if the date and time between the nodes are not in sync. - -[IMPORTANT] -==== -Define a consistent clock date and time format in each cluster node's BIOS settings, or installation might fail. -==== - -You can reconfigure the control plane nodes to act as NTP servers on disconnected clusters, and reconfigure worker nodes to retrieve time from the control plane nodes. - -[id='network-requirements-out-of-band_{context}'] -== Port access for the out-of-band management IP address - -The out-of-band management IP address is on a separate network from the node. To ensure that the out-of-band management can communicate with the provisioner during installation, the out-of-band management IP address must be granted access to port `6180` on the bootstrap host and on the {product-title} control plane hosts. TLS port `6183` is required for virtual media installation, for example, via Redfish. diff --git a/modules/ipi-install-node-requirements.adoc b/modules/ipi-install-node-requirements.adoc deleted file mode 100644 index 68a0a5bf2447..000000000000 --- a/modules/ipi-install-node-requirements.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id="node-requirements_{context}"] -= Node requirements - -Installer-provisioned installation involves a number of hardware node requirements: - -* *CPU architecture:* All nodes must use `x86_64` -ifndef::openshift-origin[] -or `aarch64` -endif::openshift-origin[] -CPU architecture. -* *Similar nodes:* Red Hat recommends nodes have an identical configuration per role. That is, Red Hat recommends nodes be the same brand and model with the same CPU, memory, and storage configuration. - -* *Baseboard Management Controller:* The `provisioner` node must be able to access the baseboard management controller (BMC) of each {product-title} cluster node. You may use IPMI, Redfish, or a proprietary protocol. - -ifndef::openshift-origin[] -* *Latest generation:* Nodes must be of the most recent generation. Installer-provisioned installation relies on BMC protocols, which must be compatible across nodes. Additionally, {op-system-base} 8 ships with the most recent drivers for RAID controllers. Ensure that the nodes are recent enough to support {op-system-base} 8 for the `provisioner` node and {op-system} 8 for the control plane and worker nodes. -endif::[] -ifdef::openshift-origin[] -* *Latest generation:* Nodes must be of the most recent generation. Installer-provisioned installation relies on BMC protocols, which must be compatible across nodes. Additionally, {op-system-first} ships with the most recent drivers for RAID controllers. Ensure that the nodes are recent enough to support {op-system} for the `provisioner` node and {op-system} for the control plane and worker nodes. -endif::[] - -* *Registry node:* (Optional) If setting up a disconnected mirrored registry, it is recommended the registry reside in its own node. - -* *Provisioner node:* Installer-provisioned installation requires one `provisioner` node. - -* *Control plane:* Installer-provisioned installation requires three control plane nodes for high availability. You can deploy an {product-title} cluster with only three control plane nodes, making the control plane nodes schedulable as worker nodes. 
Smaller clusters are more resource efficient for administrators and developers during development, production, and testing. - -* *Worker nodes:* While not required, a typical production cluster has two or more worker nodes. -+ -[IMPORTANT] -==== -Do not deploy a cluster with only one worker node, because the cluster will deploy with routers and ingress traffic in a degraded state. -==== - -* *Network interfaces:* Each node must have at least one network interface for the routable `baremetal` network. Each node must have one network interface for a `provisioning` network when using the `provisioning` network for deployment. Using the `provisioning` network is the default configuration. - -* *Unified Extensible Firmware Interface (UEFI):* Installer-provisioned installation requires UEFI boot on all {product-title} nodes when using IPv6 addressing on the `provisioning` network. In addition, UEFI Device PXE Settings must be set to use the IPv6 protocol on the `provisioning` network NIC, but omitting the `provisioning` network removes this requirement. -+ -[IMPORTANT] -==== -When starting the installation from virtual media such as an ISO image, delete all old UEFI boot table entries. If the boot table includes entries that are not generic entries provided by the firmware, the installation might fail. -==== - -* *Secure Boot:* Many production scenarios require nodes with Secure Boot enabled to verify the node only boots with trusted software, such as UEFI firmware drivers, EFI applications, and the operating system. You may deploy with Secure Boot manually or managed. -+ -. *Manually:* To deploy an {product-title} cluster with Secure Boot manually, you must enable UEFI boot mode and Secure Boot on each control plane node and each worker node. Red Hat supports Secure Boot with manually enabled UEFI and Secure Boot only when installer-provisioned installations use Redfish virtual media. See "Configuring nodes for Secure Boot manually" in the "Configuring nodes" section for additional details. -+ -. *Managed:* To deploy an {product-title} cluster with managed Secure Boot, you must set the `bootMode` value to `UEFISecureBoot` in the `install-config.yaml` file. Red Hat only supports installer-provisioned installation with managed Secure Boot on 10th generation HPE hardware and 13th generation Dell hardware running firmware version `2.75.75.75` or greater. Deploying with managed Secure Boot does not require Redfish virtual media. See "Configuring managed Secure Boot" in the "Setting up the environment for an OpenShift installation" section for details. -+ -[NOTE] -==== -Red Hat does not support Secure Boot with self-generated keys. -==== diff --git a/modules/ipi-install-out-of-band-management.adoc b/modules/ipi-install-out-of-band-management.adoc deleted file mode 100644 index 7863712aa64b..000000000000 --- a/modules/ipi-install-out-of-band-management.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id="out-of-band-management_{context}"] -= Out-of-band management - -Nodes typically have an additional NIC used by the baseboard management controllers (BMCs). These BMCs must be accessible from the provisioner node. - -Each node must be accessible via out-of-band management. When using an out-of-band management network, the provisioner node requires access to the out-of-band management network for a successful {product-title} installation. 
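A quick way to confirm that the provisioner node can reach a node's BMC is to query it directly. The following checks are a sketch only: they assume IPMI over LAN is enabled on the BMC and, for the second command, that the BMC exposes a standard Redfish service root. Replace `<user>`, `<password>`, and `<out_of_band_ip>` with values for your environment.

[source,terminal]
----
$ ipmitool -I lanplus -U <user> -P <password> -H <out_of_band_ip> chassis status
----

[source,terminal]
----
$ curl -k https://<out_of_band_ip>/redfish/v1/
----

If these checks succeed from another host but fail from the provisioner node, the problem is usually routing or firewall rules between the provisioner node and the management network.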
- -The out-of-band management setup is out of scope for this document. Using a separate management network for out-of-band management can enhance performance and improve security. However, using the provisioning network or the bare metal network are valid options. - -[NOTE] -==== -The bootstrap VM features a maximum of two network interfaces. If you configure a separate management network for out-of-band management, and you are using a provisioning network, the bootstrap VM requires routing access to the management network through one of the network interfaces. In this scenario, the bootstrap VM can then access three networks: - -* the bare metal network -* the provisioning network -* the management network routed through one of the network interfaces -==== - diff --git a/modules/ipi-install-preparing-a-disconnected-registry.adoc b/modules/ipi-install-preparing-a-disconnected-registry.adoc deleted file mode 100644 index 68f20e6588e1..000000000000 --- a/modules/ipi-install-preparing-a-disconnected-registry.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// install/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="ipi-install-preparing-a-disconnected-registry_{context}"] -= Preparing the registry node to host the mirrored registry - -The following steps must be completed prior to hosting a mirrored registry on bare metal. - -.Procedure - -. Open the firewall port on the registry node: -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port=5000/tcp --zone=libvirt --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --add-port=5000/tcp --zone=public --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -. Install the required packages for the registry node: -+ -[source,terminal] ----- -$ sudo yum -y install python3 podman httpd httpd-tools jq ----- - -. Create the directory structure where the repository information will be held: -+ -[source,terminal] ----- -$ sudo mkdir -p /opt/registry/{auth,certs,data} ----- diff --git a/modules/ipi-install-preparing-the-bare-metal-node.adoc b/modules/ipi-install-preparing-the-bare-metal-node.adoc deleted file mode 100644 index b49b44c714bc..000000000000 --- a/modules/ipi-install-preparing-the-bare-metal-node.adoc +++ /dev/null @@ -1,235 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc - -:_content-type: PROCEDURE -[id='preparing-the-bare-metal-node_{context}'] -= Preparing the bare metal node - -To expand your cluster, you must provide the node with the relevant IP address. This can be done with a static configuration, or with a DHCP (Dynamic Host Configuration protocol) server. When expanding the cluster using a DHCP server, each node must have a DHCP reservation. - - -[IMPORTANT] -.Reserving IP addresses so they become static IP addresses -==== -Some administrators prefer to use static IP addresses so that each node's IP address remains constant in the absence of a DHCP server. To configure static IP addresses with NMState, see "Optional: Configuring host network interfaces in the `install-config.yaml` file" in the "Setting up the environment for an OpenShift installation" section for additional details. -==== - -Preparing the bare metal node requires executing the following procedure from the provisioner node. - -.Procedure - -. 
Get the `oc` binary: -+ -[source,terminal] ----- -$ curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$VERSION/openshift-client-linux-$VERSION.tar.gz | tar zxvf - oc ----- -+ -[source,terminal] ----- -$ sudo cp oc /usr/local/bin ----- - -. Power off the bare metal node by using the baseboard management controller (BMC), and ensure it is off. - -. Retrieve the user name and password of the bare metal node's baseboard management controller. Then, create `base64` strings from the user name and password: -+ -[source,terminal,subs="+quotes"] ----- -$ echo -ne "root" | base64 ----- -+ -[source,terminal] ----- -$ echo -ne "password" | base64 ----- - -. Create a configuration file for the bare metal node. Depending on whether you are using a static configuration or a DHCP server, use one of the following example `bmh.yaml` files, replacing values in the YAML to match your environment: -+ -[source,terminal] ----- -$ vim bmh.yaml ----- -* *Static configuration* `bmh.yaml`: -+ -[source,yaml] ----- ---- -apiVersion: v1 <1> -kind: Secret -metadata: - name: openshift-worker-<num>-network-config-secret <2> - namespace: openshift-machine-api -type: Opaque -stringData: - nmstate: | <3> - interfaces: <4> - - name: <nic1_name> <5> - type: ethernet - state: up - ipv4: - address: - - ip: <ip_address> <5> - prefix-length: 24 - enabled: true - dns-resolver: - config: - server: - - <dns_ip_address> <5> - routes: - config: - - destination: 0.0.0.0/0 - next-hop-address: <next_hop_ip_address> <5> - next-hop-interface: <next_hop_nic1_name> <5> ---- -apiVersion: v1 -kind: Secret -metadata: - name: openshift-worker-<num>-bmc-secret <2> - namespace: openshift-machine-api -type: Opaque -data: - username: <base64_of_uid> <6> - password: <base64_of_pwd> <6> ---- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - name: openshift-worker-<num> <2> - namespace: openshift-machine-api -spec: - online: True - bootMACAddress: <nic1_mac_address> <7> - bmc: - address: <protocol>://<bmc_url> <8> - credentialsName: openshift-worker-<num>-bmc-secret <2> - disableCertificateVerification: True <9> - username: <bmc_username> <10> - password: <bmc_password> <10> - rootDeviceHints: - deviceName: <root_device_hint> <11> - preprovisioningNetworkDataName: openshift-worker-<num>-network-config-secret <12> ----- -+ --- -<1> To configure the network interface for a newly created node, specify the name of the secret that contains the network configuration. Follow the `nmstate` syntax to define the network configuration for your node. See "Optional: Configuring host network interfaces in the install-config.yaml file" for details on configuring NMState syntax. -<2> Replace `<num>` for the worker number of the bare metal node in the `name` fields, the `credentialsName` field, and the `preprovisioningNetworkDataName` field. -<3> Add the NMState YAML syntax to configure the host interfaces. -<4> Optional: If you have configured the network interface with `nmstate`, and you want to disable an interface, set `state: up` with the IP addresses set to `enabled: false` as shown: -+ -[source,yaml] ----- ---- - interfaces: - - name: <nic_name> - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: false ----- -<5> Replace `<nic1_name>`, `<ip_address>`, `<dns_ip_address>`, `<next_hop_ip_address>` and `<next_hop_nic1_name>` with appropriate values. -<6> Replace `<base64_of_uid>` and `<base64_of_pwd>` with the base64 string of the user name and password. 
-<7> Replace `<nic1_mac_address>` with the MAC address of the bare metal node's first NIC. See the "BMC addressing" section for additional BMC configuration options. -<8> Replace `<protocol>` with the BMC protocol, such as IPMI, RedFish, or others. Replace `<bmc_url>` with the URL of the bare metal node's baseboard management controller. -<9> To skip certificate validation, set `disableCertificateVerification` to true. -<10> Replace `<bmc_username>` and `<bmc_password>` with the string of the BMC user name and password. -<11> Optional: Replace `<root_device_hint>` with a device path if you specify a root device hint. -<12> Optional: If you have configured the network interface for the newly created node, provide the network configuration secret name in the `preprovisioningNetworkDataName` of the BareMetalHost CR. --- - -* *DHCP configuration* `bmh.yaml`: -+ -[source,yaml] ----- ---- -apiVersion: v1 -kind: Secret -metadata: - name: openshift-worker-<num>-bmc-secret <1> - namespace: openshift-machine-api -type: Opaque -data: - username: <base64_of_uid> <2> - password: <base64_of_pwd> <2> ---- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - name: openshift-worker-<num> <1> - namespace: openshift-machine-api -spec: - online: True - bootMACAddress: <nic1_mac_address> <3> - bmc: - address: <protocol>://<bmc_url> <4> - credentialsName: openshift-worker-<num>-bmc-secret <1> - disableCertificateVerification: True <5> - username: <bmc_username> <6> - password: <bmc_password> <6> - rootDeviceHints: - deviceName: <root_device_hint> <7> - preprovisioningNetworkDataName: openshift-worker-<num>-network-config-secret <8> ----- -+ -<1> Replace `<num>` for the worker number of the bare metal node in the `name` fields, the `credentialsName` field, and the `preprovisioningNetworkDataName` field. -+ -<2> Replace `<base64_of_uid>` and `<base64_of_pwd>` with the base64 string of the user name and password. -+ -<3> Replace `<nic1_mac_address>` with the MAC address of the bare metal node's first NIC. See the "BMC addressing" section for additional BMC configuration options. -+ -<4> Replace `<protocol>` with the BMC protocol, such as IPMI, RedFish, or others. Replace `<bmc_url>` with the URL of the bare metal node's baseboard management controller. -+ -<5> To skip certificate validation, set `disableCertificateVerification` to true. -+ -<6> Replace `<bmc_username>` and `<bmc_password>` with the string of the BMC user name and password. -+ -<7> Optional: Replace `<root_device_hint>` with a device path if you specify a root device hint. -+ -<8> Optional: If you have configured the network interface for the newly created node, provide the network configuration secret name in the `preprovisioningNetworkDataName` of the BareMetalHost CR. - -+ -[NOTE] -==== -If the MAC address of an existing bare metal node matches the MAC address of a bare metal host that you are attempting to provision, then the Ironic installation will fail. If the host enrollment, inspection, cleaning, or other Ironic steps fail, the Bare Metal Operator retries the installation continuously. See "Diagnosing a host duplicate MAC address" for more information. -==== - -. 
Create the bare metal node: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api create -f bmh.yaml ----- -+ -.Example output -[source,terminal] ----- -secret/openshift-worker-<num>-network-config-secret created -secret/openshift-worker-<num>-bmc-secret created -baremetalhost.metal3.io/openshift-worker-<num> created ----- -+ -Where `<num>` will be the worker number. - -. Power up and inspect the bare metal node: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get bmh openshift-worker-<num> ----- -+ -Where `<num>` is the worker node number. -+ -.Example output -[source,terminal] ----- -NAME STATE CONSUMER ONLINE ERROR -openshift-worker-<num> available true ----- -+ -[NOTE] -==== -To allow the worker node to join the cluster, scale the `machineset` object to the number of the `BareMetalHost` objects. You can scale nodes either manually or automatically. To scale nodes automatically, use the `metal3.io/autoscale-to-hosts` annotation for `machineset`. -==== diff --git a/modules/ipi-install-preparing-the-provisioner-node-for-openshift-install.adoc b/modules/ipi-install-preparing-the-provisioner-node-for-openshift-install.adoc deleted file mode 100644 index 18b1f46ca996..000000000000 --- a/modules/ipi-install-preparing-the-provisioner-node-for-openshift-install.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="preparing-the-provisioner-node-for-openshift-install_{context}"] -= Preparing the provisioner node for {product-title} installation - -Perform the following steps to prepare the environment. - -.Procedure - -. Log in to the provisioner node via `ssh`. - -. Create a non-root user (`kni`) and provide that user with `sudo` privileges: -+ -[source,terminal] ----- -# useradd kni ----- -+ -[source,terminal] ----- -# passwd kni ----- -+ -[source,terminal] ----- -# echo "kni ALL=(root) NOPASSWD:ALL" | tee -a /etc/sudoers.d/kni ----- -+ -[source,terminal] ----- -# chmod 0440 /etc/sudoers.d/kni ----- - -. Create an `ssh` key for the new user: -+ -[source,terminal] ----- -# su - kni -c "ssh-keygen -t ed25519 -f /home/kni/.ssh/id_rsa -N ''" ----- - -. Log in as the new user on the provisioner node: -+ -[source,terminal] ----- -# su - kni ----- - -ifndef::openshift-origin[] -. Use Red Hat Subscription Manager to register the provisioner node: -+ -[source,terminal] ----- -$ sudo subscription-manager register --username=<user> --password=<pass> --auto-attach -$ sudo subscription-manager repos --enable=rhel-8-for-<architecture>-appstream-rpms --enable=rhel-8-for-<architecture>-baseos-rpms ----- -+ -[NOTE] -==== -For more information about Red Hat Subscription Manager, see link:https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html-single/rhsm/index[Using and Configuring Red Hat Subscription Manager]. -==== -endif::openshift-origin[] - -. Install the following packages: -+ -[source,terminal] ----- -$ sudo dnf install -y libvirt qemu-kvm mkisofs python3-devel jq ipmitool ----- - -. Modify the user to add the `libvirt` group to the newly created user: -+ -[source,terminal] ----- -$ sudo usermod --append --groups libvirt <user> ----- - -. 
Restart `firewalld` and enable the `http` service: -+ -[source,terminal] ----- -$ sudo systemctl start firewalld ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone=public --add-service=http --permanent ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -. Start and enable the `libvirtd` service: -+ -[source,terminal] ----- -$ sudo systemctl enable libvirtd --now ----- - -. Create the `default` storage pool and start it: -+ -[source,terminal] ----- -$ sudo virsh pool-define-as --name default --type dir --target /var/lib/libvirt/images ----- -+ -[source,terminal] ----- -$ sudo virsh pool-start default ----- -+ -[source,terminal] ----- -$ sudo virsh pool-autostart default ----- - -. Create a `pull-secret.txt` file: -+ -[source,terminal] ----- -$ vim pull-secret.txt ----- -+ -In a web browser, navigate to link:https://console.redhat.com/openshift/install/metal/installer-provisioned[Install OpenShift on Bare Metal with installer-provisioned infrastructure]. Click **Copy pull secret**. Paste the contents into the `pull-secret.txt` file and save the contents in the `kni` user's home directory. diff --git a/modules/ipi-install-preparing-to-deploy-with-virtual-media-on-the-baremetal-network.adoc b/modules/ipi-install-preparing-to-deploy-with-virtual-media-on-the-baremetal-network.adoc deleted file mode 100644 index 7d937598ef54..000000000000 --- a/modules/ipi-install-preparing-to-deploy-with-virtual-media-on-the-baremetal-network.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// This is included in the following assemblies: -// -// installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc - -:_content-type: PROCEDURE -[id="preparing-to-deploy-with-virtual-media-on-the-baremetal-network_{context}"] -= Preparing to deploy with Virtual Media on the baremetal network - -If the `provisioning` network is enabled and you want to expand the cluster using Virtual Media on the `baremetal` network, use the following procedure. - -.Prerequisites - -* There is an existing cluster with a `baremetal` network and a `provisioning` network. - -.Procedure - -. Edit the `provisioning` custom resource (CR) to enable deploying with Virtual Media on the `baremetal` network: -+ -[source,terminmal] ----- -oc edit provisioning ----- -+ -[source,yaml] ----- - apiVersion: metal3.io/v1alpha1 - kind: Provisioning - metadata: - creationTimestamp: "2021-08-05T18:51:50Z" - finalizers: - - provisioning.metal3.io - generation: 8 - name: provisioning-configuration - resourceVersion: "551591" - uid: f76e956f-24c6-4361-aa5b-feaf72c5b526 - spec: - provisioningDHCPRange: 172.22.0.10,172.22.0.254 - provisioningIP: 172.22.0.3 - provisioningInterface: enp1s0 - provisioningNetwork: Managed - provisioningNetworkCIDR: 172.22.0.0/24 - virtualMediaViaExternalNetwork: true <1> - status: - generations: - - group: apps - hash: "" - lastGeneration: 7 - name: metal3 - namespace: openshift-machine-api - resource: deployments - - group: apps - hash: "" - lastGeneration: 1 - name: metal3-image-cache - namespace: openshift-machine-api - resource: daemonsets - observedGeneration: 8 - readyReplicas: 0 ----- -+ -<1> Add `virtualMediaViaExternalNetwork: true` to the `provisioning` CR. - -. If the image URL exists, edit the `machineset` to use the API VIP address. This step only applies to clusters installed in versions 4.9 or earlier. 
-+ -[source,terminal] ----- -oc edit machineset ----- -+ -[source,yaml] ----- - apiVersion: machine.openshift.io/v1beta1 - kind: MachineSet - metadata: - creationTimestamp: "2021-08-05T18:51:52Z" - generation: 11 - labels: - machine.openshift.io/cluster-api-cluster: ostest-hwmdt - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: ostest-hwmdt-worker-0 - namespace: openshift-machine-api - resourceVersion: "551513" - uid: fad1c6e0-b9da-4d4a-8d73-286f78788931 - spec: - replicas: 2 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: ostest-hwmdt - machine.openshift.io/cluster-api-machineset: ostest-hwmdt-worker-0 - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: ostest-hwmdt - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: ostest-hwmdt-worker-0 - spec: - metadata: {} - providerSpec: - value: - apiVersion: baremetal.cluster.k8s.io/v1alpha1 - hostSelector: {} - image: - checksum: http:/172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2.<md5sum> <1> - url: http://172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2 <2> - kind: BareMetalMachineProviderSpec - metadata: - creationTimestamp: null - userData: - name: worker-user-data - status: - availableReplicas: 2 - fullyLabeledReplicas: 2 - observedGeneration: 11 - readyReplicas: 2 - replicas: 2 ----- -+ -<1> Edit the `checksum` URL to use the API VIP address. -<2> Edit the `url` URL to use the API VIP address. diff --git a/modules/ipi-install-provisioning-the-bare-metal-node.adoc b/modules/ipi-install-provisioning-the-bare-metal-node.adoc deleted file mode 100644 index b13cd2292707..000000000000 --- a/modules/ipi-install-provisioning-the-bare-metal-node.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-expanding-the-cluster.adoc - -:_content-type: PROCEDURE -[id='provisioning-the-bare-metal-node_{context}'] -= Provisioning the bare metal node - -Provisioning the bare metal node requires executing the following procedure from the provisioner node. - -.Procedure - -. Ensure the `STATE` is `available` before provisioning the bare metal node. -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get bmh openshift-worker-<num> ----- -+ -Where `<num>` is the worker node number. -+ -[source,terminal] ----- -NAME STATE ONLINE ERROR AGE -openshift-worker available true 34h ----- - -. Get a count of the number of worker nodes. -[source,terminal] -+ ----- -$ oc get nodes ----- -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -openshift-master-1.openshift.example.com Ready master 30h v1.26.0 -openshift-master-2.openshift.example.com Ready master 30h v1.26.0 -openshift-master-3.openshift.example.com Ready master 30h v1.26.0 -openshift-worker-0.openshift.example.com Ready worker 30h v1.26.0 -openshift-worker-1.openshift.example.com Ready worker 30h v1.26.0 ----- - -. Get the compute machine set. -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -... -openshift-worker-0.example.com 1 1 1 1 55m -openshift-worker-1.example.com 1 1 1 1 55m ----- - -. Increase the number of worker nodes by one. -+ -[source,terminal] ----- -$ oc scale --replicas=<num> machineset <machineset> -n openshift-machine-api ----- -+ -Replace `<num>` with the new number of worker nodes. 
Replace `<machineset>` with the name of the compute machine set from the previous step. - -. Check the status of the bare metal node. -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get bmh openshift-worker-<num> ----- -+ -Where `<num>` is the worker node number. The STATE changes from `ready` to `provisioning`. -+ -[source,terminal] ----- -NAME STATE CONSUMER ONLINE ERROR -openshift-worker-<num> provisioning openshift-worker-<num>-65tjz true ----- -+ -The `provisioning` status remains until the {product-title} cluster provisions the node. This can take 30 minutes or more. After the node is provisioned, the state will change to `provisioned`. -+ -[source,terminal] ----- -NAME STATE CONSUMER ONLINE ERROR -openshift-worker-<num> provisioned openshift-worker-<num>-65tjz true ----- - -. After provisioning completes, ensure the bare metal node is ready. -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -openshift-master-1.openshift.example.com Ready master 30h v1.26.0 -openshift-master-2.openshift.example.com Ready master 30h v1.26.0 -openshift-master-3.openshift.example.com Ready master 30h v1.26.0 -openshift-worker-0.openshift.example.com Ready worker 30h v1.26.0 -openshift-worker-1.openshift.example.com Ready worker 30h v1.26.0 -openshift-worker-<num>.openshift.example.com Ready worker 3m27s v1.26.0 ----- -+ -You can also check the kubelet. -+ -[source,terminal] ----- -$ ssh openshift-worker-<num> ----- -+ -[source,terminal] ----- -[kni@openshift-worker-<num>]$ journalctl -fu kubelet ----- diff --git a/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc b/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc deleted file mode 100644 index 8da5280ff6f2..000000000000 --- a/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-expanding-the-cluster.adoc - -:_content-type: PROCEDURE -[id="replacing-a-bare-metal-control-plane-node_{context}"] -= Replacing a bare-metal control plane node - -Use the following procedure to replace an installer-provisioned {product-title} control plane node. - -[IMPORTANT] -==== -If you reuse the `BareMetalHost` object definition from an existing control plane host, do not leave the `externallyProvisioned` field set to `true`. - -Existing control plane `BareMetalHost` objects may have the `externallyProvisioned` flag set to `true` if they were provisioned by the {product-title} installation program. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have taken an etcd backup. -+ -[IMPORTANT] -==== -Take an etcd backup before performing this procedure so that you can restore your cluster if you encounter any issues. For more information about taking an etcd backup, see the _Additional resources_ section. -==== - -.Procedure - -. Ensure that the Bare Metal Operator is available: -+ -[source,terminal] ----- -$ oc get clusteroperator baremetal ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE -baremetal 4.12.0 True False False 3d15h ----- - -. 
Remove the old `BareMetalHost` and `Machine` objects: -+ -[source,terminal] ----- -$ oc delete bmh -n openshift-machine-api <host_name> -$ oc delete machine -n openshift-machine-api <machine_name> ----- -+ -Replace `<host_name>` with the name of the host and `<machine_name>` with the name of the machine. The machine name appears under the `CONSUMER` field. -+ -After you remove the `BareMetalHost` and `Machine` objects, then the machine controller automatically deletes the `Node` object. - -. Create the new `BareMetalHost` object and the secret to store the BMC credentials: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply -f - -apiVersion: v1 -kind: Secret -metadata: - name: control-plane-<num>-bmc-secret <1> - namespace: openshift-machine-api -data: - username: <base64_of_uid> <2> - password: <base64_of_pwd> <3> -type: Opaque ---- -apiVersion: metal3.io/v1alpha1 -kind: BareMetalHost -metadata: - name: control-plane-<num> <1> - namespace: openshift-machine-api -spec: - automatedCleaningMode: disabled - bmc: - address: <protocol>://<bmc_ip> <4> - credentialsName: control-plane-<num>-bmc-secret <1> - bootMACAddress: <NIC1_mac_address> <5> - bootMode: UEFI - externallyProvisioned: false - hardwareProfile: unknown - online: true -EOF ----- -<1> Replace `<num>` for the control plane number of the bare metal node in the `name` fields and the `credentialsName` field. -<2> Replace `<base64_of_uid>` with the `base64` string of the user name. -<3> Replace `<base64_of_pwd>` with the `base64` string of the password. -<4> Replace `<protocol>` with the BMC protocol, such as `redfish`, `redfish-virtualmedia`, `idrac-virtualmedia`, or others. Replace `<bmc_ip>` with the IP address of the bare metal node's baseboard management controller. For additional BMC configuration options, see "BMC addressing" in the _Additional resources_ section. -<5> Replace `<NIC1_mac_address>` with the MAC address of the bare metal node's first NIC. -+ -After the inspection is complete, the `BareMetalHost` object is created and available to be provisioned. - -. View available `BareMetalHost` objects: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME STATE CONSUMER ONLINE ERROR AGE -control-plane-1.example.com available control-plane-1 true 1h10m -control-plane-2.example.com externally provisioned control-plane-2 true 4h53m -control-plane-3.example.com externally provisioned control-plane-3 true 4h53m -compute-1.example.com provisioned compute-1-ktmmx true 4h53m -compute-1.example.com provisioned compute-2-l2zmb true 4h53m ----- -+ -There are no `MachineSet` objects for control plane nodes, so you must create a `Machine` object instead. You can copy the `providerSpec` from another control plane `Machine` object. - -. 
Create a `Machine` object: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply -f - -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - annotations: - metal3.io/BareMetalHost: openshift-machine-api/control-plane-<num> <1> - labels: - machine.openshift.io/cluster-api-cluster: control-plane-<num> <1> - machine.openshift.io/cluster-api-machine-role: master - machine.openshift.io/cluster-api-machine-type: master - name: control-plane-<num> <1> - namespace: openshift-machine-api -spec: - metadata: {} - providerSpec: - value: - apiVersion: baremetal.cluster.k8s.io/v1alpha1 - customDeploy: - method: install_coreos - hostSelector: {} - image: - checksum: "" - url: "" - kind: BareMetalMachineProviderSpec - metadata: - creationTimestamp: null - userData: - name: master-user-data-managed -EOF ----- -<1> Replace `<num>` for the control plane number of the bare metal node in the `name`, `labels` and `annotations` fields. -+ -. To view the `BareMetalHost` objects, run the following command: -+ -[source,terminal] ----- -$ oc get bmh -A ----- -+ -.Example output -[source,terminal] ----- -NAME STATE CONSUMER ONLINE ERROR AGE -control-plane-1.example.com provisioned control-plane-1 true 2h53m -control-plane-2.example.com externally provisioned control-plane-2 true 5h53m -control-plane-3.example.com externally provisioned control-plane-3 true 5h53m -compute-1.example.com provisioned compute-1-ktmmx true 5h53m -compute-2.example.com provisioned compute-2-l2zmb true 5h53m ----- -+ -. After the RHCOS installation, verify that the `BareMetalHost` is added to the cluster: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -control-plane-1.example.com available master 4m2s v1.18.2 -control-plane-2.example.com available master 141m v1.18.2 -control-plane-3.example.com available master 141m v1.18.2 -compute-1.example.com available worker 87m v1.18.2 -compute-2.example.com available worker 87m v1.18.2 ----- -+ -[NOTE] -==== -After replacement of the new control plane node, the etcd pod running in the new node is in `crashloopback` status. See "Replacing an unhealthy etcd member" in the _Additional resources_ section for more information. 
-==== diff --git a/modules/ipi-install-required-data-for-installation.adoc b/modules/ipi-install-required-data-for-installation.adoc deleted file mode 100644 index 48fd7e320f6d..000000000000 --- a/modules/ipi-install-required-data-for-installation.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - -:_content-type: CONCEPT -[id="required-data-for-installation_{context}"] -= Required data for installation - -Prior to the installation of the {product-title} cluster, gather the following information from all cluster nodes: - -* Out-of-band management IP -** Examples -*** Dell (iDRAC) IP -*** HP (iLO) IP -*** Fujitsu (iRMC) IP - -.When using the `provisioning` network - -* NIC (`provisioning`) MAC address -* NIC (`baremetal`) MAC address - -.When omitting the `provisioning` network - -* NIC (`baremetal`) MAC address diff --git a/modules/ipi-install-retrieving-the-openshift-installer.adoc b/modules/ipi-install-retrieving-the-openshift-installer.adoc deleted file mode 100644 index 2743d1c84238..000000000000 --- a/modules/ipi-install-retrieving-the-openshift-installer.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="retrieving-the-openshift-installer_{context}"] -= Retrieving the {product-title} installer - -Use the `stable-4.x` version of the installation program and your selected architecture to deploy the generally available stable version of {product-title}: - -[source,terminal,subs="attributes+"] ----- -$ export VERSION=stable-{product-version} ----- -[source,terminal,subs="attributes+"] ----- -$ export RELEASE_ARCH=<architecture> ----- -[source,terminal,subs="attributes+"] ----- -$ export RELEASE_IMAGE=$(curl -s https://mirror.openshift.com/pub/openshift-v4/$RELEASE_ARCH/clients/ocp/$VERSION/release.txt | grep 'Pull From: quay.io' | awk -F ' ' '{print $3}') ----- diff --git a/modules/ipi-install-root-device-hints.adoc b/modules/ipi-install-root-device-hints.adoc deleted file mode 100644 index 388c6ab5d3f9..000000000000 --- a/modules/ipi-install-root-device-hints.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-configuration-files.adoc - -:_content-type: REFERENCE -[id='root-device-hints_{context}'] -= Root device hints - -The `rootDeviceHints` parameter enables the installer to provision the {op-system-first} image to a particular device. The installer examines the devices in the order it discovers them, and compares the discovered values with the hint values. The installer uses the first discovered device that matches the hint value. The configuration can combine multiple hints, but a device must match all hints for the installer to select it. - -.Subfields - -|=== -| Subfield | Description - -| `deviceName` | A string containing a Linux device name like `/dev/vda`. The hint must match the actual value exactly. - -| `hctl` | A string containing a SCSI bus address like `0:0:0:0`. The hint must match the actual value exactly. - -| `model` | A string containing a vendor-specific device identifier. The hint can be a substring of the actual value. - -| `vendor` | A string containing the name of the vendor or manufacturer of the device. The hint can be a sub-string of the actual value. - -| `serialNumber` | A string containing the device serial number. 
The hint must match the actual value exactly. - -| `minSizeGigabytes` | An integer representing the minimum size of the device in gigabytes. - -| `wwn` | A string containing the unique storage identifier. The hint must match the actual value exactly. - -| `wwnWithExtension` | A string containing the unique storage identifier with the vendor extension appended. The hint must match the actual value exactly. - -| `wwnVendorExtension` | A string containing the unique vendor storage identifier. The hint must match the actual value exactly. - -| `rotational` | A boolean indicating whether the device should be a rotating disk (true) or not (false). - -|=== - -.Example usage - -[source,yaml] ----- - - name: master-0 - role: master - bmc: - address: ipmi://10.10.0.3:6203 - username: admin - password: redhat - bootMACAddress: de:ad:be:ef:00:40 - rootDeviceHints: - deviceName: "/dev/sda" ----- diff --git a/modules/ipi-install-setting-proxy-settings-within-install-config.adoc b/modules/ipi-install-setting-proxy-settings-within-install-config.adoc deleted file mode 100644 index fff308ef7162..000000000000 --- a/modules/ipi-install-setting-proxy-settings-within-install-config.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-configuration-files.adoc - -:_content-type: PROCEDURE -[id='ipi-install-setting-proxy-settings-within-install-config_{context}'] -= Optional: Setting proxy settings - -To deploy an {product-title} cluster using a proxy, make the following changes to the `install-config.yaml` file. - -[source,yaml] ----- -apiVersion: v1 -baseDomain: <domain> -proxy: - httpProxy: http://USERNAME:PASSWORD@proxy.example.com:PORT - httpsProxy: https://USERNAME:PASSWORD@proxy.example.com:PORT - noProxy: <WILDCARD_OF_DOMAIN>,<PROVISIONING_NETWORK/CIDR>,<BMC_ADDRESS_RANGE/CIDR> ----- - -The following is an example of `noProxy` with values. - -[source,yaml] ----- -noProxy: .example.com,172.22.0.0/24,10.10.0.0/24 ----- - -With a proxy enabled, set the appropriate values of the proxy in the corresponding key/value pair. - -Key considerations: - -* If the proxy does not have an HTTPS proxy, change the value of `httpsProxy` from `https://` to `http://`. -* If using a provisioning network, include it in the `noProxy` setting, otherwise the installer will fail. -* Set all of the proxy settings as environment variables within the provisioner node. For example, `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`. - -[NOTE] -==== -When provisioning with IPv6, you cannot define a CIDR address block in the `noProxy` settings. You must define each address separately. -==== diff --git a/modules/ipi-install-troubleshooting-api-not-accessible.adoc b/modules/ipi-install-troubleshooting-api-not-accessible.adoc deleted file mode 100644 index ed203e72605f..000000000000 --- a/modules/ipi-install-troubleshooting-api-not-accessible.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-api-not-accessible_{context}"] - -= The API is not accessible - -When the cluster is running and clients cannot access the API, domain name resolution issues might impede access to the API. - -.Procedure - -. **Hostname Resolution:** Check the cluster nodes to ensure they have a fully qualified domain name, and not just `localhost.localdomain`. 
For example: -+ -[source,terminal] ----- -$ hostname ----- -+ -If a hostname is not set, set the correct hostname. For example: -+ -[source,terminal] ----- -$ hostnamectl set-hostname <hostname> ----- - -. **Incorrect Name Resolution:** Ensure that each node has the correct name resolution in the DNS server using `dig` and `nslookup`. For example: -+ -[source,terminal] ----- -$ dig api.<cluster_name>.example.com ----- -+ -[source,terminal] ----- -; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el8 <<>> api.<cluster_name>.example.com -;; global options: +cmd -;; Got answer: -;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 37551 -;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 1, ADDITIONAL: 2 - -;; OPT PSEUDOSECTION: -; EDNS: version: 0, flags:; udp: 4096 -; COOKIE: 866929d2f8e8563582af23f05ec44203d313e50948d43f60 (good) -;; QUESTION SECTION: -;api.<cluster_name>.example.com. IN A - -;; ANSWER SECTION: -api.<cluster_name>.example.com. 10800 IN A 10.19.13.86 - -;; AUTHORITY SECTION: -<cluster_name>.example.com. 10800 IN NS <cluster_name>.example.com. - -;; ADDITIONAL SECTION: -<cluster_name>.example.com. 10800 IN A 10.19.14.247 - -;; Query time: 0 msec -;; SERVER: 10.19.14.247#53(10.19.14.247) -;; WHEN: Tue May 19 20:30:59 UTC 2020 -;; MSG SIZE rcvd: 140 ----- -+ -The output in this example indicates that the appropriate IP address for the `api.<cluster_name>.example.com` VIP is `10.19.13.86`. This IP address should reside on the `baremetal` network. diff --git a/modules/ipi-install-troubleshooting-bootstrap-vm-cannot-boot.adoc b/modules/ipi-install-troubleshooting-bootstrap-vm-cannot-boot.adoc deleted file mode 100644 index dd55ab5e9052..000000000000 --- a/modules/ipi-install-troubleshooting-bootstrap-vm-cannot-boot.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-bootstrap-vm-cannot-boot_{context}"] -= Bootstrap VM cannot boot up the cluster nodes - -During the deployment, it is possible for the bootstrap VM to fail to boot the cluster nodes, which prevents the VM from provisioning the nodes with the {op-system} image. This scenario can arise due to: - -* A problem with the `install-config.yaml` file. -* Issues with out-of-band network access when using the baremetal network. - -To verify the issue, check the two containers related to `ironic`: - -* `ironic` -* `ironic-inspector` - -.Procedure - -. Log in to the bootstrap VM: -+ -[source,terminal] ----- -$ ssh core@172.22.0.2 ----- - -. To check the container logs, execute the following: -+ -[source,terminal] ----- -[core@localhost ~]$ sudo podman logs -f <container_name> ----- -+ -Replace `<container_name>` with one of `ironic` or `ironic-inspector`. If you encounter an issue where the control plane nodes are not booting up from PXE, check the `ironic` pod. The `ironic` pod contains information about the attempt to boot the cluster nodes, because it attempts to log in to the node over IPMI. - -.Potential reason -The cluster nodes might be in the `ON` state when deployment started.
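Before powering nodes off, you can confirm the power state from the provisioner node. This is a sketch that assumes IPMI over LAN access to the BMC; replace `<password>` and `<out_of_band_ip>` with values for your environment:

[source,terminal]
----
$ ipmitool -I lanplus -U root -P <password> -H <out_of_band_ip> power status
----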
- -.Solution -Power off the {product-title} cluster nodes before you begin the -installation over IPMI: - -[source,terminal] ----- -$ ipmitool -I lanplus -U root -P <password> -H <out_of_band_ip> power off ----- \ No newline at end of file diff --git a/modules/ipi-install-troubleshooting-bootstrap-vm-inspecting-logs.adoc b/modules/ipi-install-troubleshooting-bootstrap-vm-inspecting-logs.adoc deleted file mode 100644 index 89120f5c1e99..000000000000 --- a/modules/ipi-install-troubleshooting-bootstrap-vm-inspecting-logs.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-bootstrap-vm-inspecting-logs_{context}"] -= Inspecting logs - -When experiencing issues downloading or accessing the {op-system} images, first verify that the URL is correct in the `install-config.yaml` configuration file. - -.Example of internal webserver hosting {op-system} images -[source,yaml] ----- -bootstrapOSImage: http://<ip:port>/rhcos-43.81.202001142154.0-qemu.<architecture>.qcow2.gz?sha256=9d999f55ff1d44f7ed7c106508e5deecd04dc3c06095d34d36bf1cd127837e0c -clusterOSImage: http://<ip:port>/rhcos-43.81.202001142154.0-openstack.<architecture>.qcow2.gz?sha256=a1bda656fa0892f7b936fdc6b6a6086bddaed5dafacedcd7a1e811abb78fe3b0 ----- - -The `coreos-downloader` container downloads resources from a webserver or from the external link:https://quay.io[quay.io] registry, whichever the `install-config.yaml` configuration file specifies. Verify that the `coreos-downloader` container is up and running and inspect its logs as needed. - -.Procedure - -. Log in to the bootstrap VM: -+ -[source,terminal] ----- -$ ssh core@172.22.0.2 ----- - -. Check the status of the `coreos-downloader` container within the bootstrap VM by running the following command: - -+ -[source,terminal] ----- -[core@localhost ~]$ sudo podman logs -f coreos-downloader ----- -+ -If the bootstrap VM cannot access the URL to the images, use the `curl` command to verify that the VM can access the images. - -. To inspect the `bootkube` logs that indicate if all the containers launched during the deployment phase, execute the following: -+ -[source,terminal] ----- -[core@localhost ~]$ journalctl -xe ----- -+ -[source,terminal] ----- -[core@localhost ~]$ journalctl -b -f -u bootkube.service ----- - -. Verify all the pods, including `dnsmasq`, `mariadb`, `httpd`, and `ironic`, are running: -+ -[source,terminal] ----- -[core@localhost ~]$ sudo podman ps ----- - -. If there are issues with the pods, check the logs of the containers with issues. 
To check the logs of the `ironic` service, run the following command: -+ -[source,terminal] ----- -[core@localhost ~]$ sudo podman logs ironic ----- \ No newline at end of file diff --git a/modules/ipi-install-troubleshooting-bootstrap-vm.adoc b/modules/ipi-install-troubleshooting-bootstrap-vm.adoc deleted file mode 100644 index f9cd18ec0651..000000000000 --- a/modules/ipi-install-troubleshooting-bootstrap-vm.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-bootstrap-vm_{context}"] - -= Bootstrap VM issues - -The {product-title} installation program spawns a bootstrap node virtual machine, which handles provisioning the {product-title} cluster nodes. - -.Procedure - -. About 10 to 15 minutes after triggering the installation program, check to ensure the bootstrap VM is operational using the `virsh` command: -+ -[source,terminal] ----- -$ sudo virsh list ----- -+ -[source,terminal] ----- - Id Name State - -------------------------------------------- - 12 openshift-xf6fq-bootstrap running ----- -+ -[NOTE] -==== -The name of the bootstrap VM is always the cluster name followed by a random set of characters and ending in the word "bootstrap." -==== -+ -If the bootstrap VM is not running after 10-15 minutes, troubleshoot why it is not running. Possible issues include: - -. Verify `libvirtd` is running on the system: -+ -[source,terminal] ----- -$ systemctl status libvirtd ----- -+ -[source,terminal] ----- -● libvirtd.service - Virtualization daemon - Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled) - Active: active (running) since Tue 2020-03-03 21:21:07 UTC; 3 weeks 5 days ago - Docs: man:libvirtd(8) - https://libvirt.org - Main PID: 9850 (libvirtd) - Tasks: 20 (limit: 32768) - Memory: 74.8M - CGroup: /system.slice/libvirtd.service - ├─ 9850 /usr/sbin/libvirtd ----- -+ -If the bootstrap VM is operational, log in to it. - -. Use the `virsh console` command to find the IP address of the bootstrap VM: -+ -[source,terminal] ----- -$ sudo virsh console example.com ----- -+ -[source,terminal] ----- -Connected to domain example.com -Escape character is ^] -Red Hat Enterprise Linux CoreOS 43.81.202001142154.0 (Ootpa) 4.3 -SSH host key: SHA256:BRWJktXZgQQRY5zjuAV0IKZ4WM7i4TiUyMVanqu9Pqg (ED25519) -SSH host key: SHA256:7+iKGA7VtG5szmk2jB5gl/5EZ+SNcJ3a2g23o0lnIio (ECDSA) -SSH host key: SHA256:DH5VWhvhvagOTaLsYiVNse9ca+ZSW/30OOMed8rIGOc (RSA) -ens3: fd35:919d:4042:2:c7ed:9a9f:a9ec:7 -ens4: 172.22.0.2 fe80::1d05:e52e:be5d:263f -localhost login: ----- -+ -[IMPORTANT] -==== -When deploying an {product-title} cluster without the `provisioning` network, you must use a public IP address and not a private IP address like `172.22.0.2`. -==== - - -. After you obtain the IP address, log in to the bootstrap VM using the `ssh` command: -+ -[NOTE] -==== -In the console output of the previous step, you can use the IPv6 IP address provided by `ens3` or the IPv4 IP provided by `ens4`. -==== -+ -[source,terminal] ----- -$ ssh core@172.22.0.2 ----- - -If you are not successful logging in to the bootstrap VM, you have likely encountered one of the following scenarios: - -* You cannot reach the `172.22.0.0/24` network. Verify the network connectivity between the provisioner and the `provisioning` network bridge. This issue might occur if you are using a `provisioning` network. 
- -* You cannot reach the bootstrap VM through the public network. When attempting -to SSH via the `baremetal` network, verify connectivity on the -`provisioner` host specifically around the `baremetal` network bridge. - -* You encountered `Permission denied (publickey,password,keyboard-interactive)`. When -attempting to access the bootstrap VM, a `Permission denied` error -might occur. Verify that the SSH key for the user attempting to log -in to the VM is set within the `install-config.yaml` file. diff --git a/modules/ipi-install-troubleshooting-cleaning-up-previous-installations.adoc b/modules/ipi-install-troubleshooting-cleaning-up-previous-installations.adoc deleted file mode 100644 index 182bec954f51..000000000000 --- a/modules/ipi-install-troubleshooting-cleaning-up-previous-installations.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -//installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-cleaning-up-previous-installations_{context}"] -= Cleaning up previous installations - -If a previous deployment failed, remove its artifacts before attempting to deploy {product-title} again. - -.Procedure - -. Power off all bare metal nodes prior to installing the {product-title} cluster: -+ -[source,terminal] ----- -$ ipmitool -I lanplus -U <user> -P <password> -H <management_server_ip> power off ----- - -. Remove all old bootstrap resources if any are left over from a previous deployment attempt: -+ -[source,terminal] ----- -for i in $(sudo virsh list | tail -n +3 | grep bootstrap | awk {'print $2'}); -do - sudo virsh destroy $i; - sudo virsh undefine $i; - sudo virsh vol-delete $i --pool $i; - sudo virsh vol-delete $i.ign --pool $i; - sudo virsh pool-destroy $i; - sudo virsh pool-undefine $i; -done ----- - -. Remove the following from the `clusterconfigs` directory to prevent Terraform from failing: -+ -[source,terminal] ----- -$ rm -rf ~/clusterconfigs/auth ~/clusterconfigs/terraform* ~/clusterconfigs/tls ~/clusterconfigs/metadata.json ----- diff --git a/modules/ipi-install-troubleshooting-cluster-nodes-will-not-pxe.adoc b/modules/ipi-install-troubleshooting-cluster-nodes-will-not-pxe.adoc deleted file mode 100644 index 23c39ae3d428..000000000000 --- a/modules/ipi-install-troubleshooting-cluster-nodes-will-not-pxe.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-cluster-nodes-will-not-pxe_{context}"] - -= Cluster nodes will not PXE boot - -If {product-title} cluster nodes do not PXE boot, run the following checks on the affected nodes. This procedure does not apply when installing an {product-title} cluster without the `provisioning` network. - -.Procedure - -. Check the network connectivity to the `provisioning` network. - -. Ensure PXE is enabled on the NIC for the `provisioning` network and PXE is disabled for all other NICs. - -. Verify that the `install-config.yaml` configuration file has the proper hardware profile and boot MAC address for the NIC connected to the `provisioning` network.
For example: -+ -.control plane node settings -+ ----- -bootMACAddress: 24:6E:96:1B:96:90 # MAC of bootable provisioning NIC -hardwareProfile: default #control plane node settings ----- -+ -.Worker node settings -+ ----- -bootMACAddress: 24:6E:96:1B:96:90 # MAC of bootable provisioning NIC -hardwareProfile: unknown #worker node settings ----- diff --git a/modules/ipi-install-troubleshooting-failed-ignition-during-firstboot.adoc b/modules/ipi-install-troubleshooting-failed-ignition-during-firstboot.adoc deleted file mode 100644 index 40a336071276..000000000000 --- a/modules/ipi-install-troubleshooting-failed-ignition-during-firstboot.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-failed-ignition-during-firstboot_{context}"] - -= Failed Ignition during Firstboot - -During the Firstboot, the Ignition configuration may fail. - -.Procedure - -. Connect to the node where the Ignition configuration failed: -+ -[source,terminal] ----- -Failed Units: 1 - machine-config-daemon-firstboot.service ----- - -. Restart the `machine-config-daemon-firstboot` service: -+ -[source,terminal] ----- -[core@worker-X ~]$ sudo systemctl restart machine-config-daemon-firstboot.service ----- diff --git a/modules/ipi-install-troubleshooting-install-config.adoc b/modules/ipi-install-troubleshooting-install-config.adoc deleted file mode 100644 index 1160649e453c..000000000000 --- a/modules/ipi-install-troubleshooting-install-config.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-install-config_{context}"] - -= Troubleshooting `install-config.yaml` - -The `install-config.yaml` configuration file represents all of the nodes that are part of the {product-title} cluster. The file contains the necessary options consisting of but not limited to `apiVersion`, `baseDomain`, `imageContentSources` and virtual IP addresses. If errors occur early in the deployment of the {product-title} cluster, the errors are likely in the `install-config.yaml` configuration file. - -.Procedure - -. Use the guidelines in link:https://www.redhat.com/sysadmin/yaml-tips[YAML-tips]. -. Verify the YAML syntax is correct using link:http://www.yamllint.com/[syntax-check]. -. Verify the {op-system-first} QEMU images are properly defined and accessible via the URL provided in the `install-config.yaml`. For example: -+ -[source,terminal] ----- -$ curl -s -o /dev/null -I -w "%{http_code}\n" http://webserver.example.com:8080/rhcos-44.81.202004250133-0-qemu.<architecture>.qcow2.gz?sha256=7d884b46ee54fe87bbc3893bf2aa99af3b2d31f2e19ab5529c60636fbd0f1ce7 ----- -+ -If the output is `200`, there is a valid response from the webserver storing the bootstrap VM image. 
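A quick local check can also catch YAML syntax errors in `install-config.yaml` before you run the installer. This is a minimal sketch that assumes Python 3 with the PyYAML module is available on the provisioner node; it validates YAML syntax only, not {product-title}-specific fields:

[source,terminal]
----
$ python3 -c 'import yaml; yaml.safe_load(open("install-config.yaml")); print("YAML syntax OK")'
----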
diff --git a/modules/ipi-install-troubleshooting-misc-issues.adoc b/modules/ipi-install-troubleshooting-misc-issues.adoc deleted file mode 100644 index dbe414cf5597..000000000000 --- a/modules/ipi-install-troubleshooting-misc-issues.adoc +++ /dev/null @@ -1,270 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-misc-issues_{context}"] - -= Miscellaneous issues - -== Addressing the `runtime network not ready` error - -After the deployment of a cluster you might receive the following error: - ----- -`runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: Missing CNI default network` ----- - -The Cluster Network Operator is responsible for deploying the networking components in response to a special object created by the installer. It runs very early in the installation process, after the control plane (master) nodes have come up, but before the bootstrap control plane has been torn down. It can be indicative of more subtle installer issues, such as long delays in bringing up control plane (master) nodes or issues with `apiserver` communication. - -.Procedure - -. Inspect the pods in the `openshift-network-operator` namespace: -+ -[source,terminal] ----- -$ oc get all -n openshift-network-operator ----- -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/network-operator-69dfd7b577-bg89v 0/1 ContainerCreating 0 149m ----- - - -. On the `provisioner` node, determine that the network configuration exists: -+ -[source,terminal] ----- -$ kubectl get network.config.openshift.io cluster -oyaml ----- -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - serviceNetwork: - - 172.30.0.0/16 - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OVNKubernetes ----- -+ -If it does not exist, the installer did not create it. To determine why the installer did not create it, execute the following: -+ -[source,terminal] ----- -$ openshift-install create manifests ----- - -. Check that the `network-operator` is running: -+ -[source,terminal] ----- -$ kubectl -n openshift-network-operator get pods ----- - -. Retrieve the logs: -+ -[source,terminal] ----- -$ kubectl -n openshift-network-operator logs -l "name=network-operator" ----- -+ -On high availability clusters with three or more control plane (master) nodes, the Operator will perform leader election and all other Operators will sleep. For additional details, see https://github.com/openshift/installer/blob/master/docs/user/troubleshooting.md[Troubleshooting]. - -== Cluster nodes not getting the correct IPv6 address over DHCP - -If the cluster nodes are not getting the correct IPv6 address over DHCP, check the following: - -. Ensure the reserved IPv6 addresses reside outside the DHCP range. - -. In the IP address reservation on the DHCP server, ensure the reservation specifies the correct DHCP Unique Identifier (DUID). For example: -+ -[source,terminal] ----- -# This is a dnsmasq dhcp reservation, 'id:00:03:00:01' is the client id and '18:db:f2:8c:d5:9f' is the MAC Address for the NIC -id:00:03:00:01:18:db:f2:8c:d5:9f,openshift-master-1,[2620:52:0:1302::6] ----- - -. Ensure that route announcements are working. - -. Ensure that the DHCP server is listening on the required interfaces serving the IP address ranges. 
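You can also confirm on the DHCP host that a DHCPv6 service is listening. The following sketch assumes a Linux DHCP server, such as `dnsmasq`, and checks the standard DHCPv6 server port, UDP 547:

[source,terminal]
----
$ sudo ss -ulnp | grep ':547'
----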
- - -== Cluster nodes not getting the correct hostname over DHCP - -During IPv6 deployment, cluster nodes must get their hostname over DHCP. Sometimes the `NetworkManager` does not assign the hostname immediately. A control plane (master) node might report an error such as: - ----- -Failed Units: 2 - NetworkManager-wait-online.service - nodeip-configuration.service ----- - -This error indicates that the cluster node likely booted without first receiving a hostname from the DHCP server, which causes `kubelet` to boot -with a `localhost.localdomain` hostname. To address the error, force the node to renew the hostname. - -.Procedure - -. Retrieve the `hostname`: -+ -[source,terminal] ----- -[core@master-X ~]$ hostname ----- -+ -If the hostname is `localhost`, proceed with the following steps. -+ -[NOTE] -==== -Where `X` is the control plane node number. -==== - -. Force the cluster node to renew the DHCP lease: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo nmcli con up "<bare_metal_nic>" ----- -+ -Replace `<bare_metal_nic>` with the wired connection corresponding to the `baremetal` network. - -. Check `hostname` again: -+ -[source,terminal] ----- -[core@master-X ~]$ hostname ----- - -. If the hostname is still `localhost.localdomain`, restart `NetworkManager`: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo systemctl restart NetworkManager ----- - -. If the hostname is still `localhost.localdomain`, wait a few minutes and check again. If the hostname remains `localhost.localdomain`, repeat the previous steps. - -. Restart the `nodeip-configuration` service: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo systemctl restart nodeip-configuration.service ----- -+ -This service will reconfigure the `kubelet` service with the correct hostname references. - -. Reload the unit files definition since the kubelet changed in the previous step: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo systemctl daemon-reload ----- - -. Restart the `kubelet` service: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo systemctl restart kubelet.service ----- - -. Ensure `kubelet` booted with the correct hostname: -+ -[source,terminal] ----- -[core@master-X ~]$ sudo journalctl -fu kubelet.service ----- - -If the cluster node is not getting the correct hostname over DHCP after the cluster is up and running, such as during a reboot, the cluster will have a pending `csr`. **Do not** approve a `csr`, or other issues might arise. - -.Addressing a `csr` - -. Get CSRs on the cluster: -+ -[source,terminal] ----- -$ oc get csr ----- - -. Verify if a pending `csr` contains `Subject Name: localhost.localdomain`: -+ -[source,terminal] ----- -$ oc get csr <pending_csr> -o jsonpath='{.spec.request}' | base64 --decode | openssl req -noout -text ----- - -. Remove any `csr` that contains `Subject Name: localhost.localdomain`: -+ -[source,terminal] ----- -$ oc delete csr <wrong_csr> ----- - -== Routes do not reach endpoints - -During the installation process, it is possible to encounter a Virtual Router Redundancy Protocol (VRRP) conflict. This conflict might occur if a previously used {product-title} node that was once part of a cluster deployment using a specific cluster name is still running but not part of the current {product-title} cluster deployment using that same cluster name. For example, a cluster was deployed using the cluster name `openshift`, deploying three control plane (master) nodes and three worker nodes. 
Later, a separate install uses the same cluster name `openshift`, but this redeployment only installed three control plane (master) nodes, leaving the three worker nodes from a previous deployment in an `ON` state. This might cause a Virtual Router Identifier (VRID) conflict and a VRRP conflict. - -. Get the route: -+ -[source,terminal] ----- -$ oc get route oauth-openshift ----- - -. Check the service endpoint: -+ -[source,terminal] ----- -$ oc get svc oauth-openshift ----- -+ -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -oauth-openshift ClusterIP 172.30.19.162 <none> 443/TCP 59m ----- - -. Attempt to reach the service from a control plane (master) node: -+ -[source,terminal] ----- -[core@master0 ~]$ curl -k https://172.30.19.162 ----- -+ -[source,terminal] ----- -{ - "kind": "Status", - "apiVersion": "v1", - "metadata": { - }, - "status": "Failure", - "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"", - "reason": "Forbidden", - "details": { - }, - "code": 403 ----- - -. Identify the `authentication-operator` errors from the `provisioner` node: -+ -[source,terminal] ----- -$ oc logs deployment/authentication-operator -n openshift-authentication-operator ----- -+ -[source,terminal] ----- -Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-authentication-operator", Name:"authentication-operator", UID:"225c5bd5-b368-439b-9155-5fd3c0459d98", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'OperatorStatusChanged' Status for clusteroperator/authentication changed: Degraded message changed from "IngressStateEndpointsDegraded: All 2 endpoints for oauth-server are reporting" ----- - -.Solution - -. Ensure that the cluster name for every deployment is unique, ensuring no conflict. - -. Turn off all the rogue nodes which are not part of the cluster deployment that are using the same cluster name. Otherwise, the authentication pod of the {product-title} cluster might never start successfully. diff --git a/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc b/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc deleted file mode 100644 index 1e1360621d41..000000000000 --- a/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-ntp-out-of-sync_{context}"] - -= NTP out of sync - -The deployment of {product-title} clusters depends on NTP synchronized clocks among the cluster nodes. Without synchronized clocks, the deployment may fail due to clock drift if the time difference is greater than two seconds. - -.Procedure - -. Check for differences in the `AGE` of the cluster nodes. For example: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0.cloud.example.com Ready master 145m v1.26.0 -master-1.cloud.example.com Ready master 135m v1.26.0 -master-2.cloud.example.com Ready master 145m v1.26.0 -worker-2.cloud.example.com Ready worker 100m v1.26.0 ----- - -. Check for inconsistent timing delays due to clock drift. 
For example: -+ -[source,terminal] ----- -$ oc get bmh -n openshift-machine-api ----- -+ -[source,terminal] ----- -master-1 error registering master-1 ipmi://<out_of_band_ip> ----- -+ -[source,terminal] ----- -$ sudo timedatectl ----- -+ -[source,terminal] ----- - Local time: Tue 2020-03-10 18:20:02 UTC - Universal time: Tue 2020-03-10 18:20:02 UTC - RTC time: Tue 2020-03-10 18:36:53 - Time zone: UTC (UTC, +0000) -System clock synchronized: no - NTP service: active - RTC in local TZ: no ----- - -.Addressing clock drift in existing clusters - -. Create a Butane config file including the contents of the `chrony.conf` file to be delivered to the nodes. In the following example, create `99-master-chrony.bu` to add the file to the control plane nodes. You can modify the file for worker nodes or repeat this procedure for the worker role. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 99-master-chrony - labels: - machineconfiguration.openshift.io/role: master -storage: - files: - - path: /etc/chrony.conf - mode: 0644 - overwrite: true - contents: - inline: | - server <NTP_server> iburst <1> - stratumweight 0 - driftfile /var/lib/chrony/drift - rtcsync - makestep 10 3 - bindcmdaddress 127.0.0.1 - bindcmdaddress ::1 - keyfile /etc/chrony.keys - commandkey 1 - generatecommandkey - noclientlog - logchange 0.5 - logdir /var/log/chrony ----- -<1> Replace `<NTP_server>` with the IP address of the NTP server. - -. Use Butane to generate a `MachineConfig` object file, `99-master-chrony.yaml`, containing the configuration to be delivered to the nodes: -+ -[source,terminal] ----- -$ butane 99-master-chrony.bu -o 99-master-chrony.yaml ----- -. Apply the `MachineConfig` object file: -+ -[source,terminal] ----- -$ oc apply -f 99-master-chrony.yaml ----- - -. Ensure the `System clock synchronized` value is **yes**: -+ -[source,terminal] ----- -$ sudo timedatectl ----- -+ -[source,terminal] ----- - Local time: Tue 2020-03-10 19:10:02 UTC - Universal time: Tue 2020-03-10 19:10:02 UTC - RTC time: Tue 2020-03-10 19:36:53 - Time zone: UTC (UTC, +0000) -System clock synchronized: yes - NTP service: active - RTC in local TZ: no ----- -+ -To setup clock synchronization prior to deployment, generate the manifest files and add this file to the `openshift` directory. For example: -+ -[source,terminal] ----- -$ cp chrony-masters.yaml ~/clusterconfigs/openshift/99_masters-chrony-configuration.yaml ----- -+ -Then, continue to create the cluster. diff --git a/modules/ipi-install-troubleshooting-registry-issues.adoc b/modules/ipi-install-troubleshooting-registry-issues.adoc deleted file mode 100644 index b83ae03d4525..000000000000 --- a/modules/ipi-install-troubleshooting-registry-issues.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-registry-issues_{context}"] - -= Issues with creating the registry - -When creating a disconnected registry, you might encounter a "User Not Authorized" error when attempting to mirror the registry. This error might occur if you fail to append the new authentication to the existing `pull-secret.txt` file. - -.Procedure - -. 
Check to ensure authentication is successful: -+ -[source,terminal] ----- -$ /usr/local/bin/oc adm release mirror \ - -a pull-secret-update.json - --from=$UPSTREAM_REPO \ - --to-release-image=$LOCAL_REG/$LOCAL_REPO:${VERSION} \ - --to=$LOCAL_REG/$LOCAL_REPO ----- -+ -[NOTE] -==== -Example output of the variables used to mirror the install images: - -[source,terminal] ----- -UPSTREAM_REPO=${RELEASE_IMAGE} -LOCAL_REG=<registry_FQDN>:<registry_port> -LOCAL_REPO='ocp4/openshift4' ----- - -The values of `RELEASE_IMAGE` and `VERSION` were set during the **Retrieving OpenShift Installer** step of the **Setting up the environment for an OpenShift installation** section. -==== - -. After mirroring the registry, confirm that you can access it in your -disconnected environment: -+ -[source,terminal] ----- -$ curl -k -u <user>:<password> https://registry.example.com:<registry_port>/v2/_catalog -{"repositories":["<Repo_Name>"]} ----- diff --git a/modules/ipi-install-troubleshooting-reviewing-the-installation.adoc b/modules/ipi-install-troubleshooting-reviewing-the-installation.adoc deleted file mode 100644 index abb7fbaf0914..000000000000 --- a/modules/ipi-install-troubleshooting-reviewing-the-installation.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="ipi-install-troubleshooting-reviewing-the-installation_{context}"] - -= Reviewing the installation - -After installation, ensure the installer deployed the nodes and pods successfully. - -.Procedure - -. When the {product-title} cluster nodes are installed appropriately, the following `Ready` state is seen within the `STATUS` column: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0.example.com Ready master,worker 4h v1.26.0 -master-1.example.com Ready master,worker 4h v1.26.0 -master-2.example.com Ready master,worker 4h v1.26.0 ----- - -. Confirm the installer deployed all pods successfully. The following command -removes any pods that are still running or have completed as part of the output. -+ -[source,terminal] ----- -$ oc get pods --all-namespaces | grep -iv running | grep -iv complete ----- \ No newline at end of file diff --git a/modules/ipi-install-troubleshooting_proc_worker-nodes-cannot-join-the-cluster.adoc b/modules/ipi-install-troubleshooting_proc_worker-nodes-cannot-join-the-cluster.adoc deleted file mode 100644 index c083f4458a75..000000000000 --- a/modules/ipi-install-troubleshooting_proc_worker-nodes-cannot-join-the-cluster.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="worker-nodes-cannot-join-the-cluster_{context}"] -= Troubleshooting worker nodes that cannot join the cluster - -Installer-provisioned clusters deploy with a DNS server that includes a DNS entry for the `api-int.<cluster_name>.<base_domain>` URL. If the nodes within the cluster use an external or upstream DNS server to resolve the `api-int.<cluster_name>.<base_domain>` URL and there is no such entry, worker nodes might fail to join the cluster. Ensure that all nodes in the cluster can resolve the domain name. - -.Procedure - -. Add a DNS A/AAAA or CNAME record to internally identify the API load balancer. 
For example, when using dnsmasq, modify the `dnsmasq.conf` configuration file: -+ -[source,terminal,options="nowrap",role="white-space-pre"] ----- -$ sudo nano /etc/dnsmasq.conf ----- -+ -[source,terminal,options="nowrap",role="white-space-pre"] ----- -address=/api-int.<cluster_name>.<base_domain>/<IP_address> -address=/api-int.mycluster.example.com/192.168.1.10 -address=/api-int.mycluster.example.com/2001:0db8:85a3:0000:0000:8a2e:0370:7334 ----- - -. Add a DNS PTR record to internally identify the API load balancer. For example, when using dnsmasq, modify the `dnsmasq.conf` configuration file: -+ -[source,terminal,options="nowrap",role="white-space-pre"] ----- -$ sudo nano /etc/dnsmasq.conf ----- -+ -[source,terminal,options="nowrap",role="white-space-pre"] ----- -ptr-record=<IP_address>.in-addr.arpa,api-int.<cluster_name>.<base_domain> -ptr-record=10.1.168.192.in-addr.arpa,api-int.mycluster.example.com ----- - -. Restart the DNS server. For example, when using dnsmasq, execute the following command: -+ -[source,terminal,subs="+quotes",options="nowrap",role="white-space-pre"] ----- -$ sudo systemctl restart dnsmasq ----- - -These records must be resolvable from all the nodes within the cluster. \ No newline at end of file diff --git a/modules/ipi-install-troubleshooting_unable-to-discover-new-bare-metal-hosts-using-the-bmc.adoc b/modules/ipi-install-troubleshooting_unable-to-discover-new-bare-metal-hosts-using-the-bmc.adoc deleted file mode 100644 index bef53349920d..000000000000 --- a/modules/ipi-install-troubleshooting_unable-to-discover-new-bare-metal-hosts-using-the-bmc.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// This module is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-troubleshooting.adoc - -:_content-type: PROC -[id="unable-to-discover-new-bare-metal-hosts-using-the-bmc_{context}"] -= Unable to discover new bare metal hosts using the BMC - -In some cases, the installation program will not be able to discover the new bare metal hosts and issue an error, because it cannot mount the remote virtual media share. - -For example: - -[source,terminal] ----- -ProvisioningError 51s metal3-baremetal-controller Image provisioning failed: Deploy step deploy.deploy failed with BadRequestError: HTTP POST -https://<bmc_address>/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia -returned code 400. -Base.1.8.GeneralError: A general error has occurred. See ExtendedInfo for more information -Extended information: [ - { - "Message": "Unable to mount remote share https://<ironic_address>/redfish/boot-<uuid>.iso.", - "MessageArgs": [ - "https://<ironic_address>/redfish/boot-<uuid>.iso" - ], - "MessageArgs@odata.count": 1, - "MessageId": "IDRAC.2.5.RAC0720", - "RelatedProperties": [ - "#/Image" - ], - "RelatedProperties@odata.count": 1, - "Resolution": "Retry the operation.", - "Severity": "Informational" - } -]. ----- - -In this situation, if you are using virtual media with an unknown certificate authority, you can configure your baseboard management controller (BMC) remote file share settings to trust an unknown certificate authority to avoid this error. - -[NOTE] -==== -This resolution was tested on {product-title} 4.11 with Dell iDRAC 9 and firmware version 5.10.50. 
-==== diff --git a/modules/ipi-install-validation-checklist-for-installation.adoc b/modules/ipi-install-validation-checklist-for-installation.adoc deleted file mode 100644 index c7d24b1bd802..000000000000 --- a/modules/ipi-install-validation-checklist-for-installation.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - - -[id="validation-checklist-for-installation_{context}"] -= Validation checklist for installation - -* [ ] {product-title} installer has been retrieved. -* [ ] {product-title} installer has been extracted. -* [ ] Required parameters for the `install-config.yaml` have been configured. -* [ ] The `hosts` parameter for the `install-config.yaml` has been configured. -* [ ] The `bmc` parameter for the `install-config.yaml` has been configured. -* [ ] Conventions for the values configured in the `bmc` `address` field have been applied. -* [ ] Created the {product-title} manifests. -* [ ] (Optional) Deployed routers on worker nodes. -* [ ] (Optional) Created a disconnected registry. -* [ ] (Optional) Validate disconnected registry settings if in use. diff --git a/modules/ipi-install-validation-checklist-for-nodes.adoc b/modules/ipi-install-validation-checklist-for-nodes.adoc deleted file mode 100644 index 56afd62b70f6..000000000000 --- a/modules/ipi-install-validation-checklist-for-nodes.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc - - -[id="validation-checklist-for-nodes_{context}"] -= Validation checklist for nodes - -.When using the `provisioning` network - -* [ ] NIC1 VLAN is configured for the `provisioning` network. -* [ ] NIC1 for the `provisioning` network is PXE-enabled on the provisioner, control plane, and worker nodes. -* [ ] NIC2 VLAN is configured for the `baremetal` network. -* [ ] PXE has been disabled on all other NICs. -* [ ] DNS is configured with API and Ingress endpoints. -* [ ] Control plane and worker nodes are configured. -* [ ] All nodes accessible via out-of-band management. -* [ ] (Optional) A separate management network has been created. -* [ ] Required data for installation. - -.When omitting the `provisioning` network - -* [ ] NIC1 VLAN is configured for the `baremetal` network. -* [ ] DNS is configured with API and Ingress endpoints. -* [ ] Control plane and worker nodes are configured. -* [ ] All nodes accessible via out-of-band management. -* [ ] (Optional) A separate management network has been created. -* [ ] Required data for installation. diff --git a/modules/ipi-install-verifying-static-ip-address-configuration.adoc b/modules/ipi-install-verifying-static-ip-address-configuration.adoc deleted file mode 100644 index 958bf09dc050..000000000000 --- a/modules/ipi-install-verifying-static-ip-address-configuration.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// This is included in the following assemblies: -// -// installing/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -:_content-type: PROCEDURE -[id="verifying-static-ip-address-configuration_{context}"] -= Verifying static IP address configuration - -If the DHCP reservation for a cluster node specifies an infinite lease, after the installer successfully provisions the node, the dispatcher script checks the node's network configuration. 
If the script determines that the network configuration contains an infinite DHCP lease, it creates a new connection using the IP address of the DHCP lease as a static IP address. - -[NOTE] -==== -The dispatcher script might run on successfully provisioned nodes while the provisioning of other nodes in the cluster is ongoing. -==== - -Verify the network configuration is working properly. - -.Procedure - -. Check the network interface configuration on the node. - -. Turn off the DHCP server and reboot the {product-title} node and ensure that the network configuration works properly. diff --git a/modules/ipi-modify-a-disconnected-registry-config-yaml.adoc b/modules/ipi-modify-a-disconnected-registry-config-yaml.adoc deleted file mode 100644 index e0e5052ada63..000000000000 --- a/modules/ipi-modify-a-disconnected-registry-config-yaml.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: - -// * installing-mirroring-creating-registry.adoc - -:_content-type: PROCEDURE -[id="ipi-modify-a-disconnected-registry-config-yaml_{context}"] -= Modify the install-config.yaml file to use the disconnected registry - -The `install-config.yaml` file must contain the disconnected registry node's certificate and registry information. - -.Procedure - -. Edit the `install-config.yaml` file to give the mirror information for the registry. -.. Update the `pullSecret` value to contain the authentication information from your registry: -+ -[source,terminal] ----- -$ pullSecret: '{"auths":{"<mirror_host_name>:5000": {"auth": "<credentials>","email": "you@example.com"}}}'---- ----- -+ -For `<mirror_host_name>`, specify the registry domain name that you specified in the certificate for your mirror registry, and for `<credentials>`, specify the base64-encoded user name and password for your mirror registry. - -.. Add the `additionalTrustBundle` parameter and value: -+ -[source,yaml] ----- -additionalTrustBundle: | - -----BEGIN CERTIFICATE----- - ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ - -----END CERTIFICATE----- ----- -+ -The value must be the contents of the certificate file that you used for your mirror registry. The certificate file can be an existing, trusted certificate authority, or the self-signed certificate that you generated for the mirror registry. - -.. Add the image content resources, which resemble the following YAML excerpt: -+ -[source,yaml] ----- -imageContentSources: -- mirrors: - - <mirror_host_name>:5000/<repo_name>/release - source: quay.example.com/openshift-release-dev/ocp-release -- mirrors: - - <mirror_host_name>:5000/<repo_name>/release - source: registry.example.com/ocp/release ----- -+ -For these values, use the `imageContentSources` that you recorded during mirror registry creation. - -. Make any other modifications to the `install-config.yaml` file that you require. You can find more information about the available parameters in the *Installation configuration parameters* section. 
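Before editing `install-config.yaml`, you can prepare the individual values on the command line. This is an informal sketch: the certificate path matches the provisioner-node example used elsewhere in these modules, and the credentials are placeholders.

[source,terminal]
----
# Generate the base64-encoded <credentials> value for the pullSecret entry.
$ echo -n '<user>:<password>' | base64 -w 0

# Check the subject and expiry of the certificate to paste into additionalTrustBundle.
$ openssl x509 -in /opt/registry/certs/domain.crt -noout -subject -enddate

# Indent the certificate by two spaces so it nests correctly under "additionalTrustBundle: |".
$ sed -e 's/^/  /' /opt/registry/certs/domain.crt
----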
diff --git a/modules/ipi-modify-install-config-for-a-disconnected-registry.adoc b/modules/ipi-modify-install-config-for-a-disconnected-registry.adoc deleted file mode 100644 index 3738a2b24222..000000000000 --- a/modules/ipi-modify-install-config-for-a-disconnected-registry.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * list of assemblies where this module is included -// install/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - - -:_content-type: PROCEDURE -[id="ipi-modify-install-config-for-a-disconnected-registry_{context}"] -= Modify the install-config.yaml file to use the disconnected registry - -On the provisioner node, the `install-config.yaml` file should use the newly created pull-secret from the `pull-secret-update.txt` file. The `install-config.yaml` file must also contain the disconnected registry node's certificate and registry information. - -.Procedure - -. Add the disconnected registry node's certificate to the `install-config.yaml` file: -+ -[source,terminal] ----- -$ echo "additionalTrustBundle: |" >> install-config.yaml ----- -+ -The certificate should follow the `"additionalTrustBundle: |"` line and be properly indented, usually by two spaces. -+ -[source,terminal] ----- -$ sed -e 's/^/ /' /opt/registry/certs/domain.crt >> install-config.yaml ----- - -. Add the mirror information for the registry to the `install-config.yaml` file: -+ -[source,terminal] ----- -$ echo "imageContentSources:" >> install-config.yaml ----- -+ -[source,terminal] ----- -$ echo "- mirrors:" >> install-config.yaml ----- -+ -[source,terminal] ----- -$ echo " - registry.example.com:5000/ocp4/openshift4" >> install-config.yaml ----- -+ -Replace `registry.example.com` with the registry's fully qualified domain name. -+ -[source,terminal] ----- -$ echo " source: quay.io/openshift-release-dev/ocp-release" >> install-config.yaml ----- -+ -[source,terminal] ----- -$ echo "- mirrors:" >> install-config.yaml ----- -+ -[source,terminal] ----- -$ echo " - registry.example.com:5000/ocp4/openshift4" >> install-config.yaml ----- -+ -Replace `registry.example.com` with the registry's fully qualified domain name. -+ -[source,terminal] ----- -$ echo " source: quay.io/openshift-release-dev/ocp-v4.0-art-dev" >> install-config.yaml ----- diff --git a/modules/ipi-preparing-reinstall-cluster-bare-metal.adoc b/modules/ipi-preparing-reinstall-cluster-bare-metal.adoc deleted file mode 100644 index 62159eabffda..000000000000 --- a/modules/ipi-preparing-reinstall-cluster-bare-metal.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// //installing/installing_bare_metal_ipi/installing_bare_metal_ipi/ipi-install-installation-workflow.adoc - -[id="ipi-preparing-reinstall-cluster-bare-metal_{context}"] - -= Preparing to reinstall a cluster on bare metal -Before you reinstall a cluster on bare metal, you must perform cleanup operations. - -.Procedure -. Remove or reformat the disks for the bootstrap, control plane node, and worker nodes. If you are working in a hypervisor environment, you must add any disks you removed. -. Delete the artifacts that the previous installation generated: -+ -[source,terminal] ----- -$ cd ; /bin/rm -rf auth/ bootstrap.ign master.ign worker.ign metadata.json \ -.openshift_install.log .openshift_install_state.json ----- -. Generate new manifests and Ignition config files. See “Creating the Kubernetes manifest and Ignition config files" for more information. -. 
Upload the new bootstrap, control plane, and compute node Ignition config files that the installation program created to your HTTP server. This will overwrite the previous Ignition files. diff --git a/modules/ipi-verifying-nodes-after-installation.adoc b/modules/ipi-verifying-nodes-after-installation.adoc deleted file mode 100644 index 2d3ca42e95e3..000000000000 --- a/modules/ipi-verifying-nodes-after-installation.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/index.adoc - -:_module-type: PROCEDURE -[id="ipi-verifying-nodes-after-installation_{context}"] -= Verifying node state after installation - -[role="_abstract"] -The {product-title} installation completes when the following installation health checks are successful: - -* The provisioner can access the {product-title} web console. - -* All control plane nodes are ready. - -* All cluster Operators are available. - -[NOTE] -==== -After the installation completes, the specific cluster Operators responsible for the worker nodes continuously attempt to provision all worker nodes. It can take some time before all worker nodes report as `READY`. For installations on bare metal, wait a minimum of 60 minutes before troubleshooting a worker node. For installations on all other platforms, wait a minimum of 40 minutes before troubleshooting a worker node. A `DEGRADED` state for the cluster Operators responsible for the worker nodes depends on the Operators' own resources and not on the state of the nodes. -==== - -After your installation completes, you can continue to monitor the condition of the nodes in your cluster by using the following steps. - -.Prerequisites -* The installation program resolves successfully in the terminal. - -.Procedure -. Show the status of all worker nodes: - -+ -[source,terminal] ----- -$ oc get nodes ----- - -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -example-compute1.example.com Ready worker 13m v1.21.6+bb8d50a -example-compute2.example.com Ready worker 13m v1.21.6+bb8d50a -example-compute4.example.com Ready worker 14m v1.21.6+bb8d50a -example-control1.example.com Ready master 52m v1.21.6+bb8d50a -example-control2.example.com Ready master 55m v1.21.6+bb8d50a -example-control3.example.com Ready master 55m v1.21.6+bb8d50a ----- - -. 
Show the phase of all worker machine nodes: - -+ -[source,terminal] ----- -$ oc get machines -A ----- - -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME PHASE TYPE REGION ZONE AGE -openshift-machine-api example-zbbt6-master-0 Running 95m -openshift-machine-api example-zbbt6-master-1 Running 95m -openshift-machine-api example-zbbt6-master-2 Running 95m -openshift-machine-api example-zbbt6-worker-0-25bhp Running 49m -openshift-machine-api example-zbbt6-worker-0-8b4c2 Running 49m -openshift-machine-api example-zbbt6-worker-0-jkbqt Running 49m -openshift-machine-api example-zbbt6-worker-0-qrl5b Running 49m ----- diff --git a/modules/ipsec-impact-networking.adoc b/modules/ipsec-impact-networking.adoc deleted file mode 100644 index e735553f96af..000000000000 --- a/modules/ipsec-impact-networking.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/optimization/optimizing-networking.adoc - -[id="ipsec-impact_{context}"] -= Impact of IPsec - -Because encrypting and decrypting node hosts uses CPU power, performance is affected both in throughput and CPU usage on the nodes when encryption is enabled, regardless of the IP security system being used. - -IPSec encrypts traffic at the IP payload level, before it hits the NIC, protecting fields that would otherwise be used for NIC offloading. This means that some NIC acceleration features might not be usable when IPSec is enabled and will lead to decreased throughput and increased CPU usage. diff --git a/modules/jaeger-architecture.adoc b/modules/jaeger-architecture.adoc deleted file mode 100644 index ece0d2274de6..000000000000 --- a/modules/jaeger-architecture.adoc +++ /dev/null @@ -1,25 +0,0 @@ -//// -This CONCEPT module included in the following assemblies: --service_mesh/v1x/ossm-architecture.adoc --service_mesh/v2x/ossm-architecture.adoc --rhbjaeger-architecture.adoc -//// -:_content-type: CONCEPT -[id="jaeger-architecture_{context}"] -= Distributed tracing architecture - -The {JaegerShortName} is based on the open source link:https://www.jaegertracing.io/[Jaeger project]. The {JaegerShortName} is made up of several components that work together to collect, store, and display tracing data. - -* *Jaeger Client* (Tracer, Reporter, instrumented application, client libraries)- Jaeger clients are language specific implementations of the OpenTracing API. They can be used to instrument applications for distributed tracing either manually or with a variety of existing open source frameworks, such as Camel (Fuse), Spring Boot (RHOAR), MicroProfile (RHOAR/Thorntail), Wildfly (EAP), and many more, that are already integrated with OpenTracing. - -* *Jaeger Agent* (Server Queue, Processor Workers) - The Jaeger agent is a network daemon that listens for spans sent over User Datagram Protocol (UDP), which it batches and sends to the collector. The agent is meant to be placed on the same host as the instrumented application. This is typically accomplished by having a sidecar in container environments like Kubernetes. - -* *Jaeger Collector* (Queue, Workers) - Similar to the Agent, the Collector is able to receive spans and place them in an internal queue for processing. This allows the collector to return immediately to the client/agent instead of waiting for the span to make its way to the storage. - -* *Storage* (Data Store) - Collectors require a persistent storage backend. Jaeger has a pluggable mechanism for span storage. 
Note that for this release, the only supported storage is Elasticsearch. - -* *Query* (Query Service) - Query is a service that retrieves traces from storage. - -* *Ingester* (Ingester Service) - Jaeger can use Apache Kafka as a buffer between the collector and the actual backing storage (Elasticsearch). Ingester is a service that reads data from Kafka and writes to another storage backend (Elasticsearch). - -* *Jaeger Console* – Jaeger provides a user interface that lets you visualize your distributed tracing data. On the Search page, you can find traces and explore details of the spans that make up an individual trace. diff --git a/modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc b/modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc deleted file mode 100644 index 2a4606905b78..000000000000 --- a/modules/jt-comparison-of-jenkins-and-openshift-pipelines-concepts.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assembly: -// -// jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -:_content-type: CONCEPT -[id="jt-comparison-of-jenkins-and-openshift-pipelines-concepts_{context}"] -= Comparison of Jenkins and {pipelines-shortname} concepts - -You can review and compare the following equivalent terms used in Jenkins and {pipelines-shortname}. - -== Jenkins terminology -Jenkins offers declarative and scripted pipelines that are extensible using shared libraries and plugins. Some basic terms in Jenkins are as follows: - -* *Pipeline*: Automates the entire process of building, testing, and deploying applications by using link:https://groovy-lang.org/[Groovy] syntax. -* *Node*: A machine capable of either orchestrating or executing a scripted pipeline. -* *Stage*: A conceptually distinct subset of tasks performed in a pipeline. Plugins or user interfaces often use this block to display the status or progress of tasks. -* **Step**: A single task that specifies the exact action to be taken, either by using a command or a script. - -== {pipelines-shortname} terminology -{pipelines-shortname} uses link:https://yaml.org/[YAML] syntax for declarative pipelines and consists of tasks. Some basic terms in {pipelines-shortname} are as follows: - -* **Pipeline**: A set of tasks in a series, in parallel, or both. -* **Task**: A sequence of steps as commands, binaries, or scripts. -* **PipelineRun**: Execution of a pipeline with one or more tasks. -* **TaskRun**: Execution of a task with one or more steps. -+ -[NOTE] -==== -You can initiate a PipelineRun or a TaskRun with a set of inputs such as parameters and workspaces, and the execution results in a set of outputs and artifacts. -==== -* **Workspace**: In {pipelines-shortname}, workspaces are conceptual blocks that serve the following purposes: - -** Storage of inputs, outputs, and build artifacts. - -** Common space to share data among tasks. - -** Mount points for credentials held in secrets, configurations held in config maps, and common tools shared by an organization. - -+ -[NOTE] -==== -In Jenkins, there is no direct equivalent of {pipelines-shortname} workspaces. You can think of the control node as a workspace, as it stores the cloned code repository, build history, and artifacts. When a job is assigned to a different node, the cloned code and the generated artifacts are stored in that node, but the control node maintains the build history. 
-==== - -== Mapping of concepts -The building blocks of Jenkins and {pipelines-shortname} are not equivalent, and a specific comparison does not provide a technically accurate mapping. The following terms and concepts in Jenkins and {pipelines-shortname} correlate in general: - -.Jenkins and {pipelines-shortname} - basic comparison -[cols="1,1",options="header"] -|=== -|Jenkins|{pipelines-shortname} -|Pipeline|Pipeline and PipelineRun -|Stage|Task -|Step|A step in a task -|=== diff --git a/modules/jt-comparison-of-jenkins-openshift-pipelines-execution-models.adoc b/modules/jt-comparison-of-jenkins-openshift-pipelines-execution-models.adoc deleted file mode 100644 index d1641e88d9b2..000000000000 --- a/modules/jt-comparison-of-jenkins-openshift-pipelines-execution-models.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assembly: -// -// jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -:_content-type: CONCEPT -[id="jt-comparison-of-jenkins-openshift-pipelines-execution-models_{context}"] -= Comparison of Jenkins and {pipelines-shortname} execution models - -Jenkins and {pipelines-shortname} offer similar functions but are different in architecture and execution. - -.Comparison of execution models in Jenkins and {pipelines-shortname} -[cols="1,1",options="header"] -|=== -|Jenkins|{pipelines-shortname} -|Jenkins has a controller node. Jenkins runs pipelines and steps centrally, or orchestrates jobs running in other nodes.|{pipelines-shortname} is serverless and distributed, and there is no central dependency for execution. -|Containers are launched by the Jenkins controller node through the pipeline.|{pipelines-shortname} adopts a 'container-first' approach, where every step runs as a container in a pod (equivalent to nodes in Jenkins). -|Extensibility is achieved by using plugins.|Extensibility is achieved by using tasks in Tekton Hub or by creating custom tasks and scripts. -|=== diff --git a/modules/jt-examples-of-common-use-cases.adoc b/modules/jt-examples-of-common-use-cases.adoc deleted file mode 100644 index 5bfd8bb7d177..000000000000 --- a/modules/jt-examples-of-common-use-cases.adoc +++ /dev/null @@ -1,164 +0,0 @@ -:_content-type: CONCEPT -// Module included in the following assembly: -// -// jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -[id="jt-examples-of-common-use-cases_{context}"] -= Examples of common use cases - -Both Jenkins and {pipelines-shortname} offer capabilities for common CI/CD use cases, such as: - -* Compiling, building, and deploying images using Apache Maven -* Extending the core capabilities by using plugins -* Reusing shareable libraries and custom scripts - -== Running a Maven pipeline in Jenkins and {pipelines-shortname} - -You can use Maven in both Jenkins and {pipelines-shortname} workflows for compiling, building, and deploying images. 
To map your existing Jenkins workflow to {pipelines-shortname}, consider the following examples: - -.Example: Compile and build an image and deploy it to OpenShift using Maven in Jenkins -[source,groovy] ----- -#!/usr/bin/groovy -node('maven') { - stage 'Checkout' - checkout scm - - stage 'Build' - sh 'cd helloworld && mvn clean' - sh 'cd helloworld && mvn compile' - - stage 'Run Unit Tests' - sh 'cd helloworld && mvn test' - - stage 'Package' - sh 'cd helloworld && mvn package' - - stage 'Archive artifact' - sh 'mkdir -p artifacts/deployments && cp helloworld/target/*.war artifacts/deployments' - archive 'helloworld/target/*.war' - - stage 'Create Image' - sh 'oc login https://kubernetes.default -u admin -p admin --insecure-skip-tls-verify=true' - sh 'oc new-project helloworldproject' - sh 'oc project helloworldproject' - sh 'oc process -f helloworld/jboss-eap70-binary-build.json | oc create -f -' - sh 'oc start-build eap-helloworld-app --from-dir=artifacts/' - - stage 'Deploy' - sh 'oc new-app helloworld/jboss-eap70-deploy.json' } - ----- - -.Example: Compile and build an image and deploy it to OpenShift using Maven in {pipelines-shortname}. -[source,yaml] ----- -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: maven-pipeline -spec: - workspaces: - - name: shared-workspace - - name: maven-settings - - name: kubeconfig-dir - optional: true - params: - - name: repo-url - - name: revision - - name: context-path - tasks: - - name: fetch-repo - taskRef: - name: git-clone - workspaces: - - name: output - workspace: shared-workspace - params: - - name: url - value: "$(params.repo-url)" - - name: subdirectory - value: "" - - name: deleteExisting - value: "true" - - name: revision - value: $(params.revision) - - name: mvn-build - taskRef: - name: maven - runAfter: - - fetch-repo - workspaces: - - name: source - workspace: shared-workspace - - name: maven-settings - workspace: maven-settings - params: - - name: CONTEXT_DIR - value: "$(params.context-path)" - - name: GOALS - value: ["-DskipTests", "clean", "compile"] - - name: mvn-tests - taskRef: - name: maven - runAfter: - - mvn-build - workspaces: - - name: source - workspace: shared-workspace - - name: maven-settings - workspace: maven-settings - params: - - name: CONTEXT_DIR - value: "$(params.context-path)" - - name: GOALS - value: ["test"] - - name: mvn-package - taskRef: - name: maven - runAfter: - - mvn-tests - workspaces: - - name: source - workspace: shared-workspace - - name: maven-settings - workspace: maven-settings - params: - - name: CONTEXT_DIR - value: "$(params.context-path)" - - name: GOALS - value: ["package"] - - name: create-image-and-deploy - taskRef: - name: openshift-client - runAfter: - - mvn-package - workspaces: - - name: manifest-dir - workspace: shared-workspace - - name: kubeconfig-dir - workspace: kubeconfig-dir - params: - - name: SCRIPT - value: | - cd "$(params.context-path)" - mkdir -p ./artifacts/deployments && cp ./target/*.war ./artifacts/deployments - oc new-project helloworldproject - oc project helloworldproject - oc process -f jboss-eap70-binary-build.json | oc create -f - - oc start-build eap-helloworld-app --from-dir=artifacts/ - oc new-app jboss-eap70-deploy.json - ----- - -== Extending the core capabilities of Jenkins and {pipelines-shortname} by using plugins -Jenkins has the advantage of a large ecosystem of numerous plugins developed over the years by its extensive user base. You can search and browse the plugins in the link:https://plugins.jenkins.io/[Jenkins Plugin Index]. 
- -{pipelines-shortname} also has many tasks developed and contributed by the community and enterprise users. A publicly available catalog of reusable {pipelines-shortname} tasks are available in the link:https://hub.tekton.dev/[Tekton Hub]. - -In addition, {pipelines-shortname} incorporates many of the plugins of the Jenkins ecosystem within its core capabilities. For example, authorization is a critical function in both Jenkins and {pipelines-shortname}. While Jenkins ensures authorization using the link:https://plugins.jenkins.io/role-strategy/[Role-based Authorization Strategy] plugin, {pipelines-shortname} uses OpenShift's built-in Role-based Access Control system. - -== Sharing reusable code in Jenkins and {pipelines-shortname} -Jenkins link:https://www.jenkins.io/doc/book/pipeline/shared-libraries/[shared libraries] provide reusable code for parts of Jenkins pipelines. The libraries are shared between link:https://www.jenkins.io/doc/book/pipeline/jenkinsfile/[Jenkinsfiles] to create highly modular pipelines without code repetition. - -Although there is no direct equivalent of Jenkins shared libraries in {pipelines-shortname}, you can achieve similar workflows by using tasks from the link:https://hub.tekton.dev/[Tekton Hub] in combination with custom tasks and scripts. diff --git a/modules/jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts.adoc b/modules/jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts.adoc deleted file mode 100644 index 2b9d69b55103..000000000000 --- a/modules/jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assembly: -// -// jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -:_content-type: PROCEDURE -[id="jt-extending-openshift-pipelines-capabilities-using-custom-tasks-and-scripts_{context}"] -= Extending {pipelines-shortname} capabilities using custom tasks and scripts - -In {pipelines-shortname}, if you do not find the right task in Tekton Hub, or need greater control over tasks, you can create custom tasks and scripts to extend the capabilities of {pipelines-shortname}. - -.Example: A custom task for running the `maven test` command -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: maven-test -spec: - workspaces: - - name: source - steps: - - image: my-maven-image - command: ["mvn test"] - workingDir: $(workspaces.source.path) ----- - -.Example: Run a custom shell script by providing its path -[source,yaml,subs="attributes+"] ----- -... -steps: - image: ubuntu - script: | - #!/usr/bin/env bash - /workspace/my-script.sh -... ----- - -.Example: Run a custom Python script by writing it in the YAML file -[source,yaml,subs="attributes+"] ----- -... -steps: - image: python - script: | - #!/usr/bin/env python3 - print(“hello from python!”) -... 
----- diff --git a/modules/jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines.adoc b/modules/jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines.adoc deleted file mode 100644 index a998bda2f4dc..000000000000 --- a/modules/jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assembly: -// -// jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -:_content-type: PROCEDURE -[id="jt-migrating-a-sample-pipeline-from-jenkins-to-openshift-pipelines_{context}"] -= Migrating a sample pipeline from Jenkins to {pipelines-shortname} - -You can use the following equivalent examples to help migrate your build, test, and deploy pipelines from Jenkins to {pipelines-shortname}. - -== Jenkins pipeline -Consider a Jenkins pipeline written in Groovy for building, testing, and deploying: -[source,groovy,subs="attributes+"] ----- -pipeline { - agent any - stages { - stage('Build') { - steps { - sh 'make' - } - } - stage('Test'){ - steps { - sh 'make check' - junit 'reports/**/*.xml' - } - } - stage('Deploy') { - steps { - sh 'make publish' - } - } - } -} ----- - -== {pipelines-shortname} pipeline - -To create a pipeline in {pipelines-shortname} that is equivalent to the preceding Jenkins pipeline, you create the following three tasks: - -.Example `build` task YAML definition file -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: myproject-build -spec: - workspaces: - - name: source - steps: - - image: my-ci-image - command: ["make"] - workingDir: $(workspaces.source.path) ----- - -.Example `test` task YAML definition file -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: myproject-test -spec: - workspaces: - - name: source - steps: - - image: my-ci-image - command: ["make check"] - workingDir: $(workspaces.source.path) - - image: junit-report-image - script: | - #!/usr/bin/env bash - junit-report reports/**/*.xml - workingDir: $(workspaces.source.path) ----- - -.Example `deploy` task YAML definition file -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: myprojectd-deploy -spec: - workspaces: - - name: source - steps: - - image: my-deploy-image - command: ["make deploy"] - workingDir: $(workspaces.source.path) ----- - -You can combine the three tasks sequentially to form a pipeline in {pipelines-shortname}: - -.Example: {pipelines-shortname} pipeline for building, testing, and deployment -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: myproject-pipeline -spec: - workspaces: - - name: shared-dir - tasks: - - name: build - taskRef: - name: myproject-build - workspaces: - - name: source - workspace: shared-dir - - name: test - taskRef: - name: myproject-test - workspaces: - - name: source - workspace: shared-dir - - name: deploy - taskRef: - name: myproject-deploy - workspaces: - - name: source - workspace: shared-dir ----- diff --git a/modules/jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks.adoc b/modules/jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks.adoc deleted file mode 100644 index ac78bcabe8cc..000000000000 --- a/modules/jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assembly: -// -// 
jenkins/migrating-from-jenkins-to-openshift-pipelines.adoc - -:_content-type: PROCEDURE - -[id="jt-migrating-from-jenkins-plugins-to-openshift-pipelines-hub-tasks_{context}"] -= Migrating from Jenkins plugins to Tekton Hub tasks - -You can extend the capability of Jenkins by using link:https://plugins.jenkinsci.org[plugins]. To achieve similar extensibility in {pipelines-shortname}, use any of the tasks available from link:https://hub.tekton.dev[Tekton Hub]. - -For example, consider the link:https://hub.tekton.dev/tekton/task/git-clone[git-clone] task in Tekton Hub, which corresponds to the link:https://plugins.jenkins.io/git/[git plugin] for Jenkins. - -.Example: `git-clone` task from Tekton Hub -[source,yaml,subs="attributes+"] ----- -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: demo-pipeline -spec: - params: - - name: repo_url - - name: revision - workspaces: - - name: source - tasks: - - name: fetch-from-git - taskRef: - name: git-clone - params: - - name: url - value: $(params.repo_url) - - name: revision - value: $(params.revision) - workspaces: - - name: output - workspace: source ----- diff --git a/modules/k8s-nmstate-deploying-nmstate-CLI.adoc b/modules/k8s-nmstate-deploying-nmstate-CLI.adoc deleted file mode 100644 index 0d7607f198a3..000000000000 --- a/modules/k8s-nmstate-deploying-nmstate-CLI.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// This is included in the following assemblies: -// -// networking/k8s_nmstate/k8s-nmstate-about-the-kubernetes-nmstate-operator.adoc - -:_content-type: PROCEDURE -[id="installing-the-kubernetes-nmstate-operator-CLI_{context}"] -= Installing the Kubernetes NMState Operator using the CLI - -You can install the Kubernetes NMState Operator by using the OpenShift CLI (`oc)`. After it is installed, the Operator can deploy the NMState State Controller as a daemon set across all of the cluster nodes. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You are logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Create the `nmstate` Operator namespace: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: v1 -kind: Namespace -metadata: - labels: - kubernetes.io/metadata.name: openshift-nmstate - name: openshift-nmstate - name: openshift-nmstate -spec: - finalizers: - - kubernetes -EOF ----- - -. Create the `OperatorGroup`: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - annotations: - olm.providedAPIs: NMState.v1.nmstate.io - generateName: openshift-nmstate- - name: openshift-nmstate-tn6k8 - namespace: openshift-nmstate -spec: - targetNamespaces: - - openshift-nmstate -EOF ----- -. Subscribe to the `nmstate` Operator: -+ -[source,terminal] ----- -$ cat << EOF| oc apply -f - -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - labels: - operators.coreos.com/kubernetes-nmstate-operator.openshift-nmstate: "" - name: kubernetes-nmstate-operator - namespace: openshift-nmstate -spec: - channel: stable - installPlanApproval: Automatic - name: kubernetes-nmstate-operator - source: redhat-operators - sourceNamespace: openshift-marketplace -EOF ----- - -. 
Create instance of the `nmstate` operator: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: nmstate.io/v1 -kind: NMState -metadata: - name: nmstate -EOF ----- - -.Verification - -* Confirm that the deployment for the `nmstate` operator is running: -+ -[source,terminal] ----- -oc get clusterserviceversion -n openshift-nmstate \ - -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- -+ -.Example output -[source, terminal] ----- -Name Phase -kubernetes-nmstate-operator.4.13.0-202210210157 Succeeded ----- diff --git a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc b/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc deleted file mode 100644 index 79b432b6cf89..000000000000 --- a/modules/k8s-nmstate-installing-the-kubernetes-nmstate-operator.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// This is included in the following assemblies: -// -// networking/k8s_nmstate/k8s-nmstate-about-the-kubernetes-nmstate-operator.adoc - -:_content-type: PROCEDURE -[id="installing-the-kubernetes-nmstate-operator-web-console_{context}"] -= Installing the Kubernetes NMState Operator using the web console - -You can install the Kubernetes NMState Operator by using the web console. After it is installed, the Operator can deploy the NMState State Controller as a daemon set across all of the cluster nodes. - -.Prerequisites - -* You are logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Select *Operators* -> *OperatorHub*. - -. In the search field below *All Items*, enter `nmstate` and click *Enter* to search for the Kubernetes NMState Operator. - -. Click on the Kubernetes NMState Operator search result. - -. Click on *Install* to open the *Install Operator* window. - -. Click *Install* to install the Operator. - -. After the Operator finishes installing, click *View Operator*. - -. Under *Provided APIs*, click *Create Instance* to open the dialog box for creating an instance of `kubernetes-nmstate`. - -. In the *Name* field of the dialog box, ensure the name of the instance is `nmstate.` -+ -[NOTE] -==== -The name restriction is a known issue. The instance is a singleton for the entire cluster. -==== - -. Accept the default settings and click *Create* to create the instance. - -.Summary - -Once complete, the Operator has deployed the NMState State Controller as a daemon set across all of the cluster nodes. diff --git a/modules/kmm-about-kmm.adoc b/modules/kmm-about-kmm.adoc deleted file mode 100644 index 4638ca88eb76..000000000000 --- a/modules/kmm-about-kmm.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="about-kmm_{context}"] -= About the Kernel Module Management Operator - -The Kernel Module Management (KMM) Operator manages, builds, signs, and deploys out-of-tree kernel modules and device plugins on {product-title} clusters. - -KMM adds a new `Module` CRD which describes an out-of-tree kernel module and its associated device plugin. -You can use `Module` resources to configure how to load the module, define `ModuleLoader` images for kernel versions, and include instructions for building and signing modules for specific kernel versions. - -KMM is designed to accommodate multiple kernel versions at once for any kernel module, allowing for seamless node upgrades and reduced application downtime. 
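To confirm that the Operator's API is available before creating any resources, a couple of read-only queries are usually enough. This is a minimal sketch; the CRD name is inferred from the `kmm.sigs.x-k8s.io/v1beta1` API group used in the examples in these modules.

[source,terminal]
----
# Verify that the Module CRD added by KMM is registered in the cluster.
$ oc get crd modules.kmm.sigs.x-k8s.io

# List any Module resources across all namespaces.
$ oc get modules.kmm.sigs.x-k8s.io -A
----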
\ No newline at end of file diff --git a/modules/kmm-adding-the-keys-for-secureboot.adoc b/modules/kmm-adding-the-keys-for-secureboot.adoc deleted file mode 100644 index 4d0a6cb80c39..000000000000 --- a/modules/kmm-adding-the-keys-for-secureboot.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-adding-the-keys-for-secureboot_{context}"] -= Adding the keys for secureboot - -To use KMM Kernel Module Management (KMM) to sign kernel modules, a certificate and private key are required. For details on how to create these, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_monitoring_and_updating_the_kernel/signing-a-kernel-and-modules-for-secure-boot_managing-monitoring-and-updating-the-kernel#generating-a-public-and-private-key-pair_signing-a-kernel-and-modules-for-secure-boot[Generating a public and private key pair]. - -For details on how to extract the public and private key pair, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_monitoring_and_updating_the_kernel/signing-a-kernel-and-modules-for-secure-boot_managing-monitoring-and-updating-the-kernel#signing-kernel-modules-with-the-private-key_signing-a-kernel-and-modules-for-secure-boot[Signing kernel modules with the private key]. Use steps 1 through 4 to extract the keys into files. - -.Procedure - -. Create the `sb_cert.cer` file that contains the certificate and the `sb_cert.priv` file that contains the private key: -+ -[source,terminal] ----- -$ openssl req -x509 -new -nodes -utf8 -sha256 -days 36500 -batch -config configuration_file.config -outform DER -out my_signing_key_pub.der -keyout my_signing_key.priv ----- - -. Add the files by using one of the following methods: -+ -* Add the files as link:https://kubernetes.io/docs/concepts/configuration/secret/[secrets] directly: -+ -[source,terminal] ----- -$ oc create secret generic my-signing-key --from-file=key=<my_signing_key.priv> ----- -+ -[source,terminal] ----- -$ oc create secret generic my-signing-key-pub --from-file=key=<my_signing_key_pub.der> ----- -+ -* Add the files by base64 encoding them: -+ -[source,terminal] ----- -$ cat sb_cert.priv | base64 -w 0 > my_signing_key2.base64 ----- -+ -[source,terminal] ----- -$ cat sb_cert.cer | base64 -w 0 > my_signing_key_pub.base64 ----- - -. Add the encoded text to a YAML file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-signing-key-pub - namespace: default <1> -type: Opaque -data: - cert: <base64_encoded_secureboot_public_key> - ---- -apiVersion: v1 -kind: Secret -metadata: - name: my-signing-key - namespace: default <1> -type: Opaque -data: - key: <base64_encoded_secureboot_private_key> ----- -<1> `namespace` - Replace `default` with a valid namespace. - -. 
Apply the YAML file: -+ -[source,terminal] ----- -$ oc apply -f <yaml_filename> ----- diff --git a/modules/kmm-build-validation-stage.adoc b/modules/kmm-build-validation-stage.adoc deleted file mode 100644 index dc4a8c7ab127..000000000000 --- a/modules/kmm-build-validation-stage.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-build-validation-stage_{context}"] -= Build validation stage - -Build validation is executed only when image validation has failed and there is a `build` section in the `Module` that is relevant for the upgraded kernel. Build validation attempts to run the build job and validate that it finishes successfully. - -[NOTE] -==== -You must specify the kernel version when running `depmod`, as shown here: -[source,terminal] ----- -$ RUN depmod -b /opt ${KERNEL_VERSION} ----- -==== - -If the `PushBuiltImage` flag is defined in the `PreflightValidationOCP` custom resource (CR), it will also try to push the resulting image into its repository. The resulting image name is taken from the definition of the `containerImage` field of the `Module` CR. - -[NOTE] -==== -If the `sign` section is defined for the upgraded kernel, then the resulting image will not be the `containerImage` field of the `Module` CR, but a temporary image name, because the resulting image should be the product of Sign flow. -==== diff --git a/modules/kmm-building-a-moduleloader-image.adoc b/modules/kmm-building-a-moduleloader-image.adoc deleted file mode 100644 index d2917dec9e3a..000000000000 --- a/modules/kmm-building-a-moduleloader-image.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-building-a-moduleloader-image_{context}"] -= Building a ModuleLoader image - -.Procedure - -* In addition to building the kernel module itself, include the binary firmware in the builder image: -+ -[source,dockerfile] ----- -FROM registry.redhat.io/ubi9/ubi-minimal as builder - -# Build the kmod - -RUN ["mkdir", "/firmware"] -RUN ["curl", "-o", "/firmware/firmware.bin", "https://artifacts.example.com/firmware.bin"] - -FROM registry.redhat.io/ubi9/ubi-minimal - -# Copy the kmod, install modprobe, run depmod - -COPY --from=builder /firmware /firmware ----- diff --git a/modules/kmm-building-and-signing-a-moduleloader-container-image.adoc b/modules/kmm-building-and-signing-a-moduleloader-container-image.adoc deleted file mode 100644 index f1358303135e..000000000000 --- a/modules/kmm-building-and-signing-a-moduleloader-container-image.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-building-and-signing-a-moduleloader-container-image_{context}"] -= Building and signing a ModuleLoader container image - -Use this procedure if you have source code and must build your image first. - -The following YAML file builds a new container image using the source code from the repository. The image produced is saved back in the registry with a temporary name, and this temporary image is then signed using the parameters in the `sign` section. - -The temporary image name is based on the final image name and is set to be `<containerImage>:<tag>-<namespace>_<module name>_kmm_unsigned`. 
- -For example, using the following YAML file, Kernel Module Management (KMM) builds an image named `example.org/repository/minimal-driver:final-default_example-module_kmm_unsigned` containing the build with unsigned kmods and push it to the registry. Then it creates a second image named `example.org/repository/minimal-driver:final` that contains the signed kmods. It is this second image that is loaded by the `DaemonSet` object and deploys the kmods to the cluster nodes. - -After it is signed, the temporary image can be safely deleted from the registry. It will be rebuilt, if needed. - -.Prerequisites - -* The `keySecret` and `certSecret` secrets have been created. - -.Procedure - -. Apply the YAML file: -+ -[source,yaml] ----- ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: example-module-dockerfile - namespace: default <1> -data: - Dockerfile: | - ARG DTK_AUTO - ARG KERNEL_VERSION - FROM ${DTK_AUTO} as builder - WORKDIR /build/ - RUN git clone -b main --single-branch https://github.com/rh-ecosystem-edge/kernel-module-management.git - WORKDIR kernel-module-management/ci/kmm-kmod/ - RUN make - FROM registry.access.redhat.com/ubi9/ubi:latest - ARG KERNEL_VERSION - RUN yum -y install kmod && yum clean all - RUN mkdir -p /opt/lib/modules/${KERNEL_VERSION} - COPY --from=builder /build/kernel-module-management/ci/kmm-kmod/*.ko /opt/lib/modules/${KERNEL_VERSION}/ - RUN /usr/sbin/depmod -b /opt ---- -apiVersion: kmm.sigs.x-k8s.io/v1beta1 -kind: Module -metadata: - name: example-module - namespace: default <1> -spec: - moduleLoader: - serviceAccountName: default <2> - container: - modprobe: - moduleName: simple_kmod - kernelMappings: - - regexp: '^.*\.x86_64$' - containerImage: < the name of the final driver container to produce> - build: - dockerfileConfigMap: - name: example-module-dockerfile - sign: - keySecret: - name: <private key secret name> - certSecret: - name: <certificate secret name> - filesToSign: - - /opt/lib/modules/4.18.0-348.2.1.el8_5.x86_64/kmm_ci_a.ko - imageRepoSecret: <3> - name: repo-pull-secret - selector: # top-level selector - kubernetes.io/arch: amd64 ----- - -<1> `namespace` - Replace `default` with a valid namespace. - -<2> `serviceAccountName` - The default `serviceAccountName` does not have the required permissions to run a module that is privileged. For information on creating a service account, see "Creating service accounts" in the "Additional resources" of this section. - -<3> `imageRepoSecret` - Used as `imagePullSecrets` in the `DaemonSet` object and to pull and push for the build and sign features. diff --git a/modules/kmm-building-in-cluster.adoc b/modules/kmm-building-in-cluster.adoc deleted file mode 100644 index 36869593a0b3..000000000000 --- a/modules/kmm-building-in-cluster.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-building-in-cluster_{context}"] - -= Building in the cluster - -KMM can build module loader images in the cluster. Follow these guidelines: - -* Provide build instructions using the `build` section of a kernel mapping. -* Copy the `Dockerfile` for your container image into a `ConfigMap` resource, under the `dockerfile` key. -* Ensure that the `ConfigMap` is located in the same namespace as the `Module`. - -KMM checks if the image name specified in the `containerImage` field exists. If it does, the build is skipped. - -Otherwise, KMM creates a `Build` resource to build your image. 
After the image is built, KMM proceeds with the `Module` reconciliation. See the following example. - -[source,yaml] ----- -# ... -- regexp: '^.+$' - containerImage: "some.registry/org/<my_kmod>:${KERNEL_FULL_VERSION}" - build: - buildArgs: <1> - - name: ARG_NAME - value: <some_value> - secrets: <2> - - name: <some_kubernetes_secret> <3> - baseImageRegistryTLS: - insecure: false <4> - insecureSkipTLSVerify: false <5> - dockerfileConfigMap: <6> - name: <my_kmod_dockerfile> - registryTLS: - insecure: false <7> - insecureSkipTLSVerify: false <8> ----- -<1> Optional. -<2> Optional. -<3> Will be mounted in the build pod as `/run/secrets/some-kubernetes-secret`. -<4> Optional: Avoid using this parameter. If set to `true`, the build will be allowed to pull the image in the Dockerfile `FROM` instruction using plain HTTP. -<5> Optional: Avoid using this parameter. If set to `true`, the build will skip any TLS server certificate validation when pulling the image in the Dockerfile `FROM` instruction using plain HTTP. -<6> Required. -<7> Optional: Avoid using this parameter. If set to `true`, KMM will be allowed to check if the container image already exists using plain HTTP. -<8> Optional: Avoid using this parameter. If set to `true`, KMM will skip any TLS server certificate validation when checking if the container image already exists. diff --git a/modules/kmm-checking-the-keys.adoc b/modules/kmm-checking-the-keys.adoc deleted file mode 100644 index ef496b4632cd..000000000000 --- a/modules/kmm-checking-the-keys.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-checking-the-keys_{context}"] -= Checking the keys - -After you have added the keys, you must check them to ensure they are set correctly. - -.Procedure - -. Check to ensure the public key secret is set correctly: -+ -[source,terminal] ----- -$ oc get secret -o yaml <certificate secret name> | awk '/cert/{print $2; exit}' | base64 -d | openssl x509 -inform der -text ----- -+ -This should display a certificate with a Serial Number, Issuer, Subject, and more. - -. Check to ensure the private key secret is set correctly: -+ -[source,terminal] ----- -$ oc get secret -o yaml <private key secret name> | awk '/key/{print $2; exit}' | base64 -d ----- -+ -This should display the key enclosed in the `-----BEGIN PRIVATE KEY-----` and `-----END PRIVATE KEY-----` lines. diff --git a/modules/kmm-configuring-the-lookup-path-on-nodes.adoc b/modules/kmm-configuring-the-lookup-path-on-nodes.adoc deleted file mode 100644 index 99d0a0a7d38e..000000000000 --- a/modules/kmm-configuring-the-lookup-path-on-nodes.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-configuring-the-lookup-path-on-nodes_{context}"] -= Configuring the lookup path on nodes - -On {product-title} nodes, the set of default lookup paths for firmwares does not include the `/var/lib/firmware` path. - -.Procedure - -. 
Use the Machine Config Operator to create a `MachineConfig` custom resource (CR) that contains the `/var/lib/firmware` path: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker <1> - name: 99-worker-kernel-args-firmware-path -spec: - kernelArguments: - - 'firmware_class.path=/var/lib/firmware' ----- -<1> You can configure the label based on your needs. In the case of {sno}, use either `control-pane` or `master` objects. - - -. By applying the `MachineConfig` CR, the nodes are automatically rebooted. diff --git a/modules/kmm-creating-module-cr.adoc b/modules/kmm-creating-module-cr.adoc deleted file mode 100644 index 26e533005f5e..000000000000 --- a/modules/kmm-creating-module-cr.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-creating-module-cr_{context}"] - -= The Module custom resource definition - -The `Module` custom resource definition (CRD) represents a kernel module that can be loaded on all or select nodes in the cluster, through a module loader image. -A `Module` custom resource (CR) specifies one or more kernel versions with which it is compatible, and a node selector. - -The compatible versions for a `Module` resource are listed under `.spec.moduleLoader.container.kernelMappings`. -A kernel mapping can either match a `literal` version, or use `regexp` to match many of them at the same time. - -The reconciliation loop for the `Module` resource runs the following steps: - -. List all nodes matching `.spec.selector`. -. Build a set of all kernel versions running on those nodes. -. For each kernel version: - .. Go through `.spec.moduleLoader.container.kernelMappings` and find the appropriate container image name. If the kernel mapping has `build` or `sign` defined and the container image does not already exist, run the build, the signing job, or both, as needed. -.. Create a module loader daemon set with the container image determined in the previous step. -.. If `.spec.devicePlugin` is defined, create a device plugin daemon set using the configuration specified under `.spec.devicePlugin.container`. -. Run `garbage-collect` on: - .. Existing daemon set resources targeting kernel versions that are not run by any node in the cluster. - .. Successful build jobs. - .. Successful signing jobs. diff --git a/modules/kmm-creating-moduleloader-image.adoc b/modules/kmm-creating-moduleloader-image.adoc deleted file mode 100644 index d3bc271a203a..000000000000 --- a/modules/kmm-creating-moduleloader-image.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-creating-moduleloader-image_{context}"] -= Using a ModuleLoader image - -Kernel Module Management (KMM) works with purpose-built module loader images. -These are standard OCI images that must satisfy the following requirements: - -* `.ko` files must be located in `+/opt/lib/modules/${KERNEL_VERSION}+`. -* `modprobe` and `sleep` binaries must be defined in the `$PATH` variable. 
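As an illustration of the two requirements above, a quick local spot-check of a candidate module loader image might look like the following sketch; `podman`, the image name, and the kernel version are assumptions, not values taken from this document:

[source,terminal]
----
$ podman run --rm <moduleloader_image> \
    sh -c 'ls /opt/lib/modules/<kernel_version>/*.ko && command -v modprobe && command -v sleep'
----

If the image satisfies both requirements, the command lists the `.ko` files and prints the paths of the `modprobe` and `sleep` binaries.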
diff --git a/modules/kmm-debugging-and-troubleshooting.adoc b/modules/kmm-debugging-and-troubleshooting.adoc deleted file mode 100644 index fca5f3b15d55..000000000000 --- a/modules/kmm-debugging-and-troubleshooting.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-debugging-and-troubleshooting_{context}"] -= Debugging and troubleshooting - -If the kmods in your driver container are not signed or are signed with the wrong key, then the container can enter a `PostStartHookError` or `CrashLoopBackOff` status. You can verify by running the `oc describe` command on your container, which displays the following message in this scenario: - -[source,terminal] ----- -modprobe: ERROR: could not insert '<your_kmod_name>': Required key not available ----- diff --git a/modules/kmm-deploying-modules.adoc b/modules/kmm-deploying-modules.adoc deleted file mode 100644 index 77731b3a910e..000000000000 --- a/modules/kmm-deploying-modules.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-deploy-kernel-modules_{context}"] -= Kernel module deployment - -For each `Module` resource, Kernel Module Management (KMM) can create a number of `DaemonSet` resources: - -* One ModuleLoader `DaemonSet` per compatible kernel version running in the cluster. -* One device plugin `DaemonSet`, if configured. - -The module loader daemon set resources run ModuleLoader images to load kernel modules. -A module loader image is an OCI image that contains the `.ko` files and both the `modprobe` and `sleep` binaries. - -When the module loader pod is created, the pod runs `modprobe` to insert the specified module into the kernel. -It then enters a sleep state until it is terminated. -When that happens, the `ExecPreStop` hook runs `modprobe -r` to unload the kernel module. - -If the `.spec.devicePlugin` attribute is configured in a `Module` resource, then KMM creates a link:https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/[device plugin] -daemon set in the cluster. -That daemon set targets: - -* Nodes that match the `.spec.selector` of the `Module` resource. -* Nodes with the kernel module loaded (where the module loader pod is in the `Ready` condition). diff --git a/modules/kmm-example-cr.adoc b/modules/kmm-example-cr.adoc deleted file mode 100644 index 49b506cdda9c..000000000000 --- a/modules/kmm-example-cr.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-example-cr_{context}"] -= Example PreflightValidationOCP resource - -This section shows an example of the `PreflightValidationOCP` resource in the YAML format. - -The example verifies all the currently present modules against the upcoming kernel version included in the {product-title} release 4.11.18, which the following release image points to: - -[source,terminal] ----- -quay.io/openshift-release-dev/ocp-release@sha256:22e149142517dfccb47be828f012659b1ccf71d26620e6f62468c264a7ce7863 ----- - -Because `.spec.pushBuiltImage` is set to `true`, KMM pushes the resulting images of Build/Sign into the defined repositories. 
- -[source,yaml] ----- -apiVersion: kmm.sigs.x-k8s.io/v1beta1 -kind: PreflightValidationOCP -metadata: - name: preflight -spec: - releaseImage: quay.io/openshift-release-dev/ocp-release@sha256:22e149142517dfccb47be828f012659b1ccf71d26620e6f62468c264a7ce7863 - pushBuiltImage: true ----- diff --git a/modules/kmm-example-module-cr.adoc b/modules/kmm-example-module-cr.adoc deleted file mode 100644 index 1e9dfe79303b..000000000000 --- a/modules/kmm-example-module-cr.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: REFERENCE -[id="kmm-example-cr_{context}"] - -= Example Module CR - -The following is an annotated `Module` example: - -[source,yaml] ----- -apiVersion: kmm.sigs.x-k8s.io/v1beta1 -kind: Module -metadata: - name: <my_kmod> -spec: - moduleLoader: - container: - modprobe: - moduleName: <my_kmod> <1> - dirName: /opt <2> - firmwarePath: /firmware <3> - parameters: <4> - - param=1 - kernelMappings: <5> - - literal: 6.0.15-300.fc37.x86_64 - containerImage: some.registry/org/my-kmod:6.0.15-300.fc37.x86_64 - - regexp: '^.+\fc37\.x86_64$' <6> - containerImage: "some.other.registry/org/<my_kmod>:${KERNEL_FULL_VERSION}" - - regexp: '^.+$' <7> - containerImage: "some.registry/org/<my_kmod>:${KERNEL_FULL_VERSION}" - build: - buildArgs: <8> - - name: ARG_NAME - value: <some_value> - secrets: - - name: <some_kubernetes_secret> <9> - baseImageRegistryTLS: <10> - insecure: false - insecureSkipTLSVerify: false <11> - dockerfileConfigMap: <12> - name: <my_kmod_dockerfile> - sign: - certSecret: - name: <cert_secret> <13> - keySecret: - name: <key_secret> <14> - filesToSign: - - /opt/lib/modules/${KERNEL_FULL_VERSION}/<my_kmod>.ko - registryTLS: <15> - insecure: false <16> - insecureSkipTLSVerify: false - serviceAccountName: <sa_module_loader> <17> - devicePlugin: <18> - container: - image: some.registry/org/device-plugin:latest <19> - env: - - name: MY_DEVICE_PLUGIN_ENV_VAR - value: SOME_VALUE - volumeMounts: <20> - - mountPath: /some/mountPath - name: <device_plugin_volume> - volumes: <21> - - name: <device_plugin_volume> - configMap: - name: <some_configmap> - serviceAccountName: <sa_device_plugin> <22> - imageRepoSecret: <23> - name: <secret_name> - selector: - node-role.kubernetes.io/worker: "" ----- -<1> Required. -<2> Optional. -<3> Optional: Copies `/firmware/*` into `/var/lib/firmware/` on the node. -<4> Optional. -<5> At least one kernel item is required. -<6> For each node running a kernel matching the regular expression, KMM creates a `DaemonSet` resource running the image specified in `containerImage` with `${KERNEL_FULL_VERSION}` replaced with the kernel version. -<7> For any other kernel, build the image using the Dockerfile in the `my-kmod` ConfigMap. -<8> Optional. -<9> Optional: A value for `some-kubernetes-secret` can be obtained from the build environment at `/run/secrets/some-kubernetes-secret`. -<10> Optional: Avoid using this parameter. If set to `true`, the build is allowed to pull the image in the Dockerfile `FROM` instruction using plain HTTP. -<11> Optional: Avoid using this parameter. If set to `true`, the build will skip any TLS server certificate validation when pulling the image in the Dockerfile `FROM` instruction using plain HTTP. -<12> Required. -<13> Required: A secret holding the public secureboot key with the key 'cert'. -<14> Required: A secret holding the private secureboot key with the key 'key'. -<15> Optional: Avoid using this parameter. 
If set to `true`, KMM will be allowed to check if the container image already exists using plain HTTP. -<16> Optional: Avoid using this parameter. If set to `true`, KMM will skip any TLS server certificate validation when checking if the container image already exists. -<17> Optional. -<18> Optional. -<19> Required: If the device plugin section is present. -<20> Optional. -<21> Optional. -<22> Optional. -<23> Optional: Used to pull module loader and device plugin images. diff --git a/modules/kmm-firmware-support.adoc b/modules/kmm-firmware-support.adoc deleted file mode 100644 index 4649590e412f..000000000000 --- a/modules/kmm-firmware-support.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-firmware-support_{context}"] -= KMM firmware support - -Kernel modules sometimes need to load firmware files from the file system. KMM supports copying firmware files from the ModuleLoader image to the node's file system. - -The contents of `.spec.moduleLoader.container.modprobe.firmwarePath` are copied into the `/var/lib/firmware` path on the node before running the `modprobe` command to insert the kernel module. - -All files and empty directories are removed from that location before running the `modprobe -r` command to unload the kernel module, when the pod is terminated. diff --git a/modules/kmm-gathering-data-for-kmm-hub.adoc b/modules/kmm-gathering-data-for-kmm-hub.adoc deleted file mode 100644 index f5e26169a58b..000000000000 --- a/modules/kmm-gathering-data-for-kmm-hub.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-gathering-data-for-kmm-hub_{context}"] -= Gathering data for KMM-Hub - -.Procedure - -. Gather the data for the KMM Operator hub controller manager: - -.. Set the `MUST_GATHER_IMAGE` variable: -+ -[source,terminal] ----- -$ export MUST_GATHER_IMAGE=$(oc get deployment -n openshift-kmm-hub kmm-operator-hub-controller-manager -ojsonpath='{.spec.template.spec.containers[?(@.name=="manager")].env[?(@.name=="RELATED_IMAGES_MUST_GATHER")].value}') ----- -+ -[NOTE] -==== -Use the `-n <namespace>` switch to specify a namespace if you installed KMM in a custom namespace. -==== - -.. Run the `must-gather` tool: -+ -[source,terminal] ----- -$ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather -u ----- - -. View the Operator logs: -+ -[source,terminal] ----- -$ oc logs -fn openshift-kmm-hub deployments/kmm-operator-hub-controller-manager ----- -+ -.Example output -[%collapsible] -==== -[source,terminal] ----- -I0417 11:34:08.807472 1 request.go:682] Waited for 1.023403273s due to client-side throttling, not priority and fairness, request: GET:https://172.30.0.1:443/apis/tuned.openshift.io/v1?timeout=32s -I0417 11:34:12.373413 1 listener.go:44] kmm-hub/controller-runtime/metrics "msg"="Metrics server is starting to listen" "addr"="127.0.0.1:8080" -I0417 11:34:12.376253 1 main.go:150] kmm-hub/setup "msg"="Adding controller" "name"="ManagedClusterModule" -I0417 11:34:12.376621 1 main.go:186] kmm-hub/setup "msg"="starting manager" -I0417 11:34:12.377690 1 leaderelection.go:248] attempting to acquire leader lease openshift-kmm-hub/kmm-hub.sigs.x-k8s.io... 
-I0417 11:34:12.378078 1 internal.go:366] kmm-hub "msg"="Starting server" "addr"={"IP":"127.0.0.1","Port":8080,"Zone":""} "kind"="metrics" "path"="/metrics" -I0417 11:34:12.378222 1 internal.go:366] kmm-hub "msg"="Starting server" "addr"={"IP":"::","Port":8081,"Zone":""} "kind"="health probe" -I0417 11:34:12.395703 1 leaderelection.go:258] successfully acquired lease openshift-kmm-hub/kmm-hub.sigs.x-k8s.io -I0417 11:34:12.396334 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "source"="kind source: *v1beta1.ManagedClusterModule" -I0417 11:34:12.396403 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "source"="kind source: *v1.ManifestWork" -I0417 11:34:12.396430 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "source"="kind source: *v1.Build" -I0417 11:34:12.396469 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "source"="kind source: *v1.Job" -I0417 11:34:12.396522 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "source"="kind source: *v1.ManagedCluster" -I0417 11:34:12.396543 1 controller.go:193] kmm-hub "msg"="Starting Controller" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" -I0417 11:34:12.397175 1 controller.go:185] kmm-hub "msg"="Starting EventSource" "controller"="imagestream" "controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" "source"="kind source: *v1.ImageStream" -I0417 11:34:12.397221 1 controller.go:193] kmm-hub "msg"="Starting Controller" "controller"="imagestream" "controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" -I0417 11:34:12.498335 1 filter.go:196] kmm-hub "msg"="Listing all ManagedClusterModules" "managedcluster"="local-cluster" -I0417 11:34:12.498570 1 filter.go:205] kmm-hub "msg"="Listed ManagedClusterModules" "count"=0 "managedcluster"="local-cluster" -I0417 11:34:12.498629 1 filter.go:238] kmm-hub "msg"="Adding reconciliation requests" "count"=0 "managedcluster"="local-cluster" -I0417 11:34:12.498687 1 filter.go:196] kmm-hub "msg"="Listing all ManagedClusterModules" "managedcluster"="sno1-0" -I0417 11:34:12.498750 1 filter.go:205] kmm-hub "msg"="Listed ManagedClusterModules" "count"=0 "managedcluster"="sno1-0" -I0417 11:34:12.498801 1 filter.go:238] kmm-hub "msg"="Adding reconciliation requests" "count"=0 "managedcluster"="sno1-0" -I0417 11:34:12.501947 1 controller.go:227] kmm-hub "msg"="Starting workers" "controller"="imagestream" "controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" "worker count"=1 -I0417 11:34:12.501948 1 controller.go:227] kmm-hub "msg"="Starting workers" "controller"="ManagedClusterModule" "controllerGroup"="hub.kmm.sigs.x-k8s.io" "controllerKind"="ManagedClusterModule" "worker count"=1 -I0417 11:34:12.502285 1 imagestream_reconciler.go:50] kmm-hub "msg"="registered imagestream info mapping" "ImageStream"={"name":"driver-toolkit","namespace":"openshift"} "controller"="imagestream" 
"controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" "dtkImage"="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:df42b4785a7a662b30da53bdb0d206120cf4d24b45674227b16051ba4b7c3934" "name"="driver-toolkit" "namespace"="openshift" "osImageVersion"="412.86.202302211547-0" "reconcileID"="e709ff0a-5664-4007-8270-49b5dff8bae9" ----- -==== diff --git a/modules/kmm-gathering-data-for-kmm.adoc b/modules/kmm-gathering-data-for-kmm.adoc deleted file mode 100644 index 70ed121fbe67..000000000000 --- a/modules/kmm-gathering-data-for-kmm.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-gathering-data-for-kmm_{context}"] -= Gathering data for KMM - -.Procedure - -. Gather the data for the KMM Operator controller manager: - -.. Set the `MUST_GATHER_IMAGE` variable: -+ -[source,terminal] ----- -$ export MUST_GATHER_IMAGE=$(oc get deployment -n openshift-kmm kmm-operator-controller-manager -ojsonpath='{.spec.template.spec.containers[?(@.name=="manager")].env[?(@.name=="RELATED_IMAGES_MUST_GATHER")].value}') ----- -+ -[NOTE] -==== -Use the `-n <namespace>` switch to specify a namespace if you installed KMM in a custom namespace. -==== - -.. Run the `must-gather` tool: -+ -[source,terminal] ----- -$ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather ----- - -. View the Operator logs: -+ -[source,terminal] ----- -$ oc logs -fn openshift-kmm deployments/kmm-operator-controller-manager ----- -+ -.Example output -[%collapsible] -==== -[source,terminal] ----- -I0228 09:36:37.352405 1 request.go:682] Waited for 1.001998746s due to client-side throttling, not priority and fairness, request: GET:https://172.30.0.1:443/apis/machine.openshift.io/v1beta1?timeout=32s -I0228 09:36:40.767060 1 listener.go:44] kmm/controller-runtime/metrics "msg"="Metrics server is starting to listen" "addr"="127.0.0.1:8080" -I0228 09:36:40.769483 1 main.go:234] kmm/setup "msg"="starting manager" -I0228 09:36:40.769907 1 internal.go:366] kmm "msg"="Starting server" "addr"={"IP":"127.0.0.1","Port":8080,"Zone":""} "kind"="metrics" "path"="/metrics" -I0228 09:36:40.770025 1 internal.go:366] kmm "msg"="Starting server" "addr"={"IP":"::","Port":8081,"Zone":""} "kind"="health probe" -I0228 09:36:40.770128 1 leaderelection.go:248] attempting to acquire leader lease openshift-kmm/kmm.sigs.x-k8s.io... 
-I0228 09:36:40.784396 1 leaderelection.go:258] successfully acquired lease openshift-kmm/kmm.sigs.x-k8s.io -I0228 09:36:40.784876 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" "source"="kind source: *v1beta1.Module" -I0228 09:36:40.784925 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" "source"="kind source: *v1.DaemonSet" -I0228 09:36:40.784968 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" "source"="kind source: *v1.Build" -I0228 09:36:40.785001 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" "source"="kind source: *v1.Job" -I0228 09:36:40.785025 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" "source"="kind source: *v1.Node" -I0228 09:36:40.785039 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="Module" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="Module" -I0228 09:36:40.785458 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="PodNodeModule" "controllerGroup"="" "controllerKind"="Pod" "source"="kind source: *v1.Pod" -I0228 09:36:40.786947 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="PreflightValidation" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidation" "source"="kind source: *v1beta1.PreflightValidation" -I0228 09:36:40.787406 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="PreflightValidation" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidation" "source"="kind source: *v1.Build" -I0228 09:36:40.787474 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="PreflightValidation" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidation" "source"="kind source: *v1.Job" -I0228 09:36:40.787488 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="PreflightValidation" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidation" "source"="kind source: *v1beta1.Module" -I0228 09:36:40.787603 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="NodeKernel" "controllerGroup"="" "controllerKind"="Node" "source"="kind source: *v1.Node" -I0228 09:36:40.787634 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="NodeKernel" "controllerGroup"="" "controllerKind"="Node" -I0228 09:36:40.787680 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="PreflightValidation" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidation" -I0228 09:36:40.785607 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="imagestream" "controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" "source"="kind source: *v1.ImageStream" -I0228 09:36:40.787822 1 controller.go:185] kmm "msg"="Starting EventSource" "controller"="preflightvalidationocp" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidationOCP" "source"="kind source: *v1beta1.PreflightValidationOCP" -I0228 09:36:40.787853 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="imagestream" "controllerGroup"="image.openshift.io" "controllerKind"="ImageStream" -I0228 09:36:40.787879 1 
controller.go:185] kmm "msg"="Starting EventSource" "controller"="preflightvalidationocp" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidationOCP" "source"="kind source: *v1beta1.PreflightValidation" -I0228 09:36:40.787905 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="preflightvalidationocp" "controllerGroup"="kmm.sigs.x-k8s.io" "controllerKind"="PreflightValidationOCP" -I0228 09:36:40.786489 1 controller.go:193] kmm "msg"="Starting Controller" "controller"="PodNodeModule" "controllerGroup"="" "controllerKind"="Pod" ----- -==== diff --git a/modules/kmm-image-validation-stage.adoc b/modules/kmm-image-validation-stage.adoc deleted file mode 100644 index e6591455ad6b..000000000000 --- a/modules/kmm-image-validation-stage.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-image-validation-stage_{context}"] -= Image validation stage - -Image validation is always the first stage of the preflight validation to be executed. If image validation is successful, no other validations are run on that specific module. - -Image validation consists of two stages: - -. Image existence and accessibility. The code tries to access the image defined for the upgraded kernel in the module and get its manifests. - -. Verify the presence of the kernel module defined in the `Module` in the correct path for future `modprobe` execution. The correct path is `<dirname>/lib/modules/<upgraded_kernel>/`. - -If this validation is successful, it probably means that the kernel module was compiled with the correct Linux headers. diff --git a/modules/kmm-installation.adoc b/modules/kmm-installation.adoc deleted file mode 100644 index fd18595fc29a..000000000000 --- a/modules/kmm-installation.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-install_{context}"] -= Installing the Kernel Module Management Operator - -As a cluster administrator, you can install the Kernel Module Management (KMM) Operator by using the OpenShift CLI or the web console. - -The KMM Operator is supported on {product-title} 4.12 and later. -Installing KMM on version 4.11 does not require specific additional steps. -For details on installing KMM on version 4.10 and earlier, see the section "Installing the Kernel Module Management Operator on earlier versions of {product-title}". \ No newline at end of file diff --git a/modules/kmm-installing-older-versions.adoc b/modules/kmm-installing-older-versions.adoc deleted file mode 100644 index 42892f5e4f59..000000000000 --- a/modules/kmm-installing-older-versions.adoc +++ /dev/null @@ -1,140 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-install-older-version_{context}"] -= Installing the Kernel Module Management Operator on earlier versions of {product-title} - -The KMM Operator is supported on {product-title} 4.12 and later. -For version 4.10 and earlier, you must create a new `SecurityContextConstraint` object and bind it to the Operator's `ServiceAccount`. -As a cluster administrator, you can install the Kernel Module Management (KMM) Operator by using the OpenShift CLI. - -.Prerequisites - -* You have a running {product-title} cluster. -* You installed the OpenShift CLI (`oc`). 
-* You are logged into the OpenShift CLI as a user with `cluster-admin` privileges. - -.Procedure - -. Install KMM in the `openshift-kmm` namespace: - -.. Create the following `Namespace` CR and save the YAML file, for example, `kmm-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-kmm ----- - -.. Create the following `SecurityContextConstraint` object and save the YAML file, for example, `kmm-security-constraint.yaml`: -+ -[source,yaml] ----- -allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: false -allowPrivilegedContainer: false -allowedCapabilities: - - NET_BIND_SERVICE -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -groups: [] -kind: SecurityContextConstraints -metadata: - name: restricted-v2 -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: - - ALL -runAsUser: - type: MustRunAsRange -seLinuxContext: - type: MustRunAs -seccompProfiles: - - runtime/default -supplementalGroups: - type: RunAsAny -users: [] -volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - projected - - secret ----- - -.. Bind the `SecurityContextConstraint` object to the Operator's `ServiceAccount` by running the following commands: -+ -[source,terminal] ----- -$ oc apply -f kmm-security-constraint.yaml ----- -+ -[source,terminal] ----- -$ oc adm policy add-scc-to-user kmm-security-constraint -z kmm-operator-controller-manager -n openshift-kmm ----- - -.. Create the following `OperatorGroup` CR and save the YAML file, for example, `kmm-op-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: kernel-module-management - namespace: openshift-kmm ----- - -.. Create the following `Subscription` CR and save the YAML file, for example, `kmm-sub.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: kernel-module-management - namespace: openshift-kmm -spec: - channel: release-1.0 - installPlanApproval: Automatic - name: kernel-module-management - source: redhat-operators - sourceNamespace: openshift-marketplace - startingCSV: kernel-module-management.v1.0.0 ----- - -.. Create the subscription object by running the following command: -+ -[source,terminal] ----- -$ oc create -f kmm-sub.yaml ----- - -.Verification - -* To verify that the Operator deployment is successful, run the following command: -+ -[source,terminal] ----- -$ oc get -n openshift-kmm deployments.apps kmm-operator-controller-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -kmm-operator-controller-manager 1/1 1 1 97s ----- -+ -The Operator is available. \ No newline at end of file diff --git a/modules/kmm-installing-using-cli.adoc b/modules/kmm-installing-using-cli.adoc deleted file mode 100644 index 5256434d6e3f..000000000000 --- a/modules/kmm-installing-using-cli.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-install-using-cli_{context}"] -= Installing the Kernel Module Management Operator by using the CLI - -As a cluster administrator, you can install the Kernel Module Management (KMM) Operator by using the OpenShift CLI. - -.Prerequisites - -* You have a running {product-title} cluster. 
-* You installed the OpenShift CLI (`oc`). -* You are logged into the OpenShift CLI as a user with `cluster-admin` privileges. - -.Procedure - -. Install KMM in the `openshift-kmm` namespace: - -.. Create the following `Namespace` CR and save the YAML file, for example, `kmm-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-kmm ----- - -.. Create the following `OperatorGroup` CR and save the YAML file, for example, `kmm-op-group.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: kernel-module-management - namespace: openshift-kmm ----- - -.. Create the following `Subscription` CR and save the YAML file, for example, `kmm-sub.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: kernel-module-management - namespace: openshift-kmm -spec: - channel: release-1.0 - installPlanApproval: Automatic - name: kernel-module-management - source: redhat-operators - sourceNamespace: openshift-marketplace - startingCSV: kernel-module-management.v1.0.0 ----- - -.. Create the subscription object by running the following command: -+ -[source,terminal] ----- -$ oc create -f kmm-sub.yaml ----- - -.Verification - -* To verify that the Operator deployment is successful, run the following command: -+ -[source,terminal] ----- -$ oc get -n openshift-kmm deployments.apps kmm-operator-controller-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -kmm-operator-controller-manager 1/1 1 1 97s ----- -+ -The Operator is available. \ No newline at end of file diff --git a/modules/kmm-installing-using-web-console.adoc b/modules/kmm-installing-using-web-console.adoc deleted file mode 100644 index 11dbb369b7df..000000000000 --- a/modules/kmm-installing-using-web-console.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-install-using-web-console_{context}"] -= Installing the Kernel Module Management Operator using the web console - -As a cluster administrator, you can install the Kernel Module Management (KMM) Operator using the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. -. Install the Kernel Module Management Operator: -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Select *Kernel Module Management Operator* from the list of available Operators, and then click *Install*. - -.. On the *Install Operator* page, select the *Installation mode* as *A specific namespace on the cluster*. - -.. From the *Installed Namespace* list, select the `openshift-kmm` namespace. - -.. Click *Install*. - -.Verification - -To verify that KMM Operator installed successfully: - -. Navigate to the *Operators* -> *Installed Operators* page. -. Ensure that *Kernel Module Management Operator* is listed in the *openshift-kmm* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation, an Operator might display a *Failed* status. If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== - -.Troubleshooting -. To troubleshoot issues with Operator installation: -+ -.. Navigate to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -.. 
Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-kmm` project. diff --git a/modules/kmm-must-gather-tool.adoc b/modules/kmm-must-gather-tool.adoc deleted file mode 100644 index 6ae9fa0f0352..000000000000 --- a/modules/kmm-must-gather-tool.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-must-gather-tool_{context}"] -= Using the must-gather tool - -The `oc adm must-gather` command is the preferred way to collect a support bundle and provide debugging information to Red Hat -Support. Collect specific information by running the command with the appropriate arguments as described in the following sections. diff --git a/modules/kmm-preflight-validation-stages-per-module.adoc b/modules/kmm-preflight-validation-stages-per-module.adoc deleted file mode 100644 index 9fda2f48bd4d..000000000000 --- a/modules/kmm-preflight-validation-stages-per-module.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-preflight-validation-stages-per-module_{context}"] -= Preflight validation stages per Module - -Preflight runs the following validations on every KMM Module present in the cluster: - -. Image validation stage -. Build validation stage -. Sign validation stage diff --git a/modules/kmm-running-depmod.adoc b/modules/kmm-running-depmod.adoc deleted file mode 100644 index 20355ad8d855..000000000000 --- a/modules/kmm-running-depmod.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-running-depmod_{context}"] - -= Running depmod - -If your module loader image contains several kernel modules and if one of the modules depends on another module, it is best practice to run `depmod` at the end of the build process to generate dependencies and map files. - -[NOTE] -==== -You must have a Red Hat subscription to download the `kernel-devel` package. -==== - -.Procedure - -. To generate `modules.dep` and `.map` files for a specific kernel version, run `+depmod -b /opt ${KERNEL_VERSION}+`. - -[id="example-dockerfile_{context}"] -== Example Dockerfile - -If you are building your image on {product-title}, consider using the Driver Tool Kit (DTK). - -For further information, see link:https://cloud.redhat.com/blog/how-to-use-entitled-image-builds-to-build-drivercontainers-with-ubi-on-openshift[using an entitled build]. 
- -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kmm-ci-dockerfile -data: - dockerfile: | - ARG DTK_AUTO - FROM ${DTK_AUTO} as builder - ARG KERNEL_VERSION - WORKDIR /usr/src - RUN ["git", "clone", "https://github.com/rh-ecosystem-edge/kernel-module-management.git"] - WORKDIR /usr/src/kernel-module-management/ci/kmm-kmod - RUN KERNEL_SRC_DIR=/lib/modules/${KERNEL_VERSION}/build make all - FROM registry.redhat.io/ubi9/ubi-minimal - ARG KERNEL_VERSION - RUN microdnf install kmod - COPY --from=builder /usr/src/kernel-module-management/ci/kmm-kmod/kmm_ci_a.ko /opt/lib/modules/${KERNEL_VERSION}/ - COPY --from=builder /usr/src/kernel-module-management/ci/kmm-kmod/kmm_ci_b.ko /opt/lib/modules/${KERNEL_VERSION}/ - RUN depmod -b /opt ${KERNEL_VERSION} ----- diff --git a/modules/kmm-security.adoc b/modules/kmm-security.adoc deleted file mode 100644 index 86c674d4df3a..000000000000 --- a/modules/kmm-security.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: REFERENCE -[id="kmm-security_{context}"] -= Security and permissions - -[IMPORTANT] -==== -Loading kernel modules is a highly sensitive operation. -After they are loaded, kernel modules have all possible permissions to do any kind of operation on the node. -==== - -[id="serviceaccounts-and-securitycontextconstraint_{context}"] -== ServiceAccounts and SecurityContextConstraints - -Kernel Module Management (KMM) creates a privileged workload to load the kernel modules on nodes. -That workload needs `ServiceAccounts` allowed to use the `privileged` `SecurityContextConstraint` (SCC) resource. - -The authorization model for that workload depends on the namespace of the `Module` resource, as well as its spec. - -* If the `.spec.moduleLoader.serviceAccountName` or `.spec.devicePlugin.serviceAccountName` fields are set, they are always used. -* If those fields are not set, then: - ** If the `Module` resource is created in the operator's namespace (`openshift-kmm` by default), then KMM uses its default, powerful `ServiceAccounts` to run the daemon sets. - ** If the `Module` resource is created in any other namespace, then KMM runs the daemon sets as the namespace's `default` `ServiceAccount`. The `Module` resource cannot run a privileged workload unless you manually enable it to use the `privileged` SCC. - -[IMPORTANT] -==== -`openshift-kmm` is a trusted namespace. - -When setting up RBAC permissions, remember that any user or `ServiceAccount` creating a `Module` resource in the `openshift-kmm` namespace results in KMM automatically running privileged workloads on potentially all nodes in the cluster. -==== - -To allow any `ServiceAccount` to use the `privileged` SCC and therefore to run module loader or device plugin pods, use the following command: - -[source,terminal] ----- -$ oc adm policy add-scc-to-user privileged -z "${serviceAccountName}" [ -n "${namespace}" ] ----- - -[id="pod-security-standards_{context}"] -== Pod security standards - -OpenShift runs a synchronization mechanism that sets the namespace Pod Security level automatically based on -the security contexts in use. No action is needed. 
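Building on the authorization model above, a common pattern outside the `openshift-kmm` namespace is to create a dedicated service account, allow it to use the `privileged` SCC, and reference it from the `Module` spec. The namespace and account name in this sketch are hypothetical:

[source,terminal]
----
$ oc create serviceaccount kmm-module-loader -n my-driver-namespace
----

[source,terminal]
----
$ oc adm policy add-scc-to-user privileged -z kmm-module-loader -n my-driver-namespace
----

Set `.spec.moduleLoader.serviceAccountName` (and, if used, `.spec.devicePlugin.serviceAccountName`) in the `Module` resource to `kmm-module-loader` so that KMM runs the daemon sets with that account.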
diff --git a/modules/kmm-sign-validation-stage.adoc b/modules/kmm-sign-validation-stage.adoc deleted file mode 100644 index fec56f2b7b50..000000000000 --- a/modules/kmm-sign-validation-stage.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-sign-validation-stage_{context}"] -= Sign validation stage - -Sign validation is executed only when image validation has failed, there is a `sign` section in the `Module` that is relevant for the upgrade kernel, and build validation finished successfully in the event there was a `build` section in the `Module` relevant for the upgraded kernel. Sign validation will try to run the sign job and validate that it finishes successfully. - -If the `PushBuiltImage` flag is defined in the `PreflightValidationOCP` CR, sign validation will also try to push the resulting image to its registry. - -The resulting image is always the image defined in the `containerImage` field of the `Module`. The input image is either the output of the Build stage, or an image defined in the `UnsignedImage` field. - -[NOTE] -==== -If a `build` section exists, the `sign` section input image is the `build` section's output image. Therefore, in order for the input image to be available for the `sign` section, the `PushBuiltImage` flag must be defined in the `PreflightValidationOCP` CR. -==== diff --git a/modules/kmm-signing-a-prebuilt-driver-container.adoc b/modules/kmm-signing-a-prebuilt-driver-container.adoc deleted file mode 100644 index cb17d0a58350..000000000000 --- a/modules/kmm-signing-a-prebuilt-driver-container.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-signing-a-prebuilt-driver-container_{context}"] -= Signing a pre-built driver container - -Use this procedure if you have a pre-built image, such as an image either distributed by a hardware vendor or built elsewhere. - -The following YAML file adds the public/private key-pair as secrets with the required key names - `key` for the private key, `cert` for the public key. The cluster then pulls down the `unsignedImage` image, opens it, signs the kernel modules listed in `filesToSign`, adds them back, and pushes the resulting image as `containerImage`. - - -Kernel Module Management (KMM) should then deploy the DaemonSet that loads the signed kmods onto all the nodes that match the selector. The driver containers should run successfully on any nodes that have the public key in their MOK database, and any nodes that are not secure-boot enabled, which ignore the signature. They should fail to load on any that have secure-boot enabled but do not have that key in their MOK database. - -.Prerequisites - -* The `keySecret` and `certSecret` secrets have been created. - -.Procedure - -. Apply the YAML file: -+ -[source,yaml] ----- ---- -apiVersion: kmm.sigs.x-k8s.io/v1beta1 -kind: Module -metadata: - name: example-module -spec: - moduleLoader: - serviceAccountName: default - container: - modprobe: <1> - moduleName: '<your module name>' - kernelMappings: - # the kmods will be deployed on all nodes in the cluster with a kernel that matches the regexp - - regexp: '^.*\.x86_64$' - # the container to produce containing the signed kmods - containerImage: <image name e.g. 
quay.io/myuser/my-driver:<kernelversion>-signed> - sign: - # the image containing the unsigned kmods (we need this because we are not building the kmods within the cluster) - unsignedImage: <image name e.g. quay.io/myuser/my-driver:<kernelversion> > - keySecret: # a secret holding the private secureboot key with the key 'key' - name: <private key secret name> - certSecret: # a secret holding the public secureboot key with the key 'cert' - name: <certificate secret name> - filesToSign: # full path within the unsignedImage container to the kmod(s) to sign - - /opt/lib/modules/4.18.0-348.2.1.el8_5.x86_64/kmm_ci_a.ko - imageRepoSecret: - # the name of a secret containing credentials to pull unsignedImage and push containerImage to the registry - name: repo-pull-secret - selector: - kubernetes.io/arch: amd64 ----- - -<1> `modprobe` - The name of the kmod to load. diff --git a/modules/kmm-troubleshooting.adoc b/modules/kmm-troubleshooting.adoc deleted file mode 100644 index f36ec9acd82a..000000000000 --- a/modules/kmm-troubleshooting.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-troubleshooting_{context}"] -= Troubleshooting KMM - -When troubleshooting KMM installation issues, you can monitor logs to determine at which stage issues occur. -Then, retrieve diagnostic data relevant to that stage. diff --git a/modules/kmm-tuning-the-module-resource.adoc b/modules/kmm-tuning-the-module-resource.adoc deleted file mode 100644 index 01122b9a57f4..000000000000 --- a/modules/kmm-tuning-the-module-resource.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-tuning-the-module-resource_{context}"] -= Tuning the Module resource - -.Procedure - -* Set `.spec.moduleLoader.container.modprobe.firmwarePath` in the `Module` custom resource (CR): -+ -[source,yaml] ----- -apiVersion: kmm.sigs.x-k8s.io/v1beta1 -kind: Module -metadata: - name: my-kmod -spec: - moduleLoader: - container: - modprobe: - moduleName: my-kmod # Required - - firmwarePath: /firmware <1> ----- -<1> Optional: Copies `/firmware/*` into `/var/lib/firmware/` on the node. diff --git a/modules/kmm-using-driver-toolkit.adoc b/modules/kmm-using-driver-toolkit.adoc deleted file mode 100644 index 4ec01a876e35..000000000000 --- a/modules/kmm-using-driver-toolkit.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: PROCEDURE -[id="kmm-using-driver-toolkit_{context}"] - -= Using the Driver Toolkit - -The Driver Toolkit (DTK) is a convenient base image for building build module loader images. -It contains tools and libraries for the OpenShift version currently running in the cluster. - -.Procedure - -Use DTK as the first stage of a multi-stage `Dockerfile`. - -. Build the kernel modules. - -. Copy the `.ko` files into a smaller end-user image such as https://catalog.redhat.com/software/containers/ubi9/ubi-minimal[`ubi-minimal`]. - -. To leverage DTK in your in-cluster build, use the `DTK_AUTO` build argument. -The value is automatically set by KMM when creating the `Build` resource. See the following example. 
-+ -[source,dockerfile] ----- -ARG DTK_AUTO -FROM ${DTK_AUTO} as builder -ARG KERNEL_VERSION -WORKDIR /usr/src -RUN ["git", "clone", "https://github.com/rh-ecosystem-edge/kernel-module-management.git"] -WORKDIR /usr/src/kernel-module-management/ci/kmm-kmod -RUN KERNEL_SRC_DIR=/lib/modules/${KERNEL_VERSION}/build make all -FROM registry.redhat.io/ubi9/ubi-minimal -ARG KERNEL_VERSION -RUN microdnf install kmod -COPY --from=builder /usr/src/kernel-module-management/ci/kmm-kmod/kmm_ci_a.ko /opt/lib/modules/${KERNEL_VERSION}/ -COPY --from=builder /usr/src/kernel-module-management/ci/kmm-kmod/kmm_ci_b.ko /opt/lib/modules/${KERNEL_VERSION}/ -RUN depmod -b /opt ${KERNEL_VERSION} ----- diff --git a/modules/kmm-using-signing-with-kmm.adoc b/modules/kmm-using-signing-with-kmm.adoc deleted file mode 100644 index 6a68e5d2176e..000000000000 --- a/modules/kmm-using-signing-with-kmm.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * hardware_enablement/kmm-kernel-module-management.adoc - -:_content-type: CONCEPT -[id="kmm-using-signing-with-kmm_{context}"] -= Using signing with Kernel Module Management (KMM) - -On a Secure Boot enabled system, all kernel modules (kmods) must be signed with a public/private key-pair enrolled into the Machine Owner's Key (MOK) database. Drivers distributed as part of a distribution should already be signed by the distribution's private key, but for kernel modules build out-of-tree, KMM supports signing kernel modules using the `sign` section of the kernel mapping. - -For more details on using Secure Boot, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_monitoring_and_updating_the_kernel/signing-a-kernel-and-modules-for-secure-boot_managing-monitoring-and-updating-the-kernel#generating-a-public-and-private-key-pair_signing-a-kernel-and-modules-for-secure-boot[Generating a public and private key pair] - -.Prerequisites - -* A public private key pair in the correct (DER) format. -* At least one secure-boot enabled node with the public key enrolled in its MOK database. -* Either a pre-built driver container image, or the source code and `Dockerfile` needed to build one in-cluster. diff --git a/modules/kmm-validation-kickoff.adoc b/modules/kmm-validation-kickoff.adoc deleted file mode 100644 index 2a62e90d220b..000000000000 --- a/modules/kmm-validation-kickoff.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-validation-kickoff_{context}"] -= Validation kickoff - -Preflight validation is triggered by creating a `PreflightValidationOCP` resource in the cluster. This spec contains two fields: - -[source,terminal] ----- -type PreflightValidationOCPSpec struct { - // releaseImage describes the OCP release image that all Modules need to be checked against. - // +kubebuilder:validation:Required - ReleaseImage string `json:"releaseImage"` <1> - // Boolean flag that determines whether images build during preflight must also - // be pushed to a defined repository - // +optional - PushBuiltImage bool `json:"pushBuiltImage"` <2> -} ----- - -<1> `ReleaseImage` - Mandatory field that provides the name of the release image for the {product-title} version the cluster is upgraded to. - -<2> `PushBuiltImage` - If `true`, then the images created during the Build and Sign validation are pushed to their repositories (`false` by default). 
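As an illustrative sketch of this spec, a validation run could be kicked off by applying a resource such as the following; `<target_release_image>` is a placeholder for the release image pull spec of the {product-title} version that the cluster is upgraded to:

[source,terminal]
----
$ cat << EOF | oc apply -f -
apiVersion: kmm.sigs.x-k8s.io/v1beta1
kind: PreflightValidationOCP
metadata:
  name: preflight
spec:
  releaseImage: <target_release_image>
  pushBuiltImage: true
EOF
----

Setting `pushBuiltImage: true` corresponds to the `PushBuiltImage` flag described above, so images produced during the Build and Sign validations are pushed to their repositories.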
diff --git a/modules/kmm-validation-lifecycle.adoc b/modules/kmm-validation-lifecycle.adoc deleted file mode 100644 index e1b5359aac46..000000000000 --- a/modules/kmm-validation-lifecycle.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-validation-lifecycle_{context}"] -= Validation lifecycle - -Preflight validation attempts to validate every module loaded in the cluster. Preflight will stop running validation on a `Module` resource after the validation is successful. In case module validation has failed, you can change the module definitions and Preflight will try to validate the module again in the next loop. - -If you want to run Preflight validation for an additional kernel, then you should create another `PreflightValidationOCP` resource for that kernel. After all the modules have been validated, it is recommended to delete the `PreflightValidationOCP` resource. diff --git a/modules/kmm-validation-status.adoc b/modules/kmm-validation-status.adoc deleted file mode 100644 index be88f2aa541b..000000000000 --- a/modules/kmm-validation-status.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/kmm-preflight-validation.adoc - -:_content-type: CONCEPT -[id="kmm-validation-status_{context}"] -= Validation status - -Preflight reports the status and progress of each module in the cluster that it attempts to -validate. - -[source,terminal] ----- -type CRStatus struct { - // Status of Module CR verification: true (verified), false (verification failed), - // error (error during verification process), unknown (verification has not started yet) - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Enum=True;False - VerificationStatus string `json:"verificationStatus"` <1> - // StatusReason contains a string describing the status source. - // +optional - StatusReason string `json:"statusReason,omitempty"` <2> - // Current stage of the verification process: - // image (image existence verification), build(build process verification) - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Enum=Image;Build;Sign;Requeued;Done - VerificationStage string `json:"verificationStage"` <3> - // LastTransitionTime is the last time the CR status transitioned from one status to another. - // This should be when the underlying status changed. If that is not known, then using the time when the API field changed is acceptable. - // +required - // +kubebuilder:validation:Required - // +kubebuilder:validation:Type=string - // +kubebuilder:validation:Format=date-time - LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"` <4> -} ----- - -The following fields apply to each module: - -<1> `VerificationStatus` - `true` or `false`, validated or not. - -<2> `StatusReason` - Verbal explanation regarding the status. - -<3> `VerificationStage` - Describes the validation stage being executed (Image, Build, Sign). - -<4> `LastTransitionTime` - The time of the last update to the status. 
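As an illustration of how these fields surface at run time, a per-module entry in the resource status might look like the following. The `crStatuses` wrapper keyed by `Module` name is an assumption about the surrounding status layout; the field names inside the entry come from the `CRStatus` struct above, and all values are hypothetical.

.Example status entry (hypothetical)
[source,yaml]
----
status:
  crStatuses:          # assumed wrapper keyed by Module name
    my-kmod:           # hypothetical Module name
      verificationStatus: "False"
      statusReason: "building the module loader image for the target kernel"
      verificationStage: "Build"
      lastTransitionTime: "2023-08-01T12:00:00Z"
----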
diff --git a/modules/kn-service-apply.adoc b/modules/kn-service-apply.adoc deleted file mode 100644 index 28cbfdcc49bd..000000000000 --- a/modules/kn-service-apply.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: REFERENCE -[id="kn-service-apply_{context}"] -= Applying service declarations - -You can declaratively configure a Knative service by using the `kn service apply` command. If the service does not exist it is created, otherwise the existing service is updated with the options that have been changed. - -The `kn service apply` command is especially useful for shell scripts or in a continuous integration pipeline, where users typically want to fully specify the state of the service in a single command to declare the target state. - -When using `kn service apply` you must provide the full configuration for the Knative service. This is different from the `kn service update` command, which only requires you to specify in the command the options that you want to update. - -.Example commands - -* Create a service: -+ -[source,terminal] ----- -$ kn service apply <service_name> --image <image> ----- - -* Add an environment variable to a service: -+ -[source,terminal] ----- -$ kn service apply <service_name> --image <image> --env <key>=<value> ----- - -* Read the service declaration from a JSON or YAML file: -+ -[source,terminal] ----- -$ kn service apply <service_name> -f <filename> ----- diff --git a/modules/kn-service-describe.adoc b/modules/kn-service-describe.adoc deleted file mode 100644 index 55ea22ba6756..000000000000 --- a/modules/kn-service-describe.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: REFERENCE -[id="kn-service-describe_{context}"] -= Describing serverless applications by using the Knative CLI - -You can describe a Knative service by using the `kn service describe` command. - -.Example commands - -* Describe a service: -+ -[source,terminal] ----- -$ kn service describe --verbose <service_name> ----- -+ -The `--verbose` flag is optional but can be included to provide a more detailed description. The difference between a regular and verbose output is shown in the following examples: -+ -.Example output without `--verbose` flag -[source,terminal] ----- -Name: hello -Namespace: default -Age: 2m -URL: http://hello-default.apps.ocp.example.com - -Revisions: - 100% @latest (hello-00001) [1] (2m) - Image: docker.io/openshift/hello-openshift (pinned to aaea76) - -Conditions: - OK TYPE AGE REASON - ++ Ready 1m - ++ ConfigurationsReady 1m - ++ RoutesReady 1m ----- -+ -.Example output with `--verbose` flag -[source,terminal] ----- -Name: hello -Namespace: default -Annotations: serving.knative.dev/creator=system:admin - serving.knative.dev/lastModifier=system:admin -Age: 3m -URL: http://hello-default.apps.ocp.example.com -Cluster: http://hello.default.svc.cluster.local - -Revisions: - 100% @latest (hello-00001) [1] (3m) - Image: docker.io/openshift/hello-openshift (pinned to aaea76) - Env: RESPONSE=Hello Serverless! 
- -Conditions: - OK TYPE AGE REASON - ++ Ready 3m - ++ ConfigurationsReady 3m - ++ RoutesReady 3m ----- - -* Describe a service in YAML format: -+ -[source,terminal] ----- -$ kn service describe <service_name> -o yaml ----- - -* Describe a service in JSON format: -+ -[source,terminal] ----- -$ kn service describe <service_name> -o json ----- - -* Print the service URL only: -+ -[source,terminal] ----- -$ kn service describe <service_name> -o url ----- diff --git a/modules/kn-service-offline-about.adoc b/modules/kn-service-offline-about.adoc deleted file mode 100644 index 0fea06818825..000000000000 --- a/modules/kn-service-offline-about.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: CONCEPT -[id="kn-service-offline-about_{context}"] -= About the Knative CLI offline mode - -When you execute `kn service` commands, the changes immediately propagate to the cluster. However, as an alternative, you can execute `kn service` commands in offline mode. When you create a service in offline mode, no changes happen on the cluster, and instead the service descriptor file is created on your local machine. - -:FeatureName: The offline mode of the Knative CLI -include::snippets/technology-preview.adoc[leveloffset=+1] - -After the descriptor file is created, you can manually modify it and track it in a version control system. You can also propagate changes to the cluster by using the `kn service create -f`, `kn service apply -f`, or `oc apply -f` commands on the descriptor files. -// Once `update` works, add it here and make it into a list - -The offline mode has several uses: - -* You can manually modify the descriptor file before using it to make changes on the cluster. -* You can locally track the descriptor file of a service in a version control system. This enables you to reuse the descriptor file in places other than the target cluster, for example in continuous integration (CI) pipelines, development environments, or demos. -* You can examine the created descriptor files to learn about Knative services. In particular, you can see how the resulting service is influenced by the different arguments passed to the `kn` command. - -The offline mode has its advantages: it is fast, and does not require a connection to the cluster. However, offline mode lacks server-side validation. Consequently, you cannot, for example, verify that the service name is unique or that the specified image can be pulled. diff --git a/modules/kn-service-offline-create.adoc b/modules/kn-service-offline-create.adoc deleted file mode 100644 index 5417ce8f3412..000000000000 --- a/modules/kn-service-offline-create.adoc +++ /dev/null @@ -1,151 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/reference/kn-serving-ref.adoc -// * serverless/develop/serverless-applications.adoc - -:_content-type: PROCEDURE -[id="creating-an-offline-service_{context}"] -= Creating a service using offline mode - -You can execute `kn service` commands in offline mode, so that no changes happen on the cluster, and instead the service descriptor file is created on your local machine. After the descriptor file is created, you can modify the file before propagating changes to the cluster. - -:FeatureName: The offline mode of the Knative CLI -include::snippets/technology-preview.adoc[leveloffset=+1] - -.Prerequisites - -* {ServerlessOperatorName} and Knative Serving are installed on your cluster. 
-* You have installed the Knative (`kn`) CLI. - -.Procedure - -. In offline mode, create a local Knative service descriptor file: -+ -[source,terminal] ----- -$ kn service create event-display \ - --image quay.io/openshift-knative/knative-eventing-sources-event-display:latest \ - --target ./ \ - --namespace test ----- -+ -.Example output -[source,terminal] ----- -Service 'event-display' created in namespace 'test'. ----- -+ -* The `--target ./` flag enables offline mode and specifies `./` as the directory for storing the new directory tree. -+ -If you do not specify an existing directory, but use a filename, such as `--target my-service.yaml`, then no directory tree is created. Instead, only the service descriptor file `my-service.yaml` is created in the current directory. -+ -The filename can have the `.yaml`, `.yml`, or `.json` extension. Choosing `.json` creates the service descriptor file in the JSON format. -+ -* The `--namespace test` option places the new service in the `test` namespace. -+ -If you do not use `--namespace`, and you are logged in to an {product-title} cluster, the descriptor file is created in the current namespace. Otherwise, the descriptor file is created in the `default` namespace. - -. Examine the created directory structure: -+ -[source,terminal] ----- -$ tree ./ ----- -+ -.Example output -[source,terminal] ----- -./ -└── test - └── ksvc - └── event-display.yaml - -2 directories, 1 file ----- -+ -* The current `./` directory specified with `--target` contains the new `test/` directory that is named after the specified namespace. -* The `test/` directory contains the `ksvc` directory, named after the resource type. -* The `ksvc` directory contains the descriptor file `event-display.yaml`, named according to the specified service name. - -. Examine the generated service descriptor file: -+ -[source,terminal] ----- -$ cat test/ksvc/event-display.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: serving.knative.dev/v1 -kind: Service -metadata: - creationTimestamp: null - name: event-display - namespace: test -spec: - template: - metadata: - annotations: - client.knative.dev/user-image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest - creationTimestamp: null - spec: - containers: - - image: quay.io/openshift-knative/knative-eventing-sources-event-display:latest - name: "" - resources: {} -status: {} ----- - -. List information about the new service: -+ -[source,terminal] ----- -$ kn service describe event-display --target ./ --namespace test ----- -+ -.Example output -[source,terminal] ----- -Name: event-display -Namespace: test -Age: -URL: - -Revisions: - -Conditions: - OK TYPE AGE REASON ----- - -* The `--target ./` option specifies the root directory for the directory structure containing namespace subdirectories. -+ -Alternatively, you can directly specify a YAML or JSON filename with the `--target` option. The accepted file extensions are `.yaml`, `.yml`, and `.json`. -+ -* The `--namespace` option specifies the namespace, which communicates to `kn` the subdirectory that contains the necessary service descriptor file. -+ -If you do not use `--namespace`, and you are logged in to an {product-title} cluster, `kn` searches for the service in the subdirectory that is named after the current namespace. Otherwise, `kn` searches in the `default/` subdirectory. - -. 
Use the service descriptor file to create the service on the cluster: -+ -[source,terminal] ----- -$ kn service create -f test/ksvc/event-display.yaml ----- -+ -.Example output -[source,terminal] ----- -Creating service 'event-display' in namespace 'test': - - 0.058s The Route is still working to reflect the latest desired specification. - 0.098s ... - 0.168s Configuration "event-display" is waiting for a Revision to become ready. - 23.377s ... - 23.419s Ingress has not yet been reconciled. - 23.534s Waiting for load balancer to be ready - 23.723s Ready to serve. - -Service 'event-display' created to latest revision 'event-display-00001' is available at URL: -http://event-display-test.apps.example.com ----- diff --git a/modules/kn-service-update.adoc b/modules/kn-service-update.adoc deleted file mode 100644 index 194ae874d9fb..000000000000 --- a/modules/kn-service-update.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/reference/kn-serving-ref.adoc - -:_content-type: REFERENCE -[id="kn-service-update_{context}"] -= Updating serverless applications by using the Knative CLI - -You can use the `kn service update` command for interactive sessions on the command line as you build up a service incrementally. In contrast to the `kn service apply` command, when using the `kn service update` command you only have to specify the changes that you want to update, rather than the full configuration for the Knative service. - -.Example commands - -* Update a service by adding a new environment variable: -+ -[source,terminal] ----- -$ kn service update <service_name> --env <key>=<value> ----- - -* Update a service by adding a new port: -+ -[source,terminal] ----- -$ kn service update <service_name> --port 80 ----- - -* Update a service by adding new request and limit parameters: -+ -[source,terminal] ----- -$ kn service update <service_name> --request cpu=500m --limit memory=1024Mi --limit cpu=1000m ----- - -* Assign the `latest` tag to a revision: -+ -[source,terminal] ----- -$ kn service update <service_name> --tag <revision_name>=latest ----- - -* Update a tag from `testing` to `staging` for the latest `READY` revision of a service: -+ -[source,terminal] ----- -$ kn service update <service_name> --untag testing --tag @latest=staging ----- - -* Add the `test` tag to a revision that receives 10% of traffic, and send the rest of the traffic to the latest `READY` revision of a service: -+ -[source,terminal] ----- -$ kn service update <service_name> --tag <revision_name>=test --traffic test=10,@latest=90 ----- diff --git a/modules/kn-trigger-describe.adoc b/modules/kn-trigger-describe.adoc deleted file mode 100644 index c0468b339f98..000000000000 --- a/modules/kn-trigger-describe.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/eventing/triggers/describe-triggers-cli.adoc - -:_content-type: PROCEDURE -[id="kn-trigger-describe_{context}"] -= Describing a trigger by using the Knative CLI - -You can use the `kn trigger describe` command to print information about existing triggers in your cluster by using the Knative CLI. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on your {product-title} cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a trigger. 
- -.Procedure - -* Enter the command: -+ -[source,terminal] ----- -$ kn trigger describe <trigger_name> ----- -+ -.Example output -[source,terminal] ----- -Name: ping -Namespace: default -Labels: eventing.knative.dev/broker=default -Annotations: eventing.knative.dev/creator=kube:admin, eventing.knative.dev/lastModifier=kube:admin -Age: 2m -Broker: default -Filter: - type: dev.knative.event - -Sink: - Name: edisplay - Namespace: default - Resource: Service (serving.knative.dev/v1) - -Conditions: - OK TYPE AGE REASON - ++ Ready 2m - ++ BrokerReady 2m - ++ DependencyReady 2m - ++ Subscribed 2m - ++ SubscriberResolved 2m ----- diff --git a/modules/kn-trigger-filtering.adoc b/modules/kn-trigger-filtering.adoc deleted file mode 100644 index 1a8dd29bbbb1..000000000000 --- a/modules/kn-trigger-filtering.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/eventing/triggers/filter-triggers-cli.adoc - -:_content-type: REFERENCE -[id="kn-trigger-filtering_{context}"] -= Filtering events with triggers by using the Knative CLI -// should be a procedure module but out of scope for this PR - -In the following trigger example, only events with the attribute `type: dev.knative.samples.helloworld` are sent to the event sink: - -[source,terminal] ----- -$ kn trigger create <trigger_name> --broker <broker_name> --filter type=dev.knative.samples.helloworld --sink ksvc:<service_name> ----- - -You can also filter events by using multiple attributes. The following example shows how to filter events using the type, source, and extension attributes: - -[source,terminal] ----- -$ kn trigger create <trigger_name> --broker <broker_name> --sink ksvc:<service_name> \ ---filter type=dev.knative.samples.helloworld \ ---filter source=dev.knative.samples/helloworldsource \ ---filter myextension=my-extension-value ----- diff --git a/modules/kn-trigger-list.adoc b/modules/kn-trigger-list.adoc deleted file mode 100644 index 890e330e7d3b..000000000000 --- a/modules/kn-trigger-list.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/eventing/triggers/list-triggers-cli.adoc - -:_content-type: PROCEDURE -[id="kn-trigger-list_{context}"] -= Listing triggers by using the Knative CLI - -You can use the `kn trigger list` command to list existing triggers in your cluster. - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on your {product-title} cluster. -* You have installed the Knative (`kn`) CLI. - -.Procedure - -. Print a list of available triggers: -+ -[source,terminal] ----- -$ kn trigger list ----- -+ -.Example output -[source,terminal] ----- -NAME BROKER SINK AGE CONDITIONS READY REASON -email default ksvc:edisplay 4s 5 OK / 5 True -ping default ksvc:edisplay 32s 5 OK / 5 True ----- - -. Optional: Print a list of triggers in JSON format: -+ -[source,terminal] ----- -$ kn trigger list -o json ----- -//example output? diff --git a/modules/kn-trigger-update.adoc b/modules/kn-trigger-update.adoc deleted file mode 100644 index ca44e9624ae8..000000000000 --- a/modules/kn-trigger-update.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * /serverless/develop/serverless-triggers.adoc - -:_content-type: PROCEDURE -[id="kn-trigger-update_{context}"] -= Updating a trigger by using the Knative CLI - -You can use the `kn trigger update` command with certain flags to update attributes for a trigger. 
- -.Prerequisites - -* The {ServerlessOperatorName} and Knative Eventing are installed on your {product-title} cluster. -* You have installed the Knative (`kn`) CLI. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. - -.Procedure - -* Update a trigger: -+ -[source,terminal] ----- -$ kn trigger update <trigger_name> --filter <key=value> --sink <sink_name> [flags] ----- -** You can update a trigger to filter exact event attributes that match incoming events. For example, using the `type` attribute: -+ -[source,terminal] ----- -$ kn trigger update <trigger_name> --filter type=knative.dev.event ----- -** You can remove a filter attribute from a trigger. For example, you can remove the filter attribute with key `type`: -+ -[source,terminal] ----- -$ kn trigger update <trigger_name> --filter type- ----- -** You can use the `--sink` parameter to change the event sink of a trigger: -+ -[source,terminal] ----- -$ kn trigger update <trigger_name> --sink ksvc:my-event-sink ----- diff --git a/modules/knative-eventing-CR-system-deployments.adoc b/modules/knative-eventing-CR-system-deployments.adoc deleted file mode 100644 index 3d5bc5d12655..000000000000 --- a/modules/knative-eventing-CR-system-deployments.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/tuning/overriding-config-eventing.adoc - -:_content-type: REFERENCE -[id="knative-eventing-CR-system-deployments_{context}"] -= Overriding deployment configurations - -Currently, overriding default configuration settings is supported for the `eventing-controller`, `eventing-webhook`, and `imc-controller` fields, as well as for the `readiness` and `liveness` fields for probes. - -[IMPORTANT] -==== -The `replicas` spec cannot override the number of replicas for deployments that use the Horizontal Pod Autoscaler (HPA), and does not work for the `eventing-webhook` deployment. -==== - -In the following example, a `KnativeEventing` CR overrides the `eventing-controller` deployment so that: - -* The `readiness` probe timeout `eventing-controller` is set to be 10 seconds. -* The deployment has specified CPU and memory resource limits. -* The deployment has 3 replicas. -* The `example-label: label` label is added. -* The `example-annotation: annotation` annotation is added. -* The `nodeSelector` field is set to select nodes with the `disktype: hdd` label. - -.KnativeEventing CR example -[source,yaml] ----- -apiVersion: operator.knative.dev/v1beta1 -kind: KnativeEventing -metadata: - name: knative-eventing - namespace: knative-eventing -spec: - deployments: - - name: eventing-controller - readinessProbes: <1> - - container: controller - timeoutSeconds: 10 - resources: - - container: eventing-controller - requests: - cpu: 300m - memory: 100Mi - limits: - cpu: 1000m - memory: 250Mi - replicas: 3 - labels: - example-label: label - annotations: - example-annotation: annotation - nodeSelector: - disktype: hdd ----- -<1> You can use the `readiness` and `liveness` probe overrides to override all fields of a probe in a container of a deployment as specified in the Kubernetes API except for the fields related to the probe handler: `exec`, `grpc`, `httpGet`, and `tcpSocket`. - -[NOTE] -==== -The `KnativeEventing` CR label and annotation settings override the deployment's labels and annotations for both the deployment itself and the resulting pods. 
-==== diff --git a/modules/knative-service-cluster-local.adoc b/modules/knative-service-cluster-local.adoc deleted file mode 100644 index 834950c7128b..000000000000 --- a/modules/knative-service-cluster-local.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/knative-serving/external-ingress-routing/routing-overview.adoc - -:_content-type: PROCEDURE -[id="knative-service-cluster-local_{context}"] -= Setting cluster availability to cluster local - - -// remove note for 4.10, OSD - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Serving are installed on the cluster. -* You have created a Knative service. - -.Procedure - -* Set the visibility for your service by adding the `networking.knative.dev/visibility=cluster-local` label: -+ -[source,terminal] ----- -$ oc label ksvc <service_name> networking.knative.dev/visibility=cluster-local ----- - -.Verification - -* Check that the URL for your service is now in the format `\http://<service_name>.<namespace>.svc.cluster.local`, by entering the following command and reviewing the output: -+ -[source,terminal] ----- -$ oc get ksvc ----- -+ -.Example output -[source,terminal] ----- -NAME URL LATESTCREATED LATESTREADY READY REASON -hello http://hello.default.svc.cluster.local hello-tx2g7 hello-tx2g7 True ----- diff --git a/modules/knative-serving-CR-system-deployments.adoc b/modules/knative-serving-CR-system-deployments.adoc deleted file mode 100644 index 35dba8b6e8e4..000000000000 --- a/modules/knative-serving-CR-system-deployments.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/admin_guide/serverless-configuration.adoc - -:_content-type: REFERENCE -[id="knative-serving-CR-system-deployments_{context}"] -= Overriding system deployment configurations - -Currently, overriding default configuration settings is supported for the `resources`, `replicas`, `labels`, `annotations`, and `nodeSelector` fields, as well as for the `readiness` and `liveness` fields for probes. - -In the following example, a `KnativeServing` CR overrides the `webhook` deployment so that: - -* The `readiness` probe timeout for `net-kourier-controller` is set to be 10 seconds. -* The deployment has specified CPU and memory resource limits. -* The deployment has 3 replicas. -* The `example-label: label` label is added. -* The `example-annotation: annotation` annotation is added. -* The `nodeSelector` field is set to select nodes with the `disktype: hdd` label. - -[NOTE] -==== -The `KnativeServing` CR label and annotation settings override the deployment's labels and annotations for both the deployment itself and the resulting pods.
-==== - -.KnativeServing CR example -[source,yaml] ----- -apiVersion: operator.knative.dev/v1beta1 -kind: KnativeServing -metadata: - name: ks - namespace: knative-serving -spec: - high-availability: - replicas: 2 - deployments: - - name: net-kourier-controller - readinessProbes: <1> - - container: controller - timeoutSeconds: 10 - - name: webhook - resources: - - container: webhook - requests: - cpu: 300m - memory: 60Mi - limits: - cpu: 1000m - memory: 1000Mi - replicas: 3 - labels: - example-label: label - annotations: - example-annotation: annotation - nodeSelector: - disktype: hdd ----- -<1> You can use the `readiness` and `liveness` probe overrides to override all fields of a probe in a container of a deployment as specified in the Kubernetes API except for the fields related to the probe handler: `exec`, `grpc`, `httpGet`, and `tcpSocket`. diff --git a/modules/knative-serving-controller-custom-certs-secrets.adoc b/modules/knative-serving-controller-custom-certs-secrets.adoc deleted file mode 100644 index eac049b78366..000000000000 --- a/modules/knative-serving-controller-custom-certs-secrets.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies -// -// * serverless/admin_guide/serverless-configuration.adoc - -:_content-type: PROCEDURE -[id="knative-serving-controller-custom-certs-secrets_{context}"] -= Configuring tag-to-digest resolution by using a secret - -If the `controller-custom-certs` spec uses the `Secret` type, the secret is mounted as a secret volume. Knative components consume the secret directly, assuming that the secret has the required certificates. - -.Prerequisites - -ifdef::openshift-enterprise[] -* You have cluster administrator permissions on {product-title}. -endif::[] - -ifdef::openshift-dedicated,openshift-rosa[] -* You have cluster or dedicated administrator permissions on {product-title}. -endif::[] - -* You have installed the {ServerlessOperatorName} and Knative Serving on your cluster. - -.Procedure - -. Create a secret: -+ -.Example command -[source,yaml] ----- -$ oc -n knative-serving create secret generic custom-secret --from-file=<secret_name>.crt=<path_to_certificate> ----- - -. Configure the `controller-custom-certs` spec in the `KnativeServing` custom resource (CR) to use the `Secret` type: -+ -.Example KnativeServing CR -[source,yaml] ----- -apiVersion: operator.knative.dev/v1beta1 -kind: KnativeServing -metadata: - name: knative-serving - namespace: knative-serving -spec: - controller-custom-certs: - name: custom-secret - type: Secret ----- diff --git a/modules/kube-apiserver-operator.adoc b/modules/kube-apiserver-operator.adoc deleted file mode 100644 index a23ce6fc6633..000000000000 --- a/modules/kube-apiserver-operator.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="kube-apiserver-operator_{context}"] -= Kubernetes API Server Operator - -[discrete] -== Purpose - -The Kubernetes API Server Operator manages and updates the Kubernetes API server deployed on top of {product-title}. The Operator is based on the {product-title} `library-go` framework and it is installed using the Cluster Version Operator (CVO). 
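As a quick, generic health check for this Operator, you can query its `ClusterOperator` resource:

[source,terminal]
----
$ oc get clusteroperator kube-apiserver
----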
- -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-apiserver-operator[openshift-kube-apiserver-operator] - -[discrete] -== CRDs - -* `kubeapiservers.operator.openshift.io` -** Scope: Cluster -** CR: `kubeapiserver` -** Validation: Yes - -[discrete] -== Configuration objects - -[source,terminal] ----- -$ oc edit kubeapiserver ----- diff --git a/modules/kube-controller-manager-operator.adoc b/modules/kube-controller-manager-operator.adoc deleted file mode 100644 index 77b71aa2d25a..000000000000 --- a/modules/kube-controller-manager-operator.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="kube-controller-manager-operator_{context}"] -= Kubernetes Controller Manager Operator - -[discrete] -== Purpose - -The Kubernetes Controller Manager Operator manages and updates the Kubernetes Controller Manager deployed on top of {product-title}. The Operator is based on {product-title} `library-go` framework and it is installed via the Cluster Version Operator (CVO). - -It contains the following components: - -* Operator -* Bootstrap manifest renderer -* Installer based on static pods -* Configuration observer - -By default, the Operator exposes Prometheus metrics through the `metrics` service. - -[discrete] -== Project - -link:https://github.com/openshift/cluster-kube-controller-manager-operator[cluster-kube-controller-manager-operator] diff --git a/modules/kubernetes-about.adoc b/modules/kubernetes-about.adoc deleted file mode 100644 index 1f364df7b1c0..000000000000 --- a/modules/kubernetes-about.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_architecture/osd-architecture.adoc - -:_content-type: CONCEPT -[id="kubernetes-about_{context}"] -= About Kubernetes - -Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The general concept of Kubernetes is fairly simple: - -* Start with one or more worker nodes to run the container workloads. -* Manage the deployment of those workloads from one or more control nodes. -* Wrap containers in a deployment unit called a pod. Using pods provides extra metadata with the container and offers the ability to group several containers in a single deployment entity. -* Create special kinds of assets. For example, services are represented by a set of pods and a policy that defines how they are accessed. This policy allows containers to connect to the services that they need even if they do not have the specific IP addresses for the services. Replication controllers are another special asset that indicates how many pod Replicas are required to run at a time. You can use this capability to automatically scale your application to adapt to its current demand. - -To learn more about Kubernetes, see the link:https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational[Kubernetes documentation]. 
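To make the pod concept concrete, the following is a minimal pod manifest that wraps a single container; the name, label, and image are placeholders.

.Example pod manifest (illustrative)
[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: hello-pod           # placeholder name
  labels:
    app: hello              # labels let services and replication controllers select this pod
spec:
  containers:
  - name: hello
    image: quay.io/example/hello:latest # placeholder image
    ports:
    - containerPort: 8080
----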
diff --git a/modules/kubernetes-components.adoc b/modules/kubernetes-components.adoc deleted file mode 100644 index 8561bb265b08..000000000000 --- a/modules/kubernetes-components.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/kubernetes-overview.adoc - -:_content-type: REFERENCE -[id="kubernetes-components_{context}"] -= Kubernetes components - -.Kubernetes components -[cols="1,2",options="header"] -|=== -|Component |Purpose - -|`kube-proxy` -|Runs on every node in the cluster and maintains the network traffic between the Kubernetes resources. - -|`kube-controller-manager` -|Governs the state of the cluster. - -|`kube-scheduler` -|Allocates pods to nodes. - -|`etcd` -|Stores cluster data. - -|`kube-apiserver` -|Validates and configures data for the API objects. - -|`kubelet` -|Runs on nodes and reads the container manifests. Ensures that the defined containers have started and are running. - -|`kubectl` -|Allows you to define how you want to run workloads. Use the `kubectl` command to interact with the `kube-apiserver`. - -|Node -|Node is a physical machine or a VM in a Kubernetes cluster. The control plane manages every node and schedules pods across the nodes in the Kubernetes cluster. - -|container runtime -|container runtime runs containers on a host operating system. You must install a container runtime on each node so that pods can run on the node. - -|Persistent storage -|Stores the data even after the device is shut down. Kubernetes uses persistent volumes to store the application data. - -|`container-registry` -|Stores and accesses the container images. - -|Pod -|The pod is the smallest logical unit in Kubernetes. A pod contains one or more containers to run in a worker node. -|=== diff --git a/modules/kubernetes-conceptual-guidelines.adoc b/modules/kubernetes-conceptual-guidelines.adoc deleted file mode 100644 index 74057633fb3b..000000000000 --- a/modules/kubernetes-conceptual-guidelines.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/kubernetes-overview.adoc - -:_content-type: CONCEPT -[id="kubernetes-conceptual-guidelines_{context}"] -= Kubernetes conceptual guidelines - -Before getting started with the {product-title}, consider these conceptual guidelines of Kubernetes: - -* Start with one or more worker nodes to run the container workloads. -* Manage the deployment of those workloads from one or more control plane nodes. -* Wrap containers in a deployment unit called a pod. By using pods provides extra metadata with the container and offers the ability to group several containers in a single deployment entity. -* Create special kinds of assets. For example, services are represented by a set of pods and a policy that defines how they are accessed. This policy allows containers to connect to the services that they need even if they do not have the specific IP addresses for the services. Replication controllers are another special asset that indicates how many pod replicas are required to run at a time. You can use this capability to automatically scale your application to adapt to its current demand. - -The API to {product-title} cluster is 100% Kubernetes. Nothing changes between a container running on any other Kubernetes and running on {product-title}. No changes to the application. -{product-title} brings added-value features to provide enterprise-ready enhancements to Kubernetes. {product-title} CLI tool (`oc`) is compatible with `kubectl`. 
While the Kubernetes API is 100% accessible within {product-title}, the `kubectl` command-line lacks many features that could make it more user-friendly. {product-title} offers a set of features and command-line tool like `oc`. -Although Kubernetes excels at managing your applications, it does not specify or manage platform-level requirements or deployment processes. Powerful and flexible platform management tools and processes are important benefits that {product-title} offers. You must add authentication, networking, security, monitoring, and logs management to your containerization platform. diff --git a/modules/kubernetes-resources.adoc b/modules/kubernetes-resources.adoc deleted file mode 100644 index e022091f4bf3..000000000000 --- a/modules/kubernetes-resources.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * getting_started/kubernetes-overview.adoc - -:_content-type: CONCEPT -[id="kubernetes-resources_{context}"] -= Kubernetes resources - -A custom resource is an extension of the Kubernetes API. You can customize Kubernetes clusters by using custom resources. Operators are software extensions which manage applications and their components with the help of custom resources. Kubernetes uses a declarative model when you want a fixed desired result while dealing with cluster resources. By using Operators, Kubernetes defines its states in a declarative way. You can modify the Kubernetes cluster resources by using imperative commands. -An Operator acts as a control loop which continuously compares the desired state of resources with the actual state of resources and puts actions in place to bring reality in line with the desired state. - -.Kubernetes cluster overview -image::247_OpenShift_Kubernetes_Overview-1.png[] - -.Kubernetes Resources -[cols="1,2",options="header"] -|=== -|Resource |Purpose - -|Service -|Kubernetes uses services to expose a running application on a set of pods. - -|`ReplicaSets` -|Kubernetes uses the `ReplicaSets` to maintain the constant pod number. - -|Deployment -|A resource object that maintains the life cycle of an application. -|=== - -Kubernetes is a core component of an {product-title}. You can use {product-title} for developing and running containerized applications. With its foundation in Kubernetes, the {product-title} incorporates the same technology that serves as the engine for massive telecommunications, streaming video, gaming, banking, and other applications. You can extend your containerized applications beyond a single cloud to on-premise and multi-cloud environments by using the {product-title}. diff --git a/modules/ldap-auto-syncing.adoc b/modules/ldap-auto-syncing.adoc deleted file mode 100644 index 1fd35e086b7c..000000000000 --- a/modules/ldap-auto-syncing.adoc +++ /dev/null @@ -1,223 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-auto-syncing_{context}"] -= Automatically syncing LDAP groups - -You can automatically sync LDAP groups on a periodic basis by configuring a cron job. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have configured an LDAP identity provider (IDP). -+ -This procedure assumes that you created an LDAP secret named `ldap-secret` and a config map named `ca-config-map`. - -.Procedure - -. 
Create a project where the cron job will run: -+ -[source,terminal] ----- -$ oc new-project ldap-sync <1> ----- -<1> This procedure uses a project called `ldap-sync`. - -. Locate the secret and config map that you created when configuring the LDAP identity provider and copy them to this new project. -+ -The secret and config map exist in the `openshift-config` project and must be copied to the new `ldap-sync` project. - -. Define a service account: -+ -.Example `ldap-sync-service-account.yaml` -[source,yaml] ----- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: ldap-group-syncer - namespace: ldap-sync ----- - -. Create the service account: -+ -[source,terminal] ----- -$ oc create -f ldap-sync-service-account.yaml ----- - -. Define a cluster role: -+ -.Example `ldap-sync-cluster-role.yaml` -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: ldap-group-syncer -rules: - - apiGroups: - - '' - - user.openshift.io - resources: - - groups - verbs: - - get - - list - - create - - update ----- - -. Create the cluster role: -+ -[source,terminal] ----- -$ oc create -f ldap-sync-cluster-role.yaml ----- - -. Define a cluster role binding to bind the cluster role to the service account: -+ -.Example `ldap-sync-cluster-role-binding.yaml` -[source,yaml] ----- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: ldap-group-syncer -subjects: - - kind: ServiceAccount - name: ldap-group-syncer <1> - namespace: ldap-sync -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ldap-group-syncer <2> ----- -<1> Reference to the service account created earlier in this procedure. -<2> Reference to the cluster role created earlier in this procedure. - -. Create the cluster role binding: -+ -[source,terminal] ----- -$ oc create -f ldap-sync-cluster-role-binding.yaml ----- - -. Define a config map that specifies the sync configuration file: -+ -.Example `ldap-sync-config-map.yaml` -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: ldap-group-syncer - namespace: ldap-sync -data: - sync.yaml: | <1> - kind: LDAPSyncConfig - apiVersion: v1 - url: ldaps://10.0.0.0:389 <2> - insecure: false - bindDN: cn=admin,dc=example,dc=com <3> - bindPassword: - file: "/etc/secrets/bindPassword" - ca: /etc/ldap-ca/ca.crt - rfc2307: <4> - groupsQuery: - baseDN: "ou=groups,dc=example,dc=com" <5> - scope: sub - filter: "(objectClass=groupOfMembers)" - derefAliases: never - pageSize: 0 - groupUIDAttribute: dn - groupNameAttributes: [ cn ] - groupMembershipAttributes: [ member ] - usersQuery: - baseDN: "ou=users,dc=example,dc=com" <6> - scope: sub - derefAliases: never - pageSize: 0 - userUIDAttribute: dn - userNameAttributes: [ uid ] - tolerateMemberNotFoundErrors: false - tolerateMemberOutOfScopeErrors: false ----- -<1> Define the sync configuration file. -<2> Specify the URL. -<3> Specify the `bindDN`. -<4> This example uses the RFC2307 schema; adjust values as necessary. You can also use a different schema. -<5> Specify the `baseDN` for `groupsQuery`. -<6> Specify the `baseDN` for `usersQuery`. - -. Create the config map: -+ -[source,terminal] ----- -$ oc create -f ldap-sync-config-map.yaml ----- - -. 
Define a cron job: -+ -.Example `ldap-sync-cron-job.yaml` -[source,yaml] ----- -kind: CronJob -apiVersion: batch/v1 -metadata: - name: ldap-group-syncer - namespace: ldap-sync -spec: <1> - schedule: "*/30 * * * *" <2> - concurrencyPolicy: Forbid - jobTemplate: - spec: - backoffLimit: 0 - ttlSecondsAfterFinished: 1800 <3> - template: - spec: - containers: - - name: ldap-group-sync - image: "registry.redhat.io/openshift4/ose-cli:latest" - command: - - "/bin/bash" - - "-c" - - "oc adm groups sync --sync-config=/etc/config/sync.yaml --confirm" <4> - volumeMounts: - - mountPath: "/etc/config" - name: "ldap-sync-volume" - - mountPath: "/etc/secrets" - name: "ldap-bind-password" - - mountPath: "/etc/ldap-ca" - name: "ldap-ca" - volumes: - - name: "ldap-sync-volume" - configMap: - name: "ldap-group-syncer" - - name: "ldap-bind-password" - secret: - secretName: "ldap-secret" <5> - - name: "ldap-ca" - configMap: - name: "ca-config-map" <6> - restartPolicy: "Never" - terminationGracePeriodSeconds: 30 - activeDeadlineSeconds: 500 - dnsPolicy: "ClusterFirst" - serviceAccountName: "ldap-group-syncer" ----- -<1> Configure the settings for the cron job. See "Creating cron jobs" for more information on cron job settings. -<2> The schedule for the job specified in link:https://en.wikipedia.org/wiki/Cron[cron format]. This example cron job runs every 30 minutes. Adjust the frequency as necessary, making sure to take into account how long the sync takes to run. -<3> How long, in seconds, to keep finished jobs. This should match the period of the job schedule in order to clean old failed jobs and prevent unnecessary alerts. For more information, see link:https://kubernetes.io/docs/concepts/workloads/controllers/ttlafterfinished[TTL-after-finished Controller] in the Kubernetes documentation. -<4> The LDAP sync command for the cron job to run. Passes in the sync configuration file that was defined in the config map. -<5> This secret was created when the LDAP IDP was configured. -<6> This config map was created when the LDAP IDP was configured. - -. Create the cron job: -+ -[source,terminal] ----- -$ oc create -f ldap-sync-cron-job.yaml ----- diff --git a/modules/ldap-failover-configure-apache.adoc b/modules/ldap-failover-configure-apache.adoc deleted file mode 100644 index 6986b2a7c33b..000000000000 --- a/modules/ldap-failover-configure-apache.adoc +++ /dev/null @@ -1,191 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-configuring-apache_{context}"] -= Configuring Apache to use SSSD - -.Procedure - -. Create a `/etc/pam.d/openshift` file that contains the -following contents: -+ ----- -auth required pam_sss.so -account required pam_sss.so ----- -+ -This configuration enables PAM, the pluggable authentication module, to use -`pam_sss.so` to determine authentication and access control when an -authentication request is issued for the `openshift` stack. - -. Edit the `/etc/httpd/conf.modules.d/55-authnz_pam.conf` file and uncomment - the following line: -+ ----- -LoadModule authnz_pam_module modules/mod_authnz_pam.so ----- - -. To configure the Apache `httpd.conf` file for remote basic authentication, -create the `openshift-remote-basic-auth.conf` file in the -`/etc/httpd/conf.d` directory. Use the following template to provide your -required settings and values: -+ -[IMPORTANT] -==== -Carefully review the template and customize its contents to fit your -environment. 
-==== -+ ----- -LoadModule request_module modules/mod_request.so -LoadModule php7_module modules/libphp7.so - -# Nothing needs to be served over HTTP. This virtual host simply redirects to -# HTTPS. -<VirtualHost *:80> - DocumentRoot /var/www/html - RewriteEngine On - RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R,L] -</VirtualHost> - -<VirtualHost *:443> - # This needs to match the certificates you generated. See the CN and X509v3 - # Subject Alternative Name in the output of: - # openssl x509 -text -in /etc/pki/tls/certs/remote-basic.example.com.crt - ServerName remote-basic.example.com - - DocumentRoot /var/www/html - - # Secure all connections with TLS - SSLEngine on - SSLCertificateFile /etc/pki/tls/certs/remote-basic.example.com.crt - SSLCertificateKeyFile /etc/pki/tls/private/remote-basic.example.com.key - SSLCACertificateFile /etc/pki/CA/certs/ca.crt - - # Require that TLS clients provide a valid certificate - SSLVerifyClient require - SSLVerifyDepth 10 - - # Other SSL options that may be useful - # SSLCertificateChainFile ... - # SSLCARevocationFile ... - - # Send logs to a specific location to make them easier to find - ErrorLog logs/remote_basic_error_log - TransferLog logs/remote_basic_access_log - LogLevel warn - - # PHP script that turns the Apache REMOTE_USER env var - # into a JSON formatted response that OpenShift understands - <Location /check_user.php> - # all requests not using SSL are denied - SSLRequireSSL - # denies access when SSLRequireSSL is applied - SSLOptions +StrictRequire - # Require both a valid basic auth user (so REMOTE_USER is always set) - # and that the CN of the TLS client matches that of the OpenShift master - <RequireAll> - Require valid-user - Require expr %{SSL_CLIENT_S_DN_CN} == 'system:openshift-master' - </RequireAll> - # Use basic auth since OpenShift will call this endpoint with a basic challenge - AuthType Basic - AuthName openshift - AuthBasicProvider PAM - AuthPAMService openshift - - # Store attributes in environment variables. Specify the email attribute that - # you confirmed. - LookupOutput Env - LookupUserAttr mail REMOTE_USER_MAIL - LookupUserGECOS REMOTE_USER_DISPLAY_NAME - - # Other options that might be useful - - # While REMOTE_USER is used as the sub field and serves as the immutable ID, - # REMOTE_USER_PREFERRED_USERNAME could be used to have a different username - # LookupUserAttr <attr_name> REMOTE_USER_PREFERRED_USERNAME - - # Group support may be added in a future release - # LookupUserGroupsIter REMOTE_USER_GROUP - </Location> - - # Deny everything else - <Location ~ "^((?!\/check_user\.php).)*$"> - Deny from all - </Location> -</VirtualHost> ----- - -. Create the `check_user.php` script in the `/var/www/html` directory. 
-Include the following code: -+ ----- -<?php -// Get the user based on the Apache var, this should always be -// set because we 'Require valid-user' in the configuration -$user = apache_getenv('REMOTE_USER'); - -// However, we assume it may not be set and -// build an error response by default -$data = array( - 'error' => 'remote PAM authentication failed' -); - -// Build a success response if we have a user -if (!empty($user)) { - $data = array( - 'sub' => $user - ); - // Map of optional environment variables to optional JSON fields - $env_map = array( - 'REMOTE_USER_MAIL' => 'email', - 'REMOTE_USER_DISPLAY_NAME' => 'name', - 'REMOTE_USER_PREFERRED_USERNAME' => 'preferred_username' - ); - - // Add all non-empty environment variables to JSON data - foreach ($env_map as $env_name => $json_name) { - $env_data = apache_getenv($env_name); - if (!empty($env_data)) { - $data[$json_name] = $env_data; - } - } -} - -// We always output JSON from this script -header('Content-Type: application/json', true); - -// Write the response as JSON -echo json_encode($data); -?> ----- - -. Enable Apache to load the module. Modify the -`/etc/httpd/conf.modules.d/55-lookup_identity.conf` file and uncomment the -following line: -+ ----- -LoadModule lookup_identity_module modules/mod_lookup_identity.so ----- - -. Set an SELinux boolean so that SElinux allows Apache to connect to SSSD over -D-BUS: -+ ----- -# setsebool -P httpd_dbus_sssd on ----- - -. Set a boolean to tell SELinux that it is acceptable for Apache to contact the -PAM subsystem: -+ ----- -# setsebool -P allow_httpd_mod_auth_pam on ----- - -. Start Apache: -+ ----- -# systemctl start httpd.service ----- diff --git a/modules/ldap-failover-configure-openshift.adoc b/modules/ldap-failover-configure-openshift.adoc deleted file mode 100644 index 07bc208955ec..000000000000 --- a/modules/ldap-failover-configure-openshift.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-for-ldap-configure-openshift_{context}"] -= Configuring {product-title} to use SSSD as the basic remote authentication server - -Modify the default configuration of your cluster to use the new identity -provider that you created. Complete the following steps on the first control plane host -listed in the Ansible host inventory file. - -.Procedure - -. Open the `/etc/origin/master/master-config.yaml` file. - -. Locate the `identityProviders` section and replace it with the following code: -+ ----- - identityProviders: - - name: sssd - challenge: true - login: true - mappingMethod: claim - provider: - apiVersion: v1 - kind: BasicAuthPasswordIdentityProvider - url: https://remote-basic.example.com/check_user.php - ca: /etc/origin/master/ca.crt - certFile: /etc/origin/master/openshift-master.crt - keyFile: /etc/origin/master/openshift-master.key ----- - -. Start {product-title} with the updated configuration: -+ ----- -# openshift start \ - --public-master=https://openshift.example.com:8443 \ - --master-config=/etc/origin/master/master-config.yaml \ - --node-config=/etc/origin/node-node1.example.com/node-config.yaml ----- - -. Test a login by using the `oc` CLI: -+ ----- -$ oc login https://openshift.example.com:8443 -u user1 ----- -+ -You can log in only with valid LDAP credentials. -. List the identities and confirm that an email address is displayed for each -user name. 
Run the following command: -+ ----- -$ oc get identity -o yaml ----- diff --git a/modules/ldap-failover-configure-sssd.adoc b/modules/ldap-failover-configure-sssd.adoc deleted file mode 100644 index 1f7f69e5177f..000000000000 --- a/modules/ldap-failover-configure-sssd.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-configuring-sssd_{context}"] -= Configuring SSSD for LDAP failover -Complete these steps on the remote basic authentication server. - -You can configure the SSSD to retrieve attributes, such as email addresses and -display names, and pass them to {product-title} to display in the web interface. -In the following steps, you configure the SSSD to provide email addresses to -{product-title}. - -.Procedure - -. Install the required SSSD and the web server components: -+ ----- -# yum install -y sssd \ - sssd-dbus \ - realmd \ - httpd \ - mod_session \ - mod_ssl \ - mod_lookup_identity \ - mod_authnz_pam \ - php \ - mod_php ----- - -. Set up SSSD to authenticate this VM against the LDAP server. If the LDAP server -is a FreeIPA or Active Directory environment, then use `realmd` to join -this machine to the domain. -+ ----- -# realm join ldap.example.com ----- -+ -For more advanced cases, see the -https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/System-Level_Authentication_Guide/authconfig-ldap.html[System-Level Authentication Guide] - -. To use SSSD to manage failover situations for LDAP, add more entries to the - `/etc/sssd/sssd.conf` file on the `ldap_uri` line. Systems that are -enrolled with FreeIPA can automatically handle failover by using DNS SRV records. - -. Modify the `[domain/DOMAINNAME]` section of the `/etc/sssd/sssd.conf` file -and add this attribute: -+ ----- -[domain/example.com] -... -ldap_user_extra_attrs = mail <1> ----- -<1> Specify the correct attribute to retrieve email addresses for your LDAP -solution. For IPA, specify `mail`. Other LDAP solutions might use another -attribute, such as `email`. - -. Confirm that the `domain` parameter in the `/etc/sssd/sssd.conf` file -contains only the domain name listed in the `[domain/DOMAINNAME]` section. -+ ----- -domains = example.com ----- - -. Grant Apache permission to retrieve the email attribute. Add the following -lines to the `[ifp]` section of the `/etc/sssd/sssd.conf` file: -+ ----- -[ifp] -user_attributes = +mail -allowed_uids = apache, root ----- - -. To ensure that all of the changes are applied properly, restart SSSD: -+ ----- -$ systemctl restart sssd.service ----- - -. Test that the user information can be retrieved properly: -+ ----- -$ getent passwd <username> -username:*:12345:12345:Example User:/home/username:/usr/bin/bash ----- - -. Confirm that the mail attribute you specified returns an email address from -your domain: -+ ----- -# dbus-send --print-reply --system --dest=org.freedesktop.sssd.infopipe \ - /org/freedesktop/sssd/infopipe org.freedesktop.sssd.infopipe.GetUserAttr \ - string:username \ <1> - array:string:mail <2> - -method return time=1528091855.672691 sender=:1.2787 -> destination=:1.2795 serial=13 reply_serial=2 - array [ - dict entry( - string "mail" - variant array [ - string "username@example.com" - ] - ) - ] ----- -<1> Provide a user name in your LDAP solution. -<2> Specify the attribute that you configured. - -. Attempt to log in to the VM as an LDAP user and confirm that you can log in -using LDAP credentials. 
You can use either the local console or a remote service -like SSH to log in. - -[IMPORTANT] -==== -By default, all users can log in to the remote basic authentication server by using -their LDAP credentials. You can change this behavior: - -* If you use IPA joined systems, -link:https://www.freeipa.org/page/Howto/HBAC_and_allow_all[configure host-based access control]. -* If you use Active Directory joined systems, use a -link:https://docs.pagure.org/SSSD.sssd/design_pages/active_directory_gpo_integration.html[group policy object]. -* For other cases, see the -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/system-level_authentication_guide/sssd[SSSD configuration] documentation. -==== diff --git a/modules/ldap-failover-generate-certs.adoc b/modules/ldap-failover-generate-certs.adoc deleted file mode 100644 index d4efdbc9f876..000000000000 --- a/modules/ldap-failover-generate-certs.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-generating-certificates_{context}"] -= Generating and sharing certificates with the remote basic authentication server - -Complete the following steps on the first control plane host listed in the Ansible host inventory file, -by default `/etc/ansible/hosts`. - -.Procedure - -. To ensure that communication between the remote basic authentication server and -{product-title} is trustworthy, create a set of Transport Layer Security (TLS) -certificates to use during the other phases of this setup. Run the following command: -+ ----- -# openshift start \ - --public-master=https://openshift.example.com:8443 \ - --write-config=/etc/origin/ ----- -+ -The output includes the `/etc/origin/master/ca.crt` and -`/etc/origin/master/ca.key` signing certificates. -. Use the signing certificate to generate keys to use on the remote basic -authentication server: -+ ----- -# mkdir -p /etc/origin/remote-basic/ -# oc adm ca create-server-cert \ - --cert='/etc/origin/remote-basic/remote-basic.example.com.crt' \ - --key='/etc/origin/remote-basic/remote-basic.example.com.key' \ - --hostnames=remote-basic.example.com \ <1> - --signer-cert='/etc/origin/master/ca.crt' \ - --signer-key='/etc/origin/master/ca.key' \ - --signer-serial='/etc/origin/master/ca.serial.txt' ----- -+ -<1> A comma-separated list of all the hostnames and interface IP addresses that must access the -remote basic authentication server. -+ -[NOTE] -==== -The certificate files that you generate are valid for two years. You can alter -this period by changing the `--expire-days` and `--signer-expire-days` values, -but for security reasons, do not make them greater than 730. -==== -+ -[IMPORTANT] -==== -If you do not list all hostnames and interface IP addresses that must access the -remote basic authentication server, the HTTPS connection will fail. -==== -.
Copy the necessary certificates and key to the remote basic authentication server: -+ ----- -# scp /etc/origin/master/ca.crt \ - root@remote-basic.example.com:/etc/pki/CA/certs/ - -# scp /etc/origin/remote-basic/remote-basic.example.com.crt \ - root@remote-basic.example.com:/etc/pki/tls/certs/ - -# scp /etc/origin/remote-basic/remote-basic.example.com.key \ - root@remote-basic.example.com:/etc/pki/tls/private/ ----- diff --git a/modules/ldap-failover-overview.adoc b/modules/ldap-failover-overview.adoc deleted file mode 100644 index afe0e3577db1..000000000000 --- a/modules/ldap-failover-overview.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-for-ldap-overview_{context}"] - -{product-title} provides an authentication -provider for use with Lightweight Directory Access Protocol (LDAP) setups, but -it can connect to only a single LDAP server. During {product-title} installation, -you can configure the System Security -Services Daemon (SSSD) for LDAP failover to ensure access to your cluster if one -LDAP server fails. - -The setup for this configuration is advanced and requires a separate -authentication server, also called a *remote basic authentication server*, for -{product-title} to communicate with. You configure this server -to pass extra attributes, such as email addresses, to {product-title} so it can -display them in the web console. - -This topic describes how to complete this setup on a dedicated physical or -virtual machine (VM), but you can also configure SSSD in containers. - -[IMPORTANT] -==== -You must complete all sections of this topic. -==== diff --git a/modules/ldap-failover-prereqs.adoc b/modules/ldap-failover-prereqs.adoc deleted file mode 100644 index 4bf4a40eb217..000000000000 --- a/modules/ldap-failover-prereqs.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-ldap-failover.adoc - -[id="sssd-for-ldap-prereqs_{context}"] -= Prerequisites for configuring basic remote authentication - -* Before starting setup, you must know the following information about your -LDAP server: -** Whether the directory server is powered by -http://www.freeipa.org/page/Main_Page[FreeIPA], Active Directory, or another -LDAP solution. -** The Uniform Resource Identifier (URI) for the LDAP server, for example, -`ldap.example.com`. -** The location of the CA certificate for the LDAP server. -** Whether the LDAP server corresponds to RFC 2307 or RFC 2307bis for user groups. -* Prepare the servers: -** `remote-basic.example.com`: A VM to use as the remote basic authentication server. -*** Select an operating system that includes SSSD version 1.12.0 for this server, -such as Red Hat Enterprise Linux 7.0 or later. -ifeval::["{context}" == "sssd-ldap-failover-extend"] -*** Install mod_lookup_identity version 0.9.4 or later. You can obtain this -package link:https://github.com/adelton/mod_lookup_identity/releases[from -upstream]. -endif::[] -** `openshift.example.com`: A new installation of {product-title}. -*** You must not -have an authentication method configured for this cluster. -*** Do not start {product-title} on this cluster. 
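If you are unsure whether your directory follows RFC 2307 or RFC 2307bis for user groups, a quick query against one of your group entries can help you confirm it before you continue. The following check is not part of the original procedure; it is a minimal sketch that assumes the OpenLDAP client tools (`ldapsearch`) are installed and reuses the example hostname from this topic with a hypothetical base DN, so adjust both for your environment:

[source,terminal]
----
$ ldapsearch -x -H ldap://ldap.example.com \
    -b "ou=groups,dc=example,dc=com" \
    "(objectClass=*)" objectClass member memberUid
----

Whether your group entries return `member` values that contain full distinguished names or `memberUid` values that contain plain user names is a useful hint for deciding, together with your LDAP administrator, which schema variant the server follows.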
diff --git a/modules/ldap-syncing-about.adoc b/modules/ldap-syncing-about.adoc deleted file mode 100644 index df00d8e75487..000000000000 --- a/modules/ldap-syncing-about.adoc +++ /dev/null @@ -1,123 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: CONCEPT -[id="ldap-syncing-about_{context}"] -= About configuring LDAP sync - -Before you can run LDAP sync, you need a sync -configuration file. This file contains the following configuration details: - -* Configuration for connecting to your LDAP server. -* Sync configuration options that are dependent on the schema used in your LDAP -server. -* An administrator-defined list of name mappings that maps {product-title} group names to groups in your LDAP server. - -The format of the configuration file depends upon the schema you are using: RFC 2307, Active Directory, or augmented Active Directory. - -[[ldap-client-configuration]] -LDAP client configuration:: - -The LDAP client configuration section of the configuration file defines the connections to your LDAP server. - -.LDAP client configuration -[source,yaml] ----- -url: ldap://10.0.0.0:389 <1> -bindDN: cn=admin,dc=example,dc=com <2> -bindPassword: password <3> -insecure: false <4> -ca: my-ldap-ca-bundle.crt <5> ----- -<1> The connection protocol, IP address of the LDAP server hosting your -database, and the port to connect to, formatted as `scheme://host:port`. -<2> Optional distinguished name (DN) to use as the Bind DN. -{product-title} uses this if elevated privilege is required to retrieve entries for -the sync operation. -<3> Optional password to use to bind. {product-title} uses this if elevated privilege is -necessary to retrieve entries for the sync operation. This value may also be -provided in an environment variable, external file, or encrypted file. -<4> When `false`, secure -LDAP (`ldaps://`) URLs connect using TLS, and insecure LDAP (`ldap://`) URLs are -upgraded to TLS. When `true`, no TLS connection is made to the server and you cannot use `ldaps://` URL schemes. -<5> The certificate bundle to use for validating server certificates for the -configured URL. If empty, {product-title} uses system-trusted roots. This only applies -if `insecure` is set to `false`. - -[[ldap-query-definition]] -LDAP query definition:: -Sync configurations consist of LDAP query definitions for the entries that are -required for synchronization. The specific definition of an LDAP query depends -on the schema used to store membership information in the LDAP server. - -.LDAP query definition -[source,yaml] ----- -baseDN: ou=users,dc=example,dc=com <1> -scope: sub <2> -derefAliases: never <3> -timeout: 0 <4> -filter: (objectClass=person) <5> -pageSize: 0 <6> ----- -<1> The distinguished name (DN) of the branch of the directory where all -searches start from. You must specify the top of your -directory tree, but you can also specify a subtree in the directory. -<2> The scope of the search. Valid values are `base`, `one`, or `sub`. If this -is left undefined, then a scope of `sub` is assumed. Descriptions of the scope -options can be found in the table below. -<3> The behavior of the search with respect to aliases in the LDAP tree. Valid -values are `never`, `search`, `base`, or `always`. If this is left undefined, -then the default is to `always` dereference aliases. 
Descriptions of the -dereferencing behaviors can be found in the table below. -<4> The time limit allowed for the search by the client, in seconds. A value of -`0` imposes no client-side limit. -<5> A valid LDAP search filter. If this is left undefined, then the default is -`(objectClass=*)`. -<6> The optional maximum size of response pages from the server, measured in LDAP -entries. If set to `0`, no size restrictions will be made on pages of responses. -Setting paging sizes is necessary when queries return more entries than the -client or server allow by default. - -[[ldap-search]] -.LDAP search scope options -[cols="2a,8a",options="header"] -|=== -|LDAP search scope | Description -.^|`base` | Only consider the object specified by the base DN given for the query. -.^|`one` | Consider all of the objects on the same level in the tree as the base DN for -the query. -.^|`sub` | Consider the entire subtree rooted at the base DN given for the query. -|=== - -[[deref-aliases]] -.LDAP dereferencing behaviors -[cols="2a,8a",options="header"] -|=== -|Dereferencing behavior | Description -.^|`never` | Never dereference any aliases found in the LDAP tree. -.^|`search` | Only dereference aliases found while searching. -.^|`base` | Only dereference aliases while finding the base object. -.^|`always` | Always dereference all aliases found in the LDAP tree. -|=== - -[[user-defined-name-mapping]] -User-defined name mapping:: -A user-defined name mapping explicitly maps the names of {product-title} groups to -unique identifiers that find groups on your LDAP server. The mapping uses normal -YAML syntax. A user-defined mapping can contain an entry for every group in your -LDAP server or only a subset of those groups. If there are groups on the LDAP -server that do not have a user-defined name mapping, the default behavior during -sync is to use the attribute specified as the {product-title} group's name. - -.User-defined name mapping -[source,yaml] ----- -groupUIDNameMapping: - "cn=group1,ou=groups,dc=example,dc=com": firstgroup - "cn=group2,ou=groups,dc=example,dc=com": secondgroup - "cn=group3,ou=groups,dc=example,dc=com": thirdgroup ----- diff --git a/modules/ldap-syncing-activedir.adoc b/modules/ldap-syncing-activedir.adoc deleted file mode 100644 index 10e7782c2789..000000000000 --- a/modules/ldap-syncing-activedir.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-activedir_{context}"] -= Syncing groups using the Active Directory schema - -In the Active Directory schema, both users (Jane and Jim) exist in the LDAP -server as first-class entries, and group membership is stored in attributes on -the user. 
The following snippet of `ldif` defines the users and group for this -schema: - -.LDAP entries that use Active Directory schema: `active_directory.ldif` -[source,ldif] ----- -dn: ou=users,dc=example,dc=com -objectClass: organizationalUnit -ou: users - -dn: cn=Jane,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jane -sn: Smith -displayName: Jane Smith -mail: jane.smith@example.com -memberOf: admins <1> - -dn: cn=Jim,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jim -sn: Adams -displayName: Jim Adams -mail: jim.adams@example.com -memberOf: admins ----- -<1> The user's group memberships are listed as attributes on the user, and the -group does not exist as an entry on the server. The `memberOf` attribute does -not have to be a literal attribute on the user; in some LDAP servers, it is created -during search and returned to the client, but not committed to the database. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `active_directory_config.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=active_directory_config.yaml --confirm ----- -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `active_directory_config.yaml` file -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 <1> - openshift.io/ldap.uid: admins <2> - openshift.io/ldap.url: LDAP_SERVER_IP:389 <3> - creationTimestamp: - name: admins <4> -users: <5> -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The last time this {product-title} group was synchronized with the LDAP server, in ISO 6801 -format. -<2> The unique identifier for the group on the LDAP server. -<3> The IP address and host of the LDAP server where this group's record is -stored. -<4> The name of the group as listed in the LDAP server. -<5> The users that are members of the group, named as specified by the sync -file. diff --git a/modules/ldap-syncing-augmented-activedir.adoc b/modules/ldap-syncing-augmented-activedir.adoc deleted file mode 100644 index de0a6d0e452d..000000000000 --- a/modules/ldap-syncing-augmented-activedir.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-augmented-activedir_{context}"] -= Syncing groups using the augmented Active Directory schema - -In the augmented Active Directory schema, both users (Jane and Jim) and groups -exist in the LDAP server as first-class entries, and group membership is stored -in attributes on the user. 
The following snippet of `ldif` defines the users and -group for this schema: - -.LDAP entries that use augmented Active Directory schema: `augmented_active_directory.ldif` -[source,ldif] ----- -dn: ou=users,dc=example,dc=com -objectClass: organizationalUnit -ou: users - -dn: cn=Jane,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jane -sn: Smith -displayName: Jane Smith -mail: jane.smith@example.com -memberOf: cn=admins,ou=groups,dc=example,dc=com <1> - -dn: cn=Jim,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jim -sn: Adams -displayName: Jim Adams -mail: jim.adams@example.com -memberOf: cn=admins,ou=groups,dc=example,dc=com - -dn: ou=groups,dc=example,dc=com -objectClass: organizationalUnit -ou: groups - -dn: cn=admins,ou=groups,dc=example,dc=com <2> -objectClass: groupOfNames -cn: admins -owner: cn=admin,dc=example,dc=com -description: System Administrators -member: cn=Jane,ou=users,dc=example,dc=com -member: cn=Jim,ou=users,dc=example,dc=com ----- -<1> The user's group memberships are listed as attributes on the user. -<2> The group is a first-class entry on the LDAP server. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `augmented_active_directory_config.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=augmented_active_directory_config.yaml --confirm ----- -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `augmented_active_directory_config.yaml` file - -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 <1> - openshift.io/ldap.uid: cn=admins,ou=groups,dc=example,dc=com <2> - openshift.io/ldap.url: LDAP_SERVER_IP:389 <3> - creationTimestamp: - name: admins <4> -users: <5> -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The last time this {product-title} group was synchronized with the LDAP server, in ISO 6801 format. -<2> The unique identifier for the group on the LDAP server. -<3> The IP address and host of the LDAP server where this group's record is stored. -<4> The name of the group as specified by the sync file. -<5> The users that are members of the group, named as specified by the sync file. diff --git a/modules/ldap-syncing-config-activedir.adoc b/modules/ldap-syncing-config-activedir.adoc deleted file mode 100644 index e084bae8a645..000000000000 --- a/modules/ldap-syncing-config-activedir.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: CONCEPT -[id="ldap-syncing-config-activedir_{context}"] -= About the Active Directory configuration file - -The Active Directory schema requires you to provide an LDAP query definition for -user entries, as well as the attributes to represent them with in the internal -{product-title} group records. - -For clarity, the group you create in {product-title} should use attributes other -than the distinguished name whenever possible for user- or administrator-facing -fields. For example, identify the users of an {product-title} group by their e-mail, but define -the name of the group by the name of the group on the LDAP server. 
-The following configuration file creates these relationships: - -.LDAP sync configuration that uses Active Directory schema: `active_directory_config.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -url: ldap://LDAP_SERVICE_IP:389 -activeDirectory: - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - filter: (objectclass=person) - pageSize: 0 - userNameAttributes: [ mail ] <1> - groupMembershipAttributes: [ memberOf ] <2> ----- -<1> The attribute to use as the name of the user in the {product-title} group record. -<2> The attribute on the user that stores the membership information. diff --git a/modules/ldap-syncing-config-augmented-activedir.adoc b/modules/ldap-syncing-config-augmented-activedir.adoc deleted file mode 100644 index a8c1af7c51f0..000000000000 --- a/modules/ldap-syncing-config-augmented-activedir.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: CONCEPT -[id="ldap-syncing-config-augmented-activedir_{context}"] -= About the augmented Active Directory configuration file - -The augmented Active Directory schema requires you to provide an LDAP query -definition for both user entries and group entries, as well as the attributes -with which to represent them in the internal {product-title} group records. - -For clarity, the group you create in {product-title} should use attributes other -than the distinguished name whenever possible for user- or administrator-facing -fields. For example, identify the users of an {product-title} group by their e-mail, -and use the name of the group as the common name. The following configuration -file creates these relationships. - -.LDAP sync configuration that uses augmented Active Directory schema: `augmented_active_directory_config.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -url: ldap://LDAP_SERVICE_IP:389 -augmentedActiveDirectory: - groupsQuery: - baseDN: "ou=groups,dc=example,dc=com" - scope: sub - derefAliases: never - pageSize: 0 - groupUIDAttribute: dn <1> - groupNameAttributes: [ cn ] <2> - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - filter: (objectclass=person) - pageSize: 0 - userNameAttributes: [ mail ] <3> - groupMembershipAttributes: [ memberOf ] <4> ----- -<1> The attribute that uniquely identifies a group on the LDAP server. You -cannot specify `groupsQuery` filters when using DN for groupUIDAttribute. For -fine-grained filtering, use the whitelist / blacklist method. -<2> The attribute to use as the name of the group. -<3> The attribute to use as the name of the user in the {product-title} group record. -<4> The attribute on the user that stores the membership information. diff --git a/modules/ldap-syncing-config-rfc2307.adoc b/modules/ldap-syncing-config-rfc2307.adoc deleted file mode 100644 index 864187a36b6a..000000000000 --- a/modules/ldap-syncing-config-rfc2307.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: CONCEPT -[id="ldap-syncing-config-rfc2307_{context}"] -= About the RFC 2307 configuration file - -The RFC 2307 schema requires you to provide an LDAP query definition for both user -and group entries, as well as the attributes with which to represent them in the -internal {product-title} records. 
- -For clarity, the group you create in {product-title} should use attributes other -than the distinguished name whenever possible for user- or administrator-facing -fields. For example, identify the users of an {product-title} group by their e-mail, and use the -name of the group as the common name. The following configuration file creates -these relationships: - -[NOTE] -==== -If using user-defined name mappings, your configuration file will differ. -==== - -.LDAP sync configuration that uses RFC 2307 schema: `rfc2307_config.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -url: ldap://LDAP_SERVICE_IP:389 <1> -insecure: false <2> -rfc2307: - groupsQuery: - baseDN: "ou=groups,dc=example,dc=com" - scope: sub - derefAliases: never - pageSize: 0 - groupUIDAttribute: dn <3> - groupNameAttributes: [ cn ] <4> - groupMembershipAttributes: [ member ] <5> - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - pageSize: 0 - userUIDAttribute: dn <6> - userNameAttributes: [ mail ] <7> - tolerateMemberNotFoundErrors: false - tolerateMemberOutOfScopeErrors: false ----- -<1> The IP address and host of the LDAP server where this group's record is -stored. -<2> When `false`, secure -LDAP (`ldaps://`) URLs connect using TLS, and insecure LDAP (`ldap://`) URLs are -upgraded to TLS. When `true`, no TLS connection is made to the server and you cannot use `ldaps://` URL schemes. -<3> The attribute that uniquely identifies a group on the LDAP server. -You cannot specify `groupsQuery` filters when using DN for `groupUIDAttribute`. -For fine-grained filtering, use the whitelist / blacklist method. -<4> The attribute to use as the name of the group. -<5> The attribute on the group that stores the membership information. -<6> The attribute that uniquely identifies a user on the LDAP server. You -cannot specify `usersQuery` filters when using DN for userUIDAttribute. For -fine-grained filtering, use the whitelist / blacklist method. -<7> The attribute to use as the name of the user in the {product-title} group record. diff --git a/modules/ldap-syncing-examples.adoc b/modules/ldap-syncing-examples.adoc deleted file mode 100644 index bbd308ee093c..000000000000 --- a/modules/ldap-syncing-examples.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -[id="ldap-syncing-examples_{context}"] -= LDAP group sync examples - -This section contains examples for the RFC 2307, Active Directory, and -augmented Active Directory schemas. - -[NOTE] -==== -These examples assume that all users are direct members of their respective -groups. Specifically, no groups have other groups as members. See -the Nested Membership Sync Example for information on -how to sync nested groups. -==== - diff --git a/modules/ldap-syncing-nesting.adoc b/modules/ldap-syncing-nesting.adoc deleted file mode 100644 index 759391f4436c..000000000000 --- a/modules/ldap-syncing-nesting.adoc +++ /dev/null @@ -1,182 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-nesting_{context}"] -== LDAP nested membership sync example - -Groups in {product-title} do not nest. The LDAP server must flatten group -membership before the data can be consumed. 
Microsoft's Active Directory Server -supports this feature via the -link:https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[`LDAP_MATCHING_RULE_IN_CHAIN`] -rule, which has the OID `1.2.840.113556.1.4.1941`. Furthermore, only explicitly -whitelisted groups can be synced when using this matching rule. - -This section has an example for the augmented Active Directory schema, which -synchronizes a group named `admins` that has one user `Jane` and one group -`otheradmins` as members. The `otheradmins` group has one user member: `Jim`. -This example explains: - -* How the group and users are added to the LDAP server. -* What the LDAP sync configuration file looks like. -* What the resulting group record in {product-title} will be after synchronization. - -In the augmented Active Directory schema, both users (`Jane` and `Jim`) and -groups exist in the LDAP server as first-class entries, and group membership is -stored in attributes on the user or the group. The following snippet of `ldif` -defines the users and groups for this schema: - -.LDAP entries that use augmented Active Directory schema with nested members: `augmented_active_directory_nested.ldif` -[source,ldif] ----- -dn: ou=users,dc=example,dc=com -objectClass: organizationalUnit -ou: users - -dn: cn=Jane,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jane -sn: Smith -displayName: Jane Smith -mail: jane.smith@example.com -memberOf: cn=admins,ou=groups,dc=example,dc=com <1> - -dn: cn=Jim,ou=users,dc=example,dc=com -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: testPerson -cn: Jim -sn: Adams -displayName: Jim Adams -mail: jim.adams@example.com -memberOf: cn=otheradmins,ou=groups,dc=example,dc=com <1> - -dn: ou=groups,dc=example,dc=com -objectClass: organizationalUnit -ou: groups - -dn: cn=admins,ou=groups,dc=example,dc=com <2> -objectClass: group -cn: admins -owner: cn=admin,dc=example,dc=com -description: System Administrators -member: cn=Jane,ou=users,dc=example,dc=com -member: cn=otheradmins,ou=groups,dc=example,dc=com - -dn: cn=otheradmins,ou=groups,dc=example,dc=com <2> -objectClass: group -cn: otheradmins -owner: cn=admin,dc=example,dc=com -description: Other System Administrators -memberOf: cn=admins,ou=groups,dc=example,dc=com <1> <3> -member: cn=Jim,ou=users,dc=example,dc=com ----- -<1> The user's and group's memberships are listed as attributes on the object. -<2> The groups are first-class entries on the LDAP server. -<3> The `otheradmins` group is a member of the `admins` group. - -When syncing nested groups with Active Directory, you must provide an LDAP query -definition for both user entries and group entries, as well as the attributes -with which to represent them in the internal {product-title} group records. -Furthermore, certain changes are required in this configuration: - -- The `oc adm groups sync` command must explicitly whitelist groups. -- The user's `groupMembershipAttributes` must include -`"memberOf:1.2.840.113556.1.4.1941:"` to comply with the -https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[`LDAP_MATCHING_RULE_IN_CHAIN`] -rule. -- The `groupUIDAttribute` must be set to `dn`. -- The `groupsQuery`: - * Must not set `filter`. - * Must set a valid `derefAliases`. - * Should not set `baseDN` as that value is ignored. - * Should not set `scope` as that value is ignored. 
- -For clarity, the group you create in {product-title} should use attributes other -than the distinguished name whenever possible for user- or administrator-facing -fields. For example, identify the users of an {product-title} group by their e-mail, and use the -name of the group as the common name. The following configuration file creates -these relationships: - -.LDAP sync configuration that uses augmented Active Directory schema with nested members: `augmented_active_directory_config_nested.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -url: ldap://LDAP_SERVICE_IP:389 -augmentedActiveDirectory: - groupsQuery: <1> - derefAliases: never - pageSize: 0 - groupUIDAttribute: dn <2> - groupNameAttributes: [ cn ] <3> - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - filter: (objectclass=person) - pageSize: 0 - userNameAttributes: [ mail ] <4> - groupMembershipAttributes: [ "memberOf:1.2.840.113556.1.4.1941:" ] <5> ----- -<1> `groupsQuery` filters cannot be specified. The `groupsQuery` base DN and scope -values are ignored. `groupsQuery` must set a valid `derefAliases`. -<2> The attribute that uniquely identifies a group on the LDAP server. It must be set to `dn`. -<3> The attribute to use as the name of the group. -<4> The attribute to use as the name of the user in the {product-title} group -record. `mail` or `sAMAccountName` are preferred choices in most installations. -<5> The attribute on the user that stores the membership information. Note the use -of https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[`LDAP_MATCHING_RULE_IN_CHAIN`]. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `augmented_active_directory_config_nested.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync \ - 'cn=admins,ou=groups,dc=example,dc=com' \ - --sync-config=augmented_active_directory_config_nested.yaml \ - --confirm ----- -+ -[NOTE] -==== -You must explicitly whitelist the `cn=admins,ou=groups,dc=example,dc=com` group. -==== -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `augmented_active_directory_config_nested.yaml` file -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 <1> - openshift.io/ldap.uid: cn=admins,ou=groups,dc=example,dc=com <2> - openshift.io/ldap.url: LDAP_SERVER_IP:389 <3> - creationTimestamp: - name: admins <4> -users: <5> -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The last time this {product-title} group was synchronized with the LDAP server, in ISO 6801 format. -<2> The unique identifier for the group on the LDAP server. -<3> The IP address and host of the LDAP server where this group's record is stored. -<4> The name of the group as specified by the sync file. -<5> The users that are members of the group, named as specified by the sync file. -Note that members of nested groups are included since the group membership was -flattened by the Microsoft Active Directory Server. 
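After a nested sync such as the one above, you can confirm that the membership was flattened as expected by inspecting the group object directly. This verification step is not part of the original module; it is a minimal check that assumes the `admins` group name from this example:

[source,terminal]
----
$ oc get group admins -o yaml
----

The `users` list in the output should include `jim.adams@example.com`, even though `Jim` is a member only of the nested `otheradmins` group.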
diff --git a/modules/ldap-syncing-pruning.adoc b/modules/ldap-syncing-pruning.adoc deleted file mode 100644 index d1c70f572b47..000000000000 --- a/modules/ldap-syncing-pruning.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -[id="ldap-syncing-pruning_{context}"] -= Running a group pruning job - -An administrator can also choose to remove groups from {product-title} records -if the records on the LDAP server that created them are no longer present. The -prune job will accept the same sync configuration file and whitelists or blacklists -as used for the sync job. - -For example: - -[source,terminal] ----- -$ oc adm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm ----- - -[source,terminal] ----- -$ oc adm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm ----- - -[source,terminal] ----- -$ oc adm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm ----- diff --git a/modules/ldap-syncing-rfc2307-user-defined-error.adoc b/modules/ldap-syncing-rfc2307-user-defined-error.adoc deleted file mode 100644 index efed1ad1f4b2..000000000000 --- a/modules/ldap-syncing-rfc2307-user-defined-error.adoc +++ /dev/null @@ -1,151 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-rfc2307-user-defined-error_{context}"] -= Syncing groups using RFC 2307 with user-defined error tolerances - -By default, if the groups being synced contain members whose entries are outside -of the scope defined in the member query, the group sync fails with an error: - ----- -Error determining LDAP group membership for "<group>": membership lookup for user "<user>" in group "<group>" failed because of "search for entry with dn="<user-dn>" would search outside of the base dn specified (dn="<base-dn>")". ----- - -This often indicates a misconfigured `baseDN` in the `usersQuery` field. -However, in cases where the `baseDN` intentionally does not contain some of the -members of the group, setting `tolerateMemberOutOfScopeErrors: true` allows -the group sync to continue. Out of scope members will be ignored. - -Similarly, when the group sync process fails to locate a member for a group, it -fails outright with errors: - ----- -Error determining LDAP group membership for "<group>": membership lookup for user "<user>" in group "<group>" failed because of "search for entry with base dn="<user-dn>" refers to a non-existent entry". -Error determining LDAP group membership for "<group>": membership lookup for user "<user>" in group "<group>" failed because of "search for entry with base dn="<user-dn>" and filter "<filter>" did not return any results". ----- - -This often indicates a misconfigured `usersQuery` field. However, in cases -where the group contains member entries that are known to be missing, setting -`tolerateMemberNotFoundErrors: true` allows the group sync to continue. -Problematic members will be ignored. - -[WARNING] -==== -Enabling error tolerances for the LDAP group sync causes the sync process to -ignore problematic member entries. If the LDAP group sync is not configured -correctly, this could result in synced {product-title} groups missing members. 
-==== - -.LDAP entries that use RFC 2307 schema with problematic group membership: `rfc2307_problematic_users.ldif` -[source,ldif] ----- - dn: ou=users,dc=example,dc=com - objectClass: organizationalUnit - ou: users - dn: cn=Jane,ou=users,dc=example,dc=com - objectClass: person - objectClass: organizationalPerson - objectClass: inetOrgPerson - cn: Jane - sn: Smith - displayName: Jane Smith - mail: jane.smith@example.com - dn: cn=Jim,ou=users,dc=example,dc=com - objectClass: person - objectClass: organizationalPerson - objectClass: inetOrgPerson - cn: Jim - sn: Adams - displayName: Jim Adams - mail: jim.adams@example.com - dn: ou=groups,dc=example,dc=com - objectClass: organizationalUnit - ou: groups - dn: cn=admins,ou=groups,dc=example,dc=com - objectClass: groupOfNames - cn: admins - owner: cn=admin,dc=example,dc=com - description: System Administrators - member: cn=Jane,ou=users,dc=example,dc=com - member: cn=Jim,ou=users,dc=example,dc=com - member: cn=INVALID,ou=users,dc=example,dc=com <1> - member: cn=Jim,ou=OUTOFSCOPE,dc=example,dc=com <2> ----- -<1> A member that does not exist on the LDAP server. -<2> A member that may exist, but is not under the `baseDN` in the -user query for the sync job. - -To tolerate the errors in the above example, the following additions to -your sync configuration file must be made: - -.LDAP sync configuration that uses RFC 2307 schema tolerating errors: `rfc2307_config_tolerating.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -url: ldap://LDAP_SERVICE_IP:389 -rfc2307: - groupsQuery: - baseDN: "ou=groups,dc=example,dc=com" - scope: sub - derefAliases: never - groupUIDAttribute: dn - groupNameAttributes: [ cn ] - groupMembershipAttributes: [ member ] - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - userUIDAttribute: dn <1> - userNameAttributes: [ mail ] - tolerateMemberNotFoundErrors: true <2> - tolerateMemberOutOfScopeErrors: true <3> ----- -<1> The attribute that uniquely identifies a user on the LDAP server. You -cannot specify `usersQuery` filters when using DN for userUIDAttribute. For -fine-grained filtering, use the whitelist / blacklist method. -<2> When `true`, the sync job tolerates groups for which some members were not -found, and members whose LDAP entries are not found are ignored. The -default behavior for the sync job is to fail if a member of a group is not -found. -<3> When `true`, the sync job tolerates groups for which some members are outside -the user scope given in the `usersQuery` base DN, and members outside the member -query scope are ignored. The default behavior for the sync job is to fail if a -member of a group is out of scope. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `rfc2307_config_tolerating.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=rfc2307_config_tolerating.yaml --confirm ----- -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `rfc2307_config.yaml` file -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 - openshift.io/ldap.uid: cn=admins,ou=groups,dc=example,dc=com - openshift.io/ldap.url: LDAP_SERVER_IP:389 - creationTimestamp: - name: admins -users: <1> -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The users that are members of the group, as specified by the sync file. 
-Members for which lookup encountered tolerated errors are absent. diff --git a/modules/ldap-syncing-rfc2307-user-defined.adoc b/modules/ldap-syncing-rfc2307-user-defined.adoc deleted file mode 100644 index b6030661fd6b..000000000000 --- a/modules/ldap-syncing-rfc2307-user-defined.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-rfc2307-user-defined_{context}"] -= Syncing groups using the RFC2307 schema with user-defined name mappings - -When syncing groups with user-defined name mappings, the configuration file -changes to contain these mappings as shown below. - -.LDAP sync configuration that uses RFC 2307 schema with user-defined name mappings: `rfc2307_config_user_defined.yaml` -[source,yaml] ----- -kind: LDAPSyncConfig -apiVersion: v1 -groupUIDNameMapping: - "cn=admins,ou=groups,dc=example,dc=com": Administrators <1> -rfc2307: - groupsQuery: - baseDN: "ou=groups,dc=example,dc=com" - scope: sub - derefAliases: never - pageSize: 0 - groupUIDAttribute: dn <2> - groupNameAttributes: [ cn ] <3> - groupMembershipAttributes: [ member ] - usersQuery: - baseDN: "ou=users,dc=example,dc=com" - scope: sub - derefAliases: never - pageSize: 0 - userUIDAttribute: dn <4> - userNameAttributes: [ mail ] - tolerateMemberNotFoundErrors: false - tolerateMemberOutOfScopeErrors: false ----- -<1> The user-defined name mapping. -<2> The unique identifier attribute that is used for the keys in the -user-defined name mapping. You cannot specify `groupsQuery` filters when using -DN for groupUIDAttribute. For fine-grained filtering, use the whitelist / blacklist method. -<3> The attribute to name {product-title} groups with if their unique identifier is -not in the user-defined name mapping. -<4> The attribute that uniquely identifies a user on the LDAP server. You -cannot specify `usersQuery` filters when using DN for userUIDAttribute. For -fine-grained filtering, use the whitelist / blacklist method. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `rfc2307_config_user_defined.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=rfc2307_config_user_defined.yaml --confirm ----- -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `rfc2307_config_user_defined.yaml` file -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 - openshift.io/ldap.uid: cn=admins,ou=groups,dc=example,dc=com - openshift.io/ldap.url: LDAP_SERVER_IP:389 - creationTimestamp: - name: Administrators <1> -users: -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The name of the group as specified by the user-defined name mapping. diff --git a/modules/ldap-syncing-rfc2307.adoc b/modules/ldap-syncing-rfc2307.adoc deleted file mode 100644 index 3f2aa3bb074a..000000000000 --- a/modules/ldap-syncing-rfc2307.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-rfc2307_{context}"] -= Syncing groups using the RFC 2307 schema - -For the RFC 2307 schema, the following examples synchronize a group named `admins` that has two -members: `Jane` and `Jim`. The examples explain: - -* How the group and users are added to the LDAP server. 
-* What the resulting group record in {product-title} will be after synchronization. - -[NOTE] -==== -These examples assume that all users are direct members of their respective -groups. Specifically, no groups have other groups as members. See -the Nested Membership Sync Example for information on -how to sync nested groups. -==== - -In the RFC 2307 schema, both users (Jane and Jim) and groups exist on the LDAP -server as first-class entries, and group membership is stored in attributes on -the group. The following snippet of `ldif` defines the users and group for this -schema: - -.LDAP entries that use RFC 2307 schema: `rfc2307.ldif` -[source,ldif] ----- - dn: ou=users,dc=example,dc=com - objectClass: organizationalUnit - ou: users - dn: cn=Jane,ou=users,dc=example,dc=com - objectClass: person - objectClass: organizationalPerson - objectClass: inetOrgPerson - cn: Jane - sn: Smith - displayName: Jane Smith - mail: jane.smith@example.com - dn: cn=Jim,ou=users,dc=example,dc=com - objectClass: person - objectClass: organizationalPerson - objectClass: inetOrgPerson - cn: Jim - sn: Adams - displayName: Jim Adams - mail: jim.adams@example.com - dn: ou=groups,dc=example,dc=com - objectClass: organizationalUnit - ou: groups - dn: cn=admins,ou=groups,dc=example,dc=com <1> - objectClass: groupOfNames - cn: admins - owner: cn=admin,dc=example,dc=com - description: System Administrators - member: cn=Jane,ou=users,dc=example,dc=com <2> - member: cn=Jim,ou=users,dc=example,dc=com ----- -<1> The group is a first-class entry in the LDAP server. -<2> Members of a group are listed with an identifying reference as attributes on -the group. - -.Prerequisites - -* Create the configuration file. - -.Procedure - -* Run the sync with the `rfc2307_config.yaml` file: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=rfc2307_config.yaml --confirm ----- -+ -{product-title} creates the following group record as a result of the above sync -operation: -+ -.{product-title} group created by using the `rfc2307_config.yaml` file -[source,yaml] ----- -apiVersion: user.openshift.io/v1 -kind: Group -metadata: - annotations: - openshift.io/ldap.sync-time: 2015-10-13T10:08:38-0400 <1> - openshift.io/ldap.uid: cn=admins,ou=groups,dc=example,dc=com <2> - openshift.io/ldap.url: LDAP_SERVER_IP:389 <3> - creationTimestamp: - name: admins <4> -users: <5> -- jane.smith@example.com -- jim.adams@example.com ----- -<1> The last time this {product-title} group was synchronized with the LDAP server, in ISO 6801 -format. -<2> The unique identifier for the group on the LDAP server. -<3> The IP address and host of the LDAP server where this group's record is -stored. -<4> The name of the group as specified by the sync file. -<5> The users that are members of the group, named as specified by the sync file. diff --git a/modules/ldap-syncing-running-all-ldap.adoc b/modules/ldap-syncing-running-all-ldap.adoc deleted file mode 100644 index 6250c8612069..000000000000 --- a/modules/ldap-syncing-running-all-ldap.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing-groups.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-running-all-ldap_{context}"] -= Syncing the LDAP server with {product-title} - -You can sync all groups from the LDAP server with {product-title}. - -.Prerequisites - -* Create a sync configuration file. 
- -.Procedure - -* To sync all groups from the LDAP server with {product-title}: -+ -[source,terminal] ----- -$ oc adm groups sync --sync-config=config.yaml --confirm ----- -+ -[NOTE] -==== -By default, all group synchronization operations are dry-run, so you -must set the `--confirm` flag on the `oc adm groups sync` command to make -changes to {product-title} group records. -==== diff --git a/modules/ldap-syncing-running-openshift.adoc b/modules/ldap-syncing-running-openshift.adoc deleted file mode 100644 index de584140c0a6..000000000000 --- a/modules/ldap-syncing-running-openshift.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing-groups.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-running-openshift_{context}"] -= Syncing {product-title} groups with the LDAP server - -You can sync all groups already in {product-title} that correspond to groups in the -LDAP server specified in the configuration file. - -.Prerequisites - -* Create a sync configuration file. - -.Procedure - -* To sync {product-title} groups with the LDAP server: -+ -[source,terminal] ----- -$ oc adm groups sync --type=openshift --sync-config=config.yaml --confirm ----- -+ -[NOTE] -==== -By default, all group synchronization operations are dry-run, so you -must set the `--confirm` flag on the `oc adm groups sync` command to make -changes to {product-title} group records. -==== diff --git a/modules/ldap-syncing-running-subset.adoc b/modules/ldap-syncing-running-subset.adoc deleted file mode 100644 index 0ce682af850a..000000000000 --- a/modules/ldap-syncing-running-subset.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing-groups.adoc - -:_content-type: PROCEDURE -[id="ldap-syncing-running-subset_{context}"] -= Syncing subgroups from the LDAP server with {product-title} - -You can sync a subset of LDAP groups with {product-title} using whitelist files, -blacklist files, or both. - -[NOTE] -==== -You can use any combination of blacklist files, whitelist files, or whitelist -literals. Whitelist and blacklist files must contain one unique group identifier -per line, and you can include whitelist literals directly in the command itself. -These guidelines apply to groups found on LDAP servers as well as groups already -present in {product-title}. -==== - -.Prerequisites - -* Create a sync configuration file. - -.Procedure - -* To sync a subset of LDAP groups with {product-title}, use any of the following commands: -+ -[source,terminal] ----- -$ oc adm groups sync --whitelist=<whitelist_file> \ - --sync-config=config.yaml \ - --confirm ----- -+ -[source,terminal] ----- -$ oc adm groups sync --blacklist=<blacklist_file> \ - --sync-config=config.yaml \ - --confirm ----- -+ -[source,terminal] ----- -$ oc adm groups sync <group_unique_identifier> \ - --sync-config=config.yaml \ - --confirm ----- -+ -[source,terminal] ----- -$ oc adm groups sync <group_unique_identifier> \ - --whitelist=<whitelist_file> \ - --blacklist=<blacklist_file> \ - --sync-config=config.yaml \ - --confirm ----- -+ -[source,terminal] ----- -$ oc adm groups sync --type=openshift \ - --whitelist=<whitelist_file> \ - --sync-config=config.yaml \ - --confirm ----- -+ -[NOTE] -==== -By default, all group synchronization operations are dry-run, so you -must set the `--confirm` flag on the `oc adm groups sync` command to make -changes to {product-title} group records. 
-==== diff --git a/modules/ldap-syncing-running.adoc b/modules/ldap-syncing-running.adoc deleted file mode 100644 index 67b9c7e31a9f..000000000000 --- a/modules/ldap-syncing-running.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing-groups.adoc - -[id="ldap-syncing-running_{context}"] -= Running LDAP sync - -Once you have created a sync configuration file, -you can begin to sync. {product-title} allows administrators to perform a number of -different sync types with the same server. - diff --git a/modules/ldap-syncing-spec.adoc b/modules/ldap-syncing-spec.adoc deleted file mode 100644 index f450d8de5e9b..000000000000 --- a/modules/ldap-syncing-spec.adoc +++ /dev/null @@ -1,250 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/ldap-syncing.adoc - -[id="ldap-syncing-spec_{context}"] -= LDAP sync configuration specification - -The object specification for the configuration file is below. Note that the different schema -objects have different fields. For example, v1.ActiveDirectoryConfig has no `groupsQuery` -field whereas v1.RFC2307Config and v1.AugmentedActiveDirectoryConfig both do. - -[IMPORTANT] -==== -There is no support for binary attributes. All attribute data coming from the -LDAP server must be in the format of a UTF-8 encoded string. For example, never -use a binary attribute, such as `objectGUID`, as an ID attribute. You must use -string attributes, such as `sAMAccountName` or `userPrincipalName`, instead. -==== - -[[sync-ldap-v1-ldapsyncconfig]] -== v1.LDAPSyncConfig - -`LDAPSyncConfig` holds the necessary configuration options to define an LDAP -group sync. - -[options="header"] -|=== -|Name |Description |Schema - -|`kind` -|String value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds[] -|string - -|`apiVersion` -|Defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources[] -|string - -|`url` -|Host is the scheme, host and port of the LDAP server to connect to: `scheme://host:port` -|string - -|`bindDN` -|Optional DN to bind to the LDAP server with. -|string - -|`bindPassword` -|Optional password to bind with during the search phase. |v1.StringSource - -|`insecure` -|If `true`, indicates the connection should not use TLS. If `false`, `ldaps://` URLs connect using TLS, and `ldap://` URLs are upgraded to a TLS connection using StartTLS as specified in link:https://tools.ietf.org/html/rfc2830[]. If you set `insecure` to `true`, you cannot use `ldaps://` URL schemes. -|boolean - -|`ca` -|Optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. -|string - -|`groupUIDNameMapping` -|Optional direct mapping of LDAP group UIDs to {product-title} group names. -|object - -|`rfc2307` -|Holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members. 
-|v1.RFC2307Config - -|`activeDirectory` -|Holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of. -|v1.ActiveDirectoryConfig - -|`augmentedActiveDirectory` -|Holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory as described above, with one addition: first-class group entries exist and are used to hold metadata but not group membership. -|v1.AugmentedActiveDirectoryConfig -|=== - -[[sync-ldap-v1-stringsource]] -== v1.StringSource - -`StringSource` allows specifying a string inline, or externally via environment -variable or file. When it contains only a string value, it marshals to a simple -JSON string. - -[options="header"] -|=== -|Name |Description |Schema - -|`value` -|Specifies the cleartext value, or an encrypted value if `keyFile` is specified. -|string - -|`env` -|Specifies an environment variable containing the cleartext value, or an -encrypted value if the `keyFile` is specified. -|string - -|`file` -|References a file containing the cleartext value, or an encrypted value if a `keyFile` is specified. -|string - -|`keyFile` -|References a file containing the key to use to decrypt the value. -|string -|=== - -[[sync-ldap-v1-ldapquery]] -== v1.LDAPQuery - -`LDAPQuery` holds the options necessary to build an LDAP query. - -[options="header"] -|=== -|Name |Description |Schema - -|`baseDN` -|DN of the branch of the directory where all searches should start from. -|string - -|`scope` -|The optional scope of the search. Can be `base`: only the base object, `one`: -all objects on the base level, `sub`: the entire subtree. Defaults to `sub` -if not set. -|string - -|`derefAliases` -|The optional behavior of the search with regards to aliases. Can be `never`: -never dereference aliases, `search`: only dereference in searching, `base`: -only dereference in finding the base object, `always`: always dereference. -Defaults to `always` if not set. -|string - -|`timeout` -|Holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. If this is `0`, no client-side limit is imposed. -|integer - -|`filter` -|A valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN. -|string - -|`pageSize` -|Maximum preferred page size, measured in LDAP entries. A page size of `0` means no paging will be done. -|integer -|=== - -[[sync-ldap-v1-rfc2307config]] -== v1.RFC2307Config - -`RFC2307Config` holds the necessary configuration options to define how an LDAP -group sync interacts with an LDAP server using the RFC2307 schema. - -[options="header"] -|=== -|Name |Description |Schema - -|`groupsQuery` -|Holds the template for an LDAP query that returns group entries. -|v1.LDAPQuery - -|`groupUIDAttribute` -|Defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (`ldapGroupUID`) -|string - -|`groupNameAttributes` -|Defines which attributes on an LDAP group entry will be interpreted as its name to use for an {product-title} group. -|string array - -|`groupMembershipAttributes` -|Defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your `UserUIDAttribute`. 
-|string array - -|`usersQuery` -|Holds the template for an LDAP query that returns user entries. -|v1.LDAPQuery - -|`userUIDAttribute` -|Defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the `GroupMembershipAttributes`. -|string - -|`userNameAttributes` -|Defines which attributes on an LDAP user entry will be used, in order, as its {product-title} user name. The first attribute with a non-empty value is used. This should match your `PreferredUsername` setting for your `LDAPPasswordIdentityProvider`. The attribute to use as the name of the user in the {product-title} group -record. `mail` or `sAMAccountName` are preferred choices in most installations. -|string array - -|`tolerateMemberNotFoundErrors` -|Determines the behavior of the LDAP sync job when missing user entries are encountered. If `true`, an LDAP query for users that does not find any will be tolerated and only an error will be logged. If `false`, the LDAP sync job will fail if a query for users does not find any. The default value is `false`. Misconfigured LDAP sync jobs with this flag set to `true` can cause group membership to be removed, so it is recommended to use this flag with caution. -|boolean - -|`tolerateMemberOutOfScopeErrors` -|Determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If `true`, an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If `false`, the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to `true` can result in groups missing users, so it is recommended to use this flag with caution. -|boolean -|=== - -[[sync-ldap-v1-activedirectoryconfig]] -== v1.ActiveDirectoryConfig - -`ActiveDirectoryConfig` holds the necessary configuration options to define how -an LDAP group sync interacts with an LDAP server using the Active Directory -schema. - -[options="header"] -|=== -|Name |Description |Schema - -|`usersQuery` -|Holds the template for an LDAP query that returns user entries. -|v1.LDAPQuery - -|`userNameAttributes` -|Defines which attributes on an LDAP user entry will be interpreted as its {product-title} user name. The attribute to use as the name of the user in the {product-title} group -record. `mail` or `sAMAccountName` are preferred choices in most installations. -|string array - -|`groupMembershipAttributes` -|Defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of. -|string array -|=== - -[[sync-ldap-v1-augmentedactivedirectoryconfig]] -== v1.AugmentedActiveDirectoryConfig - -`AugmentedActiveDirectoryConfig` holds the necessary configuration options to -define how an LDAP group sync interacts with an LDAP server using the augmented -Active Directory schema. - -[options="header"] -|=== -|Name |Description |Schema - -|`usersQuery` -|Holds the template for an LDAP query that returns user entries. -|v1.LDAPQuery - -|`userNameAttributes` -|Defines which attributes on an LDAP user entry will be interpreted as its {product-title} user name. The attribute to use as the name of the user in the {product-title} group -record. `mail` or `sAMAccountName` are preferred choices in most installations. 
-|string array - -|`groupMembershipAttributes` -|Defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of. -|string array - -|`groupsQuery` -|Holds the template for an LDAP query that returns group entries. -|v1.LDAPQuery - -|`groupUIDAttribute` -|Defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (`ldapGroupUID`) -|string - -|`groupNameAttributes` -|Defines which attributes on an LDAP group entry will be interpreted as its name to use for an {product-title} group. -|string array -|=== diff --git a/modules/life-cycle-dates.adoc b/modules/life-cycle-dates.adoc deleted file mode 100644 index 8ed98f779707..000000000000 --- a/modules/life-cycle-dates.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_architecture/osd_policy/osd-life-cycle.adoc -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc - -[id="sd-life-cycle-dates_{context}"] -= Life cycle dates - -[options="header"] -|=== -|Version |General availability |End of life -|4.13 |May 17, 2023 |Sep 17, 2024 -|4.12 |Jan 17, 2023 |May 17, 2024 -|4.11 |Aug 10, 2022 |Dec 10, 2023 -|4.10 |Mar 10, 2022 |Sep 10, 2023 -|4.9 |Oct 18, 2021 |Dec 18, 2022 -|4.8 |Jul 27, 2021 |Sep 27, 2022 - -|=== diff --git a/modules/life-cycle-definitions.adoc b/modules/life-cycle-definitions.adoc deleted file mode 100644 index aea52718cc54..000000000000 --- a/modules/life-cycle-definitions.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-life-cycle-definitions_{context}"] -= Definitions - -.Version reference -[options="header"] -|=== -|Version format |Major |Minor |Patch |Major.minor.patch -| |x |y |z |x.y.z -|Example |4 |5 |21 |4.5.21 -|=== - -Major releases or X-releases:: Referred to only as _major releases_ or _X-releases_ (X.y.z). -+ --- -.Examples -* "Major release 5" -> 5.y.z -* "Major release 4" -> 4.y.z -* "Major release 3" -> 3.y.z --- - -Minor releases or Y-releases:: Referred to only as _minor releases_ or _Y-releases_ (x.Y.z). -+ --- -.Examples -* "Minor release 4" -> 4.4.z -* "Minor release 5" -> 4.5.z -* "Minor release 6" -> 4.6.z --- - -Patch releases or Z-releases:: Referred to only as _patch releases_ or _Z-releases_ (x.y.Z). -+ --- -.Examples -* "Patch release 14 of minor release 5" -> 4.5.14 -* "Patch release 25 of minor release 5" -> 4.5.25 -* "Patch release 26 of minor release 6" -> 4.6.26 --- diff --git a/modules/life-cycle-install.adoc b/modules/life-cycle-install.adoc deleted file mode 100644 index a0415581c610..000000000000 --- a/modules/life-cycle-install.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-install-policy_{context}"] -= Installation policy - -While Red Hat recommends installation of the latest support release, {product-title} supports -installation of any supported release as covered by the preceding policy. 
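Tying the LDAP sync reference above together, the following is a minimal sketch of an `LDAPSyncConfig` that exercises the `v1.RFC2307Config`, `v1.LDAPQuery`, and `v1.StringSource` fields documented earlier. The server URL, bind DN, file path, base DNs, and attribute names are placeholder values only, not recommendations for any particular directory:

[source,yaml]
----
kind: LDAPSyncConfig
apiVersion: v1
url: ldap://ldap.example.com:389            # placeholder LDAP server
bindDN: cn=admin,dc=example,dc=com          # DN used to bind for the search
bindPassword:                               # v1.StringSource: value, env, file, or keyFile
  file: "/etc/secrets/bindPassword"
rfc2307:
  groupsQuery:                              # v1.LDAPQuery for group entries
    baseDN: "ou=groups,dc=example,dc=com"
    scope: sub
    derefAliases: never
    pageSize: 0
  groupUIDAttribute: dn
  groupNameAttributes: [ cn ]
  groupMembershipAttributes: [ member ]
  usersQuery:                               # v1.LDAPQuery for user entries
    baseDN: "ou=users,dc=example,dc=com"
    scope: sub
    derefAliases: never
    pageSize: 0
  userUIDAttribute: dn
  userNameAttributes: [ mail ]
  tolerateMemberNotFoundErrors: false
  tolerateMemberOutOfScopeErrors: false
----

A file like this is typically passed to group synchronization with `oc adm groups sync --sync-config=<file>`; the exact invocation is outside the scope of this reference.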
diff --git a/modules/life-cycle-limited-support.adoc b/modules/life-cycle-limited-support.adoc deleted file mode 100644 index 745c0189d9ae..000000000000 --- a/modules/life-cycle-limited-support.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-limited-support_{context}"] -= Limited support status - -When a cluster transitions to a _Limited Support_ status, Red Hat no longer proactively monitors the cluster, the SLA is no longer applicable, and credits requested against the SLA are denied. It does not mean that you no longer have product support. In some cases, the cluster can return to a fully-supported status if you remediate the violating factors. However, in other cases, you might have to delete and recreate the cluster. - -A cluster might transition to a Limited Support status for many reasons, including the following scenarios: - -If you do not upgrade a cluster to a supported version before the end-of-life date:: Red Hat does not make any runtime or SLA guarantees for versions after their end-of-life date. To receive continued support, upgrade the cluster to a supported version prior to the end-of-life date. If you do not upgrade the cluster prior to the end-of-life date, the cluster transitions to a Limited Support status until it is upgraded to a supported version. -+ -Red Hat provides commercially reasonable support to upgrade from an unsupported version to a supported version. However, if a supported upgrade path is no longer available, you might have to create a new cluster and migrate your workloads. - -If you remove or replace any native {product-title} components or any other component that is installed and managed by Red Hat:: If cluster administrator permissions were used, Red Hat is not responsible for any of your or your authorized users’ actions, including those that affect infrastructure services, service availability, or data loss. If Red Hat detects any such actions, the cluster might transition to a Limited Support status. Red Hat notifies you of the status change and you should either revert the action or create a support case to explore remediation steps that might require you to delete and recreate the cluster. - -If you have questions about a specific action that might cause a cluster to transition to a Limited Support status or need further assistance, open a support ticket. diff --git a/modules/life-cycle-major-versions.adoc b/modules/life-cycle-major-versions.adoc deleted file mode 100644 index 8a5ac28c93cc..000000000000 --- a/modules/life-cycle-major-versions.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-major-versions_{context}"] -= Major versions (X.y.z) - -Major versions of {product-title}, for example version 4, are supported for one year following the -release of a subsequent major version or the retirement of the product. - -.Example -* If version 5 were made available on {product-title} on January 1, version 4 would be allowed to - continue running on managed clusters for 12 months, until December 31. After this time, clusters - would need to be upgraded or migrated to version 5. 
diff --git a/modules/life-cycle-mandatory-upgrades.adoc b/modules/life-cycle-mandatory-upgrades.adoc deleted file mode 100644 index 93e6cf3b0e37..000000000000 --- a/modules/life-cycle-mandatory-upgrades.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-mandatory-upgrades_{context}"] -= Mandatory upgrades - -In the event that a Critical or Important CVE, or other bug identified by Red Hat, significantly -impacts the security or stability of the cluster, the customer must upgrade to the next supported -patch release within two link:https://access.redhat.com/articles/2623321[business days]. - -In extreme circumstances and based on Red Hat's assessment of the CVE criticality to the -environment, Red Hat will notify customers that they have two link:https://access.redhat.com/articles/2623321[business days] to schedule or manually update their cluster to the latest, secure patch release. In the case that an update has not been performed, Red Hat will automatically update the cluster to the latest, secure patch release to mitigate potential security breach(es) or instability. Red Hat may, at its own discretion, temporarily delay an automated update if requested by a customer through a link:https://access.redhat.com/support[support case]. \ No newline at end of file diff --git a/modules/life-cycle-minor-versions.adoc b/modules/life-cycle-minor-versions.adoc deleted file mode 100644 index 114e4d59092e..000000000000 --- a/modules/life-cycle-minor-versions.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-minor-versions_{context}"] -= Minor versions (x.Y.z) - -Starting with the 4.8 OpenShift Container Platform minor version, Red Hat supports all minor -versions for at least a 14 month period following general availability of the given minor version. Patch -versions are not affected by the support period. - -Customers are notified 60, 30, and 15 days prior to the end of the support period. Clusters must be upgraded to -a supported minor version prior to the end of the support period, or the cluster will enter -a "Limited Support" status. - -.Example -. A customer's cluster is currently running on 4.8.14. The 4.8 minor version became generally - available on July 27, 2021. -. On July 29, August 28, and September 12, 2022, the customer is notified that their cluster will enter "Limited Support" status - on September 27, 2022 if the cluster has not already been upgraded to a supported minor version. -. The cluster must be upgraded to 4.9 or later by September 27, 2022. -. If the upgrade has not been performed, the cluster will be flagged as being in a "Limited Support" status. 
diff --git a/modules/life-cycle-overview.adoc b/modules/life-cycle-overview.adoc deleted file mode 100644 index b9fc3b5dfd0d..000000000000 --- a/modules/life-cycle-overview.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -:_content-type: CONCEPT -[id="life-cycle-overview_{context}"] -= Overview - -Red Hat provides a published product life cycle for {product-title} in order for customers and -partners to effectively plan, deploy, and support their applications running on the platform. Red -Hat publishes this life cycle in order to provide as much transparency as possible and might make -exceptions from these policies as conflicts arise. - -{product-title} is a managed instance of Red Hat OpenShift and maintains an independent release -schedule. More details about the managed offering can be found in the {product-title} service -definition. The availability of Security Advisories and Bug Fix Advisories for a specific version -are dependent upon the Red Hat OpenShift Container Platform life cycle policy and subject to the -{product-title} maintenance schedule. diff --git a/modules/life-cycle-patch-versions.adoc b/modules/life-cycle-patch-versions.adoc deleted file mode 100644 index c8772bff5dda..000000000000 --- a/modules/life-cycle-patch-versions.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-patch-versions_{context}"] -= Patch versions (x.y.Z) - -During the period in which a minor version is supported, Red Hat supports all OpenShift Container -Platform patch versions unless otherwise specified. - -For reasons of platform security and stability, a patch release may be deprecated, which would -prevent installations of that release and trigger mandatory upgrades off that release. - -.Example -. 4.7.6 is found to contain a critical CVE. -. Any releases impacted by the CVE will be removed from the supported patch release list. In - addition, any clusters running 4.7.6 will be scheduled for automatic upgrades within 48 hours. diff --git a/modules/life-cycle-supported-versions.adoc b/modules/life-cycle-supported-versions.adoc deleted file mode 100644 index fa6f54927c85..000000000000 --- a/modules/life-cycle-supported-versions.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc -// * osd_architecture/osd_policy/osd-life-cycle.adoc - -[id="rosa-supported-versions_{context}"] -= Supported versions exception policy - -Red Hat reserves the right to add or remove new or existing versions, or delay upcoming minor -release versions, that have been identified to have one or more critical production impacting bugs -or security issues without advance notice. 
diff --git a/modules/listing-alerts-that-are-firing.adoc deleted file mode 100644 index 7bf46e0fc18b..000000000000 --- a/modules/listing-alerts-that-are-firing.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -:_content-type: PROCEDURE -[id="listing-alerts-that-are-firing_{context}"] -= Listing alerts that are firing - -Alerts provide notifications when a set of defined conditions are true in an {product-title} cluster. You can review the alerts that are firing in your cluster by using the Alerting UI in the {product-title} web console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. In the *Administrator* perspective, navigate to the *Observe* -> *Alerting* -> *Alerts* page. - -. Review the alerts that are firing, including their *Severity*, *State*, and *Source*. - -. Select an alert to view more detailed information in the *Alert Details* page. diff --git a/modules/load-and-merge-rules.adoc deleted file mode 100644 index c8d8beaa0256..000000000000 --- a/modules/load-and-merge-rules.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/managing-cli-profiles.adoc - -:_content-type: CONCEPT -[id="load-and-merge-rules_{context}"] -= Load and merge rules - -The following rules determine the loading and merging order for the CLI configuration when you issue CLI operations: - -* CLI config files are retrieved from your workstation, using the following hierarchy and merge rules: - -** If the `--config` option is set, then only that file is loaded. The flag is set once and no merging takes place. -** If the `$KUBECONFIG` environment variable is set, then it is used. The variable can be a list of paths, and if so, the paths are merged together. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list. -** Otherwise, the `~/.kube/config` file is used and no merging takes place. - -* The context to use is determined based on the first match in the following flow: - -** The value of the `--context` option. -** The `current-context` value from the CLI config file. -** An empty value is allowed at this stage. - -* The user and cluster to use are determined. At this point, you may or may not have a context; they are built based on the first match in the following flow, which is run once for the user and once for the cluster: -** The value of the `--user` option for the user name and the `--cluster` option for the cluster name. -** If the `--context` option is present, then use the context's value. -** An empty value is allowed at this stage. -* The actual cluster information to use is determined. At this point, you may or may not have cluster information. Each piece of the cluster information is built based on the first match in the following flow: -** The values of any of the following command line options: -*** `--server` -*** `--api-version` -*** `--certificate-authority` -*** `--insecure-skip-tls-verify` -** If cluster information and a value for the attribute is present, then use it. -** If you do not have a server location, then there is an error. -* The actual user information to use is determined.
Users are built using the same rules as clusters, except that you can only have one authentication technique per user; conflicting techniques cause the operation to fail. Command line options take precedence over config file values. Valid command line options are: -** `--auth-path` -** `--client-certificate` -** `--client-key` -** `--token` -* For any information that is still missing, default values are used and prompts are given for additional information. diff --git a/modules/log-verbosity-descriptions.adoc b/modules/log-verbosity-descriptions.adoc deleted file mode 100644 index 4c618949df16..000000000000 --- a/modules/log-verbosity-descriptions.adoc +++ /dev/null @@ -1,32 +0,0 @@ -[id="log-verbosity-descriptions_{context}"] -= Log verbosity descriptions - -[cols="2a,8a",options="header"] -|=== -|Log verbosity -|Description - -|`--v=0` -|Always visible to an Operator. - -|`--v=1` -|A reasonable default log level if you do not want verbosity. - -|`--v=2` -|Useful steady state information about the service and important log messages that might correlate to significant changes in the system. This is the recommended default log level. - -|`--v=3` -|Extended information about changes. - -|`--v=4` -|Debug level verbosity. - -|`--v=6` -|Display requested resources. - -|`--v=7` -|Display HTTP request headers. - -|`--v=8` -|Display HTTP request contents. -|=== diff --git a/modules/logging-5.6-api-ref.adoc b/modules/logging-5.6-api-ref.adoc deleted file mode 100644 index fc9789b1f48d..000000000000 --- a/modules/logging-5.6-api-ref.adoc +++ /dev/null @@ -1,1360 +0,0 @@ -// Module included in the following assemblies: -// -// Note: This content is automatically generated from source, do not edit. -:_content-type: REFERENCE -[id="logging-5-6-api-ref"] -= Logging 5.6 API reference -:toc: -:toclevels: 4 - -== ClusterLogForwarder -ClusterLogForwarder is an API to configure forwarding logs. - -You configure forwarding by specifying a list of `pipelines`, -which forward from a set of named inputs to a set of named outputs. - -There are built-in input names for common log categories, and you can -define custom inputs to do additional filtering. - -There is a built-in output name for the default openshift log store, but -you can define your own outputs with a URL and other connection information -to forward logs to other stores or processors, inside or outside the cluster. - -For more details see the documentation on the API fields. - -[options="header"] -|====================== -|Property|Type|Description - -|spec|object| Specification of the desired behavior of ClusterLogForwarder -|status|object| Status of the ClusterLogForwarder -|====================== - -=== .spec -==== Description -ClusterLogForwarderSpec defines how logs should be forwarded to remote targets. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|inputs|array| *(optional)* Inputs are named filters for log messages to be forwarded. -|outputDefaults|object| *(optional)* DEPRECATED OutputDefaults specify forwarder config explicitly for the default store. -|outputs|array| *(optional)* Outputs are named destinations for log messages. -|pipelines|array| Pipelines forward the messages selected by a set of inputs to a set of outputs. -|====================== - -=== .spec.inputs[] -==== Description -InputSpec defines a selector of log messages. 
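For illustration only, a named input that selects application logs, wired to the built-in `default` output through a pipeline, might be sketched as follows; the resource name, namespace, and label values are placeholders:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: instance
  namespace: openshift-logging
spec:
  inputs:
  - name: my-app-logs                # custom input, referenced by pipelines through inputRefs
    application:
      namespaces:
      - my-project                   # collect application logs from this namespace only
      selector:
        matchLabels:
          app: my-app                # and only from pods carrying this label
  pipelines:
  - name: forward-my-app-logs
    inputRefs:
    - my-app-logs
    outputRefs:
    - default                        # built-in output name for the default log store
----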
- -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|application|object| *(optional)* Application, if present, enables named set of `application` logs that -|name|string| Name used to refer to the input of a `pipeline`. -|====================== - -=== .spec.inputs[].application -==== Description -Application log selector. -All conditions in the selector must be satisfied (logical AND) to select logs. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|namespaces|array| *(optional)* Namespaces from which to collect application logs. -|selector|object| *(optional)* Selector for logs from pods with matching labels. -|====================== - -=== .spec.inputs[].application.namespaces[] -==== Description - -===== Type -* array - -=== .spec.inputs[].application.selector -==== Description -A label selector is a label query over a set of resources. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|matchLabels|object| *(optional)* matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -|====================== - -=== .spec.inputs[].application.selector.matchLabels -==== Description - -===== Type -* object - -=== .spec.outputDefaults -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearch|object| *(optional)* Elasticsearch OutputSpec default values -|====================== - -=== .spec.outputDefaults.elasticsearch -==== Description -ElasticsearchStructuredSpec is spec related to structured log changes to determine the elasticsearch index - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|enableStructuredContainerLogs|bool| *(optional)* EnableStructuredContainerLogs enables multi-container structured logs to allow -|structuredTypeKey|string| *(optional)* StructuredTypeKey specifies the metadata key to be used as name of elasticsearch index -|structuredTypeName|string| *(optional)* StructuredTypeName specifies the name of elasticsearch schema -|====================== - -=== .spec.outputs[] -==== Description -Output defines a destination for log messages. - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|syslog|object| *(optional)* -|fluentdForward|object| *(optional)* -|elasticsearch|object| *(optional)* -|kafka|object| *(optional)* -|cloudwatch|object| *(optional)* -|loki|object| *(optional)* -|googleCloudLogging|object| *(optional)* -|splunk|object| *(optional)* -|name|string| Name used to refer to the output from a `pipeline`. -|secret|object| *(optional)* Secret for authentication. -|tls|object| TLS contains settings for controlling options on TLS client connections. -|type|string| Type of output plugin. -|url|string| *(optional)* URL to send log records to. -|====================== - -=== .spec.outputs[].secret -==== Description -OutputSecretSpec is a secret reference containing name only, no namespace. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|name|string| Name of a secret in the namespace configured for log forwarder secrets. -|====================== - -=== .spec.outputs[].tls -==== Description -OutputTLSSpec contains options for TLS connections that are agnostic to the output type. 
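As a sketch only, an output that combines these TLS settings with a named secret might look like the following; the output name, URL, and secret name are placeholders:

[source,yaml]
----
spec:
  outputs:
  - name: my-loki                     # referenced by pipelines through outputRefs
    type: loki
    url: https://loki.example.com:3100
    secret:
      name: loki-credentials          # OutputSecretSpec: name only, no namespace
    tls:
      insecureSkipVerify: false       # keep server certificate verification enabled
----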
- -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|insecureSkipVerify|bool| If InsecureSkipVerify is true, then the TLS client will be configured to ignore errors with certificates. -|====================== - -=== .spec.pipelines[] -==== Description -PipelinesSpec link a set of inputs to a set of outputs. - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|detectMultilineErrors|bool| *(optional)* DetectMultilineErrors enables multiline error detection of container logs -|inputRefs|array| InputRefs lists the names (`input.name`) of inputs to this pipeline. -|labels|object| *(optional)* Labels applied to log records passing through this pipeline. -|name|string| *(optional)* Name is optional, but must be unique in the `pipelines` list if provided. -|outputRefs|array| OutputRefs lists the names (`output.name`) of outputs from this pipeline. -|parse|string| *(optional)* Parse enables parsing of log entries into structured logs -|====================== - -=== .spec.pipelines[].inputRefs[] -==== Description - -===== Type -* array - -=== .spec.pipelines[].labels -==== Description - -===== Type -* object - -=== .spec.pipelines[].outputRefs[] -==== Description - -===== Type -* array - -=== .status -==== Description -ClusterLogForwarderStatus defines the observed state of ClusterLogForwarder - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|conditions|object| Conditions of the log forwarder. -|inputs|Conditions| Inputs maps input name to condition of the input. -|outputs|Conditions| Outputs maps output name to condition of the output. -|pipelines|Conditions| Pipelines maps pipeline name to condition of the pipeline. -|====================== - -=== .status.conditions -==== Description - -===== Type -* object - -=== .status.inputs -==== Description - -===== Type -* Conditions - -=== .status.outputs -==== Description - -===== Type -* Conditions - -=== .status.pipelines -==== Description - -===== Type -* Conditions== ClusterLogging -A Red Hat OpenShift Logging instance. ClusterLogging is the Schema for the clusterloggings API - -[options="header"] -|====================== -|Property|Type|Description - -|spec|object| Specification of the desired behavior of ClusterLogging -|status|object| Status defines the observed state of ClusterLogging -|====================== - -=== .spec -==== Description -ClusterLoggingSpec defines the desired state of ClusterLogging - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|collection|object| Specification of the Collection component for the cluster -|curation|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of the Curation component for the cluster -|forwarder|object| **(DEPRECATED)** *(optional)* Deprecated. 
Specification for Forwarder component for the cluster -|logStore|object| *(optional)* Specification of the Log Storage component for the cluster -|managementState|string| *(optional)* Indicator if the resource is 'Managed' or 'Unmanaged' by the operator -|visualization|object| *(optional)* Specification of the Visualization component for the cluster -|====================== - -=== .spec.collection -==== Description -This is the struct that will contain information pertinent to Log and event collection - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| *(optional)* The resource requirements for the collector -|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. -|tolerations|array| *(optional)* Define the tolerations the Pods will accept -|fluentd|object| *(optional)* Fluentd represents the configuration for forwarders of type fluentd. -|logs|object| **(DEPRECATED)** *(optional)* Deprecated. Specification of Log Collection for the cluster -|type|string| *(optional)* The type of Log Collection to configure -|====================== - -=== .spec.collection.fluentd -==== Description -FluentdForwarderSpec represents the configuration for forwarders of type fluentd. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|buffer|object| -|inFile|object| -|====================== - -=== .spec.collection.fluentd.buffer -==== Description -FluentdBufferSpec represents a subset of fluentd buffer parameters to tune -the buffer configuration for all fluentd outputs. It supports a subset of -parameters to configure buffer and queue sizing, flush operations and retry -flushing. - -For general parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#buffering-parameters - -For flush parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#flushing-parameters - -For retry parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#retries-parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be -|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush -|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. The mode -|flushThreadCount|int| *(optional)* FlushThreadCount reprents the number of threads used by the fluentd buffer -|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to -|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff -|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up -|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can -|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush -|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd -|====================== - -=== .spec.collection.fluentd.inFile -==== Description -FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters -to tune the configuration for all fluentd in-tail inputs. 
- -For general parameters refer to: -https://docs.fluentd.org/input/tail#parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation -|====================== - -=== .spec.collection.logs -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentd|object| Specification of the Fluentd Log Collection component -|type|string| The type of Log Collection to configure -|====================== - -=== .spec.collection.logs.fluentd -==== Description -CollectorSpec is spec to define scheduling and resources for a collector - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| *(optional)* Define which Nodes the Pods are scheduled on. -|resources|object| *(optional)* The resource requirements for the collector -|tolerations|array| *(optional)* Define the tolerations the Pods will accept -|====================== - -=== .spec.collection.logs.fluentd.nodeSelector -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.collection.logs.fluentd.resources.limits -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.resources.requests -==== Description - -===== Type -* object - -=== .spec.collection.logs.fluentd.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.collection.logs.fluentd.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.curation -==== Description -This is the struct that will contain information pertinent to Log curation (Curator) - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|curator|object| The specification of curation to configure -|type|string| The kind of curation to configure -|====================== - -=== .spec.curation.curator -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| Define which Nodes the Pods are scheduled on. -|resources|object| *(optional)* The resource requirements for Curator -|schedule|string| The cron schedule that the Curator job is run. 
Defaults to "30 3 * * *" -|tolerations|array| -|====================== - -=== .spec.curation.curator.nodeSelector -==== Description - -===== Type -* object - -=== .spec.curation.curator.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.curation.curator.resources.limits -==== Description - -===== Type -* object - -=== .spec.curation.curator.resources.requests -==== Description - -===== Type -* object - -=== .spec.curation.curator.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.curation.curator.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.forwarder -==== Description -ForwarderSpec contains global tuning parameters for specific forwarder implementations. -This field is not required for general use, it allows performance tuning by users -familiar with the underlying forwarder technology. -Currently supported: `fluentd`. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentd|object| -|====================== - -=== .spec.forwarder.fluentd -==== Description -FluentdForwarderSpec represents the configuration for forwarders of type fluentd. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|buffer|object| -|inFile|object| -|====================== - -=== .spec.forwarder.fluentd.buffer -==== Description -FluentdBufferSpec represents a subset of fluentd buffer parameters to tune -the buffer configuration for all fluentd outputs. It supports a subset of -parameters to configure buffer and queue sizing, flush operations and retry -flushing. - -For general parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#buffering-parameters - -For flush parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#flushing-parameters - -For retry parameters refer to: -https://docs.fluentd.org/configuration/buffer-section#retries-parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|chunkLimitSize|string| *(optional)* ChunkLimitSize represents the maximum size of each chunk. Events will be -|flushInterval|string| *(optional)* FlushInterval represents the time duration to wait between two consecutive flush -|flushMode|string| *(optional)* FlushMode represents the mode of the flushing thread to write chunks. 
The mode -|flushThreadCount|int| *(optional)* FlushThreadCount reprents the number of threads used by the fluentd buffer -|overflowAction|string| *(optional)* OverflowAction represents the action for the fluentd buffer plugin to -|retryMaxInterval|string| *(optional)* RetryMaxInterval represents the maximum time interval for exponential backoff -|retryTimeout|string| *(optional)* RetryTimeout represents the maximum time interval to attempt retries before giving up -|retryType|string| *(optional)* RetryType represents the type of retrying flush operations. Flush operations can -|retryWait|string| *(optional)* RetryWait represents the time duration between two consecutive retries to flush -|totalLimitSize|string| *(optional)* TotalLimitSize represents the threshold of node space allowed per fluentd -|====================== - -=== .spec.forwarder.fluentd.inFile -==== Description -FluentdInFileSpec represents a subset of fluentd in-tail plugin parameters -to tune the configuration for all fluentd in-tail inputs. - -For general parameters refer to: -https://docs.fluentd.org/input/tail#parameters - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|readLinesLimit|int| *(optional)* ReadLinesLimit represents the number of lines to read with each I/O operation -|====================== - -=== .spec.logStore -==== Description -The LogStoreSpec contains information about how logs are stored. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearch|object| Specification of the Elasticsearch Log Store component -|lokistack|object| LokiStack contains information about which LokiStack to use for log storage if Type is set to LogStoreTypeLokiStack. -|retentionPolicy|object| *(optional)* Retention policy defines the maximum age for an index after which it should be deleted -|type|string| The Type of Log Storage to configure. The operator currently supports either using ElasticSearch -|====================== - -=== .spec.logStore.elasticsearch -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeCount|int| Number of nodes to deploy for Elasticsearch -|nodeSelector|object| Define which Nodes the Pods are scheduled on. -|proxy|object| Specification of the Elasticsearch Proxy component -|redundancyPolicy|string| *(optional)* -|resources|object| *(optional)* The resource requirements for Elasticsearch -|storage|object| *(optional)* The storage specification for Elasticsearch data nodes -|tolerations|array| -|====================== - -=== .spec.logStore.elasticsearch.nodeSelector -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.proxy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| -|====================== - -=== .spec.logStore.elasticsearch.proxy.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. 
-|====================== - -=== .spec.logStore.elasticsearch.proxy.resources.limits -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.proxy.resources.requests -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.logStore.elasticsearch.resources.limits -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.resources.requests -==== Description - -===== Type -* object - -=== .spec.logStore.elasticsearch.storage -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|size|object| The max storage capacity for the node to provision. -|storageClassName|string| *(optional)* The name of the storage class to use with creating the node's PVC. -|====================== - -=== .spec.logStore.elasticsearch.storage.size -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|Format|string| Change Format at will. See the comment for Canonicalize for -|d|object| d is the quantity in inf.Dec form if d.Dec != nil -|i|int| i is the quantity in int64 scaled form, if d.Dec == nil -|s|string| s is the generated value of this quantity to avoid recalculation -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|Dec|object| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|scale|int| -|unscaled|object| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|abs|Word| sign -|neg|bool| -|====================== - -=== .spec.logStore.elasticsearch.storage.size.d.Dec.unscaled.abs -==== Description - -===== Type -* Word - -=== .spec.logStore.elasticsearch.storage.size.i -==== Description - -===== Type -* int - -[options="header"] -|====================== -|Property|Type|Description - -|scale|int| -|value|int| -|====================== - -=== .spec.logStore.elasticsearch.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. -|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. 
-|====================== - -=== .spec.logStore.elasticsearch.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .spec.logStore.lokistack -==== Description -LokiStackStoreSpec is used to set up cluster-logging to use a LokiStack as logging storage. -It points to an existing LokiStack in the same namespace. - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|name|string| Name of the LokiStack resource. -|====================== - -=== .spec.logStore.retentionPolicy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|application|object| -|audit|object| -|infra|object| -|====================== - -=== .spec.logStore.retentionPolicy.application -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.application.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.logStore.retentionPolicy.audit -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.audit.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.logStore.retentionPolicy.infra -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|diskThresholdPercent|int| *(optional)* The threshold percentage of ES disk usage that when reached, old indices should be deleted (e.g. 
75) -|maxAge|string| *(optional)* -|namespaceSpec|array| *(optional)* The per namespace specification to delete documents older than a given minimum age -|pruneNamespacesInterval|string| *(optional)* How often to run a new prune-namespaces job -|====================== - -=== .spec.logStore.retentionPolicy.infra.namespaceSpec[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|minAge|string| *(optional)* Delete the records matching the namespaces which are older than this MinAge (e.g. 1d) -|namespace|string| Target Namespace to delete logs older than MinAge (defaults to 7d) -|====================== - -=== .spec.visualization -==== Description -This is the struct that will contain information pertinent to Log visualization (Kibana) - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|kibana|object| Specification of the Kibana Visualization component -|type|string| The type of Visualization to configure -|====================== - -=== .spec.visualization.kibana -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|nodeSelector|object| Define which Nodes the Pods are scheduled on. -|proxy|object| Specification of the Kibana Proxy component -|replicas|int| Number of instances to deploy for a Kibana deployment -|resources|object| *(optional)* The resource requirements for Kibana -|tolerations|array| -|====================== - -=== .spec.visualization.kibana.nodeSelector -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.proxy -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|resources|object| -|====================== - -=== .spec.visualization.kibana.proxy.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.visualization.kibana.proxy.resources.limits -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.proxy.resources.requests -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.replicas -==== Description - -===== Type -* int - -=== .spec.visualization.kibana.resources -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|limits|object| *(optional)* Limits describes the maximum amount of compute resources allowed. -|requests|object| *(optional)* Requests describes the minimum amount of compute resources required. -|====================== - -=== .spec.visualization.kibana.resources.limits -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.resources.requests -==== Description - -===== Type -* object - -=== .spec.visualization.kibana.tolerations[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|effect|string| *(optional)* Effect indicates the taint effect to match. Empty means match all taint effects. -|key|string| *(optional)* Key is the taint key that the toleration applies to. Empty means match all taint keys. -|operator|string| *(optional)* Operator represents a key's relationship to the value. 
-|tolerationSeconds|int| *(optional)* TolerationSeconds represents the period of time the toleration (which must be -|value|string| *(optional)* Value is the taint value the toleration matches to. -|====================== - -=== .spec.visualization.kibana.tolerations[].tolerationSeconds -==== Description - -===== Type -* int - -=== .status -==== Description -ClusterLoggingStatus defines the observed state of ClusterLogging - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|collection|object| *(optional)* -|conditions|object| *(optional)* -|curation|object| *(optional)* -|logStore|object| *(optional)* -|visualization|object| *(optional)* -|====================== - -=== .status.collection -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|logs|object| *(optional)* -|====================== - -=== .status.collection.logs -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|fluentdStatus|object| *(optional)* -|====================== - -=== .status.collection.logs.fluentdStatus -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|daemonSet|string| *(optional)* -|nodes|object| *(optional)* -|pods|string| *(optional)* -|====================== - -=== .status.collection.logs.fluentdStatus.clusterCondition -==== Description -`operator-sdk generate crds` does not allow map-of-slice, must use a named type. - -===== Type -* object - -=== .status.collection.logs.fluentdStatus.nodes -==== Description - -===== Type -* object - -=== .status.conditions -==== Description - -===== Type -* object - -=== .status.curation -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|curatorStatus|array| *(optional)* -|====================== - -=== .status.curation.curatorStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|cronJobs|string| *(optional)* -|schedules|string| *(optional)* -|suspended|bool| *(optional)* -|====================== - -=== .status.curation.curatorStatus[].clusterCondition -==== Description -`operator-sdk generate crds` does not allow map-of-slice, must use a named type. 
- -===== Type -* object - -=== .status.logStore -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|elasticsearchStatus|array| *(optional)* -|====================== - -=== .status.logStore.elasticsearchStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|cluster|object| *(optional)* -|clusterConditions|object| *(optional)* -|clusterHealth|string| *(optional)* -|clusterName|string| *(optional)* -|deployments|array| *(optional)* -|nodeConditions|object| *(optional)* -|nodeCount|int| *(optional)* -|pods|object| *(optional)* -|replicaSets|array| *(optional)* -|shardAllocationEnabled|string| *(optional)* -|statefulSets|array| *(optional)* -|====================== - -=== .status.logStore.elasticsearchStatus[].cluster -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|activePrimaryShards|int| The number of Active Primary Shards for the Elasticsearch Cluster -|activeShards|int| The number of Active Shards for the Elasticsearch Cluster -|initializingShards|int| The number of Initializing Shards for the Elasticsearch Cluster -|numDataNodes|int| The number of Data Nodes for the Elasticsearch Cluster -|numNodes|int| The number of Nodes for the Elasticsearch Cluster -|pendingTasks|int| -|relocatingShards|int| The number of Relocating Shards for the Elasticsearch Cluster -|status|string| The current Status of the Elasticsearch Cluster -|unassignedShards|int| The number of Unassigned Shards for the Elasticsearch Cluster -|====================== - -=== .status.logStore.elasticsearchStatus[].clusterConditions -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].deployments[] -==== Description - -===== Type -* array - -=== .status.logStore.elasticsearchStatus[].nodeConditions -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].pods -==== Description - -===== Type -* object - -=== .status.logStore.elasticsearchStatus[].replicaSets[] -==== Description - -===== Type -* array - -=== .status.logStore.elasticsearchStatus[].statefulSets[] -==== Description - -===== Type -* array - -=== .status.visualization -==== Description - -===== Type -* object - -[options="header"] -|====================== -|Property|Type|Description - -|kibanaStatus|array| *(optional)* -|====================== - -=== .status.visualization.kibanaStatus[] -==== Description - -===== Type -* array - -[options="header"] -|====================== -|Property|Type|Description - -|clusterCondition|object| *(optional)* -|deployment|string| *(optional)* -|pods|string| *(optional)* The status for each of the Kibana pods for the Visualization component -|replicaSets|array| *(optional)* -|replicas|int| *(optional)* -|====================== - -=== .status.visualization.kibanaStatus[].clusterCondition -==== Description - -===== Type -* object - -=== .status.visualization.kibanaStatus[].replicaSets[] -==== Description - -===== Type -* array diff --git a/modules/logging-architecture-overview.adoc b/modules/logging-architecture-overview.adoc deleted file mode 100644 index 0e017d71cd7f..000000000000 --- a/modules/logging-architecture-overview.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// - -:_content-type: CONCEPT -[id="logging-architecture-overview_{context}"] -= Logging architecture - -The {logging} consists of these logical 
components: - -* `Collector` - Reads container log data from each node and forwards log data to configured outputs. - -* `Store` - Stores log data for analysis; the default output for the forwarder. - -* `Visualization` - Graphical interface for searching, querying, and viewing stored logs. - -These components are managed by Operators and Custom Resource (CR) YAML files. - -include::snippets/logging-log-types-snip.adoc[] - - -The logging collector is a daemonset that deploys pods to each {product-title} node. System and infrastructure logs are generated by journald log messages from the operating system, the container runtime, and {product-title}. - -Container logs are generated by containers running in pods on the cluster. Each container generates a separate log stream. The collector collects the logs from these sources and forwards them internally or externally as configured in the `ClusterLogForwarder` custom resource. diff --git a/modules/logging-common-terms.adoc deleted file mode 100644 index 6683d0bec698..000000000000 --- a/modules/logging-common-terms.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/cluster-logging.adoc - -:_content-type: REFERENCE -[id="openshift-logging-common-terms_{context}"] -= Glossary of common terms for {product-title} Logging - -This glossary defines common terms that are used in the {product-title} Logging content. - -annotation:: -You can use annotations to attach metadata to objects. - -Cluster Logging Operator (CLO):: -The Cluster Logging Operator provides a set of APIs to control the collection and forwarding of application, infrastructure, and audit logs. - -Custom Resource (CR):: -A CR is an extension of the Kubernetes API. To configure {product-title} Logging and log forwarding, you can customize the `ClusterLogging` and the `ClusterLogForwarder` custom resources. - -event router:: -The event router is a pod that watches {product-title} events. It collects logs by using {product-title} Logging. - -Fluentd:: -Fluentd is a log collector that resides on each {product-title} node. It gathers application, infrastructure, and audit logs and forwards them to different outputs. - -garbage collection:: -Garbage collection is the process of cleaning up cluster resources, such as terminated containers and images that are not referenced by any running pods. - -Elasticsearch:: -Elasticsearch is a distributed search and analytics engine. {product-title} uses Elasticsearch as the default log store for {product-title} Logging. - -Elasticsearch Operator:: -The Elasticsearch Operator is used to run an Elasticsearch cluster on top of {product-title}. The Elasticsearch Operator provides self-service for the Elasticsearch cluster operations and is used by {product-title} Logging. - -indexing:: -Indexing is a data structure technique that is used to quickly locate and access data. Indexing optimizes performance by minimizing the amount of disk access required when a query is processed. - -JSON logging:: -The {product-title} Logging Log Forwarding API enables you to parse JSON logs into a structured object and forward them to either {product-title} Logging-managed Elasticsearch or any other third-party system supported by the Log Forwarding API. - -Kibana:: -Kibana is a browser-based console interface to query, discover, and visualize your Elasticsearch data through histograms, line graphs, and pie charts.
- -Kubernetes API server:: -Kubernetes API server validates and configures data for the API objects. - -Labels:: -Labels are key-value pairs that you can use to organize and select subsets of objects, such as a pod. - -Logging:: -With {product-title} Logging you can aggregate application, infrastructure, and audit logs throughout your cluster. You can also store them to a default log store, forward them to third party systems, and query and visualize the stored logs in the default log store. - -logging collector:: -A logging collector collects logs from the cluster, formats them, and forwards them to the log store or third party systems. - -log store:: -A log store is used to store aggregated logs. You can use the default Elasticsearch log store or forward logs to external log stores. The default log store is optimized and tested for short-term storage. - -log visualizer:: -Log visualizer is the user interface (UI) component you can use to view information such as logs, graphs, charts, and other metrics. The current implementation is Kibana. - -node:: -A node is a worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -Operators:: -Operators are the preferred method of packaging, deploying, and managing a Kubernetes application in an {product-title} cluster. An Operator takes human operational knowledge and encodes it into software that is packaged and shared with customers. - -pod:: -A pod is the smallest logical unit in Kubernetes. A pod consists of one or more containers and runs on a worker node.. - -Role-based access control (RBAC):: -RBAC is a key security control to ensure that cluster users and workloads have access only to resources required to execute their roles. - -shards:: -Elasticsearch organizes the log data from Fluentd into datastores, or indices, then subdivides each index into multiple pieces called shards. - -taint:: -Taints ensure that pods are scheduled onto appropriate nodes. You can apply one or more taints on a node. - -toleration:: -You can apply tolerations to pods. Tolerations allow the scheduler to schedule pods with matching taints. - -web console:: -A user interface (UI) to manage {product-title}. -ifdef::openshift-rosa,openshift-dedicated[] -The web console for {product-title} can be found at link:https://console.redhat.com/openshift[https://console.redhat.com/openshift]. -endif::[] diff --git a/modules/logging-deploy-RHOL-console.adoc b/modules/logging-deploy-RHOL-console.adoc deleted file mode 100644 index f48b24b12bb3..000000000000 --- a/modules/logging-deploy-RHOL-console.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * - - -:_content-type: PROCEDURE -[id="logging-deploy-RHOL-console_{context}"] -= Deploying Red Hat OpenShift Logging Operator using the web console - -You can use the {product-title} web console to deploy the Red Hat OpenShift Logging Operator. - -.Prerequisites - -include::snippets/logging-compatibility-snip.adoc[] - -.Procedure - -To deploy the Red Hat OpenShift Logging Operator using the {product-title} web console: - -. Install the Red Hat OpenShift Logging Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Type *Logging* in the *Filter by keyword* field. - -.. Choose *Red Hat OpenShift Logging* from the list of available Operators, and click *Install*. - -.. Select *stable* or *stable-5.y* as the *Update Channel*. -+ --- -include::snippets/logging-stable-updates-snip.adoc[] --- -.. 
Ensure that *A specific namespace on the cluster* is selected under *Installation Mode*. - -.. Ensure that *Operator recommended namespace* is *openshift-logging* under *Installed Namespace*. - -.. Select *Enable Operator recommended cluster monitoring on this Namespace*. - -.. Select an option for *Update approval*. -+ -* The *Automatic* option allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* option requires a user with appropriate credentials to approve the Operator update. - -.. Select *Enable* or *Disable* for the Console plugin. - -.. Click *Install*. - -. Verify that the *Red Hat OpenShift Logging Operator* is installed by switching to the *Operators* → *Installed Operators* page. - -.. Ensure that *Red Hat OpenShift Logging* is listed in the *openshift-logging* project with a *Status* of *Succeeded*. - -. Create a *ClusterLogging* instance. -+ -[NOTE] -==== -The form view of the web console does not include all available options. The *YAML view* is recommended for completing your setup. -==== -+ -.. In the *collection* section, select a Collector Implementation. -+ --- -include::snippets/logging-fluentd-dep-snip.adoc[] --- -.. In the *logStore* section, select a type. -+ --- -include::snippets/logging-elastic-dep-snip.adoc[] --- - -.. Click *Create*. diff --git a/modules/logging-deploy-loki-console.adoc b/modules/logging-deploy-loki-console.adoc deleted file mode 100644 index 81173c425c9c..000000000000 --- a/modules/logging-deploy-loki-console.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * - -:_content-type: PROCEDURE -[id="logging-deploy-loki-console_{context}"] -= Deploying the Loki Operator using the web console - -You can use the {product-title} web console to install the Loki Operator. - -.Prerequisites - -* Supported Log Store (AWS S3, Google Cloud Storage, Azure, Swift, Minio, OpenShift Data Foundation) - -.Procedure - -To install the Loki Operator using the {product-title} web console: - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -. Type *Loki* in the *Filter by keyword* field. - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -. Select *stable* or *stable-5.y* as the *Update Channel*. -+ --- -include::snippets/logging-stable-updates-snip.adoc[] --- -. Ensure that *All namespaces on the cluster* is selected under *Installation Mode*. - -. Ensure that *openshift-operators-redhat* is selected under *Installed Namespace*. - -. Select *Enable Operator recommended cluster monitoring on this Namespace*. -+ -This option sets the `openshift.io/cluster-monitoring: "true"` label in the Namespace object. You must select this option to ensure that cluster monitoring scrapes the `openshift-operators-redhat` namespace. - -. Select an option for *Update approval*. -+ -* The *Automatic* option allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -+ -* The *Manual* option requires a user with appropriate credentials to approve the Operator update. - -. Click *Install*. - -. Verify that the *LokiOperator* installed by switching to the *Operators* → *Installed Operators* page. - -.. Ensure that *LokiOperator* is listed with *Status* as *Succeeded* in all the projects. - -+ -. 
Create a `Secret` YAML file that uses the `access_key_id` and `access_key_secret` fields to specify your credentials and `bucketnames`, `endpoint`, and `region` to define the object storage location. AWS is used in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: logging-loki-s3 - namespace: openshift-logging -stringData: - access_key_id: AKIAIOSFODNN7EXAMPLE - access_key_secret: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - bucketnames: s3-bucket-name - endpoint: https://s3.eu-central-1.amazonaws.com - region: eu-central-1 ----- -+ -. Select *Create instance* under LokiStack on the *Details* tab. Then select *YAML view*. Paste in the following template, substituting values where appropriate. -+ -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: LokiStack - metadata: - name: logging-loki <1> - namespace: openshift-logging - spec: - size: 1x.small <2> - storage: - schemas: - - version: v12 - effectiveDate: '2022-06-01' - secret: - name: logging-loki-s3 <3> - type: s3 <4> - storageClassName: <storage_class_name> <5> - tenants: - mode: openshift-logging ----- -<1> Name should be `logging-loki`. -<2> Select your Loki deployment size. -<3> Define the secret used for your log storage. -<4> Define the corresponding storage type. -<5> Enter the name of an existing storage class for temporary storage. For best performance, specify a storage class that allocates block storage. You can list the available storage classes for your cluster by using the `oc get storageclasses` command. -+ -.. Apply the configuration: -+ -[source,terminal] ----- -oc apply -f logging-loki.yaml ----- -+ -. Create or edit a `ClusterLogging` CR: -+ -[source,yaml] ----- - apiVersion: logging.openshift.io/v1 - kind: ClusterLogging - metadata: - name: instance - namespace: openshift-logging - spec: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: logging-loki - collection: - type: vector ----- -+ -..
Apply the configuration: -+ -[source,terminal] ----- -oc apply -f cr-lokistack.yaml ----- diff --git a/modules/logging-feature-reference-5.6.adoc b/modules/logging-feature-reference-5.6.adoc deleted file mode 100644 index 2e3d3411dbf2..000000000000 --- a/modules/logging-feature-reference-5.6.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module is included in the following assemblies: -// -:_content-type: REFERENCE -[id="logging-5-6-collector-ref_{context}"] - -= Collector features - -include::snippets/logging-outputs-5.6-snip.adoc[] - -.Log Sources -[options="header"] -|=============================================================== -| Feature | Fluentd | Vector -| App container logs | ✓ | ✓ -| App-specific routing | ✓ | ✓ -| App-specific routing by namespace | ✓ | ✓ -| Infra container logs | ✓ | ✓ -| Infra journal logs | ✓ | ✓ -| Kube API audit logs | ✓ | ✓ -| OpenShift API audit logs | ✓ | ✓ -| Open Virtual Network (OVN) audit logs| ✓ | ✓ -|=============================================================== - -.Authorization and Authentication -[options="header"] -|================================================================= -| Feature | Fluentd | Vector -| Elasticsearch certificates | ✓ | ✓ -| Elasticsearch username / password | ✓ | ✓ -| Cloudwatch keys | ✓ | ✓ -| Cloudwatch STS | ✓ | ✓ -| Kafka certificates | ✓ | ✓ -| Kafka username / password | ✓ | ✓ -| Kafka SASL | ✓ | ✓ -| Loki bearer token | ✓ | ✓ -|================================================================= - -.Normalizations and Transformations -[options="header"] -|============================================================================ -| Feature | Fluentd | Vector -| Viaq data model - app | ✓ | ✓ -| Viaq data model - infra | ✓ | ✓ -| Viaq data model - infra(journal) | ✓ | ✓ -| Viaq data model - Linux audit | ✓ | ✓ -| Viaq data model - kube-apiserver audit | ✓ | ✓ -| Viaq data model - OpenShift API audit | ✓ | ✓ -| Viaq data model - OVN | ✓ | ✓ -| Loglevel Normalization | ✓ | ✓ -| JSON parsing | ✓ | ✓ -| Structured Index | ✓ | ✓ -| Multiline error detection | ✓ | -| Multicontainer / split indices | ✓ | ✓ -| Flatten labels | ✓ | ✓ -| CLF static labels | ✓ | ✓ -|============================================================================ - -.Tuning -[options="header"] -|========================================================== -| Feature | Fluentd | Vector -| Fluentd readlinelimit | ✓ | -| Fluentd buffer | ✓ | -| - chunklimitsize | ✓ | -| - totallimitsize | ✓ | -| - overflowaction | ✓ | -| - flushthreadcount | ✓ | -| - flushmode | ✓ | -| - flushinterval | ✓ | -| - retrywait | ✓ | -| - retrytype | ✓ | -| - retrymaxinterval | ✓ | -| - retrytimeout | ✓ | -|========================================================== - -.Visibility -[options="header"] -|===================================================== -| Feature | Fluentd | Vector -| Metrics | ✓ | ✓ -| Dashboard | ✓ | ✓ -| Alerts | ✓ | -|===================================================== - -.Miscellaneous -[options="header"] -|=========================================================== -| Feature | Fluentd | Vector -| Global proxy support | ✓ | ✓ -| x86 support | ✓ | ✓ -| ARM support | ✓ | ✓ -| {ibmpowerProductName} support | ✓ | ✓ -| {ibmzProductName} support | ✓ | ✓ -| IPv6 support | ✓ | ✓ -| Log event buffering | ✓ | -| Disconnected Cluster | ✓ | ✓ -|=========================================================== - - -[role="_additional-resources"] -.Additional resources -* link:https://vector.dev/docs/about/what-is-vector/[Vector Documentation] diff --git 
a/modules/logging-forward-splunk.adoc b/modules/logging-forward-splunk.adoc deleted file mode 100644 index 583fd4760bde..000000000000 --- a/modules/logging-forward-splunk.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-external.adoc -// - -:_content-type: PROCEDURE -[id="logging-forward-splunk_{context}"] -= Forwarding logs to Splunk - -You can forward logs to the link:https://docs.splunk.com/Documentation/Splunk/9.0.0/Data/UsetheHTTPEventCollector[Splunk HTTP Event Collector (HEC)] in addition to, or instead of, the internal default {product-title} log store. - -[NOTE] -==== -Using this feature with Fluentd is not supported. -==== - -.Prerequisites -* Red Hat OpenShift Logging Operator 5.6 and higher -* ClusterLogging instance with vector specified as collector -* Base64 encoded Splunk HEC token - -.Procedure - -. Create a secret using your Base64 encoded Splunk HEC token. -+ -[source,terminal] ----- -$ oc -n openshift-logging create secret generic vector-splunk-secret --from-literal hecToken=<HEC_Token> ----- -+ -. Create or edit the `ClusterLogForwarder` Custom Resource (CR) using the template below: -+ -[source,yaml] ----- - apiVersion: "logging.openshift.io/v1" - kind: "ClusterLogForwarder" - metadata: - name: "instance" <1> - namespace: "openshift-logging" <2> - spec: - outputs: - - name: splunk-receiver <3> - secret: - name: vector-splunk-secret <4> - type: splunk <5> - url: <http://your.splunk.hec.url:8088> <6> - pipelines: <7> - - inputRefs: - - application - - infrastructure - name: <8> - outputRefs: - - splunk-receiver <9> ----- -<1> The name of the ClusterLogForwarder CR must be `instance`. -<2> The namespace for the ClusterLogForwarder CR must be `openshift-logging`. -<3> Specify a name for the output. -<4> Specify the name of the secret that contains your HEC token. -<5> Specify the output type as `splunk`. -<6> Specify the URL (including port) of your Splunk HEC. -<7> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. -<8> Optional: Specify a name for the pipeline. -<9> Specify the name of the output to use when forwarding logs with this pipeline. diff --git a/modules/logging-getting-started.adoc b/modules/logging-getting-started.adoc deleted file mode 100644 index 305ad915a1c8..000000000000 --- a/modules/logging-getting-started.adoc +++ /dev/null @@ -1,45 +0,0 @@ -:_content-type: PROCEDURE -[id="logging-getting-started_{context}"] -= Getting started with logging - -This overview of the logging deployment process is provided for ease of reference. It is not a substitute for full documentation. For new installations, Vector and LokiStack are recommended. - --- -include::snippets/logging-under-construction-snip.adoc[] --- - --- -include::snippets/logging-compatibility-snip.adoc[] --- - -.Prerequisites -* Log store preference: Elasticsearch or LokiStack -* Collector implementation preference: Fluentd or Vector -* Credentials for your log forwarding outputs - -.Procedure - --- -include::snippets/logging-elastic-dep-snip.adoc[] --- - -. Install the Operator for the log store you'd like to use. -** For Elasticsearch, install the *OpenShift Elasticsearch Operator*. -** For LokiStack, install the *Loki Operator*. -+ -[NOTE] -==== -If you have installed the Loki Operator, create a `LokiStack` custom resource (CR) instance as well. -==== - -. Install the *Red Hat OpenShift Logging* Operator. - -. Create a `ClusterLogging` CR instance. -** Select your collector implementation. 
-+ --- -include::snippets/logging-fluentd-dep-snip.adoc[] --- -. Create a `ClusterLogForwarder` CR instance. - -. Create a secret for the selected output pipeline. diff --git a/modules/logging-http-forward.adoc b/modules/logging-http-forward.adoc deleted file mode 100644 index b0ddb49eef5a..000000000000 --- a/modules/logging-http-forward.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// logging/cluster-logging-external -// * - -:_content-type: PROCEDURE -[id="logging-deploy-loki-console_{context}"] -= Forwarding logs over HTTP - -Forwarding logs over HTTP is supported for both fluentd and vector collectors. To enable, specify `http` as the output type in the `ClusterLogForwarder` custom resource (CR). - -.Procedure - -* Create or edit the ClusterLogForwarder Custom Resource (CR) using the template below: - -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: "logging.openshift.io/v1" -kind: "ClusterLogForwarder" -metadata: - name: "instance" - namespace: "openshift-logging" -spec: - outputs: - - name: httpout-app - type: http - url: <1> - http: - headers: <2> - h1: v1 - h2: v2 - method: POST - secret: - name: <3> - tls: - insecureSkipVerify: <4> - pipelines: - - name: - inputRefs: - - application - outputRefs: - - <5> ----- -<1> Destination address for logs. -<2> Additional headers to send with the log record. -<3> Secret name for destination credentials. -<4> Values are either `true` or `false`. -<5> This value should be the same as the output name. diff --git a/modules/logging-in-by-using-the-web-console.adoc b/modules/logging-in-by-using-the-web-console.adoc deleted file mode 100644 index 9a3766a16b03..000000000000 --- a/modules/logging-in-by-using-the-web-console.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc -// * installing/installing_aws/installing-aws-china.adoc. -// * installing/installing_aws/installing-aws-secret-region.adoc -// *installing/validating-an-installation.adoc -// *installing/installing_aws/installing-aws-user-infra.adoc -// *installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// *installing/installing_aws/installing-restricted-networks-aws.adoc -// * installing/installing_aws/installing-aws-outposts-remote-workers.adoc - -:_content-type: PROCEDURE -[id="logging-in-by-using-the-web-console_{context}"] -= Logging in to the cluster by using the web console - -The `kubeadmin` user exists by default after an {product-title} installation. You can log in to your cluster as the `kubeadmin` user by using the {product-title} web console. - -.Prerequisites - -* You have access to the installation host. -* You completed a cluster installation and all cluster Operators are available. - -.Procedure - -. Obtain the password for the `kubeadmin` user from the `kubeadmin-password` file on the installation host: -+ -[source,terminal] ----- -$ cat <installation_directory>/auth/kubeadmin-password ----- -+ -[NOTE] -==== -Alternatively, you can obtain the `kubeadmin` password from the `<installation_directory>/.openshift_install.log` log file on the installation host. -==== - -. 
List the {product-title} web console route: -+ -[source,terminal] ----- -$ oc get routes -n openshift-console | grep 'console-openshift' ----- -+ -[NOTE] -==== -Alternatively, you can obtain the {product-title} route from the `<installation_directory>/.openshift_install.log` log file on the installation host. -==== -+ -.Example output -[source,terminal] ----- -console console-openshift-console.apps.<cluster_name>.<base_domain> console https reencrypt/Redirect None ----- - -. Navigate to the route detailed in the output of the preceding command in a web browser and log in as the `kubeadmin` user. diff --git a/modules/logging-into-a-cluster-after-installation.adoc b/modules/logging-into-a-cluster-after-installation.adoc deleted file mode 100644 index 6196889d4f82..000000000000 --- a/modules/logging-into-a-cluster-after-installation.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/validating-an-installation.adoc - -[id="logging-into-a-cluster-after-installation_{context}"] -= Logging into a cluster after an installation - -After an installation, the `kubeadmin` user exists by default. You can log in to your cluster as the `kubeadmin` user by using the CLI or the {product-title} web console. - - diff --git a/modules/logging-loki-alerts.adoc b/modules/logging-loki-alerts.adoc deleted file mode 100644 index 770a603cbfce..000000000000 --- a/modules/logging-loki-alerts.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// logging-5-7-configuration - -:_content-type: PROCEDURE -[id="logging-loki-alerts_{context}"] -= Enabling log based alerts with Loki -Loki alerting rules use link:https://grafana.com/docs/loki/latest/logql/[LogQL] and follow link:https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules[Prometheus formatting]. You can set log based alerts by creating an `AlertingRule` custom resource (CR). `AlertingRule` CRs may be created for `application`, `audit`, or `infrastructure` tenants. - -[options="header"] -|================================================ -| Tenant type | Valid namespaces -| application | -| audit | `openshift-logging` -| infrastructure | `openshift-/*`, `kube-/*`, `default` -|================================================ - -Application, Audit, and Infrastructure alerts are sent to the Cluster Monitoring Operator (CMO) Alertmanager in the `openshift-monitoring` namespace by default unless you have disabled the local `Alertmanager` instance. - -Application alerts are not sent to the CMO Alertmanager in the `openshift-user-workload-monitoring` namespace by default unless you have enabled a separate `Alertmanager` instance. - -The `AlertingRule` CR contains a set of specifications and webhook validation definitions to declare groups of alerting rules for a single LokiStack instance. In addition, the webhook validation definition provides support for rule validation conditions: - -* If an `AlertingRule` CR includes an invalid `interval` period, it is an invalid alerting rule -* If an `AlertingRule` CR includes an invalid `for` period, it is an invalid alerting rule. -* If an `AlertingRule` CR includes an invalid LogQL `expr`, it is an invalid alerting rule. -* If an `AlertingRule` CR includes two groups with the same name, it is an invalid alerting rule. -* If none of above applies, an `AlertingRule` is considered a valid alerting rule. 
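The following is a minimal sketch of an `AlertingRule` CR that satisfies these validation conditions. The resource name, namespace, group name, and LogQL expression shown here are illustrative placeholders only; use the tenant-specific examples later in this procedure as your starting point.

.Example AlertingRule CR (illustrative)
[source,yaml]
----
apiVersion: loki.grafana.com/v1
kind: AlertingRule
metadata:
  name: example-application-alerts <1>
  namespace: my-app-namespace <2>
spec:
  tenantID: "application"
  groups:
  - name: example-group <3>
    interval: 1m <4>
    rules:
    - alert: HighApplicationErrorRate
      expr: 'sum(rate({kubernetes_namespace_name="my-app-namespace"} |= "error" [5m])) > 10' <5>
      for: 10m <6>
      labels:
        severity: warning
      annotations:
        summary: High error rate in application logs
----
<1> Illustrative name for the alerting rule.
<2> The CR must be created in a valid namespace for the selected tenant type, as listed in the preceding table.
<3> Group names must be unique within the CR.
<4> Must be a valid duration, or the webhook rejects the CR.
<5> Must be a valid LogQL expression.
<6> Must be a valid duration, or the webhook rejects the CR.

Because validation is enforced by the webhook, an invalid `interval`, `for`, or `expr` value causes the CR to be rejected when you apply it.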
- -.Prerequisites - -* {logging-title-uc} Operator 5.7 and later -* {product-title} 4.13 and later - -.Procedure - -1. Create an AlertingRule CR: - --- -include::snippets/logging-create-apply-cr-snip.adoc[lines=9..12] --- - -2. Populate your AlertingRule CR using the appropriate example below: - --- -include::snippets/logging-alertingrule-inf-callouts-snip.adoc[] --- - --- -include::snippets/logging-alertingrule-app-callouts-snip.adoc[] --- - -3. Apply the CR. - --- -include::snippets/logging-create-apply-cr-snip.adoc[lines=14..17] --- diff --git a/modules/logging-loki-retention.adoc b/modules/logging-loki-retention.adoc deleted file mode 100644 index c98b48be61e5..000000000000 --- a/modules/logging-loki-retention.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// - -:_content-type: PROCEDURE -[id="logging-loki-retention_{context}"] -= Enabling stream-based retention with Loki - -With Logging version 5.6 and higher, you can configure retention policies based on log streams. Rules for these may be set globally, per tenant, or both. If you configure both, tenant rules apply before global rules. - -. To enable stream-based retention, create or edit the `LokiStack` custom resource (CR): - -include::snippets/logging-create-apply-cr-snip.adoc[lines=9..12] - -. You can refer to the examples below to configure your LokiStack CR. - -.Example global stream-based retention -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - limits: - global: <1> - retention: <2> - days: 20 - streams: - - days: 4 - priority: 1 - selector: '{kubernetes_namespace_name=~"test.+"}' <3> - - days: 1 - priority: 1 - selector: '{log_type="infrastructure"}' - managementState: Managed - replicationFactor: 1 - size: 1x.small - storage: - schemas: - - effectiveDate: "2020-10-11" - version: v11 - secret: - name: logging-loki-s3 - type: aws - storageClassName: standard - tenants: - mode: openshift-logging ----- -<1> Sets retention policy for all log streams. *Note: This field does not impact the retention period for stored logs in object storage.* -<2> Retention is enabled in the cluster when this block is added to the CR. -<3> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream. - -.Example per-tenant stream-based retention -[source,yaml] ----- -apiVersion: loki.grafana.com/v1 -kind: LokiStack -metadata: - name: logging-loki - namespace: openshift-logging -spec: - limits: - global: - retention: - days: 20 - tenants: <1> - application: - retention: - days: 1 - streams: - - days: 4 - selector: '{kubernetes_namespace_name=~"test.+"}' <2> - infrastructure: - retention: - days: 5 - streams: - - days: 1 - selector: '{kubernetes_namespace_name=~"openshift-cluster.+"}' - managementState: Managed - replicationFactor: 1 - size: 1x.small - storage: - schemas: - - effectiveDate: "2020-10-11" - version: v11 - secret: - name: logging-loki-s3 - type: aws - storageClassName: standard - tenants: - mode: openshift-logging ----- -<1> Sets retention policy by tenant. Valid tenant types are `application`, `audit`, and `infrastructure`. -<2> Contains the link:https://grafana.com/docs/loki/latest/logql/query_examples/#query-examples[LogQL query] used to define the log stream. - -. 
Then apply your configuration: - -include::snippets/logging-create-apply-cr-snip.adoc[lines=14..17] - -[NOTE] -==== -This is not for managing the retention for stored logs. Global retention periods for stored logs to a supported maximum of 30 days is configured with your object storage. -==== diff --git a/modules/logging-multiline-except.adoc b/modules/logging-multiline-except.adoc deleted file mode 100644 index 19efa8df840a..000000000000 --- a/modules/logging-multiline-except.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -:_content-type: PROCEDURE -[id="logging-multiline-except_{context}"] -= Enabling multi-line exception detection - -Enables multi-line error detection of container logs. - -[WARNING] -==== -Enabling this feature could have performance implications and may require additional computing resources or alternate logging solutions. -==== - -Log parsers often incorrectly identify separate lines of the same exception as separate exceptions. This leads to extra log entries and an incomplete or inaccurate view of the traced information. - -.Example java exception -[,text] ----- -java.lang.NullPointerException: Cannot invoke "String.toString()" because "<param1>" is null - at testjava.Main.handle(Main.java:47) - at testjava.Main.printMe(Main.java:19) - at testjava.Main.main(Main.java:10) ----- - -* To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field, with a value of `true`. - - -.Example ClusterLogForwarder CR -[source,yaml] ----- -apiVersion: logging.openshift.io/v1 -kind: ClusterLogForwarder -metadata: - name: instance - namespace: openshift-logging -spec: - pipelines: - - name: my-app-logs - inputRefs: - - application - outputRefs: - - default - detectMultilineErrors: true ----- - -== Details -When log messages appear as a consecutive sequence forming an exception stack trace, they are combined into a single, unified log record. The first log message's content is replaced with the concatenated content of all the message fields in the sequence. - -.Supported languages per collector: -|=== -|Language | Fluentd | Vector - -|Java | ✓ | ✓ -|JS | ✓ | ✓ -|Ruby | ✓ | ✓ -|Python | ✓ | ✓ -|Golang | ✓ | ✓ -|PHP | ✓ | -|Dart | ✓ | ✓ -|=== - -== Troubleshooting -When enabled, the collector configuration will include a new section with type: `detect_exceptions` - -.Example vector configuration section ----- -[transforms.detect_exceptions_app-logs] - type = "detect_exceptions" - inputs = ["application"] - languages = ["All"] - group_by = ["kubernetes.namespace_name","kubernetes.pod_name","kubernetes.container_name"] - expire_after_ms = 2000 - multiline_flush_interval_ms = 1000 ----- - -.Example fluentd config section ----- -<label @MULTILINE_APP_LOGS> - <match kubernetes.**> - @type detect_exceptions - remove_tag_prefix 'kubernetes' - message message - force_line_breaks true - multiline_flush_interval .2 - </match> -</label> - ----- diff --git a/modules/logging-rn-5.5.0.adoc b/modules/logging-rn-5.5.0.adoc deleted file mode 100644 index f405f3935313..000000000000 --- a/modules/logging-rn-5.5.0.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -[id="logging-release-notes-5-5-0"] -= Logging 5.5.0 -This release includes:link:https://access.redhat.com/errata/RHSA-2022:6051[OpenShift Logging Bug Fix Release 5.5.0]. 
- -[id="logging-5-5-0-enhancements"] -== Enhancements -* With this update, you can forward structured logs from different containers within the same pod to different indices. To use this feature, you must configure the pipeline with multi-container support and annotate the pods. (link:https://issues.redhat.com/browse/LOG-1296[LOG-1296]) - -[IMPORTANT] -==== -JSON formatting of logs varies by application. Because creating too many indices impacts performance, limit your use of this feature to creating indices for logs that have incompatible JSON formats. Use queries to separate logs from different namespaces, or applications with compatible JSON formats. -==== - -* With this update, you can filter logs with Elasticsearch outputs by using the Kubernetes common labels, `app.kubernetes.io/component`, `app.kubernetes.io/managed-by`, `app.kubernetes.io/part-of`, and `app.kubernetes.io/version`. Non-Elasticsearch output types can use all labels included in `kubernetes.labels`. (link:https://issues.redhat.com/browse/LOG-2388[LOG-2388]) - -* With this update, clusters with AWS Security Token Service (STS) enabled may use STS authentication to forward logs to Amazon CloudWatch. (link:https://issues.redhat.com/browse/LOG-1976[LOG-1976]) - -* With this update, the Loki Operator and Vector collector move from Technical Preview to General Availability. Full feature parity with prior releases is pending, and some APIs remain Technical Previews. See the *Logging with the LokiStack* section for details. - -[id="logging-5-5-0-bug-fixes"] -== Bug fixes -* Before this update, clusters configured to forward logs to Amazon CloudWatch wrote rejected log files to temporary storage, causing cluster instability over time. With this update, chunk backup for all storage options has been disabled, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2746[LOG-2746]) - -* Before this update, the Operator was using versions of some APIs that are deprecated and planned for removal in future versions of {product-title}. This update moves dependencies to the supported API versions. (link:https://issues.redhat.com/browse/LOG-2656[LOG-2656]) - -* Before this update, multiple `ClusterLogForwarder` pipelines configured for multiline error detection caused the collector to go into a `crashloopbackoff` error state. This update fixes the issue where multiple configuration sections had the same unique ID. (link:https://issues.redhat.com/browse/LOG-2241[LOG-2241]) - -* Before this update, the collector could not save non-UTF-8 symbols to the Elasticsearch storage logs. With this update, the collector encodes non-UTF-8 symbols, resolving the issue. (link:https://issues.redhat.com/browse/LOG-2203[LOG-2203]) - -* Before this update, non-Latin characters displayed incorrectly in Kibana. With this update, Kibana displays all valid UTF-8 symbols correctly.
(link:https://issues.redhat.com/browse/LOG-2784[LOG-2784]) - -== CVEs -[id="logging-5-5-0-CVEs"] -* link:https://access.redhat.com/security/cve/CVE-2021-38561[CVE-2021-38561] -* link:https://access.redhat.com/security/cve/CVE-2022-1012[CVE-2022-1012] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-21698[CVE-2022-21698] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32250[CVE-2022-32250] diff --git a/modules/logging-rn-5.5.1.adoc b/modules/logging-rn-5.5.1.adoc deleted file mode 100644 index a12aee053f3c..000000000000 --- a/modules/logging-rn-5.5.1.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -:_content-type: REFERENCE -[id="logging-release-notes-5-5-1"] -= Logging 5.5.1 -This release includes link:https://access.redhat.com/errata/RHSA-2022:6344[OpenShift Logging Bug Fix Release 5.5.1]. - -[id="logging-5-5-1-enhancements_{context}"] -== Enhancements -* This enhancement adds an *Aggregated Logs* tab to the *Pod Details* page of the {product-title} web console when the Logging Console Plug-in is in use. This enhancement is only available on {product-title} 4.10 and later. (link:https://issues.redhat.com/browse/LOG-2647[LOG-2647]) - -* This enhancement adds Google Cloud Logging as an output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-1482[LOG-1482]) - -[id="logging-5-5-1-bug-fixes_{context}"] -== Bug fixes -* Before this update, the Operator did not ensure that the pod was ready, which caused the cluster to reach an inoperable state during a cluster restart. With this update, the Operator marks new pods as ready before continuing to a new pod during a restart, which resolves the issue. (link:https://issues.redhat.com/browse/LOG-2745[LOG-2745]) - -* Before this update, Fluentd would sometimes not recognize that the Kubernetes platform rotated the log file and would no longer read log messages. This update corrects that by setting the configuration parameter suggested by the upstream development team. (link:https://issues.redhat.com/browse/LOG-2995[LOG-2995]) - -* Before this update, the addition of multi-line error detection caused internal routing to change and forward records to the wrong destination. With this update, the internal routing is correct. (link:https://issues.redhat.com/browse/LOG-2801[LOG-2801]) - -* Before this update, changing the {product-title} web console's refresh interval created an error when the *Query* field was empty. With this update, changing the interval is not an available option when the *Query* field is empty. 
(link:https://issues.redhat.com/browse/LOG-2917[LOG-2917]) - -[id="logging-5-5-1-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-1705[CVE-2022-1705] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-30631[CVE-2022-30631] -* link:https://access.redhat.com/security/cve/CVE-2022-32148[CVE-2022-32148] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] diff --git a/modules/logging-rn-5.5.10.adoc b/modules/logging-rn-5.5.10.adoc deleted file mode 100644 index 166f9fb37a46..000000000000 --- a/modules/logging-rn-5.5.10.adoc +++ /dev/null @@ -1,18 +0,0 @@ -//module included in logging-release-notes.adoc -:content-type: REFERENCE -[id="logging-release-notes-5-5-10{context}"] -= Logging 5.5.10 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1827[OpenShift Logging Bug Fix Release 5.5.10]. - -[id="logging-5-5-10-bug-fixes"] -== Bug fixes -* Before this update, the logging view plugin of the OpenShift Web Console showed only an error text when the LokiStack was not reachable. After this update the plugin shows a proper error message with details on how to fix the unreachable LokiStack. (link:https://issues.redhat.com/browse/LOG-2874[LOG-2874]) - -[id="logging-5-5-10-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/logging-rn-5.5.2.adoc b/modules/logging-rn-5.5.2.adoc deleted file mode 100644 index e02c78703e31..000000000000 --- a/modules/logging-rn-5.5.2.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -:_content-type: REFERENCE -[id="logging-release-notes-5-5-2_{context}"] -= Logging 5.5.2 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6559[OpenShift Logging Bug Fix Release 5.5.2]. - -[id="logging-5-5-2-bug-fixes_{context}"] -== Bug fixes -* Before this update, alerting rules for the Fluentd collector did not adhere to the {product-title} monitoring style guidelines. This update modifies those alerts to include the namespace label, resolving the issue. (link:https://issues.redhat.com/browse/LOG-1823[LOG-1823]) - -* Before this update, the index management rollover script failed to generate a new index name whenever there was more than one hyphen character in the name of the index. With this update, index names generate correctly. (link:https://issues.redhat.com/browse/LOG-2644[LOG-2644]) - -* Before this update, the Kibana route was setting a `caCertificate` value without a certificate present. With this update, no `caCertificate` value is set. (link:https://issues.redhat.com/browse/LOG-2661[LOG-2661]) - -* Before this update, a change in the collector dependencies caused it to issue a warning message for unused parameters. With this update, removing unused configuration parameters resolves the issue. 
(link:https://issues.redhat.com/browse/LOG-2859[LOG-2859]) - -* Before this update, pods created for deployments that Loki Operator created were mistakenly scheduled on nodes with non-Linux operating systems, if such nodes were available in the cluster the Operator was running in. With this update, the Operator attaches an additional node-selector to the pod definitions which only allows scheduling the pods on Linux-based nodes. (link:https://issues.redhat.com/browse/LOG-2895[LOG-2895]) - -* Before this update, the OpenShift Console Logs view did not filter logs by severity due to a LogQL parser issue in the LokiStack gateway. With this update, a parser fix resolves the issue and the OpenShift Console Logs view can filter by severity. (link:https://issues.redhat.com/browse/LOG-2908[LOG-2908]) - -* Before this update, a refactoring of the Fluentd collector plugins removed the timestamp field for events. This update restores the timestamp field, sourced from the event's received time. (link:https://issues.redhat.com/browse/LOG-2923[LOG-2923]) - -* Before this update, absence of a `level` field in audit logs caused an error in vector logs. With this update, the addition of a `level` field in the audit log record resolves the issue. (link:https://issues.redhat.com/browse/LOG-2961[LOG-2961]) - -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-3053[LOG-3053]) - -* Before this update, each rollover job created empty indices when the `ClusterLogForwarder` custom resource had JSON parsing defined. With this update, new indices are not empty. (link:https://issues.redhat.com/browse/LOG-3063[LOG-3063]) - -* Before this update, when the user deleted the LokiStack after an update to Loki Operator 5.5 resources originally created by Loki Operator 5.4 remained. With this update, the resources' owner-references point to the 5.5 LokiStack. (link:https://issues.redhat.com/browse/LOG-2945[LOG-2945]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-2918[LOG-2918]) - -* Before this update, users with cluster-admin privileges were not able to properly view infrastructure and audit logs using the logging console. With this update, the authorization check has been extended to also recognize users in cluster-admin and dedicated-admin groups as admins. 
(link:https://issues.redhat.com/browse/LOG-2970[LOG-2970]) - -[id="logging-5-5-2-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/logging-rn-5.5.3.adoc b/modules/logging-rn-5.5.3.adoc deleted file mode 100644 index f2c459d907d8..000000000000 --- a/modules/logging-rn-5.5.3.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -:_content-type: REFERENCE -[id="logging-release-notes-5-5-3_{context}"] -= Logging 5.5.3 -This release includes link:https://access.redhat.com/errata/RHBA-2022:6858[OpenShift Logging Bug Fix Release 5.5.3]. - -[id="logging-5-5-3-bug-fixes_{context}"] -== Bug fixes -* Before this update, log entries that had structured messages included the original message field, which made the entry larger. This update removes the message field for structured logs to reduce the increased size. (link:https://issues.redhat.com/browse/LOG-2759[LOG-2759]) - -* Before this update, the collector configuration excluded logs from `collector`, `default-log-store`, and `visualization` pods, but was unable to exclude logs archived in a `.gz` file. With this update, archived logs stored as `.gz` files of `collector`, `default-log-store`, and `visualization` pods are also excluded. (link:https://issues.redhat.com/browse/LOG-2844[LOG-2844]) - -* Before this update, when requests to an unavailable pod were sent through the gateway, no alert would warn of the disruption. With this update, individual alerts will generate if the gateway has issues completing a write or read request. (link:https://issues.redhat.com/browse/LOG-2884[LOG-2884]) - -* Before this update, pod metadata could be altered by fluent plugins because the values passed through the pipeline by reference. This update ensures each log message receives a copy of the pod metadata so each message processes independently. (link:https://issues.redhat.com/browse/LOG-3046[LOG-3046]) - -* Before this update, selecting *unknown* severity in the OpenShift Console Logs view excluded logs with a `level=unknown` value. With this update, logs without level and with `level=unknown` values are visible when filtering by *unknown* severity. (link:https://issues.redhat.com/browse/LOG-3062[LOG-3062]) - -* Before this update, log records sent to Elasticsearch had an extra field named `write-index` that contained the name of the index to which the logs needed to be sent. This field is not a part of the data model. After this update, this field is no longer sent. (link:https://issues.redhat.com/browse/LOG-3075[LOG-3075]) - -* With the introduction of the new built-in link:https://cloud.redhat.com/blog/pod-security-admission-in-openshift-4.11[Pod Security Admission Controller], Pods not configured in accordance with the enforced security standards defined globally or on the namespace level cannot run. 
With this update, the Operator and collectors allow privileged execution and run without security audit warnings or errors. (link:https://issues.redhat.com/browse/LOG-3077[LOG-3077]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3095[LOG-3095]) - -[id="logging-5-5-3-cves_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2015-20107[CVE-2015-20107] -* link:https://access.redhat.com/security/cve/CVE-2022-0391[CVE-2022-0391] -* link:https://access.redhat.com/security/cve/CVE-2022-2526[CVE-2022-2526] -* link:https://access.redhat.com/security/cve/CVE-2022-21123[CVE-2022-21123] -* link:https://access.redhat.com/security/cve/CVE-2022-21125[CVE-2022-21125] -* link:https://access.redhat.com/security/cve/CVE-2022-21166[CVE-2022-21166] -* link:https://access.redhat.com/security/cve/CVE-2022-29154[CVE-2022-29154] -* link:https://access.redhat.com/security/cve/CVE-2022-32206[CVE-2022-32206] -* link:https://access.redhat.com/security/cve/CVE-2022-32208[CVE-2022-32208] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] diff --git a/modules/logging-rn-5.5.4.adoc b/modules/logging-rn-5.5.4.adoc deleted file mode 100644 index 96ff0ba57dd7..000000000000 --- a/modules/logging-rn-5.5.4.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -:_content-type: REFERENCE -[id="logging-release-notes-5-5-4_{context}"] -= Logging 5.5.4 -This release includes link:https://access.redhat.com/errata/RHSA-2022:7434[OpenShift Logging Bug Fix Release 5.5.4]. - -[id="logging-5-5-4-bug-fixes"] -== Bug fixes -* Before this update, an error in the query parser of the logging view plugin caused parts of the logs query to disappear if the query contained curly brackets `{}`. This made the queries invalid, leading to errors being returned for valid queries. With this update, the parser correctly handles these queries. (link:https://issues.redhat.com/browse/LOG-3042[LOG-3042]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. (link:https://issues.redhat.com/browse/LOG-3049[LOG-3049]) - -* Before this update, no alerts were implemented to support the collector implementation of Vector. This change adds Vector alerts and deploys separate alerts, depending upon the chosen collector implementation. (link:https://issues.redhat.com/browse/LOG-3127[LOG-3127]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3138[LOG-3138]) - -* Before this update, a prior refactoring of the logging `must-gather` scripts removed the expected location for the artifacts. This update reverts that change to write artifacts to the `/must-gather` folder. (link:https://issues.redhat.com/browse/LOG-3213[LOG-3213]) - -* Before this update, on certain clusters, the Prometheus exporter would bind on IPv4 instead of IPv6. 
After this update, Fluentd detects the IP version and binds to `0.0.0.0` for IPv4 or `[::]` for IPv6. (link:https://issues.redhat.com/browse/LOG-3162[LOG-3162]) - -[id="logging-5-5-4-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2022-0494[CVE-2022-0494] -* link:https://access.redhat.com/security/cve/CVE-2022-1353[CVE-2022-1353] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2588[CVE-2022-2588] -* link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-23816[CVE-2022-23816] -* link:https://access.redhat.com/security/cve/CVE-2022-23825[CVE-2022-23825] -* link:https://access.redhat.com/security/cve/CVE-2022-29900[CVE-2022-29900] -* link:https://access.redhat.com/security/cve/CVE-2022-29901[CVE-2022-29901] -* link:https://access.redhat.com/security/cve/CVE-2022-32149[CVE-2022-32149] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-40674[CVE-2022-40674] diff --git a/modules/logging-rn-5.5.5.adoc b/modules/logging-rn-5.5.5.adoc deleted file mode 100644 index 63285a89387a..000000000000 --- a/modules/logging-rn-5.5.5.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -//logging-5-5-release-notes -:_content-type: REFERENCE -[id="logging-release-notes-5-5-5_{context}"] -= Logging 5.5.5 -This release includes link:https://access.redhat.com/errata/RHSA-2022:8781[OpenShift Logging Bug Fix Release 5.5.5]. - -[id="logging-5-5-5-bug-fixes"] -== Bug fixes -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3305[LOG-3305]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3284[LOG-3284]) - -* Before this update, the `FluentdQueueLengthIncreasing` alert could fail to fire when there was a cardinality issue with the set of labels returned from this alert expression. This update reduces labels to only include those required for the alert. (https://issues.redhat.com/browse/LOG-3226[LOG-3226]) - -* Before this update, Loki did not have support to reach an external storage in a disconnected cluster. With this update, proxy environment variables and proxy trusted CA bundles are included in the container image to support these connections. 
(link:https://issues.redhat.com/browse/LOG-2860[LOG-2860]) - -* Before this update, {product-title} web console users could not choose the `ConfigMap` object that includes the CA certificate for Loki, causing pods to operate without the CA. With this update, web console users can select the config map, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3310[LOG-3310]) - -* Before this update, the CA key was used as volume name for mounting the CA into Loki, causing error states when the CA Key included non-conforming characters (such as dots). With this update, the volume name is standardized to an internal string which resolves the issue. (link:https://issues.redhat.com/browse/LOG-3332[LOG-3332]) - -[id="logging-5-5-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2016-3709[CVE-2016-3709] -* link:https://access.redhat.com/security/cve/CVE-2020-35525[CVE-2020-35525] -* link:https://access.redhat.com/security/cve/CVE-2020-35527[CVE-2020-35527] -* link:https://access.redhat.com/security/cve/CVE-2020-36516[CVE-2020-36516] -* link:https://access.redhat.com/security/cve/CVE-2020-36558[CVE-2020-36558] -* link:https://access.redhat.com/security/cve/CVE-2021-3640[CVE-2021-3640] -* link:https://access.redhat.com/security/cve/CVE-2021-30002[CVE-2021-30002] -* link:https://access.redhat.com/security/cve/CVE-2022-0168[CVE-2022-0168] -* link:https://access.redhat.com/security/cve/CVE-2022-0561[CVE-2022-0561] -* link:https://access.redhat.com/security/cve/CVE-2022-0562[CVE-2022-0562] -* link:https://access.redhat.com/security/cve/CVE-2022-0617[CVE-2022-0617] -* link:https://access.redhat.com/security/cve/CVE-2022-0854[CVE-2022-0854] -* link:https://access.redhat.com/security/cve/CVE-2022-0865[CVE-2022-0865] -* link:https://access.redhat.com/security/cve/CVE-2022-0891[CVE-2022-0891] -* link:https://access.redhat.com/security/cve/CVE-2022-0908[CVE-2022-0908] -* link:https://access.redhat.com/security/cve/CVE-2022-0909[CVE-2022-0909] -* link:https://access.redhat.com/security/cve/CVE-2022-0924[CVE-2022-0924] -* link:https://access.redhat.com/security/cve/CVE-2022-1016[CVE-2022-1016] -* link:https://access.redhat.com/security/cve/CVE-2022-1048[CVE-2022-1048] -* link:https://access.redhat.com/security/cve/CVE-2022-1055[CVE-2022-1055] -* link:https://access.redhat.com/security/cve/CVE-2022-1184[CVE-2022-1184] -* link:https://access.redhat.com/security/cve/CVE-2022-1292[CVE-2022-1292] -* link:https://access.redhat.com/security/cve/CVE-2022-1304[CVE-2022-1304] -* link:https://access.redhat.com/security/cve/CVE-2022-1355[CVE-2022-1355] -* link:https://access.redhat.com/security/cve/CVE-2022-1586[CVE-2022-1586] -* link:https://access.redhat.com/security/cve/CVE-2022-1785[CVE-2022-1785] -* link:https://access.redhat.com/security/cve/CVE-2022-1852[CVE-2022-1852] -* link:https://access.redhat.com/security/cve/CVE-2022-1897[CVE-2022-1897] -* link:https://access.redhat.com/security/cve/CVE-2022-1927[CVE-2022-1927] -* link:https://access.redhat.com/security/cve/CVE-2022-2068[CVE-2022-2068] -* link:https://access.redhat.com/security/cve/CVE-2022-2078[CVE-2022-2078] -* link:https://access.redhat.com/security/cve/CVE-2022-2097[CVE-2022-2097] -* link:https://access.redhat.com/security/cve/CVE-2022-2509[CVE-2022-2509] -* link:https://access.redhat.com/security/cve/CVE-2022-2586[CVE-2022-2586] -* link:https://access.redhat.com/security/cve/CVE-2022-2639[CVE-2022-2639] -* link:https://access.redhat.com/security/cve/CVE-2022-2938[CVE-2022-2938] -* 
link:https://access.redhat.com/security/cve/CVE-2022-3515[CVE-2022-3515] -* link:https://access.redhat.com/security/cve/CVE-2022-20368[CVE-2022-20368] -* link:https://access.redhat.com/security/cve/CVE-2022-21499[CVE-2022-21499] -* link:https://access.redhat.com/security/cve/CVE-2022-21618[CVE-2022-21618] -* link:https://access.redhat.com/security/cve/CVE-2022-21619[CVE-2022-21619] -* link:https://access.redhat.com/security/cve/CVE-2022-21624[CVE-2022-21624] -* link:https://access.redhat.com/security/cve/CVE-2022-21626[CVE-2022-21626] -* link:https://access.redhat.com/security/cve/CVE-2022-21628[CVE-2022-21628] -* link:https://access.redhat.com/security/cve/CVE-2022-22624[CVE-2022-22624] -* link:https://access.redhat.com/security/cve/CVE-2022-22628[CVE-2022-22628] -* link:https://access.redhat.com/security/cve/CVE-2022-22629[CVE-2022-22629] -* link:https://access.redhat.com/security/cve/CVE-2022-22662[CVE-2022-22662] -* link:https://access.redhat.com/security/cve/CVE-2022-22844[CVE-2022-22844] -* link:https://access.redhat.com/security/cve/CVE-2022-23960[CVE-2022-23960] -* link:https://access.redhat.com/security/cve/CVE-2022-24448[CVE-2022-24448] -* link:https://access.redhat.com/security/cve/CVE-2022-25255[CVE-2022-25255] -* link:https://access.redhat.com/security/cve/CVE-2022-26373[CVE-2022-26373] -* link:https://access.redhat.com/security/cve/CVE-2022-26700[CVE-2022-26700] -* link:https://access.redhat.com/security/cve/CVE-2022-26709[CVE-2022-26709] -* link:https://access.redhat.com/security/cve/CVE-2022-26710[CVE-2022-26710] -* link:https://access.redhat.com/security/cve/CVE-2022-26716[CVE-2022-26716] -* link:https://access.redhat.com/security/cve/CVE-2022-26717[CVE-2022-26717] -* link:https://access.redhat.com/security/cve/CVE-2022-26719[CVE-2022-26719] -* link:https://access.redhat.com/security/cve/CVE-2022-27404[CVE-2022-27404] -* link:https://access.redhat.com/security/cve/CVE-2022-27405[CVE-2022-27405] -* link:https://access.redhat.com/security/cve/CVE-2022-27406[CVE-2022-27406] -* link:https://access.redhat.com/security/cve/CVE-2022-27950[CVE-2022-27950] -* link:https://access.redhat.com/security/cve/CVE-2022-28390[CVE-2022-28390] -* link:https://access.redhat.com/security/cve/CVE-2022-28893[CVE-2022-28893] -* link:https://access.redhat.com/security/cve/CVE-2022-29581[CVE-2022-29581] -* link:https://access.redhat.com/security/cve/CVE-2022-30293[CVE-2022-30293] -* link:https://access.redhat.com/security/cve/CVE-2022-34903[CVE-2022-34903] -* link:https://access.redhat.com/security/cve/CVE-2022-36946[CVE-2022-36946] -* link:https://access.redhat.com/security/cve/CVE-2022-37434[CVE-2022-37434] -* link:https://access.redhat.com/security/cve/CVE-2022-39399[CVE-2022-39399] diff --git a/modules/logging-rn-5.5.6.adoc b/modules/logging-rn-5.5.6.adoc deleted file mode 100644 index 2dbc60c76f38..000000000000 --- a/modules/logging-rn-5.5.6.adoc +++ /dev/null @@ -1,49 +0,0 @@ -//module included in logging-5-5-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-5-6_{context}"] -= Logging 5.5.6 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0386[OpenShift Logging Bug Fix Release 5.5.6]. - -[id="logging-5-5-6-bug-fixes"] -== Bug fixes -* Before this update, the Pod Security admission controller added the label `podSecurityLabelSync = true` to the `openshift-logging` namespace. This resulted in our specified security labels being overwritten, and as a result Collector pods would not start. 
With this update, the label `podSecurityLabelSync = false` preserves security labels. Collector pods deploy as expected. (link:https://issues.redhat.com/browse/LOG-3340[LOG-3340]) - -* Before this update, the Operator installed the console view plugin, even when it was not enabled on the cluster. This caused the Operator to crash. With this update, if an account for a cluster does not have the console view enabled, the Operator functions normally and does not install the console view. (link:https://issues.redhat.com/browse/LOG-3407[LOG-3407]) - -* Before this update, a prior fix to support a regression where the status of the Elasticsearch deployment was not being updated caused the Operator to crash unless the `Red Hat Elasticsearch Operator` was deployed. With this update, that fix has been reverted so the Operator is now stable but re-introduces the previous issue related to the reported status. (link:https://issues.redhat.com/browse/LOG-3428[LOG-3428]) - -* Before this update, the Loki Operator only deployed one replica of the LokiStack gateway regardless of the chosen stack size. With this update, the number of replicas is correctly configured according to the selected size. (link:https://issues.redhat.com/browse/LOG-3478[LOG-3478]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3341[LOG-3341]) - -* Before this update, the logging view plugin contained an incompatible feature for certain versions of {product-title}. With this update, the correct release stream of the plugin resolves the issue. (link:https://issues.redhat.com/browse/LOG-3467[LOG-3467]) - -* Before this update, the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of one or more pipelines causing the collector pods to restart every 8-10 seconds. With this update, reconciliation of the `ClusterLogForwarder` custom resource processes correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3469[LOG-3469]) - -* Before this change the spec for the `outputDefaults` field of the ClusterLogForwarder custom resource would apply the settings to every declared Elasticsearch output type. This change corrects the behavior to match the enhancement specification where the setting specifically applies to the default managed Elasticsearch store. (link:https://issues.redhat.com/browse/LOG-3342[LOG-3342]) - -* Before this update, the OpenShift CLI (oc) `must-gather` script did not complete because the OpenShift CLI (oc) needs a folder with write permission to build its cache. With this update, the OpenShift CLI (oc) has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3472[LOG-3472]) - -* Before this update, the Loki Operator webhook server caused TLS errors. With this update, the Loki Operator webhook PKI is managed by the Operator Lifecycle Manager's dynamic webhook management resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3511[LOG-3511]) - -[id="logging-5-5-6-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-2056[CVE-2022-2056] -* link:https://access.redhat.com/security/cve/CVE-2022-2057[CVE-2022-2057] -* link:https://access.redhat.com/security/cve/CVE-2022-2058[CVE-2022-2058] -* link:https://access.redhat.com/security/cve/CVE-2022-2519[CVE-2022-2519] -* link:https://access.redhat.com/security/cve/CVE-2022-2520[CVE-2022-2520] -* link:https://access.redhat.com/security/cve/CVE-2022-2521[CVE-2022-2521] -* link:https://access.redhat.com/security/cve/CVE-2022-2867[CVE-2022-2867] -* link:https://access.redhat.com/security/cve/CVE-2022-2868[CVE-2022-2868] -* link:https://access.redhat.com/security/cve/CVE-2022-2869[CVE-2022-2869] -* link:https://access.redhat.com/security/cve/CVE-2022-2953[CVE-2022-2953] -* link:https://access.redhat.com/security/cve/CVE-2022-2964[CVE-2022-2964] -* link:https://access.redhat.com/security/cve/CVE-2022-4139[CVE-2022-4139] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/logging-rn-5.5.7.adoc b/modules/logging-rn-5.5.7.adoc deleted file mode 100644 index f91bbb44d4c0..000000000000 --- a/modules/logging-rn-5.5.7.adoc +++ /dev/null @@ -1,22 +0,0 @@ -//module included in logging-5-5-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-5-7_{context}"] -= Logging 5.5.7 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0633[OpenShift Logging Bug Fix Release 5.5.7]. - -[id="logging-5-5-7-bug-fixes"] -== Bug fixes -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3534[LOG-3534]) - -* Before this update, the `ClusterLogForwarder` custom resource (CR) did not pass TLS credentials for syslog output to Fluentd, resulting in errors during forwarding. With this update, credentials pass correctly to Fluentd, resolving the issue. 
(link:https://issues.redhat.com/browse/LOG-3533[LOG-3533]) - -[id="logging-5-5-7-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/logging-rn-5.5.8.adoc b/modules/logging-rn-5.5.8.adoc deleted file mode 100644 index 4ab12f6250fb..000000000000 --- a/modules/logging-rn-5.5.8.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: REFERENCE -[id="logging-release-notes-5-5-8_{context}"] -= Logging 5.5.8 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0930[OpenShift Logging Bug Fix Release 5.5.8]. - -[id="logging-5-5-8-bug-fixes"] -== Bug fixes -* Before this update, the `priority` field was missing from `systemd` logs due to an error in how the collector set `level` fields. With this update, these fields are set correctly, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3630[LOG-3630]) - -[id="logging-5-5-8-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-24999[CVE-2022-24999] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/logging-rn-5.5.9.adoc b/modules/logging-rn-5.5.9.adoc deleted file mode 100644 index 7bded1a43190..000000000000 --- a/modules/logging-rn-5.5.9.adoc +++ /dev/null @@ -1,21 +0,0 @@ -//module included in logging-5-5-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-5-9_{context}"] -= Logging 5.5.9 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1310[OpenShift Logging Bug Fix Release 5.5.9]. - -[id="logging-5-5-9-bug-fixes"] -== Bug fixes -* Before this update, a problem with the Fluentd collector caused it to not capture OAuth login events stored in `/var/log/auth-server/audit.log`. This led to incomplete collection of login events from the OAuth service. With this update, the Fluentd collector now resolves this issue by capturing all login events from the OAuth service, including those stored in `/var/log/auth-server/audit.log`, as expected. (link:https://issues.redhat.com/browse/LOG-3730[LOG-3730]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not.
With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received logs now have structured messages included, even when they are forwarded to multiple destinations.(link:https://issues.redhat.com/browse/LOG-3767[LOG-3767]) - -[id="logging-5-5-9-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* link:https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* link:https://access.redhat.com/security/cve/CVE-2022-41717[CVE-2022-41717] -* link:https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* link:https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* link:https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* link:https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/logging-rn-5.6.0.adoc b/modules/logging-rn-5.6.0.adoc deleted file mode 100644 index 338f8ea1e7cf..000000000000 --- a/modules/logging-rn-5.6.0.adoc +++ /dev/null @@ -1,86 +0,0 @@ -//included in cluster-logging-release-notes.adoc -:_content-type: ASSEMBLY -[id="logging-release-notes-5-6-0_{context}"] -= Logging 5.6.0 - -This release includes link:https://access.redhat.com/errata/RHSA-2023:0264[OpenShift Logging Release 5.6]. - -[id="logging-5-6-dep-notice_{context}"] -== Deprecation notice -In logging version 5.6, Fluentd is deprecated and is planned to be removed in a future release. Red Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed. As an alternative to Fluentd, you can use Vector instead. - -[id="logging-5-6-enhancements_{context}"] -== Enhancements -* With this update, Logging is compliant with {product-title} cluster-wide cryptographic policies. - (link:https://issues.redhat.com/browse/LOG-895[LOG-895]) - -* With this update, you can declare per-tenant, per-stream, and global policies retention policies through the LokiStack custom resource, ordered by priority. (link:https://issues.redhat.com/browse/LOG-2695[LOG-2695]) - -* With this update, Splunk is an available output option for log forwarding. (link:https://issues.redhat.com/browse/LOG-2913[LOG-2913]) - -* With this update, Vector replaces Fluentd as the default Collector. (link:https://issues.redhat.com/browse/LOG-2222[LOG-2222]) - -* With this update, the *Developer* role can access the per-project workload logs they are assigned to within the Log Console Plugin on clusters running {product-title} 4.11 and higher. (link:https://issues.redhat.com/browse/LOG-3388[LOG-3388]) - -* With this update, logs from any source contain a field `openshift.cluster_id`, the unique identifier of the cluster in which the Operator is deployed. You can view the `clusterID` value with the command below. (link:https://issues.redhat.com/browse/LOG-2715[LOG-2715]) - -include::snippets/logging-get-clusterid-snip.adoc[lines=9..12] - -[id="logging-5-6-known-issues_{context}"] -== Known Issues -* Before this update, Elasticsearch would reject logs if multiple label keys had the same prefix and some keys included the `.` character. This fixes the limitation of Elasticsearch by replacing `.` in the label keys with `_`. As a workaround for this issue, remove the labels that cause errors, or add a namespace to the label. 
(link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -[id="logging-5-6-bug-fixes_{context}"] -== Bug fixes -* Before this update, if you deleted the Kibana Custom Resource, the {product-title} web console continued displaying a link to Kibana. With this update, removing the Kibana Custom Resource also removes that link. (link:https://issues.redhat.com/browse/LOG-2993[LOG-2993]) - -* Before this update, a user was not able to view the application logs of namespaces they have access to. With this update, the Loki Operator automatically creates a cluster role and cluster role binding allowing users to read application logs. (link:https://issues.redhat.com/browse/LOG-3072[LOG-3072]) - -* Before this update, the Operator removed any custom outputs defined in the `ClusterLogForwarder` custom resource when using LokiStack as the default log storage. With this update, the Operator merges custom outputs with the default outputs when processing the `ClusterLogForwarder` custom resource. (link:https://issues.redhat.com/browse/LOG-3090[LOG-3090]) - -* Before this update, the CA key was used as the volume name for mounting the CA into Loki, causing error states when the CA Key included non-conforming characters, such as dots. With this update, the volume name is standardized to an internal string which resolves the issue. (link:https://issues.redhat.com/browse/LOG-3331[LOG-3331]) - -* Before this update, a default value set within the LokiStack Custom Resource Definition, caused an inability to create a LokiStack instance without a `ReplicationFactor` of `1`. With this update, the operator sets the actual value for the size used. (link:https://issues.redhat.com/browse/LOG-3296[LOG-3296]) - -* Before this update, Vector parsed the message field when JSON parsing was enabled without also defining `structuredTypeKey` or `structuredTypeName` values. With this update, a value is required for either `structuredTypeKey` or `structuredTypeName` when writing structured logs to Elasticsearch. (link:https://issues.redhat.com/browse/LOG-3195[LOG-3195]) - -* Before this update, the secret creation component of the Elasticsearch Operator modified internal secrets constantly. With this update, the existing secret is properly handled. (link:https://issues.redhat.com/browse/LOG-3161[LOG-3161]) - -* Before this update, the Operator could enter a loop of removing and recreating the collector daemonset while the Elasticsearch or Kibana deployments changed their status. With this update, a fix in the status handling of the Operator resolves the issue. (link:https://issues.redhat.com/browse/LOG-3157[LOG-3157]) - -* Before this update, Kibana had a fixed `24h` OAuth cookie expiration time, which resulted in 401 errors in Kibana whenever the `accessTokenInactivityTimeout` field was set to a value lower than `24h`. With this update, Kibana's OAuth cookie expiration time synchronizes to the `accessTokenInactivityTimeout`, with a default value of `24h`. (link:https://issues.redhat.com/browse/LOG-3129[LOG-3129]) - -* Before this update, the Operators general pattern for reconciling resources was to try and create before attempting to get or update which would lead to constant HTTP 409 responses after creation. With this update, Operators first attempt to retrieve an object and only create or update it if it is either missing or not as specified. (link:https://issues.redhat.com/browse/LOG-2919[LOG-2919]) - -* Before this update, the `.level` and`.structure.level` fields in Fluentd could contain different values. 
With this update, the values are the same for each field. (link:https://issues.redhat.com/browse/LOG-2819[LOG-2819]) - -* Before this update, the Operator did not wait for the population of the trusted CA bundle and deployed the collector a second time once the bundle updated. With this update, the Operator waits briefly to see if the bundle has been populated before it continues the collector deployment. (link:https://issues.redhat.com/browse/LOG-2789[LOG-2789]) - -* Before this update, logging telemetry info appeared twice when reviewing metrics. With this update, logging telemetry info displays as expected. (link:https://issues.redhat.com/browse/LOG-2315[LOG-2315]) - -* Before this update, Fluentd pod logs contained a warning message after enabling the JSON parsing addition. With this update, that warning message does not appear. (link:https://issues.redhat.com/browse/LOG-1806[LOG-1806]) - -* Before this update, the `must-gather` script did not complete because `oc` needs a folder with write permission to build its cache. With this update, `oc` has write permissions to a folder, and the `must-gather` script completes successfully. (link:https://issues.redhat.com/browse/LOG-3446[LOG-3446]) - -* Before this update the log collector SCC could be superseded by other SCCs on the cluster, rendering the collector unusable. This update sets the priority of the log collector SCC so that it takes precedence over the others. (link:https://issues.redhat.com/browse/LOG-3235[LOG-3235]) - -* Before this update, Vector was missing the field `sequence`, which was added to fluentd as a way to deal with a lack of actual nanoseconds precision. With this update, the field `openshift.sequence` has been added to the event logs. (link:https://issues.redhat.com/browse/LOG-3106[LOG-3106]) - -[id="logging-5-6-cves_{context}"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2020-36518[CVE-2020-36518] -* https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* https://access.redhat.com/security/cve/CVE-2022-2879[CVE-2022-2879] -* https://access.redhat.com/security/cve/CVE-2022-2880[CVE-2022-2880] -* https://access.redhat.com/security/cve/CVE-2022-27664[CVE-2022-27664] -* https://access.redhat.com/security/cve/CVE-2022-32190[CVE-2022-32190] -* https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* https://access.redhat.com/security/cve/CVE-2022-37601[CVE-2022-37601] -* https://access.redhat.com/security/cve/CVE-2022-41715[CVE-2022-41715] -* https://access.redhat.com/security/cve/CVE-2022-42003[CVE-2022-42003] -* https://access.redhat.com/security/cve/CVE-2022-42004[CVE-2022-42004] -* https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] diff --git a/modules/logging-rn-5.6.1.adoc b/modules/logging-rn-5.6.1.adoc deleted file mode 100644 index f01a180b63b1..000000000000 --- a/modules/logging-rn-5.6.1.adoc +++ /dev/null @@ -1,35 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-6-1_{context}"] -= Logging 5.6.1 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0634[OpenShift Logging Bug Fix Release 5.6.1]. 
- -[id="logging-5-6-1-bug-fixes"] -== Bug fixes -* Before this update, the compactor would report TLS certificate errors from communications with the querier when retention was active. With this update, the compactor and querier no longer communicate erroneously over HTTP. (link:https://issues.redhat.com/browse/LOG-3494[LOG-3494]) - -* Before this update, the Loki Operator would not retry setting the status of the `LokiStack` CR, which caused stale status information. With this update, the Operator retries status information updates on conflict. (link:https://issues.redhat.com/browse/LOG-3496[LOG-3496]) - -* Before this update, the Loki Operator Webhook server caused TLS errors when the `kube-apiserver-operator` Operator checked the webhook validity. With this update, the Loki Operator Webhook PKI is managed by the Operator Lifecycle Manager (OLM), resolving the issue. (link:https://issues.redhat.com/browse/LOG-3510[LOG-3510]) - -* Before this update, the LokiStack Gateway Labels Enforcer generated parsing errors for valid LogQL queries when using combined label filters with boolean expressions. With this update, the LokiStack LogQL implementation supports label filters with boolean expression and resolves the issue. (link:https://issues.redhat.com/browse/LOG-3441[LOG-3441]), (link:https://issues.redhat.com/browse/LOG-3397[LOG-3397]) - -* Before this update, records written to Elasticsearch would fail if multiple label keys had the same prefix and some keys included dots. With this update, underscores replace dots in label keys, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3463[LOG-3463]) - -* Before this update, the `Red Hat OpenShift Logging` Operator was not available for {product-title} 4.10 clusters because of an incompatibility between {product-title} console and the logging-view-plugin. With this update, the plugin is properly integrated with the {product-title} 4.10 admin console. (link:https://issues.redhat.com/browse/LOG-3447[LOG-3447]) - -* Before this update the reconciliation of the `ClusterLogForwarder` custom resource would incorrectly report a degraded status of pipelines that reference the default logstore. 
With this update, the pipeline validates properly.(link:https://issues.redhat.com/browse/LOG-3477[LOG-3477]) - - -[id="logging-5-6-1-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2021-46848[CVE-2021-46848] -* link:https://access.redhat.com/security/cve/CVE-2022-3821[CVE-2022-3821] -* link:https://access.redhat.com/security/cve/CVE-2022-35737[CVE-2022-35737] -* link:https://access.redhat.com/security/cve/CVE-2022-42010[CVE-2022-42010] -* link:https://access.redhat.com/security/cve/CVE-2022-42011[CVE-2022-42011] -* link:https://access.redhat.com/security/cve/CVE-2022-42012[CVE-2022-42012] -* link:https://access.redhat.com/security/cve/CVE-2022-42898[CVE-2022-42898] -* link:https://access.redhat.com/security/cve/CVE-2022-43680[CVE-2022-43680] -* link:https://access.redhat.com/security/cve/CVE-2021-35065[CVE-2021-35065] -* link:https://access.redhat.com/security/cve/CVE-2022-46175[CVE-2022-46175] diff --git a/modules/logging-rn-5.6.2.adoc b/modules/logging-rn-5.6.2.adoc deleted file mode 100644 index 99dc99ba7c57..000000000000 --- a/modules/logging-rn-5.6.2.adoc +++ /dev/null @@ -1,29 +0,0 @@ -//module included in cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-6-2_{context}"] -= Logging 5.6.2 -This release includes link:https://access.redhat.com/errata/RHBA-2023:0793[OpenShift Logging Bug Fix Release 5.6.2]. - -[id="logging-5-6-2-bug-fixes"] -== Bug fixes -* Before this update, the collector did not set `level` fields correctly based on priority for systemd logs. With this update, `level` fields are set correctly. (link:https://issues.redhat.com/browse/LOG-3429[LOG-3429]) - -* Before this update, the Operator incorrectly generated incompatibility warnings on {product-title} 4.12 or later. With this update, the Operator max {product-title} version value has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3584[LOG-3584]) - -* Before this update, creating a `ClusterLogForwarder` custom resource (CR) with an output value of `default` did not generate any errors. With this update, an error warning that this value is invalid generates appropriately. (link:https://issues.redhat.com/browse/LOG-3437[LOG-3437]) - -* Before this update, when the `ClusterLogForwarder` custom resource (CR) had multiple pipelines configured with one output set as `default`, the collector pods restarted. With this update, the logic for output validation has been corrected, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3559[LOG-3559]) - -* Before this update, collector pods restarted after being created. With this update, the deployed collector does not restart on its own. (link:https://issues.redhat.com/browse/LOG-3608[LOG-3608]) - -* Before this update, patch releases removed previous versions of the Operators from the catalog. This made installing the old versions impossible. This update changes bundle configurations so that previous releases of the same minor version stay in the catalog. 
(link:https://issues.redhat.com/browse/LOG-3635[LOG-3635]) - -[id="logging-5-6-2-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-23521[CVE-2022-23521] -* link:https://access.redhat.com/security/cve/CVE-2022-40303[CVE-2022-40303] -* link:https://access.redhat.com/security/cve/CVE-2022-40304[CVE-2022-40304] -* link:https://access.redhat.com/security/cve/CVE-2022-41903[CVE-2022-41903] -* link:https://access.redhat.com/security/cve/CVE-2022-47629[CVE-2022-47629] -* link:https://access.redhat.com/security/cve/CVE-2023-21835[CVE-2023-21835] -* link:https://access.redhat.com/security/cve/CVE-2023-21843[CVE-2023-21843] diff --git a/modules/logging-rn-5.6.3.adoc b/modules/logging-rn-5.6.3.adoc deleted file mode 100644 index b920287d4904..000000000000 --- a/modules/logging-rn-5.6.3.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_content-type: REFERENCE -[id="logging-release-notes-5-6-3_{context}"] -= Logging 5.6.3 -This release includes link:https://access.redhat.com/errata/RHSA-2023:0932[OpenShift Logging Bug Fix Release 5.6.3]. - -[id="logging-5-6-3-bug-fixes"] -== Bug fixes -* Before this update, the operator stored gateway tenant secret information in a config map. With this update, the operator stores this information in a secret. (link:https://issues.redhat.com/browse/LOG-3717[LOG-3717]) - -* Before this update, the Fluentd collector did not capture OAuth login events stored in `/var/log/auth-server/audit.log`. With this update, Fluentd captures these OAuth login events, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3729[LOG-3729]) - -[id="logging-5-6-3-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2020-10735[CVE-2020-10735] -* link:https://access.redhat.com/security/cve/CVE-2021-28861[CVE-2021-28861] -* link:https://access.redhat.com/security/cve/CVE-2022-2873[CVE-2022-2873] -* link:https://access.redhat.com/security/cve/CVE-2022-4415[CVE-2022-4415] -* link:https://access.redhat.com/security/cve/CVE-2022-40897[CVE-2022-40897] -* link:https://access.redhat.com/security/cve/CVE-2022-41222[CVE-2022-41222] -* link:https://access.redhat.com/security/cve/CVE-2022-43945[CVE-2022-43945] -* link:https://access.redhat.com/security/cve/CVE-2022-45061[CVE-2022-45061] -* link:https://access.redhat.com/security/cve/CVE-2022-48303[CVE-2022-48303] diff --git a/modules/logging-rn-5.6.4.adoc b/modules/logging-rn-5.6.4.adoc deleted file mode 100644 index 14e09b280df6..000000000000 --- a/modules/logging-rn-5.6.4.adoc +++ /dev/null @@ -1,34 +0,0 @@ -//module included in logging-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-6-4_{context}"] -= Logging 5.6.4 -This release includes link:https://access.redhat.com/errata/RHBA-2023:1311[OpenShift Logging Bug Fix Release 5.6.4]. - -[id="logging-5-6-4-bug-fixes"] -== Bug fixes -* Before this update, when LokiStack was deployed as the log store, the logs generated by Loki pods were collected and sent to LokiStack. With this update, the logs generated by Loki are excluded from collection and will not be stored. (link:https://issues.redhat.com/browse/LOG-3280[LOG-3280]) - -* Before this update, when the query editor on the Logs page of the OpenShift Web Console was empty, the drop-down menus did not populate. With this update, if an empty query is attempted, an error message is displayed and the drop-down menus now populate as expected. 
(link:https://issues.redhat.com/browse/LOG-3454[LOG-3454]) - -* Before this update, when the `tls.insecureSkipVerify` option was set to `true`, the Cluster Logging Operator would generate incorrect configuration. As a result, the operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Cluster Logging Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3475[LOG-3475]) - -* Before this update, when structured parsing was enabled and messages were forwarded to multiple destinations, they were not deep copied. This resulted in some of the received logs including the structured message, while others did not. With this update, the configuration generation has been modified to deep copy messages before JSON parsing. As a result, all received messages now have structured messages included, even when they are forwarded to multiple destinations. (link:https://issues.redhat.com/browse/LOG-3640[LOG-3640]) - -* Before this update, if the `collection` field contained `{}` it could result in the Operator crashing. With this update, the Operator will ignore this value, allowing the operator to continue running smoothly without interruption. (link:https://issues.redhat.com/browse/LOG-3733[LOG-3733]) - -* Before this update, the `nodeSelector` attribute for the Gateway component of LokiStack did not have any effect. With this update, the `nodeSelector` attribute functions as expected. (link:https://issues.redhat.com/browse/LOG-3783[LOG-3783]) - -* Before this update, the static LokiStack memberlist configuration relied solely on private IP networks. As a result, when the {product-title} cluster pod network was configured with a public IP range, the LokiStack pods would crashloop. With this update, the LokiStack administrator now has the option to use the pod network for the memberlist configuration. This resolves the issue and prevents the LokiStack pods from entering a crashloop state when the {product-title} cluster pod network is configured with a public IP range. (link:https://issues.redhat.com/browse/LOG-3814[LOG-3814]) - -* Before this update, if the `tls.insecureSkipVerify` field was set to `true`, the Cluster Logging Operator would generate an incorrect configuration. As a result, the Operator would fail to send data to Elasticsearch when attempting to skip certificate validation. With this update, the Operator generates the correct TLS configuration even when `tls.insecureSkipVerify` is enabled. As a result, data can be sent successfully to Elasticsearch even when attempting to skip certificate validation. (link:https://issues.redhat.com/browse/LOG-3838[LOG-3838]) - -* Before this update, if the Cluster Logging Operator (CLO) was installed without the Elasticsearch Operator, the CLO pod would continuously display an error message related to the deletion of Elasticsearch. With this update, the CLO now performs additional checks before displaying any error messages. 
As a result, error messages related to Elasticsearch deletion are no longer displayed in the absence of the Elasticsearch Operator.(link:https://issues.redhat.com/browse/LOG-3763[LOG-3763]) - -[id="logging-5-6-4-CVEs"] -== CVEs -* https://access.redhat.com/security/cve/CVE-2022-4304[CVE-2022-4304] -* https://access.redhat.com/security/cve/CVE-2022-4450[CVE-2022-4450] -* https://access.redhat.com/security/cve/CVE-2023-0215[CVE-2023-0215] -* https://access.redhat.com/security/cve/CVE-2023-0286[CVE-2023-0286] -* https://access.redhat.com/security/cve/CVE-2023-0767[CVE-2023-0767] -* https://access.redhat.com/security/cve/CVE-2023-23916[CVE-2023-23916] diff --git a/modules/logging-rn-5.6.5.adoc b/modules/logging-rn-5.6.5.adoc deleted file mode 100644 index fa8808ddec79..000000000000 --- a/modules/logging-rn-5.6.5.adoc +++ /dev/null @@ -1,27 +0,0 @@ -//module included in logging-release-notes.adoc -:content-type: REFERENCE -[id="logging-release-notes-5-6-5{context}"] -= Logging 5.6.5 -This release includes link:https://access.redhat.com/errata/RHSA-2023:1953[OpenShift Logging Bug Fix Release 5.6.5]. - -[id="logging-5-6-5-bug-fixes"] -== Bug fixes -* Before this update, the template definitions prevented Elasticsearch from indexing some labels and namespace_labels, causing issues with data ingestion. With this update, the fix replaces dots and slashes in labels to ensure proper ingestion, effectively resolving the issue. (link:https://issues.redhat.com/browse/LOG-3419[LOG-3419]) - -* Before this update, if the Logs page of the OpenShift Web Console failed to connect to the LokiStack, a generic error message was displayed, providing no additional context or troubleshooting suggestions. With this update, the error message has been enhanced to include more specific details and recommendations for troubleshooting. (link:https://issues.redhat.com/browse/LOG-3750[LOG-3750]) - -* Before this update, time range formats were not validated, leading to errors selecting a custom date range. With this update, time formats are now validated, enabling users to select a valid range. If an invalid time range format is selected, an error message is displayed to the user. (link:https://issues.redhat.com/browse/LOG-3583[LOG-3583]) - -* Before this update, when searching logs in Loki, even if the length of an expression did not exceed 5120 characters, the query would fail in many cases. With this update, query authorization label matchers have been optimized, resolving the issue. (link:https://issues.redhat.com/browse/LOG-3480[LOG-3480]) - -* Before this update, the Loki Operator failed to produce a memberlist configuration that was sufficient for locating all the components when using a memberlist for private IPs. With this update, the fix ensures that the generated configuration includes the advertised port, allowing for successful lookup of all components. 
(link:https://issues.redhat.com/browse/LOG-4008[LOG-4008]) - -[id="logging-5-6-5-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2022-4269[CVE-2022-4269] -* link:https://access.redhat.com/security/cve/CVE-2022-4378[CVE-2022-4378] -* link:https://access.redhat.com/security/cve/CVE-2023-0266[CVE-2023-0266] -* link:https://access.redhat.com/security/cve/CVE-2023-0361[CVE-2023-0361] -* link:https://access.redhat.com/security/cve/CVE-2023-0386[CVE-2023-0386] -* link:https://access.redhat.com/security/cve/CVE-2023-27539[CVE-2023-27539] -* link:https://access.redhat.com/security/cve/CVE-2023-28120[CVE-2023-28120] diff --git a/modules/logging-rn-5.7.0.adoc b/modules/logging-rn-5.7.0.adoc deleted file mode 100644 index 067cd857f25e..000000000000 --- a/modules/logging-rn-5.7.0.adoc +++ /dev/null @@ -1,24 +0,0 @@ -//module included in logging-5-7-release-notes.adoc -:content-type: REFERENCE -[id="logging-release-notes-5-7-0{context}"] -= Logging 5.7.0 -This release includes link:https://access.redhat.com/errata/RHBA-2023:2133[OpenShift Logging Bug Fix Release 5.7.0]. - -[id="logging-5-7-enhancements"] -== Enhancements -With this update, you can enable logging to detect multi-line exceptions and reassemble them into a single log entry. - -To enable logging to detect multi-line exceptions and reassemble them into a single log entry, ensure that the `ClusterLogForwarder` Custom Resource (CR) contains a `detectMultilineErrors` field, with a value of `true`. - -[id="logging-5-7-known-issues"] -== Known Issues -None. - -[id="logging-5-7-0-bug-fixes"] -== Bug fixes -* Before this update, the `nodeSelector` attribute for the Gateway component of the LokiStack did not impact node scheduling. With this update, the `nodeSelector` attribute works as expected. (link:https://issues.redhat.com/browse/LOG-3713[LOG-3713]) - -[id="logging-5-7-0-CVEs"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-1999[CVE-2023-1999] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/logging-rn-5.7.1.adoc b/modules/logging-rn-5.7.1.adoc deleted file mode 100644 index 2dafeada478d..000000000000 --- a/modules/logging-rn-5.7.1.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// logging-5-7-release-notes.adoc -// cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="logging-release-notes-5-7-1_{context}"] -= Logging 5.7.1 -This release includes: link:https://access.redhat.com/errata/RHBA-2023:3197[OpenShift Logging Bug Fix Release 5.7.1]. - -[id="logging-5-7-1-bug-fixes_{context}"] -== Bug fixes -* Before this update, the presence of numerous noisy messages within the Cluster Logging Operator pod logs caused reduced log readability, and increased difficulty in identifying important system events. With this update, the issue is resolved by significantly reducing the noisy messages within Cluster Logging Operator pod logs. (link:https://issues.redhat.com/browse/LOG-3482[LOG-3482]) - -* Before this update, the API server would reset the value for the `CollectorSpec.Type` field to `vector`, even when the custom resource used a different value. This update removes the default for the `CollectorSpec.Type` field to restore the previous behavior. (link:https://issues.redhat.com/browse/LOG-4086[LOG-4086]) - -* Before this update, a time range could not be selected in the {Product-Title} web console by clicking and dragging over the logs histogram. 
With this update, clicking and dragging can be used to successfully select a time range. (link:https://issues.redhat.com/browse/LOG-4501[LOG-4501]) - -* Before this update, clicking on the *Show Resources* link in the {Product-Title} web console did not produce any effect. With this update, the issue is resolved by fixing the functionality of the "Show Resources" link to toggle the display of resources for each log entry. (link:https://issues.redhat.com/browse/LOG-3218[LOG-3218]) - -[id="logging-5-7-1-CVEs_{context}"] -== CVEs -* link:https://access.redhat.com/security/cve/CVE-2023-21930[CVE-2023-21930] -* link:https://access.redhat.com/security/cve/CVE-2023-21937[CVE-2023-21937] -* link:https://access.redhat.com/security/cve/CVE-2023-21938[CVE-2023-21938] -* link:https://access.redhat.com/security/cve/CVE-2023-21939[CVE-2023-21939] -* link:https://access.redhat.com/security/cve/CVE-2023-21954[CVE-2023-21954] -* link:https://access.redhat.com/security/cve/CVE-2023-21967[CVE-2023-21967] -* link:https://access.redhat.com/security/cve/CVE-2023-21968[CVE-2023-21968] -* link:https://access.redhat.com/security/cve/CVE-2023-28617[CVE-2023-28617] diff --git a/modules/logging-rn-5.7.2.adoc b/modules/logging-rn-5.7.2.adoc deleted file mode 100644 index 0e96a4163257..000000000000 --- a/modules/logging-rn-5.7.2.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// cluster-logging-release-notes.adoc -:_content-type: REFERENCE -[id="cluster-logging-release-notes-5-7-2_{context}"] -= Logging 5.7.2 -This release includes link:https://access.redhat.com/errata/RHSA-2023:3495[OpenShift Logging Bug Fix Release 5.7.2]. - -[id="openshift-logging-5-7-2-bug-fixes_{context}"] -== Bug fixes -* Before this update, it was not possible to delete the `openshift-logging` namespace directly due to the presence of a pending finalizer. With this update, the finalizer is no longer utilized, enabling direct deletion of the namespace. (link:https://issues.redhat.com/browse/LOG-3316[LOG-3316]) - -* Before this update, the `run.sh` script would display an incorrect `chunk_limit_size` value if it was changed according to the {product-title} documentation. However, when setting the `chunk_limit_size` via the environment variable `$BUFFER_SIZE_LIMIT`, the script would show the correct value. With this update, the `run.sh` script now consistently displays the correct `chunk_limit_size` value in both scenarios. (link:https://issues.redhat.com/browse/LOG-3330[LOG-3330]) - -* Before this update, the {product-title} web console's logging view plugin did not allow for custom node placement or tolerations. This update adds the ability to define node placement and tolerations for the logging view plugin. (link:https://issues.redhat.com/browse/LOG-3749[LOG-3749]) - -* Before this update, the Cluster Logging Operator encountered an Unsupported Media Type exception when trying to send logs to DataDog via the Fluentd HTTP Plugin. With this update, users can seamlessly assign the content type for log forwarding by configuring the HTTP header Content-Type. The value provided is automatically assigned to the `content_type` parameter within the plugin, ensuring successful log transmission. (link:https://issues.redhat.com/browse/LOG-3784[LOG-3784]) - -* Before this update, when the `detectMultilineErrors` field was set to `true` in the `ClusterLogForwarder` custom resource (CR), PHP multi-line errors were recorded as separate log entries, causing the stack trace to be split across multiple messages. 
With this update, multi-line error detection for PHP is enabled, ensuring that the entire stack trace is included in a single log message. (link:https://issues.redhat.com/browse/LOG-3878[LOG-3878]) - -* Before this update, `ClusterLogForwarder` pipelines containing a space in their name caused the Vector collector pods to continuously crash. With this update, all spaces, dashes (-), and dots (.) in pipeline names are replaced with underscores (_). (link:https://issues.redhat.com/browse/LOG-3945[LOG-3945]) - -* Before this update, the `log_forwarder_output` metric did not include the `http` parameter. This update adds the missing parameter to the metric. (link:https://issues.redhat.com/browse/LOG-3997[LOG-3997]) - -* Before this update, Fluentd did not identify some multi-line JavaScript client exceptions when they ended with a colon. With this update, the Fluentd buffer name is prefixed with an underscore, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4019[LOG-4019]) - -* Before this update, when configuring log forwarding to write to a Kafka output topic which matched a key in the payload, logs dropped due to an error. With this update, Fluentd's buffer name has been prefixed with an underscore, resolving the issue.(link:https://issues.redhat.com/browse/LOG-4027[LOG-4027]) - -* Before this update, the LokiStack gateway returned label values for namespaces without applying the access rights of a user. With this update, the LokiStack gateway applies permissions to label value requests, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4049[LOG-4049]) - -* Before this update, the Cluster Logging Operator API required a certificate to be provided by a secret when the `tls.insecureSkipVerify` option was set to `true`. With this update, the Cluster Logging Operator API no longer requires a certificate to be provided by a secret in such cases. The following configuration has been added to the Operator's CR: -+ -[source,yaml] ----- -tls.verify_certificate = false -tls.verify_hostname = false ----- -+ -(link:https://issues.redhat.com/browse/LOG-3445[LOG-3445]) - -* Before this update, the LokiStack route configuration caused queries running longer than 30 seconds to timeout. With this update, the LokiStack global and per-tenant `queryTimeout` settings affect the route timeout settings, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4052[LOG-4052]) - -* Before this update, a prior fix to remove defaulting of the `collection.type` resulted in the Operator no longer honoring the deprecated specs for resource, node selections, and tolerations. This update modifies the Operator behavior to always prefer the `collection.logs` spec over those of `collection`. This varies from previous behavior that allowed using both the preferred fields and deprecated fields but would ignore the deprecated fields when `collection.type` was populated. (link:https://issues.redhat.com/browse/LOG-4185[LOG-4185]) - -* Before this update, the Vector log collector did not generate TLS configuration for forwarding logs to multiple Kafka brokers if the broker URLs were not specified in the output. With this update, TLS configuration is generated appropriately for multiple brokers. (link:https://issues.redhat.com/browse/LOG-4163[LOG-4163]) - -* Before this update, the option to enable passphrase for log forwarding to Kafka was unavailable. This limitation presented a security risk as it could potentially expose sensitive information. 
With this update, users now have a seamless option to enable passphrase for log forwarding to Kafka. (link:https://issues.redhat.com/browse/LOG-3314[LOG-3314]) - -* Before this update, the Vector log collector did not honor the `tlsSecurityProfile` settings for outgoing TLS connections. After this update, Vector handles TLS connection settings appropriately. (link:https://issues.redhat.com/browse/LOG-4011[LOG-4011]) - -* Before this update, not all available output types were included in the `log_forwarder_output_info` metrics. With this update, metrics contain Splunk and Google Cloud Logging data, which was missing previously. (link:https://issues.redhat.com/browse/LOG-4098[LOG-4098]) - -* Before this update, when `follow_inodes` was set to `true`, the Fluentd collector could crash on file rotation. With this update, the `follow_inodes` setting does not crash the collector. (link:https://issues.redhat.com/browse/LOG-4151[LOG-4151]) - -* Before this update, the Fluentd collector could incorrectly close files that should be watched because of how those files were tracked. With this update, the tracking parameters have been corrected. (link:https://issues.redhat.com/browse/LOG-4149[LOG-4149]) - -* Before this update, forwarding logs with the Vector collector and naming a pipeline in the `ClusterLogForwarder` instance `audit`, `application` or `infrastructure` resulted in collector pods staying in the `CrashLoopBackOff` state with the following error in the collector log: -+ -[source,text] ----- -ERROR vector::cli: Configuration error. error=redefinition of table transforms.audit for key transforms.audit ----- -+ -After this update, pipeline names no longer clash with reserved input names, and pipelines can be named `audit`, `application` or `infrastructure`. (link:https://issues.redhat.com/browse/LOG-4218[LOG-4218]) - -* Before this update, when forwarding logs to a syslog destination with the Vector collector and setting the `addLogSource` flag to `true`, the following extra empty fields were added to the forwarded messages: `namespace_name=`, `container_name=`, and `pod_name=`. With this update, these fields are no longer added to journal logs. (link:https://issues.redhat.com/browse/LOG-4219[LOG-4219]) - -* Before this update, when a `structuredTypeKey` was not found, and a `structuredTypeName` was not specified, log messages were still parsed into a structured object. With this update, parsing of logs is as expected.
(link:https://issues.redhat.com/browse/LOG-4220[LOG-4220]) - - -[id="openshift-logging-5-7-2-CVEs_{context}"] -== CVEs - -* link:https://access.redhat.com/security/cve/CVE-2021-26341[CVE-2021-26341] -* link:https://access.redhat.com/security/cve/CVE-2021-33655[CVE-2021-33655] -* link:https://access.redhat.com/security/cve/CVE-2021-33656[CVE-2021-33656] -* link:https://access.redhat.com/security/cve/CVE-2022-1462[CVE-2022-1462] -* link:https://access.redhat.com/security/cve/CVE-2022-1679[CVE-2022-1679] -* link:https://access.redhat.com/security/cve/CVE-2022-1789[CVE-2022-1789] -* link:https://access.redhat.com/security/cve/CVE-2022-2196[CVE-2022-2196] -* link:https://access.redhat.com/security/cve/CVE-2022-2663[CVE-2022-2663] -* link:https://access.redhat.com/security/cve/CVE-2022-3028[CVE-2022-3028] -* link:https://access.redhat.com/security/cve/CVE-2022-3239[CVE-2022-3239] -* link:https://access.redhat.com/security/cve/CVE-2022-3522[CVE-2022-3522] -* link:https://access.redhat.com/security/cve/CVE-2022-3524[CVE-2022-3524] -* link:https://access.redhat.com/security/cve/CVE-2022-3564[CVE-2022-3564] -* link:https://access.redhat.com/security/cve/CVE-2022-3566[CVE-2022-3566] -* link:https://access.redhat.com/security/cve/CVE-2022-3567[CVE-2022-3567] -* link:https://access.redhat.com/security/cve/CVE-2022-3619[CVE-2022-3619] -* link:https://access.redhat.com/security/cve/CVE-2022-3623[CVE-2022-3623] -* link:https://access.redhat.com/security/cve/CVE-2022-3625[CVE-2022-3625] -* link:https://access.redhat.com/security/cve/CVE-2022-3627[CVE-2022-3627] -* link:https://access.redhat.com/security/cve/CVE-2022-3628[CVE-2022-3628] -* link:https://access.redhat.com/security/cve/CVE-2022-3707[CVE-2022-3707] -* link:https://access.redhat.com/security/cve/CVE-2022-3970[CVE-2022-3970] -* link:https://access.redhat.com/security/cve/CVE-2022-4129[CVE-2022-4129] -* link:https://access.redhat.com/security/cve/CVE-2022-20141[CVE-2022-20141] -* link:https://access.redhat.com/security/cve/CVE-2022-25147[CVE-2022-25147] -* link:https://access.redhat.com/security/cve/CVE-2022-25265[CVE-2022-25265] -* link:https://access.redhat.com/security/cve/CVE-2022-30594[CVE-2022-30594] -* link:https://access.redhat.com/security/cve/CVE-2022-36227[CVE-2022-36227] -* link:https://access.redhat.com/security/cve/CVE-2022-39188[CVE-2022-39188] -* link:https://access.redhat.com/security/cve/CVE-2022-39189[CVE-2022-39189] -* link:https://access.redhat.com/security/cve/CVE-2022-41218[CVE-2022-41218] -* link:https://access.redhat.com/security/cve/CVE-2022-41674[CVE-2022-41674] -* link:https://access.redhat.com/security/cve/CVE-2022-42703[CVE-2022-42703] -* link:https://access.redhat.com/security/cve/CVE-2022-42720[CVE-2022-42720] -* link:https://access.redhat.com/security/cve/CVE-2022-42721[CVE-2022-42721] -* link:https://access.redhat.com/security/cve/CVE-2022-42722[CVE-2022-42722] -* link:https://access.redhat.com/security/cve/CVE-2022-43750[CVE-2022-43750] -* link:https://access.redhat.com/security/cve/CVE-2022-47929[CVE-2022-47929] -* link:https://access.redhat.com/security/cve/CVE-2023-0394[CVE-2023-0394] -* link:https://access.redhat.com/security/cve/CVE-2023-0461[CVE-2023-0461] -* link:https://access.redhat.com/security/cve/CVE-2023-1195[CVE-2023-1195] -* link:https://access.redhat.com/security/cve/CVE-2023-1582[CVE-2023-1582] -* link:https://access.redhat.com/security/cve/CVE-2023-2491[CVE-2023-2491] -* link:https://access.redhat.com/security/cve/CVE-2023-22490[CVE-2023-22490] -* 
link:https://access.redhat.com/security/cve/CVE-2023-23454[CVE-2023-23454] -* link:https://access.redhat.com/security/cve/CVE-2023-23946[CVE-2023-23946] -* link:https://access.redhat.com/security/cve/CVE-2023-25652[CVE-2023-25652] -* link:https://access.redhat.com/security/cve/CVE-2023-25815[CVE-2023-25815] -* link:https://access.redhat.com/security/cve/CVE-2023-27535[CVE-2023-27535] -* link:https://access.redhat.com/security/cve/CVE-2023-29007[CVE-2023-29007] diff --git a/modules/logging-support-considerations.adoc b/modules/logging-support-considerations.adoc deleted file mode 100644 index bbb45e65e44b..000000000000 --- a/modules/logging-support-considerations.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// logging/v5_6/logging-5-6-architecture.adoc - -:_content-type: CONCEPT -[id="logging-support-considerations_{context}"] -= Support considerations for logging - -include::snippets/logging-compatibility-snip.adoc[] - -include::snippets/logging-supported-config-snip.adoc[] - -The following modifications are explicitly not supported: - -* *Deploying logging to namespaces not specified in the documentation.* -* *Installing custom Elasticsearch, Kibana, Fluentd, or Loki instances on {product-title}.* -* *Changes to the Kibana Custom Resource (CR) or Elasticsearch CR.* -* *Changes to secrets or config maps not specified in the documentation.* - -The {logging-title} is an opinionated collector and normalizer of application, infrastructure, and audit logs. It is intended to be used for forwarding logs to various supported systems. - -The {logging-title} is not: - -* A high scale log collection system -* Security Information and Event Monitoring (SIEM) compliant -* Historical or long term log retention or storage -* A guaranteed log sink -* Secure storage - audit logs are not stored by default diff --git a/modules/lvms-creating-logical-volume-manager-cluster.adoc b/modules/lvms-creating-logical-volume-manager-cluster.adoc deleted file mode 100644 index 0d2e586cf5d8..000000000000 --- a/modules/lvms-creating-logical-volume-manager-cluster.adoc +++ /dev/null @@ -1,206 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-creating-lvms-cluster_{context}"] -= Creating a Logical Volume Manager cluster on a {sno} worker node - -You can configure a {sno} worker node as a Logical Volume Manager cluster. -On the control-plane {sno} node, {lvms} detects and uses the additional worker nodes when the new nodes become active in the cluster. - -[NOTE] -==== -When you create a Logical Volume Manager cluster, `StorageClass` and `LVMVolumeGroup` resources work together to provide dynamic provisioning of storage. -`StorageClass` CRs define the properties of the storage that you can dynamically provision. -`LVMVolumeGroup` is a specific type of persistent volume (PV) that is backed by an LVM Volume Group. -`LVMVolumeGroup` CRs provide the back-end storage for the persistent volumes that you create. -==== - -Perform the following procedure to create a Logical Volume Manager cluster on a {sno} worker node. - -[NOTE] -==== -You also can perform the same task by using the {product-title} web console. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. 
- -* You installed {lvms} in a {sno} cluster and have installed a worker node for use in the {sno} cluster. - -.Procedure - -. Create the `LVMCluster` custom resource (CR). - -.. Save the following YAML in the `lvmcluster.yaml` file: -+ -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMCluster -metadata: - name: lvmcluster -spec: - storage: - deviceClasses: <1> - - name: vg1 - default: true <2> - deviceSelector: - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - thinPoolConfig: - name: thin-pool-1 - sizePercent: 90 - overprovisionRatio: 10 - nodeSelector: <3> - nodeSelectorTerms: - - matchExpressions: - - key: app - operator: In - Values: - - test1 ----- -<1> To create multiple device storage classes in the cluster, create a YAML array under `deviceClasses` for each required storage class. -Configure the local device paths of the disks as an array of values in the `deviceSelector` field. -When configuring multiple device classes, you must specify the device path for each device. -<2> Mandatory: The `LVMCluster` resource must contain a single default storage class. Set `default: false` for secondary device storage classes. -If you are upgrading the `LVMCluster` resource from a previous version, you must specify a single default device class. -<3> Optional: To control what worker nodes the `LVMCluster` CR is applied to, specify a set of node selector labels. -The specified labels must be present on the node in order for the `LVMCluster` to be scheduled on that node. - -.. Create the `LVMCluster` CR: -+ -[source,terminal] ----- -$ oc create -f lvmcluster.yaml ----- -+ -.Example output -[source,terminal] ----- -lvmcluster/lvmcluster created ----- -+ -The `LVMCluster` resource creates the following system-managed CRs: -+ -`LVMVolumeGroup`:: Tracks individual volume groups across multiple nodes. -`LVMVolumeGroupNodeStatus`:: Tracks the status of the volume groups on a node. - -.Verification - -Verify that the `LVMCluster` resource has created the `StorageClass`, `LVMVolumeGroup`, and `LVMVolumeGroupNodeStatus` CRs. - -[IMPORTANT] -==== -`LVMVolumeGroup` and `LVMVolumeGroupNodeStatus` are managed by {lvms}. Do not edit these CRs directly. -==== - -. Check that the `LVMCluster` CR is in a `ready` state by running the following command: -+ -[source,terminal] ----- -$ oc get lvmclusters.lvm.topolvm.io -o jsonpath='{.items[*].status.deviceClassStatuses[*]}' ----- -+ -.Example output -[source,json] ----- -{ - "name": "vg1", - "nodeStatus": [ - { - "devices": [ - "/dev/nvme0n1", - "/dev/nvme1n1", - "/dev/nvme2n1" - ], - "node": "kube-node", - "status": "Ready" - } - ] -} ----- - -. Check that the storage class is created: -+ -[source,terminal] ----- -$ oc get storageclass ----- -+ -.Example output -[source,terminal] ----- -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -lvms-vg1 topolvm.io Delete WaitForFirstConsumer true 31m ----- - -. Check that the volume snapshot class is created: -+ -[source,terminal] ----- -$ oc get volumesnapshotclass ----- -+ -.Example output -[source,terminal] ----- -NAME DRIVER DELETIONPOLICY AGE -lvms-vg1 topolvm.io Delete 24h ----- - -. 
Check that the `LVMVolumeGroup` resource is created: -+ -[source,terminal] ----- -$ oc get lvmvolumegroup vg1 -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMVolumeGroup -metadata: - creationTimestamp: "2022-02-02T05:16:42Z" - generation: 1 - name: vg1 - namespace: lvm-operator-system - resourceVersion: "17242461" - uid: 88e8ad7d-1544-41fb-9a8e-12b1a66ab157 -spec: {} ----- - -. Check that the `LVMVolumeGroupNodeStatus` resource is created: -+ -[source,terminal] ----- -$ oc get lvmvolumegroupnodestatuses.lvm.topolvm.io kube-node -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMVolumeGroupNodeStatus -metadata: - creationTimestamp: "2022-02-02T05:17:59Z" - generation: 1 - name: kube-node - namespace: lvm-operator-system - resourceVersion: "17242882" - uid: 292de9bb-3a9b-4ee8-946a-9b587986dafd -spec: - nodeStatus: - - devices: - - /dev/nvme0n1 - - /dev/nvme1n1 - - /dev/nvme2n1 - name: vg1 - status: Ready ----- diff --git a/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc b/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc deleted file mode 100644 index 50ed30840998..000000000000 --- a/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-creating-volume-clones-in-single-node-openshift_{context}"] -= Creating volume clones in {sno} - -You create a clone of a volume to make a point-in-time copy of the data. -A persistent volume claim (PVC) cannot be cloned with a different size. - -[IMPORTANT] -==== -The cloned PVC has write access. -==== - -.Prerequisites - -* You ensured that the PVC is in `Bound` state. This is required for a consistent snapshot. -* You ensured that the `StorageClass` is the same as that of the source PVC. - -.Procedure - -. Identify the storage class of the source PVC. -. To create a volume clone, save the following YAML to a file with a name such as `lvms-vol-clone.yaml`: -+ -.Example YAML to clone a volume -[source,yaml] ----- -apiVersion: v1 -kind: PersistentVolumeClaim -Metadata: - name: lvm-block-1-clone -Spec: - storageClassName: lvms-vg1 - dataSource: - name: lvm-block-1 - kind: PersistentVolumeClaim - accessModes: - - ReadWriteOnce - volumeMode: Block - Resources: - Requests: - storage: 2Gi ----- - -. Create the policy in the same namespace as the source PVC by running the following command: -+ -[source,terminal] ----- -# oc create -f lvms-vol-clone.yaml ----- \ No newline at end of file diff --git a/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc deleted file mode 100644 index 5445e38d9af2..000000000000 --- a/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-creating-volume-snapshots-in-single-node-openshift_{context}"] -= Creating volume snapshots in {sno} - -You can create volume snapshots based on the available capacity of the thin pool and the overprovisioning limits. -{lvms} creates a `VolumeSnapshotClass` with the `lvms-<deviceclass-name>` name. 
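For example, with the `vg1` device class used in the preceding modules, you can confirm that the generated snapshot class exists before you take a snapshot. This is a minimal check and assumes the `vg1` device class name from the earlier examples; substitute your own device class name if it differs:

[source,terminal]
----
$ oc get volumesnapshotclass lvms-vg1
----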
- -.Prerequisites - -* You ensured that the persistent volume claim (PVC) is in `Bound` state. This is required for a consistent snapshot. -* You stopped all the I/O to the PVC before taking the snapshot. - -.Procedure - -. Log in to the {sno} for which you need to run the `oc` command. -. Save the following YAML to a file with a name such as `lvms-vol-snapshot.yaml`. -+ -.Example YAML to create a volume snapshot -[source,yaml] ----- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshot -metadata: - name: lvm-block-1-snap -spec: - volumeSnapshotClassName: lvms-vg1 - source: - persistentVolumeClaimName: lvm-block-1 ----- - -. Create the snapshot by running the following command in the same namespace as the PVC: -+ -[source,terminal] ----- -# oc create -f lvms-vol-snapshot.yaml ----- - -A read-only copy of the PVC is created as a volume snapshot. \ No newline at end of file diff --git a/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc b/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc deleted file mode 100644 index 598f40752947..000000000000 --- a/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-deleting-cloned-volumes-in-single-node-openshift_{context}"] -= Deleting cloned volumes in {sno} - -You can delete cloned volumes. - -.Procedure - -* To delete the cloned volume, delete the cloned PVC by running the following command: -+ -[source,terminal] ----- -# oc delete pvc <clone_pvc_name> -n <namespace> ----- \ No newline at end of file diff --git a/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc deleted file mode 100644 index f553c6cf9738..000000000000 --- a/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-deleting-volume-snapshots-in-single-node-openshift_{context}"] -= Deleting volume snapshots in {sno} - -You can delete volume snapshots resources and persistent volume claims (PVCs). - -.Procedure - -. Delete the volume snapshot resource by running the following command: -+ -[source,terminal] ----- -# oc delete volumesnapshot <volume_snapshot_name> -n <namespace> ----- -+ -[NOTE] -==== -When you delete a persistent volume claim (PVC), the snapshots of the PVC are not deleted. -==== - -. 
To delete the restored volume snapshot, delete the PVC that was created to restore the volume snapshot by running the following command: -+ -[source,terminal] ----- -# oc delete pvc <pvc_name> -n <namespace> ----- \ No newline at end of file diff --git a/modules/lvms-download-log-files-and-diagnostics.adoc b/modules/lvms-download-log-files-and-diagnostics.adoc deleted file mode 100644 index 1768028dacff..000000000000 --- a/modules/lvms-download-log-files-and-diagnostics.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-dowloading-log-files-and-diagnostics_{context}"] -= Downloading log files and diagnostic information using must-gather - -When {lvms} is unable to automatically resolve a problem, use the must-gather tool to collect the log files and diagnostic information so that you or the Red Hat Support can review the problem and determine a solution. - -* Run the must-gather command from the client connected to {lvms} cluster by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image=registry.redhat.io/lvms4/lvms-must-gather-rhel8:v{product-version} --dest-dir=<directory-name> ----- \ No newline at end of file diff --git a/modules/lvms-installing-logical-volume-manager-operator-disconnected-environment.adoc b/modules/lvms-installing-logical-volume-manager-operator-disconnected-environment.adoc deleted file mode 100644 index 4847691b2d23..000000000000 --- a/modules/lvms-installing-logical-volume-manager-operator-disconnected-environment.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-installing-lvms-disconnected-env_{context}"] -= Installing {lvms} in a disconnected environment - -You can install {lvms} on {product-title} {product-version} in a disconnected environment. All sections referenced in this procedure are linked in _Additional resources_. - -.Prerequisites - -* You read the _About disconnected installation mirroring_ section. -* You have access to the {product-title} image repository. -* You created a mirror registry. - -.Procedure - -. Follow the steps in the _Creating the image set configuration_ procedure. To create an `ImageSetConfiguration` resource for {lvms}, you can use the following example YAML file: -+ -include::snippets/lvms-disconnected-ImageSetConfig.adoc[] - -. Follow the procedure in the _Mirroring an image set to a mirror registry_ section. - -. Follow the procedure in the _Configuring image registry repository mirroring_ section. \ No newline at end of file diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-cli.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-cli.adoc deleted file mode 100644 index 862bf6c959ef..000000000000 --- a/modules/lvms-installing-logical-volume-manager-operator-using-cli.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="install-lvms-operator-cli_{context}"] -= Installing {lvms} with the CLI - -As a cluster administrator, you can install {lvms-first} by using the CLI. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). 
- -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the {lvms} Operator. - -.. Save the following YAML in the `lvms-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/audit: privileged - pod-security.kubernetes.io/warn: privileged - name: openshift-storage ----- - -.. Create the `Namespace` CR: -+ -[source,terminal] ----- -$ oc create -f lvms-namespace.yaml ----- - -. Create an Operator group for the {lvms} Operator. - -.. Save the following YAML in the `lvms-operatorgroup.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: openshift-storage-operatorgroup - namespace: openshift-storage -spec: - targetNamespaces: - - openshift-storage ----- - -.. Create the `OperatorGroup` CR: -+ -[source,terminal] ----- -$ oc create -f lvms-operatorgroup.yaml ----- - -. Subscribe to the {lvms} Operator. - -.. Save the following YAML in the `lvms-sub.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: lvms - namespace: openshift-storage -spec: - installPlanApproval: Automatic - name: lvms-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the `Subscription` CR: -+ -[source,terminal] ----- -$ oc create -f lvms-sub.yaml ----- - -. Create the `LVMCluster` resource: - -.. Save the following YAML in the `lvmcluster.yaml` file: -+ -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMCluster -metadata: - name: my-lvmcluster - namespace: openshift-storage -spec: - storage: - deviceClasses: - - name: vg1 - deviceSelector: - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - thinPoolConfig: - name: thin-pool-1 - sizePercent: 90 - overprovisionRatio: 10 - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: app - operator: In - values: - - test1 ----- - -.. Create the `LVMCluster` CR: -+ -[source,yaml] ----- -$ oc create -f lvmcluster.yaml ----- - - -. To verify that the Operator is installed, enter the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-storage -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- -+ -.Example output -[source,terminal] ----- -Name Phase -4.13.0-202301261535 Succeeded ----- diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc deleted file mode 100644 index 7450e0c86024..000000000000 --- a/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-installing-lvms-with-web-console_{context}"] -= Installing {lvms} with the web console - -You can install {lvms-first} by using the Red Hat {product-title} OperatorHub. - -.Prerequisites - -* You have access to the {sno} cluster. -* You are using an account with the `cluster-admin` and Operator installation permissions. - -.Procedure - -. Log in to the {product-title} Web Console. -. Click *Operators -> OperatorHub*. -. 
Scroll or type `LVM Storage` into the *Filter by keyword* box to find {lvms}. -. Click *Install*. -. Set the following options on the *Install Operator* page: -.. *Update Channel* as *stable-{product-version}*. -.. *Installation Mode* as *A specific namespace on the cluster*. -.. *Installed Namespace* as *Operator recommended namespace openshift-storage*. - If the `openshift-storage` namespace does not exist, it is created during the operator installation. -.. *Approval Strategy* as *Automatic* or *Manual*. -+ -If you select *Automatic* updates, then the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. -+ -If you select *Manual* updates, then the OLM creates an update request. -As a cluster administrator, you must then manually approve that update request to update the Operator to a newer version. - -. Click *Install*. - -.Verification steps - -* Verify that {lvms} shows a green tick, indicating successful installation. diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc deleted file mode 100644 index 59b2a2ae563c..000000000000 --- a/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc +++ /dev/null @@ -1,181 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-installing-odf-logical-volume-manager-operator-using-rhacm_{context}"] -= Installing {lvms} using {rh-rhacm} - -{lvms} is deployed on {sno} clusters using {rh-rhacm-first}. -You create a `Policy` object on {rh-rhacm} that deploys and configures the Operator when it is applied to managed clusters which match the selector specified in the `PlacementRule` resource. -The policy is also applied to clusters that are imported later and satisfy the placement rule. - -.Prerequisites -* Access to the {rh-rhacm} cluster using an account with `cluster-admin` and Operator installation permissions. -* Dedicated disks on each {sno} cluster to be used by {lvms}. -* The {sno} cluster needs to be managed by {rh-rhacm}, either imported or created. - -.Procedure - -. Log in to the {rh-rhacm} CLI using your {product-title} credentials. - -. Create a namespace in which you will create policies. -+ -[source,terminal] ----- -# oc create ns lvms-policy-ns ----- - -. 
To create a policy, save the following YAML to a file with a name such as `policy-lvms-operator.yaml`: -+ -[source,yaml] ----- -apiVersion: apps.open-cluster-management.io/v1 -kind: PlacementRule -metadata: - name: placement-install-lvms -spec: - clusterConditions: - - status: "True" - type: ManagedClusterConditionAvailable - clusterSelector: <1> - matchExpressions: - - key: mykey - operator: In - values: - - myvalue ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: PlacementBinding -metadata: - name: binding-install-lvms -placementRef: - apiGroup: apps.open-cluster-management.io - kind: PlacementRule - name: placement-install-lvms -subjects: -- apiGroup: policy.open-cluster-management.io - kind: Policy - name: install-lvms ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: Policy -metadata: - annotations: - policy.open-cluster-management.io/categories: CM Configuration Management - policy.open-cluster-management.io/controls: CM-2 Baseline Configuration - policy.open-cluster-management.io/standards: NIST SP 800-53 - name: install-lvms -spec: - disabled: false - remediationAction: enforce - policy-templates: - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: install-lvms - spec: - object-templates: - - complianceType: musthave - objectDefinition: - apiVersion: v1 - kind: Namespace - metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/audit: privileged - pod-security.kubernetes.io/warn: privileged - name: openshift-storage - - complianceType: musthave - objectDefinition: - apiVersion: operators.coreos.com/v1 - kind: OperatorGroup - metadata: - name: openshift-storage-operatorgroup - namespace: openshift-storage - spec: - targetNamespaces: - - openshift-storage - - complianceType: musthave - objectDefinition: - apiVersion: operators.coreos.com/v1alpha1 - kind: Subscription - metadata: - name: lvms - namespace: openshift-storage - spec: - installPlanApproval: Automatic - name: lvms-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - remediationAction: enforce - severity: low - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: lvms - spec: - object-templates: - - complianceType: musthave - objectDefinition: - apiVersion: lvm.topolvm.io/v1alpha1 - kind: LVMCluster - metadata: - name: my-lvmcluster - namespace: openshift-storage - spec: - storage: - deviceClasses: - - name: vg1 - default: true - deviceSelector: <2> - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - thinPoolConfig: - name: thin-pool-1 - sizePercent: 90 - overprovisionRatio: 10 - nodeSelector: <3> - nodeSelectorTerms: - - matchExpressions: - - key: app - operator: In - values: - - test1 - remediationAction: enforce - severity: low ----- -<1> Replace the key and value in `PlacementRule.spec.clusterSelector` to match the labels set on the {sno} clusters on which you want to install {lvms}. -<2> To control or restrict the volume group to your preferred disks, you can manually specify the local paths of the disks in the `deviceSelector` section of the `LVMCluster` YAML. -<3> To add a node filter, which is a subset of the additional worker nodes, specify the required filter in the `nodeSelector` section. {lvms} detects and uses the additional worker nodes when the new nodes show up. 
-+ --- -[IMPORTANT] -==== -This `nodeSelector` node filter matching is not the same as the pod label matching. -==== --- - -. Create the policy in the namespace by running the following command: -+ -[source,terminal] ----- -# oc create -f policy-lvms-operator.yaml -n lvms-policy-ns <1> ----- -<1> The `policy-lvms-operator.yaml` is the name of the file to which the policy is saved. - -+ -This creates a `Policy`, a `PlacementRule`, and a `PlacementBinding` object in the `lvms-policy-ns` namespace. -The policy creates a `Namespace`, `OperatorGroup`, `Subscription`, and `LVMCluster` resource on the clusters that match the placement rule. -This deploys the Operator on the {sno} clusters which match the selection criteria and configures it to set up the required resources to provision storage. -The Operator uses all the disks specified in the `LVMCluster` CR. -If no disks are specified, the Operator uses all the unused disks on the {sno} node. -+ -[IMPORTANT] -==== -After a device is added to the `LVMCluster`, it cannot be removed. -==== diff --git a/modules/lvms-monitoring-logical-volume-manager-operator.adoc b/modules/lvms-monitoring-logical-volume-manager-operator.adoc deleted file mode 100644 index b53405cf920e..000000000000 --- a/modules/lvms-monitoring-logical-volume-manager-operator.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-monitoring-using-lvms_{context}"] -= Monitoring {lvms} - -When {lvms} is installed using the {product-title} Web Console, you can monitor the cluster by using the *Block and File* dashboard in the console by default. -However, when you use {rh-rhacm} to install {lvms}, you need to configure {rh-rhacm} Observability to monitor all the {sno} clusters from one place. - -[id="lvms-monitoring-using-lvms-metrics_{context}"] -== Metrics - -You can monitor {lvms} by viewing the metrics exported by the Operator on the {rh-rhacm} dashboards and the alerts that are triggered. - -* Add the following `topolvm` metrics to the `allow` list: -+ -[source,terminal] ----- -topolvm_thinpool_data_percent -topolvm_thinpool_metadata_percent -topolvm_thinpool_size_bytes ----- - -[NOTE] -==== -Metrics are updated every 10 minutes or when there is a change in the thin pool, such as a new logical volume creation. -==== - -[id="lvms-monitoring-using-lvms-alerts_{context}"] -== Alerts - -When the thin pool and volume group are filled up, further operations fail and might lead to data loss. -{lvms} sends the following alerts about the usage of the thin pool and volume group when utilization crosses a certain value: - -.Alerts for Logical Volume Manager cluster in {rh-rhacm} -[[alerts_for_LVMCluster_in_{rh-rhacm}]] -[%autowidth,frame="topbot",options="header"] -|=== -|Alert| Description -|`VolumeGroupUsageAtThresholdNearFull`|This alert is triggered when both the volume group and thin pool utilization cross 75% on nodes. Data deletion or volume group expansion is required. -|`VolumeGroupUsageAtThresholdCritical`|This alert is triggered when both the volume group and thin pool utilization cross 85% on nodes. `VolumeGroup` is critically full. Data deletion or volume group expansion is required. -|`ThinPoolDataUsageAtThresholdNearFull`|This alert is triggered when the thin pool data utilization in the volume group crosses 75% on nodes. Data deletion or thin pool expansion is required. 
-|`ThinPoolDataUsageAtThresholdCritical`|This alert is triggered when the thin pool data utilization in the volume group crosses 85% on nodes. Data deletion or thin pool expansion is required. -|`ThinPoolMetaDataUsageAtThresholdNearFull`|This alert is triggered when the thin pool metadata utilization in the volume group crosses 75% on nodes. Data deletion or thin pool expansion is required. -|`ThinPoolMetaDataUsageAtThresholdCritical`|This alert is triggered when the thin pool metadata utilization in the volume group crosses 85% on nodes. Data deletion or thin pool expansion is required. -|=== \ No newline at end of file diff --git a/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc b/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc deleted file mode 100644 index ba8d6630f09b..000000000000 --- a/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-provisioning-storage-using-lvms_{context}"] -= Provisioning storage using {lvms} - -You can provision persistent volume claims (PVCs) using the storage class that is created during the Operator installation. You can provision block and file PVCs, however, the storage is allocated only when a pod that uses the PVC is created. - -[NOTE] -==== -{lvms} provisions PVCs in units of 1 GiB. The requested storage is rounded up to the nearest GiB. -==== - -.Procedure - -. Identify the `StorageClass` that is created when {lvms} is deployed. -+ -The `StorageClass` name is in the format, `lvms-<device-class-name>`. -The `device-class-name` is the name of the device class that you provided in the `LVMCluster` of the `Policy` YAML. -For example, if the `deviceClass` is called `vg1`, then the `storageClass` name is `lvms-vg1`. -+ -The `volumeBindingMode` of the storage class is set to `WaitForFirstConsumer`. - -. To create a PVC where the application requires storage, save the following YAML to a file with a name such as `pvc.yaml`. -+ -.Example YAML to create a PVC -[source,yaml] ----- -# block pvc -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: lvm-block-1 - namespace: default -spec: - accessModes: - - ReadWriteOnce - volumeMode: Block - resources: - requests: - storage: 10Gi - storageClassName: lvms-vg1 ---- -# file pvc -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: lvm-file-1 - namespace: default -spec: - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - resources: - requests: - storage: 10Gi - storageClassName: lvms-vg1 ----- - -. Create the PVC by running the following command: -+ -[source,terminal] ----- -# oc create -f pvc.yaml -ns <application_namespace> ----- - -+ -The created PVCs remain in `pending` state until you deploy the pods that use them. \ No newline at end of file diff --git a/modules/lvms-reference-file.adoc b/modules/lvms-reference-file.adoc deleted file mode 100644 index e2a62503b872..000000000000 --- a/modules/lvms-reference-file.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: REFERENCE -[id="lvms-reference-file_{context}"] -= {lvms} reference YAML file - -The sample `LVMCluster` custom resource (CR) describes all the fields in the YAML file. 
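If you want to compare this reference against a running cluster, you can print the `LVMCluster` CR that is currently applied. The following is a minimal sketch that assumes the `my-lvmcluster` name and the `openshift-storage` namespace used in the installation examples in these modules; substitute your own values if they differ:

[source,terminal]
----
$ oc get lvmclusters.lvm.topolvm.io my-lvmcluster -n openshift-storage -o yaml
----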
- -.Example LVMCluster CR -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMCluster -metadata: - name: my-lvmcluster -spec: - tolerations: - - effect: NoSchedule - key: xyz - operator: Equal - value: "true" - storage: - deviceClasses: <1> - - name: vg1 <2> - default: true - nodeSelector: <3> - nodeSelectorTerms: <4> - - matchExpressions: - - key: mykey - operator: In - values: - - ssd - deviceSelector: <5> - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 - thinPoolConfig: <6> - name: thin-pool-1 <7> - sizePercent: 90 <8> - overprovisionRatio: 10 <9> -status: - deviceClassStatuses: <10> - - name: vg1 - nodeStatus: <11> - - devices: <12> - - /dev/nvme0n1 - - /dev/nvme1n1 - - /dev/nvme2n1 - node: my-node.example.com <13> - status: Ready <14> - ready: true <15> - state: Ready <16> ----- -<1> The LVM volume groups to be created on the cluster. Currently, only a single `deviceClass` is supported. -<2> The name of the LVM volume group to be created on the nodes. -<3> The nodes on which to create the LVM volume group. If the field is empty, all nodes are considered. -<4> A list of node selector requirements. -<5> A list of device paths which is used to create the LVM volume group. If this field is empty, all unused disks on the node will be used. -<6> The LVM thin pool configuration. -<7> The name of the thin pool to be created in the LVM volume group. -<8> The percentage of remaining space in the LVM volume group that should be used for creating the thin pool. -<9> The factor by which additional storage can be provisioned compared to the available storage in the thin pool. -<10> The status of the `deviceClass`. -<11> The status of the LVM volume group on each node. -<12> The list of devices used to create the LVM volume group. -<13> The node on which the `deviceClass` was created. -<14> The status of the LVM volume group on the node. -<15> This field is deprecated. -<16> The status of the `LVMCluster`. diff --git a/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc deleted file mode 100644 index e8fe9e93d04d..000000000000 --- a/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-restoring-volume-snapshots-in-single-node-openshift_{context}"] -= Restoring volume snapshots in {sno} - -When you restore a volume snapshot, a new persistent volume claim (PVC) is created. -The restored PVC is independent of the volume snapshot and the source PVC. - -.Prerequisites - -* The storage class must be the same as that of the source PVC. -* The size of the requested PVC must be the same as that of the source volume of the snapshot. -+ -[IMPORTANT] -==== -A snapshot must be restored to a PVC of the same size as the source volume of the snapshot. If a larger PVC is required, you can resize the PVC after the snapshot is restored successfully. -==== - -.Procedure - -. Identify the storage class name of the source PVC and volume snapshot name. -. Save the following YAML to a file with a name such as `lvms-vol-restore.yaml` to restore the snapshot. -+ -.Example YAML to restore a PVC. 
-[source,yaml]
-----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: lvm-block-1-restore
-spec:
-  accessModes:
-  - ReadWriteOnce
-  volumeMode: Block
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: lvms-vg1
-  dataSource:
-    name: lvm-block-1-snap
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-----
-
-. Create the PVC by running the following command in the same namespace as the snapshot:
-+
-[source,terminal]
-----
-# oc create -f lvms-vol-restore.yaml
-----
\ No newline at end of file
diff --git a/modules/lvms-scaling-storage-expand-pvc.adoc b/modules/lvms-scaling-storage-expand-pvc.adoc
deleted file mode 100644
index e1a2bf689a12..000000000000
--- a/modules/lvms-scaling-storage-expand-pvc.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Module included in the following assemblies:
-//
-// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc
-
-:_content-type: PROCEDURE
-[id="lvms-scaling-expand-pvc_{context}"]
-= Expanding PVCs
-
-To use the new storage after adding capacity, you can expand existing persistent volume claims (PVCs) with {lvms}.
-
-.Prerequisites
-
-* Dynamic provisioning is used.
-* The controlling `StorageClass` object has `allowVolumeExpansion` set to `true`.
-
-.Procedure
-
-. Modify the `.spec.resources.requests.storage` field in the desired PVC resource to the new size by running the following command:
-+
-[source,terminal]
-----
-$ oc patch pvc <pvc_name> -n <application_namespace> -p '{ "spec": { "resources": { "requests": { "storage": "<desired_size>" }}}}'
-----
-
-. Watch the `status.conditions` field of the PVC to see if the resize has completed. {product-title} adds the `Resizing` condition to the PVC during expansion, which is removed after the expansion completes.
\ No newline at end of file
diff --git a/modules/lvms-scaling-storage-of-single-node-open-concept.adoc b/modules/lvms-scaling-storage-of-single-node-open-concept.adoc
deleted file mode 100644
index ea46fc692f85..000000000000
--- a/modules/lvms-scaling-storage-of-single-node-open-concept.adoc
+++ /dev/null
@@ -1,10 +0,0 @@
-// Module included in the following assemblies:
-//
-// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc
-
-:_content-type: CONCEPT
-[id="lvms-scaling-storage-of-single-node-openshift-cluster-con_{context}"]
-= Scaling storage of {sno} clusters
-
-{product-title} supports additional worker nodes for {sno} clusters on bare-metal user-provisioned infrastructure.
-{lvms} detects and uses the new additional worker nodes when the nodes show up.
\ No newline at end of file
diff --git a/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc b/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc
deleted file mode 100644
index 7cf76fdceb84..000000000000
--- a/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Module included in the following assemblies:
-//
-// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc
-
-:_content-type: PROCEDURE
-[id="lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm_{context}"]
-= Scaling up storage by adding capacity to your {sno} cluster using {rh-rhacm}
-
-You can scale the storage capacity of your configured worker nodes on a {sno} cluster using {rh-rhacm}.
-
-.Prerequisites
-
-* You have access to the {rh-rhacm} cluster using an account with `cluster-admin` privileges.
-* You have additional unused disks on each {sno} cluster to be used by {lvms}. - -.Procedure - -. Log in to the {rh-rhacm} CLI using your {product-title} credentials. -. Find the disk that you want to add. The disk to be added needs to match with the device name and path of the existing disks. -. To add capacity to the {sno} cluster, edit the `deviceSelector` section of the existing policy YAML, for example, `policy-lvms-operator.yaml`. - -+ -[NOTE] -==== -In case the `deviceSelector` field is not included during the `LVMCluster` creation, it is not possible to add the `deviceSelector` section to the CR. -You need to remove the `LVMCluster` and then recreate from the new CR. -==== - -+ -[source,yaml] ----- -apiVersion: apps.open-cluster-management.io/v1 -kind: PlacementRule -metadata: - name: placement-install-lvms -spec: - clusterConditions: - - status: "True" - type: ManagedClusterConditionAvailable - clusterSelector: - matchExpressions: - - key: mykey - operator: In - values: - - myvalue ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: PlacementBinding -metadata: - name: binding-install-lvms -placementRef: - apiGroup: apps.open-cluster-management.io - kind: PlacementRule - name: placement-install-lvms -subjects: -- apiGroup: policy.open-cluster-management.io - kind: Policy - name: install-lvms ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: Policy -metadata: - annotations: - policy.open-cluster-management.io/categories: CM Configuration Management - policy.open-cluster-management.io/controls: CM-2 Baseline Configuration - policy.open-cluster-management.io/standards: NIST SP 800-53 - name: install-lvms -spec: - disabled: false - remediationAction: enforce - policy-templates: - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: install-lvms - spec: - object-templates: - - complianceType: musthave - objectDefinition: - apiVersion: v1 - kind: Namespace - metadata: - labels: - openshift.io/cluster-monitoring: "true" - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/audit: privileged - pod-security.kubernetes.io/warn: privileged - name: openshift-storage - - complianceType: musthave - objectDefinition: - apiVersion: operators.coreos.com/v1 - kind: OperatorGroup - metadata: - name: openshift-storage-operatorgroup - namespace: openshift-storage - spec: - targetNamespaces: - - openshift-storage - - complianceType: musthave - objectDefinition: - apiVersion: operators.coreos.com/v1alpha1 - kind: Subscription - metadata: - name: lvms - namespace: openshift-storage - spec: - installPlanApproval: Automatic - name: lvms-operator - source: redhat-operators - sourceNamespace: openshift-marketplace - remediationAction: enforce - severity: low - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: lvms - spec: - object-templates: - - complianceType: musthave - objectDefinition: - apiVersion: lvm.topolvm.io/v1alpha1 - kind: LVMCluster - metadata: - name: my-lvmcluster - namespace: openshift-storage - spec: - storage: - deviceClasses: - - name: vg1 - default: true - deviceSelector: - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 # new disk is added - thinPoolConfig: - name: thin-pool-1 - sizePercent: 90 - overprovisionRatio: 10 - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: app - operator: In - values: 
- - test1 - remediationAction: enforce - severity: low ----- - -. Edit the policy by running the following command: -+ -[source,terminal] ----- -# oc edit -f policy-lvms-operator.yaml -ns lvms-policy-ns <1> ----- -<1> The `policy-lvms-operator.yaml` is the name of the existing policy. -+ -This uses the new disk specified in the `LVMCluster` CR to provision storage. diff --git a/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc b/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc deleted file mode 100644 index 94cba88a5737..000000000000 --- a/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-scaling-storage-of-single-node-openshift-cluster_{context}"] -= Scaling up storage by adding capacity to your {sno} cluster - -To scale the storage capacity of your configured worker nodes on a {sno} cluster, you can increase the capacity by adding disks. - -.Prerequisites - -* You have additional unused disks on each {sno} cluster to be used by {lvms}. - -.Procedure - -. Log in to {product-title} console of the {sno} cluster. -. From the *Operators* -> *Installed Operators* page, click on the *LVM Storage Operator* in the `openshift-storage` namespace. -. Click on the *LVMCluster* tab to list the `LVMCluster` CR created on the cluster. -. Select *Edit LVMCluster* from the *Actions* drop-down menu. -. Click on the *YAML* tab. -. Edit the `LVMCluster` CR YAML to add the new device path in the `deviceSelector` section: - -+ -[NOTE] -==== -In case the `deviceSelector` field is not included during the `LVMCluster` creation, it is not possible to add the `deviceSelector` section to the CR. -You need to remove the `LVMCluster` and then create a new CR. -==== - -+ -[source,yaml] ----- -apiVersion: lvm.topolvm.io/v1alpha1 -kind: LVMCluster -metadata: - name: my-lvmcluster -spec: - storage: - deviceClasses: - - name: vg1 - default: true - deviceSelector: - paths: - - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 <1> - - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 - - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 <2> - thinPoolConfig: - name: thin-pool-1 - sizePercent: 90 - overprovisionRatio: 10 ----- -<1> The path can be added by name (`/dev/sdb`) or by path. -<2> A new disk is added. diff --git a/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc deleted file mode 100644 index 18d6507bc792..000000000000 --- a/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-unstalling-lvms-with-web-console_{context}"] -= Uninstalling {lvms} installed using the OpenShift Web Console - -You can unstall {lvms} using the Red Hat OpenShift Container Platform Web Console. - -.Prerequisites - -* You deleted all the applications on the clusters that are using the storage provisioned by {lvms}. -* You deleted the persistent volume claims (PVCs) and persistent volumes (PVs) provisioned using {lvms}. -* You deleted all volume snapshots provisioned by {lvms}. 
-* You verified that no logical volume resources exist by using the `oc get logicalvolume` command. -* You have access to the {sno} cluster using an account with `cluster-admin` permissions. - -.Procedure - -. From the *Operators* → *Installed Operators* page, scroll to *LVM Storage* or type `LVM Storage` into the *Filter by name* to find and click on it. -. Click on the *LVMCluster* tab. -. On the right-hand side of the *LVMCluster* page, select *Delete LVMCluster* from the *Actions* drop-down menu. -. Click on the *Details* tab. -. On the right-hand side of the *Operator Details* page, select *Uninstall Operator* from the *Actions* drop-down menu. -. Select *Remove*. {lvms} stops running and is completely removed. \ No newline at end of file diff --git a/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc b/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc deleted file mode 100644 index a1b84ea72735..000000000000 --- a/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc +++ /dev/null @@ -1,314 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-uninstalling-lvms-rhacm_{context}"] -= Uninstalling {lvms} installed using {rh-rhacm} - -To uninstall {lvms} that you installed using {rh-rhacm}, you need to delete the {rh-rhacm} policy that you created for deploying and configuring the Operator. - -When you delete the {rh-rhacm} policy, the resources that the policy has created are not removed. -You need to create additional policies to remove the resources. - -As the created resources are not removed when you delete the policy, you need to perform the following steps: - -. Remove all the Persistent volume claims (PVCs) and volume snapshots provisioned by {lvms}. -. Remove the `LVMCluster` resources to clean up Logical Volume Manager resources created on the disks. -. Create an additional policy to uninstall the Operator. - -.Prerequisites - -* Ensure that the following are deleted before deleting the policy: -** All the applications on the managed clusters that are using the storage provisioned by {lvms}. -** PVCs and persistent volumes (PVs) provisioned using {lvms}. -** All volume snapshots provisioned by {lvms}. -* Ensure you have access to the {rh-rhacm} cluster using an account with a `cluster-admin` role. - -.Procedure - -. In the OpenShift CLI (`oc`), delete the {rh-rhacm} policy that you created for deploying and configuring {lvms} on the hub cluster by using the following command: -+ -[source,terminal] ----- -# oc delete -f policy-lvms-operator.yaml -n lvms-policy-ns <1> ----- -<1> The `policy-lvms-operator.yaml` is the name of the file to which the policy was saved. - -. To create a policy for removing the `LVMCluster` resource, save the following YAML to a file with a name such as `lvms-remove-policy.yaml`. -This enables the Operator to clean up all Logical Volume Manager resources that it created on the cluster. 
-+ -[source,yaml] ----- -apiVersion: policy.open-cluster-management.io/v1 -kind: Policy -metadata: - name: policy-lvmcluster-delete - annotations: - policy.open-cluster-management.io/standards: NIST SP 800-53 - policy.open-cluster-management.io/categories: CM Configuration Management - policy.open-cluster-management.io/controls: CM-2 Baseline Configuration -spec: - remediationAction: enforce - disabled: false - policy-templates: - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: policy-lvmcluster-removal - spec: - remediationAction: enforce <1> - severity: low - object-templates: - - complianceType: mustnothave - objectDefinition: - kind: LVMCluster - apiVersion: lvm.topolvm.io/v1alpha1 - metadata: - name: my-lvmcluster - namespace: openshift-storage <2> ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: PlacementBinding -metadata: - name: binding-policy-lvmcluster-delete -placementRef: - apiGroup: apps.open-cluster-management.io - kind: PlacementRule - name: placement-policy-lvmcluster-delete -subjects: - - apiGroup: policy.open-cluster-management.io - kind: Policy - name: policy-lvmcluster-delete ---- -apiVersion: apps.open-cluster-management.io/v1 -kind: PlacementRule -metadata: - name: placement-policy-lvmcluster-delete -spec: - clusterConditions: - - status: "True" - type: ManagedClusterConditionAvailable - clusterSelector: - matchExpressions: - - key: mykey - operator: In - values: - - myvalue ----- -<1> The `policy-template` `spec.remediationAction` is overridden by the preceding parameter value for `spec.remediationAction`. -<2> This `namespace` field must have the `openshift-storage` value. - -. Set the value of the `PlacementRule.spec.clusterSelector` field to select the clusters from which to uninstall {lvms}. - -. Create the policy by running the following command: -+ -[source,terminal] ----- -# oc create -f lvms-remove-policy.yaml -n lvms-policy-ns ----- - -. 
To create a policy to check if the `LVMCluster` CR has been removed, save the following YAML to a file with a name such as `check-lvms-remove-policy.yaml`: -+ -[source,yaml] ----- -apiVersion: policy.open-cluster-management.io/v1 -kind: Policy -metadata: - name: policy-lvmcluster-inform - annotations: - policy.open-cluster-management.io/standards: NIST SP 800-53 - policy.open-cluster-management.io/categories: CM Configuration Management - policy.open-cluster-management.io/controls: CM-2 Baseline Configuration -spec: - remediationAction: inform - disabled: false - policy-templates: - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: policy-lvmcluster-removal-inform - spec: - remediationAction: inform <1> - severity: low - object-templates: - - complianceType: mustnothave - objectDefinition: - kind: LVMCluster - apiVersion: lvm.topolvm.io/v1alpha1 - metadata: - name: my-lvmcluster - namespace: openshift-storage <2> ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: PlacementBinding -metadata: - name: binding-policy-lvmcluster-check -placementRef: - apiGroup: apps.open-cluster-management.io - kind: PlacementRule - name: placement-policy-lvmcluster-check -subjects: - - apiGroup: policy.open-cluster-management.io - kind: Policy - name: policy-lvmcluster-inform ---- -apiVersion: apps.open-cluster-management.io/v1 -kind: PlacementRule -metadata: - name: placement-policy-lvmcluster-check -spec: - clusterConditions: - - status: "True" - type: ManagedClusterConditionAvailable - clusterSelector: - matchExpressions: - - key: mykey - operator: In - values: - - myvalue ----- -<1> The `policy-template` `spec.remediationAction` is overridden by the preceding parameter value for `spec.remediationAction`. -<2> The `namespace` field must have the `openshift-storage` value. - -. Create the policy by running the following command: -+ -[source,terminal] ----- -# oc create -f check-lvms-remove-policy.yaml -n lvms-policy-ns ----- - -. Check the policy status by running the following command: -+ -[source,terminal] ----- -# oc get policy -n lvms-policy-ns ----- - -+ -.Example output -[source,terminal] ----- -NAME REMEDIATION ACTION COMPLIANCE STATE AGE -policy-lvmcluster-delete enforce Compliant 15m -policy-lvmcluster-inform inform Compliant 15m ----- - -. After both the policies are compliant, save the following YAML to a file with a name such as `lvms-uninstall-policy.yaml` to create a policy to uninstall {lvms}. 
-+ -[source,yaml] ----- -apiVersion: apps.open-cluster-management.io/v1 -kind: PlacementRule -metadata: - name: placement-uninstall-lvms -spec: - clusterConditions: - - status: "True" - type: ManagedClusterConditionAvailable - clusterSelector: - matchExpressions: - - key: mykey - operator: In - values: - - myvalue ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: PlacementBinding -metadata: - name: binding-uninstall-lvms -placementRef: - apiGroup: apps.open-cluster-management.io - kind: PlacementRule - name: placement-uninstall-lvms -subjects: -- apiGroup: policy.open-cluster-management.io - kind: Policy - name: uninstall-lvms ---- -apiVersion: policy.open-cluster-management.io/v1 -kind: Policy -metadata: - annotations: - policy.open-cluster-management.io/categories: CM Configuration Management - policy.open-cluster-management.io/controls: CM-2 Baseline Configuration - policy.open-cluster-management.io/standards: NIST SP 800-53 - name: uninstall-lvms -spec: - disabled: false - policy-templates: - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: uninstall-lvms - spec: - object-templates: - - complianceType: mustnothave - objectDefinition: - apiVersion: v1 - kind: Namespace - metadata: - name: openshift-storage - - complianceType: mustnothave - objectDefinition: - apiVersion: operators.coreos.com/v1 - kind: OperatorGroup - metadata: - name: openshift-storage-operatorgroup - namespace: openshift-storage - spec: - targetNamespaces: - - openshift-storage - - complianceType: mustnothave - objectDefinition: - apiVersion: operators.coreos.com/v1alpha1 - kind: Subscription - metadata: - name: lvms-operator - namespace: openshift-storage - remediationAction: enforce - severity: low - - objectDefinition: - apiVersion: policy.open-cluster-management.io/v1 - kind: ConfigurationPolicy - metadata: - name: policy-remove-lvms-crds - spec: - object-templates: - - complianceType: mustnothave - objectDefinition: - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - name: logicalvolumes.topolvm.io - - complianceType: mustnothave - objectDefinition: - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - name: lvmclusters.lvm.topolvm.io - - complianceType: mustnothave - objectDefinition: - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - name: lvmvolumegroupnodestatuses.lvm.topolvm.io - - complianceType: mustnothave - objectDefinition: - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - name: lvmvolumegroups.lvm.topolvm.io - remediationAction: enforce - severity: high ----- - -. Create the policy by running the following command: -+ -[source,terminal] ----- -# oc create -f lvms-uninstall-policy.yaml -ns lvms-policy-ns ----- \ No newline at end of file diff --git a/modules/lvms-upgrading-lvms-on-sno.adoc b/modules/lvms-upgrading-lvms-on-sno.adoc deleted file mode 100644 index 105cffccb88a..000000000000 --- a/modules/lvms-upgrading-lvms-on-sno.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: PROCEDURE -[id="lvms-upgrading-lvms-on-sno_{context}"] -= Upgrading {lvms} on {sno} clusters - -Currently, it is not possible to upgrade from {rh-storage} Logical Volume Manager Operator 4.11 to {lvms} 4.12 on {sno} clusters. 
- -[IMPORTANT] -==== -The data will not be preserved during this process. -==== - -.Procedure - -. Back up any data that you want to preserve on the persistent volume claims (PVCs). -. Delete all PVCs provisioned by the {rh-storage} Logical Volume Manager Operator and their pods. -. Reinstall {lvms} on {product-title} 4.12. -. Recreate the workloads. -. Copy the backup data to the PVCs created after upgrading to 4.12. \ No newline at end of file diff --git a/modules/lvms-volume-clones-in-single-node-openshift.adoc b/modules/lvms-volume-clones-in-single-node-openshift.adoc deleted file mode 100644 index 44bfc5d2241e..000000000000 --- a/modules/lvms-volume-clones-in-single-node-openshift.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: CONCEPT -[id="lvms-volume-cloning-for-single-node-openshift-cluster_{context}"] -= Volume cloning for {sno} - -A clone is a duplicate of an existing storage volume that can be used like any standard volume. \ No newline at end of file diff --git a/modules/lvms-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-volume-snapshots-in-single-node-openshift.adoc deleted file mode 100644 index 188216ca7d89..000000000000 --- a/modules/lvms-volume-snapshots-in-single-node-openshift.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc - -:_content-type: CONCEPT -[id="lvms-volume-snapsot-for-sno_{context}"] -= Volume snapshots for {sno} - -You can take volume snapshots of persistent volumes (PVs) that are provisioned by {lvms}. -You can also create volume snapshots of the cloned volumes. Volume snapshots help you to do the following: - -* Back up your application data. -+ -[IMPORTANT] -==== -Volume snapshots are located on the same devices as the original data. To use the volume snapshots as backups, you need to move the snapshots to a secure location. You can use OpenShift API for Data Protection backup and restore solutions. -==== - -* Revert to a state at which the volume snapshot was taken. \ No newline at end of file diff --git a/modules/machine-adding-aws-compute-cloudformation.adoc b/modules/machine-adding-aws-compute-cloudformation.adoc deleted file mode 100644 index eeafd8a953ed..000000000000 --- a/modules/machine-adding-aws-compute-cloudformation.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/user_infra/adding-aws-compute-user-infra.adoc - -:_content-type: PROCEDURE -[id="machine-adding-aws-compute-cloudformation_{context}"] -= Adding more compute machines to your AWS cluster by using CloudFormation templates - -You can add more compute machines to your {product-title} cluster on Amazon Web Services (AWS) that you created by using the sample CloudFormation templates. - -[IMPORTANT] -==== -The CloudFormation template creates a stack that represents one compute machine. You must create a stack for each compute machine. -==== - -[NOTE] -==== -If you do not use the provided CloudFormation template to create your compute nodes, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. 
-==== - -.Prerequisites - -* You installed an {product-title} cluster by using CloudFormation templates and have access to the JSON file and CloudFormation template that you used to create the compute machines during cluster installation. -* You installed the AWS CLI. - -.Procedure - -. Create another compute stack. -.. Launch the template: -+ -[source,terminal] ----- -$ aws cloudformation create-stack --stack-name <name> \ <1> - --template-body file://<template>.yaml \ <2> - --parameters file://<parameters>.json <3> ----- -<1> `<name>` is the name for the CloudFormation stack, such as `cluster-workers`. You must provide the name of this stack if you remove the cluster. -<2> `<template>` is the relative path to and name of the CloudFormation template YAML file that you saved. -<3> `<parameters>` is the relative path to and name of the CloudFormation parameters JSON file. - -.. Confirm that the template components exist: -+ -[source,terminal] ----- -$ aws cloudformation describe-stacks --stack-name <name> ----- - -. Continue to create compute stacks until you have created enough compute machines for your cluster. diff --git a/modules/machine-api-operator.adoc b/modules/machine-api-operator.adoc deleted file mode 100644 index 58679ccd0cfc..000000000000 --- a/modules/machine-api-operator.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc - -[id="machine-api-operator_{context}"] -= Machine API Operator - -[discrete] -== Purpose - -The Machine API Operator manages the lifecycle of specific purpose custom resource definitions (CRD), controllers, and RBAC objects that extend the Kubernetes API. This declares the desired state of machines in a cluster. - -[discrete] -== Project - -link:https://github.com/openshift/machine-api-operator[machine-api-operator] - -[discrete] -== CRDs - -* `MachineSet` -* `Machine` -* `MachineHealthCheck` diff --git a/modules/machine-api-overview.adoc b/modules/machine-api-overview.adoc deleted file mode 100644 index 132270d780de..000000000000 --- a/modules/machine-api-overview.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/index.adoc -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-osp.adoc -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc - -:_content-type: CONCEPT -[id="machine-api-overview_{context}"] -= Machine API overview - -The Machine API is a combination of primary resources that are based on the upstream Cluster API project and custom {product-title} resources. - -For {product-title} {product-version} clusters, the Machine API performs all node host provisioning management actions after the cluster installation finishes. 
Because of this system, {product-title} {product-version} offers an elastic, dynamic provisioning method on top of public or private cloud infrastructure. - -The two primary resources are: - -Machines:: A fundamental unit that describes the host for a node. A machine has a `providerSpec` specification, which describes the types of compute nodes that are offered for different cloud platforms. For example, a machine type for a worker node on Amazon Web Services (AWS) might define a specific machine type and required metadata. - -Machine sets:: `MachineSet` resources are groups of compute machines. Compute machine sets are to compute machines as replica sets are to pods. If you need more compute machines or must scale them down, you change the `replicas` field on the `MachineSet` resource to meet your compute need. -+ -[WARNING] -==== -Control plane machines cannot be managed by compute machine sets. - -Control plane machine sets provide management capabilities for supported control plane machines that are similar to what compute machine sets provide for compute machines. - -For more information, see “Managing control plane machines". -==== - -The following custom resources add more capabilities to your cluster: - -Machine autoscaler:: The `MachineAutoscaler` resource automatically scales compute machines in a cloud. You can set the minimum and maximum scaling boundaries for nodes in a specified compute machine set, and the machine autoscaler maintains that range of nodes. -+ -The `MachineAutoscaler` object takes effect after a `ClusterAutoscaler` object exists. Both `ClusterAutoscaler` and `MachineAutoscaler` resources are made available by the `ClusterAutoscalerOperator` object. - -Cluster autoscaler:: This resource is based on the upstream cluster autoscaler project. In the {product-title} implementation, it is integrated with the Machine API by extending the compute machine set API. You can use the cluster autoscaler to manage your cluster in the following ways: -+ -* Set cluster-wide scaling limits for resources such as cores, nodes, memory, and GPU -* Set the priority so that the cluster prioritizes pods and new nodes are not brought online for less important pods -* Set the scaling policy so that you can scale up nodes but not scale them down - -Machine health check:: The `MachineHealthCheck` resource detects when a machine is unhealthy, deletes it, and, on supported platforms, makes a new machine. - -// Should this paragraph still be in here in 2022? Or at least should it be rephrased to avoid comparing to 3.11? -In {product-title} version 3.11, you could not roll out a multi-zone architecture easily because the cluster did not manage machine provisioning. Beginning with {product-title} version 4.1, this process is easier. Each compute machine set is scoped to a single zone, so the installation program sends out compute machine sets across availability zones on your behalf. And then because your compute is dynamic, and in the face of a zone failure, you always have a zone for when you must rebalance your machines. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. The autoscaler provides best-effort balancing over the life of a cluster. 
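As a brief illustration of how the `replicas` field drives compute capacity, the following commands list the compute machine sets in a cluster and then scale one of them. The machine set name is a placeholder that you replace with a name from the first command's output:

[source,terminal]
----
$ oc get machinesets -n openshift-machine-api
----

[source,terminal]
----
$ oc scale --replicas=3 machineset <machineset_name> -n openshift-machine-api
----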
diff --git a/modules/machine-autoscaler-about.adoc b/modules/machine-autoscaler-about.adoc deleted file mode 100644 index 8799cfb61101..000000000000 --- a/modules/machine-autoscaler-about.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="machine-autoscaler-about_{context}"] -= About the machine autoscaler - -The machine autoscaler adjusts the number of machines in the compute machine sets that you deploy in an {product-title} cluster. You can scale both the default `worker` compute machine set and any other compute machine sets that you create. The machine autoscaler creates more machines when the cluster runs out of resources to support more deployments. Any changes to the values in `MachineAutoscaler` resources, such as the minimum or maximum number of instances, are immediately applied to the compute machine set they target. - -[IMPORTANT] -==== -You must deploy a machine autoscaler for the cluster autoscaler to scale your machines. The cluster autoscaler uses the annotations on compute machine sets that the machine autoscaler sets to determine the resources that it can scale. If you define a cluster autoscaler without also defining machine autoscalers, the cluster autoscaler will never scale your cluster. -==== diff --git a/modules/machine-autoscaler-cr.adoc b/modules/machine-autoscaler-cr.adoc deleted file mode 100644 index b78861a2c98e..000000000000 --- a/modules/machine-autoscaler-cr.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/applying-autoscaling.adoc -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: REFERENCE -[id="machine-autoscaler-cr_{context}"] -= MachineAutoscaler resource definition - -This `MachineAutoscaler` resource definition shows the parameters and sample values for the machine autoscaler. - - -[source,yaml] ----- -apiVersion: "autoscaling.openshift.io/v1beta1" -kind: "MachineAutoscaler" -metadata: - name: "worker-us-east-1a" <1> - namespace: "openshift-machine-api" -spec: - minReplicas: 1 <2> - maxReplicas: 12 <3> - scaleTargetRef: <4> - apiVersion: machine.openshift.io/v1beta1 - kind: MachineSet <5> - name: worker-us-east-1a <6> ----- -<1> Specify the machine autoscaler name. To make it easier to identify which compute machine set this machine autoscaler scales, specify or include the name of the compute machine set to scale. The compute machine set name takes the following form: `<clusterid>-<machineset>-<region>`. -<2> Specify the minimum number of machines of the specified type that must remain in the specified zone after the cluster autoscaler initiates cluster scaling. If running in AWS, GCP, Azure, {rh-openstack}, or vSphere, this value can be set to `0`. For other providers, do not set this value to `0`. -+ -You can save on costs by setting this value to `0` for use cases such as running expensive or limited-usage hardware that is used for specialized workloads, or by scaling a compute machine set with extra large machines. The cluster autoscaler scales the compute machine set down to zero if the machines are not in use. -+ -[IMPORTANT] -==== -Do not set the `spec.minReplicas` value to `0` for the three compute machine sets that are created during the {product-title} installation process for an installer-provisioned infrastructure. 
-==== -<3> Specify the maximum number of machines of the specified type that the cluster autoscaler can deploy in the specified zone after it initiates cluster scaling. Ensure that the `maxNodesTotal` value in the `ClusterAutoscaler` resource definition is large enough to allow the machine autoscaler to deploy this number of machines. -<4> In this section, provide values that describe the existing compute machine set to scale. -<5> The `kind` parameter value is always `MachineSet`. -<6> The `name` value must match the name of an existing compute machine set, as shown in the `metadata.name` parameter value. diff --git a/modules/machine-config-daemon-metrics.adoc b/modules/machine-config-daemon-metrics.adoc deleted file mode 100644 index 890fd0d4181b..000000000000 --- a/modules/machine-config-daemon-metrics.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-nodes-machine-config-daemon-metrics.adoc - -[id="machine-config-daemon-metrics_{context}"] -= Machine Config Daemon metrics - -Beginning with {product-title} 4.3, the Machine Config Daemon provides a set of metrics. These metrics can be accessed using the Prometheus Cluster Monitoring stack. - -The following table describes this set of metrics. Some entries contain commands for getting specific logs. However, the most comprehensive set of logs is available using the `oc adm must-gather` command. - -[NOTE] -==== -Metrics marked with `+*+` in the *Name* and *Description* columns represent serious errors that might cause performance problems. Such problems might prevent updates and upgrades from proceeding. -==== - -[cols="1,1,2,2", options="header"] -.MCO metrics -|=== -|Name -|Format -|Description -|Notes - -ifndef::openshift-origin[] -|`mcd_host_os_and_version` -|`[]string{"os", "version"}` -|Shows the OS that MCD is running on, such as RHCOS or RHEL. In case of RHCOS, the version is provided. -| -endif::openshift-origin[] - -ifdef::openshift-origin[] -|`mcd_host_os_and_version` -|`[]string{"os", "version"}` -|Shows the OS that MCD is running on, such as Fedora. -| -endif::openshift-origin[] - -|`mcd_drain_err*` -| -|Logs errors received during failed drain. * -|While drains might need multiple tries to succeed, terminal failed drains prevent updates from proceeding. The `drain_time` metric, which shows how much time the drain took, might help with troubleshooting. - -For further investigation, see the logs by running: - -`$ oc logs -f -n openshift-machine-config-operator machine-config-daemon-<hash> -c machine-config-daemon` - -|`mcd_pivot_err*` -|`[]string{"err", "node", "pivot_target"}` -|Logs errors encountered during pivot. * -|Pivot errors might prevent OS upgrades from proceeding. - -For further investigation, run this command to see the logs from the `machine-config-daemon` container: - -`$ oc logs -f -n openshift-machine-config-operator machine-config-daemon-<hash> -c machine-config-daemon` - -|`mcd_state` -|`[]string{"state", "reason"}` -|State of Machine Config Daemon for the indicated node. Possible states are "Done", "Working", and "Degraded". In case of "Degraded", the reason is included. -|For further investigation, see the logs by running: - -`$ oc logs -f -n openshift-machine-config-operator machine-config-daemon-<hash> -c machine-config-daemon` - -|`mcd_kubelet_state*` -| -|Logs kubelet health failures. * -|This is expected to be empty, with failure count of 0. If the failure count exceeds 2, the threshold is exceeded. 
This indicates a possible issue with the health of the kubelet. - -For further investigation, run this command to access the node and see all its logs: - -`$ oc debug node/<node> -- chroot /host journalctl -u kubelet` - -|`mcd_reboot_err*` -|`[]string{"message", "err", "node"}` -|Logs the failed reboots and the corresponding errors. * -|This is expected to be empty, which indicates a successful reboot. - -For further investigation, see the logs by running: - -`$ oc logs -f -n openshift-machine-config-operator machine-config-daemon-<hash> -c machine-config-daemon` - -|`mcd_update_state` -|`[]string{"config", "err"}` -|Logs success or failure of configuration updates and the corresponding errors. -|The expected value is `rendered-master/rendered-worker-XXXX`. If the update fails, an error is present. - -For further investigation, see the logs by running: - -`$ oc logs -f -n openshift-machine-config-operator machine-config-daemon-<hash> -c machine-config-daemon` -|=== diff --git a/modules/machine-config-drift-detection.adoc b/modules/machine-config-drift-detection.adoc deleted file mode 100644 index 34d7ad6e846e..000000000000 --- a/modules/machine-config-drift-detection.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: CONCEPT -[id="machine-config-drift-detection_{context}"] -= Understanding configuration drift detection - -There might be situations when the on-disk state of a node differs from what is configured in the machine config. This is known as _configuration drift_. For example, a cluster admin might manually modify a file, a systemd unit file, or a file permission that was configured through a machine config. This causes configuration drift. Configuration drift can cause problems between nodes in a Machine Config Pool or when the machine configs are updated. - -The Machine Config Operator (MCO) uses the Machine Config Daemon (MCD) to check nodes for configuration drift on a regular basis. If detected, the MCO sets the node and the machine config pool (MCP) to `Degraded` and reports the error. A degraded node is online and operational, but, it cannot be updated. - -The MCD performs configuration drift detection upon each of the following conditions: - -* When a node boots. -* After any of the files (Ignition files and systemd drop-in units) specified in the machine config are modified outside of the machine config. -* Before a new machine config is applied. -+ -[NOTE] -==== -If you apply a new machine config to the nodes, the MCD temporarily shuts down configuration drift detection. This shutdown is needed because the new machine config necessarily differs from the machine config on the nodes. After the new machine config is applied, the MCD restarts detecting configuration drift using the new machine config. -==== - -When performing configuration drift detection, the MCD validates that the file contents and permissions fully match what the currently-applied machine config specifies. Typically, the MCD detects configuration drift in less than a second after the detection is triggered. 
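For example, a machine config similar to the following minimal sketch, in which the name and file contents are illustrative only, manages the `/etc/mco-test-file` file that appears in the example output later in this module. Editing such a file directly on the node, rather than through a machine config, is the kind of change that the MCD reports as configuration drift.

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  name: 99-worker-mco-test-file <1>
  labels:
    machineconfiguration.openshift.io/role: worker <2>
spec:
  config:
    ignition:
      version: 3.2.0
    storage:
      files:
      - path: /etc/mco-test-file <3>
        mode: 420
        contents:
          source: data:,drift-test
----
<1> An illustrative name; use any valid machine config name.
<2> Applies the machine config to the `worker` machine config pool.
<3> The on-disk file that the MCD compares against the currently-applied machine config.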
- -If the MCD detects configuration drift, the MCD performs the following tasks: - -* Emits an error to the console logs -* Emits a Kubernetes event -* Stops further detection on the node -* Sets the node and MCP to `degraded` - -You can check if you have a degraded node by listing the MCPs: - -[source,terminal] ----- -$ oc get mcp worker ----- - -If you have a degraded MCP, the `DEGRADEDMACHINECOUNT` field is non-zero, similar to the following output: - -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -worker rendered-worker-404caf3180818d8ac1f50c32f14b57c3 False True True 2 1 1 1 5h51m ----- - -You can determine if the problem is caused by configuration drift by examining the machine config pool: - -[source,terminal] ----- -$ oc describe mcp worker ----- - -.Example output -[source,terminal] ----- - ... - Last Transition Time: 2021-12-20T18:54:00Z - Message: Node ci-ln-j4h8nkb-72292-pxqxz-worker-a-fjks4 is reporting: "content mismatch for file \"/etc/mco-test-file\"" <1> - Reason: 1 nodes are reporting degraded status on sync - Status: True - Type: NodeDegraded <2> - ... ----- -<1> This message shows that a node's `/etc/mco-test-file` file, which was added by the machine config, has changed outside of the machine config. -<2> The state of the node is `NodeDegraded`. - -Or, if you know which node is degraded, examine that node: - -[source,terminal] ----- -$ oc describe node/ci-ln-j4h8nkb-72292-pxqxz-worker-a-fjks4 ----- - -.Example output -[source,terminal] ----- - ... - -Annotations: cloud.network.openshift.io/egress-ipconfig: [{"interface":"nic0","ifaddr":{"ipv4":"10.0.128.0/17"},"capacity":{"ip":10}}] - csi.volume.kubernetes.io/nodeid: - {"pd.csi.storage.gke.io":"projects/openshift-gce-devel-ci/zones/us-central1-a/instances/ci-ln-j4h8nkb-72292-pxqxz-worker-a-fjks4"} - machine.openshift.io/machine: openshift-machine-api/ci-ln-j4h8nkb-72292-pxqxz-worker-a-fjks4 - machineconfiguration.openshift.io/controlPlaneTopology: HighlyAvailable - machineconfiguration.openshift.io/currentConfig: rendered-worker-67bd55d0b02b0f659aef33680693a9f9 - machineconfiguration.openshift.io/desiredConfig: rendered-worker-67bd55d0b02b0f659aef33680693a9f9 - machineconfiguration.openshift.io/reason: content mismatch for file "/etc/mco-test-file" <1> - machineconfiguration.openshift.io/state: Degraded <2> - ... ----- -<1> The error message indicating that configuration drift was detected between the node and the listed machine config. Here the error message indicates that the contents of the `/etc/mco-test-file`, which was added by the machine config, has changed outside of the machine config. -<2> The state of the node is `Degraded`. - -You can correct configuration drift and return the node to the `Ready` state by performing one of the following remediations: - -* Ensure that the contents and file permissions of the files on the node match what is configured in the machine config. You can manually rewrite the file -contents or change the file permissions. -* Generate a link:https://access.redhat.com/solutions/5414371[force file] on the degraded node. The force file causes the MCD to bypass the usual configuration drift detection and reapplies the current machine config. -+ -[NOTE] -==== -Generating a force file on a node causes that node to reboot. 
-==== - diff --git a/modules/machine-config-operator.adoc b/modules/machine-config-operator.adoc deleted file mode 100644 index 26286845323d..000000000000 --- a/modules/machine-config-operator.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -[id="machine-config-operator_{context}"] -= Machine Config Operator - -[discrete] -== Purpose - -The Machine Config Operator manages and applies configuration and updates of the base operating system and container runtime, including everything between the kernel and kubelet. - -There are four components: - -* `machine-config-server`: Provides Ignition configuration to new machines joining the cluster. -* `machine-config-controller`: Coordinates the upgrade of machines to the desired configurations defined by a `MachineConfig` object. Options are provided to control the upgrade for sets of machines individually. -* `machine-config-daemon`: Applies new machine configuration during update. Validates and verifies the state of the machine to the requested machine configuration. -* `machine-config`: Provides a complete source of machine configuration at installation, first start up, and updates for a machine. - -include::snippets/mcs-endpoint-limitation.adoc[] - -.Additional resources - -* xref:../networking/openshift_sdn/about-openshift-sdn.adoc#about-openshift-sdn[About the OpenShift SDN network plugin]. - -[discrete] -== Project - -link:https://github.com/openshift/machine-config-operator[openshift-machine-config-operator] diff --git a/modules/machine-config-overview.adoc b/modules/machine-config-overview.adoc deleted file mode 100644 index 948baf52b32e..000000000000 --- a/modules/machine-config-overview.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator-reference.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: CONCEPT -[id="machine-config-overview-{context}"] -= Machine config overview - -The Machine Config Operator (MCO) manages updates to systemd, CRI-O and Kubelet, the kernel, Network Manager and other system features. It also offers a `MachineConfig` CRD that can write configuration files onto the host (see link:https://github.com/openshift/machine-config-operator#machine-config-operator[machine-config-operator]). Understanding what MCO does and how it interacts with other components is critical to making advanced, system-level changes to an {product-title} cluster. Here are some things you should know about MCO, machine configs, and how they are used: - -* A machine config can make a specific change to a file or service on the operating system of each system representing a pool of {product-title} nodes. - -* MCO applies changes to operating systems in pools of machines. All {product-title} clusters start with worker and control plane node pools. By adding more role labels, you can configure custom pools of nodes. For example, you can set up a custom pool of worker nodes that includes particular hardware features needed by an application. However, examples in this section focus on changes to the default pool types. -+ -[IMPORTANT] -==== -A node can have multiple labels applied that indicate its type, such as `master` or `worker`, however it can be a member of only a *single* machine config pool. 
-==== - -* After a machine config change, the MCO updates the affected nodes alphabetically by zone, based on the `topology.kubernetes.io/zone` label. If a zone has more than one node, the oldest nodes are updated first. For nodes that do not use zones, such as in bare metal deployments, the nodes are upgraded by age, with the oldest nodes updated first. The MCO updates the number of nodes as specified by the `maxUnavailable` field on the machine configuration pool at a time. - -* Some machine configuration must be in place before {product-title} is installed to disk. In most cases, this can be accomplished by creating -a machine config that is injected directly into the {product-title} installer process, instead of running as a post-installation machine config. In other cases, you might need to do bare metal installation where you pass kernel arguments at {product-title} installer startup, to do such things as setting per-node individual IP addresses or advanced disk partitioning. - -* MCO manages items that are set in machine configs. Manual changes you do to your systems will not be overwritten by MCO, unless MCO is explicitly told to manage a conflicting file. In other words, MCO only makes specific updates you request, it does not claim control over the whole node. - -* Manual changes to nodes are strongly discouraged. If you need to decommission a node and start a new one, those direct changes would be lost. - -* MCO is only supported for writing to files in `/etc` and `/var` directories, although there are symbolic links to some directories that can be writeable by being symbolically linked to one of those areas. The `/opt` and `/usr/local` directories are examples. - -* Ignition is the configuration format used in MachineConfigs. See the link:https://coreos.github.io/ignition/configuration-v3_2/[Ignition Configuration Specification v3.2.0] for details. - -* Although Ignition config settings can be delivered directly at {product-title} installation time, and are formatted in the same way that MCO delivers Ignition configs, MCO has no way of seeing what those original Ignition configs are. Therefore, you should wrap Ignition config settings into a machine config before deploying them. - -* When a file managed by MCO changes outside of MCO, the Machine Config Daemon (MCD) sets the node as `degraded`. It will not overwrite the -offending file, however, and should continue to operate in a `degraded` state. - -* A key reason for using a machine config is that it will be applied when you spin up new nodes for a pool in your {product-title} cluster. The `machine-api-operator` provisions a new machine and MCO configures it. - -MCO uses link:https://coreos.github.io/ignition/[Ignition] as the configuration format. {product-title} 4.6 moved from Ignition config specification version 2 to version 3. - -== What can you change with machine configs? -The kinds of components that MCO can change include: - -* **config**: Create Ignition config objects (see the link:https://coreos.github.io/ignition/configuration-v3_2/[Ignition configuration specification]) to do things like modify files, systemd services, and other features on {product-title} machines, including: -- **Configuration files**: Create or overwrite files in the `/var` or `/etc` directory. -- **systemd units**: Create and set the status of a systemd service or add to an existing systemd service by dropping in additional settings. -- **users and groups**: Change SSH keys in the passwd section post-installation. 
-+ -[IMPORTANT] -==== -* Changing SSH keys by using a machine config is supported only for the `core` user. -* Adding new users by using a machine config is not supported. -==== -* **kernelArguments**: Add arguments to the kernel command line when {product-title} nodes boot. -* **kernelType**: Optionally identify a non-standard kernel to use instead of the standard kernel. Use `realtime` to use the RT kernel (for RAN). This is only supported on select platforms. -ifndef::openshift-origin[] -* **fips**: Enable link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/security_hardening/index#using-the-system-wide-cryptographic-policies_security-hardening[FIPS] mode. FIPS should be set at installation-time setting and not a post-installation procedure. - -[IMPORTANT] -==== -The use of FIPS Validated / Modules in Process cryptographic libraries is only supported on {product-title} deployments on `x86_64`, `ppc64le`, and `s390x` architectures. -==== -endif::openshift-origin[] -* **extensions**: Extend {op-system} features by adding selected pre-packaged software. For this feature, available extensions include link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/security_hardening/index#protecting-systems-against-intrusive-usb-devices_security-hardening[usbguard] and kernel modules. -* **Custom resources (for `ContainerRuntime` and `Kubelet`)**: Outside of machine configs, MCO manages two special custom resources for modifying CRI-O container runtime settings (`ContainerRuntime` CR) and the Kubelet service (`Kubelet` CR). - -The MCO is not the only Operator that can change operating system components on {product-title} nodes. Other Operators can modify operating system-level features as well. One example is the Node Tuning Operator, which allows you to do node-level tuning through Tuned daemon profiles. - -Tasks for the MCO configuration that can be done post-installation are included in the following procedures. See descriptions of {op-system} bare metal installation for system configuration tasks that must be done during or before {product-title} installation. - -There might be situations where the configuration on a node does not fully match what the currently-applied machine config specifies. This state is called _configuration drift_. The Machine Config Daemon (MCD) regularly checks the nodes for configuration drift. If the MCD detects configuration drift, the MCO marks the node `degraded` until an administrator corrects the node configuration. A degraded node is online and operational, but, it cannot be updated. For more information on configuration drift, see _Understanding configuration drift detection_. - -== Project - -See the link:https://github.com/openshift/machine-config-operator[openshift-machine-config-operator] GitHub site for details. diff --git a/modules/machine-delete.adoc b/modules/machine-delete.adoc deleted file mode 100644 index b7d5422a0589..000000000000 --- a/modules/machine-delete.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deleting-machine.adoc -// * windows_containers/removing-windows-nodes.adoc - -:_content-type: PROCEDURE -[id="machine-delete_{context}"] -= Deleting a specific machine - -You can delete a specific machine. - -[IMPORTANT] -==== -Do not delete a control plane machine unless your cluster uses a control plane machine set. -==== - -.Prerequisites - -* Install an {product-title} cluster. 
-* Install the OpenShift CLI (`oc`). -* Log in to `oc` as a user with `cluster-admin` permission. - -.Procedure - -. View the machines that are in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machine -n openshift-machine-api ----- -+ -The command output contains a list of machines in the `<clusterid>-<role>-<cloud_region>` format. - -. Identify the machine that you want to delete. - -. Delete the machine by running the following command: -+ -[source,terminal] ----- -$ oc delete machine <machine> -n openshift-machine-api ----- -+ -[IMPORTANT] -==== -By default, the machine controller tries to drain the node that is backed by the machine until it succeeds. In some situations, such as with a misconfigured pod disruption budget, the drain operation might not be able to succeed. If the drain operation fails, the machine controller cannot proceed removing the machine. - -You can skip draining the node by annotating `machine.openshift.io/exclude-node-draining` in a specific machine. -==== -+ -If the machine that you delete belongs to a machine set, a new machine is immediately created to satisfy the specified number of replicas. \ No newline at end of file diff --git a/modules/machine-edge-pool-review-nodes.adoc b/modules/machine-edge-pool-review-nodes.adoc deleted file mode 100644 index de108e5aaa10..000000000000 --- a/modules/machine-edge-pool-review-nodes.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: PROCEDURE -[id="machine-edge-pool-review-nodes_{context}"] -= Verifying nodes that were created with edge compute pool - -After you install a cluster that uses AWS Local Zones, check the status of the machine that was created by the machine set manifests created at install time. - -. To check the machine sets created from the subnet you added to the `install-config.yaml` file, run the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -cluster-7xw5g-edge-us-east-1-nyc-1a 1 1 1 1 3h4m -cluster-7xw5g-worker-us-east-1a 1 1 1 1 3h4m -cluster-7xw5g-worker-us-east-1b 1 1 1 1 3h4m -cluster-7xw5g-worker-us-east-1c 1 1 1 1 3h4m ----- - -. To check the machines that were created from the machine sets, run the following command: -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api ----- -+ -.Example output ----- -NAME PHASE TYPE REGION ZONE AGE -cluster-7xw5g-edge-us-east-1-nyc-1a-wbclh Running c5d.2xlarge us-east-1 us-east-1-nyc-1a 3h -cluster-7xw5g-master-0 Running m6i.xlarge us-east-1 us-east-1a 3h4m -cluster-7xw5g-master-1 Running m6i.xlarge us-east-1 us-east-1b 3h4m -cluster-7xw5g-master-2 Running m6i.xlarge us-east-1 us-east-1c 3h4m -cluster-7xw5g-worker-us-east-1a-rtp45 Running m6i.xlarge us-east-1 us-east-1a 3h -cluster-7xw5g-worker-us-east-1b-glm7c Running m6i.xlarge us-east-1 us-east-1b 3h -cluster-7xw5g-worker-us-east-1c-qfvz4 Running m6i.xlarge us-east-1 us-east-1c 3h ----- - -. 
To check nodes with edge roles, run the following command: -+ -[source,terminal] ----- -$ oc get nodes -l node-role.kubernetes.io/edge ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-207-188.ec2.internal Ready edge,worker 172m v1.25.2+d2e245f ----- \ No newline at end of file diff --git a/modules/machine-health-checks-about.adoc b/modules/machine-health-checks-about.adoc deleted file mode 100644 index d669e0ce68e2..000000000000 --- a/modules/machine-health-checks-about.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deploying-machine-health-checks.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="machine-health-checks-about_{context}"] -= About machine health checks - -[NOTE] -==== -You can only apply a machine health check to control plane machines on clusters that use control plane machine sets. -==== - -To monitor machine health, create a resource to define the configuration for a controller. Set a condition to check, such as staying in the `NotReady` status for five minutes or displaying a permanent condition in the node-problem-detector, and a label for the set of machines to monitor. - -The controller that observes a `MachineHealthCheck` resource checks for the defined condition. If a machine fails the health check, the machine is automatically deleted and one is created to take its place. When a machine is deleted, you see a `machine deleted` event. - -To limit disruptive impact of the machine deletion, the controller drains and deletes only one node at a time. If there are more unhealthy machines than the `maxUnhealthy` threshold allows for in the targeted pool of machines, remediation stops and therefore enables manual intervention. - -[NOTE] -==== -Consider the timeouts carefully, accounting for workloads and requirements. - -* Long timeouts can result in long periods of downtime for the workload on the unhealthy machine. -* Too short timeouts can result in a remediation loop. For example, the timeout for checking the `NotReady` status must be long enough to allow the machine to complete the startup process. -==== - -To stop the check, remove the resource. - -[id="machine-health-checks-limitations_{context}"] -== Limitations when deploying machine health checks - -There are limitations to consider before deploying a machine health check: - -* Only machines owned by a machine set are remediated by a machine health check. -* If the node for a machine is removed from the cluster, a machine health check considers the machine to be unhealthy and remediates it immediately. -* If the corresponding node for a machine does not join the cluster after the `nodeStartupTimeout`, the machine is remediated. -* A machine is remediated immediately if the `Machine` resource phase is `Failed`. diff --git a/modules/machine-health-checks-creating.adoc b/modules/machine-health-checks-creating.adoc deleted file mode 100644 index 74f5f4ce891e..000000000000 --- a/modules/machine-health-checks-creating.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deploying-machine-health-checks.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="machine-health-checks-creating_{context}"] -= Creating a machine health check resource - -You can create a `MachineHealthCheck` resource for machine sets in your cluster. 
- -[NOTE] -==== -You can only apply a machine health check to control plane machines on clusters that use control plane machine sets. -==== - -.Prerequisites - -* Install the `oc` command line interface. - -.Procedure - -. Create a `healthcheck.yml` file that contains the definition of your machine health check. - -. Apply the `healthcheck.yml` file to your cluster: -+ -[source,terminal] ----- -$ oc apply -f healthcheck.yml ----- diff --git a/modules/machine-health-checks-pausing-web-console.adoc b/modules/machine-health-checks-pausing-web-console.adoc deleted file mode 100644 index 5638634d320c..000000000000 --- a/modules/machine-health-checks-pausing-web-console.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: - -// * updating/updating-cluster-within-minor.adoc - -:_content-type: PROCEDURE -[id="machine-health-checks-pausing-web-console_{context}"] -= Pausing a MachineHealthCheck resource by using the web console - -During the update process, nodes in the cluster might become temporarily unavailable. In the case of worker nodes, the machine health check might identify such nodes as unhealthy and reboot them. To avoid rebooting such nodes, pause all the `MachineHealthCheck` resources before updating the cluster. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. -. Navigate to *Compute* -> *MachineHealthChecks*. -. To pause the machine health checks, add the `cluster.x-k8s.io/paused=""` annotation to each `MachineHealthCheck` resource. For example, to add the annotation to the `machine-api-termination-handler` resource, complete the following steps: -.. Click the Options menu {kebab} next to the `machine-api-termination-handler` and click *Edit annotations*. -.. In the *Edit annotations* dialog, click *Add more*. -.. In the *Key* and *Value* fields, add `cluster.x-k8s.io/paused` and `""` values, respectively, and click *Save*. diff --git a/modules/machine-health-checks-pausing.adoc b/modules/machine-health-checks-pausing.adoc deleted file mode 100644 index 7e7fd69e06ce..000000000000 --- a/modules/machine-health-checks-pausing.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: - -// * updating/updating-cluster-cli.adoc -// * updating/updating-cluster-within-minor.adoc -// * updating/updating-restricted-network-cluster/restricted-network-update.adoc - -:_content-type: PROCEDURE -[id="machine-health-checks-pausing_{context}"] -= Pausing a MachineHealthCheck resource - -During the update process, nodes in the cluster might become temporarily unavailable. In the case of worker nodes, the machine health check might identify such nodes as unhealthy and reboot them. To avoid rebooting such nodes, pause all the `MachineHealthCheck` resources before updating the cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -.Procedure - -. To list all the available `MachineHealthCheck` resources that you want to pause, run the following command: -+ -[source,terminal] ----- -$ oc get machinehealthcheck -n openshift-machine-api ----- - -. To pause the machine health checks, add the `cluster.x-k8s.io/paused=""` annotation to the `MachineHealthCheck` resource. 
Run the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused="" ----- -+ -The annotated `MachineHealthCheck` resource resembles the following YAML file: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: example - namespace: openshift-machine-api - annotations: - cluster.x-k8s.io/paused: "" -spec: - selector: - matchLabels: - role: worker - unhealthyConditions: - - type: "Ready" - status: "Unknown" - timeout: "300s" - - type: "Ready" - status: "False" - timeout: "300s" - maxUnhealthy: "40%" -status: - currentHealthy: 5 - expectedMachines: 5 ----- -+ -[IMPORTANT] -==== -Resume the machine health checks after updating the cluster. To resume the check, remove the pause annotation from the `MachineHealthCheck` resource by running the following command: - -[source,terminal] ----- -$ oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused- ----- -==== diff --git a/modules/machine-health-checks-resource.adoc b/modules/machine-health-checks-resource.adoc deleted file mode 100644 index fbfa75ef1a8f..000000000000 --- a/modules/machine-health-checks-resource.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deploying-machine-health-checks.adoc -// * post_installation_configuration/node-tasks.adoc - - -[id="machine-health-checks-resource_{context}"] -= Sample MachineHealthCheck resource - -The `MachineHealthCheck` resource for all cloud-based installation types, other than bare metal, resembles the following YAML file: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: example <1> - namespace: openshift-machine-api -spec: - selector: - matchLabels: - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <cluster_name>-<label>-<zone> <3> - unhealthyConditions: - - type: "Ready" - timeout: "300s" <4> - status: "False" - - type: "Ready" - timeout: "300s" <4> - status: "Unknown" - maxUnhealthy: "40%" <5> - nodeStartupTimeout: "10m" <6> ----- -<1> Specify the name of the machine health check to deploy. -<2> Specify a label for the machine pool that you want to check. -<3> Specify the machine set to track in `<cluster_name>-<label>-<zone>` format. For example, `prod-node-us-east-1a`. -<4> Specify the timeout duration for a node condition. If a condition is met for the duration of the timeout, the machine will be remediated. Long timeouts can result in long periods of downtime for a workload on an unhealthy machine. -<5> Specify the number of machines allowed to be concurrently remediated in the targeted pool. This can be set as a percentage or an integer. If the number of unhealthy machines exceeds the limit set by `maxUnhealthy`, remediation is not performed. -<6> Specify the timeout duration that a machine health check must wait for a node to join the cluster before a machine is determined to be unhealthy. - -[NOTE] -==== -The `matchLabels` are examples only; you must map your machine groups based on your specific needs. -==== - -[id="machine-health-checks-short-circuiting_{context}"] -== Short-circuiting machine health check remediation - -Short-circuiting ensures that machine health checks remediate machines only when the cluster is healthy. 
-Short-circuiting is configured through the `maxUnhealthy` field in the `MachineHealthCheck` resource. - -If the user defines a value for the `maxUnhealthy` field, before remediating any machines, the `MachineHealthCheck` compares the value of `maxUnhealthy` with the number of machines within its target pool that it has determined to be unhealthy. Remediation is not performed if the number of unhealthy machines exceeds the `maxUnhealthy` limit. - -[IMPORTANT] -==== -If `maxUnhealthy` is not set, the value defaults to `100%` and the machines are remediated regardless of the state of the cluster. -==== - -The appropriate `maxUnhealthy` value depends on the scale of the cluster you deploy and how many machines the `MachineHealthCheck` covers. For example, you can use the `maxUnhealthy` value to cover multiple compute machine sets across multiple availability zones so that if you lose an entire zone, your `maxUnhealthy` setting prevents further remediation within the cluster. In global Azure regions that do not have multiple availability zones, you can use availability sets to ensure high availability. - -[IMPORTANT] -==== -If you configure a `MachineHealthCheck` resource for the control plane, set the value of `maxUnhealthy` to `1`. - -This configuration ensures that the machine health check takes no action when multiple control plane machines appear to be unhealthy. Multiple unhealthy control plane machines can indicate that the etcd cluster is degraded or that a scaling operation to replace a failed machine is in progress. - -If the etcd cluster is degraded, manual intervention might be required. If a scaling operation is in progress, the machine health check should allow it to finish. -==== - -The `maxUnhealthy` field can be set as either an integer or percentage. -There are different remediation implementations depending on the `maxUnhealthy` value. - -=== Setting maxUnhealthy by using an absolute value - -If `maxUnhealthy` is set to `2`: - -* Remediation will be performed if 2 or fewer nodes are unhealthy -* Remediation will not be performed if 3 or more nodes are unhealthy - -These values are independent of how many machines are being checked by the machine health check. - -=== Setting maxUnhealthy by using percentages - -If `maxUnhealthy` is set to `40%` and there are 25 machines being checked: - -* Remediation will be performed if 10 or fewer nodes are unhealthy -* Remediation will not be performed if 11 or more nodes are unhealthy - -If `maxUnhealthy` is set to `40%` and there are 6 machines being checked: - -* Remediation will be performed if 2 or fewer nodes are unhealthy -* Remediation will not be performed if 3 or more nodes are unhealthy - -[NOTE] -==== -The allowed number of machines is rounded down when the percentage of `maxUnhealthy` machines that are checked is not a whole number. -==== diff --git a/modules/machine-lifecycle-hook-deletion-etcd.adoc b/modules/machine-lifecycle-hook-deletion-etcd.adoc deleted file mode 100644 index f01a1677df45..000000000000 --- a/modules/machine-lifecycle-hook-deletion-etcd.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deleting-machine.adoc - -:_content-type: CONCEPT -[id="machine-lifecycle-hook-deletion-etcd_{context}"] -= Quorum protection with machine lifecycle hooks - -For {product-title} clusters that use the Machine API Operator, the etcd Operator uses lifecycle hooks for the machine deletion phase to implement a quorum protection mechanism. 
- -By using a `preDrain` lifecycle hook, the etcd Operator can control when the pods on a control plane machine are drained and removed. To protect etcd quorum, the etcd Operator prevents the removal of an etcd member until it migrates that member onto a new node within the cluster. - -This mechanism allows the etcd Operator precise control over the members of the etcd quorum and allows the Machine API Operator to safely create and remove control plane machines without specific operational knowledge of the etcd cluster. - -[id="machine-lifecycle-hook-deletion-etcd-order_{context}"] -== Control plane deletion with quorum protection processing order - -When a control plane machine is replaced on a cluster that uses a control plane machine set, the cluster temporarily has four control plane machines. When the fourth control plane node joins the cluster, the etcd Operator starts a new etcd member on the replacement node. When the etcd Operator observes that the old control plane machine is marked for deletion, it stops the etcd member on the old node and promotes the replacement etcd member to join the quorum of the cluster. - -The control plane machine `Deleting` phase proceeds in the following order: - -. A control plane machine is slated for deletion. -. The control plane machine enters the `Deleting` phase. -. To satisfy the `preDrain` lifecycle hook, the etcd Operator takes the following actions: -+ --- -.. The etcd Operator waits until a fourth control plane machine is added to the cluster as an etcd member. This new etcd member has a state of `Running` but not `ready` until it receives the full database update from the etcd leader. -.. When the new etcd member receives the full database update, the etcd Operator promotes the new etcd member to a voting member and removes the old etcd member from the cluster. --- -After this transition is complete, it is safe for the old etcd pod and its data to be removed, so the `preDrain` lifecycle hook is removed. -. The control plane machine status condition `Drainable` is set to `True`. -. The machine controller attempts to drain the node that is backed by the control plane machine. -** If draining fails, `Drained` is set to `False` and the machine controller attempts to drain the node again. -** If draining succeeds, `Drained` is set to `True`. -. The control plane machine status condition `Drained` is set to `True`. -. If no other Operators have added a `preTerminate` lifecycle hook, the control plane machine status condition `Terminable` is set to `True`. -. The machine controller removes the instance from the infrastructure provider. -. The machine controller deletes the `Node` object. - -.YAML snippet demonstrating the etcd quorum protection `preDrain` lifecycle hook -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... -spec: - lifecycleHooks: - preDrain: - - name: EtcdQuorumOperator <1> - owner: clusteroperator/etcd <2> - ... ----- -<1> The name of the `preDrain` lifecycle hook. -<2> The hook-implementing controller that manages the `preDrain` lifecycle hook. 
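To see this hook on a running cluster, you can inspect the `lifecycleHooks` stanza of a control plane machine. The machine name is a placeholder, and the exact output depends on which Operators have added hooks:

[source,terminal]
----
$ oc get machine <control_plane_machine_name> -n openshift-machine-api -o jsonpath='{.spec.lifecycleHooks}'
----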
\ No newline at end of file diff --git a/modules/machine-lifecycle-hook-deletion-format.adoc b/modules/machine-lifecycle-hook-deletion-format.adoc deleted file mode 100644 index 442e40aef11d..000000000000 --- a/modules/machine-lifecycle-hook-deletion-format.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deleting-machine.adoc - -:_content-type: REFERENCE -[id="machine-lifecycle-hook-deletion-format_{context}"] -= Deletion lifecycle hook configuration - -The following YAML snippets demonstrate the format and placement of deletion lifecycle hook configurations within a machine set: - -.YAML snippet demonstrating a `preDrain` lifecycle hook -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... -spec: - lifecycleHooks: - preDrain: - - name: <hook_name> <1> - owner: <hook_owner> <2> - ... ----- -<1> The name of the `preDrain` lifecycle hook. -<2> The hook-implementing controller that manages the `preDrain` lifecycle hook. - -.YAML snippet demonstrating a `preTerminate` lifecycle hook -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... -spec: - lifecycleHooks: - preTerminate: - - name: <hook_name> <1> - owner: <hook_owner> <2> - ... ----- -<1> The name of the `preTerminate` lifecycle hook. -<2> The hook-implementing controller that manages the `preTerminate` lifecycle hook. - -[discrete] -[id="machine-lifecycle-hook-deletion-example_{context}"] -== Example lifecycle hook configuration - -The following example demonstrates the implementation of multiple fictional lifecycle hooks that interrupt the machine deletion process: - -.Example configuration for lifecycle hooks -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: Machine -metadata: - ... -spec: - lifecycleHooks: - preDrain: <1> - - name: MigrateImportantApp - owner: my-app-migration-controller - preTerminate: <2> - - name: BackupFileSystem - owner: my-backup-controller - - name: CloudProviderSpecialCase - owner: my-custom-storage-detach-controller <3> - - name: WaitForStorageDetach - owner: my-custom-storage-detach-controller - ... ----- -<1> A `preDrain` lifecycle hook stanza that contains a single lifecycle hook. -<2> A `preTerminate` lifecycle hook stanza that contains three lifecycle hooks. -<3> A hook-implementing controller that manages two `preTerminate` lifecycle hooks: `CloudProviderSpecialCase` and `WaitForStorageDetach`. diff --git a/modules/machine-lifecycle-hook-deletion-uses.adoc b/modules/machine-lifecycle-hook-deletion-uses.adoc deleted file mode 100644 index ca7c3846cefe..000000000000 --- a/modules/machine-lifecycle-hook-deletion-uses.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deleting-machine.adoc - -:_content-type: CONCEPT -[id="machine-lifecycle-hook-deletion-uses_{context}"] -= Machine deletion lifecycle hook examples for Operator developers - -Operators can use lifecycle hooks for the machine deletion phase to modify the machine deletion process. The following examples demonstrate possible ways that an Operator can use this functionality. - -[discrete] -[id="machine-lifecycle-hook-deletion-uses-predrain_{context}"] -== Example use cases for `preDrain` lifecycle hooks - -Proactively replacing machines:: An Operator can use a `preDrain` lifecycle hook to ensure that a replacement machine is successfully created and joined to the cluster before removing the instance of a deleted machine. 
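Lifecycle hooks are normally added and removed programmatically by a hook-implementing controller, but for experimentation you could set one of the fictional hooks above on a test machine with a merge patch, as in the following hypothetical sketch. The machine name is a placeholder, and because a merge patch replaces the entire `preDrain` list, do not use this approach on machines that already carry hooks from other controllers:

[source,terminal]
----
$ oc patch machine <machine_name> -n openshift-machine-api --type merge \
  --patch '{"spec":{"lifecycleHooks":{"preDrain":[{"name":"MigrateImportantApp","owner":"my-app-migration-controller"}]}}}'
----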
This can mitigate the impact of disruptions during machine replacement or of replacement instances that do not initialize promptly. - -Implementing custom draining logic:: An Operator can use a `preDrain` lifecycle hook to replace the machine controller draining logic with a different draining controller. By replacing the draining logic, the Operator would have more flexibility and control over the lifecycle of the workloads on each node. -+ -For example, the machine controller drain libraries do not support ordering, but a custom drain provider could provide this functionality. By using a custom drain provider, an Operator could prioritize moving mission-critical applications before draining the node to ensure that service interruptions are minimized in cases where cluster capacity is limited. - -[discrete] -[id="machine-lifecycle-hook-deletion-uses-preterminate_{context}"] -== Example use cases for `preTerminate` lifecycle hooks - -Verifying storage detachment:: An Operator can use a `preTerminate` lifecycle hook to ensure that storage that is attached to a machine is detached before the machine is removed from the infrastructure provider. - -Improving log reliability:: After a node is drained, the log exporter daemon requires some time to synchronize logs to the centralized logging system. -+ -A logging Operator can use a `preTerminate` lifecycle hook to add a delay between when the node drains and when the machine is removed from the infrastructure provider. This delay would provide time for the Operator to ensure that the main workloads are removed and no longer adding to the log backlog. When no new data is being added to the log backlog, the log exporter can catch up on the synchronization process, thus ensuring that all application logs are captured. \ No newline at end of file diff --git a/modules/machine-lifecycle-hook-deletion.adoc b/modules/machine-lifecycle-hook-deletion.adoc deleted file mode 100644 index 336c62a81a4f..000000000000 --- a/modules/machine-lifecycle-hook-deletion.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/deleting-machine.adoc -// Others TBD. - -//Placement considerations: Is this general info? Does it go with deletion docs? CPMS docs? etcd docs? Possibly some combo of those, or perhaps etcd as an example of a use case? - -:_content-type: CONCEPT -[id="machine-lifecycle-hook-deletion_{context}"] -= Lifecycle hooks for the machine deletion phase - -Machine lifecycle hooks are points in the reconciliation lifecycle of a machine where the normal lifecycle process can be interrupted. In the machine `Deleting` phase, these interruptions provide the opportunity for components to modify the machine deletion process. - -[id="machine-lifecycle-hook-deletion-terms_{context}"] -== Terminology and definitions - -To understand the behavior of lifecycle hooks for the machine deletion phase, you must understand the following concepts: - -Reconciliation:: Reconciliation is the process by which a controller attempts to make the real state of the cluster and the objects that it comprises match the requirements in an object specification. - -Machine controller:: The machine controller manages the reconciliation lifecycle for a machine. For machines on cloud platforms, the machine controller is the combination of an {product-title} controller and a platform-specific actuator from the cloud provider. 
-+ -In the context of machine deletion, the machine controller performs the following actions: --- -* Drain the node that is backed by the machine. -* Delete the machine instance from the cloud provider. -* Delete the `Node` object. --- - -Lifecycle hook:: A lifecycle hook is a defined point in the reconciliation lifecycle of an object where the normal lifecycle process can be interrupted. Components can use a lifecycle hook to inject changes into the process to accomplish a desired outcome. -+ -There are two lifecycle hooks in the machine `Deleting` phase: --- -* `preDrain` lifecycle hooks must be resolved before the node that is backed by the machine can be drained. -* `preTerminate` lifecycle hooks must be resolved before the instance can be removed from the infrastructure provider. --- - -Hook-implementing controller:: A hook-implementing controller is a controller, other than the machine controller, that can interact with a lifecycle hook. A hook-implementing controller can do one or more of the following actions: -+ --- -* Add a lifecycle hook. -* Respond to a lifecycle hook. -* Remove a lifecycle hook. --- -+ -Each lifecycle hook has a single hook-implementing controller, but a hook-implementing controller can manage one or more hooks. - -[id="machine-lifecycle-hook-deletion-order_{context}"] -== Machine deletion processing order - -In {product-title} {product-version}, there are two lifecycle hooks for the machine deletion phase: `preDrain` and `preTerminate`. When all hooks for a given lifecycle point are removed, reconciliation continues as normal. - -.Machine deletion flow -image::310_OpenShift_machine_deletion_hooks_0223.png["The sequence of events in the machine `Deleting` phase."] - -The machine `Deleting` phase proceeds in the following order: - -. An existing machine is slated for deletion for one of the following reasons: -** A user with `cluster-admin` permissions uses the `oc delete machine` command. -** The machine gets a `machine.openshift.io/delete-machine` annotation. -** The machine set that manages the machine marks it for deletion to reduce the replica count as part of reconciliation. -** The cluster autoscaler identifies a node that is unnecessary to meet the deployment needs of the cluster. -** A machine health check is configured to replace an unhealthy machine. -. The machine enters the `Deleting` phase, in which it is marked for deletion but is still present in the API. -. If a `preDrain` lifecycle hook exists, the hook-implementing controller that manages it does a specified action. -+ -Until all `preDrain` lifecycle hooks are satisfied, the machine status condition `Drainable` is set to `False`. -. There are no unresolved `preDrain` lifecycle hooks and the machine status condition `Drainable` is set to `True`. -. The machine controller attempts to drain the node that is backed by the machine. -** If draining fails, `Drained` is set to `False` and the machine controller attempts to drain the node again. -** If draining succeeds, `Drained` is set to `True`. -. The machine status condition `Drained` is set to `True`. -. If a `preTerminate` lifecycle hook exists, the hook-implementing controller that manages it does a specified action. -+ -Until all `preTerminate` lifecycle hooks are satisfied, the machine status condition `Terminable` is set to `False`. -. There are no unresolved `preTerminate` lifecycle hooks and the machine status condition `Terminable` is set to `True`. -. The machine controller removes the instance from the infrastructure provider. -. 
The machine controller deletes the `Node` object.
-
diff --git a/modules/machine-node-custom-partition.adoc b/modules/machine-node-custom-partition.adoc
deleted file mode 100644
index dd35e1c92c23..000000000000
--- a/modules/machine-node-custom-partition.adoc
+++ /dev/null
@@ -1,276 +0,0 @@
-// Module included in the following assemblies:
-//
-// * machine_management/
-// * machine_management/
-// * post_installation_configuration/node-tasks.adoc
-
-:_content-type: PROCEDURE
-[id="machine-node-custom-partition_{context}"]
-= Adding a new {op-system} worker node with a custom `/var` partition in AWS
-
-{product-title} supports partitioning devices during installation by using machine configs that are processed during the bootstrap. However, if you use `/var` partitioning, the device name must be determined at installation and cannot be changed. You cannot add different instance types as nodes if they have a different device naming schema. For example, if you configured the `/var` partition with the default AWS device name for `m4.large` instances, `/dev/xvdb`, you cannot directly add an AWS `m5.large` instance, as `m5.large` instances use a `/dev/nvme1n1` device by default. The device might fail to partition due to the different naming schema.
-
-The procedure in this section shows how to add a new {op-system-first} compute node with an instance that uses a different device name from what was configured at installation. You create a custom user data secret and configure a new compute machine set. These steps are specific to an AWS cluster, but the principles also apply to other cloud deployments. However, the device naming schema differs between deployments and must be determined on a per-case basis.
-
-.Procedure
-
-. On a command line, change to the `openshift-machine-api` namespace:
-+
-[source,terminal]
-----
-$ oc project openshift-machine-api
-----
-
-. Create a new secret from the `worker-user-data` secret:
-
-.. Export the `userData` section of the secret to a text file:
-+
-[source,terminal]
-----
-$ oc get secret worker-user-data --template='{{index .data.userData | base64decode}}' | jq > userData.txt
-----
-
-.. Edit the text file to add the `storage`, `filesystems`, and `systemd` stanzas for the partitions you want to use for the new node. You can specify any link:https://coreos.github.io/ignition/configuration-v3_2/[Ignition configuration parameters] as needed.
-+
-[NOTE]
-====
-Do not change the values in the `ignition` stanza.
-====
-+
-[source,json]
-----
-{
-  "ignition": {
-    "config": {
-      "merge": [
-        {
-          "source": "https:...."
-        }
-      ]
-    },
-    "security": {
-      "tls": {
-        "certificateAuthorities": [
-          {
-            "source": "data:text/plain;charset=utf-8;base64,.....=="
-          }
-        ]
-      }
-    },
-    "version": "3.2.0"
-  },
-  "storage": {
-    "disks": [
-      {
-        "device": "/dev/nvme1n1", <1>
-        "partitions": [
-          {
-            "label": "var",
-            "sizeMiB": 50000, <2>
-            "startMiB": 0 <3>
-          }
-        ]
-      }
-    ],
-    "filesystems": [
-      {
-        "device": "/dev/disk/by-partlabel/var", <4>
-        "format": "xfs", <5>
-        "path": "/var" <6>
-      }
-    ]
-  },
-  "systemd": {
-    "units": [ <7>
-      {
-        "contents": "[Unit]\nBefore=local-fs.target\n[Mount]\nWhere=/var\nWhat=/dev/disk/by-partlabel/var\nOptions=defaults,pquota\n[Install]\nWantedBy=local-fs.target\n",
-        "enabled": true,
-        "name": "var.mount"
-      }
-    ]
-  }
-}
-----
-//Copied from installation-disk-partitioning-upi-templates.adoc
-<1> Specifies an absolute path to the AWS block device.
-<2> Specifies the size of the data partition in mebibytes (MiB).
-<3> Specifies the start of the partition in mebibytes (MiB). When adding a data partition to the boot disk, a minimum value of 25000 mebibytes is recommended. The root file system is automatically resized to fill all available space up to the specified offset. If no value is specified, or if the specified value is smaller than the recommended minimum, the resulting root file system will be too small, and future reinstalls of {op-system} might overwrite the beginning of the data partition.
-<4> Specifies an absolute path to the `/var` partition.
-<5> Specifies the filesystem format.
-<6> Specifies the mount point of the filesystem while Ignition is running, relative to where the root filesystem will be mounted. This is not necessarily the same as where it should be mounted in the real root, but it is recommended to make it the same.
-<7> Defines a systemd mount unit that mounts the `/dev/disk/by-partlabel/var` device to the `/var` partition.
-
-.. Extract the `disableTemplating` section from the `worker-user-data` secret to a text file:
-+
-[source,terminal]
-----
-$ oc get secret worker-user-data --template='{{index .data.disableTemplating | base64decode}}' | jq > disableTemplating.txt
-----
-
-.. Create the new user data secret file from the two text files. This user data secret passes the additional node partition information in the `userData.txt` file to the newly created node.
-+
-[source,terminal]
-----
-$ oc create secret generic worker-user-data-x5 --from-file=userData=userData.txt --from-file=disableTemplating=disableTemplating.txt
-----
-
-. Create a new compute machine set for the new node:
-
-.. Create a new compute machine set YAML file, similar to the following, which is configured for AWS. Add the required partitions and the newly created user data secret:
-+
-[TIP]
-====
-Use an existing compute machine set as a template and change the parameters as needed for the new node.
-==== -+ -[source,terminal] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: auto-52-92tf4 - name: worker-us-east-2-nvme1n1 <1> - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: auto-52-92tf4 - machine.openshift.io/cluster-api-machineset: auto-52-92tf4-worker-us-east-2b - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: auto-52-92tf4 - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: auto-52-92tf4-worker-us-east-2b - spec: - metadata: {} - providerSpec: - value: - ami: - id: ami-0c2dbd95931a - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - DeviceName: /dev/nvme1n1 <2> - ebs: - encrypted: true - iops: 0 - volumeSize: 120 - volumeType: gp2 - - DeviceName: /dev/nvme1n2 <3> - ebs: - encrypted: true - iops: 0 - volumeSize: 50 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: auto-52-92tf4-worker-profile - instanceType: m6i.large - kind: AWSMachineProviderConfig - metadata: - creationTimestamp: null - placement: - availabilityZone: us-east-2b - region: us-east-2 - securityGroups: - - filters: - - name: tag:Name - values: - - auto-52-92tf4-worker-sg - subnet: - id: subnet-07a90e5db1 - tags: - - name: kubernetes.io/cluster/auto-52-92tf4 - value: owned - userDataSecret: - name: worker-user-data-x5 <4> ----- -<1> Specifies a name for the new node. -<2> Specifies an absolute path to the AWS block device, here an encrypted EBS volume. -<3> Optional. Specifies an additional EBS volume. -<4> Specifies the user data secret file. - -.. Create the compute machine set: -+ -[source,yaml] ----- -$ oc create -f <file-name>.yaml ----- -+ -The machines might take a few moments to become available. - -. Verify that the new partition and nodes are created: - -.. Verify that the compute machine set is created: -+ -[source,terminal] ----- -$ oc get machineset ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -ci-ln-2675bt2-76ef8-bdgsc-worker-us-east-1a 1 1 1 1 124m -ci-ln-2675bt2-76ef8-bdgsc-worker-us-east-1b 2 2 2 2 124m -worker-us-east-2-nvme1n1 1 1 1 1 2m35s <1> ----- -<1> This is the new compute machine set. - -.. Verify that the new node is created: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-128-78.ec2.internal Ready worker 117m v1.26.0 -ip-10-0-146-113.ec2.internal Ready master 127m v1.26.0 -ip-10-0-153-35.ec2.internal Ready worker 118m v1.26.0 -ip-10-0-176-58.ec2.internal Ready master 126m v1.26.0 -ip-10-0-217-135.ec2.internal Ready worker 2m57s v1.26.0 <1> -ip-10-0-225-248.ec2.internal Ready master 127m v1.26.0 -ip-10-0-245-59.ec2.internal Ready worker 116m v1.26.0 ----- -<1> This is new new node. - -.. 
-.. Verify that the custom `/var` partition is created on the new node:
-+
-[source,terminal]
-----
-$ oc debug node/<node-name> -- chroot /host lsblk
-----
-+
-For example:
-+
-[source,terminal]
-----
-$ oc debug node/ip-10-0-217-135.ec2.internal -- chroot /host lsblk
-----
-+
-.Example output
-+
-[source,terminal]
-----
-NAME          MAJ:MIN   RM   SIZE     RO   TYPE   MOUNTPOINT
-nvme0n1       202:0     0    120G     0    disk
-|-nvme0n1p1   202:1     0    1M       0    part
-|-nvme0n1p2   202:2     0    127M     0    part
-|-nvme0n1p3   202:3     0    384M     0    part   /boot
-`-nvme0n1p4   202:4     0    119.5G   0    part   /sysroot
-nvme1n1       202:16    0    50G      0    disk
-`-nvme1n1p1   202:17    0    48.8G    0    part   /var <1>
-----
-<1> The `nvme1n1` device is mounted to the `/var` partition.
diff --git a/modules/machine-user-infra-machines-iso.adoc b/modules/machine-user-infra-machines-iso.adoc
deleted file mode 100644
index b292a2c0ab5f..000000000000
--- a/modules/machine-user-infra-machines-iso.adoc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Module included in the following assemblies:
-//
-// * machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc
-// * post_installation_configuration/node-tasks.adoc
-ifeval::["{context}" == "multi-architecture-configuration"]
-:multi:
-endif::[]
-
-:_content-type: PROCEDURE
-[id="machine-user-infra-machines-iso_{context}"]
-= Creating {op-system} machines using an ISO image
-
-You can create more {op-system-first} compute machines for your bare metal cluster by using an ISO image to create the machines.
-
-.Prerequisites
-
-* Obtain the URL of the Ignition config file for the compute machines for your cluster. You uploaded this file to your HTTP server during installation.
-* You must have the OpenShift CLI (`oc`) installed.
-
-.Procedure
-
-. Extract the Ignition config file from the cluster by running the following command:
-+
-[source,terminal]
-----
-$ oc extract -n openshift-machine-api secret/worker-user-data-managed --keys=userData --to=- > worker.ign
-----
-
-. Upload the `worker.ign` Ignition config file you exported from your cluster to your HTTP server. Note the URLs of these files.
-
-. Validate that the Ignition config files are available at the URLs. The following example gets the Ignition config file for the compute machines:
-+
-[source,terminal]
-----
-$ curl -k http://<HTTP_server>/worker.ign
-----
-
-. You can obtain the URL of the ISO image for booting your new machine by running the following command:
-+
-[source,terminal]
-----
-RHCOS_VHD_ORIGIN_URL=$(oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' | jq -r '.architectures.<architecture>.artifacts.metal.formats.iso.disk.location')
-----
-
-. Use the ISO file to install {op-system} on more compute machines. Use the same method that you used when you created machines before you installed the cluster:
-** Burn the ISO image to a disk and boot it directly.
-** Use ISO redirection with a LOM interface.
-
-. Boot the {op-system} ISO image without specifying any options or interrupting the live boot sequence. Wait for the installer to boot into a shell prompt in the {op-system} live environment.
-+
-[NOTE]
-====
-You can interrupt the {op-system} installation boot process to add kernel arguments. However, for this ISO procedure you must use the `coreos-installer` command as outlined in the following steps, instead of adding kernel arguments.
-====
-
-. Run the `coreos-installer` command and specify the options that meet your installation requirements.
At a minimum, you must specify the URL that points to the Ignition config file for the node type, and the device that you are installing to: -+ -[source,terminal] ----- -$ sudo coreos-installer install --ignition-url=http://<HTTP_server>/<node_type>.ign <device> --ignition-hash=sha512-<digest> <1><2> ----- -<1> You must run the `coreos-installer` command by using `sudo`, because the `core` user does not have the required root privileges to perform the installation. -<2> The `--ignition-hash` option is required when the Ignition config file is obtained through an HTTP URL to validate the authenticity of the Ignition config file on the cluster node. `<digest>` is the Ignition config file SHA512 digest obtained in a preceding step. -+ -[NOTE] -==== -If you want to provide your Ignition config files through an HTTPS server that uses TLS, you can add the internal certificate authority (CA) to the system trust store before running `coreos-installer`. -==== -+ -The following example initializes a bootstrap node installation to the `/dev/sda` device. The Ignition config file for the bootstrap node is obtained from an HTTP web server with the IP address 192.168.1.2: -+ -[source,terminal] ----- -$ sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b ----- - -. Monitor the progress of the {op-system} installation on the console of the machine. -+ -[IMPORTANT] -==== -Ensure that the installation is successful on each node before commencing with the {product-title} installation. Observing the installation process can also help to determine the cause of {op-system} installation issues that might arise. -==== - -. Continue to create more compute machines for your cluster. - -ifeval::["{context}" == "multi-architecture-configuration"] -:!multi: -endif::[] \ No newline at end of file diff --git a/modules/machine-user-infra-machines-pxe.adoc b/modules/machine-user-infra-machines-pxe.adoc deleted file mode 100644 index 9707599f42a4..000000000000 --- a/modules/machine-user-infra-machines-pxe.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/user_infra/adding-bare-metal-compute-user-infra.adoc -// * post_installation_configuration/node-tasks.adoc -// * post_installation_configuration/multi-architecture-configuration.adoc - -:_content-type: PROCEDURE -[id="machine-user-infra-machines-pxe_{context}"] -= Creating {op-system} machines by PXE or iPXE booting - -You can create more {op-system-first} compute machines for your bare metal cluster by using PXE or iPXE booting. - -.Prerequisites - -* Obtain the URL of the Ignition config file for the compute machines for your cluster. You uploaded this file to your HTTP server during installation. -* Obtain the URLs of the {op-system} ISO image, compressed metal BIOS, `kernel`, and `initramfs` files that you uploaded to your HTTP server during cluster installation. -* You have access to the PXE booting infrastructure that you used to create the machines for your {product-title} cluster during installation. The machines must boot from their local disks after {op-system} is installed on them. -* If you use UEFI, you have access to the `grub.conf` file that you modified during {product-title} installation. - -.Procedure - -. 
Confirm that your PXE or iPXE installation for the {op-system} images is correct. - -** For PXE: -+ ----- -DEFAULT pxeboot -TIMEOUT 20 -PROMPT 0 -LABEL pxeboot - KERNEL http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> <1> - APPEND initrd=http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/worker.ign coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img <2> ----- -<1> Specify the location of the live `kernel` file that you uploaded to your HTTP server. -<2> Specify locations of the {op-system} files that you uploaded to your HTTP server. The `initrd` parameter value is the location of the live `initramfs` file, the `coreos.inst.ignition_url` parameter value is the location of the worker Ignition config file, and the `coreos.live.rootfs_url` parameter value is the location of the live `rootfs` file. The `coreos.inst.ignition_url` and `coreos.live.rootfs_url` parameters only support HTTP and HTTPS. -+ -[NOTE] -==== -This configuration does not enable serial console access on machines with a graphical console. To configure a different console, add one or more `console=` arguments to the `APPEND` line. For example, add `console=tty0 console=ttyS0` to set the first PC serial port as the primary console and the graphical console as a secondary console. For more information, see link:https://access.redhat.com/articles/7212[How does one set up a serial terminal and/or console in Red Hat Enterprise Linux?]. -==== - -** For iPXE (`x86_64` + `aarch64`): -+ ----- -kernel http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> initrd=main coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/worker.ign <1> <2> -initrd --name main http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img <3> -boot ----- -<1> Specify the locations of the {op-system} files that you uploaded to your -HTTP server. The `kernel` parameter value is the location of the `kernel` file, -the `initrd=main` argument is needed for booting on UEFI systems, -the `coreos.live.rootfs_url` parameter value is the location of the `rootfs` file, -and the `coreos.inst.ignition_url` parameter value is the -location of the worker Ignition config file. -<2> If you use multiple NICs, specify a single interface in the `ip` option. -For example, to use DHCP on a NIC that is named `eno1`, set `ip=eno1:dhcp`. -<3> Specify the location of the `initramfs` file that you uploaded to your HTTP server. -+ -[NOTE] -==== -This configuration does not enable serial console access on machines with a graphical console To configure a different console, add one or more `console=` arguments to the `kernel` line. For example, add `console=tty0 console=ttyS0` to set the first PC serial port as the primary console and the graphical console as a secondary console. For more information, see link:https://access.redhat.com/articles/7212[How does one set up a serial terminal and/or console in Red Hat Enterprise Linux?] and "Enabling the serial console for PXE and ISO installation" in the "Advanced {op-system} installation configuration" section. -==== -+ -[NOTE] -==== -To network boot the CoreOS `kernel` on `aarch64` architecture, you need to use a version of iPXE build with the `IMAGE_GZIP` option enabled. See link:https://ipxe.org/buildcfg/image_gzip[`IMAGE_GZIP` option in iPXE]. 
-==== - -** For PXE (with UEFI and GRUB as second stage) on `aarch64`: -+ ----- -menuentry 'Install CoreOS' { - linux rhcos-<version>-live-kernel-<architecture> coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/worker.ign <1> <2> - initrd rhcos-<version>-live-initramfs.<architecture>.img <3> -} ----- -<1> Specify the locations of the {op-system} files that you uploaded to your -HTTP/TFTP server. The `kernel` parameter value is the location of the `kernel` file on your TFTP server. -The `coreos.live.rootfs_url` parameter value is the location of the `rootfs` file, and the `coreos.inst.ignition_url` parameter value is the location of the worker Ignition config file on your HTTP Server. -<2> If you use multiple NICs, specify a single interface in the `ip` option. -For example, to use DHCP on a NIC that is named `eno1`, set `ip=eno1:dhcp`. -<3> Specify the location of the `initramfs` file that you uploaded to your TFTP server. - -. Use the PXE or iPXE infrastructure to create the required compute machines for your cluster. \ No newline at end of file diff --git a/modules/machine-user-provisioned-limitations.adoc b/modules/machine-user-provisioned-limitations.adoc deleted file mode 100644 index 814eb8bfcf7d..000000000000 --- a/modules/machine-user-provisioned-limitations.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-osp.adoc -// * machine_management/creating_machinesets/creating-machineset-rhv.adoc -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// * machine_management/deploying-machine-health-checks.adoc -// * machine_management/manually-scaling-machinesets.adoc -// * post_installation_configuration/node-tasks.adoc -// * nodes-nodes-creating-infrastructure-nodes.adoc - -[IMPORTANT] -==== -You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. - -Clusters with the infrastructure platform type `none` cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. 
- -To view the platform type for your cluster, run the following command: - -[source,terminal] ----- -$ oc get infrastructure cluster -o jsonpath='{.status.platform}' ----- -==== diff --git a/modules/machine-user-provisioned-rhv.adoc b/modules/machine-user-provisioned-rhv.adoc deleted file mode 100644 index 9da834d1a7a3..000000000000 --- a/modules/machine-user-provisioned-rhv.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// * machine_management/user_infra/adding-rhv-compute-user-infra.adoc - -:_content-type: PROCEDURE -[id="machine-user-provisioned-rhv_{context}"] -= Adding more compute machines to a cluster on {rh-virtualization} - -.Procedure - -. Modify the `inventory.yml` file to include the new workers. -. Run the `create-templates-and-vms` Ansible playbook to create the disks and virtual machines: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml create-templates-and-vms.yml ----- -. Run the `workers.yml` Ansible playbook to start the virtual machines: -+ -[source,terminal] ----- -$ ansible-playbook -i inventory.yml workers.yml ----- -. CSRs for new workers joining the cluster must be approved by the administrator. -The following command helps to approve all pending requests: -+ -[source,terminal] ----- -$ oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs oc adm certificate approve ----- diff --git a/modules/machine-vsphere-machines.adoc b/modules/machine-vsphere-machines.adoc deleted file mode 100644 index 9597ef6458c2..000000000000 --- a/modules/machine-vsphere-machines.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * machine_management/user_infra/adding-vsphere-compute-user-infra.adoc - -ifeval::["{context}" == "installing-vsphere"] -:three-node-cluster: -endif::[] - -:_content-type: PROCEDURE -[id="machine-vsphere-machines_{context}"] -= Adding more compute machines to a cluster in vSphere - -You can add more compute machines to a user-provisioned {product-title} cluster on VMware vSphere. - -ifdef::three-node-cluster[] -[NOTE] -==== -If you are installing a three-node cluster, skip this step. A three-node cluster consists of three control plane machines, which also act as compute machines. -==== -endif::three-node-cluster[] - -.Prerequisites - -* Obtain the base64-encoded Ignition file for your compute machines. -* You have access to the vSphere template that you created for your cluster. - -.Procedure - -. After the template deploys, deploy a VM for a machine in the cluster. -.. Right-click the template's name and click *Clone* -> *Clone to Virtual Machine*. -.. On the *Select a name and folder* tab, specify a name for the VM. You might include the machine type in the name, such as `compute-1`. -+ -[NOTE] -==== -Ensure that all virtual machine names across a vSphere installation are unique. -==== -.. On the *Select a name and folder* tab, select the name of the folder that you created for the cluster. -.. On the *Select a compute resource* tab, select the name of a host in your datacenter. -.. Optional: On the *Select storage* tab, customize the storage options. -.. On the *Select clone options*, select *Customize this virtual machine's hardware*. -.. On the *Customize hardware* tab, click *VM Options* -> *Advanced*. 
-*** From the *Latency Sensitivity* list, select *High*. -*** Click *Edit Configuration*, and on the *Configuration Parameters* window, click *Add Configuration Params*. Define the following parameter names and values: -**** `guestinfo.ignition.config.data`: Paste the contents of the base64-encoded compute Ignition config file for this machine type. -**** `guestinfo.ignition.config.data.encoding`: Specify `base64`. -**** `disk.EnableUUID`: Specify `TRUE`. -.. In the *Virtual Hardware* panel of the *Customize hardware* tab, modify the specified values as required. Ensure that the amount of RAM, CPU, and disk storage meets the minimum requirements for the machine type. Also, make sure to select the correct network under *Add network adapter* if there are multiple networks available. -.. Complete the configuration and power on the VM. - -. Continue to create more compute machines for your cluster. - -ifeval::["{context}" == "installing-vsphere"] -:!three-node-cluster: -endif::[] diff --git a/modules/machineconfig-modify-journald.adoc b/modules/machineconfig-modify-journald.adoc deleted file mode 100644 index 4c0612576329..000000000000 --- a/modules/machineconfig-modify-journald.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/post_installation_configuration/machine-configuration-tasks.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="machineconfig-modify-journald_{context}"] -= Configuring journald settings - -If you need to configure settings for the `journald` service on {product-title} nodes, you can do that by modifying the appropriate configuration file and passing the file to the appropriate pool of nodes as a machine config. - -This procedure describes how to modify `journald` rate limiting settings in the `/etc/systemd/journald.conf` file and apply them to worker nodes. See the `journald.conf` man page for information on how to use that file. - -.Prerequisites -* Have a running {product-title} cluster. -* Log in to the cluster as a user with administrative privileges. - -.Procedure - -. Create a Butane config file, `40-worker-custom-journald.bu`, that includes an `/etc/systemd/journald.conf` file with the required settings. -+ -[NOTE] -==== -See "Creating machine configs with Butane" for information about Butane. -==== -+ -[source,yaml] ----- -variant: openshift -version: 4.13.0 -metadata: - name: 40-worker-custom-journald - labels: - machineconfiguration.openshift.io/role: worker -storage: - files: - - path: /etc/systemd/journald.conf - mode: 0644 - overwrite: true - contents: - inline: | - # Disable rate limiting - RateLimitInterval=1s - RateLimitBurst=10000 - Storage=volatile - Compress=no - MaxRetentionSec=30s ----- - -. Use Butane to generate a `MachineConfig` object file, `40-worker-custom-journald.yaml`, containing the configuration to be delivered to the worker nodes: -+ -[source,terminal] ----- -$ butane 40-worker-custom-journald.bu -o 40-worker-custom-journald.yaml ----- - -. Apply the machine config to the pool: -+ -[source,terminal] ----- -$ oc apply -f 40-worker-custom-journald.yaml ----- - -. Check that the new machine config is applied and that the nodes are not in a degraded state. It might take a few minutes. 
The worker pool will show the updates in progress, as each node successfully has the new machine config applied: -+ -[source,terminal] ----- -$ oc get machineconfigpool -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-35 True False False 3 3 3 0 34m -worker rendered-worker-d8 False True False 3 1 1 0 34m ----- - -. To check that the change was applied, you can log in to a worker node: -+ -[source,terminal] ----- -$ oc get node | grep worker -ip-10-0-0-1.us-east-2.compute.internal Ready worker 39m v0.0.0-master+$Format:%h$ -$ oc debug node/ip-10-0-0-1.us-east-2.compute.internal -Starting pod/ip-10-0-141-142us-east-2computeinternal-debug ... -... -sh-4.2# chroot /host -sh-4.4# cat /etc/systemd/journald.conf -# Disable rate limiting -RateLimitInterval=1s -RateLimitBurst=10000 -Storage=volatile -Compress=no -MaxRetentionSec=30s -sh-4.4# exit ----- diff --git a/modules/machines-edge-machine-pool.adoc b/modules/machines-edge-machine-pool.adoc deleted file mode 100644 index 5054d3529c72..000000000000 --- a/modules/machines-edge-machine-pool.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies -// * installing/installing_aws/installing-aws-localzone.adoc - -:_content-type: CONCEPT -[id="machines-edge-machine-pool_{context}"] -= The edge compute pool for AWS Local Zones - -{product-title} 4.12 introduced a new compute pool, _edge_, that is designed for use in remote zones. The edge compute pool configuration is common between AWS Local Zone locations. However, due to the type and size limitations of resources like EC2 and EBS on Local Zone resources, the default instance type that is created can vary from the traditional worker pool. - -The default Elastic Block Store (EBS) for Local Zone locations is `gp2`, which differs from the regular worker pool. The instance type used for each Local Zone on edge compute pool also might differ from worker pools, depending on the instance offerings on the zone. - -The edge compute pool creates new labels that developers can use to deploy applications onto AWS Local Zone nodes. The new labels are: - -* `node-role.kubernetes.io/edge=''` -* `machine.openshift.io/zone-type=local-zone` -* `machine.openshift.io/zone-group=$ZONE_GROUP_NAME` - - -By default, the system creates the edge compute pool manifests only if users add AWS Local Zone subnet IDs to the list `platform.aws.subnets`. - -The edge compute pool's machine sets have a `NoSchedule taint` by default to prevent regular workloads from being spread out on those machines. Users can only run user workloads if the tolerations are defined on the pod spec. - -The following examples show `install-config.yaml` files that use the edge machine pool. - -.Configuration that uses an edge pool with default settings -[source,yaml] ----- -apiVersion: v1 -baseDomain: devcluster.openshift.com -metadata: - name: ipi-localzone -platform: - aws: - region: us-west-2 - subnets: - - publicSubnetId-1 - - publicSubnetId-2 - - publicSubnetId-3 - - privateSubnetId-1 - - privateSubnetId-2 - - privateSubnetId-3 - - publicSubnetId-LocalZone-1 - pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... 
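# The Local Zone subnet entry above (publicSubnetId-LocalZone-1 is an illustrative placeholder)
# is what causes the installation program to generate the edge compute pool manifests that are
# described in this section; the other subnet IDs are the regular public and private subnets.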
----- - -.Configuration that uses an edge pool with a custom instance type -[source,yaml] ----- -apiVersion: v1 -baseDomain: devcluster.openshift.com -metadata: - name: ipi-localzone -compute: -- name: edge - platform: - aws: - type: m5.4xlarge -platform: - aws: - region: us-west-2 - subnets: - - publicSubnetId-1 - - publicSubnetId-2 - - publicSubnetId-3 - - privateSubnetId-1 - - privateSubnetId-2 - - privateSubnetId-3 - - publicSubnetId-LocalZone-1 -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... ----- - -Instance types differ between locations. To verify availability in the Local Zone in which the cluster will run, see the AWS documentation. - -.Configuration that uses an edge pool with a custom EBS type -[source,yaml] ----- -apiVersion: v1 -baseDomain: devcluster.openshift.com -metadata: - name: ipi-localzone -compute: -- name: edge - platform: - aws: - rootVolume: - type: gp3 - size: 120 -platform: - aws: - region: us-west-2 - subnets: - - publicSubnetId-1 - - publicSubnetId-2 - - publicSubnetId-3 - - privateSubnetId-1 - - privateSubnetId-2 - - privateSubnetId-3 - - publicSubnetId-LocalZone-1 -pullSecret: '{"auths": ...}' -sshKey: ssh-ed25519 AAAA... ----- - -EBS types differ between locations. Check the AWS documentation to verify availability in the Local Zone in which the cluster will run. \ No newline at end of file diff --git a/modules/machineset-azure-accelerated-networking.adoc b/modules/machineset-azure-accelerated-networking.adoc deleted file mode 100644 index b03aff823e33..000000000000 --- a/modules/machineset-azure-accelerated-networking.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure"] -:compute: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -[id="machineset-azure-accelerated-networking_{context}"] -= Accelerated Networking for Microsoft Azure VMs - -Accelerated Networking uses single root I/O virtualization (SR-IOV) to provide Microsoft Azure VMs with a more direct path to the switch. This enhances network performance. This feature can be enabled -ifdef::compute[during or ] -after installation. - -[id="machineset-azure-accelerated-networking-limits_{context}"] -== Limitations - -Consider the following limitations when deciding whether to use Accelerated Networking: - -* Accelerated Networking is only supported on clusters where the Machine API is operational. - -* {empty} -+ -ifdef::compute[Although the minimum requirement for an Azure worker node is two vCPUs, ] -Accelerated Networking requires an Azure VM size that includes at least four vCPUs. To satisfy this requirement, you can change the value of `vmSize` in your machine set. For information about Azure VM sizes, see link:https://docs.microsoft.com/en-us/azure/virtual-machines/sizes[Microsoft Azure documentation]. - -//iiuc, this is not true for control planes since the operator will roll out changes according to the update strategy -ifdef::compute[] -* When this feature is enabled on an existing Azure cluster, only newly provisioned nodes are affected. Currently running nodes are not reconciled. To enable the feature on all nodes, you must replace each existing machine. This can be done for each machine individually, or by scaling the replicas down to zero, and then scaling back up to your desired number of replicas. 
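+
For example, one way to replace the machines in a compute machine set is to scale it down and back up. The following commands are a sketch only; the machine set name and the replica count are illustrative placeholders, and you should wait for the old machines to be removed before scaling back up so that the replacements are provisioned with Accelerated Networking enabled:
+
[source,terminal]
----
$ oc scale --replicas=0 machineset <machine_set_name> -n openshift-machine-api
$ oc scale --replicas=3 machineset <machine_set_name> -n openshift-machine-api
----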
-endif::compute[] - -ifeval::["{context}" == "creating-machineset-azure"] -:!compute: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-azure-boot-diagnostics.adoc b/modules/machineset-azure-boot-diagnostics.adoc deleted file mode 100644 index 32dc230dac8a..000000000000 --- a/modules/machineset-azure-boot-diagnostics.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure-stack-hub"] -:ash: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-azure-boot-diagnostics_{context}"] -= Enabling Azure boot diagnostics - -You can enable boot diagnostics on Azure machines that your machine set creates. - -.Prerequisites - -* Have an existing Microsoft Azure -ifdef::ash[Stack Hub] -cluster. - -.Procedure - -* Add the `diagnostics` configuration that is applicable to your storage type to the `providerSpec` field in your machine set YAML file: - -** For an Azure Managed storage account: -+ -[source,yaml] ----- -providerSpec: - diagnostics: - boot: - storageAccountType: AzureManaged <1> ----- -+ -<1> Specifies an Azure Managed storage account. - -** For an Azure Unmanaged storage account: -+ -[source,yaml] ----- -providerSpec: - diagnostics: - boot: - storageAccountType: CustomerManaged <1> - customerManaged: - storageAccountURI: https://<storage-account>.blob.core.windows.net <2> ----- -+ -<1> Specifies an Azure Unmanaged storage account. -<2> Replace `<storage-account>` with the name of your storage account. -+ -[NOTE] -==== -Only the Azure Blob Storage data service is supported. -==== - -.Verification - -* On the Microsoft Azure portal, review the *Boot diagnostics* page for a machine deployed by the machine set, and verify that you can see the serial logs for the machine. - -ifeval::["{context}" == "creating-machineset-azure-stack-hub"] -:!ash: -endif::[] \ No newline at end of file diff --git a/modules/machineset-azure-enabling-accelerated-networking-existing.adoc b/modules/machineset-azure-enabling-accelerated-networking-existing.adoc deleted file mode 100644 index 2778984e7e23..000000000000 --- a/modules/machineset-azure-enabling-accelerated-networking-existing.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure"] -:compute: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-azure-enabling-accelerated-networking-existing_{context}"] -= Enabling Accelerated Networking on an existing Microsoft Azure cluster - -You can enable Accelerated Networking on Azure by adding `acceleratedNetworking` to your machine set YAML file. - -.Prerequisites - -* Have an existing Microsoft Azure cluster where the Machine API is operational. - -.Procedure -//// -//Trying to move towards a more streamlined approach, but leaving this in in case needed -. 
List the compute machine sets in your cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -The compute machine sets are listed in the form of `<cluster-id>-worker-<region>`. -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -jmywbfb-8zqpx-worker-centralus1 1 1 1 1 15m -jmywbfb-8zqpx-worker-centralus2 1 1 1 1 15m -jmywbfb-8zqpx-worker-centralus3 1 1 1 1 15m ----- - -. For each compute machine set: - -.. Edit the custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc edit machineset <machine-set-name> ----- - -.. Add the following to the `providerSpec` field: -//// -* Add the following to the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - value: - acceleratedNetworking: true <1> - vmSize: <azure-vm-size> <2> ----- -+ -<1> This line enables Accelerated Networking. -<2> Specify an Azure VM size that includes at least four vCPUs. For information about VM sizes, see link:https://docs.microsoft.com/en-us/azure/virtual-machines/sizes[Microsoft Azure documentation]. - -ifdef::compute[] -.Next steps - -* To enable the feature on currently running nodes, you must replace each existing machine. This can be done for each machine individually, or by scaling the replicas down to zero, and then scaling back up to your desired number of replicas. -endif::compute[] - -.Verification - -* On the Microsoft Azure portal, review the *Networking* settings page for a machine provisioned by the machine set, and verify that the `Accelerated networking` field is set to `Enabled`. - -ifeval::["{context}" == "creating-machineset-azure"] -:!compute: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-azure-ephemeral-os.adoc b/modules/machineset-azure-ephemeral-os.adoc deleted file mode 100644 index cd3f1d64f2c9..000000000000 --- a/modules/machineset-azure-ephemeral-os.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc - -[id="machineset-azure-ephemeral-os_{context}"] -= Machine sets that deploy machines on Ephemeral OS disks - -You can create a compute machine set running on Azure that deploys machines on Ephemeral OS disks. Ephemeral OS disks use local VM capacity rather than remote Azure Storage. This configuration therefore incurs no additional cost and provides lower latency for reading, writing, and reimaging. - -[role="_additional-resources"] -.Additional resources - -* For more information, see the Microsoft Azure documentation about link:https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks[Ephemeral OS disks for Azure VMs]. 
diff --git a/modules/machineset-azure-ultra-disk.adoc b/modules/machineset-azure-ultra-disk.adoc deleted file mode 100644 index 3e3dfb8ca1ad..000000000000 --- a/modules/machineset-azure-ultra-disk.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * storage/persistent_storage/persistent-storage-azure.adoc -// * storage/persistent_storage/persistent-storage-csi-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure"] -:mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:pvc: -endif::[] - -:_content-type: CONCEPT -[id="machineset-azure-ultra-disk_{context}"] -ifdef::mapi,cpmso[= Machine sets that deploy machines with ultra disks as data disks] -ifdef::pvc[= Machine sets that deploy machines with ultra disks using PVCs] - -You can create a machine set running on Azure that deploys machines with ultra disks. Ultra disks are high-performance storage that are intended for use with the most demanding data workloads. - -ifdef::mapi[] -You can also create a persistent volume claim (PVC) that dynamically binds to a storage class backed by Azure ultra disks and mounts them to pods. - -[NOTE] -==== -Data disks do not support the ability to specify disk throughput or disk IOPS. You can configure these properties by using PVCs. -==== -endif::mapi[] - -ifdef::pvc[] -Both the in-tree plugin and CSI driver support using PVCs to enable ultra disks. You can also deploy machines with ultra disks as data disks without creating a PVC. -endif::pvc[] - -ifeval::["{context}" == "creating-machineset-azure"] -:!mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:!pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:!pvc: -endif::[] diff --git a/modules/machineset-creating-azure-ephemeral-os.adoc b/modules/machineset-creating-azure-ephemeral-os.adoc deleted file mode 100644 index 8e8ce1361688..000000000000 --- a/modules/machineset-creating-azure-ephemeral-os.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc - -:_content-type: PROCEDURE -[id="machineset-creating-azure-ephemeral-os_{context}"] -= Creating machines on Ephemeral OS disks by using compute machine sets - -You can launch machines on Ephemeral OS disks on Azure by editing your compute machine set YAML file. - -.Prerequisites - -* Have an existing Microsoft Azure cluster. - -.Procedure - -. Edit the custom resource (CR) by running the following command: -+ -[source,terminal] ----- -$ oc edit machineset <machine-set-name> ----- -+ -where `<machine-set-name>` is the compute machine set that you want to provision machines on Ephemeral OS disks. - -. Add the following to the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - value: - ... - osDisk: - ... - diskSettings: <1> - ephemeralStorageLocation: Local <1> - cachingType: ReadOnly <1> - managedDisk: - storageAccountType: Standard_LRS <2> - ... ----- -+ -<1> These lines enable the use of Ephemeral OS disks. -<2> Ephemeral OS disks are only supported for VMs or scale set instances that use the Standard LRS storage account type. 
-+ -[IMPORTANT] -==== -The implementation of Ephemeral OS disk support in {product-title} only supports the `CacheDisk` placement type. Do not change the `placement` configuration setting. -==== - -. Create a compute machine set using the updated configuration: -+ -[source,terminal] ----- -$ oc create -f <machine-set-config>.yaml ----- - -.Verification - -* On the Microsoft Azure portal, review the *Overview* page for a machine deployed by the compute machine set, and verify that the `Ephemeral OS disk` field is set to `OS cache placement`. diff --git a/modules/machineset-creating-azure-ultra-disk.adoc b/modules/machineset-creating-azure-ultra-disk.adoc deleted file mode 100644 index da0527115f74..000000000000 --- a/modules/machineset-creating-azure-ultra-disk.adoc +++ /dev/null @@ -1,354 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * storage/persistent_storage/persistent-storage-azure.adoc -// * storage/persistent_storage/persistent-storage-csi-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure"] -:mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:pvc: -endif::[] - -ifdef::mapi[:machine-role: worker] -ifdef::cpmso[:machine-role: master] - -:_content-type: PROCEDURE -[id="machineset-creating-azure-ultra-disk_{context}"] -= Creating machines with ultra disks by using machine sets - -You can deploy machines with ultra disks on Azure by editing your machine set YAML file. - -.Prerequisites - -* Have an existing Microsoft Azure cluster. - -.Procedure - -ifdef::mapi,cpmso[] -. Create a custom secret in the `openshift-machine-api` namespace using the `{machine-role}` data secret by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ -get secret <role>-user-data \ <1> ---template='{{index .data.userData | base64decode}}' | jq > userData.txt <2> ----- -<1> Replace `<role>` with `{machine-role}`. -<2> Specify `userData.txt` as the name of the new custom secret. - -. In a text editor, open the `userData.txt` file and locate the final `}` character in the file. - -.. On the immediately preceding line, add a `,`. - -.. Create a new line after the `,` and add the following configuration details: -+ -[source,json] ----- -"storage": { - "disks": [ <1> - { - "device": "/dev/disk/azure/scsi1/lun0", <2> - "partitions": [ <3> - { - "label": "lun0p1", <4> - "sizeMiB": 1024, <5> - "startMiB": 0 - } - ] - } - ], - "filesystems": [ <6> - { - "device": "/dev/disk/by-partlabel/lun0p1", - "format": "xfs", - "path": "/var/lib/lun0p1" - } - ] -}, -"systemd": { - "units": [ <7> - { - "contents": "[Unit]\nBefore=local-fs.target\n[Mount]\nWhere=/var/lib/lun0p1\nWhat=/dev/disk/by-partlabel/lun0p1\nOptions=defaults,pquota\n[Install]\nWantedBy=local-fs.target\n", <8> - "enabled": true, - "name": "var-lib-lun0p1.mount" - } - ] -} ----- -<1> The configuration details for the disk that you want to attach to a node as an ultra disk. -<2> Specify the `lun` value that is defined in the `dataDisks` stanza of the machine set you are using. For example, if the machine set contains `lun: 0`, specify `lun0`. You can initialize multiple data disks by specifying multiple `"disks"` entries in this configuration file. 
If you specify multiple `"disks"` entries, ensure that the `lun` value for each matches the value in the machine set. -<3> The configuration details for a new partition on the disk. -<4> Specify a label for the partition. You might find it helpful to use hierarchical names, such as `lun0p1` for the first partition of `lun0`. -<5> Specify the total size in MiB of the partition. -<6> Specify the filesystem to use when formatting a partition. Use the partition label to specify the partition. -<7> Specify a `systemd` unit to mount the partition at boot. Use the partition label to specify the partition. You can create multiple partitions by specifying multiple `"partitions"` entries in this configuration file. If you specify multiple `"partitions"` entries, you must specify a `systemd` unit for each. -<8> For `Where`, specify the value of `storage.filesystems.path`. For `What`, specify the value of `storage.filesystems.device`. - -. Extract the disabling template value to a file called `disableTemplating.txt` by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get secret <role>-user-data \ <1> ---template='{{index .data.disableTemplating | base64decode}}' | jq > disableTemplating.txt ----- -<1> Replace `<role>` with `{machine-role}`. - -. Combine the `userData.txt` file and `disableTemplating.txt` file to create a data secret file by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api create secret generic <role>-user-data-x5 \ <1> ---from-file=userData=userData.txt \ ---from-file=disableTemplating=disableTemplating.txt ----- -<1> For `<role>-user-data-x5`, specify the name of the secret. Replace `<role>` with `{machine-role}`. -endif::mapi,cpmso[] - -ifndef::cpmso[] -. Copy an existing Azure `MachineSet` custom resource (CR) and edit it by running the following command: -+ -[source,terminal] ----- -$ oc edit machineset <machine-set-name> ----- -+ -where `<machine-set-name>` is the machine set that you want to provision machines with ultra disks. - -. Add the following lines in the positions indicated: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -spec: - template: - spec: - metadata: - labels: - disk: ultrassd <1> - providerSpec: - value: - ultraSSDCapability: Enabled <2> -ifdef::mapi[] - dataDisks: <2> - - nameSuffix: ultrassd - lun: 0 - diskSizeGB: 4 - deletionPolicy: Delete - cachingType: None - managedDisk: - storageAccountType: UltraSSD_LRS - userDataSecret: - name: <role>-user-data-x5 <3> -endif::mapi[] ----- -<1> Specify a label to use to select a node that is created by this machine set. This procedure uses `disk.ultrassd` for this value. -<2> These lines enable the use of ultra disks. -ifdef::mapi[] -For `dataDisks`, include the entire stanza. -<3> Specify the user data secret created earlier. Replace `<role>` with `{machine-role}`. -endif::mapi[] - -. Create a machine set using the updated configuration by running the following command: -+ -[source,terminal] ----- -$ oc create -f <machine-set-name>.yaml ----- -endif::cpmso[] - -ifdef::cpmso[] -. Edit your control plane machine set CR by running the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-machine-api edit controlplanemachineset.machine.openshift.io cluster ----- - -. 
Add the following lines in the positions indicated: -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: ControlPlaneMachineSet -spec: - template: - spec: - metadata: - labels: - disk: ultrassd <1> - providerSpec: - value: - ultraSSDCapability: Enabled <2> - dataDisks: <2> - - nameSuffix: ultrassd - lun: 0 - diskSizeGB: 4 - deletionPolicy: Delete - cachingType: None - managedDisk: - storageAccountType: UltraSSD_LRS - userDataSecret: - name: <role>-user-data-x5 <3> ----- -<1> Specify a label to use to select a node that is created by this machine set. This procedure uses `disk.ultrassd` for this value. -<2> These lines enable the use of ultra disks. For `dataDisks`, include the entire stanza. -<3> Specify the user data secret created earlier. Replace `<role>` with `{machine-role}`. - -. Save your changes. - -** For clusters that use the default `RollingUpdate` update strategy, the Operator automatically propagates the changes to your control plane configuration. - -** For clusters that are configured to use the `OnDelete` update strategy, you must replace your control plane machines manually. -endif::cpmso[] - -ifdef::pvc[] -. Create a storage class that contains the following YAML definition: -+ -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: ultra-disk-sc <1> -parameters: - cachingMode: None - diskIopsReadWrite: "2000" <2> - diskMbpsReadWrite: "320" <3> - kind: managed - skuname: UltraSSD_LRS -provisioner: disk.csi.azure.com <4> -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer <5> ----- -<1> Specify the name of the storage class. This procedure uses `ultra-disk-sc` for this value. -<2> Specify the number of IOPS for the storage class. -<3> Specify the throughput in MBps for the storage class. -<4> For Azure Kubernetes Service (AKS) version 1.21 or later, use `disk.csi.azure.com`. For earlier versions of AKS, use `kubernetes.io/azure-disk`. -<5> Optional: Specify this parameter to wait for the creation of the pod that will use the disk. - -. Create a persistent volume claim (PVC) to reference the `ultra-disk-sc` storage class that contains the following YAML definition: -+ -[source,yaml] ----- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ultra-disk <1> -spec: - accessModes: - - ReadWriteOnce - storageClassName: ultra-disk-sc <2> - resources: - requests: - storage: 4Gi <3> ----- -<1> Specify the name of the PVC. This procedure uses `ultra-disk` for this value. -<2> This PVC references the `ultra-disk-sc` storage class. -<3> Specify the size for the storage class. The minimum value is `4Gi`. - -. Create a pod that contains the following YAML definition: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nginx-ultra -spec: - nodeSelector: - disk: ultrassd <1> - containers: - - name: nginx-ultra - image: alpine:latest - command: - - "sleep" - - "infinity" - volumeMounts: - - mountPath: "/mnt/azure" - name: volume - volumes: - - name: volume - persistentVolumeClaim: - claimName: ultra-disk <2> ----- -<1> Specify the label of the machine set that enables the use of ultra disks. This procedure uses `disk.ultrassd` for this value. -<2> This pod references the `ultra-disk` PVC. -endif::pvc[] - -.Verification - -. Validate that the machines are created by running the following command: -+ -[source,terminal] ----- -$ oc get machines ----- -+ -The machines should be in the `Running` state. - -. 
For a machine that is running and has a node attached, validate the partition by running the following command: -+ -[source,terminal] ----- -$ oc debug node/<node-name> -- chroot /host lsblk ----- -+ -In this command, `oc debug node/<node-name>` starts a debugging shell on the node `<node-name>` and passes a command with `--`. The passed command `chroot /host` provides access to the underlying host OS binaries, and `lsblk` shows the block devices that are attached to the host OS machine. - -.Next steps - -ifndef::cpmso[] -* To use an ultra disk from within a pod, create a workload that uses the mount point. Create a YAML file similar to the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: ssd-benchmark1 -spec: - containers: - - name: ssd-benchmark1 - image: nginx - ports: - - containerPort: 80 - name: "http-server" - volumeMounts: - - name: lun0p1 - mountPath: "/tmp" - volumes: - - name: lun0p1 - hostPath: - path: /var/lib/lun0p1 - type: DirectoryOrCreate - nodeSelector: - disktype: ultrassd ----- -endif::cpmso[] - -ifdef::cpmso[] -* To use an ultra disk on the control plane, reconfigure your workload to use the control plane's ultra disk mount point. -endif::cpmso[] - -ifeval::["{context}" == "creating-machineset-azure"] -:!mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:!pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:!pvc: -endif::[] diff --git a/modules/machineset-creating-dedicated-instances.adoc b/modules/machineset-creating-dedicated-instances.adoc deleted file mode 100644 index 6e3e5906c376..000000000000 --- a/modules/machineset-creating-dedicated-instances.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: PROCEDURE -[id="machineset-creating-dedicated-instance_{context}"] -= Creating Dedicated Instances by using machine sets - -You can run a machine that is backed by a Dedicated Instance by using Machine API integration. Set the `tenancy` field in your machine set YAML file to launch a Dedicated Instance on AWS. - -.Procedure - -* Specify a dedicated tenancy under the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - placement: - tenancy: dedicated ----- diff --git a/modules/machineset-creating-imds-options.adoc b/modules/machineset-creating-imds-options.adoc deleted file mode 100644 index 491a6560848a..000000000000 --- a/modules/machineset-creating-imds-options.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: PROCEDURE -[id="machineset-creating-imds-options_{context}"] -= Configuring IMDS by using machine sets - -You can specify whether to require the use of IMDSv2 by adding or editing the value of `metadataServiceOptions.authentication` in the machine set YAML file for your machines. - -.Procedure -* Add or edit the following lines under the `providerSpec` field: -+ -[source,yaml] ----- -providerSpec: - value: - metadataServiceOptions: - authentication: Required <1> ----- -<1> To require IMDSv2, set the parameter value to `Required`. 
To allow the use of both IMDSv1 and IMDSv2, set the parameter value to `Optional`. If no value is specified, both IMDSv1 and IMDSv2 are allowed. diff --git a/modules/machineset-creating-non-guaranteed-instances.adoc b/modules/machineset-creating-non-guaranteed-instances.adoc deleted file mode 100644 index 3e8c675a16ed..000000000000 --- a/modules/machineset-creating-non-guaranteed-instances.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc - -ifeval::["{context}" == "creating-machineset-aws"] -:aws: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:azure: -endif::[] -ifeval::["{context}" == "creating-machineset-gcp"] -:gcp: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-creating-non-guaranteed-instance_{context}"] -ifdef::aws[= Creating Spot Instances by using compute machine sets] -ifdef::azure[= Creating Spot VMs by using compute machine sets] -ifdef::gcp[= Creating preemptible VM instances by using compute machine sets] - -ifdef::aws[You can launch a Spot Instance on AWS by adding `spotMarketOptions` to your compute machine set YAML file.] -ifdef::azure[You can launch a Spot VM on Azure by adding `spotVMOptions` to your compute machine set YAML file.] -ifdef::gcp[You can launch a preemptible VM instance on GCP by adding `preemptible` to your compute machine set YAML file.] - -.Procedure -* Add the following line under the `providerSpec` field: -+ -ifdef::aws[] -[source,yaml] ----- -providerSpec: - value: - spotMarketOptions: {} ----- -+ -You can optionally set the `spotMarketOptions.maxPrice` field to limit the cost of the Spot Instance. For example you can set `maxPrice: '2.50'`. -+ -If the `maxPrice` is set, this value is used as the hourly maximum spot price. If it is not set, the maximum price defaults to charge up to the On-Demand Instance price. -+ -[NOTE] -==== -It is strongly recommended to use the default On-Demand price as the `maxPrice` value and to not set the maximum price for Spot Instances. -==== -endif::aws[] -ifdef::azure[] -[source,yaml] ----- -providerSpec: - value: - spotVMOptions: {} ----- -+ -You can optionally set the `spotVMOptions.maxPrice` field to limit the cost of the Spot VM. For example you can set `maxPrice: '0.98765'`. If the `maxPrice` is set, this value is used as the hourly maximum spot price. If it is not set, the maximum price defaults to `-1` and charges up to the standard VM price. -+ -Azure caps Spot VM prices at the standard price. Azure will not evict an instance due to pricing if the instance is set with the default `maxPrice`. However, an instance can still be evicted due to capacity restrictions. - -[NOTE] -==== -It is strongly recommended to use the default standard VM price as the `maxPrice` value and to not set the maximum price for Spot VMs. -==== -endif::azure[] -ifdef::gcp[] -[source,yaml] ----- -providerSpec: - value: - preemptible: true ----- -+ -If `preemptible` is set to `true`, the machine is labelled as an `interruptable-instance` after the instance is launched. 
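As an optional check after the machine set launches new machines, you can confirm that they received the interruptible label. The exact label key is not spelled out in this module, so this sketch simply filters the machine labels for it:

[source,terminal]
----
$ oc get machines -n openshift-machine-api --show-labels | grep -i interrupt
----

Machines created from this machine set should appear in the filtered output once they are running.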
- -endif::gcp[] - -ifeval::["{context}" == "creating-machineset-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:!azure: -endif::[] -ifeval::["{context}" == "creating-machineset-gcp"] -:!gcp: -endif::[] diff --git a/modules/machineset-creating.adoc b/modules/machineset-creating.adoc deleted file mode 100644 index ce229cf44627..000000000000 --- a/modules/machineset-creating.adoc +++ /dev/null @@ -1,239 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-osp.adoc -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-aws.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-azure.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-vsphere.adoc -// * windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc - -ifeval::["{context}" == "creating-windows-machineset-aws"] -:win: -endif::[] -ifeval::["{context}" == "creating-windows-machineset-azure"] -:win: -endif::[] -ifeval::["{context}" == "creating-machineset-azure-stack-hub"] -:ash: -endif::[] -ifeval::["{context}" == "creating-windows-machineset-vsphere"] -:win: -endif::[] -ifeval::["{context}" == "creating-machineset-vsphere"] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-creating_{context}"] -= Creating a compute machine set - -In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. - -ifdef::vsphere[] -[NOTE] -==== -Clusters that are installed with user-provisioned infrastructure have a different networking stack than clusters with infrastructure that is provisioned by the installation program. As a result of this difference, automatic load balancer management is unsupported on clusters that have user-provisioned infrastructure. For these clusters, a compute machine set can only create `worker` and `infra` type machines. -==== -endif::vsphere[] - -.Prerequisites - -* Deploy an {product-title} cluster. -* Install the OpenShift CLI (`oc`). -* Log in to `oc` as a user with `cluster-admin` permission. -ifdef::vsphere[] -* Have the necessary permissions to deploy VMs in your vCenter instance and have the required access to the datastore specified. -* If your cluster uses user-provisioned infrastructure, you have satisfied the specific Machine API requirements for that configuration. -endif::vsphere[] -ifdef::ash[] -* Create an availability set in which to deploy Azure Stack Hub compute machines. -endif::ash[] - -.Procedure - -. Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named `<file_name>.yaml`. -+ -ifndef::ash[] -Ensure that you set the `<clusterID>` and `<role>` parameter values. -endif::ash[] -ifdef::ash[] -Ensure that you set the `<availabilitySet>`, `<clusterID>`, and `<role>` parameter values. 
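The `<clusterID>` value is generally the cluster infrastructure ID. You can retrieve that ID by running the following command:

[source,terminal]
----
$ oc get infrastructure cluster -o jsonpath='{.status.infrastructureName}'
----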
-endif::ash[] - -. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. - -.. To list the compute machine sets in your cluster, run the following command: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1d 0 0 55m -agl030519-vplxk-worker-us-east-1e 0 0 55m -agl030519-vplxk-worker-us-east-1f 0 0 55m ----- - -.. To view values of a specific compute machine set custom resource (CR), run the following command: -+ -[source,terminal] ----- -$ oc get machineset <machineset_name> \ - -n openshift-machine-api -o yaml ----- -+ --- -.Example output -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - name: <infrastructure_id>-<role> <2> - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <role> - machine.openshift.io/cluster-api-machine-type: <role> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> - spec: - providerSpec: <3> - ... ----- -<1> The cluster infrastructure ID. -<2> A default node label. -+ -[NOTE] -==== -For clusters that have user-provisioned infrastructure, a compute machine set can only create `worker` and `infra` type machines. -==== -<3> The values in the `<providerSpec>` section of the compute machine set CR are platform-specific. For more information about `<providerSpec>` parameters in the CR, see the sample compute machine set CR configuration for your provider. --- - -ifdef::vsphere[] -.. If you are creating a compute machine set for a cluster that has user-provisioned infrastructure, note the following important values: -+ -.Example vSphere `providerSpec` values -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -... -template: - ... - spec: - providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: vsphere-cloud-credentials <1> - diskGiB: 120 - kind: VSphereMachineProviderSpec - memoryMiB: 16384 - network: - devices: - - networkName: "<vm_network_name>" - numCPUs: 4 - numCoresPerSocket: 4 - snapshot: "" - template: <vm_template_name> <2> - userDataSecret: - name: worker-user-data <3> - workspace: - datacenter: <vcenter_datacenter_name> - datastore: <vcenter_datastore_name> - folder: <vcenter_vm_folder_path> - resourcepool: <vsphere_resource_pool> - server: <vcenter_server_address> <4> ----- -<1> The name of the secret in the `openshift-machine-api` namespace that contains the required vCenter credentials. -<2> The name of the {op-system} VM template for your cluster that was created during installation. -<3> The name of the secret in the `openshift-machine-api` namespace that contains the required Ignition configuration credentials. -<4> The IP address or fully qualified domain name (FQDN) of the vCenter server. -endif::vsphere[] - -. 
Create a `MachineSet` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - -ifeval::["{context}" == "creating-machineset-aws"] -. If you need compute machine sets in other availability zones, repeat this process to create more compute machine sets. -endif::[] - -.Verification - -* View the list of compute machine sets by running the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -ifdef::win[] -NAME DESIRED CURRENT READY AVAILABLE AGE -agl030519-vplxk-windows-worker-us-east-1a 1 1 1 1 11m -agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1d 0 0 55m -agl030519-vplxk-worker-us-east-1e 0 0 55m -agl030519-vplxk-worker-us-east-1f 0 0 55m -endif::win[] -ifndef::win[] -NAME DESIRED CURRENT READY AVAILABLE AGE -agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m -agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m -agl030519-vplxk-worker-us-east-1d 0 0 55m -agl030519-vplxk-worker-us-east-1e 0 0 55m -agl030519-vplxk-worker-us-east-1f 0 0 55m -endif::win[] ----- -+ -When the new compute machine set is available, the `DESIRED` and `CURRENT` values match. If the compute machine set is not available, wait a few minutes and run the command again. - -ifeval::["{context}" == "creating-machineset-vsphere"] -:!vsphere: -endif::[] -ifeval::["{context}" == "creating-windows-machineset-aws"] -:!win: -endif::[] -ifeval::["{context}" == "creating-machineset-azure-stack-hub"] -:!ash: -endif::[] -ifeval::["{context}" == "creating-windows-machineset-azure"] -:!win: -endif::[] -ifeval::["{context}" == "creating-windows-machineset-vsphere"] -:!win: -endif::[] diff --git a/modules/machineset-customer-managed-encryption-azure.adoc b/modules/machineset-customer-managed-encryption-azure.adoc deleted file mode 100644 index 95b7a3b2f72d..000000000000 --- a/modules/machineset-customer-managed-encryption-azure.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -:_content-type: PROCEDURE -[id="machineset-enabling-customer-managed-encryption-azure_{context}"] -= Enabling customer-managed encryption keys for a machine set - -You can supply an encryption key to Azure to encrypt data on managed disks at rest. You can enable server-side encryption with customer-managed keys by using the Machine API. - -An Azure Key Vault, a disk encryption set, and an encryption key are required to use a customer-managed key. The disk encryption set must be in a resource group where the Cloud Credential Operator (CCO) has granted permissions. If not, an additional reader role is required to be granted on the disk encryption set. - -.Prerequisites - -* link:https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys#create-an-azure-key-vault-instance[Create an Azure Key Vault instance]. -* link:https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys#create-an-instance-of-a-diskencryptionset[Create an instance of a disk encryption set]. 
-* link:https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys#grant-the-diskencryptionset-access-to-key-vault[Grant the disk encryption set access to key vault]. - -.Procedure - -* Configure the disk encryption set under the `providerSpec` field in your machine set YAML file. For example: -+ -[source,yaml] ----- -providerSpec: - value: - osDisk: - diskSizeGB: 128 - managedDisk: - diskEncryptionSet: - id: /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/diskEncryptionSets/<disk_encryption_set_name> - storageAccountType: Premium_LRS ----- - -[role="_additional-resources"] -.Additional resources -* https://docs.microsoft.com/en-us/azure/virtual-machines/disk-encryption#customer-managed-keys[Azure documentation about customer-managed keys] \ No newline at end of file diff --git a/modules/machineset-dedicated-instances.adoc b/modules/machineset-dedicated-instances.adoc deleted file mode 100644 index 2fc94eee3424..000000000000 --- a/modules/machineset-dedicated-instances.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -[id="machineset-dedicated-instance_{context}"] -= Machine sets that deploy machines as Dedicated Instances - -You can create a machine set running on AWS that deploys machines as Dedicated Instances. Dedicated Instances run in a virtual private cloud (VPC) on hardware that is dedicated to a single customer. These Amazon EC2 instances are physically isolated at the host hardware level. The isolation of Dedicated Instances occurs even if the instances belong to different AWS accounts that are linked to a single payer account. However, other instances that are not dedicated can share hardware with Dedicated Instances if they belong to the same AWS account. - -Instances with either public or dedicated tenancy are supported by the Machine API. Instances with public tenancy run on shared hardware. Public tenancy is the default tenancy. Instances with dedicated tenancy run on single-tenant hardware. diff --git a/modules/machineset-delete-policy.adoc b/modules/machineset-delete-policy.adoc deleted file mode 100644 index 5c63252cb821..000000000000 --- a/modules/machineset-delete-policy.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/manually-scaling-machineset.adoc -// * post_installation_configuration/cluster-tasks.adoc - -[id="machineset-delete-policy_{context}"] -= The compute machine set deletion policy - -`Random`, `Newest`, and `Oldest` are the three supported deletion options. The default is `Random`, meaning that random machines are chosen and deleted when scaling compute machine sets down. The deletion policy can be set according to the use case by modifying the particular compute machine set: - -[source,yaml] ----- -spec: - deletePolicy: <delete_policy> - replicas: <desired_replica_count> ----- - -Specific machines can also be prioritized for deletion by adding the annotation `machine.openshift.io/delete-machine=true` to the machine of interest, regardless of the deletion policy. - -[IMPORTANT] -==== -By default, the {product-title} router pods are deployed on workers. Because the router is required to access some cluster resources, including the web console, do not scale the worker compute machine set to `0` unless you first relocate the router pods. 
-==== - -[NOTE] -==== -Custom compute machine sets can be used for use cases requiring that services run on specific nodes and that those services are ignored by the controller when the worker compute machine sets are scaling down. This prevents service disruption. -==== diff --git a/modules/machineset-gcp-confidential-vm.adoc b/modules/machineset-gcp-confidential-vm.adoc deleted file mode 100644 index 6a3934bb4806..000000000000 --- a/modules/machineset-gcp-confidential-vm.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-gcp-confidential-vm_{context}"] -= Configuring Confidential VM by using machine sets - -By editing the machine set YAML file, you can configure the Confidential VM options that a machine set uses for machines that it deploys. - -For more information about Confidential Compute features, functionality, and compatibility, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/confidential-vm/docs/about-cvm[Confidential VM]. - -:FeatureName: Confidential Computing -include::snippets/technology-preview.adoc[] - -.Procedure - -. In a text editor, open the YAML file for an existing machine set or create a new one. - -. Edit the following section under the `providerSpec` field: -+ -[source,yaml] ----- -ifndef::cpmso[] -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -endif::cpmso[] -ifdef::cpmso[] -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -endif::cpmso[] -... -spec: - template: - spec: - providerSpec: - value: - confidentialCompute: Enabled <1> - onHostMaintenance: Terminate <2> - machineType: n2d-standard-8 <3> -... ----- -<1> Specify whether Confidential VM is enabled. Valid values are `Disabled` or `Enabled`. -<2> Specify the behavior of the VM during a host maintenance event, such as a hardware or software update. For a machine that uses Confidential VM, this value must be set to `Terminate`, which stops the VM. Confidential VM does not support live VM migration. -<3> Specify a machine type that supports Confidential VM. Confidential VM supports the N2D and C2D series of machine types. - -.Verification - -* On the Google Cloud console, review the details for a machine deployed by the machine set and verify that the Confidential VM options match the values that you configured. - -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-gcp-enabling-customer-managed-encryption.adoc b/modules/machineset-gcp-enabling-customer-managed-encryption.adoc deleted file mode 100644 index 63ae3426096f..000000000000 --- a/modules/machineset-gcp-enabling-customer-managed-encryption.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-gcp-enabling-customer-managed-encryption_{context}"] -= Enabling customer-managed encryption keys for a machine set - -Google Cloud Platform (GCP) Compute Engine allows users to supply an encryption key to encrypt data on disks at rest. 
The key is used to encrypt the data encryption key, not to encrypt the customer's data. By default, Compute Engine encrypts this data by using Compute Engine keys. - -You can enable encryption with a customer-managed key in clusters that use the Machine API. You must first link:https://cloud.google.com/compute/docs/disks/customer-managed-encryption#before_you_begin[create a KMS key] and assign the correct permissions to a service account. The KMS key name, key ring name, and location are required to allow a service account to use your key. - -[NOTE] -==== -If you do not want to use a dedicated service account for the KMS encryption, the Compute Engine default service account is used instead. You must grant the default service account permission to access the keys if you do not use a dedicated service account. The Compute Engine default service account name follows the `service-<project_number>@compute-system.iam.gserviceaccount.com` pattern. -==== - -.Procedure - -. To allow a specific service account to use your KMS key and to grant the service account the correct IAM role, run the following command with your KMS key name, key ring name, and location: -+ -[source,terminal] ----- -$ gcloud kms keys add-iam-policy-binding <key_name> \ - --keyring <key_ring_name> \ - --location <key_ring_location> \ - --member "serviceAccount:service-<project_number>@compute-system.iam.gserviceaccount.com” \ - --role roles/cloudkms.cryptoKeyEncrypterDecrypter ----- - -. Configure the encryption key under the `providerSpec` field in your machine set YAML file. For example: -+ -[source,yaml] ----- -ifndef::cpmso[] -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -endif::cpmso[] -ifdef::cpmso[] -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -endif::cpmso[] -... -spec: - template: - spec: - providerSpec: - value: - disks: - - type: - encryptionKey: - kmsKey: - name: machine-encryption-key <1> - keyRing: openshift-encrpytion-ring <2> - location: global <3> - projectID: openshift-gcp-project <4> - kmsKeyServiceAccount: openshift-service-account@openshift-gcp-project.iam.gserviceaccount.com <5> ----- -<1> The name of the customer-managed encryption key that is used for the disk encryption. -<2> The name of the KMS key ring that the KMS key belongs to. -<3> The GCP location in which the KMS key ring exists. -<4> Optional: The ID of the project in which the KMS key ring exists. If a project ID is not set, the machine set `projectID` in which the machine set was created is used. -<5> Optional: The service account that is used for the encryption request for the given KMS key. If a service account is not set, the Compute Engine default service account is used. -+ -When a new machine is created by using the updated `providerSpec` object configuration, the disk encryption key is encrypted with the KMS key. - -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-gcp-enabling-gpu-support.adoc b/modules/machineset-gcp-enabling-gpu-support.adoc deleted file mode 100644 index bbfa71d28a3e..000000000000 --- a/modules/machineset-gcp-enabling-gpu-support.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc - -:_content-type: PROCEDURE -[id="machineset-gcp-enabling-gpu-support_{context}"] -= Enabling GPU support for a compute machine set - -Google Cloud Platform (GCP) Compute Engine enables users to add GPUs to VM instances. 
Workloads that benefit from access to GPU resources can perform better on compute machines with this feature enabled. {product-title} on GCP supports NVIDIA GPU models in the A2 and N1 machine series. - -.Supported GPU configurations -|==== -|Model name |GPU type |Machine types ^[1]^ - -|NVIDIA A100 -|`nvidia-tesla-a100` -a|* `a2-highgpu-1g` -* `a2-highgpu-2g` -* `a2-highgpu-4g` -* `a2-highgpu-8g` -* `a2-megagpu-16g` - -|NVIDIA K80 -|`nvidia-tesla-k80` -.5+a|* `n1-standard-1` -* `n1-standard-2` -* `n1-standard-4` -* `n1-standard-8` -* `n1-standard-16` -* `n1-standard-32` -* `n1-standard-64` -* `n1-standard-96` -* `n1-highmem-2` -* `n1-highmem-4` -* `n1-highmem-8` -* `n1-highmem-16` -* `n1-highmem-32` -* `n1-highmem-64` -* `n1-highmem-96` -* `n1-highcpu-2` -* `n1-highcpu-4` -* `n1-highcpu-8` -* `n1-highcpu-16` -* `n1-highcpu-32` -* `n1-highcpu-64` -* `n1-highcpu-96` - -|NVIDIA P100 -|`nvidia-tesla-p100` - -|NVIDIA P4 -|`nvidia-tesla-p4` - -|NVIDIA T4 -|`nvidia-tesla-t4` - -|NVIDIA V100 -|`nvidia-tesla-v100` - -|==== -[.small] --- -1. For more information about machine types, including specifications, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/docs/general-purpose-machines#n1_machines[N1 machine series], link:https://cloud.google.com/compute/docs/accelerator-optimized-machines#a2_vms[A2 machine series], and link:https://cloud.google.com/compute/docs/gpus/gpu-regions-zones#gpu_regions_and_zones[GPU regions and zones availability]. --- - -You can define which supported GPU to use for an instance by using the Machine API. - -You can configure machines in the N1 machine series to deploy with one of the supported GPU types. Machines in the A2 machine series come with associated GPUs, and cannot use guest accelerators. - -[NOTE] -==== -GPUs for graphics workloads are not supported. -==== - -.Procedure - -. In a text editor, open the YAML file for an existing compute machine set or create a new one. - -. Specify a GPU configuration under the `providerSpec` field in your compute machine set YAML file. See the following examples of valid configurations: -+ -.Example configuration for the A2 machine series: -[source,yaml] ----- - providerSpec: - value: - machineType: a2-highgpu-1g <1> - onHostMaintenance: Terminate <2> - restartPolicy: Always <3> ----- -<1> Specify the machine type. Ensure that the machine type is included in the A2 machine series. -<2> When using GPU support, you must set `onHostMaintenance` to `Terminate`. -<3> Specify the restart policy for machines deployed by the compute machine set. Allowed values are `Always` or `Never`. -+ -.Example configuration for the N1 machine series: -[source,yaml] ----- -providerSpec: - value: - gpus: - - count: 1 <1> - type: nvidia-tesla-p100 <2> - machineType: n1-standard-1 <3> - onHostMaintenance: Terminate <4> - restartPolicy: Always <5> ----- -<1> Specify the number of GPUs to attach to the machine. -<2> Specify the type of GPUs to attach to the machine. Ensure that the machine type and GPU type are compatible. -<3> Specify the machine type. Ensure that the machine type and GPU type are compatible. -<4> When using GPU support, you must set `onHostMaintenance` to `Terminate`. -<5> Specify the restart policy for machines deployed by the compute machine set. Allowed values are `Always` or `Never`. 
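As a quick sanity check after editing the compute machine set, you can confirm that the `gpus` stanza was applied before new machines are created. This sketch applies to the N1 machine series example above and assumes a hypothetical machine set name of `<gpu_machineset_name>`:

[source,terminal]
----
$ oc get machineset <gpu_machineset_name> -n openshift-machine-api \
  -o jsonpath='{.spec.template.spec.providerSpec.value.gpus}{"\n"}'
----

The output should show the `count` and `type` values that you configured. For the A2 machine series, the GPU association comes from the machine type itself, so the `gpus` field is not used.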
diff --git a/modules/machineset-gcp-pd-disk-types.adoc b/modules/machineset-gcp-pd-disk-types.adoc deleted file mode 100644 index 658936d28ee7..000000000000 --- a/modules/machineset-gcp-pd-disk-types.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-gcp-pd-disk-types_{context}"] -= Configuring persistent disk types by using machine sets - -You can configure the type of persistent disk that a machine set deploys machines on by editing the machine set YAML file. - -For more information about persistent disk types, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/docs/disks#pdspecs[persistent disks]. - -.Procedure - -. In a text editor, open the YAML file for an existing machine set or create a new one. - -. Edit the following line under the `providerSpec` field: -+ -[source,yaml] ----- -ifndef::cpmso[] -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -endif::cpmso[] -ifdef::cpmso[] -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -endif::cpmso[] -... -spec: - template: - spec: - providerSpec: - value: - disks: - type: <pd-disk-type> <1> ----- -<1> Specify the disk persistent type. Valid values are `pd-ssd`, `pd-standard`, and `pd-balanced`. The default value is `pd-standard`. - -.Verification - -* Using the Google Cloud console, review the details for a machine deployed by the machine set and verify that the `Type` field matches the configured disk type. - -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] diff --git a/modules/machineset-gcp-shielded-vms.adoc b/modules/machineset-gcp-shielded-vms.adoc deleted file mode 100644 index f317f022f247..000000000000 --- a/modules/machineset-gcp-shielded-vms.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc - -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-gcp-shielded-vms_{context}"] -= Configuring Shielded VM options by using machine sets - -By editing the machine set YAML file, you can configure the Shielded VM options that a machine set uses for machines that it deploys. - -For more information about Shielded VM features and functionality, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[Shielded VM]. - -.Procedure - -. In a text editor, open the YAML file for an existing machine set or create a new one. - -. Edit the following section under the `providerSpec` field: -+ -[source,yaml] ----- -ifndef::cpmso[] -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -endif::cpmso[] -ifdef::cpmso[] -apiVersion: machine.openshift.io/v1 -kind: ControlPlaneMachineSet -endif::cpmso[] -... -spec: - template: - spec: - providerSpec: - value: - shieldedInstanceConfig: <1> - integrityMonitoring: Enabled <2> - secureBoot: Disabled <3> - virtualizedTrustedPlatformModule: Enabled <4> -... ----- -+ --- -<1> In this section, specify any Shielded VM options that you want. -<2> Specify whether UEFI Secure Boot is enabled. Valid values are `Disabled` or `Enabled`. 
-<3> Specify whether integrity monitoring is enabled. Valid values are `Disabled` or `Enabled`. -+ -[NOTE] -==== -When integrity monitoring is enabled, you must not disable virtual trusted platform module (vTPM). -==== - -<4> Specify whether vTPM is enabled. Valid values are `Disabled` or `Enabled`. --- - -.Verification - -* Using the Google Cloud console, review the details for a machine deployed by the machine set and verify that the Shielded VM options match the values that you configured. - -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-imds-options.adoc b/modules/machineset-imds-options.adoc deleted file mode 100644 index 4d094864db34..000000000000 --- a/modules/machineset-imds-options.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "cpmso-using"] -:cpmso: -endif::[] - -:_content-type: CONCEPT -[id="machineset-imds-options_{context}"] -= Machine set options for the Amazon EC2 Instance Metadata Service - -You can use machine sets to create machines that use a specific version of the Amazon EC2 Instance Metadata Service (IMDS). Machine sets can create machines that allow the use of both IMDSv1 and link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html[IMDSv2] or machines that require the use of IMDSv2. - -To change the IMDS configuration for existing machines, edit the machine set YAML file that manages those machines. -ifndef::cpmso[] -To deploy new compute machines with your preferred IMDS configuration, create a compute machine set YAML file with the appropriate values. -endif::cpmso[] - -[IMPORTANT] -==== -Before configuring a machine set to create machines that require IMDSv2, ensure that any workloads that interact with the AWS metadata service support IMDSv2. -==== - -ifeval::["{context}" == "cpmso-using"] -:!cpmso: -endif::[] \ No newline at end of file diff --git a/modules/machineset-manually-scaling.adoc b/modules/machineset-manually-scaling.adoc deleted file mode 100644 index 3d1689066cab..000000000000 --- a/modules/machineset-manually-scaling.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/manually-scaling-machineset.adoc -// * post_installation_configuration/cluster-tasks.adoc -// * windows_containers/scheduling-windows-workloads.adoc - -:_content-type: PROCEDURE -[id="machineset-manually-scaling_{context}"] -= Scaling a compute machine set manually - -To add or remove an instance of a machine in a compute machine set, you can manually scale the compute machine set. - -This guidance is relevant to fully automated, installer-provisioned infrastructure installations. Customized, user-provisioned infrastructure installations do not have compute machine sets. - -.Prerequisites - -* Install an {product-title} cluster and the `oc` command line. -* Log in to `oc` as a user with `cluster-admin` permission. - -.Procedure - -. View the compute machine sets that are in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -The compute machine sets are listed in the form of `<clusterid>-worker-<aws-region-az>`. - -. 
View the compute machines that are in the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get machine -n openshift-machine-api ----- - -. Set the annotation on the compute machine that you want to delete by running the following command: -+ -[source,terminal] ----- -$ oc annotate machine/<machine_name> -n openshift-machine-api machine.openshift.io/delete-machine="true" ----- - -. Scale the compute machine set by running one of the following commands: -+ -[source,terminal] ----- -$ oc scale --replicas=2 machineset <machineset> -n openshift-machine-api ----- -+ -Or: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale the compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - replicas: 2 ----- -==== -+ -You can scale the compute machine set up or down. It takes several minutes for the new machines to be available. -+ -[IMPORTANT] -==== -By default, the machine controller tries to drain the node that is backed by the machine until it succeeds. In some situations, such as with a misconfigured pod disruption budget, the drain operation might not be able to succeed. If the drain operation fails, the machine controller cannot proceed removing the machine. - -You can skip draining the node by annotating `machine.openshift.io/exclude-node-draining` in a specific machine. -==== - -.Verification - -* Verify the deletion of the intended machine by running the following command: -+ -[source,terminal] ----- -$ oc get machines ----- \ No newline at end of file diff --git a/modules/machineset-migrating-compute-nodes-to-diff-sd-rhv.adoc b/modules/machineset-migrating-compute-nodes-to-diff-sd-rhv.adoc deleted file mode 100644 index 44995f5c5701..000000000000 --- a/modules/machineset-migrating-compute-nodes-to-diff-sd-rhv.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/modifying-machineset.adoc -:_content-type: PROCEDURE -[id="machineset-migrating-compute-nodes-to-diff-sd-rhv_{context}"] -= Migrating compute nodes to a different storage domain in {rh-virtualization} - -.Prerequisites - -* You are logged in to the {rh-virtualization-engine-name}. -* You have the name of the target storage domain. - -.Procedure - -. Identify the virtual machine template by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.items[0].spec.template.spec.providerSpec.value.template_name}{"\n"}' machineset -A ----- - -. Create a new virtual machine in the {rh-virtualization-engine-name}, based on the template you identified. Leave all other settings unchanged. For details, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Creating_a_Virtual_Machine_Based_on_a_Template[Creating a Virtual Machine Based on a Template] in the Red Hat Virtualization _Virtual Machine Management Guide_. -+ -[TIP] -==== -You do not need to start the new virtual machine. -==== - -. Create a new template from the new virtual machine. Specify the target storage domain under *Target*. 
For details, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Creating_a_template_from_an_existing_virtual_machine[Creating a Template] in the Red Hat Virtualization _Virtual Machine Management Guide_. - -. Add a new compute machine set to the {product-title} cluster with the new template. -.. Get the details of the current compute machine set by running the following command: -+ -[source,terminal] ----- -$ oc get machineset -o yaml ----- -.. Use these details to create a compute machine set. For more information see _Creating a compute machine set_. -+ -Enter the new virtual machine template name in the *template_name* field. Use the same template name you used in the *New template* dialog in the {rh-virtualization-engine-name}. -.. Note the names of both the old and new compute machine sets. You need to refer to them in subsequent steps. - -. Migrate the workloads. -.. Scale up the new compute machine set. For details on manually scaling compute machine sets, see _Scaling a compute machine set manually_. -+ -{product-title} moves the pods to an available worker when the old machine is removed. -.. Scale down the old compute machine set. - -. Remove the old compute machine set by running the following command: -+ -[source,terminal] ----- -$ oc delete machineset <machineset-name> ----- diff --git a/modules/machineset-migrating-control-plane-nodes-to-diff-sd-rhv.adoc b/modules/machineset-migrating-control-plane-nodes-to-diff-sd-rhv.adoc deleted file mode 100644 index a055de6ce19d..000000000000 --- a/modules/machineset-migrating-control-plane-nodes-to-diff-sd-rhv.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/modifying-machineset.adoc -:_content-type: PROCEDURE -[id="machineset-migrating-control-plane-nodes-to-diff-sd-rhv_{context}"] -= Migrating control plane nodes to a different storage domain on {rh-virtualization} - -{product-title} does not manage control plane nodes, so they are easier to migrate than compute nodes. You can migrate them like any other virtual machine on {rh-virtualization-first}. - -Perform this procedure for each node separately. - -.Prerequisites - -* You are logged in to the {rh-virtualization-engine-name}. -* You have identified the control plane nodes. They are labeled *master* in the {rh-virtualization-engine-name}. - -.Procedure - -. Select the virtual machine labeled *master*. - -. Shut down the virtual machine. - -. Click the *Disks* tab. - -. Click the virtual machine's disk. - -. Click *More Actions*{kebab} and select *Move*. - -. Select the target storage domain and wait for the migration process to complete. - -. Start the virtual machine. - -. Verify that the {product-title} cluster is stable: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -The output should display the node with the status `Ready`. - -. Repeat this procedure for each control plane node. diff --git a/modules/machineset-modifying.adoc b/modules/machineset-modifying.adoc deleted file mode 100644 index 1577648e58ce..000000000000 --- a/modules/machineset-modifying.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// -// * machine_management/modifying-machineset.adoc - -:_content-type: PROCEDURE -[id="machineset-modifying_{context}"] -= Modifying a compute machine set - -To make changes to a compute machine set, edit the `MachineSet` YAML. 
Then, remove all machines associated with the compute machine set by deleting each machine or scaling down the compute machine set to `0` replicas. Then, scale the replicas back to the desired number. Changes you make to a compute machine set do not affect existing machines. - -If you need to scale a compute machine set without making other changes, you do not need to delete the machines. - -[NOTE] -==== -By default, the {product-title} router pods are deployed on workers. Because the router is required to access some cluster resources, including the web console, do not scale the compute machine set to `0` unless you first relocate the router pods. -==== - -.Prerequisites - -* Install an {product-title} cluster and the `oc` command line. -* Log in to `oc` as a user with `cluster-admin` permission. - -.Procedure - -. Edit the compute machine set by running the following command: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- - -. Scale down the compute machine set to `0` by running one of the following commands: -+ -[source,terminal] ----- -$ oc scale --replicas=0 machineset <machineset> -n openshift-machine-api ----- -+ -Or: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale the compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - replicas: 0 ----- -==== -+ -Wait for the machines to be removed. - -. Scale up the compute machine set as needed by running one of the following commands: -+ -[source,terminal] ----- -$ oc scale --replicas=2 machineset <machineset> -n openshift-machine-api ----- -+ -Or: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale the compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - replicas: 2 ----- -==== -+ -Wait for the machines to start. The new machines contain changes you made to the compute machine set. diff --git a/modules/machineset-non-guaranteed-instance.adoc b/modules/machineset-non-guaranteed-instance.adoc deleted file mode 100644 index 6ecb68390bb0..000000000000 --- a/modules/machineset-non-guaranteed-instance.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc - -ifeval::["{context}" == "creating-machineset-aws"] -:aws: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:azure: -endif::[] -ifeval::["{context}" == "creating-machineset-gcp"] -:gcp: -endif::[] - -[id="machineset-non-guaranteed-instance_{context}"] -ifdef::aws[= Machine sets that deploy machines as Spot Instances] -ifdef::azure[= Machine sets that deploy machines as Spot VMs] -ifdef::gcp[= Machine sets that deploy machines as preemptible VM instances] -ifdef::aws[] -You can save on costs by creating a compute machine set running on AWS that deploys machines as non-guaranteed Spot Instances. Spot Instances utilize unused AWS EC2 capacity and are less expensive than On-Demand Instances. 
You can use Spot Instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. -endif::aws[] -ifdef::azure[] -You can save on costs by creating a compute machine set running on Azure that deploys machines as non-guaranteed Spot VMs. Spot VMs utilize unused Azure capacity and are less expensive than standard VMs. You can use Spot VMs for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. -endif::azure[] -ifdef::gcp[] -You can save on costs by creating a compute machine set running on GCP that deploys machines as non-guaranteed preemptible VM instances. Preemptible VM instances utilize excess Compute Engine capacity and are less expensive than normal instances. You can use preemptible VM instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. -endif::gcp[] - -ifdef::aws[] -AWS EC2 can terminate a Spot Instance at any time. AWS gives a two-minute warning to the user when an interruption occurs. {product-title} begins to remove the workloads from the affected instances when AWS issues the termination warning. - -Interruptions can occur when using Spot Instances for the following reasons: - -* The instance price exceeds your maximum price -* The demand for Spot Instances increases -* The supply of Spot Instances decreases - -When AWS terminates an instance, a termination handler running on the Spot Instance node deletes the machine resource. To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a Spot Instance. -endif::aws[] -ifdef::azure[] -Azure can terminate a Spot VM at any time. Azure gives a 30-second warning to the user when an interruption occurs. {product-title} begins to remove the workloads from the affected instances when Azure issues the termination warning. - -Interruptions can occur when using Spot VMs for the following reasons: - -* The instance price exceeds your maximum price -* The supply of Spot VMs decreases -* Azure needs capacity back - - -When Azure terminates an instance, a termination handler running on the Spot VM node deletes the machine resource. To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a Spot VM. -endif::azure[] -ifdef::gcp[] -GCP Compute Engine can terminate a preemptible VM instance at any time. Compute Engine sends a preemption notice to the user indicating that an interruption will occur in 30 seconds. {product-title} begins to remove the workloads from the affected instances when Compute Engine issues the preemption notice. An ACPI G3 Mechanical Off signal is sent to the operating system after 30 seconds if the instance is not stopped. The preemptible VM instance is then transitioned to a `TERMINATED` state by Compute Engine. - -Interruptions can occur when using preemptible VM instances for the following reasons: - -* There is a system or maintenance event -* The supply of preemptible VM instances decreases -* The instance reaches the end of the allotted 24-hour period for preemptible VM instances - -When GCP terminates an instance, a termination handler running on the preemptible VM instance node deletes the machine resource. To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a preemptible VM instance. 
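A simple way to observe this replacement behavior is to watch the machine list while an instance is reclaimed. The deleted machine disappears and a new machine appears in its place so that the `replicas` count is satisfied:

[source,terminal]
----
$ oc get machines -n openshift-machine-api -w
----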
-endif::gcp[] - -ifeval::["{context}" == "creating-machineset-aws"] -:!aws: -endif::[] -ifeval::["{context}" == "creating-machineset-azure"] -:!azure: -endif::[] -ifeval::["{context}" == "creating-machineset-gcp"] -:!gcp: -endif::[] diff --git a/modules/machineset-osp-adding-bare-metal.adoc b/modules/machineset-osp-adding-bare-metal.adoc deleted file mode 100644 index 6da48fa2149c..000000000000 --- a/modules/machineset-osp-adding-bare-metal.adoc +++ /dev/null @@ -1,93 +0,0 @@ -[id="machineset-osp-adding-bare-metal_{context}"] -= Adding bare-metal compute machines to a {rh-openstack} cluster -// TODO -// Mothballed -// Reintroduce when feature is available. -You can add bare-metal compute machines to an {product-title} cluster after you deploy it -on {rh-openstack-first}. In this configuration, all machines are attached to an -existing, installer-provisioned network, and traffic between control plane and -compute machines is routed between subnets. - -[NOTE] -==== -Bare-metal compute machines are not supported on clusters that use Kuryr. -==== - -.Prerequisites - -* The {rh-openstack} link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/bare_metal_provisioning/index[Bare Metal service (Ironic)] is enabled and accessible by using the {rh-openstack} Compute API. - -* Bare metal is available as link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html/bare_metal_provisioning/sect-configure#creating_the_bare_metal_flavor[an {rh-openstack} flavor]. - -* You deployed an {product-title} cluster on installer-provisioned infrastructure. - -* Your {rh-openstack} cloud provider is configured to route traffic between the installer-created VM -subnet and the pre-existing bare metal subnet. - -.Procedure -. Create a file called `baremetalMachineSet.yaml`, and then add the bare metal flavor to it: -+ -FIXME: May require update before publication. -.A sample bare metal MachineSet file -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - name: <infrastructure_id>-<node_role> - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - spec: - providerSpec: - value: - apiVersion: openstackproviderconfig.openshift.io/v1alpha1 - cloudName: openstack - cloudsSecret: - name: openstack-cloud-credentials - namespace: openshift-machine-api - flavor: <nova_flavor> - image: <glance_image_name_or_location> - kind: OpenstackProviderSpec - networks: - - filter: {} - subnets: - - filter: - name: <subnet_name> - tags: openshiftClusterID=<infrastructure_id> - securityGroups: - - filter: {} - name: <infrastructure_id>-<node_role> - serverMetadata: - Name: <infrastructure_id>-<node_role> - openshiftClusterID: <infrastructure_id> - tags: - - openshiftClusterID=<infrastructure_id> - trunk: true - userDataSecret: - name: <node_role>-user-data ----- - -. 
On a command line, to create the MachineSet resource, type: -+ -[source,terminal] ----- -$ oc create -v baremetalMachineSet.yaml ----- - -You can now use bare-metal compute machines in your {product-title} cluster. diff --git a/modules/machineset-troubleshooting-azure-ultra-disk.adoc b/modules/machineset-troubleshooting-azure-ultra-disk.adoc deleted file mode 100644 index 95d66a111092..000000000000 --- a/modules/machineset-troubleshooting-azure-ultra-disk.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-azure.adoc -// * storage/persistent_storage/persistent-storage-azure.adoc -// * storage/persistent_storage/persistent-storage-csi-azure.adoc -// * machine_management/control_plane_machine_management/cpmso-using.adoc - -ifeval::["{context}" == "creating-machineset-azure"] -:mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:mapi: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:pvc: -endif::[] - -:_content-type: REFERENCE -[id="machineset-troubleshooting-azure-ultra-disk_{context}"] -= Troubleshooting resources for machine sets that enable ultra disks - -Use the information in this section to understand and recover from issues you might encounter. - -ifdef::pvc[] -[id="ts-pvc-mounting-ultra_{context}"] -== Unable to mount a persistent volume claim backed by an ultra disk - -If there is an issue mounting a persistent volume claim backed by an ultra disk, the pod becomes stuck in the `ContainerCreating` state and an alert is triggered. - -For example, if the `additionalCapabilities.ultraSSDEnabled` parameter is not set on the machine that backs the node that hosts the pod, the following error message appears: - -[source,terminal] ----- -StorageAccountType UltraSSD_LRS can be used only when additionalCapabilities.ultraSSDEnabled is set. ----- - -* To resolve this issue, describe the pod by running the following command: -+ -[source,terminal] ----- -$ oc -n <stuck_pod_namespace> describe pod <stuck_pod_name> ----- -endif::pvc[] - -ifdef::mapi[] -[id="ts-mapi-attach-misconfigure_{context}"] -== Incorrect ultra disk configuration - -If an incorrect configuration of the `ultraSSDCapability` parameter is specified in the machine set, the machine provisioning fails. - -For example, if the `ultraSSDCapability` parameter is set to `Disabled`, but an ultra disk is specified in the `dataDisks` parameter, the following error message appears: - -[source,terminal] ----- -StorageAccountType UltraSSD_LRS can be used only when additionalCapabilities.ultraSSDEnabled is set. ----- - -* To resolve this issue, verify that your machine set configuration is correct. - -[id="ts-mapi-attach-unsupported_{context}"] -== Unsupported disk parameters - -If a region, availability zone, or instance size that is not compatible with ultra disks is specified in the machine set, the machine provisioning fails. Check the logs for the following error message: - -[source,terminal] ----- -failed to create vm <machine_name>: failure sending request for machine <machine_name>: cannot create vm: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="BadRequest" Message="Storage Account type 'UltraSSD_LRS' is not supported <more_information_about_why>." 
----- - -* To resolve this issue, verify that you are using this feature in a supported environment and that your machine set configuration is correct. - -[id="ts-mapi-delete_{context}"] -== Unable to delete disks - -If the deletion of ultra disks as data disks is not working as expected, the machines are deleted and the data disks are orphaned. You must delete the orphaned disks manually if desired. - -endif::mapi[] - -ifeval::["{context}" == "creating-machineset-azure"] -:!mapi: -endif::[] -ifeval::["{context}" == "cpmso-using"] -:!mapi: -endif::[] -ifeval::["{context}" == "persistent-storage-azure"] -:!pvc: -endif::[] -ifeval::["{context}" == "persistent-storage-csi-azure"] -:!pvc: -endif::[] diff --git a/modules/machineset-upi-reqs-ignition-config.adoc b/modules/machineset-upi-reqs-ignition-config.adoc deleted file mode 100644 index e8b18c958013..000000000000 --- a/modules/machineset-upi-reqs-ignition-config.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. - -ifeval::["{context}" == "creating-machineset-vsphere"] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-upi-reqs-ignition-config_{context}"] -= Satisfying Ignition configuration requirements - -Provisioning virtual machines (VMs) requires a valid Ignition configuration. The Ignition configuration contains the `machine-config-server` address and a system trust bundle for obtaining further Ignition configurations from the Machine Config Operator. - -By default, this configuration is stored in the `worker-user-data` secret in the `machine-api-operator` namespace. Compute machine sets reference the secret during the machine creation process. - -.Procedure - -. To determine whether the required secret exists, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n openshift-machine-api worker-user-data \ - -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}' ----- -+ -.Sample output -[source,terminal] ----- -disableTemplating: false -userData: <1> - { - "ignition": { - ... - }, - ... - } ----- -<1> The full output is omitted here, but should have this format. - -. If the secret does not exist, create it by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic worker-user-data \ - -n openshift-machine-api \ - --from-file=<installation_directory>/worker.ign ----- -+ -where `<installation_directory>` is the directory that was used to store your installation assets during cluster installation. - -ifeval::["{context}" == "creating-machineset-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/machineset-upi-reqs-infra-id.adoc b/modules/machineset-upi-reqs-infra-id.adoc deleted file mode 100644 index f18f4e82ea41..000000000000 --- a/modules/machineset-upi-reqs-infra-id.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc -// -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. 
- -ifeval::["{context}" == "creating-machineset-vsphere"] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="machineset-upi-reqs-infra-id_{context}"] -= Obtaining the infrastructure ID - -To create compute machine sets, you must be able to supply the infrastructure ID for your cluster. - -.Procedure - -* To obtain the infrastructure ID for your cluster, run the following command: -+ -[source,terminal] ----- -$ oc get infrastructure cluster -o jsonpath='{.status.infrastructureName}' ----- - -ifeval::["{context}" == "creating-machineset-vsphere"] -:!vsphere: -endif::[] diff --git a/modules/machineset-upi-reqs-vsphere-creds.adoc b/modules/machineset-upi-reqs-vsphere-creds.adoc deleted file mode 100644 index 50b1c9e0cb2a..000000000000 --- a/modules/machineset-upi-reqs-vsphere-creds.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc - -:_content-type: PROCEDURE -[id="machineset-upi-reqs-vsphere-creds_{context}"] -= Satisfying vSphere credentials requirements - -To use compute machine sets, the Machine API must be able to interact with vCenter. Credentials that authorize the Machine API components to interact with vCenter must exist in a secret in the `openshift-machine-api` namespace. - -.Procedure - -. To determine whether the required credentials exist, run the following command: -+ -[source,terminal] ----- -$ oc get secret \ - -n openshift-machine-api vsphere-cloud-credentials \ - -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}' ----- -+ -.Sample output -[source,terminal] ----- -<vcenter-server>.password=<openshift-user-password> -<vcenter-server>.username=<openshift-user> ----- -+ -where `<vcenter-server>` is the IP address or fully qualified domain name (FQDN) of the vCenter server and `<openshift-user>` and `<openshift-user-password>` are the {product-title} administrator credentials to use. - -. If the secret does not exist, create it by running the following command: -+ -[source,terminal] ----- -$ oc create secret generic vsphere-cloud-credentials \ - -n openshift-machine-api \ - --from-literal=<vcenter-server>.username=<openshift-user> --from-literal=<vcenter-server>.password=<openshift-user-password> ----- \ No newline at end of file diff --git a/modules/machineset-vsphere-required-permissions.adoc b/modules/machineset-vsphere-required-permissions.adoc deleted file mode 100644 index e08d87fbd75d..000000000000 --- a/modules/machineset-vsphere-required-permissions.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_vsphere/installing-restricted-networks-vsphere.adoc - -[id="machineset-vsphere-requirements-user-provisioned-machine-sets_{context}"] -= Minimum required vCenter privileges for compute machine set management - -To manage compute machine sets in an {product-title} cluster on vCenter, you must use an account with privileges to read, create, and delete the required resources. Using an account that has global administrative privileges is the simplest way to access all of the necessary permissions. - -If you cannot use an account with global administrative privileges, you must create roles to grant the minimum required privileges. 
The following table lists the minimum vCenter roles and privileges that are required to create, scale, and delete compute machine sets and to delete machines in your {product-title} cluster. - -.Minimum vCenter roles and privileges required for compute machine set management -[%collapsible] -==== -[cols="3a,3a,3a",options="header"] -|=== -|vSphere object for role -|When required -|Required privileges - -|vSphere vCenter -|Always -| -[%hardbreaks] -`InventoryService.Tagging.AttachTag` -`InventoryService.Tagging.CreateCategory` -`InventoryService.Tagging.CreateTag` -`InventoryService.Tagging.DeleteCategory` -`InventoryService.Tagging.DeleteTag` -`InventoryService.Tagging.EditCategory` -`InventoryService.Tagging.EditTag` -`Sessions.ValidateSession` -`StorageProfile.Update`^1^ -`StorageProfile.View`^1^ - -|vSphere vCenter Cluster -|Always -| -[%hardbreaks] -`Resource.AssignVMToPool` - -|vSphere Datastore -|Always -| -[%hardbreaks] -`Datastore.AllocateSpace` -`Datastore.Browse` - -|vSphere Port Group -|Always -|`Network.Assign` - -|Virtual Machine Folder -|Always -| -[%hardbreaks] -`VirtualMachine.Config.AddRemoveDevice` -`VirtualMachine.Config.AdvancedConfig` -`VirtualMachine.Config.Annotation` -`VirtualMachine.Config.CPUCount` -`VirtualMachine.Config.DiskExtend` -`VirtualMachine.Config.Memory` -`VirtualMachine.Config.Settings` -`VirtualMachine.Interact.PowerOff` -`VirtualMachine.Interact.PowerOn` -`VirtualMachine.Inventory.CreateFromExisting` -`VirtualMachine.Inventory.Delete` -`VirtualMachine.Provisioning.Clone` - -|vSphere vCenter Datacenter -|If the installation program creates the virtual machine folder -| -[%hardbreaks] -`Resource.AssignVMToPool` -`VirtualMachine.Provisioning.DeployTemplate` - -3+a| -^1^ The `StorageProfile.Update` and `StorageProfile.View` permissions are required only for storage backends that use the Container Storage Interface (CSI). -|=== -==== - -The following table details the permissions and propagation settings that are required for compute machine set management. - -.Required permissions and propagation settings -[%collapsible] -==== -[cols="3a,3a,3a,3a",options="header"] -|=== -|vSphere object -|Folder type -|Propagate to children -|Permissions required - -|vSphere vCenter -|Always -|Not required -|Listed required privileges - -.2+|vSphere vCenter Datacenter -|Existing folder -|Not required -|`ReadOnly` permission - -|Installation program creates the folder -|Required -|Listed required privileges - -|vSphere vCenter Cluster -|Always -|Required -|Listed required privileges - -|vSphere vCenter Datastore -|Always -|Not required -|Listed required privileges - -|vSphere Switch -|Always -|Not required -|`ReadOnly` permission - -|vSphere Port Group -|Always -|Not required -|Listed required privileges - -|vSphere vCenter Virtual Machine Folder -|Existing folder -|Required -|Listed required privileges -|=== -==== - -For more information about creating an account with only the required privileges, see link:https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-5372F580-5C23-4E9C-8A4E-EF1B4DD9033E.html[vSphere Permissions and User Management Tasks] in the vSphere documentation. 
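The privileges in the preceding tables correspond to the routine Machine API operations that you perform with standard `oc` commands. The following commands sketch those operations; the machine set and machine names are illustrative:

[source,terminal]
----
$ oc get machinesets -n openshift-machine-api

$ oc scale machineset <infrastructure_id>-worker-0 \
  -n openshift-machine-api --replicas=2

$ oc delete machine <machine_name> -n openshift-machine-api
----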
diff --git a/modules/machineset-yaml-alibaba-usage-stats.adoc b/modules/machineset-yaml-alibaba-usage-stats.adoc deleted file mode 100644 index aeda5ceb0a96..000000000000 --- a/modules/machineset-yaml-alibaba-usage-stats.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-alibaba.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-alibaba-usage-stats_{context}"] -= Machine set parameters for Alibaba Cloud usage statistics - -The default compute machine sets that the installer creates for Alibaba Cloud clusters include nonessential tag values that Alibaba Cloud uses internally to track usage statistics. These tags are populated in the `securityGroups`, `tag`, and `vSwitch` parameters of the `spec.template.spec.providerSpec.value` list. - -When creating compute machine sets to deploy additional machines, you must include the required Kubernetes tags. The usage statistics tags are applied by default, even if they are not specified in the compute machine sets you create. You can also include additional tags as needed. - -The following YAML snippets indicate which tags in the default compute machine sets are optional and which are required. - -.Tags in `spec.template.spec.providerSpec.value.securityGroups` -[source,yaml] ----- -spec: - template: - spec: - providerSpec: - value: - securityGroups: - - tags: - - Key: kubernetes.io/cluster/<infrastructure_id> <1> - Value: owned - - Key: GISV - Value: ocp - - Key: sigs.k8s.io/cloud-provider-alibaba/origin <1> - Value: ocp - - Key: Name - Value: <infrastructure_id>-sg-<role> <2> - type: Tags ----- -<1> Optional: This tag is applied even when not specified in the compute machine set. -<2> Required. -+ -where: -+ -* `<infrastructure_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. -* `<role>` is the node label to add. - -.Tags in `spec.template.spec.providerSpec.value.tag` -[source,yaml] ----- -spec: - template: - spec: - providerSpec: - value: - tag: - - Key: kubernetes.io/cluster/<infrastructure_id> <2> - Value: owned - - Key: GISV <1> - Value: ocp - - Key: sigs.k8s.io/cloud-provider-alibaba/origin <1> - Value: ocp ----- -<1> Optional: This tag is applied even when not specified in the compute machine set. -<2> Required. -+ -where `<infrastructure_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. - -.Tags in `spec.template.spec.providerSpec.value.vSwitch` -[source,yaml] ----- -spec: - template: - spec: - providerSpec: - value: - vSwitch: - tags: - - Key: kubernetes.io/cluster/<infrastructure_id> <1> - Value: owned - - Key: GISV <1> - Value: ocp - - Key: sigs.k8s.io/cloud-provider-alibaba/origin <1> - Value: ocp - - Key: Name - Value: <infrastructure_id>-vswitch-<zone> <2> - type: Tags ----- -<1> Optional: This tag is applied even when not specified in the compute machine set. -<2> Required. -+ -where: -+ -* `<infrastructure_id>` is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. -* `<zone>` is the zone within your region to place machines on. 
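To see which of these tags the installer populated in your cluster, you can query an existing default compute machine set. This is a sketch; the machine set name and zone are illustrative:

[source,terminal]
----
$ oc -n openshift-machine-api get machineset <infrastructure_id>-worker-<zone> \
  -o jsonpath='{.spec.template.spec.providerSpec.value.tag}{"\n"}'
----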
diff --git a/modules/machineset-yaml-alibaba.adoc b/modules/machineset-yaml-alibaba.adoc deleted file mode 100644 index 12122580b1d3..000000000000 --- a/modules/machineset-yaml-alibaba.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-alibaba.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-alibaba_{context}"] -= Sample YAML for a compute machine set custom resource on Alibaba Cloud - -This sample YAML defines a compute machine set that runs in a specified Alibaba Cloud zone in a region and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role>-<zone> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - name: <infrastructure_id>-<infra>-<zone> <3> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<zone> <3> -endif::infra[] - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<zone> <3> -endif::infra[] - spec: - metadata: - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" -endif::infra[] - providerSpec: - value: - apiVersion: machine.openshift.io/v1 - credentialsSecret: - name: alibabacloud-credentials - imageId: <image_id> <4> - instanceType: <instance_type> <5> - kind: AlibabaCloudMachineProviderConfig - ramRoleName: <infrastructure_id>-role-worker <6> - regionId: <region> <7> - resourceGroup: <8> - id: <resource_group_id> - type: ID - securityGroups: - - tags: <9> - - Key: Name - Value: <infrastructure_id>-sg-<role> - type: Tags - systemDisk: <10> - category: cloud_essd - size: <disk_size> - tag: <9> - - Key: kubernetes.io/cluster/<infrastructure_id> - Value: owned - userDataSecret: - name: <user_data_secret> <11> - vSwitch: - tags: <9> - - Key: Name - Value: 
<infrastructure_id>-vswitch-<zone> - type: Tags - vpcId: "" - zoneId: <zone> <12> -ifdef::infra[] - taints: <13> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> Specify the node label to add. -<3> Specify the infrastructure ID, node label, and zone. -endif::infra[] -ifdef::infra[] -<2> Specify the `<infra>` node label. -<3> Specify the infrastructure ID, `<infra>` node label, and zone. -endif::infra[] -<4> Specify the image to use. Use an image from an existing default compute machine set for the cluster. -<5> Specify the instance type you want to use for the compute machine set. -<6> Specify the name of the RAM role to use for the compute machine set. Use the value that the installer populates in the default compute machine set. -<7> Specify the region to place machines on. -<8> Specify the resource group and type for the cluster. You can use the value that the installer populates in the default compute machine set, or specify a different one. -<9> Specify the tags to use for the compute machine set. Minimally, you must include the tags shown in this example, with appropriate values for your cluster. You can include additional tags, including the tags that the installer populates in the default compute machine set it creates, as needed. -<10> Specify the type and size of the root disk. Use the `category` value that the installer populates in the default compute machine set it creates. If required, specify a different value in gigabytes for `size`. -<11> Specify the name of the secret in the user data YAML file that is in the `openshift-machine-api` namespace. Use the value that the installer populates in the default compute machine set. -<12> Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. -ifdef::infra[] -<13> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] - -//// -Not needed for this release, but the process to create a new value for the name of the secret in the user data YAML file is: -1. Create a file (script with things you want to run). -2. Run base64 encoding on the script. -3. Add the base64-encoded string to a user data YAML file like this one: https://github.com/openshift/cluster-api-provider-alibaba/blob/main/examples/userdata.yml#L1 The `name` in that file should match the `userDataSecret` name in the compute machine set. -4. Place the user data file in the `openshift-machine-api` namespace. 
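A rough sketch of those steps with standard tools (hypothetical file names; not a documented procedure):
$ base64 -w0 custom-script.sh   # paste the encoded string into the user data YAML referenced above
$ oc create -n openshift-machine-api -f userdata.yml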
-//// diff --git a/modules/machineset-yaml-aws.adoc b/modules/machineset-yaml-aws.adoc deleted file mode 100644 index 912b1a17578c..000000000000 --- a/modules/machineset-yaml-aws.adoc +++ /dev/null @@ -1,167 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-aws.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-aws_{context}"] -= Sample YAML for a compute machine set custom resource on AWS - -This sample YAML defines a compute machine set that runs in the `us-east-1a` Amazon Web Services (AWS) zone and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - name: <infrastructure_id>-<role>-<zone> <2> -endif::infra[] -ifdef::infra[] - name: <infrastructure_id>-infra-<zone> <2> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<zone> <2> -endif::infra[] - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <3> - machine.openshift.io/cluster-api-machine-type: <role> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: infra <3> - machine.openshift.io/cluster-api-machine-type: infra <3> -endif::infra[] -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<zone> <2> -endif::infra[] - spec: - metadata: - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" <3> -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" <3> -endif::infra[] - providerSpec: - value: - ami: - id: ami-046fe691f52a953f9 <4> - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - ebs: - iops: 0 - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: <infrastructure_id>-worker-profile <1> - instanceType: m6i.large - kind: AWSMachineProviderConfig - placement: - availabilityZone: <zone> <6> - region: <region> <7> - securityGroups: - - filters: - - name: tag:Name - values: - - <infrastructure_id>-worker-sg <1> - subnet: - filters: - - name: tag:Name - values: - - <infrastructure_id>-private-<zone> <8> - tags: - - name: kubernetes.io/cluster/<infrastructure_id> <1> - value: owned - - name: <custom_tag_name> <5> - value: <custom_tag_value> <5> - userDataSecret: - name: worker-user-data -ifdef::infra[] - taints: <9> - - key: node-role.kubernetes.io/infra - effect: 
NoSchedule -endif::infra[] ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> Specify the infrastructure ID, role node label, and zone. -<3> Specify the role node label to add. -endif::infra[] -ifdef::infra[] -<2> Specify the infrastructure ID, `infra` role node label, and zone. -<3> Specify the `infra` role node label. -endif::infra[] -<4> Specify a valid {op-system-first} Amazon -Machine Image (AMI) for your AWS zone for your {product-title} nodes. If you want to use an AWS Marketplace image, you must complete the {product-title} subscription from the link:https://aws.amazon.com/marketplace/fulfillment?productId=59ead7de-2540-4653-a8b0-fa7926d5c845[AWS Marketplace] to obtain an AMI ID for your region. -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.ami.id}{"\n"}' \ - get machineset/<infrastructure_id>-<role>-<zone> ----- -<5> Optional: Specify custom tag data for your cluster. For example, you might add an admin contact email address by specifying a `name:value` pair of `Email:\admin-email@example.com`. -+ -[NOTE] -==== -Custom tags can also be specified during installation in the `install-config.yml` file. If the `install-config.yml` file and the machine set include a tag with the same `name` data, the value for the tag from the machine set takes priority over the value for the tag in the `install-config.yml` file. -==== - -<6> Specify the zone, for example, `us-east-1a`. -<7> Specify the region, for example, `us-east-1`. -<8> Specify the infrastructure ID and zone. - -ifdef::infra[] -<9> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== - -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-azure-stack-hub.adoc b/modules/machineset-yaml-azure-stack-hub.adoc deleted file mode 100644 index f59418d73527..000000000000 --- a/modules/machineset-yaml-azure-stack-hub.adoc +++ /dev/null @@ -1,180 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-azure-stack-hub.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-azure-stack-hub_{context}"] -= Sample YAML for a compute machine set custom resource on Azure Stack Hub - -This sample YAML defines a compute machine set that runs in the `1` Microsoft Azure zone in a region and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] 
- -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - name: <infrastructure_id>-infra-<region> <3> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<region> <3> -endif::infra[] - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<region> <3> -endif::infra[] - spec: - metadata: - creationTimestamp: null - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" <2> -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" <2> - taints: <4> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] - providerSpec: - value: - apiVersion: machine.openshift.io/v1beta1 - availabilitySet: <availability_set> <6> - credentialsSecret: - name: azure-cloud-credentials - namespace: openshift-machine-api - image: - offer: "" - publisher: "" - resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/images/<infrastructure_id> <1> - sku: "" - version: "" - internalLoadBalancer: "" - kind: AzureMachineProviderSpec -ifndef::infra[] - location: <region> <4> -endif::infra[] -ifdef::infra[] - location: <region> <5> -endif::infra[] - managedIdentity: <infrastructure_id>-identity <1> - metadata: - creationTimestamp: null - natRule: null - networkResourceGroup: "" - osDisk: - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: "" - resourceGroup: <infrastructure_id>-rg <1> - sshPrivateKey: "" - sshPublicKey: "" - subnet: <infrastructure_id>-<role>-subnet <1> <2> - userDataSecret: - name: worker-user-data <2> - vmSize: Standard_DS4_v2 - vnet: <infrastructure_id>-vnet <1> -ifndef::infra[] - zone: "1" <5> -endif::infra[] -ifdef::infra[] - zone: "1" <7> -endif::infra[] ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 
If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -+ -You can obtain the subnet by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{"\n"}' \ - get machineset/<infrastructure_id>-worker-centralus1 ----- -You can obtain the vnet by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{"\n"}' \ - get machineset/<infrastructure_id>-worker-centralus1 ----- -ifndef::infra[] -<2> Specify the node label to add. -<3> Specify the infrastructure ID, node label, and region. -<4> Specify the region to place machines on. -<5> Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. -<6> Specify the availability set for the cluster. -endif::infra[] -ifdef::infra[] -<2> Specify the `<infra>` node label. -<3> Specify the infrastructure ID, `<infra>` node label, and region. -<4> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== - -<5> Specify the region to place machines on. -<6> Specify the availability set for the cluster. -<7> Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. - -endif::infra[] - - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-azure.adoc b/modules/machineset-yaml-azure.adoc deleted file mode 100644 index 369e2bb34d22..000000000000 --- a/modules/machineset-yaml-azure.adoc +++ /dev/null @@ -1,174 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating-machineset-azure.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-azure_{context}"] -= Sample YAML for a compute machine set custom resource on Azure - -This sample YAML defines a compute machine set that runs in the `1` Microsoft Azure zone in a region and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. 
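The note about `misscheduled` DNS pods applies to each of the infrastructure machine set samples in this content. One way to see which nodes the DNS pods run on after you add the `NoSchedule` taint is the following command; this is a sketch, not a required step:

[source,terminal]
----
$ oc get pods -n openshift-dns -o wide
----

Pods that remain on tainted infra nodes are the ones that need a toleration or deletion, as described in the linked solution.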
- -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - name: <infrastructure_id>-infra-<region> <3> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<region> <3> -endif::infra[] - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra-<region> <3> -endif::infra[] - spec: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-machineset: <machineset_name> <4> -ifndef::infra[] - node-role.kubernetes.io/<role>: "" <2> -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" <2> -endif::infra[] - providerSpec: - value: - apiVersion: azureproviderconfig.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials - namespace: openshift-machine-api - image: <5> - offer: "" - publisher: "" - resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/images/<infrastructure_id> <6> - sku: "" - version: "" - internalLoadBalancer: "" - kind: AzureMachineProviderSpec - location: <region> <7> - managedIdentity: <infrastructure_id>-identity <1> - metadata: - creationTimestamp: null - natRule: null - networkResourceGroup: "" - osDisk: - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: "" - resourceGroup: <infrastructure_id>-rg <1> - sshPrivateKey: "" - sshPublicKey: "" - tags: - - name: <custom_tag_name> <9> - value: <custom_tag_value> <9> - subnet: <infrastructure_id>-<role>-subnet <1> <2> - userDataSecret: - name: worker-user-data <2> - vmSize: Standard_D4s_v3 - vnet: <infrastructure_id>-vnet <1> - zone: "1" <8> -ifdef::infra[] - taints: <10> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 
If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -+ -You can obtain the subnet by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{"\n"}' \ - get machineset/<infrastructure_id>-worker-centralus1 ----- -You can obtain the vnet by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{"\n"}' \ - get machineset/<infrastructure_id>-worker-centralus1 ----- -ifndef::infra[] -<2> Specify the node label to add. -<3> Specify the infrastructure ID, node label, and region. -endif::infra[] -ifdef::infra[] -<2> Specify the `<infra>` node label. -<3> Specify the infrastructure ID, `<infra>` node label, and region. -endif::infra[] -<4> Optional: Specify the compute machine set name to enable the use of availability sets. This setting only applies to new compute machines. -<5> Specify the image details for your compute machine set. If you want to use an Azure Marketplace image, see "Selecting an Azure Marketplace image". -<6> Specify an image that is compatible with your instance type. The Hyper-V generation V2 images created by the installation program have a `-gen2` suffix, while V1 images have the same name without the suffix. -<7> Specify the region to place machines on. -<8> Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. -<9> Optional: Specify custom tags in your machine set. Provide the tag name in `<custom_tag_name>` field and the corresponding tag value in `<custom_tag_value>` field. -ifdef::infra[] -<10> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-baremetal.adoc b/modules/machineset-yaml-baremetal.adoc deleted file mode 100644 index 9eaed6d51bde..000000000000 --- a/modules/machineset-yaml-baremetal.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-bare-metal.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-vsphere_{context}"] -= Sample YAML for a compute machine set custom resource on bare metal - -This sample YAML defines a compute machine set that runs on bare metal and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. 
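For any of these providers, the existing default compute machine sets are the most reliable source for provider-specific values such as images, instance sizes, and network names. A generic lookup might look like the following; the machine set name is illustrative:

[source,terminal]
----
$ oc get machinesets -n openshift-machine-api

$ oc get machineset <infrastructure_id>-worker-0 \
  -n openshift-machine-api -o yaml
----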
- -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - name: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - name: <infrastructure_id>-infra <2> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <2> -endif::infra[] - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <3> - machine.openshift.io/cluster-api-machine-type: <role> <3> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <3> - machine.openshift.io/cluster-api-machine-type: <infra> <3> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <2> -endif::infra[] - spec: - metadata: - creationTimestamp: null - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" <3> -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" <3> - taints: <4> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] - providerSpec: - value: - apiVersion: baremetal.cluster.k8s.io/v1alpha1 - hostSelector: {} - image: - checksum: http:/172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2.<md5sum> <4> - url: http://172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2 <5> - kind: BareMetalMachineProviderSpec - metadata: - creationTimestamp: null - userData: - name: worker-user-data ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> Specify the infrastructure ID and node label. -<3> Specify the node label to add. -<4> Edit the `checksum` URL to use the API VIP address. -<5> Edit the `url` URL to use the API VIP address. 
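One possible way to look up the API VIP address that the `checksum` and `url` callouts refer to is to read it from the cluster `infrastructure` resource. This is a sketch; the exact field can vary by cluster version and platform configuration:

[source,terminal]
----
$ oc get infrastructure cluster \
  -o jsonpath='{.status.platformStatus.baremetal.apiServerInternalIP}{"\n"}'
----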
-endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-gcp.adoc b/modules/machineset-yaml-gcp.adoc deleted file mode 100644 index 5855c0e1e950..000000000000 --- a/modules/machineset-yaml-gcp.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating-machineset-gcp.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-gcp_{context}"] -= Sample YAML for a compute machine set custom resource on GCP - -This sample YAML defines a compute machine set that runs in Google Cloud Platform (GCP) and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`,] -ifdef::infra[`node-role.kubernetes.io/infra: ""`,] -where -ifndef::infra[`<role>`] -ifdef::infra[`infra`] -is the node label to add. - -[discrete] -[id="cpmso-yaml-provider-spec-gcp-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. - -Infrastructure ID:: The `<infrastructure_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -Image path:: The `<path_to_image>` string is the path to the image that was used to create the disk. 
If you have the OpenShift CLI installed, you can obtain the path to the image by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api \ - -o jsonpath='{.spec.template.spec.providerSpec.value.disks[0].image}{"\n"}' \ - get machineset/<infrastructure_id>-worker-a ----- - -.Sample GCP `MachineSet` values -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - name: <infrastructure_id>-w-a - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> -endif::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a - spec: - metadata: - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" -endif::infra[] - providerSpec: - value: - apiVersion: gcpprovider.openshift.io/v1beta1 - canIPForward: false - credentialsSecret: - name: gcp-cloud-credentials - deletionProtection: false - disks: - - autoDelete: true - boot: true - image: <path_to_image> <3> - labels: null - sizeGb: 128 - type: pd-ssd - gcpMetadata: <4> - - key: <custom_metadata_key> - value: <custom_metadata_value> - kind: GCPMachineProviderSpec - machineType: n1-standard-4 - metadata: - creationTimestamp: null - networkInterfaces: - - network: <infrastructure_id>-network - subnetwork: <infrastructure_id>-worker-subnet - projectID: <project_name> <5> - region: us-central1 - serviceAccounts: - - email: <infrastructure_id>-w@<project_name>.iam.gserviceaccount.com - scopes: - - https://www.googleapis.com/auth/cloud-platform - tags: - - <infrastructure_id>-worker - userDataSecret: - name: worker-user-data - zone: us-central1-a -ifdef::infra[] - taints: <6> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] ----- -<1> For `<infrastructure_id>`, specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. -ifndef::infra[] -<2> For `<node>`, specify the node label to add. -endif::infra[] -ifdef::infra[] -<2> For `<infra>`, specify the `<infra>` node label. -endif::infra[] -<3> Specify the path to the image that is used in current compute machine sets. -+ -To use a GCP Marketplace image, specify the offer to use: -+ --- -* {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-48-x86-64-202210040145` -* {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-48-x86-64-202206140145` -* {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-48-x86-64-202206140145` --- -<4> Optional: Specify custom metadata in the form of a `key:value` pair. For example use cases, see the GCP documentation for link:https://cloud.google.com/compute/docs/metadata/setting-custom-metadata[setting custom metadata]. 
-<5> For `<project_name>`, specify the name of the GCP project that you use for your cluster. -ifdef::infra[] -<6> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-ibm-cloud.adoc b/modules/machineset-yaml-ibm-cloud.adoc deleted file mode 100644 index ad09b45644a7..000000000000 --- a/modules/machineset-yaml-ibm-cloud.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-ibm-cloud.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-ibm-cloud_{context}"] -= Sample YAML for a compute machine set custom resource on IBM Cloud - -This sample YAML defines a compute machine set that runs in a specified IBM Cloud zone in a region and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - name: <infrastructure_id>-<infra>-<region> <3> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<region> <3> -endif::infra[] - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<region> <3> -endif::infra[] - spec: - metadata: - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" -endif::infra[] - providerSpec: - value: - apiVersion: ibmcloudproviderconfig.openshift.io/v1beta1 - credentialsSecret: - name: 
ibmcloud-credentials - image: <infrastructure_id>-rhcos <4> - kind: IBMCloudMachineProviderSpec - primaryNetworkInterface: - securityGroups: - - <infrastructure_id>-sg-cluster-wide - - <infrastructure_id>-sg-openshift-net - subnet: <infrastructure_id>-subnet-compute-<zone> <5> - profile: <instance_profile> <6> - region: <region> <7> - resourceGroup: <resource_group> <8> - userDataSecret: - name: <role>-user-data <2> - vpc: <vpc_name> <9> - zone: <zone> <10> -ifdef::infra[] - taints: <11> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] ----- -<1> The infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> The node label to add. -<3> The infrastructure ID, node label, and region. -endif::infra[] -ifdef::infra[] -<2> The `<infra>` node label. -<3> The infrastructure ID, `<infra>` node label, and region. -endif::infra[] -<4> The custom {op-system-first} image that was used for cluster installation. -<5> The infrastructure ID and zone within your region to place machines on. Be sure that your region supports the zone that you specify. -<6> Specify the link:https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui[IBM Cloud instance profile]. -<7> Specify the region to place machines on. -<8> The resource group that machine resources are placed in. This is either an existing resource group specified at installation time, or an installer-created resource group named based on the infrastructure ID. -<9> The VPC name. -<10> Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. -ifdef::infra[] -<11> The taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== -endif::infra[] - - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-ibm-power-vs.adoc b/modules/machineset-yaml-ibm-power-vs.adoc deleted file mode 100644 index 233b30b15633..000000000000 --- a/modules/machineset-yaml-ibm-power-vs.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-ibm-power-vs.adoc - -:_content-type: REFERENCE -[id="machineset-yaml-ibm-power-vs_{context}"] -= Sample YAML for a compute machine set custom resource on {ibmpowerProductName} Virtual Server - -This sample YAML file defines a compute machine set that runs in a specified {ibmpowerProductName} Virtual Server zone in a region and creates nodes that are labeled with `node-role.kubernetes.io/<role>: ""`. - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and `<role>` is the node label to add. 
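Several of the IBM Cloud values described above, such as the VPC name and the resource group, can be copied from an installer-created compute machine set instead of being looked up in the IBM Cloud console. This is a sketch; the machine set name is illustrative:

[source,terminal]
----
$ oc -n openshift-machine-api get machineset <infrastructure_id>-worker-1 \
  -o jsonpath='{.spec.template.spec.providerSpec.value.vpc}{" "}{.spec.template.spec.providerSpec.value.resourceGroup}{"\n"}'
----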
- -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role>-<region> <3> - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> <3> - spec: - metadata: - labels: - node-role.kubernetes.io/<role>: "" - providerSpec: - value: - apiVersion: machine.openshift.io/v1 - credentialsSecret: - name: powervs-credentials - image: - name: rhcos-<infrastructure_id> <4> - type: Name - keyPairName: <infrastructure_id>-key - kind: PowerVSMachineProviderConfig - memoryGiB: 32 - network: - regex: ^DHCPSERVER[0-9a-z]{32}_Private$ - type: RegEx - processorType: Shared - processors: "0.5" - serviceInstance: - id: <ibm_power_vs_service_instance_id> - type: ID <5> - systemType: s922 - userDataSecret: - name: <role>-user-data ----- -<1> The infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -<2> The node label to add. -<3> The infrastructure ID, node label, and region. -<4> The custom {op-system-first} image that was used for cluster installation. -<5> The infrastructure ID within your region to place machines on. diff --git a/modules/machineset-yaml-nutanix.adoc b/modules/machineset-yaml-nutanix.adoc deleted file mode 100644 index cd690a5746f3..000000000000 --- a/modules/machineset-yaml-nutanix.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-nutanix.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-nutanix_{context}"] -= Sample YAML for a compute machine set custom resource on Nutanix - -This sample YAML defines a Nutanix compute machine set that creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[discrete] -[id="machineset-yaml-nutanix-oc_{context}"] -== Values obtained by using the OpenShift CLI - -In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI (`oc`). - -Infrastructure ID:: The `<infrastructure_id>` string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 
If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> - name: <infrastructure_id>-<role>-<zone> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> - name: <infrastructure_id>-<infra>-<zone> <3> -endif::infra[] - namespace: openshift-machine-api - annotations: <4> - machine.openshift.io/memoryMb: "16384" - machine.openshift.io/vCPU: "4" -spec: - replicas: 3 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<zone> -endif::infra[] - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> - machine.openshift.io/cluster-api-machine-type: <role> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> - machine.openshift.io/cluster-api-machine-type: <infra> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<infra>-<zone> -endif::infra[] - spec: - metadata: - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" -endif::infra[] - providerSpec: - value: - apiVersion: machine.openshift.io/v1 - bootType: "" <5> - categories: <6> - - key: <category_name> - value: <category_value> - cluster: <7> - type: uuid - uuid: <cluster_uuid> - credentialsSecret: - name: nutanix-creds-secret - image: - name: <infrastructure_id>-rhcos <8> - type: name - kind: NutanixMachineProviderConfig - memorySize: 16Gi <9> - project: <10> - type: name - name: <project_name> - subnets: - - type: uuid - uuid: <subnet_uuid> - systemDiskSize: 120Gi <11> - userDataSecret: - name: <user_data_secret> <12> - vcpuSockets: 4 <13> - vcpusPerSocket: 1 <14> -ifdef::infra[] - taints: <15> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] ----- -<1> For `<infrastructure_id>`, specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. -ifndef::infra[] -<2> Specify the node label to add. -<3> Specify the infrastructure ID, node label, and zone. -endif::infra[] -ifdef::infra[] -<2> Specify the `<infra>` node label. -<3> Specify the infrastructure ID, `<infra>` node label, and zone. -endif::infra[] -<4> Annotations for the cluster autoscaler. -<5> Specifies the boot type that the compute machines use. For more information about boot types, see link:https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000H3K9SAK[Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment]. Valid values are `Legacy`, `SecureBoot`, or `UEFI`. The default is `Legacy`. 
-+ -[NOTE] -==== -You must use the `Legacy` boot type in {product-title} {product-version}. -==== -<6> Specify one or more Nutanix Prism categories to apply to compute machines. This stanza requires `key` and `value` parameters for a category key-value pair that exists in Prism Central. For more information about categories, see link:https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html[Category management]. -<7> Specify a Nutanix Prism Element cluster configuration. In this example, the cluster type is `uuid`, so there is a `uuid` stanza. -<8> Specify the image to use. Use an image from an existing default compute machine set for the cluster. -<9> Specify the amount of memory for the cluster in Gi. -<10> Specify the Nutanix project that you use for your cluster. In this example, the project type is `name`, so there is a `name` stanza. -<11> Specify the size of the system disk in Gi. -<12> Specify the name of the secret in the user data YAML file that is in the `openshift-machine-api` namespace. Use the value that installation program populates in the default compute machine set. -<13> Specify the number of vCPU sockets. -<14> Specify the number of vCPUs per socket. -ifdef::infra[] -<15> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-osp-sr-iov-port-security.adoc b/modules/machineset-yaml-osp-sr-iov-port-security.adoc deleted file mode 100644 index c4cd0510b447..000000000000 --- a/modules/machineset-yaml-osp-sr-iov-port-security.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-osp.adoc - -[id="machineset-yaml-osp-sr-iov-port-security_{context}"] -= Sample YAML for SR-IOV deployments where port security is disabled - -To create single-root I/O virtualization (SR-IOV) ports on a network that has port security disabled, define a compute machine set that includes the ports as items in the `spec.template.spec.providerSpec.value.ports` list. This difference from the standard SR-IOV compute machine set is due to the automatic security group and allowed address pair configuration that occurs for ports that are created by using the network and subnet interfaces. - -Ports that you define for machines subnets require: - -* Allowed address pairs for the API and ingress virtual IP ports -* The compute security group -* Attachment to the machines network and subnet - -[NOTE] -==== -Only parameters that are specific to SR-IOV deployments where port security is disabled are described in this sample. To review a more general sample, see Sample YAML for a compute machine set custom resource that uses SR-IOV on {rh-openstack}". 
-==== - -.An example compute machine set that uses SR-IOV networks and has port security disabled -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - name: <infrastructure_id>-<node_role> - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - spec: - metadata: {} - providerSpec: - value: - apiVersion: openstackproviderconfig.openshift.io/v1alpha1 - cloudName: openstack - cloudsSecret: - name: openstack-cloud-credentials - namespace: openshift-machine-api - flavor: <nova_flavor> - image: <glance_image_name_or_location> - kind: OpenstackProviderSpec - ports: - - allowedAddressPairs: <1> - - ipAddress: <API_VIP_port_IP> - - ipAddress: <ingress_VIP_port_IP> - fixedIPs: - - subnetID: <machines_subnet_UUID> <2> - nameSuffix: nodes - networkID: <machines_network_UUID> <2> - securityGroups: - - <compute_security_group_UUID> <3> - - networkID: <SRIOV_network_UUID> - nameSuffix: sriov - fixedIPs: - - subnetID: <SRIOV_subnet_UUID> - tags: - - sriov - vnicType: direct - portSecurity: False - primarySubnet: <machines_subnet_UUID> - serverMetadata: - Name: <infrastructure_ID>-<node_role> - openshiftClusterID: <infrastructure_id> - tags: - - openshiftClusterID=<infrastructure_id> - trunk: false - userDataSecret: - name: worker-user-data ----- -<1> Specify allowed address pairs for the API and ingress ports. -<2> Specify the machines network and subnet. -<3> Specify the compute machines security group. - -[NOTE] -==== -Trunking is enabled for ports that are created by entries in the networks and subnets lists. The names of ports that are created from these lists follow the pattern `<machine_name>-<nameSuffix>`. The `nameSuffix` field is required in port definitions. - -You can enable trunking for each port. - -Optionally, you can add tags to ports as part of their `tags` lists. -==== - -If your cluster uses Kuryr and the {rh-openstack} SR-IOV network has port security disabled, the primary port for compute machines must have: - -* The value of the `spec.template.spec.providerSpec.value.networks.portSecurityEnabled` parameter set to `false`. - -* For each subnet, the value of the `spec.template.spec.providerSpec.value.networks.subnets.portSecurityEnabled` parameter set to `false`. - -* The value of `spec.template.spec.providerSpec.value.securityGroups` set to empty: `[]`. - -.An example section of a compute machine set for a cluster on Kuryr that uses SR-IOV and has port security disabled -[source,yaml] ----- -... - networks: - - subnets: - - uuid: <machines_subnet_UUID> - portSecurityEnabled: false - portSecurityEnabled: false - securityGroups: [] -... ----- - -In that case, you can apply the compute security group to the primary VM interface after the VM is created. 
For example, from a command line: -[source,terminal] ----- -$ openstack port set --enable-port-security --security-group <infrastructure_id>-<node_role> <main_port_ID> ----- - -[IMPORTANT] -==== -After you deploy compute machines that are SR-IOV-capable, you must label them as such. For example, from a command line, enter: -[source,terminal] ----- -$ oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable="true" ----- -==== \ No newline at end of file diff --git a/modules/machineset-yaml-osp-sr-iov.adoc b/modules/machineset-yaml-osp-sr-iov.adoc deleted file mode 100644 index 285d18adcd57..000000000000 --- a/modules/machineset-yaml-osp-sr-iov.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-osp.adoc - -[id="machineset-yaml-osp-sr-iov_{context}"] -= Sample YAML for a compute machine set custom resource that uses SR-IOV on {rh-openstack} - -If you configured your cluster for single-root I/O virtualization (SR-IOV), you can create compute machine sets that use that technology. - -This sample YAML defines a compute machine set that uses SR-IOV networks. The nodes that it creates are labeled with `node-role.openshift.io/<node_role>: ""` - -In this sample, `infrastructure_id` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and `node_role` is the node label to add. - -The sample assumes two SR-IOV networks that are named "radio" and "uplink". The networks are used in port definitions in the `spec.template.spec.providerSpec.value.ports` list. - -[NOTE] -==== -Only parameters that are specific to SR-IOV deployments are described in this sample. To review a more general sample, see "Sample YAML for a compute machine set custom resource on {rh-openstack}". 
-==== - -.An example compute machine set that uses SR-IOV networks -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - name: <infrastructure_id>-<node_role> - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <node_role> - machine.openshift.io/cluster-api-machine-type: <node_role> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> - spec: - metadata: - providerSpec: - value: - apiVersion: openstackproviderconfig.openshift.io/v1alpha1 - cloudName: openstack - cloudsSecret: - name: openstack-cloud-credentials - namespace: openshift-machine-api - flavor: <nova_flavor> - image: <glance_image_name_or_location> - serverGroupID: <optional_UUID_of_server_group> - kind: OpenstackProviderSpec - networks: - - subnets: - - UUID: <machines_subnet_UUID> - ports: - - networkID: <radio_network_UUID> <1> - nameSuffix: radio - fixedIPs: - - subnetID: <radio_subnet_UUID> <2> - tags: - - sriov - - radio - vnicType: direct <3> - portSecurity: false <4> - - networkID: <uplink_network_UUID> <1> - nameSuffix: uplink - fixedIPs: - - subnetID: <uplink_subnet_UUID> <2> - tags: - - sriov - - uplink - vnicType: direct <3> - portSecurity: false <4> - primarySubnet: <machines_subnet_UUID> - securityGroups: - - filter: {} - name: <infrastructure_id>-<node_role> - serverMetadata: - Name: <infrastructure_id>-<node_role> - openshiftClusterID: <infrastructure_id> - tags: - - openshiftClusterID=<infrastructure_id> - trunk: true - userDataSecret: - name: <node_role>-user-data - availabilityZone: <optional_openstack_availability_zone> ----- -<1> Enter a network UUID for each port. -<2> Enter a subnet UUID for each port. -<3> The value of the `vnicType` parameter must be `direct` for each port. -<4> The value of the `portSecurity` parameter must be `false` for each port. -+ -You cannot set security groups and allowed address pairs for ports when port security is disabled. Setting security groups on the instance applies the groups to all ports that are attached to it. - -[IMPORTANT] -==== -After you deploy compute machines that are SR-IOV-capable, you must label them as such. For example, from a command line, enter: -[source,terminal] ----- -$ oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable="true" ----- -==== - -[NOTE] -==== -Trunking is enabled for ports that are created by entries in the networks and subnets lists. The names of ports that are created from these lists follow the pattern `<machine_name>-<nameSuffix>`. The `nameSuffix` field is required in port definitions. - -You can enable trunking for each port. - -Optionally, you can add tags to ports as part of their `tags` lists. 
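For example, after the machines are provisioned, you can list the ports that were created for a machine by running a command similar to the following from a command line:

[source,terminal]
----
$ openstack port list --server <machine_name>
----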
-==== \ No newline at end of file diff --git a/modules/machineset-yaml-osp.adoc b/modules/machineset-yaml-osp.adoc deleted file mode 100644 index 70a7daf68d2e..000000000000 --- a/modules/machineset-yaml-osp.adoc +++ /dev/null @@ -1,156 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-osp.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-osp_{context}"] -= Sample YAML for a compute machine set custom resource on {rh-openstack} - -This sample YAML defines a compute machine set that runs on {rh-openstack-first} and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - name: <infrastructure_id>-infra <3> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <3> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <3> -endif::infra[] - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <3> - spec: -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <2> - machine.openshift.io/cluster-api-machine-type: <infra> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <3> - spec: - metadata: - creationTimestamp: null - labels: - node-role.kubernetes.io/infra: "" - taints: <4> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] - providerSpec: - value: - apiVersion: openstackproviderconfig.openshift.io/v1alpha1 - cloudName: openstack - cloudsSecret: - name: openstack-cloud-credentials - namespace: openshift-machine-api - flavor: <nova_flavor> - image: <glance_image_name_or_location> -ifndef::infra[] - serverGroupID: <optional_UUID_of_server_group> <4> -endif::infra[] -ifdef::infra[] - serverGroupID: <optional_UUID_of_server_group> <5> -endif::infra[] - kind: OpenstackProviderSpec -ifndef::infra[] - networks: <5> -endif::infra[] -ifdef::infra[] - networks: <6> -endif::infra[] - - filter: {} - subnets: - - filter: - name: <subnet_name> - tags: openshiftClusterID=<infrastructure_id> <1> -ifndef::infra[] - primarySubnet: <rhosp_subnet_UUID> <6> -endif::infra[] 
-ifdef::infra[] - primarySubnet: <rhosp_subnet_UUID> <7> -endif::infra[] - securityGroups: - - filter: {} - name: <infrastructure_id>-worker <1> - serverMetadata: - Name: <infrastructure_id>-worker <1> - openshiftClusterID: <infrastructure_id> <1> - tags: - - openshiftClusterID=<infrastructure_id> <1> - trunk: true - userDataSecret: - name: worker-user-data <2> - availabilityZone: <optional_openstack_availability_zone> ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> Specify the node label to add. -<3> Specify the infrastructure ID and node label. -<4> To set a server group policy for the MachineSet, enter the value that is returned from -link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/command_line_interface_reference/server#server_group_create[creating a server group]. For most deployments, `anti-affinity` or `soft-anti-affinity` policies are recommended. -<5> Required for deployments to multiple networks. To specify multiple networks, add another entry in the networks array. Also, you must include the network that is used as the `primarySubnet` value. -<6> Specify the {rh-openstack} subnet that you want the endpoints of nodes to be published on. Usually, this is the same subnet that is used as the value of `machinesSubnet` in the `install-config.yaml` file. -endif::infra[] -ifdef::infra[] -<2> Specify the `<infra>` node label. -<3> Specify the infrastructure ID and `<infra>` node label. -<4> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== - -<5> To set a server group policy for the MachineSet, enter the value that is returned from -link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.0/html/command_line_interface_reference/server#server_group_create[creating a server group]. For most deployments, `anti-affinity` or `soft-anti-affinity` policies are recommended. -<6> Required for deployments to multiple networks. If deploying to multiple networks, this list must include the network that is used as the `primarySubnet` value. -<7> Specify the {rh-openstack} subnet that you want the endpoints of nodes to be published on. Usually, this is the same subnet that is used as the value of `machinesSubnet` in the `install-config.yaml` file. 
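+
If you need to look up the subnet UUID, you can query it by name with a command similar to the following, where `<subnet_name>` is the name of the machines subnet:
+
[source,terminal]
----
$ openstack subnet show <subnet_name> -f value -c id
----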
-endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/machineset-yaml-rhv.adoc b/modules/machineset-yaml-rhv.adoc deleted file mode 100644 index 904ffbdb1500..000000000000 --- a/modules/machineset-yaml-rhv.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-rhv.adoc - -[id="machineset-yaml-rhv_{context}"] -= Sample YAML for a compute machine set custom resource on {rh-virtualization} - -This sample YAML defines a compute machine set that runs on {rh-virtualization} and creates nodes that are labeled with `node-role.kubernetes.io/<node_role>: ""`. - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and `<role>` is the node label to add. - -[source,yaml,subs="+quotes"] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - name: <infrastructure_id>-<role> <3> - namespace: openshift-machine-api -spec: - replicas: <number_of_replicas> <4> - Selector: <5> - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <3> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machine-role: <role> <2> - machine.openshift.io/cluster-api-machine-type: <role> <2> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <3> - spec: - metadata: - labels: - node-role.kubernetes.io/<role>: "" <2> - providerSpec: - value: - apiVersion: ovirtproviderconfig.machine.openshift.io/v1beta1 - cluster_id: <ovirt_cluster_id> <6> - template_name: <ovirt_template_name> <7> - sparse: <boolean_value> <8> - format: <raw_or_cow> <9> - cpu: <10> - sockets: <number_of_sockets> <11> - cores: <number_of_cores> <12> - threads: <number_of_threads> <13> - memory_mb: <memory_size> <14> - guaranteed_memory_mb: <memory_size> <15> - os_disk: <16> - size_gb: <disk_size> <17> - storage_domain_id: <storage_domain_UUID> <18> - network_interfaces: <19> - vnic_profile_id: <vnic_profile_id> <20> - credentialsSecret: - name: ovirt-credentials <21> - kind: OvirtMachineProviderSpec - type: <workload_type> <22> - auto_pinning_policy: <auto_pinning_policy> <23> - hugepages: <hugepages> <24> - affinityGroupsNames: - - compute <25> - userDataSecret: - name: worker-user-data ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- - -<2> Specify the node label to add. - -<3> Specify the infrastructure ID and node label. These two strings together cannot be longer than 35 characters. - -<4> Specify the number of machines to create. - -<5> Selector for the machines. 
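+
The selector must match the labels that are defined in `spec.template.metadata.labels`. For example, you can list the machines that a compute machine set currently selects by using the same label:
+
[source,terminal]
----
$ oc get machines -n openshift-machine-api \
  -l machine.openshift.io/cluster-api-machineset=<infrastructure_id>-<role>
----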
- -<6> Specify the UUID for the {rh-virtualization} cluster to which this VM instance belongs. - -<7> Specify the {rh-virtualization} VM template to use to create the machine. - -<8> Setting this option to `false` enables preallocation of disks. The default is `true`. Setting `sparse` to `true` with `format` set to `raw` is not available for block storage domains. The `raw` format writes the entire virtual disk to the underlying physical disk. - -<9> Can be set to `cow` or `raw`. The default is `cow`. The `cow` format is optimized for virtual machines. -+ -[NOTE] -==== -Preallocating disks on file storage domains writes zeroes to the file. This might not actually preallocate disks depending on the underlying storage. -==== -<10> Optional: The CPU field contains the CPU configuration, including sockets, cores, and threads. - -<11> Optional: Specify the number of sockets for a VM. - -<12> Optional: Specify the number of cores per socket. - -<13> Optional: Specify the number of threads per core. - -<14> Optional: Specify the size of a VM's memory in MiB. - -<15> Optional: Specify the size of a virtual machine's guaranteed memory in MiB. This is the amount of memory that is guaranteed not to be drained by the ballooning mechanism. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/administration_guide#memory_ballooning[Memory Ballooning] and link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/administration_guide#Cluster_Optimization_Settings_Explained[Optimization Settings Explained]. -+ -[NOTE] -==== -If you are using a version earlier than {rh-virtualization} 4.4.8, see link:https://access.redhat.com/articles/6454811[Guaranteed memory requirements for OpenShift on Red Hat Virtualization clusters]. -==== -<16> Optional: Root disk of the node. - -<17> Optional: Specify the size of the bootable disk in GiB. - -<18> Optional: Specify the UUID of the storage domain for the compute node's disks. If none is provided, the compute node is created on the same storage domain as the control nodes. (default) - -<19> Optional: List of the network interfaces of the VM. If you include this parameter, {product-title} discards all network interfaces from the template and creates new ones. - -<20> Optional: Specify the vNIC profile ID. - -<21> Specify the name of the secret object that holds the {rh-virtualization} credentials. - -<22> Optional: Specify the workload type for which the instance is optimized. This value affects the `{rh-virtualization} VM` parameter. Supported values: `desktop`, `server` (default), `high_performance`. `high_performance` improves performance on the VM. Limitations exist, for example, you cannot access the VM with a graphical console. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Configuring_High_Performance_Virtual_Machines_Templates_and_Pools[Configuring High Performance Virtual Machines, Templates, and Pools] in the _Virtual Machine Management Guide_. -<23> Optional: AutoPinningPolicy defines the policy that automatically sets CPU and NUMA settings, including pinning to the host for this instance. Supported values: `none`, `resize_and_pin`. 
For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Setting_NUMA_Nodes[Setting NUMA Nodes] in the _Virtual Machine Management Guide_. -<24> Optional: Hugepages is the size in KiB for defining hugepages in a VM. Supported values: `2048` or `1048576`. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html-single/virtual_machine_management_guide/index#Configuring_Huge_Pages[Configuring Huge Pages] in the _Virtual Machine Management Guide_. -<25> Optional: A list of affinity group names to be applied to the VMs. The affinity groups must exist in oVirt. - -[NOTE] -==== -Because {rh-virtualization} uses a template when creating a VM, if you do not specify a value for an optional parameter, {rh-virtualization} uses the value for that parameter that is specified in the template. -==== diff --git a/modules/machineset-yaml-vsphere.adoc b/modules/machineset-yaml-vsphere.adoc deleted file mode 100644 index bad3dd990311..000000000000 --- a/modules/machineset-yaml-vsphere.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-infrastructure-machinesets.adoc -// * machine_management/creating_machinesets/creating-machineset-vsphere.adoc - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:infra: -endif::[] - -:_content-type: REFERENCE -[id="machineset-yaml-vsphere_{context}"] -= Sample YAML for a compute machine set custom resource on vSphere - -This sample YAML defines a compute machine set that runs on VMware vSphere and creates nodes that are labeled with -ifndef::infra[`node-role.kubernetes.io/<role>: ""`.] -ifdef::infra[`node-role.kubernetes.io/infra: ""`.] - -In this sample, `<infrastructure_id>` is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and -ifndef::infra[`<role>`] -ifdef::infra[`<infra>`] -is the node label to add. 
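If you want to reuse values such as the template, network, and workspace settings that the installation program set, you can first review an existing compute machine set. For example, run commands similar to the following, where the machine set name is a placeholder:

[source,terminal]
----
$ oc get machinesets -n openshift-machine-api
$ oc get machineset <machine_set_name> -n openshift-machine-api -o yaml
----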
- -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - name: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - name: <infrastructure_id>-infra <2> -endif::infra[] - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <2> -endif::infra[] - template: - metadata: - creationTimestamp: null - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> -ifndef::infra[] - machine.openshift.io/cluster-api-machine-role: <role> <3> - machine.openshift.io/cluster-api-machine-type: <role> <3> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> <2> -endif::infra[] -ifdef::infra[] - machine.openshift.io/cluster-api-machine-role: <infra> <3> - machine.openshift.io/cluster-api-machine-type: <infra> <3> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-infra <2> -endif::infra[] - spec: - metadata: - creationTimestamp: null - labels: -ifndef::infra[] - node-role.kubernetes.io/<role>: "" <3> -endif::infra[] -ifdef::infra[] - node-role.kubernetes.io/infra: "" <3> - taints: <4> - - key: node-role.kubernetes.io/infra - effect: NoSchedule -endif::infra[] - providerSpec: - value: - apiVersion: vsphereprovider.openshift.io/v1beta1 - credentialsSecret: - name: vsphere-cloud-credentials - diskGiB: 120 - kind: VSphereMachineProviderSpec - memoryMiB: 8192 - metadata: - creationTimestamp: null - network: - devices: -ifndef::infra[] - - networkName: "<vm_network_name>" <4> -endif::infra[] -ifdef::infra[] - - networkName: "<vm_network_name>" <5> -endif::infra[] - numCPUs: 4 - numCoresPerSocket: 1 - snapshot: "" -ifndef::infra[] - template: <vm_template_name> <5> - userDataSecret: - name: worker-user-data - workspace: - datacenter: <vcenter_datacenter_name> <6> - datastore: <vcenter_datastore_name> <7> - folder: <vcenter_vm_folder_path> <8> - resourcepool: <vsphere_resource_pool> <9> - server: <vcenter_server_ip> <10> -endif::infra[] -ifdef::infra[] - template: <vm_template_name> <6> - userDataSecret: - name: worker-user-data - workspace: - datacenter: <vcenter_datacenter_name> <7> - datastore: <vcenter_datastore_name> <8> - folder: <vcenter_vm_folder_path> <9> - resourcepool: <vsphere_resource_pool> <10> - server: <vcenter_server_ip> <11> -endif::infra[] ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI (`oc`) installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster ----- -ifndef::infra[] -<2> Specify the infrastructure ID and node label. -<3> Specify the node label to add. -<4> Specify the vSphere VM network to deploy the compute machine set to. This VM network must be where other compute machines reside in the cluster. -<5> Specify the vSphere VM template to use, such as `user-5ddjd-rhcos`. -<6> Specify the vCenter Datacenter to deploy the compute machine set on. -<7> Specify the vCenter Datastore to deploy the compute machine set on. 
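+
To check which datacenter and datastore the existing compute machines use, you can inspect the `workspace` stanza of a current machine by running a command similar to the following, where the machine name is a placeholder:
+
[source,terminal]
----
$ oc get machine <machine_name> -n openshift-machine-api \
  -o jsonpath='{.spec.providerSpec.value.workspace}{"\n"}'
----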
-<8> Specify the path to the vSphere VM folder in vCenter, such as `/dc1/vm/user-inst-5ddjd`. -<9> Specify the vSphere resource pool for your VMs. -<10> Specify the vCenter server IP or fully qualified domain name. -endif::infra[] -ifdef::infra[] -<2> Specify the infrastructure ID and `<infra>` node label. -<3> Specify the `<infra>` node label. -<4> Specify a taint to prevent user workloads from being scheduled on infra nodes. -+ -[NOTE] -==== -After adding the `NoSchedule` taint on the infrastructure node, existing DNS pods running on that node are marked as `misscheduled`. You must either delete or link:https://access.redhat.com/solutions/6592171[add toleration on `misscheduled` DNS pods]. -==== - -<5> Specify the vSphere VM network to deploy the compute machine set to. This VM network must be where other compute machines reside in the cluster. -<6> Specify the vSphere VM template to use, such as `user-5ddjd-rhcos`. -<7> Specify the vCenter Datacenter to deploy the compute machine set on. -<8> Specify the vCenter Datastore to deploy the compute machine set on. -<9> Specify the path to the vSphere VM folder in vCenter, such as `/dc1/vm/user-inst-5ddjd`. -<10> Specify the vSphere resource pool for your VMs. -<11> Specify the vCenter server IP or fully qualified domain name. -endif::infra[] - -ifeval::["{context}" == "creating-infrastructure-machinesets"] -:!infra: -endif::[] -ifeval::["{context}" == "cluster-tasks"] -:!infra: -endif::[] diff --git a/modules/maintaining-bare-metal-hosts.adoc b/modules/maintaining-bare-metal-hosts.adoc deleted file mode 100644 index 7de4a21ff3f3..000000000000 --- a/modules/maintaining-bare-metal-hosts.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// scalability_and_performance/managing-bare-metal-hosts.adoc - -[id="maintaining-bare-metal-hosts_{context}"] -= Maintaining bare metal hosts - -You can maintain the details of the bare metal hosts in your cluster from the {product-title} web console. Navigate to *Compute* -> *Bare Metal Hosts*, and select a task from the *Actions* drop down menu. Here you can manage items such as BMC details, boot MAC address for the host, enable power management, and so on. You can also review the details of the network interfaces and drives for the host. - -You can move a bare metal host into maintenance mode. When you move a host into maintenance mode, the scheduler moves all managed workloads off the corresponding bare metal node. No new workloads are scheduled while in maintenance mode. - -You can deprovision a bare metal host in the web console. Deprovisioning a host does the following actions: - -. Annotates the bare metal host CR with `cluster.k8s.io/delete-machine: true` -. Scales down the related compute machine set - -[NOTE] -==== -Powering off the host without first moving the daemon set and unmanaged static pods to another node can cause service disruption and loss of data. -==== diff --git a/modules/managing-dedicated-administrators.adoc b/modules/managing-dedicated-administrators.adoc deleted file mode 100644 index bf1179bb7a67..000000000000 --- a/modules/managing-dedicated-administrators.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * osd_cluster_admin/osd-admin-roles.adoc - -:_content-type: PROCEDURE -[id="managing-dedicated-administrators_{context}"] -= Managing {product-title} administrators - -Administrator roles are managed using a `cluster-admin` or `dedicated-admin` group on the cluster. 
Existing members of this group can edit membership through {cluster-manager-url}. - -// TODO: These two procedures should be separated and created as proper procedure modules. - -[id="dedicated-administrators-adding-user_{context}"] -== Adding a user - -.Procedure - -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Select the *Cluster Roles and Access* tab and click *Add user*. -. Enter the user name and select the group. -. Click *Add user*. - - -[NOTE] -==== -Adding a user to the `cluster-admin` group can take several minutes to complete. -==== - -[id="dedicated-administrators-removing-user_{context}"] -== Removing a user - -.Procedure - -. Navigate to the *Cluster Details* page and *Access Control* tab. -. Click the Options menu {kebab} to the right of the user and group combination and click *Delete*. diff --git a/modules/manual-configuration-of-cli-profiles.adoc b/modules/manual-configuration-of-cli-profiles.adoc deleted file mode 100644 index 82c1567578ce..000000000000 --- a/modules/manual-configuration-of-cli-profiles.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/openshift_cli/managing-cli-profiles.adoc - -:_content-type: CONCEPT -[id="manual-configuration-of-cli-profiles_{context}"] -= Manual configuration of CLI profiles - -[NOTE] -==== -This section covers more advanced usage of CLI configurations. In most situations, you can use the `oc login` and `oc project` commands to log in and switch between contexts and projects. -==== - -If you want to manually configure your CLI config files, you can use the `oc config` command instead of directly modifying the files. The `oc config` command includes a number of helpful sub-commands for this purpose: - -.CLI configuration subcommands -[cols="1,8",options="header"] -|=== - -|Subcommand |Usage - -a|`set-cluster` -a|Sets a cluster entry in the CLI config file. If the referenced cluster -nickname already exists, the specified information is merged in. -[source,terminal,options="nowrap"] ----- -$ oc config set-cluster <cluster_nickname> [--server=<master_ip_or_fqdn>] -[--certificate-authority=<path/to/certificate/authority>] -[--api-version=<apiversion>] [--insecure-skip-tls-verify=true] ----- - -a|`set-context` -a|Sets a context entry in the CLI config file. If the referenced context -nickname already exists, the specified information is merged in. -[source,terminal,options="nowrap"] ----- -$ oc config set-context <context_nickname> [--cluster=<cluster_nickname>] -[--user=<user_nickname>] [--namespace=<namespace>] ----- - -a|`use-context` -a|Sets the current context using the specified context nickname. -[source,terminal,options="nowrap"] ----- -$ oc config use-context <context_nickname> ----- - -a|`set` -a|Sets an individual value in the CLI config file. -[source,terminal,options="nowrap"] ----- -$ oc config set <property_name> <property_value> ----- -The `<property_name>` is a dot-delimited name where each token represents either an attribute name or a map key. The `<property_value>` is the new value being set. - -a|`unset` -a|Unsets individual values in the CLI config file. -[source,terminal,options="nowrap"] ----- -$ oc config unset <property_name> ----- -The `<property_name>` is a dot-delimited name where each token represents either an attribute name or a map key. - -a|`view` -a|Displays the merged CLI configuration currently in use. 
-[source,terminal,options="nowrap"] ----- -$ oc config view ----- - -Displays the result of the specified CLI config file. -[source,terminal,options="nowrap"] ----- -$ oc config view --config=<specific_filename> ----- -|=== - -.Example usage - -* Log in as a user that uses an access token. -This token is used by the `alice` user: - -[source,terminal,options="nowrap"] ----- -$ oc login https://openshift1.example.com --token=ns7yVhuRNpDM9cgzfhhxQ7bM5s7N2ZVrkZepSRf4LC0 ----- - -* View the cluster entry automatically created: - -[source,terminal,options="nowrap"] ----- -$ oc config view ----- - -.Example output -[source,terminal] ----- -apiVersion: v1 -clusters: -- cluster: - insecure-skip-tls-verify: true - server: https://openshift1.example.com - name: openshift1-example-com -contexts: -- context: - cluster: openshift1-example-com - namespace: default - user: alice/openshift1-example-com - name: default/openshift1-example-com/alice -current-context: default/openshift1-example-com/alice -kind: Config -preferences: {} -users: -- name: alice/openshift1.example.com - user: - token: ns7yVhuRNpDM9cgzfhhxQ7bM5s7N2ZVrkZepSRf4LC0 ----- - -* Update the current context to have users log in to the desired namespace: - -[source,terminal] ----- -$ oc config set-context `oc config current-context` --namespace=<project_name> ----- - -* Examine the current context, to confirm that the changes are implemented: - -[source,terminal] ----- -$ oc whoami -c ----- - -All subsequent CLI operations uses the new context, unless otherwise specified by overriding CLI options or until the context is switched. diff --git a/modules/manually-configure-iam-nutanix.adoc b/modules/manually-configure-iam-nutanix.adoc deleted file mode 100644 index 24cc6c72979a..000000000000 --- a/modules/manually-configure-iam-nutanix.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_nutanix/configuring-iam-nutanix.adoc -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: PROCEDURE -[id="manually-create-iam-nutanix_{context}"] -= Configuring IAM for Nutanix - -Installing the cluster requires that the Cloud Credential Operator (CCO) operate in manual mode. While the installation program configures the CCO for manual mode, you must specify the identity and access management secrets. - -.Prerequisites - -* You have configured the `ccoctl` binary. - -* You have an `install-config.yaml` file. - -.Procedure - -. Create a YAML file that contains the credentials data in the following format: -+ -.Credentials data format -[source,yaml] ----- -credentials: -- type: basic_auth <1> - data: - prismCentral: <2> - username: <username_for_prism_central> - password: <password_for_prism_central> - prismElements: <3> - - name: <name_of_prism_element> - username: <username_for_prism_element> - password: <password_for_prism_element> ----- -<1> Specify the authentication type. Only basic authentication is supported. -<2> Specify the Prism Central credentials. -<3> Optional: Specify the Prism Element credentials. - -. 
Extract the list of `CredentialsRequest` custom resources (CRs) from the {product-title} release image by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract --credentials-requests --cloud=nutanix \// ---to=<path_to_directory_with_list_of_credentials_requests>/credrequests \ <1> -quay.io/<path_to>/ocp-release:<version> ----- -+ -<1> Specify the path to the directory that contains the files for the component `CredentialsRequests` objects. If the specified directory does not exist, this command creates it. -+ -.Sample `CredentialsRequest` object -[source,yaml] ----- - apiVersion: cloudcredential.openshift.io/v1 - kind: CredentialsRequest - metadata: - annotations: - include.release.openshift.io/self-managed-high-availability: "true" - labels: - controller-tools.k8s.io: "1.0" - name: openshift-machine-api-nutanix - namespace: openshift-cloud-credential-operator - spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: NutanixProviderSpec - secretRef: - name: nutanix-credentials - namespace: openshift-machine-api ----- - -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on Nutanix -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> ----- -+ -<1> The Machine API Operator CR is required. - -. Use the `ccoctl` tool to process all of the `CredentialsRequest` objects in the `credrequests` directory by running the following command: -+ -[source,terminal] ----- -$ ccoctl nutanix create-shared-secrets \ ---credentials-requests-dir=<path_to_directory_with_list_of_credentials_requests>/credrequests \// <1> ---output-dir=<ccoctl_output_dir> \// <2> ---credentials-source-filepath=<path_to_credentials_file> <3> ----- -+ -<1> Specify the path to the directory that contains the files for the component `CredentialsRequests` objects. -<2> Specify the directory that contains the files of the component credentials secrets, under the `manifests` directory. By default, the `ccoctl` tool creates objects in the directory in which the commands are run. To create the objects in a different directory, use the `--output-dir` flag. -<3> Optional: Specify the directory that contains the credentials data YAML file. By default, `ccoctl` expects this file to be in `<home_directory>/.nutanix/credentials`. To specify a different directory, use the `--credentials-source-filepath` flag. - -. Edit the `install-config.yaml` configuration file so that the `credentialsMode` parameter is set to `Manual`. -+ -.Example `install-config.yaml` configuration file -[source,yaml] ----- -apiVersion: v1 -baseDomain: cluster1.example.com -credentialsMode: Manual <1> -... ----- -<1> Add this line to set the `credentialsMode` parameter to `Manual`. - -. Create the installation manifests by running the following command: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_directory> <1> ----- -<1> Specify the path to the directory that contains the `install-config.yaml` file for your cluster. - -. Copy the generated credential files to the target manifests directory by running the following command: -+ -[source,terminal] ----- -$ cp <ccoctl_output_dir>/manifests/*credentials.yaml ./<installation_directory>/manifests ----- - -.Verification - -* Ensure that the appropriate secrets exist in the `manifests` directory. 
-+ -[source,terminal] ----- -$ ls ./<installation_directory>/manifests ----- -+ -.Example output -+ -[source,terminal] ----- -total 64 --rw-r----- 1 <user> <user> 2335 Jul 8 12:22 cluster-config.yaml --rw-r----- 1 <user> <user> 161 Jul 8 12:22 cluster-dns-02-config.yml --rw-r----- 1 <user> <user> 864 Jul 8 12:22 cluster-infrastructure-02-config.yml --rw-r----- 1 <user> <user> 191 Jul 8 12:22 cluster-ingress-02-config.yml --rw-r----- 1 <user> <user> 9607 Jul 8 12:22 cluster-network-01-crd.yml --rw-r----- 1 <user> <user> 272 Jul 8 12:22 cluster-network-02-config.yml --rw-r----- 1 <user> <user> 142 Jul 8 12:22 cluster-proxy-01-config.yaml --rw-r----- 1 <user> <user> 171 Jul 8 12:22 cluster-scheduler-02-config.yml --rw-r----- 1 <user> <user> 200 Jul 8 12:22 cvo-overrides.yaml --rw-r----- 1 <user> <user> 118 Jul 8 12:22 kube-cloud-config.yaml --rw-r----- 1 <user> <user> 1304 Jul 8 12:22 kube-system-configmap-root-ca.yaml --rw-r----- 1 <user> <user> 4090 Jul 8 12:22 machine-config-server-tls-secret.yaml --rw-r----- 1 <user> <user> 3961 Jul 8 12:22 openshift-config-secret-pull-secret.yaml --rw------- 1 <user> <user> 283 Jul 8 12:24 openshift-machine-api-nutanix-credentials-credentials.yaml ----- diff --git a/modules/manually-create-iam-ibm-cloud.adoc b/modules/manually-create-iam-ibm-cloud.adoc deleted file mode 100644 index 03d6744488c9..000000000000 --- a/modules/manually-create-iam-ibm-cloud.adoc +++ /dev/null @@ -1,207 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-vpc.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-private.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-customizations.adoc -// * installing/installing_ibm_powervs/installing-ibm-power-vs-private-cluster.adoc -// * installing/installing_ibm_powervs/installing-restricted-networks-ibm-power-vs.adoc -// * installing/installing_ibm_powervs/installing-ibm-powervs-vpc.adoc - -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:ibm-power-vs: -endif::[] - -:_content-type: PROCEDURE -[id="manually-create-iam-ibm-cloud_{context}"] -= Manually creating IAM - -Installing the cluster requires that the Cloud Credential Operator (CCO) operate in manual mode. While the installation program configures the CCO for manual mode, you must specify the identity and access management secrets for you cloud provider. - -You can use the Cloud Credential Operator (CCO) utility (`ccoctl`) to create the required IBM Cloud VPC resources. - -.Prerequisites - -* You have configured the `ccoctl` binary. -* You have an existing `install-config.yaml` file. - -.Procedure - -. 
Edit the `install-config.yaml` configuration file so that it contains the `credentialsMode` parameter set to `Manual`. -+ -.Example `install-config.yaml` configuration file -[source,yaml] ----- -apiVersion: v1 -baseDomain: cluster1.example.com -credentialsMode: Manual <1> -compute: -ifdef::ibm-vpc[] -- architecture: amd64 -endif::ibm-vpc[] -ifdef::ibm-power-vs[] -- architecture: ppc64le -endif::ibm-power-vs[] - hyperthreading: Enabled ----- -<1> This line is added to set the `credentialsMode` parameter to `Manual`. - -. To generate the manifests, run the following command from the directory that contains the installation program: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_directory> ----- - -. From the directory that contains the installation program, obtain the {product-title} release image that your `openshift-install` binary is built to use: -+ -[source,terminal] ----- -$ RELEASE_IMAGE=$(./openshift-install version | awk '/release image/ {print $3}') ----- - -. Extract the `CredentialsRequest` objects from the {product-title} release image: -+ -[source,terminal] ----- -$ oc adm release extract --cloud=<provider_name> --credentials-requests $RELEASE_IMAGE \ <1> - --to=<path_to_credential_requests_directory> <2> ----- -<1> The name of the provider. For example: `ibmcloud` or `powervs`. -<2> The directory where the credential requests will be stored. -+ -This command creates a YAML file for each `CredentialsRequest` object. -+ -.Sample `CredentialsRequest` object -+ -[source,yaml] ----- - apiVersion: cloudcredential.openshift.io/v1 - kind: CredentialsRequest - metadata: - labels: - controller-tools.k8s.io: "1.0" - name: openshift-image-registry-ibmcos - namespace: openshift-cloud-credential-operator - spec: - secretRef: - name: installer-cloud-credentials - namespace: openshift-image-registry - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 - kind: IBMCloudProviderSpec - policies: - - attributes: - - name: serviceName - value: cloud-object-storage - roles: - - crn:v1:bluemix:public:iam::::role:Viewer - - crn:v1:bluemix:public:iam::::role:Operator - - crn:v1:bluemix:public:iam::::role:Editor - - crn:v1:bluemix:public:iam::::serviceRole:Reader - - crn:v1:bluemix:public:iam::::serviceRole:Writer - - attributes: - - name: resourceType - value: resource-group - roles: - - crn:v1:bluemix:public:iam::::role:Viewer ----- - -ifndef::ibm-power-vs[] -. If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on IBM Cloud VPC -+ -[source,terminal] ----- -0000_26_cloud-controller-manager-operator_15_credentialsrequest-ibm.yaml <1> -0000_30_machine-api-operator_00_credentials-request.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-ibmcos.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-storage-operator_03_credentials_request_ibm.yaml <5> ----- -<1> The Cloud Controller Manager Operator CR is required. -<2> The Machine API Operator CR is required. -<3> The Image Registry Operator CR is required. -<4> The Ingress Operator CR is required. -<5> The Storage Operator CR is an optional component and might be disabled in your cluster. -endif::ibm-power-vs[] - -. 
Create the service ID for each credential request, assign the policies defined, create an API key, and generate the secret: -+ -[source,terminal] ----- -$ ccoctl ibmcloud create-service-id \ - --credentials-requests-dir <path_to_credential_requests_directory> \ <1> - --name <cluster_name> \ <2> - --output-dir <installation_directory> \ - --resource-group-name <resource_group_name> <3> ----- -<1> The directory where the credential requests are stored. -<2> The name of the {product-title} cluster. -<3> Optional: The name of the resource group used for scoping the access policies. -+ --- -[NOTE] -==== -If your cluster uses Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set, you must include the `--enable-tech-preview` parameter. - -If an incorrect resource group name is provided, the installation fails during the bootstrap phase. To find the correct resource group name, run the following command: - -[source,terminal] ----- -$ grep resourceGroupName <installation_directory>/manifests/cluster-infrastructure-02-config.yml ----- -==== --- - -.Verification - -* Ensure that the appropriate secrets were generated in your cluster's `manifests` directory. - -ifeval::["{context}" == "installing-ibm-cloud-customizations"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-vpc"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-private"] -:!ibm-vpc: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-customizations"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-power-vs-private-cluster"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-restricted-networks-ibm-power-vs"] -:!ibm-power-vs: -endif::[] -ifeval::["{context}" == "installing-ibm-powervs-vpc"] -:!ibm-power-vs: -endif::[] \ No newline at end of file diff --git a/modules/manually-create-identity-access-management.adoc b/modules/manually-create-identity-access-management.adoc deleted file mode 100644 index eb7b414c6fff..000000000000 --- a/modules/manually-create-identity-access-management.adoc +++ /dev/null @@ -1,287 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc -// * installing/installing_azure/manually-creating-iam-azure.adoc -// * installing/installing_gcp/manually-creating-iam-gcp.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc - -ifeval::["{context}" == "manually-creating-iam-aws"] -:aws: -:cco-multi-mode: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:azure: -:cco-multi-mode: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:google-cloud-platform: -:cco-multi-mode: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:ash: -:cco-manual-mode: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:ash: -:cco-manual-mode: -endif::[] - - -:_content-type: PROCEDURE -[id="manually-create-iam_{context}"] - -//For providers that support multiple modes of operation -ifdef::cco-multi-mode[] -= Manually create IAM -endif::cco-multi-mode[] - -//For providers who only support manual mode -ifdef::cco-manual-mode[] -= Manually manage cloud credentials -endif::cco-manual-mode[] - -//For providers that support multiple modes of operation 
-ifdef::cco-multi-mode[] -The Cloud Credential Operator (CCO) can be put into manual mode prior to installation in environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace. -endif::cco-multi-mode[] - -//For providers who only support manual mode -ifdef::cco-manual-mode[] -The Cloud Credential Operator (CCO) only supports your cloud provider in manual mode. As a result, you must specify the identity and access management (IAM) secrets for your cloud provider. -endif::cco-manual-mode[] - -.Procedure - -ifdef::cco-multi-mode[] -. Change to the directory that contains the installation program and create the `install-config.yaml` file by running the following command: -+ -[source,terminal] ----- -$ openshift-install create install-config --dir <installation_directory> ----- -+ -where `<installation_directory>` is the directory in which the installation program creates files. - -. Edit the `install-config.yaml` configuration file so that it contains the `credentialsMode` parameter set to `Manual`. -+ -.Example `install-config.yaml` configuration file -[source,yaml] ----- -apiVersion: v1 -baseDomain: cluster1.example.com -credentialsMode: Manual <1> -compute: -- architecture: amd64 - hyperthreading: Enabled -... ----- -<1> This line is added to set the `credentialsMode` parameter to `Manual`. -endif::cco-multi-mode[] - -. Generate the manifests by running the following command from the directory that contains the installation program: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_directory> ----- -+ -where `<installation_directory>` is the directory in which the installation program creates files. - -. From the directory that contains the installation program, obtain details of the {product-title} release image that your `openshift-install` binary is built to use by running the following command: -+ -[source,terminal] ----- -$ openshift-install version ----- -+ -.Example output -[source,terminal] ----- -release image quay.io/openshift-release-dev/ocp-release:4.y.z-x86_64 ----- - -. Locate all `CredentialsRequest` objects in this release image that target the cloud you are deploying on by running the following command: -+ -[source,terminal] ----- -$ oc adm release extract quay.io/openshift-release-dev/ocp-release:4.y.z-x86_64 \ - --credentials-requests \ -ifdef::aws[] - --cloud=aws -endif::aws[] -ifdef::azure,ash[] - --cloud=azure -endif::azure,ash[] -ifdef::google-cloud-platform[] - --cloud=gcp -endif::google-cloud-platform[] ----- -+ -This command creates a YAML file for each `CredentialsRequest` object. -+ -.Sample `CredentialsRequest` object -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: <component-credentials-request> - namespace: openshift-cloud-credential-operator - ... -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 -ifdef::aws[] - kind: AWSProviderSpec - statementEntries: - - effect: Allow - action: - - iam:GetUser - - iam:GetUserPolicy - - iam:ListAccessKeys - resource: "*" -endif::aws[] -ifdef::azure,ash[] - kind: AzureProviderSpec - roleBindings: - - role: Contributor -endif::azure,ash[] -ifdef::google-cloud-platform[] - kind: GCPProviderSpec - predefinedRoles: - - roles/storage.admin - - roles/iam.serviceAccountUser - skipServiceCheck: true -endif::google-cloud-platform[] - ... ----- - -. 
Create YAML files for secrets in the `openshift-install` manifests directory that you generated previously. The secrets must be stored using the namespace and secret name defined in the `spec.secretRef` for each `CredentialsRequest` object. -+ -.Sample `CredentialsRequest` object with secrets -[source,yaml] ----- -apiVersion: cloudcredential.openshift.io/v1 -kind: CredentialsRequest -metadata: - name: <component-credentials-request> - namespace: openshift-cloud-credential-operator - ... -spec: - providerSpec: - apiVersion: cloudcredential.openshift.io/v1 -ifdef::aws[] - kind: AWSProviderSpec - statementEntries: - - effect: Allow - action: - - s3:CreateBucket - - s3:DeleteBucket - resource: "*" -endif::aws[] -ifdef::ash,azure[] - kind: AzureProviderSpec - roleBindings: - - role: Contributor -endif::ash,azure[] -ifdef::gcp[] - kind: GCPProviderSpec - predefinedRoles: - - roles/iam.securityReviewer - - roles/iam.roleViewer - skipServiceCheck: true -endif::gcp[] - ... - secretRef: - name: <component-secret> - namespace: <component-namespace> - ... ----- -+ -.Sample `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <component-secret> - namespace: <component-namespace> -ifdef::aws[] -data: - aws_access_key_id: <base64_encoded_aws_access_key_id> - aws_secret_access_key: <base64_encoded_aws_secret_access_key> -endif::aws[] -ifdef::azure,ash[] -data: - azure_subscription_id: <base64_encoded_azure_subscription_id> - azure_client_id: <base64_encoded_azure_client_id> - azure_client_secret: <base64_encoded_azure_client_secret> - azure_tenant_id: <base64_encoded_azure_tenant_id> - azure_resource_prefix: <base64_encoded_azure_resource_prefix> - azure_resourcegroup: <base64_encoded_azure_resourcegroup> - azure_region: <base64_encoded_azure_region> -endif::azure,ash[] -ifdef::google-cloud-platform[] -data: - service_account.json: <base64_encoded_gcp_service_account_file> -endif::google-cloud-platform[] ----- -+ -[IMPORTANT] -==== -The release image includes `CredentialsRequest` objects for Technology Preview features that are enabled by the `TechPreviewNoUpgrade` feature set. You can identify these objects by their use of the `release.openshift.io/feature-set: TechPreviewNoUpgrade` annotation. - -* If you are not using any of these features, do not create secrets for these objects. Creating secrets for Technology Preview features that you are not using can cause the installation to fail. - -* If you are using any of these features, you must create secrets for the corresponding objects. -==== - -** To find `CredentialsRequest` objects with the `TechPreviewNoUpgrade` annotation, run the following command: -+ -[source,terminal] ----- -$ grep "release.openshift.io/feature-set" * ----- -+ -.Example output -[source,terminal] ----- -0000_30_capi-operator_00_credentials-request.yaml: release.openshift.io/feature-set: TechPreviewNoUpgrade ----- -// Right now, only the CAPI Operator is an issue, but it might make sense to update `0000_30_capi-operator_00_credentials-request.yaml` to `<tech_preview_credentials_request>.yaml` for the future. - -ifdef::cco-multi-mode[] -. From the directory that contains the installation program, proceed with your cluster creation: -+ -[source,terminal] ----- -$ openshift-install create cluster --dir <installation_directory> ----- -endif::cco-multi-mode[] -+ -[IMPORTANT] -==== -Before upgrading a cluster that uses manually maintained credentials, you must ensure that the CCO is in an upgradeable state. 
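One way to confirm this, as a rough sketch (the exact output varies by release and configuration), is to inspect the `Upgradeable` condition on the `cloud-credential` cluster Operator:

[source,terminal]
----
$ oc get clusteroperator cloud-credential \
    -o jsonpath='{.status.conditions[?(@.type=="Upgradeable")].status}'
----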
-==== - -ifeval::["{context}" == "manually-creating-iam-aws"] -:!aws: -:!cco-multi-mode: -endif::[] -ifeval::["{context}" == "manually-creating-iam-azure"] -:!azure: -:!cco-multi-mode: -endif::[] -ifeval::["{context}" == "manually-creating-iam-gcp"] -:!google-cloud-platform: -:!cco-multi-mode: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-default"] -:!ash: -:!cco-manual-mode: -endif::[] -ifeval::["{context}" == "installing-azure-stack-hub-network-customizations"] -:!ash: -:!cco-manual-mode: -endif::[] diff --git a/modules/manually-creating-alibaba-manifests.adoc b/modules/manually-creating-alibaba-manifests.adoc deleted file mode 100644 index 73c33a04f39e..000000000000 --- a/modules/manually-creating-alibaba-manifests.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/installing-alibaba-default.adoc -// * installing/installing_alibaba/installing-alibaba-network-customizations.adoc -// * installing/installing_alibaba/installing-alibaba-vpc.adoc - -:_content-type: PROCEDURE -[id="manually-creating-alibaba-manifests_{context}"] -= Generating the required installation manifests - -You must generate the Kubernetes manifest and Ignition config files that the cluster needs to configure the machines. - -.Procedure - -. Generate the manifests by running the following command from the directory that contains the installation program: -+ -[source,terminal] ----- -$ openshift-install create manifests --dir <installation_directory> ----- -+ -where: - -`<installation_directory>`:: Specifies the directory in which the installation program creates files. diff --git a/modules/manually-creating-alibaba-ram-user.adoc b/modules/manually-creating-alibaba-ram-user.adoc deleted file mode 100644 index 160b4e6e62f4..000000000000 --- a/modules/manually-creating-alibaba-ram-user.adoc +++ /dev/null @@ -1,247 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_alibaba/manually-creating-alibaba-ram.adoc - -:_content-type: PROCEDURE -[id="manually-creating-alibaba-ram-user_{context}"] -= Creating the required RAM user - -// https://github.com/openshift/cloud-credential-operator/pull/412/files#diff-2480a11ca4927139d6eaa9883946b6f4cb38358cd98def8c57dd73e9319dbc9cR232 - -You must have a Alibaba Cloud Resource Access Management (RAM) user for the installation that has sufficient privileges. You can use the Alibaba Cloud Resource Access Management console to create a new user or modify an existing user. Later, you create credentials in {product-title} based on this user's permissions. - -When you configure the RAM user, be sure to consider the following requirements: - -* The user must have an Alibaba Cloud AccessKey ID and AccessKey secret pair. - -** For a new user, you can select `Open API Access` for the Access Mode when creating the user. This mode generates the required AccessKey pair. -** For an existing user, you can add an AccessKey pair or you can link:https://www.alibabacloud.com/help/en/doc-detail/53045.htm[obtain the AccessKey pair] for that user. -+ -[NOTE] -==== -When created, the AccessKey secret is displayed only once. You must immediately save the AccessKey pair because the AccessKey pair is required for API calls. -==== - -* Add the AccessKey ID and secret to the link:https://www.alibabacloud.com/help/en/doc-detail/311667.htm#h2-sls-mfm-3p3[`~/.alibabacloud/credentials` file] on your local computer. Alibaba Cloud automatically creates this file when you log in to the console. 
The Cloud Credential Operator (CCO) utility, ccoctl, uses these credentials when processing `CredentialsRequest` objects.
-+
-For example:
-+
-[source,terminal]
-----
-[default] # Default client
-type = access_key # Certification type: access_key
-access_key_id = LTAI5t8cefXKmt # Key <1>
-access_key_secret = wYx56mszAN4Uunfh # Secret
-----
-<1> Add your AccessKeyID and AccessKeySecret here.
-
-* The RAM user must have the `AdministratorAccess` policy to ensure that the account has sufficient permission to create the {product-title} cluster. This policy grants permissions to manage all Alibaba Cloud resources.
-+
-When you attach the `AdministratorAccess` policy to a RAM user, you grant that user full access to all Alibaba Cloud services and resources. If you do not want to create a user with full access, create a custom policy with the following actions that you can add to your RAM user for installation. These actions are sufficient to install {product-title}.
-+
-[TIP]
-====
-You can copy and paste the following JSON code into the Alibaba Cloud console to create a custom policy. For information on creating custom policies, see link:https://www.alibabacloud.com/help/en/doc-detail/93733.html[Create a custom policy] in the Alibaba Cloud documentation.
-====
-+
-.Example custom policy JSON file
-[%collapsible]
-====
-[source,json]
-----
-{
-  "Version": "1",
-  "Statement": [
-    {
-      "Action": [
-        "tag:ListTagResources",
-        "tag:UntagResources"
-      ],
-      "Resource": "*",
-      "Effect": "Allow"
-    },
-    {
-      "Action": [
-        "vpc:DescribeVpcs",
-        "vpc:DeleteVpc",
-        "vpc:DescribeVSwitches",
-        "vpc:DeleteVSwitch",
-        "vpc:DescribeEipAddresses",
-        "vpc:DescribeNatGateways",
-        "vpc:ReleaseEipAddress",
-        "vpc:DeleteNatGateway",
-        "vpc:DescribeSnatTableEntries",
-        "vpc:CreateSnatEntry",
-        "vpc:AssociateEipAddress",
-        "vpc:ListTagResources",
-        "vpc:TagResources",
-        "vpc:DescribeVSwitchAttributes",
-        "vpc:CreateVSwitch",
-        "vpc:CreateNatGateway",
-        "vpc:DescribeRouteTableList",
-        "vpc:CreateVpc",
-        "vpc:AllocateEipAddress",
-        "vpc:ListEnhanhcedNatGatewayAvailableZones"
-      ],
-      "Resource": "*",
-      "Effect": "Allow"
-    },
-    {
-      "Action": [
-        "ecs:ModifyInstanceAttribute",
-        "ecs:DescribeSecurityGroups",
-        "ecs:DeleteSecurityGroup",
-        "ecs:DescribeSecurityGroupReferences",
-        "ecs:DescribeSecurityGroupAttribute",
-        "ecs:RevokeSecurityGroup",
-        "ecs:DescribeInstances",
-        "ecs:DeleteInstances",
-        "ecs:DescribeNetworkInterfaces",
-        "ecs:DescribeInstanceRamRole",
-        "ecs:DescribeUserData",
-        "ecs:DescribeDisks",
-        "ecs:ListTagResources",
-        "ecs:AuthorizeSecurityGroup",
-        "ecs:RunInstances",
-        "ecs:TagResources",
-        "ecs:ModifySecurityGroupPolicy",
-        "ecs:CreateSecurityGroup",
-        "ecs:DescribeAvailableResource",
-        "ecs:DescribeRegions",
-        "ecs:AttachInstanceRamRole"
-      ],
-      "Resource": "*",
-      "Effect": "Allow"
-    },
-    {
-      "Action": [
-        "pvtz:DescribeRegions",
-        "pvtz:DescribeZones",
-        "pvtz:DeleteZone",
-        "pvtz:DeleteZoneRecord",
-        "pvtz:BindZoneVpc",
-        "pvtz:DescribeZoneRecords",
-        "pvtz:AddZoneRecord",
-        "pvtz:SetZoneRecordStatus",
-        "pvtz:DescribeZoneInfo",
-        "pvtz:DescribeSyncEcsHostTask",
-        "pvtz:AddZone"
-      ],
-      "Resource": "*",
-      "Effect": "Allow"
-    },
-    {
-      "Action": [
-        "slb:DescribeLoadBalancers",
-        "slb:SetLoadBalancerDeleteProtection",
-        "slb:DeleteLoadBalancer",
-        "slb:SetLoadBalancerModificationProtection",
-        "slb:DescribeLoadBalancerAttribute",
-        "slb:AddBackendServers",
-        "slb:DescribeLoadBalancerTCPListenerAttribute",
-        "slb:SetLoadBalancerTCPListenerAttribute",
-        "slb:StartLoadBalancerListener",
-        
"slb:CreateLoadBalancerTCPListener", - "slb:ListTagResources", - "slb:TagResources", - "slb:CreateLoadBalancer" - ], - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": [ - "ram:ListResourceGroups", - "ram:DeleteResourceGroup", - "ram:ListPolicyAttachments", - "ram:DetachPolicy", - "ram:GetResourceGroup", - "ram:CreateResourceGroup", - "ram:DeleteRole", - "ram:GetPolicy", - "ram:DeletePolicy", - "ram:ListPoliciesForRole", - "ram:CreateRole", - "ram:AttachPolicyToRole", - "ram:GetRole", - "ram:CreatePolicy", - "ram:CreateUser", - "ram:DetachPolicyFromRole", - "ram:CreatePolicyVersion", - "ram:DetachPolicyFromUser", - "ram:ListPoliciesForUser", - "ram:AttachPolicyToUser", - "ram:CreateUser", - "ram:GetUser", - "ram:DeleteUser", - "ram:CreateAccessKey", - "ram:ListAccessKeys", - "ram:DeleteAccessKey", - "ram:ListUsers", - "ram:ListPolicyVersions" - ], - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": [ - "oss:DeleteBucket", - "oss:DeleteBucketTagging", - "oss:GetBucketTagging", - "oss:GetBucketCors", - "oss:GetBucketPolicy", - "oss:GetBucketLifecycle", - "oss:GetBucketReferer", - "oss:GetBucketTransferAcceleration", - "oss:GetBucketLog", - "oss:GetBucketWebSite", - "oss:GetBucketInfo", - "oss:PutBucketTagging", - "oss:PutBucket", - "oss:OpenOssService", - "oss:ListBuckets", - "oss:GetService", - "oss:PutBucketACL", - "oss:GetBucketLogging", - "oss:ListObjects", - "oss:GetObject", - "oss:PutObject", - "oss:DeleteObject" - ], - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": [ - "alidns:DescribeDomainRecords", - "alidns:DeleteDomainRecord", - "alidns:DescribeDomains", - "alidns:DescribeDomainRecordInfo", - "alidns:AddDomainRecord", - "alidns:SetDomainRecordStatus" - ], - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": "bssapi:CreateInstance", - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": "ram:PassRole", - "Resource": "*", - "Effect": "Allow", - "Condition": { - "StringEquals": { - "acs:Service": "ecs.aliyuncs.com" - } - } - } - ] -} ----- -==== - -For more information about creating a RAM user and granting permissions, see link:https://www.alibabacloud.com/help/en/doc-detail/93720.htm[Create a RAM user] and link:https://www.alibabacloud.com/help/en/doc-detail/116146.htm[Grant permissions to a RAM user] in the Alibaba Cloud documentation. - diff --git a/modules/manually-gathering-logs-with-ssh.adoc b/modules/manually-gathering-logs-with-ssh.adoc deleted file mode 100644 index e9bb19b2378a..000000000000 --- a/modules/manually-gathering-logs-with-ssh.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/installing-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="installation-manually-gathering-logs-with-SSH_{context}"] -= Manually gathering logs with SSH access to your host(s) - -Manually gather logs in situations where `must-gather` or automated collection -methods do not work. - -[IMPORTANT] -==== -By default, SSH access to the {product-title} nodes is disabled on the {rh-openstack-first} based installations. -==== - -.Prerequisites - -* You must have SSH access to your host(s). - -.Procedure - -. Collect the `bootkube.service` service logs from the bootstrap host using the -`journalctl` command by running: -+ -[source,terminal] ----- -$ journalctl -b -f -u bootkube.service ----- - -. Collect the bootstrap host's container logs using the podman logs. 
This is shown -as a loop to get all of the container logs from the host: -+ -[source,terminal] ----- -$ for pod in $(sudo podman ps -a -q); do sudo podman logs $pod; done ----- - -. Alternatively, collect the host's container logs using the `tail` command by -running: -+ -[source,terminal] ----- -# tail -f /var/lib/containers/storage/overlay-containers/*/userdata/ctr.log ----- - -. Collect the `kubelet.service` and `crio.service` service logs from the master -and worker hosts using the `journalctl` command by running: -+ -[source,terminal] ----- -$ journalctl -b -f -u kubelet.service -u crio.service ----- - -. Collect the master and worker host container logs using the `tail` command by -running: -+ -[source,terminal] ----- -$ sudo tail -f /var/log/containers/* ----- diff --git a/modules/manually-gathering-logs-without-ssh.adoc b/modules/manually-gathering-logs-without-ssh.adoc deleted file mode 100644 index a4ccaefcb7ce..000000000000 --- a/modules/manually-gathering-logs-without-ssh.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// *installing/installing-troubleshooting.adoc - -:_content-type: PROCEDURE -[id="installation-manually-gathering-logs-without-SSH_{context}"] -= Manually gathering logs without SSH access to your host(s) - -Manually gather logs in situations where `must-gather` or automated collection -methods do not work. - -If you do not have SSH access to your node, you can access the systems journal -to investigate what is happening on your host. - -.Prerequisites - -* Your {product-title} installation must be complete. -* Your API service is still functional. -* You have system administrator privileges. - -.Procedure - -. Access `journald` unit logs under `/var/log` by running: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u kubelet ----- - -. Access host file paths under `/var/log` by running: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=openshift-apiserver ----- diff --git a/modules/manually-maintained-credentials-upgrade.adoc b/modules/manually-maintained-credentials-upgrade.adoc deleted file mode 100644 index db0f8eea87d4..000000000000 --- a/modules/manually-maintained-credentials-upgrade.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/updating-cluster-within-minor.adoc -// * updating/updating-cluster-cli.adoc - -:_content-type: PROCEDURE - -[id="manually-maintained-credentials-upgrade_{context}"] -= Updating cloud provider resources with manually maintained credentials - -Before upgrading a cluster with manually maintained credentials, you must create any new credentials for the release image that you are upgrading to. You must also review the required permissions for existing credentials and accommodate any new permissions requirements in the new release for those components. - -.Procedure - -. Extract and examine the `CredentialsRequest` custom resource for the new release. -+ -The "Manually creating IAM" section of the installation content for your cloud provider explains how to obtain and use the credentials required for your cloud. - -. Update the manually maintained credentials on your cluster: -+ --- -* Create new secrets for any `CredentialsRequest` custom resources that are added by the new release image. -* If the `CredentialsRequest` custom resources for any existing credentials that are stored in secrets have changed permissions requirements, update the permissions as required. --- - -. 
If your cluster uses cluster capabilities to disable one or more optional components, delete the `CredentialsRequest` custom resources for any disabled components. -+ -.Example `credrequests` directory contents for {product-title} 4.12 on AWS -+ -[source,terminal] ----- -0000_30_machine-api-operator_00_credentials-request.yaml <1> -0000_50_cloud-credential-operator_05-iam-ro-credentialsrequest.yaml <2> -0000_50_cluster-image-registry-operator_01-registry-credentials-request.yaml <3> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <4> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <5> -0000_50_cluster-storage-operator_03_credentials_request_aws.yaml <6> ----- -+ --- -<1> The Machine API Operator CR is required. -<2> The Cloud Credential Operator CR is required. -<3> The Image Registry Operator CR is required. -<4> The Ingress Operator CR is required. -<5> The Network Operator CR is required. -<6> The Storage Operator CR is an optional component and might be disabled in your cluster. --- -+ -.Example `credrequests` directory contents for {product-title} 4.12 on GCP -+ -[source,terminal] ----- -0000_26_cloud-controller-manager-operator_16_credentialsrequest-gcp.yaml <1> -0000_30_machine-api-operator_00_credentials-request.yaml <2> -0000_50_cloud-credential-operator_05-gcp-ro-credentialsrequest.yaml <3> -0000_50_cluster-image-registry-operator_01-registry-credentials-request-gcs.yaml <4> -0000_50_cluster-ingress-operator_00-ingress-credentials-request.yaml <5> -0000_50_cluster-network-operator_02-cncc-credentials.yaml <6> -0000_50_cluster-storage-operator_03_credentials_request_gcp.yaml <7> ----- -+ --- -<1> The Cloud Controller Manager Operator CR is required. -<2> The Machine API Operator CR is required. -<3> The Cloud Credential Operator CR is required. -<4> The Image Registry Operator CR is required. -<5> The Ingress Operator CR is required. -<6> The Network Operator CR is required. -<7> The Storage Operator CR is an optional component and might be disabled in your cluster. --- - -.Next steps -* Update the `upgradeable-to` annotation to indicate that the cluster is ready to upgrade. diff --git a/modules/manually-removing-cloud-creds.adoc b/modules/manually-removing-cloud-creds.adoc deleted file mode 100644 index 9c9647f0416f..000000000000 --- a/modules/manually-removing-cloud-creds.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="manually-removing-cloud-creds_{context}"] -= Removing cloud provider credentials - -After installing an {product-title} cluster with the Cloud Credential Operator (CCO) in mint mode, you can remove the administrator-level credential secret from the `kube-system` namespace in the cluster. The administrator-level credential is required only during changes that require its elevated permissions, such as upgrades. - -[NOTE] -==== -Prior to a non z-stream upgrade, you must reinstate the credential secret with the administrator-level credential. If the credential is not present, the upgrade might be blocked. -==== - -.Prerequisites - -* Your cluster is installed on a platform that supports removing cloud credentials from the CCO. Supported platforms are AWS and GCP. - -.Procedure - -. In the *Administrator* perspective of the web console, navigate to *Workloads* -> *Secrets*. - -. In the table on the *Secrets* page, find the root secret for your cloud provider. 
-+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -|GCP -|`gcp-credentials` - -|=== - -. Click the *Options* menu {kebab} in the same row as the secret and select *Delete Secret*. diff --git a/modules/manually-rotating-cloud-creds.adoc b/modules/manually-rotating-cloud-creds.adoc deleted file mode 100644 index 11fa1e97e6d8..000000000000 --- a/modules/manually-rotating-cloud-creds.adoc +++ /dev/null @@ -1,240 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/cluster-tasks.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc -// * authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc - -ifeval::["{context}" == "post-install-cluster-tasks"] -:post-install: -endif::[] -ifeval::["{context}" == "cco-mode-mint"] -:mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:passthrough: -endif::[] - -:_content-type: PROCEDURE -[id="manually-rotating-cloud-creds_{context}"] -= Rotating cloud provider credentials manually - -If your cloud provider credentials are changed for any reason, you must manually update the secret that the Cloud Credential Operator (CCO) uses to manage cloud provider credentials. - -The process for rotating cloud credentials depends on the mode that the CCO is configured to use. After you rotate credentials for a cluster that is using mint mode, you must manually remove the component credentials that were created by the removed credential. - -//// -[NOTE] -==== -You can also use the command line interface to complete all parts of this procedure. -==== -//// - -.Prerequisites - -* Your cluster is installed on a platform that supports rotating cloud credentials manually with the CCO mode that you are using: - -ifndef::passthrough[] -** For mint mode, Amazon Web Services (AWS) and Google Cloud Platform (GCP) are supported. -endif::passthrough[] - -ifndef::mint[] -** For passthrough mode, Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere are supported. -endif::mint[] - -* You have changed the credentials that are used to interface with your cloud provider. - -* The new credentials have sufficient permissions for the mode CCO is configured to use in your cluster. - -.Procedure - -. In the *Administrator* perspective of the web console, navigate to *Workloads* -> *Secrets*. - -. In the table on the *Secrets* page, find the root secret for your cloud provider. -+ -[cols=2,options=header] -|=== -|Platform -|Secret name - -|AWS -|`aws-creds` - -ifndef::mint[] -|Azure -|`azure-credentials` -endif::mint[] - -|GCP -|`gcp-credentials` - -ifndef::mint[] -|{rh-openstack} -|`openstack-credentials` - -|{rh-virtualization} -|`ovirt-credentials` - -|VMware vSphere -|`vsphere-creds` -endif::mint[] - -|=== - -. Click the *Options* menu {kebab} in the same row as the secret and select *Edit Secret*. - -. Record the contents of the *Value* field or fields. You can use this information to verify that the value is different after updating the credentials. - -. Update the text in the *Value* field or fields with the new authentication information for your cloud provider, and then click *Save*. - -ifndef::mint[] -. If you are updating the credentials for a vSphere cluster that does not have the vSphere CSI Driver Operator enabled, you must force a rollout of the Kubernetes controller manager to apply the updated credentials. 
-+ -[NOTE] -==== -If the vSphere CSI Driver Operator is enabled, this step is not required. -==== -+ -To apply the updated vSphere credentials, log in to the {product-title} CLI as a user with the `cluster-admin` role and run the following command: -+ -[source,terminal] ----- -$ oc patch kubecontrollermanager cluster \ - -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date )"'"}}' \ - --type=merge ----- -+ -While the credentials are rolling out, the status of the Kubernetes Controller Manager Operator reports `Progressing=true`. To view the status, run the following command: -+ -[source,terminal] ----- -$ oc get co kube-controller-manager ----- -endif::mint[] - -ifdef::post-install[] -. If the CCO for your cluster is configured to use mint mode, delete each component secret that is referenced by the individual `CredentialsRequest` objects. -endif::post-install[] -ifdef::mint[] -. Delete each component secret that is referenced by the individual `CredentialsRequest` objects. -endif::mint[] - -ifndef::passthrough[] -.. Log in to the {product-title} CLI as a user with the `cluster-admin` role. - -.. Get the names and namespaces of all referenced component secrets: -+ -[source,terminal] ----- -$ oc -n openshift-cloud-credential-operator get CredentialsRequest \ - -o json | jq -r '.items[] | select (.spec.providerSpec.kind=="<provider_spec>") | .spec.secretRef' ----- -+ -where `<provider_spec>` is the corresponding value for your cloud provider: -+ --- -* AWS: `AWSProviderSpec` -* GCP: `GCPProviderSpec` --- -+ -.Partial example output for AWS -+ -[source,json] ----- -{ - "name": "ebs-cloud-credentials", - "namespace": "openshift-cluster-csi-drivers" -} -{ - "name": "cloud-credential-operator-iam-ro-creds", - "namespace": "openshift-cloud-credential-operator" -} ----- - -.. Delete each of the referenced component secrets: -+ -[source,terminal] ----- -$ oc delete secret <secret_name> \//<1> - -n <secret_namespace> <2> ----- -+ -<1> Specify the name of a secret. -<2> Specify the namespace that contains the secret. -+ -.Example deletion of an AWS secret -+ -[source,terminal] ----- -$ oc delete secret ebs-cloud-credentials -n openshift-cluster-csi-drivers ----- -+ -You do not need to manually delete the credentials from your provider console. Deleting the referenced component secrets will cause the CCO to delete the existing credentials from the platform and create new ones. -endif::passthrough[] - -.Verification - -To verify that the credentials have changed: - -. In the *Administrator* perspective of the web console, navigate to *Workloads* -> *Secrets*. - -. Verify that the contents of the *Value* field or fields have changed. - -//// -// Provider-side verification also possible, though cluster-side is cleaner process. -. To verify that the credentials have changed from the console of your cloud provider: - -.. Get the `CredentialsRequest` CR names for your platform: -+ -[source,terminal] ----- -$ oc -n openshift-cloud-credential-operator get CredentialsRequest -o json | jq -r '.items[] | select (.spec[].kind=="<provider_spec>") | .metadata.name' ----- -+ -Where `<provider_spec>` is the corresponding value for your cloud provider: `AWSProviderSpec` for AWS, `AzureProviderSpec` for Azure, or `GCPProviderSpec` for GCP. -+ -.Example output for AWS -+ -[source,terminal] ----- -aws-ebs-csi-driver-operator -cloud-credential-operator-iam-ro -openshift-image-registry -openshift-ingress -openshift-machine-api-aws ----- - -.. 
Get the IAM username that corresponds to each `CredentialsRequest` CR name: -+ -[source,terminal] ----- -$ oc get credentialsrequest <cr_name> -n openshift-cloud-credential-operator -o json | jq -r ".status.providerStatus" ----- -+ -Where `<cr_name>` is the name of a `CredentialsRequest` CR. -+ -.Example output for AWS -+ -[source,json] ----- -{ - "apiVersion": "cloudcredential.openshift.io/v1", - "kind": "AWSProviderStatus", - "policy": "<example-iam-username-policy>", - "user": "<example-iam-username>" -} ----- -+ -Where `<example-iam-username>` is the name of an IAM user on the cloud provider. - -.. For each IAM username, view the details for the user on the cloud provider. The credentials should show that they were created after being rotated on the cluster. -//// - -ifeval::["{context}" == "post-install-cluster-tasks"] -:!post-install: -endif::[] -ifeval::["{context}" == "cco-mode-mint"] -:!mint: -endif::[] -ifeval::["{context}" == "cco-mode-passthrough"] -:!passthrough: -endif::[] diff --git a/modules/master-node-sizing.adoc b/modules/master-node-sizing.adoc deleted file mode 100644 index eaeb71fd2da9..000000000000 --- a/modules/master-node-sizing.adoc +++ /dev/null @@ -1,131 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="master-node-sizing_{context}"] -= Control plane node sizing - -The control plane node resource requirements depend on the number and type of nodes and objects in the cluster. The following control plane node size recommendations are based on the results of a control plane density focused testing, or _Cluster-density_. This test creates the following objects across a given number of namespaces: - -- 1 image stream -- 1 build -- 5 deployments, with 2 pod replicas in a `sleep` state, mounting 4 secrets, 4 config maps, and 1 downward API volume each -- 5 services, each one pointing to the TCP/8080 and TCP/8443 ports of one of the previous deployments -- 1 route pointing to the first of the previous services -- 10 secrets containing 2048 random string characters -- 10 config maps containing 2048 random string characters - - -[options="header",cols="4*"] -|=== -| Number of worker nodes |Cluster-density (namespaces) | CPU cores |Memory (GB) - -| 24 -| 500 -| 4 -| 16 - -| 120 -| 1000 -| 8 -| 32 - -| 252 -| 4000 -| 16, but 24 if using the OVN-Kubernetes network plug-in -| 64, but 128 if using the OVN-Kubernetes network plug-in - -| 501, but untested with the OVN-Kubernetes network plug-in -| 4000 -| 16 -| 96 - -|=== - -The data from the table above is based on an {product-title} running on top of AWS, using r5.4xlarge instances as control-plane nodes and m5.2xlarge instances as worker nodes. - -On a large and dense cluster with three control plane nodes, the CPU and memory usage will spike up when one of the nodes is stopped, rebooted, or fails. The failures can be due to unexpected issues with power, network, underlying infrastructure, or intentional cases where the cluster is restarted after shutting it down to save costs. The remaining two control plane nodes must handle the load in order to be highly available, which leads to increase in the resource usage. This is also expected during upgrades because the control plane nodes are cordoned, drained, and rebooted serially to apply the operating system updates, as well as the control plane Operators update. 
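One rough way to watch for these usage spikes, assuming the cluster metrics API is available and that your control plane nodes carry the usual node-role label (the label name can differ between releases), is to check node-level usage directly:

[source,terminal]
----
$ oc adm top nodes -l node-role.kubernetes.io/master=
----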
To avoid cascading failures, keep the overall CPU and memory resource usage on the control plane nodes to at most 60% of all available capacity to handle the resource usage spikes. Increase the CPU and memory on the control plane nodes accordingly to avoid potential downtime due to lack of resources. - -[IMPORTANT] -==== -The node sizing varies depending on the number of nodes and object counts in the cluster. It also depends on whether the objects are actively being created on the cluster. During object creation, the control plane is more active in terms of resource usage compared to when the objects are in the `running` phase. -==== - -Operator Lifecycle Manager (OLM ) runs on the control plane nodes and its memory footprint depends on the number of namespaces and user installed operators that OLM needs to manage on the cluster. Control plane nodes need to be sized accordingly to avoid OOM kills. Following data points are based on the results from cluster maximums testing. - -[options="header",cols="3*"] -|=== -| Number of namespaces |OLM memory at idle state (GB) |OLM memory with 5 user operators installed (GB) - -| 500 -| 0.823 -| 1.7 - -| 1000 -| 1.2 -| 2.5 - -| 1500 -| 1.7 -| 3.2 - -| 2000 -| 2 -| 4.4 - -| 3000 -| 2.7 -| 5.6 - -| 4000 -| 3.8 -| 7.6 - -| 5000 -| 4.2 -| 9.02 - -| 6000 -| 5.8 -| 11.3 - -| 7000 -| 6.6 -| 12.9 - -| 8000 -| 6.9 -| 14.8 - -| 9000 -| 8 -| 17.7 - -| 10,000 -| 9.9 -| 21.6 - -|=== - - -[IMPORTANT] -==== -You can modify the control plane node size in a running {product-title} {product-version} cluster for the following configurations only: - -* Clusters installed with a user-provisioned installation method. -* AWS clusters installed with an installer-provisioned infrastructure installation method. -* Clusters that use a control plane machine set to manage control plane machines. - -For all other configurations, you must estimate your total node count and use the suggested control plane node size during installation. -==== - -[IMPORTANT] -==== -The recommendations are based on the data points captured on {product-title} clusters with OpenShift SDN as the network plugin. -==== - -[NOTE] -==== -In {product-title} {product-version}, half of a CPU core (500 millicore) is now reserved by the system by default compared to {product-title} 3.11 and previous versions. The sizes are determined taking that into consideration. -==== diff --git a/modules/metallb-installing-using-web-console.adoc b/modules/metallb-installing-using-web-console.adoc deleted file mode 100644 index db37fc2a52f5..000000000000 --- a/modules/metallb-installing-using-web-console.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -:_content-type: PROCEDURE -[id="installing-the-metallb-operator-using-web-console_{context}"] -= Installing the MetalLB Operator from the OperatorHub using the web console - -As a cluster administrator, you can install the MetalLB Operator by using the {product-title} web console. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *OperatorHub*. - -. Type a keyword into the *Filter by keyword* box or scroll to find the Operator you want. For example, type `metallb` to find the MetalLB Operator. -+ -You can also filter options by *Infrastructure Features*. 
For example, select *Disconnected* if you want to see Operators that work in disconnected environments, also known as restricted network environments. - -. On the *Install Operator* page, accept the defaults and click *Install*. - -.Verification - -. To confirm that the installation is successful: - -.. Navigate to the *Operators* -> *Installed Operators* page. - -.. Check that the Operator is installed in the `openshift-operators` namespace and that its status is `Succeeded`. - -. If the Operator is not installed successfully, check the status of the Operator and review the logs: - -.. Navigate to the *Operators* -> *Installed Operators* page and inspect the `Status` column for any errors or failures. - -.. Navigate to the *Workloads* -> *Pods* page and check the logs in any pods in the `openshift-operators` project that are reporting issues. diff --git a/modules/metering-cluster-capacity-examples.adoc b/modules/metering-cluster-capacity-examples.adoc deleted file mode 100644 index 3bc78d3e5d22..000000000000 --- a/modules/metering-cluster-capacity-examples.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-usage-examples.adoc - -[id="metering-cluster-capacity-examples_{context}"] -= Measure cluster capacity hourly and daily - -The following report demonstrates how to measure cluster capacity both hourly and daily. The daily report works by aggregating the hourly report's results. - -The following report measures cluster CPU capacity every hour. - -.Hourly CPU capacity by cluster example - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: cluster-cpu-capacity-hourly -spec: - query: "cluster-cpu-capacity" - schedule: - period: "hourly" <1> ----- -<1> You could change this period to `daily` to get a daily report, but with larger data sets it is more efficient to use an hourly report, then aggregate your hourly data into a daily report. - -The following report aggregates the hourly data into a daily report. - -.Daily CPU capacity by cluster example - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: cluster-cpu-capacity-daily <1> -spec: - query: "cluster-cpu-capacity" <2> - inputs: <3> - - name: ClusterCpuCapacityReportName - value: cluster-cpu-capacity-hourly - schedule: - period: "daily" ----- - -<1> To stay organized, remember to change the `name` of your report if you change any of the other values. -<2> You can also measure `cluster-memory-capacity`. Remember to update the query in the associated hourly report as well. -<3> The `inputs` section configures this report to aggregate the hourly report. Specifically, `value: cluster-cpu-capacity-hourly` is the name of the hourly report that gets aggregated. diff --git a/modules/metering-cluster-usage-examples.adoc b/modules/metering-cluster-usage-examples.adoc deleted file mode 100644 index ed6188e8ca3b..000000000000 --- a/modules/metering-cluster-usage-examples.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-usage-examples.adoc - -[id="metering-cluster-usage-examples_{context}"] -= Measure cluster usage with a one-time report - -The following report measures cluster usage from a specific starting date forward. The report only runs once, after you save it and apply it. 
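For example, assuming you save the report that follows as `cluster-cpu-usage-2020.yaml` (a hypothetical file name) and that metering is installed in the `openshift-metering` namespace, you could apply it and then check on it with commands such as:

[source,terminal]
----
$ oc apply -f cluster-cpu-usage-2020.yaml -n openshift-metering
$ oc get report cluster-cpu-usage-2020 -n openshift-metering
----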
-
-.CPU usage by cluster example
-
-[source,yaml]
-----
-apiVersion: metering.openshift.io/v1
-kind: Report
-metadata:
-  name: cluster-cpu-usage-2020 <1>
-spec:
-  reportingStart: '2020-01-01T00:00:00Z' <2>
-  reportingEnd: '2020-12-30T23:59:59Z'
-  query: cluster-cpu-usage <3>
-  runImmediately: true <4>
-----
-<1> To stay organized, remember to change the `name` of your report if you change any of the other values.
-<2> Configures the report to start using data from the `reportingStart` timestamp until the `reportingEnd` timestamp.
-<3> Adjust your query here. You can also measure cluster usage with the `cluster-memory-usage` query.
-<4> Configures the report to run immediately after saving it and applying it.
diff --git a/modules/metering-cluster-utilization-examples.adoc b/modules/metering-cluster-utilization-examples.adoc
deleted file mode 100644
index 4c1856b5217f..000000000000
--- a/modules/metering-cluster-utilization-examples.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Module included in the following assemblies:
-//
-// * metering/metering-usage-examples.adoc
-
-[id="metering-cluster-utilization-examples_{context}"]
-= Measure cluster utilization using cron expressions
-
-You can also use cron expressions when configuring the period of your reports. The following report measures cluster utilization by looking at CPU utilization from 9am-5pm every weekday.
-
-.Weekday CPU utilization by cluster example
-
-[source,yaml]
-----
-apiVersion: metering.openshift.io/v1
-kind: Report
-metadata:
-  name: cluster-cpu-utilization-weekdays <1>
-spec:
-  query: "cluster-cpu-utilization" <2>
-  schedule:
-    period: "cron"
-    expression: 0 0 * * 1-5 <3>
-----
-<1> To stay organized, remember to change the `name` of your report if you change any of the other values.
-<2> Adjust your query here. You can also measure cluster utilization with the `cluster-memory-utilization` query.
-<3> For cron periods, normal cron expressions are valid.
diff --git a/modules/metering-configure-persistentvolumes.adoc b/modules/metering-configure-persistentvolumes.adoc
deleted file mode 100644
index 418782ec8b2b..000000000000
--- a/modules/metering-configure-persistentvolumes.adoc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Module included in the following assemblies:
-//
-// * metering/configuring_metering/metering-configure-hive-metastore.adoc
-
-[id="metering-configure-persistentvolumes_{context}"]
-= Configuring persistent volumes
-
-By default, Hive requires one persistent volume to operate.
-
-`hive-metastore-db-data` is the main persistent volume claim (PVC) required by default. This PVC is used by the Hive metastore to store metadata about tables, such as table name, columns, and location. The Hive metastore is used by Presto and the Hive server to look up table metadata when processing queries. You can remove this requirement by using MySQL or PostgreSQL for the Hive metastore database.
-
-To install, the Hive metastore requires that dynamic volume provisioning is enabled in a storage class, that a persistent volume of the correct size is manually pre-created, or that you use a pre-existing MySQL or PostgreSQL database.
-
-[id="metering-configure-persistentvolumes-storage-class-hive_{context}"]
-== Configuring the storage class for the Hive metastore
-To configure and specify a storage class for the `hive-metastore-db-data` persistent volume claim, specify the storage class in your `MeteringConfig` custom resource. An example `storage` section with the `class` field is included in the `metastore-storage.yaml` file below.
- -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - hive: - spec: - metastore: - storage: - # Default is null, which means using the default storage class if it exists. - # If you wish to use a different storage class, specify it here - # class: "null" <1> - size: "5Gi" ----- -<1> Uncomment this line and replace `null` with the name of the storage class to use. Leaving the value `null` will cause metering to use the default storage class for the cluster. - -[id="metering-configure-persistentvolumes-volume-size-hive_{context}"] -== Configuring the volume size for the Hive metastore - -Use the `metastore-storage.yaml` file below as a template to configure the volume size for the Hive metastore. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - hive: - spec: - metastore: - storage: - # Default is null, which means using the default storage class if it exists. - # If you wish to use a different storage class, specify it here - # class: "null" - size: "5Gi" <1> ----- -<1> Replace the value for `size` with your desired capacity. The example file shows "5Gi". diff --git a/modules/metering-debugging.adoc b/modules/metering-debugging.adoc deleted file mode 100644 index dab3a52a1eb4..000000000000 --- a/modules/metering-debugging.adoc +++ /dev/null @@ -1,228 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-troubleshooting-debugging.adoc - -[id="metering-debugging_{context}"] -= Debugging metering - -Debugging metering is much easier when you interact directly with the various components. The sections below detail how you can connect and query Presto and Hive as well as view the dashboards of the Presto and HDFS components. - -[NOTE] -==== -All of the commands in this section assume you have installed metering through OperatorHub in the `openshift-metering` namespace. -==== - -[id="metering-get-reporting-operator-logs_{context}"] -== Get reporting operator logs -Use the command below to follow the logs of the `reporting-operator`: - -[source,terminal] ----- -$ oc -n openshift-metering logs -f "$(oc -n openshift-metering get pods -l app=reporting-operator -o name | cut -c 5-)" -c reporting-operator ----- - -[id="metering-query-presto-using-presto-cli_{context}"] -== Query Presto using presto-cli -The following command opens an interactive presto-cli session where you can query Presto. This session runs in the same container as Presto and launches an additional Java instance, which can create memory limits for the pod. If this occurs, you should increase the memory request and limits of the Presto pod. - -By default, Presto is configured to communicate using TLS. You must use the following command to run Presto queries: - -[source,terminal] ----- -$ oc -n openshift-metering exec -it "$(oc -n openshift-metering get pods -l app=presto,presto=coordinator -o name | cut -d/ -f2)" \ - -- /usr/local/bin/presto-cli --server https://presto:8080 --catalog hive --schema default --user root --keystore-path /opt/presto/tls/keystore.pem ----- - -Once you run this command, a prompt appears where you can run queries. 
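As an illustrative sketch only (the table names depend on your namespace and appear in the listing shown next), a simple aggregate query against one of the datasource tables looks like this:

[source,terminal]
----
presto:default> select count(*) from metering.datasource_your_namespace_pod_usage_cpu_cores;
----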
Use the `show tables from metering;` query to view the list of tables: - -[source,terminal] ----- -$ presto:default> show tables from metering; ----- - -.Example output -[source,terminal] ----- - Table - - datasource_your_namespace_cluster_cpu_capacity_raw - datasource_your_namespace_cluster_cpu_usage_raw - datasource_your_namespace_cluster_memory_capacity_raw - datasource_your_namespace_cluster_memory_usage_raw - datasource_your_namespace_node_allocatable_cpu_cores - datasource_your_namespace_node_allocatable_memory_bytes - datasource_your_namespace_node_capacity_cpu_cores - datasource_your_namespace_node_capacity_memory_bytes - datasource_your_namespace_node_cpu_allocatable_raw - datasource_your_namespace_node_cpu_capacity_raw - datasource_your_namespace_node_memory_allocatable_raw - datasource_your_namespace_node_memory_capacity_raw - datasource_your_namespace_persistentvolumeclaim_capacity_bytes - datasource_your_namespace_persistentvolumeclaim_capacity_raw - datasource_your_namespace_persistentvolumeclaim_phase - datasource_your_namespace_persistentvolumeclaim_phase_raw - datasource_your_namespace_persistentvolumeclaim_request_bytes - datasource_your_namespace_persistentvolumeclaim_request_raw - datasource_your_namespace_persistentvolumeclaim_usage_bytes - datasource_your_namespace_persistentvolumeclaim_usage_raw - datasource_your_namespace_persistentvolumeclaim_usage_with_phase_raw - datasource_your_namespace_pod_cpu_request_raw - datasource_your_namespace_pod_cpu_usage_raw - datasource_your_namespace_pod_limit_cpu_cores - datasource_your_namespace_pod_limit_memory_bytes - datasource_your_namespace_pod_memory_request_raw - datasource_your_namespace_pod_memory_usage_raw - datasource_your_namespace_pod_persistentvolumeclaim_request_info - datasource_your_namespace_pod_request_cpu_cores - datasource_your_namespace_pod_request_memory_bytes - datasource_your_namespace_pod_usage_cpu_cores - datasource_your_namespace_pod_usage_memory_bytes -(32 rows) - -Query 20210503_175727_00107_3venm, FINISHED, 1 node -Splits: 19 total, 19 done (100.00%) -0:02 [32 rows, 2.23KB] [19 rows/s, 1.37KB/s] - -presto:default> ----- - -[id="metering-query-hive-using-beeline_{context}"] -== Query Hive using beeline -The following opens an interactive beeline session where you can query Hive. This session runs in the same container as Hive and launches an additional Java instance, which can create memory limits for the pod. If this occurs, you should increase the memory request and limits of the Hive pod. - -[source,terminal] ----- -$ oc -n openshift-metering exec -it $(oc -n openshift-metering get pods -l app=hive,hive=server -o name | cut -d/ -f2) \ - -c hiveserver2 -- beeline -u 'jdbc:hive2://127.0.0.1:10000/default;auth=noSasl' ----- - -Once you run this command, a prompt appears where you can run queries. 
Use the `show tables;` query to view the list of tables: - -[source,terminal] ----- -$ 0: jdbc:hive2://127.0.0.1:10000/default> show tables from metering; ----- - -.Example output -[source,terminal] ----- -+----------------------------------------------------+ -| tab_name | -+----------------------------------------------------+ -| datasource_your_namespace_cluster_cpu_capacity_raw | -| datasource_your_namespace_cluster_cpu_usage_raw | -| datasource_your_namespace_cluster_memory_capacity_raw | -| datasource_your_namespace_cluster_memory_usage_raw | -| datasource_your_namespace_node_allocatable_cpu_cores | -| datasource_your_namespace_node_allocatable_memory_bytes | -| datasource_your_namespace_node_capacity_cpu_cores | -| datasource_your_namespace_node_capacity_memory_bytes | -| datasource_your_namespace_node_cpu_allocatable_raw | -| datasource_your_namespace_node_cpu_capacity_raw | -| datasource_your_namespace_node_memory_allocatable_raw | -| datasource_your_namespace_node_memory_capacity_raw | -| datasource_your_namespace_persistentvolumeclaim_capacity_bytes | -| datasource_your_namespace_persistentvolumeclaim_capacity_raw | -| datasource_your_namespace_persistentvolumeclaim_phase | -| datasource_your_namespace_persistentvolumeclaim_phase_raw | -| datasource_your_namespace_persistentvolumeclaim_request_bytes | -| datasource_your_namespace_persistentvolumeclaim_request_raw | -| datasource_your_namespace_persistentvolumeclaim_usage_bytes | -| datasource_your_namespace_persistentvolumeclaim_usage_raw | -| datasource_your_namespace_persistentvolumeclaim_usage_with_phase_raw | -| datasource_your_namespace_pod_cpu_request_raw | -| datasource_your_namespace_pod_cpu_usage_raw | -| datasource_your_namespace_pod_limit_cpu_cores | -| datasource_your_namespace_pod_limit_memory_bytes | -| datasource_your_namespace_pod_memory_request_raw | -| datasource_your_namespace_pod_memory_usage_raw | -| datasource_your_namespace_pod_persistentvolumeclaim_request_info | -| datasource_your_namespace_pod_request_cpu_cores | -| datasource_your_namespace_pod_request_memory_bytes | -| datasource_your_namespace_pod_usage_cpu_cores | -| datasource_your_namespace_pod_usage_memory_bytes | -+----------------------------------------------------+ -32 rows selected (13.101 seconds) -0: jdbc:hive2://127.0.0.1:10000/default> ----- - -[id="metering-port-forward-hive-web-ui_{context}"] -== Port-forward to the Hive web UI -Run the following command to port-forward to the Hive web UI: - -[source,terminal] ----- -$ oc -n openshift-metering port-forward hive-server-0 10002 ----- - -You can now open http://127.0.0.1:10002 in your browser window to view the Hive web interface. - -[id="metering-port-forward-hdfs_{context}"] -== Port-forward to HDFS -Run the following command to port-forward to the HDFS namenode: - -[source,terminal] ----- -$ oc -n openshift-metering port-forward hdfs-namenode-0 9870 ----- - -You can now open http://127.0.0.1:9870 in your browser window to view the HDFS web interface. - -Run the following command to port-forward to the first HDFS datanode: - -[source,terminal] ----- -$ oc -n openshift-metering port-forward hdfs-datanode-0 9864 <1> ----- -<1> To check other datanodes, replace `hdfs-datanode-0` with the pod you want to view information on. - -[id="metering-ansible-operator_{context}"] -== Metering Ansible Operator -Metering uses the Ansible Operator to watch and reconcile resources in a cluster environment. 
When debugging a failed metering installation, it can be helpful to view the Ansible logs or status of your `MeteringConfig` custom resource. - -[id="metering-accessing-ansible-logs_{context}"] -=== Accessing Ansible logs -In the default installation, the Metering Operator is deployed as a pod. In this case, you can check the logs of the Ansible container within this pod: - -[source,terminal] ----- -$ oc -n openshift-metering logs $(oc -n openshift-metering get pods -l app=metering-operator -o name | cut -d/ -f2) -c ansible ----- - -Alternatively, you can view the logs of the Operator container (replace `-c ansible` with `-c operator`) for condensed output. - -[id="metering-checking-meteringconfig-status_{context}"] -=== Checking the MeteringConfig Status -It can be helpful to view the `.status` field of your `MeteringConfig` custom resource to debug any recent failures. The following command shows status messages with type `Invalid`: - -[source,terminal] ----- -$ oc -n openshift-metering get meteringconfig operator-metering -o=jsonpath='{.status.conditions[?(@.type=="Invalid")].message}' ----- -// $ oc -n openshift-metering get meteringconfig operator-metering -o json | jq '.status' - -[id="metering-checking-meteringconfig-events_{context}"] -=== Checking MeteringConfig Events -Check events that the Metering Operator is generating. This can be helpful during installation or upgrade to debug any resource failures. Sort events by the last timestamp: - -[source,terminal] ----- -$ oc -n openshift-metering get events --field-selector involvedObject.kind=MeteringConfig --sort-by='.lastTimestamp' ----- - -.Example output with latest changes in the MeteringConfig resources -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -4m40s Normal Validating meteringconfig/operator-metering Validating the user-provided configuration -4m30s Normal Started meteringconfig/operator-metering Configuring storage for the metering-ansible-operator -4m26s Normal Started meteringconfig/operator-metering Configuring TLS for the metering-ansible-operator -3m58s Normal Started meteringconfig/operator-metering Configuring reporting for the metering-ansible-operator -3m53s Normal Reconciling meteringconfig/operator-metering Reconciling metering resources -3m47s Normal Reconciling meteringconfig/operator-metering Reconciling monitoring resources -3m41s Normal Reconciling meteringconfig/operator-metering Reconciling HDFS resources -3m23s Normal Reconciling meteringconfig/operator-metering Reconciling Hive resources -2m59s Normal Reconciling meteringconfig/operator-metering Reconciling Presto resources -2m35s Normal Reconciling meteringconfig/operator-metering Reconciling reporting-operator resources -2m14s Normal Reconciling meteringconfig/operator-metering Reconciling reporting resources ----- diff --git a/modules/metering-exposing-the-reporting-api.adoc b/modules/metering-exposing-the-reporting-api.adoc deleted file mode 100644 index 4ee62a5184af..000000000000 --- a/modules/metering-exposing-the-reporting-api.adoc +++ /dev/null @@ -1,159 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-reporting-operator.adoc - -[id="metering-exposing-the-reporting-api_{context}"] -= Exposing the reporting API - -On {product-title} the default metering installation automatically exposes a route, making the reporting API available. 
This provides the following features: - -* Automatic DNS -* Automatic TLS based on the cluster CA - -Also, the default installation makes it possible to use the {product-title} service for serving certificates to protect the reporting API with TLS. The {product-title} OAuth proxy is deployed as a sidecar container for the Reporting Operator, which protects the reporting API with authentication. - -[id="metering-openshift-authentication_{context}"] -== Using {product-title} Authentication - -By default, the reporting API is secured with TLS and authentication. This is done by configuring the Reporting Operator to deploy a pod containing both the Reporting Operator's container, and a sidecar container running {product-title} auth-proxy. - -To access the reporting API, the Metering Operator exposes a route. After that route has been installed, you can run the following command to get the route's hostname. - -[source,terminal] ----- -$ METERING_ROUTE_HOSTNAME=$(oc -n openshift-metering get routes metering -o json | jq -r '.status.ingress[].host') ----- - -Next, set up authentication using either a service account token or basic authentication with a username and password. - -[id="metering-authenticate-using-service-account_{context}"] -=== Authenticate using a service account token -With this method, you use the token in the Reporting Operator's service account, and pass that bearer token to the Authorization header in the following command: - -[source,terminal] ----- -$ TOKEN=$(oc -n openshift-metering serviceaccounts get-token reporting-operator) -curl -H "Authorization: Bearer $TOKEN" -k "https://$METERING_ROUTE_HOSTNAME/api/v1/reports/get?name=[Report Name]&namespace=openshift-metering&format=[Format]" ----- - -Be sure to replace the `name=[Report Name]` and `format=[Format]` parameters in the URL above. The `format` parameter can be json, csv, or tabular. - -[id="metering-authenticate-using-username-password_{context}"] -=== Authenticate using a username and password - -Metering supports configuring basic authentication using a username and password combination, which is specified in the contents of an htpasswd file. By default, a secret containing empty htpasswd data is created. You can, however, configure the `reporting-operator.spec.authProxy.htpasswd.data` and `reporting-operator.spec.authProxy.htpasswd.createSecret` keys to use this method. - -Once you have specified the above in your `MeteringConfig` resource, you can run the following command: - -[source,terminal] ----- -$ curl -u testuser:password123 -k "https://$METERING_ROUTE_HOSTNAME/api/v1/reports/get?name=[Report Name]&namespace=openshift-metering&format=[Format]" ----- - -Be sure to replace `testuser:password123` with a valid username and password combination. - -[id="metering-manually-configure-authentication_{context}"] -== Manually Configuring Authentication -To manually configure, or disable OAuth in the Reporting Operator, you must set `spec.tls.enabled: false` in your `MeteringConfig` resource. - -[WARNING] -==== -This also disables all TLS and authentication between the Reporting Operator, Presto, and Hive. You would need to manually configure these resources yourself. -==== - -Authentication can be enabled by configuring the following options. Enabling authentication configures the Reporting Operator pod to run the {product-title} auth-proxy as a sidecar container in the pod. This adjusts the ports so that the reporting API isn't exposed directly, but instead is proxied to via the auth-proxy sidecar container. 
- -* `reporting-operator.spec.authProxy.enabled` -* `reporting-operator.spec.authProxy.cookie.createSecret` -* `reporting-operator.spec.authProxy.cookie.seed` - -You need to set `reporting-operator.spec.authProxy.enabled` and `reporting-operator.spec.authProxy.cookie.createSecret` to `true` and `reporting-operator.spec.authProxy.cookie.seed` to a 32-character random string. - -You can generate a 32-character random string using the following command: - -[source,terminal] ----- -$ openssl rand -base64 32 | head -c32; echo ----- - -[id="metering-token-authentication_{context}"] -=== Token authentication - -When the following options are set to `true`, authentication using a bearer token is enabled for the reporting REST API. Bearer tokens can come from service accounts or users. - -* `reporting-operator.spec.authProxy.subjectAccessReview.enabled` -* `reporting-operator.spec.authProxy.delegateURLs.enabled` - -When authentication is enabled, the bearer token of the user or service account that queries the reporting API must be granted access using one of the following roles: - -* report-exporter -* reporting-admin -* reporting-viewer -* metering-admin -* metering-viewer - -The Metering Operator can create role bindings that grant these permissions for you when you specify a list of subjects in the `spec.permissions` section. For an example, see the following `advanced-auth.yaml` configuration. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - permissions: - # anyone in the "metering-admins" group can create, update, delete, etc any - # metering.openshift.io resources in the namespace. - # This also grants permissions to get and query report results from the reporting REST API. - meteringAdmins: - - kind: Group - name: metering-admins - # Same as above except read only access and for the metering-viewers group. - meteringViewers: - - kind: Group - name: metering-viewers - # the default serviceaccount in the namespace "my-custom-ns" can: - # create, update, delete, etc reports. - # This also gives permissions to query the results from the reporting REST API. - reportingAdmins: - - kind: ServiceAccount - name: default - namespace: my-custom-ns - # anyone in the group reporting-readers can get, list, watch reports, and - # query report results from the reporting REST API. - reportingViewers: - - kind: Group - name: reporting-readers - # anyone in the group cluster-admins can query report results - # from the reporting REST API. So can the user bob-from-accounting. - reportExporters: - - kind: Group - name: cluster-admins - - kind: User - name: bob-from-accounting - - reporting-operator: - spec: - authProxy: - # htpasswd.data can contain htpasswd file contents for allowing auth - # using a static list of usernames and their password hashes. - # - # username is 'testuser' password is 'password123' - # generated htpasswdData using: `htpasswd -nb -s testuser password123` - # htpasswd: - # data: | - # testuser:{SHA}y/2sYAj5yrQIN4TL0YdPdmGNKpc= - # - # change REPLACEME to the output of your htpasswd command - htpasswd: - data: | - REPLACEME ----- - -Alternatively, you can use any role that has rules granting `get` permissions to `reports/export`. This means `get` access to the `export` sub-resource of the `Report` resources in the namespace of the Reporting Operator. For example: `admin` and `cluster-admin`. 
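For reference, the following is a minimal sketch of such a role and role binding. The `report-results-exporter` name and the `alice` user are hypothetical values and are not created by the default installation:

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: report-results-exporter  # hypothetical name
  namespace: openshift-metering
rules:
- apiGroups: ["metering.openshift.io"]
  resources: ["reports/export"]  # the export sub-resource of Report objects
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: report-results-exporter
  namespace: openshift-metering
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: report-results-exporter
subjects:
- kind: User
  name: alice  # hypothetical user to grant export access to
  apiGroup: rbac.authorization.k8s.io
----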
- -By default, the Reporting Operator and Metering Operator service accounts both have these permissions, and their tokens can be used for authentication. - -[id="metering-basic-authentication_{context}"] -=== Basic authentication with a username and password -For basic authentication you can supply a username and password in the `reporting-operator.spec.authProxy.htpasswd.data` field. The username and password must be the same format as those found in an htpasswd file. When set, you can use HTTP basic authentication to provide your username and password that has a corresponding entry in the `htpasswdData` contents. diff --git a/modules/metering-install-operator.adoc b/modules/metering-install-operator.adoc deleted file mode 100644 index 08b53a0edcdb..000000000000 --- a/modules/metering-install-operator.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-installing-metering.adoc - -[id="metering-install-operator_{context}"] -= Installing the Metering Operator - -You can install metering by deploying the Metering Operator. The Metering Operator creates and manages the components of the metering stack. - -[NOTE] -==== -You cannot create a project starting with `openshift-` using the web console or by using the `oc new-project` command in the CLI. -==== - -[NOTE] -==== -If the Metering Operator is installed using a namespace other than `openshift-metering`, the metering reports are only viewable using the CLI. It is strongly suggested throughout the installation steps to use the `openshift-metering` namespace. -==== - -[id="metering-install-web-console_{context}"] -== Installing metering using the web console -You can use the {product-title} web console to install the Metering Operator. - -.Procedure - -. Create a namespace object YAML file for the Metering Operator with the `oc create -f <file-name>.yaml` command. You must use the CLI to create the namespace. For example, `metering-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-metering <1> - annotations: - openshift.io/node-selector: "" <2> - labels: - openshift.io/cluster-monitoring: "true" ----- -<1> It is strongly recommended to deploy metering in the `openshift-metering` namespace. -<2> Include this annotation before configuring specific node selectors for the operand pods. - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. Filter for `metering` to find the Metering Operator. - -. Click the *Metering* card, review the package description, and then click *Install*. -. Select an *Update Channel*, *Installation Mode*, and *Approval Strategy*. -. Click *Install*. - -. Verify that the Metering Operator is installed by switching to the *Operators* -> *Installed Operators* page. The Metering Operator has a *Status* of *Succeeded* when the installation is complete. -+ -[NOTE] -==== -It might take several minutes for the Metering Operator to appear. -==== - -. Click *Metering* on the *Installed Operators* page for Operator *Details*. From the *Details* page you can create different resources related to metering. - -To complete the metering installation, create a `MeteringConfig` resource to configure metering and install the components of the metering stack. - -[id="metering-install-cli_{context}"] -== Installing metering using the CLI - -You can use the {product-title} CLI to install the Metering Operator. - -.Procedure - -. Create a `Namespace` object YAML file for the Metering Operator. 
You must use the CLI to create the namespace. For example, `metering-namespace.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-metering <1> - annotations: - openshift.io/node-selector: "" <2> - labels: - openshift.io/cluster-monitoring: "true" ----- -<1> It is strongly recommended to deploy metering in the `openshift-metering` namespace. -<2> Include this annotation before configuring specific node selectors for the operand pods. - -. Create the `Namespace` object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f openshift-metering.yaml ----- - -. Create the `OperatorGroup` object YAML file. For example, `metering-og`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: openshift-metering <1> - namespace: openshift-metering <2> -spec: - targetNamespaces: - - openshift-metering ----- -<1> The name is arbitrary. -<2> Specify the `openshift-metering` namespace. - -. Create a `Subscription` object YAML file to subscribe a namespace to the Metering Operator. This object targets the most recently released version in the `redhat-operators` catalog source. For example, `metering-sub.yaml`: -+ -[source,yaml, subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: metering-ocp <1> - namespace: openshift-metering <2> -spec: - channel: "{product-version}" <3> - source: "redhat-operators" <4> - sourceNamespace: "openshift-marketplace" - name: "metering-ocp" - installPlanApproval: "Automatic" <5> ----- -<1> The name is arbitrary. -<2> You must specify the `openshift-metering` namespace. -<3> Specify {product-version} as the channel. -<4> Specify the `redhat-operators` catalog source, which contains the `metering-ocp` package manifests. If your {product-title} is installed on a restricted network, also known as a disconnected cluster, specify the name of the `CatalogSource` object you created when you configured the Operator LifeCycle Manager (OLM). -<5> Specify "Automatic" install plan approval. diff --git a/modules/metering-install-prerequisites.adoc b/modules/metering-install-prerequisites.adoc deleted file mode 100644 index 293f9f55b897..000000000000 --- a/modules/metering-install-prerequisites.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-installing-metering.adoc - -[id="metering-install-prerequisites_{context}"] -= Prerequisites - -Metering requires the following components: - -* A `StorageClass` resource for dynamic volume provisioning. Metering supports a number of different storage solutions. -* 4GB memory and 4 CPU cores available cluster capacity and at least one node with 2 CPU cores and 2GB memory capacity available. -* The minimum resources needed for the largest single pod installed by metering are 2GB of memory and 2 CPU cores. -** Memory and CPU consumption may often be lower, but will spike when running reports, or collecting data for larger clusters. 
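As a quick check before installing, you can confirm that these prerequisites are met. The following commands are a minimal sketch rather than part of the official procedure; they list the available `StorageClass` resources and the allocatable CPU and memory on each node:

[source,terminal]
----
$ oc get storageclass
$ oc get nodes -o custom-columns=NAME:.metadata.name,CPU:.status.allocatable.cpu,MEMORY:.status.allocatable.memory
----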
diff --git a/modules/metering-install-verify.adoc b/modules/metering-install-verify.adoc deleted file mode 100644 index 9b575dfa7962..000000000000 --- a/modules/metering-install-verify.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-installing-metering.adc - -[id="metering-install-verify_{context}"] -= Verifying the metering installation - -You can verify the metering installation by performing any of the following checks: - -* Check the Metering Operator `ClusterServiceVersion` (CSV) resource for the metering version. This can be done through either the web console or CLI. -+ --- -.Procedure (UI) - . Navigate to *Operators* -> *Installed Operators* in the `openshift-metering` namespace. - . Click *Metering Operator*. - . Click *Subscription* for *Subscription Details*. - . Check the *Installed Version*. - -.Procedure (CLI) -* Check the Metering Operator CSV in the `openshift-metering` namespace: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering get csv ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -elasticsearch-operator.{product-version}.0-202006231303.p0 OpenShift Elasticsearch Operator {product-version}.0-202006231303.p0 Succeeded -metering-operator.v{product-version}.0 Metering {product-version}.0 Succeeded ----- --- - -* Check that all required pods in the `openshift-metering` namespace are created. This can be done through either the web console or CLI. -+ --- -[NOTE] -==== -Many pods rely on other components to function before they themselves can be considered ready. Some pods may restart if other pods take too long to start. This is to be expected during the Metering Operator installation. -==== - -.Procedure (UI) -* Navigate to *Workloads* -> *Pods* in the metering namespace and verify that pods are being created. This can take several minutes after installing the metering stack. - -.Procedure (CLI) -* Check that all required pods in the `openshift-metering` namespace are created: -+ -[source,terminal] ----- -$ oc -n openshift-metering get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -hive-metastore-0 2/2 Running 0 3m28s -hive-server-0 3/3 Running 0 3m28s -metering-operator-68dd64cfb6-2k7d9 2/2 Running 0 5m17s -presto-coordinator-0 2/2 Running 0 3m9s -reporting-operator-5588964bf8-x2tkn 2/2 Running 0 2m40s ----- --- - -* Verify that the `ReportDataSource` resources are beginning to import data, indicated by a valid timestamp in the `EARLIEST METRIC` column. This might take several minutes. 
Filter out the "-raw" `ReportDataSource` resources, which do not import data: -+ -[source,terminal] ----- -$ oc get reportdatasources -n openshift-metering | grep -v raw ----- -+ -.Example output -[source,terminal] ----- -NAME EARLIEST METRIC NEWEST METRIC IMPORT START IMPORT END LAST IMPORT TIME AGE -node-allocatable-cpu-cores 2019-08-05T16:52:00Z 2019-08-05T18:52:00Z 2019-08-05T16:52:00Z 2019-08-05T18:52:00Z 2019-08-05T18:54:45Z 9m50s -node-allocatable-memory-bytes 2019-08-05T16:51:00Z 2019-08-05T18:51:00Z 2019-08-05T16:51:00Z 2019-08-05T18:51:00Z 2019-08-05T18:54:45Z 9m50s -node-capacity-cpu-cores 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T18:54:39Z 9m50s -node-capacity-memory-bytes 2019-08-05T16:52:00Z 2019-08-05T18:41:00Z 2019-08-05T16:52:00Z 2019-08-05T18:41:00Z 2019-08-05T18:54:44Z 9m50s -persistentvolumeclaim-capacity-bytes 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T18:54:43Z 9m50s -persistentvolumeclaim-phase 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T16:51:00Z 2019-08-05T18:29:00Z 2019-08-05T18:54:28Z 9m50s -persistentvolumeclaim-request-bytes 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T18:54:34Z 9m50s -persistentvolumeclaim-usage-bytes 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T18:54:36Z 9m49s -pod-limit-cpu-cores 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T16:52:00Z 2019-08-05T18:30:00Z 2019-08-05T18:54:26Z 9m49s -pod-limit-memory-bytes 2019-08-05T16:51:00Z 2019-08-05T18:40:00Z 2019-08-05T16:51:00Z 2019-08-05T18:40:00Z 2019-08-05T18:54:30Z 9m49s -pod-persistentvolumeclaim-request-info 2019-08-05T16:51:00Z 2019-08-05T18:40:00Z 2019-08-05T16:51:00Z 2019-08-05T18:40:00Z 2019-08-05T18:54:37Z 9m49s -pod-request-cpu-cores 2019-08-05T16:51:00Z 2019-08-05T18:18:00Z 2019-08-05T16:51:00Z 2019-08-05T18:18:00Z 2019-08-05T18:54:24Z 9m49s -pod-request-memory-bytes 2019-08-05T16:52:00Z 2019-08-05T18:08:00Z 2019-08-05T16:52:00Z 2019-08-05T18:08:00Z 2019-08-05T18:54:32Z 9m49s -pod-usage-cpu-cores 2019-08-05T16:52:00Z 2019-08-05T17:57:00Z 2019-08-05T16:52:00Z 2019-08-05T17:57:00Z 2019-08-05T18:54:10Z 9m49s -pod-usage-memory-bytes 2019-08-05T16:52:00Z 2019-08-05T18:08:00Z 2019-08-05T16:52:00Z 2019-08-05T18:08:00Z 2019-08-05T18:54:20Z 9m49s ----- - -After all pods are ready and you have verified that data is being imported, you can begin using metering to collect data and report on your cluster. diff --git a/modules/metering-overview.adoc b/modules/metering-overview.adoc deleted file mode 100644 index abb20ed8c452..000000000000 --- a/modules/metering-overview.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-installing-metering.adoc -// * metering/metering-using-metering.adoc - -[id="metering-overview_{context}"] -= Metering overview - -Metering is a general purpose data analysis tool that enables you to write reports to process data from different data sources. As a cluster administrator, you can use metering to analyze what is happening in your cluster. You can either write your own, or use predefined SQL queries to define how you want to process data from the different data sources you have available. - -Metering focuses primarily on in-cluster metric data using Prometheus as a default data source, enabling users of metering to do reporting on pods, namespaces, and most other Kubernetes resources. 
- -You can install metering on {product-title} 4.x clusters and above. - -[id="metering-resources_{context}"] -== Metering resources - -Metering has many resources which can be used to manage the deployment and installation of metering, as well as the reporting functionality metering provides. - -Metering is managed using the following custom resource definitions (CRDs): - -[cols="1,7"] -|=== - -|*MeteringConfig* |Configures the metering stack for deployment. Contains customizations and configuration options to control each component that makes up the metering stack. - -|*Report* |Controls what query to use, when, and how often the query should be run, and where to store the results. - -|*ReportQuery* |Contains the SQL queries used to perform analysis on the data contained within `ReportDataSource` resources. - -|*ReportDataSource* |Controls the data available to `ReportQuery` and `Report` resources. Allows configuring access to different databases for use within metering. - -|=== diff --git a/modules/metering-prometheus-connection.adoc b/modules/metering-prometheus-connection.adoc deleted file mode 100644 index abc94d6a433e..000000000000 --- a/modules/metering-prometheus-connection.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-reporting-operator.adoc - -[id="metering-prometheus-connection_{context}"] -= Securing a Prometheus connection - -When you install metering on {product-title}, Prometheus is available at https://prometheus-k8s.openshift-monitoring.svc:9091/. - -To secure the connection to Prometheus, the default metering installation uses the {product-title} certificate authority (CA). If your Prometheus instance uses a different CA, you can inject the CA through a config map. You can also configure the Reporting Operator to use a specified bearer token to authenticate with Prometheus. - -.Procedure - -* Inject the CA that your Prometheus instance uses through a config map. For example: -+ -[source,yaml] ----- -spec: - reporting-operator: - spec: - config: - prometheus: - certificateAuthority: - useServiceAccountCA: false - configMap: - enabled: true - create: true - name: reporting-operator-certificate-authority-config - filename: "internal-ca.crt" - value: | - -----BEGIN CERTIFICATE----- - (snip) - -----END CERTIFICATE----- ----- -+ -Alternatively, to use the system certificate authorities for publicly valid certificates, set both `useServiceAccountCA` and `configMap.enabled` to `false`. - -* Specify a bearer token to authenticate with Prometheus. For example: - -[source,yaml] ----- -spec: - reporting-operator: - spec: - config: - prometheus: - metricsImporter: - auth: - useServiceAccountToken: false - tokenSecret: - enabled: true - create: true - value: "abc-123" ----- diff --git a/modules/metering-reports.adoc b/modules/metering-reports.adoc deleted file mode 100644 index e9cb4025d9e7..000000000000 --- a/modules/metering-reports.adoc +++ /dev/null @@ -1,381 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-about-reports.adoc -[id="metering-reports_{context}"] -= Reports - -The `Report` custom resource is used to manage the execution and status of reports. Metering produces reports derived from usage data sources, which can be used in further analysis and filtering. A single `Report` resource represents a job that manages a database table and updates it with new information according to a schedule. 
The report exposes the data in that table via the Reporting Operator HTTP API. - -Reports with a `spec.schedule` field set are always running and track the time periods for which they have collected data. This ensures that if metering is shut down or unavailable for an extended period of time, it backfills the data starting where it left off. If the schedule is unset, then the report runs once for the time specified by the `reportingStart` and `reportingEnd` values. By default, reports wait for `ReportDataSource` resources to have fully imported any data covered in the reporting period. If the report has a schedule, it waits to run until the data in the period currently being processed has finished importing. - -[id="metering-example-report-with-schedule_{context}"] -== Example report with a schedule - -The following example `Report` object contains information on every pod's CPU requests and runs every hour, adding the last hour's worth of data each time it runs. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: pod-cpu-request-hourly -spec: - query: "pod-cpu-request" - reportingStart: "2021-07-01T00:00:00Z" - schedule: - period: "hourly" - hourly: - minute: 0 - second: 0 ----- - -[id="metering-example-report-without-schedule_{context}"] -== Example report without a schedule (run-once) - -The following example `Report` object contains information on every pod's CPU requests for all of July. After completion, it does not run again. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: pod-cpu-request-hourly -spec: - query: "pod-cpu-request" - reportingStart: "2021-07-01T00:00:00Z" - reportingEnd: "2021-07-31T00:00:00Z" ----- - -[id="metering-query_{context}"] -== query - -The `query` field names the `ReportQuery` resource used to generate the report. The report query controls the schema of the report as well as how the results are processed. 
- -*`query` is a required field.* - -Use the following command to list available `ReportQuery` resources: - -[source,terminal] ----- -$ oc -n openshift-metering get reportqueries ----- - -.Example output -[source,terminal] ----- -NAME AGE -cluster-cpu-capacity 23m -cluster-cpu-capacity-raw 23m -cluster-cpu-usage 23m -cluster-cpu-usage-raw 23m -cluster-cpu-utilization 23m -cluster-memory-capacity 23m -cluster-memory-capacity-raw 23m -cluster-memory-usage 23m -cluster-memory-usage-raw 23m -cluster-memory-utilization 23m -cluster-persistentvolumeclaim-request 23m -namespace-cpu-request 23m -namespace-cpu-usage 23m -namespace-cpu-utilization 23m -namespace-memory-request 23m -namespace-memory-usage 23m -namespace-memory-utilization 23m -namespace-persistentvolumeclaim-request 23m -namespace-persistentvolumeclaim-usage 23m -node-cpu-allocatable 23m -node-cpu-allocatable-raw 23m -node-cpu-capacity 23m -node-cpu-capacity-raw 23m -node-cpu-utilization 23m -node-memory-allocatable 23m -node-memory-allocatable-raw 23m -node-memory-capacity 23m -node-memory-capacity-raw 23m -node-memory-utilization 23m -persistentvolumeclaim-capacity 23m -persistentvolumeclaim-capacity-raw 23m -persistentvolumeclaim-phase-raw 23m -persistentvolumeclaim-request 23m -persistentvolumeclaim-request-raw 23m -persistentvolumeclaim-usage 23m -persistentvolumeclaim-usage-raw 23m -persistentvolumeclaim-usage-with-phase-raw 23m -pod-cpu-request 23m -pod-cpu-request-raw 23m -pod-cpu-usage 23m -pod-cpu-usage-raw 23m -pod-memory-request 23m -pod-memory-request-raw 23m -pod-memory-usage 23m -pod-memory-usage-raw 23m ----- - -Report queries with the `-raw` suffix are used by other `ReportQuery` resources to build more complex queries, and should not be used directly for reports. - -`namespace-` prefixed queries aggregate pod CPU and memory requests by namespace, providing a list of namespaces and their overall usage based on resource requests. - -`pod-` prefixed queries are similar to `namespace-` prefixed queries but aggregate information by pod rather than namespace. These queries include the pod's namespace and node. - -`node-` prefixed queries return information about each node's total available resources. - -`aws-` prefixed queries are specific to AWS. Queries suffixed with `-aws` return the same data as queries of the same name without the suffix, and correlate usage with the EC2 billing data. - -The `aws-ec2-billing-data` report is used by other queries, and should not be used as a standalone report. The `aws-ec2-cluster-cost` report provides a total cost based on the nodes included in the cluster, and the sum of their costs for the time period being reported on. - -Use the following command to get the `ReportQuery` resource as YAML, and check the `spec.columns` field. For example, run: - -[source,terminal] ----- -$ oc -n openshift-metering get reportqueries namespace-memory-request -o yaml ----- - -.Example output -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: ReportQuery -metadata: - name: namespace-memory-request - labels: - operator-metering: "true" -spec: - columns: - - name: period_start - type: timestamp - unit: date - - name: period_end - type: timestamp - unit: date - - name: namespace - type: varchar - unit: kubernetes_namespace - - name: pod_request_memory_byte_seconds - type: double - unit: byte_seconds ----- - -[id="metering-schedule_{context}"] -== schedule - -The `spec.schedule` configuration block defines when the report runs. 
The main fields in the `schedule` section are `period`, and then depending on the value of `period`, the fields `hourly`, `daily`, `weekly`, and `monthly` allow you to fine-tune when the report runs. - -For example, if `period` is set to `weekly`, you can add a `weekly` field to the `spec.schedule` block. The following example will run once a week on Wednesday, at 1 PM (hour 13 in the day). - -[source,yaml] ----- -... - schedule: - period: "weekly" - weekly: - dayOfWeek: "wednesday" - hour: 13 -... ----- - -[id="metering-period_{context}"] -=== period - -Valid values of `schedule.period` are listed below, and the options available to set for a given period are also listed. - -* `hourly` -** `minute` -** `second` -* `daily` -** `hour` -** `minute` -** `second` -* `weekly` -** `dayOfWeek` -** `hour` -** `minute` -** `second` -* `monthly` -** `dayOfMonth` -** `hour` -** `minute` -** `second` -* `cron` -** `expression` - -Generally, the `hour`, `minute`, `second` fields control when in the day the report runs, and `dayOfWeek`/`dayOfMonth` control what day of the week, or day of month the report runs on, if it is a weekly or monthly report period. - -For each of these fields, there is a range of valid values: - -* `hour` is an integer value between 0-23. -* `minute` is an integer value between 0-59. -* `second` is an integer value between 0-59. -* `dayOfWeek` is a string value that expects the day of the week (spelled out). -* `dayOfMonth` is an integer value between 1-31. - -For cron periods, normal cron expressions are valid: - -* `expression: "*/5 * * * *"` - -[id="metering-reportingStart_{context}"] -== reportingStart - -To support running a report against existing data, you can set the `spec.reportingStart` field to a link:https://tools.ietf.org/html/rfc3339#section-5.8[RFC3339 timestamp] to tell the report to run according to its `schedule` starting from `reportingStart` rather than the current time. - -[NOTE] -==== -Setting the `spec.reportingStart` field to a specific time will result in the Reporting Operator running many queries in succession for each interval in the schedule that is between the `reportingStart` time and the current time. This could be thousands of queries if the period is less than daily and the `reportingStart` is more than a few months back. If `reportingStart` is left unset, the report will run at the next full `reportingPeriod` after the time the report is created. -==== - -As an example of how to use this field, if you had data already collected dating back to January 1st, 2019 that you want to include in your `Report` object, you can create a report with the following values: - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: pod-cpu-request-hourly -spec: - query: "pod-cpu-request" - schedule: - period: "hourly" - reportingStart: "2021-01-01T00:00:00Z" ----- - -[id="metering-reportingEnd_{context}"] -== reportingEnd - -To configure a report to only run until a specified time, you can set the `spec.reportingEnd` field to an link:https://tools.ietf.org/html/rfc3339#section-5.8[RFC3339 timestamp]. The value of this field will cause the report to stop running on its schedule after it has finished generating reporting data for the period covered from its start time until `reportingEnd`. - -Because a schedule will most likely not align with the `reportingEnd`, the last period in the schedule will be shortened to end at the specified `reportingEnd` time. 
If left unset, then the report will run forever, or until a `reportingEnd` is set on the report. - -For example, if you want to create a report that runs once a week for the month of July: - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: pod-cpu-request-hourly -spec: - query: "pod-cpu-request" - schedule: - period: "weekly" - reportingStart: "2021-07-01T00:00:00Z" - reportingEnd: "2021-07-31T00:00:00Z" ----- - -[id="metering-expiration_{context}"] -== expiration - -Add the `expiration` field to set a retention period on a scheduled metering report. You can avoid manually removing the report by setting the `expiration` duration value. The retention period is equal to the `Report` object `creationDate` plus the `expiration` duration. The report is removed from the cluster at the end of the retention period if no other reports or report queries depend on the expiring report. Deleting the report from the cluster can take several minutes. - -[NOTE] -==== -Setting the `expiration` field is not recommended for roll-up or aggregated reports. If a report is depended upon by other reports or report queries, then the report is not removed at the end of the retention period. You can view the `report-operator` logs at debug level for the timing output around a report retention decision. -==== - -For example, the following scheduled report is deleted 30 minutes after the `creationDate` of the report: - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: pod-cpu-request-hourly -spec: - query: "pod-cpu-request" - schedule: - period: "weekly" - reportingStart: "2021-07-01T00:00:00Z" - expiration: "30m" <1> ----- -<1> Valid time units for the `expiration` duration are `ns`, `us` (or `µs`), `ms`, `s`, `m`, and `h`. - -[NOTE] -==== -The `expiration` retention period for a `Report` object is not precise and works on the order of several minutes, not nanoseconds. -==== - -[id="metering-runImmediately_{context}"] -== runImmediately - -When `runImmediately` is set to `true`, the report runs immediately. This behavior ensures that the report is immediately processed and queued without requiring additional scheduling parameters. - -[NOTE] -==== -When `runImmediately` is set to `true`, you must set a `reportingEnd` and `reportingStart` value. -==== - -[id="metering-inputs_{context}"] -== inputs - -The `spec.inputs` field of a `Report` object can be used to override or set values defined in a `ReportQuery` resource's `spec.inputs` field. - -`spec.inputs` is a list of name-value pairs: - -[source,yaml] ----- -spec: - inputs: - - name: "NamespaceCPUUsageReportName" <1> - value: "namespace-cpu-usage-hourly" <2> ----- - -<1> The `name` of an input must exist in the ReportQuery's `inputs` list. -<2> The `value` of the input must be the correct type for the input's `type`. - -// TODO(chance): include modules/metering-reportquery-inputs.adoc module - -[id="metering-roll-up-reports_{context}"] -== Roll-up reports - -Report data is stored in the database much like metrics themselves, and therefore, can be used in aggregated or roll-up reports. A simple use case for a roll-up report is to spread the time required to produce a report over a longer period of time. This is instead of requiring a monthly report to query and add all data over an entire month. For example, the task can be split into daily reports that each run over 1/30 of the data. - -A custom roll-up report requires a custom report query. 
The `ReportQuery` resource template processor provides a `reportTableName` function that can get the necessary table name from a `Report` object's `metadata.name`. - -Below is a snippet taken from a built-in query: - -.pod-cpu.yaml -[source,yaml] ----- -spec: -... - inputs: - - name: ReportingStart - type: time - - name: ReportingEnd - type: time - - name: NamespaceCPUUsageReportName - type: Report - - name: PodCpuUsageRawDataSourceName - type: ReportDataSource - default: pod-cpu-usage-raw -... - - query: | -... - {|- if .Report.Inputs.NamespaceCPUUsageReportName |} - namespace, - sum(pod_usage_cpu_core_seconds) as pod_usage_cpu_core_seconds - FROM {| .Report.Inputs.NamespaceCPUUsageReportName | reportTableName |} -... ----- - -.Example `aggregated-report.yaml` roll-up report -[source,yaml] ----- -spec: - query: "namespace-cpu-usage" - inputs: - - name: "NamespaceCPUUsageReportName" - value: "namespace-cpu-usage-hourly" ----- - -// TODO(chance): replace the comment below with an include on the modules/metering-rollup-report.adoc -// For more information on setting up a roll-up report, see the [roll-up report guide](rollup-reports.md). - -[id="metering-report-status_{context}"] -=== Report status - -The execution of a scheduled report can be tracked using its status field. Any errors occurring during the preparation of a report will be recorded here. - -The `status` field of a `Report` object currently has two fields: - -* `conditions`: Conditions is a list of conditions, each of which have a `type`, `status`, `reason`, and `message` field. Possible values of a condition's `type` field are `Running` and `Failure`, indicating the current state of the scheduled report. The `reason` indicates why its `condition` is in its current state with the `status` being either `true`, `false` or, `unknown`. The `message` provides a human readable indicating why the condition is in the current state. For detailed information on the `reason` values, see link:https://github.com/operator-framework/operator-metering/blob/master/pkg/apis/metering/v1/util/report_util.go#L10[`pkg/apis/metering/v1/util/report_util.go`]. -* `lastReportTime`: Indicates the time metering has collected data up to. diff --git a/modules/metering-store-data-in-azure.adoc b/modules/metering-store-data-in-azure.adoc deleted file mode 100644 index 6913ed279252..000000000000 --- a/modules/metering-store-data-in-azure.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-persistent-storage.adoc - -[id="metering-store-data-in-azure_{context}"] -= Storing data in Microsoft Azure - -To store data in Azure blob storage, you must use an existing container. - -.Procedure - -. Edit the `spec.storage` section in the `azure-blob-storage.yaml` file: -+ -.Example `azure-blob-storage.yaml` file -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - storage: - type: "hive" - hive: - type: "azure" - azure: - container: "bucket1" <1> - secretName: "my-azure-secret" <2> - rootDirectory: "/testDir" <3> ----- -<1> Specify the container name. -<2> Specify a secret in the metering namespace. See the example `Secret` object below for more details. -<3> Optional: Specify the directory where you would like to store your data. - -. 
Use the following `Secret` object as a template: -+ -.Example Azure `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-azure-secret -data: - azure-storage-account-name: "dGVzdAo=" - azure-secret-access-key: "c2VjcmV0Cg==" ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret -n openshift-metering generic my-azure-secret \ - --from-literal=azure-storage-account-name=my-storage-account-name \ - --from-literal=azure-secret-access-key=my-secret-key ----- diff --git a/modules/metering-store-data-in-gcp.adoc b/modules/metering-store-data-in-gcp.adoc deleted file mode 100644 index f9233c409d4b..000000000000 --- a/modules/metering-store-data-in-gcp.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-persistent-storage.adoc - -[id="metering-store-data-in-gcp_{context}"] -= Storing data in Google Cloud Storage - -To store your data in Google Cloud Storage, you must use an existing bucket. - -.Procedure - -. Edit the `spec.storage` section in the `gcs-storage.yaml` file: -+ -.Example `gcs-storage.yaml` file -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - storage: - type: "hive" - hive: - type: "gcs" - gcs: - bucket: "metering-gcs/test1" <1> - secretName: "my-gcs-secret" <2> ----- -<1> Specify the name of the bucket. You can optionally specify the directory within the bucket where you would like to store your data. -<2> Specify a secret in the metering namespace. See the example `Secret` object below for more details. - -. Use the following `Secret` object as a template: -+ -.Example Google Cloud Storage `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-gcs-secret -data: - gcs-service-account.json: "c2VjcmV0Cg==" ----- - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret -n openshift-metering generic my-gcs-secret \ - --from-file gcs-service-account.json=/path/to/my/service-account-key.json ----- diff --git a/modules/metering-store-data-in-s3-compatible.adoc b/modules/metering-store-data-in-s3-compatible.adoc deleted file mode 100644 index 8dfb1c5acacb..000000000000 --- a/modules/metering-store-data-in-s3-compatible.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-persistent-storage.adoc - -[id="metering-store-data-in-s3-compatible_{context}"] -= Storing data in S3-compatible storage - -You can use S3-compatible storage such as Noobaa. - -.Procedure - -. Edit the `spec.storage` section in the `s3-compatible-storage.yaml` file: -+ -.Example `s3-compatible-storage.yaml` file -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - storage: - type: "hive" - hive: - type: "s3Compatible" - s3Compatible: - bucket: "bucketname" <1> - endpoint: "http://example:port-number" <2> - secretName: "my-aws-secret" <3> ----- -<1> Specify the name of your S3-compatible bucket. -<2> Specify the endpoint for your storage. -<3> The name of a secret in the metering namespace containing the AWS credentials in the `data.aws-access-key-id` and `data.aws-secret-access-key` fields. See the example `Secret` object below for more details. - -. 
Use the following `Secret` object as a template: -+ -.Example S3-compatible `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-aws-secret -data: - aws-access-key-id: "dGVzdAo=" - aws-secret-access-key: "c2VjcmV0Cg==" ----- diff --git a/modules/metering-store-data-in-s3.adoc b/modules/metering-store-data-in-s3.adoc deleted file mode 100644 index 917ff73d2270..000000000000 --- a/modules/metering-store-data-in-s3.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-persistent-storage.adoc - -[id="metering-store-data-in-s3_{context}"] -= Storing data in Amazon S3 - -Metering can use an existing Amazon S3 bucket or create a bucket for storage. - -[NOTE] -==== -Metering does not manage or delete any S3 bucket data. You must manually clean up S3 buckets that are used to store metering data. -==== - -.Procedure - -. Edit the `spec.storage` section in the `s3-storage.yaml` file: -+ -.Example `s3-storage.yaml` file -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - storage: - type: "hive" - hive: - type: "s3" - s3: - bucket: "bucketname/path/" <1> - region: "us-west-1" <2> - secretName: "my-aws-secret" <3> - # Set to false if you want to provide an existing bucket, instead of - # having metering create the bucket on your behalf. - createBucket: true <4> ----- -<1> Specify the name of the bucket where you would like to store your data. Optional: Specify the path within the bucket. -<2> Specify the region of your bucket. -<3> The name of a secret in the metering namespace containing the AWS credentials in the `data.aws-access-key-id` and `data.aws-secret-access-key` fields. See the example `Secret` object below for more details. -<4> Set this field to `false` if you want to provide an existing S3 bucket, or if you do not want to provide IAM credentials that have `CreateBucket` permissions. - -. Use the following `Secret` object as a template: -+ -.Example AWS `Secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-aws-secret -data: - aws-access-key-id: "dGVzdAo=" - aws-secret-access-key: "c2VjcmV0Cg==" ----- -+ -[NOTE] -==== -The values of the `aws-access-key-id` and `aws-secret-access-key` must be base64 encoded. -==== - -. Create the secret: -+ -[source,terminal] ----- -$ oc create secret -n openshift-metering generic my-aws-secret \ - --from-literal=aws-access-key-id=my-access-key \ - --from-literal=aws-secret-access-key=my-secret-key ----- -+ -[NOTE] -==== -This command automatically base64 encodes your `aws-access-key-id` and `aws-secret-access-key` values. -==== - -The `aws-access-key-id` and `aws-secret-access-key` credentials must have read and write access to the bucket. 
The following `aws/read-write.json` file shows an IAM policy that grants the required permissions: - -.Example `aws/read-write.json` file -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "1", - "Effect": "Allow", - "Action": [ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:GetObject", - "s3:HeadBucket", - "s3:ListBucket", - "s3:ListMultipartUploadParts", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::operator-metering-data/*", - "arn:aws:s3:::operator-metering-data" - ] - } - ] -} ----- - -If `spec.storage.hive.s3.createBucket` is set to `true` or unset in your `s3-storage.yaml` file, then you should use the `aws/read-write-create.json` file that contains permissions for creating and deleting buckets: - -.Example `aws/read-write-create.json` file -[source,json] ----- -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "1", - "Effect": "Allow", - "Action": [ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:GetObject", - "s3:HeadBucket", - "s3:ListBucket", - "s3:CreateBucket", - "s3:DeleteBucket", - "s3:ListMultipartUploadParts", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::operator-metering-data/*", - "arn:aws:s3:::operator-metering-data" - ] - } - ] -} ----- diff --git a/modules/metering-store-data-in-shared-volumes.adoc b/modules/metering-store-data-in-shared-volumes.adoc deleted file mode 100644 index e020ef33d4aa..000000000000 --- a/modules/metering-store-data-in-shared-volumes.adoc +++ /dev/null @@ -1,149 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-persistent-storage.adoc - -[id="metering-store-data-in-shared-volumes_{context}"] -= Storing data in shared volumes - -Metering does not configure storage by default. However, you can use any ReadWriteMany persistent volume (PV) or any storage class that provisions a ReadWriteMany PV for metering storage. - -[NOTE] -==== -NFS is not recommended to use in production. Using an NFS server on RHEL as a storage back end can fail to meet metering requirements and to provide the performance that is needed for the Metering Operator to work appropriately. - -Other NFS implementations on the marketplace might not have these issues, such as a Parallel Network File System (pNFS). pNFS is an NFS implementation with distributed and parallel capability. Contact the individual NFS implementation vendor for more information on any testing that was possibly completed against {product-title} core components. -==== - -.Procedure - -. Modify the `shared-storage.yaml` file to use a ReadWriteMany persistent volume for storage: -+ -.Example `shared-storage.yaml` file --- -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - storage: - type: "hive" - hive: - type: "sharedPVC" - sharedPVC: - claimName: "metering-nfs" <1> - # Uncomment the lines below to provision a new PVC using the specified storageClass. <2> - # createPVC: true - # storageClass: "my-nfs-storage-class" - # size: 5Gi ----- - -Select one of the configuration options below: - -<1> Set `storage.hive.sharedPVC.claimName` to the name of an existing ReadWriteMany persistent volume claim (PVC). This configuration is necessary if you do not have dynamic volume provisioning or want to have more control over how the persistent volume is created. 
- -<2> Set `storage.hive.sharedPVC.createPVC` to `true` and set the `storage.hive.sharedPVC.storageClass` to the name of a storage class with ReadWriteMany access mode. This configuration uses dynamic volume provisioning to create a volume automatically. --- - -. Create the following resource objects that are required to deploy an NFS server for metering. Use the `oc create -f <file-name>.yaml` command to create the object YAML files. - -.. Configure a `PersistentVolume` resource object: -+ -.Example `nfs_persistentvolume.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs - labels: - role: nfs-server -spec: - capacity: - storage: 5Gi - accessModes: - - ReadWriteMany - storageClassName: nfs-server <1> - nfs: - path: "/" - server: REPLACEME - persistentVolumeReclaimPolicy: Delete ----- -<1> Must exactly match the `[kind: StorageClass].metadata.name` field value. - -.. Configure a `Pod` resource object with the `nfs-server` role: -+ -.Example `nfs_server.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nfs-server - labels: - role: nfs-server -spec: - containers: - - name: nfs-server - image: <image_name> <1> - imagePullPolicy: IfNotPresent - ports: - - name: nfs - containerPort: 2049 - securityContext: - privileged: true - volumeMounts: - - mountPath: "/mnt/data" - name: local - volumes: - - name: local - emptyDir: {} ----- -<1> Install your NFS server image. - -.. Configure a `Service` resource object with the `nfs-server` role: -+ -.Example `nfs_service.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: nfs-service - labels: - role: nfs-server -spec: - ports: - - name: 2049-tcp - port: 2049 - protocol: TCP - targetPort: 2049 - selector: - role: nfs-server - sessionAffinity: None - type: ClusterIP ----- - -.. Configure a `StorageClass` resource object: -+ -.Example `nfs_storageclass.yaml` file -[source,yaml] ----- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: nfs-server <1> -provisioner: example.com/nfs -parameters: - archiveOnDelete: "false" -reclaimPolicy: Delete -volumeBindingMode: Immediate ----- -<1> Must exactly match the `[kind: PersistentVolume].spec.storageClassName` field value. - - -[WARNING] -==== -Configuration of your NFS storage, and any relevant resource objects, will vary depending on the NFS server image that you use for metering storage. -==== diff --git a/modules/metering-troubleshooting.adoc b/modules/metering-troubleshooting.adoc deleted file mode 100644 index 79c25c249754..000000000000 --- a/modules/metering-troubleshooting.adoc +++ /dev/null @@ -1,194 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-troubleshooting-debugging.adoc - -[id="metering-troubleshooting_{context}"] -= Troubleshooting metering - -A common issue with metering is pods failing to start. Pods might fail to start due to lack of resources or if they have a dependency on a resource that does not exist, such as a `StorageClass` or `Secret` resource. - -[id="metering-not-enough-compute-resources_{context}"] -== Not enough compute resources - -A common issue when installing or running metering is a lack of compute resources. As the cluster grows and more reports are created, the Reporting Operator pod requires more memory. If memory usage reaches the pod limit, the cluster considers the pod out of memory (OOM) and terminates it with an `OOMKilled` status. 
Ensure that metering is allocated the minimum resource requirements described in the installation prerequisites. - -[NOTE] -==== -The Metering Operator does not autoscale the Reporting Operator based on the load in the cluster. Therefore, CPU usage for the Reporting Operator pod does not increase as the cluster grows. -==== - -To determine if the issue is with resources or scheduling, follow the troubleshooting instructions included in the Kubernetes document https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#troubleshooting[Managing Compute Resources for Containers]. - -To troubleshoot issues due to a lack of compute resources, check the following within the `openshift-metering` namespace. - -.Prerequisites - -* You are currently in the `openshift-metering` namespace. Change to the `openshift-metering` namespace by running: -+ -[source,terminal] ----- -$ oc project openshift-metering ----- - -.Procedure - -. Check for metering `Report` resources that fail to complete and show the status of `ReportingPeriodUnmetDependencies`: -+ -[source,terminal] ----- -$ oc get reports ----- -+ -.Example output -[source,terminal] ----- -NAME QUERY SCHEDULE RUNNING FAILED LAST REPORT TIME AGE -namespace-cpu-utilization-adhoc-10 namespace-cpu-utilization Finished 2020-10-31T00:00:00Z 2m38s -namespace-cpu-utilization-adhoc-11 namespace-cpu-utilization ReportingPeriodUnmetDependencies 2m23s -namespace-memory-utilization-202010 namespace-memory-utilization ReportingPeriodUnmetDependencies 26s -namespace-memory-utilization-202011 namespace-memory-utilization ReportingPeriodUnmetDependencies 14s ----- - -. Check the `ReportDataSource` resources where the `NEWEST METRIC` is less than the report end date: -+ -[source,terminal] ----- -$ oc get reportdatasource ----- -+ -.Example output -[source,terminal] ----- -NAME EARLIEST METRIC NEWEST METRIC IMPORT START IMPORT END LAST IMPORT TIME AGE -... -node-allocatable-cpu-cores 2020-04-23T09:14:00Z 2020-08-31T10:07:00Z 2020-04-23T09:14:00Z 2020-10-15T17:13:00Z 2020-12-09T12:45:10Z 230d -node-allocatable-memory-bytes 2020-04-23T09:14:00Z 2020-08-30T05:19:00Z 2020-04-23T09:14:00Z 2020-10-14T08:01:00Z 2020-12-09T12:45:12Z 230d -... -pod-usage-memory-bytes 2020-04-23T09:14:00Z 2020-08-24T20:25:00Z 2020-04-23T09:14:00Z 2020-10-09T23:31:00Z 2020-12-09T12:45:12Z 230d ----- - -. Check the health of the `reporting-operator` `Pod` resource for a high number of pod restarts: -+ -[source,terminal] ----- -$ oc get pods -l app=reporting-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -reporting-operator-84f7c9b7b6-fr697 2/2 Running 542 8d <1> ----- -<1> The Reporting Operator pod is restarting at a high rate. - -. Check the `reporting-operator` `Pod` resource for an `OOMKilled` termination: -+ -[source,terminal] ----- -$ oc describe pod/reporting-operator-84f7c9b7b6-fr697 ----- -+ -.Example output -[source,terminal] ----- -Name: reporting-operator-84f7c9b7b6-fr697 -Namespace: openshift-metering -Priority: 0 -Node: ip-10-xx-xx-xx.ap-southeast-1.compute.internal/10.xx.xx.xx -... - Ports: 8080/TCP, 6060/TCP, 8082/TCP - Host Ports: 0/TCP, 0/TCP, 0/TCP - State: Running - Started: Thu, 03 Dec 2020 20:59:45 +1000 - Last State: Terminated - Reason: OOMKilled <1> - Exit Code: 137 - Started: Thu, 03 Dec 2020 20:38:05 +1000 - Finished: Thu, 03 Dec 2020 20:59:43 +1000 ----- -<1> The Reporting Operator pod was terminated due to OOM kill. 
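Instead of reading through the full `oc describe` output, you can pull the last termination reason directly. The following one-liner is a sketch that relies on the `app=reporting-operator` label used earlier in this procedure; an empty result means the container has not been terminated since it last started:

[source,terminal]
----
$ oc -n openshift-metering get pods -l app=reporting-operator \
  -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.containerStatuses[*].lastState.terminated.reason}{"\n"}{end}'
----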
- -[discrete] -[id="metering-check-and-increase-memory-limits_{context}"] -=== Increasing the reporting-operator pod memory limit - -If you are experiencing an increase in pod restarts and OOM kill events, you can check the current memory limit set for the Reporting Operator pod. Increasing the memory limit allows the Reporting Operator pod to update the report data sources. If necessary, increase the memory limit in your `MeteringConfig` resource by 25% to 50%. - -.Procedure - -. Check the current memory limits of the `reporting-operator` `Pod` resource: -+ -[source,terminal] ----- -$ oc describe pod reporting-operator-67d6f57c56-79mrt ----- -+ -.Example output -[source,terminal] ----- -Name: reporting-operator-67d6f57c56-79mrt -Namespace: openshift-metering -Priority: 0 -... - Ports: 8080/TCP, 6060/TCP, 8082/TCP - Host Ports: 0/TCP, 0/TCP, 0/TCP - State: Running - Started: Tue, 08 Dec 2020 14:26:21 +1000 - Ready: True - Restart Count: 0 - Limits: - cpu: 1 - memory: 500Mi <1> - Requests: - cpu: 500m - memory: 250Mi - Environment: -... ----- -<1> The current memory limit for the Reporting Operator pod. - -. Edit the `MeteringConfig` resource to update the memory limit: -+ -[source,terminal] ----- -$ oc edit meteringconfig/operator-metering ----- -+ -.Example `MeteringConfig` resource -[source,yaml] ----- -kind: MeteringConfig -metadata: - name: operator-metering - namespace: openshift-metering -spec: - reporting-operator: - spec: - resources: <1> - limits: - cpu: 1 - memory: 750Mi - requests: - cpu: 500m - memory: 500Mi -... ----- -<1> Add or increase memory limits within the `resources` field of the `MeteringConfig` resource. -+ -[NOTE] -==== -If numerous OOM kill events continue after the memory limits are increased, this might indicate that a different issue is causing the reports to be in a pending state. -==== - -[id="metering-storageclass-not-configured_{context}"] -== StorageClass resource not configured - -Metering requires that a default `StorageClass` resource be configured for dynamic provisioning. - -See the documentation on configuring metering for information on how to check if there are any `StorageClass` resources configured for the cluster, how to set the default, and how to configure metering to use a storage class other than the default. - -[id="metering-secret-not-configured-correctly_{context}"] -== Secret not configured correctly - -A common issue with metering is providing the incorrect secret when configuring your persistent storage. Be sure to review the example configuration files and create your secret according to the guidelines for your storage provider. diff --git a/modules/metering-uninstall-crds.adoc b/modules/metering-uninstall-crds.adoc deleted file mode 100644 index 8528b80ad976..000000000000 --- a/modules/metering-uninstall-crds.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-uninstall.adoc - -[id="metering-uninstall-crds_{context}"] -= Uninstalling metering custom resource definitions - -The metering custom resource definitions (CRDs) remain in the cluster after the Metering Operator is uninstalled and the `openshift-metering` namespace is deleted. - -[IMPORTANT] -==== -Deleting the metering CRDs disrupts any additional metering installations in other namespaces in your cluster. Ensure that there are no other metering installations before proceeding. -==== - -.Prerequisites - -* The `MeteringConfig` custom resource in the `openshift-metering` namespace is deleted. 
-* The `openshift-metering` namespace is deleted. - -.Procedure - -* Delete the remaining metering CRDs: -+ -[source,terminal] ----- -$ oc get crd -o name | grep "metering.openshift.io" | xargs oc delete ----- diff --git a/modules/metering-uninstall.adoc b/modules/metering-uninstall.adoc deleted file mode 100644 index f285090aa964..000000000000 --- a/modules/metering-uninstall.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-uninstall.adoc - -[id="metering-uninstall_{context}"] -= Uninstalling a metering namespace - -Uninstall your metering namespace, for example the `openshift-metering` namespace, by removing the `MeteringConfig` resource and deleting the `openshift-metering` namespace. - -.Prerequisites - -* The Metering Operator is removed from your cluster. - -.Procedure - -. Remove all resources created by the Metering Operator: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering delete meteringconfig --all ----- - -. After the previous step is complete, verify that all pods in the `openshift-metering` namespace are deleted or are reporting a terminating state: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering get pods ----- - -. Delete the `openshift-metering` namespace: -+ -[source,terminal] ----- -$ oc delete namespace openshift-metering ----- diff --git a/modules/metering-use-mysql-or-postgresql-for-hive.adoc b/modules/metering-use-mysql-or-postgresql-for-hive.adoc deleted file mode 100644 index 7135166249b1..000000000000 --- a/modules/metering-use-mysql-or-postgresql-for-hive.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/configuring_metering/metering-configure-hive-metastore.adoc - -[id="metering-use-mysql-or-postgresql-for-hive_{context}"] -= Using MySQL or PostgreSQL for the Hive metastore - -The default installation of metering configures Hive to use an embedded Java database called Derby. This is unsuited for larger environments and can be replaced with either a MySQL or PostgreSQL database. Use the following example configuration files if your deployment requires a MySQL or PostgreSQL database for Hive. - -There are three configuration options you can use to control the database that is used by Hive metastore: `url`, `driver`, and `secretName`. - -Create your MySQL or Postgres instance with a user name and password. Then create a secret by using the OpenShift CLI (`oc`) or a YAML file. The `secretName` you create for this secret must map to the `spec.hive.spec.config.db.secretName` field in the `MeteringConfig` object resource. - -.Procedure - -. Create a secret using the OpenShift CLI (`oc`) or by using a YAML file: -+ -* Create a secret by using the following command: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering create secret generic <YOUR_SECRETNAME> --from-literal=username=<YOUR_DATABASE_USERNAME> --from-literal=password=<YOUR_DATABASE_PASSWORD> ----- -+ -* Create a secret by using a YAML file. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <YOUR_SECRETNAME> <1> -data: - username: <BASE64_ENCODED_DATABASE_USERNAME> <2> - password: <BASE64_ENCODED_DATABASE_PASSWORD> <3> ----- -<1> The name of the secret. -<2> Base64 encoded database user name. -<3> Base64 encoded database password. - -. Create a configuration file to use a MySQL or PostgreSQL database for Hive: -+ -* To use a MySQL database for Hive, use the example configuration file below. 
Metering supports configuring the internal Hive metastore to use the MySQL server versions 5.6, 5.7, and 8.0. -+ --- -[source,yaml] ----- -spec: - hive: - spec: - metastore: - storage: - create: false - config: - db: - url: "jdbc:mysql://mysql.example.com:3306/hive_metastore" <1> - driver: "com.mysql.cj.jdbc.Driver" - secretName: "REPLACEME" <2> ----- -[NOTE] -==== -When configuring Metering to work with older MySQL server versions, such as 5.6 or 5.7, you might need to add the link:https://dev.mysql.com/doc/connector-j/8.0/en/connector-j-usagenotes-known-issues-limitations.html[`enabledTLSProtocols` JDBC URL parameter] when configuring the internal Hive metastore. -==== -<1> To use the TLS v1.2 cipher suite, set `url` to `"jdbc:mysql://<hostname>:<port>/<schema>?enabledTLSProtocols=TLSv1.2"`. -<2> The name of the secret containing the base64-encrypted user name and password database credentials. --- -+ -You can pass additional JDBC parameters using the `spec.hive.config.url`. For more details, see the link:https://dev.mysql.com/doc/connector-j/8.0/en/connector-j-reference-configuration-properties.html[MySQL Connector/J 8.0 documentation]. -+ -* To use a PostgreSQL database for Hive, use the example configuration file below: -+ -[source,yaml] ----- -spec: - hive: - spec: - metastore: - storage: - create: false - config: - db: - url: "jdbc:postgresql://postgresql.example.com:5432/hive_metastore" - driver: "org.postgresql.Driver" - username: "REPLACEME" - password: "REPLACEME" ----- -+ -You can pass additional JDBC parameters using the `spec.hive.config.url`. For more details, see the link:https://jdbc.postgresql.org/documentation/head/connect.html#connection-parameters[PostgreSQL JDBC driver documentation]. diff --git a/modules/metering-viewing-report-results.adoc b/modules/metering-viewing-report-results.adoc deleted file mode 100644 index 0ca7c183a52b..000000000000 --- a/modules/metering-viewing-report-results.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-using-metering.adoc -[id="metering-viewing-report-results_{context}"] -= Viewing report results - -Viewing a report's results involves querying the reporting API route and authenticating to the API using your {product-title} credentials. -Reports can be retrieved as `JSON`, `CSV`, or `Tabular` formats. - -.Prerequisites - -* Metering is installed. -* To access report results, you must either be a cluster administrator, or you need to be granted access using the `report-exporter` role in the `openshift-metering` namespace. - -.Procedure - -. Change to the `openshift-metering` project: -+ -[source,terminal] ----- -$ oc project openshift-metering ----- - -. Query the reporting API for results: - -.. Create a variable for the metering `reporting-api` route then get the route: -+ -[source,terminal] ----- -$ meteringRoute="$(oc get routes metering -o jsonpath='{.spec.host}')" ----- -+ -[source,terminal] ----- -$ echo "$meteringRoute" ----- - -.. Get the token of your current user to be used in the request: -+ -[source,terminal] ----- -$ token="$(oc whoami -t)" ----- - -.. Set `reportName` to the name of the report you created: -+ -[source,terminal] ----- -$ reportName=namespace-cpu-request-2020 ----- - -.. Set `reportFormat` to one of `csv`, `json`, or `tabular` to specify the output format of the API response: -+ -[source,terminal] ----- -$ reportFormat=csv ----- - -.. 
To get the results, use `curl` to make a request to the reporting API for your report: -+ -[source,terminal] ----- -$ curl --insecure -H "Authorization: Bearer ${token}" "https://${meteringRoute}/api/v1/reports/get?name=${reportName}&namespace=openshift-metering&format=$reportFormat" ----- -+ -.Example output with `reportName=namespace-cpu-request-2020` and `reportFormat=csv` -[source,terminal] ----- -period_start,period_end,namespace,pod_request_cpu_core_seconds -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-apiserver,11745.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-apiserver-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-authentication,522.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-authentication-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-cloud-credential-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-cluster-machine-approver,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-cluster-node-tuning-operator,3385.800000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-cluster-samples-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-cluster-version,522.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-console,522.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-console-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-controller-manager,7830.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-controller-manager-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-dns,34372.800000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-dns-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-etcd,23490.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-image-registry,5993.400000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-ingress,5220.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-ingress-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-kube-apiserver,12528.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-kube-apiserver-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-kube-controller-manager,8613.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-kube-controller-manager-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-machine-api,1305.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-machine-config-operator,9637.800000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-metering,19575.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-monitoring,6256.800000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-network-operator,261.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-sdn,94503.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 UTC,openshift-service-ca,783.000000 -2020-01-01 00:00:00 +0000 UTC,2020-12-30 23:59:59 +0000 
UTC,openshift-service-ca-operator,261.000000 ----- diff --git a/modules/metering-writing-reports.adoc b/modules/metering-writing-reports.adoc deleted file mode 100644 index c1c1a70b2006..000000000000 --- a/modules/metering-writing-reports.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * metering/metering-using-metering.adoc -[id="metering-writing-reports_{context}"] -= Writing Reports - -Writing a report is the way to process and analyze data using metering. - -To write a report, you must define a `Report` resource in a YAML file, specify the required parameters, and create it in the `openshift-metering` namespace. - -.Prerequisites - -* Metering is installed. - -.Procedure - -. Change to the `openshift-metering` project: -+ -[source,terminal] ----- -$ oc project openshift-metering ----- - -. Create a `Report` resource as a YAML file: -+ -.. Create a YAML file with the following content: -+ -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: Report -metadata: - name: namespace-cpu-request-2020 <2> - namespace: openshift-metering -spec: - reportingStart: '2020-01-01T00:00:00Z' - reportingEnd: '2020-12-30T23:59:59Z' - query: namespace-cpu-request <1> - runImmediately: true <3> ----- -<1> The `query` specifies the `ReportQuery` resources used to generate the report. Change this based on what you want to report on. For a list of options, run `oc get reportqueries | grep -v raw`. -<2> Use a descriptive name about what the report does for `metadata.name`. A good name describes the query, and the schedule or period you used. -<3> Set `runImmediately` to `true` for it to run with whatever data is available, or set it to `false` if you want it to wait for `reportingEnd` to pass. - -.. Run the following command to create the `Report` resource: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -.Example output -[source,terminal] ----- -report.metering.openshift.io/namespace-cpu-request-2020 created ----- -+ - -. You can list reports and their `Running` status with the following command: -+ -[source,terminal] ----- -$ oc get reports ----- -+ -.Example output -[source,terminal] ----- -NAME QUERY SCHEDULE RUNNING FAILED LAST REPORT TIME AGE -namespace-cpu-request-2020 namespace-cpu-request Finished 2020-12-30T23:59:59Z 26s ----- diff --git a/modules/mgmt-power-remediation-baremetal-about.adoc b/modules/mgmt-power-remediation-baremetal-about.adoc deleted file mode 100644 index 88b871f42c63..000000000000 --- a/modules/mgmt-power-remediation-baremetal-about.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: - -// * machine_management/mgmt-power-remediation-baremetal - -:_content-type: PROCEDURE -[id="mgmt-power-remediation-baremetal-about_{context}"] -= About power-based remediation of bare metal -In a bare metal cluster, remediation of nodes is critical to ensuring the overall health of the cluster. Physically remediating a cluster can be challenging and any delay in putting the machine into a safe or an operational state increases the time the cluster remains in a degraded state, and the risk that subsequent failures might bring the cluster offline. Power-based remediation helps counter such challenges. - -Instead of reprovisioning the nodes, power-based remediation uses a power controller to power off an inoperable node. This type of remediation is also called power fencing. - -{product-title} uses the `MachineHealthCheck` controller to detect faulty bare metal nodes. 
Power-based remediation is fast and reboots faulty nodes instead of removing them from the cluster. - -Power-based remediation provides the following capabilities: - -* Allows the recovery of control plane nodes -* Reduces the risk of data loss in hyperconverged environments -* Reduces the downtime associated with recovering physical machines - -[id="machine-health-checks-bare-metal_{context}"] -== MachineHealthChecks on bare metal - -Machine deletion on bare metal cluster triggers reprovisioning of a bare metal host. -Usually bare metal reprovisioning is a lengthy process, during which the cluster -is missing compute resources and applications might be interrupted. - -There are two ways to change the default remediation process from machine deletion to host power-cycle: - -. Annotate the `MachineHealthCheck` resource with the -`machine.openshift.io/remediation-strategy: external-baremetal` annotation. -. Create a `Metal3RemediationTemplate` resource, and refer to it in the `spec.remediationTemplate` of the `MachineHealthCheck`. - -After using one of these methods, unhealthy machines are power-cycled by using Baseboard Management Controller (BMC) credentials. - -[id="mgmt-understanding-remediation-process_{context}"] -== Understanding the annotation-based remediation process - -The remediation process operates as follows: - -. The MachineHealthCheck (MHC) controller detects that a node is unhealthy. -. The MHC notifies the bare metal machine controller which requests to power-off the unhealthy node. -. After the power is off, the node is deleted, which allows the cluster to reschedule the affected workload on other nodes. -. The bare metal machine controller requests to power on the node. -. After the node is up, the node re-registers itself with the cluster, resulting in the creation of a new node. -. After the node is recreated, the bare metal machine controller restores the annotations and labels that existed on the unhealthy node before its deletion. - -[NOTE] -==== -If the power operations did not complete, the bare metal machine controller triggers the reprovisioning of the unhealthy node unless this is a control plane node or a node that was provisioned externally. -==== - -[id="mgmt-understanding-metal3-remediation-process_{context}"] -== Understanding the metal3-based remediation process - -The remediation process operates as follows: - -. The MachineHealthCheck (MHC) controller detects that a node is unhealthy. -. The MHC creates a metal3 remediation custom resource for the metal3 remediation controller, which requests to power-off the unhealthy node. -. After the power is off, the node is deleted, which allows the cluster to reschedule the affected workload on other nodes. -. The metal3 remediation controller requests to power on the node. -. After the node is up, the node re-registers itself with the cluster, resulting in the creation of a new node. -. After the node is recreated, the metal3 remediation controller restores the annotations and labels that existed on the unhealthy node before its deletion. - -[NOTE] -==== -If the power operations did not complete, the metal3 remediation controller triggers the reprovisioning of the unhealthy node unless this is a control plane node or a node that was provisioned externally. -==== - -[id="mgmt-creating-mhc-baremetal_{context}"] -== Creating a MachineHealthCheck resource for bare metal - -.Prerequisites - -* The {product-title} is installed using installer-provisioned infrastructure (IPI). 
-* Access to BMC credentials (or BMC access to each node). -* Network access to the BMC interface of the unhealthy node. - -.Procedure -. Create a `healthcheck.yaml` file that contains the definition of your machine health check. -. Apply the `healthcheck.yaml` file to your cluster using the following command: - -[source,terminal] ----- -$ oc apply -f healthcheck.yaml ----- - -.Sample `MachineHealthCheck` resource for bare metal, annotation-based remediation -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: example <1> - namespace: openshift-machine-api - annotations: - machine.openshift.io/remediation-strategy: external-baremetal <2> -spec: - selector: - matchLabels: - machine.openshift.io/cluster-api-machine-role: <role> <3> - machine.openshift.io/cluster-api-machine-type: <role> <3> - machine.openshift.io/cluster-api-machineset: <cluster_name>-<label>-<zone> <4> - unhealthyConditions: - - type: "Ready" - timeout: "300s" <5> - status: "False" - - type: "Ready" - timeout: "300s" <5> - status: "Unknown" - maxUnhealthy: "40%" <6> - nodeStartupTimeout: "10m" <7> ----- - -<1> Specify the name of the machine health check to deploy. -<2> For bare metal clusters, you must include the `machine.openshift.io/remediation-strategy: external-baremetal` annotation in the `annotations` section to enable power-cycle remediation. With this remediation strategy, unhealthy hosts are rebooted instead of removed from the cluster. -<3> Specify a label for the machine pool that you want to check. -<4> Specify the compute machine set to track in `<cluster_name>-<label>-<zone>` format. For example, `prod-node-us-east-1a`. -<5> Specify the timeout duration for the node condition. If the condition is met for the duration of the timeout, the machine will be remediated. Long timeouts can result in long periods of downtime for a workload on an unhealthy machine. -<6> Specify the amount of machines allowed to be concurrently remediated in the targeted pool. This can be set as a percentage or an integer. If the number of unhealthy machines exceeds the limit set by `maxUnhealthy`, remediation is not performed. -<7> Specify the timeout duration that a machine health check must wait for a node to join the cluster before a machine is determined to be unhealthy. - -[NOTE] -==== -The `matchLabels` are examples only; you must map your machine groups based on your specific needs. 
-==== - -.Sample `MachineHealthCheck` resource for bare metal, metal3-based remediation -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: example - namespace: openshift-machine-api -spec: - selector: - matchLabels: - machine.openshift.io/cluster-api-machine-role: <role> - machine.openshift.io/cluster-api-machine-type: <role> - machine.openshift.io/cluster-api-machineset: <cluster_name>-<label>-<zone> - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: Metal3RemediationTemplate - name: metal3-remediation-template - namespace: openshift-machine-api - unhealthyConditions: - - type: "Ready" - timeout: "300s" ----- - -.Sample `Metal3RemediationTemplate` resource for bare metal, metal3-based remediation -[source,yaml] ----- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: Metal3RemediationTemplate -metadata: - name: metal3-remediation-template - namespace: openshift-machine-api -spec: - template: - spec: - strategy: - type: Reboot - retryLimit: 1 - timeout: 5m0s ----- - -[NOTE] -==== -The `matchLabels` are examples only; you must map your machine groups based on your specific needs. The `annotations` section does not apply to metal3-based remediation. Annotation-based remediation and metal3-based remediation are mutually exclusive. -==== - -[id="mgmt-troubleshooting-issue-power-remediation_{context}"] -== Troubleshooting issues with power-based remediation - -To troubleshoot an issue with power-based remediation, verify the following: - -* You have access to the BMC. -* The BMC is connected to the control plane node that is responsible for running the remediation task. diff --git a/modules/microshift-about-sos-reports.adoc b/modules/microshift-about-sos-reports.adoc deleted file mode 100644 index f36187496d0b..000000000000 --- a/modules/microshift-about-sos-reports.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: - -// * microshift_support/microshift-sos-report - -:_content-type: CONCEPT -[id="about-microshift-sos-reports_{context}"] -= About {product-title} sos reports - -The `sos` tool is composed of different plugins that help you gather information from different applications. A {product-title}-specific plugin is available starting with `sos` version 4.5.1, and it can gather the following data: - -* {product-title} configuration and version -* YAML output for cluster-wide and system namespaced resources -* OVN-Kubernetes information - diff --git a/modules/microshift-about.adoc b/modules/microshift-about.adoc deleted file mode 100644 index e53218fb6e92..000000000000 --- a/modules/microshift-about.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/understanding-microshift.adoc - -[id="con-about-microshift_{context}"] -= About {product-title} - -Working with resource-constrained field environments and hardware presents many challenges not experienced with cloud computing. {product-title} enables you to solve problems for edge devices by: - -* Running the same Kubernetes workloads you run in the cloud, but at the edge. -* Overcoming the operational challenge of minimal system resources. -* Addressing the environmental challenges of severe networking constraints such as low or no connectivity. -* Meeting the physical constraint of hard-to-access locations by installing your system images directly on edge devices. -* Building on and integrating with edge-optimized operating systems such as {op-system-ostree-first}.
- -{product-title} has the simplicity of single-node deployment with the functions and services you need for computing in resource-constrained locations. You can have many deployments on different hosts, creating the specific system image needed for each of your applications. diff --git a/modules/microshift-accessing-cluster-locally.adoc b/modules/microshift-accessing-cluster-locally.adoc deleted file mode 100644 index c9126296e922..000000000000 --- a/modules/microshift-accessing-cluster-locally.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift_install/microshift-install-rpm.adoc -// microshift/microshift_install/microshift-embed-in-rpm-ostree.adoc -// microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: PROCEDURE -[id="accessing-microshift-cluster-locally_{context}"] -= Accessing the {product-title} cluster locally - -Use the following procedure to access the {product-title} cluster locally by using a `kubeconfig` file. - -.Prerequisites - -* You have installed the `oc` binary. - -.Procedure - -. Optional: to create a `~/.kube/` folder if your {op-system} machine does not have one, run the following command: -+ -[source,terminal] ----- -$ mkdir -p ~/.kube/ ----- - -. Copy the generated local access `kubeconfig` file to the `~/.kube/` directory by running the following command: -+ -[source,terminal] ----- -$ sudo cat /var/lib/microshift/resources/kubeadmin/kubeconfig > ~/.kube/config ----- - -. Update the permissions on your `~/.kube/config` file by running the following command: -+ -[source,terminal] ----- -$ chmod go-r ~/.kube/config ----- - -.Verification - -* Verify that {product-title} is running by entering the following command: -+ -[source,terminal] ----- -$ oc get all -A ----- diff --git a/modules/microshift-accessing-cluster-open-firewall.adoc b/modules/microshift-accessing-cluster-open-firewall.adoc deleted file mode 100644 index 41e102916b83..000000000000 --- a/modules/microshift-accessing-cluster-open-firewall.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift_install/microshift-install-rpm.adoc -// microshift/microshift_install/microshift-embed-in-rpm-ostree.adoc -// microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: PROCEDURE -[id="microshift-accessing-cluster-open-firewall_{context}"] -= Opening the firewall for remote access to the {product-title} cluster - -Use the following procedure to open the firewall so that a remote user can access the {product-title} cluster. This procedure must be completed before a workstation user can access the cluster remotely. - -For this procedure, `user@microshift` is the user on the {product-title} host machine and is responsible for setting up that machine so that it can be accessed by a remote user on a separate workstation. - -.Prerequisites - -* You have installed the `oc` binary. - -* Your account has cluster administration privileges. 
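- -If you are not sure whether your account has cluster administration privileges, one possible check, assuming the `oc` CLI on the {product-title} host is already logged in to the cluster, is to query your own permissions: - -[source,terminal] ----- -[user@microshift]$ oc auth can-i '*' '*' --all-namespaces ----- - -If the command prints `yes`, the account can perform cluster-wide administrative actions.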
- -.Procedure - -* As `user@microshift` on the {product-title} host, open the firewall port for the Kubernetes API server (`6443/tcp`) by running the following command: -+ -[source,terminal] ----- -[user@microshift]$ sudo firewall-cmd --permanent --zone=public --add-port=6443/tcp && sudo firewall-cmd --reload ----- - -.Verification - -* As `user@microshift`, verify that {product-title} is running by entering the following command: -+ -[source,terminal] ----- -[user@microshift]$ oc get all -A ----- diff --git a/modules/microshift-accessing-cluster-remotely.adoc b/modules/microshift-accessing-cluster-remotely.adoc deleted file mode 100644 index 7c6fcf68cf66..000000000000 --- a/modules/microshift-accessing-cluster-remotely.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift_install/microshift-install-rpm.adoc -// microshift/microshift_install/microshift-embed-in-rpm-ostree.adoc -// microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: PROCEDURE -[id="accessing-microshift-cluster-remotely_{context}"] -= Accessing the {product-title} cluster remotely - -Use the following procedure to access the {product-title} cluster from a remote workstation by using a `kubeconfig` file. - -The `user@workstation` login is used to access the host machine remotely. The `<user>` value in the procedure is the name of the user that `user@workstation` uses to log in to the {product-title} host. - -.Prerequisites - -* You have installed the `oc` binary. - -* The `user@microshift` user has opened the firewall on the local host. - -.Procedure - -. As `user@workstation`, create a `~/.kube/` folder if your {op-system} machine does not have one by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -[user@workstation]$ mkdir -p ~/.kube/ ----- - -. As `user@workstation`, set a variable for the hostname of your {product-title} host by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -[user@workstation]$ MICROSHIFT_MACHINE=<name or IP address of {product-title} machine> ----- - -. As `user@workstation`, copy the generated `kubeconfig` file that contains the host name or IP address you want to connect with from the {op-system} machine running {product-title} to your local machine by running the following command: -+ -[source,terminal] ----- -[user@workstation]$ ssh <user>@$MICROSHIFT_MACHINE "sudo cat /var/lib/microshift/resources/kubeadmin/$MICROSHIFT_MACHINE/kubeconfig" > ~/.kube/config ----- - -.
As `user@workstation`, update the permissions on your `~/.kube/config` file by running the following command: -+ -[source,terminal] ----- -$ chmod go-r ~/.kube/config ----- - -.Verification - -* As `user@workstation`, verify that {product-title} is running by entering the following command: -+ -[source,terminal] ----- -[user@workstation]$ oc get all -A ----- diff --git a/modules/microshift-accessing.adoc b/modules/microshift-accessing.adoc deleted file mode 100644 index 1846875894d3..000000000000 --- a/modules/microshift-accessing.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift_install/microshift-install-rpm.adoc -// microshift/microshift_install/microshift-embed-in-rpm-ostree.adoc - -:_content-type: CONCEPT -[id="accessing-microshift-cluster_{context}"] -= How to access the {product-title} cluster - -Use the procedures in this section to access the {product-title} cluster, either from the same machine running the {product-title} service or remotely from a workstation. You can use this access to observe and administrate workloads. When using these steps, choose the `kubeconfig` file that contains the host name or IP address you want to connect with and place it in the relevant directory. As listed in each procedure, you use the {OCP} CLI tool (`oc`) for cluster activities. diff --git a/modules/microshift-add-blueprint-build-iso.adoc b/modules/microshift-add-blueprint-build-iso.adoc deleted file mode 100644 index 3736e5735d81..000000000000 --- a/modules/microshift-add-blueprint-build-iso.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="microshift-add-blueprint-build-iso_{context}"] -= Add the blueprint to Image Builder and build the ISO - -. Add the blueprint to the Image Builder by running the following command: -+ -[source,terminal] -+ ----- -$ sudo composer-cli blueprints push microshift-installer.toml ----- - -. Start the `ostree` ISO build by running the following command: -+ -[source,terminal] ----- -$ BUILDID=$(sudo composer-cli compose start-ostree --url http://localhost:8085/repo/ --ref "rhel/9/$(uname -m)/edge" microshift-installer edge-installer | awk '{print $2}') ----- -+ -This command also returns the identification (ID) of the build for monitoring. - -. 
You can check the status of the build periodically by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli compose status ----- -+ -.Example output for a running build -+ -[source,terminal] ----- -ID Status Time Blueprint Version Type Size -c793c24f-ca2c-4c79-b5b7-ba36f5078e8d RUNNING Wed Jun 7 13:22:20 2023 microshift-installer 0.0.0 edge-installer ----- -+ -.Example output for a completed build -+ -[source,terminal] ----- -ID Status Time Blueprint Version Type Size -c793c24f-ca2c-4c79-b5b7-ba36f5078e8d FINISHED Wed Jun 7 13:34:49 2023 microshift-installer 0.0.0 edge-installer ----- diff --git a/modules/microshift-adding-repos-to-image-builder.adoc b/modules/microshift-adding-repos-to-image-builder.adoc deleted file mode 100644 index 2cfc47cbb113..000000000000 --- a/modules/microshift-adding-repos-to-image-builder.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="adding-microshift-repos-image-builder_{context}"] -= Adding {product-title} repositories to Image Builder - -Use the following procedure to add the {product-title} repositories to Image Builder on your build host. - -.Prerequisites -* Your build host meets the Image Builder system requirements. -* You have installed and set up Image Builder and the `composer-cli` tool. -* You have root-user access to your build host. - -.Procedure - -. Create an Image Builder configuration file for adding the `rhocp-4.13` RPM repository source required to pull {product-title} RPMs by running the following command: -+ -[source,terminal] ----- -$ cat > rhocp-4.13.toml <<EOF -id = "rhocp-4.13" -name = "Red Hat OpenShift Container Platform 4.13 for RHEL 9" -type = "yum-baseurl" -url = "https://cdn.redhat.com/content/dist/layered/rhel9/$(uname -m)/rhocp/4.13/os" -check_gpg = true -check_ssl = true -system = false -rhsm = true -EOF ----- - -. Create an Image Builder configuration file for adding the `fast-datapath` RPM repository by running the following command: -+ -[source,terminal] ----- -$ cat > fast-datapath.toml <<EOF -id = "fast-datapath" -name = "Fast Datapath for RHEL 9" -type = "yum-baseurl" -url = "https://cdn.redhat.com/content/dist/layered/rhel9/$(uname -m)/fast-datapath/os" -check_gpg = true -check_ssl = true -system = false -rhsm = true -EOF ----- - -. 
Add the sources to the Image Builder by running the following commands: -+ -[source,terminal] ----- -$ sudo composer-cli sources add rhocp-4.13.toml ----- -+ -[source,terminal] ----- -$ sudo composer-cli sources add fast-datapath.toml ----- - -.Verification - -* Confirm that the sources were added properly by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli sources list ----- -+ -.Example output -+ -[source,terminal] ----- -appstream -baseos -fast-datapath -rhocp-4.13 ----- \ No newline at end of file diff --git a/modules/microshift-adding-service-to-blueprint.adoc b/modules/microshift-adding-service-to-blueprint.adoc deleted file mode 100644 index cf159bc7ee5d..000000000000 --- a/modules/microshift-adding-service-to-blueprint.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="adding-microshift-service-to-blueprint_{context}"] -= Adding the {product-title} service to a blueprint - -Adding the {product-title} RPM package to an Image Builder blueprint enables the build of a {op-system-ostree} image with {product-title} embedded. - -.Procedure - -. Use the following example to create your blueprint: -+ -.Image Builder blueprint example -+ -[source,terminal] ----- -$ cat > minimal-microshift.toml <<EOF -name = "minimal-microshift" - -description = "" -version = "0.0.1" -modules = [] -groups = [] - -[[packages]] -name = "microshift" -version = "*" - -[[packages]] -name = "microshift-greenboot" <1> -version = "*" - -[customizations.services] -enabled = ["microshift"] -EOF ----- -[.small] -<1> Optional `microshift-greenboot` RPM. For more information, read the "Greenboot health check" guide in the "Running Applications" section. -+ -[NOTE] -==== -The wildcard `*` in the commands uses the latest {product-title} RPMs. If you need a specific version, substitute the wildcard for the version you want. For example, insert `4.13.1` to download the {product-title} 4.13.1 RPMs. -==== - -. Add the blueprint to the Image Builder by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli blueprints push minimal-microshift.toml ----- - -.Verification - -. Verify the Image Builder configuration listing only {product-title} packages by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli blueprints depsolve minimal-microshift | grep microshift ----- -+ -.Example output -+ -[source,terminal] ----- -blueprint: minimal-microshift v0.0.1 - microshift-greenboot-4.13.1-202305250827.p0.g4105d3b.assembly.4.13.1.el9.noarch - microshift-networking-4.13.1-202305250827.p0.g4105d3b.assembly.4.13.1.el9.x86_64 - microshift-release-info-4.13.1-202305250827.p0.g4105d3b.assembly.4.13.1.el9.noarch - microshift-4.13.1-202305250827.p0.g4105d3b.assembly.4.13.1.el9.x86_64 - microshift-selinux-4.13.1-202305250827.p0.g4105d3b.assembly.4.13.1.el9.noarch ----- - -. 
Optional: Verify the Image Builder configuration listing all components to be installed by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli blueprints depsolve minimal-microshift ----- diff --git a/modules/microshift-applying-manifests-example.adoc b/modules/microshift-applying-manifests-example.adoc deleted file mode 100644 index 428af1b94c5d..000000000000 --- a/modules/microshift-applying-manifests-example.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/running_applications/microshift-operators.adoc - -:_content-type: PROCEDURE -[id="microshift-manifests-example_{context}"] -= Using manifests example -This example demonstrates automatic deployment of a BusyBox container using `kustomize` manifests in the `/etc/microshift/manifests` directory. - -.Procedure -. Create the BusyBox manifest files by running the following commands: -+ -.. Define the directory location: -+ -[source,terminal] ----- -$ MANIFEST_DIR=/etc/microshift/manifests ----- -+ -.. Make the directory: -+ -[source,terminal] ----- -$ sudo mkdir -p ${MANIFEST_DIR} ----- -+ -.. Place the YAML file in the directory: -+ -[source,terminal] ----- -$ sudo tee ${MANIFEST_DIR}/busybox.yaml &>/dev/null <<EOF - -apiVersion: v1 -kind: Namespace -metadata: - name: busybox ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: busybox-deployment -spec: - selector: - matchLabels: - app: busybox - template: - metadata: - labels: - app: busybox - spec: - containers: - - name: busybox - image: BUSYBOX_IMAGE - command: - - sleep - - "3600" -EOF ----- - -. Next, create the `kustomize` manifest files by running the following commands: -+ -.. Place the YAML file in the directory: -+ -[source,terminal] ----- -$ sudo tee ${MANIFEST_DIR}/kustomization.yaml &>/dev/null <<EOF ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: busybox -resources: - - busybox.yaml -images: - - name: BUSYBOX_IMAGE - newName: registry.k8s.io/busybox -EOF ----- - -. Restart {product-title} to apply the manifests by running the following command: -+ -[source,terminal] ----- -$ sudo systemctl restart microshift ----- -+ -. Apply the manifests and start the `busybox` pod by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n busybox ----- diff --git a/modules/microshift-blocking-nodeport-access.adoc b/modules/microshift-blocking-nodeport-access.adoc deleted file mode 100644 index 09ad9db37f3e..000000000000 --- a/modules/microshift-blocking-nodeport-access.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-blocking-nodeport-access_{context}"] -= Blocking external access to the NodePort service on a specific host interface - -OVN-Kubernetes does not restrict the host interface where a NodePort service can be accessed from outside a {product-title} node. The following procedure explains how to block the NodePort service on a specific host interface and restrict external access. - -.Prerequisites - -* You must have an account with root privileges. - -.Procedure - -. Change the `NODEPORT` variable to the host port number assigned to your Kubernetes NodePort service by running the following command: -+ -[source,terminal] ----- -# export NODEPORT=30700 ----- -. Change the `INTERFACE_IP` value to the IP address from the host interface that you want to block. 
For example: -+ -[source,terminal] ----- -# export INTERFACE_IP=192.168.150.33 ----- -. Insert a new rule in the `nat` table PREROUTING chain to drop all packets that match the destination port and IP address. For example: -+ -[source,terminal] ----- -$ sudo nft -a insert rule ip nat PREROUTING tcp dport $NODEPORT ip daddr $INTERFACE_IP drop ----- -. List the new rule by running the following command: -+ -[source,terminal] ----- -$ sudo nft -a list chain ip nat PREROUTING -table ip nat { - chain PREROUTING { # handle 1 - type nat hook prerouting priority dstnat; policy accept; - tcp dport 30700 ip daddr 192.168.150.33 drop # handle 134 - counter packets 108 bytes 18074 jump OVN-KUBE-ETP # handle 116 - counter packets 108 bytes 18074 jump OVN-KUBE-EXTERNALIP # handle 114 - counter packets 108 bytes 18074 jump OVN-KUBE-NODEPORT # handle 112 - } -} ----- -+ -[NOTE] -==== -Note the `handle` number of the newly added rule. You need the `handle` number to remove the rule in the following step. -==== -. Remove the custom rule with the following sample command: -+ -[source,terminal] ----- -$ sudo nft -a delete rule ip nat PREROUTING handle 134 ----- - diff --git a/modules/microshift-certificate-lifetime.adoc b/modules/microshift-certificate-lifetime.adoc deleted file mode 100644 index e6bd6ac28c75..000000000000 --- a/modules/microshift-certificate-lifetime.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/microshift-things-to-know.adoc - -:_content-type: CONCEPT -[id="microshift-certificate-lifetime_{context}"] -= Security certificate lifetime - -{product-title} certificates are separated into two basic groups: - -. Short-lived certificates, which have a certificate validity of one year. -. Long-lived certificates, which have a certificate validity of 10 years. - -Most server or leaf certificates are short-term. - -An example of a long-lived certificate is the client certificate for `system:admin` user authentication, or the certificate of the signer of the `kube-apiserver` external serving certificate. - -[id="microshift-certificate-rotation_{context}"] -== Certificate rotation -Certificates that are expired or close to their expiration dates need to be rotated to ensure continued {product-title} operation. When {product-title} restarts for any reason, certificates that are close to expiring are rotated. A certificate that is set to expire imminently, or has expired, can cause an automatic {product-title} restart to perform a rotation. - -[NOTE] -==== -If the rotated certificate is a Certificate Authority, all of the certificates it signed rotate. -==== - -[id="microshift-st-certificate-rotation_{context}"] -=== Short-term certificates -The following situations describe {product-title} actions during short-term certificate lifetimes: - -. No rotation: -.. When a short-term certificate is up to 5 months old, no rotation occurs. - -. Rotation at restart: -.. When a short-term certificate is 5 to 8 months old, it is rotated when {product-title} starts or restarts. - -. Automatic restart for rotation: -.. When a short-term certificate is more than 8 months old, {product-title} can automatically restart to rotate and apply a new certificate. - -[id="microshift-lt-certificate-rotation_{context}"] -=== Long-term certificates -The following situations describe {product-title} actions during long-term certificate lifetimes: - -. No rotation: -.. When a long-term certificate is up to 8.5 years old, no rotation occurs. - -. Rotation at restart: -..
When a long-term certificate is 8.5 to 9 years old, it is rotated when {product-title} starts or restarts. - -. Automatic restart for rotation: -.. When a long-term certificate is more than 9 years old, {product-title} can automatically restart to rotate and apply a new certificate. diff --git a/modules/microshift-cli-oc-about.adoc b/modules/microshift-cli-oc-about.adoc deleted file mode 100644 index 192c97f6de17..000000000000 --- a/modules/microshift-cli-oc-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift-cli_ref/microshift-cli-using-oc.adoc - -:_content-type: CONCEPT -[id="microshift-cli-oc-about_{context}"] -= About the OpenShift CLI - -With the OpenShift command-line interface (CLI), the `oc` command, you can deploy and manage {product-title} projects from a terminal. The CLI `oc` tool is ideal in the following situations: - -* Working directly with project source code -* Scripting {product-title} operations -* Managing projects while restricted by bandwidth resources diff --git a/modules/microshift-cli-oc-get-help.adoc b/modules/microshift-cli-oc-get-help.adoc deleted file mode 100644 index e28c3517aecb..000000000000 --- a/modules/microshift-cli-oc-get-help.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_cli_ref/microshift_cli_getting_help.adoc - -:_content-type: CONCEPT -[id="cli-getting-help_{context}"] -= Getting help - -You can get help with CLI commands and {product-title} resources in the following ways. - -* Use `oc help --flag` to get information about a specific CLI command: -+ -.Example: Get help for the `oc create` command -[source,terminal] ----- -$ oc create --help ----- -+ -.Example output -[source,terminal] ----- -Create a resource by filename or stdin - -JSON and YAML formats are accepted. - -Usage: - oc create -f FILENAME [flags] - -... ----- - -* Use the `oc explain` command to view the description and fields for a particular resource: -+ -.Example: View documentation for the `Pod` resource -[source,terminal] ----- -$ oc explain pods ----- -+ -.Example output -[source,terminal] ----- -KIND: Pod -VERSION: v1 - -DESCRIPTION: - Pod is a collection of containers that can run on a host. This resource is - created by clients and scheduled onto hosts. - -FIELDS: - apiVersion <string> - APIVersion defines the versioned schema of this representation of an - object. Servers should convert recognized schemas to the latest internal - value, and may reject unrecognized values. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#resources - -... ----- -//removed reference to oc help, as I thought this would just create noise for the MicroShift user -//are these other two ways viable for MicroShift? -//are there better examples for MicroShift use cases? \ No newline at end of file diff --git a/modules/microshift-cni.adoc b/modules/microshift-cni.adoc deleted file mode 100644 index 9fb3427552cf..000000000000 --- a/modules/microshift-cni.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-understanding networking.adoc - -:_content-type: CONCEPT -[id="microshift-cni_{context}"] -= About the OVN-Kubernetes network plugin - -OVN-Kubernetes is the default networking solution for {product-title} deployments. OVN-Kubernetes is a virtualized network for pods and services that is based on Open Virtual Network (OVN). 
The OVN-Kubernetes Container Network Interface (CNI) plugin is the network plugin for the cluster. A cluster that uses the OVN-Kubernetes network plugin also runs Open vSwitch (OVS) on the node. OVN configures OVS on the node to implement the declared network configuration. - -[id="microshift-network-topology_{context}"] -== Network topology -OVN-Kubernetes provides an overlay-based networking implementation. This overlay includes an OVS-based implementation of Service and NetworkPolicy. The overlay network uses the Geneve (Generic Network Virtualization Encapsulation) tunnel protocol. The pod maximum transmission unit (MTU) for the Geneve tunnel is set to a smaller value than the MTU of the physical interface on the host. This smaller MTU makes room for the required information that is added to the tunnel header before it is transmitted. - -OVS runs as a systemd service on the {product-title} node. The OVS RPM package is installed as a dependency to the `microshift-networking` RPM package. OVS is started immediately when the `microshift-networking` RPM is installed. - -.{product-title} network topology -image:317_RHbM_OVN_topology_0323.png[title="{product-title} uses an overlay-based networking implementation, details follow."] - -[id="microshift-description-ovn-logical-components_{context}"] -=== Description of the OVN logical components of the virtualized network -OVN node switch:: -A virtual switch named `<node-name>`. The OVN node switch is named according to the hostname of the node. -** In this example, the `node-name` is `microshift-dev`. - -OVN cluster router:: -A virtual router named `ovn_cluster_router`, also known as the distributed router. -** In this example, the cluster network is `10.42.0.0/16`. - -OVN join switch:: -A virtual switch named `join`. - -OVN gateway router:: -A virtual router named `GR_<node-name>`, also known as the external gateway router. - -OVN external switch:: -A virtual switch named `ext_<node-name>.` - -[id="microshift-description-connections-network-topology_{context}"] -=== Description of the connections in the network topology figure -* The north-south traffic between the network service device `enp1s0` and the OVN external switch `ext_microshift-dev`, is provided through the OVS patch port by the gateway bridge `br-ex`. -* The OVN gateway router `GR_microshift-dev` is connected to the external network switch `ext_microshift-dev` through the logical router port 4. Port 4 is attached with the node IP address 192.168.122.14. -* The join switch `join` connects the OVN gateway router `GR_microshift-dev` to the OVN cluster router `ovn_cluster_router`. The IP address range is 100.62.0.0/16. -** The OVN gateway router `GR_microshift-dev` connects to the OVN join switch `join` through the logical router port 3. Port 3 attaches with the internal IP address 100.64.0.2. -** The OVN cluster router `ovn_cluster_router` connects to the join switch `join` through the logical router port 2. Port 2 attaches with the internal IP address 100.64.0.1. -* The OVN cluster router `ovn_cluster_router` connects to the node switch `microshift-dev` through the logical router port 1. Port 1 is attached with the OVN cluster network IP address 10.42.0.1. -* The east-west traffic between the pods and the network service is provided by the OVN cluster router `ovn_cluster_router` and the node switch `microshift-dev`. The IP address range is 10.42.0.0/24. -* The east-west traffic between pods is provided by the node switch `microshift-dev` without network address translation (NAT). 
-* The north-south traffic between the pods and the external network is provided by the OVN cluster router `ovn_cluster_router` and the host network. This router is connected through the `ovn-kubernetes` management port `ovn-k8s-mp0`, with the IP address 10.42.0.2. -* All the pods are connected to the OVN node switch through their interfaces. -** In this example, Pod 1 and Pod 2 are connected to the node switch through `Interface 1` and `Interface 2`. - -[id="microshift-ip-forward_{context}"] -== IP forward -The host network `sysctl net.ipv4.ip_forward` kernel parameter is automatically enabled by the `ovnkube-master` container when started. This is required to forward incoming traffic to the CNI. For example, accessing the NodePort service from outside of a cluster fails if `ip_forward` is disabled. - -[id="microshift-network-performance_{context}"] -== Network performance optimizations -By default, three performance optimizations are applied to OVS services to minimize resource consumption: - -* CPU affinity to `ovs-vswitchd.service` and `ovsdb-server.service` -* `no-mlockall` to `openvswitch.service` -* Limit handler and `revalidator` threads to `ovs-vswitchd.service` - -[id="microshift-network-features_{context}"] -== Network features -Networking features available with {product-title} {product-version} include: - -* Kubernetes network policy -* Dynamic node IP -* Cluster network on specified host interface - -Networking features not available with {product-title} {product-version}: - -* Egress IP/firewall/qos: disabled -* Hybrid networking: not supported -* IPsec: not supported -* Hardware offload: not supported - -//Q: are there immutable network settings we should tell users about? -[id="microshift-network-comps-svcs_{context}"] -== {product-title} networking components and services -This brief overview describes networking components and their operation in {product-title}. The `microshift-networking` RPM is a package that automatically pulls in any networking-related dependencies and systemd services to initialize networking, for example, the `microshift-ovs-init` systemd service. - -NetworkManager:: -NetworkManager is required to set up the initial gateway bridge on the {product-title} node. The NetworkManager and `NetworkManager-ovs` RPM packages are installed as dependencies to the `microshift-networking` RPM package, which contains the necessary configuration files. NetworkManager in {product-title} uses the `keyfile` plugin and is restarted after installation of the `microshift-networking` RPM package. - -microshift-ovs-init:: -The `microshift-ovs-init.service` is installed by the `microshift-networking` RPM package as a dependent systemd service to microshift.service. It is responsible for setting up the OVS gateway bridge. - -OVN containers:: -Two OVN-Kubernetes daemon sets are rendered and applied by {product-title}. - -* *ovnkube-master* -Includes the `northd`, `nbdb`, `sbdb` and `ovnkube-master` containers. - -* *ovnkube-node* -The ovnkube-node includes the OVN-Controller container. -+ -After {product-title} boots, the OVN-Kubernetes daemon sets are deployed in the `openshift-ovn-kubernetes` namespace. - -Packaging:: -OVN-Kubernetes manifests and startup logic are built into {product-title}. 
The systemd services and configurations included in `microshift-networking` RPM are: - -* `/etc/NetworkManager/conf.d/microshift-nm.conf` for NetworkManager.service -* `/etc/systemd/system/ovs-vswitchd.service.d/microshift-cpuaffinity.conf` for ovs-vswitchd.service -* `/etc/systemd/system/ovsdb-server.service.d/microshift-cpuaffinity.conf` -* `/usr/bin/configure-ovs-microshift.sh` for microshift-ovs-init.service -* `/usr/bin/configure-ovs.sh` for microshift-ovs-init.service -* `/etc/crio/crio.conf.d/microshift-ovn.conf` for CRI-O service - -[id="microshift-bridge-mapping_{context}"] -== Bridge mappings -Bridge mappings allow provider network traffic to reach the physical network. Traffic leaves the provider network and arrives at the `br-int` bridge. A patch port between `br-int` and `br-ex` then allows the traffic to traverse to and from the provider network and the edge network. Kubernetes pods are connected to the `br-int` bridge through virtual ethernet pair: one end of the virtual ethernet pair is attached to the pod namespace, and the other end is attached to the `br-int` bridge. - -[id="microshift-primary-gateway-interface_{context}"] -=== Primary gateway interface -You can specify the desired host interface name in the `ovn.yaml` config file as `gatewayInterface`. The specified interface is added in OVS bridge br-ex which acts as gateway bridge for the CNI network. diff --git a/modules/microshift-config-cli-manifests.adoc b/modules/microshift-config-cli-manifests.adoc deleted file mode 100644 index 95cc498aaaec..000000000000 --- a/modules/microshift-config-cli-manifests.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/using-config-tools.adoc - -:_content-type: CONCEPT -[id="microshift-config-cli-manifests_{context}"] -= Using CLI tools and creating manifests - -Configure your {product-title} using the supported command line (CLI) arguments and environment variables. - -[id="microshift-config-cli-environ-vars_{context}"] -== Supported command-line arguments and environment variables - -[cols="4",options="header"] -|=== -|Field name -|CLI argument -|Environment variable -|Description - -|`clusterCIDR` -|`--cluster-cidr` -|`MICROSHIFT_CLUSTER_CLUSTERCIDR` -|A block of IP addresses from which pod IP addresses are allocated. - -|`serviceCIDR` -|`--service-cidr` -|`MICROSHIFT_CLUSTER_SERVICECIDR` -|A block of virtual IP addresses for Kubernetes services. - -|`serviceNodePortRange` -|`--service-node-port-range` -|`MICROSHIFT_CLUSTER_SERVICENODEPORTRANGE` -|The port range allowed for Kubernetes services of type NodePort. - -|`dns` -|`--cluster-dns` -|`MICROSHIFT_CLUSTER_DNS` -|The Kubernetes service IP address where pods query for name resolution. - -|`domain` -|`--cluster-domain` -|`MICROSHIFT_CLUSTER_DOMAIN` -|Base DNS domain used to construct fully qualified pod and service domain names. - -|`url` -|`--url` -|`MICROSHIFT_CLUSTER_URL` -|URL of the API server for the cluster. - -|`nodeIP` -|`--node-ip` -|`MICROSHIFT_NODEIP` -|The IP address of the node, defaults to IP of the default route. - -|`nodeName` -|`--node-name` -|`MICROSHIFT_NODENAME` -|The name of the node, defaults to hostname. 
- -|`logVLevel` -|`--v` -|`MICROSHIFT_LOGVLEVEL` -|Log verbosity (0-5) -|=== - diff --git a/modules/microshift-config-etcd.adoc b/modules/microshift-config-etcd.adoc deleted file mode 100644 index ee1128700489..000000000000 --- a/modules/microshift-config-etcd.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -//* microshift_support/microshift-etcd.adoc - -:_content-type: PROCEDURE -[id="microshift-config-etcd_{context}"] -= Configuring the memoryLimitMB value to set parameters for the {product-title} etcd server - -By default, etcd will use as much memory as necessary to handle the load on the system. In some memory constrained systems, it might be necessary to limit the amount of memory etcd is allowed to use at a given time. - -.Procedure - -* Edit the `/etc/microshift/config.yaml` file to set the `memoryLimitMB` value. -+ -[source,yaml] ----- -etcd: - memoryLimitMB: 128 ----- -+ -[NOTE] -==== -The minimum permissible value for `memoryLimitMB` on {product-title} is 128 MB. Values close to the minimum value are more likely to impact etcd performance. The lower the limit, the longer etcd takes to respond to queries. If the limit is too low or the etcd usage is high, queries time out. -==== - -.Verification - -. After modifying the `memoryLimitMB` value in `/etc/microshift/config.yaml`, restart {product-title} by running the following command: -+ -[source, terminal] ----- -$ sudo systemctl restart microshift ----- - -. Verify the new `memoryLimitMB` value is in use by running the following command: -+ -[source, terminal] ----- -$ systemctl show --property=MemoryHigh microshift-etcd.scope ----- diff --git a/modules/microshift-config-nodeport-limits.adoc b/modules/microshift-config-nodeport-limits.adoc deleted file mode 100644 index 9bf50baee284..000000000000 --- a/modules/microshift-config-nodeport-limits.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/using-config-tools.adoc - -:_content-type: CONCEPT -[id="microshift-nodeport-range-limits_{context}"] -= Extending the port range for NodePort services - -The `serviceNodePortRange` setting extends the port range available to NodePort services. This option is useful when specific standard ports under the `30000-32767` range need to be exposed. For example, if your device needs to expose the `1883/tcp` MQ Telemetry Transport (MQTT) port on the network because client devices cannot use a different port. - -[IMPORTANT] -==== -NodePorts can overlap with system ports, causing a malfunction of the system or {product-title}. -==== - -Consider the following when configuring the NodePort service ranges: - -* Do not create any NodePort service without an explicit `nodePort` selection. When an explicit `nodePort` is not specified, the port is assigned randomly by the `kube-apiserver` and cannot be predicted. - -* Do not create any NodePort service for any system service port, {product-title} port, or other services you expose on your device `HostNetwork`. - -* Table one specifies ports to avoid when extending the port range: -+ -.Ports to avoid. 
-[cols="2",options="header"] -|=== -|Port -|Description - -|22/tcp -|SSH port - -|80/tcp -|OpenShift Router HTTP endpoint - -|443/tcp -|OpenShift Router HTTPS endpoint - -|1936/tcp -|Metrics service for the openshift-router, not exposed today - -|2379/tcp -|etcd port - -|2380/tcp -|etcd port - -|6443 -|kubernetes API - -|8445/tcp -|openshift-route-controller-manager - -|9537/tcp -|cri-o metrics - -|10250/tcp -|kubelet - -|10248/tcp -|kubelet healthz port - -|10259/tcp -|kube scheduler -|=== diff --git a/modules/microshift-config-yaml.adoc b/modules/microshift-config-yaml.adoc deleted file mode 100644 index a53845a4b49d..000000000000 --- a/modules/microshift-config-yaml.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/using-config-tools.adoc - -:_content-type: CONCEPT -[id="microshift-config-yaml_{context}"] -= Using a YAML configuration file - -{product-title} searches for a configuration file in the user-specific directory, `~/.microshift/config.yaml`, then the system-wide `/etc/microshift/config.yaml` directory. You must create the configuration file and specify any settings that should override the defaults before starting {product-title}. - -[id="microshift-yaml-default_{context}"] -== Default settings -If you do not create a `config.yaml` file, the default values are used. The following example configuration contains the default settings. You must change any settings that should override the defaults before starting {product-title}. - -.Default YAML file example -[source,yaml] ----- -dns: - baseDomain: microshift.example.com <1> -network: - clusterNetwork: - - cidr: 10.42.0.0/16 <2> - serviceNetwork: - - 10.43.0.0/16 <3> - serviceNodePortRange: 30000-32767 <4> -node: - hostnameOverride: "" <5> - nodeIP: "" <6> -apiServer: - subjectAltNames: [] <7> -debugging: - logLevel: "Normal" <8> ----- -<1> Base domain of the cluster. All managed DNS records will be subdomains of this base. -<2> A block of IP addresses from which Pod IP addresses are allocated. -<3> A block of virtual IP addresses for Kubernetes services. -<4> The port range allowed for Kubernetes services of type NodePort. -<5> The name of the node. The default value is the hostname. -<6> The IP address of the node. The default value is the IP address of the default route. -<7> Subject Alternative Names for API server certificates. -<8> Log verbosity. Valid values for this field are `Normal`, `Debug`, `Trace`, or `TraceAll`. - -[IMPORTANT] -==== -Restart {product-title} after changing any configuration settings to have them take effect. {product-title} reads the configuration file only on start. -==== diff --git a/modules/microshift-configuring-ovn.adoc b/modules/microshift-configuring-ovn.adoc deleted file mode 100644 index 51ee910b4918..000000000000 --- a/modules/microshift-configuring-ovn.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-config-OVN-K_{context}"] -= Creating an OVN-Kubernetes configuration file - -{product-title} uses built-in default OVN-Kubernetes values if an OVN-Kubernetes configuration file is not created. You can write an OVN-Kubernetes configuration file to `/etc/microshift/ovn.yaml`. An example file is provided for your configuration. - -.Procedure - -. 
To create your `ovn.yaml` file, run the following command: -+ -[source, yaml] ----- -$ sudo cp /etc/microshift/ovn.yaml.default /etc/microshift/ovn.yaml ----- - -. To list the contents of the configuration file you created, run the following command: -+ -[source, yaml] ----- -$ cat /etc/microshift/ovn.yaml.default ----- -+ -.Example 'yaml' configuration file with default values - -[source,yaml] ----- -ovsInit: - disableOVSInit: false - gatewayInterface: "" <1> -mtu: 1400 ----- -<1> The default value is an empty string that means "not-specified." The CNI network plugin auto-detects to interface with the default route. - -. To customize your configuration, use the following table that lists the valid values you can use: -+ -.Supported optional OVN-Kubernetes configurations for {product-title} - -[cols="5",options="header"] -|=== -|Field -|Type -|Default -|Description -|Example - -|`ovsInit.disableOVSInit` -|bool -|false -|Skip configuring OVS bridge `br-ex` in `microshift-ovs-init.service` -|true ^[1]^ - -|`ovsInit.gatewayInterface` -|Alpha -|eth0 -|Ingress that is the API gateway -|eth0 - -|mtu -|uint32 -|auto -|MTU value used for the pods -|1300 -|=== -+ -[.small] --- -1. The OVS bridge is required. When `disableOVSInit` is true, OVS bridge `br-ex` must be configured manually. -+ -[IMPORTANT] -==== -If you change the `mtu` configuration value in the `ovn.yaml` file, you must restart the host that {product-title} is running on to apply the updated setting. -==== --- - -.Example custom `ovn.yaml` configuration file - -[source, yaml] ----- -ovsInit: - disableOVSInit: true - gatewayInterface: eth0 -mtu: 1300 ----- - -[IMPORTANT] -==== -When `disableOVSInit` is set to true in the `ovn.yaml` config file, the `br-ex` OVS bridge must be manually configured. -==== diff --git a/modules/microshift-creating-ostree-iso.adoc b/modules/microshift-creating-ostree-iso.adoc deleted file mode 100644 index bb49b8bce4d0..000000000000 --- a/modules/microshift-creating-ostree-iso.adoc +++ /dev/null @@ -1,108 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="microshift-creating-ostree-iso_{context}"] -= Creating the {op-system-ostree-first} image - -Use the following procedure to create the ISO. The {op-system-ostree} Installer image pulls the commit from the running container and creates an installable boot ISO with a Kickstart file configured to use the embedded OSTree commit. - -.Prerequisites -* Your build host meets the Image Builder system requirements. -* You have installed and set up Image Builder and the `composer-cli` tool. -* You have root-user access to your build host. -* You have the `podman` tool. - -.Procedure - -. Start an `ostree` container image build by running the following command: -+ -[source,terminal] ----- -$ BUILDID=$(sudo composer-cli compose start-ostree --ref "rhel/9/$(uname -m)/edge" minimal-microshift edge-container | awk '{print $2}') ----- -+ -This command also returns the identification (ID) of the build for monitoring. - -. 
You can check the status of the build periodically by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli compose status ----- -+ -.Example output of a running build - -[source,terminal] ----- -ID Status Time Blueprint Version Type Size -cc3377ec-4643-4483-b0e7-6b0ad0ae6332 RUNNING Wed Jun 7 12:26:23 2023 minimal-microshift 0.0.1 edge-container ----- -+ -.Example output of a completed build - -[source,terminal] ----- -ID Status Time Blueprint Version Type Size -cc3377ec-4643-4483-b0e7-6b0ad0ae6332 FINISHED Wed Jun 7 12:32:37 2023 minimal-microshift 0.0.1 edge-container ----- -+ -[NOTE] -==== -You can use the `watch` command to monitor your build if you are familiar with how to start and stop it. -==== - -. Download the container image using the ID and get the image ready for use by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli compose image ${BUILDID} ----- - -. Change the ownership of the downloaded container image to the current user by running the following command: -+ -[source,terminal] ----- -$ sudo chown $(whoami). ${BUILDID}-container.tar ----- - -. Add read permissions for the current user to the image by running the following command: -+ -[source,terminal] ----- -$ sudo chmod a+r ${BUILDID}-container.tar ----- - -. Bootstrap a server on port 8085 for the `ostree` container image to be consumed by the ISO build by completing the following steps: - -.. Get the `IMAGEID` variable result by running the following command: -+ -[source,terminal] ----- -$ IMAGEID=$(cat < "./${BUILDID}-container.tar" | sudo podman load | grep -o -P '(?<=sha256[@:])[a-z0-9]*') ----- - -.. Use the `IMAGEID` variable result to execute the podman command step by running the following command: -+ -[source,terminal] ----- -$ sudo podman run -d --name=minimal-microshift-server -p 8085:8080 ${IMAGEID} ----- -+ -This command also returns the ID of the container saved in the `IMAGEID` variable for monitoring. - -. Generate the installer blueprint file by running the following command: -+ -[source,terminal] ----- -$ cat > microshift-installer.toml <<EOF -name = "microshift-installer" - -description = "" -version = "0.0.0" -modules = [] -groups = [] -packages = [] -EOF ----- \ No newline at end of file diff --git a/modules/microshift-cri-o-container-runtime.adoc b/modules/microshift-cri-o-container-runtime.adoc deleted file mode 100644 index f79ea297f283..000000000000 --- a/modules/microshift-cri-o-container-runtime.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-CRI-O-container-engine_{context}"] -= Using a proxy in the CRI-O container runtime - -To use an HTTP(S) proxy in `CRI-O`, you need to set the `HTTP_PROXY` and `HTTPS_PROXY` environment variables. You can also set the `NO_PROXY` variable to exclude a list of hosts from being proxied. - -.Procedure - -. Add the following settings to the `/etc/systemd/system/crio.service.d/00-proxy.conf` file: -+ -[source, config] ----- -Environment=NO_PROXY="localhost,127.0.0.1" -Environment=HTTP_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" -Environment=HTTPS_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" ----- - -. Reload the configuration settings: -+ -[source, terminal] ----- -$ sudo systemctl daemon-reload ----- - -. 
Restart the CRI-O service to apply the settings: -+ -[source, terminal] ----- -$ sudo systemctl restart crio ----- diff --git a/modules/microshift-deploying-a-load-balancer.adoc b/modules/microshift-deploying-a-load-balancer.adoc deleted file mode 100644 index 205aca8d4029..000000000000 --- a/modules/microshift-deploying-a-load-balancer.adoc +++ /dev/null @@ -1,158 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-deploying-a-load-balancer_{context}"] -= Deploying a load balancer for a workload - -{product-title} offers a built-in implementation of network load balancers. The following example procedure uses the node IP address as the external IP address for the `LoadBalancer` service configuration file. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* You have access to the cluster as a user with the cluster administration role. -* You installed a cluster on an infrastructure configured with the OVN-Kubernetes network plugin. -* The `KUBECONFIG` environment variable is set. - -.Procedure - -. Verify that your pods are running by running the following command: -+ -[source,terminal] ----- -$ oc get pods -A ----- - -. Create the example namespace by running the following commands: -+ -[source,terminal] ----- -$ NAMESPACE=nginx-lb-test ----- -+ -[source,terminal] ----- -$ oc create ns $NAMESPACE ----- - -. The following example deploys three replicas of the test `nginx` application in your namespace: -+ -[source,terminal] ----- -$ oc apply -n $NAMESPACE -f - <<EOF -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx -data: - headers.conf: | - add_header X-Server-IP \$server_addr always; ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx -spec: - replicas: 3 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - image: quay.io/packit/nginx-unprivileged - imagePullPolicy: Always - name: nginx - ports: - - containerPort: 8080 - volumeMounts: - - name: nginx-configs - subPath: headers.conf - mountPath: /etc/nginx/conf.d/headers.conf - securityContext: - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: ["ALL"] - runAsNonRoot: true - volumes: - - name: nginx-configs - configMap: - name: nginx - items: - - key: headers.conf - path: headers.conf -EOF ----- - -. You can verify that the three sample replicas started successfully by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n $NAMESPACE ----- - -. Create a `LoadBalancer` service for the `nginx` test application with the following sample commands: -+ -[source,terminal] ----- -$ oc create -n $NAMESPACE -f - <<EOF -apiVersion: v1 -kind: Service -metadata: - name: nginx -spec: - ports: - - port: 81 - targetPort: 8080 - selector: - app: nginx - type: LoadBalancer -EOF ----- -+ -[NOTE] -==== -You must ensure that the `port` parameter is a host port that is not occupied by other `LoadBalancer` services or {product-title} components. -==== - -. 
Verify that the service file exists, that the external IP address is properly assigned, and that the external IP is identical to the node IP by running the following command: -+ -[source,terminal] ----- -$ oc get svc -n $NAMESPACE ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -nginx LoadBalancer 10.43.183.104 192.168.1.241 81:32434/TCP 2m ----- - -.Verification - -* The following command forms five connections to the example `nginx` application using the external IP address of the `LoadBalancer` service configuration. The result of the command is a list of those server IP addresses. Verify that the load balancer sends requests to all the running applications with the following command: -+ -[source,terminal] ----- -EXTERNAL_IP=192.168.1.241 -seq 5 | xargs -Iz curl -s -I http://$EXTERNAL_IP:81 | grep X-Server-IP ----- -+ -The output of the previous command contains different IP addresses if the load balancer is successfully distributing the traffic to the applications, for example: -+ -.Example output -[source,terminal] ----- -X-Server-IP: 10.42.0.41 -X-Server-IP: 10.42.0.41 -X-Server-IP: 10.42.0.43 -X-Server-IP: 10.42.0.41 -X-Server-IP: 10.42.0.43 ----- \ No newline at end of file diff --git a/modules/microshift-download-iso-prep-for-use.adoc b/modules/microshift-download-iso-prep-for-use.adoc deleted file mode 100644 index dd7675fc1281..000000000000 --- a/modules/microshift-download-iso-prep-for-use.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="microshift-download-iso-prep-for-use_{context}"] -= Download the ISO and prepare it for use - -. Download the ISO using the ID by running the following command: -+ -[source,terminal] ----- -$ sudo composer-cli compose image ${BUILDID} ----- - -. Change the ownership of the downloaded container image to the current user by running the following command: -+ -[source,terminal] ----- -$ sudo chown $(whoami). ${BUILDID}-installer.iso ----- - -. Add read permissions for the current user to the image by running the following command: -+ -[source,terminal] ----- -$ sudo chmod a+r ${BUILDID}-installer.iso ----- diff --git a/modules/microshift-firewall-about.adoc b/modules/microshift-firewall-about.adoc deleted file mode 100644 index d5a6475f33ca..000000000000 --- a/modules/microshift-firewall-about.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: CONCEPT -[id="microshift-firewall-about_{context}"] -= About network traffic through the firewall - -Firewalld is a networking service that runs in the background and responds to connection requests, creating a dynamic customizable host-based firewall. If you are using {op-system-ostree-first} with {product-title}, firewalld should already be installed and you just need to configure it. Details are provided in procedures that follow. Overall, you must explicitly allow the following OVN-Kubernetes traffic when the `firewalld` service is running: - -CNI pod to CNI pod:: -CNI pod to Host-Network pod -Host-Network pod to Host-Network pod - -CNI pod:: -The Kubernetes pod that uses the CNI network - -Host-Network pod:: -The Kubernetes pod that uses host network -You can configure the `firewalld` service by using the following procedures. In most cases, firewalld is part of {rhel} installations. 
If you do not have firewalld, you can install it with the simple procedure in this section. - -[IMPORTANT] -==== -{product-title} pods must have access to the internal CoreDNS component and API servers. -==== \ No newline at end of file diff --git a/modules/microshift-firewall-allow-traffic.adoc b/modules/microshift-firewall-allow-traffic.adoc deleted file mode 100644 index 79d1723fb7fc..000000000000 --- a/modules/microshift-firewall-allow-traffic.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: PROCEDURE -[id="microshift-firewall-allow-traffic_{context}"] -= Allowing network traffic through the firewall - -You can allow network traffic through the firewall by configuring the IP address range and inserting the DNS server to allow internal traffic from pods through the network gateway. - -.Procedure - -. Use one of the following commands to set the IP address range: - -.. Configure the IP address range with default values by running the following command: -+ -[source,terminal] ----- -$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 ----- - -.. Configure the IP address range with custom values by running the following command: -+ -[source,terminal] ----- -$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=<custom IP range> ----- - -. To allow internal traffic from pods through the network gateway, run the following command: -+ -[source, terminal] ----- -$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=169.254.169.1 ----- diff --git a/modules/microshift-firewall-apply-settings.adoc b/modules/microshift-firewall-apply-settings.adoc deleted file mode 100644 index 9bcfb36f8fc0..000000000000 --- a/modules/microshift-firewall-apply-settings.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: PROCEDURE -[id="microshift-firewall-applying-settings_{context}"] -== Applying firewall settings - -To apply firewall settings, use the following one-step procedure: - -.Procedure - -* After you have finished configuring network access through the firewall, run the following command to restart the firewall and apply the settings: - -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- diff --git a/modules/microshift-firewall-known-issue.adoc b/modules/microshift-firewall-known-issue.adoc deleted file mode 100644 index 45b0a44c7e20..000000000000 --- a/modules/microshift-firewall-known-issue.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: CONCEPT -[id="microshift-firewall-known-issue_{context}"] -= Known firewall issue - -* To avoid breaking traffic flows with a firewall reload or restart, execute firewall commands before starting {product-title}. The CNI driver in {product-title} makes use of iptable rules for some traffic flows, such as those using the NodePort service. The iptable rules are generated and inserted by the CNI driver, but are deleted when the firewall reloads or restarts. The absence of the iptable rules breaks traffic flows. If firewall commands have to be executed after {product-title} is running, manually restart `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace to reset the rules controlled by the CNI driver. 
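If the iptable rules have already been deleted by a firewall reload, a minimal recovery sketch such as the following can trigger a reconciliation. It mirrors the workaround module that appears later in this changeset and assumes the `oc` CLI is installed and `KUBECONFIG` points at a kubeconfig with cluster-admin access:

[source,terminal]
----
# Find the ovnkube-master pod, then delete it so the daemon set recreates it,
# which reinserts the iptable rules managed by the CNI driver.
$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk '{print $1}')

$ oc -n openshift-ovn-kubernetes delete pod $pod
----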
- -//Revise and use the unused ki-cni-iptables-deleted procedure in release notes? Need to verify status for 4.14 \ No newline at end of file diff --git a/modules/microshift-firewall-opt-settings.adoc b/modules/microshift-firewall-opt-settings.adoc deleted file mode 100644 index cbb6dee775b0..000000000000 --- a/modules/microshift-firewall-opt-settings.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: PROCEDURE -[id="microshift-firewall-optional-settings_{context}"] -= Using optional port settings - -The {product-title} firewall service allows optional port settings. - -.Procedure - -* To add customized ports to your firewall configuration, use the following command syntax: -+ -[source,terminal] ----- -$ sudo firewall-cmd --permanent --zone=public --add-port=<port number>/<port protocol> ----- -+ -.Optional ports -[option="header"] -|=== -|Port(s)|Protocol(s)|Description - -|80 -|TCP -|HTTP port used to serve applications through the {ocp} router. - -|443 -|TCP -|HTTPS port used to serve applications through the {ocp} router. - -|5353 -|UDP -|mDNS service to respond for {ocp} route mDNS hosts. - -|30000-32767 -|TCP -|Port range reserved for NodePort services; can be used to expose applications on the LAN. - -|30000-32767 -|UDP -|Port range reserved for NodePort services; can be used to expose applications on the LAN. - -|6443 -|TCP -|HTTPS API port for the {product-title} API. -|=== - -The following are examples of commands used when requiring external access through the firewall to services running on {product-title}, such as port 6443 for the API server, for example, ports 80 and 443 for applications exposed through the router. - -.Example commands - -* Configuring a port for the {product-title} API server: -+ -[source, terminal] ----- -$ sudo firewall-cmd --permanent --zone=public --add-port=6443/tcp ----- - -* Configuring ports for applications exposed through the router: -+ -[source, terminal] ----- -$ sudo firewall-cmd --permanent --zone=public --add-port=80/tcp ----- -+ -[source, terminal] ----- -$ sudo firewall-cmd --permanent --zone=public --add-port=443/tcp ----- - diff --git a/modules/microshift-firewall-req-settings.adoc b/modules/microshift-firewall-req-settings.adoc deleted file mode 100644 index 37418ac36275..000000000000 --- a/modules/microshift-firewall-req-settings.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: CONCEPT -[id="microshift-firewall-req-settings_{context}"] -= Required firewall settings - -An IP address range for the cluster network must be enabled during firewall configuration. You can use the default values or customize the IP address range. If you choose to customize the cluster network IP address range from the default `10.42.0.0/16` setting, you must also use the same custom range in the firewall configuration. 
- -.Firewall IP address settings -[cols="3",options="header"] -|=== -|IP Range -|Firewall rule required -|Description - -|10.42.0.0/16 -|No -|Host network pod access to other pods - -|169.254.169.1 -|Yes -|Host network pod access to {product-title} API server -|=== - -The following are examples of commands for settings that are mandatory for firewall configuration: - -.Example commands - -* Configure host network pod access to other pods: -+ -[source, terminal] ----- -$ sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 ----- - -* Configure host network pod access to services backed by Host endpoints, such as the {product-title} API: -+ -[source, terminal] ----- -$ sudo firewall-cmd --permanent --zone=trusted --add-source=169.254.169.1 ----- diff --git a/modules/microshift-firewall-verify-settings.adoc b/modules/microshift-firewall-verify-settings.adoc deleted file mode 100644 index 4bda23d144d0..000000000000 --- a/modules/microshift-firewall-verify-settings.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: PROCEDURE -[id="microshift-firewall-verifying-settings_{context}"] -= Verifying firewall settings - -After you have restarted the firewall, you can verify your settings by listing them. - -.Procedure - -* To verify rules added in the default public zone, such as ports-related rules, run the following command: -+ -[source,terminal] ----- -$ sudo firewall-cmd --list-all ----- - -* To verify rules added in the trusted zone, such as IP-range related rules, run the following command: -+ -[source,terminal] ----- -$ sudo firewall-cmd --zone=trusted --list-all ----- diff --git a/modules/microshift-firewalld-install.adoc b/modules/microshift-firewalld-install.adoc deleted file mode 100644 index 8f211360333b..000000000000 --- a/modules/microshift-firewalld-install.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-firewall.adoc - -:_content-type: PROCEDURE -[id="microshift-firewall-install_{context}"] -= Installing the firewalld service - -If you are using {op-system-ostree}, firewalld should be installed. To use the service, you can simply configure it. The following procedure can be used if you do not have firewalld, but want to use it. - -Install and run the `firewalld` service for {product-title} by using the following steps. - -.Procedure - -. Optional: Check for firewalld on your system by running the following command: -+ -[source,terminal] ----- -$ rpm -q firewalld ----- - -. If the `firewalld` service is not installed, run the following command: -+ -[source,terminal] ----- -$ sudo dnf install -y firewalld ----- - -. To start the firewall, run the following command: -+ -[source,terminal] ----- -$ sudo systemctl enable firewalld --now ----- - diff --git a/modules/microshift-gathering-sos-report.adoc b/modules/microshift-gathering-sos-report.adoc deleted file mode 100644 index a1f7766d939d..000000000000 --- a/modules/microshift-gathering-sos-report.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: - -// * microshift_support/microshift-sos-report - -:_content-type: PROCEDURE -[id="gathering-data-microshift-sos-report_{context}"] -= Gathering data from a {product-title} sos report - -.Prerequisites - -* You must have the `sos` package installed. - -.Procedure - -. Log into the failing host as a root user. - -. 
Perform the debug report creation procedure by running the following command: -+ -[source,terminal] ----- -$ sos report --batch --clean --all-logs --profile microshift ----- -+ -.Example output -[source,terminal] ----- -sosreport (version 4.5.1) - -This command will collect diagnostic and configuration information from -this Red Hat Enterprise Linux system and installed applications. - -An archive containing the collected information will be generated in -/var/tmp/sos.o0sznf_8 and may be provided to a Red Hat support -representative. - -Any information provided to Red Hat will be treated in accordance with -the published support policies at: - - Distribution Website : https://www.redhat.com/ - Commercial Support : https://www.access.redhat.com/ - -The generated archive may contain data considered sensitive and its -content should be reviewed by the originating organization before being -passed to any third party. - -No changes will be made to system configuration. - - - Setting up archive ... - Setting up plugins ... - Running plugins. Please wait ... - - Starting 1/2 microshift [Running: microshift] - Starting 2/2 microshift_ovn [Running: microshift microshift_ovn] - Finishing plugins [Running: microshift] - - Finished running plugins - -Found 1 total reports to obfuscate, processing up to 4 concurrently - -sosreport-microshift-rhel9-2023-03-31-axjbyxw : Beginning obfuscation... -sosreport-microshift-rhel9-2023-03-31-axjbyxw : Obfuscation completed - -Successfully obfuscated 1 report(s) - -Creating compressed archive... - -A mapping of obfuscated elements is available at - /var/tmp/sosreport-microshift-rhel9-2023-03-31-axjbyxw-private_map - -Your sosreport has been generated and saved in: - /var/tmp/sosreport-microshift-rhel9-2023-03-31-axjbyxw-obfuscated.tar.xz - - Size 444.14KiB - Owner root - sha256 922e5ff2db25014585b7c6c749d2c44c8492756d619df5e9838ce863f83d4269 - -Please send this file to your support representative. ----- diff --git a/modules/microshift-greenboot-check-update.adoc b/modules/microshift-greenboot-check-update.adoc deleted file mode 100644 index c5aa5054c513..000000000000 --- a/modules/microshift-greenboot-check-update.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="greenboot-check-updates_{context}"] -= Checking updates with a health script - -Access the output of health check scripts in the system log after an update by using the following procedure. - -.Procedure - -* To access the result of update checks, run the following command: -+ -[source, terminal] ----- -$ sudo grub2-editenv - list | grep ^boot_success ----- - -.Example output for a successful update - -[source, terminal] ----- -boot_success=1 ----- - -If your command returns `boot_success=0`, either the greenboot health check is still running, or the update is a failure. 
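One possible way to tell those two cases apart, assuming the `greenboot-healthcheck.service` unit name used elsewhere in these modules, is to check whether the health check service is still running before reading its output:

[source,terminal]
----
# Shows whether the greenboot health checks are still in progress
$ sudo systemctl is-active greenboot-healthcheck.service

# Review the health check output once the service has finished
$ sudo journalctl -o cat -u greenboot-healthcheck.service
----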
\ No newline at end of file diff --git a/modules/microshift-greenboot-create-health-check-script.adoc b/modules/microshift-greenboot-create-health-check-script.adoc deleted file mode 100644 index 6a61c7e9beb0..000000000000 --- a/modules/microshift-greenboot-create-health-check-script.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="microshift-greenboot-create-health-check-script_{context}"] -= Creating a health check script - -You can create a health check script for installed workloads by placing them in the `/etc/greenboot/check/required.d` directory. The following procedure provides an example of installing the busybox application and creating a health check script for busybox. You can use this example as a general guide for creating health check scripts for your applications. - -.Prerequisite - -* You have installed a workload. For this example, the busybox application is used as a workload. The "Additional resources" section that follows this procedure has a link to instructions on deploying workloads using manifests. - -.Procedure - -. To create a health check script, run the following command: -+ -[source, terminal] ----- -$ SCRIPT_FILE=/etc/greenboot/check/required.d/50_busybox_running_check.sh -sudo curl -s https://raw.githubusercontent.com/openshift/microshift/3b7f6025cd77bd1bf827416fd026783ead82b7c8/docs/config/busybox_running_check.sh \ - -o ${SCRIPT_FILE} && echo SUCCESS || echo ERROR -sudo chmod 755 ${SCRIPT_FILE} ----- -+ -In this example, the script verifies that busybox is running as expected. You can replace `/etc/greenboot/check/required.d/50_busybox_running_check.sh` with your own workload details. -+ -[NOTE] -==== -In this example, the {product-title} core service health checks run before the user workload health checks. -==== - -. To test that your script is running as expected: - -.. Restart the system. - -.. Once the system has restarted, run the following command: -+ -[source, terminal] ----- -$ sudo journalctl -o cat -u greenboot-healthcheck.service ----- -+ -.Example output for the busybox health check script -+ -[source, terminal] ----- -... -... -STARTED -Waiting 300s for pod image(s) from the 'busybox' namespace to be downloaded -Waiting 300s for 1 pod(s) from the 'busybox' namespace to be in 'Ready' state -Checking pod restart count in the 'busybox' namespace -FINISHED -Script '50_busybox_running_check.sh' SUCCESS ----- diff --git a/modules/microshift-greenboot-dir-structure.adoc b/modules/microshift-greenboot-dir-structure.adoc deleted file mode 100644 index c4d8aceedd11..000000000000 --- a/modules/microshift-greenboot-dir-structure.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: CONCEPT -[id="microshift-greenboot-dir-structure_{context}"] -= How greenboot uses directories to run scripts - -Health check scripts run from four `/etc/greenboot` directories. These scripts run in alphabetical order. Keep this in mind when you configure the scripts for your workloads. - -When the system starts, greenboot runs the scripts in the `required.d` and `wanted.d` directories. Depending on the outcome of those scripts, greenboot continues the startup or attempts a rollback as follows: - -. 
System as expected: When all of the scripts in the `required.d` directory are successful, greenboot runs any scripts present in the `/etc/greenboot/green.d` directory. - -. System trouble: If any of the scripts in the `required.d` directory fail, greenboot runs any prerollback scripts present in the `red.d` directory, then restarts the system. - -[NOTE] -==== -Greenboot redirects script and health check output to the system log. When you are logged in, a daily message provides the overall system health output. -==== - -[id="greenboot-directories-details_{context}"] -== Greenboot directories details - -Returning a nonzero exit code from any script means that script has failed. Greenboot restarts the system a few times to retry the scripts before attempting to roll back to the previous version. - -* `/etc/greenboot/check/required.d` contains the health checks that must not fail. - -** If the scripts fail, greenboot retries them three times by default. You can configure the number of retries in the `/etc/greenboot/greenboot.conf` file by setting the `GREENBOOT_MAX_BOOTS` parameter to the desired number of retries. - -** After all retries fail, greenboot automatically initiates a rollback if one is available. If a rollback is not available, the system log output shows that manual intervention is required. - -** The `40_microshift_running_check.sh` health check script for {product-title} is installed into this directory. - -* `/etc/greenboot/check/wanted.d` contains health scripts that are allowed to fail without causing the system to be rolled back. - -** If any of these scripts fail, greenboot logs the failure but does not initiate a rollback. - -* `/etc/greenboot/green.d` contains scripts that run after greenboot has declared the start successful. - -* `/etc/greenboot/red.d` contains scripts that run after greenboot has declared the startup as failed, including the `40_microshift_pre_rollback.sh` prerollback script. This script is executed right before a system rollback. The script performs {product-title} pod and OVN-Kubernetes cleanup to avoid potential conflicts after the system is rolled back to a previous version. diff --git a/modules/microshift-greenboot-health-check-log.adoc b/modules/microshift-greenboot-health-check-log.adoc deleted file mode 100644 index 55241751b17f..000000000000 --- a/modules/microshift-greenboot-health-check-log.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="microshift-greenboot-access-health-check_{context}"] -= Accessing health check output in the system log - -You can manually access the output of health checks in the system log by using the following procedure. - -.Procedure - -* To access the results of a health check, run the following command: -+ -[source, terminal] ----- -$ sudo journalctl -o cat -u greenboot-healthcheck.service ----- - -.Example output of a failed health check -[source, terminal] ----- -... -... -Running Required Health Check Scripts... -STARTED -GRUB boot variables: -boot_success=0 -boot_indeterminate=0 -boot_counter=2 -... -... -Waiting 300s for MicroShift service to be active and not failed -FAILURE -... -... 
----- \ No newline at end of file diff --git a/modules/microshift-greenboot-microshift-health-script.adoc b/modules/microshift-greenboot-microshift-health-script.adoc deleted file mode 100644 index 80bbbd733646..000000000000 --- a/modules/microshift-greenboot-microshift-health-script.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: CONCEPT -[id="microshift-health-script_{context}"] -= The {product-title} health script - -The `40_microshift_running_check.sh` health check script only performs validation of core {product-title} services. Install your customized workload validation scripts in the greenboot directories to ensure successful application operations after system updates. Scripts run in alphabetical order. - -{product-title} health checks are listed in the following table: - -.Validation statuses and outcome for {product-title} - -[cols="3", options="header"] -|=== -|Validation -|Pass -|Fail - -|Check that the script runs with `root` permissions -|Next -|`exit 0` - -|Check that the `microshift.service` is enabled -|Next -|`exit 0` - -|Wait for the `microshift.service` to be active (!failed) -|Next -|`exit 1` - -|Wait for Kubernetes API health endpoints to be working and receiving traffic -|Next -|`exit 1` - -|Wait for any pod to start -|Next -|`exit 1` - -|For each core namespace, wait for images to be pulled -|Next -|`exit 1` - -|For each core namespace, wait for pods to be ready -|Next -|`exit 1` - -|For each core namespace, check if pods are not restarting -|`exit 0` -|`exit 1` -|=== - -[id="validation-wait-period"] -== Validation wait period -The wait period in each validation is five minutes by default. After the wait period, if the validation has not succeeded, it is declared a failure. This wait period is incrementally increased by the base wait period after each boot in the verification loop. - -* You can override the base-time wait period by setting the `MICROSHIFT_WAIT_TIMEOUT_SEC` environment variable in the `/etc/greenboot/greenboot.conf` configuration file. For example, you can change the wait time to three minutes by resetting the value to 180 seconds, such as `MICROSHIFT_WAIT_TIMEOUT_SEC=180`. diff --git a/modules/microshift-greenboot-prerollback-log.adoc b/modules/microshift-greenboot-prerollback-log.adoc deleted file mode 100644 index da151df88842..000000000000 --- a/modules/microshift-greenboot-prerollback-log.adoc +++ /dev/null @@ -1,50 +0,0 @@ - -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="microshift-greenboot-access-prerollback-check_{context}"] -= Accessing prerollback health check output in the system log - -You can access the output of health check scripts in the system log. For example, check the results of a prerollback script using the following procedure. - -.Procedure - -* To access the results of a prerollback script, run the following command: -+ -[source, terminal] ----- -$ sudo journalctl -o cat -u redboot-task-runner.service ----- - -.Example output of a prerollback script - -[source, terminal] ----- -... -... -Running Red Scripts... 
-STARTED -GRUB boot variables: -boot_success=0 -boot_indeterminate=0 -boot_counter=0 -The ostree status: -* rhel c0baa75d9b585f3dd989a9cf05f647eb7ca27ee0dbd4b94fe8c93ed3a4b9e4a5.0 - Version: 9.1 - origin: <unknown origin type> - rhel 6869c1347b0e0ba1bbf0be750cdf32da5138a1fcbc5a4c6325ab9eb647b64663.0 (rollback) - Version: 9.1 - origin refspec: edge:rhel/9/x86_64/edge -System rollback imminent - preparing MicroShift for a clean start -Stopping MicroShift services -Removing MicroShift pods -Killing conmon, pause and OVN processes -Removing OVN configuration -Finished greenboot Failure Scripts Runner. -Cleanup succeeded -Script '40_microshift_pre_rollback.sh' SUCCESS -FINISHED -redboot-task-runner.service: Deactivated successfully. ----- diff --git a/modules/microshift-greenboot-systemd-journal-data.adoc b/modules/microshift-greenboot-systemd-journal-data.adoc deleted file mode 100644 index a0675cc07857..000000000000 --- a/modules/microshift-greenboot-systemd-journal-data.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="microshift-greenboot-systemd-journal-data_{context}"] -= Enabling systemd journal service data persistency - -The default configuration of the `systemd` journal service stores the data in the volatile `/run/log/journal` directory. To persist system logs across system starts and restarts, you must enable log persistence and set limits on the maximal journal data size. - -.Procedure - -. Make the directory by running the following command: -+ -[source, terminal] ----- -$ sudo mkdir -p /etc/systemd/journald.conf.d ----- - -. Create the configuration file by running the following command: -+ -[source, terminal] ----- -cat <<EOF | sudo tee /etc/systemd/journald.conf.d/microshift.conf &>/dev/null -[Journal] -Storage=persistent -SystemMaxUse=1G -RuntimeMaxUse=1G -EOF ----- - -. Edit the configuration file values for your size requirements. diff --git a/modules/microshift-greenboot-updates-workloads.adoc b/modules/microshift-greenboot-updates-workloads.adoc deleted file mode 100644 index 6a78a5696bfe..000000000000 --- a/modules/microshift-greenboot-updates-workloads.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: CONCEPT -[id="microshift-greenboot-updates-workloads_{context}"] -= Updates and third-party workloads - -Health checks are especially useful after an update. You can examine the output of greenboot health checks and determine whether the update was declared valid. This health check can help you determine if the system is working properly. - -Health check scripts for updates are installed into the `/etc/greenboot/check/required.d` directory and are automatically executed during each system start. Exiting scripts with a nonzero status means the system start is declared as failed. - -[IMPORTANT] -==== -Wait until after an update is declared valid before starting third-party workloads. If a rollback is performed after workloads start, you can lose data. Some third-party workloads create or update data on a device before an update is complete. Upon rollback, the file system reverts to its state before the update. 
-==== \ No newline at end of file diff --git a/modules/microshift-greenboot-workloads-validation.adoc b/modules/microshift-greenboot-workloads-validation.adoc deleted file mode 100644 index becaf8b7dac2..000000000000 --- a/modules/microshift-greenboot-workloads-validation.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_running applications/microshift-greenboot.adoc - -:_content-type: PROCEDURE -[id="microshift-greenboot-workloads-validation_{context}"] -= Checking the results of an update - -After a successful start, greenboot sets the variable `boot_success=` to `1` in GRUB. You can view the overall status of system health checks after an update in the system log by using the following procedure. - -.Procedure - -* To access the overall status of system health checks, run the following command: -+ -[source, terminal] ----- -$ sudo grub2-editenv - list | grep ^boot_success ----- - -.Example output for a successful system start -[source, terminal] ----- -boot_success=1 ----- \ No newline at end of file diff --git a/modules/microshift-http-proxy.adoc b/modules/microshift-http-proxy.adoc deleted file mode 100644 index df03d3f87bfa..000000000000 --- a/modules/microshift-http-proxy.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: CONCEPT -[id="microshift-http-proxy_{context}"] -= Deploying {product-title} behind an HTTP(S) proxy - -Deploy a {product-title} cluster behind an HTTP(S) proxy when you want to add basic anonymity and security measures to your pods. - -You must configure the host operating system to use the proxy service with all components initiating HTTP(S) requests when deploying {product-title} behind a proxy. - -All the user-specific workloads or pods with egress traffic, such as accessing cloud services, must be configured to use the proxy. There is no built-in transparent proxying of egress traffic in {product-title}. diff --git a/modules/microshift-install-rpm-before.adoc b/modules/microshift-install-rpm-before.adoc deleted file mode 100644 index 253e3a160dd7..000000000000 --- a/modules/microshift-install-rpm-before.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: CONCEPT -[id="microshift-install-rpm-before_{context}"] -= Before installing {product-title} from an RPM package - -{product-title} uses the logical volume manager storage (LVMS) Container Storage Interface (CSI) plugin for providing storage to persistent volumes (PVs). LVMS relies on the Linux logical volume manager (LVM) to dynamically manage the backing logical volumes (LVs) for PVs. For this reason, your machine must have an LVM volume group (VG) with unused space in which LVMS can create the LVs for your workload's PVs. - -To configure a volume group (VG) that allows LVMS to create the LVs for your workload's PVs, lower the *Desired Size* of your root volume during the installation of {op-system}. Lowering the size of your root volume allows unallocated space on the disk for additional LVs created by LVMS at runtime. 
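As a quick pre-check before installing, you can confirm that the volume group still has unallocated space that LVMS can use, as also shown in the preparation module later in this changeset. This is a sketch that assumes the LVM tools are installed on the host:

[source,terminal]
----
$ sudo vgs -o vg_name,vg_size,vg_free
----

A nonzero value in the `VFree` column is the space that LVMS can use to create logical volumes for your workload's PVs.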
diff --git a/modules/microshift-install-rpm-preparing.adoc b/modules/microshift-install-rpm-preparing.adoc deleted file mode 100644 index ba63a7ada34b..000000000000 --- a/modules/microshift-install-rpm-preparing.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: PROCEDURE -[id="microshift-install-rpm-preparing_{context}"] -= Preparing to install {product-title} from an RPM package - -Configure your {op-system} machine to have a logical volume manager (LVM) volume group (VG) with sufficient capacity for the persistent volumes (PVs) of your workload. - -.Prerequisites - -* The system requirements for installing {product-title} have been met. -* You have root user access to your machine. -* You have configured your LVM VG with the capacity needed for the PVs of your workload. - -.Procedure - -. In the graphical installer under *Installation Destination* in the *Storage Configuration* subsection, select *Custom* -> *Done* to open the dialog for configuring partitions and volumes. The Manual Partitioning window is displayed. - -. Under *New Red Hat Enterprise Linux {op-system-version-major}.x Installation*, select *Click here to create them automatically*. - -. Select the root partition, */*, reduce *Desired Capacity* so that the VG has sufficient capacity for your PVs, and then click *Update Settings*. - -. Complete your installation. -+ -[NOTE] -==== -For more options on partition configuration, read the guide linked in the Additional information section for Configuring Manual Partitioning. -==== - -. As a root user, verify the VG capacity available on your system by running the following command: -+ -[source,terminal] ----- -$ sudo vgs ----- -+ -Example output: -+ -[source,terminal] ----- -VG #PV #LV #SN Attr VSize VFree -rhel 1 2 0 wz--n- <127.00g 54.94g ----- diff --git a/modules/microshift-install-rpms.adoc b/modules/microshift-install-rpms.adoc deleted file mode 100644 index 584e142a9b11..000000000000 --- a/modules/microshift-install-rpms.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: PROCEDURE -[id="installing-microshift-from-rpm-package_{context}"] -= Installing {product-title} from an RPM package - -Use the following procedure to install {product-title} from an RPM package. - -.Prerequisites - -* The system requirements for installing {product-title} have been met. -* You have completed the steps of preparing to install {product-title} from an RPM package. - -.Procedure - -. As a root user, enable the {product-title} repositories by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ sudo subscription-manager repos \ - --enable rhocp-{ocp-version}-for-{rhel-major}-$(uname -i)-rpms \ - --enable fast-datapath-for-{rhel-major}-$(uname -i)-rpms ----- - -. Install {product-title} by running the following command: -+ -[source,terminal] ----- -$ sudo dnf install -y microshift ----- - -. Optional: Install greenboot for {product-title} by running the following command: -+ -[source,terminal] ----- -$ sudo dnf install -y microshift-greenboot ----- - -. Download your installation pull secret from the https://console.redhat.com/openshift/install/pull-secret[Red Hat Hybrid Cloud Console] to a temporary folder, for example, `$HOME/openshift-pull-secret`. 
This pull secret allows you to authenticate with the container registries that serve the container images used by {product-title}. - -. To copy the pull secret to the `/etc/crio` folder of your {op-system} machine, run the following command: -+ -[source,terminal] ----- -$ sudo cp $HOME/openshift-pull-secret /etc/crio/openshift-pull-secret ----- - -. Make the root user the owner of the `/etc/crio/openshift-pull-secret` file by running the following command: -+ -[source,terminal] ----- -$ sudo chown root:root /etc/crio/openshift-pull-secret ----- - -. Make the `/etc/crio/openshift-pull-secret` file readable and writeable by the root user only by running the following command: -+ -[source,terminal] ----- -$ sudo chmod 600 /etc/crio/openshift-pull-secret ----- - -. If your {op-system} machine has a firewall enabled, you must configure a few mandatory firewall rules. For `firewalld`, run the following commands: -+ -[source,terminal] ----- -$ sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --permanent --zone=trusted --add-source=169.254.169.1 ----- -+ -[source,terminal] ----- -$ sudo firewall-cmd --reload ----- - -If the Volume Group (VG) that you have prepared for {product-title} used the default name `rhel`, no further configuration is necessary. If you have used a different name, or if you want to change more configuration settings, see the Configuring {product-title} section. diff --git a/modules/microshift-install-system-requirements.adoc b/modules/microshift-install-system-requirements.adoc deleted file mode 100644 index 29d45234faec..000000000000 --- a/modules/microshift-install-system-requirements.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: REFERENCE -[id="microshift-install-system-requirements_{context}"] -= System requirements for installing {product-title} - -The following conditions must be met prior to installing {product-title}: - -* {op-system} {op-system-version} -* 2 CPU cores -* 2 GB RAM for {product-title} or 3 GB RAM, required by {op-system} for networked-based HTTPs or FTP installations -* 10 GB of storage -* You have an active {product-title} subscription on your Red Hat account. If you do not have a subscription, contact your sales representative for more information. -* You have a subscription that includes {product-title} RPMs. -* You have a Logical Volume Manager (LVM) Volume Group (VG) with sufficient capacity for the Persistent Volumes (PVs) of your workload. diff --git a/modules/microshift-k8s-apis.adoc b/modules/microshift-k8s-apis.adoc deleted file mode 100644 index cfe4e4ef110e..000000000000 --- a/modules/microshift-k8s-apis.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_troubleshooting/microshift-version.adoc - -:_content-type: CONCEPT -[id="microshift-k8s-apis_{context}"] -= {product-title} Kubernetes APIs - -The Kubernetes API is fully accessible within {product-title} and can be managed with the `kubectl` command-line tool or the {OCP} CLI tool (`oc`). The `oc` binary is compatible with `kubectl` and offers a set of features that can be used with {product-title}. Using these command-line tools with {product-title} can help you access all of the resources you need to work with your deployments. 
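For example, a minimal smoke test might look like the following sketch. It assumes the generated local access `kubeconfig` path described in the kubeconfig modules of this changeset and that you are running as a user with read access to that file, such as root:

[source,terminal]
----
$ export KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig

$ oc get pods -A
----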
\ No newline at end of file diff --git a/modules/microshift-ki-cni-iptables-deleted.adoc b/modules/microshift-ki-cni-iptables-deleted.adoc deleted file mode 100644 index ebb47c1a9e88..000000000000 --- a/modules/microshift-ki-cni-iptables-deleted.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * this module is unused as of the 4.13 release; it can be kept for the procedure of deleting the ovnkube master pod if the iptables flush issue with the firewall persists - -:_content-type: PROCEDURE -[id="microshift-ki-cni-iptables-deleted_{context}"] -= Reloading the firewall deletes iptable rules - -OVN-Kubernetes handles incoming NodePort traffic by using iptable rules. When you reload firewall rules with the `firewall-cmd --reload` command, the iptable rules are deleted. This stops the NodePort service traffic and any other host traffic that uses iptable rules. - -[id="microshift-ki-cni-iptables-deleted-workaround_{context}"] -== Restarting the daemon set pod as a workaround -To troubleshoot this issue, delete the ovnkube-master pod to restart the ovnkube daemon set pod. Restarting the ovnkube daemon set pod will trigger a reconciliation of the iptable rules. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* Access to the cluster as a user with the `cluster-admin` role. -* A cluster installed on infrastructure configured with the OVN-Kubernetes network plugin. -* The KUBECONFIG environment variable is set. - -.Procedure - -Run the commands listed in each step that follows to restore the iptable rules. - -. Find the name of the ovnkube-master pod that you want to restart by running the following command: -+ -[source, terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}') ----- - -. Delete the ovnkube-master pod: -+ -[source, terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- -+ -This command causes the daemon set pod to be automatically restarted, causing a reconciliation of the iptable rules. - -. Confirm that the iptables have reconciled by running the following command: -+ -[source, terminal] ----- -$ sudo iptables-save | grep NODEPORT -:OVN-KUBE-NODEPORT - [0:0] --A PREROUTING -j OVN-KUBE-NODEPORT --A OUTPUT -j OVN-KUBE-NODEPORT --A OVN-KUBE-NODEPORT -p tcp -m addrtype --dst-type LOCAL -m tcp --dport 30768 -j DNAT --to-destination 10.43.17.173:443 --A OVN-KUBE-NODEPORT -p tcp -m addrtype --dst-type LOCAL -m tcp --dport 32122 -j DNAT --to-destination 10.43.17.173:80 ----- - -. You can also confirm that a new ovnkube-master pod has been started by running the following command: -+ -[source, terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -The listing of the running pods shows a new ovnkube-master pod name and age. 
-+ -.Example output -[cols="5",options="header"] -|=== -|NAME -|READY -|STATUS -|RESTARTS -|AGE - -|ovnkube-master-kg7ms -|4/4 -|Running -|0 -|11s - -|ovnkube-node-84gxn -|1/1 -|Running -|0 -|4d22h -|=== diff --git a/modules/microshift-kubeconfig-generating-remote-kcfiles.adoc b/modules/microshift-kubeconfig-generating-remote-kcfiles.adoc deleted file mode 100644 index 25abc420ba44..000000000000 --- a/modules/microshift-kubeconfig-generating-remote-kcfiles.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: PROCEDURE -[id="generating-additional-kubeconfig-files_{context}"] -= Generating additional kubeconfig files for remote access - -You can generate additional `kubeconfig` files to use if you need more host names or IP addresses than the default remote access file provides. - -[IMPORTANT] -==== -You must restart {product-title} for configuration changes to be implemented. -==== - -.Prerequisites - -* You have created a `config.yaml` for {product-title}. - -.Procedure - -. (Optional) You can show the contents of the `config.yaml` by running the following command: -+ -[source,terminal] ----- -$ cat /etc/microshift/config.yaml ----- - -. (Optional) You can show the contents of the remote-access `kubeconfig` file, by running the following command: -+ -[source,terminal] ----- -$ cat /var/lib/microshift/resources/kubeadmin/<hostname>/kubeconfig ----- -+ -[IMPORTANT] -==== -Additional remote access `kubeconfig` files must include one of the server names listed in the {product-title} `config.yaml` file. Additional `kubeconfig` files must also use the same CA for validation. -==== - -. To generate additional `kubeconfig` files for additional DNS names SANs or external IP addresses, add the entries you need to the `apiServer.subjectAltNames` field. In the following example, the DNS name used is `alt-name-1` and the IP address is `1.2.3.4`. -+ -.Example `config.yaml` with additional authentication values -[source,yaml] ----- -dns: - baseDomain: example.com -node: - hostnameOverride: "microshift-rhel9" <1> - nodeIP: 10.0.0.1 -apiServer: - subjectAltNames: - - alt-name-1 <2> - - 1.2.3.4 <3> ----- -<1> Hostname -<2> DNS name -<3> IP address or range - -. Restart {product-title} to apply configuration changes and auto-generate the `kubeconfig` files you need by running the following command: -+ -[source,terminal] ----- -$ sudo systemctl restart microshift ----- - -. To check the contents of additional remote-access `kubeconfig` files, insert the name or IP address as listed in the `config.yaml` into the `cat` command. For example, `alt-name-1` is used in the following example command: -+ -[source,terminal] ----- -$ cat /var/lib/microshift/resources/kubeadmin/alt-name-1/kubeconfig ----- - -. Choose the `kubeconfig` file to use that contains the SAN or IP address you want to use to connect your cluster. In this example, the `kubeconfig` containing`alt-name-1` in the `cluster.server` field is the correct file. -+ -.Example contents of an additional `kubeconfig` file -[source,yaml] ----- -clusters: -- cluster: - certificate-authority-data: <base64 CA> - server: https://alt-name-1:6443 <1> ----- -<1> The `/var/lib/microshift/resources/kubeadmin/alt-name-1/kubeconfig` file values are from the `apiServer.subjectAltNames` configuration values. 
- -[NOTE] -==== -All of these parameters are included as common names (CN) and subject alternative names (SAN) in the external serving certificates for the API server. -==== diff --git a/modules/microshift-kubeconfig-local-access.adoc b/modules/microshift-kubeconfig-local-access.adoc deleted file mode 100644 index 234029111d40..000000000000 --- a/modules/microshift-kubeconfig-local-access.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: CONCEPT -[id="microshift-kubeconfig-local-access_{context}"] -= Local access kubeconfig file - -The local access `kubeconfig` file is written to `/var/lib/microshift/resources/kubeadmin/kubeconfig`. This `kubeconfig` file provides access to the API server using `localhost`. Choose this file when you are connecting to the cluster locally. - -.Example contents of `kubeconfig` for local access -[source,yaml] ----- -clusters: -- cluster: - certificate-authority-data: <base64 CA> - server: https://localhost:6443 ----- - -The `localhost` `kubeconfig` file can only be used from a client connecting to the API server from the same host. The certificates in the file do not work for remote connections. \ No newline at end of file diff --git a/modules/microshift-kubeconfig-overview.adoc b/modules/microshift-kubeconfig-overview.adoc deleted file mode 100644 index a336ea2d255a..000000000000 --- a/modules/microshift-kubeconfig-overview.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: CONCEPT -[id="kubeconfig-files-overview_{context}"] -= Kubeconfig files for configuring cluster access - -The two categories of `kubeconfig` files used in {product-title} are local access and remote access. Every time {product-title} starts, a set of `kubeconfig` files for local and remote access to the API server is generated. These files are generated in the `/var/lib/microshift/resources/kubeadmin/` directory using preexisting configuration information. - -Each access type requires a different authentication certificate signed by different Certificate Authorities (CAs). The generation of multiple `kubeconfig` files accommodates this need. - -You can use the appropriate `kubeconfig` file for the access type needed in each case to provide authentication details. The contents of {product-title} `kubeconfig` files are determined by either default built-in values or a `config.yaml` file. - -[NOTE] -==== -A `kubeconfig` file must exist for the cluster to be accessible. The values are applied from built-in default values or a `config.yaml`, if one was created. -==== - -.Example contents of the kubeconfig files -[source,terminal] ----- -/var/lib/microshift/resources/kubeadmin/ -├── kubeconfig <1> -├── alt-name-1 <2> -│ └── kubeconfig -├── 1.2.3.4 <3> -│ └── kubeconfig -└── microshift-rhel9 <4> - └── kubeconfig ----- -<1> Local host name. The main IP address of the host is always the default. -<2> DNS name Subject Alternative Name (SAN) for the API server certificates. -<3> IP address SAN for the API server certificates. -<4> {product-title} host name.
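As a brief sketch of how one of these generated files might be selected, assuming local access from the {product-title} host itself and that your user can read the file with `sudo` (the `~/.kube/config` target path is an assumption, not a generated value):

[source,terminal]
----
$ mkdir -p ~/.kube/
$ sudo cat /var/lib/microshift/resources/kubeadmin/kubeconfig > ~/.kube/config
$ oc get all -A
----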
diff --git a/modules/microshift-kubeconfig-remote-con.adoc b/modules/microshift-kubeconfig-remote-con.adoc deleted file mode 100644 index 44ca66fc5e8d..000000000000 --- a/modules/microshift-kubeconfig-remote-con.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/microshift_configuring/microshift-cluster-access-kubeconfig.adoc - -:_content-type: CONCEPT -[id="remote-access-con_{context}"] -= Remote access kubeconfig files - -When a {product-title} cluster connects to the API server from an external source, a certificate with all of the alternative names in the SAN field is used for validation. {product-title} generates a default `kubeconfig` for external access using the `hostname` value. The defaults are set in the `<node.hostnameOverride>`, `<node.nodeIP>` and `api.<dns.baseDomain>` parameter values of the default `kubeconfig` file. - -The `/var/lib/microshift/resources/kubeadmin/<hostname>/kubeconfig` file uses the `hostname` of the machine, or `node.hostnameOverride` if that option is set, to reach the API server. The CA of the `kubeconfig` file is able to validate certificates when accessed externally. - -.Example contents of a default `kubeconfig` file for remote access -[source,yaml] ----- -clusters: -- cluster: - certificate-authority-data: <base64 CA> - server: https://microshift-rhel9:6443 ----- - -//line space was not showing on PV1 preview, so added extra blank line -[id="remote-access-customization_{context}"] -== Remote access customization -Multiple remote access `kubeconfig` file values can be generated for accessing the cluster with different IP addresses or host names. An additional `kubeconfig` file generates for each entry in the `apiServer.subjectAltNames` parameter. You can copy remote access `kubeconfig` files from the host during times of IP connectivity and then use them to access the API server from other workstations. diff --git a/modules/microshift-lvmd-yaml-creating.adoc b/modules/microshift-lvmd-yaml-creating.adoc deleted file mode 100644 index 6a75da025913..000000000000 --- a/modules/microshift-lvmd-yaml-creating.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_storage/microshift-storage-plugin-overview.adoc - -:_content-type: PROCEDURE -[id="microshift-lvmd-yaml-creating_{context}"] -= Creating an LVMS configuration file - -When {product-title} runs, it uses LVMS configuration from `/etc/microshift/lvmd.yaml`, if provided. You must place any configuration files that you create into the `/etc/microshift/` directory. - -.Procedure - -* To create the `lvmd.yaml` configuration file, run the following command: -+ -[source, terminal] ----- -$ sudo cp /etc/microshift/lvmd.yaml.default /etc/microshift/lvmd.yaml ----- diff --git a/modules/microshift-lvms-config-example-basic.adoc b/modules/microshift-lvms-config-example-basic.adoc deleted file mode 100644 index 3b32a967f012..000000000000 --- a/modules/microshift-lvms-config-example-basic.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_storage/microshift-storage-plugin-overview.adoc - -:_content-type: CONCEPT -[id="microshift-lvmd-config-example-basic_{context}"] -= Basic LVMS configuration example - -{product-title} supports passing through your LVM configuration and allows you to specify custom volume groups, thin volume provisioning parameters, and reserved unallocated volume group space. 
You can edit the LVMS configuration file you created at any time. You must restart {product-title} to deploy configuration changes after editing the file. - -The following `lvmd.yaml` example file shows a basic LVMS configuration: - -.LVMS configuration example - -[source,yaml] ----- -socket-name: <1> -device-classes: <2> - - name: "default" <3> - volume-group: "VGNAMEHERE" <4> - spare-gb: 0 <5> - default: <6> ----- -<1> String. The UNIX domain socket endpoint of gRPC. Defaults to '/run/lvmd/lvmd.socket'. -<2> A list of maps for the settings for each `device-class`. -<3> String. The name of the `device-class`. -<4> String. The group where the `device-class` creates the logical volumes. -<5> Unsigned 64-bit integer. Storage capacity in GiB to be left unallocated in the volume group. Defaults to `0`. -<6> Boolean. Indicates that the `device-class` is used by default. Defaults to `false`. At least one value must be entered in the YAML file when this is set to `true`. - -[IMPORTANT] -==== -A race condition prevents LVMS from accurately tracking the allocated space and preserving the `spare-gb` for a device class when multiple PVCs are created simultaneously. Use separate volume groups and device classes to protect the storage of highly dynamic workloads from each other. -==== diff --git a/modules/microshift-lvms-deployment.adoc b/modules/microshift-lvms-deployment.adoc deleted file mode 100644 index 7673e01d687d..000000000000 --- a/modules/microshift-lvms-deployment.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_storage/microshift-storage-plugin-overview.adoc - -:_content-type: CONCEPT -[id="microshift-lvms-deployment_{context}"] -= LVMS deployment - -LVMS is automatically deployed to the cluster in the `openshift-storage` namespace after {product-title} starts. - -LVMS uses `StorageCapacity` tracking to ensure that pods with an LVMS PVC are not scheduled if the requested storage is greater than the free storage of the volume group. For more information about `StorageCapacity` tracking, read link:https://kubernetes.io/docs/concepts/storage/storage-capacity/[Storage Capacity]. \ No newline at end of file diff --git a/modules/microshift-lvms-system-requirements.adoc b/modules/microshift-lvms-system-requirements.adoc deleted file mode 100644 index e8db08078234..000000000000 --- a/modules/microshift-lvms-system-requirements.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_storage/microshift-storage-plugin-overview.adoc - -:_content-type: CONCEPT -[id="microshift-lvms-system-requirements_{context}"] -= LVMS system requirements - -Using LVMS in {product-title} requires the following system specifications. - -[id="lvms-volume-group-name_{context}"] -== Volume group name - -The default integration of LVMS selects the default volume group (VG) dynamically. If there are no volume groups on the {product-title} host, LVMS is disabled. - -If there is only one VG on the {product-title} host, that VG is used. If there are multiple volume groups, the group `microshift` is used. If the `microshift` group is not found, LVMS is disabled. - -If you want to use a specific VG, LVMS must be configured to select that VG. - -You can change the default name of the VG in the configuration file.
For details, read the "Configuring the LVMS" section of this document. - -Prior to launching, the `lvmd.yaml` configuration file must specify an existing VG on the node with sufficient capacity for workload storage. If the VG does not exist, the node controller fails to start and enters a `CrashLoopBackoff` state. - -[id="lvms-volume-size-increments_{context}"] -== Volume size increments - -The LVMS provisions storage in increments of 1 gigabyte (GB). Storage requests are rounded up to the nearest GB. When the capacity of a VG is less than 1 GB, the `PersistentVolumeClaim` registers a `ProvisioningFailed` event, for example: - -.Example output -[source,terminal] ----- -Warning ProvisioningFailed 3s (x2 over 5s) topolvm.cybozu.com_topolvm-controller-858c78d96c-xttzp_0fa83aef-2070-4ae2-bcb9-163f818dcd9f failed to provision volume with -StorageClass "topolvm-provisioner": rpc error: code = ResourceExhausted desc = no enough space left on VG: free=(BYTES_INT), requested=(BYTES_INT) ----- diff --git a/modules/microshift-lvms-using.adoc b/modules/microshift-lvms-using.adoc deleted file mode 100644 index 2ba49899258e..000000000000 --- a/modules/microshift-lvms-using.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_storage/microshift-storage-plugin-overview.adoc - -:_content-type: PROCEDURE -[id="microshift-lvms-using_{context}"] -= Using the LVMS - -The LVMS `StorageClass` is deployed with a default `StorageClass`. Any `PersistentVolumeClaim` objects without a `.spec.storageClassName` defined automatically has a `PersistentVolume` provisioned from the default `StorageClass`. Use the following procedure to provision and mount a logical volume to a pod. - -.Procedure - -* To provision and mount a logical volume to a pod, run the following command: -+ -[source,terminal] ----- -$ cat <<EOF | oc apply -f - -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: my-lv-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1G ---- -apiVersion: v1 -kind: Pod -metadata: - name: my-pod -spec: - containers: - - name: nginx - image: nginx - command: ["/usr/bin/sh", "-c"] - args: ["sleep", "1h"] - volumeMounts: - - mountPath: /mnt - name: my-volume - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumes: - - name: my-volume - persistentVolumeClaim: - claimName: my-lv-pvc -EOF ----- diff --git a/modules/microshift-mDNS.adoc b/modules/microshift-mDNS.adoc deleted file mode 100644 index a4d57253a67f..000000000000 --- a/modules/microshift-mDNS.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: CONCEPT -[id="microshift-mDNS_{context}"] -= The multicast DNS protocol - -You can use the multicast DNS protocol (mDNS) to allow name resolution and service discovery within a Local Area Network (LAN) using multicast exposed on the `5353/UDP` port. - -{product-title} includes an embedded mDNS server for deployment scenarios in which the authoritative DNS server cannot be reconfigured to point clients to services on {product-title}. The embedded DNS server allows `.local` domains exposed by {product-title} to be discovered by other elements on the LAN. 
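A rough way to confirm mDNS discovery from another machine on the same LAN, assuming the `avahi-tools` package is installed on that machine and `<hostname>` stands in for the `.local` name that {product-title} exposes:

[source,terminal]
----
$ avahi-resolve-host-name <hostname>.local
$ ping -c 3 <hostname>.local
----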
diff --git a/modules/microshift-manifests-overview.adoc b/modules/microshift-manifests-overview.adoc deleted file mode 100644 index c9af8ad7f757..000000000000 --- a/modules/microshift-manifests-overview.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift/using-config-tools.adoc - -:_content-type: CONCEPT -[id="microshift-manifests-overview_{context}"] -= How manifests work with kustomize - -The `kustomize` configuration management tool is integrated with {product-title}. At every start, {product-title} searches the `/etc/microshift/manifests` and `/usr/lib/microshift/manifests` directories for a `kustomization.yaml` file. If it finds one, {product-title} automatically runs the equivalent of the `kubectl apply -k` command to apply the identified manifests to the cluster. - -[cols="2",options="header"] -|=== -|Location -|Intent - -|`/etc/microshift/manifests` -|Read-write location for configuration management systems or development. - -|`/usr/lib/microshift/manifests` -|Read-only location for embedding configuration manifests on OSTree-based systems. -|=== \ No newline at end of file diff --git a/modules/microshift-nodeport-unreachable-workaround.adoc b/modules/microshift-nodeport-unreachable-workaround.adoc deleted file mode 100644 index 4bef2a62fce3..000000000000 --- a/modules/microshift-nodeport-unreachable-workaround.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * module may be unused in 4.13 - -:_content-type: PROCEDURE -[id="microshift-nodeport-unreachable-workaround_{context}"] -= Manually restarting the `ovnkube-master` pod to resume node port traffic - -After you install {product-title}, NodePort service traffic might stop. To troubleshoot this issue, manually restart the `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* A cluster installed on infrastructure configured with the Open Virtual Network (OVN)-Kubernetes network plugin. -* Access to the `kubeconfig` file. -* The KUBECONFIG environment variable is set. - -.Procedure - -Run the commands listed in each step that follows to restore the `NodePort` service traffic after you install {product-title}: - -. Find the name of the ovnkube-master pod that you want to restart by running the following command: -+ -[source, terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master | awk -F " " '{print $1}') ----- - -. Force a restart of the ovnkube-master pod by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- - -. Optional: To confirm that the ovnkube-master pod restarted, run the following command: -+ -[source, terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -If the pod restarted, the listing of the running pods shows a different ovnkube-master pod name and age consistent with the procedure you just completed. - -. Verify that the `NodePort` service can now be reached, for example by sending a request to a node port as shown in the sketch that follows this procedure.
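A possible check for the last step, assuming the `NodePort` service fronts an HTTP workload; `<node_ip>` and `<node_port>` are placeholders for your own values:

[source,terminal]
----
$ curl -I http://<node_ip>:<node_port>
----

An HTTP response header, such as `HTTP/1.1 200 OK`, indicates that node port traffic is reaching the workload again.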
- diff --git a/modules/microshift-observe-debug-etcd-server.adoc b/modules/microshift-observe-debug-etcd-server.adoc deleted file mode 100644 index 943f7d3550ef..000000000000 --- a/modules/microshift-observe-debug-etcd-server.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -//* microshift_support/microshift-etcd.adoc - -:_content-type: PROCEDURE -[id="microshift-observe-debug-etcd-server_{context}"] -= Observe and debug the {product-title} etcd server - -You can gather `journalctl` logs to observe and debug the etcd server logs. - -.Prerequisites - -* The {product-title} service is running. - -.Procedure - -* To get the logs for etcd, run the following command: -+ -[source,terminal] ----- -$ sudo journalctl -u microshift-etcd.scope ----- -+ -[NOTE] -==== -{product-title} logs can be accessed separately from etcd logs using the `journalctl -u microshift` command. -==== \ No newline at end of file diff --git a/modules/microshift-oc-adm-by-example-content.adoc b/modules/microshift-oc-adm-by-example-content.adoc deleted file mode 100644 index 670459b44c64..000000000000 --- a/modules/microshift-oc-adm-by-example-content.adoc +++ /dev/null @@ -1,241 +0,0 @@ -// NOTE: The contents of this file are auto-generated -// This template is for admin ('oc adm ...') commands -// Uses 'source,bash' for proper syntax highlighting for comments in examples - -:_content-type: REFERENCE -[id="microshift-oc-cli-admin_{context}"] -= OpenShift CLI (oc) administrator commands - -//IMPORTANT: QE'd and hand-edited for relevance to MicroShift; use this version to check auto-generated files for 4.14 - -//== oc adm build-chain - -== oc adm catalog mirror -Mirror an operator-registry catalog - -.Example usage -[source,bash,options="nowrap"] ----- - # Mirror an operator-registry image and its contents to a registry - oc adm catalog mirror quay.io/my/image:latest myregistry.com - - # Mirror an operator-registry image and its contents to a particular namespace in a registry - oc adm catalog mirror quay.io/my/image:latest myregistry.com/my-namespace - - # Mirror to an airgapped registry by first mirroring to files - oc adm catalog mirror quay.io/my/image:latest file:///local/index - oc adm catalog mirror file:///local/index/my/image:latest my-airgapped-registry.com - - # Configure a cluster to use a mirrored registry - oc apply -f manifests/imageContentSourcePolicy.yaml - - # Edit the mirroring mappings and mirror with "oc image mirror" manually - oc adm catalog mirror --manifests-only quay.io/my/image:latest myregistry.com - oc image mirror -f manifests/mapping.txt - - # Delete all ImageContentSourcePolicies generated by oc adm catalog mirror - oc delete imagecontentsourcepolicy -l operators.openshift.org/catalog=true ----- - -//== oc adm certificate approve -//== oc adm certificate deny -//== oc adm cordon -//== oc adm create-bootstrap-project-template -//== oc adm create-error-template -//== oc adm create-login-template -//== oc adm create-provider-selection-template -//== oc adm drain -//== oc adm groups add-users -//== oc adm groups new -//== oc adm groups prune -//== oc adm groups remove-users -//== oc adm groups sync - -== oc adm inspect -Collect debugging data for a given resource -//NOTE: This was hand-edited per QE in 4.13. This section is correct as is. 
-.Example usage -[source,bash,options="nowrap"] ----- - # Collect debugging data for the "microshift-apiserver" - oc adm inspect service/kubernetes - - # Collect debugging data for the "microshift-apiserver" and "toptlvm-apiserver" - oc adm inspect service/kubernetes crd/logicalvolumes.topolvm.io - - # Collect debugging data for services - oc adm inspect service - - # Collect debugging data for all clusterversions - oc adm inspect service,crd ----- - -== oc adm migrate icsp -Update imagecontentsourcepolicy file(s) to imagedigestmirrorset file(s). - -.Example usage -[source,bash,options="nowrap"] ----- - # update the imagecontentsourcepolicy.yaml to new imagedigestmirrorset file under directory mydir - oc adm migrate icsp imagecontentsourcepolicy.yaml --dest-dir mydir ----- - -//== oc adm migrate template-instances -//== oc adm must-gather -//== oc adm new-project - - -//== oc adm node-logs -Display and filter node logs - -.Example usage -[source,bash,options="nowrap"] ----- - # Show kubelet logs from all masters - oc adm node-logs --role master -u kubelet - - # See what logs are available in masters in /var/logs - oc adm node-logs --role master --path=/ - - # Display cron log file from all masters - oc adm node-logs --role master --path=cron ----- - -//== oc adm pod-network isolate-projects -//== oc adm pod-network join-projects -//== oc adm pod-network make-projects-global -//== oc adm policy add-role-to-user -//== oc adm policy add-scc-to-group -//== oc adm policy add-scc-to-user -//== oc adm policy scc-review -//== oc adm policy scc-subject-review -//== oc adm prune builds -//== oc adm prune deployments -//== oc adm prune groups -//== oc adm prune images - - -== oc adm release extract -Extract the contents of an update payload to disk - -.Example usage -[source,bash,options="nowrap"] ----- - # Use git to check out the source code for the current cluster release to DIR - oc adm release extract --git=DIR - - # Extract cloud credential requests for AWS - oc adm release extract --credentials-requests --cloud=aws - - # Use git to check out the source code for the current cluster release to DIR from linux/s390x image - # Note: Wildcard filter is not supported. Pass a single os/arch to extract - oc adm release extract --git=DIR quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x ----- - - - -== oc adm release info -Display information about a release - -.Example usage -[source,bash,options="nowrap"] ----- - # Show information about the cluster's current release - oc adm release info - - # Show the source code that comprises a release - oc adm release info 4.11.2 --commit-urls - - # Show the source code difference between two releases - oc adm release info 4.11.0 4.11.2 --commits - - # Show where the images referenced by the release are located - oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --pullspecs - - # Show information about linux/s390x image - # Note: Wildcard filter is not supported. 
Pass a single os/arch to extract - oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x ----- - - - -== oc adm release mirror -Mirror a release to a different image registry location - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a dry run showing what would be mirrored, including the mirror objects - oc adm release mirror 4.11.0 --to myregistry.local/openshift/release \ - --release-image-signature-to-dir /tmp/releases --dry-run - - # Mirror a release into the current directory - oc adm release mirror 4.11.0 --to file://openshift/release \ - --release-image-signature-to-dir /tmp/releases - - # Mirror a release to another directory in the default location - oc adm release mirror 4.11.0 --to-dir /tmp/releases - - # Upload a release from the current directory to another server - oc adm release mirror --from file://openshift/release --to myregistry.com/openshift/release \ - --release-image-signature-to-dir /tmp/releases - - # Mirror the 4.11.0 release to repository registry.example.com and apply signatures to connected cluster - oc adm release mirror --from=quay.io/openshift-release-dev/ocp-release:4.11.0-x86_64 \ - --to=registry.example.com/your/repository --apply-release-image-signature ----- - - -//== oc adm release new - -== oc adm taint -Update the taints on one or more nodes - -.Example usage -[source,bash,options="nowrap"] ----- - # Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule' - # If a taint with that key and effect already exists, its value is replaced as specified - oc adm taint nodes foo dedicated=special-user:NoSchedule - - # Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists - oc adm taint nodes foo dedicated:NoSchedule- - - # Remove from node 'foo' all the taints with key 'dedicated' - oc adm taint nodes foo dedicated- - - # Add a taint with key 'dedicated' on nodes having label mylabel=X - oc adm taint node -l myLabel=X dedicated=foo:PreferNoSchedule - - # Add to node 'foo' a taint with key 'bar' and no value - oc adm taint nodes foo bar:NoSchedule ----- - - -//== oc adm top images -//== oc adm top imagestreams -//== oc adm top node - - -== oc adm top pod -Display resource (CPU/memory) usage of pods - -.Example usage -[source,bash,options="nowrap"] ----- - # Show metrics for all pods in the default namespace - oc adm top pod - - # Show metrics for all pods in the given namespace - oc adm top pod --namespace=NAMESPACE - - # Show metrics for a given pod and its containers - oc adm top pod POD_NAME --containers - - # Show metrics for the pods defined by label name=myLabel - oc adm top pod -l name=myLabel ----- - -//== oc adm uncordon -//== oc adm upgrade -//== oc adm verify-image-signature diff --git a/modules/microshift-oc-apis-errors.adoc b/modules/microshift-oc-apis-errors.adoc deleted file mode 100644 index 63b4b9abfc96..000000000000 --- a/modules/microshift-oc-apis-errors.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift-cli-using-oc/microshift-oc-apis-errors.adoc - -:_content-type: CONCEPT -[id="microshift-oc-apis-errors_{context}"] -= oc command errors in {product-title} - -Not all OpenShift CLI (oc) tool commands are relevant for {product-title} deployments. When you use `oc` to make a request call against an unsupported API, the `oc` binary usually generates an error message about a resource that cannot be found. 
- -.Example output - -For example, when the following `new-project` command is run: - -[source, terminal] ----- -$ oc new-project test ----- - -The following error message can be generated: - -[source, terminal] ----- -Error from server (NotFound): the server could not find the requested resource (get projectrequests.project.openshift.io) ----- - -And when the `get projects` command is run, another error can be generated as follows: - -[source, terminal] ----- -$ oc get projects -error: the server doesn't have a resource type "projects" ----- \ No newline at end of file diff --git a/modules/microshift-oc-by-example-content.adoc b/modules/microshift-oc-by-example-content.adoc deleted file mode 100644 index aae63daafda0..000000000000 --- a/modules/microshift-oc-by-example-content.adoc +++ /dev/null @@ -1,2371 +0,0 @@ -// NOTE: The contents of this file are auto-generated -// This template is for non-admin (not 'oc adm ...') commands -// Uses 'source,bash' for proper syntax highlighting for comments in examples - -:_content-type: REFERENCE -[id="microshift-oc-cli-developer_{context}"] -= OpenShift CLI (oc) developer commands - -//NOTE: this is the autogenerated version, one command edited out - -== oc annotate -Update the annotations on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the annotation 'description' and the value 'my frontend' - # If the same annotation is set multiple times, only the last value will be applied - oc annotate pods foo description='my frontend' - - # Update a pod identified by type and name in "pod.json" - oc annotate -f pod.json description='my frontend' - - # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value - oc annotate --overwrite pods foo description='my frontend running nginx' - - # Update all pods in the namespace - oc annotate pods --all description='my frontend running nginx' - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc annotate pods foo description='my frontend running nginx' --resource-version=1 - - # Update pod 'foo' by removing an annotation named 'description' if it exists - # Does not require the --overwrite flag - oc annotate pods foo description- ----- - - - -== oc api-resources -Print the supported API resources on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API resources - oc api-resources - - # Print the supported API resources with more information - oc api-resources -o wide - - # Print the supported API resources sorted by a column - oc api-resources --sort-by=name - - # Print the supported namespaced resources - oc api-resources --namespaced=true - - # Print the supported non-namespaced resources - oc api-resources --namespaced=false - - # Print the supported API resources with a specific APIGroup - oc api-resources --api-group=rbac.authorization.k8s.io ----- - - - -== oc api-versions -Print the supported API versions on the server, in the form of "group/version" - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API versions - oc api-versions ----- - - - -== oc apply -Apply a configuration to a resource by file name or stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Apply the configuration in pod.json to a pod - oc apply -f ./pod.json - - # Apply resources from a directory containing kustomization.yaml - e.g. 
dir/kustomization.yaml - oc apply -k dir/ - - # Apply the JSON passed into stdin to a pod - cat pod.json | oc apply -f - - - # Apply the configuration from all files that end with '.json' - i.e. expand wildcard characters in file names - oc apply -f '*.json' - - # Note: --prune is still in Alpha - # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx - oc apply --prune -f manifest.yaml -l app=nginx - - # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file - oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap ----- - - - -== oc apply edit-last-applied -Edit latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the last-applied-configuration annotations by type/name in YAML - oc apply edit-last-applied deployment/nginx - - # Edit the last-applied-configuration annotations by file in JSON - oc apply edit-last-applied -f deploy.yaml -o json ----- - - - -== oc apply set-last-applied -Set the last-applied-configuration annotation on a live object to match the contents of a file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the last-applied-configuration of a resource to match the contents of a file - oc apply set-last-applied -f deploy.yaml - - # Execute set-last-applied against each configuration file in a directory - oc apply set-last-applied -f path/ - - # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist - oc apply set-last-applied -f deploy.yaml --create-annotation=true ----- - - - -== oc apply view-last-applied -View the latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # View the last-applied-configuration annotations by type/name in YAML - oc apply view-last-applied deployment/nginx - - # View the last-applied-configuration annotations by file in JSON - oc apply view-last-applied -f deploy.yaml -o json ----- - - - -== oc attach -Attach to a running container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation - # for selecting the container to be attached or the first container in the pod will be chosen - oc attach mypod - - # Get output from ruby-container from pod mypod - oc attach mypod -c ruby-container - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc attach mypod -c ruby-container -i -t - - # Get output from the first pod of a replica set named nginx - oc attach rs/nginx ----- - - - -== oc auth can-i -Check whether an action is allowed - -.Example usage -[source,bash,options="nowrap"] ----- - # Check to see if I can create pods in any namespace - oc auth can-i create pods --all-namespaces - - # Check to see if I can list deployments in my current namespace - oc auth can-i list deployments.apps - - # Check to see if I can do everything in my current namespace ("*" means all) - oc auth can-i '*' '*' - - # Check to see if I can get the job named "bar" in namespace "foo" - oc auth can-i list jobs.batch/bar -n foo - - # Check to see if I can read pod logs - oc auth can-i get pods --subresource=log - - # Check to see if I can access the URL /logs/ - oc auth can-i 
get /logs/ - - # List all allowed actions in namespace "foo" - oc auth can-i --list --namespace=foo ----- - - - -== oc auth reconcile -Reconciles rules for RBAC role, role binding, cluster role, and cluster role binding objects - -.Example usage -[source,bash,options="nowrap"] ----- - # Reconcile RBAC resources from a file - oc auth reconcile -f my-rbac-rules.yaml ----- - -//== oc autoscale -//removed, does not apply to MicroShift - -== oc cluster-info -Display cluster information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the address of the control plane and cluster services - oc cluster-info ----- - - - -== oc cluster-info dump -Dump relevant information for debugging and diagnosis - -.Example usage -[source,bash,options="nowrap"] ----- - # Dump current cluster state to stdout - oc cluster-info dump - - # Dump current cluster state to /path/to/cluster-state - oc cluster-info dump --output-directory=/path/to/cluster-state - - # Dump all namespaces to stdout - oc cluster-info dump --all-namespaces - - # Dump a set of namespaces to /path/to/cluster-state - oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state ----- - - - -== oc completion -Output shell completion code for the specified shell (bash, zsh, fish, or powershell) - -.Example usage -[source,bash,options="nowrap"] ----- - # Installing bash completion on macOS using homebrew - ## If running Bash 3.2 included with macOS - brew install bash-completion - ## or, if running Bash 4.1+ - brew install bash-completion@2 - ## If oc is installed via homebrew, this should start working immediately - ## If you've installed via other means, you may need add the completion to your completion directory - oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc - - - # Installing bash completion on Linux - ## If bash-completion is not installed on Linux, install the 'bash-completion' package - ## via your distribution's package manager. 
- ## Load the oc completion code for bash into the current shell - source <(oc completion bash) - ## Write bash completion code to a file and source it from .bash_profile - oc completion bash > ~/.kube/completion.bash.inc - printf " - # Kubectl shell completion - source '$HOME/.kube/completion.bash.inc' - " >> $HOME/.bash_profile - source $HOME/.bash_profile - - # Load the oc completion code for zsh[1] into the current shell - source <(oc completion zsh) - # Set the oc completion code for zsh[1] to autoload on startup - oc completion zsh > "${fpath[1]}/_oc" - - - # Load the oc completion code for fish[2] into the current shell - oc completion fish | source - # To load completions for each session, execute once: - oc completion fish > ~/.config/fish/completions/oc.fish - - # Load the oc completion code for powershell into the current shell - oc completion powershell | Out-String | Invoke-Expression - # Set oc completion code for powershell to run on startup - ## Save completion code to a script and execute in the profile - oc completion powershell > $HOME\.kube\completion.ps1 - Add-Content $PROFILE "$HOME\.kube\completion.ps1" - ## Execute completion code in the profile - Add-Content $PROFILE "if (Get-Command oc -ErrorAction SilentlyContinue) { - oc completion powershell | Out-String | Invoke-Expression - }" - ## Add completion code directly to the $PROFILE script - oc completion powershell >> $PROFILE ----- - - - -== oc config current-context -Display the current-context - -.Example usage -[source,bash,options="nowrap"] ----- - # Display the current-context - oc config current-context ----- - - - -== oc config delete-cluster -Delete the specified cluster from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube cluster - oc config delete-cluster minikube ----- - - - -== oc config delete-context -Delete the specified context from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the context for the minikube cluster - oc config delete-context minikube ----- - - - -== oc config delete-user -Delete the specified user from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube user - oc config delete-user minikube ----- - - - -== oc config get-clusters -Display clusters defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the clusters that oc knows about - oc config get-clusters ----- - - - -== oc config get-contexts -Describe one or many contexts - -.Example usage -[source,bash,options="nowrap"] ----- - # List all the contexts in your kubeconfig file - oc config get-contexts - - # Describe one context in your kubeconfig file - oc config get-contexts my-context ----- - - - -== oc config get-users -Display users defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the users that oc knows about - oc config get-users ----- - - - -== oc config rename-context -Rename a context from the kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Rename the context 'old-name' to 'new-name' in your kubeconfig file - oc config rename-context old-name new-name ----- - - - -== oc config set -Set an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the server field on the my-cluster cluster to https://1.2.3.4 - oc config set clusters.my-cluster.server https://1.2.3.4 - - # Set the certificate-authority-data field on the my-cluster cluster - oc config 
set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) - - # Set the cluster field in the my-context context to my-cluster - oc config set contexts.my-context.cluster my-cluster - - # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option - oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true ----- - - - -== oc config set-cluster -Set a cluster entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the server field on the e2e cluster entry without touching other values - oc config set-cluster e2e --server=https://1.2.3.4 - - # Embed certificate authority data for the e2e cluster entry - oc config set-cluster e2e --embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt - - # Disable cert checking for the e2e cluster entry - oc config set-cluster e2e --insecure-skip-tls-verify=true - - # Set custom TLS server name to use for validation for the e2e cluster entry - oc config set-cluster e2e --tls-server-name=my-cluster-name - - # Set proxy url for the e2e cluster entry - oc config set-cluster e2e --proxy-url=https://1.2.3.4 ----- - - - -== oc config set-context -Set a context entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the user field on the gce context entry without touching other values - oc config set-context gce --user=cluster-admin ----- - - - -== oc config set-credentials -Set a user entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the "client-key" field on the "cluster-admin" - # entry, without touching other values - oc config set-credentials cluster-admin --client-key=~/.kube/admin.key - - # Set basic auth for the "cluster-admin" entry - oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif - - # Embed client certificate data in the "cluster-admin" entry - oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true - - # Enable the Google Compute Platform auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=gcp - - # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar - - # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- - - # Enable new exec auth plugin for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 - - # Define new exec auth plugin args for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 - - # Create or update exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 - - # Remove exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=var-to-remove- ----- - - - -== oc config unset -Unset an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Unset the current-context - oc config unset current-context - - # Unset namespace in foo context - oc config unset 
contexts.foo.namespace ----- - - - -== oc config use-context -Set the current-context in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Use the context for the minikube cluster - oc config use-context minikube ----- - - - -== oc config view -Display merged kubeconfig settings or a specified kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Show merged kubeconfig settings - oc config view - - # Show merged kubeconfig settings and raw certificate data and exposed secrets - oc config view --raw - - # Get the password for the e2e user - oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' ----- - - - -== oc cp -Copy files and directories to and from containers - -.Example usage -[source,bash,options="nowrap"] ----- - # !!!Important Note!!! - # Requires that the 'tar' binary is present in your container - # image. If 'tar' is not present, 'oc cp' will fail. - # - # For advanced use cases, such as symlinks, wildcard expansion or - # file mode preservation, consider using 'oc exec'. - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> - tar cf - /tmp/foo | oc exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar xf - -C /tmp/bar - - # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace - oc cp /tmp/foo_dir <some-pod>:/tmp/bar_dir - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container - oc cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container> - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> - oc cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar ----- - - - -== oc create -Create a resource from a file or from stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod using the data in pod.json - oc create -f ./pod.json - - # Create a pod based on the JSON passed into stdin - cat pod.json | oc create -f - - - # Edit the data in registry.yaml in JSON then create the resource using the edited data - oc create -f registry.yaml --edit -o json ----- - - - -== oc create clusterrole -Create a cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create clusterrole pod-reader --verb=get,list,watch --resource=pods - - # Create a cluster role named "pod-reader" with ResourceName specified - oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a cluster role named "foo" with API Group specified - oc create clusterrole foo --verb=get,list,watch --resource=rs.apps - - # Create a cluster role named "foo" with SubResource specified - oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status - - # Create a cluster role name "foo" with NonResourceURL specified - oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* - - # Create a cluster role name "monitoring" with AggregationRule specified - oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ----- - - - -== oc create clusterrolebinding -Create a cluster role binding for a particular cluster role - -.Example 
usage -[source,bash,options="nowrap"] ----- - # Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role - oc create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create configmap -Create a config map from a local file, directory or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new config map named my-config based on folder bar - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config with specified keys instead of file basenames on disk - oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt - - # Create a new config map named my-config with key1=config1 and key2=config2 - oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 - - # Create a new config map named my-config from the key=value pairs in the file - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config from an env file - oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create cronjob -Create a cron job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cron job - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" - - # Create a cron job with a command - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date ----- - - - -== oc create deployment -Create a deployment with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a deployment named my-dep that runs the busybox image - oc create deployment my-dep --image=busybox - - # Create a deployment with a command - oc create deployment my-dep --image=busybox -- date - - # Create a deployment named my-dep that runs the nginx image with 3 replicas - oc create deployment my-dep --image=nginx --replicas=3 - - # Create a deployment named my-dep that runs the busybox image and expose port 5701 - oc create deployment my-dep --image=busybox --port=5701 ----- - - - -== oc create ingress -Create an ingress with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc - # svc1:8080 with a tls secret "my-cert" - oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" - - # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" - oc create ingress catch-all --class=otheringress --rule="/path=svc:port" - - # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 - oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ - --annotation ingress.annotation1=foo \ - --annotation ingress.annotation2=bla - - # Create an ingress with the same host and multiple paths - oc create ingress multipath --class=default \ - --rule="foo.com/=svc:port" \ - --rule="foo.com/admin/=svcadmin:portadmin" - - # Create an ingress with multiple hosts and the pathType as Prefix - oc create ingress ingress1 --class=default \ - --rule="foo.com/path*=svc:8080" \ - --rule="bar.com/admin*=svc2:http" - - # Create an ingress with TLS enabled using the default ingress certificate and different path types - oc create ingress ingtls --class=default \ - --rule="foo.com/=svc:https,tls" \ - 
--rule="foo.com/path/subpath*=othersvc:8080" - - # Create an ingress with TLS enabled using a specific secret and pathType as Prefix - oc create ingress ingsecret --class=default \ - --rule="foo.com/*=svc:8080,tls=secret1" - - # Create an ingress with a default backend - oc create ingress ingdefault --class=default \ - --default-backend=defaultsvc:http \ - --rule="foo.com/*=svc:8080,tls=secret1" ----- - - - -== oc create job -Create a job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a job - oc create job my-job --image=busybox - - # Create a job with a command - oc create job my-job --image=busybox -- date - - # Create a job from a cron job named "a-cronjob" - oc create job test-job --from=cronjob/a-cronjob ----- - - - -== oc create namespace -Create a namespace with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new namespace named my-namespace - oc create namespace my-namespace ----- - - - -== oc create poddisruptionbudget -Create a pod disruption budget with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label - # and require at least one of them being available at any point in time - oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 - - # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label - # and require at least half of the pods selected to be available at any point in time - oc create pdb my-pdb --selector=app=nginx --min-available=50% ----- - - - -== oc create priorityclass -Create a priority class with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a priority class named high-priority - oc create priorityclass high-priority --value=1000 --description="high priority" - - # Create a priority class named default-priority that is considered as the global default priority - oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" - - # Create a priority class named high-priority that cannot preempt pods with lower priority - oc create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" ----- - - - -== oc create quota -Create a quota with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new resource quota named my-quota - oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 - - # Create a new resource quota named best-effort - oc create quota best-effort --hard=pods=100 --scopes=BestEffort ----- - - - -== oc create role -Create a role with single rule - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods - - # Create a role named "pod-reader" with ResourceName specified - oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a role named "foo" with API Group specified - oc create role foo --verb=get,list,watch --resource=rs.apps - - # Create a role named "foo" with SubResource specified - oc create role foo --verb=get,list,watch --resource=pods,pods/status ----- - - - -== oc create rolebinding -Create a role 
binding for a particular role or cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role binding for user1, user2, and group1 using the admin cluster role - oc create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create route edge -Create a route that uses edge TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create an edge route named "my-route" that exposes the frontend service - oc create route edge my-route --service=frontend - - # Create an edge route that exposes the frontend service and specify a path - # If the route name is omitted, the service name will be used - oc create route edge --service=frontend --path /assets ----- - - - -== oc create route passthrough -Create a route that uses passthrough TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a passthrough route named "my-route" that exposes the frontend service - oc create route passthrough my-route --service=frontend - - # Create a passthrough route that exposes the frontend service and specify - # a host name. If the route name is omitted, the service name will be used - oc create route passthrough --service=frontend --hostname=www.example.com ----- - - - -== oc create route reencrypt -Create a route that uses reencrypt TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route named "my-route" that exposes the frontend service - oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert - - # Create a reencrypt route that exposes the frontend service, letting the - # route name default to the service name and the destination CA certificate - # default to the service CA - oc create route reencrypt --service=frontend ----- - - - -== oc create secret docker-registry -Create a secret for use with a Docker registry - -.Example usage -[source,bash,options="nowrap"] ----- - # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: - oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - - # Create a new secret named my-secret from ~/.docker/config.json - oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json ----- - - - -== oc create secret generic -Create a secret from a local file, directory, or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new secret named my-secret with keys for each file in folder bar - oc create secret generic my-secret --from-file=path/to/bar - - # Create a new secret named my-secret with specified keys instead of names on disk - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub - - # Create a new secret named my-secret with key1=supersecret and key2=topsecret - oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret - - # Create a new secret named my-secret using a combination of a file and a literal - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret - - # Create a new secret named my-secret from env files - oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create secret tls -Create a TLS secret - -.Example usage 
-[source,bash,options="nowrap"] ----- - # Create a new TLS secret named tls-secret with the given key pair - oc create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key ----- - - - -== oc create service clusterip -Create a ClusterIP service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ClusterIP service named my-cs - oc create service clusterip my-cs --tcp=5678:8080 - - # Create a new ClusterIP service named my-cs (in headless mode) - oc create service clusterip my-cs --clusterip="None" ----- - - - -== oc create service externalname -Create an ExternalName service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ExternalName service named my-ns - oc create service externalname my-ns --external-name bar.com ----- - - - -== oc create service loadbalancer -Create a LoadBalancer service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new LoadBalancer service named my-lbs - oc create service loadbalancer my-lbs --tcp=5678:8080 ----- - - - -== oc create service nodeport -Create a NodePort service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new NodePort service named my-ns - oc create service nodeport my-ns --tcp=5678:8080 ----- - - - -== oc create serviceaccount -Create a service account with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new service account named my-service-account - oc create serviceaccount my-service-account ----- - - - -== oc create token -Request a service account token - -.Example usage -[source,bash,options="nowrap"] ----- - # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace - oc create token myapp - - # Request a token for a service account in a custom namespace - oc create token myapp --namespace myns - - # Request a token with a custom expiration - oc create token myapp --duration 10m - - # Request a token with a custom audience - oc create token myapp --audience https://example.com - - # Request a token bound to an instance of a Secret object - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret - - # Request a token bound to an instance of a Secret object with a specific uid - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc ----- - - - -== oc debug -Launch a new instance of a pod for debugging - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a shell session into a pod using the OpenShift tools image - oc debug - - # Debug a currently running deployment by creating a new pod - oc debug deploy/test - - # Debug a node as an administrator - oc debug node/master-1 - - # Launch a shell in a pod using the provided image stream tag - oc debug istag/mysql:latest -n openshift - - # Test running a job as a non-root user - oc debug job/test --as-user=1000000 - - # Debug a specific failing container by running the env command in the 'second' container - oc debug daemonset/test -c second -- /bin/env - - # See the pod that would be created to debug - oc debug mypod-9xbc -o yaml - - # Debug a resource but launch the debug pod in another namespace - # Note: Not all resources can be debugged using --to-namespace without modification. For example, - # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition - # to disk. 
If necessary, edit the definition then run 'oc debug -f -' or run without --to-namespace - oc debug mypod-9xbc --to-namespace testns ----- - - - -== oc delete -Delete resources by file names, stdin, resources and names, or by resources and label selector - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete a pod using the type and name specified in pod.json - oc delete -f ./pod.json - - # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml - oc delete -k dir - - # Delete resources from all files that end with '.json' - i.e. expand wildcard characters in file names - oc delete -f '*.json' - - # Delete a pod based on the type and name in the JSON passed into stdin - cat pod.json | oc delete -f - - - # Delete pods and services with same names "baz" and "foo" - oc delete pod,service baz foo - - # Delete pods and services with label name=myLabel - oc delete pods,services -l name=myLabel - - # Delete a pod with minimal delay - oc delete pod foo --now - - # Force delete a pod on a dead node - oc delete pod foo --force - - # Delete all pods - oc delete pods --all ----- - - - -== oc describe -Show details of a specific resource or group of resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Describe a node - oc describe nodes kubernetes-node-emt8.c.myproject.internal - - # Describe a pod - oc describe pods/nginx - - # Describe a pod identified by type and name in "pod.json" - oc describe -f pod.json - - # Describe all pods - oc describe pods - - # Describe pods by label name=myLabel - oc describe po -l name=myLabel - - # Describe all pods managed by the 'frontend' replication controller - # (rc-created pods get the name of the rc as a prefix in the pod name) - oc describe pods frontend ----- - - - -== oc diff -Diff the live version against a would-be applied version - -.Example usage -[source,bash,options="nowrap"] ----- - # Diff resources included in pod.json - oc diff -f pod.json - - # Diff file read from stdin - cat service.yaml | oc diff -f - ----- - - - -== oc edit -Edit a resource on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the service named 'registry' - oc edit svc/registry - - # Use an alternative editor - KUBE_EDITOR="nano" oc edit svc/registry - - # Edit the job 'myjob' in JSON using the v1 API format - oc edit job.v1.batch/myjob -o json - - # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation - oc edit deployment/mydeployment -o yaml --save-config - - # Edit the deployment/mydeployment's status subresource - oc edit deployment mydeployment --subresource='status' ----- - - - -== oc events -List events - -.Example usage -[source,bash,options="nowrap"] ----- - # List recent events in the default namespace. - oc events - - # List recent events in all namespaces. - oc events --all-namespaces - - # List recent events for the specified pod, then wait for more events and list them as they arrive. - oc events --for pod/web-pod-13je7 --watch - - # List recent events in given format. Supported ones, apart from default, are json and yaml. 
- oc events -oyaml - - # List recent only events in given event types - oc events --types=Warning,Normal ----- - - - -== oc exec -Execute a command in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running the 'date' command from pod mypod, using the first container by default - oc exec mypod -- date - - # Get output from running the 'date' command in ruby-container from pod mypod - oc exec mypod -c ruby-container -- date - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc exec mypod -c ruby-container -i -t -- bash -il - - # List contents of /usr from the first container of pod mypod and sort by modification time - # If the command you want to execute in the pod has any flags in common (e.g. -i), - # you must use two dashes (--) to separate your command's flags/arguments - # Also note, do not surround your command and its flags/arguments with quotes - # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") - oc exec mypod -i -t -- ls -t /usr - - # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default - oc exec deploy/mydeployment -- date - - # Get output from running 'date' command from the first pod of the service myservice, using the first container by default - oc exec svc/myservice -- date ----- - - - -== oc explain -Get documentation for a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Get the documentation of the resource and its fields - oc explain pods - - # Get the documentation of a specific field of a resource - oc explain pods.spec.containers ----- - - - -== oc expose -Expose a replicated application as a service or route - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route based on service nginx. The new route will reuse nginx's labels - oc expose service nginx - - # Create a route and specify your own label and route name - oc expose service nginx -l name=myroute --name=fromdowntown - - # Create a route and specify a host name - oc expose service nginx --hostname=www.example.com - - # Create a route with a wildcard - oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain - # This would be equivalent to *.example.com. 
NOTE: only hosts are matched by the wildcard; subdomains would not be included - - # Expose a deployment configuration as a service and use the specified port - oc expose dc ruby-hello-world --port=8080 - - # Expose a service as a route in the specified path - oc expose service nginx --path=/nginx ----- - - - -== oc extract -Extract secrets or config maps to disk - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the secret "test" to the current directory - oc extract secret/test - - # Extract the config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp - - # Extract the config map "nginx" to STDOUT - oc extract configmap/nginx --to=- - - # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp --keys=nginx.conf ----- - - - -== oc get -Display one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # List all pods in ps output format - oc get pods - - # List all pods in ps output format with more information (such as node name) - oc get pods -o wide - - # List a single replication controller with specified NAME in ps output format - oc get replicationcontroller web - - # List deployments in JSON output format, in the "v1" version of the "apps" API group - oc get deployments.v1.apps -o json - - # List a single pod in JSON output format - oc get -o json pod web-pod-13je7 - - # List a pod identified by type and name specified in "pod.yaml" in JSON output format - oc get -f pod.yaml -o json - - # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml - oc get -k dir/ - - # Return only the phase value of the specified pod - oc get -o template pod/web-pod-13je7 --template={{.status.phase}} - - # List resource information in custom columns - oc get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image - - # List all replication controllers and services together in ps output format - oc get rc,services - - # List one or more resources by their type and names - oc get rc/web service/frontend pods/web-pod-13je7 - - # List status subresource for a single pod. 
- oc get pod web-pod-13je7 --subresource status ----- - - - -== oc image append -Add layers to images and push them to a registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Remove the entrypoint on the mysql:latest image - oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' - - # Add a new layer to the image - oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to the image and store the result on disk - # This results in $(pwd)/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local layer.tar.gz - - # Add a new layer to the image and store the result on disk in a designated directory - # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz - - # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) - oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) - oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch - # Note: Wildcard filter is not supported with append. Pass a single os/arch to append - oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz ----- - - - -== oc image extract -Copy files from an image to the file system - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the busybox image into the current directory - oc image extract docker.io/library/busybox:latest - - # Extract the busybox image into a designated directory (must exist) - oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox - - # Extract the busybox image into the current directory for linux/s390x platform - # Note: Wildcard filter is not supported with extract. Pass a single os/arch to extract - oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x - - # Extract a single file from the image into the current directory - oc image extract docker.io/library/centos:7 --path /bin/bash:. - - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
- - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) - # This results in /tmp/yum.repos.d/*.repo on local system - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d - - # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) - # --confirm is required because the current directory is not empty - oc image extract file://busybox:local --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory - # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) - oc image extract file://busybox:local --dir busybox-mirror-dir --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) - oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox - - # Extract the last layer in the image - oc image extract docker.io/library/centos:7[-1] - - # Extract the first three layers of the image - oc image extract docker.io/library/centos:7[:3] - - # Extract the last three layers of the image - oc image extract docker.io/library/centos:7[-3:] ----- - - - -== oc image info -Display information about an image - -.Example usage -[source,bash,options="nowrap"] ----- - # Show information about an image - oc image info quay.io/openshift/cli:latest - - # Show information about images matching a wildcard - oc image info quay.io/openshift/cli:4.* - - # Show information about a file mirrored to disk under DIR - oc image info --dir=DIR file://library/busybox:latest - - # Select which image from a multi-OS image to show - oc image info library/busybox:latest --filter-by-os=linux/arm64 ----- - - - -== oc image mirror -Mirror images from one repository to another - -.Example usage -[source,bash,options="nowrap"] ----- - # Copy image to another tag - oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable - - # Copy image to another registry - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable - - # Copy all tags starting with mysql to the destination repository - oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage - - # Copy image to disk, creating a directory structure that can be served as a registry - oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest - - # Copy image to S3 (pull from <bucket>.s3.amazonaws.com/image:latest) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image:latest - - # Copy image to S3 without setting a tag (pull via @<digest>) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image - - # Copy image to multiple locations - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ - docker.io/myrepository/myimage:dev - - # Copy multiple images - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - myregistry.com/myimage:new=myregistry.com/other:target - - # Copy manifest list of a multi-architecture image, even if only a single image is found - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Copy specific os/arch manifest of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images - # Note that with 
multi-arch images, this results in a new manifest list digest that includes only - # the filtered manifests - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=os/arch - - # Copy all os/arch manifests of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Note the above command is equivalent to - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=.* ----- - - - -== oc kustomize -Build a kustomization target from a directory or URL. - -.Example usage -[source,bash,options="nowrap"] ----- - # Build the current working directory - oc kustomize - - # Build some shared configuration directory - oc kustomize /home/config/production - - # Build from github - oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 ----- - - - -== oc label -Update the labels on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the label 'unhealthy' and the value 'true' - oc label pods foo unhealthy=true - - # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value - oc label --overwrite pods foo status=unhealthy - - # Update all pods in the namespace - oc label pods --all status=unhealthy - - # Update a pod identified by the type and name in "pod.json" - oc label -f pod.json status=unhealthy - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc label pods foo status=unhealthy --resource-version=1 - - # Update pod 'foo' by removing a label named 'bar' if it exists - # Does not require the --overwrite flag - oc label pods foo bar- ----- - - - -== oc logs -Print the logs for a container in a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Start streaming the logs of the most recent build of the openldap build config - oc logs -f bc/openldap - - # Start streaming the logs of the latest deployment of the mysql deployment config - oc logs -f dc/mysql - - # Get the logs of the first deployment for the mysql deployment config. 
Note that logs - # from older deployments may not exist either because the deployment was successful - # or due to deployment pruning or manual deletion of the deployment - oc logs --version=1 dc/mysql - - # Return a snapshot of ruby-container logs from pod backend - oc logs backend -c ruby-container - - # Start streaming of ruby-container logs from pod backend - oc logs -f pod/backend -c ruby-container ----- - - - -== oc observe -Observe changes to resources and react to them (experimental) - -.Example usage -[source,bash,options="nowrap"] ----- - # Observe changes to services - oc observe services - - # Observe changes to services, including the clusterIP and invoke a script for each - oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh - - # Observe changes to services filtered by a label selector - oc observe namespaces -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh ----- - - - -== oc patch -Update fields of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Partially update a node using a strategic merge patch, specifying the patch as JSON - oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' - - # Partially update a node using a strategic merge patch, specifying the patch as YAML - oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' - - # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch - oc patch -f node.json -p '{"spec":{"unschedulable":true}}' - - # Update a container's image; spec.containers[*].name is required because it's a merge key - oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' - - # Update a container's image using a JSON patch with positional arrays - oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - - # Update a deployment's replicas through the scale subresource using a merge patch. 
- oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' ----- - - - -== oc plugin list -List all visible plugin executables on a user's PATH - -.Example usage -[source,bash,options="nowrap"] ----- - # List all available plugins - oc plugin list ----- - - - -== oc policy add-role-to-user -Add a role to users or service accounts for the current project - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'view' role to user1 for the current project - oc policy add-role-to-user view user1 - - # Add the 'edit' role to serviceaccount1 for the current project - oc policy add-role-to-user edit -z serviceaccount1 ----- - - - -== oc policy scc-review -Check which service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml - # Service Account specified in myresource.yaml file is ignored - oc policy scc-review -z sa1,sa2 -f my_resource.yaml - - # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml - oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - - # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod - oc policy scc-review -f my_resource_with_sa.yaml - - # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml - oc policy scc-review -f myresource_with_no_sa.yaml ----- - - - -== oc policy scc-subject-review -Check whether a user or a service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether user bob can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -f myresource.yaml - - # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - - # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod - oc policy scc-subject-review -f myresourcewithsa.yaml ----- - - - -== oc port-forward -Forward one or more local ports to a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod - oc port-forward pod/mypod 5000 6000 - - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment - oc port-forward deployment/mydeployment 5000 6000 - - # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service - oc port-forward service/myservice 8443:https - - # Listen on port 8888 locally, forwarding to 5000 in the pod - oc port-forward pod/mypod 8888:5000 - - # Listen on port 8888 on all addresses, forwarding to 5000 in the pod - oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 - - # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod - oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 - - # Listen on a random port locally, forwarding to 5000 in the pod - oc port-forward pod/mypod :5000 ----- - - - -== oc proxy -Run a proxy to the Kubernetes API server - -.Example usage -[source,bash,options="nowrap"] ----- - # To proxy all of the Kubernetes API and 
nothing else - oc proxy --api-prefix=/ - - # To proxy only part of the Kubernetes API and also some static files - # You can get pods info with 'curl localhost:8001/api/v1/pods' - oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ - - # To proxy the entire Kubernetes API at a different root - # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' - oc proxy --api-prefix=/custom/ - - # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ - oc proxy --port=8011 --www=./local/www/ - - # Run a proxy to the Kubernetes API server on an arbitrary local port - # The chosen port for the server will be output to stdout - oc proxy --port=0 - - # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api - # This makes e.g. the pods API available at localhost:8001/k8s-api/v1/pods/ - oc proxy --api-prefix=/k8s-api ----- - - - -== oc rollback -Revert part of an application back to a previous deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a rollback to the last successfully completed deployment for a deployment config - oc rollback frontend - - # See what a rollback to version 3 will look like, but do not perform the rollback - oc rollback frontend --to-version=3 --dry-run - - # Perform a rollback to a specific deployment - oc rollback frontend-2 - - # Perform the rollback manually by piping the JSON of the new config back to oc - oc rollback frontend -o json | oc replace dc/frontend -f - - - # Print the updated deployment configuration in JSON format instead of performing the rollback - oc rollback frontend -o json ----- - - - -== oc rollout cancel -Cancel the in-progress deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Cancel the in-progress deployment based on 'nginx' - oc rollout cancel dc/nginx ----- - - - -== oc rollout history -View rollout history - -.Example usage -[source,bash,options="nowrap"] ----- - # View the rollout history of a deployment - oc rollout history dc/nginx - - # View the details of deployment revision 3 - oc rollout history dc/nginx --revision=3 ----- - - - -== oc rollout latest -Start a new rollout for a deployment config with the latest state from its triggers - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a new rollout based on the latest images defined in the image change triggers - oc rollout latest dc/nginx - - # Print the rolled out deployment config - oc rollout latest dc/nginx -o json ----- - - - -== oc rollout pause -Mark the provided resource as paused - -.Example usage -[source,bash,options="nowrap"] ----- - # Mark the nginx deployment as paused. 
Any current state of - # the deployment will continue its function, new updates to the deployment will not - # have an effect as long as the deployment is paused - oc rollout pause dc/nginx ----- - - - -== oc rollout restart -Restart a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Restart a deployment - oc rollout restart deployment/nginx - - # Restart a daemon set - oc rollout restart daemonset/abc - - # Restart deployments with the app=nginx label - oc rollout restart deployment --selector=app=nginx ----- - - - -== oc rollout resume -Resume a paused resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Resume an already paused deployment - oc rollout resume dc/nginx ----- - - - -== oc rollout retry -Retry the latest failed rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Retry the latest failed deployment based on 'frontend' - # The deployer pod and any hook pods are deleted for the latest failed deployment - oc rollout retry dc/frontend ----- - - - -== oc rollout status -Show the status of the rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Watch the status of the latest rollout - oc rollout status dc/nginx ----- - - - -== oc rollout undo -Undo a previous rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Roll back to the previous deployment - oc rollout undo dc/nginx - - # Roll back to deployment revision 3. The replication controller for that version must exist - oc rollout undo dc/nginx --to-revision=3 ----- - - - -== oc rsh -Start a shell session in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Open a shell session on the first container in pod 'foo' - oc rsh foo - - # Open a shell session on the first container in pod 'foo' and namespace 'bar' - # (Note that oc client specific arguments must come before the resource name and its arguments) - oc rsh -n bar foo - - # Run the command 'cat /etc/resolv.conf' inside pod 'foo' - oc rsh foo cat /etc/resolv.conf - - # See the configuration of your internal registry - oc rsh dc/docker-registry cat config.yml - - # Open a shell session on the container named 'index' inside a pod of your job - oc rsh -c index job/sheduled ----- - - - -== oc rsync -Copy files between a local file system and a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Synchronize a local directory with a pod directory - oc rsync ./local/dir/ POD:/remote/dir - - # Synchronize a pod directory with a local directory - oc rsync POD:/remote/dir/ ./local/dir ----- - - - -== oc run -Run a particular image on the cluster - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a nginx pod - oc run nginx --image=nginx - - # Start a hazelcast pod and let the container expose port 5701 - oc run hazelcast --image=hazelcast/hazelcast --port=5701 - - # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container - oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" - - # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container - oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" - - # Dry run; print the corresponding API objects without creating them - oc run nginx --image=nginx --dry-run=client - - # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON - oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... 
} }' - - # Start a busybox pod and keep it in the foreground, don't restart it if it exits - oc run -i -t busybox --image=busybox --restart=Never - - # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command - oc run nginx --image=nginx -- <arg1> <arg2> ... <argN> - - # Start the nginx pod using a different command and custom arguments - oc run nginx --image=nginx --command -- <cmd> <arg1> ... <argN> ----- - - - -== oc scale -Set a new size for a deployment, replica set, or replication controller - -.Example usage -[source,bash,options="nowrap"] ----- - # Scale a replica set named 'foo' to 3 - oc scale --replicas=3 rs/foo - - # Scale a resource identified by type and name specified in "foo.yaml" to 3 - oc scale --replicas=3 -f foo.yaml - - # If the deployment named mysql's current size is 2, scale mysql to 3 - oc scale --current-replicas=2 --replicas=3 deployment/mysql - - # Scale multiple replication controllers - oc scale --replicas=5 rc/foo rc/bar rc/baz - - # Scale stateful set named 'web' to 3 - oc scale --replicas=3 statefulset/web ----- - - - -== oc secrets link -Link secrets to a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Add an image pull secret to a service account to automatically use it for pulling pod images - oc secrets link serviceaccount-name pull-secret --for=pull - - # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images - oc secrets link builder builder-image-secret --for=pull,mount ----- - - - -== oc secrets unlink -Detach secrets from a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Unlink a secret currently associated with a service account - oc secrets unlink serviceaccount-name secret-name another-secret-name ... 
----- - - - -== oc set data -Update the data within a config map or secret - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the 'password' key of a secret - oc set data secret/foo password=this_is_secret - - # Remove the 'password' key from a secret - oc set data secret/foo password- - - # Update the 'haproxy.conf' key of a config map from a file on disk - oc set data configmap/bar --from-file=../haproxy.conf - - # Update a secret with the contents of a directory, one key per file - oc set data secret/foo --from-file=secret-dir ----- - - - -== oc set env -Update environment variables on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Update deployment config 'myapp' with a new environment variable - oc set env dc/myapp STORAGE_DIR=/local - - # List the environment variables defined on a build config 'sample-build' - oc set env bc/sample-build --list - - # List the environment variables defined on all pods - oc set env pods --all --list - - # Output modified build config in YAML - oc set env bc/sample-build STORAGE_DIR=/data -o yaml - - # Update all containers in all replication controllers in the project to have ENV=prod - oc set env rc --all ENV=prod - - # Import environment from a secret - oc set env --from=secret/mysecret dc/myapp - - # Import environment from a config map with a prefix - oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp - - # Remove the environment variable ENV from container 'c1' in all deployment configs - oc set env dc --all --containers="c1" ENV- - - # Remove the environment variable ENV from a deployment config definition on disk and - # update the deployment config on the server - oc set env -f dc.json ENV- - - # Set some of the local shell environment into a deployment config on the server - oc set env | grep RAILS_ | oc env -e - dc/myapp ----- - - - -== oc set image -Update the image of a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployment configs's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. - oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 - - # Set a deployment configs's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'. 
- oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag - - # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' - oc set image deployments,rc nginx=nginx:1.9.1 --all - - # Update image of all containers of daemonset abc to 'nginx:1.9.1' - oc set image daemonset abc *=nginx:1.9.1 - - # Print result (in yaml format) of updating nginx container image from local file, without hitting the server - oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml ----- - - - -== oc set image-lookup -Change how images are resolved when deploying applications - -.Example usage -[source,bash,options="nowrap"] ----- - # Print all of the image streams and whether they resolve local names - oc set image-lookup - - # Use local name lookup on image stream mysql - oc set image-lookup mysql - - # Force a deployment to use local name lookup - oc set image-lookup deploy/mysql - - # Show the current status of the deployment lookup - oc set image-lookup deploy/mysql --list - - # Disable local name lookup on image stream mysql - oc set image-lookup mysql --enabled=false - - # Set local name lookup on all image streams - oc set image-lookup --all ----- - - - -== oc set probe -Update a probe on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear both readiness and liveness probes off all containers - oc set probe dc/myapp --remove --readiness --liveness - - # Set an exec action as a liveness probe to run 'echo ok' - oc set probe dc/myapp --liveness -- echo ok - - # Set a readiness probe to try to open a TCP socket on 3306 - oc set probe rc/mysql --readiness --open-tcp=3306 - - # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --startup --get-url=http://:8080/healthz - - # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --readiness --get-url=http://:8080/healthz - - # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod - oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats - - # Set only the initial-delay-seconds field on all deployments - oc set probe dc --all --readiness --initial-delay-seconds=30 ----- - - - -== oc set resources -Update resource requests/limits on objects with pod templates - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployments nginx container CPU limits to "200m and memory to 512Mi" - oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi - - # Set the resource request and limits for all containers in nginx - oc set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi - - # Remove the resource requests for resources on containers in nginx - oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 - - # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server - oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml ----- - - - -== oc set route-backends -Update the backends for a route - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the backends on the route 'web' - oc set route-backends web - - # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' - oc set route-backends web a=2 b=1 - - # Increase the traffic percentage going to b by 10%% relative to a - oc set route-backends web --adjust b=+10%% - - # Set traffic percentage going to b to 
10%% of the traffic going to a - oc set route-backends web --adjust b=10%% - - # Set weight of b to 10 - oc set route-backends web --adjust b=10 - - # Set the weight to all backends to zero - oc set route-backends web --zero ----- - - - -== oc set selector -Set the selector on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the labels and selector before creating a deployment/service pair. - oc create service clusterip my-svc --clusterip="None" -o yaml --dry-run | oc set selector --local -f - 'environment=qa' -o yaml | oc create -f - - oc create deployment my-dep -o yaml --dry-run | oc label --local -f - environment=qa -o yaml | oc create -f - ----- - - - -== oc set serviceaccount -Update the service account of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set deployment nginx-deployment's service account to serviceaccount1 - oc set serviceaccount deployment nginx-deployment serviceaccount1 - - # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server - oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml ----- - - - -== oc set subject -Update the user, group, or service account in a role binding or cluster role binding - -.Example usage -[source,bash,options="nowrap"] ----- - # Update a cluster role binding for serviceaccount1 - oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 - - # Update a role binding for user1, user2, and group1 - oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 - - # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server - oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml ----- - - - -== oc set volumes -Update volumes on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # List volumes defined on all deployment configs in the current project - oc set volume dc --all - - # Add a new empty dir volume to deployment config (dc) 'myapp' mounted under - # /var/lib/myapp - oc set volume dc/myapp --add --mount-path=/var/lib/myapp - - # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite - - # Remove volume 'v1' from deployment config 'myapp' - oc set volume dc/myapp --remove --name=v1 - - # Create a new persistent volume claim that overwrites an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite - - # Change the mount point for volume 'v1' to /data - oc set volume dc/myapp --add --name=v1 -m /data --overwrite - - # Modify the deployment config by removing volume mount "v1" from container "c1" - # (and by removing the volume "v1" if no other containers have volume mounts that reference it) - oc set volume dc/myapp --remove --name=v1 --containers=c1 - - # Add new volume based on a more complex volume source (AWS EBS, GCE PD, - # Ceph, Gluster, NFS, ISCSI, ...) 
- oc set volume dc/myapp --add -m /data --source=<json-string> ----- - - - -== oc tag -Tag existing images into image streams - -.Example usage -[source,bash,options="nowrap"] ----- - # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby with tag 'tip' - oc tag openshift/ruby:2.0 yourproject/ruby:tip - - # Tag a specific image - oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip - - # Tag an external container image - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip - - # Tag an external container image and request pullthrough for it - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local - - # Tag an external container image and include the full manifest list - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal - - # Remove the specified spec tag from an image stream - oc tag openshift/origin-control-plane:latest -d ----- - - - -== oc version -Print the client and server version information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context - oc version - - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context - oc version --short - - # Print the OpenShift client version information for the current context - oc version --client ----- - - - -== oc wait -Experimental: Wait for a specific condition on one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Wait for the pod "busybox1" to contain the status condition of type "Ready" - oc wait --for=condition=Ready pod/busybox1 - - # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more general form of case-insensitivity): - oc wait --for=condition=Ready=false pod/busybox1 - - # Wait for the pod "busybox1" to contain the status phase to be "Running". - oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 - - # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command - oc delete pod/busybox1 - oc wait --for=delete pod/busybox1 --timeout=60s ----- - - diff --git a/modules/microshift-ovs-snapshot.adoc b/modules/microshift-ovs-snapshot.adoc deleted file mode 100644 index e89775f08094..000000000000 --- a/modules/microshift-ovs-snapshot.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-OVS-snapshot_{context}"] -= Getting a snapshot of OVS interfaces from a running cluster - -A snapshot represents the state and data of OVS interfaces at a specific point in time. 
- -.Procedure - -* To see a snapshot of OVS interfaces from a running {product-title} cluster, use the following command: - -[source, terminal] ----- -$ sudo ovs-vsctl show ----- - -.Example OVS interfaces in a running cluster -[source, terminal] ----- -9d9f5ea2-9d9d-4e34-bbd2-dbac154fdc93 - Bridge br-ex - Port enp1s0 - Interface enp1s0 - type: system - Port br-ex - Interface br-ex - type: internal - Port patch-br-ex_localhost.localdomain-to-br-int <1> - Interface patch-br-ex_localhost.localdomain-to-br-int - type: patch - options: {peer=patch-br-int-to-br-ex_localhost.localdomain} <1> - Bridge br-int - fail_mode: secure - datapath_type: system - Port patch-br-int-to-br-ex_localhost.localdomain - Interface patch-br-int-to-br-ex_localhost.localdomain - type: patch - options: {peer=patch-br-ex_localhost.localdomain-to-br-int} - Port eebee1ce5568761 - Interface eebee1ce5568761 <2> - Port b47b1995ada84f4 - Interface b47b1995ada84f4 <2> - Port "3031f43d67c167f" - Interface "3031f43d67c167f" <2> - Port br-int - Interface br-int - type: internal - Port ovn-k8s-mp0 <3> - Interface ovn-k8s-mp0 - type: internal - ovs_version: "2.17.3" ----- -<1> The `patch-br-ex_localhost.localdomain-to-br-int` and `patch-br-int-to-br-ex_localhost.localdomain` are OVS patch ports that connect `br-ex` and `br-int`. -<2> The pod interfaces `eebee1ce5568761`, `b47b1995ada84f4` and `3031f43d67c167f` are named with the first 15 bits of pod sandbox ID and are plugged in the `br-int` bridge. -<3> The OVS internal port for hairpin traffic,`ovn-k8s-mp0` is created by the `ovnkube-master` container. \ No newline at end of file diff --git a/modules/microshift-preparing-for-image-building.adoc b/modules/microshift-preparing-for-image-building.adoc deleted file mode 100644 index a63aaa3a3444..000000000000 --- a/modules/microshift-preparing-for-image-building.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// microshift_install/microshift-embed-rpm-ostree.adoc - -:_content-type: CONCEPT -[id="preparing-for-image-building_{context}"] -= Preparing for image building - -Read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images[Composing, installing, and managing RHEL for Edge images]. - -[IMPORTANT] -==== -{product-title} {ocp-version} deployments have only been tested with {op-system-ostree-first} {op-system-version}. Other versions of {op-system} are not supported. -==== - -To build an {op-system-ostree-first} {op-system-version} image for a given CPU architecture, you need a {op-system} {op-system-version} build host of the same CPU architecture that meets the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-image-builder-system-requirements_setting-up-image-builder[Image Builder system requirements]. - -Follow the instructions in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-installing-image-builder_setting-up-image-builder[Installing Image Builder] to install Image Builder and the `composer-cli` tool. 
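The preceding module assumes a build host where Image Builder and the `composer-cli` tool are already available. As a minimal sketch only, assuming the package names used by the linked {op-system} 9 Image Builder documentation (`osbuild-composer` and `composer-cli`), the setup typically looks similar to the following:

[source,terminal]
----
# Install Image Builder and the composer-cli tool
$ sudo dnf install -y osbuild-composer composer-cli

# Start the Image Builder service socket
$ sudo systemctl enable --now osbuild-composer.socket

# Confirm that composer-cli can reach the service
$ sudo composer-cli status show
----

Verify the exact package set and any additional system requirements against the linked Image Builder documentation before composing images.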
diff --git a/modules/microshift-provisioning-ostree.adoc b/modules/microshift-provisioning-ostree.adoc deleted file mode 100644 index b66cd1c768e2..000000000000 --- a/modules/microshift-provisioning-ostree.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-embed-into-rpm-ostree.adoc - -:_content-type: PROCEDURE -[id="provisioning-a-machine_{context}"] -= Provisioning a machine for {product-title} - -Provision a machine with your {op-system-ostree} image by using the procedures from the {op-system-ostree} documentation. - -To use {product-title}, you must provision the system so that it meets the following requirements: - -* The machine you are provisioning must meet the system requirements for installing {product-title}. -* The file system must have a logical volume manager (LVM) volume group (VG) with sufficient capacity for the persistent volumes (PVs) of your workload. -* A pull secret from the https://console.redhat.com/openshift/install/pull-secret[Red Hat Hybrid Cloud Console] must be present as `/etc/crio/openshift-pull-secret` and have root user-only read/write permissions. -* The firewall must be configured with {product-title}'s required firewall settings. - -[NOTE] -==== -If you are using a Kickstart such as the {op-system-ostree} Installer (ISO) image, you can update your Kickstart file to meet the provisioning requirements. -==== - -.Prerequisites - -. You have created an {op-system-ostree} Installer (ISO) image containing your {op-system-ostree} commit with {product-title}. -.. This requirement includes the steps of composing an RFE Container image, creating the RFE Installer blueprint, starting the RFE container, and composing the RFE Installer image. -. Create a Kickstart file or use an existing one. In the Kickstart file, you must include: -.. Detailed instructions about how to create a user. -.. How to fetch and deploy the {op-system-ostree} image. - -For more information, read "Additional resources." - -.Procedure - -. In the main section of the Kickstart file, update the setup of the filesystem such that it contains an LVM volume group called `rhel` with at least 10GB system root. Leave free space for the LVMS CSI driver to use for storing the data for your workloads. -+ -.Example kickstart snippet for configuring the filesystem -+ -[source,text] ----- -# Partition disk such that it contains an LVM volume group called `rhel` with a -# 10GB+ system root but leaving free space for the LVMS CSI driver for storing data. -# -# For example, a 20GB disk would be partitioned in the following way: -# -# NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT -# sda 8:0 0 20G 0 disk -# ├─sda1 8:1 0 200M 0 part /boot/efi -# ├─sda1 8:1 0 800M 0 part /boot -# └─sda2 8:2 0 19G 0 part -# └─rhel-root 253:0 0 10G 0 lvm /sysroot -# -ostreesetup --nogpg --osname=rhel --remote=edge \ ---url=file:///run/install/repo/ostree/repo --ref=rhel/<RHEL VERSION NUMBER>/x86_64/edge -zerombr -clearpart --all --initlabel -part /boot/efi --fstype=efi --size=200 -part /boot --fstype=xfs --asprimary --size=800 -# Uncomment this line to add a SWAP partition of the recommended size -#part swap --fstype=swap --recommended -part pv.01 --grow -volgroup rhel pv.01 -logvol / --vgname=rhel --fstype=xfs --size=10000 --name=root -# To add users, use a line such as the following -user --name=user \ ---password=$6$HFVVV521NB4kOKVG$0.hM652uIOBNsC45kvFpMuRVkfNGHToMdQ6PDTU8DcEF30Gz/3DUwW153Gc9EvNMkH50qYfBO5os/FJsXTLLt. \ ---iscrypted --groups=wheel ----- - -. 
In the `%post` section of the Kickstart file, add your pull secret and the mandatory firewall rules. -+ -.Example Kickstart snippet for adding the pull secret and firewall rules - -[source,terminal] ----- -%post --log=/var/log/anaconda/post-install.log --erroronfail - -# Add the pull secret to CRI-O and set root user-only read/write permissions -cat > /etc/crio/openshift-pull-secret << EOF -YOUR_OPENSHIFT_PULL_SECRET_HERE -EOF -chmod 600 /etc/crio/openshift-pull-secret - -# Configure the firewall with the mandatory rules for MicroShift -firewall-offline-cmd --zone=trusted --add-source=10.42.0.0/16 -firewall-offline-cmd --zone=trusted --add-source=169.254.169.1 - -%end ----- - -. Install the `mkksiso` tool by running the following command: -+ -[source,terminal] ----- -$ sudo yum install -y lorax ----- - -. Update the Kickstart file in the ISO with your new Kickstart file by running the following command: -+ -[source,terminal] ----- -$ sudo mkksiso <your_kickstart>.ks <your_installer>.iso <updated_installer>.iso ----- diff --git a/modules/microshift-restart-ovnkube-master.adoc b/modules/microshift-restart-ovnkube-master.adoc deleted file mode 100644 index 63c3b05de1a1..000000000000 --- a/modules/microshift-restart-ovnkube-master.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-restart-ovnkube-master_{context}"] -= Restarting the ovnkube-master pod - -The following procedure restarts the `ovnkube-master` pod. - -.Prerequisites - -* The OpenShift CLI (`oc`) is installed. -* Access to the cluster as a user with the `cluster-admin` role. -* A cluster installed on infrastructure configured with the OVN-Kubernetes network plugin. -* The KUBECONFIG environment variable is set. - -.Procedure - -Use the following steps to restart the `ovnkube-master` pod. - -. Access the remote cluster by running the following command: -+ -[source, terminal] ----- -$ export KUBECONFIG=$PWD/kubeconfig ----- - -. Find the name of the `ovnkube-master` pod that you want to restart by running the following command: -+ -[source, terminal] ----- -$ pod=$(oc get pods -n openshift-ovn-kubernetes | awk -F " " '/ovnkube-master/{print $1}') ----- - -. Delete the `ovnkube-master` pod by running the following command: -+ -[source, terminal] ----- -$ oc -n openshift-ovn-kubernetes delete pod $pod ----- - -. Confirm that a new `ovnkube-master` pod is running by using the following command: -+ -[source, terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes ----- -The listing of the running pods shows a new `ovnkube-master` pod name and age. - -//.Example output needs to be added here \ No newline at end of file diff --git a/modules/microshift-rpm-ostree-https.adoc b/modules/microshift-rpm-ostree-https.adoc deleted file mode 100644 index 3a0c47e36a16..000000000000 --- a/modules/microshift-rpm-ostree-https.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_networking/microshift-networking.adoc - -:_content-type: PROCEDURE -[id="microshift-rpm-ostree-https_{context}"] -= Using the RPM-OStree HTTP(S) proxy - -To use the HTTP(S) proxy in RPM-OStree, set the `http_proxy environment` variable for the `rpm-ostreed` service. - -.Procedure - -. 
Add this setting to the `/etc/systemd/system/rpm-ostreed.service.d/00-proxy.conf` file by running the following command: -+ -[source, terminal] ----- -Environment="http_proxy=http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" ----- - -. Next, reload the configuration settings and restart the service to apply your changes. - -.. Reload the configuration settings by running the following command: -+ -[source, terminal] ----- -$ sudo systemctl daemon-reload ----- -.. Restart the rpm-ostree service by running the following command: -+ -[source, terminal] ----- -$ sudo systemctl restart rpm-ostreed.service ----- -//Q: Instructions for how to test that the proxy works by booting the image, verifying that MicroShift starts, and that the application is accessible? diff --git a/modules/microshift-service-starting.adoc b/modules/microshift-service-starting.adoc deleted file mode 100644 index 27f291aa9c0e..000000000000 --- a/modules/microshift-service-starting.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: PROCEDURE -[id="starting-microshift_service_{context}"] -= Starting the {product-title} service - -Use the following procedure to start the {product-title} service. - -.Prerequisites - -* You have installed {product-title} from an RPM package. - -.Procedure - -. As a root user, start the {product-title} service by entering the following command: -+ -[source,terminal] ----- -$ sudo systemctl start microshift ----- - -. Optional: To configure your {op-system} machine to start {product-title} when your machine starts, enter the following command: -+ -[source,terminal] ----- -$ sudo systemctl enable microshift ----- - -. Optional: To disable {product-title} from automatically starting when your machine starts, enter the following command: -+ -[source,terminal] ----- -$ sudo systemctl disable microshift ----- -+ -[NOTE] -==== -The first time that the {product-title} service starts, it downloads and initializes {product-title}'s container images. As a result, it can take several minutes for {product-title} to start the first time that the service is deployed. -Boot time is reduced for subsequent starts of the {product-title} service. -==== \ No newline at end of file diff --git a/modules/microshift-service-stopping.adoc b/modules/microshift-service-stopping.adoc deleted file mode 100644 index 90af8bba53b6..000000000000 --- a/modules/microshift-service-stopping.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// microshift/microshift-install-rpm.adoc - -:_content-type: PROCEDURE -[id="stopping-microshift-service_{context}"] -= Stopping the {product-title} service - -Use the following procedure to stop the {product-title} service. - -.Prerequisites - -* The {product-title} service is running. - -.Procedure - -. Enter the following command to stop the {product-title} service: -+ -[source,terminal] ----- -$ sudo systemctl stop microshift ----- - -. Workloads deployed on {product-title} will continue running even after the {product-title} service has been stopped. Enter the following command to display running workloads: -+ -[source,terminal] ----- -$ sudo crictl ps -a ----- - -. 
Enter the following commands to stop the deployed workloads: -+ -[source,terminal] ----- -$ sudo systemctl stop kubepods.slice ----- - diff --git a/modules/microshift-troubleshooting-nodeport.adoc b/modules/microshift-troubleshooting-nodeport.adoc deleted file mode 100644 index 9c7b9553638f..000000000000 --- a/modules/microshift-troubleshooting-nodeport.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * module may be unused in 4.13 - -:_content-type: PROCEDURE -[id="microshift-troubleshooting-nodeport_{context}"] -= Troubleshooting the NodePort service iptable rules - -OVN-Kubernetes sets up an iptable chain in the network address translation (NAT) table to handle incoming traffic to the NodePort service. When the NodePort service is not reachable or the connection is refused, check the iptable rules on the host to make sure the relevant rules are properly inserted. - -.Procedure - -. View the iptable rules for the NodePort service by running the following command: -+ -[source, terminal] ----- -$ iptables-save | grep NODEPORT ----- -+ -.Example output -[source, terminal] ----- --A OUTPUT -j OVN-KUBE-NODEPORT --A OVN-KUBE-NODEPORT -p tcp -m addrtype --dst-type LOCAL -m tcp --dport 30326 -j DNAT --to-destination 10.43.95.170:80 ----- -OVN-Kubernetes configures the `OVN-KUBE-NODEPORT` iptable chain in the NAT table to match the destination port and Destination Network Address Translates (DNATs) the packet to the `clusterIP` service. The packet is then routed to the OVN network through the gateway bridge `br-ex` via routing rules on the host. - -. Route the packet through the network with routing rules by running the following command: -+ -[source, terminal] ----- -$ ip route ----- -+ -.Example output -[source, terminal] ----- -10.43.0.0/16 via 192.168.122.1 dev br-ex mtu 1400 ----- -This routing rule matches the Kubernetes service IP address range and forwards the packet to the gateway bridge `br-ex`. You must enable `ip_forward` on the host. After the packet is forwarded to the OVS bridge `br-ex`, it is handled by OpenFlow rules in OVS which steers the packet to the OVN network and eventually to the pod. diff --git a/modules/microshift-version-api.adoc b/modules/microshift-version-api.adoc deleted file mode 100644 index 3804db194a1e..000000000000 --- a/modules/microshift-version-api.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_troubleshooting/microshift-version.adoc - -:_content-type: PROCEDURE -[id="microshift-version-api_{context}"] -= Checking the {product-title} version using the API - -To begin troubleshooting, you must know your {product-title} version. One way to get this information is by using the API. 
- -.Procedure - -* To get the version number using the OpenShift CLI (`oc`), view the `kube-public/microshift-version` config map by running the following command: -+ -[source,terminal] ----- -$ oc get configmap -n kube-public microshift-version -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -data: - major: "4" - minor: "10" - version: 4.10.0-0.microshift-e6980e25 -kind: ConfigMap -metadata: - creationTimestamp: "2022-08-08T21:06:11Z" - name: microshift-version - namespace: kube-public ----- diff --git a/modules/microshift-version-cli.adoc b/modules/microshift-version-cli.adoc deleted file mode 100644 index 378efcd0b984..000000000000 --- a/modules/microshift-version-cli.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * microshift_troubleshooting/microshift-version.adoc - -:_content-type: PROCEDURE -[id="microshift-version-cli_{context}"] -= Checking the {product-title} version using the command-line interface - -To begin troubleshooting, you must know your {product-title} version. One way to get this information is by using the CLI. - -.Procedure - -* Run the following command to check the version information: -+ -[source,terminal] ----- -$ microshift version ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -{product-title} Version: {product-version}-0.microshift-e6980e25 -Base OCP Version: {product-version} ----- diff --git a/modules/migrating-custom-types-pkg-apis.adoc b/modules/migrating-custom-types-pkg-apis.adoc deleted file mode 100644 index 66ed79cd92f0..000000000000 --- a/modules/migrating-custom-types-pkg-apis.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_content-type: PROCEDURE -[id="migrating-custom-types-from-pkg-apis_{context}"] -= Migrating custom types from pkg/apis - -Migrate your project's custom types to the updated Operator SDK v0.1.0 usage. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK -- New project created using Operator SDK v0.1.0 - -.Procedure - -. *Create the scaffold API for custom types.* - -.. Create the API for your custom resource (CR) in the new project with -`operator-sdk add api --api-version=<apiversion> --kind=<kind>`: -+ ----- -$ cd memcached-operator -$ operator-sdk add api --api-version=cache.example.com/v1alpha1 --kind=Memcached - -$ tree pkg/apis -pkg/apis/ -├── addtoscheme_cache_v1alpha1.go -├── apis.go -└── cache - └── v1alpha1 - ├── doc.go - ├── memcached_types.go - ├── register.go - └── zz_generated.deepcopy.go ----- - -.. Repeat the previous command for as many custom types as you had defined in your -old project. Each type will be defined in the file -`pkg/apis/<group>/<version>/<kind>_types.go`. - -. *Copy the contents of the type.* - -.. Copy the `Spec` and `Status` contents of the -`pkg/apis/<group>/<version>/types.go` file from the old project to the new -project's `pkg/apis/<group>/<version>/<kind>_types.go` file. - -.. Each `<kind>_types.go` file has an `init()` function. 
Be sure not to remove that -since that registers the type with the Manager's scheme: -+ ----- -func init() { - SchemeBuilder.Register(&Memcached{}, &MemcachedList{}) ----- diff --git a/modules/migrating-reconcile-code.adoc b/modules/migrating-reconcile-code.adoc deleted file mode 100644 index 8af974a056bb..000000000000 --- a/modules/migrating-reconcile-code.adoc +++ /dev/null @@ -1,305 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc - -:_content-type: PROCEDURE -[id="migrating-reconcile-code_{context}"] -= Migrating reconcile code - -Migrate your project's reconcile code to the update Operator SDK v0.1.0 usage. - -.Prerequisites - -- Operator SDK v0.1.0 CLI installed on the development workstation -- `memcached-operator` project previously deployed using an earlier version of -Operator SDK -- Custom types migrated from `pkg/apis/` - -.Procedure - -. *Add a controller to watch your CR.* -+ -In v0.0.x projects, resources to be watched were previously defined in -`cmd/<operator-name>/main.go`: -+ ----- -sdk.Watch("cache.example.com/v1alpha1", "Memcached", "default", time.Duration(5)*time.Second) ----- -+ -For v0.1.0 projects, you must define a -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Controller[Controller] -to watch resources: - -.. Add a controller to watch your CR type with `operator-sdk add controller --api-version=<apiversion> --kind=<kind>`. -+ ----- -$ operator-sdk add controller --api-version=cache.example.com/v1alpha1 --kind=Memcached - -$ tree pkg/controller -pkg/controller/ -├── add_memcached.go -├── controller.go -└── memcached - └── memcached_controller.go ----- - -.. Inspect the `add()` function in your `pkg/controller/<kind>/<kind>_controller.go` file: -+ ----- -import ( - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - ... -) - -func add(mgr manager.Manager, r reconcile.Reconciler) error { - c, err := controller.New("memcached-controller", mgr, controller.Options{Reconciler: r}) - - // Watch for changes to the primary resource Memcached - err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) - - // Watch for changes to the secondary resource pods and enqueue reconcile requests for the owner Memcached - err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) -} ----- -+ -Remove the second `Watch()` or modify it to watch a secondary resource type that -is owned by your CR. -+ -Watching multiple resources lets you trigger the reconcile loop for multiple -resources relevant to your application. See the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Watching_and_EventHandling[watching and eventhandling] -documentation and the Kubernetes -link:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md[controller conventions] -documentation for more details. -+ -If your operator is watching more than one CR type, you can do one of the -following depending on your application: -+ --- -** If the CR is owned by your primary CR, watch it as a secondary resource in -the same controller to trigger the reconcile loop for the primary resource. 
-+ ----- -// Watch for changes to the primary resource Memcached - err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) - - // Watch for changes to the secondary resource AppService and enqueue reconcile requests for the owner Memcached - err = c.Watch(&source.Kind{Type: &appv1alpha1.AppService{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) ----- - -** Add a new controller to watch and reconcile the CR independently of the other CR. -+ ----- -$ operator-sdk add controller --api-version=app.example.com/v1alpha1 --kind=AppService ----- -+ ----- - // Watch for changes to the primary resource AppService - err = c.Watch(&source.Kind{Type: &appv1alpha1.AppService{}}, &handler.EnqueueRequestForObject{}) ----- --- - -. *Copy and modify reconcile code from `pkg/stub/handler.go`.* -+ -In a v0.1.0 project, the reconcile code is defined in the `Reconcile()` method -of a controller's -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Reconciler[Reconciler]. -This is similar to the `Handle()` function in the older project. Note the -difference in the arguments and return values: -+ --- -- Reconcile: -+ ----- - func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) ----- - -- Handle: -+ ----- - func (h *Handler) Handle(ctx context.Context, event sdk.Event) error ----- --- -+ -Instead of receiving an `sdk.Event` (with the object), the `Reconcile()` -function receives a -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Request[Request] -(`Name`/`Namespace` key) to lookup the object. -+ -If the `Reconcile()` function returns an error, the controller will requeue and -retry the `Request`. If no error is returned, then depending on the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result[Result], -the controller will either not retry the `Request`, immediately retry, or retry -after a specified duration. - -.. Copy the code from the old project's `Handle()` function over the existing code -in your controller's `Reconcile()` function. Be sure to keep the initial section -in the `Reconcile()` code that looks up the object for the `Request` and checks -to see if it is deleted. -+ ----- -import ( - apierrors "k8s.io/apimachinery/pkg/api/errors" - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - ... -) -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the Memcached instance - instance := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO() - request.NamespacedName, instance) - if err != nil { - if apierrors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. - // Return and don't requeue - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - // Rest of your reconcile code goes here. - ... -} ----- - -.. Change the return values in your reconcile code: - -... Replace `return err` with `return reconcile.Result{}, err`. - -... Replace `return nil` with `return reconcile.Result{}, nil`. - -.. To periodically reconcile a CR in your controller, you can set the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result[RequeueAfter] -field for `reconcile.Result`. 
This will cause the controller to requeue the -`Request` and trigger the reconcile after the desired duration. Note that the -default value of `0` means no requeue. -+ ----- -reconcilePeriod := 30 * time.Second -reconcileResult := reconcile.Result{RequeueAfter: reconcilePeriod} -... - -// Update the status -err := r.client.Update(context.TODO(), memcached) -if err != nil { - log.Printf("failed to update memcached status: %v", err) - return reconcileResult, err -} -return reconcileResult, nil ----- - -.. Replace the calls to the SDK client (Create, Update, Delete, Get, List) with the -reconciler's client. -+ -See the examples below and the `controller-runtime` -link:https://sdk.operatorframework.io/docs/building-operators/golang/references/client/[client API documentation] -in the `operator-sdk` project for more details: -+ ----- -// Create -dep := &appsv1.Deployment{...} -err := sdk.Create(dep) -// v0.0.1 -err := r.client.Create(context.TODO(), dep) - -// Update -err := sdk.Update(dep) -// v0.0.1 -err := r.client.Update(context.TODO(), dep) - -// Delete -err := sdk.Delete(dep) -// v0.0.1 -err := r.client.Delete(context.TODO(), dep) - -// List -podList := &corev1.PodList{} -labelSelector := labels.SelectorFromSet(labelsForMemcached(memcached.Name)) -listOps := &metav1.ListOptions{LabelSelector: labelSelector} -err := sdk.List(memcached.Namespace, podList, sdk.WithListOptions(listOps)) -// v0.1.0 -listOps := &client.ListOptions{Namespace: memcached.Namespace, LabelSelector: labelSelector} -err := r.client.List(context.TODO(), listOps, podList) - -// Get -dep := &appsv1.Deployment{APIVersion: "apps/v1", Kind: "Deployment", Name: name, Namespace: namespace} -err := sdk.Get(dep) -// v0.1.0 -dep := &appsv1.Deployment{} -err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, dep) ----- - -.. Copy and initialize any other fields that you may have had in your `Handler` -struct into the `Reconcile<Kind>` struct: -+ ----- -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileMemcached{client: mgr.GetClient(), scheme: mgr.GetScheme(), foo: "bar"} -} - -// ReconcileMemcached reconciles a Memcached object -type ReconcileMemcached struct { - client client.Client - scheme *runtime.Scheme - // Other fields - foo string -} ----- - -. *Copy changes from `main.go`.* -+ -The main function for a v0.1.0 operator in `cmd/manager/main.go` sets up the -link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager[Manager], -which registers the custom resources and starts all the controllers. -+ -There is no requirement to migrate the SDK functions `sdk.Watch()`,`sdk.Handle()`, and -`sdk.Run()` from the old `main.go` since that logic is now defined in a -controller. -+ -However, if there are any Operator-specific flags or settings defined in the old -`main.go` file, copy those over. -+ -If you have any third party resource types registered with the SDK's scheme, see -link:https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#adding-3rd-party-resources-to-your-operator[Advanced Topics] -in the `operator-sdk` project for how to register those with the Manager's -scheme in the new project. - -. *Copy user defined files.* -+ -If there are any user defined `pkgs`, scripts, or documentation in the older -project, copy these files into the new project. - -. 
*Copy changes to deployment manifests.* -+ -For any updates made to the following manifests in the old project, copy over -the changes to their corresponding files in the new project. Be careful not to -directly overwrite the files, but inspect and make any changes necessary: -+ --- -* `tmp/build/Dockerfile` to `build/Dockerfile` -** There is no tmp directory in the new project layout -* RBAC rules updates from `deploy/rbac.yaml` to `deploy/role.yaml` and -`deploy/role_binding.yaml` -* `deploy/cr.yaml` to `deploy/crds/<group>_<version>_<kind>_cr.yaml` -* `deploy/crd.yaml` to `deploy/crds/<group>_<version>_<kind>_crd.yaml` --- - -. *Copy user defined dependencies.* -+ -For any user defined dependencies added to the old project's `Gopkg.toml`, copy -and append them to the new project's `Gopkg.toml`. Run `dep ensure` to update -the vendor in the new project. - -. *Confirm your changes.* -+ -At this point, you should be able to build and run your Operator to verify that -it works. diff --git a/modules/migrating-to-multi-arch-cli.adoc b/modules/migrating-to-multi-arch-cli.adoc deleted file mode 100644 index cbce33068c38..000000000000 --- a/modules/migrating-to-multi-arch-cli.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * updating/migrating-to-multi-payload.adoc - -:_content-type: PROCEDURE -[id="migrating-to-multi-arch-cli_{context}"] -= Migrating to a cluster with multi-architecture compute machines using the CLI - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* Your {product-title} version is updated to at least version 4.13.0. -+ -For more information on how to update your cluster version, see _Updating a cluster using the web console_ or _Updating a cluster using the CLI_. -* You have installed the OpenShift CLI (`oc`) that matches the version for your current cluster. -* Your `oc` client is updated to at least version 4.13.0. -* Your {product-title} cluster is installed on either the AWS or Azure platform. -+ -For more information on selecting a supported platform for your cluster installation, see _Selecting a cluster installation type_. - - -.Procedure -. Verify that the `RetrievedUpdates` condition is `True` in the Cluster Version Operator (CVO) by running the following command: -+ -[source,terminal] ----- -$ oc get clusterversion/version -o=jsonpath="{.status.conditions[?(.type=='RetrievedUpdates')].status}" ----- -+ -If the `RetrievedUpdates` condition is `False`, you can find supplemental information regarding the failure by using the following command: -+ -[source,terminal] ----- -$ oc adm upgrade ----- -+ -For more information about cluster version condition types, see _Understanding cluster version condition types_. - -. If the condition `RetrievedUpdates` is `False`, change the channel to `stable-<4.y>` or `fast-<4.y>` with the following command: -+ -[source,terminal] ----- -$ oc adm upgrade channel <channel> ----- -+ -After setting the channel, verify that `RetrievedUpdates` is `True`. -+ -For more information about channels, see _Understanding update channels and releases_. - -. Migrate to the multi-architecture payload with the following command: -+ -[source,terminal] ----- -$ oc adm upgrade --to-multi-arch ----- - -.Verification - -* You can monitor the migration by running the following command: -+ -[source,terminal] ----- -$ oc adm upgrade ----- -+ -[IMPORTANT] -==== -Machine launches may fail as the cluster settles into the new state.
To notice and recover when machines fail to launch, we recommend deploying machine health checks. For more information about machine health checks and how to deploy them, see _About machine health checks_. -==== -+ -//commenting this section out until https://issues.redhat.com/browse/OCPBUGS-8256 is resolved: -//For `oc get co`, expect `AVAILABLE=True`, `PROGRESSING=False`, and `DEGRADED=False` on all cluster Operators. -+ -//For `oc get mcp`, expect `UPDATED=True`, `UPDATING=False`, and `DEGRADED=False` on all machine config pools. -+ -//For `oc adm upgrade`, here is an example of a response: -+ -//[source,terminal] -//---- -//working towards ${VERSION}: 106 of 841 done (12% complete), waiting on etcd, kube-apiserver -//---- -+ -The migrations must be complete and all the cluster operators must be stable before you can add compute machine sets with different architectures to your cluster. - - diff --git a/modules/migration-about-configuring-proxies.adoc b/modules/migration-about-configuring-proxies.adoc deleted file mode 100644 index 33011101e3f3..000000000000 --- a/modules/migration-about-configuring-proxies.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: CONCEPT -[id="migration-about-configuring-proxies_{context}"] -= Proxy configuration - -For {product-title} 4.1 and earlier versions, you must configure proxies in the `MigrationController` custom resource (CR) manifest after you install the {mtc-full} Operator because these versions do not support a cluster-wide `proxy` object. - -For {product-title} 4.2 to {product-version}, the {mtc-full} ({mtc-short}) inherits the cluster-wide proxy settings. You can change the proxy parameters if you want to override the cluster-wide proxy settings. - -[id="direct-volume-migration_{context}"] -== Direct volume migration - -Direct Volume Migration (DVM) was introduced in MTC 1.4.2. DVM supports only one proxy. The source cluster cannot access the route of the target cluster if the target cluster is also behind a proxy. - -If you want to perform a DVM from a source cluster behind a proxy, you must configure a TCP proxy that works at the transport layer and forwards the SSL connections transparently without decrypting and re-encrypting them with their own SSL certificates. A Stunnel proxy is an example of such a proxy. - -[id="tcp-proxy-setup-for-dvm_{context}"] -=== TCP proxy setup for DVM - -You can set up a direct connection between the source and the target cluster through a TCP proxy and configure the `stunnel_tcp_proxy` variable in the `MigrationController` CR to use the proxy: - -[source, yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigrationController -metadata: - name: migration-controller - namespace: openshift-migration -spec: - [...] - stunnel_tcp_proxy: http://username:password@ip:port ----- - -Direct volume migration (DVM) supports only basic authentication for the proxy. Moreover, DVM works only from behind proxies that can tunnel a TCP connection transparently. HTTP/HTTPS proxies in man-in-the-middle mode do not work. The existing cluster-wide proxies might not support this behavior. As a result, the proxy settings for DVM are intentionally kept different from the usual proxy configuration in {mtc-short}. 
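If you prefer not to edit the `MigrationController` manifest interactively, the same field can be set with a merge patch. The following is a minimal sketch, assuming the default `migration-controller` CR name, the `openshift-migration` namespace, and the placeholder proxy URL from the example above:

[source,terminal]
----
$ oc patch migrationcontroller migration-controller -n openshift-migration \
  --type=merge -p '{"spec":{"stunnel_tcp_proxy":"http://username:password@ip:port"}}'
----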
- -[id="why-tcp-proxy-instead-of-an-http-https-proxy_{context}"] -=== Why use a TCP proxy instead of an HTTP/HTTPS proxy? - -You can enable DVM by running Rsync between the source and the target cluster over an OpenShift route. Traffic is encrypted using Stunnel, a TCP proxy. The Stunnel running on the source cluster initiates a TLS connection with the target Stunnel and transfers data over an encrypted channel. - -Cluster-wide HTTP/HTTPS proxies in OpenShift are usually configured in man-in-the-middle mode where they negotiate their own TLS session with the outside servers. However, this does not work with Stunnel. Stunnel requires that its TLS session be untouched by the proxy, essentially making the proxy a transparent tunnel which simply forwards the TCP connection as-is. Therefore, you must use a TCP proxy. - -[id="dvm-known-issues_{context}"] -=== Known issue - -.Migration fails with error `Upgrade request required` - -The migration Controller uses the SPDY protocol to execute commands within remote pods. If the remote cluster is behind a proxy or a firewall that does not support the SPDY protocol, the migration controller fails to execute remote commands. The migration fails with the error message `Upgrade request required`. -Workaround: Use a proxy that supports the SPDY protocol. - -In addition to supporting the SPDY protocol, the proxy or firewall also must pass the `Upgrade` HTTP header to the API server. The client uses this header to open a websocket connection with the API server. If the `Upgrade` header is blocked by the proxy or firewall, the migration fails with the error message `Upgrade request required`. -Workaround: Ensure that the proxy forwards the `Upgrade` header. - -[id="tuning-network-policies-for-migrations_{context}"] -== Tuning network policies for migrations - -OpenShift supports restricting traffic to or from pods using _NetworkPolicy_ or _EgressFirewalls_ based on the network plugin used by the cluster. If any of the source namespaces involved in a migration use such mechanisms to restrict network traffic to pods, the restrictions might inadvertently stop traffic to Rsync pods during migration. - -Rsync pods running on both the source and the target clusters must connect to each other over an OpenShift Route. Existing _NetworkPolicy_ or _EgressNetworkPolicy_ objects can be configured to automatically exempt Rsync pods from these traffic restrictions. - -[id="dvm-network-policy-configuration_{context}"] -=== NetworkPolicy configuration - -[id="egress-traffic-from-rsync-pods_{context}"] -==== Egress traffic from Rsync pods - -You can use the unique labels of Rsync pods to allow egress traffic to pass from them if the `NetworkPolicy` configuration in the source or destination namespaces blocks this type of traffic. 
The following policy allows *all* egress traffic from Rsync pods in the namespace: - -[source, yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-egress-from-rsync-pods -spec: - podSelector: - matchLabels: - owner: directvolumemigration - app: directvolumemigration-rsync-transfer - egress: - - {} - policyTypes: - - Egress ----- - -[id="ingress-traffic-to-rsync-pods_{context}"] -==== Ingress traffic to Rsync pods - -[source, yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-egress-from-rsync-pods -spec: - podSelector: - matchLabels: - owner: directvolumemigration - app: directvolumemigration-rsync-transfer - ingress: - - {} - policyTypes: - - Ingress ----- - -[id="egressnetworkpolicy-config_{context}"] -=== EgressNetworkPolicy configuration - -The `EgressNetworkPolicy` object or _Egress Firewalls_ are OpenShift constructs designed to block egress traffic leaving the cluster. - -Unlike the `NetworkPolicy` object, the Egress Firewall works at a project level because it applies to all pods in the namespace. Therefore, the unique labels of Rsync pods do not exempt only Rsync pods from the restrictions. However, you can add the CIDR ranges of the source or target cluster to the _Allow_ rule of the policy so that a direct connection can be setup between two clusters. - -Based on which cluster the Egress Firewall is present in, you can add the CIDR range of the other cluster to allow egress traffic between the two: - -[source, yaml] ----- -apiVersion: network.openshift.io/v1 -kind: EgressNetworkPolicy -metadata: - name: test-egress-policy - namespace: <namespace> -spec: - egress: - - to: - cidrSelector: <cidr_of_source_or_target_cluster> - type: Deny ----- - -[id="choosing-alternate-endpoints-for-data-transfer_{context}"] -=== Choosing alternate endpoints for data transfer - -By default, DVM uses an {product-title} route as an endpoint to transfer PV data to destination clusters. You can choose another type of supported endpoint, if cluster topologies allow. - -For each cluster, you can configure an endpoint by setting the `rsync_endpoint_type` variable on the appropriate *destination* cluster in your `MigrationController` CR: - -[source, yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigrationController -metadata: - name: migration-controller - namespace: openshift-migration -spec: - [...] 
- rsync_endpoint_type: [NodePort|ClusterIP|Route] ----- - -[id="configuring-supplemental-groups-for-rsync-pods_{context}"] -=== Configuring supplemental groups for Rsync pods -When your PVCs use a shared storage, you can configure the access to that storage by adding supplemental groups to Rsync pod definitions in order for the pods to allow access: - -.Supplementary groups for Rsync pods -[option="header"] -|=== -|Variable|Type|Default|Description - -|`src_supplemental_groups` -|string -|Not set -|Comma-separated list of supplemental groups for source Rsync pods - -|`target_supplemental_groups` -|string -|Not set -|Comma-separated list of supplemental groups for target Rsync pods -|=== - -.Example usage - -The `MigrationController` CR can be updated to set values for these supplemental groups: - -[source, yaml] ----- -spec: - src_supplemental_groups: "1000,2000" - target_supplemental_groups: "2000,3000" ----- diff --git a/modules/migration-about-mtc-custom-resources.adoc b/modules/migration-about-mtc-custom-resources.adoc deleted file mode 100644 index c9912e60f89d..000000000000 --- a/modules/migration-about-mtc-custom-resources.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: CONCEPT -[id="migration-about-mtc-custom-resources_{context}"] -= About {mtc-short} custom resources - -The {mtc-full} ({mtc-short}) creates the following custom resources (CRs): - -image::migration-architecture.png[migration architecture diagram] - -image:darkcircle-1.png[20,20] link:https://github.com/konveyor/mig-controller/blob/master/pkg/apis/migration/v1alpha1/migcluster_types.go[MigCluster] (configuration, {mtc-short} cluster): Cluster definition - -image:darkcircle-2.png[20,20] link:https://github.com/konveyor/mig-controller/blob/master/pkg/apis/migration/v1alpha1/migstorage_types.go[MigStorage] (configuration, {mtc-short} cluster): Storage definition - -image:darkcircle-3.png[20,20] link:https://github.com/konveyor/mig-controller/blob/master/pkg/apis/migration/v1alpha1/migplan_types.go[MigPlan] (configuration, {mtc-short} cluster): Migration plan - -The `MigPlan` CR describes the source and target clusters, replication repository, and namespaces being migrated. It is associated with 0, 1, or many `MigMigration` CRs. - -[NOTE] -==== -Deleting a `MigPlan` CR deletes the associated `MigMigration` CRs. -==== - -image:darkcircle-4.png[20,20] link:https://github.com/vmware-tanzu/velero/blob/main/pkg/apis/velero/v1/backupstoragelocation_types.go[BackupStorageLocation] (configuration, {mtc-short} cluster): Location of `Velero` backup objects - -image:darkcircle-5.png[20,20] link:https://github.com/vmware-tanzu/velero/blob/main/pkg/apis/velero/v1/volume_snapshot_location.go[VolumeSnapshotLocation] (configuration, {mtc-short} cluster): Location of `Velero` volume snapshots - -image:darkcircle-6.png[20,20] link:https://github.com/konveyor/mig-controller/blob/master/pkg/apis/migration/v1alpha1/migmigration_types.go[MigMigration] (action, {mtc-short} cluster): Migration, created every time you stage or migrate data. Each `MigMigration` CR is associated with a `MigPlan` CR. 
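To inspect these configuration resources directly on the cluster where {mtc-short} runs, you can list them with standard `oc` commands. The following is a minimal sketch, assuming the default `openshift-migration` namespace and that the CR kinds resolve to their usual lowercase resource names:

[source,terminal]
----
$ oc get migcluster,migstorage,migplan,migmigration -n openshift-migration
----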
- -image:darkcircle-7.png[20,20] link:https://github.com/vmware-tanzu/velero/blob/main/pkg/apis/velero/v1/backup.go[Backup] (action, source cluster): When you run a migration plan, the `MigMigration` CR creates two `Velero` backup CRs on each source cluster: - -* Backup CR #1 for Kubernetes objects -* Backup CR #2 for PV data - -image:darkcircle-8.png[20,20] link:https://github.com/vmware-tanzu/velero/blob/main/pkg/apis/velero/v1/restore.go[Restore] (action, target cluster): When you run a migration plan, the `MigMigration` CR creates two `Velero` restore CRs on the target cluster: - -* Restore CR #1 (using Backup CR #2) for PV data -* Restore CR #2 (using Backup CR #1) for Kubernetes objects diff --git a/modules/migration-accessing-performance-metrics.adoc b/modules/migration-accessing-performance-metrics.adoc deleted file mode 100644 index b72a16ddef01..000000000000 --- a/modules/migration-accessing-performance-metrics.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration-toolkit-for-containers/troubleshooting-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-accessing-performance-metrics_{context}"] -= Accessing performance metrics - -The `MigrationController` custom resource (CR) records metrics and pulls them into on-cluster monitoring storage. You can query the metrics by using Prometheus Query Language (PromQL) to diagnose migration performance issues. All metrics are reset when the Migration Controller pod restarts. - -You can access the performance metrics and run queries by using the {product-title} web console. - -.Procedure - -. In the {product-title} web console, click *Observe* -> *Metrics*. -. Enter a PromQL query, select a time window to display, and click *Run Queries*. -+ -If your web browser does not display all the results, use the Prometheus console. diff --git a/modules/migration-adding-cluster-to-cam.adoc b/modules/migration-adding-cluster-to-cam.adoc deleted file mode 100644 index d91bf3b690ff..000000000000 --- a/modules/migration-adding-cluster-to-cam.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -:_content-type: PROCEDURE -[id="migration-adding-cluster-to-cam_{context}"] -= Adding a cluster to the {mtc-short} web console - -You can add a cluster to the {mtc-full} ({mtc-short}) web console. - -.Prerequisites - -* If you are using Azure snapshots to copy data: -** You must specify the Azure resource group name for the cluster. -** The clusters must be in the same Azure resource group. -** The clusters must be in the same geographic location. -* If you are using direct image migration, you must expose a route to the image registry of the source cluster. - -.Procedure - -. Log in to the cluster. -. 
Obtain the `migration-controller` service account token: -+ -[source,terminal] ----- -$ oc sa get-token migration-controller -n openshift-migration ----- -+ -.Example output -+ -[source,terminal] ----- -eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtaWciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoibWlnLXRva2VuLWs4dDJyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Im1pZyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImE1YjFiYWMwLWMxYmYtMTFlOS05Y2NiLTAyOWRmODYwYjMwOCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDptaWc6bWlnIn0.xqeeAINK7UXpdRqAtOj70qhBJPeMwmgLomV9iFxr5RoqUgKchZRG2J2rkqmPm6vr7K-cm7ibD1IBpdQJCcVDuoHYsFgV4mp9vgOfn9osSDp2TGikwNz4Az95e81xnjVUmzh-NjDsEpw71DH92iHV_xt2sTwtzftS49LpPW2LjrV0evtNBP_t_RfskdArt5VSv25eORl7zScqfe1CiMkcVbf2UqACQjo3LbkpfN26HAioO2oH0ECPiRzT0Xyh-KwFutJLS9Xgghyw-LD9kPKcE_xbbJ9Y4Rqajh7WdPYuB0Jd9DPVrslmzK-F6cgHHYoZEv0SvLQi-PO0rpDrcjOEQQ ----- - -. In the {mtc-short} web console, click *Clusters*. -. Click *Add cluster*. -. Fill in the following fields: - -* *Cluster name*: The cluster name can contain lower-case letters (`a-z`) and numbers (`0-9`). It must not contain spaces or international characters. -* *URL*: Specify the API server URL, for example, `\https://<www.example.com>:8443`. -* *Service account token*: Paste the `migration-controller` service account token. -* *Exposed route host to image registry*: If you are using direct image migration, specify the exposed route to the image registry of the source cluster. -+ -To create the route, run the following command: -+ -** For {product-title} 3: -+ -[source,terminal] ----- -$ oc create route passthrough --service=docker-registry --port=5000 -n default ----- -** For {product-title} 4: -+ -[source,terminal] ----- -$ oc create route passthrough --service=image-registry --port=5000 -n openshift-image-registry ----- - -* *Azure cluster*: You must select this option if you use Azure snapshots to copy your data. -* *Azure resource group*: This field is displayed if *Azure cluster* is selected. Specify the Azure resource group. -* *Require SSL verification*: Optional: Select this option to verify SSL connections to the cluster. -* *CA bundle file*: This field is displayed if *Require SSL verification* is selected. If you created a custom CA certificate bundle file for self-signed certificates, click *Browse*, select the CA bundle file, and upload it. - -. Click *Add cluster*. -+ -The cluster appears in the *Clusters* list. diff --git a/modules/migration-adding-replication-repository-to-cam.adoc b/modules/migration-adding-replication-repository-to-cam.adoc deleted file mode 100644 index e2f3c6c13af9..000000000000 --- a/modules/migration-adding-replication-repository-to-cam.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -:_content-type: PROCEDURE -[id="migration-adding-replication-repository-to-cam_{context}"] -= Adding a replication repository to the {mtc-short} web console - -You can add an object storage as a replication repository to the {mtc-full} ({mtc-short}) web console. 
- -{mtc-short} supports the following storage providers: - -* Amazon Web Services (AWS) S3 -* Multi-Cloud Object Gateway (MCG) -* Generic S3 object storage, for example, Minio or Ceph S3 -* Google Cloud Provider (GCP) -* Microsoft Azure Blob - -.Prerequisites - -* You must configure the object storage as a replication repository. - -.Procedure - -. In the {mtc-short} web console, click *Replication repositories*. -. Click *Add repository*. -. Select a *Storage provider type* and fill in the following fields: - -* *AWS* for S3 providers, including AWS and MCG: - -** *Replication repository name*: Specify the replication repository name in the {mtc-short} web console. -** *S3 bucket name*: Specify the name of the S3 bucket. -** *S3 bucket region*: Specify the S3 bucket region. *Required* for AWS S3. *Optional* for some S3 providers. Check the product documentation of your S3 provider for expected values. -** *S3 endpoint*: Specify the URL of the S3 service, not the bucket, for example, `\https://<s3-storage.apps.cluster.com>`. *Required* for a generic S3 provider. You must use the `https://` prefix. -** *S3 provider access key*: Specify the `<AWS_ACCESS_KEY_ID>` for AWS or the S3 provider access key for MCG and other S3 providers. -** *S3 provider secret access key*: Specify the `<AWS_SECRET_ACCESS_KEY>` for AWS or the S3 provider secret access key for MCG and other S3 providers. -** *Require SSL verification*: Clear this checkbox if you are using a generic S3 provider. -** If you created a custom CA certificate bundle for self-signed certificates, click *Browse* and browse to the Base64-encoded file. - -* *GCP*: - -** *Replication repository name*: Specify the replication repository name in the {mtc-short} web console. -** *GCP bucket name*: Specify the name of the GCP bucket. -** *GCP credential JSON blob*: Specify the string in the `credentials-velero` file. - -* *Azure*: - -** *Replication repository name*: Specify the replication repository name in the {mtc-short} web console. -** *Azure resource group*: Specify the resource group of the Azure Blob storage. -** *Azure storage account name*: Specify the Azure Blob storage account name. -** *Azure credentials - INI file contents*: Specify the string in the `credentials-velero` file. - -. Click *Add repository* and wait for connection validation. - -. Click *Close*. -+ -The new repository appears in the *Replication repositories* list. diff --git a/modules/migration-changing-migration-plan-limits.adoc b/modules/migration-changing-migration-plan-limits.adoc deleted file mode 100644 index 53c959c85783..000000000000 --- a/modules/migration-changing-migration-plan-limits.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-changing-migration-plan-limits_{context}"] -= Increasing limits for large migrations - -You can increase the limits on migration objects and container resources for large migrations with the {mtc-full} ({mtc-short}). - -[IMPORTANT] -==== -You must test these changes before you perform a migration in a production environment. -==== - -.Procedure - -. Edit the `MigrationController` custom resource (CR) manifest: -+ -[source,terminal] ----- -$ oc edit migrationcontroller -n openshift-migration ----- - -. Update the following parameters: -+ -[source,yaml] ----- -...
-mig_controller_limits_cpu: "1" <1> -mig_controller_limits_memory: "10Gi" <2> -... -mig_controller_requests_cpu: "100m" <3> -mig_controller_requests_memory: "350Mi" <4> -... -mig_pv_limit: 100 <5> -mig_pod_limit: 100 <6> -mig_namespace_limit: 10 <7> -... ----- -<1> Specifies the number of CPUs available to the `MigrationController` CR. -<2> Specifies the amount of memory available to the `MigrationController` CR. -<3> Specifies the number of CPU units available for `MigrationController` CR requests. `100m` represents 0.1 CPU units (100 * 1e-3). -<4> Specifies the amount of memory available for `MigrationController` CR requests. -<5> Specifies the number of persistent volumes that can be migrated. -<6> Specifies the number of pods that can be migrated. -<7> Specifies the number of namespaces that can be migrated. - -. Create a migration plan that uses the updated parameters to verify the changes. -+ -If your migration plan exceeds the `MigrationController` CR limits, the {mtc-short} console displays a warning message when you save the migration plan. diff --git a/modules/migration-compatibility-guidelines.adoc b/modules/migration-compatibility-guidelines.adoc deleted file mode 100644 index ec4f873b6e9d..000000000000 --- a/modules/migration-compatibility-guidelines.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: CONCEPT -[id="migration-compatibility-guidelines_{context}"] -= Compatibility guidelines - -You must install the {mtc-full} ({mtc-short}) Operator that is compatible with your {product-title} version. - -.Definitions - -legacy platform:: {product-title} 4.5 and earlier. -modern platform:: {product-title} 4.6 and later. -legacy operator:: The {mtc-short} Operator designed for legacy platforms. -modern operator:: The {mtc-short} Operator designed for modern platforms. -control cluster:: The cluster that runs the {mtc-short} controller and GUI. -remote cluster:: A source or destination cluster for a migration that runs Velero. The Control Cluster communicates with Remote clusters via the Velero API to drive migrations. - - -[cols="1,2,2", options="header"] -.{mtc-short} compatibility: Migrating from a legacy platform -|=== -||{product-title} 4.5 or earlier |{product-title} 4.6 or later -|Stable {mtc-short} version a|{mtc-short} {mtc-version}._z_ - -Legacy {mtc-version} operator: Install manually with the `operator.yml` file. -[IMPORTANT] -==== -This cluster cannot be the control cluster. -==== - -|{mtc-short} {mtc-version}._z_ - -Install with OLM, release channel `release-v1.7` -|=== - -[NOTE] -==== -Edge cases exist in which network restrictions prevent modern clusters from connecting to other clusters involved in the migration. For example, when migrating from an {product-title} 3.11 cluster on premises to a modern {product-title} cluster in the cloud, where the modern cluster cannot connect to the {product-title} 3.11 cluster. - -With {mtc-short} {mtc-version}, if one of the remote clusters is unable to communicate with the control cluster because of network restrictions, use the `crane tunnel-api` command. 
- -With the stable {mtc-short} release, although you should always designate the most modern cluster as the control cluster, in this specific case it is possible to designate the legacy cluster as the control cluster and push workloads to the remote cluster. -==== diff --git a/modules/migration-configuring-aws-s3.adoc b/modules/migration-configuring-aws-s3.adoc deleted file mode 100644 index e6475d03a955..000000000000 --- a/modules/migration-configuring-aws-s3.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc - -:_content-type: PROCEDURE -[id="migration-configuring-aws-s3_{context}"] -= Configuring Amazon Web Services - -ifdef::installing-3-4,installing-mtc[] -You configure Amazon Web Services (AWS) S3 object storage as a replication repository for the {mtc-full} ({mtc-short}). -endif::[] -ifdef::installing-oadp-aws[] -You configure Amazon Web Services (AWS) for the OpenShift API for Data Protection (OADP). -endif::[] - -.Prerequisites - -* You must have the link:https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html[AWS CLI] installed. -ifdef::installing-3-4,installing-mtc[] -* The AWS S3 storage bucket must be accessible to the source and target clusters. -* If you are using the snapshot copy method: -** You must have access to EC2 Elastic Block Storage (EBS). -** The source and target clusters must be in the same region. -** The source and target clusters must have the same storage class. -** The storage class must be compatible with snapshots. -endif::[] - -.Procedure - -. Set the `BUCKET` variable: -+ -[source,terminal] ----- -$ BUCKET=<your_bucket> ----- - -. Set the `REGION` variable: -+ -[source,terminal] ----- -$ REGION=<your_region> ----- - -. Create an AWS S3 bucket: -+ -[source,terminal] ----- -$ aws s3api create-bucket \ - --bucket $BUCKET \ - --region $REGION \ - --create-bucket-configuration LocationConstraint=$REGION <1> ----- -<1> `us-east-1` does not support a `LocationConstraint`. If your region is `us-east-1`, omit `--create-bucket-configuration LocationConstraint=$REGION`. - -. Create an IAM user: -+ -[source,terminal] ----- -$ aws iam create-user --user-name velero <1> ----- -<1> If you want to use Velero to back up multiple clusters with multiple S3 buckets, create a unique user name for each cluster. - -. Create a `velero-policy.json` file: -+ -[source,terminal] ----- -$ cat > velero-policy.json <<EOF -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:CreateSnapshot", - "ec2:DeleteSnapshot" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:PutObject", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploadParts" - ], - "Resource": [ - "arn:aws:s3:::${BUCKET}/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket", - "s3:GetBucketLocation", - "s3:ListBucketMultipartUploads" - ], - "Resource": [ - "arn:aws:s3:::${BUCKET}" - ] - } - ] -} -EOF ----- - -. Attach the policies to give the `velero` user the minimum necessary permissions: -+ -[source,terminal] ----- -$ aws iam put-user-policy \ - --user-name velero \ - --policy-name velero \ - --policy-document file://velero-policy.json ----- - -. 
Create an access key for the `velero` user: -+ -[source,terminal] ----- -$ aws iam create-access-key --user-name velero ----- -+ -.Example output -+ -[source,terminal] ----- -{ - "AccessKey": { - "UserName": "velero", - "Status": "Active", - "CreateDate": "2017-07-31T22:24:41.576Z", - "SecretAccessKey": <AWS_SECRET_ACCESS_KEY>, - "AccessKeyId": <AWS_ACCESS_KEY_ID> - } -} ----- -ifdef::installing-3-4,installing-mtc[] -+ -Record the `AWS_SECRET_ACCESS_KEY` and the `AWS_ACCESS_KEY_ID`. You use the credentials to add AWS as a replication repository. -endif::[] -ifdef::installing-oadp-aws[] -. Create a `credentials-velero` file: -+ -[source,terminal,subs="attributes+"] ----- -$ cat << EOF > ./credentials-velero -[default] -aws_access_key_id=<AWS_ACCESS_KEY_ID> -aws_secret_access_key=<AWS_SECRET_ACCESS_KEY> -EOF ----- -+ -You use the `credentials-velero` file to create a `Secret` object for AWS before you install the Data Protection Application. -endif::[] diff --git a/modules/migration-configuring-azure.adoc b/modules/migration-configuring-azure.adoc deleted file mode 100644 index 4d796c263a85..000000000000 --- a/modules/migration-configuring-azure.adoc +++ /dev/null @@ -1,170 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc - -:_content-type: PROCEDURE -[id="migration-configuring-azure_{context}"] -= Configuring Microsoft Azure - -ifdef::installing-3-4,installing-mtc[] -You configure a Microsoft Azure Blob storage container as a replication repository for the {mtc-full} ({mtc-short}). -endif::[] -ifdef::installing-oadp-azure[] -You configure a Microsoft Azure for the OpenShift API for Data Protection (OADP). -endif::[] - -.Prerequisites - -* You must have the link:https://docs.microsoft.com/en-us/cli/azure/install-azure-cli[Azure CLI] installed. -ifdef::installing-3-4,installing-mtc[] -* The Azure Blob storage container must be accessible to the source and target clusters. -* If you are using the snapshot copy method: -** The source and target clusters must be in the same region. -** The source and target clusters must have the same storage class. -** The storage class must be compatible with snapshots. -endif::[] - -.Procedure - -. Log in to Azure: -+ -[source,terminal] ----- -$ az login ----- - -. Set the `AZURE_RESOURCE_GROUP` variable: -+ -[source,terminal] ----- -$ AZURE_RESOURCE_GROUP=Velero_Backups ----- - -. Create an Azure resource group: -+ -[source,terminal] ----- -$ az group create -n $AZURE_RESOURCE_GROUP --location CentralUS <1> ----- -<1> Specify your location. - -. Set the `AZURE_STORAGE_ACCOUNT_ID` variable: -+ -[source,terminal] ----- -$ AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')" ----- - -. Create an Azure storage account: -+ -[source,terminal] ----- -$ az storage account create \ - --name $AZURE_STORAGE_ACCOUNT_ID \ - --resource-group $AZURE_RESOURCE_GROUP \ - --sku Standard_GRS \ - --encryption-services blob \ - --https-only true \ - --kind BlobStorage \ - --access-tier Hot ----- - -. Set the `BLOB_CONTAINER` variable: -+ -[source,terminal] ----- -$ BLOB_CONTAINER=velero ----- - -. Create an Azure Blob storage container: -+ -[source,terminal] ----- -$ az storage container create \ - -n $BLOB_CONTAINER \ - --public-access off \ - --account-name $AZURE_STORAGE_ACCOUNT_ID ----- - -ifdef::installing-3-4,installing-mtc[] -. 
Create a service principal and credentials for `velero`: -+ -[source,terminal] ----- -$ AZURE_SUBSCRIPTION_ID=`az account list --query '[?isDefault].id' -o tsv` \ - AZURE_TENANT_ID=`az account list --query '[?isDefault].tenantId' -o tsv` \ - AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" \ - --role "Contributor" --query 'password' -o tsv` \ - AZURE_CLIENT_ID=`az ad sp list --display-name "velero" \ - --query '[0].appId' -o tsv` ----- - -. Save the service principal credentials in the `credentials-velero` file: -+ -[source,terminal] ----- -$ cat << EOF > ./credentials-velero -AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} -AZURE_TENANT_ID=${AZURE_TENANT_ID} -AZURE_CLIENT_ID=${AZURE_CLIENT_ID} -AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} -AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP} -AZURE_CLOUD_NAME=AzurePublicCloud -EOF ----- -+ -You use the `credentials-velero` file to add Azure as a replication repository. -endif::[] -ifdef::installing-oadp-azure[] -. Obtain the storage account access key: -+ -[source,terminal] ----- -$ AZURE_STORAGE_ACCOUNT_ACCESS_KEY=`az storage account keys list \ - --account-name $AZURE_STORAGE_ACCOUNT_ID \ - --query "[?keyName == 'key1'].value" -o tsv` ----- - -. Create a custom role that has the minimum required permissions: -+ -[source,terminal,subs="attributes+"] ----- -AZURE_ROLE=Velero -az role definition create --role-definition '{ - "Name": "'$AZURE_ROLE'", - "Description": "Velero related permissions to perform backups, restores and deletions", - "Actions": [ - "Microsoft.Compute/disks/read", - "Microsoft.Compute/disks/write", - "Microsoft.Compute/disks/endGetAccess/action", - "Microsoft.Compute/disks/beginGetAccess/action", - "Microsoft.Compute/snapshots/read", - "Microsoft.Compute/snapshots/write", - "Microsoft.Compute/snapshots/delete", - "Microsoft.Storage/storageAccounts/listkeys/action", - "Microsoft.Storage/storageAccounts/regeneratekey/action" - ], - "AssignableScopes": ["/subscriptions/'$AZURE_SUBSCRIPTION_ID'"] - }' ----- - -. Create a `credentials-velero` file: -+ -[source,terminal,subs="attributes+"] ----- -$ cat << EOF > ./credentials-velero -AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} -AZURE_TENANT_ID=${AZURE_TENANT_ID} -AZURE_CLIENT_ID=${AZURE_CLIENT_ID} -AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} -AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP} -AZURE_STORAGE_ACCOUNT_ACCESS_KEY=${AZURE_STORAGE_ACCOUNT_ACCESS_KEY} <1> -AZURE_CLOUD_NAME=AzurePublicCloud -EOF ----- -<1> Mandatory. You cannot back up internal images if the `credentials-velero` file contains only the service principal credentials. -+ -You use the `credentials-velero` file to create a `Secret` object for Azure before you install the Data Protection Application. -endif::[] diff --git a/modules/migration-configuring-gcp.adoc b/modules/migration-configuring-gcp.adoc deleted file mode 100644 index 717183e21aec..000000000000 --- a/modules/migration-configuring-gcp.adoc +++ /dev/null @@ -1,146 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc - -:_content-type: PROCEDURE -[id="migration-configuring-gcp_{context}"] -= Configuring Google Cloud Platform - -ifdef::installing-3-4,installing-mtc[] -You configure a Google Cloud Platform (GCP) storage bucket as a replication repository for the {mtc-full} ({mtc-short}). 
-endif::[] -ifdef::installing-oadp-gcp[] -You configure Google Cloud Platform (GCP) for the OpenShift API for Data Protection (OADP). -endif::[] - -.Prerequisites - -* You must have the `gcloud` and `gsutil` CLI tools installed. See the link:https://cloud.google.com/sdk/docs/[Google cloud documentation] for details. - -ifdef::installing-3-4,installing-mtc[] -* The GCP storage bucket must be accessible to the source and target clusters. -* If you are using the snapshot copy method: -** The source and target clusters must be in the same region. -** The source and target clusters must have the same storage class. -** The storage class must be compatible with snapshots. -endif::[] - -.Procedure - -. Log in to GCP: -+ -[source,terminal] ----- -$ gcloud auth login ----- - -. Set the `BUCKET` variable: -+ -[source,terminal] ----- -$ BUCKET=<bucket> <1> ----- -<1> Specify your bucket name. - -. Create the storage bucket: -+ -[source,terminal] ----- -$ gsutil mb gs://$BUCKET/ ----- - -. Set the `PROJECT_ID` variable to your active project: -+ -[source,terminal] ----- -$ PROJECT_ID=$(gcloud config get-value project) ----- - -. Create a service account: -+ -[source,terminal] ----- -$ gcloud iam service-accounts create velero \ - --display-name "Velero service account" ----- - -. List your service accounts: -+ -[source,terminal] ----- -$ gcloud iam service-accounts list ----- - -. Set the `SERVICE_ACCOUNT_EMAIL` variable to match its `email` value: -+ -[source,terminal] ----- -$ SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \ - --filter="displayName:Velero service account" \ - --format 'value(email)') ----- - -. Attach the policies to give the `velero` user the minimum necessary permissions: -+ -[source,terminal] ----- -$ ROLE_PERMISSIONS=( - compute.disks.get - compute.disks.create - compute.disks.createSnapshot - compute.snapshots.get - compute.snapshots.create - compute.snapshots.useReadOnly - compute.snapshots.delete - compute.zones.get - storage.objects.create - storage.objects.delete - storage.objects.get - storage.objects.list - iam.serviceAccounts.signBlob -) ----- - -. Create the `velero.server` custom role: -+ -[source,terminal] ----- -$ gcloud iam roles create velero.server \ - --project $PROJECT_ID \ - --title "Velero Server" \ - --permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")" ----- - -. Add IAM policy binding to the project: -+ -[source,terminal] ----- -$ gcloud projects add-iam-policy-binding $PROJECT_ID \ - --member serviceAccount:$SERVICE_ACCOUNT_EMAIL \ - --role projects/$PROJECT_ID/roles/velero.server ----- - -. Update the IAM service account: -+ -[source,terminal] ----- -$ gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET} ----- - -. Save the IAM service account keys to the `credentials-velero` file in the current directory: -+ -[source,terminal] ----- -$ gcloud iam service-accounts keys create credentials-velero \ - --iam-account $SERVICE_ACCOUNT_EMAIL ----- -ifdef::installing-3-4,installing-mtc[] -+ -You use the `credentials-velero` file to add GCP as a replication repository. -endif::[] -ifdef::installing-oadp-gcp[] -+ -You use the `credentials-velero` file to create a `Secret` object for GCP before you install the Data Protection Application. 
-endif::[] diff --git a/modules/migration-configuring-mcg.adoc b/modules/migration-configuring-mcg.adoc deleted file mode 100644 index 111708250880..000000000000 --- a/modules/migration-configuring-mcg.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc - -:_content-type: PROCEDURE -[id="migration-configuring-mcg_{context}"] -= Retrieving Multicloud Object Gateway credentials - -ifdef::installing-3-4,installing-mtc[] -You must retrieve the Multicloud Object Gateway (MCG) credentials and S3 endpoint in order to configure MCG as a replication repository for the {mtc-full} ({mtc-short}). -endif::[] -You must retrieve the Multicloud Object Gateway (MCG) credentials in order to create a `Secret` custom resource (CR) for the OpenShift API for Data Protection (OADP). -//ifdef::installing-oadp-mcg[] -//endif::[] - -MCG is a component of {rh-storage}. - -.Prerequisites -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. -endif::[] -* You must deploy {rh-storage} by using the appropriate link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9[OpenShift Data Foundation deployment guide]. - -.Procedure - -. Obtain the S3 endpoint, `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY` by running the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.9/html/managing_hybrid_and_multicloud_resources/accessing-the-multicloud-object-gateway-with-your-applications_rhodf#accessing-the-Multicloud-object-gateway-from-the-terminal_rhodf[`describe` command] on the `NooBaa` custom resource. -ifdef::installing-3-4,installing-mtc[] -+ -You use these credentials to add MCG as a replication repository. -endif::[] -ifdef::installing-oadp-mcg[] -. Create a `credentials-velero` file: -+ -[source,terminal] ----- -$ cat << EOF > ./credentials-velero -[default] -aws_access_key_id=<AWS_ACCESS_KEY_ID> -aws_secret_access_key=<AWS_SECRET_ACCESS_KEY> -EOF ----- -+ -You use the `credentials-velero` file to create a `Secret` object when you install the Data Protection Application. -endif::[] diff --git a/modules/migration-configuring-proxies.adoc b/modules/migration-configuring-proxies.adoc deleted file mode 100644 index 0af1804f9719..000000000000 --- a/modules/migration-configuring-proxies.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: PROCEDURE -[id="migration-configuring-proxies_{context}"] -= Configuring proxies - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on all clusters. - -.Procedure - -. 
Get the `MigrationController` CR manifest: -+ -[source,terminal] ----- -$ oc get migrationcontroller <migration_controller> -n openshift-migration ----- - -. Update the proxy parameters: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigrationController -metadata: - name: <migration_controller> - namespace: openshift-migration -... -spec: - stunnel_tcp_proxy: http://<username>:<password>@<ip>:<port> <1> - noProxy: example.com <2> ----- -<1> Stunnel proxy URL for direct volume migration. -<2> Comma-separated list of destination domain names, domains, IP addresses, or other network CIDRs to exclude proxying. -+ -Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass proxy for all destinations. -If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues. -+ -This field is ignored if neither the `httpProxy` nor the `httpsProxy` field is set. - -. Save the manifest as `migration-controller.yaml`. -. Apply the updated manifest: -+ -[source,terminal] ----- -$ oc replace -f migration-controller.yaml -n openshift-migration ----- diff --git a/modules/migration-converting-storage-classes.adoc b/modules/migration-converting-storage-classes.adoc deleted file mode 100644 index f10ca7ee3c9c..000000000000 --- a/modules/migration-converting-storage-classes.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-converting-storage-classes_{context}"] -= Converting storage classes in the {mtc-short} web console - -You can convert the storage class of a persistent volume (PV) by migrating it within the same cluster. To do so, you must create and run a migration plan in the {mtc-full} ({mtc-short}) web console. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on the cluster on which {mtc-short} is running. -* You must add the cluster to the {mtc-short} web console. - -.Procedure - -. In the left-side navigation pane of the {product-title} web console, click *Projects*. -. In the list of projects, click your project. -+ -The *Project details* page opens. -. Click the *DeploymentConfig* name. Note the name of its running pod. -. Open the YAML tab of the project. Find the PVs and note the names of their corresponding persistent volume claims (PVCs). -. In the {mtc-short} web console, click *Migration plans*. -. Click *Add migration plan*. -. Enter the *Plan name*. -+ -The migration plan name must contain 3 to 63 lower-case alphanumeric characters (`a-z, 0-9`) and must not contain spaces or underscores (`_`). - -. From the *Migration type* menu, select *Storage class conversion*. -. From the *Source cluster* list, select the desired cluster for storage class conversion. -. Click *Next*. -+ -The *Namespaces* page opens. -. Select the required project. -. Click *Next*. -+ -The *Persistent volumes* page opens. The page displays the PVs in the project, all selected by default. -. For each PV, select the desired target storage class. -. Click *Next*. -+ -The wizard validates the new migration plan and shows that it is ready. -. Click *Close*. -+ -The new plan appears on the *Migration plans* page. -. To start the conversion, click the options menu of the new plan. 
-+ -Under *Migrations*, two options are displayed, *Stage* and *Cutover*. -+ -[NOTE] -===== -Cutover migration updates PVC references in the applications. - -Stage migration does not update PVC references in the applications. -===== -. Select the desired option. -+ -Depending on which option you selected, the *Stage migration* or *Cutover migration* notification appears. -. Click *Migrate*. -+ -Depending on which option you selected, the *Stage started* or *Cutover started* message appears. -. To see the status of the current migration, click the number in the *Migrations* column. -+ -The *Migrations* page opens. -. To see more details on the current migration and monitor its progress, select the migration from the *Type* column. -+ -The *Migration details* page opens. -When the migration progresses to the DirectVolume step and the status of the step becomes `Running Rsync Pods to migrate Persistent Volume data`, you can click *View details* and see the detailed status of the copies. -. In the breadcrumb bar, click *Stage* or *Cutover* and wait for all steps to complete. -. Open the *PersistentVolumeClaims* tab of the {product-title} web console. -+ -You can see new PVCs with the names of the initial PVCs but ending in `new`, which are using the target storage class. -. In the left-side navigation pane, click *Pods*. See that the pod of your project is running again. diff --git a/modules/migration-creating-ca-bundle.adoc b/modules/migration-creating-ca-bundle.adoc deleted file mode 100644 index f8c8f506544d..000000000000 --- a/modules/migration-creating-ca-bundle.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -[id="creating-ca-bundle_{context}"] -= Creating a CA certificate bundle file for self-signed certificates - -If you use a self-signed certificate to secure a cluster or a replication repository for the {mtc-full} ({mtc-short}), certificate verification might fail with the following error message: `Certificate signed by unknown authority`. - -You can create a custom CA certificate bundle file and upload it in the {mtc-short} web console when you add a cluster or a replication repository. - -.Procedure - -Download a CA certificate from a remote endpoint and save it as a CA bundle file: - -[source,terminal] ----- -$ echo -n | openssl s_client -connect <host_FQDN>:<port> \ <1> - | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > <ca_bundle.cert> <2> ----- -<1> Specify the host FQDN and port of the endpoint, for example, `api.my-cluster.example.com:6443`. -<2> Specify the name of the CA bundle file. diff --git a/modules/migration-creating-migration-plan-cam.adoc b/modules/migration-creating-migration-plan-cam.adoc deleted file mode 100644 index 20c03538d93f..000000000000 --- a/modules/migration-creating-migration-plan-cam.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -:_content-type: PROCEDURE -[id="migration-creating-migration-plan-cam_{context}"] -= Creating a migration plan in the {mtc-short} web console - -You can create a migration plan in the {mtc-full} ({mtc-short}) web console. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on all clusters. 
-* You must ensure that the same {mtc-short} version is installed on all clusters. -* You must add the clusters and the replication repository to the {mtc-short} web console. -* If you want to use the _move_ data copy method to migrate a persistent volume (PV), the source and target clusters must have uninterrupted network access to the remote volume. -* If you want to use direct image migration, you must specify the exposed route to the image registry of the source cluster. This can be done by using the {mtc-short} web console or by updating the `MigCluster` custom resource manifest. - -.Procedure - -. In the {mtc-short} web console, click *Migration plans*. -. Click *Add migration plan*. -. Enter the *Plan name*. -+ -The migration plan name must not exceed 253 lower-case alphanumeric characters (`a-z, 0-9`) and must not contain spaces or underscores (`_`). - -. Select a *Source cluster*, a *Target cluster*, and a *Repository*. -. Click *Next*. -. Select the projects for migration. -. Optional: Click the edit icon beside a project to change the target namespace. -. Click *Next*. -. Select a *Migration type* for each PV: - -* The *Copy* option copies the data from the PV of a source cluster to the replication repository and then restores the data on a newly created PV, with similar characteristics, in the target cluster. -* The *Move* option unmounts a remote volume, for example, NFS, from the source cluster, creates a PV resource on the target cluster pointing to the remote volume, and then mounts the remote volume on the target cluster. Applications running on the target cluster use the same remote volume that the source cluster was using. - -. Click *Next*. -. Select a *Copy method* for each PV: - -* *Snapshot copy* backs up and restores data using the cloud provider's snapshot functionality. It is significantly faster than *Filesystem copy*. -* *Filesystem copy* backs up the files on the source cluster and restores them on the target cluster. -+ -The file system copy method is required for direct volume migration. - -. You can select *Verify copy* to verify data migrated with *Filesystem copy*. Data is verified by generating a checksum for each source file and checking the checksum after restoration. Data verification significantly reduces performance. - -. Select a *Target storage class*. -+ -If you selected *Filesystem copy*, you can change the target storage class. - -. Click *Next*. -. On the *Migration options* page, the *Direct image migration* option is selected if you specified an exposed image registry route for the source cluster. The *Direct PV migration* option is selected if you are migrating data with *Filesystem copy*. -+ -The direct migration options copy images and files directly from the source cluster to the target cluster. This option is much faster than copying images and files from the source cluster to the replication repository and then from the replication repository to the target cluster. - -. Click *Next*. -. Optional: Click *Add Hook* to add a hook to the migration plan. -+ -A hook runs custom code. You can add up to four hooks to a single migration plan. Each hook runs during a different migration step. - -.. Enter the name of the hook to display in the web console. -.. If the hook is an Ansible playbook, select *Ansible playbook* and click *Browse* to upload the playbook or paste the contents of the playbook in the field. -.. Optional: Specify an Ansible runtime image if you are not using the default hook image. -.. 
If the hook is not an Ansible playbook, select *Custom container image* and specify the image name and path. -+ -A custom container image can include Ansible playbooks. - -.. Select *Source cluster* or *Target cluster*. -.. Enter the *Service account name* and the *Service account namespace*. -.. Select the migration step for the hook: - -* *preBackup*: Before the application workload is backed up on the source cluster -* *postBackup*: After the application workload is backed up on the source cluster -* *preRestore*: Before the application workload is restored on the target cluster -* *postRestore*: After the application workload is restored on the target cluster - -.. Click *Add*. - -. Click *Finish*. -+ -The migration plan is displayed in the *Migration plans* list. diff --git a/modules/migration-creating-registry-route-for-dim.adoc deleted file mode 100644 index 78d185fae9d7..000000000000 --- a/modules/migration-creating-registry-route-for-dim.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-creating-registry-route-for-dim_{context}"] -= Creating a registry route for direct image migration - -For direct image migration, you must create a route to the exposed {product-registry} on all remote clusters. - -.Prerequisites - -* The {product-registry} must be exposed to external traffic on all remote clusters. -+ -The {product-title} 4 registry is exposed by default. -ifdef::advanced-migration-options-3-4[] -+ -The {product-title} 3 registry must be link:https://docs.openshift.com/container-platform/3.11/install_config/registry/securing_and_exposing_registry.html#exposing-the-registry[exposed manually]. -endif::[] - -.Procedure - -ifdef::advanced-migration-options-3-4[] -* To create a route to an {product-title} 3 registry, run the following command: -+ -[source,terminal] ----- -$ oc create route passthrough --service=docker-registry -n default ----- -endif::[] - -* To create a route to an {product-title} 4 registry, run the following command: -+ -[source,terminal] ----- -$ oc create route passthrough --service=image-registry -n openshift-image-registry ----- diff --git a/modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc deleted file mode 100644 index 644a194a2e73..000000000000 --- a/modules/migration-debugging-velero-admission-webhooks-ibm-appconnect.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc -:_content-type: PROCEDURE -[id="migration-debugging-velero-admission-webhooks-ibm-appconnect_{context}"] -= Restoring IBM AppConnect resources - -If you experience issues when you use Velero to restore an IBM AppConnect resource that has an admission webhook, you can run the checks in this procedure. - -.Procedure - -. Check if you have any mutating admission plugins of `kind: MutatingWebhookConfiguration` in the cluster: -+ -[source,terminal] ----- -$ oc get mutatingwebhookconfigurations ----- - -. Examine the YAML file of each `kind: MutatingWebhookConfiguration` to ensure that none of its rules block creation of the objects that are experiencing issues.
For more information, see link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#rulewithoperations-v1-admissionregistration-k8s-io[the official Kubernetes documentation]. - -. Check that any `spec.version` in `type: Configuration.appconnect.ibm.com/v1beta1` used at backup time is supported by the installed Operator. diff --git a/modules/migration-debugging-velero-admission-webhooks-knative.adoc deleted file mode 100644 index bc8b81cd6a30..000000000000 --- a/modules/migration-debugging-velero-admission-webhooks-knative.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc -:_content-type: PROCEDURE -[id="migration-debugging-velero-admission-webhooks-knative_{context}"] -= Restoring Knative resources - -You might encounter problems using Velero to back up Knative resources that use admission webhooks. - -You can avoid such problems by restoring the top-level `Service` resource first whenever you back up and restore Knative resources that use admission webhooks. - -.Procedure - -* Restore the top-level `service.serving.knative.dev Service` resource: -+ -[source,terminal] ----- -$ velero restore create <restore_name> \ - --from-backup=<backup_name> --include-resources \ - service.serving.knative.dev ----- diff --git a/modules/migration-debugging-velero-resources.adoc deleted file mode 100644 index 053df1b80543..000000000000 --- a/modules/migration-debugging-velero-resources.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -[id="migration-debugging-velero-resources_{context}"] -= Debugging Velero resources with the Velero CLI tool - -You can debug `Backup` and `Restore` custom resources (CRs) and retrieve logs with the Velero CLI tool. - -The Velero CLI tool provides more detailed information than the OpenShift CLI tool.
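To find the CR names to pass to the Velero CLI, you can first list them with the OpenShift CLI. A minimal sketch, assuming the same `{namespace}` attribute that the commands in this module use:

[source,terminal,subs="attributes+"]
----
$ oc -n {namespace} get backups.velero.io,restores.velero.io
----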
- -[discrete] -[id="velero-command-syntax_{context}"] -== Syntax - -Use the `oc exec` command to run a Velero CLI command: - -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - <backup_restore_cr> <command> <cr_name> ----- - -.Example -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - backup describe 0e44ae00-5dc3-11eb-9ca8-df7e5254778b-2d8ql ----- - -[discrete] -[id="velero-help-option_{context}"] -== Help option - -Use the `velero --help` option to list all Velero CLI commands: - -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - --help ----- - -[discrete] -[id="velero-describe-command_{context}"] -== Describe command - -Use the `velero describe` command to retrieve a summary of warnings and errors associated with a `Backup` or `Restore` CR: - -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - <backup_restore_cr> describe <cr_name> ----- - -.Example -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - backup describe 0e44ae00-5dc3-11eb-9ca8-df7e5254778b-2d8ql ----- - -[discrete] -[id="velero-logs-command_{context}"] -== Logs command - -Use the `velero logs` command to retrieve the logs of a `Backup` or `Restore` CR: - -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - <backup_restore_cr> logs <cr_name> ----- - -.Example -[source,terminal,subs="attributes+"] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - restore logs ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf ----- diff --git a/modules/migration-direct-volume-migration-and-direct-image-migration.adoc b/modules/migration-direct-volume-migration-and-direct-image-migration.adoc deleted file mode 100644 index d54976499cc1..000000000000 --- a/modules/migration-direct-volume-migration-and-direct-image-migration.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc.adoc - -[id="migration-direct-volume-migration-and-direct-image-migration_{context}"] -= Direct volume migration and direct image migration - -You can use direct image migration (DIM) and direct volume migration (DVM) to migrate images and data directly from the source cluster to the target cluster. - -If you run DVM with nodes that are in different availability zones, the migration might fail because the migrated pods cannot access the persistent volume claim. - -DIM and DVM have significant performance benefits because the intermediate steps of backing up files from the source cluster to the replication repository and restoring files from the replication repository to the target cluster are skipped. The data is transferred with link:https://rsync.samba.org/[Rsync]. - -DIM and DVM have additional prerequisites. 
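One way to check for the availability zone mismatch described above is to compare the zone labels of the nodes on the source and target clusters. A minimal sketch, assuming the standard `topology.kubernetes.io/zone` node label (older clusters might use `failure-domain.beta.kubernetes.io/zone` instead):

[source,terminal]
----
$ oc get nodes -L topology.kubernetes.io/zone
----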
diff --git a/modules/migration-dvm-error-node-selectors.adoc deleted file mode 100644 index 1ee856e96fea..000000000000 --- a/modules/migration-dvm-error-node-selectors.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-dvm-error-node-selectors_{context}"] -= Direct volume migration does not complete - -If direct volume migration does not complete, the target cluster might not have the same `node-selector` annotations as the source cluster. - -{mtc-full} ({mtc-short}) migrates namespaces with all annotations to preserve security context constraints and scheduling requirements. During direct volume migration, {mtc-short} creates Rsync transfer pods on the target cluster in the namespaces that were migrated from the source cluster. If a target cluster namespace does not have the same annotations as the source cluster namespace, the Rsync transfer pods cannot be scheduled. The Rsync pods remain in a `Pending` state. - -You can identify and fix this issue by performing the following procedure. - -.Procedure - -. Check the status of the `MigMigration` CR: -+ -[source,terminal] ----- -$ oc describe migmigration <migmigration> -n openshift-migration ----- -+ -The output includes the following status message: -+ -.Example output -[source,terminal] ----- -Some or all transfer pods are not running for more than 10 mins on destination cluster ----- - -. On the source cluster, obtain the details of a migrated namespace: -+ -[source,terminal] ----- -$ oc get namespace <namespace> -o yaml <1> ----- -<1> Specify the migrated namespace. - -. On the target cluster, edit the migrated namespace: -+ -[source,terminal] ----- -$ oc edit namespace <namespace> ----- - -. Add the missing `openshift.io/node-selector` annotations to the migrated namespace as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - annotations: - openshift.io/node-selector: "region=east" -... ----- - -. Run the migration plan again. diff --git a/modules/migration-editing-pvs-in-migplan.adoc deleted file mode 100644 index 2a6549e687e7..000000000000 --- a/modules/migration-editing-pvs-in-migplan.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc - -:_content-type: PROCEDURE -[id="migration-editing-pvs-in-migplan_{context}"] -= Editing persistent volume attributes - -After you create a `MigPlan` custom resource (CR), the `MigrationController` CR discovers the persistent volumes (PVs). The `spec.persistentVolumes` block and the `status.destStorageClasses` block are added to the `MigPlan` CR. - -You can edit the values in the `spec.persistentVolumes.selection` block. If you change values outside the `spec.persistentVolumes.selection` block, the values are overwritten when the `MigPlan` CR is reconciled by the `MigrationController` CR. - -[NOTE] -==== -The default value for the `spec.persistentVolumes.selection.storageClass` parameter is determined by the following logic: - -. If the source cluster PV is Gluster or NFS, the default is either `cephfs`, for `accessMode: ReadWriteMany`, or `cephrbd`, for `accessMode: ReadWriteOnce`. -. 
If the PV is neither Gluster nor NFS _or_ if `cephfs` or `cephrbd` are not available, the default is a storage class for the same provisioner. -. If a storage class for the same provisioner is not available, the default is the default storage class of the destination cluster. - -You can change the `storageClass` value to the value of any `name` parameter in the `status.destStorageClasses` block of the `MigPlan` CR. - -If the `storageClass` value is empty, the PV will have no storage class after migration. This option is appropriate if, for example, you want to move the PV to an NFS volume on the destination cluster. -==== - - -.Prerequisites - -* `MigPlan` CR is in a `Ready` state. - -.Procedure - -* Edit the `spec.persistentVolumes.selection` values in the `MigPlan` CR: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: - persistentVolumes: - - capacity: 10Gi - name: pvc-095a6559-b27f-11eb-b27f-021bddcaf6e4 - proposedCapacity: 10Gi - pvc: - accessModes: - - ReadWriteMany - hasReference: true - name: mysql - namespace: mysql-persistent - selection: - action: <copy> <1> - copyMethod: <filesystem> <2> - verify: true <3> - storageClass: <gp2> <4> - accessMode: <ReadWriteMany> <5> - storageClass: cephfs ----- -<1> Allowed values are `move`, `copy`, and `skip`. If only one action is supported, the default value is the supported action. If multiple actions are supported, the default value is `copy`. -<2> Allowed values are `snapshot` and `filesystem`. Default value is `filesystem`. -<3> The `verify` parameter is displayed if you select the verification option for file system copy in the {mtc-short} web console. You can set it to `false`. -<4> You can change the default value to the value of any `name` parameter in the `status.destStorageClasses` block of the `MigPlan` CR. If no value is specified, the PV will have no storage class after migration. -<5> Allowed values are `ReadWriteOnce` and `ReadWriteMany`. If this value is not specified, the default is the access mode of the source cluster PVC. You can only edit the access mode in the `MigPlan` CR. You cannot edit it by using the {mtc-short} web console. diff --git a/modules/migration-enabling-cached-kubernetes-clients.adoc b/modules/migration-enabling-cached-kubernetes-clients.adoc deleted file mode 100644 index fb9d34cf24d9..000000000000 --- a/modules/migration-enabling-cached-kubernetes-clients.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-enabling-cached-kubernetes-clients_{context}"] -= Enabling cached Kubernetes clients - -You can enable cached Kubernetes clients in the `MigrationController` custom resource (CR) for improved performance during migration. The greatest performance benefit is displayed when migrating between clusters in different regions or with significant network latency. - -[NOTE] -==== -Delegated tasks, for example, Rsync backup for direct volume migration or Velero backup and restore, however, do not show improved performance with cached clients. -==== - -Cached clients require extra memory because the `MigrationController` CR caches all API resources that are required for interacting with `MigCluster` CRs. Requests that are normally sent to the API server are directed to the cache instead. 
The cache watches the API server for updates. - -You can increase the memory limits and requests of the `MigrationController` CR if `OOMKilled` errors occur after you enable cached clients. - -.Procedure - -. Enable cached clients by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-migration patch migrationcontroller migration-controller --type=json --patch \ - '[{ "op": "replace", "path": "/spec/mig_controller_enable_cache", "value": true}]' ----- - -. Optional: Increase the `MigrationController` CR memory limits by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-migration patch migrationcontroller migration-controller --type=json --patch \ - '[{ "op": "replace", "path": "/spec/mig_controller_limits_memory", "value": <10Gi>}]' ----- - -. Optional: Increase the `MigrationController` CR memory requests by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-migration patch migrationcontroller migration-controller --type=json --patch \ - '[{ "op": "replace", "path": "/spec/mig_controller_requests_memory", "value": <350Mi>}]' ----- diff --git a/modules/migration-enabling-pv-resizing-dvm.adoc b/modules/migration-enabling-pv-resizing-dvm.adoc deleted file mode 100644 index f846d3726a1f..000000000000 --- a/modules/migration-enabling-pv-resizing-dvm.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-enabling-pv-resizing-dvm_{context}"] -= Enabling persistent volume resizing for direct volume migration - -You can enable persistent volume (PV) resizing for direct volume migration to avoid running out of disk space on the destination cluster. - -When the disk usage of a PV reaches a configured level, the `MigrationController` custom resource (CR) compares the requested storage capacity of a persistent volume claim (PVC) to its actual provisioned capacity. Then, it calculates the space required on the destination cluster. - -A `pv_resizing_threshold` parameter determines when PV resizing is used. The default threshold is `3%`. This means that PV resizing occurs when the disk usage of a PV is more than `97%`. You can increase this threshold so that PV resizing occurs at a lower disk usage level. - -PVC capacity is calculated according to the following criteria: - -* If the requested storage capacity (`spec.resources.requests.storage`) of the PVC is not equal to its actual provisioned capacity (`status.capacity.storage`), the greater value is used. -* If a PV is provisioned through a PVC and then subsequently changed so that its PV and PVC capacities no longer match, the greater value is used. - -.Prerequisites - -* The PVCs must be attached to one or more running pods so that the `MigrationController` CR can execute commands. - -.Procedure - -. Log in to the host cluster. -. Enable PV resizing by patching the `MigrationController` CR: -+ -[source,terminal] ----- -$ oc patch migrationcontroller migration-controller -p '{"spec":{"enable_dvm_pv_resizing":true}}' \ <1> - --type='merge' -n openshift-migration ----- -<1> Set the value to `false` to disable PV resizing. - -. 
Optional: Update the `pv_resizing_threshold` parameter to increase the threshold: -+ -[source,terminal] ----- -$ oc patch migrationcontroller migration-controller -p '{"spec":{"pv_resizing_threshold":41}}' \ <1> - --type='merge' -n openshift-migration ----- -<1> The default value is `3`. -+ -When the threshold is exceeded, the following status message is displayed in the `MigPlan` CR status: -+ -[source,yaml] ----- -status: - conditions: -... - - category: Warn - durable: true - lastTransitionTime: "2021-06-17T08:57:01Z" - message: 'Capacity of the following volumes will be automatically adjusted to avoid disk capacity issues in the target cluster: [pvc-b800eb7b-cf3b-11eb-a3f7-0eae3e0555f3]' - reason: Done - status: "False" - type: PvCapacityAdjustmentRequired ----- -+ -[NOTE] -==== -For AWS gp2 storage, this message does not appear unless the `pv_resizing_threshold` is 42% or greater because of the way gp2 calculates volume usage and size. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1973148[*BZ#1973148*]) -==== diff --git a/modules/migration-error-messages.adoc b/modules/migration-error-messages.adoc deleted file mode 100644 index 156f6a7e4e95..000000000000 --- a/modules/migration-error-messages.adoc +++ /dev/null @@ -1,215 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-error-messages_{context}"] -= Error messages and resolutions - -This section describes common error messages you might encounter with the {mtc-full} ({mtc-short}) and how to resolve their underlying causes. - -[id="ca-certificate-error-displayed-when-accessing-console-for-first-time_{context}"] -== CA certificate error displayed when accessing the {mtc-short} console for the first time - -If the {mtc-short} console displays a `CA certificate error` message the first time you try to access it, the likely cause is that a cluster uses self-signed CA certificates. - -Navigate to the `oauth-authorization-server` URL in the error message and accept the certificate. To resolve this issue permanently, install the certificate authority so that it is trusted. - -If the browser displays an `Unauthorized` message after you have accepted the CA certificate, navigate to the {mtc-short} console and then refresh the web page. - -[id="oauth-timeout-error-in-console_{context}"] -== OAuth timeout error in the {mtc-short} console - -If the {mtc-short} console displays a `connection has timed out` message after you have accepted a self-signed certificate, the cause is likely to be one of the following: - -* Interrupted network access to the OAuth server -* Interrupted network access to the {product-title} console -* Proxy configuration blocking access to the OAuth server. See link:https://access.redhat.com/solutions/5514491[MTC console inaccessible because of OAuth timeout error] for details. - -To determine the cause: - -* Inspect the {mtc-short} console web page with a browser web inspector. -* Check the `Migration UI` pod log for errors. - -[id="certificate-signed-by-unknown-authority-error_{context}"] -== Certificate signed by unknown authority error - -If you use a self-signed certificate to secure a cluster or a replication repository for the {mtc-full} ({mtc-short}), certificate verification might fail with the following error message: `Certificate signed by unknown authority`. 
- -You can create a custom CA certificate bundle file and upload it in the {mtc-short} web console when you add a cluster or a replication repository. - -.Procedure - -Download a CA certificate from a remote endpoint and save it as a CA bundle file: - -[source,terminal] ----- -$ echo -n | openssl s_client -connect <host_FQDN>:<port> \ <1> - | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > <ca_bundle.cert> <2> ----- -<1> Specify the host FQDN and port of the endpoint, for example, `api.my-cluster.example.com:6443`. -<2> Specify the name of the CA bundle file. - -[id="backup-storage-location-errors-in-velero-pod-log_{context}"] -== Backup storage location errors in the Velero pod log - -If a `Velero` `Backup` custom resource contains a reference to a backup storage location (BSL) that does not exist, the `Velero` pod log might display the following error messages: - -[source,terminal] ----- -$ oc logs <Velero_Pod> -n openshift-migration ----- - -.Example output -[source,terminal] ----- -level=error msg="Error checking repository for stale locks" error="error getting backup storage location: BackupStorageLocation.velero.io \"ts-dpa-1\" not found" error.file="/remote-source/src/github.com/vmware-tanzu/velero/pkg/restic/repository_manager.go:259" ----- - -You can ignore these error messages. A missing BSL cannot cause a migration to fail. - -[id="pod-volume-backup-timeout-error-in-velero-pod-log_{context}"] -== Pod volume backup timeout error in the Velero pod log - -If a migration fails because `Restic` times out, the `Velero` pod log displays the following error: - -[source,terminal] ----- -level=error msg="Error backing up item" backup=velero/monitoring error="timed out -waiting for all PodVolumeBackups to complete" error.file="/go/src/github.com/ -heptio/velero/pkg/restic/backupper.go:165" error.function="github.com/heptio/ -velero/pkg/restic.(*backupper).BackupPodVolumes" group=v1 ----- - -The default value of `restic_timeout` is one hour. You can increase this parameter for large migrations, keeping in mind that a higher value may delay the return of error messages. - -.Procedure - -. In the {product-title} web console, navigate to *Operators* -> *Installed Operators*. -. Click *{mtc-full} Operator*. -. In the *MigrationController* tab, click *migration-controller*. -. In the *YAML* tab, update the following parameter value: -+ -[source,yaml] ----- -spec: - restic_timeout: 1h <1> ----- -<1> Valid units are `h` (hours), `m` (minutes), and `s` (seconds), for example, `3h30m15s`. - -. Click *Save*. - -[id="restic-verification-errors-in-migmigration-custom-resource_{context}"] -== Restic verification errors in the MigMigration custom resource - -If data verification fails when migrating a persistent volume with the file system data copy method, the `MigMigration` CR displays the following error: - -.MigMigration CR status -[source,yaml] ----- -status: - conditions: - - category: Warn - durable: true - lastTransitionTime: 2020-04-16T20:35:16Z - message: There were verify errors found in 1 Restic volume restores. See restore `<registry-example-migration-rvwcm>` - for details <1> - status: "True" - type: ResticVerifyErrors <2> ----- -<1> The error message identifies the `Restore` CR name. -<2> `ResticVerifyErrors` is a general error warning type that includes verification errors. - -[NOTE] -==== -A data verification error does not cause the migration process to fail. -==== - -You can check the `Restore` CR to troubleshoot the data verification error. - -.Procedure - -. 
Log in to the target cluster. -. View the `Restore` CR: -+ -[source,terminal] ----- -$ oc describe restore <registry-example-migration-rvwcm> -n openshift-migration ----- -+ -The output identifies the persistent volume with `PodVolumeRestore` errors. -+ -.Example output -[source,yaml] ----- -status: - phase: Completed - podVolumeRestoreErrors: - - kind: PodVolumeRestore - name: <registry-example-migration-rvwcm-98t49> - namespace: openshift-migration - podVolumeRestoreResticErrors: - - kind: PodVolumeRestore - name: <registry-example-migration-rvwcm-98t49> - namespace: openshift-migration ----- - -. View the `PodVolumeRestore` CR: -+ -[source,terminal] ----- -$ oc describe podvolumerestore <registry-example-migration-rvwcm-98t49> -n openshift-migration ----- -+ -The output identifies the `Restic` pod that logged the errors. -+ -.PodVolumeRestore CR with Restic pod error -[source,yaml] ----- - completionTimestamp: 2020-05-01T20:49:12Z - errors: 1 - resticErrors: 1 - ... - resticPod: <restic-nr2v5> ----- - -. View the `Restic` pod log to locate the errors: -+ -[source,terminal] ----- -$ oc logs -f <restic-nr2v5> ----- - -[id="restic-permission-error-when-migrating-from-nfs-storage-with-root-squash-enabled_{context}"] -== Restic permission error when migrating from NFS storage with root_squash enabled - -If you are migrating data from NFS storage and `root_squash` is enabled, `Restic` maps to `nfsnobody` and does not have permission to perform the migration. The `Restic` pod log displays the following error: - -.Restic permission error -[source,terminal] ----- -backup=openshift-migration/<backup_id> controller=pod-volume-backup error="fork/exec -/usr/bin/restic: permission denied" error.file="/go/src/github.com/vmware-tanzu/ -velero/pkg/controller/pod_volume_backup_controller.go:280" error.function= -"github.com/vmware-tanzu/velero/pkg/controller.(*podVolumeBackupController).processBackup" -logSource="pkg/controller/pod_volume_backup_controller.go:280" name=<backup_id> -namespace=openshift-migration ----- - -You can resolve this issue by creating a supplemental group for `Restic` and adding the group ID to the `MigrationController` CR manifest. - -.Procedure - -. Create a supplemental group for `Restic` on the NFS storage. -. Set the `setgid` bit on the NFS directories so that group ownership is inherited. -. Add the `restic_supplemental_groups` parameter to the `MigrationController` CR manifest on the source and target clusters: -+ -[source,yaml] ----- -spec: - restic_supplemental_groups: <group_id> <1> ----- -<1> Specify the supplemental group ID. - -. Wait for the `Restic` pods to restart so that the changes are applied. diff --git a/modules/migration-excluding-pvcs.adoc deleted file mode 100644 index 5e5237919457..000000000000 --- a/modules/migration-excluding-pvcs.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-excluding-pvcs_{context}"] -= Excluding persistent volume claims - -You select persistent volume claims (PVCs) for state migration by excluding the PVCs that you do not want to migrate. You exclude PVCs by setting the `spec.persistentVolumes.pvc.selection.action` parameter of the `MigPlan` custom resource (CR) after the persistent volumes (PVs) have been discovered. - -.Prerequisites - -* `MigPlan` CR is in a `Ready` state. 
- -.Procedure - -* Add the `spec.persistentVolumes.pvc.selection.action` parameter to the `MigPlan` CR and set it to `skip`: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: -... - persistentVolumes: - - capacity: 10Gi - name: <pv_name> - pvc: -... - selection: - action: skip ----- diff --git a/modules/migration-excluding-resources.adoc b/modules/migration-excluding-resources.adoc deleted file mode 100644 index ab198f82fc46..000000000000 --- a/modules/migration-excluding-resources.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-excluding-resources_{context}"] -= Excluding resources - -You can exclude resources, for example, image streams, persistent volumes (PVs), or subscriptions, from a {mtc-full} ({mtc-short}) migration plan to reduce the resource load for migration or to migrate images or PVs with a different tool. - -By default, the {mtc-short} excludes service catalog resources and Operator Lifecycle Manager (OLM) resources from migration. These resources are parts of the service catalog API group and the OLM API group, neither of which is supported for migration at this time. - -.Procedure - -. Edit the `MigrationController` custom resource manifest: -+ -[source,terminal] ----- -$ oc edit migrationcontroller <migration_controller> -n openshift-migration ----- - -. Update the `spec` section by adding parameters to exclude specific resources. For those resources that do not have their own exclusion parameters, add the `additional_excluded_resources` parameter: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigrationController -metadata: - name: migration-controller - namespace: openshift-migration -spec: - disable_image_migration: true <1> - disable_pv_migration: true <2> - additional_excluded_resources: <3> - - resource1 - - resource2 - ... ----- -<1> Add `disable_image_migration: true` to exclude image streams from the migration. `imagestreams` is added to the `excluded_resources` list in `main.yml` when the `MigrationController` pod restarts. -<2> Add `disable_pv_migration: true` to exclude PVs from the migration plan. `persistentvolumes` and `persistentvolumeclaims` are added to the `excluded_resources` list in `main.yml` when the `MigrationController` pod restarts. Disabling PV migration also disables PV discovery when you create the migration plan. -<3> You can add {product-title} resources that you want to exclude to the `additional_excluded_resources` list. - - -. Wait two minutes for the `MigrationController` pod to restart so that the changes are applied. - -. 
Verify that the resource is excluded: -+ -[source,terminal] ----- -$ oc get deployment -n openshift-migration migration-controller -o yaml | grep EXCLUDED_RESOURCES -A1 ----- -+ -The output contains the excluded resources: -+ -.Example output -[source,yaml] ----- -name: EXCLUDED_RESOURCES -value: -resource1,resource2,imagetags,templateinstances,clusterserviceversions,packagemanifests,subscriptions,servicebrokers,servicebindings,serviceclasses,serviceinstances,serviceplans,imagestreams,persistentvolumes,persistentvolumeclaims ----- diff --git a/modules/migration-hooks.adoc b/modules/migration-hooks.adoc deleted file mode 100644 index dd8517fa407b..000000000000 --- a/modules/migration-hooks.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -[id="migration-hooks_{context}"] -= Migration hooks - -You can add up to four migration hooks to a single migration plan, with each hook running at a different phase of the migration. Migration hooks perform tasks such as customizing application quiescence, manually migrating unsupported data types, and updating applications after migration. - -A migration hook runs on a source or a target cluster at one of the following migration steps: - -* `PreBackup`: Before resources are backed up on the source cluster. -* `PostBackup`: After resources are backed up on the source cluster. -* `PreRestore`: Before resources are restored on the target cluster. -* `PostRestore`: After resources are restored on the target cluster. - -You can create a hook by creating an Ansible playbook that runs with the default Ansible image or with a custom hook container. - -.Ansible playbook - -The Ansible playbook is mounted on a hook container as a config map. The hook container runs as a job, using the cluster, service account, and namespace specified in the `MigPlan` custom resource. The job continues to run until it reaches the default limit of 6 retries or a successful completion. This continues even if the initial pod is evicted or killed. - -The default Ansible runtime image is `registry.redhat.io/rhmtc/openshift-migration-hook-runner-rhel7:{mtc-version}`. This image is based on the Ansible Runner image and includes `python-openshift` for Ansible Kubernetes resources and an updated `oc` binary. - -.Custom hook container - -You can use a custom hook container instead of the default Ansible image. diff --git a/modules/migration-installing-legacy-operator.adoc b/modules/migration-installing-legacy-operator.adoc deleted file mode 100644 index 245dae30d026..000000000000 --- a/modules/migration-installing-legacy-operator.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: PROCEDURE -[id="migration-installing-legacy-operator_{context}"] -ifdef::installing-3-4,installing-restricted-3-4[] -= Installing the legacy {mtc-full} Operator on {product-title} 3 - -You can install the legacy {mtc-full} Operator manually on {product-title} 3. 
-endif::[] -ifdef::installing-mtc,installing-mtc-restricted[] -= Installing the legacy {mtc-full} Operator on {product-title} 4.2 to 4.5 - -You can install the legacy {mtc-full} Operator manually on {product-title} versions 4.2 to 4.5. -endif::[] - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on all clusters. -* You must have access to `registry.redhat.io`. -* You must have `podman` installed. -ifdef::installing-3-4,installing-restricted-3-4[] -* You must create an link:https://access.redhat.com/solutions/3772061[image stream secret] and copy it to each node in the cluster. -endif::[] -ifdef::installing-restricted-3-4,installing-mtc-restricted[] -* You must have a Linux workstation with network access in order to download files from `registry.redhat.io`. -* You must create a mirror image of the Operator catalog. -* You must install the {mtc-full} Operator from the mirrored Operator catalog on {product-title} {product-version}. -endif::[] - -.Procedure - -. Log in to `registry.redhat.io` with your Red Hat Customer Portal credentials: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- - -. Download the `operator.yml` file by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman cp $(podman create \ - registry.redhat.io/rhmtc/openshift-migration-legacy-rhel8-operator:v{mtc-version}):/operator.yml ./ ----- - -. Download the `controller.yml` file by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman cp $(podman create \ - registry.redhat.io/rhmtc/openshift-migration-legacy-rhel8-operator:v{mtc-version}):/controller.yml ./ ----- - -ifdef::installing-restricted-3-4,installing-mtc-restricted[] -. Obtain the Operator image mapping by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ grep openshift-migration-legacy-rhel8-operator ./mapping.txt | grep rhmtc ----- -+ -The `mapping.txt` file was created when you mirrored the Operator catalog. The output shows the mapping between the `registry.redhat.io` image and your mirror registry image. -+ -.Example output -[source,terminal] ----- -registry.redhat.io/rhmtc/openshift-migration-legacy-rhel8-operator@sha256:468a6126f73b1ee12085ca53a312d1f96ef5a2ca03442bcb63724af5e2614e8a=<registry.apps.example.com>/rhmtc/openshift-migration-legacy-rhel8-operator ----- - -. Update the `image` values for the `ansible` and `operator` containers and the `REGISTRY` value in the `operator.yml` file: -+ -[source,yaml] ----- -containers: - - name: ansible - image: <registry.apps.example.com>/rhmtc/openshift-migration-legacy-rhel8-operator@sha256:<468a6126f73b1ee12085ca53a312d1f96ef5a2ca03442bcb63724af5e2614e8a> <1> -... - - name: operator - image: <registry.apps.example.com>/rhmtc/openshift-migration-legacy-rhel8-operator@sha256:<468a6126f73b1ee12085ca53a312d1f96ef5a2ca03442bcb63724af5e2614e8a> <1> -... - env: - - name: REGISTRY - value: <registry.apps.example.com> <2> ----- -<1> Specify your mirror registry and the `sha256` value of the Operator image. -<2> Specify your mirror registry. -endif::[] - -. Log in to your {product-title} source cluster. - -ifdef::installing-3-4,installing-mtc[] -. Verify that the cluster can authenticate with `registry.redhat.io`: -+ -[source,terminal] ----- -$ oc run test --image registry.redhat.io/ubi9 --command sleep infinity ----- -endif::[] - -. 
Create the {mtc-full} Operator object: -+ -[source,terminal] ----- -$ oc create -f operator.yml ----- -+ -.Example output -[source,terminal] ----- -namespace/openshift-migration created -rolebinding.rbac.authorization.k8s.io/system:deployers created -serviceaccount/migration-operator created -customresourcedefinition.apiextensions.k8s.io/migrationcontrollers.migration.openshift.io created -role.rbac.authorization.k8s.io/migration-operator created -rolebinding.rbac.authorization.k8s.io/migration-operator created -clusterrolebinding.rbac.authorization.k8s.io/migration-operator created -deployment.apps/migration-operator created -Error from server (AlreadyExists): error when creating "./operator.yml": -rolebindings.rbac.authorization.k8s.io "system:image-builders" already exists <1> -Error from server (AlreadyExists): error when creating "./operator.yml": -rolebindings.rbac.authorization.k8s.io "system:image-pullers" already exists ----- -<1> You can ignore `Error from server (AlreadyExists)` messages. They are caused by the {mtc-full} Operator creating resources for earlier versions of {product-title} 4 that are provided in later releases. - -. Create the `MigrationController` object: -+ -[source,terminal] ----- -$ oc create -f controller.yml ----- - -. Verify that the {mtc-short} pods are running: -+ -[source,terminal] ----- -$ oc get pods -n openshift-migration ----- diff --git a/modules/migration-installing-mtc-on-ocp-4.adoc b/modules/migration-installing-mtc-on-ocp-4.adoc deleted file mode 100644 index a90274050c4b..000000000000 --- a/modules/migration-installing-mtc-on-ocp-4.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/installing-3-4.adoc -// * migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: PROCEDURE -[id="migration-installing-mtc-on-ocp-4_{context}"] -= Installing the {mtc-full} Operator on {product-title} {product-version} - -You install the {mtc-full} Operator on {product-title} {product-version} by using the Operator Lifecycle Manager. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on all clusters. -ifdef::installing-restricted-3-4,installing-mtc-restricted[] -* You must create an Operator catalog from a mirror image in a local registry. -endif::[] - -.Procedure - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. -. Use the *Filter by keyword* field to find the *{mtc-full} Operator*. -. Select the *{mtc-full} Operator* and click *Install*. -. Click *Install*. -+ -On the *Installed Operators* page, the *{mtc-full} Operator* appears in the *openshift-migration* project with the status *Succeeded*. - -. Click *{mtc-full} Operator*. -. Under *Provided APIs*, locate the *Migration Controller* tile, and click *Create Instance*. -. Click *Create*. -. Click *Workloads* -> *Pods* to verify that the {mtc-short} pods are running. 
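You can also verify from the command line that the {mtc-short} pods are running, using the same command shown for the legacy Operator installation:

[source,terminal]
----
$ oc get pods -n openshift-migration
----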
diff --git a/modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc b/modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc deleted file mode 100644 index 6bf58a8a1204..000000000000 --- a/modules/migration-isolating-dns-domain-of-target-cluster-from-clients.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc -// * migration_toolkit_for_containers/network-considerations-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-isolating-dns-domain-of-target-cluster-from-clients_{context}"] -= Isolating the DNS domain of the target cluster from the clients - -You can allow the clients' requests sent to the DNS domain of the source cluster to reach the DNS domain of the target cluster without exposing the target cluster to the clients. - -.Procedure - -. Place an exterior network component, such as an application load balancer or a reverse proxy, between the clients and the target cluster. - -. Update the application FQDN on the source cluster in the DNS server to return the IP address of the exterior network component. - -. Configure the network component to send requests received for the application in the source domain to the load balancer in the target cluster domain. - -. Create a wildcard DNS record for the `*.apps.source.example.com` domain that points to the IP address of the load balancer of the source cluster. - -. Create a DNS record for each application that points to the IP address of the exterior network component in front of the target cluster. A specific DNS record has higher priority than a wildcard record, so no conflict arises when the application FQDN is resolved. - -[NOTE] -==== -* The exterior network component must terminate all secure TLS connections. If the connections pass through to the target cluster load balancer, the FQDN of the target application is exposed to the client and certificate errors occur. - -* The applications must not return links referencing the target cluster domain to the clients. Otherwise, parts of the application might not load or work properly. -==== diff --git a/modules/migration-known-issues.adoc b/modules/migration-known-issues.adoc deleted file mode 100644 index ba4faf34e338..000000000000 --- a/modules/migration-known-issues.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -[id="migration-known-issues_{context}"] -= Known issues - -This release has the following known issues: - -* During migration, the {mtc-full} ({mtc-short}) preserves the following namespace annotations: - -** `openshift.io/sa.scc.mcs` -** `openshift.io/sa.scc.supplemental-groups` -** `openshift.io/sa.scc.uid-range` -+ -These annotations preserve the UID range, ensuring that the containers retain their file system permissions on the target cluster. There is a risk that the migrated UIDs could duplicate UIDs within an existing or future namespace on the target cluster. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1748440[*BZ#1748440*]) - -* Most cluster-scoped resources are not yet handled by {mtc-short}. If your applications require cluster-scoped resources, you might have to create them manually on the target cluster. -* If a migration fails, the migration plan does not retain custom PV settings for quiesced pods. 
You must manually roll back the migration, delete the migration plan, and create a new migration plan with your PV settings. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1784899[*BZ#1784899*]) -* If a large migration fails because Restic times out, you can increase the `restic_timeout` parameter value (default: `1h`) in the `MigrationController` custom resource (CR) manifest. -* If you select the data verification option for PVs that are migrated with the file system copy method, performance is significantly slower. -* If you are migrating data from NFS storage and `root_squash` is enabled, `Restic` maps to `nfsnobody`. The migration fails and a permission error is displayed in the `Restic` pod log. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1873641[*BZ#1873641*]) -+ -You can resolve this issue by adding supplemental groups for `Restic` to the `MigrationController` CR manifest: -+ -[source,yaml] ----- -spec: -... - restic_supplemental_groups: - - 5555 - - 6666 ----- - -* If you perform direct volume migration with nodes that are in different availability zones or availability sets, the migration might fail because the migrated pods cannot access the PVC. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1947487[*BZ#1947487*]) diff --git a/modules/migration-kubernetes-objects.adoc b/modules/migration-kubernetes-objects.adoc deleted file mode 100644 index 8b1437a9bc2a..000000000000 --- a/modules/migration-kubernetes-objects.adoc +++ /dev/null @@ -1,88 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-kubernetes-objects_{context}"] -= Performing a state migration of Kubernetes objects by using the {mtc-short} API - -After you migrate all the PV data, you can use the Migration Toolkit for Containers (MTC) API to perform a one-time state migration of Kubernetes objects that constitute an application. - -You do this by configuring `MigPlan` custom resource (CR) fields to provide a list of Kubernetes resources with an additional label selector to further filter those resources, and then performing a migration by creating a `MigMigration` CR. The `MigPlan` resource is closed after the migration. - -[NOTE] -==== -Selecting Kubernetes resources is an API-only feature. You must update the `MigPlan` CR and create a `MigMigration` CR for it by using the CLI. The {mtc-short} web console does not support migrating Kubernetes objects. -==== - -[NOTE] -==== -After migration, the `closed` parameter of the `MigPlan` CR is set to `true`. You cannot create another `MigMigration` CR for this `MigPlan` CR. -==== - -You add Kubernetes objects to the `MigPlan` CR by using one of the following options: - -* Adding the Kubernetes objects to the `includedResources` section. When the `includedResources` field is specified in the `MigPlan` CR, the plan takes a list of `group-kind` as input. Only resources present in the list are included in the migration. -* Adding the optional `labelSelector` parameter to filter the `includedResources` in the `MigPlan`. When this field is specified, only resources matching the label selector are included in the migration. For example, you can filter a list of `Secret` and `ConfigMap` resources by using the label `app: frontend` as a filter. - -.Procedure - -. 
Update the `MigPlan` CR to include Kubernetes resources and, optionally, to filter the included resources by adding the `labelSelector` parameter: - -.. To update the `MigPlan` CR to include Kubernetes resources: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: - includedResources: - - kind: <kind> <1> - group: "" - - kind: <kind> - group: "" ----- -<1> Specify the Kubernetes object, for example, `Secret` or `ConfigMap`. - -.. Optional: To filter the included resources by adding the `labelSelector` parameter: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: - includedResources: - - kind: <kind> <1> - group: "" - - kind: <kind> - group: "" -... - labelSelector: - matchLabels: - <label> <2> ----- -<1> Specify the Kubernetes object, for example, `Secret` or `ConfigMap`. -<2> Specify the label of the resources to migrate, for example, `app: frontend`. - -. Create a `MigMigration` CR to migrate the selected Kubernetes resources. Verify that the correct `MigPlan` is referenced in `migPlanRef`: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - generateName: <migplan> - namespace: openshift-migration -spec: - migPlanRef: - name: <migplan> - namespace: openshift-migration - stage: false ----- diff --git a/modules/migration-launching-cam.adoc b/modules/migration-launching-cam.adoc deleted file mode 100644 index e5ea2129b27c..000000000000 --- a/modules/migration-launching-cam.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -:_content-type: PROCEDURE -[id="migration-launching-cam_{context}"] -= Launching the {mtc-short} web console - -You can launch the {mtc-full} ({mtc-short}) web console in a browser. - -.Prerequisites - -* The {mtc-short} web console must have network access to the {product-title} web console. -* The {mtc-short} web console must have network access to the OAuth authorization server. - -.Procedure - -. Log in to the {product-title} cluster on which you have installed {mtc-short}. -. Obtain the {mtc-short} web console URL by entering the following command: -+ -[source,terminal] ----- -$ oc get -n openshift-migration route/migration -o go-template='https://{{ .spec.host }}' ----- -+ -The output resembles the following: `\https://migration-openshift-migration.apps.cluster.openshift.com`. - -. Launch a browser and navigate to the {mtc-short} web console. -+ -[NOTE] -==== -If you try to access the {mtc-short} web console immediately after installing the {mtc-full} Operator, the console might not load because the Operator is still configuring the cluster. Wait a few minutes and retry. -==== - -. If you are using self-signed CA certificates, you will be prompted to accept the CA certificate of the source cluster API server. The web page guides you through the process of accepting the remaining certificates. - -. Log in with your {product-title} *username* and *password*. 
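For the state-migration procedure shown earlier, a filled-in example can make the two CRs easier to picture. The following minimal sketch uses illustrative values only: the plan name `frontend-state-plan` and the label `app: frontend` are assumptions for the example, and the cluster and storage references of the `MigPlan` CR are omitted because only the fields relevant to resource filtering are shown.

[source,yaml]
----
apiVersion: migration.openshift.io/v1alpha1
kind: MigPlan
metadata:
  name: frontend-state-plan          # illustrative plan name
  namespace: openshift-migration
spec:
  includedResources:                 # migrate only these group-kinds
  - kind: Secret
    group: ""
  - kind: ConfigMap
    group: ""
  labelSelector:
    matchLabels:
      app: frontend                  # only resources carrying this label
---
apiVersion: migration.openshift.io/v1alpha1
kind: MigMigration
metadata:
  generateName: frontend-state-plan- # a unique suffix is appended
  namespace: openshift-migration
spec:
  migPlanRef:
    name: frontend-state-plan
    namespace: openshift-migration
  stage: false
----

Because the `MigMigration` CR uses `generateName`, you would create it with `oc create -f` rather than `oc apply -f`. After the migration completes, the `MigPlan` CR is closed and cannot be reused.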
diff --git a/modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc b/modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc deleted file mode 100644 index b1b56cd4b996..000000000000 --- a/modules/migration-mapping-destination-namespaces-in-the-migplan-cr.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc - -[id="migration-mapping-destination-namespaces-in-the-migplan-cr_{context}"] -= Mapping namespaces - -If you map namespaces in the `MigPlan` custom resource (CR), you must ensure that the namespaces are not duplicated on the source or the destination clusters because the UID and GID ranges of the namespaces are copied during migration. - -.Two source namespaces mapped to the same destination namespace -[source,yaml] ----- -spec: - namespaces: - - namespace_2 - - namespace_1:namespace_2 ----- - -If you want the source namespace to be mapped to a namespace of the same name, you do not need to create a mapping. By default, a source namespace and a target namespace have the same name. - -.Incorrect namespace mapping -[source,yaml] ----- -spec: - namespaces: - - namespace_1:namespace_1 ----- - -.Correct namespace reference -[source,yaml] ----- -spec: - namespaces: - - namespace_1 ----- diff --git a/modules/migration-mapping-pvcs.adoc b/modules/migration-mapping-pvcs.adoc deleted file mode 100644 index a080f04c7e1b..000000000000 --- a/modules/migration-mapping-pvcs.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-mapping-pvcs_{context}"] -= Mapping persistent volume claims - -You can migrate persistent volume (PV) data from the source cluster to persistent volume claims (PVCs) that are already provisioned in the destination cluster in the `MigPlan` CR by mapping the PVCs. This mapping ensures that the destination PVCs of migrated applications are synchronized with the source PVCs. - -You map PVCs by updating the `spec.persistentVolumes.pvc.name` parameter in the `MigPlan` custom resource (CR) after the PVs have been discovered. - -.Prerequisites - -* `MigPlan` CR is in a `Ready` state. - -.Procedure - -* Update the `spec.persistentVolumes.pvc.name` parameter in the `MigPlan` CR: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: -... - persistentVolumes: - - capacity: 10Gi - name: <pv_name> - pvc: - name: <source_pvc>:<destination_pvc> <1> ----- -<1> Specify the PVC on the source cluster and the PVC on the destination cluster. If the destination PVC does not exist, it will be created. You can use this mapping to change the PVC name during migration. 
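To make the PVC mapping concrete, the following minimal sketch shows only the relevant `MigPlan` fragment. The names are illustrative assumptions, not values from the module above: a discovered PV `pvc-3a2b1c4d` whose source PVC `postgres-data` is remapped to a destination PVC `postgres-data-new`. Other plan fields are omitted for brevity.

[source,yaml]
----
apiVersion: migration.openshift.io/v1alpha1
kind: MigPlan
metadata:
  name: example-migplan                        # illustrative plan name
  namespace: openshift-migration
spec:
  persistentVolumes:
  - capacity: 10Gi
    name: pvc-3a2b1c4d                         # PV discovered by the plan
    pvc:
      name: postgres-data:postgres-data-new    # <source_pvc>:<destination_pvc>
----

After editing the plan, you can confirm that it remains in a `Ready` state with `oc describe migplan example-migplan -n openshift-migration`, as shown elsewhere in this document.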
diff --git a/modules/migration-migrating-applications-api.adoc b/modules/migration-migrating-applications-api.adoc deleted file mode 100644 index 7740ed42099e..000000000000 --- a/modules/migration-migrating-applications-api.adoc +++ /dev/null @@ -1,298 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-migrating-applications-api_{context}"] -= Migrating an application by using the {mtc-short} API - -You can migrate an application from the command line by using the {mtc-full} ({mtc-short}) API. - -.Procedure - -. Create a `MigCluster` CR manifest for the host cluster: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigCluster -metadata: - name: <host_cluster> - namespace: openshift-migration -spec: - isHostCluster: true -EOF ----- - -. Create a `Secret` object manifest for each remote cluster: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: v1 -kind: Secret -metadata: - name: <cluster_secret> - namespace: openshift-config -type: Opaque -data: - saToken: <sa_token> <1> -EOF ----- -<1> Specify the base64-encoded `migration-controller` service account (SA) token of the remote cluster. You can obtain the token by running the following command: -+ -[source,terminal] ----- -$ oc sa get-token migration-controller -n openshift-migration | base64 -w 0 ----- - -. Create a `MigCluster` CR manifest for each remote cluster: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigCluster -metadata: - name: <remote_cluster> <1> - namespace: openshift-migration -spec: - exposedRegistryPath: <exposed_registry_route> <2> - insecure: false <3> - isHostCluster: false - serviceAccountSecretRef: - name: <remote_cluster_secret> <4> - namespace: openshift-config - url: <remote_cluster_url> <5> -EOF ----- -<1> Specify the `Cluster` CR of the remote cluster. -<2> Optional: For direct image migration, specify the exposed registry route. -<3> SSL verification is enabled if `false`. CA certificates are not required or checked if `true`. -<4> Specify the `Secret` object of the remote cluster. -<5> Specify the URL of the remote cluster. - -. Verify that all clusters are in a `Ready` state: -+ -[source,terminal] ----- -$ oc describe cluster <cluster> ----- - -. Create a `Secret` object manifest for the replication repository: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: v1 -kind: Secret -metadata: - namespace: openshift-config - name: <migstorage_creds> -type: Opaque -data: - aws-access-key-id: <key_id_base64> <1> - aws-secret-access-key: <secret_key_base64> <2> -EOF ----- -<1> Specify the key ID in base64 format. -<2> Specify the secret key in base64 format. -+ -AWS credentials are base64-encoded by default. For other storage providers, you must encode your credentials by running the following command with each key: -+ -[source,terminal] ----- -$ echo -n "<key>" | base64 -w 0 <1> ----- -<1> Specify the key ID or the secret key. Both keys must be base64-encoded. - -. 
Create a `MigStorage` CR manifest for the replication repository: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigStorage -metadata: - name: <migstorage> - namespace: openshift-migration -spec: - backupStorageConfig: - awsBucketName: <bucket> <1> - credsSecretRef: - name: <storage_secret> <2> - namespace: openshift-config - backupStorageProvider: <storage_provider> <3> - volumeSnapshotConfig: - credsSecretRef: - name: <storage_secret> <4> - namespace: openshift-config - volumeSnapshotProvider: <storage_provider> <5> -EOF ----- -<1> Specify the bucket name. -<2> Specify the `Secrets` CR of the object storage. You must ensure that the credentials stored in the `Secrets` CR of the object storage are correct. -<3> Specify the storage provider. -<4> Optional: If you are copying data by using snapshots, specify the `Secrets` CR of the object storage. You must ensure that the credentials stored in the `Secrets` CR of the object storage are correct. -<5> Optional: If you are copying data by using snapshots, specify the storage provider. - -. Verify that the `MigStorage` CR is in a `Ready` state: -+ -[source,terminal] ----- -$ oc describe migstorage <migstorage> ----- - -. Create a `MigPlan` CR manifest: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - name: <migplan> - namespace: openshift-migration -spec: - destMigClusterRef: - name: <host_cluster> - namespace: openshift-migration - indirectImageMigration: true <1> - indirectVolumeMigration: true <2> - migStorageRef: - name: <migstorage> <3> - namespace: openshift-migration - namespaces: - - <source_namespace_1> <4> - - <source_namespace_2> - - <source_namespace_3>:<destination_namespace> <5> - srcMigClusterRef: - name: <remote_cluster> <6> - namespace: openshift-migration -EOF ----- -<1> Direct image migration is enabled if `false`. -<2> Direct volume migration is enabled if `false`. -<3> Specify the name of the `MigStorage` CR instance. -<4> Specify one or more source namespaces. By default, the destination namespace has the same name. -<5> Specify a destination namespace if it is different from the source namespace. -<6> Specify the name of the source cluster `MigCluster` instance. - -. Verify that the `MigPlan` instance is in a `Ready` state: -+ -[source,terminal] ----- -$ oc describe migplan <migplan> -n openshift-migration ----- - -. Create a `MigMigration` CR manifest to start the migration defined in the `MigPlan` instance: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - name: <migmigration> - namespace: openshift-migration -spec: - migPlanRef: - name: <migplan> <1> - namespace: openshift-migration - quiescePods: true <2> - stage: false <3> - rollback: false <4> -EOF ----- -<1> Specify the `MigPlan` CR name. -<2> The pods on the source cluster are stopped before migration if `true`. -<3> A stage migration, which copies most of the data without stopping the application, is performed if `true`. -<4> A completed migration is rolled back if `true`. - -. 
Verify the migration by watching the `MigMigration` CR progress: -+ -[source,terminal] ----- -$ oc watch migmigration <migmigration> -n openshift-migration ----- -+ -The output resembles the following: -+ -.Example output -+ -[source,text] ----- -Name: c8b034c0-6567-11eb-9a4f-0bc004db0fbc -Namespace: openshift-migration -Labels: migration.openshift.io/migplan-name=django -Annotations: openshift.io/touch: e99f9083-6567-11eb-8420-0a580a81020c -API Version: migration.openshift.io/v1alpha1 -Kind: MigMigration -... -Spec: - Mig Plan Ref: - Name: migplan - Namespace: openshift-migration - Stage: false -Status: - Conditions: - Category: Advisory - Last Transition Time: 2021-02-02T15:04:09Z - Message: Step: 19/47 - Reason: InitialBackupCreated - Status: True - Type: Running - Category: Required - Last Transition Time: 2021-02-02T15:03:19Z - Message: The migration is ready. - Status: True - Type: Ready - Category: Required - Durable: true - Last Transition Time: 2021-02-02T15:04:05Z - Message: The migration registries are healthy. - Status: True - Type: RegistriesHealthy - Itinerary: Final - Observed Digest: 7fae9d21f15979c71ddc7dd075cb97061895caac5b936d92fae967019ab616d5 - Phase: InitialBackupCreated - Pipeline: - Completed: 2021-02-02T15:04:07Z - Message: Completed - Name: Prepare - Started: 2021-02-02T15:03:18Z - Message: Waiting for initial Velero backup to complete. - Name: Backup - Phase: InitialBackupCreated - Progress: - Backup openshift-migration/c8b034c0-6567-11eb-9a4f-0bc004db0fbc-wpc44: 0 out of estimated total of 0 objects backed up (5s) - Started: 2021-02-02T15:04:07Z - Message: Not started - Name: StageBackup - Message: Not started - Name: StageRestore - Message: Not started - Name: DirectImage - Message: Not started - Name: DirectVolume - Message: Not started - Name: Restore - Message: Not started - Name: Cleanup - Start Timestamp: 2021-02-02T15:03:18Z -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Running 57s migmigration_controller Step: 2/47 - Normal Running 57s migmigration_controller Step: 3/47 - Normal Running 57s (x3 over 57s) migmigration_controller Step: 4/47 - Normal Running 54s migmigration_controller Step: 5/47 - Normal Running 54s migmigration_controller Step: 6/47 - Normal Running 52s (x2 over 53s) migmigration_controller Step: 7/47 - Normal Running 51s (x2 over 51s) migmigration_controller Step: 8/47 - Normal Ready 50s (x12 over 57s) migmigration_controller The migration is ready. - Normal Running 50s migmigration_controller Step: 9/47 - Normal Running 50s migmigration_controller Step: 10/47 ----- diff --git a/modules/migration-migrating-on-prem-to-cloud.adoc b/modules/migration-migrating-on-prem-to-cloud.adoc deleted file mode 100644 index 11a64f547221..000000000000 --- a/modules/migration-migrating-on-prem-to-cloud.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc -:_content-type: PROCEDURE -[id="migration-migrating-applications-on-prem-to-cloud_{context}"] -= Migrating an application from on-premises to a cloud-based cluster - -You can migrate from a source cluster that is behind a firewall to a cloud-based destination cluster by establishing a network tunnel between the two clusters. 
The `crane tunnel-api` command establishes such a tunnel by creating a VPN tunnel on the source cluster and then connecting to a VPN server running on the destination cluster. The VPN server is exposed to the client using a load balancer address on the destination cluster. - -A service created on the destination cluster exposes the source cluster's API to {mtc-short}, which is running on the destination cluster. - -.Prerequisites - -* The system that creates the VPN tunnel must have access and be logged in to both clusters. -* It must be possible to create a load balancer on the destination cluster. Refer to your cloud provider to ensure this is possible. -* Prepare names for the namespaces, on both the source cluster and the destination cluster, in which to run the VPN tunnel. Do not create these namespaces in advance. For information about namespace rules, see \https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names. -* When connecting multiple firewall-protected source clusters to the cloud cluster, each source cluster requires its own namespace. -* The OpenVPN server is installed on the destination cluster. -* The OpenVPN client is installed on the source cluster. -* When configuring the source cluster in {mtc-short}, the API URL takes the form of `\https://proxied-cluster.<namespace>.svc.cluster.local:8443`. -** If you use the API, see _Create a MigCluster CR manifest for each remote cluster_. -** If you use the {mtc-short} web console, see _Migrating your applications using the {mtc-short} web console_. -* The {mtc-short} web console and Migration Controller must be installed on the target cluster. - -.Procedure - -. Install the `crane` utility: -+ -[source,terminal,subs=attributes+] ----- -$ podman cp $(podman create registry.redhat.io/rhmtc/openshift-migration-controller-rhel8:v{mtc-version}):/crane ./ ----- -. Log in remotely to a node on the source cluster and a node on the destination cluster. - -. Obtain the cluster context for both clusters after logging in: -+ -[source,terminal,subs="+quotes"] ----- -$ oc config view ----- - -. Establish a tunnel by entering the following command on the system that creates the VPN tunnel: -+ -[source,terminal,subs="+quotes"] ----- -$ crane tunnel-api [--namespace <namespace>] \ - --destination-context <destination-cluster> \ - --source-context <source-cluster> ----- -+ -If you do not specify a namespace, the command uses the default value `openvpn`. -+ -For example: -+ -[source,terminal,subs="+quotes"] ----- -$ crane tunnel-api --namespace my_tunnel \ - --destination-context openshift-migration/c131-e-us-east-containers-cloud-ibm-com/admin \ - --source-context default/192-168-122-171-nip-io:8443/admin ----- -+ -[TIP] -==== -See all available parameters for the `crane tunnel-api` command by entering `crane tunnel-api --help`. -==== -+ -The command generates TLS/SSL certificates. This process might take several minutes. A message appears when the process completes. -+ -The OpenVPN server starts on the destination cluster and the OpenVPN client starts on the source cluster. -+ -After a few minutes, the load balancer resolves on the source node. 
-+ -[TIP] -==== -You can view the log for the OpenVPN pods to check the status of this process by entering the following commands with root privileges: - -[source,terminal,subs="+quotes"] ----- -# oc get po -n <namespace> ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -<pod_name> 2/2 Running 0 44s ----- - -[source,terminal,subs="+quotes"] ----- -# oc logs -f -n <namespace> <pod_name> -c openvpn ----- -When the address of the load balancer is resolved, the message `Initialization Sequence Completed` appears at the end of the log. -==== - -. On the OpenVPN server, which is on a destination control node, verify that the `openvpn` service and the `proxied-cluster` service are running: -+ -[source,terminal,subs="+quotes"] ----- -$ oc get service -n <namespace> ----- - -. On the source node, get the service account (SA) token for the migration controller: -+ -[source,terminal] ----- -# oc sa get-token -n openshift-migration migration-controller ----- - -. Open the {mtc-short} web console and add the source cluster, using the following values: -+ -* *Cluster name*: The source cluster name. -* *URL*: `proxied-cluster.<namespace>.svc.cluster.local:8443`. If you did not define a value for `<namespace>`, use `openvpn`. -* *Service account token*: The token of the migration controller service account. -* *Exposed route host to image registry*: `proxied-cluster.<namespace>.svc.cluster.local:5000`. If you did not define a value for `<namespace>`, use `openvpn`. - -After {mtc-short} has successfully validated the connection, you can proceed to create and run a migration plan. The namespace for the source cluster should appear in the list of namespaces. diff --git a/modules/migration-mtc-cr-manifests.adoc b/modules/migration-mtc-cr-manifests.adoc deleted file mode 100644 index 23083ee91c95..000000000000 --- a/modules/migration-mtc-cr-manifests.adoc +++ /dev/null @@ -1,364 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -[id="migration-mtc-cr-manifests_{context}"] -= {mtc-short} custom resource manifests - -{mtc-full} ({mtc-short}) uses the following custom resource (CR) manifests for migrating applications. - -[id="directimagemigration_{context}"] -== DirectImageMigration - -The `DirectImageMigration` CR copies images directly from the source cluster to the destination cluster. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: DirectImageMigration -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <direct_image_migration> -spec: - srcMigClusterRef: - name: <source_cluster> - namespace: openshift-migration - destMigClusterRef: - name: <destination_cluster> - namespace: openshift-migration - namespaces: <1> - - <source_namespace_1> - - <source_namespace_2>:<destination_namespace_3> <2> ----- -<1> One or more namespaces containing images to be migrated. By default, the destination namespace has the same name as the source namespace. -<2> Source namespace mapped to a destination namespace with a different name. - -[id="directimagestreammigration_{context}"] -== DirectImageStreamMigration - -The `DirectImageStreamMigration` CR copies image stream references directly from the source cluster to the destination cluster. 
- -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: DirectImageStreamMigration -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <direct_image_stream_migration> -spec: - srcMigClusterRef: - name: <source_cluster> - namespace: openshift-migration - destMigClusterRef: - name: <destination_cluster> - namespace: openshift-migration - imageStreamRef: - name: <image_stream> - namespace: <source_image_stream_namespace> - destNamespace: <destination_image_stream_namespace> ----- - -[id="directvolumemigration_{context}"] -== DirectVolumeMigration - -The `DirectVolumeMigration` CR copies persistent volumes (PVs) directly from the source cluster to the destination cluster. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: DirectVolumeMigration -metadata: - name: <direct_volume_migration> - namespace: openshift-migration -spec: - createDestinationNamespaces: false <1> - deleteProgressReportingCRs: false <2> - destMigClusterRef: - name: <host_cluster> <3> - namespace: openshift-migration - persistentVolumeClaims: - - name: <pvc> <4> - namespace: <pvc_namespace> - srcMigClusterRef: - name: <source_cluster> - namespace: openshift-migration ----- -<1> Set to `true` to create namespaces for the PVs on the destination cluster. -<2> Set to `true` to delete `DirectVolumeMigrationProgress` CRs after migration. The default is `false` so that `DirectVolumeMigrationProgress` CRs are retained for troubleshooting. -<3> Update the cluster name if the destination cluster is not the host cluster. -<4> Specify one or more PVCs to be migrated. - -[id="directvolumemigrationprogress_{context}"] -== DirectVolumeMigrationProgress - -The `DirectVolumeMigrationProgress` CR shows the progress of the `DirectVolumeMigration` CR. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: DirectVolumeMigrationProgress -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <direct_volume_migration_progress> -spec: - clusterRef: - name: <source_cluster> - namespace: openshift-migration - podRef: - name: <rsync_pod> - namespace: openshift-migration ----- - -[id="miganalytic_{context}"] -== MigAnalytic - -The `MigAnalytic` CR collects the number of images, Kubernetes resources, and the persistent volume (PV) capacity from an associated `MigPlan` CR. - -You can configure the data that it collects. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigAnalytic -metadata: - annotations: - migplan: <migplan> - name: <miganalytic> - namespace: openshift-migration - labels: - migplan: <migplan> -spec: - analyzeImageCount: true <1> - analyzeK8SResources: true <2> - analyzePVCapacity: true <3> - listImages: false <4> - listImagesLimit: 50 <5> - migPlanRef: - name: <migplan> - namespace: openshift-migration ----- -<1> Optional: Returns the number of images. -<2> Optional: Returns the number, kind, and API version of the Kubernetes resources. -<3> Optional: Returns the PV capacity. -<4> Returns a list of image names. The default is `false` so that the output is not excessively long. -<5> Optional: Specify the maximum number of image names to return if `listImages` is `true`. - -[id="migcluster_{context}"] -== MigCluster - -The `MigCluster` CR defines a host, local, or remote cluster. 
- -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigCluster -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <host_cluster> <1> - namespace: openshift-migration -spec: - isHostCluster: true <2> -# The 'azureResourceGroup' parameter is relevant only for Microsoft Azure. - azureResourceGroup: <azure_resource_group> <3> - caBundle: <ca_bundle_base64> <4> - insecure: false <5> - refresh: false <6> -# The 'restartRestic' parameter is relevant for a source cluster. - restartRestic: true <7> -# The following parameters are relevant for a remote cluster. - exposedRegistryPath: <registry_route> <8> - url: <destination_cluster_url> <9> - serviceAccountSecretRef: - name: <source_secret> <10> - namespace: openshift-config ----- -<1> Update the cluster name if the `migration-controller` pod is not running on this cluster. -<2> The `migration-controller` pod runs on this cluster if `true`. -<3> Microsoft Azure only: Specify the resource group. -<4> Optional: If you created a certificate bundle for self-signed CA certificates and if the `insecure` parameter value is `false`, specify the base64-encoded certificate bundle. -<5> Set to `true` to disable SSL verification. -<6> Set to `true` to validate the cluster. -<7> Set to `true` to restart the `Restic` pods on the source cluster after the `Stage` pods are created. -<8> Remote cluster and direct image migration only: Specify the exposed secure registry path. -<9> Remote cluster only: Specify the URL. -<10> Remote cluster only: Specify the name of the `Secret` object. - -[id="mighook_{context}"] -== MigHook - -The `MigHook` CR defines a migration hook that runs custom code at a specified stage of the migration. You can create up to four migration hooks. Each hook runs during a different phase of the migration. - -You can configure the hook name, runtime duration, a custom image, and the cluster where the hook will run. - -The migration phases and namespaces of the hooks are configured in the `MigPlan` CR. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigHook -metadata: - generateName: <hook_name_prefix> <1> - name: <mighook> <2> - namespace: openshift-migration -spec: - activeDeadlineSeconds: 1800 <3> - custom: false <4> - image: <hook_image> <5> - playbook: <ansible_playbook_base64> <6> - targetCluster: source <7> ----- -<1> Optional: A unique hash is appended to the value for this parameter so that each migration hook has a unique name. You do not need to specify the value of the `name` parameter. -<2> Specify the migration hook name, unless you specify the value of the `generateName` parameter. -<3> Optional: Specify the maximum number of seconds that a hook can run. The default is `1800`. -<4> The hook is a custom image if `true`. The custom image can include Ansible or it can be written in a different programming language. -<5> Specify the custom image, for example, `quay.io/konveyor/hook-runner:latest`. Required if `custom` is `true`. -<6> Base64-encoded Ansible playbook. Required if `custom` is `false`. -<7> Specify the cluster on which the hook will run. Valid values are `source` or `destination`. - -[id="migmigration_{context}"] -== MigMigration - -The `MigMigration` CR runs a `MigPlan` CR. - -You can configure a `Migmigration` CR to run a stage or incremental migration, to cancel a migration in progress, or to roll back a completed migration. 
- -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <migmigration> - namespace: openshift-migration -spec: - canceled: false <1> - rollback: false <2> - stage: false <3> - quiescePods: true <4> - keepAnnotations: true <5> - verify: false <6> - migPlanRef: - name: <migplan> - namespace: openshift-migration ----- -<1> Set to `true` to cancel a migration in progress. -<2> Set to `true` to roll back a completed migration. -<3> Set to `true` to run a stage migration. Data is copied incrementally and the pods on the source cluster are not stopped. -<4> Set to `true` to stop the application during migration. The pods on the source cluster are scaled to `0` after the `Backup` stage. -<5> Set to `true` to retain the labels and annotations applied during the migration. -<6> Set to `true` to check the status of the migrated pods on the destination cluster and to return the names of pods that are not in a `Running` state. - -[id="migplan_{context}"] -== MigPlan - -The `MigPlan` CR defines the parameters of a migration plan. - -You can configure destination namespaces, hook phases, and direct or indirect migration. - -[NOTE] -==== -By default, a destination namespace has the same name as the source namespace. If you configure a different destination namespace, you must ensure that the namespaces are not duplicated on the source or the destination clusters because the UID and GID ranges are copied during migration. -==== - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigPlan -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <migplan> - namespace: openshift-migration -spec: - closed: false <1> - srcMigClusterRef: - name: <source_cluster> - namespace: openshift-migration - destMigClusterRef: - name: <destination_cluster> - namespace: openshift-migration - hooks: <2> - - executionNamespace: <namespace> <3> - phase: <migration_phase> <4> - reference: - name: <hook> <5> - namespace: <hook_namespace> <6> - serviceAccount: <service_account> <7> - indirectImageMigration: true <8> - indirectVolumeMigration: false <9> - migStorageRef: - name: <migstorage> - namespace: openshift-migration - namespaces: - - <source_namespace_1> <10> - - <source_namespace_2> - - <source_namespace_3>:<destination_namespace_4> <11> - refresh: false <12> ----- -<1> The migration has completed if `true`. You cannot create another `MigMigration` CR for this `MigPlan` CR. -<2> Optional: You can specify up to four migration hooks. Each hook must run during a different migration phase. -<3> Optional: Specify the namespace in which the hook will run. -<4> Optional: Specify the migration phase during which a hook runs. One hook can be assigned to one phase. Valid values are `PreBackup`, `PostBackup`, `PreRestore`, and `PostRestore`. -<5> Optional: Specify the name of the `MigHook` CR. -<6> Optional: Specify the namespace of the `MigHook` CR. -<7> Optional: Specify a service account with `cluster-admin` privileges. -<8> Direct image migration is disabled if `true`. Images are copied from the source cluster to the replication repository and from the replication repository to the destination cluster. -<9> Direct volume migration is disabled if `true`. PVs are copied from the source cluster to the replication repository and from the replication repository to the destination cluster. -<10> Specify one or more source namespaces. 
If you specify only the source namespace, the destination namespace is the same. -<11> Specify the destination namespace if it is different from the source namespace. -<12> The `MigPlan` CR is validated if `true`. - -[id="migstorage_{context}"] -== MigStorage - -The `MigStorage` CR describes the object storage for the replication repository. - -Amazon Web Services (AWS), Microsoft Azure, Google Cloud Storage, Multi-Cloud Object Gateway, and generic S3-compatible cloud storage are supported. - -AWS and the snapshot copy method have additional parameters. - -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigStorage -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <migstorage> - namespace: openshift-migration -spec: - backupStorageProvider: <backup_storage_provider> <1> - volumeSnapshotProvider: <snapshot_storage_provider> <2> - backupStorageConfig: - awsBucketName: <bucket> <3> - awsRegion: <region> <4> - credsSecretRef: - namespace: openshift-config - name: <storage_secret> <5> - awsKmsKeyId: <key_id> <6> - awsPublicUrl: <public_url> <7> - awsSignatureVersion: <signature_version> <8> - volumeSnapshotConfig: - awsRegion: <region> <9> - credsSecretRef: - namespace: openshift-config - name: <storage_secret> <10> - refresh: false <11> ----- -<1> Specify the storage provider. -<2> Snapshot copy method only: Specify the storage provider. -<3> AWS only: Specify the bucket name. -<4> AWS only: Specify the bucket region, for example, `us-east-1`. -<5> Specify the name of the `Secret` object that you created for the storage. -<6> AWS only: If you are using the AWS Key Management Service, specify the unique identifier of the key. -<7> AWS only: If you granted public access to the AWS bucket, specify the bucket URL. -<8> AWS only: Specify the AWS signature version for authenticating requests to the bucket, for example, `4`. -<9> Snapshot copy method only: Specify the geographical region of the clusters. -<10> Snapshot copy method only: Specify the name of the `Secret` object that you created for the storage. -<11> Set to `true` to validate the cluster. diff --git a/modules/migration-mtc-release-notes-1-5.adoc b/modules/migration-mtc-release-notes-1-5.adoc deleted file mode 100644 index a3f308fd936f..000000000000 --- a/modules/migration-mtc-release-notes-1-5.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/mtc-release-notes.adoc -:_content-type: REFERENCE -[id="migration-mtc-release-notes-1-5_{context}"] -= {mtc-full} 1.5 release notes - -[id="new-features-and-enhancements-1-5_{context}"] -== New features and enhancements - -This release has the following new features and enhancements: - -* The *Migration resource* tree on the *Migration details* page of the web console has been enhanced with additional resources, Kubernetes events, and live status information for monitoring and debugging migrations. -* The web console can support hundreds of migration plans. -* A source namespace can be mapped to a different target namespace in a migration plan. Previously, the source namespace was mapped to a target namespace with the same name. -* Hook phases with status information are displayed in the web console during a migration. -* The number of Rsync retry attempts is displayed in the web console during direct volume migration. -* Persistent volume (PV) resizing can be enabled for direct volume migration to ensure that the target cluster does not run out of disk space. 
-* The threshold that triggers PV resizing is configurable. Previously, PV resizing occurred when the disk usage exceeded 97%. -* Velero has been updated to version 1.6, which provides numerous fixes and enhancements. -* Cached Kubernetes clients can be enabled to provide improved performance. - -[id="deprecated-features-1-5_{context}"] -== Deprecated features - -The following features are deprecated: - -// https://issues.redhat.com/browse/MIG-623 -* {mtc-short} versions 1.2 and 1.3 are no longer supported. -* The procedure for updating deprecated APIs has been removed from the troubleshooting section of the documentation because the `oc convert` command is deprecated. - -[id="known-issues-1-5_{context}"] -== Known issues - -This release has the following known issues: - -* PV resizing does not work as expected for AWS gp2 storage unless the `pv_resizing_threshold` is 42% or greater. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1973148[*BZ#1973148*]) -* If a migration fails, the migration plan does not retain custom PV settings for quiesced pods. You must manually roll back the migration, delete the migration plan, and create a new migration plan with your PV settings. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1784899[*BZ#1784899*]) -* Microsoft Azure storage is unavailable if you create more than 400 migration plans. The `MigStorage` custom resource displays the following message: `The request is being throttled as the limit has been reached for operation type`. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1977226[*BZ#1977226*]) -* On {product-title} 3.10, the `MigrationController` pod takes too long to restart. The bug report contains a workaround. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1986796[*BZ#1986796*]) - -[id="technical-changes-1-5_{context}"] -== Technical changes - -This release has the following technical changes: - -* The legacy {mtc-full} Operator version 1.5.1 is installed manually on {product-title} versions 3.7 to 4.5. -* The {mtc-full} Operator version 1.5.1 is installed on {product-title} versions 4.6 and later by using the Operator Lifecycle Manager. diff --git a/modules/migration-mtc-release-notes-1-6.adoc b/modules/migration-mtc-release-notes-1-6.adoc deleted file mode 100644 index 33dae42d221e..000000000000 --- a/modules/migration-mtc-release-notes-1-6.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/mtc-release-notes.adoc -:_content-type: REFERENCE -[id="migration-mtc-release-notes-1-6_{context}"] -= {mtc-full} 1.6 release notes - -[id="new-features-and-enhancements-1-6_{context}"] -== New features and enhancements - -This release has the following new features and enhancements: - -* State migration: You can perform repeatable, state-only migrations by selecting specific persistent volume claims (PVCs). - -* "New operator version available" notification: The Clusters page of the {mtc-short} web console displays a notification when a new {mtc-full} Operator is available. - -[id="deprecated-features-1-6_{context}"] -== Deprecated features - -The following features are deprecated: - -* {mtc-short} version 1.4 is no longer supported. - -[id="known-issues-1-6_{context}"] -== Known issues - -This release has the following known issues: - -* On {product-title} 3.10, the `MigrationController` pod takes too long to restart. The Bugzilla report contains a workaround. 
(link:https://bugzilla.redhat.com/show_bug.cgi?id=1986796[*BZ#1986796*]) -* `Stage` pods fail during direct volume migration from a classic {product-title} source cluster on IBM Cloud. The IBM block storage plugin does not allow the same volume to be mounted on multiple pods of the same node. As a result, the PVCs cannot be mounted on the Rsync pods and on the application pods simultaneously. To resolve this issue, stop the application pods before migration. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1887526[*BZ#1887526*]) -* `MigPlan` custom resource does not display a warning when an AWS gp2 PVC has no available space. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1963927[*BZ#1963927*]) -* Block storage for IBM Cloud must be in the same availability zone. See the link:https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-vpc-faq[IBM FAQ for block storage for virtual private cloud]. diff --git a/modules/migration-mtc-release-notes-1-7-10.adoc b/modules/migration-mtc-release-notes-1-7-10.adoc deleted file mode 100644 index ff1c4b6f11a2..000000000000 --- a/modules/migration-mtc-release-notes-1-7-10.adoc +++ /dev/null @@ -1,15 +0,0 @@ - -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/mtc-release-notes.adoc -:_content-type: REFERENCE -[id="migration-mtc-release-notes-1-7-10_{context}"] -= {mtc-full} 1.7.10 release notes - -[id="resolved-issues-1-7-10_{context}"] -== Resolved issues - -This release has the following major resolved issue: - -* In this release, you can prevent absolute symlinks from being manipulated by Rsync in the course of direct volume migration (DVM). Running DVM in privileged mode preserves absolute symlinks inside the persistent volume claims (PVCs). To switch to privileged mode, in the `MigrationController` CR, set the `migration_rsync_privileged` spec to `true`. (link:https://bugzilla.redhat.com/show_bug.cgi?id=2204461[*BZ#2204461*]) - diff --git a/modules/migration-mtc-release-notes-1-7.adoc b/modules/migration-mtc-release-notes-1-7.adoc deleted file mode 100644 index 4f4aa81c8ab4..000000000000 --- a/modules/migration-mtc-release-notes-1-7.adoc +++ /dev/null @@ -1,27 +0,0 @@ - -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/mtc-release-notes.adoc -:_content-type: REFERENCE -[id="migration-mtc-release-notes-1-7_{context}"] -= {mtc-full} 1.7 release notes - -[id="new-features-and-enhancements-1-7_{context}"] -== New features and enhancements - -This release has the following new features and enhancements: - -* The {mtc-full} ({mtc-short}) Operator now depends upon the OpenShift API for Data Protection (OADP) Operator. When you install the {mtc-short} Operator, the Operator Lifecycle Manager (OLM) automatically installs the OADP Operator in the same namespace. - -* You can migrate from a source cluster that is behind a firewall to a cloud-based destination cluster by establishing a network tunnel between the two clusters by using the `crane tunnel-api` command. - -* Converting storage classes in the MTC web console: You can convert the storage class of a persistent volume (PV) by migrating it within the same cluster. - -[id="known-issues-1-7_{context}"] -== Known issues - -This release has the following known issues: - -* `MigPlan` custom resource does not display a warning when an AWS gp2 PVC has no available space. 
(link:https://bugzilla.redhat.com/show_bug.cgi?id=1963927[*BZ#1963927*]) -* Direct and indirect data transfers do not work if the destination storage is a PV that is dynamically provisioned by the AWS Elastic File System (EFS). This is due to limitations of the AWS EFS Container Storage Interface (CSI) driver. (link:https://bugzilla.redhat.com/show_bug.cgi?id=2085097[*BZ#2085097*]) -* Block storage for IBM Cloud must be in the same availability zone. See the link:https://cloud.ibm.com/docs/vpc?topic=vpc-block-storage-vpc-faq[IBM FAQ for block storage for virtual private cloud]. diff --git a/modules/migration-mtc-workflow.adoc b/modules/migration-mtc-workflow.adoc deleted file mode 100644 index 30cd193c23cd..000000000000 --- a/modules/migration-mtc-workflow.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/about-mtc-3-4.adoc -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/about-mtc.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -[id="migration-mtc-workflow_{context}"] -= {mtc-short} workflow - -You can migrate Kubernetes resources, persistent volume data, and internal container images to {product-title} {product-version} by using the {mtc-full} ({mtc-short}) web console or the Kubernetes API. - -{mtc-short} migrates the following resources: - -* A namespace specified in a migration plan. -* Namespace-scoped resources: When the {mtc-short} migrates a namespace, it migrates all the objects and resources associated with that namespace, such as services or pods. Additionally, if a resource that exists in the namespace but not at the cluster level depends on a resource that exists at the cluster level, the {mtc-short} migrates both resources. -+ -For example, a security context constraint (SCC) is a resource that exists at the cluster level and a service account (SA) is a resource that exists at the namespace level. If an SA exists in a namespace that the {mtc-short} migrates, the {mtc-short} automatically locates any SCCs that are linked to the SA and also migrates those SCCs. Similarly, the {mtc-short} migrates persistent volumes that are linked to the persistent volume claims of the namespace. -+ -[NOTE] -==== -Cluster-scoped resources might have to be migrated manually, depending on the resource. -==== - -* Custom resources (CRs) and custom resource definitions (CRDs): {mtc-short} automatically migrates CRs and CRDs at the namespace level. - -Migrating an application with the {mtc-short} web console involves the following steps: - -. Install the {mtc-full} Operator on all clusters. -+ -You can install the {mtc-full} Operator in a restricted environment with limited or no internet access. The source and target clusters must have network access to each other and to a mirror registry. - -. Configure the replication repository, an intermediate object storage that {mtc-short} uses to migrate data. -+ -The source and target clusters must have network access to the replication repository during migration. If you are using a proxy server, you must configure it to allow network traffic between the replication repository and the clusters. - -. Add the source cluster to the {mtc-short} web console. -. Add the replication repository to the {mtc-short} web console. -. 
Create a migration plan, with one of the following data migration options: - -* *Copy*: {mtc-short} copies the data from the source cluster to the replication repository, and from the replication repository to the target cluster. -+ -[NOTE] -==== -If you are using direct image migration or direct volume migration, the images or volumes are copied directly from the source cluster to the target cluster. -==== -+ -image::migration-PV-copy.png[] - -* *Move*: {mtc-short} unmounts a remote volume, for example, NFS, from the source cluster, creates a PV resource on the target cluster pointing to the remote volume, and then mounts the remote volume on the target cluster. Applications running on the target cluster use the same remote volume that the source cluster was using. The remote volume must be accessible to the source and target clusters. -+ -[NOTE] -==== -Although the replication repository does not appear in this diagram, it is required for migration. -==== -+ -image::migration-PV-move.png[] - -. Run the migration plan, with one of the following options: - -* *Stage* copies data to the target cluster without stopping the application. -+ -A stage migration can be run multiple times so that most of the data is copied to the target before migration. Running one or more stage migrations reduces the duration of the cutover migration. - -* *Cutover* stops the application on the source cluster and moves the resources to the target cluster. -+ -Optional: You can clear the *Halt transactions on the source cluster during migration* checkbox. - -image::OCP_3_to_4_App_migration.png[] diff --git a/modules/migration-network-traffic-redirection-strategies.adoc b/modules/migration-network-traffic-redirection-strategies.adoc deleted file mode 100644 index 5045afa1b73d..000000000000 --- a/modules/migration-network-traffic-redirection-strategies.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc -// * migration_toolkit_for_containers/network-considerations-mtc.adoc - -[id="migration-network-traffic-redirection-strategies_{context}"] -= Network traffic redirection strategies - -After a successful migration, you must redirect network traffic of your stateless applications from the source cluster to the target cluster. - -The strategies for redirecting network traffic are based on the following assumptions: - -* The application pods are running on both the source and target clusters. -* Each application has a route that contains the source cluster hostname. -* The route with the source cluster hostname contains a CA certificate. -* For HTTPS, the target router CA certificate contains a Subject Alternative Name for the wildcard DNS record of the source cluster. - -Consider the following strategies and select the one that meets your objectives. - -* Redirecting all network traffic for all applications at the same time -+ -Change the wildcard DNS record of the source cluster to point to the target cluster router's virtual IP address (VIP). -+ -This strategy is suitable for simple applications or small migrations. - -* Redirecting network traffic for individual applications -+ -Create a DNS record for each application with the source cluster hostname pointing to the target cluster router's VIP. This DNS record takes precedence over the source cluster wildcard DNS record. - -* Redirecting network traffic gradually for individual applications - -. 
Create a proxy that can direct traffic to both the source cluster router's VIP and the target cluster router's VIP, for each application. -. Create a DNS record for each application with the source cluster hostname pointing to the proxy. -. Configure the proxy entry for the application to route a percentage of the traffic to the target cluster router's VIP and the rest of the traffic to the source cluster router's VIP. -. Gradually increase the percentage of traffic that you route to the target cluster router's VIP until all the network traffic is redirected. - -* User-based redirection of traffic for individual applications -+ -Using this strategy, you can filter TCP/IP headers of user requests to redirect network traffic for predefined groups of users. This allows you to test the redirection process on specific populations of users before redirecting the entire network traffic. - -. Create a proxy that can direct traffic to both the source cluster router's VIP and the target cluster router's VIP, for each application. -. Create a DNS record for each application with the source cluster hostname pointing to the proxy. -. Configure the proxy entry for the application to route traffic matching a given header pattern, such as `test customers`, to the target cluster router's VIP and the rest of the traffic to the source cluster router's VIP. -. Redirect traffic to the target cluster router's VIP in stages until all the traffic is on the target cluster router's VIP. diff --git a/modules/migration-partial-failure-velero.adoc b/modules/migration-partial-failure-velero.adoc deleted file mode 100644 index 11e62b6aa62a..000000000000 --- a/modules/migration-partial-failure-velero.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-partial-failure-velero_{context}"] -= Debugging a partial migration failure - -You can debug a partial migration failure warning message by using the Velero CLI to examine the `Restore` custom resource (CR) logs. - -A partial failure occurs when Velero encounters an issue that does not cause a migration to fail. For example, if a custom resource definition (CRD) is missing or if there is a discrepancy between CRD versions on the source and target clusters, the migration completes but the CR is not created on the target cluster. - -Velero logs the issue as a partial failure and then processes the rest of the objects in the `Backup` CR. - -.Procedure - -. Check the status of a `MigMigration` CR: -+ -[source,terminal] ----- -$ oc get migmigration <migmigration> -o yaml ----- -+ -.Example output -+ -[source,yaml] ----- -status: - conditions: - - category: Warn - durable: true - lastTransitionTime: "2021-01-26T20:48:40Z" - message: 'Final Restore openshift-migration/ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf: partially failed on destination cluster' - status: "True" - type: VeleroFinalRestorePartiallyFailed - - category: Advisory - durable: true - lastTransitionTime: "2021-01-26T20:48:42Z" - message: The migration has completed with warnings, please look at `Warn` conditions. - reason: Completed - status: "True" - type: SucceededWithWarnings ----- - -. 
Check the status of the `Restore` CR by using the Velero `describe` command: -+ -[source,yaml] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - restore describe <restore> ----- -+ -.Example output -+ -[source,text] ----- -Phase: PartiallyFailed (run 'velero restore logs ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf' for more information) - -Errors: - Velero: <none> - Cluster: <none> - Namespaces: - migration-example: error restoring example.com/migration-example/migration-example: the server could not find the requested resource ----- - -. Check the `Restore` CR logs by using the Velero `logs` command: -+ -[source,yaml] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - restore logs <restore> ----- -+ -.Example output -+ -[source,yaml] ----- -time="2021-01-26T20:48:37Z" level=info msg="Attempting to restore migration-example: migration-example" logSource="pkg/restore/restore.go:1107" restore=openshift-migration/ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf -time="2021-01-26T20:48:37Z" level=info msg="error restoring migration-example: the server could not find the requested resource" logSource="pkg/restore/restore.go:1170" restore=openshift-migration/ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf ----- -+ -The `Restore` CR log error message, `the server could not find the requested resource`, indicates the cause of the partially failed migration. diff --git a/modules/migration-prerequisites.adoc b/modules/migration-prerequisites.adoc deleted file mode 100644 index 451cbda7fb50..000000000000 --- a/modules/migration-prerequisites.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -[id="migration-prerequisites_{context}"] -= Migration prerequisites - -* You must be logged in as a user with `cluster-admin` privileges on all clusters. - -.Direct image migration - -* You must ensure that the secure {product-registry} of the source cluster is exposed. -* You must create a route to the exposed registry. - -.Direct volume migration - -* If your clusters use proxies, you must configure an Stunnel TCP proxy. - -ifdef::migrating-applications-3-4,advanced-migration-options-3-4[] -.Internal images - -* If your application uses internal images from the `openshift` namespace, you must ensure that the required versions of the images are present on the target cluster. -+ -You can manually update an image stream tag in order to use a deprecated {product-title} 3 image on an {product-title} {product-version} cluster. -endif::[] - -.Clusters - -* The source cluster must be upgraded to the latest {mtc-short} z-stream release. -* The {mtc-short} version must be the same on all clusters. - -.Network - -* The clusters have unrestricted network access to each other and to the replication repository. -* If you copy the persistent volumes with `move`, the clusters must have unrestricted network access to the remote volumes. 
-ifdef::migrating-applications-3-4,advanced-migration-options-3-4[] -* You must enable the following ports on an {product-title} 3 cluster: -** `8443` (API server) -** `443` (routes) -** `53` (DNS) -endif::[] -* You must enable the following ports on an {product-title} 4 cluster: -** `6443` (API server) -** `443` (routes) -** `53` (DNS) -* You must enable port `443` on the replication repository if you are using TLS. - -.Persistent volumes (PVs) - -* The PVs must be valid. -* The PVs must be bound to persistent volume claims. -* If you use snapshots to copy the PVs, the following additional prerequisites apply: -** The cloud provider must support snapshots. -** The PVs must have the same cloud provider. -** The PVs must be located in the same geographic region. -** The PVs must have the same storage class. diff --git a/modules/migration-provided-metrics.adoc b/modules/migration-provided-metrics.adoc deleted file mode 100644 index 84c8741f68e2..000000000000 --- a/modules/migration-provided-metrics.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration-toolkit-for-containers/troubleshooting-mtc.adoc - -[id="migration-provided-metrics_{context}"] -= Provided metrics - -The `MigrationController` custom resource (CR) provides metrics for the `MigMigration` CR count and for its API requests. - -[id="cam_app_workload_migrations-metric_{context}"] -== cam_app_workload_migrations - -This metric is a count of `MigMigration` CRs over time. It is useful for viewing alongside the `mtc_client_request_count` and `mtc_client_request_elapsed` metrics to collate API request information with migration status changes. This metric is included in Telemetry. - -.cam_app_workload_migrations metric -[%header,cols="3,3,3"] -|=== -|Queryable label name |Sample label values |Label description - -|status -|`running`, `idle`, `failed`, `completed` -|Status of the `MigMigration` CR - -|type -|stage, final -|Type of the `MigMigration` CR -|=== - -[id="mtc_client_request_count-metric_{context}"] -== mtc_client_request_count - -This metric is a cumulative count of Kubernetes API requests that `MigrationController` issued. It is not included in Telemetry. - -.mtc_client_request_count metric -[%header,cols="3,3,3"] -|=== -|Queryable label name |Sample label values |Label description - -|cluster -|`\https://migcluster-url:443` -|Cluster that the request was issued against - -|component -|`MigPlan`, `MigCluster` -|Sub-controller API that issued request - -|function -|`(*ReconcileMigPlan).Reconcile` -|Function that the request was issued from - -|kind -|`SecretList`, `Deployment` -|Kubernetes kind the request was issued for -|=== - -[id="mtc_client_request_elapsed-metric_{context}"] -== mtc_client_request_elapsed - -This metric is a cumulative latency, in milliseconds, of Kubernetes API requests that `MigrationController` issued. It is not included in Telemetry. 
- -.mtc_client_request_elapsed metric -[%header,cols="3,3,3"] -|=== -|Queryable label name |Sample label values |Label description - -|cluster -|`\https://cluster-url.com:443` -|Cluster that the request was issued against - -|component -|`migplan`, `migcluster` -|Sub-controller API that issued request - -|function -|`(*ReconcileMigPlan).Reconcile` -|Function that the request was issued from - -|kind -|`SecretList`, `Deployment` -|Kubernetes resource that the request was issued for -|=== - -[id="useful-queries_{context}"] -== Useful queries - -The table lists some helpful queries that can be used for monitoring performance. - -.Useful queries - -[%header,cols="3,3"] -|=== -|Query |Description - -|`mtc_client_request_count` -|Number of API requests issued, sorted by request type - -|`sum(mtc_client_request_count)` -|Total number of API requests issued - -|`mtc_client_request_elapsed` -|API request latency, sorted by request type - -|`sum(mtc_client_request_elapsed)` -|Total latency of API requests - -|`sum(mtc_client_request_elapsed) / sum(mtc_client_request_count)` -|Average latency of API requests - -|`mtc_client_request_elapsed / mtc_client_request_count` -|Average latency of API requests, sorted by request type - -|`cam_app_workload_migrations{status="running"} * 100` -|Count of running migrations, multiplied by 100 for easier viewing alongside request counts -|=== diff --git a/modules/migration-rolling-back-migration-cli.adoc b/modules/migration-rolling-back-migration-cli.adoc deleted file mode 100644 index 57423b077db5..000000000000 --- a/modules/migration-rolling-back-migration-cli.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-rolling-back-migration-cli_{context}"] -= Rolling back a migration from the command line interface - -You can roll back a migration by creating a `MigMigration` custom resource (CR) from the command line interface. - -[NOTE] -==== -The following resources remain in the migrated namespaces for debugging after a failed direct volume migration (DVM): - -* Config maps (source and destination clusters) -* `Secret` objects (source and destination clusters) -* `Rsync` CRs (source cluster) - -These resources do not affect rollback. You can delete them manually. - -If you later run the same migration plan successfully, the resources from the failed migration are deleted automatically. -==== - -If your application was stopped during a failed migration, you must roll back the migration to prevent data corruption in the persistent volume. - -Rollback is not required if the application was not stopped during migration because the original application is still running on the source cluster. - -.Procedure - -. Create a `MigMigration` CR based on the following example: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: <migmigration> - namespace: openshift-migration -spec: -... - rollback: true -... - migPlanRef: - name: <migplan> <1> - namespace: openshift-migration -EOF ----- -<1> Specify the name of the associated `MigPlan` CR. - -. In the {mtc-short} web console, verify that the migrated project resources have been removed from the target cluster. -. 
Verify that the migrated project resources are present in the source cluster and that the application is running. diff --git a/modules/migration-rolling-back-migration-manually.adoc b/modules/migration-rolling-back-migration-manually.adoc deleted file mode 100644 index 899ffaa64fa1..000000000000 --- a/modules/migration-rolling-back-migration-manually.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-rolling-back-migration-manually_{context}"] -= Rolling back a migration manually - -You can roll back a failed migration manually by deleting the `stage` pods and unquiescing the application. - -If you run the same migration plan successfully, the resources from the failed migration are deleted automatically. - -[NOTE] -==== -The following resources remain in the migrated namespaces after a failed direct volume migration (DVM): - -* Config maps (source and destination clusters) -* `Secret` objects (source and destination clusters) -* `Rsync` CRs (source cluster) - -These resources do not affect rollback. You can delete them manually. -==== - -.Procedure - -. Delete the `stage` pods on all clusters: -+ -[source,terminal] ----- -$ oc delete $(oc get pods -l migration.openshift.io/is-stage-pod -n <namespace>) <1> ----- -<1> Namespaces specified in the `MigPlan` CR. - -. Unquiesce the application on the source cluster by scaling the replicas to their premigration number: -+ -[source,terminal] ----- -$ oc scale deployment <deployment> --replicas=<premigration_replicas> ----- -+ -The `migration.openshift.io/preQuiesceReplicas` annotation in the `Deployment` CR displays the premigration number of replicas: -+ -[source,yaml] ----- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - annotations: - deployment.kubernetes.io/revision: "1" - migration.openshift.io/preQuiesceReplicas: "1" ----- - -. Verify that the application pods are running on the source cluster: -+ -[source,terminal] ----- -$ oc get pod -n <namespace> ----- diff --git a/modules/migration-rolling-back-migration-web-console.adoc b/modules/migration-rolling-back-migration-web-console.adoc deleted file mode 100644 index 65bd906a8c79..000000000000 --- a/modules/migration-rolling-back-migration-web-console.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-rolling-back-migration-web-console_{context}"] -= Rolling back a migration by using the {mtc-short} web console - -You can roll back a migration by using the {mtc-full} ({mtc-short}) web console. - -[NOTE] -==== -The following resources remain in the migrated namespaces for debugging after a failed direct volume migration (DVM): - -* Config maps (source and destination clusters) -* `Secret` objects (source and destination clusters) -* `Rsync` CRs (source cluster) - -These resources do not affect rollback. You can delete them manually. - -If you later run the same migration plan successfully, the resources from the failed migration are deleted automatically. -==== - -If your application was stopped during a failed migration, you must roll back the migration to prevent data corruption in the persistent volume. 
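To check whether a workload was quiesced during the failed migration, you can look for the `migration.openshift.io/preQuiesceReplicas` annotation that is recorded on the source cluster, as shown in the manual rollback procedure above. The following is only a minimal sketch; the deployment name and namespace are placeholders:

[source,terminal]
----
$ oc get deployment <deployment> -n <namespace> -o yaml | grep preQuiesceReplicas
----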
- -Rollback is not required if the application was not stopped during migration because the original application is still running on the source cluster. - -.Procedure - -. In the {mtc-short} web console, click *Migration plans*. -. Click the Options menu {kebab} beside a migration plan and select *Rollback* under *Migration*. -. Click *Rollback* and wait for rollback to complete. -+ -In the migration plan details, *Rollback succeeded* is displayed. - -. Verify that rollback was successful in the {product-title} web console of the source cluster: - -.. Click *Home* -> *Projects*. -.. Click the migrated project to view its status. -.. In the *Routes* section, click *Location* to verify that the application is functioning, if applicable. -.. Click *Workloads* -> *Pods* to verify that the pods are running in the migrated namespace. -.. Click *Storage* -> *Persistent volumes* to verify that the migrated persistent volume is correctly provisioned. diff --git a/modules/migration-rsync-mig-migration-root-non-root.adoc b/modules/migration-rsync-mig-migration-root-non-root.adoc deleted file mode 100644 index ace62407047f..000000000000 --- a/modules/migration-rsync-mig-migration-root-non-root.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc -[id="migration-rsync-mig-migration-root-non-root_{context}"] -== Configuring the MigMigration CR as root or non-root per migration - -On the destination cluster, you can configure the `MigMigration` CR to run Rsync as root or non-root, with the following non-root options: - -* As a specific user ID (UID) -* As a specific group ID (GID) - -.Procedure - -. To run Rsync as root, configure the `MigMigration` CR according to this example: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - name: migration-controller - namespace: openshift-migration -spec: - [...] - runAsRoot: true ----- - -. To run Rsync as a specific User ID (UID) or as a specific Group ID (GID), configure the `MigMigration` CR according to this example: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - name: migration-controller - namespace: openshift-migration -spec: - [...] - runAsUser: 10010001 - runAsGroup: 3 ----- diff --git a/modules/migration-rsync-migration-controller-root-non-root.adoc b/modules/migration-rsync-migration-controller-root-non-root.adoc deleted file mode 100644 index d8a4c937fa24..000000000000 --- a/modules/migration-rsync-migration-controller-root-non-root.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc -[id="migration-rsync-migration-controller-root-non-root_{context}"] -== Configuring the MigrationController CR as root or non-root for all migrations - -By default, Rsync runs as non-root. - -On the destination cluster, you can configure the `MigrationController` CR to run Rsync as root. - -.Procedure - -* Configure the `MigrationController` CR as follows: -+ -[source,yaml] ----- -apiVersion: migration.openshift.io/v1alpha1 -kind: MigrationController -metadata: - name: migration-controller - namespace: openshift-migration -spec: - [...] - migration_rsync_privileged: true ----- -+ -This configuration will apply to all future migrations. 
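As an illustrative alternative to editing the `MigrationController` manifest by hand, you could apply the same setting with a merge patch. This is only a sketch; the CR name and namespace match the example above, so adjust them to your installation:

[source,terminal]
----
$ oc patch migrationcontroller migration-controller -n openshift-migration \
    --type merge -p '{"spec":{"migration_rsync_privileged":true}}'
----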
diff --git a/modules/migration-running-migration-plan-cam.adoc b/modules/migration-running-migration-plan-cam.adoc deleted file mode 100644 index c52eadf49742..000000000000 --- a/modules/migration-running-migration-plan-cam.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc - -:_content-type: PROCEDURE -[id="migration-running-migration-plan-cam_{context}"] -= Running a migration plan in the {mtc-short} web console - -You can migrate applications and data with the migration plan you created in the {mtc-full} ({mtc-short}) web console. - -[NOTE] -==== -During migration, {mtc-short} sets the reclaim policy of migrated persistent volumes (PVs) to `Retain` on the target cluster. - -The `Backup` custom resource contains a `PVOriginalReclaimPolicy` annotation that indicates the original reclaim policy. You can manually restore the reclaim policy of the migrated PVs. -==== - -.Prerequisites - -The {mtc-short} web console must contain the following: - -* Source cluster in a `Ready` state -* Target cluster in a `Ready` state -* Replication repository -* Valid migration plan - -.Procedure - -. Log in to the {mtc-short} web console and click *Migration plans*. -. Click the Options menu {kebab} next to a migration plan and select one of the following options under *Migration*: - -* *Stage* copies data from the source cluster to the target cluster without stopping the application. -* *Cutover* stops the transactions on the source cluster and moves the resources to the target cluster. -+ -Optional: In the *Cutover migration* dialog, you can clear the *Halt transactions on the source cluster during migration* checkbox. - -* *State* copies selected persistent volume claims (PVCs). -+ -[IMPORTANT] -==== -Do not use state migration to migrate a namespace between clusters. Use stage or cutover migration instead. -==== - -** Select one or more PVCs in the *State migration* dialog and click *Migrate*. - -. When the migration is complete, verify that the application migrated successfully in the {product-title} web console: - -.. Click *Home* -> *Projects*. -.. Click the migrated project to view its status. -.. In the *Routes* section, click *Location* to verify that the application is functioning, if applicable. -.. Click *Workloads* -> *Pods* to verify that the pods are running in the migrated namespace. -.. Click *Storage* -> *Persistent volumes* to verify that the migrated persistent volumes are correctly provisioned. diff --git a/modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc b/modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc deleted file mode 100644 index 45ce02067538..000000000000 --- a/modules/migration-setting-up-target-cluster-to-accept-source-dns-domain.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/planning-considerations-3-4.adoc -// * migration_toolkit_for_containers/network-considerations-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-setting-up-target-cluster-to-accept-source-dns-domain_{context}"] -= Setting up the target cluster to accept the source DNS domain - -You can set up the target cluster to accept requests for a migrated application in the DNS domain of the source cluster. - -.Procedure - -For both non-secure HTTP access and secure HTTPS access, perform the following steps: - -. 
Create a route in the target cluster's project that is configured to accept requests addressed to the application's FQDN in the source cluster: -+ -[source,terminal] ----- -$ oc expose svc <app1-svc> --hostname <app1.apps.source.example.com> \ - -n <app1-namespace> ----- -+ -With this new route in place, the server accepts any request for that FQDN and sends it to the corresponding application pods. -In addition, when you migrate the application, another route is created in the target cluster domain. Requests reach the migrated application using either of these hostnames. - -. Create a DNS record with your DNS provider that points the application's FQDN in the source cluster to the IP address of the default load balancer of the target cluster. This will redirect traffic away from your source cluster to your target cluster. -+ -The FQDN of the application resolves to the load balancer of the target cluster. The default Ingress Controller router accept requests for that FQDN because a route for that hostname is exposed. - -For secure HTTPS access, perform the following additional step: - -. Replace the x509 certificate of the default Ingress Controller created during the installation process with a custom certificate. -. Configure this certificate to include the wildcard DNS domains for both the source and target clusters in the `subjectAltName` field. -+ -The new certificate is valid for securing connections made using either DNS domain. diff --git a/modules/migration-state-migration-cli.adoc b/modules/migration-state-migration-cli.adoc deleted file mode 100644 index 24d85daa9dbe..000000000000 --- a/modules/migration-state-migration-cli.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-state-migration-cli_{context}"] -= State migration - -You can perform repeatable, state-only migrations by using {mtc-full} ({mtc-short}) to migrate persistent volume claims (PVCs) that constitute an application's state. You migrate specified PVCs by excluding other PVCs from the migration plan. You can map the PVCs to ensure that the source and the target PVCs are synchronized. Persistent volume (PV) data is copied to the target cluster. The PV references are not moved, and the application pods continue to run on the source cluster. - -State migration is specifically designed to be used in conjunction with external CD mechanisms, such as OpenShift Gitops. You can migrate application manifests using GitOps while migrating the state using {mtc-short}. - -If you have a CI/CD pipeline, you can migrate stateless components by deploying them on the target cluster. Then you can migrate stateful components by using {mtc-short}. - -You can perform a state migration between clusters or within the same cluster. - -[IMPORTANT] -==== -State migration migrates only the components that constitute an application's state. If you want to migrate an entire namespace, use stage or cutover migration. -==== - -.Prerequisites - -* The state of the application on the source cluster is persisted in `PersistentVolumes` provisioned through `PersistentVolumeClaims`. - -* The manifests of the application are available in a central repository that is accessible from both the source and the target clusters. - - -.Procedure - -. Migrate persistent volume data from the source to the target cluster. 
-+ -You can perform this step as many times as needed. The source application continues running. - -. Quiesce the source application. -+ -You can do this by setting the replicas of workload resources to `0`, either directly on the source cluster or by updating the manifests in GitHub and re-syncing the Argo CD application. - -. Clone application manifests to the target cluster. -+ -You can use Argo CD to clone the application manifests to the target cluster. - -. Migrate the remaining volume data from the source to the target cluster. -+ -Migrate any new data created by the application during the state migration process by performing a final data migration. - -. If the cloned application is in a quiesced state, unquiesce it. - -. Switch the DNS record to the target cluster to re-direct user traffic to the migrated application. - -[NOTE] -==== -{mtc-short} 1.6 cannot quiesce applications automatically when performing state migration. It can only migrate PV data. Therefore, you must use your CD mechanisms for quiescing or unquiescing applications. - -{mtc-short} 1.7 introduces explicit Stage and Cutover flows. You can use staging to perform initial data transfers as many times as needed. Then you can perform a cutover, in which the source applications are quiesced automatically. -==== diff --git a/modules/migration-terminology.adoc b/modules/migration-terminology.adoc deleted file mode 100644 index e31bd5b6eddf..000000000000 --- a/modules/migration-terminology.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/about-mtc-3-4.adoc -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/about-mtc.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -[id="migration-terminology_{context}"] -= Terminology - -[cols="1,3a", options="header"] -.{mtc-short} terminology -|=== -|Term |Definition -|Source cluster |Cluster from which the applications are migrated. -|Destination cluster^[1]^ |Cluster to which the applications are migrated. -|Replication repository |Object storage used for copying images, volumes, and Kubernetes objects during indirect migration or for Kubernetes objects during direct volume migration or direct image migration. - -The replication repository must be accessible to all clusters. - -|Host cluster |Cluster on which the `migration-controller` pod and the web console are running. The host cluster is usually the destination cluster but this is not required. - -The host cluster does not require an exposed registry route for direct image migration. -|Remote cluster |A remote cluster is usually the source cluster but this is not required. - -A remote cluster requires a `Secret` custom resource that contains the `migration-controller` service account token. - -A remote cluster requires an exposed secure registry route for direct image migration. - -|Indirect migration |Images, volumes, and Kubernetes objects are copied from the source cluster to the replication repository and then from the replication repository to the destination cluster. -|Direct volume migration |Persistent volumes are copied directly from the source cluster to the destination cluster. -|Direct image migration |Images are copied directly from the source cluster to the destination cluster. -|Stage migration |Data is copied to the destination cluster without stopping the application. 
- -Running a stage migration multiple times reduces the duration of the cutover migration. -|Cutover migration |The application is stopped on the source cluster and its resources are migrated to the destination cluster. -|State migration |Application state is migrated by copying specific persistent volume claims to the destination cluster. -|Rollback migration |Rollback migration rolls back a completed migration. -|=== -^1^ Called the _target_ cluster in the {mtc-short} web console. diff --git a/modules/migration-understanding-data-copy-methods.adoc b/modules/migration-understanding-data-copy-methods.adoc deleted file mode 100644 index 4b9504108ccd..000000000000 --- a/modules/migration-understanding-data-copy-methods.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc -// * migration_toolkit_for_containers/migrating-applications-with-mtc.adoc - -:_content-type: CONCEPT -[id="migration-understanding-data-copy-methods_{context}"] -= About data copy methods - -The {mtc-full} ({mtc-short}) supports the file system and snapshot data copy methods for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. - -[id="file-system-copy-method_{context}"] -== File system copy method - -{mtc-short} copies data files from the source cluster to the replication repository, and from there to the target cluster. - -The file system copy method uses Restic for indirect migration or Rsync for direct volume migration. - -[cols="1,1", options="header"] -.File system copy method summary -|=== -|Benefits |Limitations -a|* Clusters can have different storage classes. -* Supported for all S3 storage providers. -* Optional data verification with checksum. -* Supports direct volume migration, which significantly increases performance. -a|* Slower than the snapshot copy method. -* Optional data verification significantly reduces performance. -|=== - -[NOTE] -==== -The Restic and Rsync PV migration assumes that the PVs supported are only `volumeMode=filesystem`. Using `volumeMode=Block` for file system migration is _not_ -supported. -==== - - -[id="snapshot-copy-method_{context}"] -== Snapshot copy method - -{mtc-short} copies a snapshot of the source cluster data to the replication repository of a cloud provider. The data is restored on the target cluster. - -The snapshot copy method can be used with Amazon Web Services, Google Cloud Provider, and Microsoft Azure. - -[cols="1,1", options="header"] -.Snapshot copy method summary -|=== -|Benefits |Limitations -a|* Faster than the file system copy method. -a|* Cloud provider must support snapshots. -* Clusters must be on the same cloud provider. -* Clusters must be in the same location or region. -* Clusters must have the same storage class. -* Storage class must be compatible with snapshots. -* Does not support direct volume migration. 
-|=== diff --git a/modules/migration-uninstalling-mtc-clean-up.adoc b/modules/migration-uninstalling-mtc-clean-up.adoc deleted file mode 100644 index 807a880686bf..000000000000 --- a/modules/migration-uninstalling-mtc-clean-up.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/installing-mtc.adoc -// * migration_toolkit_for_containers/installing-mtc-restricted.adoc - -:_content-type: PROCEDURE -[id="migration-uninstalling-mtc-clean-up_{context}"] -= Uninstalling {mtc-short} and deleting resources - -You can uninstall the {mtc-full} ({mtc-short}) and delete its resources to clean up the cluster. - -[NOTE] -==== -Deleting the `velero` CRDs removes Velero from the cluster. -==== - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Delete the `MigrationController` custom resource (CR) on all clusters: -+ -[source,terminal] ----- -$ oc delete migrationcontroller <migration_controller> ----- - -. Uninstall the {mtc-full} Operator on {product-title} 4 by using the Operator Lifecycle Manager. -ifdef::troubleshooting-3-4[] -. Uninstall the {mtc-full} Operator on {product-title} 3 by deleting the `operator` CR manifest: -+ -[source,terminal] ----- -$ oc delete -f operator.yml ----- -endif::[] - -. Delete cluster-scoped resources on all clusters by running the following commands: - -* `migration` custom resource definitions (CRDs): -+ -[source,terminal] ----- -$ oc delete $(oc get crds -o name | grep 'migration.openshift.io') ----- - -* `velero` CRDs: -+ -[source,terminal] ----- -$ oc delete $(oc get crds -o name | grep 'velero') ----- - -* `migration` cluster roles: -+ -[source,terminal] ----- -$ oc delete $(oc get clusterroles -o name | grep 'migration.openshift.io') ----- - -* `migration-operator` cluster role: -+ -[source,terminal] ----- -$ oc delete clusterrole migration-operator ----- - -* `velero` cluster roles: -+ -[source,terminal] ----- -$ oc delete $(oc get clusterroles -o name | grep 'velero') ----- - -* `migration` cluster role bindings: -+ -[source,terminal] ----- -$ oc delete $(oc get clusterrolebindings -o name | grep 'migration.openshift.io') ----- - -* `migration-operator` cluster role bindings: -+ -[source,terminal] ----- -$ oc delete clusterrolebindings migration-operator ----- - -* `velero` cluster role bindings: -+ -[source,terminal] ----- -$ oc delete $(oc get clusterrolebindings -o name | grep 'velero') ----- diff --git a/modules/migration-updating-deprecated-internal-images.adoc b/modules/migration-updating-deprecated-internal-images.adoc deleted file mode 100644 index c7743b8be913..000000000000 --- a/modules/migration-updating-deprecated-internal-images.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-updating-deprecated-internal-images_{context}"] -= Updating deprecated internal images - -If your application uses images from the `openshift` namespace, the required versions of the images must be present on the target cluster. - -If an {product-title} 3 image is deprecated in {product-title} {product-version}, you can manually update the image stream tag by using `podman`. - -.Prerequisites - -* You must have `podman` installed. 
-* You must be logged in as a user with `cluster-admin` privileges. -* If you are using insecure registries, add your registry host values to the `[registries.insecure]` section of `/etc/container/registries.conf` to ensure that `podman` does not encounter a TLS verification error. -* The internal registries must be exposed on the source and target clusters. - -.Procedure - -. Ensure that the internal registries are exposed on the {product-title} 3 and 4 clusters. -+ -The {product-registry} is exposed by default on {product-title} 4. - -. If you are using insecure registries, add your registry host values to the `[registries.insecure]` section of `/etc/container/registries.conf` to ensure that `podman` does not encounter a TLS verification error. -. Log in to the {product-title} 3 registry: -+ -[source,terminal] ----- -$ podman login -u $(oc whoami) -p $(oc whoami -t) --tls-verify=false <registry_url>:<port> ----- - -. Log in to the {product-title} 4 registry: -+ -[source,terminal] ----- -$ podman login -u $(oc whoami) -p $(oc whoami -t) --tls-verify=false <registry_url>:<port> ----- - -. Pull the {product-title} 3 image: -+ -[source,terminal] ----- -$ podman pull <registry_url>:<port>/openshift/<image> ----- - -. Tag the {product-title} 3 image for the {product-title} 4 registry: -+ -[source,terminal] ----- -$ podman tag <registry_url>:<port>/openshift/<image> \ <1> - <registry_url>:<port>/openshift/<image> <2> ----- -<1> Specify the registry URL and port for the {product-title} 3 cluster. -<2> Specify the registry URL and port for the {product-title} 4 cluster. - -. Push the image to the {product-title} 4 registry: -+ -[source,terminal] ----- -$ podman push <registry_url>:<port>/openshift/<image> <1> ----- -<1> Specify the {product-title} 4 cluster. - -. Verify that the image has a valid image stream: -+ -[source,terminal] ----- -$ oc get imagestream -n openshift | grep <image> ----- -+ -.Example output -[source,terminal] ----- -NAME IMAGE REPOSITORY TAGS UPDATED -my_image image-registry.openshift-image-registry.svc:5000/openshift/my_image latest 32 seconds ago ----- diff --git a/modules/migration-upgrading-from-mtc-1-3.adoc b/modules/migration-upgrading-from-mtc-1-3.adoc deleted file mode 100644 index 876dd906a959..000000000000 --- a/modules/migration-upgrading-from-mtc-1-3.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/upgrading-3-4.adoc -// * migration_toolkit_for_containers/upgrading-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-upgrading-from-mtc-1-3_{context}"] -= Upgrading {mtc-short} 1.3 to {mtc-version} - -If you are upgrading {mtc-full} ({mtc-short}) version 1.3.x to {mtc-version}, you must update the `MigPlan` custom resource (CR) manifest on the cluster on which the `MigrationController` pod is running. - -Because the `indirectImageMigration` and `indirectVolumeMigration` parameters do not exist in {mtc-short} 1.3, their default value in version 1.4 is `false`, which means that direct image migration and direct volume migration are enabled. Because the direct migration requirements are not fulfilled, the migration plan cannot reach a `Ready` state unless these parameter values are changed to `true`. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Log in to the cluster on which the `MigrationController` pod is running. -. 
Get the `MigPlan` CR manifest: -+ -[source,terminal] ----- -$ oc get migplan <migplan> -o yaml -n openshift-migration ----- - -. Update the following parameter values and save the file as `migplan.yaml`: -+ -[source,yaml] ----- -... -spec: - indirectImageMigration: true - indirectVolumeMigration: true ----- - -. Replace the `MigPlan` CR manifest to apply the changes: -+ -[source,terminal] ----- -$ oc replace -f migplan.yaml -n openshift-migration ----- - -. Get the updated `MigPlan` CR manifest to verify the changes: -+ -[source,terminal] ----- -$ oc get migplan <migplan> -o yaml -n openshift-migration ----- diff --git a/modules/migration-upgrading-mtc-on-ocp-4.adoc b/modules/migration-upgrading-mtc-on-ocp-4.adoc deleted file mode 100644 index f6b3395cc2a4..000000000000 --- a/modules/migration-upgrading-mtc-on-ocp-4.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/upgrading-3-4.adoc -// * migration_toolkit_for_containers/upgrading-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-upgrading-mtc-on-ocp-4_{context}"] -= Upgrading the {mtc-full} on {product-title} {product-version} - -You can upgrade the {mtc-full} ({mtc-short}) on {product-title} {product-version} by using the Operator Lifecycle Manager. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} console, navigate to *Operators* -> *Installed Operators*. -+ -Operators that have a pending upgrade display an *Upgrade available* status. - -. Click *{mtc-full} Operator*. -. Click the *Subscription* tab. Any upgrades requiring approval are displayed next to *Upgrade Status*. For example, it might display *1 requires approval*. -. Click *1 requires approval*, then click *Preview Install Plan*. -. Review the resources that are listed as available for upgrade and click *Approve*. -. Navigate back to the *Operators -> Installed Operators* page to monitor the progress of the upgrade. When complete, the status changes to *Succeeded* and *Up to date*. -. Click *Workloads* -> *Pods* to verify that the {mtc-short} pods are running. diff --git a/modules/migration-upgrading-mtc-with-legacy-operator.adoc b/modules/migration-upgrading-mtc-with-legacy-operator.adoc deleted file mode 100644 index 25f4b7dda2c5..000000000000 --- a/modules/migration-upgrading-mtc-with-legacy-operator.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/upgrading-3-4.adoc -// * migration_toolkit_for_containers/upgrading-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-upgrading-mtc-with-legacy-operator_{context}"] -ifdef::upgrading-3-4[] -= Upgrading the {mtc-full} on {product-title} 3 - -You can upgrade {mtc-full} ({mtc-short}) on {product-title} 3 by manually installing the legacy {mtc-full} Operator. -endif::[] -ifdef::upgrading-mtc[] -= Upgrading the {mtc-full} on {product-title} versions 4.2 to 4.5 - -You can upgrade {mtc-full} ({mtc-short}) on {product-title} versions 4.2 to 4.5 by manually installing the legacy {mtc-full} Operator. -endif::[] - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. -* You must have access to `registry.redhat.io`. -* You must have `podman` installed. - -.Procedure - -. Log in to `registry.redhat.io` with your Red Hat Customer Portal credentials by entering the following command: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- - -. 
Download the `operator.yml` file by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman cp $(podman create \ - registry.redhat.io/rhmtc/openshift-migration-legacy-rhel8-operator:v{mtc-version}):/operator.yml ./ ----- - -. Replace the {mtc-full} Operator by entering the following command: -+ -[source,terminal] ----- -$ oc replace --force -f operator.yml ----- - -. Scale the `migration-operator` deployment to `0` to stop the deployment by entering the following command: -+ -[source,terminal] ----- -$ oc scale -n openshift-migration --replicas=0 deployment/migration-operator ----- - -. Scale the `migration-operator` deployment to `1` to start the deployment and apply the changes by entering the following command: -+ -[source,terminal] ----- -$ oc scale -n openshift-migration --replicas=1 deployment/migration-operator ----- - -. Verify that the `migration-operator` was upgraded by entering the following command: -+ -[source,terminal] ----- -$ oc -o yaml -n openshift-migration get deployment/migration-operator | grep image: | awk -F ":" '{ print $NF }' ----- - -. Download the `controller.yml` file by entering the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ podman cp $(podman create \ - registry.redhat.io/rhmtc/openshift-migration-legacy-rhel8-operator:v{mtc-version}):/controller.yml ./ ----- - -. Create the `migration-controller` object by entering the following command: -+ -[source,terminal] ----- -$ oc create -f controller.yml ----- - -ifdef::upgrading-3-4[] -. If you have previously added the {product-title} 3 cluster to the {mtc-short} web console, you must update the service account token in the web console because the upgrade process deletes and restores the `openshift-migration` namespace: - -.. Obtain the service account token by entering the following command: -+ -[source,terminal] ----- -$ oc sa get-token migration-controller -n openshift-migration ----- - -.. In the {mtc-short} web console, click *Clusters*. -.. Click the Options menu {kebab} next to the cluster and select *Edit*. -.. Enter the new service account token in the *Service account token* field. -.. Click *Update cluster* and then click *Close*. -endif::[] - -. Verify that the {mtc-short} pods are running by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-migration ----- diff --git a/modules/migration-using-mig-log-reader.adoc b/modules/migration-using-mig-log-reader.adoc deleted file mode 100644 index 28a1fbe4d35e..000000000000 --- a/modules/migration-using-mig-log-reader.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-using-mig-log-reader_{context}"] -= Using the migration log reader - -You can use the migration log reader to display a single filtered view of all the migration logs. - -.Procedure - -. Get the `mig-log-reader` pod: -+ -[source,terminal] ----- -$ oc -n openshift-migration get pods | grep log ----- - -. Enter the following command to display a single migration log: -+ -[source,terminal] ----- -$ oc -n openshift-migration logs -f <mig-log-reader-pod> -c color <1> ----- -<1> The `-c plain` option displays the log without colors. 
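For example, to follow the aggregated log without colors and filter it for errors, you could combine the command above with standard shell tooling. This is a sketch; the pod name placeholder is the same as in the previous step:

[source,terminal]
----
$ oc -n openshift-migration logs -f <mig-log-reader-pod> -c plain | grep -i error
----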
diff --git a/modules/migration-using-mtc-crs-for-troubleshooting.adoc b/modules/migration-using-mtc-crs-for-troubleshooting.adoc deleted file mode 100644 index 3332e271f866..000000000000 --- a/modules/migration-using-mtc-crs-for-troubleshooting.adoc +++ /dev/null @@ -1,208 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-using-mtc-crs-for-troubleshooting_{context}"] -= Using {mtc-short} custom resources for troubleshooting - -You can check the following {mtc-full} ({mtc-short}) custom resources (CRs) to troubleshoot a failed migration: - -* `MigCluster` -* `MigStorage` -* `MigPlan` -* `BackupStorageLocation` -+ -The `BackupStorageLocation` CR contains a `migrationcontroller` label to identify the {mtc-short} instance that created the CR: -+ -[source,yaml] ----- - labels: - migrationcontroller: ebe13bee-c803-47d0-a9e9-83f380328b93 ----- - -* `VolumeSnapshotLocation` -+ -The `VolumeSnapshotLocation` CR contains a `migrationcontroller` label to identify the {mtc-short} instance that created the CR: -+ -[source,yaml] ----- - labels: - migrationcontroller: ebe13bee-c803-47d0-a9e9-83f380328b93 ----- - -* `MigMigration` -* `Backup` -+ -{mtc-short} changes the reclaim policy of migrated persistent volumes (PVs) to `Retain` on the target cluster. The `Backup` CR contains an `openshift.io/orig-reclaim-policy` annotation that indicates the original reclaim policy. You can manually restore the reclaim policy of the migrated PVs. - -* `Restore` - -.Procedure - -. List the `MigMigration` CRs in the `openshift-migration` namespace: -+ -[source,terminal] ----- -$ oc get migmigration -n openshift-migration ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -88435fe0-c9f8-11e9-85e6-5d593ce65e10 6m42s ----- - -. Inspect the `MigMigration` CR: -+ -[source,terminal] ----- -$ oc describe migmigration 88435fe0-c9f8-11e9-85e6-5d593ce65e10 -n openshift-migration ----- -+ -The output is similar to the following examples. - -.`MigMigration` example output -[source,text] ----- -name: 88435fe0-c9f8-11e9-85e6-5d593ce65e10 -namespace: openshift-migration -labels: <none> -annotations: touch: 3b48b543-b53e-4e44-9d34-33563f0f8147 -apiVersion: migration.openshift.io/v1alpha1 -kind: MigMigration -metadata: - creationTimestamp: 2019-08-29T01:01:29Z - generation: 20 - resourceVersion: 88179 - selfLink: /apis/migration.openshift.io/v1alpha1/namespaces/openshift-migration/migmigrations/88435fe0-c9f8-11e9-85e6-5d593ce65e10 - uid: 8886de4c-c9f8-11e9-95ad-0205fe66cbb6 -spec: - migPlanRef: - name: socks-shop-mig-plan - namespace: openshift-migration - quiescePods: true - stage: false -status: - conditions: - category: Advisory - durable: True - lastTransitionTime: 2019-08-29T01:03:40Z - message: The migration has completed successfully. 
- reason: Completed - status: True - type: Succeeded - phase: Completed - startTimestamp: 2019-08-29T01:01:29Z -events: <none> ----- - -.`Velero` backup CR #2 example output that describes the PV data -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - annotations: - openshift.io/migrate-copy-phase: final - openshift.io/migrate-quiesce-pods: "true" - openshift.io/migration-registry: 172.30.105.179:5000 - openshift.io/migration-registry-dir: /socks-shop-mig-plan-registry-44dd3bd5-c9f8-11e9-95ad-0205fe66cbb6 - openshift.io/orig-reclaim-policy: delete - creationTimestamp: "2019-08-29T01:03:15Z" - generateName: 88435fe0-c9f8-11e9-85e6-5d593ce65e10- - generation: 1 - labels: - app.kubernetes.io/part-of: migration - migmigration: 8886de4c-c9f8-11e9-95ad-0205fe66cbb6 - migration-stage-backup: 8886de4c-c9f8-11e9-95ad-0205fe66cbb6 - velero.io/storage-location: myrepo-vpzq9 - name: 88435fe0-c9f8-11e9-85e6-5d593ce65e10-59gb7 - namespace: openshift-migration - resourceVersion: "87313" - selfLink: /apis/velero.io/v1/namespaces/openshift-migration/backups/88435fe0-c9f8-11e9-85e6-5d593ce65e10-59gb7 - uid: c80dbbc0-c9f8-11e9-95ad-0205fe66cbb6 -spec: - excludedNamespaces: [] - excludedResources: [] - hooks: - resources: [] - includeClusterResources: null - includedNamespaces: - - sock-shop - includedResources: - - persistentvolumes - - persistentvolumeclaims - - namespaces - - imagestreams - - imagestreamtags - - secrets - - configmaps - - pods - labelSelector: - matchLabels: - migration-included-stage-backup: 8886de4c-c9f8-11e9-95ad-0205fe66cbb6 - storageLocation: myrepo-vpzq9 - ttl: 720h0m0s - volumeSnapshotLocations: - - myrepo-wv6fx -status: - completionTimestamp: "2019-08-29T01:02:36Z" - errors: 0 - expiration: "2019-09-28T01:02:35Z" - phase: Completed - startTimestamp: "2019-08-29T01:02:35Z" - validationErrors: null - version: 1 - volumeSnapshotsAttempted: 0 - volumeSnapshotsCompleted: 0 - warnings: 0 ----- - -.`Velero` restore CR #2 example output that describes the Kubernetes resources - -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - annotations: - openshift.io/migrate-copy-phase: final - openshift.io/migrate-quiesce-pods: "true" - openshift.io/migration-registry: 172.30.90.187:5000 - openshift.io/migration-registry-dir: /socks-shop-mig-plan-registry-36f54ca7-c925-11e9-825a-06fa9fb68c88 - creationTimestamp: "2019-08-28T00:09:49Z" - generateName: e13a1b60-c927-11e9-9555-d129df7f3b96- - generation: 3 - labels: - app.kubernetes.io/part-of: migration - migmigration: e18252c9-c927-11e9-825a-06fa9fb68c88 - migration-final-restore: e18252c9-c927-11e9-825a-06fa9fb68c88 - name: e13a1b60-c927-11e9-9555-d129df7f3b96-gb8nx - namespace: openshift-migration - resourceVersion: "82329" - selfLink: /apis/velero.io/v1/namespaces/openshift-migration/restores/e13a1b60-c927-11e9-9555-d129df7f3b96-gb8nx - uid: 26983ec0-c928-11e9-825a-06fa9fb68c88 -spec: - backupName: e13a1b60-c927-11e9-9555-d129df7f3b96-sz24f - excludedNamespaces: null - excludedResources: - - nodes - - events - - events.events.k8s.io - - backups.velero.io - - restores.velero.io - - resticrepositories.velero.io - includedNamespaces: null - includedResources: null - namespaceMapping: null - restorePVs: true -status: - errors: 0 - failureReason: "" - phase: Completed - validationErrors: null - warnings: 15 ----- diff --git a/modules/migration-using-must-gather.adoc b/modules/migration-using-must-gather.adoc deleted file mode 100644 index 77422e3feb36..000000000000 --- 
a/modules/migration-using-must-gather.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc.adoc -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: PROCEDURE -[id="migration-using-must-gather_{context}"] -= Using the must-gather tool - -You can collect logs, metrics, and information about {local-product} custom resources by using the `must-gather` tool. - -The `must-gather` data must be attached to all customer cases. - -ifdef::troubleshooting-3-4,troubleshooting-mtc[] -You can collect data for a one-hour or a 24-hour period and view the data with the Prometheus console. -endif::[] -ifdef::oadp-troubleshooting[] -You can run the `must-gather` tool with the following data collection options: - -* Full `must-gather` data collection collects Prometheus metrics, pod logs, and Velero CR information for all namespaces where the OADP Operator is installed. -* Essential `must-gather` data collection collects pod logs and Velero CR information for a specific duration of time, for example, one hour or 24 hours. Prometheus metrics and duplicate logs are not included. -* `must-gather` data collection with timeout. Data collection can take a long time if there are many failed `Backup` CRs. You can improve performance by setting a timeout value. -* Prometheus metrics data dump downloads an archive file containing the metrics data collected by Prometheus. -endif::[] - -.Prerequisites - -* You must be logged in to the {product-title} cluster as a user with the `cluster-admin` role. -* You must have the OpenShift CLI (`oc`) installed. - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. -. Run the `oc adm must-gather` command for one of the following data collection options: - -ifdef::troubleshooting-3-4,troubleshooting-mtc[] -* To collect data for the past hour: -endif::[] -ifdef::oadp-troubleshooting[] -* Full `must-gather` data collection, including Prometheus metrics: -endif::[] -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather} ----- -+ -The data is saved as `must-gather/must-gather.tar.gz`. You can upload this file to a support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. - -ifdef::oadp-troubleshooting[] -* Essential `must-gather` data collection, without Prometheus metrics, for a specific time duration: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather} \ - -- /usr/bin/gather_<time>_essential <1> ----- -<1> Specify the time in hours. Allowed values are `1h`, `6h`, `24h`, `72h`, or `all`, for example, `gather_1h_essential` or `gather_all_essential`. - -* `must-gather` data collection with timeout: -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather} \ - -- /usr/bin/gather_with_timeout <timeout> <1> ----- -<1> Specify a timeout value in seconds. -endif::[] -ifdef::troubleshooting-3-4,troubleshooting-mtc[] -* To collect data for the past 24 hours: -endif::[] -ifdef::oadp-troubleshooting[] -* Prometheus metrics data dump: -endif::[] -+ -[source,terminal,subs="attributes+"] ----- -$ oc adm must-gather --image={must-gather} \ - -- /usr/bin/gather_metrics_dump ----- -+ -This operation can take a long time. The data is saved as `must-gather/metrics/prom_data.tar.gz`. 
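For example, if data collection stalls on a cluster with many failed `Backup` CRs, you might bound the run by using the timeout variant described above. This is only a sketch, assuming the `gather_with_timeout` script is available in your `must-gather` image; the 600-second value is arbitrary:

[source,terminal,subs="attributes+"]
----
$ oc adm must-gather --image={must-gather} \
  -- /usr/bin/gather_with_timeout 600
----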
- -[discrete] -[id="viewing-data-with-prometheus-console_{context}"] -== Viewing metrics data with the Prometheus console - -You can view the metrics data with the Prometheus console. - -.Procedure - -. Decompress the `prom_data.tar.gz` file: -+ -[source,terminal] ----- -$ tar -xvzf must-gather/metrics/prom_data.tar.gz ----- - -. Create a local Prometheus instance: -+ -[source,terminal] ----- -$ make prometheus-run ----- -+ -The command outputs the Prometheus URL. -+ -.Output -[source,terminal] ----- -Started Prometheus on http://localhost:9090 ----- - -. Launch a web browser and navigate to the URL to view the data by using the Prometheus web console. -. After you have viewed the data, delete the Prometheus instance and data: -+ -[source,terminal] ----- -$ make prometheus-cleanup ----- diff --git a/modules/migration-viewing-migration-plan-log.adoc b/modules/migration-viewing-migration-plan-log.adoc deleted file mode 100644 index ac0189105ffc..000000000000 --- a/modules/migration-viewing-migration-plan-log.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc - -:_content-type: PROCEDURE -[id="migration-viewing-migration-plan-log_{context}"] -= Viewing a migration plan log - -You can view an aggregated log for a migration plan. You use the {mtc-short} web console to copy a command to your clipboard and then run the command from the command line interface (CLI). - -The command displays the filtered logs of the following pods: - -* `Migration Controller` -* `Velero` -* `Restic` -* `Rsync` -* `Stunnel` -* `Registry` - -.Procedure - -. In the {mtc-short} web console, click *Migration Plans*. -. Click the *Migrations* number next to a migration plan. -. Click *View logs*. -. Click the Copy icon to copy the `oc logs` command to your clipboard. -. Log in to the relevant cluster and enter the command on the CLI. -+ -The aggregated log for the migration plan is displayed. diff --git a/modules/migration-viewing-migration-plan-resources.adoc b/modules/migration-viewing-migration-plan-resources.adoc deleted file mode 100644 index 099b72944d0a..000000000000 --- a/modules/migration-viewing-migration-plan-resources.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/troubleshooting-3-4.adoc -// * migration_toolkit_for_containers/troubleshooting-mtc.adoc - -:_content-type: PROCEDURE -[id="migration-viewing-migration-plan-resources_{context}"] -= Viewing migration plan resources - -You can view migration plan resources to monitor a running migration or to troubleshoot a failed migration by using the {mtc-short} web console and the command line interface (CLI). - -.Procedure - -. In the {mtc-short} web console, click *Migration Plans*. -. Click the *Migrations* number next to a migration plan to view the *Migrations* page. -. Click a migration to view the *Migration details*. -. Expand *Migration resources* to view the migration resources and their status in a tree view. -+ -[NOTE] -==== -To troubleshoot a failed migration, start with a high-level resource that has failed and then work down the resource tree towards the lower-level resources. -==== - -. Click the Options menu {kebab} next to a resource and select one of the following options: - -* *Copy `oc describe` command* copies the command to your clipboard. - -** Log in to the relevant cluster and then run the command. 
-+ -The conditions and events of the resource are displayed in YAML format. - -* *Copy `oc logs` command* copies the command to your clipboard. - -** Log in to the relevant cluster and then run the command. -+ -If the resource supports log filtering, a filtered log is displayed. - -* *View JSON* displays the resource data in JSON format in a web browser. -+ -The data is the same as the output for the `oc get <resource>` command. diff --git a/modules/migration-writing-ansible-playbook-hook.adoc b/modules/migration-writing-ansible-playbook-hook.adoc deleted file mode 100644 index f2238d0a8446..000000000000 --- a/modules/migration-writing-ansible-playbook-hook.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc -// * migration_toolkit_for_containers/advanced-migration-options-mtc.adoc - -[id="migration-writing-ansible-playbook-hook_{context}"] -= Writing an Ansible playbook for a migration hook - -You can write an Ansible playbook to use as a migration hook. The hook is added to a migration plan by using the {mtc-short} web console or by specifying values for the `spec.hooks` parameters in the `MigPlan` custom resource (CR) manifest. - -The Ansible playbook is mounted onto a hook container as a config map. The hook container runs as a job, using the cluster, service account, and namespace specified in the `MigPlan` CR. The hook container uses a specified service account token so that the tasks do not require authentication before they run in the cluster. - -[id="migration-writing-ansible-playbook-hook-ansible-modules_{context}"] -== Ansible modules - -You can use the Ansible `shell` module to run `oc` commands. - -.Example `shell` module -[source,yaml] ----- -- hosts: localhost - gather_facts: false - tasks: - - name: get pod name - shell: oc get po --all-namespaces ----- - -You can use `kubernetes.core` modules, such as `k8s_info`, to interact with Kubernetes resources. - -.Example `k8s_facts` module -[source,yaml] ----- -- hosts: localhost - gather_facts: false - tasks: - - name: Get pod - k8s_info: - kind: pods - api: v1 - namespace: openshift-migration - name: "{{ lookup( 'env', 'HOSTNAME') }}" - register: pods - - - name: Print pod name - debug: - msg: "{{ pods.resources[0].metadata.name }}" ----- - -You can use the `fail` module to produce a non-zero exit status in cases where a non-zero exit status would not normally be produced, ensuring that the success or failure of a hook is detected. Hooks run as jobs and the success or failure status of a hook is based on the exit status of the job container. - -.Example `fail` module -[source,yaml] ----- -- hosts: localhost - gather_facts: false - tasks: - - name: Set a boolean - set_fact: - do_fail: true - - - name: "fail" - fail: - msg: "Cause a failure" - when: do_fail ----- - -[id="migration-writing-ansible-playbook-hook-environment-variables_{context}"] -== Environment variables - -The `MigPlan` CR name and migration namespaces are passed as environment variables to the hook container. These variables are accessed by using the `lookup` plugin. 
- -.Example environment variables -[source,yaml] ----- -- hosts: localhost - gather_facts: false - tasks: - - set_fact: - namespaces: "{{ (lookup( 'env', 'MIGRATION_NAMESPACES')).split(',') }}" - - - debug: - msg: "{{ item }}" - with_items: "{{ namespaces }}" - - - debug: - msg: "{{ lookup( 'env', 'MIGRATION_PLAN_NAME') }}" ----- diff --git a/modules/minimum-ibm-power-system-requirements.adoc b/modules/minimum-ibm-power-system-requirements.adoc deleted file mode 100644 index 397e5b2cb32f..000000000000 --- a/modules/minimum-ibm-power-system-requirements.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc - -:_content-type: CONCEPT -[id="minimum-ibm-power-system-requirements_{context}"] -= Minimum IBM Power requirements - -You can install {product-title} version {product-version} on the following IBM hardware: - -* IBM Power9 or Power10 processor-based systems - -[NOTE] -==== -Support for {op-system} functionality for all IBM Power8 models, IBM Power AC922, IBM Power IC922, and IBM Power LC922 is deprecated in {product-title} {product-version}. Red Hat recommends that you use later hardware models. -==== - -[discrete] -== Hardware requirements - -* Six IBM Power bare metal servers or six LPARs across multiple PowerVM servers - -[discrete] -== Operating system requirements - -* One instance of an IBM Power9 or Power10 processor-based system - -On your IBM Power instance, set up: - -* Three guest virtual machines for {product-title} control plane machines -* Two guest virtual machines for {product-title} compute machines -* One guest virtual machine for the temporary {product-title} bootstrap machine - -[discrete] -== Disk storage for the IBM Power guest virtual machines - -* Local storage, or storage provisioned by the Virtual I/O Server using vSCSI, NPIV (N-Port ID Virtualization) or SSP (shared storage pools) - -[discrete] -== Network for the PowerVM guest virtual machines - -* Dedicated physical adapter, or SR-IOV virtual function -* Available by the Virtual I/O Server using Shared Ethernet Adapter -* Virtualized by the Virtual I/O Server using IBM vNIC - -[discrete] -== Storage / main memory - -* 100 GB / 16 GB for {product-title} control plane machines -* 100 GB / 8 GB for {product-title} compute machines -* 100 GB / 16 GB for the temporary {product-title} bootstrap machine diff --git a/modules/minimum-ibm-z-system-requirements.adoc b/modules/minimum-ibm-z-system-requirements.adoc deleted file mode 100644 index 5a2f0744ef49..000000000000 --- a/modules/minimum-ibm-z-system-requirements.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc - -:_content-type: CONCEPT -[id="minimum-ibm-z-system-requirements_{context}"] -= Minimum {ibmzProductName} system environment - -You can install {product-title} version {product-version} on the following IBM hardware: - -* IBM z16 (all models), IBM z15 (all models), IBM z14 (all models) -* {linuxoneProductName} 4 (all models), {linuxoneProductName} III (all models), {linuxoneProductName} Emperor II, {linuxoneProductName} Rockhopper II - -[discrete] -== Hardware requirements - -* The equivalent of six Integrated Facilities for Linux (IFL), which are SMT2 enabled, for each cluster. 
-* At least one network connection to both connect to the `LoadBalancer` service and to serve data for traffic outside the cluster. - -[NOTE] -==== -You can use dedicated or shared IFLs to assign sufficient compute resources. Resource sharing is one of the key strengths of {ibmzProductName}. However, you must adjust capacity correctly on each hypervisor layer and ensure sufficient resources for every {product-title} cluster. -==== - -[IMPORTANT] -==== -Since the overall performance of the cluster can be impacted, the LPARs that are used to set up the {product-title} clusters must provide sufficient compute capacity. In this context, LPAR weight management, entitlements, and CPU shares on the hypervisor level play an important role. -==== - -[discrete] -== Operating system requirements - -* One instance of z/VM 7.1 or later - -On your z/VM instance, set up: - -* Three guest virtual machines for {product-title} control plane machines -* Two guest virtual machines for {product-title} compute machines -* One guest virtual machine for the temporary {product-title} bootstrap machine - -[discrete] -== {ibmzProductName} network connectivity requirements - -To install on {ibmzProductName} under z/VM, you require a single z/VM virtual NIC in layer 2 mode. You also need: - -* A direct-attached OSA or RoCE network adapter -* A z/VM VSwitch set up. For a preferred setup, use OSA link aggregation. - -[discrete] -=== Disk storage for the z/VM guest virtual machines - -* FICON attached disk storage (DASDs). These can be z/VM minidisks, fullpack minidisks, or dedicated DASDs, all of which must be formatted as CDL, which is the default. To reach the minimum required DASD size for {op-system-first} installations, you need extended address volumes (EAV). If available, use HyperPAV to ensure optimal performance. -* FCP attached disk storage - -[discrete] -=== Storage / Main Memory - -* 16 GB for {product-title} control plane machines -* 8 GB for {product-title} compute machines -* 16 GB for the temporary {product-title} bootstrap machine diff --git a/modules/minimum-required-permissions-ipi-azure.adoc b/modules/minimum-required-permissions-ipi-azure.adoc deleted file mode 100644 index abb45058008e..000000000000 --- a/modules/minimum-required-permissions-ipi-azure.adoc +++ /dev/null @@ -1,280 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-account.adoc - -[id="minimum-required-permissions-ipi-azure_{context}"] -= Required Azure permissions for installer-provisioned infrastructure - -When you assign `Contributor` and `User Access Administrator` roles to the service principal, you automatically grant all the required permissions. - -If your organization's security policies require a more restrictive set of permissions, you can create a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the necessary permissions. The following permissions are required for creating an {product-title} cluster on Microsoft Azure. 
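As an illustrative sketch only, such a custom role could be created with the Azure CLI by writing the permissions to a role definition file and passing it to `az role definition create`. The role name, description, and file name below are placeholders, and the `Actions` array must be populated from the permission groups that follow.

.Example: creating a custom role from a JSON definition (illustrative sketch)
[source,terminal]
----
$ cat <<EOF > openshift-installer-role.json
{
  "Name": "openshift-installer-minimal-example",
  "Description": "Example custom role for installing OpenShift; replace Actions with the full permission list",
  "Actions": [
    "Microsoft.Authorization/roleAssignments/read",
    "Microsoft.Authorization/roleAssignments/write"
  ],
  "AssignableScopes": ["/subscriptions/<subscription_id>"]
}
EOF

$ az role definition create --role-definition @openshift-installer-role.json
----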
- -.Required permissions for creating authorization resources -[%collapsible] -==== -* `Microsoft.Authorization/policies/audit/action` -* `Microsoft.Authorization/policies/auditIfNotExists/action` -* `Microsoft.Authorization/roleAssignments/read` -* `Microsoft.Authorization/roleAssignments/write` -==== - -.Required permissions for creating compute resources -[%collapsible] -==== -* `Microsoft.Compute/availabilitySets/write` -* `Microsoft.Compute/availabilitySets/read` -* `Microsoft.Compute/disks/beginGetAccess/action` -* `Microsoft.Compute/disks/delete` -* `Microsoft.Compute/disks/read` -* `Microsoft.Compute/disks/write` -* `Microsoft.Compute/galleries/images/read` -* `Microsoft.Compute/galleries/images/versions/read` -* `Microsoft.Compute/galleries/images/versions/write` -* `Microsoft.Compute/galleries/images/write` -* `Microsoft.Compute/galleries/read` -* `Microsoft.Compute/galleries/write` -* `Microsoft.Compute/snapshots/read` -* `Microsoft.Compute/snapshots/write` -* `Microsoft.Compute/snapshots/delete` -* `Microsoft.Compute/virtualMachines/delete` -* `Microsoft.Compute/virtualMachines/powerOff/action` -* `Microsoft.Compute/virtualMachines/read` -* `Microsoft.Compute/virtualMachines/write` -==== - -.Required permissions for creating identity management resources -[%collapsible] -==== -* `Microsoft.ManagedIdentity/userAssignedIdentities/assign/action` -* `Microsoft.ManagedIdentity/userAssignedIdentities/read` -* `Microsoft.ManagedIdentity/userAssignedIdentities/write` -==== - -.Required permissions for creating network resources -[%collapsible] -==== -* `Microsoft.Network/dnsZones/A/write` -* `Microsoft.Network/dnsZones/CNAME/write` -* `Microsoft.Network/dnszones/CNAME/read` -* `Microsoft.Network/dnszones/read` -* `Microsoft.Network/loadBalancers/backendAddressPools/join/action` -* `Microsoft.Network/loadBalancers/backendAddressPools/read` -* `Microsoft.Network/loadBalancers/backendAddressPools/write` -* `Microsoft.Network/loadBalancers/read` -* `Microsoft.Network/loadBalancers/write` -* `Microsoft.Network/networkInterfaces/delete` -* `Microsoft.Network/networkInterfaces/join/action` -* `Microsoft.Network/networkInterfaces/read` -* `Microsoft.Network/networkInterfaces/write` -* `Microsoft.Network/networkSecurityGroups/join/action` -* `Microsoft.Network/networkSecurityGroups/read` -* `Microsoft.Network/networkSecurityGroups/securityRules/delete` -* `Microsoft.Network/networkSecurityGroups/securityRules/read` -* `Microsoft.Network/networkSecurityGroups/securityRules/write` -* `Microsoft.Network/networkSecurityGroups/write` -* `Microsoft.Network/privateDnsZones/A/read` -* `Microsoft.Network/privateDnsZones/A/write` -* `Microsoft.Network/privateDnsZones/A/delete` -* `Microsoft.Network/privateDnsZones/SOA/read` -* `Microsoft.Network/privateDnsZones/read` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/read` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/write` -* `Microsoft.Network/privateDnsZones/write` -* `Microsoft.Network/publicIPAddresses/delete` -* `Microsoft.Network/publicIPAddresses/join/action` -* `Microsoft.Network/publicIPAddresses/read` -* `Microsoft.Network/publicIPAddresses/write` -* `Microsoft.Network/virtualNetworks/join/action` -* `Microsoft.Network/virtualNetworks/read` -* `Microsoft.Network/virtualNetworks/subnets/join/action` -* `Microsoft.Network/virtualNetworks/subnets/read` -* `Microsoft.Network/virtualNetworks/subnets/write` -* `Microsoft.Network/virtualNetworks/write` -==== -[NOTE] -==== -The following permissions are not required to create the 
private {product-title} cluster on Azure. - -* `Microsoft.Network/dnsZones/A/write` -* `Microsoft.Network/dnsZones/CNAME/write` -* `Microsoft.Network/dnszones/CNAME/read` -* `Microsoft.Network/dnszones/read` -==== - -.Required permissions for checking the health of resources -[%collapsible] -==== -* `Microsoft.Resourcehealth/healthevent/Activated/action` -* `Microsoft.Resourcehealth/healthevent/InProgress/action` -* `Microsoft.Resourcehealth/healthevent/Pending/action` -* `Microsoft.Resourcehealth/healthevent/Resolved/action` -* `Microsoft.Resourcehealth/healthevent/Updated/action` -==== - -.Required permissions for creating a resource group -[%collapsible] -==== -* `Microsoft.Resources/subscriptions/resourceGroups/read` -* `Microsoft.Resources/subscriptions/resourcegroups/write` -==== - -.Required permissions for creating resource tags -[%collapsible] -==== -* `Microsoft.Resources/tags/write` -==== - -.Required permissions for creating storage resources -[%collapsible] -==== -* `Microsoft.Storage/storageAccounts/blobServices/read` -* `Microsoft.Storage/storageAccounts/blobServices/containers/write` -* `Microsoft.Storage/storageAccounts/fileServices/read` -* `Microsoft.Storage/storageAccounts/fileServices/shares/read` -* `Microsoft.Storage/storageAccounts/fileServices/shares/write` -* `Microsoft.Storage/storageAccounts/fileServices/shares/delete` -* `Microsoft.Storage/storageAccounts/listKeys/action` -* `Microsoft.Storage/storageAccounts/read` -* `Microsoft.Storage/storageAccounts/write` -==== - -.Optional permissions for creating marketplace virtual machine resources -[%collapsible] -==== -* `Microsoft.MarketplaceOrdering/offertypes/publishers/offers/plans/agreements/read` -* `Microsoft.MarketplaceOrdering/offertypes/publishers/offers/plans/agreements/write` -==== - -.Optional permissions for creating compute resources -[%collapsible] -==== -* `Microsoft.Compute/images/read` -* `Microsoft.Compute/images/write` -* `Microsoft.Compute/images/delete` -==== - -.Optional permissions for enabling user-managed encryption -[%collapsible] -==== -* `Microsoft.Compute/diskEncryptionSets/read` -* `Microsoft.Compute/diskEncryptionSets/write` -* `Microsoft.Compute/diskEncryptionSets/delete` -* `Microsoft.KeyVault/vaults/read` -* `Microsoft.KeyVault/vaults/write` -* `Microsoft.KeyVault/vaults/delete` -* `Microsoft.KeyVault/vaults/deploy/action` -* `Microsoft.KeyVault/vaults/keys/read` -* `Microsoft.KeyVault/vaults/keys/write` -* `Microsoft.Features/providers/features/register/action` -==== - -.Optional permissions for installing a private cluster with Azure Network Address Translation (NAT) -[%collapsible] -==== -* `Microsoft.Network/natGateways/join/action` -* `Microsoft.Network/natGateways/read` -* `Microsoft.Network/natGateways/write` -==== - -.Optional permissions for installing a private cluster with Azure firewall -[%collapsible] -==== -* `Microsoft.Network/azureFirewalls/applicationRuleCollections/write` -* `Microsoft.Network/azureFirewalls/read` -* `Microsoft.Network/azureFirewalls/write` -* `Microsoft.Network/routeTables/join/action` -* `Microsoft.Network/routeTables/read` -* `Microsoft.Network/routeTables/routes/read` -* `Microsoft.Network/routeTables/routes/write` -* `Microsoft.Network/routeTables/write` -* `Microsoft.Network/virtualNetworks/peer/action` -* `Microsoft.Network/virtualNetworks/virtualNetworkPeerings/read` -* `Microsoft.Network/virtualNetworks/virtualNetworkPeerings/write` -==== - -.Optional permission for running gather bootstrap -[%collapsible] -==== -* 
`Microsoft.Compute/virtualMachines/instanceView/read` -==== - -The following permissions are required for deleting an {product-title} cluster on Microsoft Azure. You can use the same permissions to delete a private {product-title} cluster on Azure. - -.Required permissions for deleting authorization resources -[%collapsible] -==== -* `Microsoft.Authorization/roleAssignments/delete` -==== - -.Required permissions for deleting compute resources -[%collapsible] -==== -* `Microsoft.Compute/disks/delete` -* `Microsoft.Compute/galleries/delete` -* `Microsoft.Compute/galleries/images/delete` -* `Microsoft.Compute/galleries/images/versions/delete` -* `Microsoft.Compute/virtualMachines/delete` -==== - -.Required permissions for deleting identity management resources -[%collapsible] -==== -* `Microsoft.ManagedIdentity/userAssignedIdentities/delete` -==== - -.Required permissions for deleting network resources -[%collapsible] -==== -* `Microsoft.Network/dnszones/read` -* `Microsoft.Network/dnsZones/A/read` -* `Microsoft.Network/dnsZones/A/delete` -* `Microsoft.Network/dnsZones/CNAME/read` -* `Microsoft.Network/dnsZones/CNAME/delete` -* `Microsoft.Network/loadBalancers/delete` -* `Microsoft.Network/networkInterfaces/delete` -* `Microsoft.Network/networkSecurityGroups/delete` -* `Microsoft.Network/privateDnsZones/read` -* `Microsoft.Network/privateDnsZones/A/read` -* `Microsoft.Network/privateDnsZones/delete` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/delete` -* `Microsoft.Network/publicIPAddresses/delete` -* `Microsoft.Network/virtualNetworks/delete` -==== -[NOTE] -==== -The following permissions are not required to delete a private {product-title} cluster on Azure. - -* `Microsoft.Network/dnszones/read` -* `Microsoft.Network/dnsZones/A/read` -* `Microsoft.Network/dnsZones/A/delete` -* `Microsoft.Network/dnsZones/CNAME/read` -* `Microsoft.Network/dnsZones/CNAME/delete` -==== - -.Required permissions for checking the health of resources -[%collapsible] -==== -* `Microsoft.Resourcehealth/healthevent/Activated/action` -* `Microsoft.Resourcehealth/healthevent/Resolved/action` -* `Microsoft.Resourcehealth/healthevent/Updated/action` -==== - -.Required permissions for deleting a resource group -[%collapsible] -==== -* `Microsoft.Resources/subscriptions/resourcegroups/delete` -==== - -.Required permissions for deleting storage resources -[%collapsible] -==== -* `Microsoft.Storage/storageAccounts/delete` -* `Microsoft.Storage/storageAccounts/listKeys/action` -==== - -[NOTE] -==== -To install {product-title} on Azure, you must scope the permissions to your subscription. Later, you can re-scope these permissions to the installer created resource group. If the public DNS zone is present in a different resource group, then the network DNS zone related permissions must always be applied to your subscription. By default, the {product-title} installation program assigns the Azure identity the `Contributor` role. - -You can scope all the permissions to your subscription when deleting an {product-title} cluster. 
-==== \ No newline at end of file diff --git a/modules/minimum-required-permissions-ipi-gcp-xpn.adoc b/modules/minimum-required-permissions-ipi-gcp-xpn.adoc deleted file mode 100644 index d8a1a1030d70..000000000000 --- a/modules/minimum-required-permissions-ipi-gcp-xpn.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc - -[id="minimum-required-permissions-ipi-gcp-xpn"] -= Required GCP permissions for shared VPC installations - -When you are installing a cluster to a link:https://cloud.google.com/vpc/docs/shared-vpc[shared VPC], you must configure the service account for both the host project and the service project. If you are not installing to a shared VPC, you can skip this section. - -You must apply the minimum roles required for a standard installation as listed above, to the service project. Note that custom roles, and therefore fine-grained permissions, cannot be used in shared VPC installations because GCP does not support adding the required permission `compute.organizations.administerXpn` to custom roles. - -In addition, the host project must apply one of the following configurations to the service account: - -.Required permissions for creating firewalls in the host project -[%collapsible] -==== -* `projects/<host-project>/roles/dns.networks.bindPrivateDNSZone` -* `roles/compute.networkAdmin` -* `roles/compute.securityAdmin` -==== - -.Required minimal permissions -[%collapsible] -==== -* `projects/<host-project>/roles/dns.networks.bindPrivateDNSZone` -* `roles/compute.networkUser` -==== diff --git a/modules/minimum-required-permissions-ipi-gcp.adoc b/modules/minimum-required-permissions-ipi-gcp.adoc deleted file mode 100644 index 95b55c8ecdc1..000000000000 --- a/modules/minimum-required-permissions-ipi-gcp.adoc +++ /dev/null @@ -1,266 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-account.adoc - -[id="minimum-required-permissions-ipi-gcp_{context}"] -= Required GCP permissions for installer-provisioned infrastructure - -When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. - -If your organization’s security policies require a more restrictive set of permissions, you can create link:https://cloud.google.com/iam/docs/creating-custom-roles[custom roles] with the necessary permissions. The following permissions are required for the installer-provisioned infrastructure for creating and deleting the {product-title} cluster. 
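As an illustrative sketch, such a custom role might be created with the `gcloud` CLI from a YAML role definition. The role ID, title, and file name below are placeholders, and `includedPermissions` must be filled in from the permission groups that follow.

.Example: creating a custom role with gcloud (illustrative sketch)
[source,terminal]
----
$ cat <<EOF > ocp-installer-role.yaml
title: "OpenShift installer (example)"
description: "Example custom role; replace includedPermissions with the full permission list"
stage: "GA"
includedPermissions:
- compute.addresses.create
- compute.addresses.delete
EOF

$ gcloud iam roles create ocpInstallerExample \
    --project=<project_id> \
    --file=ocp-installer-role.yaml
----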
- -.Required permissions for creating network resources -[%collapsible] -==== -* `compute.addresses.create` -* `compute.addresses.createInternal` -* `compute.addresses.delete` -* `compute.addresses.get` -* `compute.addresses.list` -* `compute.addresses.use` -* `compute.addresses.useInternal` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.list` -* `compute.forwardingRules.create` -* `compute.forwardingRules.get` -* `compute.forwardingRules.list` -* `compute.forwardingRules.setLabels` -* `compute.networks.create` -* `compute.networks.get` -* `compute.networks.list` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.get` -* `compute.routers.list` -* `compute.routers.update` -* `compute.routes.list` -* `compute.subnetworks.create` -* `compute.subnetworks.get` -* `compute.subnetworks.list` -* `compute.subnetworks.use` -* `compute.subnetworks.useExternalIp` -==== - -.Required permissions for creating load balancer resources -[%collapsible] -==== -* `compute.regionBackendServices.create` -* `compute.regionBackendServices.get` -* `compute.regionBackendServices.list` -* `compute.regionBackendServices.update` -* `compute.regionBackendServices.use` -* `compute.targetPools.addInstance` -* `compute.targetPools.create` -* `compute.targetPools.get` -* `compute.targetPools.list` -* `compute.targetPools.removeInstance` -* `compute.targetPools.use` -==== - -.Required permissions for creating DNS resources -[%collapsible] -==== -* `dns.changes.create` -* `dns.changes.get` -* `dns.managedZones.create` -* `dns.managedZones.get` -* `dns.managedZones.list` -* `dns.networks.bindPrivateDNSZone` -* `dns.resourceRecordSets.create` -* `dns.resourceRecordSets.list` -==== - -.Required permissions for creating Service Account resources -[%collapsible] -==== -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccountKeys.list` -* `iam.serviceAccounts.actAs` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.serviceAccounts.list` -* `resourcemanager.projects.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` -==== - -.Required permissions for creating compute resources -[%collapsible] -==== -* `compute.disks.create` -* `compute.disks.get` -* `compute.disks.list` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.list` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.delete` -* `compute.instances.get` -* `compute.instances.list` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setServiceAccount` -* `compute.instances.setTags` -* `compute.instances.use` -* `compute.machineTypes.get` -* `compute.machineTypes.list` -==== - -.Required for creating storage resources -[%collapsible] -==== -* `storage.buckets.create` -* `storage.buckets.delete` -* `storage.buckets.get` -* `storage.buckets.list` -* `storage.objects.create` -* `storage.objects.delete` -* `storage.objects.get` -* `storage.objects.list` -==== - -.Required permissions for creating health check resources -[%collapsible] -==== -* `compute.healthChecks.create` -* `compute.healthChecks.get` -* `compute.healthChecks.list` -* `compute.healthChecks.useReadOnly` -* `compute.httpHealthChecks.create` -* `compute.httpHealthChecks.get` -* 
`compute.httpHealthChecks.list` -* `compute.httpHealthChecks.useReadOnly` -==== - -.Required permissions to get GCP zone and region related information -[%collapsible] -==== -* `compute.globalOperations.get` -* `compute.regionOperations.get` -* `compute.regions.list` -* `compute.zoneOperations.get` -* `compute.zones.get` -* `compute.zones.list` -==== - -.Required permissions for checking services and quotas -[%collapsible] -==== -* `monitoring.timeSeries.list` -* `serviceusage.quotas.get` -* `serviceusage.services.list` -==== - -.Required IAM permissions for installation -[%collapsible] -==== -* `iam.roles.get` -==== - -.Optional Images permissions for installation -[%collapsible] -==== -* `compute.images.list` -==== - -.Optional permission for running gather bootstrap -[%collapsible] -==== -* `compute.instances.getSerialPortOutput` -==== - -.Required permissions for deleting network resources -[%collapsible] -==== -* `compute.addresses.delete` -* `compute.addresses.deleteInternal` -* `compute.addresses.list` -* `compute.firewalls.delete` -* `compute.firewalls.list` -* `compute.forwardingRules.delete` -* `compute.forwardingRules.list` -* `compute.networks.delete` -* `compute.networks.list` -* `compute.networks.updatePolicy` -* `compute.routers.delete` -* `compute.routers.list` -* `compute.routes.list` -* `compute.subnetworks.delete` -* `compute.subnetworks.list` -==== - -.Required permissions for deleting load balancer resources -[%collapsible] -==== -* `compute.regionBackendServices.delete` -* `compute.regionBackendServices.list` -* `compute.targetPools.delete` -* `compute.targetPools.list` -==== - -.Required permissions for deleting DNS resources -[%collapsible] -==== -* `dns.changes.create` -* `dns.managedZones.delete` -* `dns.managedZones.get` -* `dns.managedZones.list` -* `dns.resourceRecordSets.delete` -* `dns.resourceRecordSets.list` -==== - -.Required permissions for deleting Service Account resources -[%collapsible] -==== -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.serviceAccounts.list` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` -==== - -.Required permissions for deleting compute resources -[%collapsible] -==== -* `compute.disks.delete` -* `compute.disks.list` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.list` -* `compute.instances.delete` -* `compute.instances.list` -* `compute.instances.stop` -* `compute.machineTypes.list` -==== - -.Required for deleting storage resources -[%collapsible] -==== -* `storage.buckets.delete` -* `storage.buckets.getIamPolicy` -* `storage.buckets.list` -* `storage.objects.delete` -* `storage.objects.list` -==== - -.Required permissions for deleting health check resources -[%collapsible] -==== -* `compute.healthChecks.delete` -* `compute.healthChecks.list` -* `compute.httpHealthChecks.delete` -* `compute.httpHealthChecks.list` -==== - -.Required Images permissions for deletion -[%collapsible] -==== -* `compute.images.list` -==== \ No newline at end of file diff --git a/modules/minimum-required-permissions-upi-azure.adoc b/modules/minimum-required-permissions-upi-azure.adoc deleted file mode 100644 index cf53674d87de..000000000000 --- a/modules/minimum-required-permissions-upi-azure.adoc +++ /dev/null @@ -1,242 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_azure/installing-azure-user-infra.adoc - -[id="minimum-required-permissions-upi-azure_{context}"] -= Required Azure permissions for user-provisioned infrastructure - -When 
you assign `Contributor` and `User Access Administrator` roles to the service principal, you automatically grant all the required permissions. - -If your organization's security policies require a more restrictive set of permissions, you can create a link:https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles[custom role] with the necessary permissions. The following permissions are required for creating an {product-title} cluster on Microsoft Azure. - -.Required permissions for creating authorization resources -[%collapsible] -==== -* `Microsoft.Authorization/policies/audit/action` -* `Microsoft.Authorization/policies/auditIfNotExists/action` -* `Microsoft.Authorization/roleAssignments/read` -* `Microsoft.Authorization/roleAssignments/write` -==== - -.Required permissions for creating compute resources -[%collapsible] -==== -* `Microsoft.Compute/images/read` -* `Microsoft.Compute/images/write` -* `Microsoft.Compute/images/delete` -* `Microsoft.Compute/availabilitySets/read` -* `Microsoft.Compute/disks/beginGetAccess/action` -* `Microsoft.Compute/disks/delete` -* `Microsoft.Compute/disks/read` -* `Microsoft.Compute/disks/write` -* `Microsoft.Compute/galleries/images/read` -* `Microsoft.Compute/galleries/images/versions/read` -* `Microsoft.Compute/galleries/images/versions/write` -* `Microsoft.Compute/galleries/images/write` -* `Microsoft.Compute/galleries/read` -* `Microsoft.Compute/galleries/write` -* `Microsoft.Compute/snapshots/read` -* `Microsoft.Compute/snapshots/write` -* `Microsoft.Compute/snapshots/delete` -* `Microsoft.Compute/virtualMachines/delete` -* `Microsoft.Compute/virtualMachines/powerOff/action` -* `Microsoft.Compute/virtualMachines/read` -* `Microsoft.Compute/virtualMachines/write` -* `Microsoft.Compute/virtualMachines/deallocate/action` -==== - -.Required permissions for creating identity management resources -[%collapsible] -==== -* `Microsoft.ManagedIdentity/userAssignedIdentities/assign/action` -* `Microsoft.ManagedIdentity/userAssignedIdentities/read` -* `Microsoft.ManagedIdentity/userAssignedIdentities/write` -==== - -.Required permissions for creating network resources -[%collapsible] -==== -* `Microsoft.Network/dnsZones/A/write` -* `Microsoft.Network/dnsZones/CNAME/write` -* `Microsoft.Network/dnszones/CNAME/read` -* `Microsoft.Network/dnszones/read` -* `Microsoft.Network/loadBalancers/backendAddressPools/join/action` -* `Microsoft.Network/loadBalancers/backendAddressPools/read` -* `Microsoft.Network/loadBalancers/backendAddressPools/write` -* `Microsoft.Network/loadBalancers/read` -* `Microsoft.Network/loadBalancers/write` -* `Microsoft.Network/networkInterfaces/delete` -* `Microsoft.Network/networkInterfaces/join/action` -* `Microsoft.Network/networkInterfaces/read` -* `Microsoft.Network/networkInterfaces/write` -* `Microsoft.Network/networkSecurityGroups/join/action` -* `Microsoft.Network/networkSecurityGroups/read` -* `Microsoft.Network/networkSecurityGroups/securityRules/delete` -* `Microsoft.Network/networkSecurityGroups/securityRules/read` -* `Microsoft.Network/networkSecurityGroups/securityRules/write` -* `Microsoft.Network/networkSecurityGroups/write` -* `Microsoft.Network/privateDnsZones/A/read` -* `Microsoft.Network/privateDnsZones/A/write` -* `Microsoft.Network/privateDnsZones/A/delete` -* `Microsoft.Network/privateDnsZones/SOA/read` -* `Microsoft.Network/privateDnsZones/read` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/read` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/write` -* 
`Microsoft.Network/privateDnsZones/write` -* `Microsoft.Network/publicIPAddresses/delete` -* `Microsoft.Network/publicIPAddresses/join/action` -* `Microsoft.Network/publicIPAddresses/read` -* `Microsoft.Network/publicIPAddresses/write` -* `Microsoft.Network/virtualNetworks/join/action` -* `Microsoft.Network/virtualNetworks/read` -* `Microsoft.Network/virtualNetworks/subnets/join/action` -* `Microsoft.Network/virtualNetworks/subnets/read` -* `Microsoft.Network/virtualNetworks/subnets/write` -* `Microsoft.Network/virtualNetworks/write` -==== - -.Required permissions for checking the health of resources -[%collapsible] -==== -* `Microsoft.Resourcehealth/healthevent/Activated/action` -* `Microsoft.Resourcehealth/healthevent/InProgress/action` -* `Microsoft.Resourcehealth/healthevent/Pending/action` -* `Microsoft.Resourcehealth/healthevent/Resolved/action` -* `Microsoft.Resourcehealth/healthevent/Updated/action` -==== - -.Required permissions for creating a resource group -[%collapsible] -==== -* `Microsoft.Resources/subscriptions/resourceGroups/read` -* `Microsoft.Resources/subscriptions/resourcegroups/write` -==== - -.Required permissions for creating resource tags -[%collapsible] -==== -* `Microsoft.Resources/tags/write` -==== - -.Required permissions for creating storage resources -[%collapsible] -==== -* `Microsoft.Storage/storageAccounts/blobServices/read` -* `Microsoft.Storage/storageAccounts/blobServices/containers/write` -* `Microsoft.Storage/storageAccounts/fileServices/read` -* `Microsoft.Storage/storageAccounts/fileServices/shares/read` -* `Microsoft.Storage/storageAccounts/fileServices/shares/write` -* `Microsoft.Storage/storageAccounts/fileServices/shares/delete` -* `Microsoft.Storage/storageAccounts/listKeys/action` -* `Microsoft.Storage/storageAccounts/read` -* `Microsoft.Storage/storageAccounts/write` -==== - -.Required permissions for creating deployments -[%collapsible] -==== -* `Microsoft.Resources/deployments/read` -* `Microsoft.Resources/deployments/write` -* `Microsoft.Resources/deployments/validate/action` -* `Microsoft.Resources/deployments/operationstatuses/read` -==== - -.Optional permissions for creating compute resources -[%collapsible] -==== -* `Microsoft.Compute/availabilitySets/write` -==== - -.Optional permissions for creating marketplace virtual machine resources -[%collapsible] -==== -* `Microsoft.MarketplaceOrdering/offertypes/publishers/offers/plans/agreements/read` -* `Microsoft.MarketplaceOrdering/offertypes/publishers/offers/plans/agreements/write` -==== - -.Optional permissions for enabling user-managed encryption -[%collapsible] -==== -* `Microsoft.Compute/diskEncryptionSets/read` -* `Microsoft.Compute/diskEncryptionSets/write` -* `Microsoft.Compute/diskEncryptionSets/delete` -* `Microsoft.KeyVault/vaults/read` -* `Microsoft.KeyVault/vaults/write` -* `Microsoft.KeyVault/vaults/delete` -* `Microsoft.KeyVault/vaults/deploy/action` -* `Microsoft.KeyVault/vaults/keys/read` -* `Microsoft.KeyVault/vaults/keys/write` -* `Microsoft.Features/providers/features/register/action` -==== - -The following permissions are required for deleting an {product-title} cluster on Microsoft Azure. 
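If you use a custom role instead of the built-in `Contributor` and `User Access Administrator` roles, the role still must be assigned to the service principal at the appropriate scope. The following is a minimal sketch with placeholder values; the deletion permission groups are listed after it.

.Example: assigning a custom role at the subscription scope (illustrative sketch)
[source,terminal]
----
$ az role assignment create \
    --assignee <service_principal_app_id> \
    --role "<custom_role_name>" \
    --scope /subscriptions/<subscription_id>
----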
- -.Required permissions for deleting authorization resources -[%collapsible] -==== -* `Microsoft.Authorization/roleAssignments/delete` -==== - -.Required permissions for deleting compute resources -[%collapsible] -==== -* `Microsoft.Compute/disks/delete` -* `Microsoft.Compute/galleries/delete` -* `Microsoft.Compute/galleries/images/delete` -* `Microsoft.Compute/galleries/images/versions/delete` -* `Microsoft.Compute/virtualMachines/delete` -* `Microsoft.Compute/images/delete` -==== - -.Required permissions for deleting identity management resources -[%collapsible] -==== -* `Microsoft.ManagedIdentity/userAssignedIdentities/delete` -==== - -.Required permissions for deleting network resources -[%collapsible] -==== -* `Microsoft.Network/dnszones/read` -* `Microsoft.Network/dnsZones/A/read` -* `Microsoft.Network/dnsZones/A/delete` -* `Microsoft.Network/dnsZones/CNAME/read` -* `Microsoft.Network/dnsZones/CNAME/delete` -* `Microsoft.Network/loadBalancers/delete` -* `Microsoft.Network/networkInterfaces/delete` -* `Microsoft.Network/networkSecurityGroups/delete` -* `Microsoft.Network/privateDnsZones/read` -* `Microsoft.Network/privateDnsZones/A/read` -* `Microsoft.Network/privateDnsZones/delete` -* `Microsoft.Network/privateDnsZones/virtualNetworkLinks/delete` -* `Microsoft.Network/publicIPAddresses/delete` -* `Microsoft.Network/virtualNetworks/delete` -==== - -.Required permissions for checking the health of resources -[%collapsible] -==== -* `Microsoft.Resourcehealth/healthevent/Activated/action` -* `Microsoft.Resourcehealth/healthevent/Resolved/action` -* `Microsoft.Resourcehealth/healthevent/Updated/action` -==== - -.Required permissions for deleting a resource group -[%collapsible] -==== -* `Microsoft.Resources/subscriptions/resourcegroups/delete` -==== - -.Required permissions for deleting storage resources -[%collapsible] -==== -* `Microsoft.Storage/storageAccounts/delete` -* `Microsoft.Storage/storageAccounts/listKeys/action` -==== - -[NOTE] -==== -To install {product-title} on Azure, you must scope the permissions related to resource group creation to your subscription. After the resource group is created, you can scope the rest of the permissions to the created resource group. If the public DNS zone is present in a different resource group, then the network DNS zone related permissions must always be applied to your subscription. - -You can scope all the permissions to your subscription when deleting an {product-title} cluster. -==== \ No newline at end of file diff --git a/modules/minimum-required-permissions-upi-gcp.adoc b/modules/minimum-required-permissions-upi-gcp.adoc deleted file mode 100644 index d282a5c20ab5..000000000000 --- a/modules/minimum-required-permissions-upi-gcp.adoc +++ /dev/null @@ -1,291 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_gcp/installing-gcp-user-infra.adoc -// * installing/installing_gcp/installing-restricted-networks-gcp.adoc -// * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc - -[id="minimum-required-permissions-upi-gcp_{context}"] -= Required GCP permissions for user-provisioned infrastructure - -When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. - -If your organization’s security policies require a more restrictive set of permissions, you can create link:https://cloud.google.com/iam/docs/creating-custom-roles[custom roles] with the necessary permissions. 
The following permissions are required for the user-provisioned infrastructure for creating and deleting the {product-title} cluster. - -.Required permissions for creating network resources -[%collapsible] -==== -* `compute.addresses.create` -* `compute.addresses.createInternal` -* `compute.addresses.delete` -* `compute.addresses.get` -* `compute.addresses.list` -* `compute.addresses.use` -* `compute.addresses.useInternal` -* `compute.firewalls.create` -* `compute.firewalls.delete` -* `compute.firewalls.get` -* `compute.firewalls.list` -* `compute.forwardingRules.create` -* `compute.forwardingRules.get` -* `compute.forwardingRules.list` -* `compute.forwardingRules.setLabels` -* `compute.networks.create` -* `compute.networks.get` -* `compute.networks.list` -* `compute.networks.updatePolicy` -* `compute.routers.create` -* `compute.routers.get` -* `compute.routers.list` -* `compute.routers.update` -* `compute.routes.list` -* `compute.subnetworks.create` -* `compute.subnetworks.get` -* `compute.subnetworks.list` -* `compute.subnetworks.use` -* `compute.subnetworks.useExternalIp` -==== - -.Required permissions for creating load balancer resources -[%collapsible] -==== -* `compute.regionBackendServices.create` -* `compute.regionBackendServices.get` -* `compute.regionBackendServices.list` -* `compute.regionBackendServices.update` -* `compute.regionBackendServices.use` -* `compute.targetPools.addInstance` -* `compute.targetPools.create` -* `compute.targetPools.get` -* `compute.targetPools.list` -* `compute.targetPools.removeInstance` -* `compute.targetPools.use` -==== - -.Required permissions for creating DNS resources -[%collapsible] -==== -* `dns.changes.create` -* `dns.changes.get` -* `dns.managedZones.create` -* `dns.managedZones.get` -* `dns.managedZones.list` -* `dns.networks.bindPrivateDNSZone` -* `dns.resourceRecordSets.create` -* `dns.resourceRecordSets.list` -* `dns.resourceRecordSets.update` -==== - -.Required permissions for creating Service Account resources -[%collapsible] -==== -* `iam.serviceAccountKeys.create` -* `iam.serviceAccountKeys.delete` -* `iam.serviceAccountKeys.get` -* `iam.serviceAccountKeys.list` -* `iam.serviceAccounts.actAs` -* `iam.serviceAccounts.create` -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.serviceAccounts.list` -* `resourcemanager.projects.get` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` -==== - -.Required permissions for creating compute resources -[%collapsible] -==== -* `compute.disks.create` -* `compute.disks.get` -* `compute.disks.list` -* `compute.instanceGroups.create` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.get` -* `compute.instanceGroups.list` -* `compute.instanceGroups.update` -* `compute.instanceGroups.use` -* `compute.instances.create` -* `compute.instances.delete` -* `compute.instances.get` -* `compute.instances.list` -* `compute.instances.setLabels` -* `compute.instances.setMetadata` -* `compute.instances.setServiceAccount` -* `compute.instances.setTags` -* `compute.instances.use` -* `compute.machineTypes.get` -* `compute.machineTypes.list` -==== - -.Required for creating storage resources -[%collapsible] -==== -* `storage.buckets.create` -* `storage.buckets.delete` -* `storage.buckets.get` -* `storage.buckets.list` -* `storage.objects.create` -* `storage.objects.delete` -* `storage.objects.get` -* `storage.objects.list` -==== - -.Required permissions for creating health check resources -[%collapsible] -==== -* `compute.healthChecks.create` -* 
`compute.healthChecks.get` -* `compute.healthChecks.list` -* `compute.healthChecks.useReadOnly` -* `compute.httpHealthChecks.create` -* `compute.httpHealthChecks.get` -* `compute.httpHealthChecks.list` -* `compute.httpHealthChecks.useReadOnly` -==== - -.Required permissions to get GCP zone and region related information -[%collapsible] -==== -* `compute.globalOperations.get` -* `compute.regionOperations.get` -* `compute.regions.list` -* `compute.zoneOperations.get` -* `compute.zones.get` -* `compute.zones.list` -==== - -.Required permissions for checking services and quotas -[%collapsible] -==== -* `monitoring.timeSeries.list` -* `serviceusage.quotas.get` -* `serviceusage.services.list` -==== - -.Required IAM permissions for installation -[%collapsible] -==== -* `iam.roles.get` -==== - -.Required Images permissions for installation -[%collapsible] -==== -* `compute.images.create` -* `compute.images.delete` -* `compute.images.get` -* `compute.images.list` -==== - -.Optional permission for running gather bootstrap -[%collapsible] -==== -* `compute.instances.getSerialPortOutput` -==== - -.Required permissions for deleting network resources -[%collapsible] -==== -* `compute.addresses.delete` -* `compute.addresses.deleteInternal` -* `compute.addresses.list` -* `compute.firewalls.delete` -* `compute.firewalls.list` -* `compute.forwardingRules.delete` -* `compute.forwardingRules.list` -* `compute.networks.delete` -* `compute.networks.list` -* `compute.networks.updatePolicy` -* `compute.routers.delete` -* `compute.routers.list` -* `compute.routes.list` -* `compute.subnetworks.delete` -* `compute.subnetworks.list` -==== - -.Required permissions for deleting load balancer resources -[%collapsible] -==== -* `compute.regionBackendServices.delete` -* `compute.regionBackendServices.list` -* `compute.targetPools.delete` -* `compute.targetPools.list` -==== - -.Required permissions for deleting DNS resources -[%collapsible] -==== -* `dns.changes.create` -* `dns.managedZones.delete` -* `dns.managedZones.get` -* `dns.managedZones.list` -* `dns.resourceRecordSets.delete` -* `dns.resourceRecordSets.list` -==== - -.Required permissions for deleting Service Account resources -[%collapsible] -==== -* `iam.serviceAccounts.delete` -* `iam.serviceAccounts.get` -* `iam.serviceAccounts.list` -* `resourcemanager.projects.getIamPolicy` -* `resourcemanager.projects.setIamPolicy` -==== - -.Required permissions for deleting compute resources -[%collapsible] -==== -* `compute.disks.delete` -* `compute.disks.list` -* `compute.instanceGroups.delete` -* `compute.instanceGroups.list` -* `compute.instances.delete` -* `compute.instances.list` -* `compute.instances.stop` -* `compute.machineTypes.list` -==== - -.Required for deleting storage resources -[%collapsible] -==== -* `storage.buckets.delete` -* `storage.buckets.getIamPolicy` -* `storage.buckets.list` -* `storage.objects.delete` -* `storage.objects.list` -==== - -.Required permissions for deleting health check resources -[%collapsible] -==== -* `compute.healthChecks.delete` -* `compute.healthChecks.list` -* `compute.httpHealthChecks.delete` -* `compute.httpHealthChecks.list` -==== - -.Required Images permissions for deletion -[%collapsible] -==== -* `compute.images.delete` -* `compute.images.list` -==== - -.Required permissions to get Region related information -[%collapsible] -==== -* `compute.regions.get` -==== - -.Required Deployment Manager permissions -[%collapsible] -==== -* `deploymentmanager.deployments.create` -* `deploymentmanager.deployments.delete` -* 
`deploymentmanager.deployments.get` -* `deploymentmanager.deployments.list` -* `deploymentmanager.manifests.get` -* `deploymentmanager.operations.get` -* `deploymentmanager.resources.list` -==== \ No newline at end of file diff --git a/modules/mint-mode-with-removal-of-admin-credential.adoc b/modules/mint-mode-with-removal-of-admin-credential.adoc deleted file mode 100644 index 5eb9739f4b1b..000000000000 --- a/modules/mint-mode-with-removal-of-admin-credential.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc - -[id="mint-mode-with-removal-or-rotation-of-admin-credential_{context}"] -= Mint mode with removal or rotation of the administrator-level credential - -Currently, this mode is only supported on AWS and GCP. - -In this mode, a user installs {product-title} with an administrator-level credential just like the normal mint mode. However, this process removes the administrator-level credential secret from the cluster post-installation. - -The administrator can have the Cloud Credential Operator make its own request for a read-only credential that allows it to verify if all `CredentialsRequest` objects have their required permissions, thus the administrator-level credential is not required unless something needs to be changed. After the associated credential is removed, it can be deleted or deactivated on the underlying cloud, if desired. - -[NOTE] -==== -Prior to a non z-stream upgrade, you must reinstate the credential secret with the administrator-level credential. If the credential is not present, the upgrade might be blocked. -==== - -The administrator-level credential is not stored in the cluster permanently. - -Following these steps still requires the administrator-level credential in the cluster for brief periods of time. It also requires manually re-instating the secret with administrator-level credentials for each upgrade. diff --git a/modules/mint-mode.adoc b/modules/mint-mode.adoc deleted file mode 100644 index 336407749062..000000000000 --- a/modules/mint-mode.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/manually-creating-iam.adoc -// * installing/installing_gcp/manually-creating-iam-gcp.adoc - -:_content-type: CONCEPT -[id="mint-mode_{context}"] -= Mint mode - -Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. In this mode, the CCO uses the provided administrator-level cloud credential to run the cluster. Mint mode is supported for AWS and GCP. - -In mint mode, the `admin` credential is stored in the `kube-system` namespace and then used by the CCO to process the `CredentialsRequest` objects in the cluster and create users for each with specific permissions. - -The benefits of mint mode include: - -* Each cluster component has only the permissions it requires -* Automatic, on-going reconciliation for cloud credentials, including additional credentials or permissions that might be required for upgrades - -One drawback is that mint mode requires `admin` credential storage in a cluster `kube-system` secret. 
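A quick way to confirm which credentials mode a cluster is using, and that the administrator-level credential is present, is to inspect the `CloudCredential` resource and the root secret. This is a sketch rather than a required step; the secret name shown here, `aws-creds`, applies to AWS, and other platforms use different secret names.

[source,terminal]
----
$ oc get cloudcredential cluster -o jsonpath='{.spec.credentialsMode}'

$ oc get secret aws-creds -n kube-system
----

An empty `credentialsMode` value indicates that the CCO is operating in the default mode for the platform.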
diff --git a/modules/mirror-registry-flags.adoc b/modules/mirror-registry-flags.adoc deleted file mode 100644 index 21550b310bfc..000000000000 --- a/modules/mirror-registry-flags.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -[id="mirror-registry-flags_{context}"] -= Mirror registry for Red Hat OpenShift flags - -The following flags are available for the _mirror registry for Red Hat OpenShift_: - -[options="header",cols="1,3"] -|=== -| Flags | Description -| `--autoApprove` | A boolean value that disables interactive prompts. If set to `true`, the `quayRoot` directory is automatically deleted when uninstalling the mirror registry. Defaults to `false` if left unspecified. -| `--initPassword` | The password of the init user created during Quay installation. Must be at least eight characters and contain no whitespace. -|`--initUser string` | Shows the username of the initial user. Defaults to `init` if left unspecified. -| `--no-color`, `-c` | Allows users to disable color sequences and propagate that to Ansible when running install, uninstall, and upgrade commands. -| `--pgStorage` | The folder where Postgres persistent storage data is saved. Defaults to the `pg-storage` Podman volume. Root privileges are required to uninstall. -| `--quayHostname` | The fully-qualified domain name of the mirror registry that clients will use to contact the registry. Equivalent to `SERVER_HOSTNAME` in the Quay `config.yaml`. Must resolve by DNS. Defaults to `<targetHostname>:8443` if left unspecified. ^[1]^ -| `--quayStorage` | The folder where Quay persistent storage data is saved. Defaults to the `quay-storage` Podman volume. Root privileges are required to uninstall. -| `--quayRoot`, `-r` | The directory where container image layer and configuration data is saved, including `rootCA.key`, `rootCA.pem`, and `rootCA.srl` certificates. Defaults to `$HOME/quay-install` if left unspecified. -| `--ssh-key`, `-k` | The path of your SSH identity key. Defaults to `~/.ssh/quay_installer` if left unspecified. -| `--sslCert` | The path to the SSL/TLS public key / certificate. Defaults to `{quayRoot}/quay-config` and is auto-generated if left unspecified. -| `--sslCheckSkip` | Skips the check for the certificate hostname against the `SERVER_HOSTNAME` in the `config.yaml` file. ^[2]^ -| `--sslKey` | The path to the SSL/TLS private key used for HTTPS communication. Defaults to `{quayRoot}/quay-config` and is auto-generated if left unspecified. -| `--targetHostname`, `-H` | The hostname of the target you want to install Quay to. Defaults to `$HOST`, for example, a local host, if left unspecified. -| `--targetUsername`, `-u` | The user on the target host which will be used for SSH. Defaults to `$USER`, for example, the current user if left unspecified. -| `--verbose`, `-v` | Shows debug logs and Ansible playbook outputs. -| `--version` | Shows the version for the _mirror registry for Red Hat OpenShift_. -|=== -[.small] -1. `--quayHostname` must be modified if the public DNS name of your system is different from the local hostname. Additionally, the `--quayHostname` flag does not support installation with an IP address. Installation with a hostname is required. -2. `--sslCheckSkip` is used in cases when the mirror registry is set behind a proxy and the exposed hostname is different from the internal Quay hostname. 
It can also be used when users do not want the certificates to be validated against the provided Quay hostname during installation. diff --git a/modules/mirror-registry-introduction.adoc b/modules/mirror-registry-introduction.adoc deleted file mode 100644 index fadcda05ea4d..000000000000 --- a/modules/mirror-registry-introduction.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -[id="mirror-registry-introduction_{context}"] -= Mirror registry for Red Hat OpenShift introduction - -For disconnected deployments of {product-title}, a container registry is required to carry out the installation of the clusters. To run a production-grade registry service on such a cluster, you must create a separate registry deployment to install the first cluster. The _mirror registry for Red Hat OpenShift_ addresses this need and is included in every OpenShift subscription. It is available for download on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. - -The _mirror registry for Red Hat OpenShift_ allows users to install a small-scale version of Red Hat Quay and its required components using the `mirror-registry` command line interface (CLI) tool. The _mirror registry for Red Hat OpenShift_ is deployed automatically with pre-configured local storage and a local database. It also includes auto-generated user credentials and access permissions with a single set of inputs and no additional configuration choices to get started. - -The _mirror registry for Red Hat OpenShift_ provides a pre-determined network configuration and reports deployed component credentials and access URLs upon success. A limited set of optional configuration inputs like fully qualified domain name (FQDN) services, superuser name and password, and custom TLS certificates are also provided. This provides users with a container registry so that they can easily create an offline mirror of all {product-title} release content when running {product-title} in restricted network environments. - -The _mirror registry for Red Hat OpenShift_ is limited to hosting images that are required to install a disconnected {product-title} cluster, such as Release images or Red Hat Operator images. It uses local storage on your {op-system-base-full} machine, and storage supported by {op-system-base} is supported by the _mirror registry for Red Hat OpenShift_. Content built by customers should not be hosted by the _mirror registry for Red Hat OpenShift_. - -Unlike Red Hat Quay, the _mirror registry for Red Hat OpenShift_ is not a highly-available registry and only local file system storage is supported. Using the _mirror registry for Red Hat OpenShift_ with more than one cluster is discouraged, because multiple clusters can create a single point of failure when updating your cluster fleet. It is advised to leverage the _mirror registry for Red Hat OpenShift_ to install a cluster that can host a production-grade, highly-available registry such as Red Hat Quay, which can serve {product-title} content to other clusters. - -Use of the _mirror registry for Red Hat OpenShift_ is optional if another container registry is already available in the install environment. 
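For example, after downloading the `mirror-registry.tar.gz` package described in the following sections, you can unpack the CLI tool and review its available commands before installing. The presence of a standard `--help` flag is assumed here rather than reproduced from the tool.

[source,terminal]
----
$ tar -xzf mirror-registry.tar.gz

$ ./mirror-registry --help
----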
diff --git a/modules/mirror-registry-localhost-update.adoc b/modules/mirror-registry-localhost-update.adoc deleted file mode 100644 index 4bac23925806..000000000000 --- a/modules/mirror-registry-localhost-update.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// module included in the following assembly: -// -// * installing-mirroring-creating-registry.adoc - -:_content-type: PROCEDURE -[id="mirror-registry-localhost-update_{context}"] -= Updating mirror registry for Red Hat OpenShift from a local host - -This procedure explains how to update the _mirror registry for Red Hat OpenShift_ from a local host using the `upgrade` command. Updating to the latest version ensures new features, bug fixes, and security vulnerability fixes. - -[IMPORTANT] -==== -When updating, there is intermittent downtime of your mirror registry, as it is restarted during the update process. -==== - -.Prerequisites - -* You have installed the _mirror registry for Red Hat OpenShift_ on a local host. - -.Procedure - -* If you are upgrading the _mirror registry for Red Hat OpenShift_ from 1.2.z -> 1.3.0, and your installation directory is the default at `/etc/quay-install`, you can enter the following command: -+ -[source,terminal] ----- -$ sudo ./mirror-registry upgrade -v ----- -+ -[NOTE] -==== -* _mirror registry for Red Hat OpenShift_ migrates Podman volumes for Quay storage, Postgres data, and `/etc/quay-install` data to the new `$HOME/quay-install` location. This allows you to use _mirror registry for Red Hat OpenShift_ without the `--quayRoot` flag during future upgrades. - -* Users who upgrade _mirror registry for Red Hat OpenShift_ with the `./mirror-registry upgrade -v` flag must include the same credentials used when creating their mirror registry. For example, if you installed the _mirror registry for Red Hat OpenShift_ with `--quayHostname <host_example_com>` and `--quayRoot <example_directory_name>`, you must include that string to properly upgrade the mirror registry. -==== - -* If you are upgrading the _mirror registry for Red Hat OpenShift_ from 1.2.z -> 1.3.0 and you used a specified directory in your 1.2.z deployment, you must pass in the new `--pgStorage`and `--quayStorage` flags. For example: -+ -[source,terminal] ----- -$ sudo ./mirror-registry upgrade --quayHostname <host_example_com> --quayRoot <example_directory_name> --pgStorage <example_directory_name>/pg-data --quayStorage <example_directory_name>/quay-storage -v ----- - diff --git a/modules/mirror-registry-localhost.adoc b/modules/mirror-registry-localhost.adoc deleted file mode 100644 index abce498ce3a2..000000000000 --- a/modules/mirror-registry-localhost.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -:_content-type: PROCEDURE -[id="mirror-registry-localhost_{context}"] -= Mirroring on a local host with mirror registry for Red Hat OpenShift - -This procedure explains how to install the _mirror registry for Red Hat OpenShift_ on a local host using the `mirror-registry` installer tool. By doing so, users can create a local host registry running on port 443 for the purpose of storing a mirror of {product-title} images. - -[NOTE] -==== -Installing the _mirror registry for Red Hat OpenShift_ using the `mirror-registry` CLI tool makes several changes to your machine. After installation, a `$HOME/quay-install` directory is created, which has installation files, local storage, and the configuration bundle. 
Trusted SSH keys are generated in case the deployment target is the local host, and systemd files on the host machine are set up to ensure that container runtimes are persistent. Additionally, an initial user named `init` is created with an automatically generated password. All access credentials are printed at the end of the install routine. -==== - -.Procedure - -. Download the `mirror-registry.tar.gz` package for the latest version of the _mirror registry for Red Hat OpenShift_ found on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. - -. Install the _mirror registry for Red Hat OpenShift_ on your local host with your current user account by using the `mirror-registry` tool. For a full list of available flags, see "mirror registry for Red Hat OpenShift flags". -+ -[source,terminal] ----- -$ ./mirror-registry install \ - --quayHostname <host_example_com> \ - --quayRoot <example_directory_name> ----- - -. Use the user name and password generated during installation to log into the registry by running the following command: -+ -[source,terminal] ----- -$ podman login -u init \ - -p <password> \ - <host_example_com>:8443> \ - --tls-verify=false <1> ----- -<1> You can avoid running `--tls-verify=false` by configuring your system to trust the generated rootCA certificates. See "Using SSL to protect connections to Red Hat Quay" and "Configuring the system to trust the certificate authority" for more information. -+ -[NOTE] -==== -You can also log in by accessing the UI at `\https://<host.example.com>:8443` after installation. -==== - -. You can mirror {product-title} images after logging in. Depending on your needs, see either the "Mirroring the {product-title} image repository" or the "Mirroring Operator catalogs for use with disconnected clusters" sections of this document. -+ -[NOTE] -==== -If there are issues with images stored by the _mirror registry for Red Hat OpenShift_ due to storage layer problems, you can remirror the {product-title} images, or reinstall mirror registry on more stable storage. -==== diff --git a/modules/mirror-registry-release-notes.adoc b/modules/mirror-registry-release-notes.adoc deleted file mode 100644 index c92934a4d27f..000000000000 --- a/modules/mirror-registry-release-notes.adoc +++ /dev/null @@ -1,247 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -[id="mirror-registry-release-notes_{context}"] -= Mirror registry for Red Hat OpenShift release notes - -The _mirror registry for Red Hat OpenShift_ is a small and streamlined container registry that you can use as a target for mirroring the required container images of {product-title} for disconnected installations. - -These release notes track the development of the _mirror registry for Red Hat OpenShift_ in {product-title}. - -For an overview of the _mirror registry for Red Hat OpenShift_, see xref:../../installing/disconnected_install/installing-mirroring-creating-registry.html#mirror-registry-flags_installing-mirroring-creating-registry[Creating a mirror registry with mirror registry for Red Hat OpenShift]. - -[id="mirror-registry-for-openshift-1-3-6"] -== Mirror registry for Red Hat OpenShift 1.3.6 - -Issued: 2023-05-30 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.8. 
- -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:3302[RHBA-2023:3302 - mirror registry for Red Hat OpenShift 1.3.6] - -[id="mirror-registry-for-openshift-1-3-5"] -== Mirror registry for Red Hat OpenShift 1.3.5 - -Issued: 2023-05-18 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.7. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:3225[RHBA-2023:3225 - mirror registry for Red Hat OpenShift 1.3.5] - -[id="mirror-registry-for-openshift-1-3-4"] -== Mirror registry for Red Hat OpenShift 1.3.4 - -Issued: 2023-04-25 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.6. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:1914[RHBA-2023:1914 - mirror registry for Red Hat OpenShift 1.3.4] - -[id="mirror-registry-for-openshift-1-3-3"] -== Mirror registry for Red Hat OpenShift 1.3.3 - -Issued: 2023-04-05 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.5. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:1528[RHBA-2023:1528 - mirror registry for Red Hat OpenShift 1.3.3] - -[id="mirror-registry-for-openshift-1-3-2"] -== Mirror registry for Red Hat OpenShift 1.3.2 - -Issued: 2023-03-21 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.4. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:1376[RHBA-2023:1376 - mirror registry for Red Hat OpenShift 1.3.2] - -[id="mirror-registry-for-openshift-1-3-1"] -== Mirror registry for Red Hat OpenShift 1.3.1 - -Issued: 2023-03-7 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.3. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:1086[RHBA-2023:1086 - mirror registry for Red Hat OpenShift 1.3.1] - -[id="mirror-registry-for-openshift-1-3-0"] -== Mirror registry for Red Hat OpenShift 1.3.0 - -Issued: 2023-02-20 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.8.1. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2023:0558[RHBA-2023:0558 - mirror registry for Red Hat OpenShift 1.3.0] - -=== New features - -* _Mirror registry for Red Hat OpenShift_ is now supported on {op-system-base-full} 9 installations. - -* IPv6 support is now available on _mirror registry for Red Hat OpenShift_ local host installations. -+ -IPv6 is currently unsupported on _mirror registry for Red Hat OpenShift_ remote host installations. - -* A new feature flag, `--quayStorage`, has been added. With this flag, users with root privileges can manually set the location of their Quay persistent storage. - -* A new feature flag, `--pgStorage`, has been added. With this flag, users with root privileges can manually set the location of their Postgres persistent storage. - -* Previously, users were required to have root privileges (`sudo`) to install _mirror registry for Red Hat OpenShift_. With this update, `sudo` is no longer required to install _mirror registry for Red Hat OpenShift_. 
-+ -When _mirror registry for Red Hat OpenShift_ was installed with `sudo`, an `/etc/quay-install` directory that contained installation files, local storage, and the configuration bundle was created. With the removal of the `sudo` requirement, installation files and the configuration bundle are now installed to `$HOME/quay-install`. Local storage, for example Postgres and Quay, is now stored in named volumes automatically created by Podman. -+ -To override the default directories in which these files are stored, you can use the command line arguments for _mirror registry for Red Hat OpenShift_. For more information about _mirror registry for Red Hat OpenShift_ command line arguments, see "_Mirror registry for Red Hat OpenShift_ flags". - -=== Bug fixes - -* Previously, the following error could be returned when attempting to uninstall _mirror registry for Red Hat OpenShift_: `["Error: no container with name or ID \"quay-postgres\" found: no such container"], "stdout": "", "stdout_lines": []***`. With this update, the order in which _mirror registry for Red Hat OpenShift_ services are stopped and uninstalled has been changed so that the error no longer occurs when uninstalling _mirror registry for Red Hat OpenShift_. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4629[*PROJQUAY-4629*]. - - -[id="mirror-registry-for-openshift-1-2-9"] -== Mirror registry for Red Hat OpenShift 1.2.9 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.10. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:7369[RHBA-2022:7369 - mirror registry for Red Hat OpenShift 1.2.9] - - -[id="mirror-registry-for-openshift-1-2-8"] -== Mirror registry for Red Hat OpenShift 1.2.8 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.9. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:7065[RHBA-2022:7065 - mirror registry for Red Hat OpenShift 1.2.8] - - -[id="mirror-registry-for-openshift-1-2-7"] -== Mirror registry for Red Hat OpenShift 1.2.7 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.8. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:6500[RHBA-2022:6500 - mirror registry for Red Hat OpenShift 1.2.7] - -=== Bug fixes - -* Previously, `getFQDN()` relied on the fully-qualified domain name (FQDN) library to determine its FQDN, and the FQDN library tried to read the `/etc/hosts` file directly. Consequently, on some {op-system-first} installations with uncommon DNS configurations, the FQDN library would fail to install and abort the installation. With this update, _mirror registry for Red Hat OpenShift_ uses `hostname` to determine the FQDN. As a result, the FQDN library does not fail to install. (link:https://issues.redhat.com/browse/PROJQUAY-4139[*PROJQUAY-4139*]) - -[id="mirror-registry-for-openshift-1-2-6"] -== Mirror registry for Red Hat OpenShift 1.2.6 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.7. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:6278[RHBA-2022:6278 - mirror registry for Red Hat OpenShift 1.2.6] - -=== New features - -A new feature flag, `--no-color` (`-c`), has been added.
This feature flag allows users to disable color sequences and propagate that setting to Ansible when running install, uninstall, and upgrade commands. - -[id="mirror-registry-for-openshift-1-2-5"] -== Mirror registry for Red Hat OpenShift 1.2.5 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.6. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:6071[RHBA-2022:6071 - mirror registry for Red Hat OpenShift 1.2.5] - -[id="mirror-registry-for-openshift-1-2-4"] -== Mirror registry for Red Hat OpenShift 1.2.4 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.5. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:5884[RHBA-2022:5884 - mirror registry for Red Hat OpenShift 1.2.4] - -[id="mirror-registry-for-openshift-1-2-3"] -== Mirror registry for Red Hat OpenShift 1.2.3 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.4. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:5649[RHBA-2022:5649 - mirror registry for Red Hat OpenShift 1.2.3] - -[id="mirror-registry-for-openshift-1-2-2"] -== Mirror registry for Red Hat OpenShift 1.2.2 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.3. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:5501[RHBA-2022:5501 - mirror registry for Red Hat OpenShift 1.2.2] - - -[id="mirror-registry-for-openshift-1-2-1"] -== Mirror registry for Red Hat OpenShift 1.2.1 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.2. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:5200[RHBA-2022:5200 - mirror registry for Red Hat OpenShift 1.2.1] - -[id="mirror-registry-for-openshift-1-2-0"] -== Mirror registry for Red Hat OpenShift 1.2.0 - -_Mirror registry for Red Hat OpenShift_ is now available with Red Hat Quay 3.7.1. - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:4986[RHBA-2022:4986 - mirror registry for Red Hat OpenShift 1.2.0] - - -[id="mirror-registry-1-2-0-bug-fixes"] -=== Bug fixes - -* Previously, all components and workers running inside of the Quay pod Operator had log levels set to `DEBUG`. As a result, large traffic logs were created that consumed unnecessary space. With this update, log levels are set to `WARN` by default, which reduces traffic information while emphasizing problem scenarios. (link:https://issues.redhat.com/browse/PROJQUAY-3504[*PROJQUAY-3504*]) - -[id="mirror-registry-for-openshift-1-1-0"] -== Mirror registry for Red Hat OpenShift 1.1.0 - -The following advisory is available for the _mirror registry for Red Hat OpenShift_: - -* link:https://access.redhat.com/errata/RHBA-2022:0956[RHBA-2022:0956 - mirror registry for Red Hat OpenShift 1.1.0] - -[id="mirror-registry-1-1-0-new-features"] -=== New features - -* A new command, `mirror-registry upgrade`, has been added. This command upgrades all container images without interfering with configurations or data.
-+ -[NOTE] -==== -If `quayRoot` was previously set to something other than default, it must be passed into the upgrade command. -==== - -[id="mirror-registry-1-1-0-bug-fixes"] -=== Bug fixes - -* Previously, the absence of `quayHostname` or `targetHostname` did not default to the local hostname. With this update, `quayHostname` and `targetHostname` now default to the local hostname if they are missing. (link:https://issues.redhat.com/browse/PROJQUAY-3079[*PROJQUAY-3079*]) - -* Previously, the command `./mirror-registry --version` returned an `unknown flag` error. Now, running `./mirror-registry --version` returns the current version of the _mirror registry for Red Hat OpenShift_. (link:https://issues.redhat.com/browse/PROJQUAY-3086[*PROJQUAY-3086*]) - -* Previously, users could not set a password during installation, for example, when running `./mirror-registry install --initUser <user_name> --initPassword <password> --verbose`. With this update, users can set a password during installation. (link:https://issues.redhat.com/browse/PROJQUAY-3149[*PROJQUAY-3149*]) - -* Previously, the _mirror registry for Red Hat OpenShift_ did not recreate pods if they were destroyed. Now, pods are recreated if they are destroyed. (link:https://issues.redhat.com/browse/PROJQUAY-3261[*PROJQUAY-3261*]) diff --git a/modules/mirror-registry-remote-host-update.adoc b/modules/mirror-registry-remote-host-update.adoc deleted file mode 100644 index b2c956df7355..000000000000 --- a/modules/mirror-registry-remote-host-update.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// module included in the following assembly: -// -// * installing-mirroring-creating-registry.adoc - -:_content-type: PROCEDURE -[id="mirror-registry-remote-host-update_{context}"] -= Updating mirror registry for Red Hat OpenShift from a remote host - -This procedure explains how to update the _mirror registry for Red Hat OpenShift_ from a remote host using the `upgrade` command. Updating to the latest version ensures bug fixes and security vulnerability fixes. - -[IMPORTANT] -==== -When updating, there is intermittent downtime of your mirror registry, as it is restarted during the update process. -==== - -.Prerequisites - -* You have installed the _mirror registry for Red Hat OpenShift_ on a remote host. - -.Procedure - -* To upgrade the _mirror registry for Red Hat OpenShift_ from a remote host, enter the following command: -+ -[source,terminal] ----- -$ ./mirror-registry upgrade -v --targetHostname <remote_host_url> --targetUsername <user_name> -k ~/.ssh/my_ssh_key ----- -+ -[NOTE] -==== -Users who upgrade the _mirror registry for Red Hat OpenShift_ with the `./mirror-registry upgrade -v` flag must include the same credentials used when creating their mirror registry. For example, if you installed the _mirror registry for Red Hat OpenShift_ with `--quayHostname <host_example_com>` and `--quayRoot <example_directory_name>`, you must include that string to properly upgrade the mirror registry. 
-==== diff --git a/modules/mirror-registry-remote.adoc b/modules/mirror-registry-remote.adoc deleted file mode 100644 index f8f0a157d4fe..000000000000 --- a/modules/mirror-registry-remote.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -:_content-type: PROCEDURE -[id="mirror-registry-remote_{context}"] -= Mirroring on a remote host with mirror registry for Red Hat OpenShift - -This procedure explains how to install the _mirror registry for Red Hat OpenShift_ on a remote host using the `mirror-registry` tool. By doing so, users can create a registry to hold a mirror of {product-title} images. - -[NOTE] -==== -Installing the _mirror registry for Red Hat OpenShift_ using the `mirror-registry` CLI tool makes several changes to your machine. After installation, a `$HOME/quay-install` directory is created, which has installation files, local storage, and the configuration bundle. Trusted SSH keys are generated in case the deployment target is the local host, and systemd files on the host machine are set up to ensure that container runtimes are persistent. Additionally, an initial user named `init` is created with an automatically generated password. All access credentials are printed at the end of the install routine. -==== - -.Procedure - -. Download the `mirror-registry.tar.gz` package for the latest version of the _mirror registry for Red Hat OpenShift_ found on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. - -. Install the _mirror registry for Red Hat OpenShift_ on a remote host by running the `mirror-registry` tool from your local host with your current user account. For a full list of available flags, see "mirror registry for Red Hat OpenShift flags". -+ -[source,terminal] ----- -$ ./mirror-registry install -v \ - --targetHostname <host_example_com> \ - --targetUsername <example_user> \ - -k ~/.ssh/my_ssh_key \ - --quayHostname <host_example_com> \ - --quayRoot <example_directory_name> ----- - -. Use the user name and password generated during installation to log in to the mirror registry by running the following command: -+ -[source,terminal] ----- -$ podman login -u init \ - -p <password> \ - <host_example_com>:8443 \ - --tls-verify=false <1> ----- -<1> You can avoid running `--tls-verify=false` by configuring your system to trust the generated rootCA certificates. See "Using SSL to protect connections to Red Hat Quay" and "Configuring the system to trust the certificate authority" for more information. -+ -[NOTE] -==== -You can also log in by accessing the UI at `\https://<host.example.com>:8443` after installation. -==== - -. You can mirror {product-title} images after logging in. Depending on your needs, see either the "Mirroring the {product-title} image repository" or the "Mirroring Operator catalogs for use with disconnected clusters" sections of this document. -+ -[NOTE] -==== -If there are issues with images stored by the _mirror registry for Red Hat OpenShift_ due to storage layer problems, you can remirror the {product-title} images, or reinstall mirror registry on more stable storage.
-==== diff --git a/modules/mirror-registry-troubleshooting.adoc b/modules/mirror-registry-troubleshooting.adoc deleted file mode 100644 index 2cc9ce2e25de..000000000000 --- a/modules/mirror-registry-troubleshooting.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// module included in the following assembly: -// -// * installing-mirroring-creating-registry.adoc - -:_content-type: PROCEDURE -[id="mirror-registry-troubleshooting_{context}"] -= Troubleshooting mirror registry for Red Hat OpenShift - -To assist in troubleshooting _mirror registry for Red Hat OpenShift_, you can gather logs of systemd services installed by the mirror registry. The following services are installed: - -* quay-app.service -* quay-postgres.service -* quay-redis.service -* quay-pod.service - -.Prerequisites - -* You have installed _mirror registry for Red Hat OpenShift_. - -.Procedure - -* If you installed _mirror registry for Red Hat OpenShift_ with root privileges, you can get the status information of its systemd services by entering the following command: -+ -[source,terminal] ----- -$ sudo systemctl status <service> ----- - -* If you installed _mirror registry for Red Hat OpenShift_ as a standard user, you can get the status information of its systemd services by entering the following command: -+ -[source,terminal] ----- -$ systemctl --user status <service> ----- diff --git a/modules/mirror-registry-uninstall.adoc b/modules/mirror-registry-uninstall.adoc deleted file mode 100644 index 25689cf1a712..000000000000 --- a/modules/mirror-registry-uninstall.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-installation-images.adoc - -[id="uninstalling-mirror-registry_{context}"] -= Uninstalling the mirror registry for Red Hat OpenShift - -* You can uninstall the _mirror registry for Red Hat OpenShift_ from your local host by running the following command: -+ -[source,terminal] ----- -$ ./mirror-registry uninstall -v \ - --quayRoot <example_directory_name> ----- -+ -[NOTE] -==== -* Deleting the _mirror registry for Red Hat OpenShift_ will prompt the user before deletion. You can use `--autoApprove` to skip this prompt. -* Users who install the _mirror registry for Red Hat OpenShift_ with the `--quayRoot` flag must include the `--quayRoot` flag when uninstalling. For example, if you installed the _mirror registry for Red Hat OpenShift_ with `--quayRoot example_directory_name`, you must include that string to properly uninstall the mirror registry. -==== diff --git a/modules/mod-docs-ocp-conventions.adoc b/modules/mod-docs-ocp-conventions.adoc deleted file mode 100644 index ff4ede521281..000000000000 --- a/modules/mod-docs-ocp-conventions.adoc +++ /dev/null @@ -1,154 +0,0 @@ -// Module included in the following assemblies: -// -// * mod_docs_guide/mod-docs-conventions-ocp.adoc - -// Base the file name and the ID on the module title. For example: -// * file name: my-reference-a.adoc -// * ID: [id="my-reference-a"] -// * Title: = My reference A - -[id="mod-docs-ocp-conventions_{context}"] -= Modular docs OpenShift conventions - -These Modular Docs conventions for OpenShift docs build on top of the CCS -modular docs guidelines. - -These guidelines and conventions should be read along with the: - -* General CCS -link:https://redhat-documentation.github.io/modular-docs/[modular docs guidelines]. 
-* link:https://redhat-documentation.github.io/asciidoc-markup-conventions/[AsciiDoc markup conventions] -* link:https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/contributing.adoc[OpenShift Contribution Guide] -* link:https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/doc_guidelines.adoc[OpenShift Documentation Guidelines] - -IMPORTANT: If some convention is duplicated, the convention in this guide -supersedes all others. - -[id="ocp-ccs-conventions_{context}"] -== OpenShift CCS conventions - -* All assemblies must define a context that is unique. -+ -Add this context at the top of the page, just before the first anchor id. -+ -Example: -+ ----- -:context: assembly-gsg ----- - -* All assemblies must include the `_attributes/common-attributes.adoc` file near the -context statement. This file contains the standard attributes for the collection. -+ -`include::_attributes/common-attributes.adoc[leveloffset=+1]` - -* All anchor ids must follow the format: -+ ----- -[id="<anchor-name-with-dashes>_{context}"] ----- -+ -Anchor name is _connected_ to the `{context}` using a dash. -+ -Example: -+ ----- -[id="creating-your-first-content_{context}"] ----- - -* All modules anchor ids must have the `{context}` variable. -+ -This is just reiterating the format described in the previous bullet point. - -* A comment section must be present at the top of each module and assembly, as -shown in the link:https://github.com/redhat-documentation/modular-docs/tree/master/modular-docs-manual/files[modular docs templates]. -+ -The modules comment section must list which assemblies this module has been -included in, while the assemblies comment section must include other assemblies -that it itself is included in, if any. -+ -Example comment section in an assembly: -+ ----- -// This assembly is included in the following assemblies: -// -// NONE ----- -+ -Example comment section in a module: -+ ----- -// Module included in the following assemblies: -// -// mod_docs_guide/mod-docs-conventions-ocp.adoc ----- - -* All modules must go in the modules directory which is present in the top level -of the openshift-docs repository. These modules must follow the file naming -conventions specified in the -link:https://redhat-documentation.github.io/modular-docs/[modular docs guidelines]. - -* All assemblies must go in the relevant guide/book. If you can't find a relevant - guide/book, reach out to a member of the OpenShift CCS team. So guides/books contain assemblies, which - contain modules. - -* modules and images folders are symlinked to the top level folder from each book/guide folder. - -* In your assemblies, when you are linking to the content in other books, you must -use the relative path starting like so: -+ ----- -xref:../architecture/architecture.adoc#architecture[architecture] overview. ----- -+ -[IMPORTANT] -==== -You must not include xrefs in modules or create an xref to a module. You can -only use xrefs to link from one assembly to another. -==== - -* All modules in assemblies must be included using the following format (replace 'ilude' with 'include'): -+ -`ilude::modules/<file_name_of_module>.adoc[]` -+ -_OR_ -+ -`ilude::modules/<file_name_of_module>.adoc[leveloffset=+<offset_by>]` -+ -if it requires a leveloffset. -+ -Example: -+ -`include::modules/creating-your-first-content.adoc[leveloffset=+1]` - -NOTE: There is no `..` at the starting of the path. 
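To pull these conventions together, the following is a minimal, hypothetical assembly skeleton. The file name, context value, anchor ID, and module name are illustrative only, and the ordering simply follows the bullets above rather than reproducing any specific existing assembly:

----
// This assembly is included in the following assemblies:
//
// NONE

:context: assembly-example

include::_attributes/common-attributes.adoc[leveloffset=+1]

[id="assembly-example"]
= Example assembly title

include::modules/creating-your-first-content.adoc[leveloffset=+1]
----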
- -//// -* If your assembly is in a subfolder of a guide/book directory, you must add a -statement to the assembly's metadata to use `relfileprefix`. -+ -This adjusts all the xref links in your modules to start from the root -directory. -+ -At the top of the assembly (in the metadata section), add the following line: -+ ----- -:relfileprefix: ../ ----- -+ -NOTE: There is a space between the second : and the ../. - -+ -The only difference in including a module in the _install_config/index.adoc_ -assembly and _install_config/install/planning.adoc_ assembly is the addition of -the `:relfileprefix: ../` attribute at the top of the -_install_config/install/planning.adoc_ assembly. The actual inclusion of -module remains the same as described in the previous bullet. - -+ -NOTE: This strategy is in place so that links resolve correctly on both -docs.openshift.com and portal docs. -//// - -* Do not use 3rd level folders even though AsciiBinder permits it. If you need -to, work out a better way to organize your content. diff --git a/modules/modify-unavailable-workers.adoc b/modules/modify-unavailable-workers.adoc deleted file mode 100644 index 30f58b3e238d..000000000000 --- a/modules/modify-unavailable-workers.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="modify-unavailable-workers_{context}"] -= Modifying the number of unavailable worker nodes - -By default, only one machine is allowed to be unavailable when applying the kubelet-related configuration to the available worker nodes. For a large cluster, it can take a long time for the configuration change to be reflected. At any time, you can adjust the number of machines that are updating to speed up the process. - -.Procedure - -. Edit the `worker` machine config pool: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- - -. Add the `maxUnavailable` field and set the value: -+ -[source,yaml] ----- -spec: - maxUnavailable: <node_count> ----- -+ -[IMPORTANT] -==== -When setting the value, consider the number of worker nodes that can be -unavailable without affecting the applications running on the cluster. -==== diff --git a/modules/modifying-an-existing-ingress-controller.adoc b/modules/modifying-an-existing-ingress-controller.adoc deleted file mode 100644 index b6cfa72989cd..000000000000 --- a/modules/modifying-an-existing-ingress-controller.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress-controller-dnsmgt.adoc - -:_content-type: PROCEDURE -[id="modifying-an-existing-ingress-controller_{context}"] -= Modifying an existing Ingress Controller - -As a cluster administrator, you can modify an existing Ingress Controller to manually manage the DNS record lifecycle. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Modify the chosen `IngressController` to set `dnsManagementPolicy`: -+ -[source,terminal] ----- -SCOPE=$(oc -n openshift-ingress-operator get ingresscontroller <name> -o=jsonpath="{.status.endpointPublishingStrategy.loadBalancer.scope}") - -oc -n openshift-ingress-operator patch ingresscontrollers/<name> --type=merge --patch="{\"spec\":{\"endpointPublishingStrategy\":{\"type\":\"LoadBalancerService\",\"loadBalancer\":{\"dnsManagementPolicy\":\"Unmanaged\", \"scope\":\"${SCOPE}\"}}}}" ----- - -. Optional: You can delete the associated DNS record in the cloud provider.
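As an illustrative check that is not part of the original module, you can read back the field that the patch modified to confirm that DNS management is now manual. The `<name>` placeholder is the same Ingress Controller name used above:

[source,terminal]
----
$ oc -n openshift-ingress-operator get ingresscontroller <name> \
  -o jsonpath='{.spec.endpointPublishingStrategy.loadBalancer.dnsManagementPolicy}'
----

The command prints `Unmanaged` if the patch was applied.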
diff --git a/modules/modifying-kubelet-as-one-time-scenario.adoc b/modules/modifying-kubelet-as-one-time-scenario.adoc deleted file mode 100644 index 9850b8922bf5..000000000000 --- a/modules/modifying-kubelet-as-one-time-scenario.adoc +++ /dev/null @@ -1,73 +0,0 @@ -:_content-type: PROCEDURE -[id="modifying-kubelet-one-time_{context}"] -= Modifying the kubelet as a one-time scenario - -This procedure explains how to modify the kubelet in a one-time scenario, without rebooting the node through a machine config change (`"spec":{"paused":false}`), so that you can modify the kubelet without affecting the service. - -.Procedure - -. Connect to the node in debug mode: -+ -[source,terminal] ----- -$ oc debug node/<node> ----- -+ -[source,terminal] ----- -$ chroot /host ----- -+ -Alternatively, it is possible to SSH to the node and become root. - -. After access is established, check the default log level: -+ -[source,terminal] ----- -$ systemctl cat kubelet ----- -+ -.Example output -[source,terminal] ----- -# /etc/systemd/system/kubelet.service.d/20-logging.conf -[Service] -Environment="KUBELET_LOG_LEVEL=2" ----- - -. Define the new verbosity required in a new `/etc/systemd/system/kubelet.service.d/30-logging.conf` file, which overrides `/etc/systemd/system/kubelet.service.d/20-logging.conf`. In this example, the verbosity is changed from `2` to `8`: -+ -[source,terminal] ----- -$ echo -e "[Service]\nEnvironment=\"KUBELET_LOG_LEVEL=8\"" > /etc/systemd/system/kubelet.service.d/30-logging.conf ----- - -. Reload systemd and restart the service: -+ -[source,terminal] ----- -$ systemctl daemon-reload ----- -+ -[source,terminal] ----- -$ systemctl restart kubelet ----- - -. Gather the logs, and then revert the log level increase: -+ -[source,terminal] ----- -$ rm -f /etc/systemd/system/kubelet.service.d/30-logging.conf ----- -+ -[source,terminal] ----- -$ systemctl daemon-reload ----- -+ -[source,terminal] ----- -$ systemctl restart kubelet ----- - diff --git a/modules/modifying-template-for-new-projects.adoc b/modules/modifying-template-for-new-projects.adoc deleted file mode 100644 index 5526d8bd6db9..000000000000 --- a/modules/modifying-template-for-new-projects.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/projects/configuring-project-creation.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="modifying-template-for-new-projects_{context}"] -= Modifying the template for new projects - -As a cluster administrator, you can modify the default project template so that -new projects are created using your custom requirements. - -To create your own custom project template: - -.Procedure - -. Log in as a user with `cluster-admin` privileges. - -. Generate the default project template: -+ -[source,terminal] ----- -$ oc adm create-bootstrap-project-template -o yaml > template.yaml ----- - -. Use a text editor to modify the generated `template.yaml` file by adding -objects or modifying existing objects. - -. The project template must be created in the `openshift-config` namespace. Load -your modified template: -+ -[source,terminal] ----- -$ oc create -f template.yaml -n openshift-config ----- - -. Edit the project configuration resource using the web console or CLI. - -** Using the web console: -... Navigate to the *Administration* -> *Cluster Settings* page. -... Click *Configuration* to view all configuration resources. -... Find the entry for *Project* and click *Edit YAML*.
- -** Using the CLI: -... Edit the `project.config.openshift.io/cluster` resource: -+ -[source,terminal] ----- -$ oc edit project.config.openshift.io/cluster ----- - -. Update the `spec` section to include the `projectRequestTemplate` and `name` -parameters, and set the name of your uploaded project template. The default name -is `project-request`. -+ -.Project configuration resource with custom project template -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Project -metadata: - ... -spec: - projectRequestTemplate: - name: <template_name> ----- - -. After you save your changes, create a new project to verify that your changes -were successfully applied. diff --git a/modules/monitoring-about-cluster-monitoring.adoc b/modules/monitoring-about-cluster-monitoring.adoc deleted file mode 100644 index 80596811f776..000000000000 --- a/modules/monitoring-about-cluster-monitoring.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -// This module uses a conditionalized title so that the module -// can be re-used in associated products but the title is not -// included in the existing OpenShift assembly. - -:_content-type: CONCEPT -[id="about-openshift-monitoring_{context}"] -ifeval::["{context}" == "understanding-the-monitoring-stack"] -:ocp-monitoring: -endif::[] - -ifndef::ocp-monitoring[] -= About {product-title} monitoring -endif::ocp-monitoring[] -:ocp-monitoring!: - -{product-title} includes a pre-configured, pre-installed, and self-updating monitoring stack that provides *monitoring for core platform components*. {product-title} delivers monitoring best practices out of the box. A set of alerts are included by default that immediately notify cluster administrators about issues with a cluster. Default dashboards in the {product-title} web console include visual representations of cluster metrics to help you to quickly understand the state of your cluster. - -After installing {product-title} {product-version}, cluster administrators can optionally enable *monitoring for user-defined projects*. By using this feature, cluster administrators, developers, and other users can specify how services and pods are monitored in their own projects. You can then query metrics, review dashboards, and manage alerting rules and silences for your own projects in the {product-title} web console. - -[NOTE] -==== -Cluster administrators can grant developers and other users permission to monitor their own projects. Privileges are granted by assigning one of the predefined monitoring roles. -==== - -[id="about-openshift-monitoring_{context}"] -ifeval::["{context}" == "understanding-the-monitoring-stack"] -:!ocp-monitoring: -endif::[] diff --git a/modules/monitoring-about-querying-metrics.adoc b/modules/monitoring-about-querying-metrics.adoc deleted file mode 100644 index 23bfa6832cdc..000000000000 --- a/modules/monitoring-about-querying-metrics.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/querying-metrics.adoc -// * virt/support/virt-prometheus-queries.adoc - -:_content-type: CONCEPT -[id="about-querying-metrics_{context}"] -= About querying metrics - -The {product-title} monitoring dashboard enables you to run Prometheus Query Language (PromQL) queries to examine metrics visualized on a plot. This functionality provides information about the state of a cluster and any user-defined workloads that you are monitoring. 
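For example, an illustrative query that is not part of the original module, and that assumes the standard cAdvisor container metrics are being scraped, plots per-pod CPU usage for a single project:

----
sum(rate(container_cpu_usage_seconds_total{namespace="<project>"}[5m])) by (pod)
----

Replace `<project>` with the name of the project that you are monitoring.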
- -As a *cluster administrator*, you can query metrics for all core {product-title} and user-defined projects. - -As a *developer*, you must specify a project name when querying metrics. You must have the required privileges to view metrics for the selected project. diff --git a/modules/monitoring-accessing-alerting-rules-for-your-project.adoc b/modules/monitoring-accessing-alerting-rules-for-your-project.adoc deleted file mode 100644 index 91d9d5a4b3ba..000000000000 --- a/modules/monitoring-accessing-alerting-rules-for-your-project.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="accessing-alerting-rules-for-your-project_{context}"] -= Accessing alerting rules for user-defined projects - -To list alerting rules for a user-defined project, you must have been assigned the `monitoring-rules-view` role for the project. - -.Prerequisites - -* You have enabled monitoring for user-defined projects. -* You are logged in as a user that has the `monitoring-rules-view` role for your project. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. You can list alerting rules in `<project>`: -+ -[source,terminal] ----- -$ oc -n <project> get prometheusrule ----- - -. To list the configuration of an alerting rule, run the following: -+ -[source,terminal] ----- -$ oc -n <project> get prometheusrule <rule> -o yaml ----- diff --git a/modules/monitoring-accessing-the-alerting-ui.adoc b/modules/monitoring-accessing-the-alerting-ui.adoc deleted file mode 100644 index f76b00f83387..000000000000 --- a/modules/monitoring-accessing-the-alerting-ui.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="accessing_the_alerting_ui_{context}"] -= Accessing the Alerting UI in the Administrator and Developer perspectives - -The Alerting UI is accessible through the Administrator perspective and the Developer perspective in the {product-title} web console. - -* In the *Administrator* perspective, select *Observe* -> *Alerting*. The three main pages in the Alerting UI in this perspective are the *Alerts*, *Silences*, and *Alerting Rules* pages. - -//Next to the title of each of these pages is a link to the Alertmanager interface. - -* In the *Developer* perspective, select *Observe* -> *<project_name>* -> *Alerts*. In this perspective, alerts, silences, and alerting rules are all managed from the *Alerts* page. The results shown in the *Alerts* page are specific to the selected project. - -[NOTE] -==== -In the Developer perspective, you can select from core {product-title} and user-defined projects that you have access to in the *Project:* list. However, alerts, silences, and alerting rules relating to core {product-title} projects are not displayed if you do not have `cluster-admin` privileges. 
-==== diff --git a/modules/monitoring-accessing-the-metrics-targets-page.adoc b/modules/monitoring-accessing-the-metrics-targets-page.adoc deleted file mode 100644 index d9df69a494b8..000000000000 --- a/modules/monitoring-accessing-the-metrics-targets-page.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics-targets.adoc - -:_content-type: PROCEDURE -[id="monitoring-accessing-the-metrics-targets-page_{context}"] -= Accessing the Metrics Targets page in the Administrator perspective - -You can view the *Metrics Targets* page in the *Administrator* perspective in the {product-title} web console. - -.Prerequisites - -* You have access to the cluster as an administrator for the project for which you want to view metrics targets. - -.Procedure - -* In the *Administrator* perspective, select *Observe* -> *Targets*. -The *Metrics Targets* page opens with a list of all service endpoint targets that are being scraped for metrics. - diff --git a/modules/monitoring-accessing-third-party-monitoring-web-service-apis.adoc b/modules/monitoring-accessing-third-party-monitoring-web-service-apis.adoc deleted file mode 100644 index a28c016b9e7b..000000000000 --- a/modules/monitoring-accessing-third-party-monitoring-web-service-apis.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/accessing-third-party-monitoring-uis-and-apis.adoc - -:_content-type: PROCEDURE -[id="accessing-third-party-monitoring-web-service-apis_{context}"] -= Accessing third-party monitoring web service APIs - -[role="_abstract"] -You can directly access third-party web service APIs from the command line for the following monitoring stack components: Prometheus, Alertmanager, Thanos Ruler, and Thanos Querier. - -The following example commands show how to query the service API receivers for Alertmanager. -This example requires that the associated user account be bound against the `monitoring-alertmanager-edit` role in the `openshift-monitoring` namespace and that the account has the privilege to view the route. -This access only supports using a Bearer Token for authentication. - -[source,terminal] ----- -$ oc login -u <username> -p <password> ----- - -[source,terminal] ----- -$ host=$(oc -n openshift-monitoring get route alertmanager-main -ojsonpath={.spec.host}) ----- - -[source,terminal] ----- -$ token=$(oc whoami -t) ----- - -[source,terminal] ----- -$ curl -H "Authorization: Bearer $token" -k "https://$host/api/v2/receivers" ----- - -[NOTE] -==== -To access Thanos Ruler and Thanos Querier service APIs, the requesting account must have `get` permission on the namespaces resource, which can be done by granting the `cluster-monitoring-view` cluster role to the account. 
-==== \ No newline at end of file diff --git a/modules/monitoring-adding-a-secret-to-the-alertmanager-configuration.adoc b/modules/monitoring-adding-a-secret-to-the-alertmanager-configuration.adoc deleted file mode 100644 index 79aecb07de1f..000000000000 --- a/modules/monitoring-adding-a-secret-to-the-alertmanager-configuration.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="monitoring-adding-a-secret-to-the-alertmanager-configuration_{context}"] -= Adding a secret to the Alertmanager configuration - -You can add secrets to the Alertmanager configuration for core platform monitoring components by editing the `cluster-monitoring-config` config map in the `openshift-monitoring` project. -You can add secrets to the Alertmanager configuration for user-defined projects by editing the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` project. - -After you add a secret to the config map, the secret is mounted as a volume at `/etc/alertmanager/secrets/<secret_name>` within the `alertmanager` container for the Alertmanager pods. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* *If you are configuring core {product-title} monitoring components in the `openshift-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` config map. -** You have created the secret to be configured in Alertmanager in the `openshift-monitoring` project. -* *If you are configuring components that monitor user-defined projects*: -** A cluster administrator has enabled monitoring for user-defined projects. -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the secret to be configured in Alertmanager in the `openshift-user-workload-monitoring` project. - -.Procedure - -. To add a secret configuration to Alertmanager for core platform monitoring, edit the `cluster-monitoring-config` config map in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add a `secrets:` section under `data/config.yaml/alertmanagerMain`. - -. Add the configuration details for the secret in this section: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - secrets: <1> - - <secret_name_1> <2> - - <secret_name_2> ----- -<1> This section contains the secrets to be mounted into Alertmanager. -The secrets must be located within the same namespace as the Alertmanager object. -<2> The name of the `Secret` object that contains authentication credentials for the receiver. -If you add multiple secrets, place each one on a new line. -+ -The following sample config map settings configure Alertmanager to use two `Secret` objects named `test-secret-basic-auth` and `test-secret-api-token`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - secrets: - - test-secret-basic-auth - - test-secret-api-token ----- - -. 
Optional: To add the secrets for use by Alertmanager in user-defined projects, add the secret names under `data/config.yaml/alertmanager/secrets` in the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` project: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - alertmanager: - enabled: true - secrets: - - test-secret - - test-api-receiver-token ----- -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== - -. Save the file to apply the changes to the `ConfigMap` object. -The new configuration is applied automatically. - diff --git a/modules/monitoring-adding-cluster-id-labels-to-metrics.adoc b/modules/monitoring-adding-cluster-id-labels-to-metrics.adoc deleted file mode 100644 index b9886b154b4d..000000000000 --- a/modules/monitoring-adding-cluster-id-labels-to-metrics.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="adding-cluster-id-labels-to-metrics_{context}"] -= Adding cluster ID labels to metrics - -If you manage multiple {product-title} clusters and use the remote write feature to send metrics data from these clusters to an external storage location, you can add cluster ID labels to identify the metrics data coming from different clusters. -You can then query these labels to identify the source cluster for a metric and distinguish that data from similar metrics data sent by other clusters. - -This way, if you manage many clusters for multiple customers and send metrics data to a single centralized storage system, you can use cluster ID labels to query metrics for a particular cluster or customer. - -Creating and using cluster ID labels involves three general steps: - -* Configuring the write relabel settings for remote write storage. - -* Adding cluster ID labels to the metrics. - -* Querying these labels to identify the source cluster or customer for a metric. diff --git a/modules/monitoring-applying-a-custom-configuration-to-alertmanager-for-user-defined-alert-routing.adoc b/modules/monitoring-applying-a-custom-configuration-to-alertmanager-for-user-defined-alert-routing.adoc deleted file mode 100644 index a4c34ba4c924..000000000000 --- a/modules/monitoring-applying-a-custom-configuration-to-alertmanager-for-user-defined-alert-routing.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="applying-a-custom-configuration-to-alertmanager-for-user-defined-alert-routing_{context}"] -= Applying a custom configuration to Alertmanager for user-defined alert routing - -If you have enabled a separate instance of Alertmanager dedicated to user-defined alert routing, you can overwrite the configuration for this instance of Alertmanager by editing the `alertmanager-user-workload` secret in the `openshift-user-workload-monitoring` namespace. - -.Prerequisites - -ifdef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` or `dedicated-admin` role. -endif::[] -ifndef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` role. 
-endif::[] - -.Procedure - -. Print the currently active Alertmanager configuration into the file `alertmanager.yaml`: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get secret alertmanager-user-workload --template='{{ index .data "alertmanager.yaml" }}' | base64 --decode > alertmanager.yaml ----- -+ -. Edit the configuration in `alertmanager.yaml`: -+ -[source,yaml] ----- -route: - receiver: Default - group_by: - - name: Default - routes: - - matchers: - - "service = prometheus-example-monitor" <1> - receiver: <receiver> <2> -receivers: -- name: Default -- name: <receiver> -# <receiver_configuration> ----- -<1> Specifies which alerts match the route. This example shows all alerts that have the `service="prometheus-example-monitor"` label. -<2> Specifies the receiver to use for the alerts group. -+ -. Apply the new configuration in the file: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring create secret generic alertmanager-user-workload --from-file=alertmanager.yaml --dry-run=client -o=yaml | oc -n openshift-user-workload-monitoring replace secret --filename=- ----- diff --git a/modules/monitoring-applying-custom-alertmanager-configuration.adoc b/modules/monitoring-applying-custom-alertmanager-configuration.adoc deleted file mode 100644 index ae32b0002a93..000000000000 --- a/modules/monitoring-applying-custom-alertmanager-configuration.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="applying-custom-alertmanager-configuration_{context}"] -= Applying a custom Alertmanager configuration - -You can overwrite the default Alertmanager configuration by editing the `alertmanager-main` secret in the `openshift-monitoring` namespace for the platform instance of Alertmanager. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -To change the Alertmanager configuration from the CLI: - -. Print the currently active Alertmanager configuration into file `alertmanager.yaml`: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get secret alertmanager-main --template='{{ index .data "alertmanager.yaml" }}' | base64 --decode > alertmanager.yaml ----- -+ -. Edit the configuration in `alertmanager.yaml`: -+ -[source,yaml] ----- -global: - resolve_timeout: 5m -route: - group_wait: 30s <1> - group_interval: 5m <2> - repeat_interval: 12h <3> - receiver: default - routes: - - matchers: - - "alertname=Watchdog" - repeat_interval: 2m - receiver: watchdog - - matchers: - - "service=<your_service>" <4> - routes: - - matchers: - - <your_matching_rules> <5> - receiver: <receiver> <6> -receivers: -- name: default -- name: watchdog -- name: <receiver> -# <receiver_configuration> ----- -<1> The `group_wait` value specifies how long Alertmanager waits before sending an initial notification for a group of alerts. -This value controls how long Alertmanager waits while collecting initial alerts for the same group before sending a notification. -<2> The `group_interval` value specifies how much time must elapse before Alertmanager sends a notification about new alerts added to a group of alerts for which an initial notification was already sent. -<3> The `repeat_interval` value specifies the minimum amount of time that must pass before an alert notification is repeated. -If you want a notification to repeat at each group interval, set the `repeat_interval` value to less than the `group_interval` value. 
-However, the repeated notification can still be delayed, for example, when certain Alertmanager pods are restarted or rescheduled. -<4> The `service` value specifies the service that fires the alerts. -<5> The `<your_matching_rules>` value specifies the target alerts. -<6> The `receiver` value specifies the receiver to use for the alert. -+ -[NOTE] -==== -Use the `matchers` key name to indicate the matchers that an alert has to fulfill to match the node. -Do not use the `match` or `match_re` key names, which are both deprecated and planned for removal in a future release. - -In addition, if you define inhibition rules, use the `target_matchers` key name to indicate the target matchers and the `source_matchers` key name to indicate the source matchers. -Do not use the `target_match`, `target_match_re`, `source_match`, or `source_match_re` key names, which are deprecated and planned for removal in a future release. -==== -+ -The following Alertmanager configuration example configures PagerDuty as an alert receiver: -+ -[source,yaml] ----- -global: - resolve_timeout: 5m -route: - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: default - routes: - - matchers: - - "alertname=Watchdog" - repeat_interval: 2m - receiver: watchdog - - matchers: - - "service=example-app" - routes: - - matchers: - - "severity=critical" - receiver: team-frontend-page* -receivers: -- name: default -- name: watchdog -- name: team-frontend-page - pagerduty_configs: - - service_key: "_your-key_" ----- -+ -With this configuration, alerts of `critical` severity that are fired by the `example-app` service are sent using the `team-frontend-page` receiver. Typically these types of alerts would be paged to an individual or a critical response team. -+ -. Apply the new configuration in the file: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring create secret generic alertmanager-main --from-file=alertmanager.yaml --dry-run=client -o=yaml | oc -n openshift-monitoring replace secret --filename=- ----- - -To change the Alertmanager configuration from the {product-title} web console: - -. Navigate to the *Administration* -> *Cluster Settings* -> *Configuration* -> *Alertmanager* -> *YAML* page of the web console. - -. Modify the YAML configuration file. - -. Select *Save*. diff --git a/modules/monitoring-assigning-tolerations-to-monitoring-components.adoc b/modules/monitoring-assigning-tolerations-to-monitoring-components.adoc deleted file mode 100644 index 3adb5c3c8790..000000000000 --- a/modules/monitoring-assigning-tolerations-to-monitoring-components.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="assigning-tolerations-to-monitoring-components_{context}"] -= Assigning tolerations to monitoring components - -You can assign tolerations to any of the monitoring stack components to enable moving them to tainted nodes. - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. 
-** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `ConfigMap` object: -** *To assign tolerations to a component that monitors core {product-title} projects*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Specify `tolerations` for the component: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: - tolerations: - <toleration_specification> ----- -+ -Substitute `<component>` and `<toleration_specification>` accordingly. -+ -For example, `oc adm taint nodes node1 key1=value1:NoSchedule` adds a taint to `node1` with the key `key1` and the value `value1`. This prevents monitoring components from deploying pods on `node1` unless a toleration is configured for that taint. The following example configures the `alertmanagerMain` component to tolerate the example taint: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoSchedule" ----- - -** *To assign tolerations to a component that monitors user-defined projects*: -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Specify `tolerations` for the component: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: - tolerations: - <toleration_specification> ----- -+ -Substitute `<component>` and `<toleration_specification>` accordingly. -+ -For example, `oc adm taint nodes node1 key1=value1:NoSchedule` adds a taint to `node1` with the key `key1` and the value `value1`. This prevents monitoring components from deploying pods on `node1` unless a toleration is configured for that taint. The following example configures the `thanosRuler` component to tolerate the example taint: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoSchedule" ----- - -. Save the file to apply the changes. The new component placement configuration is applied automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. 
-==== diff --git a/modules/monitoring-attaching-additional-labels-to-your-time-series-and-alerts.adoc b/modules/monitoring-attaching-additional-labels-to-your-time-series-and-alerts.adoc deleted file mode 100644 index 6d8ed830140c..000000000000 --- a/modules/monitoring-attaching-additional-labels-to-your-time-series-and-alerts.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="attaching-additional-labels-to-your-time-series-and-alerts_{context}"] -= Attaching additional labels to your time series and alerts - -Using the external labels feature of Prometheus, you can attach custom labels to all time series and alerts leaving Prometheus. - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `ConfigMap` object: -** *To attach custom labels to all time series and alerts leaving the Prometheus instance that monitors core {product-title} projects*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Define a map of labels you want to add for every metric under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - externalLabels: - <key>: <value> <1> ----- -+ -<1> Substitute `<key>: <value>` with a map of key-value pairs where `<key>` is a unique name for the new label and `<value>` is its value. -+ -[WARNING] -==== -Do not use `prometheus` or `prometheus_replica` as key names, because they are reserved and will be overwritten. -==== -+ -For example, to add metadata about the region and environment to all time series and alerts, use: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - externalLabels: - region: eu - environment: prod ----- - -** *To attach custom labels to all time series and alerts leaving the Prometheus instance that monitors user-defined projects*: -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. 
Define a map of labels you want to add for every metric under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - externalLabels: - <key>: <value> <1> ----- -+ -<1> Substitute `<key>: <value>` with a map of key-value pairs where `<key>` is a unique name for the new label and `<value>` is its value. -+ -[WARNING] -==== -Do not use `prometheus` or `prometheus_replica` as key names, because they are reserved and will be overwritten. -==== -+ -[NOTE] -==== -In the `openshift-user-workload-monitoring` project, Prometheus handles metrics and Thanos Ruler handles alerting and recording rules. Setting `externalLabels` for `prometheus` in the `user-workload-monitoring-config` `ConfigMap` object will only configure external labels for metrics and not for any rules. -==== -+ -For example, to add metadata about the region and environment to all time series and alerts related to user-defined projects, use: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - externalLabels: - region: eu - environment: prod ----- - -. Save the file to apply the changes. The new configuration is applied automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== diff --git a/modules/monitoring-choosing-a-metrics-collection-profile.adoc b/modules/monitoring-choosing-a-metrics-collection-profile.adoc deleted file mode 100644 index 7ba65b9c70e6..000000000000 --- a/modules/monitoring-choosing-a-metrics-collection-profile.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="choosing-a-metrics-collection-profile_{context}"] -= Choosing a metrics collection profile - -To choose a metrics collection profile for core {product-title} monitoring components, edit the `cluster-monitoring-config` `ConfigMap` object. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have enabled Technology Preview features by using the `FeatureGate` custom resource (CR). -* You have created the `cluster-monitoring-config` `ConfigMap` object. -* You have access to the cluster as a user with the `cluster-admin` role. - -[WARNING] -==== -Saving changes to a monitoring config map might restart monitoring processes and redeploy the pods and other resources in the related project. -The running monitoring processes in that project might also restart. -==== - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. 
Add the metrics collection profile setting under `data/config.yaml/prometheusK8s`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - collectionProfile: <metrics_collection_profile_name> <1> ----- -+ -<1> The name of the metrics collection profile. -The available values are `full` or `minimal`. -If you do not specify a value or if the `collectionProfile` key name does not exist in the config map, the default setting of `full` is used. -+ -The following example sets the metrics collection profile to `minimal` for the core platform instance of Prometheus: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - collectionProfile: *minimal* ----- - -. Save the file to apply the changes. The pods affected by the new configuration restart automatically. \ No newline at end of file diff --git a/modules/monitoring-common-terms.adoc b/modules/monitoring-common-terms.adoc deleted file mode 100644 index 6d2fba5dceb5..000000000000 --- a/modules/monitoring-common-terms.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -:_content-type: REFERENCE -[id="openshift-monitoring-common-terms_{context}"] -= Glossary of common terms for {product-title} monitoring - -This glossary defines common terms that are used in {product-title} architecture. - -Alertmanager:: -Alertmanager handles alerts received from Prometheus. Alertmanager is also responsible for sending the alerts to external notification systems. - -Alerting rules:: -Alerting rules contain a set of conditions that outline a particular state within a cluster. Alerts are triggered when those conditions are true. An alerting rule can be assigned a severity that defines how the alerts are routed. - -Cluster Monitoring Operator:: -The Cluster Monitoring Operator (CMO) is a central component of the monitoring stack. It deploys and manages Prometheus instances such as, the Thanos Querier, the Telemeter Client, and metrics targets to ensure that they are up to date. The CMO is deployed by the Cluster Version Operator (CVO). - -Cluster Version Operator:: -The Cluster Version Operator (CVO) manages the lifecycle of cluster Operators, many of which are installed in {product-title} by default. - -config map:: -A config map provides a way to inject configuration data into pods. You can reference the data stored in a config map in a volume of type `ConfigMap`. Applications running in a pod can use this data. - -Container:: -A container is a lightweight and executable image that includes software and all its dependencies. Containers virtualize the operating system. As a result, you can run containers anywhere from a data center to a public or private cloud as well as a developer’s laptop. - -custom resource (CR):: -A CR is an extension of the Kubernetes API. You can create custom resources. - -etcd:: -etcd is the key-value store for {product-title}, which stores the state of all resource objects. - -Fluentd:: -Fluentd gathers logs from nodes and feeds them to Elasticsearch. - -Kubelets:: -Runs on nodes and reads the container manifests. Ensures that the defined containers have started and are running. - -Kubernetes API server:: -Kubernetes API server validates and configures data for the API objects. 
- -Kubernetes controller manager:: -Kubernetes controller manager governs the state of the cluster. - -Kubernetes scheduler:: -Kubernetes scheduler allocates pods to nodes. - -labels:: -Labels are key-value pairs that you can use to organize and select subsets of objects such as a pod. - -node:: -A worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -Operator:: -The preferred method of packaging, deploying, and managing a Kubernetes application in an {product-title} cluster. An Operator takes human operational knowledge and encodes it into software that is packaged and shared with customers. - -Operator Lifecycle Manager (OLM):: -OLM helps you install, update, and manage the lifecycle of Kubernetes native applications. OLM is an open source toolkit designed to manage Operators in an effective, automated, and scalable way. - -Persistent storage:: -Stores the data even after the device is shut down. Kubernetes uses persistent volumes to store the application data. - -Persistent volume claim (PVC):: -You can use a PVC to mount a PersistentVolume into a Pod. You can access the storage without knowing the details of the cloud environment. - -pod:: -The pod is the smallest logical unit in Kubernetes. A pod is comprised of one or more containers to run in a worker node. - -Prometheus:: -Prometheus is the monitoring system on which the {product-title} monitoring stack is based. Prometheus is a time-series database and a rule evaluation engine for metrics. Prometheus sends alerts to Alertmanager for processing. - -Prometheus adapter:: -The Prometheus Adapter translates Kubernetes node and pod queries for use in Prometheus. The resource metrics that are translated include CPU and memory utilization. The Prometheus Adapter exposes the cluster resource metrics API for horizontal pod autoscaling. - -Prometheus Operator:: -The Prometheus Operator (PO) in the `openshift-monitoring` project creates, configures, and manages platform Prometheus and Alertmanager instances. It also automatically generates monitoring target configurations based on Kubernetes label queries. - -Silences:: -A silence can be applied to an alert to prevent notifications from being sent when the conditions for an alert are true. You can mute an alert after the initial notification, while you work on resolving the underlying issue. - -storage:: -{product-title} supports many types of storage, both for on-premise and cloud providers. You can manage container storage for persistent and non-persistent data in an {product-title} cluster. - -Thanos Ruler:: -The Thanos Ruler is a rule evaluation engine for Prometheus that is deployed as a separate process. In {product-title}, Thanos Ruler provides rule and alerting evaluation for the monitoring of user-defined projects. - -web console:: -A user interface (UI) to manage {product-title}. 
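-
-To make a few of these terms concrete, the following minimal, hypothetical manifest ties together the config map, pod, and label entries above: a pod that mounts a `ConfigMap` object as a volume of type `ConfigMap`. All names and values are illustrative only:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: example-config
-data:
-  app.properties: |
-    log_level=info
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: example-pod
-  labels:
-    app: example-app # a label, as defined above
-spec:
-  containers:
-  - name: app
-    image: registry.example.com/app:latest # illustrative image reference
-    volumeMounts:
-    - name: config
-      mountPath: /etc/app
-  volumes:
-  - name: config
-    configMap:
-      name: example-config # the volume of type ConfigMap described in the glossary
-----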
diff --git a/modules/monitoring-components-for-monitoring-user-defined-projects.adoc b/modules/monitoring-components-for-monitoring-user-defined-projects.adoc deleted file mode 100644 index 2e4e21174d52..000000000000 --- a/modules/monitoring-components-for-monitoring-user-defined-projects.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -:_content-type: REFERENCE -[id="components-for-monitoring-user-defined-projects_{context}"] -= Components for monitoring user-defined projects - -{product-title} {product-version} includes an optional enhancement to the monitoring stack that enables you to monitor services and pods in user-defined projects. This feature includes the following components: - -.Components for monitoring user-defined projects -[options="header"] -|=== - -|Component|Description - -|Prometheus Operator -|The Prometheus Operator (PO) in the `openshift-user-workload-monitoring` project creates, configures, and manages Prometheus and Thanos Ruler instances in the same project. - -|Prometheus -|Prometheus is the monitoring system through which monitoring is provided for user-defined projects. Prometheus sends alerts to Alertmanager for processing. - -|Thanos Ruler -|The Thanos Ruler is a rule evaluation engine for Prometheus that is deployed as a separate process. In {product-title} {product-version}, Thanos Ruler provides rule and alerting evaluation for the monitoring of user-defined projects. - -|Alertmanager -|The Alertmanager service handles alerts received from Prometheus and Thanos Ruler. Alertmanager is also responsible for sending user-defined alerts to external notification systems. Deploying this service is optional. - -|=== - -[NOTE] -==== -The components in the preceding table are deployed after monitoring is enabled for user-defined projects. -==== - -All of the components in the monitoring stack are monitored by the stack and are automatically updated when {product-title} is updated. diff --git a/modules/monitoring-configurable-monitoring-components.adoc b/modules/monitoring-configurable-monitoring-components.adoc deleted file mode 100644 index 94a44004b692..000000000000 --- a/modules/monitoring-configurable-monitoring-components.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -[id="configurable-monitoring-components_{context}"] -= Configurable monitoring components - -This table shows the monitoring components you can configure and the keys used to specify the components in the `cluster-monitoring-config` and `user-workload-monitoring-config` `ConfigMap` objects: - -.Configurable monitoring components -[options="header"] -|==== -|Component |cluster-monitoring-config config map key |user-workload-monitoring-config config map key -|Prometheus Operator |`prometheusOperator` |`prometheusOperator` -|Prometheus |`prometheusK8s` |`prometheus` -|Alertmanager |`alertmanagerMain` | `alertmanager` -|kube-state-metrics |`kubeStateMetrics` | -|openshift-state-metrics |`openshiftStateMetrics` | -|Telemeter Client |`telemeterClient` | -|Prometheus Adapter |`k8sPrometheusAdapter` | -|Thanos Querier |`thanosQuerier` | -|Thanos Ruler | |`thanosRuler` -|==== - -[NOTE] -==== -The Prometheus key is called `prometheusK8s` in the `cluster-monitoring-config` `ConfigMap` object and `prometheus` in the `user-workload-monitoring-config` `ConfigMap` object. 
-==== diff --git a/modules/monitoring-configuring-a-local-persistent-volume-claim.adoc b/modules/monitoring-configuring-a-local-persistent-volume-claim.adoc deleted file mode 100644 index ff10904d8d84..000000000000 --- a/modules/monitoring-configuring-a-local-persistent-volume-claim.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="configuring-a-local-persistent-volume-claim_{context}"] -= Configuring a local persistent volume claim - -For monitoring components to use a persistent volume (PV), you must configure a persistent volume claim (PVC). - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `ConfigMap` object: -** *To configure a PVC for a component that monitors core {product-title} projects*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Add your PVC configuration for the component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: - volumeClaimTemplate: - spec: - storageClassName: <storage_class> - resources: - requests: - storage: <amount_of_storage> ----- -+ -See the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims[Kubernetes documentation on PersistentVolumeClaims] for information on how to specify `volumeClaimTemplate`. -+ -The following example configures a PVC that claims local persistent storage for the Prometheus instance that monitors core {product-title} components: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - *prometheusK8s*: - volumeClaimTemplate: - spec: - storageClassName: *local-storage* - resources: - requests: - storage: *40Gi* ----- -+ -In the above example, the storage class created by the Local Storage Operator is called `local-storage`. -+ -The following example configures a PVC that claims local persistent storage for Alertmanager: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - *alertmanagerMain*: - volumeClaimTemplate: - spec: - storageClassName: *local-storage* - resources: - requests: - storage: *10Gi* ----- - -** *To configure a PVC for a component that monitors user-defined projects*: -.. 
Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Add your PVC configuration for the component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: - volumeClaimTemplate: - spec: - storageClassName: <storage_class> - resources: - requests: - storage: <amount_of_storage> ----- -+ -See the link:https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims[Kubernetes documentation on PersistentVolumeClaims] for information on how to specify `volumeClaimTemplate`. -+ -The following example configures a PVC that claims local persistent storage for the Prometheus instance that monitors user-defined projects: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - *prometheus*: - volumeClaimTemplate: - spec: - storageClassName: *local-storage* - resources: - requests: - storage: *40Gi* ----- -+ -In the above example, the storage class created by the Local Storage Operator is called `local-storage`. -+ -The following example configures a PVC that claims local persistent storage for Thanos Ruler: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - *thanosRuler*: - volumeClaimTemplate: - spec: - storageClassName: *local-storage* - resources: - requests: - storage: *10Gi* ----- -+ -[NOTE] -==== -Storage requirements for the `thanosRuler` component depend on the number of rules that are evaluated and how many samples each rule generates. -==== - -. Save the file to apply the changes. The pods affected by the new configuration are restarted automatically and the new storage configuration is applied. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== diff --git a/modules/monitoring-configuring-alert-receivers.adoc b/modules/monitoring-configuring-alert-receivers.adoc deleted file mode 100644 index 0fa3e5052af5..000000000000 --- a/modules/monitoring-configuring-alert-receivers.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc -// * post_installation_configuration/configuring-alert-notifications.adoc - -:_content-type: PROCEDURE -[id="configuring-alert-receivers_{context}"] -= Configuring alert receivers - -You can configure alert receivers to ensure that you learn about important issues with your cluster. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. In the *Administrator* perspective, navigate to *Administration* -> *Cluster Settings* -> *Configuration* -> *Alertmanager*. 
-+ -[NOTE] -==== -Alternatively, you can navigate to the same page through the notification drawer. Select the bell icon at the top right of the {product-title} web console and choose *Configure* in the *AlertmanagerReceiverNotConfigured* alert. -==== - -. Select *Create Receiver* in the *Receivers* section of the page. - -. In the *Create Receiver* form, add a *Receiver Name* and choose a *Receiver Type* from the list. - -. Edit the receiver configuration: -+ -* For PagerDuty receivers: -+ -.. Choose an integration type and add a PagerDuty integration key. -+ -.. Add the URL of your PagerDuty installation. -+ -.. Select *Show advanced configuration* if you want to edit the client and incident details or the severity specification. -+ -* For webhook receivers: -+ -.. Add the endpoint to send HTTP POST requests to. -+ -.. Select *Show advanced configuration* if you want to edit the default option to send resolved alerts to the receiver. -+ -* For email receivers: -+ -.. Add the email address to send notifications to. -+ -.. Add SMTP configuration details, including the address to send notifications from, the smarthost and port number used for sending emails, the hostname of the SMTP server, and authentication details. -+ -.. Choose whether TLS is required. -+ -.. Select *Show advanced configuration* if you want to edit the default option not to send resolved alerts to the receiver or edit the body of email notifications configuration. -+ -* For Slack receivers: -+ -.. Add the URL of the Slack webhook. -+ -.. Add the Slack channel or user name to send notifications to. -+ -.. Select *Show advanced configuration* if you want to edit the default option not to send resolved alerts to the receiver or edit the icon and username configuration. You can also choose whether to find and link channel names and usernames. - -. By default, firing alerts with labels that match all of the selectors will be sent to the receiver. If you want label values for firing alerts to be matched exactly before they are sent to the receiver: -.. Add routing label names and values in the *Routing Labels* section of the form. -+ -.. Select *Regular Expression* if want to use a regular expression. -+ -.. Select *Add Label* to add further routing labels. - -. Select *Create* to create the receiver. diff --git a/modules/monitoring-configuring-cluster-for-application-monitoring.adoc b/modules/monitoring-configuring-cluster-for-application-monitoring.adoc deleted file mode 100644 index 6b349617193c..000000000000 --- a/modules/monitoring-configuring-cluster-for-application-monitoring.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/application-monitoring.adoc - -:_content-type: PROCEDURE -[id="configuring-cluster-for-application-monitoring_{context}"] -= Configuring cluster for application monitoring - -Before application developers can monitor their applications, the human operator of the cluster needs to configure the cluster accordingly. This procedure shows how to. - -.Prerequisites - -* You must log in as a user that belongs to a role with administrative privileges for the cluster. - -.Procedure - -. In the {product-title} web console, navigate to the *Operators* -> *OperatorHub* page and install the Prometheus Operator in the namespace where your application is. - -. Navigate to the *Operators* -> *Installed Operators* page and install Prometheus, Alertmanager, Prometheus Rule, and Service Monitor in the same namespace. 
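-
-The two web console steps above create custom resources that the Prometheus Operator reconciles into running pods.
-As a rough sketch only, a minimal `Prometheus` object created through those forms might look like the following example. The object name, namespace, and service account are illustrative, and the service account needs RBAC that allows scraping:
-
-[source,yaml]
-----
-apiVersion: monitoring.coreos.com/v1
-kind: Prometheus
-metadata:
-  name: example-prometheus
-  namespace: default
-spec:
-  replicas: 1
-  serviceAccountName: prometheus # assumed to exist with permissions to scrape targets
-  serviceMonitorSelector: {} # select every ServiceMonitor in this namespace
-  ruleSelector: {} # select every PrometheusRule in this namespace
-  alerting:
-    alertmanagers:
-    - namespace: default
-      name: alertmanager-operated # governing service the Operator typically creates for an Alertmanager resource
-      port: web
-----
-
-Because the selectors are empty, the `ServiceMonitor` and `PrometheusRule` objects created in the second step are picked up automatically from the same namespace.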
diff --git a/modules/monitoring-configuring-external-alertmanagers.adoc b/modules/monitoring-configuring-external-alertmanagers.adoc deleted file mode 100644 index 9a2d9e0ebcdc..000000000000 --- a/modules/monitoring-configuring-external-alertmanagers.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="monitoring-configuring-external-alertmanagers_{context}"] -= Configuring external Alertmanager instances - -The {product-title} monitoring stack includes a local Alertmanager instance that routes alerts from Prometheus. -You can add external Alertmanager instances by configuring the `cluster-monitoring-config` config map in either the `openshift-monitoring` project or the `user-workload-monitoring-config` project. - -If you add the same external Alertmanager configuration for multiple clusters and disable the local instance for each cluster, you can then manage alert routing for multiple clusters by using a single external Alertmanager instance. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* *If you are configuring core {product-title} monitoring components in the `openshift-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` config map. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` config map. - -.Procedure - -. Edit the `ConfigMap` object. -** *To configure additional Alertmanagers for routing alerts from core {product-title} projects*: -.. Edit the `cluster-monitoring-config` config map in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Add an `additionalAlertmanagerConfigs:` section under `data/config.yaml/prometheusK8s`. - -.. Add the configuration details for additional Alertmanagers in this section: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - additionalAlertmanagerConfigs: - - <alertmanager_specification> ----- -+ -For `<alertmanager_specification>`, substitute authentication and other configuration details for additional Alertmanager instances. -Currently supported authentication methods are bearer token (`bearerToken`) and client TLS (`tlsConfig`). 
-The following sample config map configures an additional Alertmanager using a bearer token with client TLS authentication: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - additionalAlertmanagerConfigs: - - scheme: https - pathPrefix: / - timeout: "30s" - apiVersion: v1 - bearerToken: - name: alertmanager-bearer-token - key: token - tlsConfig: - key: - name: alertmanager-tls - key: tls.key - cert: - name: alertmanager-tls - key: tls.crt - ca: - name: alertmanager-tls - key: tls.ca - staticConfigs: - - external-alertmanager1-remote.com - - external-alertmanager1-remote2.com ----- - -** *To configure additional Alertmanager instances for routing alerts from user-defined projects*: - -.. Edit the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Add a `<component>/additionalAlertmanagerConfigs:` section under `data/config.yaml/`. - -.. Add the configuration details for additional Alertmanagers in this section: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: - additionalAlertmanagerConfigs: - - <alertmanager_specification> ----- -+ -For `<component>`, substitute one of two supported external Alertmanager components: `prometheus` or `thanosRuler`. -+ -For `<alertmanager_specification>`, substitute authentication and other configuration details for additional Alertmanager instances. -Currently supported authentication methods are bearer token (`bearerToken`) and client TLS (`tlsConfig`). -The following sample config map configures an additional Alertmanager using Thanos Ruler with a bearer token and client TLS authentication: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - additionalAlertmanagerConfigs: - - scheme: https - pathPrefix: / - timeout: "30s" - apiVersion: v1 - bearerToken: - name: alertmanager-bearer-token - key: token - tlsConfig: - key: - name: alertmanager-tls - key: tls.key - cert: - name: alertmanager-tls - key: tls.crt - ca: - name: alertmanager-tls - key: tls.ca - staticConfigs: - - external-alertmanager1-remote.com - - external-alertmanager1-remote2.com ----- -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== - -. Save the file to apply the changes to the `ConfigMap` object. -The new component placement configuration is applied automatically. 
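-
-The `bearerToken` and `tlsConfig` entries in the preceding examples are references to keys in `Secret` objects rather than inline credentials.
-As an illustrative sketch only, the referenced secrets could be created as follows. This sketch assumes the `prometheusK8s` case, with the secrets living in the same namespace as the component that references them, and the token value and certificate file paths are placeholders:
-
-[source,terminal]
-----
-$ oc -n openshift-monitoring create secret generic alertmanager-bearer-token \
-  --from-literal=token=<bearer_token>
-
-$ oc -n openshift-monitoring create secret generic alertmanager-tls \
-  --from-file=tls.key=<path_to_key> \
-  --from-file=tls.crt=<path_to_cert> \
-  --from-file=tls.ca=<path_to_ca>
-----
-
-For the `thanosRuler` example, the same secrets would instead be created in the `openshift-user-workload-monitoring` namespace.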
- - diff --git a/modules/monitoring-configuring-metrics-collection-profiles.adoc b/modules/monitoring-configuring-metrics-collection-profiles.adoc deleted file mode 100644 index 9d3e0597c1c4..000000000000 --- a/modules/monitoring-configuring-metrics-collection-profiles.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="configuring-metrics-collection-profiles_{context}"] -= Configuring metrics collection profiles - -[IMPORTANT] -==== -[subs="attributes+"] -Using a metrics collection profile is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. -Red Hat does not recommend using them in production. -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. - -For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview[https://access.redhat.com/support/offerings/techpreview]. -==== - -By default, Prometheus collects metrics exposed by all default metrics targets in {product-title} components. -However, you might want Prometheus to collect fewer metrics from a cluster in certain scenarios: - -* If cluster administrators require only alert, telemetry, and console metrics and do not require other metrics to be available. -* If a cluster increases in size, and the increased size of the default metrics data collected now requires a significant increase in CPU and memory resources. - -You can use a metrics collection profile to collect either the default amount of metrics data or a minimal amount of metrics data. -When you collect minimal metrics data, basic monitoring features such as alerting continue to work. -At the same time, the CPU and memory resources required by Prometheus decrease. - -[id="about-metrics-collection-profiles_{context}"] -== About metrics collection profiles - -You can enable one of two metrics collection profiles: - -* *full*: Prometheus collects metrics data exposed by all platform components. This setting is the default. -* *minimal*: Prometheus collects only the metrics data required for platform alerts, recording rules, telemetry, and console dashboards. diff --git a/modules/monitoring-configuring-monitoring-for-an-application.adoc b/modules/monitoring-configuring-monitoring-for-an-application.adoc deleted file mode 100644 index 50bd5bf07256..000000000000 --- a/modules/monitoring-configuring-monitoring-for-an-application.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/application-monitoring.adoc - -:_content-type: PROCEDURE -[id="configuring-monitoring-for-an-application_{context}"] -= Configuring monitoring for an application - -This procedure shows, on an example, how an application developer can deploy an application and configure monitoring for it. - -.Prerequisites - -* Make sure you configured the cluster for application monitoring. In this example, it is presumed that Prometheus and Alertmanager instances were installed in the `default` project. - -.Procedure - -. Create a YAML file for your configuration. In this example, it is called `deploy.yaml`. - -. 
Add configuration for deploying a sample application: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: example-app - namespace: default -spec: - selector: - matchLabels: - app: example-app - replicas: 1 - template: - metadata: - labels: - app: example-app - spec: - containers: - - name: example-app - image: ghcr.io/rhobs/prometheus-example-app:0.3.0 - ports: - - name: web - containerPort: 8080 ---- ----- - -. Add configuration for exposing the sample application as a service: -+ -[source,yaml] ----- -kind: Service -apiVersion: v1 -metadata: - name: example-app - namespace: default - labels: - tier: frontend -spec: - selector: - app: example-app - ports: - - name: web - port: 8080 ---- ----- - -. Add configuration for creating a service monitor for the sample application. This will add your application as a target for monitoring: -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: example-app - namespace: default - labels: - k8s-app: example-app -spec: - selector: - matchLabels: - tier: frontend - endpoints: - - port: web ----- - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f deploy.yaml ----- - -. Forward a port to the Prometheus UI. In this example, port 9090 is used: -+ -[source,terminal] ----- -$ oc port-forward -n openshift-user-workload-monitoring svc/prometheus-operated 9090 ----- - -. Navigate to the Prometheus UI at http://localhost:9090/targets to see the sample application being monitored. diff --git a/modules/monitoring-configuring-persistent-storage.adoc b/modules/monitoring-configuring-persistent-storage.adoc deleted file mode 100644 index 4fbe4e21a0f7..000000000000 --- a/modules/monitoring-configuring-persistent-storage.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="configuring_persistent_storage_{context}"] -= Configuring persistent storage - -Running cluster monitoring with persistent storage means that your metrics are stored to a persistent volume (PV) and can survive a pod being restarted or recreated. This is ideal if you require your metrics or alerting data to be guarded from data loss. For production environments, it is highly recommended to configure persistent storage. Because of the high IO demands, it is advantageous to use local storage. - -[id="persistent-storage-prerequisites"] -== Persistent storage prerequisites - -* Dedicate sufficient local persistent storage to ensure that the disk does not become full. How much storage you need depends on the number of pods. - -* Verify that you have a persistent volume (PV) ready to be claimed by the persistent volume claim (PVC), one PV for each replica. Because Prometheus and Alertmanager both have two replicas, you need four PVs to support the entire monitoring stack. The PVs are available from the Local Storage Operator, but not if you have enabled dynamically provisioned storage. - -* Use `Filesystem` as the storage type value for the `volumeMode` parameter when you configure the persistent volume. -+ -[NOTE] -==== -If you use a local volume for persistent storage, do not use a raw block volume, which is described with `volumeMode: Block` in the `LocalVolume` object. Prometheus cannot use raw block volumes. -==== -+ -[IMPORTANT] -==== -Prometheus does not support file systems that are not POSIX compliant. 
-For example, some NFS file system implementations are not POSIX compliant. -If you want to use an NFS file system for storage, verify with the vendor that their NFS implementation is fully POSIX compliant. -==== \ No newline at end of file diff --git a/modules/monitoring-configuring-pod-topology-spread-constraints-for-monitoring.adoc b/modules/monitoring-configuring-pod-topology-spread-constraints-for-monitoring.adoc deleted file mode 100644 index d917cfa5020b..000000000000 --- a/modules/monitoring-configuring-pod-topology-spread-constraints-for-monitoring.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="configuring_pod_topology_spread_constraintsfor_monitoring_{context}"] -= Configuring pod topology spread constraints for monitoring - -You can use pod topology spread constraints to control how Prometheus, Thanos Ruler, and Alertmanager pods are spread across a network topology when {product-title} pods are deployed in multiple availability zones. - -Pod topology spread constraints are suitable for controlling pod scheduling within hierarchical topologies in which nodes are spread across different infrastructure levels, such as regions and zones within those regions. -Additionally, by being able to schedule pods in different zones, you can improve network latency in certain scenarios. \ No newline at end of file diff --git a/modules/monitoring-configuring-remote-write-storage.adoc b/modules/monitoring-configuring-remote-write-storage.adoc deleted file mode 100644 index e0960a3843da..000000000000 --- a/modules/monitoring-configuring-remote-write-storage.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="configuring_remote_write_storage_{context}"] -= Configuring remote write storage - -[role="_abstract"] -You can configure remote write storage to enable Prometheus to send ingested metrics to remote systems for long-term storage. -Doing so has no impact on how or for how long Prometheus stores metrics. - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components:* -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects:* -** You have access to the cluster as a user with the `cluster-admin` role or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). -* You have set up a remote write compatible endpoint (such as Thanos) and know the endpoint URL. -See the link:https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage[Prometheus remote endpoints and storage documentation] for information about endpoints that are compatible with the remote write feature. -* You have set up authentication credentials in a `Secret` object for the remote write endpoint. -You must create the secret in the same namespace as the Prometheus object for which you configure remote write: the `openshift-monitoring` namespace for default platform monitoring or the `openshift-user-workload-monitoring` namespace for user workload monitoring. 
- -+ -[CAUTION] -==== -To reduce security risks, use HTTPS and authentication to send metrics to an endpoint. -==== - -.Procedure - -Follow these steps to configure remote write for default platform monitoring in the `cluster-monitoring-config` config map in the `openshift-monitoring` namespace. - -[NOTE] -==== -If you configure remote write for the Prometheus instance that monitors user-defined projects, make similar edits to the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace. -Note that the Prometheus config map component is called `prometheus` in the `user-workload-monitoring-config` `ConfigMap` object and not `prometheusK8s`, as it is in the `cluster-monitoring-config` `ConfigMap` object. -==== - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add a `remoteWrite:` section under `data/config.yaml/prometheusK8s`. - -. Add an endpoint URL and authentication credentials in this section: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://remote-write-endpoint.example.com" <1> - <endpoint_authentication_credentials> <2> ----- -+ -<1> The URL of the remote write endpoint. -<2> The authentication method and credentials for the endpoint. -Currently supported authentication methods are AWS Signature Version 4, authentication using HTTP in an `Authorization` request header, Basic authentication, OAuth 2.0, and TLS client. -See _Supported remote write authentication settings_ for sample configurations of supported authentication methods. - -. Add write relabel configuration values after the authentication credentials: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://remote-write-endpoint.example.com" - <endpoint_authentication_credentials> - <write_relabel_configs> <1> ----- -<1> The write relabel configuration settings. -+ -For `<write_relabel_configs>` substitute a list of write relabel configurations for metrics that you want to send to the remote endpoint. -+ -The following sample shows how to forward a single metric called `my_metric`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://remote-write-endpoint.example.com" - writeRelabelConfigs: - - sourceLabels: [__name__] - regex: 'my_metric' - action: keep - ----- -+ -See the link:https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config[Prometheus relabel_config documentation] for information about write relabel configuration options. - -. Save the file to apply the changes to the `ConfigMap` object. -The pods affected by the new configuration restart automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -Saving changes to a monitoring `ConfigMap` object might redeploy the pods and other resources in the related project. 
Saving changes might also restart the running monitoring processes in that project. -==== diff --git a/modules/monitoring-configuring-secrets-for-alertmanager.adoc b/modules/monitoring-configuring-secrets-for-alertmanager.adoc deleted file mode 100644 index c0100a3dc852..000000000000 --- a/modules/monitoring-configuring-secrets-for-alertmanager.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="monitoring-configuring-secrets-for-alertmanager_{context}"] -= Configuring secrets for Alertmanager - -The {product-title} monitoring stack includes Alertmanager, which routes alerts from Prometheus to endpoint receivers. -If you need to authenticate with a receiver so that Alertmanager can send alerts to it, you can configure Alertmanager to use a secret that contains authentication credentials for the receiver. - -For example, you can configure Alertmanager to use a secret to authenticate with an endpoint receiver that requires a certificate issued by a private Certificate Authority (CA). -You can also configure Alertmanager to use a secret to authenticate with a receiver that requires a password file for Basic HTTP authentication. -In either case, authentication details are contained in the `Secret` object rather than in the `ConfigMap` object. diff --git a/modules/monitoring-configuring-the-monitoring-stack.adoc b/modules/monitoring-configuring-the-monitoring-stack.adoc deleted file mode 100644 index 17cd79436b99..000000000000 --- a/modules/monitoring-configuring-the-monitoring-stack.adoc +++ /dev/null @@ -1,134 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="configuring-the-monitoring-stack_{context}"] -= Configuring the monitoring stack - -In {product-title} {product-version}, you can configure the monitoring stack using the `cluster-monitoring-config` or `user-workload-monitoring-config` `ConfigMap` objects. Config maps configure the Cluster Monitoring Operator (CMO), which in turn configures the components of the stack. - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `ConfigMap` object. -** *To configure core {product-title} monitoring components*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. 
Add your configuration under `data/config.yaml` as a key-value pair `<component_name>:{nbsp}<component_configuration>`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: - <configuration_for_the_component> ----- -+ -Substitute `<component>` and `<configuration_for_the_component>` accordingly. -+ -The following example `ConfigMap` object configures a persistent volume claim (PVC) for Prometheus. This relates to the Prometheus instance that monitors core {product-title} components only: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: <1> - volumeClaimTemplate: - spec: - storageClassName: fast - volumeMode: Filesystem - resources: - requests: - storage: 40Gi ----- -<1> Defines the Prometheus component and the subsequent lines define its configuration. - -** *To configure components that monitor user-defined projects*: -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Add your configuration under `data/config.yaml` as a key-value pair `<component_name>:{nbsp}<component_configuration>`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: - <configuration_for_the_component> ----- -+ -Substitute `<component>` and `<configuration_for_the_component>` accordingly. -+ -The following example `ConfigMap` object configures a data retention period and minimum container resource requests for Prometheus. This relates to the Prometheus instance that monitors user-defined projects only: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: <1> - retention: 24h <2> - resources: - requests: - cpu: 200m <3> - memory: 2Gi <4> ----- -<1> Defines the Prometheus component and the subsequent lines define its configuration. -<2> Configures a twenty-four hour data retention period for the Prometheus instance that monitors user-defined projects. -<3> Defines a minimum resource request of 200 millicores for the Prometheus container. -<4> Defines a minimum pod resource request of 2 GiB of memory for the Prometheus container. -+ -[NOTE] -==== -The Prometheus config map component is called `prometheusK8s` in the `cluster-monitoring-config` `ConfigMap` object and `prometheus` in the `user-workload-monitoring-config` `ConfigMap` object. -==== - -. Save the file to apply the changes to the `ConfigMap` object. The pods affected by the new configuration are restarted automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. 
-==== diff --git a/modules/monitoring-contents-of-the-metrics-ui.adoc b/modules/monitoring-contents-of-the-metrics-ui.adoc deleted file mode 100644 index b8a838b7a5f5..000000000000 --- a/modules/monitoring-contents-of-the-metrics-ui.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -[id="contents-of-the-metrics-ui_{context}"] -= Contents of the Metrics UI - -This section shows and explains the contents of the Metrics UI, a web interface to Prometheus. - -The *Metrics* page is accessible by clicking *Observe* -> *Metrics* in the {product-title} web console. - -image::monitoring-metrics-screen.png[] - -. Actions. -* Add query. -* Expand or collapse all query tables. -* Delete all queries. -. Hide the plot. -. The interactive plot. -. The catalog of available metrics. -. Add query. -. Run queries. -. Query forms. -. Expand or collapse the form. -. The query. -. Clear query. -. Enable or disable query. -. Actions for a specific query. -* Enable or disable query. -* Show or hide all series of the query from the plot. -* Delete query. -. The metrics table for a query. -. Color assigned to the graph of the metric. Selecting the square shows or hides the graph. - -Additionally, there is a link to the old Prometheus interface next to the title of the page. diff --git a/modules/monitoring-creating-a-role-for-setting-up-metrics-collection.adoc b/modules/monitoring-creating-a-role-for-setting-up-metrics-collection.adoc deleted file mode 100644 index af2a54cb8824..000000000000 --- a/modules/monitoring-creating-a-role-for-setting-up-metrics-collection.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-your-own-services.adoc - -[id="creating-a-role-for-setting-up-metrics-collection_{context}"] -= Creating a role for setting up metrics collection - -This procedure shows how to create a role that allows a user to set up metrics collection for a service as described in "Setting up metrics collection". - -.Procedure - -. Create a YAML file for the new role. In this example, it is called `custom-metrics-role.yaml`. - -. Fill the file with the configuration for the `monitor-crd-edit` role: -+ -[source,yaml] ----- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: monitor-crd-edit -rules: -- apiGroups: ["monitoring.coreos.com"] - resources: ["prometheusrules", "servicemonitors", "podmonitors"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] ----- -+ -This role enables a user to set up metrics collection for services. - -. Apply the configuration file to the cluster: -+ ----- -$ oc apply -f custom-metrics-role.yaml ----- -+ -Now the role is created. diff --git a/modules/monitoring-creating-alert-routing-for-user-defined-projects.adoc b/modules/monitoring-creating-alert-routing-for-user-defined-projects.adoc deleted file mode 100644 index 3c36507169a9..000000000000 --- a/modules/monitoring-creating-alert-routing-for-user-defined-projects.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="creating-alert-routing-for-user-defined-projects_{context}"] -= Creating alert routing for user-defined projects - -[role="_abstract"] -If you are a non-administrator user who has been given the `alert-routing-edit` role, you can create or edit alert routing for user-defined projects. 
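-
-The prerequisites below assume that a cluster administrator has already granted you the `alert-routing-edit` role for your project.
-As a hypothetical sketch, such a grant might look similar to the following command, where the namespace and user names are placeholders:
-
-[source,terminal]
-----
-$ oc -n <namespace> adm policy add-role-to-user alert-routing-edit <user>
-----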
- -.Prerequisites - -* A cluster administrator has enabled monitoring for user-defined projects. -* A cluster administrator has enabled alert routing for user-defined projects. -* You are logged in as a user that has the `alert-routing-edit` role for the project for which you want to create alert routing. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for alert routing. The example in this procedure uses a file called `example-app-alert-routing.yaml`. - -. Add an `AlertmanagerConfig` YAML definition to the file. For example: -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1beta1 -kind: AlertmanagerConfig -metadata: - name: example-routing - namespace: ns1 -spec: - route: - receiver: default - groupBy: [job] - receivers: - - name: default - webhookConfigs: - - url: https://example.org/post ----- -+ -[NOTE] -==== -For user-defined alerting rules, user-defined routing is scoped to the namespace in which the resource is defined. -For example, a routing configuration defined in the `AlertmanagerConfig` object for namespace `ns1` only applies to `PrometheusRules` resources in the same namespace. -==== -+ -. Save the file. - -. Apply the resource to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-app-alert-routing.yaml ----- -+ -The configuration is automatically applied to the Alertmanager pods. diff --git a/modules/monitoring-creating-alerting-rules-for-user-defined-projects.adoc b/modules/monitoring-creating-alerting-rules-for-user-defined-projects.adoc deleted file mode 100644 index bf7b6517f9b3..000000000000 --- a/modules/monitoring-creating-alerting-rules-for-user-defined-projects.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="creating-alerting-rules-for-user-defined-projects_{context}"] -= Creating alerting rules for user-defined projects - -You can create alerting rules for user-defined projects. Those alerting rules will fire alerts based on the values of chosen metrics. - -.Prerequisites - -* You have enabled monitoring for user-defined projects. -* You are logged in as a user that has the `monitoring-rules-edit` role for the project where you want to create an alerting rule. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for alerting rules. In this example, it is called `example-app-alerting-rule.yaml`. - -. Add an alerting rule configuration to the YAML file. For example: -+ -[NOTE] -==== -When you create an alerting rule, a project label is enforced on it if a rule with the same name exists in another project. -==== -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: example-alert - namespace: ns1 -spec: - groups: - - name: example - rules: - - alert: VersionAlert - expr: version{job="prometheus-example-app"} == 0 ----- -+ -This configuration creates an alerting rule named `example-alert`. The alerting rule fires an alert when the `version` metric exposed by the sample service becomes `0`. -+ -[IMPORTANT] -==== -A user-defined alerting rule can include metrics for its own project and cluster metrics. You cannot include metrics for another user-defined project. - -For example, an alerting rule for the user-defined project `ns1` can have metrics from `ns1` and cluster metrics, such as the CPU and memory metrics. However, the rule cannot include metrics from `ns2`. 
- -Additionally, you cannot create alerting rules for the `openshift-*` core {product-title} projects. {product-title} monitoring by default provides a set of alerting rules for these projects. -==== - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-app-alerting-rule.yaml ----- -+ -It takes some time to create the alerting rule. diff --git a/modules/monitoring-creating-alerting-rules.adoc b/modules/monitoring-creating-alerting-rules.adoc deleted file mode 100644 index fa547c42c4d5..000000000000 --- a/modules/monitoring-creating-alerting-rules.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -[id="creating-alerting-rules-for-user-defined-projects_{context}"] -= Creating alerting rules for user-defined projects - -For user-defined projects you can create alerting rules. Those alerting rules will fire alerts based on the values of chosen metrics. - -.Prerequisites - -* You have enabled monitoring for user-defined projects. -* You are logged in as a user that has the `monitoring-rules-edit` role for the project where you want to create an alerting rule. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for alerting rules. In this example, it is called `example-app-alerting-rule.yaml`. - -. Add an alerting rule configuration to the YAML file. For example: -+ -[NOTE] -==== -When you create an alerting rule, a project label is enforced on it if a rule with the same name exists in another project. -==== -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: example-alert - namespace: ns1 -spec: - groups: - - name: example - rules: - - alert: VersionAlert - expr: version{job="prometheus-example-app"} == 0 ----- -+ -This configuration creates an alerting rule named `example-alert`. The alerting rule fires an alert when the `version` metric exposed by the sample service becomes `0`. -+ -[IMPORTANT] -==== -A user-defined alerting rule can include metrics for its own project and cluster metrics. You cannot include metrics for another user-defined project. - -For example, an alerting rule for the user-defined project `ns1` can have metrics from `ns1` and cluster metrics, such as the CPU and memory metrics. However, the rule cannot include metrics from `ns2`. - -Additionally, you cannot create alerting rules for the `openshift-*` core {product-title} projects. {product-title} monitoring by default provides a set of alerting rules for these projects. -==== - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-app-alerting-rule.yaml ----- -+ -It takes some time to create the alerting rule. diff --git a/modules/monitoring-creating-cluster-id-labels-for-metrics.adoc b/modules/monitoring-creating-cluster-id-labels-for-metrics.adoc deleted file mode 100644 index e3daed86803e..000000000000 --- a/modules/monitoring-creating-cluster-id-labels-for-metrics.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="creating-cluster-id-labels-for-metrics_{context}"] -= Creating cluster ID labels for metrics - -You can create cluster ID labels for metrics for default platform monitoring and for user workload monitoring. 
-
-For default platform monitoring, you add cluster ID labels for metrics in the `write_relabel` settings for remote write storage in the `cluster-monitoring-config` config map in the `openshift-monitoring` namespace.
-
-For user workload monitoring, you edit the settings in the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace.
-
-.Prerequisites
-
-* You have installed the OpenShift CLI (`oc`).
-* You have configured remote write storage.
-* *If you are configuring default platform monitoring components:*
-** You have access to the cluster as a user with the `cluster-admin` role.
-** You have created the `cluster-monitoring-config` `ConfigMap` object.
-* *If you are configuring components that monitor user-defined projects:*
-** You have access to the cluster as a user with the `cluster-admin` role or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project.
-** You have created the `user-workload-monitoring-config` `ConfigMap` object.
-
-.Procedure
-
-. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project:
-+
-[source,terminal]
-----
-$ oc -n openshift-monitoring edit configmap cluster-monitoring-config
-----
-+
-[NOTE]
-====
-If you configure cluster ID labels for metrics for the Prometheus instance that monitors user-defined projects, edit the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace.
-Note that the Prometheus component is called `prometheus` in this config map and not `prometheusK8s`, which is the name used in the `cluster-monitoring-config` config map.
-====
-
-. In the `writeRelabelConfigs:` section under `data/config.yaml/prometheusK8s/remoteWrite`, add cluster ID relabel configuration values:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cluster-monitoring-config
-  namespace: openshift-monitoring
-data:
-  config.yaml: |
-    prometheusK8s:
-      remoteWrite:
-      - url: "https://remote-write-endpoint.example.com"
-        <endpoint_authentication_credentials>
-        writeRelabelConfigs: <1>
-        - <relabel_config> <2>
-----
-<1> Add a list of write relabel configurations for metrics that you want to send to the remote endpoint.
-<2> Substitute the label configuration for the metrics sent to the remote write endpoint.
-+
-The following sample shows how to forward a metric with the cluster ID label `cluster_id` in default platform monitoring:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cluster-monitoring-config
-  namespace: openshift-monitoring
-data:
-  config.yaml: |
-    prometheusK8s:
-      remoteWrite:
-      - url: "https://remote-write-endpoint.example.com"
-        writeRelabelConfigs:
-        - sourceLabels:
-          - __tmp_openshift_cluster_id__ <1>
-          targetLabel: cluster_id <2>
-          action: replace <3>
-----
-<1> The system initially applies a temporary cluster ID source label named `+++__tmp_openshift_cluster_id__+++`. This temporary label is replaced by the cluster ID label name that you specify.
-<2> Specify the name of the cluster ID label for metrics sent to remote write storage.
-If you use a label name that already exists for a metric, that value is overwritten with the name of this cluster ID label.
-For the label name, do not use `+++__tmp_openshift_cluster_id__+++`. The final relabeling step removes labels that use this name.
-<3> The `replace` write relabel action replaces the temporary label with the target label for outgoing metrics.
-This action is the default and is applied if no action is specified. - -. Save the file to apply the changes to the `ConfigMap` object. -The pods affected by the updated configuration automatically restart. -+ -[WARNING] -==== -Saving changes to a monitoring `ConfigMap` object might redeploy the pods and other resources in the related project. Saving changes might also restart the running monitoring processes in that project. -==== diff --git a/modules/monitoring-creating-cluster-monitoring-configmap.adoc b/modules/monitoring-creating-cluster-monitoring-configmap.adoc deleted file mode 100644 index 6f3caa351848..000000000000 --- a/modules/monitoring-creating-cluster-monitoring-configmap.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="creating-cluster-monitoring-configmap_{context}"] -= Creating a cluster monitoring config map - -To configure core {product-title} monitoring components, you must create the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project. - -[NOTE] -==== -When you save your changes to the `cluster-monitoring-config` `ConfigMap` object, some or all of the pods in the `openshift-monitoring` project might be redeployed. It can sometimes take a while for these components to redeploy. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Check whether the `cluster-monitoring-config` `ConfigMap` object exists: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get configmap cluster-monitoring-config ----- - -. If the `ConfigMap` object does not exist: -.. Create the following YAML manifest. In this example the file is called `cluster-monitoring-config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | ----- -+ -.. Apply the configuration to create the `ConfigMap` object: -+ -[source,terminal] ----- -$ oc apply -f cluster-monitoring-config.yaml ----- diff --git a/modules/monitoring-creating-new-alerting-rules.adoc b/modules/monitoring-creating-new-alerting-rules.adoc deleted file mode 100644 index 7162658479c7..000000000000 --- a/modules/monitoring-creating-new-alerting-rules.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="creating-new-alerting-rules_{context}"] -= Creating new alerting rules - -As a cluster administrator, you can create new alerting rules based on platform metrics. -These alerting rules trigger alerts based on the values of chosen metrics. - -[NOTE] -==== -If you create a customized `AlertingRule` resource based on an existing platform alerting rule, silence the original alert to avoid receiving conflicting alerts. -==== - -.Prerequisites - -* You are logged in as a user that has the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have enabled Technology Preview features, and all nodes in the cluster are ready. - - -.Procedure - -. Create a new YAML configuration file named `example-alerting-rule.yaml` in the `openshift-monitoring` namespace. - -. Add an `AlertingRule` resource to the YAML file. 
-The following example creates a new alerting rule named `example`, similar to the default `watchdog` alert: -+ -[source,yaml] ----- -apiVersion: monitoring.openshift.io/v1 -kind: AlertingRule -metadata: - name: example - namespace: openshift-monitoring -spec: - groups: - - name: example-rules - rules: - - alert: ExampleAlert <1> - expr: vector(1) <2> ----- -<1> The name of the alerting rule you want to create. -<2> The PromQL query expression that defines the new rule. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-alerting-rule.yaml ----- diff --git a/modules/monitoring-creating-scrape-sample-alerts.adoc b/modules/monitoring-creating-scrape-sample-alerts.adoc deleted file mode 100644 index 9d08dd229135..000000000000 --- a/modules/monitoring-creating-scrape-sample-alerts.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="creating-scrape-sample-alerts_{context}"] -= Creating scrape sample alerts - -You can create alerts that notify you when: - -* The target cannot be scraped or is not available for the specified `for` duration -* A scrape sample threshold is reached or is exceeded for the specified `for` duration - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -* You have enabled monitoring for user-defined projects. -* You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have limited the number of samples that can be accepted per target scrape in user-defined projects, by using `enforcedSampleLimit`. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file with alerts that inform you when the targets are down and when the enforced sample limit is approaching. The file in this example is called `monitoring-stack-alerts.yaml`: -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - labels: - prometheus: k8s - role: alert-rules - name: monitoring-stack-alerts <1> - namespace: ns1 <2> -spec: - groups: - - name: general.rules - rules: - - alert: TargetDown <3> - annotations: - message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service - }} targets in {{ $labels.namespace }} namespace are down.' <4> - expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, - namespace, service)) > 10 - for: 10m <5> - labels: - severity: warning <6> - - alert: ApproachingEnforcedSamplesLimit <7> - annotations: - message: '{{ $labels.container }} container of the {{ $labels.pod }} pod in the {{ $labels.namespace }} namespace consumes {{ $value | humanizePercentage }} of the samples limit budget.' <8> - expr: scrape_samples_scraped/50000 > 0.8 <9> - for: 10m <10> - labels: - severity: warning <11> ----- -<1> Defines the name of the alerting rule. -<2> Specifies the user-defined project where the alerting rule will be deployed. -<3> The `TargetDown` alert will fire if the target cannot be scraped or is not available for the `for` duration. -<4> The message that will be output when the `TargetDown` alert fires. -<5> The conditions for the `TargetDown` alert must be true for this duration before the alert is fired. -<6> Defines the severity for the `TargetDown` alert. 
-<7> The `ApproachingEnforcedSamplesLimit` alert will fire when the defined scrape sample threshold is reached or exceeded for the specified `for` duration. -<8> The message that will be output when the `ApproachingEnforcedSamplesLimit` alert fires. -<9> The threshold for the `ApproachingEnforcedSamplesLimit` alert. In this example the alert will fire when the number of samples per target scrape has exceeded 80% of the enforced sample limit of `50000`. The `for` duration must also have passed before the alert will fire. The `<number>` in the expression `scrape_samples_scraped/<number> > <threshold>` must match the `enforcedSampleLimit` value defined in the `user-workload-monitoring-config` `ConfigMap` object. -<10> The conditions for the `ApproachingEnforcedSamplesLimit` alert must be true for this duration before the alert is fired. -<11> Defines the severity for the `ApproachingEnforcedSamplesLimit` alert. - -. Apply the configuration to the user-defined project: -+ -[source,terminal] ----- -$ oc apply -f monitoring-stack-alerts.yaml ----- diff --git a/modules/monitoring-creating-user-defined-workload-monitoring-configmap.adoc b/modules/monitoring-creating-user-defined-workload-monitoring-configmap.adoc deleted file mode 100644 index 3bb6dbf3e524..000000000000 --- a/modules/monitoring-creating-user-defined-workload-monitoring-configmap.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="creating-user-defined-workload-monitoring-configmap_{context}"] -= Creating a user-defined workload monitoring config map - -To configure the components that monitor user-defined projects, you must create the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project. - -[NOTE] -==== -When you save your changes to the `user-workload-monitoring-config` `ConfigMap` object, some or all of the pods in the `openshift-user-workload-monitoring` project might be redeployed. It can sometimes take a while for these components to redeploy. You can create and configure the config map before you first enable monitoring for user-defined projects, to prevent having to redeploy the pods often. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Check whether the `user-workload-monitoring-config` `ConfigMap` object exists: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get configmap user-workload-monitoring-config ----- - -. If the `user-workload-monitoring-config` `ConfigMap` object does not exist: -.. Create the following YAML manifest. In this example the file is called `user-workload-monitoring-config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | ----- -+ -.. Apply the configuration to create the `ConfigMap` object: -+ -[source,terminal] ----- -$ oc apply -f user-workload-monitoring-config.yaml ----- -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. 
-==== diff --git a/modules/monitoring-default-monitoring-components.adoc b/modules/monitoring-default-monitoring-components.adoc deleted file mode 100644 index 7b3337276a5c..000000000000 --- a/modules/monitoring-default-monitoring-components.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -:_content-type: REFERENCE -[id="default-monitoring-components_{context}"] -= Default monitoring components - -By default, the {product-title} {product-version} monitoring stack includes these components: - -.Default monitoring stack components -[options="header"] -|=== - -|Component|Description - -|Cluster Monitoring Operator -|The Cluster Monitoring Operator (CMO) is a central component of the monitoring stack. It deploys, manages, and automatically updates Prometheus and Alertmanager instances, Thanos Querier, Telemeter Client, and metrics targets. The CMO is deployed by the Cluster Version Operator (CVO). - -|Prometheus Operator -|The Prometheus Operator (PO) in the `openshift-monitoring` project creates, configures, and manages platform Prometheus instances and Alertmanager instances. It also automatically generates monitoring target configurations based on Kubernetes label queries. - -|Prometheus -|Prometheus is the monitoring system on which the {product-title} monitoring stack is based. Prometheus is a time-series database and a rule evaluation engine for metrics. Prometheus sends alerts to Alertmanager for processing. - -|Prometheus Adapter -|The Prometheus Adapter (PA in the preceding diagram) translates Kubernetes node and pod queries for use in Prometheus. The resource metrics that are translated include CPU and memory utilization metrics. The Prometheus Adapter exposes the cluster resource metrics API for horizontal pod autoscaling. The Prometheus Adapter is also used by the `oc adm top nodes` and `oc adm top pods` commands. - -|Alertmanager -|The Alertmanager service handles alerts received from Prometheus. Alertmanager is also responsible for sending the alerts to external notification systems. - -|`kube-state-metrics` agent -|The `kube-state-metrics` exporter agent (KSM in the preceding diagram) converts Kubernetes objects to metrics that Prometheus can use. - -|`openshift-state-metrics` agent -|The `openshift-state-metrics` exporter (OSM in the preceding diagram) expands upon `kube-state-metrics` by adding metrics for {product-title}-specific resources. - -|`node-exporter` agent -|The `node-exporter` agent (NE in the preceding diagram) collects metrics about every node in a cluster. The `node-exporter` agent is deployed on every node. - -|Thanos Querier -|Thanos Querier aggregates and optionally deduplicates core {product-title} metrics and metrics for user-defined projects under a single, multi-tenant interface. - -|Telemeter Client -|Telemeter Client sends a subsection of the data from platform Prometheus instances to Red Hat to facilitate Remote Health Monitoring for clusters. - -|=== - -All of the components in the monitoring stack are monitored by the stack and are automatically updated when {product-title} is updated. - -[NOTE] -==== -All components of the monitoring stack use the TLS security profile settings that are centrally configured by a cluster administrator. 
-If you configure a monitoring stack component that uses TLS security settings, the component uses the TLS security profile settings that already exist in the `tlsSecurityProfile` field in the global {product-title} `apiservers.config.openshift.io/cluster` resource. -==== diff --git a/modules/monitoring-default-monitoring-targets.adoc b/modules/monitoring-default-monitoring-targets.adoc deleted file mode 100644 index 36fd27d30986..000000000000 --- a/modules/monitoring-default-monitoring-targets.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -:_content-type: REFERENCE -[id="default-monitoring-targets_{context}"] -= Default monitoring targets - -In addition to the components of the stack itself, the default monitoring stack monitors: - -* CoreDNS -* Elasticsearch (if Logging is installed) -* etcd -* Fluentd (if Logging is installed) -* HAProxy -* Image registry -* Kubelets -* Kubernetes API server -* Kubernetes controller manager -* Kubernetes scheduler -* OpenShift API server -* OpenShift Controller Manager -* Operator Lifecycle Manager (OLM) - -[NOTE] -==== -Each {product-title} component is responsible for its monitoring configuration. For problems with the monitoring of an {product-title} component, open a -link:https://issues.redhat.com/secure/CreateIssueDetails!init.jspa?pid=12332330&summary=Monitoring_issue&issuetype=1&priority=10200&versions=12385624[Jira issue] against that component, not against the general monitoring component. -==== - -Other {product-title} framework components might be exposing metrics as well. For details, see their respective documentation. diff --git a/modules/monitoring-deploying-a-sample-service.adoc b/modules/monitoring-deploying-a-sample-service.adoc deleted file mode 100644 index 70718cf632f7..000000000000 --- a/modules/monitoring-deploying-a-sample-service.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -:_content-type: PROCEDURE -[id="deploying-a-sample-service_{context}"] -= Deploying a sample service - -To test monitoring of a service in a user-defined project, you can deploy a sample service. - -.Procedure - -. Create a YAML file for the service configuration. In this example, it is called `prometheus-example-app.yaml`. - -. Add the following deployment and service configuration details to the file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: ns1 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: prometheus-example-app - name: prometheus-example-app - namespace: ns1 -spec: - replicas: 1 - selector: - matchLabels: - app: prometheus-example-app - template: - metadata: - labels: - app: prometheus-example-app - spec: - containers: - - image: ghcr.io/rhobs/prometheus-example-app:0.4.1 - imagePullPolicy: IfNotPresent - name: prometheus-example-app ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: prometheus-example-app - name: prometheus-example-app - namespace: ns1 -spec: - ports: - - port: 8080 - protocol: TCP - targetPort: 8080 - name: web - selector: - app: prometheus-example-app - type: ClusterIP ----- -+ -This configuration deploys a service named `prometheus-example-app` in the user-defined `ns1` project. This service exposes the custom `version` metric. - -. 
Apply the configuration to the cluster: -+ -[source,terminal] ----- -$ oc apply -f prometheus-example-app.yaml ----- -+ -It takes some time to deploy the service. - -. You can check that the pod is running: -+ -[source,terminal] ----- -$ oc -n ns1 get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -prometheus-example-app-7857545cb7-sbgwq 1/1 Running 0 81m ----- diff --git a/modules/monitoring-determining-why-prometheus-is-consuming-disk-space.adoc b/modules/monitoring-determining-why-prometheus-is-consuming-disk-space.adoc deleted file mode 100644 index 088506f8131a..000000000000 --- a/modules/monitoring-determining-why-prometheus-is-consuming-disk-space.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/troubleshooting-monitoring-issues.adoc -// * support/troubleshooting/investigating-monitoring-issues.adoc - -:_content-type: PROCEDURE -[id="determining-why-prometheus-is-consuming-disk-space_{context}"] -= Determining why Prometheus is consuming a lot of disk space - -Developers can create labels to define attributes for metrics in the form of key-value pairs. The number of potential key-value pairs corresponds to the number of possible values for an attribute. An attribute that has an unlimited number of potential values is called an unbound attribute. For example, a `customer_id` attribute is unbound because it has an infinite number of possible values. - -Every assigned key-value pair has a unique time series. The use of many unbound attributes in labels can result in an exponential increase in the number of time series created. This can impact Prometheus performance and can consume a lot of disk space. - -You can use the following measures when Prometheus consumes a lot of disk: - -* *Check the number of scrape samples* that are being collected. - -* *Check the time series database (TSDB) status using the Prometheus HTTP API* for more information about which labels are creating the most time series. Doing so requires cluster administrator privileges. - -* *Reduce the number of unique time series that are created* by reducing the number of unbound attributes that are assigned to user-defined metrics. -+ -[NOTE] -==== -Using attributes that are bound to a limited set of possible values reduces the number of potential key-value pair combinations. -==== -+ -* *Enforce limits on the number of samples that can be scraped* across user-defined projects. This requires cluster administrator privileges. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. In the *Administrator* perspective, navigate to *Observe* -> *Metrics*. - -. Run the following Prometheus Query Language (PromQL) query in the *Expression* field. This returns the ten metrics that have the highest number of scrape samples: -+ -[source,terminal] ----- -topk(10,count by (job)({__name__=~".+"})) ----- - -. Investigate the number of unbound label values assigned to metrics with higher than expected scrape sample counts. -** *If the metrics relate to a user-defined project*, review the metrics key-value pairs assigned to your workload. These are implemented through Prometheus client libraries at the application level. Try to limit the number of unbound attributes referenced in your labels. 
- -** *If the metrics relate to a core {product-title} project*, create a Red Hat support case on the link:https://access.redhat.com/[Red Hat Customer Portal]. - -. Review the TSDB status using the Prometheus HTTP API by running the following commands as a cluster administrator: -+ -[source,terminal] ----- -$ oc login -u <username> -p <password> ----- -+ -[source,terminal] ----- -$ host=$(oc -n openshift-monitoring get route prometheus-k8s -ojsonpath={.spec.host}) ----- -+ -[source,terminal] ----- -$ token=$(oc whoami -t) ----- -+ -[source,terminal] ----- -$ curl -H "Authorization: Bearer $token" -k "https://$host/api/v1/status/tsdb" ----- -+ -.Example output -[source,terminal] ----- -"status": "success", ----- - - diff --git a/modules/monitoring-disabling-monitoring-for-user-defined-projects.adoc b/modules/monitoring-disabling-monitoring-for-user-defined-projects.adoc deleted file mode 100644 index 9d173f9baeda..000000000000 --- a/modules/monitoring-disabling-monitoring-for-user-defined-projects.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="disabling-monitoring-for-user-defined-projects_{context}"] -= Disabling monitoring for user-defined projects - -After enabling monitoring for user-defined projects, you can disable it again by setting `enableUserWorkload: false` in the cluster monitoring `ConfigMap` object. - -[NOTE] -==== -Alternatively, you can remove `enableUserWorkload: true` to disable monitoring for user-defined projects. -==== - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- -+ -.. Set `enableUserWorkload:` to `false` under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - enableUserWorkload: false ----- - -. Save the file to apply the changes. Monitoring for user-defined projects is then disabled automatically. - -. Check that the `prometheus-operator`, `prometheus-user-workload` and `thanos-ruler-user-workload` pods are terminated in the `openshift-user-workload-monitoring` project. This might take a short while: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pod ----- -+ -.Example output -[source,terminal] ----- -No resources found in openshift-user-workload-monitoring project. ----- - -[NOTE] -==== -The `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project is not automatically deleted when monitoring for user-defined projects is disabled. This is to preserve any custom configurations that you may have created in the `ConfigMap` object. 
-====
diff --git a/modules/monitoring-disabling-the-local-alertmanager.adoc b/modules/monitoring-disabling-the-local-alertmanager.adoc
deleted file mode 100644
index 07a0b2967398..000000000000
--- a/modules/monitoring-disabling-the-local-alertmanager.adoc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Module included in the following assemblies:
-//
-// * monitoring/configuring-the-monitoring-stack.adoc
-
-:_content-type: PROCEDURE
-[id="monitoring-disabling-the-local-alertmanager_{context}"]
-= Disabling the local Alertmanager
-
-A local Alertmanager that routes alerts from Prometheus instances is enabled by default in the `openshift-monitoring` project of the {product-title} monitoring stack.
-
-If you do not need the local Alertmanager, you can disable it by configuring the `cluster-monitoring-config` config map in the `openshift-monitoring` project.
-
-.Prerequisites
-
-* You have access to the cluster as a user with the `cluster-admin` role.
-* You have created the `cluster-monitoring-config` config map.
-* You have installed the OpenShift CLI (`oc`).
-
-.Procedure
-
-. Edit the `cluster-monitoring-config` config map in the `openshift-monitoring` project:
-+
-[source,terminal]
-----
-$ oc -n openshift-monitoring edit configmap cluster-monitoring-config
-----
-
-. Add `enabled: false` for the `alertmanagerMain` component under `data/config.yaml`:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: cluster-monitoring-config
-  namespace: openshift-monitoring
-data:
-  config.yaml: |
-    alertmanagerMain:
-      enabled: false
-----
-
-. Save the file to apply the changes. The Alertmanager instance is disabled automatically when you apply the change.
-
diff --git a/modules/monitoring-editing-silences.adoc b/modules/monitoring-editing-silences.adoc
deleted file mode 100644
index f4713ffb1b51..000000000000
--- a/modules/monitoring-editing-silences.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Module included in the following assemblies:
-//
-// * monitoring/managing-alerts.adoc
-
-:_content-type: PROCEDURE
-[id="editing-silences_{context}"]
-= Editing silences
-
-You can edit a silence, which expires the existing silence and creates a new silence with the changed configuration.
-
-.Procedure
-
-To edit a silence in the *Administrator* perspective:
-
-. Navigate to the *Observe* -> *Alerting* -> *Silences* page.
-
-. For the silence you want to modify, select the {kebab} in the last column and choose *Edit silence*.
-+
-Alternatively, you can select *Actions* -> *Edit Silence* in the *Silence Details* page for a silence.
-
-. In the *Edit Silence* page, enter your changes and select *Silence*. This expires the existing silence and creates a new silence with the chosen configuration.
-
-To edit a silence in the *Developer* perspective:
-
-. Navigate to the *Observe* -> *<project_name>* -> *Alerts* page.
-
-. Expand the details for an alert by selecting *>* to the left of the alert name. Select the name of the alert in the expanded view to open the *Alert Details* page for the alert.
-
-. Select the name of a silence in the *Silenced By* section in that page to navigate to the *Silence Details* page for the silence.
-
-. Select *Actions* -> *Edit Silence* in the *Silence Details* page for a silence.
-
-. In the *Edit Silence* page, enter your changes and select *Silence*. This expires the existing silence and creates a new silence with the chosen configuration.
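The "Disabling the local Alertmanager" module above states that the Alertmanager instance is disabled automatically once the change is saved, but it does not include a verification step. The following is a minimal verification sketch, assuming the local Alertmanager pods follow the usual `alertmanager-main-<n>` naming in the `openshift-monitoring` project; that pod name pattern is an assumption and can differ between releases.

[source,terminal]
----
$ oc -n openshift-monitoring get pods | grep alertmanager-main
----

If the local Alertmanager has been removed, the command returns no matching pods.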
diff --git a/modules/monitoring-enabling-a-separate-alertmanager-instance-for-user-defined-alert-routing.adoc b/modules/monitoring-enabling-a-separate-alertmanager-instance-for-user-defined-alert-routing.adoc deleted file mode 100644 index c7acc2363281..000000000000 --- a/modules/monitoring-enabling-a-separate-alertmanager-instance-for-user-defined-alert-routing.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/osd-enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/osd-enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/rosa-enabling-alert-routing-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="enabling-a-separate-alertmanager-instance-for-user-defined-alert-routing_{context}"] -= Enabling a separate Alertmanager instance for user-defined alert routing - -ifndef::openshift-rosa,openshift-dedicated[] -In some clusters, you might want to deploy a dedicated Alertmanager instance for user-defined projects, which can help reduce the load on the default platform Alertmanager instance and can better separate user-defined alerts from default platform alerts. -endif::[] -ifdef::openshift-rosa,openshift-dedicated[] -In {product-title}, you may want to deploy a dedicated Alertmanager instance for user-defined projects, which provides user-defined alerts separate from default platform alerts. -endif::[] -In these cases, you can optionally enable a separate instance of Alertmanager to send alerts for user-defined projects only. - -.Prerequisites - -ifdef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` or `dedicated-admin` role. -endif::[] -ifndef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` role. -endif::[] -* You have enabled monitoring for user-defined projects in the `cluster-monitoring-config` config map for the `openshift-monitoring` namespace. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `user-workload-monitoring-config` `ConfigMap` object: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- -+ -. Add `enabled: true` and `enableAlertmanagerConfig: true` in the `alertmanager` section under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - alertmanager: - enabled: true <1> - enableAlertmanagerConfig: true <2> ----- -<1> Set the `enabled` value to `true` to enable a dedicated instance of the Alertmanager for user-defined projects in a cluster. Set the value to `false` or omit the key entirely to disable the Alertmanager for user-defined projects. -If you set this value to `false` or if the key is omitted, user-defined alerts are routed to the default platform Alertmanager instance. -<2> Set the `enableAlertmanagerConfig` value to `true` to enable users to define their own alert routing configurations with `AlertmanagerConfig` objects. -+ -. Save the file to apply the changes. The dedicated instance of Alertmanager for user-defined projects starts automatically. 
- -.Verification - -* Verify that the `user-workload` Alertmanager instance has started: -+ -[source,terminal] ----- -# oc -n openshift-user-workload-monitoring get alertmanager ----- -+ -.Example output -+ -[source,terminal] ----- -NAME VERSION REPLICAS AGE -user-workload 0.24.0 2 100s ----- diff --git a/modules/monitoring-enabling-monitoring-for-user-defined-projects.adoc b/modules/monitoring-enabling-monitoring-for-user-defined-projects.adoc deleted file mode 100644 index 41f9aa4ea3bb..000000000000 --- a/modules/monitoring-enabling-monitoring-for-user-defined-projects.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="enabling-monitoring-for-user-defined-projects_{context}"] -= Enabling monitoring for user-defined projects - -Cluster administrators can enable monitoring for user-defined projects by setting the `enableUserWorkload: true` field in the cluster monitoring `ConfigMap` object. - -[IMPORTANT] -==== -In {product-title} {product-version} you must remove any custom Prometheus instances before enabling monitoring for user-defined projects. -==== - -[NOTE] -==== -You must have access to the cluster as a user with the `cluster-admin` role to enable monitoring for user-defined projects in {product-title}. Cluster administrators can then optionally grant users permission to configure the components that are responsible for monitoring user-defined projects. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have created the `cluster-monitoring-config` `ConfigMap` object. -* You have optionally created and configured the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project. You can add configuration options to this `ConfigMap` object for the components that monitor user-defined projects. -+ -[NOTE] -==== -Every time you save configuration changes to the `user-workload-monitoring-config` `ConfigMap` object, the pods in the `openshift-user-workload-monitoring` project are redeployed. It can sometimes take a while for these components to redeploy. You can create and configure the `ConfigMap` object before you first enable monitoring for user-defined projects, to prevent having to redeploy the pods often. -==== - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add `enableUserWorkload: true` under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - enableUserWorkload: true <1> ----- -<1> When set to `true`, the `enableUserWorkload` parameter enables monitoring for user-defined projects in a cluster. - -. Save the file to apply the changes. Monitoring for user-defined projects is then enabled automatically. -+ -[WARNING] -==== -When changes are saved to the `cluster-monitoring-config` `ConfigMap` object, the pods and other resources in the `openshift-monitoring` project might be redeployed. The running monitoring processes in that project might also be restarted. -==== - -. Check that the `prometheus-operator`, `prometheus-user-workload` and `thanos-ruler-user-workload` pods are running in the `openshift-user-workload-monitoring` project. 
It might take a short while for the pods to start: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -prometheus-operator-6f7b748d5b-t7nbg 2/2 Running 0 3h -prometheus-user-workload-0 4/4 Running 1 3h -prometheus-user-workload-1 4/4 Running 1 3h -thanos-ruler-user-workload-0 3/3 Running 0 3h -thanos-ruler-user-workload-1 3/3 Running 0 3h ----- diff --git a/modules/monitoring-enabling-query-logging-for-thanos-querier.adoc b/modules/monitoring-enabling-query-logging-for-thanos-querier.adoc deleted file mode 100644 index af0aa654c556..000000000000 --- a/modules/monitoring-enabling-query-logging-for-thanos-querier.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="enabling-query-logging-for-thanos-querier_{context}"] -= Enabling query logging for Thanos Querier - -[role="_abstract"] -For default platform monitoring in the `openshift-monitoring` project, you can enable the Cluster Monitoring Operator to log all queries run by Thanos Querier. - -[IMPORTANT] -==== -Because log rotation is not supported, only enable this feature temporarily when you need to troubleshoot an issue. After you finish troubleshooting, disable query logging by reverting the changes you made to the `ConfigMap` object to enable the feature. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created the `cluster-monitoring-config` `ConfigMap` object. - -.Procedure - -You can enable query logging for Thanos Querier in the `openshift-monitoring` project: - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- -+ -. Add a `thanosQuerier` section under `data/config.yaml` and add values as shown in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - thanosQuerier: - enableRequestLogging: <value> <1> - logLevel: <value> <2> - - ----- -<1> Set the value to `true` to enable logging and `false` to disable logging. The default value is `false`. -<2> Set the value to `debug`, `info`, `warn`, or `error`. If no value exists for `logLevel`, the log level defaults to `error`. -+ -. Save the file to apply the changes. -+ -[WARNING] -==== -When you save changes to a monitoring config map, pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== - -.Verification - -. Verify that the Thanos Querier pods are running. The following sample command lists the status of pods in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get pods ----- -+ -. Run a test query using the following sample commands as a model: -+ -[source,terminal] ----- -$ token=`oc create token prometheus-k8s -n openshift-monitoring` -$ oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -k -H "Authorization: Bearer $token" 'https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query?query=cluster_version' ----- -. 
Run the following command to read the query log: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring logs <thanos_querier_pod_name> -c thanos-query ----- -+ -[NOTE] -==== -Because the `thanos-querier` pods are highly available (HA) pods, you might be able to see logs in only one pod. -==== -+ -. After you examine the logged query information, disable query logging by changing the `enableRequestLogging` value to `false` in the config map. - diff --git a/modules/monitoring-enabling-the-platform-alertmanager-instance-for-user-defined-alert-routing.adoc b/modules/monitoring-enabling-the-platform-alertmanager-instance-for-user-defined-alert-routing.adoc deleted file mode 100644 index 1769eb2fc62a..000000000000 --- a/modules/monitoring-enabling-the-platform-alertmanager-instance-for-user-defined-alert-routing.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-alert-routing-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="enabling-the-platform-alertmanager-instance-for-user-defined-alert-routing_{context}"] -= Enabling the platform Alertmanager instance for user-defined alert routing - -You can allow users to create user-defined alert routing configurations that use the main platform instance of Alertmanager. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- -+ -. Add `enableUserAlertmanagerConfig: true` in the `alertmanagerMain` section under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - enableUserAlertmanagerConfig: true <1> ----- -<1> Set the `enableUserAlertmanagerConfig` value to `true` to allow users to create user-defined alert routing configurations that use the main platform instance of Alertmanager. -+ -. Save the file to apply the changes. diff --git a/modules/monitoring-excluding-a-user-defined-project-from-monitoring.adoc b/modules/monitoring-excluding-a-user-defined-project-from-monitoring.adoc deleted file mode 100644 index 03cee1925775..000000000000 --- a/modules/monitoring-excluding-a-user-defined-project-from-monitoring.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="excluding-a-user-defined-project-from-monitoring_{context}"] -= Excluding a user-defined project from monitoring - -Individual user-defined projects can be excluded from user workload monitoring. To do so, simply add the `openshift.io/user-monitoring` label to the project's namespace with a value of `false`. - -.Procedure - -. Add the label to the project namespace: -+ -[source,terminal] ----- -$ oc label namespace my-project 'openshift.io/user-monitoring=false' ----- -+ -. To re-enable monitoring, remove the label from the namespace: -+ -[source,terminal] ----- -$ oc label namespace my-project 'openshift.io/user-monitoring-' ----- -+ -[NOTE] -==== -If there were any active monitoring targets for the project, it may take a few minutes for Prometheus to stop scraping them after adding the label. 
-====
diff --git a/modules/monitoring-expiring-silences.adoc b/modules/monitoring-expiring-silences.adoc
deleted file mode 100644
index 069a41f4a917..000000000000
--- a/modules/monitoring-expiring-silences.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Module included in the following assemblies:
-//
-// * monitoring/managing-alerts.adoc
-
-:_content-type: PROCEDURE
-[id="expiring-silences_{context}"]
-= Expiring silences
-
-You can expire a silence. Expiring a silence deactivates it forever.
-
-.Procedure
-
-To expire a silence in the *Administrator* perspective:
-
-. Navigate to the *Observe* -> *Alerting* -> *Silences* page.
-
-. For the silence you want to modify, select the {kebab} in the last column and choose *Expire silence*.
-+
-Alternatively, you can select *Actions* -> *Expire Silence* in the *Silence Details* page for a silence.
-
-To expire a silence in the *Developer* perspective:
-
-. Navigate to the *Observe* -> *<project_name>* -> *Alerts* page.
-
-. Expand the details for an alert by selecting *>* to the left of the alert name. Select the name of the alert in the expanded view to open the *Alert Details* page for the alert.
-
-. Select the name of a silence in the *Silenced By* section in that page to navigate to the *Silence Details* page for the silence.
-
-. Select *Actions* -> *Expire Silence* in the *Silence Details* page for a silence.
diff --git a/modules/monitoring-exploring-the-visualized-metrics.adoc b/modules/monitoring-exploring-the-visualized-metrics.adoc
deleted file mode 100644
index dc6be6a615d3..000000000000
--- a/modules/monitoring-exploring-the-visualized-metrics.adoc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Module included in the following assemblies:
-//
-// * monitoring/managing-metrics.adoc
-
-:_content-type: PROCEDURE
-[id="exploring-the-visualized-metrics_{context}"]
-= Exploring the visualized metrics
-
-After running the queries, the metrics are displayed on an interactive plot. The X-axis in the plot represents time and the Y-axis represents metrics values. Each metric is shown as a colored line on the graph. You can manipulate the plot interactively and explore the metrics.
-
-.Procedure
-
-In the *Administrator* perspective:
-
-. Initially, all metrics from all enabled queries are shown on the plot. You can select which metrics are shown.
-+
-[NOTE]
-====
-By default, the query table shows an expanded view that lists every metric and its current value. You can select *˅* to minimize the expanded view for a query.
-====
-
-* To hide all metrics from a query, click {kebab} for the query and click *Hide all series*.
-
-* To hide a specific metric, go to the query table and click the colored square near the metric name.
-
-. To zoom into the plot and change the time range, do one of the following:
-
-* Visually select the time range by clicking and dragging on the plot horizontally.
-
-* Use the menu in the upper left corner to select the time range.
-
-. To reset the time range, select *Reset Zoom*.
-
-. To display outputs for all queries at a specific point in time, hold the mouse cursor on the plot at that point. The query outputs will appear in a pop-up box.
-
-. To hide the plot, select *Hide Graph*.
-
-In the *Developer* perspective:
-
-. To zoom into the plot and change the time range, do one of the following:
-
-* Visually select the time range by clicking and dragging on the plot horizontally.
-
-* Use the menu in the upper left corner to select the time range.
-
-. 
To reset the time range, select *Reset Zoom*. - -. To display outputs for all queries at a specific point in time, hold the mouse cursor on the plot at that point. The query outputs will appear in a pop-up box. diff --git a/modules/monitoring-exposing-custom-application-metrics-for-horizontal-pod-autoscaling.adoc b/modules/monitoring-exposing-custom-application-metrics-for-horizontal-pod-autoscaling.adoc deleted file mode 100644 index b7a6d2d08d78..000000000000 --- a/modules/monitoring-exposing-custom-application-metrics-for-horizontal-pod-autoscaling.adoc +++ /dev/null @@ -1,276 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/application-monitoring.adoc - -[id="exposing-custom-application-metrics-for-horizontal-pod-autoscaling_{context}"] -= Exposing custom application metrics for horizontal pod autoscaling - -You can use the `prometheus-adapter` resource to expose custom application metrics for the horizontal pod autoscaler. - -.Prerequisites - -* You have a custom Prometheus instance installed. In this example, it is presumed that Prometheus was installed in a user-defined `custom-prometheus` project. -+ -[NOTE] -==== -Custom Prometheus instances and the Prometheus Operator installed through Operator Lifecycle Manager (OLM) can cause issues with user-defined workload monitoring if it is enabled. Custom Prometheus instances are not supported in {product-title}. -==== -+ -* You have deployed an application and a service in a user-defined project. In this example, it is presumed that the application and its service monitor were installed in a user-defined `custom-prometheus` project. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for your configuration. In this example, the file is called `deploy.yaml`. - -. 
Add configuration details for creating the service account, roles, and role bindings for `prometheus-adapter`: -+ -[source,yaml,subs=quotes] ----- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: custom-metrics-apiserver - namespace: custom-prometheus ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: custom-metrics-server-resources -rules: -- apiGroups: - - custom.metrics.k8s.io - resources: ["\*"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: custom-metrics-resource-reader -rules: -- apiGroups: - - "" - resources: - - namespaces - - pods - - services - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: custom-metrics:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: custom-metrics-apiserver - namespace: custom-prometheus ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: custom-metrics-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: custom-metrics-apiserver - namespace: custom-prometheus ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: custom-metrics-resource-reader -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: custom-metrics-resource-reader -subjects: -- kind: ServiceAccount - name: custom-metrics-apiserver - namespace: custom-prometheus ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: hpa-controller-custom-metrics -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: custom-metrics-server-resources -subjects: -- kind: ServiceAccount - name: horizontal-pod-autoscaler - namespace: kube-system ---- ----- - -. Add configuration details for the custom metrics for `prometheus-adapter`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: adapter-config - namespace: custom-prometheus -data: - config.yaml: | - rules: - - seriesQuery: 'http_requests_total{namespace!="",pod!=""}' <1> - resources: - overrides: - namespace: {resource: "namespace"} - pod: {resource: "pod"} - service: {resource: "service"} - name: - matches: "^(.*)_total" - as: "${1}_per_second" <2> - metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[2m])) by (<<.GroupBy>>)' ---- ----- -<1> Specifies the chosen metric to be the number of HTTP requests. -<2> Specifies the frequency for the metric. - -. Add configuration details for registering `prometheus-adapter` as an API service: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: Service -metadata: - annotations: - service.beta.openshift.io/serving-cert-secret-name: prometheus-adapter-tls - labels: - name: prometheus-adapter - name: prometheus-adapter - namespace: custom-prometheus -spec: - ports: - - name: https - port: 443 - targetPort: 6443 - selector: - app: prometheus-adapter - type: ClusterIP ---- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.custom.metrics.k8s.io -spec: - service: - name: prometheus-adapter - namespace: custom-prometheus - group: custom.metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 ---- ----- - -. 
List the Prometheus Adapter image: -+ -[source,terminal] ----- -$ oc get -n openshift-monitoring deploy/prometheus-adapter -o jsonpath="{..image}" ----- - -. Add configuration details for deploying `prometheus-adapter`: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: prometheus-adapter - name: prometheus-adapter - namespace: custom-prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: prometheus-adapter - template: - metadata: - labels: - app: prometheus-adapter - name: prometheus-adapter - spec: - serviceAccountName: custom-metrics-apiserver - containers: - - name: prometheus-adapter - image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a46915a206cd7d97f240687c618dd59e8848fcc3a0f51e281f3384153a12c3e0 <1> - args: - - --secure-port=6443 - - --tls-cert-file=/var/run/serving-cert/tls.crt - - --tls-private-key-file=/var/run/serving-cert/tls.key - - --logtostderr=true - - --prometheus-url=http://prometheus-operated.default.svc:9090/ - - --metrics-relist-interval=1m - - --v=4 - - --config=/etc/adapter/config.yaml - ports: - - containerPort: 6443 - volumeMounts: - - mountPath: /var/run/serving-cert - name: volume-serving-cert - readOnly: true - - mountPath: /etc/adapter/ - name: config - readOnly: true - - mountPath: /tmp - name: tmp-vol - volumes: - - name: volume-serving-cert - secret: - secretName: prometheus-adapter-tls - - name: config - configMap: - name: adapter-config - - name: tmp-vol - emptyDir: {} ----- -<1> Specifies the Prometheus Adapter image found in the previous step. - -. Apply the configuration to the cluster: -+ -[source,terminal] ----- -$ oc apply -f deploy.yaml ----- -+ -.Example output -[source,terminal] ----- -serviceaccount/custom-metrics-apiserver created -clusterrole.rbac.authorization.k8s.io/custom-metrics-server-resources created -clusterrole.rbac.authorization.k8s.io/custom-metrics-resource-reader created -clusterrolebinding.rbac.authorization.k8s.io/custom-metrics:system:auth-delegator created -rolebinding.rbac.authorization.k8s.io/custom-metrics-auth-reader created -clusterrolebinding.rbac.authorization.k8s.io/custom-metrics-resource-reader created -clusterrolebinding.rbac.authorization.k8s.io/hpa-controller-custom-metrics created -configmap/adapter-config created -service/prometheus-adapter created -apiservice.apiregistration.k8s.io/v1.custom.metrics.k8s.io created -deployment.apps/prometheus-adapter created ----- - -. Verify that the `prometheus-adapter` pod in your user-defined project is in a `Running` state. In this example the project is `custom-prometheus`: -+ -[source,terminal] ----- -$ oc -n custom-prometheus get pods prometheus-adapter-<string> ----- - -. The metrics for the application are now exposed and they can be used to configure horizontal pod autoscaling. diff --git a/modules/monitoring-getting-detailed-information-about-a-target.adoc b/modules/monitoring-getting-detailed-information-about-a-target.adoc deleted file mode 100644 index 15f6b2f71d6f..000000000000 --- a/modules/monitoring-getting-detailed-information-about-a-target.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics-targets.adoc - -:_content-type: PROCEDURE -[id="getting-detailed-information-about-a-target_{context}"] -= Getting detailed information about a target - -On the **Target details** page, you can view detailed information about a metric target. 
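The "Exposing custom application metrics for horizontal pod autoscaling" module above ends by noting that the exposed metrics can be used to configure horizontal pod autoscaling, but it does not show an example autoscaler. The following is a minimal sketch of a `HorizontalPodAutoscaler` that consumes the `http_requests_per_second` metric produced by the adapter rule in that module. The Deployment name `example-app`, the autoscaler name, the replica bounds, and the target value are hypothetical placeholders, and `autoscaling/v2` assumes a recent Kubernetes level (older clusters use `autoscaling/v2beta2`).

[source,yaml]
----
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-app-hpa            # hypothetical name
  namespace: custom-prometheus
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-app              # hypothetical Deployment to scale
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods
    pods:
      metric:
        name: http_requests_per_second   # metric exposed through the prometheus-adapter rule
      target:
        type: AverageValue
        averageValue: "10"         # hypothetical per-pod target rate
----

With such an object in place, the horizontal pod autoscaler queries the custom metrics API served by `prometheus-adapter` and scales the Deployment when the average per-pod request rate exceeds the target.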
- -.Prerequisites - -* You have access to the cluster as an administrator for the project for which you want to view metrics targets. - -.Procedure - -*To view detailed information about a target in the Administrator perspective*: - -. Open the {product-title} web console and navigate to *Observe* -> *Targets*. - -. Optional: Filter the targets by status and source by selecting filters in the *Filter* list. - -. Optional: Search for a target by name or label by using the *Text* or *Label* field next to the search box. - -. Optional: Sort the targets by clicking one or more of the *Endpoint*, *Status*, *Namespace*, *Last Scrape*, and *Scrape Duration* column headers. - -. Click the URL in the *Endpoint* column for a target to navigate to its *Target details* page. This page provides information about the target, including: -+ --- -** The endpoint URL being scraped for metrics -** The current *Up* or *Down* status of the target -** A link to the namespace -** A link to the ServiceMonitor details -** Labels attached to the target -** The most recent time that the target was scraped for metrics --- - diff --git a/modules/monitoring-getting-information-about-alerts-silences-and-alerting-rules.adoc b/modules/monitoring-getting-information-about-alerts-silences-and-alerting-rules.adoc deleted file mode 100644 index 7c1d4253cdee..000000000000 --- a/modules/monitoring-getting-information-about-alerts-silences-and-alerting-rules.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="getting-information-about-alerts-silences-and-alerting-rules_{context}"] -= Getting information about alerts, silences, and alerting rules - -The Alerting UI provides detailed information about alerts and their governing alerting rules and silences. - -.Prerequisites - -* You have access to the cluster as a developer or as a user with view permissions for the project that you are viewing metrics for. - -.Procedure - -*To obtain information about alerts in the Administrator perspective*: - -. Open the {product-title} web console and navigate to the *Observe* -> *Alerting* -> *Alerts* page. - -. Optional: Search for alerts by name using the *Name* field in the search list. - -. Optional: Filter alerts by state, severity, and source by selecting filters in the *Filter* list. - -. Optional: Sort the alerts by clicking one or more of the *Name*, *Severity*, *State*, and *Source* column headers. - -. Select the name of an alert to navigate to its *Alert Details* page. The page includes a graph that illustrates alert time series data. It also provides information about the alert, including: -+ --- -** A description of the alert -** Messages associated with the alerts -** Labels attached to the alert -** A link to its governing alerting rule -** Silences for the alert, if any exist --- - -*To obtain information about silences in the Administrator perspective*: - -. Navigate to the *Observe* -> *Alerting* -> *Silences* page. - -. Optional: Filter the silences by name using the *Search by name* field. - -. Optional: Filter silences by state by selecting filters in the *Filter* list. By default, *Active* and *Pending* filters are applied. - -. Optional: Sort the silences by clicking one or more of the *Name*, *Firing Alerts*, and *State* column headers. - -. Select the name of a silence to navigate to its *Silence Details* page. 
The page includes the following details: -+ --- -* Alert specification -* Start time -* End time -* Silence state -* Number and list of firing alerts --- - -*To obtain information about alerting rules in the Administrator perspective*: - -. Navigate to the *Observe* -> *Alerting* -> *Alerting Rules* page. - -. Optional: Filter alerting rules by state, severity, and source by selecting filters in the *Filter* list. - -. Optional: Sort the alerting rules by clicking one or more of the *Name*, *Severity*, *Alert State*, and *Source* column headers. - -. Select the name of an alerting rule to navigate to its *Alerting Rule Details* page. The page provides the following details about the alerting rule: -+ --- -** Alerting rule name, severity, and description -** The expression that defines the condition for firing the alert -** The time for which the condition should be true for an alert to fire -** A graph for each alert governed by the alerting rule, showing the value with which the alert is firing -** A table of all alerts governed by the alerting rule --- - -*To obtain information about alerts, silences, and alerting rules in the Developer perspective*: - -. Navigate to the *Observe* -> *<project_name>* -> *Alerts* page. - -. View details for an alert, silence, or an alerting rule: - -* *Alert Details* can be viewed by selecting *>* to the left of an alert name and then selecting the alert in the list. - -* *Silence Details* can be viewed by selecting a silence in the *Silenced By* section of the *Alert Details* page. The *Silence Details* page includes the following information: -+ --- -* Alert specification -* Start time -* End time -* Silence state -* Number and list of firing alerts --- - -* *Alerting Rule Details* can be viewed by selecting *View Alerting Rule* in the {kebab} menu on the right of an alert in the *Alerts* page. - -[NOTE] -==== -Only alerts, silences, and alerting rules relating to the selected project are displayed in the *Developer* perspective. -==== diff --git a/modules/monitoring-granting-user-permissions-using-the-cli.adoc b/modules/monitoring-granting-user-permissions-using-the-cli.adoc deleted file mode 100644 index 2612064c6260..000000000000 --- a/modules/monitoring-granting-user-permissions-using-the-cli.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="granting-user-permissions-using-the-cli_{context}"] -= Granting user permissions by using the CLI - -You can grant users permissions to monitor their own projects, by using the OpenShift CLI (`oc`). - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* The user account that you are assigning the role to already exists. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* Assign a monitoring role to a user for a project: -+ -[source,terminal] ----- -$ oc policy add-role-to-user <role> <user> -n <namespace> <1> ----- -<1> Substitute `<role>` with `monitoring-rules-view`, `monitoring-rules-edit`, or `monitoring-edit`. -+ -[IMPORTANT] -==== -Whichever role you choose, you must bind it against a specific project as a cluster administrator. -==== -+ -As an example, substitute `<role>` with `monitoring-edit`, `<user>` with `johnsmith`, and `<namespace>` with `ns1`. This assigns the user `johnsmith` permission to set up metrics collection and to create alerting rules in the `ns1` namespace. 
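For readers who prefer to manage the binding declaratively, the following manifest is a rough, illustrative equivalent of the `oc policy add-role-to-user monitoring-edit johnsmith -n ns1` example above. The binding name `monitoring-edit-johnsmith` is a hypothetical value chosen for clarity, not a name that the command generates:

[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: monitoring-edit-johnsmith # illustrative name; any unique name works
  namespace: ns1                  # project in which the permissions apply
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: monitoring-edit           # or monitoring-rules-view / monitoring-rules-edit
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: johnsmith
----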
diff --git a/modules/monitoring-granting-user-permissions-using-the-web-console.adoc b/modules/monitoring-granting-user-permissions-using-the-web-console.adoc deleted file mode 100644 index 7a1c51032390..000000000000 --- a/modules/monitoring-granting-user-permissions-using-the-web-console.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="granting-user-permissions-using-the-web-console_{context}"] -= Granting user permissions by using the web console - -You can grant users permissions to monitor their own projects, by using the {product-title} web console. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* The user account that you are assigning the role to already exists. - -.Procedure - -. In the *Administrator* perspective within the {product-title} web console, navigate to *User Management* -> *Role Bindings* -> *Create Binding*. - -. In the *Binding Type* section, select the "Namespace Role Binding" type. - -. In the *Name* field, enter a name for the role binding. - -. In the *Namespace* field, select the user-defined project where you want to grant the access. -+ -[IMPORTANT] -==== -The monitoring role will be bound to the project that you apply in the *Namespace* field. The permissions that you grant to a user by using this procedure will apply only to the selected project. -==== - -. Select `monitoring-rules-view`, `monitoring-rules-edit`, or `monitoring-edit` in the *Role Name* list. - -. In the *Subject* section, select *User*. - -. In the *Subject Name* field, enter the name of the user. - -. Select *Create* to apply the role binding. diff --git a/modules/monitoring-granting-users-permission-to-configure-alert-routing-for-user-defined-projects.adoc b/modules/monitoring-granting-users-permission-to-configure-alert-routing-for-user-defined-projects.adoc deleted file mode 100644 index 0032bd3a2fb8..000000000000 --- a/modules/monitoring-granting-users-permission-to-configure-alert-routing-for-user-defined-projects.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/osd-enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/osd-enabling-alert-routing-for-user-defined-projects.adoc -// * monitoring/rosa-enabling-alert-routing-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="granting-users-permission-to-configure-alert-routing-for-user-defined-projects_{context}"] -= Granting users permission to configure alert routing for user-defined projects - -[role="_abstract"] -You can grant users permission to configure alert routing for user-defined projects. - -.Prerequisites - -ifdef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` or `dedicated-admin` role. -endif::[] -ifndef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` role. -endif::[] -* The user account that you are assigning the role to already exists. -* You have installed the OpenShift CLI (`oc`). -* You have enabled monitoring for user-defined projects. 
- -.Procedure - -* Assign the `alert-routing-edit` role to a user in the user-defined project: -+ -[source,terminal] ----- -$ oc -n <namespace> adm policy add-role-to-user alert-routing-edit <user> <1> ----- -<1> For `<namespace>`, substitute the namespace for the user-defined project, such as `ns1`. For `<user>`, substitute the username for the account to which you want to assign the role. \ No newline at end of file diff --git a/modules/monitoring-granting-users-permission-to-configure-monitoring-for-user-defined-projects.adoc b/modules/monitoring-granting-users-permission-to-configure-monitoring-for-user-defined-projects.adoc deleted file mode 100644 index 3f79d319117c..000000000000 --- a/modules/monitoring-granting-users-permission-to-configure-monitoring-for-user-defined-projects.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -:_content-type: PROCEDURE -[id="granting-users-permission-to-configure-monitoring-for-user-defined-projects_{context}"] -= Granting users permission to configure monitoring for user-defined projects - -You can grant users permission to configure monitoring for user-defined projects. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* The user account that you are assigning the role to already exists. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -* Assign the `user-workload-monitoring-config-edit` role to a user in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring adm policy add-role-to-user \ - user-workload-monitoring-config-edit <user> \ - --role-namespace openshift-user-workload-monitoring ----- diff --git a/modules/monitoring-granting-users-permission-to-monitor-user-defined-projects.adoc b/modules/monitoring-granting-users-permission-to-monitor-user-defined-projects.adoc deleted file mode 100644 index 2dd0f346d301..000000000000 --- a/modules/monitoring-granting-users-permission-to-monitor-user-defined-projects.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-monitoring-for-user-defined-projects.adoc - -[id="granting-users-permission-to-monitor-user-defined-projects_{context}"] -= Granting users permission to monitor user-defined projects - -Cluster administrators can monitor all core {product-title} and user-defined projects. - -Cluster administrators can grant developers and other users permission to monitor their own projects. Privileges are granted by assigning one of the following monitoring roles: - -* The *monitoring-rules-view* role provides read access to `PrometheusRule` custom resources for a project. - -* The *monitoring-rules-edit* role grants a user permission to create, modify, and delete `PrometheusRule` custom resources for a project. - -* The *monitoring-edit* role grants the same privileges as the `monitoring-rules-edit` role. Additionally, it enables a user to create new scrape targets for services or pods. With this role, you can also create, modify, and delete `ServiceMonitor` and `PodMonitor` resources. - -You can also grant users permission to configure the components that are responsible for monitoring user-defined projects: - -* The *user-workload-monitoring-config-edit* role in the `openshift-user-workload-monitoring` project enables you to edit the `user-workload-monitoring-config` `ConfigMap` object.
With this role, you can edit the `ConfigMap` object to configure Prometheus, Prometheus Operator, and Thanos Ruler for user-defined workload monitoring. - -You can also grant users permission to configure alert routing for user-defined projects: - -* The **alert-routing-edit** role grants a user permission to create, update, and delete `AlertmanagerConfig` custom resources for a project. - -This section provides details on how to assign these roles by using the {product-title} web console or the CLI. diff --git a/modules/monitoring-installation-progress.adoc b/modules/monitoring-installation-progress.adoc deleted file mode 100644 index c7ca4ac5fada..000000000000 --- a/modules/monitoring-installation-progress.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-installations.adoc - -:_content-type: PROCEDURE -[id="monitoring-installation-progress_{context}"] -= Monitoring installation progress - -You can monitor high-level installation, bootstrap, and control plane logs as an {product-title} installation progresses. This provides greater visibility into how an installation progresses and helps identify the stage at which an installation failure occurs. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have SSH access to your hosts. -* You have the fully qualified domain names of the bootstrap and control plane nodes. -+ -[NOTE] -==== -The initial `kubeadmin` password can be found in `<install_directory>/auth/kubeadmin-password` on the installation host. -==== - -.Procedure - -. Watch the installation log as the installation progresses: -+ -[source,terminal] ----- -$ tail -f ~/<installation_directory>/.openshift_install.log ----- - -. Monitor the `bootkube.service` journald unit log on the bootstrap node, after it has booted. This provides visibility into the bootstrapping of the first control plane. Replace `<bootstrap_fqdn>` with the bootstrap node's fully qualified domain name: -+ -[source,terminal] ----- -$ ssh core@<bootstrap_fqdn> journalctl -b -f -u bootkube.service ----- -+ -[NOTE] -==== -The `bootkube.service` log on the bootstrap node outputs etcd `connection refused` errors, indicating that the bootstrap server is unable to connect to etcd on control plane nodes. After etcd has started on each control plane node and the nodes have joined the cluster, the errors should stop. -==== -+ -. Monitor `kubelet.service` journald unit logs on control plane nodes, after they have booted. This provides visibility into control plane node agent activity. -.. Monitor the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u kubelet ----- -.. If the API is not functional, review the logs using SSH instead. Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> journalctl -b -f -u kubelet.service ----- - -. Monitor `crio.service` journald unit logs on control plane nodes, after they have booted. This provides visibility into control plane node CRI-O container runtime activity. -.. Monitor the logs using `oc`: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master -u crio ----- -+ -.. If the API is not functional, review the logs using SSH instead. 
Replace `<master-node>.<cluster_name>.<base_domain>` with appropriate values: -+ -[source,terminal] ----- -$ ssh core@master-N.cluster_name.sub_domain.domain journalctl -b -f -u crio.service ----- diff --git a/modules/monitoring-investigating-why-user-defined-metrics-are-unavailable.adoc b/modules/monitoring-investigating-why-user-defined-metrics-are-unavailable.adoc deleted file mode 100644 index d8272a40cb24..000000000000 --- a/modules/monitoring-investigating-why-user-defined-metrics-are-unavailable.adoc +++ /dev/null @@ -1,158 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/troubleshooting-monitoring-issues.adoc -// * support/troubleshooting/investigating-monitoring-issues.adoc - -:_content-type: PROCEDURE -[id="investigating-why-user-defined-metrics-are-unavailable_{context}"] -= Investigating why user-defined metrics are unavailable - -`ServiceMonitor` resources enable you to determine how to use the metrics exposed by a service in user-defined projects. Follow the steps outlined in this procedure if you have created a `ServiceMonitor` resource but cannot see any corresponding metrics in the Metrics UI. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have enabled and configured monitoring for user-defined workloads. -* You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have created a `ServiceMonitor` resource. - -.Procedure - -. *Check that the corresponding labels match* in the service and `ServiceMonitor` resource configurations. -.. Obtain the label defined in the service. The following example queries the `prometheus-example-app` service in the `ns1` project: -+ -[source,terminal] ----- -$ oc -n ns1 get service prometheus-example-app -o yaml ----- -+ -.Example output -[source,terminal] ----- - labels: - app: prometheus-example-app ----- -+ -.. Check that the `matchLabels` `app` label in the `ServiceMonitor` resource configuration matches the label output in the preceding step: -+ -[source,terminal] ----- -$ oc -n ns1 get servicemonitor prometheus-example-monitor -o yaml ----- -+ -.Example output ----- -spec: - endpoints: - - interval: 30s - port: web - scheme: http - selector: - matchLabels: - app: prometheus-example-app ----- -+ -[NOTE] -==== -You can check service and `ServiceMonitor` resource labels as a developer with view permissions for the project. -==== - -. *Inspect the logs for the Prometheus Operator* in the `openshift-user-workload-monitoring` project. -.. List the pods in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -prometheus-operator-776fcbbd56-2nbfm 2/2 Running 0 132m -prometheus-user-workload-0 5/5 Running 1 132m -prometheus-user-workload-1 5/5 Running 1 132m -thanos-ruler-user-workload-0 3/3 Running 0 132m -thanos-ruler-user-workload-1 3/3 Running 0 132m ----- -+ -.. Obtain the logs from the `prometheus-operator` container in the `prometheus-operator` pod. 
In the following example, the pod is called `prometheus-operator-776fcbbd56-2nbfm`: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring logs prometheus-operator-776fcbbd56-2nbfm -c prometheus-operator ----- -+ -If there is an issue with the service monitor, the logs might include an error similar to this example: -+ -[source,terminal] ----- -level=warn ts=2020-08-10T11:48:20.906739623Z caller=operator.go:1829 component=prometheusoperator msg="skipping servicemonitor" error="it accesses file system via bearer token file which Prometheus specification prohibits" servicemonitor=eagle/eagle namespace=openshift-user-workload-monitoring prometheus=user-workload ----- - -. *Review the target status for your endpoint* on the *Metrics targets* page in the {product-title} web console UI. -.. Log in to the {product-title} web console and navigate to *Observe* → *Targets* in the *Administrator* perspective. - -.. Locate the metrics endpoint in the list, and review the status of the target in the *Status* column. - -.. If the *Status* is *Down*, click the URL for the endpoint to view more information on the *Target Details* page for that metrics target. - -. *Configure debug level logging for the Prometheus Operator* in the `openshift-user-workload-monitoring` project. -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- -+ -.. Add `logLevel: debug` for `prometheusOperator` under `data/config.yaml` to set the log level to `debug`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheusOperator: - logLevel: debug ----- -+ -.. Save the file to apply the changes. -+ -[NOTE] -==== -The `prometheus-operator` in the `openshift-user-workload-monitoring` project restarts automatically when you apply the log-level change. -==== -+ -.. Confirm that the `debug` log-level has been applied to the `prometheus-operator` deployment in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get deploy prometheus-operator -o yaml | grep "log-level" ----- -+ -.Example output -[source,terminal] ----- - - --log-level=debug ----- -+ -Debug level logging will show all calls made by the Prometheus Operator. -+ -.. Check that the `prometheus-operator` pod is running: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pods ----- -+ -[NOTE] -==== -If an unrecognized Prometheus Operator `logLevel` value is included in the config map, the `prometheus-operator` pod might not restart successfully. -==== -+ -.. Review the debug logs to see if the Prometheus Operator is using the `ServiceMonitor` resource. Review the logs for other related errors.
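As a minimal sketch of the label relationship that the first step checks, the following pair shows a service and a `ServiceMonitor` resource whose labels and port names line up. The names reuse the `prometheus-example-app` and `ns1` examples from this procedure; the port number `8080` is an assumption for illustration only:

[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: prometheus-example-app
  namespace: ns1
  labels:
    app: prometheus-example-app    # label that the ServiceMonitor selects on
spec:
  ports:
  - name: web                      # named port referenced by the ServiceMonitor
    port: 8080
    targetPort: 8080
  selector:
    app: prometheus-example-app
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-example-monitor
  namespace: ns1
spec:
  endpoints:
  - interval: 30s
    port: web                      # must match the service port name above
    scheme: http
  selector:
    matchLabels:
      app: prometheus-example-app  # must match the service label above
----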
diff --git a/modules/monitoring-limiting-scrape-samples-in-user-defined-projects.adoc b/modules/monitoring-limiting-scrape-samples-in-user-defined-projects.adoc deleted file mode 100644 index e998e3cf762e..000000000000 --- a/modules/monitoring-limiting-scrape-samples-in-user-defined-projects.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="controlling-the-impact-of-unbound-attributes-in-user-defined-projects_{context}"] -= Controlling the impact of unbound metrics attributes in user-defined projects - -Developers can create labels to define attributes for metrics in the form of key-value pairs. The number of potential key-value pairs corresponds to the number of possible values for an attribute. An attribute that has an unlimited number of potential values is called an unbound attribute. For example, a `customer_id` attribute is unbound because it has an infinite number of possible values. - -Every assigned key-value pair has a unique time series. Using many unbound attributes in labels can create exponentially more time series, which can impact Prometheus performance and available disk space. - -Cluster administrators can use the following measures to control the impact of unbound metrics attributes in user-defined projects: - -* Limit the number of samples that can be accepted per target scrape in user-defined projects -* Limit the number of scraped labels, the length of label names, and the length of label values. -* Create alerts that fire when a scrape sample threshold is reached or when the target cannot be scraped - -[NOTE] -==== -To prevent issues caused by adding many unbound attributes, limit the number of scrape samples, label names, and unbound attributes you define for metrics. -Also reduce the number of potential key-value pair combinations by using attributes that are bound to a limited set of possible values. -==== diff --git a/modules/monitoring-listing-alerting-rules-for-all-projects-in-a-single-view.adoc b/modules/monitoring-listing-alerting-rules-for-all-projects-in-a-single-view.adoc deleted file mode 100644 index 72b0a9773f1a..000000000000 --- a/modules/monitoring-listing-alerting-rules-for-all-projects-in-a-single-view.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="listing-alerting-rules-for-all-projects-in-a-single-view_{context}"] -= Listing alerting rules for all projects in a single view - -As a cluster administrator, you can list alerting rules for core {product-title} and user-defined projects together in a single view. - -.Prerequisites - -ifdef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` or `dedicated-admin` role. -endif::[] -ifndef::openshift-rosa,openshift-dedicated[] -* You have access to the cluster as a user with the `cluster-admin` role. -endif::[] -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. In the *Administrator* perspective, navigate to *Observe* -> *Alerting* -> *Alerting Rules*. - -. Select the *Platform* and *User* sources in the *Filter* drop-down menu. -+ -[NOTE] -==== -The *Platform* source is selected by default. 
-==== diff --git a/modules/monitoring-maintenance-and-support.adoc b/modules/monitoring-maintenance-and-support.adoc deleted file mode 100644 index 92eeba9c4ca6..000000000000 --- a/modules/monitoring-maintenance-and-support.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -[id="maintenance-and-support_{context}"] -= Maintenance and support for monitoring - -The supported way of configuring {product-title} Monitoring is by configuring it using the options described in this document. *Do not use other configurations, as they are unsupported.* Configuration paradigms might change across Prometheus releases, and such cases can only be handled gracefully if all configuration possibilities are controlled. If you use configurations other than those described in this section, your changes will disappear because the `cluster-monitoring-operator` reconciles any differences. The Operator resets everything to the defined state by default and by design. diff --git a/modules/monitoring-managing-alerting-rules-for-user-defined-projects.adoc b/modules/monitoring-managing-alerting-rules-for-user-defined-projects.adoc deleted file mode 100644 index df6bdca8becf..000000000000 --- a/modules/monitoring-managing-alerting-rules-for-user-defined-projects.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc -// - -:_content-type: CONCEPT -[id="managing-alerting-rules-for-user-defined-projects_{context}"] -= Managing alerting rules for user-defined projects - -{product-title} monitoring ships with a set of default alerting rules. As a cluster administrator, you can view the default alerting rules. - -In {product-title} {product-version}, you can create, view, edit, and remove alerting rules in user-defined projects. - -ifdef::openshift-rosa,openshift-dedicated[] -[IMPORTANT] -==== -Managing alerting rules for user-defined projects is only available in {product-title} version 4.11 and later. -==== -endif::[] - -.Alerting rule considerations - -* The default alerting rules are used specifically for the {product-title} cluster. - -* Some alerting rules intentionally have identical names. They send alerts about the same event with different thresholds, different severity, or both. - -* Inhibition rules prevent notifications for lower severity alerts that are firing when a higher severity alert is also firing. diff --git a/modules/monitoring-managing-core-platform-alerting-rules.adoc b/modules/monitoring-managing-core-platform-alerting-rules.adoc deleted file mode 100644 index 6e48d29a780f..000000000000 --- a/modules/monitoring-managing-core-platform-alerting-rules.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: CONCEPT -[id="managing-core-platform-alerting-rules_{context}"] -= Managing alerting rules for core platform monitoring - -{product-title} {product-version} monitoring ships with a large set of default alerting rules for platform metrics. -As a cluster administrator, you can customize this set of rules in two ways: - -* Modify the settings for existing platform alerting rules by adjusting thresholds or by adding and modifying labels. -For example, you can change the `severity` label for an alert from `warning` to `critical` to help you route and triage issues flagged by an alert. 
- -* Define and add new custom alerting rules by constructing a query expression based on core platform metrics in the `openshift-monitoring` namespace. - -.Core platform alerting rule considerations - -* New alerting rules must be based on the default {product-title} monitoring metrics. - -* You can only add and modify alerting rules. You cannot create new recording rules or modify existing recording rules. - -* If you modify existing platform alerting rules by using an `AlertRelabelConfig` object, your modifications are not reflected in the Prometheus alerts API. -Therefore, any dropped alerts still appear in the {product-title} web console even though they are no longer forwarded to Alertmanager. -Additionally, any modifications to alerts, such as a changed `severity` label, do not appear in the web console. \ No newline at end of file diff --git a/modules/monitoring-managing-silences.adoc b/modules/monitoring-managing-silences.adoc deleted file mode 100644 index 3105ac00560d..000000000000 --- a/modules/monitoring-managing-silences.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: CONCEPT -[id="managing-silences_{context}"] -= Managing silences - -You can create a silence to stop receiving notifications about an alert when it is firing. It might be useful to silence an alert after being first notified, while you resolve the underlying issue. - -When creating a silence, you must specify whether it becomes active immediately or at a later time. You must also set a duration period after which the silence expires. - -You can view, edit, and expire existing silences. diff --git a/modules/monitoring-modifying-core-platform-alerting-rules.adoc b/modules/monitoring-modifying-core-platform-alerting-rules.adoc deleted file mode 100644 index ce36780344ac..000000000000 --- a/modules/monitoring-modifying-core-platform-alerting-rules.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="modifying-core-platform-alerting-rules_{context}"] -= Modifying core platform alerting rules - -As a cluster administrator, you can modify core platform alerts before Alertmanager routes them to a receiver. -For example, you can change the severity label of an alert, add a custom label, or exclude an alert from being sent to Alertmanager. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You have enabled Technology Preview features, and all nodes in the cluster are ready. - - -.Procedure - -. Create a new YAML configuration file named `example-modified-alerting-rule.yaml` in the `openshift-monitoring` namespace. - -. Add an `AlertRelabelConfig` resource to the YAML file. -The following example modifies the `severity` setting to `critical` for the default platform `watchdog` alerting rule: -+ -[source,yaml] ----- -apiVersion: monitoring.openshift.io/v1 -kind: AlertRelabelConfig -metadata: - name: watchdog - namespace: openshift-monitoring -spec: - configs: - - sourceLabels: [alertname,severity] <1> - regex: "Watchdog;none" <2> - targetLabel: severity <3> - replacement: critical <4> - action: Replace <5> ----- -<1> The source labels for the values you want to modify. -<2> The regular expression against which the value of `sourceLabels` is matched. -<3> The target label of the value you want to modify. 
-<4> The new value to replace the target label. -<5> The relabel action that replaces the old value based on regex matching. -The default action is `Replace`. -Other possible values are `Keep`, `Drop`, `HashMod`, `LabelMap`, `LabelDrop`, and `LabelKeep`. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-modified-alerting-rule.yaml ----- diff --git a/modules/monitoring-modifying-retention-time-and-size-for-prometheus-metrics-data.adoc b/modules/monitoring-modifying-retention-time-and-size-for-prometheus-metrics-data.adoc deleted file mode 100644 index ac5564e1e37e..000000000000 --- a/modules/monitoring-modifying-retention-time-and-size-for-prometheus-metrics-data.adoc +++ /dev/null @@ -1,133 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="modifying-retention-time-and-size-for-prometheus-metrics-data_{context}"] -= Modifying the retention time and size for Prometheus metrics data - -By default, Prometheus automatically retains metrics data for 15 days. -You can modify the retention time to change how soon data is deleted by specifying a time value in the `retention` field. -You can also configure the maximum amount of disk space the retained metrics data uses by specifying a size value in the `retentionSize` field. -If the data reaches this size limit, Prometheus deletes the oldest data first until the disk space used is again below the limit. - -Note the following behaviors of these data retention settings: - -* The size-based retention policy applies to all data block directories in the `/prometheus` directory, including persistent blocks, write-ahead log (WAL) data, and m-mapped chunks. -* Data in the `/wal` and `/head_chunks` directories counts toward the retention size limit, but Prometheus never purges data from these directories based on size- or time-based retention policies. -Thus, if you set a retention size limit lower than the maximum size set for the `/wal` and `/head_chunks` directories, you have configured the system not to retain any data blocks in the `/prometheus` data directories. -* The size-based retention policy is applied only when Prometheus cuts a new data block, which occurs every two hours after the WAL contains at least three hours of data. -* If you do not explicitly define values for either `retention` or `retentionSize`, retention time defaults to 15 days, and retention size is not set. -* If you define values for both `retention` and `retentionSize`, both values apply. -If any data blocks exceed the defined retention time or the defined size limit, Prometheus purges these data blocks. -* If you define a value for `retentionSize` and do not define `retention`, only the `retentionSize` value applies. -* If you do not define a value for `retentionSize` and only define a value for `retention`, only the `retention` value applies. - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** A cluster administrator has enabled monitoring for user-defined projects. -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. 
-** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -[WARNING] -==== -Saving changes to a monitoring config map might restart monitoring processes and redeploy the pods and other resources in the related project. -The running monitoring processes in that project might also restart. -==== - -.Procedure - -. Edit the `ConfigMap` object: -** *To modify the retention time and size for the Prometheus instance that monitors core {product-title} projects*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Add the retention time and size configuration under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - retention: <time_specification> <1> - retentionSize: <size_specification> <2> ----- -+ -<1> The retention time: a number directly followed by `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), or `y` (years). You can also combine time values for specific times, such as `1h30m15s`. -<2> The retention size: a number directly followed by `B` (bytes), `KB` (kilobytes), `MB` (megabytes), `GB` (gigabytes), `TB` (terabytes), `PB` (petabytes), and `EB` (exabytes). -+ -The following example sets the retention time to 24 hours and the retention size to 10 gigabytes for the Prometheus instance that monitors core {product-title} components: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - retention: *24h* - retentionSize: *10GB* ----- - -** *To modify the retention time and size for the Prometheus instance that monitors user-defined projects*: -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Add the retention time and size configuration under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - retention: <time_specification> <1> - retentionSize: <size_specification> <2> ----- -+ -<1> The retention time: a number directly followed by `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), or `y` (years). -You can also combine time values for specific times, such as `1h30m15s`. -<2> The retention size: a number directly followed by `B` (bytes), `KB` (kilobytes), `MB` (megabytes), `GB` (gigabytes), `TB` (terabytes), `PB` (petabytes), or `EB` (exabytes). -+ -The following example sets the retention time to 24 hours and the retention size to 10 gigabytes for the Prometheus instance that monitors user-defined projects: -+ -[source,yaml,subs=quotes] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - retention: *24h* - retentionSize: *10GB* ----- - -. Save the file to apply the changes. 
The pods affected by the new configuration restart automatically. diff --git a/modules/monitoring-modifying-the-retention-time-for-thanos-ruler-metrics-data.adoc b/modules/monitoring-modifying-the-retention-time-for-thanos-ruler-metrics-data.adoc deleted file mode 100644 index 83d9728d5e6f..000000000000 --- a/modules/monitoring-modifying-the-retention-time-for-thanos-ruler-metrics-data.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="modifying-the-retention-time-for-thanos-ruler-metrics-data_{context}"] -= Modifying the retention time for Thanos Ruler metrics data - -By default, for user-defined projects, Thanos Ruler automatically retains metrics data for 24 hours. -You can modify the retention time to change how long this data is retained by specifying a time value in the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* A cluster administrator has enabled monitoring for user-defined projects. -* You have access to the cluster as a user with the `cluster-admin` role or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -* You have created the `user-workload-monitoring-config` `ConfigMap` object. - -[WARNING] -==== -Saving changes to a monitoring config map might restart monitoring processes and redeploy the pods and other resources in the related project. -The running monitoring processes in that project might also restart. -==== - -.Procedure - -. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -. Add the retention time configuration under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - retention: <time_specification> <1> ----- -+ -<1> Specify the retention time in the following format: a number directly followed by `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), or `y` (years). -You can also combine time values for specific times, such as `1h30m15s`. -The default is `24h`. -+ -The following example sets the retention time to 10 days for Thanos Ruler data: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - retention: 10d ----- - -. Save the file to apply the changes. The pods affected by the new configuration automatically restart. 
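To confirm that the new retention value has been picked up, one option is to inspect the `ThanosRuler` resources that the monitoring stack manages. This is a sketch that assumes the Operator propagates the config map value into the resource's `spec.retention` field; the wildcard JSONPath avoids assuming a particular resource name:

[source,terminal]
----
$ oc -n openshift-user-workload-monitoring get thanosruler \
  -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.retention}{"\n"}{end}'
----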
diff --git a/modules/monitoring-moving-monitoring-components-to-different-nodes.adoc b/modules/monitoring-moving-monitoring-components-to-different-nodes.adoc deleted file mode 100644 index d0224e5adb1c..000000000000 --- a/modules/monitoring-moving-monitoring-components-to-different-nodes.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="moving-monitoring-components-to-different-nodes_{context}"] -= Moving monitoring components to different nodes - -To specify the nodes in your cluster on which monitoring stack components will run, configure the `nodeSelector` constraint in the component's `ConfigMap` object to match labels assigned to the nodes. - -[NOTE] -==== -You cannot add a node selector constraint directly to an existing scheduled pod. -==== - -.Prerequisites - -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. If you have not done so yet, add a label to the nodes on which you want to run the monitoring components: -+ -[source,terminal] ----- -$ oc label nodes <node-name> <node-label> ----- -. Edit the `ConfigMap` object: -** *To move a component that monitors core {product-title} projects*: - -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Specify the node labels for the `nodeSelector` constraint for the component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: <1> - nodeSelector: - <node-label-1> <2> - <node-label-2> <3> - <...> ----- -<1> Substitute `<component>` with the appropriate monitoring stack component name. -<2> Substitute `<node-label-1>` with the label you added to the node. -<3> Optional: Specify additional labels. -If you specify additional labels, the pods for the component are only scheduled on the nodes that contain all of the specified labels. -+ -[NOTE] -==== -If monitoring components remain in a `Pending` state after configuring the `nodeSelector` constraint, check the pod events for errors relating to taints and tolerations. -==== - -** *To move a component that monitors user-defined projects*: - -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. 
Specify the node labels for the `nodeSelector` constraint for the component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: <1> - nodeSelector: - <node-label-1> <2> - <node-label-2> <3> - <...> ----- -<1> Substitute `<component>` with the appropriate monitoring stack component name. -<2> Substitute `<node-label-1>` with the label you added to the node. -<3> Optional: Specify additional labels. -If you specify additional labels, the pods for the component are only scheduled on the nodes that contain all of the specified labels. -+ -[NOTE] -==== -If monitoring components remain in a `Pending` state after configuring the `nodeSelector` constraint, check the pod events for errors relating to taints and tolerations. -==== - -. Save the file to apply the changes. -The components specified in the new configuration are moved to the new nodes automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When you save changes to a monitoring config map, the pods and other resources in the project might be redeployed. -The running monitoring processes in that project might also restart. -==== diff --git a/modules/monitoring-optimizing-alerting-for-user-defined-projects.adoc b/modules/monitoring-optimizing-alerting-for-user-defined-projects.adoc deleted file mode 100644 index f4d98537bba3..000000000000 --- a/modules/monitoring-optimizing-alerting-for-user-defined-projects.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: CONCEPT -[id="Optimizing-alerting-for-user-defined-projects_{context}"] -= Optimizing alerting for user-defined projects - -You can optimize alerting for your own projects by considering the following recommendations when creating alerting rules: - -* *Minimize the number of alerting rules that you create for your project*. Create alerting rules that notify you of conditions that impact you. It is more difficult to notice relevant alerts if you generate many alerts for conditions that do not impact you. - -* *Create alerting rules for symptoms instead of causes*. Create alerting rules that notify you of conditions regardless of the underlying cause. The cause can then be investigated. You will need many more alerting rules if each relates only to a specific cause. Some causes are then likely to be missed. - -* *Plan before you write your alerting rules*. Determine what symptoms are important to you and what actions you want to take if they occur. Then build an alerting rule for each symptom. - -* *Provide clear alert messaging*. State the symptom and recommended actions in the alert message. - -* *Include severity levels in your alerting rules*. The severity of an alert depends on how you need to react if the reported symptom occurs. For example, a critical alert should be triggered if a symptom requires immediate attention by an individual or a critical response team. - -* *Optimize alert routing*. Deploy an alerting rule directly on the Prometheus instance in the `openshift-user-workload-monitoring` project if the rule does not query default {product-title} metrics. 
This reduces latency for alerting rules and minimizes the load on monitoring components. -+ -[WARNING] -==== -Default {product-title} metrics for user-defined projects provide information about CPU and memory usage, bandwidth statistics, and packet rate information. Those metrics cannot be included in an alerting rule if you route the rule directly to the Prometheus instance in the `openshift-user-workload-monitoring` project. Alerting rule optimization should be used only if you have read the documentation and have a comprehensive understanding of the monitoring architecture. -==== diff --git a/modules/monitoring-querying-metrics-by-using-the-federation-endpoint-for-prometheus.adoc b/modules/monitoring-querying-metrics-by-using-the-federation-endpoint-for-prometheus.adoc deleted file mode 100644 index 9aaf27668057..000000000000 --- a/modules/monitoring-querying-metrics-by-using-the-federation-endpoint-for-prometheus.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/accessing-third-party-monitoring-apis.adoc - -:_content-type: PROCEDURE -[id="monitoring-querying-metrics-by-using-the-federation-endpoint-for-prometheus_{context}"] -= Querying metrics by using the federation endpoint for Prometheus - -You can use the federation endpoint to scrape platform and user-defined metrics from a network location outside the cluster. -To do so, access the Prometheus `/federate` endpoint for the cluster via an {product-title} route. - -[WARNING] -==== -A delay in retrieving metrics data occurs when you use federation. -This delay can affect the accuracy and timeliness of the scraped metrics. - -Using the federation endpoint can also degrade the performance and scalability of your cluster, especially if you use the federation endpoint to retrieve large amounts of metrics data. -To avoid these issues, follow these recommendations: - -* Do not try to retrieve all metrics data via the federation endpoint. -Query it only when you want to retrieve a limited, aggregated data set. -For example, retrieving fewer than 1,000 samples for each request helps minimize the risk of performance degradation. - -* Avoid querying the federation endpoint frequently. -Limit queries to a maximum of one every 30 seconds. - -If you need to forward large amounts of data outside the cluster, use remote write instead. For more information, see the _Configuring remote write storage_ section. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have obtained the host URL for the {product-title} route. -* You have access to the cluster as a user with the `cluster-monitoring-view` role or have obtained a bearer token with `get` permission on the `namespaces` resource. -+ -[NOTE] -==== -You can only use bearer token authentication to access the federation endpoint. -==== - -.Procedure - -. Retrieve the bearer token: -+ -[source,terminal] ----- -$ token=`oc whoami -t` ----- - -. Query metrics from the `/federate` route. -The following example queries `up` metrics: -+ -[source,terminal] ----- -$ curl -G -s -k -H "Authorization: Bearer $token" \ - 'https://<federation_host>/federate' \ <1> - --data-urlencode 'match[]=up' ----- -+ -<1> For `<federation_host>`, substitute the host URL for the federation route.
-+ -.Example output -+ -[source,terminal] ----- -# TYPE up untyped -up{apiserver="kube-apiserver",endpoint="https",instance="10.0.143.148:6443",job="apiserver",namespace="default",service="kubernetes",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 1 1657035322214 -up{apiserver="kube-apiserver",endpoint="https",instance="10.0.148.166:6443",job="apiserver",namespace="default",service="kubernetes",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 1 1657035338597 -up{apiserver="kube-apiserver",endpoint="https",instance="10.0.173.16:6443",job="apiserver",namespace="default",service="kubernetes",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 1 1657035343834 -... ----- diff --git a/modules/monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc b/modules/monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc deleted file mode 100644 index 7a6f02cc65dc..000000000000 --- a/modules/monitoring-querying-metrics-for-all-projects-as-an-administrator.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc -// * virt/support/virt-prometheus-queries.adoc - -:_content-type: PROCEDURE -[id="querying-metrics-for-all-projects-as-an-administrator_{context}"] -= Querying metrics for all projects as a cluster administrator - -As a cluster administrator or as a user with view permissions for all projects, you can access metrics for all default {product-title} and user-defined projects in the Metrics UI. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role or with view permissions for all projects. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Select the *Administrator* perspective in the {product-title} web console. - -. Select *Observe* -> *Metrics*. - -. Select *Insert Metric at Cursor* to view a list of predefined queries. - -. To create a custom query, add your Prometheus Query Language (PromQL) query to the *Expression* field. -+ -[NOTE] -==== -As you type a PromQL expression, autocomplete suggestions appear in a drop-down list. -These suggestions include functions, metrics, labels, and time tokens. -You can use the keyboard arrows to select one of these suggested items and then press Enter to add the item to your expression. -You can also move your mouse pointer over a suggested item to view a brief description of that item. -==== - -. To add multiple queries, select *Add Query*. - -. To duplicate an existing query, select {kebab} next to the query, then choose *Duplicate query*. - -. To delete a query, select {kebab} next to the query, then choose *Delete query*. - -. To disable a query from being run, select {kebab} next to the query and choose *Disable query*. - -. To run queries that you created, select *Run Queries*. -The metrics from the queries are visualized on the plot. -If a query is invalid, the UI shows an error message. -+ -[NOTE] -==== -Queries that operate on large amounts of data might time out or overload the browser when drawing time series graphs. To avoid this, select *Hide graph* and calibrate your query using only the metrics table. Then, after finding a feasible query, enable the plot to draw the graphs. -==== - -. Optional: The page URL now contains the queries you ran. To use this set of queries again in the future, save this URL. 
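For example, a custom expression such as the following could be entered in the *Expression* field to chart per-namespace CPU usage across the cluster. The metric name is a standard cAdvisor metric and is shown for illustration only; any valid PromQL expression works here:

[source,promql]
----
sum(rate(container_cpu_usage_seconds_total{container!=""}[5m])) by (namespace)
----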
diff --git a/modules/monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc b/modules/monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc deleted file mode 100644 index 09619992b465..000000000000 --- a/modules/monitoring-querying-metrics-for-user-defined-projects-as-a-developer.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc -// * virt/support/virt-prometheus-queries.adoc - -:_content-type: PROCEDURE -[id="querying-metrics-for-user-defined-projects-as-a-developer_{context}"] -= Querying metrics for user-defined projects as a developer - -You can access metrics for a user-defined project as a developer or as a user with view permissions for the project. - -In the *Developer* perspective, the Metrics UI includes some predefined CPU, memory, bandwidth, and network packet queries for the selected project. You can also run custom Prometheus Query Language (PromQL) queries for CPU, memory, bandwidth, network packet and application metrics for the project. - -[NOTE] -==== -Developers can only use the *Developer* perspective and not the *Administrator* perspective. As a developer, you can only query metrics for one project at a time in the *Observe* --> *Metrics* page in the web console for your user-defined project. -==== - -.Prerequisites - -* You have access to the cluster as a developer or as a user with view permissions for the project that you are viewing metrics for. -* You have enabled monitoring for user-defined projects. -* You have deployed a service in a user-defined project. -* You have created a `ServiceMonitor` custom resource definition (CRD) for the service to define how the service is monitored. - -.Procedure - -. Select the *Developer* perspective in the {product-title} web console. - -. Select *Observe* -> *Metrics*. - -. Select the project that you want to view metrics for in the *Project:* list. - -. Select a query from the *Select query* list, or create a custom PromQL query based on the selected query by selecting *Show PromQL*. - -. Optional: Select *Custom query* from the *Select query* list to enter a new query. -As you type, autocomplete suggestions appear in a drop-down list. -These suggestions include functions and metrics. -Click a suggested item to select it. -+ -[NOTE] -==== -In the *Developer* perspective, you can only run one query at a time. -==== diff --git a/modules/monitoring-reducing-latency-for-alerting-rules-that-do-not-query-platform-metrics.adoc b/modules/monitoring-reducing-latency-for-alerting-rules-that-do-not-query-platform-metrics.adoc deleted file mode 100644 index b06a7a95e77d..000000000000 --- a/modules/monitoring-reducing-latency-for-alerting-rules-that-do-not-query-platform-metrics.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="reducing-latency-for-alerting-rules-that-do-not-query-platform-metrics_{context}"] -= Reducing latency for alerting rules that do not query platform metrics - -If an alerting rule for a user-defined project does not query default cluster metrics, you can deploy the rule directly on the Prometheus instance in the `openshift-user-workload-monitoring` project. This reduces latency for alerting rules by bypassing Thanos Ruler when it is not required. This also helps to minimize the overall load on monitoring components. 
- -[WARNING] -==== -Default {product-title} metrics for user-defined projects provide information about CPU and memory usage, bandwidth statistics, and packet rate information. Those metrics cannot be included in an alerting rule if you deploy the rule directly to the Prometheus instance in the `openshift-user-workload-monitoring` project. The procedure outlined in this section should only be used if you have read the documentation and have a comprehensive understanding of the monitoring architecture. -==== - -.Prerequisites - -* You have enabled monitoring for user-defined projects. -* You are logged in as a user that has the `monitoring-rules-edit` role for the project where you want to create an alerting rule. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for alerting rules. In this example, it is called `example-app-alerting-rule.yaml`. - -. Add an alerting rule configuration to the YAML file that includes a label with the key `openshift.io/prometheus-rule-evaluation-scope` and value `leaf-prometheus`. For example: -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: example-alert - namespace: ns1 - labels: - openshift.io/prometheus-rule-evaluation-scope: leaf-prometheus -spec: - groups: - - name: example - rules: - - alert: VersionAlert - expr: version{job="prometheus-example-app"} == 0 ----- - -If that label is present, the alerting rule is deployed on the Prometheus instance in the `openshift-user-workload-monitoring` project. If the label is not present, the alerting rule is deployed to Thanos Ruler. - -. Apply the configuration file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-app-alerting-rule.yaml ----- -+ -It takes some time to create the alerting rule. diff --git a/modules/monitoring-removing-alerting-rules-for-user-defined-projects.adoc b/modules/monitoring-removing-alerting-rules-for-user-defined-projects.adoc deleted file mode 100644 index e0545c48947c..000000000000 --- a/modules/monitoring-removing-alerting-rules-for-user-defined-projects.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="removing-alerting-rules-for-user-defined-projects_{context}"] -= Removing alerting rules for user-defined projects - -You can remove alerting rules for user-defined projects. - -.Prerequisites - -* You have enabled monitoring for user-defined projects. -* You are logged in as a user that has the `monitoring-rules-edit` role for the project where you want to remove an alerting rule. -* You have installed the OpenShift CLI (`oc`).
- -.Procedure - -* To remove rule `<foo>` in `<namespace>`, run the following: -+ -[source,terminal] ----- -$ oc -n <namespace> delete prometheusrule <foo> ----- diff --git a/modules/monitoring-resizing-a-persistent-storage-volume.adoc b/modules/monitoring-resizing-a-persistent-storage-volume.adoc deleted file mode 100644 index 3066634191c9..000000000000 --- a/modules/monitoring-resizing-a-persistent-storage-volume.adoc +++ /dev/null @@ -1,206 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="resizing-a-persistent-storage-volume_{context}"] -= Resizing a persistent storage volume - -{product-title} does not support resizing an existing persistent storage volume used by `StatefulSet` resources, even if the underlying `StorageClass` resource used supports persistent volume sizing. -Therefore, even if you update the `storage` field for an existing persistent volume claim (PVC) with a larger size, this setting will not be propagated to the associated persistent volume (PV). - -However, resizing a PV is still possible by using a manual process. If you want to resize a PV for a monitoring component such as Prometheus, Thanos Ruler, or Alertmanager, you can update the appropriate config map in which the component is configured. Then, patch the PVC, and delete and orphan the pods. -Orphaning the pods recreates the `StatefulSet` resource immediately and automatically updates the size of the volumes mounted in the pods with the new PVC settings. -No service disruption occurs during this process. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* *If you are configuring core {product-title} monitoring components*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -** You have configured at least one PVC for core {product-title} monitoring components. -* *If you are configuring components that monitor user-defined projects*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -** You have configured at least one PVC for components that monitor user-defined projects. - -.Procedure - -. Edit the `ConfigMap` object: -** *To resize a PVC for a component that monitors core {product-title} projects*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Add a new storage size for the PVC configuration for the component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: <1> - volumeClaimTemplate: - spec: - storageClassName: <storage_class> <2> - resources: - requests: - storage: <amount_of_storage> <3> ----- -<1> Specify the core monitoring component. -<2> Specify the storage class. -<3> Specify the new size for the storage volume. 
-+ -The following example configures a PVC that sets the local persistent storage to 100 gigabytes for the Prometheus instance that monitors core {product-title} components: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 100Gi ----- -+ -The following example configures a PVC that sets the local persistent storage for Alertmanager to 40 gigabytes: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 40Gi ----- - -** *To resize a PVC for a component that monitors user-defined projects*: -+ -[NOTE] -==== -You can resize the volumes for the Thanos Ruler and Prometheus instances that monitor user-defined projects. -==== -+ -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Update the PVC configuration for the monitoring component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: <1> - volumeClaimTemplate: - spec: - storageClassName: <storage_class> <2> - resources: - requests: - storage: <amount_of_storage> <3> ----- -<1> Specify the core monitoring component. -<2> Specify the storage class. -<3> Specify the new size for the storage volume. -+ -The following example configures the PVC size to 100 gigabytes for the Prometheus instance that monitors user-defined projects: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 100Gi ----- -+ -The following example sets the PVC size to 20 gigabytes for Thanos Ruler: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - volumeClaimTemplate: - spec: - storageClassName: local-storage - resources: - requests: - storage: 20Gi ----- -+ -[NOTE] -==== -Storage requirements for the `thanosRuler` component depend on the number of rules that are evaluated and how many samples each rule generates. -==== - -. Save the file to apply the changes. The pods affected by the new configuration restart automatically. -+ -[WARNING] -==== -When you save changes to a monitoring config map, the pods and other resources in the related project might be redeployed. The monitoring processes running in that project might also be restarted. -==== - -. Manually patch every PVC with the updated storage request. 
The following example resizes the storage size for the Prometheus component in the `openshift-monitoring` namespace to 100Gi: -+ -[source,terminal] ----- -$ for p in $(oc -n openshift-monitoring get pvc -l app.kubernetes.io/name=prometheus -o jsonpath='{range .items[*]}{.metadata.name} {end}'); do \ - oc -n openshift-monitoring patch pvc/${p} --patch '{"spec": {"resources": {"requests": {"storage":"100Gi"}}}}'; \ - done - ----- - -. Delete the underlying StatefulSet with the `--cascade=orphan` parameter: -+ -[source,terminal] ----- -$ oc delete statefulset -l app.kubernetes.io/name=prometheus --cascade=orphan ----- diff --git a/modules/monitoring-reviewing-monitoring-dashboards-admin.adoc b/modules/monitoring-reviewing-monitoring-dashboards-admin.adoc deleted file mode 100644 index 453a14c66588..000000000000 --- a/modules/monitoring-reviewing-monitoring-dashboards-admin.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/reviewing-monitoring-dashboards.adoc - -:_content-type: PROCEDURE -[id="reviewing-monitoring-dashboards-admin_{context}"] -= Reviewing monitoring dashboards as a cluster administrator - -In the *Administrator* perspective, you can view dashboards relating to core {product-title} cluster components. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. In the *Administrator* perspective in the {product-title} web console, navigate to *Observe* -> *Dashboards*. - -. Choose a dashboard in the *Dashboard* list. Some dashboards, such as *etcd* and *Prometheus* dashboards, produce additional sub-menus when selected. - -. Optional: Select a time range for the graphs in the *Time Range* list. -+ -** Select a pre-defined time period. -+ -** Set a custom time range by selecting *Custom time range* in the *Time Range* list. -+ -.. Input or select the *From* and *To* dates and times. -+ -.. Click *Save* to save the custom time range. - -. Optional: Select a *Refresh Interval*. - -. Hover over each of the graphs within a dashboard to display detailed information about specific items. diff --git a/modules/monitoring-reviewing-monitoring-dashboards-developer.adoc b/modules/monitoring-reviewing-monitoring-dashboards-developer.adoc deleted file mode 100644 index f2cc191d4a2d..000000000000 --- a/modules/monitoring-reviewing-monitoring-dashboards-developer.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/reviewing-monitoring-dashboards.adoc - -:_content-type: PROCEDURE -[id="reviewing-monitoring-dashboards-developer_{context}"] -= Reviewing monitoring dashboards as a developer - -Use the Developer perspective to view Kubernetes compute resources dashboards of a selected project. - -.Prerequisites - -* You have access to the cluster as a developer or as a user. -* You have view permissions for the project that you are viewing the dashboard for. - -.Procedure - -. In the Developer perspective in the {product-title} web console, navigate to *Observe* -> *Dashboard*. - -. Select a project from the *Project:* drop-down list. - -. Select a dashboard from the *Dashboard* drop-down list to see the filtered metrics. -+ -[NOTE] -==== -All dashboards produce additional sub-menus when selected, except *Kubernetes / Compute Resources / Namespace (Pods)*. -==== -+ -. Optional: Select a time range for the graphs in the *Time Range* list. -+ -** Select a pre-defined time period. 
-+ -** Set a custom time range by selecting *Custom time range* in the *Time Range* list. -+ -.. Input or select the *From* and *To* dates and times. -+ -.. Click *Save* to save the custom time range. - -. Optional: Select a *Refresh Interval*. - -. Hover over each of the graphs within a dashboard to display detailed information about specific items. diff --git a/modules/monitoring-running-metrics-queries.adoc b/modules/monitoring-running-metrics-queries.adoc deleted file mode 100644 index 6da280122c96..000000000000 --- a/modules/monitoring-running-metrics-queries.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/examining-cluster-metrics.adoc - -[id="running-metrics-queries_{context}"] -= Running metrics queries - -You begin working with metrics by entering one or several Prometheus Query Language (PromQL) queries. - -.Procedure - -. Open the {product-title} web console and navigate to the *Observe* -> *Metrics* page. - -. In the query field, enter your PromQL query. -* To show all available metrics and PromQL functions, click *Insert Metric at Cursor*. -. For multiple queries, click *Add Query*. -. For deleting queries, click {kebab} for the query, then select *Delete query*. -. For keeping but not running a query, click *Disable query*. -. After you finish creating queries, click *Run Queries*. The metrics from the queries are visualized on the plot. If a query is invalid, the UI shows an error message. -+ -[NOTE] -==== -Queries that operate on large amounts of data might timeout or overload the browser when drawing timeseries graphs. To avoid this, hide the graph and calibrate your query using only the metrics table. Then, after finding a feasible query, enable the plot to draw the graphs. -==== -+ -. Optional: The page URL now contains the queries you ran. To use this set of queries again in the future, save this URL. - -[role="_additional-resources"] -.Additional resources - -* See the link:https://prometheus.io/docs/prometheus/latest/querying/basics/[Prometheus Query Language documentation]. diff --git a/modules/monitoring-searching-alerts-silences-and-alerting-rules.adoc b/modules/monitoring-searching-alerts-silences-and-alerting-rules.adoc deleted file mode 100644 index 46747994a405..000000000000 --- a/modules/monitoring-searching-alerts-silences-and-alerting-rules.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: CONCEPT -[id="searching-alerts-silences-and-alerting-rules_{context}"] -= Searching and filtering alerts, silences, and alerting rules - -You can filter the alerts, silences, and alerting rules that are displayed in the Alerting UI. This section provides a description of each of the available filtering options. - -[discrete] -== Understanding alert filters - -In the *Administrator* perspective, the *Alerts* page in the Alerting UI provides details about alerts relating to default {product-title} and user-defined projects. The page includes a summary of severity, state, and source for each alert. The time at which an alert went into its current state is also shown. - -You can filter by alert state, severity, and source. By default, only *Platform* alerts that are *Firing* are displayed. The following describes each alert filtering option: - -* *Alert State* filters: -** *Firing*. The alert is firing because the alert condition is true and the optional `for` duration has passed. 
The alert will continue to fire as long as the condition remains true. -** *Pending*. The alert is active but is waiting for the duration that is specified in the alerting rule before it fires. -** *Silenced*. The alert is now silenced for a defined time period. Silences temporarily mute alerts based on a set of label selectors that you define. Notifications will not be sent for alerts that match all the listed values or regular expressions. - -* *Severity* filters: -** *Critical*. The condition that triggered the alert could have a critical impact. The alert requires immediate attention when fired and is typically paged to an individual or to a critical response team. -** *Warning*. The alert provides a warning notification about something that might require attention to prevent a problem from occurring. Warnings are typically routed to a ticketing system for non-immediate review. -** *Info*. The alert is provided for informational purposes only. -** *None*. The alert has no defined severity. -** You can also create custom severity definitions for alerts relating to user-defined projects. - -* *Source* filters: -** *Platform*. Platform-level alerts relate only to default {product-title} projects. These projects provide core {product-title} functionality. -** *User*. User alerts relate to user-defined projects. These alerts are user-created and are customizable. User-defined workload monitoring can be enabled post-installation to provide observability into your own workloads. - -[discrete] -== Understanding silence filters - -In the *Administrator* perspective, the *Silences* page in the Alerting UI provides details about silences applied to alerts in default {product-title} and user-defined projects. The page includes a summary of the state of each silence and the time at which a silence ends. - -You can filter by silence state. By default, only *Active* and *Pending* silences are displayed. The following describes each silence state filter option: - -* *Silence State* filters: -** *Active*. The silence is active and the alert will be muted until the silence is expired. -** *Pending*. The silence has been scheduled and it is not yet active. -** *Expired*. The silence has expired and notifications will be sent if the conditions for an alert are true. - -[discrete] -== Understanding alerting rule filters - -In the *Administrator* perspective, the *Alerting Rules* page in the Alerting UI provides details about alerting rules relating to default {product-title} and user-defined projects. The page includes a summary of the state, severity, and source for each alerting rule. - -You can filter alerting rules by alert state, severity, and source. By default, only *Platform* alerting rules are displayed. The following describes each alerting rule filtering option: - -* *Alert State* filters: -** *Firing*. The alert is firing because the alert condition is true and the optional `for` duration has passed. The alert will continue to fire as long as the condition remains true. -** *Pending*. The alert is active but is waiting for the duration that is specified in the alerting rule before it fires. -** *Silenced*. The alert is now silenced for a defined time period. Silences temporarily mute alerts based on a set of label selectors that you define. Notifications will not be sent for alerts that match all the listed values or regular expressions. -** *Not Firing*. The alert is not firing. - -* *Severity* filters: -** *Critical*. The conditions defined in the alerting rule could have a critical impact. 
When true, these conditions require immediate attention. Alerts relating to the rule are typically paged to an individual or to a critical response team. -** *Warning*. The conditions defined in the alerting rule might require attention to prevent a problem from occurring. Alerts relating to the rule are typically routed to a ticketing system for non-immediate review. -** *Info*. The alerting rule provides informational alerts only. -** *None*. The alerting rule has no defined severity. -** You can also create custom severity definitions for alerting rules relating to user-defined projects. - -* *Source* filters: -** *Platform*. Platform-level alerting rules relate only to default {product-title} projects. These projects provide core {product-title} functionality. -** *User*. User-defined workload alerting rules relate to user-defined projects. These alerting rules are user-created and are customizable. User-defined workload monitoring can be enabled post-installation to provide observability into your own workloads. - -[discrete] -== Searching and filtering alerts, silences, and alerting rules in the Developer perspective - -In the *Developer* perspective, the Alerts page in the Alerting UI provides a combined view of alerts and silences relating to the selected project. A link to the governing alerting rule is provided for each displayed alert. - -In this view, you can filter by alert state and severity. By default, all alerts in the selected project are displayed if you have permission to access the project. These filters are the same as those described for the *Administrator* perspective. diff --git a/modules/monitoring-searching-and-filtering-metrics-targets.adoc b/modules/monitoring-searching-and-filtering-metrics-targets.adoc deleted file mode 100644 index 1253dc9909cf..000000000000 --- a/modules/monitoring-searching-and-filtering-metrics-targets.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics-targets.adoc - -:_content-type: CONCEPT -[id="monitoring-searching-and-filtering-metrics-targets_{context}"] -= Searching and filtering metrics targets - -The list of metrics targets can be long. You can filter and search these targets based on various criteria. - -In the *Administrator* perspective, the *Metrics Targets* page provides details about targets for default {product-title} and user-defined projects. -This page lists the following information for each target: - -* the service endpoint URL being scraped -* the ServiceMonitor component being monitored -* the up or down status of the target -* the namespace -* the last scrape time -* the duration of the last scrape - -You can filter the list of targets by status and source. The following filtering options are available: - -* *Status* filters: -** *Up*. The target is currently up and being actively scraped for metrics. -** *Down*. The target is currently down and not being scraped for metrics. - -* *Source* filters: -** *Platform*. Platform-level targets relate only to default {product-title} projects. These projects provide core {product-title} functionality. -** *User*. User targets relate to user-defined projects. These projects are user-created and can be customized. - -You can also use the search box to find a target by target name or label. -Select *Text* or *Label* from the search box menu to limit your search. 
\ No newline at end of file diff --git a/modules/monitoring-sending-notifications-to-external-systems.adoc b/modules/monitoring-sending-notifications-to-external-systems.adoc deleted file mode 100644 index 2eef40f01fac..000000000000 --- a/modules/monitoring-sending-notifications-to-external-systems.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc -// * post_installation_configuration/configuring-alert-notifications.adoc - -:_content-type: CONCEPT -[id="sending-notifications-to-external-systems_{context}"] -= Sending notifications to external systems - -In {product-title} {product-version}, firing alerts can be viewed in the Alerting UI. Alerts are not configured by default to be sent to any notification systems. You can configure {product-title} to send alerts to the following receiver types: - -* PagerDuty -* Webhook -* Email -* Slack - -Routing alerts to receivers enables you to send timely notifications to the appropriate teams when failures occur. For example, critical alerts require immediate attention and are typically paged to an individual or a critical response team. Alerts that provide non-critical warning notifications might instead be routed to a ticketing system for non-immediate review. - -.Checking that alerting is operational by using the watchdog alert - -{product-title} monitoring includes a watchdog alert that fires continuously. Alertmanager repeatedly sends watchdog alert notifications to configured notification providers. The provider is usually configured to notify an administrator when it stops receiving the watchdog alert. This mechanism helps you quickly identify any communication issues between Alertmanager and the notification provider. diff --git a/modules/monitoring-setting-audit-log-levels-for-the-prometheus-adapter.adoc b/modules/monitoring-setting-audit-log-levels-for-the-prometheus-adapter.adoc deleted file mode 100644 index a561ed83d0b5..000000000000 --- a/modules/monitoring-setting-audit-log-levels-for-the-prometheus-adapter.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-audit-log-levels-for-the-prometheus-adapter_{context}"] -= Setting audit log levels for the Prometheus Adapter - -[role=_abstract] -In default platform monitoring, you can configure the audit log level for the Prometheus Adapter. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created the `cluster-monitoring-config` `ConfigMap` object. - -.Procedure - -You can set an audit log level for the Prometheus Adapter in the default `openshift-monitoring` project: - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add `profile:` in the `k8sPrometheusAdapter/audit` section under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - k8sPrometheusAdapter: - audit: - profile: <audit_log_level> <1> ----- -<1> The audit log level to apply to the Prometheus Adapter. - -. Set the audit log level by using one of the following values for the `profile:` parameter: -+ -* `None`: Do not log events. 
-* `Metadata`: Log only the metadata for the request, such as user, timestamp, and so forth. Do not log the request text and the response text. `Metadata` is the default audit log level. -* `Request`: Log only the metadata and the request text but not the response text. This option does not apply for non-resource requests. -* `RequestResponse`: Log event metadata, request text, and response text. This option does not apply for non-resource requests. - -. Save the file to apply the changes. The pods for the Prometheus Adapter restart automatically when you apply the change. -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== - -.Verification - -. In the config map, under `k8sPrometheusAdapter/audit/profile`, set the log level to `Request` and save the file. - -. Confirm that the pods for the Prometheus Adapter are running. The following example lists the status of pods in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get pods ----- - -. Confirm that the audit log level and audit log file path are correctly configured: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get deploy prometheus-adapter -o yaml ----- -+ -.Example output -[source,terminal] ----- -... - - --audit-policy-file=/etc/audit/request-profile.yaml - - --audit-log-path=/var/log/adapter/audit.log ----- - -. Confirm that the correct log level has been applied in the `prometheus-adapter` deployment in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring exec deploy/prometheus-adapter -c prometheus-adapter -- cat /etc/audit/request-profile.yaml ----- -+ -.Example output -[source,terminal] ----- -"apiVersion": "audit.k8s.io/v1" -"kind": "Policy" -"metadata": - "name": "Request" -"omitStages": -- "RequestReceived" -"rules": -- "level": "Request" ----- -+ -[NOTE] -==== -If you enter an unrecognized `profile` value for the Prometheus Adapter in the `ConfigMap` object, no changes are made to the Prometheus Adapter, and an error is logged by the Cluster Monitoring Operator. -==== - -. Review the audit log for the Prometheus Adapter: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring exec -c <prometheus_adapter_pod_name> -- cat /var/log/adapter/audit.log ----- - diff --git a/modules/monitoring-setting-log-levels-for-monitoring-components.adoc b/modules/monitoring-setting-log-levels-for-monitoring-components.adoc deleted file mode 100644 index 70846659137c..000000000000 --- a/modules/monitoring-setting-log-levels-for-monitoring-components.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-log-levels-for-monitoring-components_{context}"] -= Setting log levels for monitoring components - -You can configure the log level for Alertmanager, Prometheus Operator, Prometheus, Thanos Querier, and Thanos Ruler. - -The following log levels can be applied to the relevant component in the `cluster-monitoring-config` and `user-workload-monitoring-config` `ConfigMap` objects: - -* `debug`. Log debug, informational, warning, and error messages. -* `info`. Log informational, warning, and error messages. -* `warn`. Log warning and error messages only. -* `error`. Log error messages only. - -The default log level is `info`. 
- -.Prerequisites - -* *If you are setting a log level for Alertmanager, Prometheus Operator, Prometheus, or Thanos Querier in the `openshift-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are setting a log level for Prometheus Operator, Prometheus, or Thanos Ruler in the `openshift-user-workload-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `ConfigMap` object: -** *To set a log level for a component in the `openshift-monitoring` project*: -.. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -.. Add `logLevel: <log_level>` for a component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - <component>: <1> - logLevel: <log_level> <2> ----- -<1> The monitoring stack component for which you are setting a log level. -For default platform monitoring, available component values are `prometheusK8s`, `alertmanagerMain`, `prometheusOperator`, and `thanosQuerier`. -<2> The log level to set for the component. -The available values are `error`, `warn`, `info`, and `debug`. -The default value is `info`. - -** *To set a log level for a component in the `openshift-user-workload-monitoring` project*: -.. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -.. Add `logLevel: <log_level>` for a component under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - <component>: <1> - logLevel: <log_level> <2> ----- -<1> The monitoring stack component for which you are setting a log level. -For user workload monitoring, available component values are `prometheus`, `prometheusOperator`, and `thanosRuler`. -<2> The log level to set for the component. -The available values are `error`, `warn`, `info`, and `debug`. -The default value is `info`. - -. Save the file to apply the changes. The pods for the component restarts automatically when you apply the log-level change. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to a monitoring config map, the pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== - -. Confirm that the log-level has been applied by reviewing the deployment or pod configuration in the related project. 
The following example checks the log level in the `prometheus-operator` deployment in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get deploy prometheus-operator -o yaml | grep "log-level" ----- -+ -.Example output -[source,terminal] ----- - - --log-level=debug ----- - -. Check that the pods for the component are running. The following example lists the status of pods in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pods ----- -+ -[NOTE] -==== -If an unrecognized `loglevel` value is included in the `ConfigMap` object, the pods for the component might not restart successfully. -==== diff --git a/modules/monitoring-setting-query-log-file-for-prometheus.adoc b/modules/monitoring-setting-query-log-file-for-prometheus.adoc deleted file mode 100644 index ded1516498df..000000000000 --- a/modules/monitoring-setting-query-log-file-for-prometheus.adoc +++ /dev/null @@ -1,132 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-query-log-file-for-prometheus_{context}"] -= Enabling the query log file for Prometheus - -[role="_abstract"] -You can configure Prometheus to write all queries that have been run by the engine to a log file. You can do so for default platform monitoring and for user-defined workload monitoring. - -[IMPORTANT] -==== -Because log rotation is not supported, only enable this feature temporarily when you need to troubleshoot an issue. After you finish troubleshooting, disable query logging by reverting the changes you made to the `ConfigMap` object to enable the feature. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* *If you are enabling the query log file feature for Prometheus in the `openshift-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role. -** You have created the `cluster-monitoring-config` `ConfigMap` object. -* *If you are enabling the query log file feature for Prometheus in the `openshift-user-workload-monitoring` project*: -** You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -** You have created the `user-workload-monitoring-config` `ConfigMap` object. - -.Procedure - -** *To set the query log file for Prometheus in the `openshift-monitoring` project*: -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- -+ -. Add `queryLogFile: <path>` for `prometheusK8s` under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - queryLogFile: <path> <1> ----- -<1> The full path to the file in which queries will be logged. -+ -. Save the file to apply the changes. -+ -[WARNING] -==== -When you save changes to a monitoring config map, pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== -+ -. Verify that the pods for the component are running. 
The following sample command lists the status of pods in the `openshift-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring get pods ----- -+ -. Read the query log: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring exec prometheus-k8s-0 -- cat <path> ----- -+ -[IMPORTANT] -==== -Revert the setting in the config map after you have examined the logged query information. -==== - -** *To set the query log file for Prometheus in the `openshift-user-workload-monitoring` project*: -. Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- -+ -. Add `queryLogFile: <path>` for `prometheus` under `data/config.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - queryLogFile: <path> <1> ----- -<1> The full path to the file in which queries will be logged. -+ -. Save the file to apply the changes. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When you save changes to a monitoring config map, pods and other resources in the related project might be redeployed. The running monitoring processes in that project might also be restarted. -==== -+ -. Verify that the pods for the component are running. The following example command lists the status of pods in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring get pods ----- -+ -. Read the query log: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring exec prometheus-user-workload-0 -- cat <path> ----- -+ -[IMPORTANT] -==== -Revert the setting in the config map after you have examined the logged query information. -==== diff --git a/modules/monitoring-setting-scrape-sample-and-label-limits-for-user-defined-projects.adoc b/modules/monitoring-setting-scrape-sample-and-label-limits-for-user-defined-projects.adoc deleted file mode 100644 index 0c9465489d00..000000000000 --- a/modules/monitoring-setting-scrape-sample-and-label-limits-for-user-defined-projects.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-scrape-sample-and-label-limits-for-user-defined-projects_{context}"] -= Setting scrape sample and label limits for user-defined projects - -You can limit the number of samples that can be accepted per target scrape in user-defined projects. -You can also limit the number of scraped labels, the length of label names, and the length of label values. - -[WARNING] -==== -If you set sample or label limits, no further sample data is ingested for that target scrape after the limit is reached. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -* You have enabled monitoring for user-defined projects. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. 
Edit the `user-workload-monitoring-config` `ConfigMap` object in the `openshift-user-workload-monitoring` project: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -. Add the `enforcedSampleLimit` configuration to `data/config.yaml` to limit the number of samples that can be accepted per target scrape in user-defined projects: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - enforcedSampleLimit: 50000 <1> ----- -<1> A value is required if this parameter is specified. This `enforcedSampleLimit` example limits the number of samples that can be accepted per target scrape in user-defined projects to 50,000. - -. Add the `enforcedLabelLimit`, `enforcedLabelNameLengthLimit`, and `enforcedLabelValueLengthLimit` configurations to `data/config.yaml` to limit the number of scraped labels, the length of label names, and the length of label values in user-defined projects: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - prometheus: - enforcedLabelLimit: 500 <1> - enforcedLabelNameLengthLimit: 50 <2> - enforcedLabelValueLengthLimit: 600 <3> ----- -<1> Specifies the maximum number of labels per scrape. -The default value is `0`, which specifies no limit. -<2> Specifies the maximum length in characters of a label name. -The default value is `0`, which specifies no limit. -<3> Specifies the maximum length in characters of a label value. -The default value is `0`, which specifies no limit. - -. Save the file to apply the changes. The limits are applied automatically. -+ -[NOTE] -==== -Configurations applied to the `user-workload-monitoring-config` `ConfigMap` object are not activated unless a cluster administrator has enabled monitoring for user-defined projects. -==== -+ -[WARNING] -==== -When changes are saved to the `user-workload-monitoring-config` `ConfigMap` object, the pods and other resources in the `openshift-user-workload-monitoring` project might be redeployed. The running monitoring processes in that project might also be restarted. -==== diff --git a/modules/monitoring-setting-the-body-size-limit-for-metrics-scraping.adoc b/modules/monitoring-setting-the-body-size-limit-for-metrics-scraping.adoc deleted file mode 100644 index b15cdb3d1ce9..000000000000 --- a/modules/monitoring-setting-the-body-size-limit-for-metrics-scraping.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-the-body-size-limit-for-metrics-scraping_{context}"] -= Setting the body size limit for metrics scraping - -By default, no limit exists for the uncompressed body size for data returned from scraped metrics targets. -You can set a body size limit to help avoid situations in which Prometheus consumes excessive amounts of memory when scraped targets return a response that contains a large amount of data. -In addition, by setting a body size limit, you can reduce the impact that a malicious target might have on Prometheus and on the cluster as a whole. 
- -After you set a value for `enforcedBodySizeLimit`, the alert `PrometheusScrapeBodySizeLimitHit` fires when at least one Prometheus scrape target replies with a response body larger than the configured value. - -[NOTE] -==== -If metrics data scraped from a target has an uncompressed body size exceeding the configured size limit, the scrape fails. -Prometheus then considers this target to be down and sets its `up` metric value to `0`, which can trigger the `TargetDown` alert. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` namespace: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add a value for `enforcedBodySizeLimit` to `data/config.yaml/prometheusK8s` to limit the body size that can be accepted per target scrape: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: |- - prometheusK8s: - enforcedBodySizeLimit: 40MB <1> ----- -<1> Specify the maximum body size for scraped metrics targets. -This `enforcedBodySizeLimit` example limits the uncompressed size per target scrape to 40 megabytes. -Valid numeric values use the Prometheus data size format: B (bytes), KB (kilobytes), MB (megabytes), GB (gigabytes), TB (terabytes), PB (petabytes), and EB (exabytes). -The default value is `0`, which specifies no limit. -You can also set the value to `automatic` to calculate the limit automatically based on cluster capacity. - -. Save the file to apply the changes automatically. -+ -[WARNING] -==== -When you save changes to a `cluster-monitoring-config` config map, the pods and other resources in the `openshift-monitoring` project might be redeployed. -The running monitoring processes in that project might also restart. -==== diff --git a/modules/monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc b/modules/monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc deleted file mode 100644 index 29d2c1fe72ee..000000000000 --- a/modules/monitoring-setting-up-metrics-collection-for-user-defined-projects.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -:_content-type: CONCEPT -[id="setting-up-metrics-collection-for-user-defined-projects_{context}"] -= Setting up metrics collection for user-defined projects - -You can create a `ServiceMonitor` resource to scrape metrics from a service endpoint in a user-defined project. This assumes that your application uses a Prometheus client library to expose metrics to the `/metrics` canonical name. - -This section describes how to deploy a sample service in a user-defined project and then create a `ServiceMonitor` resource that defines how that service should be monitored. 
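
As a minimal sketch of such a resource, the following `ServiceMonitor` example assumes a sample service in the `ns1` namespace that carries the label `app: prometheus-example-app` and exposes metrics on a service port named `web`. These names follow the `prometheus-example-app` example referenced elsewhere in these modules and are placeholders, not requirements; substitute the values that match your own service.

[source,yaml]
----
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-example-monitor # placeholder name for the monitor
  namespace: ns1 # the user-defined project that contains the service
spec:
  endpoints:
  - interval: 30s # how often to scrape the endpoint
    port: web # assumed name of the service port that serves /metrics
    scheme: http
  selector:
    matchLabels:
      app: prometheus-example-app # assumed label on the sample service
----

After you apply a `ServiceMonitor` resource like this one, the Prometheus instance that monitors user-defined projects discovers the matching service endpoints and scrapes them at the configured interval.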
diff --git a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-alertmanager.adoc b/modules/monitoring-setting-up-pod-topology-spread-constraints-for-alertmanager.adoc deleted file mode 100644 index 8fd3a6622401..000000000000 --- a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-alertmanager.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-up-pod-topology-spread-constraints-for-alertmanager_{context}"] -= Setting up pod topology spread constraints for Alertmanager - -For core {product-title} platform monitoring, you can set up pod topology spread constraints for Alertmanager to fine tune how pod replicas are scheduled to nodes across zones. -Doing so helps ensure that Alertmanager pods are highly available and run more efficiently, because workloads are spread across nodes in different data centers or hierarchical infrastructure zones. - -You configure pod topology spread constraints for Alertmanager in the `cluster-monitoring-config` config map. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created the `cluster-monitoring-config` `ConfigMap` object. - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` namespace: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add values for the following settings under `data/config.yaml/alertmanagermain` to configure pod topology spread constraints: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - alertmanagerMain: - topologySpreadConstraints: - - maxSkew: 1 <1> - topologyKey: monitoring <2> - whenUnsatisfiable: DoNotSchedule <3> - labelSelector: - matchLabels: <4> - app.kubernetes.io/name: alertmanager ----- -<1> Specify a numeric value for `maxSkew`, which defines the degree to which pods are allowed to be unevenly distributed. -This field is required, and the value must be greater than zero. -The value specified has a different effect depending on what value you specify for `whenUnsatisfiable`. -<2> Specify a key of node labels for `topologyKey`. -This field is required. -Nodes that have a label with this key and identical values are considered to be in the same topology. -The scheduler will try to put a balanced number of pods into each domain. -<3> Specify a value for `whenUnsatisfiable`. -This field is required. -Available options are `DoNotSchedule` and `ScheduleAnyway`. -Specify `DoNotSchedule` if you want the `maxSkew` value to define the maximum difference allowed between the number of matching pods in the target topology and the global minimum. -Specify `ScheduleAnyway` if you want the scheduler to still schedule the pod but to give higher priority to nodes that might reduce the skew. -<4> Specify a value for `matchLabels`. This value is used to identify the set of matching pods to which to apply the constraints. - -. Save the file to apply the changes automatically. -+ -[WARNING] -==== -When you save changes to the `cluster-monitoring-config` config map, the pods and other resources in the `openshift-monitoring` project might be redeployed. -The running monitoring processes in that project might also restart. 
-==== diff --git a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-prometheus.adoc b/modules/monitoring-setting-up-pod-topology-spread-constraints-for-prometheus.adoc deleted file mode 100644 index 3593b46f3336..000000000000 --- a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-prometheus.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-up-pod-topology-spread-constraints-for-prometheus_{context}"] -= Setting up pod topology spread constraints for Prometheus - -For core {product-title} platform monitoring, you can set up pod topology spread constraints for Prometheus to fine tune how pod replicas are scheduled to nodes across zones. -Doing so helps ensure that Prometheus pods are highly available and run more efficiently, because workloads are spread across nodes in different data centers or hierarchical infrastructure zones. - -You configure pod topology spread constraints for Prometheus in the `cluster-monitoring-config` config map. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have created the `cluster-monitoring-config` `ConfigMap` object. - -.Procedure - -. Edit the `cluster-monitoring-config` `ConfigMap` object in the `openshift-monitoring` namespace: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring edit configmap cluster-monitoring-config ----- - -. Add values for the following settings under `data/config.yaml/prometheusK8s` to configure pod topology spread constraints: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - topologySpreadConstraints: - - maxSkew: 1 <1> - topologyKey: monitoring <2> - whenUnsatisfiable: DoNotSchedule <3> - labelSelector: - matchLabels: <4> - app.kubernetes.io/name: prometheus ----- -<1> Specify a numeric value for `maxSkew`, which defines the degree to which pods are allowed to be unevenly distributed. -This field is required, and the value must be greater than zero. -The value specified has a different effect depending on what value you specify for `whenUnsatisfiable`. -<2> Specify a key of node labels for `topologyKey`. -This field is required. -Nodes that have a label with this key and identical values are considered to be in the same topology. -The scheduler will try to put a balanced number of pods into each domain. -<3> Specify a value for `whenUnsatisfiable`. -This field is required. -Available options are `DoNotSchedule` and `ScheduleAnyway`. -Specify `DoNotSchedule` if you want the `maxSkew` value to define the maximum difference allowed between the number of matching pods in the target topology and the global minimum. -Specify `ScheduleAnyway` if you want the scheduler to still schedule the pod but to give higher priority to nodes that might reduce the skew. -<4> Specify a value for `matchLabels`. This value is used to identify the set of matching pods to which to apply the constraints. - -. Save the file to apply the changes automatically. -+ -[WARNING] -==== -When you save changes to the `cluster-monitoring-config` config map, the pods and other resources in the `openshift-monitoring` project might be redeployed. -The running monitoring processes in that project might also restart. 
-==== diff --git a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-thanos-ruler.adoc b/modules/monitoring-setting-up-pod-topology-spread-constraints-for-thanos-ruler.adoc deleted file mode 100644 index 31f67dfc8214..000000000000 --- a/modules/monitoring-setting-up-pod-topology-spread-constraints-for-thanos-ruler.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: PROCEDURE -[id="setting-up-pod-topology-spread-constraints-for-thanos-ruler_{context}"] -= Setting up pod topology spread constraints for Thanos Ruler - -For user-defined monitoring, you can set up pod topology spread constraints for Thanos Ruler to fine tune how pod replicas are scheduled to nodes across zones. -Doing so helps ensure that Thanos Ruler pods are highly available and run more efficiently, because workloads are spread across nodes in different data centers or hierarchical infrastructure zones. - -You configure pod topology spread constraints for Thanos Ruler in the `user-workload-monitoring-config` config map. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* A cluster administrator has enabled monitoring for user-defined projects. -* You have access to the cluster as a user with the `cluster-admin` role, or as a user with the `user-workload-monitoring-config-edit` role in the `openshift-user-workload-monitoring` project. -* You have created the `user-workload-monitoring-config` `ConfigMap` object. - -.Procedure - -. Edit the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace: -+ -[source,terminal] ----- -$ oc -n openshift-user-workload-monitoring edit configmap user-workload-monitoring-config ----- - -. Add values for the following settings under `data/config.yaml/thanosRuler` to configure pod topology spread constraints: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: user-workload-monitoring-config - namespace: openshift-user-workload-monitoring -data: - config.yaml: | - thanosRuler: - topologySpreadConstraints: - - maxSkew: 1 <1> - topologyKey: monitoring <2> - whenUnsatisfiable: ScheduleAnyway <3> - labelSelector: - matchLabels: <4> - app.kubernetes.io/name: thanos-ruler ----- -<1> Specify a numeric value for `maxSkew`, which defines the degree to which pods are allowed to be unevenly distributed. -This field is required, and the value must be greater than zero. -The value specified has a different effect depending on what value you specify for `whenUnsatisfiable`. -<2> Specify a key of node labels for `topologyKey`. -This field is required. -Nodes that have a label with this key and identical values are considered to be in the same topology. -The scheduler will try to put a balanced number of pods into each domain. -<3> Specify a value for `whenUnsatisfiable`. -This field is required. -Available options are `DoNotSchedule` and `ScheduleAnyway`. -Specify `DoNotSchedule` if you want the `maxSkew` value to define the maximum difference allowed between the number of matching pods in the target topology and the global minimum. -Specify `ScheduleAnyway` if you want the scheduler to still schedule the pod but to give higher priority to nodes that might reduce the skew. -<4> Specify a value for `matchLabels`. This value is used to identify the set of matching pods to which to apply the constraints. - -. Save the file to apply the changes automatically. 
-+ -[WARNING] -==== -When you save changes to the `user-workload-monitoring-config` config map, the pods and other resources in the `openshift-user-workload-monitoring` project might be redeployed. -The running monitoring processes in that project might also restart. -==== diff --git a/modules/monitoring-silencing-alerts.adoc b/modules/monitoring-silencing-alerts.adoc deleted file mode 100644 index 9ef15af65809..000000000000 --- a/modules/monitoring-silencing-alerts.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-alerts.adoc - -:_content-type: PROCEDURE -[id="silencing-alerts_{context}"] -= Silencing alerts - -You can either silence a specific alert or silence alerts that match a specification that you define. - -.Prerequisites - -* If you are a cluster administrator, you have access to the cluster as a user with the `cluster-admin` cluster role. -* If you are a non-administrator user, you have access to the cluster as a user with the following user roles: -** The `cluster-monitoring-view` cluster role, which allows you to access Alertmanager. -** The `monitoring-alertmanager-edit` role, which permits you to create and silence alerts in the *Administrator* perspective in the web console. -** The `monitoring-rules-edit` role, which permits you to create and silence alerts in the *Developer* perspective in the web console. - -.Procedure - -To silence a specific alert: - -* In the *Administrator* perspective: - -. Navigate to the *Observe* -> *Alerting* -> *Alerts* page of the {product-title} web console. - -. For the alert that you want to silence, select the {kebab} in the right-hand column and select *Silence Alert*. The *Silence Alert* form appears with a prepopulated specification for the chosen alert. - -. Optional: Modify the silence. - -. You must add a comment before creating the silence. - -. To create the silence, select *Silence*. - -* In the *Developer* perspective: - -. Navigate to the *Observe* -> *<project_name>* -> *Alerts* page in the {product-title} web console. - -. Expand the details for an alert by selecting *>* to the left of the alert name. Select the name of the alert in the expanded view to open the *Alert Details* page for the alert. - -. Select *Silence Alert*. The *Silence Alert* form appears with a prepopulated specification for the chosen alert. - -. Optional: Modify the silence. - -. You must add a comment before creating the silence. - -. To create the silence, select *Silence*. - -To silence a set of alerts by creating an alert specification in the *Administrator* perspective: - -. Navigate to the *Observe* -> *Alerting* -> *Silences* page in the {product-title} web console. - -. Select *Create Silence*. - -. Set the schedule, duration, and label details for an alert in the *Create Silence* form. You must also add a comment for the silence. - -. To create silences for alerts that match the label selectors that you entered in the previous step, select *Silence*. 
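If you prefer the command line to the web console, the following is a minimal sketch of creating a silence with `amtool` by running it inside an Alertmanager pod. This alternative is not part of the procedure above, and the pod name `alertmanager-main-0`, the container name `alertmanager`, and the alert name `ExampleAlert` are assumptions that you would replace with values from your own cluster.

[source,terminal]
----
# Sketch only: silence ExampleAlert for two hours, with a required comment.
# Assumes the alertmanager-main-0 pod and its alertmanager container in openshift-monitoring.
$ oc -n openshift-monitoring exec -c alertmanager alertmanager-main-0 -- \
    amtool silence add alertname=ExampleAlert \
    --comment="Planned maintenance" \
    --author="$(oc whoami)" \
    --duration=2h \
    --alertmanager.url=http://localhost:9093
----

As in the web console flow, a comment is required when the silence is created.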
diff --git a/modules/monitoring-specifying-how-a-service-is-monitored.adoc b/modules/monitoring-specifying-how-a-service-is-monitored.adoc deleted file mode 100644 index 182b2bc7dd12..000000000000 --- a/modules/monitoring-specifying-how-a-service-is-monitored.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -:_content-type: PROCEDURE -[id="specifying-how-a-service-is-monitored_{context}"] -= Specifying how a service is monitored - -[role="_abstract"] -To use the metrics exposed by your service, you must configure {product-title} monitoring to scrape metrics from the `/metrics` endpoint. You can do this using a `ServiceMonitor` custom resource definition (CRD) that specifies how a service should be monitored, or a `PodMonitor` CRD that specifies how a pod should be monitored. The former requires a `Service` object, while the latter does not, allowing Prometheus to directly scrape metrics from the metrics endpoint exposed by a pod. - -This procedure shows you how to create a `ServiceMonitor` resource for a service in a user-defined project. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role or the `monitoring-edit` role. -* You have enabled monitoring for user-defined projects. -* For this example, you have deployed the `prometheus-example-app` sample service in the `ns1` project. -+ -[NOTE] -==== -The `prometheus-example-app` sample service does not support TLS authentication. -==== - -.Procedure - -. Create a YAML file for the `ServiceMonitor` resource configuration. In this example, the file is called `example-app-service-monitor.yaml`. - -. Add the following `ServiceMonitor` resource configuration details: -+ -[source,yaml] ----- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - k8s-app: prometheus-example-monitor - name: prometheus-example-monitor - namespace: ns1 -spec: - endpoints: - - interval: 30s - port: web - scheme: http - selector: - matchLabels: - app: prometheus-example-app ----- -+ -This defines a `ServiceMonitor` resource that scrapes the metrics exposed by the `prometheus-example-app` sample service, which includes the `version` metric. -+ -[NOTE] -==== -A `ServiceMonitor` resource in a user-defined namespace can only discover services in the same namespace. That is, the `namespaceSelector` field of the `ServiceMonitor` resource is always ignored. -==== - -. Apply the configuration to the cluster: -+ -[source,terminal] ----- -$ oc apply -f example-app-service-monitor.yaml ----- -+ -It takes some time to deploy the `ServiceMonitor` resource. - -. 
You can check that the `ServiceMonitor` resource is running: -+ -[source,terminal] ----- -$ oc -n ns1 get servicemonitor ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -prometheus-example-monitor 81m ----- diff --git a/modules/monitoring-support-considerations.adoc b/modules/monitoring-support-considerations.adoc deleted file mode 100644 index 7b68ac992639..000000000000 --- a/modules/monitoring-support-considerations.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="support-considerations_{context}"] -= Support considerations for monitoring - -The following modifications are explicitly not supported: - -* *Creating additional `ServiceMonitor`, `PodMonitor`, and `PrometheusRule` objects in the `openshift-*` and `kube-*` projects.* -* *Modifying any resources or objects deployed in the `openshift-monitoring` or `openshift-user-workload-monitoring` projects.* The resources created by the {product-title} monitoring stack are not meant to be used by any other resources, as there are no guarantees about their backward compatibility. -+ -[NOTE] -==== -The Alertmanager configuration is deployed as a secret resource in the `openshift-monitoring` namespace. -If you have enabled a separate Alertmanager instance for user-defined alert routing, an Alertmanager configuration is also deployed as a secret resource in the `openshift-user-workload-monitoring` namespace. -To configure additional routes for any instance of Alertmanager, you need to decode, modify, and then encode that secret. -This procedure is a supported exception to the preceding statement. -==== -+ -* *Modifying resources of the stack.* The {product-title} monitoring stack ensures its resources are always in the state it expects them to be. If they are modified, the stack will reset them. -* *Deploying user-defined workloads to `openshift-*`, and `kube-*` projects.* These projects are reserved for Red Hat provided components and they should not be used for user-defined workloads. -* *Installing custom Prometheus instances on {product-title}.* A custom instance is a Prometheus custom resource (CR) managed by the Prometheus Operator. -* *Enabling symptom based monitoring by using the `Probe` custom resource definition (CRD) in Prometheus Operator.* - -[NOTE] -==== -Backward compatibility for metrics, recording rules, or alerting rules is not guaranteed. -==== diff --git a/modules/monitoring-supported-remote-write-authentication-settings.adoc b/modules/monitoring-supported-remote-write-authentication-settings.adoc deleted file mode 100644 index 4ea59cca00fa..000000000000 --- a/modules/monitoring-supported-remote-write-authentication-settings.adoc +++ /dev/null @@ -1,314 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: REFERENCE -[id="supported_remote_write_authentication_settings_{context}"] -= Supported remote write authentication settings - -You can use different methods to authenticate with a remote write endpoint. -Currently supported authentication methods are AWS Signature Version 4, Basic authentication, authentication using HTTP in an `Authorization` request header, OAuth 2.0, and TLS client. -The following table provides details about supported authentication methods for use with remote write. 
- -[options="header"] -|=== - -|Authentication method|Config map field|Description - -|AWS Signature Version 4|`sigv4`|This method uses AWS Signature Version 4 authentication to sign requests. -You cannot use this method simultaneously with authorization, OAuth 2.0, or Basic authentication. - -|Basic authentication|`basicAuth`|Basic authentication sets the authorization header on every remote write request with the configured username and password. - -|authorization|`authorization`|Authorization sets the `Authorization` header on every remote write request using the configured token. - -|OAuth 2.0|`oauth2`|An OAuth 2.0 configuration uses the client credentials grant type. -Prometheus fetches an access token from `tokenUrl` with the specified client ID and client secret to access the remote write endpoint. -You cannot use this method simultaneously with authorization, AWS Signature Version 4, or Basic authentication. - -|TLS client|`tlsConfig`|A TLS client configuration specifies the CA certificate, the client certificate, and the client key file information used to authenticate with the remote write endpoint server using TLS. -The sample configuration assumes that you have already created a CA certificate file, a client certificate file, and a client key file. - -|=== - -== Config map location for authentication settings -The following shows the location of the authentication configuration in the `ConfigMap` object for default platform monitoring. - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://remote-write-endpoint.example.com" <1> - <endpoint_authentication_details> <2> ----- -<1> The URL of the remote write endpoint. -<2> The required configuration details for the authentication method for the endpoint. -Currently supported authentication methods are Amazon Web Services (AWS) Signature Version 4, authentication using HTTP in an `Authorization` request header, Basic authentication, OAuth 2.0, and TLS client. - -[NOTE] -==== -If you configure remote write for the Prometheus instance that monitors user-defined projects, edit the `user-workload-monitoring-config` config map in the `openshift-user-workload-monitoring` namespace. -Note that the Prometheus config map component is called `prometheus` in the `user-workload-monitoring-config` `ConfigMap` object and not `prometheusK8s`, as it is in the `cluster-monitoring-config` `ConfigMap` object. -==== - -== Example remote write authentication settings - -The following samples show different authentication settings you can use to connect to a remote write endpoint. -Each sample also shows how to configure a corresponding `Secret` object that contains authentication credentials and other relevant settings. -Each sample configures authentication for use with default platform monitoring in the `openshift-monitoring` namespace. - -.Sample YAML for AWS Signature Version 4 authentication - -The following shows the settings for a `sigv4` secret named `sigv4-credentials` in the `openshift-monitoring` namespace. - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: sigv4-credentials - namespace: openshift-monitoring -stringData: - accessKey: <AWS_access_key> <1> - secretKey: <AWS_secret_key> <2> -type: Opaque ----- -<1> The AWS API access key. -<2> The AWS API secret key. 
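If you prefer to create the `sigv4-credentials` secret imperatively rather than applying the YAML manifest above, the following sketch shows one way to do it with `oc`. The placeholder values are assumptions; the key names `accessKey` and `secretKey` must match the keys that the remote write configuration references.

[source,terminal]
----
# Sketch only: create the sigv4-credentials Secret from literal values.
# Replace <AWS_access_key> and <AWS_secret_key> with your own credentials.
$ oc -n openshift-monitoring create secret generic sigv4-credentials \
    --from-literal=accessKey=<AWS_access_key> \
    --from-literal=secretKey=<AWS_secret_key>
----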
- -The following shows sample AWS Signature Version 4 remote write authentication settings that use a `Secret` object named `sigv4-credentials` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://authorization.example.com/api/write" - sigv4: - region: <AWS_region> <1> - accessKey: - name: sigv4-credentials <2> - key: accessKey <3> - secretKey: - name: sigv4-credentials <2> - key: secretKey <4> - profile: <AWS_profile_name> <5> - roleArn: <AWS_role_arn> <6> ----- -<1> The AWS region. -<2> The name of the `Secret` object containing the AWS API access credentials. -<3> The key that contains the AWS API access key in the specified `Secret` object. -<4> The key that contains the AWS API secret key in the specified `Secret` object. -<5> The name of the AWS profile that is being used to authenticate. -<6> The unique identifier for the Amazon Resource Name (ARN) assigned to your role. - -.Sample YAML for Basic authentication - -The following shows sample Basic authentication settings for a `Secret` object named `rw-basic-auth` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: rw-basic-auth - namespace: openshift-monitoring -stringData: - user: <basic_username> <1> - password: <basic_password> <2> -type: Opaque ----- -<1> The username. -<2> The password. - -The following sample shows a `basicAuth` remote write configuration that uses a `Secret` object named `rw-basic-auth` in the `openshift-monitoring` namespace. -It assumes that you have already set up authentication credentials for the endpoint. - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://basicauth.example.com/api/write" - basicAuth: - username: - name: rw-basic-auth <1> - key: user <2> - password: - name: rw-basic-auth <1> - key: password <3> ----- -<1> The name of the `Secret` object that contains the authentication credentials. -<2> The key that contains the username in the specified `Secret` object. -<3> The key that contains the password in the specified `Secret` object. - -.Sample YAML for authentication with a bearer token using a `Secret` Object - -The following shows bearer token settings for a `Secret` object named `rw-bearer-auth` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: rw-bearer-auth - namespace: openshift-monitoring -stringData: - token: <authentication_token> <1> -type: Opaque ----- -<1> The authentication token. - -The following shows sample bearer token config map settings that use a `Secret` object named `rw-bearer-auth` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - enableUserWorkload: true - prometheusK8s: - remoteWrite: - - url: "https://authorization.example.com/api/write" - authorization: - type: Bearer <1> - credentials: - name: rw-bearer-auth <2> - key: token <3> ----- -<1> The authentication type of the request. The default value is `Bearer`. -<2> The name of the `Secret` object that contains the authentication credentials. 
-<3> The key that contains the authentication token in the specified `Secret` object. - -.Sample YAML for OAuth 2.0 authentication - -The following shows sample OAuth 2.0 settings for a `Secret` object named `oauth2-credentials` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: oauth2-credentials - namespace: openshift-monitoring -stringData: - id: <oauth2_id> <1> - secret: <oauth2_secret> <2> - token: <oauth2_authentication_token> <3> -type: Opaque ----- -<1> The Oauth 2.0 ID. -<2> The OAuth 2.0 secret. -<3> The OAuth 2.0 token. - -The following shows an `oauth2` remote write authentication sample configuration that uses a `Secret` object named `oauth2-credentials` in the `openshift-monitoring` namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://test.example.com/api/write" - oauth2: - clientId: - secret: - name: oauth2-credentials <1> - key: id <2> - clientSecret: - name: oauth2-credentials <1> - key: secret <2> - tokenUrl: https://example.com/oauth2/token <3> - scopes: <4> - - <scope_1> - - <scope_2> - endpointParams: <5> - param1: <parameter_1> - param2: <parameter_2> ----- -<1> The name of the corresponding `Secret` object. Note that `ClientId` can alternatively refer to a `ConfigMap` object, although `clientSecret` must refer to a `Secret` object. -<2> The key that contains the OAuth 2.0 credentials in the specified `Secret` object. -<3> The URL used to fetch a token with the specified `clientId` and `clientSecret`. -<4> The OAuth 2.0 scopes for the authorization request. These scopes limit what data the tokens can access. -<5> The OAuth 2.0 authorization request parameters required for the authorization server. - -.Sample YAML for TLS client authentication - -The following shows sample TLS client settings for a `tls` `Secret` object named `mtls-bundle` in the `openshift-monitoring` namespace. - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mtls-bundle - namespace: openshift-monitoring -data: - ca.crt: <ca_cert> <1> - client.crt: <client_cert> <2> - client.key: <client_key> <3> -type: tls ----- -<1> The CA certificate in the Prometheus container with which to validate the server certificate. -<2> The client certificate for authentication with the server. -<3> The client key. - -The following sample shows a `tlsConfig` remote write authentication configuration that uses a TLS `Secret` object named `mtls-bundle`. - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - prometheusK8s: - remoteWrite: - - url: "https://remote-write-endpoint.example.com" - tlsConfig: - ca: - secret: - name: mtls-bundle <1> - key: ca.crt <2> - cert: - secret: - name: mtls-bundle <1> - key: client.crt <3> - keySecret: - name: mtls-bundle <1> - key: client.key <4> ----- -<1> The name of the corresponding `Secret` object that contains the TLS authentication credentials. Note that `ca` and `cert` can alternatively refer to a `ConfigMap` object, though `keySecret` must refer to a `Secret` object. -<2> The key in the specified `Secret` object that contains the CA certificate for the endpoint. -<3> The key in the specified `Secret` object that contains the client certificate for the endpoint. 
-<4> The key in the specified `Secret` object that contains the client key secret. diff --git a/modules/monitoring-targets-for-user-defined-projects.adoc b/modules/monitoring-targets-for-user-defined-projects.adoc deleted file mode 100644 index 4d78d9a1f89c..000000000000 --- a/modules/monitoring-targets-for-user-defined-projects.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/monitoring-overview.adoc - -:_content-type: CONCEPT -[id="monitoring-targets-for-user-defined-projects_{context}"] -= Monitoring targets for user-defined projects - -When monitoring is enabled for user-defined projects, you can monitor: - -* Metrics provided through service endpoints in user-defined projects. -* Pods running in user-defined projects. diff --git a/modules/monitoring-understanding-alert-routing-for-user-defined-projects.adoc b/modules/monitoring-understanding-alert-routing-for-user-defined-projects.adoc deleted file mode 100644 index 9d8ceb688823..000000000000 --- a/modules/monitoring-understanding-alert-routing-for-user-defined-projects.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/enabling-alert-routing-for-user-defined-projects.adoc - -:_content-type: CONCEPT -[id="understanding-alert-routing-for-user-defined-projects_{context}"] -= Understanding alert routing for user-defined projects - -[role="_abstract"] -As a cluster administrator, you can enable alert routing for user-defined projects. -With this feature, you can allow users with the **alert-routing-edit** role to configure alert notification routing and receivers for user-defined projects. -These notifications are routed by the default Alertmanager instance or, if enabled, an optional Alertmanager instance dedicated to user-defined monitoring. - -Users can then create and configure user-defined alert routing by creating or editing the `AlertmanagerConfig` objects for their user-defined projects without the help of an administrator. - -After a user has defined alert routing for a user-defined project, user-defined alert notifications are routed as follows: - -* To the `alertmanager-main` pods in the `openshift-monitoring` namespace if using the default platform Alertmanager instance. - -* To the `alertmanager-user-workload` pods in the `openshift-user-workload-monitoring` namespace if you have enabled a separate instance of Alertmanager for user-defined projects. - -[NOTE] -==== -The following are limitations of alert routing for user-defined projects: - -* For user-defined alerting rules, user-defined routing is scoped to the namespace in which the resource is defined. -For example, a routing configuration in namespace `ns1` only applies to `PrometheusRules` resources in the same namespace. - -* When a namespace is excluded from user-defined monitoring, `AlertmanagerConfig` resources in the namespace cease to be part of the Alertmanager configuration. 
-==== \ No newline at end of file diff --git a/modules/monitoring-understanding-metrics.adoc b/modules/monitoring-understanding-metrics.adoc deleted file mode 100644 index cbca81148aa9..000000000000 --- a/modules/monitoring-understanding-metrics.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -:_content-type: CONCEPT -[id="understanding-metrics_{context}"] -= Understanding metrics - -[role="_abstract"] -In {product-title} {product-version}, cluster components are monitored by scraping metrics exposed through service endpoints. You can also configure metrics collection for user-defined projects. - -You can define the metrics that you want to provide for your own workloads by using Prometheus client libraries at the application level. - -In {product-title}, metrics are exposed through an HTTP service endpoint under the `/metrics` canonical name. You can list all available metrics for a service by running a `curl` query against `\http://<endpoint>/metrics`. For instance, you can expose a route to the `prometheus-example-app` example service and then run the following to view all of its available metrics: - -[source,terminal] ----- -$ curl http://<example_app_endpoint>/metrics ----- - -.Example output -[source,terminal] ----- -# HELP http_requests_total Count of all HTTP requests -# TYPE http_requests_total counter -http_requests_total{code="200",method="get"} 4 -http_requests_total{code="404",method="get"} 2 -# HELP version Version information about this binary -# TYPE version gauge -version{version="v0.1.0"} 1 ----- diff --git a/modules/monitoring-understanding-the-monitoring-stack.adoc b/modules/monitoring-understanding-the-monitoring-stack.adoc deleted file mode 100644 index 0d3208e2eab5..000000000000 --- a/modules/monitoring-understanding-the-monitoring-stack.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/support/virt-openshift-cluster-monitoring.adoc -// * monitoring/monitoring-overview.adoc - -// This module uses a conditionalized title so that the module -// can be re-used in associated products but the title is not -// included in the existing OpenShift assembly. - -:_content-type: CONCEPT -[id="understanding-the-monitoring-stack_{context}"] -= Understanding the monitoring stack - -The {product-title} monitoring stack is based on the link:https://prometheus.io/[Prometheus] open source project and its wider ecosystem. The monitoring stack includes the following: - -* *Default platform monitoring components*. A set of platform monitoring components are installed in the `openshift-monitoring` project by default during an {product-title} installation. This provides monitoring for core {product-title} components including Kubernetes services. The default monitoring stack also enables remote health monitoring for clusters. These components are illustrated in the *Installed by default* section in the following diagram. - -* *Components for monitoring user-defined projects*. After optionally enabling monitoring for user-defined projects, additional monitoring components are installed in the `openshift-user-workload-monitoring` project. This provides monitoring for user-defined projects. These components are illustrated in the *User* section in the following diagram. 
- -image:monitoring-architecture.png[{product-title} monitoring architecture] diff --git a/modules/monitoring-unmanaged-monitoring-operators.adoc b/modules/monitoring-unmanaged-monitoring-operators.adoc deleted file mode 100644 index 9d3ba3631ece..000000000000 --- a/modules/monitoring-unmanaged-monitoring-operators.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="unmanaged-monitoring-operators_{context}"] -= Support policy for monitoring Operators - -Monitoring Operators ensure that {product-title} monitoring resources function as designed and tested. If Cluster Version Operator (CVO) control of an Operator is overridden, the Operator does not respond to configuration changes, reconcile the intended state of cluster objects, or receive updates. - -While overriding CVO control for an Operator can be helpful during debugging, this is unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. - -.Overriding the Cluster Version Operator - -The `spec.overrides` parameter can be added to the configuration for the CVO to allow administrators to provide a list of overrides to the behavior of the CVO for a component. Setting the `spec.overrides[].unmanaged` parameter to `true` for a component blocks cluster upgrades and alerts the administrator after a CVO override has been set: - -[source,terminal] ----- -Disabling ownership via cluster version overrides prevents upgrades. Please remove overrides before continuing. ----- - -[WARNING] -==== -Setting a CVO override puts the entire cluster in an unsupported state and prevents the monitoring stack from being reconciled to its intended state. This impacts the reliability features built into Operators and prevents updates from being received. Reported issues must be reproduced after removing any overrides for support to proceed. -==== diff --git a/modules/monitoring-using-node-selectors-to-move-monitoring-components.adoc b/modules/monitoring-using-node-selectors-to-move-monitoring-components.adoc deleted file mode 100644 index f1143b82555b..000000000000 --- a/modules/monitoring-using-node-selectors-to-move-monitoring-components.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/configuring-the-monitoring-stack.adoc - -:_content-type: CONCEPT -[id="using-node-selectors-to-move-monitoring-components_{context}"] -= Using node selectors to move monitoring components - -By using the `nodeSelector` constraint with labeled nodes, you can move any of the monitoring stack components to specific nodes. -By doing so, you can control the placement and distribution of the monitoring components across a cluster. - -By controlling placement and distribution of monitoring components, you can optimize system resource use, improve performance, and segregate workloads based on specific requirements or policies. - -[id="how-node-selectors-work-with-other-constraints_{context}"] -== How node selectors work with other constraints - - -If you move monitoring components by using node selector constraints, be aware that other constraints to control pod scheduling might exist for a cluster: - -* Topology spread constraints might be in place to control pod placement. 
-* Hard anti-affinity rules are in place for Prometheus, Thanos Querier, Alertmanager, and other monitoring components to ensure that multiple pods for these components are always spread across different nodes and are therefore always highly available. - -When scheduling pods onto nodes, the pod scheduler tries to satisfy all existing constraints when determining pod placement. -That is, all constraints compound when the pod scheduler determines which pods will be placed on which nodes. - -Therefore, if you configure a node selector constraint but existing constraints cannot all be satisfied, the pod scheduler cannot match all constraints and will not schedule a pod for placement onto a node. - -To maintain resilience and high availability for monitoring components, ensure that enough nodes are available and match all constraints when you configure a node selector constraint to move a component. diff --git a/modules/monitoring-viewing-a-list-of-available-metrics.adoc b/modules/monitoring-viewing-a-list-of-available-metrics.adoc deleted file mode 100644 index a498d167c843..000000000000 --- a/modules/monitoring-viewing-a-list-of-available-metrics.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/managing-metrics.adoc - -:_content-type: PROCEDURE -[id="viewing-a-list-of-available-metrics_{context}"] -= Viewing a list of available metrics - -As a cluster administrator or as a user with view permissions for all projects, you can view a list of metrics available in a cluster and output the list in JSON format. - -.Prerequisites -* You are a cluster administrator, or you have access to the cluster as a user with the `cluster-monitoring-view` role. -* You have installed the {product-title} CLI (`oc`). -* You have obtained the {product-title} API route for Thanos Querier. -* You are able to get a bearer token by using the `oc whoami -t` command. -+ -[IMPORTANT] -==== -You can only use bearer token authentication to access the Thanos Querier API route. -==== - -.Procedure - -. If you have not obtained the {product-title} API route for Thanos Querier, run the following command: -+ -[source,terminal] ----- -$ oc get routes -n openshift-monitoring thanos-querier -o jsonpath='{.status.ingress[0].host}' ----- - -. Retrieve a list of metrics in JSON format from the Thanos Querier API route by running the following command. This command uses `oc` to authenticate with a bearer token. -+ -[source,terminal] ----- -$ curl -k -H "Authorization: Bearer $(oc whoami -t)" https://<thanos_querier_route>/api/v1/metadata <1> ----- -<1> Replace `<thanos_querier_route>` with the {product-title} API route for Thanos Querier. diff --git a/modules/move-etcd-different-disk.adoc b/modules/move-etcd-different-disk.adoc deleted file mode 100644 index affb0b3f5804..000000000000 --- a/modules/move-etcd-different-disk.adoc +++ /dev/null @@ -1,190 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/recommended-performance-scale-practices/recommended-etcd-practices.adoc - -:_content-type: PROCEDURE -[id="move-etcd-different-disk_{context}"] -= Moving etcd to a different disk - -You can move etcd from a shared disk to a separate disk to prevent or resolve performance issues. - -.Prerequisites - -* The `MachineConfigPool` must match `metadata.labels[machineconfiguration.openshift.io/role]`. This applies to a controller, worker, or a custom pool. -* The node's auxiliary storage device, such as `/dev/sdb`, must match the sdb. 
Change this reference in all places in the file. - -[NOTE] -==== -This procedure does not move parts of the root file system, such as `/var/`, to another disk or partition on an installed node. -==== - -The Machine Config Operator (MCO) is responsible for mounting a secondary disk for an {product-title} {product-version} container storage. - -Use the following steps to move etcd to a different device: - -.Procedure -. Create a `machineconfig` YAML file named `etcd-mc.yml` and add the following information: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 98-var-lib-etcd -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - contents: | - [Unit] - Description=Make File System on /dev/sdb - DefaultDependencies=no - BindsTo=dev-sdb.device - After=dev-sdb.device var.mount - Before=systemd-fsck@dev-sdb.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/lib/systemd/systemd-makefs xfs /dev/sdb - TimeoutSec=0 - - [Install] - WantedBy=var-lib-containers.mount - enabled: true - name: systemd-mkfs@dev-sdb.service - - contents: | - [Unit] - Description=Mount /dev/sdb to /var/lib/etcd - Before=local-fs.target - Requires=systemd-mkfs@dev-sdb.service - After=systemd-mkfs@dev-sdb.service var.mount - - [Mount] - What=/dev/sdb - Where=/var/lib/etcd - Type=xfs - Options=defaults,prjquota - - [Install] - WantedBy=local-fs.target - enabled: true - name: var-lib-etcd.mount - - contents: | - [Unit] - Description=Sync etcd data if new mount is empty - DefaultDependencies=no - After=var-lib-etcd.mount var.mount - Before=crio.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecCondition=/usr/bin/test ! -d /var/lib/etcd/member - ExecStart=/usr/sbin/setenforce 0 - ExecStart=/bin/rsync -ar /sysroot/ostree/deploy/rhcos/var/lib/etcd/ /var/lib/etcd/ - ExecStart=/usr/sbin/setenforce 1 - TimeoutSec=0 - - [Install] - WantedBy=multi-user.target graphical.target - enabled: true - name: sync-var-lib-etcd-to-etcd.service - - contents: | - [Unit] - Description=Restore recursive SELinux security contexts - DefaultDependencies=no - After=var-lib-etcd.mount - Before=crio.service - - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/sbin/restorecon -R /var/lib/etcd/ - TimeoutSec=0 - - [Install] - WantedBy=multi-user.target graphical.target - enabled: true - name: restorecon-var-lib-etcd.service - ----- - -. Create the machine configuration by entering the following commands: -+ -[source,terminal] ----- -$ oc login -u ${ADMIN} -p ${ADMINPASSWORD} ${API} -... output omitted ... ----- -+ -[source,terminal] ----- -$ oc create -f etcd-mc.yml -machineconfig.machineconfiguration.openshift.io/98-var-lib-etcd created ----- -+ -[source,terminal] ----- -$ oc login -u ${ADMIN} -p ${ADMINPASSWORD} ${API} - [... output omitted ...] ----- -+ -[source, terminal] ----- -$ oc create -f etcd-mc.yml machineconfig.machineconfiguration.openshift.io/98-var-lib-etcd created ----- -+ -The nodes are updated and rebooted. After the reboot completes, the following events occur: -+ -* An XFS file system is created on the specified disk. -* The disk mounts to `/var/lib/etc`. -* The content from `/sysroot/ostree/deploy/rhcos/var/lib/etcd` syncs to `/var/lib/etcd`. -* A restore of `SELinux` labels is forced for `/var/lib/etcd`. -* The old content is not removed. -. 
After the nodes are on a separate disk, update the machine configuration file, `etcd-mc.yml` with the following information: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: master - name: 98-var-lib-etcd -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - contents: | - [Unit] - Description=Mount /dev/sdb to /var/lib/etcd - Before=local-fs.target - Requires=systemd-mkfs@dev-sdb.service - After=systemd-mkfs@dev-sdb.service var.mount - - [Mount] - What=/dev/sdb - Where=/var/lib/etcd - Type=xfs - Options=defaults,prjquota - - [Install] - WantedBy=local-fs.target - enabled: true - name: var-lib-etcd.mount ----- -. Apply the modified version that removes the logic for creating and syncing the device by entering the following command: -+ -[source,terminal] ----- -$ oc replace -f etcd-mc.yml ----- -+ -The previous step prevents the nodes from rebooting. diff --git a/modules/multi-architecture-creating-arm64-bootimage.adoc b/modules/multi-architecture-creating-arm64-bootimage.adoc deleted file mode 100644 index 369af56226b5..000000000000 --- a/modules/multi-architecture-creating-arm64-bootimage.adoc +++ /dev/null @@ -1,133 +0,0 @@ -//Module included in the following assemblies -// -//post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="multi-architecture-creating-arm64-bootimage_{context}"] - -= Creating an ARM64 boot image using the Azure image gallery - -The following procedure describes how to manually generate an ARM64 boot image. - -.Prerequisites - -* You installed the Azure CLI (`az`). -* You created a single-architecture Azure installer-provisioned cluster with the multi-architecture installer binary. - -.Procedure -. Log in to your Azure account: -+ -[source,terminal] ----- -$ az login ----- -. Create a storage account and upload the `arm64` virtual hard disk (VHD) to your storage account. The {product-title} installation program creates a resource group, however, the boot image can also be uploaded to a custom named resource group: -+ -[source,terminal] ----- -$ az storage account create -n ${STORAGE_ACCOUNT_NAME} -g ${RESOURCE_GROUP} -l westus --sku Standard_LRS <1> ----- -+ -<1> The `westus` object is an example region. -+ -. Create a storage container using the storage account you generated: -+ -[source,terminal] -+ ----- -$ az storage container create -n ${CONTAINER_NAME} --account-name ${STORAGE_ACCOUNT_NAME} ----- -. You must use the {product-title} installation program JSON file to extract the URL and `aarch64` VHD name: -.. Extract the `URL` field and set it to `RHCOS_VHD_ORIGIN_URL` as the file name by running the following command: -+ -[source,terminal] ----- -$ RHCOS_VHD_ORIGIN_URL=$(oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' | jq -r '.architectures.aarch64."rhel-coreos-extensions"."azure-disk".url') ----- -.. Extract the `aarch64` VHD name and set it to `BLOB_NAME` as the file name by running the following command: -+ -[source,terminal] ----- -$ BLOB_NAME=rhcos-$(oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' | jq -r '.architectures.aarch64."rhel-coreos-extensions"."azure-disk".release')-azure.aarch64.vhd ----- -. Generate a shared access signature (SAS) token. 
Use this token to upload the {op-system} VHD to your storage container with the following commands: -+ -[source,terminal] ----- -$ end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'` ----- -+ -[source,terminal] ----- -$ sas=`az storage container generate-sas -n ${CONTAINER_NAME} --account-name ${STORAGE_ACCOUNT_NAME} --https-only --permissions dlrw --expiry $end -o tsv` ----- -. Copy the {op-system} VHD into the storage container: -+ -[source, terminal] ----- -$ az storage blob copy start --account-name ${STORAGE_ACCOUNT_NAME} --sas-token "$sas" \ - --source-uri "${RHCOS_VHD_ORIGIN_URL}" \ - --destination-blob "${BLOB_NAME}" --destination-container ${CONTAINER_NAME} ----- -+ -You can check the status of the copying process with the following command: -+ -[source,terminal] ----- -$ az storage blob show -c ${CONTAINER_NAME} -n ${BLOB_NAME} --account-name ${STORAGE_ACCOUNT_NAME} | jq .properties.copy ----- -+ -.Example output -[source,terminal] ----- -{ - "completionTime": null, - "destinationSnapshot": null, - "id": "1fd97630-03ca-489a-8c4e-cfe839c9627d", - "incrementalCopy": null, - "progress": "17179869696/17179869696", - "source": "https://rhcos.blob.core.windows.net/imagebucket/rhcos-411.86.202207130959-0-azure.aarch64.vhd", - "status": "success", <1> - "statusDescription": null -} ----- -+ -<1> If the status parameter displays the `success` object, the copying process is complete. - -. Create an image gallery using the following command: -+ -[source,terminal] ----- -$ az sig create --resource-group ${RESOURCE_GROUP} --gallery-name ${GALLERY_NAME} ----- -Use the image gallery to create an image definition. In the following example command, `rhcos-arm64` is the name of the image definition. -+ -[source,terminal] ----- -$ az sig image-definition create --resource-group ${RESOURCE_GROUP} --gallery-name ${GALLERY_NAME} --gallery-image-definition rhcos-arm64 --publisher RedHat --offer arm --sku arm64 --os-type linux --architecture Arm64 --hyper-v-generation V2 ----- -. To get the URL of the VHD and set it to `RHCOS_VHD_URL` as the file name, run the following command: -+ -[source,terminal] ----- -$ RHCOS_VHD_URL=$(az storage blob url --account-name ${STORAGE_ACCOUNT_NAME} -c ${CONTAINER_NAME} -n "${BLOB_NAME}" -o tsv) ----- -. Use the `RHCOS_VHD_URL` file, your storage account, resource group, and image gallery to create an image version. In the following example, `1.0.0` is the image version. -+ -[source,terminal] ----- -$ az sig image-version create --resource-group ${RESOURCE_GROUP} --gallery-name ${GALLERY_NAME} --gallery-image-definition rhcos-arm64 --gallery-image-version 1.0.0 --os-vhd-storage-account ${STORAGE_ACCOUNT_NAME} --os-vhd-uri ${RHCOS_VHD_URL} ----- -. Your `arm64` boot image is now generated. 
You can access the ID of your image with the following command: -+ -[source,terminal] ----- -$ az sig image-version show -r $GALLERY_NAME -g $RESOURCE_GROUP -i rhcos-arm64 -e 1.0.0 ----- The following example image ID is used in the `resourceID` parameter of the compute machine set: -+ -.Example `resourceID` -[source,terminal] ----- -/resourceGroups/${RESOURCE_GROUP}/providers/Microsoft.Compute/galleries/${GALLERY_NAME}/images/rhcos-arm64/versions/1.0.0 ----- \ No newline at end of file diff --git a/modules/multi-architecture-import-imagestreams.adoc b/modules/multi-architecture-import-imagestreams.adoc deleted file mode 100644 index 6f07c7f6fd75..000000000000 --- a/modules/multi-architecture-import-imagestreams.adoc +++ /dev/null @@ -1,62 +0,0 @@ -//Module included in the following assemblies -// -//post_installation_configuration/multi-architecture-configuration.adoc - -:_content-type: PROCEDURE -[id="multi-architecture-import-imagestreams_{context}"] - -= Importing manifest lists in image streams on your multi-architecture compute machines - -On an {product-title} {product-version} cluster with multi-architecture compute machines, the image streams in the cluster do not import manifest lists automatically. You must manually change the default `importMode` option to the `PreserveOriginal` option in order to import the manifest list. - -.Prerequisites - -* You installed the {product-title} CLI (`oc`). - -.Procedure - -* The following example command shows how to patch the `cli-artifacts` `ImageStream` object so that the `cli-artifacts:latest` image stream tag is imported as a manifest list. -+ -[source,terminal] ----- -$ oc patch is/cli-artifacts -n openshift -p '{"spec":{"tags":[{"name":"latest","importPolicy":{"importMode":"PreserveOriginal"}}]}}' ----- - -.Verification - -* You can check that the manifest list was imported properly by inspecting the image stream tag. The following command lists the individual architecture manifests for a particular tag. -+ -[source,terminal] ----- -$ oc get istag cli-artifacts:latest -n openshift -oyaml ----- - -+ -If the `dockerImageManifests` object is present, then the manifest list import was successful.
- -+ -.Example output of the `dockerImageManifests` object -[source, yaml] ----- -dockerImageManifests: - - architecture: amd64 - digest: sha256:16d4c96c52923a9968fbfa69425ec703aff711f1db822e4e9788bf5d2bee5d77 - manifestSize: 1252 - mediaType: application/vnd.docker.distribution.manifest.v2+json - os: linux - - architecture: arm64 - digest: sha256:6ec8ad0d897bcdf727531f7d0b716931728999492709d19d8b09f0d90d57f626 - manifestSize: 1252 - mediaType: application/vnd.docker.distribution.manifest.v2+json - os: linux - - architecture: ppc64le - digest: sha256:65949e3a80349cdc42acd8c5b34cde6ebc3241eae8daaeea458498fedb359a6a - manifestSize: 1252 - mediaType: application/vnd.docker.distribution.manifest.v2+json - os: linux - - architecture: s390x - digest: sha256:75f4fa21224b5d5d511bea8f92dfa8e1c00231e5c81ab95e83c3013d245d1719 - manifestSize: 1252 - mediaType: application/vnd.docker.distribution.manifest.v2+json - os: linux ----- diff --git a/modules/multi-architecture-modify-machine-set-aws.adoc b/modules/multi-architecture-modify-machine-set-aws.adoc deleted file mode 100644 index 3a1c26b53ff7..000000000000 --- a/modules/multi-architecture-modify-machine-set-aws.adoc +++ /dev/null @@ -1,135 +0,0 @@ -//Module included in the following assembly -// -//post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="multi-architecture-modify-machine-set-aws_{context}"] - -= Adding an ARM64 compute machine set to your cluster - -To configure a cluster with multi-architecture compute machines, you must create a AWS ARM64 compute machine set. This adds ARM64 compute nodes to your cluster so that your cluster has multi-architecture compute machines. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You used the installation program to create an AMD64 single-architecture AWS cluster with the multi-architecture installer binary. - - -.Procedure -* Create and modify a compute machine set, this will control the ARM64 compute nodes in your cluster. 
-+ --- -[source,terminal] ----- -$ oc create -f aws-arm64-machine-set-0.yaml ----- -.Sample YAML compute machine set to deploy an ARM64 compute node - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - name: <infrastructure_id>-aws-arm64-machine-set-0 <1> - namespace: openshift-machine-api -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> <1> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <2> - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: <role> <3> - machine.openshift.io/cluster-api-machine-type: <role> <3> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> <2> - spec: - metadata: - labels: - node-role.kubernetes.io/<role>: "" - providerSpec: - value: - ami: - id: ami-02a574449d4f4d280 <4> - apiVersion: awsproviderconfig.openshift.io/v1beta1 - blockDevices: - - ebs: - iops: 0 - volumeSize: 120 - volumeType: gp2 - credentialsSecret: - name: aws-cloud-credentials - deviceIndex: 0 - iamInstanceProfile: - id: <infrastructure_id>-worker-profile <1> - instanceType: m6g.xlarge <5> - kind: AWSMachineProviderConfig - placement: - availabilityZone: us-east-1a <6> - region: <region> <7> - securityGroups: - - filters: - - name: tag:Name - values: - - <infrastructure_id>-worker-sg <1> - subnet: - filters: - - name: tag:Name - values: - - <infrastructure_id>-private-<zone> - tags: - - name: kubernetes.io/cluster/<infrastructure_id> <1> - value: owned - - name: <custom_tag_name> - value: <custom_tag_value> - userDataSecret: - name: worker-user-data ----- -<1> Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: -+ -[source,terminal] ----- -$ oc get -o jsonpath=‘{.status.infrastructureName}{“\n”}’ infrastructure cluster ----- -<2> Specify the infrastructure ID, role node label, and zone. -<3> Specify the role node label to add. -<4> Specify an ARM64 supported Red Hat Enterprise Linux CoreOS (RHCOS) Amazon Machine Image (AMI) for your AWS zone for your OpenShift Container Platform nodes. -+ -[source,terminal] ----- -$ oc get configmap/coreos-bootimages / - -n openshift-machine-config-operator / - -o jsonpath='{.data.stream}' | jq / - -r '.architectures.<arch>.images.aws.regions."<region>".image' ----- -<5> Specify an ARM64 supported machine type. For more information, refer to "Tested instance types for AWS 64-bit ARM" -<6> Specify the zone, for example `us-east-1a`. Ensure that the zone you select offers 64-bit ARM machines. -<7> Specify the region, for example, `us-east-1`. Ensure that the zone you select offers 64-bit ARM machines. --- - -.Verification - -. View the list of compute machine sets by entering the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -You can then see your created ARM64 machine set. -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -<infrastructure_id>-aws-arm64-machine-set-0 2 2 2 2 10m ----- -. 
You can check that the nodes are ready and scheduable with the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- \ No newline at end of file diff --git a/modules/multi-architecture-modify-machine-set.adoc b/modules/multi-architecture-modify-machine-set.adoc deleted file mode 100644 index 0557e97eb1d7..000000000000 --- a/modules/multi-architecture-modify-machine-set.adoc +++ /dev/null @@ -1,107 +0,0 @@ -//Module included in the following assembly -// -//post_installation_configuration/cluster-tasks.adoc - -:_content-type: PROCEDURE -[id="multi-architecture-modify-machine-set_{context}"] - -= Adding a multi-architecture compute machine set to your cluster - -To add ARM64 compute nodes to your cluster, you must create an Azure compute machine set that uses the ARM64 boot image. To create your own custom compute machine set on Azure, see "Creating a compute machine set on Azure". - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). - -.Procedure -* Create a compute machine set and modify the `resourceID` and `vmSize` parameters with the following command. This compute machine set will control the `arm64` worker nodes in your cluster: -+ -[source,terminal] ----- -$ oc create -f arm64-machine-set-0.yaml ----- -.Sample YAML compute machine set with `arm64` boot image -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: <infrastructure_id>-arm64-machine-set-0 - namespace: openshift-machine-api -spec: - replicas: 2 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-arm64-machine-set-0 - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: <infrastructure_id> - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: <infrastructure_id>-arm64-machine-set-0 - spec: - lifecycleHooks: {} - metadata: {} - providerSpec: - value: - acceleratedNetworking: true - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials - namespace: openshift-machine-api - image: - offer: "" - publisher: "" - resourceID: /resourceGroups/${RESOURCE_GROUP}/providers/Microsoft.Compute/galleries/${GALLERY_NAME}/images/rhcos-arm64/versions/1.0.0 <1> - sku: "" - version: "" - kind: AzureMachineProviderSpec - location: <region> - managedIdentity: <infrastructure_id>-identity - networkResourceGroup: <infrastructure_id>-rg - osDisk: - diskSettings: {} - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: <infrastructure_id> - resourceGroup: <infrastructure_id>-rg - subnet: <infrastructure_id>-worker-subnet - userDataSecret: - name: worker-user-data - vmSize: Standard_D4ps_v5 <2> - vnet: <infrastructure_id>-vnet - zone: "<zone>" ----- -<1> Set the `resourceID` parameter to the `arm64` boot image. -<2> Set the `vmSize` parameter to the instance type used in your installation. Some example instance types are `Standard_D4ps_v5` or `D8ps`. - -.Verification -. 
Verify that the new ARM64 machines are running by entering the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -<infrastructure_id>-arm64-machine-set-0 2 2 2 2 10m ----- -. You can check that the nodes are ready and schedulable with the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- \ No newline at end of file diff --git a/modules/multi-architecture-verifying-cluster-compatibility.adoc b/modules/multi-architecture-verifying-cluster-compatibility.adoc deleted file mode 100644 index fe3399d3902c..000000000000 --- a/modules/multi-architecture-verifying-cluster-compatibility.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: - -// * post_installation_configuration/multi-architecture-configuration.adoc - -:_content-type: PROCEDURE -[id="multi-architecture-verifying-cluster-compatibility_{context}"] - -= Verifying cluster compatibility - -Before you can start adding compute nodes of different architectures to your cluster, you must verify that your cluster is multi-architecture compatible. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). - -.Procedure - -* You can check whether your cluster uses the multi-architecture payload by running the following command: -+ -[source,terminal] ----- -$ oc adm release info -o json | jq .metadata.metadata ----- - -.Verification - -. If you see the following output, then your cluster is using the multi-architecture payload: -+ -[source,terminal] ----- -"release.openshift.io/architecture": "multi" ----- -You can then begin adding multi-architecture compute nodes to your cluster. - -. If you see the following output, then your cluster is not using the multi-architecture payload: -+ -[source,terminal] ----- -null ----- -To migrate your cluster to one that supports multi-architecture compute machines, follow the procedure in "Migrating to a cluster with multi-architecture compute machines". \ No newline at end of file diff --git a/modules/multi-cluster-about.adoc b/modules/multi-cluster-about.adoc deleted file mode 100644 index 9d6bdcb7af74..000000000000 --- a/modules/multi-cluster-about.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * assemblies/web-console.adoc - -:_content-type: CONCEPT -[id="multi-cluster-about_{context}"] -= Multicluster console - -The multicluster console provides a single interface with consistent design for the hybrid cloud console. If you enable the feature, you can switch between Advanced Cluster Management (ACM) and the cluster console in the same browser tab. This simplified, consistent design allows for shared components. diff --git a/modules/nbde-automatic-start-at-boot.adoc b/modules/nbde-automatic-start-at-boot.adoc deleted file mode 100644 index 770cd2c160f5..000000000000 --- a/modules/nbde-automatic-start-at-boot.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-automatic-start-at-boot_{context}"] -= Automatic start at boot - -Due to the sensitive nature of the key material that the Tang server uses, keep in mind that requiring manual intervention during the Tang server’s boot sequence can be beneficial despite the operational overhead. - -By default, if a Tang server starts and does not have key material present in the expected local volume, it will create fresh material and serve it.
You can avoid this default behavior by either starting with pre-existing key material or aborting the startup and waiting for manual intervention. diff --git a/modules/nbde-backing-up-server-keys.adoc b/modules/nbde-backing-up-server-keys.adoc deleted file mode 100644 index 6b282b5fb548..000000000000 --- a/modules/nbde-backing-up-server-keys.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-backing-up-server-keys_{context}"] -= Backing up keys for a Tang server - -The Tang server uses `/usr/libexec/tangd-keygen` to generate new keys and stores them in the `/var/db/tang` directory by default. To recover the Tang server in the event of a failure, back up this directory. The keys are sensitive and because they are able to perform the boot disk decryption of all hosts that have used them, the keys must be protected accordingly. - -.Procedure - -* Copy the backup key from the `/var/db/tang` directory to the temp directory from which you can restore the key. diff --git a/modules/nbde-compromise-of-key-material.adoc b/modules/nbde-compromise-of-key-material.adoc deleted file mode 100644 index 9dc0dcf0006a..000000000000 --- a/modules/nbde-compromise-of-key-material.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-compromise-of-key-material_{context}"] -= Rekeying compromised key material - -If key material is potentially exposed to unauthorized third parties, such as through the physical theft of a Tang server or associated data, immediately rotate the keys. - -.Procedure - -. Rekey any Tang server holding the affected material. -. Rekey all clients using the Tang server. -. Destroy the original key material. -. Scrutinize any incidents that result in unintended exposure of the master encryption key. If possible, take compromised nodes offline and re-encrypt their disks. - -[TIP] -Reformatting and reinstalling on the same physical hardware, although slow, is easy to automate and test. diff --git a/modules/nbde-compute-requirements.adoc b/modules/nbde-compute-requirements.adoc deleted file mode 100644 index 180e1859e8fe..000000000000 --- a/modules/nbde-compute-requirements.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-compute-requirements_{context}"] -= Compute requirements - -The computational requirements for the Tang server are very low. Any typical server grade configuration that you would use to deploy a server into production can provision sufficient compute capacity. - -High availability considerations are solely for availability and not additional compute power to satisfy client demands. diff --git a/modules/nbde-deciding-the-number-of-tang-servers-to-use.adoc b/modules/nbde-deciding-the-number-of-tang-servers-to-use.adoc deleted file mode 100644 index 0e85bc4ceca5..000000000000 --- a/modules/nbde-deciding-the-number-of-tang-servers-to-use.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-deciding-the-number-of-tang-servers-to-use_{context}"] -= Tang server sizing requirements - -The requirements around availability, network, and physical location drive the decision of how many Tang servers to use, rather than any concern over server capacity. 
- -Tang servers do not maintain the state of data encrypted using Tang resources. Tang servers are either fully independent or share only their key material, which enables them to scale well. - -There are two ways Tang servers handle key material: - -* Multiple Tang servers share key material: -** You must load balance Tang servers sharing keys behind the same URL. The configuration can be as simple as round-robin DNS, or you can use physical load balancers. -** You can scale from a single Tang server to multiple Tang servers. Scaling Tang servers does not require rekeying or client reconfiguration on the node when the Tang servers share key material and the same URL. -** Client node setup and key rotation only requires one Tang server. - -* Multiple Tang servers generate their own key material: -** You can configure multiple Tang servers at installation time. -** You can scale an individual Tang server behind a load balancer. -** All Tang servers must be available during client node setup or key rotation. -** When a client node boots using the default configuration, the Clevis client contacts all Tang servers. Only _n_ Tang servers must be online to proceed with decryption. The default value for _n_ is 1. -** Red Hat does not support post-installation configuration that changes the behavior of the Tang servers. diff --git a/modules/nbde-deleting-old-tang-server-keys.adoc b/modules/nbde-deleting-old-tang-server-keys.adoc deleted file mode 100644 index 86bebb5120d0..000000000000 --- a/modules/nbde-deleting-old-tang-server-keys.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-deleting-old-tang-server-keys_{context}"] -= Deleting old Tang server keys - -.Prerequisites - -* A root shell on the Linux machine running the Tang server. - -.Procedure - -. Locate and access the directory where the Tang server key is stored. This is usually the `/var/db/tang` directory: -+ -[source,terminal] ----- -# cd /var/db/tang/ ----- - -. List the current Tang server keys, showing the advertised and unadvertised keys: -+ -[source,terminal] ----- -# ls -A1 ----- -+ -.Example output -[source,terminal] ----- -.36AHjNH3NZDSnlONLz1-V4ie6t8.jwk -.gJZiNPMLRBnyo_ZKfK4_5SrnHYo.jwk -Bp8XjITceWSN_7XFfW7WfJDTomE.jwk -WOjQYkyK7DxY_T5pMncMO5w0f6E.jwk ----- - -. Delete the old keys: -+ -[source,terminal] ----- -# rm .*.jwk ----- - -. List the current Tang server keys to verify the unadvertised keys are no longer present: -+ -[source,terminal] ----- -# ls -A1 ----- -+ -.Example output -[source,terminal] ----- -Bp8XjITceWSN_7XFfW7WfJDTomE.jwk -WOjQYkyK7DxY_T5pMncMO5w0f6E.jwk ----- - -.Verification - -At this point, the server still advertises the new keys, but an attempt to decrypt based on the old key will fail. - -. Query the Tang server for the current advertised key thumbprints: -+ -[source,terminal] ----- -# tang-show-keys 7500 ----- -+ -.Example output -+ -[source,terminal] ----- -WOjQYkyK7DxY_T5pMncMO5w0f6E ----- - -. Decrypt the test file created earlier to verify decryption against the old keys fails: -+ -[source,terminal] ----- -# clevis decrypt </tmp/encryptValidation ----- -+ -.Example output -+ -[source,terminal] ----- -Error communicating with the server! ----- - -If you are running multiple Tang servers behind a load balancer that share the same key material, ensure the changes made are properly synchronized across the entire set of servers before proceeding. 
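For example, a minimal sketch of such a synchronization check, assuming hypothetical backend hostnames `tang01.example.com` through `tang03.example.com` that you can reach over SSH as root, compares the thumbprints that each backend advertises:

[source,terminal]
----
# for host in tang01.example.com tang02.example.com tang03.example.com; do \
    echo "== ${host} =="; \
    ssh root@"${host}" tang-show-keys 7500; \
  done
----

Every backend should report an identical set of thumbprints before you delete keys on the next server.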
diff --git a/modules/nbde-disk-encryption-technology-comparison.adoc b/modules/nbde-disk-encryption-technology-comparison.adoc deleted file mode 100644 index 775298bdb5f0..000000000000 --- a/modules/nbde-disk-encryption-technology-comparison.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-disk-encryption-technology-comparison_{context}"] -= Disk encryption technology comparison - -To understand the merits of Network-Bound Disk Encryption (NBDE) for securing data at rest on edge servers, compare key escrow and TPM disk encryption without Clevis to NBDE on systems running {op-system-base-full}. - -The following table presents some tradeoffs to consider around the threat model and the complexity of each encryption solution. - -[cols="1,1,1,1"] -|=== -| Scenario | Key escrow | TPM disk encryption (without Clevis) | NBDE - -| Protects against single-disk theft -| X -| X -| X - -| Protects against entire-server theft -| X -| -| X - -| Systems can reboot independently from the network -| -| X -| - -| No periodic rekeying -| -| X -| - -| Key is never transmitted over a network -| -| X -| X - -| Supported by OpenShift -| -| X -| X - -|=== diff --git a/modules/nbde-emergency-recovery-of-network-connectivity.adoc b/modules/nbde-emergency-recovery-of-network-connectivity.adoc deleted file mode 100644 index 473bb402647e..000000000000 --- a/modules/nbde-emergency-recovery-of-network-connectivity.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-emergency-recovery-of-network-connectivity_{context}"] -= Emergency recovery of network connectivity - -If you are unable to recover network connectivity manually, consider the following steps. Be aware that these steps are discouraged if other methods to recover network connectivity are available. - -* This method must only be performed by a highly trusted technician. -* Taking the Tang server’s key material to the remote site is considered to be a breach of the key material and all servers must be rekeyed and re-encrypted. -* This method must be used in extreme cases only, or as a proof of concept recovery method to demonstrate its viability. -* Equally extreme, but theoretically possible, is to power the server in question with an Uninterruptible Power Supply (UPS), transport the server to a location with network connectivity to boot and decrypt the disks, and then restore the server at the original location on battery power to continue operation. -* If you want to use a backup manual passphrase, you must create it before the failure situation occurs. -* Just as attack scenarios become more complex with TPM and Tang compared to a stand-alone Tang installation, so emergency disaster recovery processes are also made more complex if leveraging the same method. diff --git a/modules/nbde-generating-a-new-tang-server-key.adoc b/modules/nbde-generating-a-new-tang-server-key.adoc deleted file mode 100644 index 74b57bbf109e..000000000000 --- a/modules/nbde-generating-a-new-tang-server-key.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-generating-a-new-tang-server-key_{context}"] -= Generating a new Tang server key - -.Prerequisites - -* A root shell on the Linux machine running the Tang server. 
- -* To facilitate verification of the Tang server key rotation, encrypt a small test file with the old key: -+ -[source,terminal] ----- -# echo plaintext | clevis encrypt tang '{"url":"http://localhost:7500"}' -y >/tmp/encrypted.oldkey ----- -+ -* Verify that the encryption succeeded and the file can be decrypted to produce the same string `plaintext`: -+ -[source,terminal] ----- -# clevis decrypt </tmp/encrypted.oldkey ----- - -.Procedure - -. Locate and access the directory that stores the Tang server key. This is usually the `/var/db/tang` directory. Check the currently advertised key thumbprint: -+ -[source,terminal] ----- -# tang-show-keys 7500 ----- -+ -.Example output -+ -[source,terminal] ----- -36AHjNH3NZDSnlONLz1-V4ie6t8 ----- -+ -. Enter the Tang server key directory: -+ -[source,terminal] ----- -# cd /var/db/tang/ ----- - -. List the current Tang server keys: -+ -[source,terminal] ----- -# ls -A1 ----- -+ -.Example output -[source,terminal] ----- -36AHjNH3NZDSnlONLz1-V4ie6t8.jwk -gJZiNPMLRBnyo_ZKfK4_5SrnHYo.jwk ----- -+ -During normal Tang server operations, there are two `.jwk` files in this directory: one for signing and verification, and another for key derivation. - -. Disable advertisement of the old keys: -+ -[source,terminal] ----- -# for key in *.jwk; do \ - mv -- "$key" ".$key"; \ -done ----- -+ -New clients setting up Network-Bound Disk Encryption (NBDE) or requesting keys will no longer see the old keys. Existing clients can still access and use the old keys until they are deleted. The Tang server reads but does not advertise keys stored in UNIX hidden files, which start with the `.` character. - -. Generate a new key: -+ -[source,terminal] ----- -# /usr/libexec/tangd-keygen /var/db/tang ----- - -. List the current Tang server keys to verify the old keys are no longer advertised, as they are now hidden files, and new keys are present: -+ -[source,terminal] ----- -# ls -A1 ----- -+ -.Example output -[source,terminal] ----- -.36AHjNH3NZDSnlONLz1-V4ie6t8.jwk -.gJZiNPMLRBnyo_ZKfK4_5SrnHYo.jwk -Bp8XjITceWSN_7XFfW7WfJDTomE.jwk -WOjQYkyK7DxY_T5pMncMO5w0f6E.jwk ----- -+ -Tang automatically advertises the new keys. -+ -[NOTE] -==== -More recent Tang server installations include a helper `/usr/libexec/tangd-rotate-keys` script that takes care of disabling advertisement and generating the new keys simultaneously. -==== - -. If you are running multiple Tang servers behind a load balancer that share the same key material, ensure the changes made here are properly synchronized across the entire set of servers before proceeding. - -.Verification - -. Verify that the Tang server is advertising the new key, and not advertising the old key: -+ -[source,terminal] ----- -# tang-show-keys 7500 ----- -+ -.Example output -+ -[source,terminal] ----- -WOjQYkyK7DxY_T5pMncMO5w0f6E ----- - -. Verify that the old key, while not advertised, is still available to decryption requests: -+ -[source,terminal] ----- -# clevis decrypt </tmp/encrypted.oldkey ----- diff --git a/modules/nbde-http-versus-https.adoc b/modules/nbde-http-versus-https.adoc deleted file mode 100644 index 2cfebe3aecd9..000000000000 --- a/modules/nbde-http-versus-https.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-http-versus-https_{context}"] -= HTTP versus HTTPS - -Traffic to the Tang server can be encrypted (HTTPS) or plaintext (HTTP). 
There are no significant security advantages of encrypting this traffic, and leaving it decrypted removes any complexity or failure conditions related to Transport Layer Security (TLS) certificate checking in the node running a Clevis client. - -While it is possible to perform passive monitoring of unencrypted traffic between the node’s Clevis client and the Tang server, the ability to use this traffic to determine the key material is at best a future theoretical concern. Any such traffic analysis would require large quantities of captured data. Key rotation would immediately invalidate it. Finally, any threat actor able to perform passive monitoring has already obtained the necessary network access to perform manual connections to the Tang server and can perform the simpler manual decryption of captured Clevis headers. - -However, because other network policies in place at the installation site might require traffic encryption regardless of application, consider leaving this decision to the cluster administrator. diff --git a/modules/nbde-installation-scenarios.adoc b/modules/nbde-installation-scenarios.adoc deleted file mode 100644 index 3b18acf675d5..000000000000 --- a/modules/nbde-installation-scenarios.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-installation-scenarios_{context}"] -= Installation scenarios - -Consider the following recommendations when planning Tang server installations: - -* Small environments can use a single set of key material, even when using multiple Tang servers: -** Key rotations are easier. -** Tang servers can scale easily to permit high availability. - -* Large environments can benefit from multiple sets of key material: -** Physically diverse installations do not require the copying and synchronizing of key material between geographic regions. -** Key rotations are more complex in large environments. -** Node installation and rekeying require network connectivity to all Tang servers. -** A small increase in network traffic can occur due to a booting node querying all Tang servers during decryption. Note that while only one Clevis client query must succeed, Clevis queries all Tang servers. - -* Further complexity: -** Additional manual reconfiguration can permit the Shamir’s secret sharing (sss) of `any N of M servers online` in order to decrypt the disk partition. Decrypting disks in this scenario requires multiple sets of key material, and manual management of Tang servers and nodes with Clevis clients after the initial installation. - -* High level recommendations: -** For a single RAN deployment, a limited set of Tang servers can run in the corresponding domain controller (DC). -** For multiple RAN deployments, you must decide whether to run Tang servers in each corresponding DC or whether a global Tang environment better suits the other needs and requirements of the system. 
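To make the `any N of M servers online` idea concrete, the following sketch shows what a Shamir's secret sharing policy that requires any two of three Tang servers could look like when bound manually with Clevis. The device path and server URLs are hypothetical, and the `clevis luks bind` invocation is shown only to illustrate the policy syntax, not as a supported {product-title} installation step:

[source,terminal]
----
# clevis luks bind -d /dev/vda4 sss \
  '{"t":2,"pins":{"tang":[{"url":"http://tang01.example.com:7500"},{"url":"http://tang02.example.com:7500"},{"url":"http://tang03.example.com:7500"}]}}'
----

With `t` set to `2`, any two of the three listed servers must be reachable at unlock time.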
diff --git a/modules/nbde-installing-a-tang-server.adoc b/modules/nbde-installing-a-tang-server.adoc deleted file mode 100644 index 04cea976fd05..000000000000 --- a/modules/nbde-installing-a-tang-server.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-installing-a-tang-server_{context}"] -= Installing a Tang server - -.Procedure - -* You can install a Tang server on a {op-system-base-full} machine using either of the following commands: - -** Install the Tang server by using the `yum` command: -+ -[source,terminal] ----- -$ sudo yum install tang ----- - -** Install the Tang server by using the `dnf` command: -+ -[source,terminal] ----- -$ sudo dnf install tang ----- - -[NOTE] -==== -Installation can also be containerized and is very lightweight. -==== diff --git a/modules/nbde-installing-nbde-with-ztp.adoc b/modules/nbde-installing-nbde-with-ztp.adoc deleted file mode 100644 index 45c1e4898222..000000000000 --- a/modules/nbde-installing-nbde-with-ztp.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-installing-nbde-with-ztp_{context}"] -= Installing NBDE with {ztp} - -{ztp-first} provides the capability to install Network-Bound Disk Encryption (NBDE) and enable disk encryption at cluster installation through SiteConfig. Use the automated SiteConfig method when you are enabling disk encryption on multiple managed clusters. - -You can specify disk encryption with a list of Tang server URLs and associated thumbprints in the site plan that contains the configuration for the site installation. The site plan generates the appropriate corresponding ignition manifest along with the other day-0 manifests and applies them to the hub cluster. - -.Example `SiteConfig` custom resource (CR) containing a disk encryption specification -[source,yaml] ----- -apiVersion: ran.openshift.io/v1 -kind: SiteConfig -metadata: - name: "site-plan-sno-du-ex" - namespace: "clusters-sub" -spec: - baseDomain: "example.com" - ... - clusters: - - clusterName: "du-sno-ex" - clusterType: sno - clusterProfile: du - ... - diskEncryption: - type: "nbde" - tang: - - url: "http://10.0.0.1:7500" - thumbprint: "1c3wJKh6TQKTghTjWgS4MlIXtGk" - - url: "http://10.0.0.2:7500" - thumbprint: "WOjQYkyK7DxY_T5pMncMO5w0f6E" - … - nodes: - - hostName: "host.domain.example.com" ----- diff --git a/modules/nbde-key-escrow.adoc b/modules/nbde-key-escrow.adoc deleted file mode 100644 index f51783627e92..000000000000 --- a/modules/nbde-key-escrow.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-key-escrow_{context}"] -= Key escrow - -Key escrow is the traditional system for storing cryptographic keys. The key server on the network stores the encryption key for a node with an encrypted boot disk and returns it when queried. The complexities around key management, transport encryption, and authentication do not make this a reasonable choice for boot disk encryption. - -Although available in {op-system-base-full}, key escrow-based disk encryption setup and management is a manual process and not suited to {product-title} automation operations, including automated addition of nodes, and currently not supported by {product-title}. 
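To complement the package installation shown in "Installing a Tang server" above, the following sketch shows one way to enable the socket-activated `tangd.socket` unit and open a firewall port. The `7500` port matches the examples used elsewhere in this guide and is an assumption; the packaged unit listens on its default port unless you add a systemd override:

[source,terminal]
----
$ sudo systemctl edit tangd.socket <1>
$ sudo systemctl daemon-reload
$ sudo systemctl enable --now tangd.socket
$ sudo firewall-cmd --add-port=7500/tcp --permanent
$ sudo firewall-cmd --reload
----
<1> In the override, clear the default port with an empty `ListenStream=` line under `[Socket]`, and then add `ListenStream=7500`.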
diff --git a/modules/nbde-locating-the-tang-servers.adoc b/modules/nbde-locating-the-tang-servers.adoc deleted file mode 100644 index 50c5f68e917a..000000000000 --- a/modules/nbde-locating-the-tang-servers.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-locating-the-tang-servers_{context}"] -= Tang server location planning - -When planning your Tang server environment, consider the physical and network locations of the Tang servers. - -Physical location:: -The geographic location of the Tang servers is relatively unimportant, as long as they are suitably secured from unauthorized access or theft and offer the required availability and accessibility to run a critical service. -+ -Nodes with Clevis clients do not require local Tang servers as long as the Tang servers are available at all times. Disaster recovery requires both redundant power and redundant network connectivity to Tang servers regardless of their location. - -Network location:: -Any node with network access to the Tang servers can decrypt their own disk partitions, or any other disks encrypted by the same Tang servers. -+ -Select network locations for the Tang servers that ensure the presence or absence of network connectivity from a given host allows for permission to decrypt. For example, firewall protections might be in place to prohibit access from any type of guest or public network, or any network jack located in an unsecured area of the building. -+ -Additionally, maintain network segregation between production and development networks. This assists in defining appropriate network locations and adds an additional layer of security. -+ -Do not deploy Tang servers on the same resource, for example, the same `rolebindings.rbac.authorization.k8s.io` cluster, that they are responsible for unlocking. However, a cluster of Tang servers and other security resources can be a useful configuration to enable support of multiple additional clusters and cluster resources. diff --git a/modules/nbde-logging-considerations.adoc b/modules/nbde-logging-considerations.adoc deleted file mode 100644 index 2dbc8409dfbe..000000000000 --- a/modules/nbde-logging-considerations.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-logging-considerations_{context}"] -= Logging considerations - -Centralized logging of Tang traffic is advantageous because it might allow you to detect such things as unexpected decryption requests. For example: - -* A node requesting decryption of a passphrase that does not correspond to its boot sequence -* A node requesting decryption outside of a known maintenance activity, such as cycling keys diff --git a/modules/nbde-loss-of-a-client-machine.adoc b/modules/nbde-loss-of-a-client-machine.adoc deleted file mode 100644 index a20ac2ab40cc..000000000000 --- a/modules/nbde-loss-of-a-client-machine.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-loss-of-a-client-machine_{context}"] -= Loss of a client machine - -The loss of a cluster node that uses the Tang server to decrypt its disk partition is _not_ a disaster. Whether the machine was stolen, suffered hardware failure, or another loss scenario is not important: the disks are encrypted and considered unrecoverable. 
- -However, in the event of theft, a precautionary rotation of the Tang server’s keys and rekeying of all remaining nodes would be prudent to ensure the disks remain unrecoverable even in the event the thieves subsequently gain access to the Tang servers. - -To recover from this situation, either reinstall or replace the node. diff --git a/modules/nbde-loss-of-a-network-segment.adoc b/modules/nbde-loss-of-a-network-segment.adoc deleted file mode 100644 index 146a9ae9ea77..000000000000 --- a/modules/nbde-loss-of-a-network-segment.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-loss-of-a-network-segment_{context}"] -= Loss of a network segment - -The loss of a network segment, making a Tang server temporarily unavailable, has the following consequences: - -* {product-title} nodes continue to boot as normal, provided other servers are available. - -* New nodes cannot establish their encryption keys until the network segment is restored. In this case, ensure connectivity to remote geographic locations for the purposes of high availability and redundancy. This is because when you are installing a new node or rekeying an existing node, all of the Tang servers you are referencing in that operation must be available. - -A hybrid model for a vastly diverse network, such as five geographic regions in which each client is connected to the closest three servers, is worth investigating. - -In this scenario, new clients are able to establish their encryption keys with the subset of servers that are reachable. For example, in the set of `tang1`, `tang2` and `tang3` servers, if `tang2` becomes unreachable, clients can still establish their encryption keys with `tang1` and `tang3`, and at a later time re-establish with the full set. This can require either manual intervention or more complex automation. diff --git a/modules/nbde-loss-of-a-tang-server.adoc b/modules/nbde-loss-of-a-tang-server.adoc deleted file mode 100644 index ffafffc74f32..000000000000 --- a/modules/nbde-loss-of-a-tang-server.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-loss-of-a-tang-server_{context}"] -= Loss of a Tang server - -The loss of an individual Tang server within a load balanced set of servers with identical key material is completely transparent to the clients. - -The temporary failure of all Tang servers associated with the same URL, that is, the entire load balanced set, can be considered the same as the loss of a network segment. Existing clients have the ability to decrypt their disk partitions so long as another preconfigured Tang server is available. New clients cannot enroll until at least one of these servers comes back online. - -You can mitigate the physical loss of a Tang server by either reinstalling the server or restoring the server from backups. Ensure that the backup and restore processes of the key material are adequately protected from unauthorized access. 
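Because restoring from backups depends on having protected copies of the `/var/db/tang` key material, a minimal sketch of a backup and restore round trip might look like the following. The archive name and location are examples only, and the archive must be stored with the same care as the keys themselves:

[source,terminal]
----
# tar -czf /root/tang-keys-backup.tar.gz -C /var/db tang
# chmod 600 /root/tang-keys-backup.tar.gz
# tar -xzf /root/tang-keys-backup.tar.gz -C /var/db <1>
----
<1> Run the restore step on the rebuilt Tang server. On startup, the server advertises and uses the restored keys.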
diff --git a/modules/nbde-loss-of-client-connectivity.adoc b/modules/nbde-loss-of-client-connectivity.adoc deleted file mode 100644 index 1c803c9e4ef0..000000000000 --- a/modules/nbde-loss-of-client-connectivity.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-loss-of-client-connectivity_{context}"] -= Planning for a loss of client network connectivity - -The loss of network connectivity to an individual node will cause it to become unable to boot in an unattended fashion. - -If you are planning work that might cause a loss of network connectivity, -you can reveal the passphrase for an onsite technician to use manually, -and then rotate the keys afterwards to invalidate it: - -.Procedure - -. Before the network becomes unavailable, show the password used in the first slot `-s 1` of device `/dev/vda2` with this command: -+ -[source,terminal] ----- -$ sudo clevis luks pass -d /dev/vda2 -s 1 ----- - -. Invalidate that value and regenerate a new random boot-time passphrase with this command: -+ -[source,terminal] ----- -$ sudo clevis luks regen -d /dev/vda2 -s 1 ----- diff --git a/modules/nbde-managing-encryption-keys.adoc b/modules/nbde-managing-encryption-keys.adoc deleted file mode 100644 index 25d0849f1a78..000000000000 --- a/modules/nbde-managing-encryption-keys.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-managing-encryption-keys_{context}"] -= Tang server encryption key management - -The cryptographic mechanism to recreate the encryption key is based on the _blinded key_ stored on the node and the private key of the involved Tang servers. To protect against the possibility of an attacker who has obtained both the Tang server private key and the node’s encrypted disk, periodic rekeying is advisable. - -You must perform the rekeying operation for every node before you can delete the old key from the Tang server. The following sections provide procedures for rekeying and deleting old keys. diff --git a/modules/nbde-network-bound-disk-encryption.adoc b/modules/nbde-network-bound-disk-encryption.adoc deleted file mode 100644 index 100729b68bf6..000000000000 --- a/modules/nbde-network-bound-disk-encryption.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-network-bound-disk-encryption_{context}"] -= Network-Bound Disk Encryption (NBDE) - -Network-Bound Disk Encryption (NBDE) effectively ties the encryption key to an external server or set of servers in a secure and anonymous way across the network. This is not a key escrow, in that the nodes do not store the encryption key or transfer it over the network, but otherwise behaves in a similar fashion. - -Clevis and Tang are generic client and server components that provide network-bound encryption. {op-system-first} -uses these components in conjunction with Linux Unified Key Setup-on-disk-format (LUKS) to encrypt and decrypt root and non-root storage volumes to accomplish -Network-Bound Disk Encryption. - -When a node starts, it attempts to contact a predefined set of Tang servers by performing a cryptographic handshake. If it can reach the required number of Tang servers, the node can construct its disk decryption key and unlock the disks to continue booting. 
If the node cannot access a Tang server due to a network outage or server unavailability, the node cannot boot and continues retrying indefinitely until the Tang servers become available again. Because the key is effectively tied to the node’s presence in a network, an attacker attempting to gain access to the data at rest would need to obtain both the disks on the node, and network access to the Tang server as well. - -The following figure illustrates the deployment model for NBDE. - -image::179_OpenShift_NBDE_implementation_0821_1.png[NBDE deployment model] - -The following figure illustrates NBDE behavior during a reboot. - -image::179_OpenShift_NBDE_implementation_0821_2.png[NBDE reboot behavior] diff --git a/modules/nbde-openshift-installation-with-nbde.adoc b/modules/nbde-openshift-installation-with-nbde.adoc deleted file mode 100644 index 05fb3eb334df..000000000000 --- a/modules/nbde-openshift-installation-with-nbde.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-openshift-installation-with-nbde_{context}"] -= Installation considerations with Network-Bound Disk Encryption - -Network-Bound Disk Encryption (NBDE) must be enabled when a cluster node is installed. However, you can change the disk encryption policy at any time after it was initialized at installation. diff --git a/modules/nbde-recovering-network-connectivity-manually.adoc b/modules/nbde-recovering-network-connectivity-manually.adoc deleted file mode 100644 index 873b44134f8a..000000000000 --- a/modules/nbde-recovering-network-connectivity-manually.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-recovering-network-connectivity-manually_{context}"] -= Recovering network connectivity manually - -A somewhat complex and manually intensive process is also available to the onsite technician for network recovery. - -.Procedure - -. The onsite technician extracts the Clevis header from the hard disks. Depending on BIOS lockdown, this might involve removing the disks and installing them in a lab machine. - -. The onsite technician transmits the Clevis headers to a colleague with legitimate access to the Tang network who then performs the decryption. - -. Due to the necessity of limited access to the Tang network, the technician should not be able to access that network via VPN or other remote connectivity. Similarly, the technician cannot patch the remote server through to this network in order to decrypt the disks automatically. - -. The technician reinstalls the disk and manually enters the plain text passphrase provided by their colleague. - -. The machine successfully starts even without direct access to the Tang servers. Note that the transmission of the key material from the install site to another site with network access must be done carefully. - -. When network connectivity is restored, the technician rotates the encryption keys. 
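The final rotation step can reuse the `clevis luks regen` command shown earlier in this guide. A minimal sketch, assuming the same example device and key slot used in "Planning for a loss of client network connectivity", invalidates the passphrase that was revealed to the technician:

[source,terminal]
----
$ sudo clevis luks regen -d /dev/vda2 -s 1
----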
diff --git a/modules/nbde-recovering-server-keys.adoc b/modules/nbde-recovering-server-keys.adoc deleted file mode 100644 index 51266e86ebf7..000000000000 --- a/modules/nbde-recovering-server-keys.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-recovering-server-keys_{context}"] -= Recovering keys for a Tang server - -You can recover the keys for a Tang server by accessing the keys from a backup. - -.Procedure - -* Restore the key from your backup folder to the `/var/db/tang/` directory. -+ -When the Tang server starts up, it advertises and uses these restored keys. diff --git a/modules/nbde-rekeying-all-nbde-nodes.adoc b/modules/nbde-rekeying-all-nbde-nodes.adoc deleted file mode 100644 index c1482905ce89..000000000000 --- a/modules/nbde-rekeying-all-nbde-nodes.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-rekeying-all-nbde-nodes_{context}"] -= Rekeying all NBDE nodes - -You can rekey all of the nodes on a remote cluster by using a `DaemonSet` object without incurring any downtime to the remote cluster. - -[NOTE] -==== -If a node loses power during the rekeying, it is possible that it might become unbootable, and must be redeployed via -{rh-rhacm-first} or a GitOps pipeline. -==== - -.Prerequisites - -* `cluster-admin` access to all clusters with Network-Bound Disk Encryption (NBDE) nodes. -* All Tang servers must be accessible to every NBDE node undergoing rekeying, even if the keys of a Tang server have not changed. -* Obtain the Tang server URL and key thumbprint for every Tang server. - -.Procedure - -. Create a `DaemonSet` object based on the following template. This template sets up three redundant Tang servers, but can be easily adapted to other situations. 
Change the Tang server URLs and thumbprints in the `NEW_TANG_PIN` environment to suit your environment: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: tang-rekey - namespace: openshift-machine-config-operator -spec: - selector: - matchLabels: - name: tang-rekey - template: - metadata: - labels: - name: tang-rekey - spec: - containers: - - name: tang-rekey - image: registry.access.redhat.com/ubi9/ubi-minimal:latest - imagePullPolicy: IfNotPresent - command: - - "/sbin/chroot" - - "/host" - - "/bin/bash" - - "-ec" - args: - - | - rm -f /tmp/rekey-complete || true - echo "Current tang pin:" - clevis-luks-list -d $ROOT_DEV -s 1 - echo "Applying new tang pin: $NEW_TANG_PIN" - clevis-luks-edit -f -d $ROOT_DEV -s 1 -c "$NEW_TANG_PIN" - echo "Pin applied successfully" - touch /tmp/rekey-complete - sleep infinity - readinessProbe: - exec: - command: - - cat - - /host/tmp/rekey-complete - initialDelaySeconds: 30 - periodSeconds: 10 - env: - - name: ROOT_DEV - value: /dev/disk/by-partlabel/root - - name: NEW_TANG_PIN - value: >- - {"t":1,"pins":{"tang":[ - {"url":"http://tangserver01:7500","thp":"WOjQYkyK7DxY_T5pMncMO5w0f6E"}, - {"url":"http://tangserver02:7500","thp":"I5Ynh2JefoAO3tNH9TgI4obIaXI"}, - {"url":"http://tangserver03:7500","thp":"38qWZVeDKzCPG9pHLqKzs6k1ons"} - ]}} - volumeMounts: - - name: hostroot - mountPath: /host - securityContext: - privileged: true - volumes: - - name: hostroot - hostPath: - path: / - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - serviceAccount: machine-config-daemon - serviceAccountName: machine-config-daemon ----- -+ -In this case, even though you are rekeying `tangserver01`, you must specify not only the new thumbprint for `tangserver01`, but also the current thumbprints for all other Tang servers. Failure to specify all thumbprints for a rekeying operation opens up the opportunity for a man-in-the-middle attack. - -. To distribute the daemon set to every cluster that must be rekeyed, run the following command: -+ -[source,terminal] ----- -$ oc apply -f tang-rekey.yaml ----- -+ -However, to run at scale, wrap the daemon set in an ACM policy. This ACM configuration must contain one policy to deploy the daemon set, -a second policy to check that all the daemon set pods are READY, and a placement rule to apply it to the appropriate set of clusters. - -[NOTE] -==== -After validating that the daemon set has successfully rekeyed all servers, delete the daemon set. If you do not delete the daemon set, it must be deleted before the next rekeying operation. -==== - -.Verification - -After you distribute the daemon set, monitor the daemon sets to ensure that the rekeying has completed successfully. The script in the example daemon set terminates with an error if the rekeying failed, and remains in the `CURRENT` state if successful. There is also a readiness probe that marks the pod as `READY` when the rekeying has completed successfully. 
- -* This is an example of the output listing for the daemon set before the rekeying has completed: -+ -[source,terminal] ----- -$ oc get -n openshift-machine-config-operator ds tang-rekey ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -tang-rekey 1 1 0 1 0 kubernetes.io/os=linux 11s ----- -+ -* This is an example of the output listing for the daemon set after the rekeying has completed successfully: -+ -[source,terminal] ----- -$ oc get -n openshift-machine-config-operator ds tang-rekey ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -tang-rekey 1 1 1 1 1 kubernetes.io/os=linux 13h ----- - -Rekeying usually takes a few minutes to complete. - -[NOTE] -==== -If you use ACM policies to distribute the daemon sets to multiple clusters, you must include a compliance policy that checks every daemon set’s READY count is equal to the DESIRED count. In this way, compliance to such a policy demonstrates that all daemon set pods are READY and the rekeying has completed successfully. You could also use an ACM search to query all of the daemon sets' states. -==== diff --git a/modules/nbde-rekeying-tang-servers.adoc b/modules/nbde-rekeying-tang-servers.adoc deleted file mode 100644 index 42e19398b830..000000000000 --- a/modules/nbde-rekeying-tang-servers.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-rekeying-tang-servers_{context}"] -= Rekeying Tang servers - -This procedure uses a set of three Tang servers, each with unique keys, as an example. - -Using redundant Tang servers reduces the chances of nodes failing to boot automatically. - -Rekeying a Tang server, and all associated NBDE-encrypted nodes, is a three-step procedure. - -.Prerequisites - -* A working Network-Bound Disk Encryption (NBDE) installation on one or more nodes. - -.Procedure - -. Generate a new Tang server key. -. Rekey all NBDE-encrypted nodes so they use the new key. -. Delete the old Tang server key. -+ -[NOTE] -==== -Deleting the old key before all NBDE-encrypted nodes have completed their rekeying causes those nodes to become overly dependent on any other configured Tang servers. -==== - -.Example workflow for rekeying a Tang server -image::179_OpenShift_NBDE_implementation_0821_4.png[Rekeying a Tang server] diff --git a/modules/nbde-secret-sharing-encryption.adoc b/modules/nbde-secret-sharing-encryption.adoc deleted file mode 100644 index 9168ec0f82a1..000000000000 --- a/modules/nbde-secret-sharing-encryption.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-secret-sharing-encryption_{context}"] -= Secret sharing encryption - -Shamir’s secret sharing (sss) is a cryptographic algorithm to securely divide up, distribute, and re-assemble keys. Using this algorithm, {product-title} can support more complicated mixtures of key protection. - -When you configure a cluster node to use multiple Tang servers, {product-title} uses sss to set up a decryption policy that will succeed if at least one of the specified servers is available. You can create layers for additional security. For example, you can define a policy where {product-title} requires both the TPM and one of the given list of Tang servers to decrypt the disk. 
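One way to script the comparison of the `READY` and `DESIRED` counts on a single cluster is to read the corresponding `DaemonSet` status fields directly. This is a sketch rather than part of the documented procedure:

[source,terminal]
----
$ oc get ds tang-rekey -n openshift-machine-config-operator \
  -o jsonpath='{.status.numberReady}/{.status.desiredNumberScheduled}{"\n"}'
----

When the two numbers match, the rekeying daemon set reports all of its pods as `READY`.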
diff --git a/modules/nbde-tpm-encryption.adoc b/modules/nbde-tpm-encryption.adoc deleted file mode 100644 index b98fee78f358..000000000000 --- a/modules/nbde-tpm-encryption.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-tpm-encryption_{context}"] -= TPM encryption - -Trusted Platform Module (TPM) disk encryption is best suited for data centers or installations in remote protected locations. Full disk encryption utilities such as dm-crypt and BitLocker encrypt disks with a TPM bind key, and then store the TPM bind key in the TPM, which is attached to the motherboard of the node. The main benefit of this method is that there is no external dependency, and the node is able to decrypt its own disks at boot time without any external interaction. - -TPM disk encryption protects against decryption of data if the disk is stolen from the node and analyzed externally. However, for insecure locations this may not be sufficient. For example, if an attacker steals the entire node, the attacker can intercept the data when powering on the node, because the node decrypts its own disks. This applies to nodes with physical TPM2 chips as well as virtual machines with Virtual Trusted Platform Module (VTPM) access. diff --git a/modules/nbde-troubleshooting-permanent-error-conditions.adoc b/modules/nbde-troubleshooting-permanent-error-conditions.adoc deleted file mode 100644 index 6eff8c522f47..000000000000 --- a/modules/nbde-troubleshooting-permanent-error-conditions.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-troubleshooting-permanent-error-conditions_{context}"] -= Troubleshooting permanent rekeying errors for Tang servers - -If, after rekeying the Tang servers, the `READY` count does not equal the `DESIRED` count after an extended period of time, it might indicate a permanent failure condition. In this case, the following conditions might apply: - -* A typographical error in the Tang server URL or thumbprint in the `NEW_TANG_PIN` definition. -* The Tang server is decommissioned or the keys are permanently lost. - -.Prerequisites - -* The commands shown in this procedure can be run on the Tang server or on any Linux system that has network -access to the Tang server. - -.Procedure - -. Validate the Tang server configuration by performing a simple encrypt and decrypt operation on each Tang -server’s configuration as defined in the daemon set. -+ -This is an example of an encryption and decryption attempt with a bad thumbprint: -+ -[source,terminal] ----- -$ echo "okay" | clevis encrypt tang \ - '{"url":"http://tangserver02:7500","thp":"badthumbprint"}' | \ - clevis decrypt ----- -+ -.Example output -+ -[source,terminal] ----- -Unable to fetch advertisement: 'http://tangserver02:7500/adv/badthumbprint'! ----- -+ -This is an example of an encryption and decryption attempt with a good thumbprint: -+ -[source,terminal] ----- -$ echo "okay" | clevis encrypt tang \ - '{"url":"http://tangserver03:7500","thp":"goodthumbprint"}' | \ - clevis decrypt ----- -+ -.Example output - -+ -[source,terminal] ----- -okay ----- - -. After you identify the root cause, remedy the underlying situation: - -.. Delete the non-working daemon set. -.. Edit the daemon set definition to fix the underlying issue. 
This might include any of the following actions: -+ -* Edit a Tang server entry to correct the URL and thumbprint. -* Remove a Tang server that is no longer in service. -* Add a new Tang server that is a replacement for a decommissioned server. - -. Distribute the updated daemon set again. - -[NOTE] -==== -When replacing, removing, or adding a Tang server from a configuration, the rekeying operation will succeed as long as at least one original server is still functional, including the server currently being rekeyed. If none of the original Tang servers are functional or can be recovered, recovery of the system is impossible and you must redeploy the affected nodes. -==== - -.Verification - -Check the logs from each pod in the daemon set to determine whether the rekeying completed successfully. If the rekeying is not successful, the logs might indicate the failure condition. - -. Locate the name of the container that was created by the daemon set: -+ -[source,terminal] ----- -$ oc get pods -A | grep tang-rekey ----- -+ -.Example output -[source,terminal] ----- -openshift-machine-config-operator tang-rekey-7ks6h 1/1 Running 20 (8m39s ago) 89m ----- - -. Print the logs from the container. The following log is from a completed successful rekeying operation: -+ -[source,terminal] ----- -$ oc logs tang-rekey-7ks6h ----- -+ -.Example output -[source,terminal] ----- -Current tang pin: -1: sss '{"t":1,"pins":{"tang":[{"url":"http://10.46.55.192:7500"},{"url":"http://10.46.55.192:7501"},{"url":"http://10.46.55.192:7502"}]}}' -Applying new tang pin: {"t":1,"pins":{"tang":[ - {"url":"http://tangserver01:7500","thp":"WOjQYkyK7DxY_T5pMncMO5w0f6E"}, - {"url":"http://tangserver02:7500","thp":"I5Ynh2JefoAO3tNH9TgI4obIaXI"}, - {"url":"http://tangserver03:7500","thp":"38qWZVeDKzCPG9pHLqKzs6k1ons"} -]}} -Updating binding... -Binding edited successfully -Pin applied successfully ----- diff --git a/modules/nbde-troubleshooting-temporary-error-conditions.adoc b/modules/nbde-troubleshooting-temporary-error-conditions.adoc deleted file mode 100644 index 06a7f5db8275..000000000000 --- a/modules/nbde-troubleshooting-temporary-error-conditions.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -:_content-type: PROCEDURE -[id="nbde-troubleshooting-temporary-error-conditions_{context}"] -= Troubleshooting temporary rekeying errors for Tang servers - -To determine if the error condition from rekeying the Tang servers is temporary, perform the following procedure. Temporary error conditions might include: - -* Temporary network outages -* Tang server maintenance - -Generally, when these types of temporary error conditions occur, you can wait until the daemon set succeeds in resolving the error or you can delete the daemon set and not try again until the temporary error condition has been resolved. - -.Procedure - -. Restart the pod that performs the rekeying operation using the normal Kubernetes pod restart policy. - -. If any of the associated Tang servers are unavailable, try rekeying until all the servers are back online. 
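For example, a minimal sketch of restarting the rekeying pod, as described in the first step above, deletes the pod so that the daemon set controller recreates it and retries the operation. The label selector matches the `name: tang-rekey` label used in the example daemon set earlier in this guide:

[source,terminal]
----
$ oc delete pod -n openshift-machine-config-operator -l name=tang-rekey
----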
diff --git a/modules/nbde-unexpected-loss-of-network-connectivity.adoc b/modules/nbde-unexpected-loss-of-network-connectivity.adoc deleted file mode 100644 index ec0218a745ba..000000000000 --- a/modules/nbde-unexpected-loss-of-network-connectivity.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-unexpected-loss-of-network-connectivity_{context}"] -= Unexpected loss of network connectivity - -If the network disruption is unexpected and a node reboots, consider the following scenarios: - -* If any nodes are still online, ensure that they do not reboot until network connectivity is restored. This is not applicable for single-node clusters. -* The node will remain offline until such time that either network connectivity is restored, or a pre-established passphrase is entered manually at the console. In exceptional circumstances, network administrators might be able to reconfigure network segments to reestablish access, but this is counter to the intent of NBDE, which is that lack of network access means lack of ability to boot. -* The lack of network access at the node can reasonably be expected to impact that node’s ability to function as well as its ability to boot. Even if the node were to boot via manual intervention, the lack of network access would make it effectively useless. diff --git a/modules/nbde-using-tang-servers-for-disk-encryption.adoc b/modules/nbde-using-tang-servers-for-disk-encryption.adoc deleted file mode 100644 index 3e1394dabf20..000000000000 --- a/modules/nbde-using-tang-servers-for-disk-encryption.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// security/nbde-implementation-guide.adoc - -[id="nbde-using-tang-servers-for-disk-encryption_{context}"] -= Tang server disk encryption - -The following components and technologies implement Network-Bound Disk Encryption (NBDE). - -image::179_OpenShift_NBDE_implementation_0821_3.png[Network-Bound Disk Encryption (NBDE), Clevis framework, Tang server] - -_Tang_ is a server for binding data to network presence. It makes a node containing the data available when the node is bound to a certain secure network. Tang is stateless and does not require Transport Layer Security (TLS) or authentication. Unlike escrow-based solutions, where the key server stores all encryption keys and has knowledge of every encryption key, Tang never interacts with any node keys, so it never gains any identifying information from the node. - -_Clevis_ is a pluggable framework for automated decryption that provides automated unlocking of Linux Unified Key Setup-on-disk-format (LUKS) volumes. The Clevis package runs on the node and provides the client side of the feature. - -A _Clevis pin_ is a plugin into the Clevis framework. There are three pin types: - -TPM2:: Binds the disk encryption to the TPM2. -Tang:: Binds the disk encryption to a Tang server to enable NBDE. -Shamir’s secret sharing (sss):: Allows more complex combinations of other pins. 
It allows more nuanced policies such as the following: - -* Must be able to reach one of these three Tang servers -* Must be able to reach three of these five Tang servers -* Must be able to reach the TPM2 AND at least one of these three Tang servers diff --git a/modules/network-observability-auth-multi-tenancy.adoc b/modules/network-observability-auth-multi-tenancy.adoc deleted file mode 100644 index a41a3d39761c..000000000000 --- a/modules/network-observability-auth-multi-tenancy.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-auth-mutli-tenancy_{context}"] -= Configure authorization and multi-tenancy -Define `ClusterRole` and `ClusterRoleBinding`. The `netobserv-reader` `ClusterRole` enables multi-tenancy and allows individual user access, or group access, to the flows stored in Loki. You can create a YAML file to define these roles. - -.Procedure - -. Using the web console, click the Import icon, *+*. -. Drop your YAML file into the editor and click *Create*: -+ -[source, yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: netobserv-reader <1> -rules: -- apiGroups: - - 'loki.grafana.com' - resources: - - network - resourceNames: - - logs - verbs: - - 'get' -... -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: netobserv-writer -rules: -- apiGroups: - - 'loki.grafana.com' - resources: - - network - resourceNames: - - logs - verbs: - - 'create' -... -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: netobserv-writer-flp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: netobserv-writer -subjects: -- kind: ServiceAccount - name: flowlogs-pipeline <2> - namespace: netobserv -- kind: ServiceAccount - name: flowlogs-pipeline-transformer - namespace: netobserv ----- -<1> This role can be used for multi-tenancy. -<2> The `flowlogs-pipeline` writes to Loki. If you are using Kafka, this value is `flowlogs-pipeline-transformer`. \ No newline at end of file diff --git a/modules/network-observability-configuring-FLP-sampling.adoc b/modules/network-observability-configuring-FLP-sampling.adoc deleted file mode 100644 index 3a0961603d7f..000000000000 --- a/modules/network-observability-configuring-FLP-sampling.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/configuring-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-config-FLP-sampling_{context}"] - -= Updating the Flow Collector resource -As an alternative to editing YAML in the {product-title} web console, you can configure specifications, such as eBPF sampling, by patching the `flowcollector` custom resource (CR): - -.Procedure - -. 
Run the following command to patch the `flowcollector` CR and update the `spec.agent.ebpf.sampling` value: -+ -[source,terminal] ----- -$ oc patch flowcollector cluster --type=json -p '[{"op": "replace", "path": "/spec/agent/ebpf/sampling", "value": <new value>}]' -n netobserv ----- diff --git a/modules/network-observability-configuring-options-overview.adoc b/modules/network-observability-configuring-options-overview.adoc deleted file mode 100644 index b44a12382e0c..000000000000 --- a/modules/network-observability-configuring-options-overview.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: REFERENCE -[id="network-observability-configuring-options-overview_{context}"] -= Configuring advanced options for the Overview view -You can customize the graphical view by using advanced options. To access the advanced options, click *Show advanced options*. You can configure the details in the graph by using the *Display options* drop-down menu. The options available are: - -* *Metric type*: The metrics to be shown in *Bytes* or *Packets*. The default value is *Bytes*. -* *Scope*: To select the detail of components between which the network traffic flows. You can set the scope to *Node*, *Namespace*, *Owner*, or *Resource*. *Owner* is an aggregation of resources. *Resource* can be a pod, a service, a node in the case of host-network traffic, or an unknown IP address. The default value is *Namespace*. -* *Truncate labels*: Select the required width of the label from the drop-down list. The default value is *M*. - -[id="network-observability-cao-managing-panels-overview_{context}"] -== Managing panels -You can select the required statistics to be displayed, and reorder them. To manage panels, click *Manage panels*. \ No newline at end of file diff --git a/modules/network-observability-configuring-options-topology.adoc b/modules/network-observability-configuring-options-topology.adoc deleted file mode 100644 index c03257a98c29..000000000000 --- a/modules/network-observability-configuring-options-topology.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: REFERENCE -[id="network-observability-configuring-options-topology_{context}"] -= Configuring the advanced options for the Topology view -You can customize and export the view by using *Show advanced options*. The advanced options view has the following features: - -* *Find in view*: To search the required components in the view. -* *Display options*: To configure the following options: -+ -** *Layout*: To select the layout of the graphical representation. The default value is *ColaNoForce*. -** *Scope*: To select the scope of components between which the network traffic flows. The default value is *Namespace*. -** *Groups*: To enhance the understanding of ownership by grouping the components. The default value is *None*. -** *Collapse groups*: To expand or collapse the groups. The groups are expanded by default. This option is disabled if *Groups* has value *None*. -** *Show*: To select the details that need to be displayed. All the options are checked by default. The options available are: *Edges*, *Edges label*, and *Badges*. -** *Truncate labels*: To select the required width of the label from the drop-down list. The default value is *M*. 
- -[id="network-observability-cao-export-topology_{context}"] -== Exporting the topology view -To export the view, click *Export topology view*. The view is downloaded in PNG format. \ No newline at end of file diff --git a/modules/network-observability-configuring-options-trafficflow.adoc b/modules/network-observability-configuring-options-trafficflow.adoc deleted file mode 100644 index 3ae03ccb2e64..000000000000 --- a/modules/network-observability-configuring-options-trafficflow.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-configuring-options-trafficflow_{context}"] -= Configuring advanced options for the Traffic flows view -You can customize and export the view by using *Show advanced options*. -You can set the row size by using the *Display options* drop-down menu. The default value is *Normal*. - -[id="network-observability-cao-managing-columns-trafficflow{context}"] -== Managing columns -You can select the required columns to be displayed, and reorder them. To manage columns, click *Manage columns*. - -[id="network-observability-cao-export-trafficflow_{context}"] -== Exporting the traffic flow data -You can export data from the *Traffic flows* view. - -.Procedure - -. Click *Export data*. -. In the pop-up window, you can select the *Export all data* checkbox to export all the data, and clear the checkbox to select the required fields to be exported. -. Click *Export*. \ No newline at end of file diff --git a/modules/network-observability-configuring-quickfilters-flowcollector.adoc b/modules/network-observability-configuring-quickfilters-flowcollector.adoc deleted file mode 100644 index 7e3a8f7824d9..000000000000 --- a/modules/network-observability-configuring-quickfilters-flowcollector.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/configuring-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-config-quick-filters_{context}"] -= Configuring quick filters - -You can modify the filters in the `FlowCollector` resource. Exact matches are possible using double-quotes around values. Otherwise, partial matches are used for textual values. The bang (!) character, placed at the end of a key, means negation. See the sample `FlowCollector` resource for more context about modifying the YAML. - -[NOTE] -==== -The filter matching types "all of" or "any of" is a UI setting that the users can modify from the query options. It is not part of this resource configuration. -==== - -Here is a list of all available filter keys: - -.Filter keys -[cols="1,1,1,8a",options="header"] -|=== - -|Universal* -|Source -|Destination -|Description - -|namespace -|`src_namespace` -|`dst_namespace` -|Filter traffic related to a specific namespace. - -|name -|`src_name` -|`dst_name` -|Filter traffic related to a given leaf resource name, such as a specific pod, service, or node (for host-network traffic). - -|kind -|`src_kind` -|`dst_kind` -|Filter traffic related to a given resource kind. The resource kinds include the leaf resource (Pod, Service or Node), or the owner resource (Deployment and StatefulSet). - -|owner_name -|`src_owner_name` -|`dst_owner_name` -|Filter traffic related to a given resource owner; that is, a workload or a set of pods. For example, it can be a Deployment name, a StatefulSet name, etc. 
- -|resource -|`src_resource` -|`dst_resource` -|Filter traffic related to a specific resource that is denoted by its canonical name, that identifies it uniquely. The canonical notation is `kind.namespace.name` for namespaced kinds, or `node.name` for nodes. For example, `Deployment.my-namespace.my-web-server`. - -|address -|`src_address` -|`dst_address` -|Filter traffic related to an IP address. IPv4 and IPv6 are supported. CIDR ranges are also supported. - -|mac -|`src_mac` -|`dst_mac` -|Filter traffic related to a MAC address. - -|port -|`src_port` -|`dst_port` -|Filter traffic related to a specific port. - -|host_address -|`src_host_address` -|`dst_host_address` -|Filter traffic related to the host IP address where the pods are running. - -|protocol -|N/A -|N/A -|Filter traffic related to a protocol, such as TCP or UDP. - -|=== -* Universal keys filter for any of source or destination. For example, filtering `name: 'my-pod'` means all traffic from `my-pod` and all traffic to `my-pod`, regardless of the matching type used, whether *Match all* or *Match any*. diff --git a/modules/network-observability-disabling-health-alerts.adoc b/modules/network-observability-disabling-health-alerts.adoc deleted file mode 100644 index 3f370b7435a6..000000000000 --- a/modules/network-observability-disabling-health-alerts.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * network_observability/network-observability-operator-monitoring.adoc - -:_content-type: PROCEDURE -[id="network-observability-disable-alerts_{context}"] -= Disabling health alerts -You can opt out of health alerting by editing the `FlowCollector` resource: - -. In the web console, navigate to *Operators* -> *Installed Operators*. -. Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. -. Select *cluster* then select the *YAML* tab. -. Add `spec.processor.metrics.disableAlerts` to disable health alerts, as in the following YAML sample: -[source,yaml] ----- -apiVersion: flows.netobserv.io/v1alpha1 -kind: FlowCollector -metadata: - name: cluster -spec: - processor: - metrics: - disableAlerts: [NetObservLokiError, NetObservNoFlows] <1> ----- -<1> You can specify one or a list with both types of alerts to disable. \ No newline at end of file diff --git a/modules/network-observability-enriched-flows-kafka.adoc b/modules/network-observability-enriched-flows-kafka.adoc deleted file mode 100644 index 8d47bc6c7a82..000000000000 --- a/modules/network-observability-enriched-flows-kafka.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/configuring-operator.adoc - -:_content-type: PROCEDURE -[id="network-observability-enriched-flows-kafka_{context}"] -= Export enriched network flow data - -You can send network flows to Kafka, so that they can be consumed by any processor or storage that supports Kafka input, such as Splunk, Elasticsearch, or Fluentd. - -.Prerequisites -* Installed Kafka - -.Procedure - -. In the web console, navigate to *Operators* -> *Installed Operators*. -. Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. -. Select *cluster* and then select the *YAML* tab. -. 
Edit the `FlowCollector` to configure `spec.exporters` as follows: -+ -[source,yaml] ----- -apiVersion: flows.netobserv.io/v1alpha1 -kind: FlowCollector -metadata: - name: cluster -spec: - exporters: - - type: KAFKA - kafka: - address: "kafka-cluster-kafka-bootstrap.netobserv" - topic: netobserv-flows-export <1> - tls: - enable: false <2> - ----- -<1> The Network Observability Operator exports all flows to the configured Kafka topic. -<2> You can encrypt all communications to and from Kafka with SSL/TLS or mTLS. When enabled, the Kafka CA certificate must be available as a ConfigMap or a Secret, both in the namespace where the `flowlogs-pipeline` processor component is deployed (default: netobserv). It must be referenced with `spec.exporters.tls.caCert`. When using mTLS, client secrets must be available in these namespaces as well (they can be generated for instance using the AMQ Streams User Operator) and referenced with `spec.exporters.tls.userCert`. -. After configuration, network flows data can be sent to an available output in a JSON format. For more information, see _Network flows format reference_ \ No newline at end of file diff --git a/modules/network-observability-flowcollector-api-specifications.adoc b/modules/network-observability-flowcollector-api-specifications.adoc deleted file mode 100644 index 5ec9eac0d9f7..000000000000 --- a/modules/network-observability-flowcollector-api-specifications.adoc +++ /dev/null @@ -1,1461 +0,0 @@ -// Automatically generated by 'openshift-apidocs-gen'. Do not edit. -// Module included in the following assemblies: -// networking/network_observability/flowcollector-api.adoc -:_content-type: REFERENCE -[id="network-observability-flowcollector-api-specifications_{context}"] -= FlowCollector API specifications - - - -Description:: -+ --- -`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `apiVersion` -| `string` -| APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and might reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - -| `kind` -| `string` -| Kind is a string value representing the REST resource this object represents. Servers might infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - -| `metadata` -| `object` -| Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -| `spec` -| `object` -| `FlowCollectorSpec` defines the desired state of the FlowCollector resource. + - + - *: the mention of _"unsupported"_, or _"deprecated"_ for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only. - -|=== -== .metadata -Description:: -+ --- -Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata --- - -Type:: - `object` - - - - -== .spec -Description:: -+ --- -`FlowCollectorSpec` defines the desired state of the FlowCollector resource. + - + - *: the mention of _"unsupported"_, or _"deprecated"_ for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only. --- - -Type:: - `object` - -Required:: - - `agent` - - `deploymentModel` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `agent` -| `object` -| Agent configuration for flows extraction. - -| `consolePlugin` -| `object` -| `consolePlugin` defines the settings related to the {product-title} Console plugin, when available. - -| `deploymentModel` -| `string` -| `deploymentModel` defines the desired type of deployment for flow processing. Possible values are: + - - `DIRECT` (default) to make the flow processor listening directly from the agents. + - - `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor. + - Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - -| `exporters` -| `array` -| `exporters` define additional optional exporters for custom consumption or storage. - -| `kafka` -| `object` -| Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. - -| `loki` -| `object` -| Loki, the flow store, client settings. - -| `namespace` -| `string` -| Namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used. - -| `processor` -| `object` -| `processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter. - -|=== -== .spec.agent -Description:: -+ --- -Agent configuration for flows extraction. --- - -Type:: - `object` - -Required:: - - `type` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `ebpf` -| `object` -| `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`. - -| `ipfix` -| `object` -| `ipfix` - _deprecated (*)_ - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. - -| `type` -| `string` -| `type` selects the flows tracing agent. Possible values are: + - - `EBPF` (default) to use NetObserv eBPF agent. + - - `IPFIX` - _deprecated (*)_ - to use the legacy IPFIX collector. + - `EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration). - -|=== -== .spec.agent.ebpf -Description:: -+ --- -`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`. 
--- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `cacheActiveTimeout` -| `string` -| `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - -| `cacheMaxFlows` -| `integer` -| `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - -| `debug` -| `object` -| `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. - -| `excludeInterfaces` -| `array (string)` -| `excludeInterfaces` contains the interface names that will be excluded from flow tracing. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string. - -| `imagePullPolicy` -| `string` -| `imagePullPolicy` is the Kubernetes pull policy for the image defined above - -| `interfaces` -| `array (string)` -| `interfaces` contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry is enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string. - -| `kafkaBatchSize` -| `integer` -| `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB. - -| `logLevel` -| `string` -| `logLevel` defines the log level for the NetObserv eBPF Agent - -| `privileged` -| `boolean` -| Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. - -| `resources` -| `object` -| `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -| `sampling` -| `integer` -| Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - -|=== -== .spec.agent.ebpf.debug -Description:: -+ --- -`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `env` -| `object (string)` -| `env` allows passing custom environment variables to underlying components. 
Useful for passing some very concrete performance-tuning options, such as GOGC and GOMAXPROCS, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. - -|=== -== .spec.agent.ebpf.resources -Description:: -+ --- -`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `limits` -| `integer-or-string` -| Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -| `requests` -| `integer-or-string` -| Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -|=== -== .spec.agent.ipfix -Description:: -+ --- -`ipfix` - _deprecated (*)_ - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `cacheActiveTimeout` -| `string` -| `cacheActiveTimeout` is the max period during which the reporter will aggregate flows before sending - -| `cacheMaxFlows` -| `integer` -| `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows - -| `clusterNetworkOperator` -| `object` -| `clusterNetworkOperator` defines the settings related to the {product-title} Cluster Network Operator, when available. - -| `forceSampleAll` -| `boolean` -| `forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of `sampling` is ignored. - -| `ovnKubernetes` -| `object` -| `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without {product-title}. When using {product-title}, refer to the `clusterNetworkOperator` property instead. - -| `sampling` -| `integer` -| `sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX. - -|=== -== .spec.agent.ipfix.clusterNetworkOperator -Description:: -+ --- -`clusterNetworkOperator` defines the settings related to the {product-title} Cluster Network Operator, when available. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `namespace` -| `string` -| Namespace where the config map is going to be deployed. - -|=== -== .spec.agent.ipfix.ovnKubernetes -Description:: -+ --- -`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without {product-title}. When using {product-title}, refer to the `clusterNetworkOperator` property instead. 
--- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `containerName` -| `string` -| `containerName` defines the name of the container to configure for IPFIX. - -| `daemonSetName` -| `string` -| `daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods. - -| `namespace` -| `string` -| Namespace where OVN-Kubernetes pods are deployed. - -|=== -== .spec.consolePlugin -Description:: -+ --- -`consolePlugin` defines the settings related to the {product-title} Console plugin, when available. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `autoscaler` -| `object` -| `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment. Refer to HorizontalPodAutoscaler documentation (autoscaling/v2). - -| `imagePullPolicy` -| `string` -| `imagePullPolicy` is the Kubernetes pull policy for the image defined above - -| `logLevel` -| `string` -| `logLevel` for the console plugin backend - -| `port` -| `integer` -| `port` is the plugin service port. Do not use 9002, which is reserved for metrics. - -| `portNaming` -| `object` -| `portNaming` defines the configuration of the port-to-service name translation - -| `quickFilters` -| `array` -| `quickFilters` configures quick filter presets for the Console plugin - -| `register` -| `boolean` -| `register` allows, when set to true, to automatically register the provided console plugin with the {product-title} Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'` - -| `replicas` -| `integer` -| `replicas` defines the number of replicas (pods) to start. - -| `resources` -| `object` -| `resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -|=== -== .spec.consolePlugin.autoscaler -Description:: -+ --- -`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment. Refer to HorizontalPodAutoscaler documentation (autoscaling/v2). --- - -Type:: - `object` - - - - -== .spec.consolePlugin.portNaming -Description:: -+ --- -`portNaming` defines the configuration of the port-to-service name translation --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `enable` -| `boolean` -| Enable the console plugin port-to-service name translation - -| `portNames` -| `object (string)` -| `portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`. - -|=== -== .spec.consolePlugin.quickFilters -Description:: -+ --- -`quickFilters` configures quick filter presets for the Console plugin --- - -Type:: - `array` - - - - -== .spec.consolePlugin.quickFilters[] -Description:: -+ --- -`QuickFilter` defines preset configuration for Console's quick filters --- - -Type:: - `object` - -Required:: - - `filter` - - `name` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `default` -| `boolean` -| `default` defines whether this filter should be active by default or not - -| `filter` -| `object (string)` -| `filter` is a set of keys and values to be set when this filter is selected. 
Each key can relate to a list of values using a coma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`. - -| `name` -| `string` -| Name of the filter, that will be displayed in Console - -|=== -== .spec.consolePlugin.resources -Description:: -+ --- -`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `limits` -| `integer-or-string` -| Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -| `requests` -| `integer-or-string` -| Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -|=== -== .spec.exporters -Description:: -+ --- -`exporters` define additional optional exporters for custom consumption or storage. --- - -Type:: - `array` - - - - -== .spec.exporters[] -Description:: -+ --- -`FlowCollectorExporter` defines an additional exporter to send enriched flows to. --- - -Type:: - `object` - -Required:: - - `type` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `ipfix` -| `object` -| IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. _Unsupported (*)_. - -| `kafka` -| `object` -| Kafka configuration, such as the address and topic, to send enriched flows to. - -| `type` -| `string` -| `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. `IPFIX` is _unsupported (*)_. - -|=== -== .spec.exporters[].ipfix -Description:: -+ --- -IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. _Unsupported (*)_. --- - -Type:: - `object` - -Required:: - - `targetHost` - - `targetPort` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `targetHost` -| `string` -| Address of the IPFIX external receiver - -| `targetPort` -| `integer` -| Port for the IPFIX external receiver - -| `transport` -| `string` -| Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - -|=== -== .spec.exporters[].kafka -Description:: -+ --- -Kafka configuration, such as the address and topic, to send enriched flows to. --- - -Type:: - `object` - -Required:: - - `address` - - `topic` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `address` -| `string` -| Address of the Kafka server - -| `tls` -| `object` -| TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it is `netobserv-privileged`). - -| `topic` -| `string` -| Kafka topic to use. It must exist, NetObserv will not create it. - -|=== -== .spec.exporters[].kafka.tls -Description:: -+ --- -TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it is `netobserv-privileged`). 
--- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCert` -| `object` -| `caCert` defines the reference of the certificate for the Certificate Authority - -| `enable` -| `boolean` -| Enable TLS - -| `insecureSkipVerify` -| `boolean` -| `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored. - -| `userCert` -| `object` -| `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) - -|=== -== .spec.exporters[].kafka.tls.caCert -Description:: -+ --- -`caCert` defines the reference of the certificate for the Certificate Authority --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.exporters[].kafka.tls.userCert -Description:: -+ --- -`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.kafka -Description:: -+ --- -Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. --- - -Type:: - `object` - -Required:: - - `address` - - `topic` - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `address` -| `string` -| Address of the Kafka server - -| `tls` -| `object` -| TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it is `netobserv-privileged`). - -| `topic` -| `string` -| Kafka topic to use. It must exist, NetObserv will not create it. - -|=== -== .spec.kafka.tls -Description:: -+ --- -TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. 
Note that, when eBPF agents are used, the Kafka certificate needs to be copied in the agent namespace (by default it is `netobserv-privileged`). --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCert` -| `object` -| `caCert` defines the reference of the certificate for the Certificate Authority - -| `enable` -| `boolean` -| Enable TLS - -| `insecureSkipVerify` -| `boolean` -| `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored. - -| `userCert` -| `object` -| `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) - -|=== -== .spec.kafka.tls.caCert -Description:: -+ --- -`caCert` defines the reference of the certificate for the Certificate Authority --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.kafka.tls.userCert -Description:: -+ --- -`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.loki -Description:: -+ --- -Loki, the flow store, client settings. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `authToken` -| `string` -| `authToken` describes the way to get a token to authenticate to Loki. + - - `DISABLED` will not send any token with the request. + - - `FORWARD` will forward the user token for authorization. + - - `HOST` - _deprecated (*)_ - will use the local pod service account to authenticate to Loki. + - When using the Loki Operator, this must be set to `FORWARD`. - -| `batchSize` -| `integer` -| `batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending. - -| `batchWait` -| `string` -| `batchWait` is the maximum time to wait before sending a batch. 
- -| `maxBackoff` -| `string` -| `maxBackoff` is the maximum backoff time for client connection between retries. - -| `maxRetries` -| `integer` -| `maxRetries` is the maximum number of retries for client connections. - -| `minBackoff` -| `string` -| `minBackoff` is the initial backoff time for client connection between retries. - -| `querierUrl` -| `string` -| `querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway. - -| `staticLabels` -| `object (string)` -| `staticLabels` is a map of common labels to set on each flow. - -| `statusTls` -| `object` -| TLS client configuration for Loki status URL. - -| `statusUrl` -| `string` -| `statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration will be used when `statusUrl` is set. - -| `tenantID` -| `string` -| `tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. - -| `timeout` -| `string` -| `timeout` is the maximum time connection / request limit. A timeout of zero means no timeout. - -| `tls` -| `object` -| TLS client configuration for Loki URL. - -| `url` -| `string` -| `url` is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - -|=== -== .spec.loki.statusTls -Description:: -+ --- -TLS client configuration for Loki status URL. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCert` -| `object` -| `caCert` defines the reference of the certificate for the Certificate Authority - -| `enable` -| `boolean` -| Enable TLS - -| `insecureSkipVerify` -| `boolean` -| `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored. - -| `userCert` -| `object` -| `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) - -|=== -== .spec.loki.statusTls.caCert -Description:: -+ --- -`caCert` defines the reference of the certificate for the Certificate Authority --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. 
If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.loki.statusTls.userCert -Description:: -+ --- -`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.loki.tls -Description:: -+ --- -TLS client configuration for Loki URL. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `caCert` -| `object` -| `caCert` defines the reference of the certificate for the Certificate Authority - -| `enable` -| `boolean` -| Enable TLS - -| `insecureSkipVerify` -| `boolean` -| `insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to true, the `caCert` field is ignored. - -| `userCert` -| `object` -| `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) - -|=== -== .spec.loki.tls.caCert -Description:: -+ --- -`caCert` defines the reference of the certificate for the Certificate Authority --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.loki.tls.userCert -Description:: -+ --- -`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS) --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. 
- -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.processor -Description:: -+ --- -`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `conversationEndTimeout` -| `string` -| `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead). - -| `conversationHeartbeatInterval` -| `string` -| `conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation - -| `conversationTerminatingTimeout` -| `string` -| `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows. - -| `debug` -| `object` -| `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. - -| `dropUnusedFields` -| `boolean` -| `dropUnusedFields` allows, when set to true, to drop fields that are known to be unused by OVS, to save storage space. - -| `enableKubeProbes` -| `boolean` -| `enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes - -| `healthPort` -| `integer` -| `healthPort` is a collector HTTP port in the Pod that exposes the health check API - -| `imagePullPolicy` -| `string` -| `imagePullPolicy` is the Kubernetes pull policy for the image defined above - -| `kafkaConsumerAutoscaler` -| `object` -| `kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled. Refer to HorizontalPodAutoscaler documentation (autoscaling/v2). - -| `kafkaConsumerBatchSize` -| `integer` -| `kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB. - -| `kafkaConsumerQueueCapacity` -| `integer` -| `kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. - -| `kafkaConsumerReplicas` -| `integer` -| `kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled. - -| `logLevel` -| `string` -| `logLevel` of the processor runtime - -| `logTypes` -| `string` -| `logTypes` defines the desired record types to generate. 
Possible values are: + - - `FLOWS` (default) to export regular network flows + - - `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates + - - `ENDED_CONVERSATIONS` to generate only ended conversations events + - - `ALL` to generate both network flows and all conversations events + - - -| `metrics` -| `object` -| `Metrics` define the processor configuration regarding metrics - -| `port` -| `integer` -| Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081. - -| `profilePort` -| `integer` -| `profilePort` allows setting up a Go pprof profiler listening to this port - -| `resources` -| `object` -| `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -|=== -== .spec.processor.debug -Description:: -+ --- -`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as GOGC and GOMAXPROCS env vars. Users setting its values do it at their own risk. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `env` -| `object (string)` -| `env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as GOGC and GOMAXPROCS, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. - -|=== -== .spec.processor.kafkaConsumerAutoscaler -Description:: -+ --- -`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled. Refer to HorizontalPodAutoscaler documentation (autoscaling/v2). --- - -Type:: - `object` - - - - -== .spec.processor.metrics -Description:: -+ --- -`Metrics` define the processor configuration regarding metrics --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `disableAlerts` -| `array (string)` -| `disableAlerts` is a list of alerts that should be disabled. Possible values are: + - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period. + - `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors. + - - -| `ignoreTags` -| `array (string)` -| `ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`. - -| `server` -| `object` -| Metrics server endpoint configuration for Prometheus scraper - -|=== -== .spec.processor.metrics.server -Description:: -+ --- -Metrics server endpoint configuration for Prometheus scraper --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `port` -| `integer` -| The prometheus HTTP port - -| `tls` -| `object` -| TLS configuration. - -|=== -== .spec.processor.metrics.server.tls -Description:: -+ --- -TLS configuration. 
--- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `provided` -| `object` -| TLS configuration when `type` is set to `PROVIDED`. - -| `type` -| `string` -| Select the type of TLS configuration: + - - `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use {product-title} auto generated certificate using annotations. - -|=== -== .spec.processor.metrics.server.tls.provided -Description:: -+ --- -TLS configuration when `type` is set to `PROVIDED`. --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `certFile` -| `string` -| `certFile` defines the path to the certificate file name within the config map or secret - -| `certKey` -| `string` -| `certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - -| `name` -| `string` -| Name of the config map or secret containing certificates - -| `namespace` -| `string` -| Namespace of the config map or secret containing certificates. If omitted, assumes the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - -| `type` -| `string` -| Type for the certificate reference: `configmap` or `secret` - -|=== -== .spec.processor.resources -Description:: -+ --- -`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ --- - -Type:: - `object` - - - - -[cols="1,1,1",options="header"] -|=== -| Property | Type | Description - -| `limits` -| `integer-or-string` -| Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -| `requests` -| `integer-or-string` -| Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - -|=== diff --git a/modules/network-observability-flowcollector-kafka-config.adoc b/modules/network-observability-flowcollector-kafka-config.adoc deleted file mode 100644 index b8bbb50bee60..000000000000 --- a/modules/network-observability-flowcollector-kafka-config.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/configuring-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-flowcollector-kafka-config_{context}"] -= Configuring the Flow Collector resource with Kafka -You can configure the `FlowCollector` resource to use Kafka. A Kafka instance needs to be running, and a Kafka topic dedicated to {product-title} Network Observability must be created in that instance. For more information, refer to your Kafka documentation, such as link:https://access.redhat.com/documentation/en-us/red_hat_amq/7.7/html/using_amq_streams_on_openshift/using-the-topic-operator-str[Kafka documentation with AMQ Streams]. 
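For reference, if you use AMQ Streams, the dedicated topic can also be declared as a `KafkaTopic` resource instead of being created manually. The following is only a minimal sketch: it assumes the AMQ Streams (Strimzi) Topic Operator is running, that the Kafka cluster is named `kafka-cluster` in the `netobserv` namespace, and that the partition and replica counts are purely illustrative.

[source,yaml]
----
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  name: network-flows                  # topic name, referenced later by spec.kafka.topic
  namespace: netobserv                 # assumed namespace of the Kafka cluster
  labels:
    strimzi.io/cluster: kafka-cluster  # assumed Kafka cluster name
spec:
  partitions: 24                       # illustrative sizing; tune for your flow volume
  replicas: 3                          # illustrative replication factor
----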
- -The following example shows how to modify the `FlowCollector` resource for {product-title} Network Observability operator to use Kafka: - -.Sample Kafka configuration in `FlowCollector` resource -[id="network-observability-flowcollector-configuring-kafka-sample_{context}"] -[source, yaml] ----- - deploymentModel: KAFKA <1> - kafka: - address: "kafka-cluster-kafka-bootstrap.netobserv" <2> - topic: network-flows <3> - tls: - enable: false <4> ----- -<1> Set `spec.deploymentModel` to `KAFKA` instead of `DIRECT` to enable the Kafka deployment model. -<2> `spec.kafka.address` refers to the Kafka bootstrap server address. You can specify a port if needed, for instance `kafka-cluster-kafka-bootstrap.netobserv:9093` for using TLS on port 9093. -<3> `spec.kafka.topic` should match the name of a topic created in Kafka. -<4> `spec.kafka.tls` can be used to encrypt all communications to and from Kafka with TLS or mTLS. When enabled, the Kafka CA certificate must be available as a ConfigMap or a Secret, both in the namespace where the `flowlogs-pipeline` processor component is deployed (default: `netobserv`) and where the eBPF agents are deployed (default: `netobserv-privileged`). It must be referenced with `spec.kafka.tls.caCert`. When using mTLS, client secrets must be available in these namespaces as well (they can be generated for instance using the AMQ Streams User Operator) and referenced with `spec.kafka.tls.userCert`. \ No newline at end of file diff --git a/modules/network-observability-flowcollector-view.adoc b/modules/network-observability-flowcollector-view.adoc deleted file mode 100644 index 4c6491df9df4..000000000000 --- a/modules/network-observability-flowcollector-view.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/configuring-operators.adoc - -:_content-type: CONCEPT -[id="network-observability-flowcollector-view_{context}"] -= View the FlowCollector resource -You can view and edit YAML directly in the {product-title} web console. - -.Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. -. Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. -. Select *cluster* then select the *YAML* tab. There, you can modify the `FlowCollector` resource to configure the Network Observability operator. 
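If you prefer the CLI, the same resource can also be viewed or edited with `oc`; the commands below are a sketch that assumes a logged-in `oc` client and the default resource name `cluster` used throughout this document.

[source,terminal]
----
$ oc get flowcollector cluster -o yaml   # print the current configuration
$ oc edit flowcollector cluster          # open it in your default editor
----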
- -The following example shows a sample `FlowCollector` resource for the {product-title} Network Observability operator: -[id="network-observability-flowcollector-configuring-about-sample_{context}"] -.Sample `FlowCollector` resource -[source, yaml] ----- -apiVersion: flows.netobserv.io/v1beta1 -kind: FlowCollector -metadata: - name: cluster -spec: - namespace: netobserv - deploymentModel: DIRECT - agent: - type: EBPF <1> - ebpf: - sampling: 50 <2> - logLevel: info - privileged: false - resources: - requests: - memory: 50Mi - cpu: 100m - limits: - memory: 800Mi - processor: - logLevel: info - resources: - requests: - memory: 100Mi - cpu: 100m - limits: - memory: 800Mi - conversationEndTimeout: 10s - logTypes: FLOWS <3> - conversationHeartbeatInterval: 30s - loki: <4> - url: 'https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network' - statusUrl: 'https://loki-query-frontend-http.netobserv.svc:3100/' - authToken: FORWARD - tls: - enable: true - caCert: - type: configmap - name: loki-gateway-ca-bundle - certFile: service-ca.crt - consolePlugin: - register: true - logLevel: info - portNaming: - enable: true - portNames: - "3100": loki - quickFilters: <5> - - name: Applications - filter: - src_namespace!: 'openshift-,netobserv' - dst_namespace!: 'openshift-,netobserv' - default: true - - name: Infrastructure - filter: - src_namespace: 'openshift-,netobserv' - dst_namespace: 'openshift-,netobserv' - - name: Pods network - filter: - src_kind: 'Pod' - dst_kind: 'Pod' - default: true - - name: Services network - filter: - dst_kind: 'Service' ----- -<1> The Agent specification, `spec.agent.type`, must be `EBPF`. eBPF is the only supported option for {product-title}. -<2> You can set the Sampling specification, `spec.agent.ebpf.sampling`, to manage resources. Lower sampling values might consume a large amount of computational, memory, and storage resources, which you can mitigate by specifying a higher sampling value. A value of 100 means 1 flow in every 100 is sampled. A value of 0 or 1 means all flows are captured. The lower the value, the more flows are returned and the more accurate the derived metrics are. By default, eBPF sampling is set to a value of 50, so 1 flow in every 50 is sampled. Note that more sampled flows also means more storage is needed. It is recommended to start with the default values and refine them empirically to determine which settings your cluster can manage. -<3> The optional specifications `spec.processor.logTypes`, `spec.processor.conversationHeartbeatInterval`, and `spec.processor.conversationEndTimeout` can be set to enable conversation tracking. When enabled, conversation events are queryable in the web console. The values for `spec.processor.logTypes` are as follows: `FLOWS`, `CONVERSATIONS`, `ENDED_CONVERSATIONS`, or `ALL`. Storage requirements are highest for `ALL` and lowest for `ENDED_CONVERSATIONS`. -<4> The Loki specification, `spec.loki`, specifies the Loki client. The default values match the Loki install paths mentioned in the Installing the Loki Operator section. If you used another installation method for Loki, specify the appropriate client information for your installation. -<5> The `spec.quickFilters` specification defines filters that appear in the web console. The `Applications` filter keys, `src_namespace` and `dst_namespace`, are negated (`!`), so the `Applications` filter shows all traffic that _does not_ originate from, or have a destination to, any `openshift-` or `netobserv` namespaces. For more information, see Configuring quick filters below.
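To illustrate callout <5>, the following sketch shows one additional entry that could be appended under `spec.consolePlugin.quickFilters`; the namespace value `my-app` is hypothetical, and, as noted in Configuring quick filters, whether flows must match all of the keys or any of them is chosen from the query options in the console.

[source,yaml]
----
  consolePlugin:
    quickFilters:
    - name: My application            # label displayed in the console filter list
      filter:
        src_namespace: 'my-app'       # hypothetical namespace, source side
        dst_namespace: 'my-app'       # hypothetical namespace, destination side
      default: false                  # not enabled by default
----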
diff --git a/modules/network-observability-flows-format.adoc b/modules/network-observability-flows-format.adoc deleted file mode 100644 index 3efb80076797..000000000000 --- a/modules/network-observability-flows-format.adoc +++ /dev/null @@ -1,322 +0,0 @@ -// Automatically generated by 'hack/asciidoc-flows-gen.sh'. Do not edit. -// Module included in the following assemblies: -// json-flows-format-reference.adoc - -:_content-type: REFERENCE -[id="network-observability-flows-format_{context}"] -= Network Flows format reference -The document is organized in two main categories: _Labels_ and regular _Fields_. This distinction only matters when querying Loki. This is because _Labels_, unlike _Fields_, must be used in link:https://grafana.com/docs/loki/latest/logql/log_queries/#log-stream-selector[stream selectors]. - -If you are reading this specification as a reference for the Kafka export feature, you must treat all _Labels_ and _Fields_ as regualr fields and ignore any distinctions between them that are specific to Loki. - - -== Labels - -''' - -SrcK8S_Namespace:: - -• `Optional` *SrcK8S_Namespace*: `string` - -Source namespace - -''' - -DstK8S_Namespace:: - -• `Optional` *DstK8S_Namespace*: `string` - -Destination namespace - -''' - -SrcK8S_OwnerName:: - -• `Optional` *SrcK8S_OwnerName*: `string` - -Source owner, such as Deployment, StatefulSet, etc. - -''' - -DstK8S_OwnerName:: - -• `Optional` *DstK8S_OwnerName*: `string` - -Destination owner, such as Deployment, StatefulSet, etc. - -''' - -FlowDirection:: - -• *FlowDirection*: see the following section, _Enumeration: FlowDirection_ for more details. - -Flow direction from the node observation point - -''' - -_RecordType:: - -• `Optional` *_RecordType*: `RecordType` - -Type of record: 'flowLog' for regular flow logs, or 'allConnections', -'newConnection', 'heartbeat', 'endConnection' for conversation tracking - -== Fields - -''' - -SrcAddr:: - -• *SrcAddr*: `string` - -Source IP address (ipv4 or ipv6) - -''' - -DstAddr:: - -• *DstAddr*: `string` - -Destination IP address (ipv4 or ipv6) - -''' - -SrcMac:: - -• *SrcMac*: `string` - -Source MAC address - -''' - -DstMac:: - -• *DstMac*: `string` - -Destination MAC address - -''' - -SrcK8S_Name:: - -• `Optional` *SrcK8S_Name*: `string` - -Name of the source matched Kubernetes object, such as Pod name, Service name, etc. - -''' - -DstK8S_Name:: - -• `Optional` *DstK8S_Name*: `string` - -Name of the destination matched Kubernetes object, such as Pod name, Service name, etc. - -''' - -SrcK8S_Type:: - -• `Optional` *SrcK8S_Type*: `string` - -Kind of the source matched Kubernetes object, such as Pod, Service, etc. - -''' - -DstK8S_Type:: - -• `Optional` *DstK8S_Type*: `string` - -Kind of the destination matched Kubernetes object, such as Pod name, Service name, etc. - -''' - -SrcPort:: - -• *SrcPort*: `number` - -Source port - -''' - -DstPort:: - -• *DstPort*: `number` - -Destination port - -''' - -SrcK8S_OwnerType:: - -• `Optional` *SrcK8S_OwnerType*: `string` - -Kind of the source Kubernetes owner, such as Deployment, StatefulSet, etc. - -''' - -DstK8S_OwnerType:: - -• `Optional` *DstK8S_OwnerType*: `string` - -Kind of the destination Kubernetes owner, such as Deployment, StatefulSet, etc. 
- -''' - -SrcK8S_HostIP:: - -• `Optional` *SrcK8S_HostIP*: `string` - -Source node IP - -''' - -DstK8S_HostIP:: - -• `Optional` *DstK8S_HostIP*: `string` - -Destination node IP - -''' - -SrcK8S_HostName:: - -• `Optional` *SrcK8S_HostName*: `string` - -Source node name - -''' - -DstK8S_HostName:: - -• `Optional` *DstK8S_HostName*: `string` - -Destination node name - -''' - -Proto:: - -• *Proto*: `number` - -L4 protocol - -''' - -Interface:: - -• `Optional` *Interface*: `string` - -Network interface - -''' -Packets:: - -• *Packets*: `number` - -Number of packets in this flow - - -''' - -Packets_AB:: - -• `Optional` *Packets_AB*: `number` - -In conversation tracking, A to B packets counter per conversation - -''' - -Packets_BA:: - -• `Optional` *Packets_BA*: `number` - -In conversation tracking, B to A packets counter per conversation - -''' - -Bytes:: - -• *Bytes*: `number` - -Number of bytes in this flow - -''' - -Bytes_AB:: - -• `Optional` *Bytes_AB*: `number` - -In conversation tracking, A to B bytes counter per conversation - -''' - -Bytes_BA:: - -• `Optional` *Bytes_BA*: `number` - -In conversation tracking, B to A bytes counter per conversation - -''' - -TimeFlowStartMs:: - -• *TimeFlowStartMs*: `number` - -Start timestamp of this flow, in milliseconds - -''' - -TimeFlowEndMs:: - -• *TimeFlowEndMs*: `number` - -End timestamp of this flow, in milliseconds - -''' - -TimeReceived:: - -• *TimeReceived*: `number` - -Timestamp when this flow was received and processed by the flow collector, in seconds - -''' - -_HashId:: - -• `Optional` *_HashId*: `string` - -In conversation tracking, the conversation identifier - -''' - -_IsFirst:: - -• `Optional` *_IsFirst*: `string` - -In conversation tracking, a flag identifying the first flow - -''' - -numFlowLogs:: - -• `Optional` *numFlowLogs*: `number` - -In conversation tracking, a counter of flow logs per conversation - -== Enumeration: FlowDirection - -''' - -Ingress:: - -• *Ingress* = `"0"` - -Incoming traffic, from node observation point - -''' - -Egress:: - -• *Egress* = `"1"` - -Outgoing traffic, from node observation point \ No newline at end of file diff --git a/modules/network-observability-histogram-trafficflow.adoc b/modules/network-observability-histogram-trafficflow.adoc deleted file mode 100644 index bd8d89d1f4e9..000000000000 --- a/modules/network-observability-histogram-trafficflow.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: CONCEPT -[id="network-observability-histogram-trafficflow_{context}"] -== Using the histogram -You can click *Show histogram* to display a toolbar view for visualizing the history of flows as a bar chart. The histogram shows the number of logs over time. You can select a part of the histogram to filter the network flow data in the table that follows the toolbar. \ No newline at end of file diff --git a/modules/network-observability-kafka-option.adoc b/modules/network-observability-kafka-option.adoc deleted file mode 100644 index e7ead259281d..000000000000 --- a/modules/network-observability-kafka-option.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: CONCEPT -[id="network-observability-kafka-option_{context}"] -= Installing Kafka (optional) -The Kafka Operator is supported for large scale environments. 
You can install the Kafka Operator as link:https://access.redhat.com/documentation/en-us/red_hat_amq_streams/2.2[Red Hat AMQ Streams] from the Operator Hub, just as the Loki Operator and Network Observability Operator were installed. - -[NOTE] -==== -To uninstall Kafka, refer to the uninstallation process that corresponds with the method you used to install. -==== \ No newline at end of file diff --git a/modules/network-observability-loki-install.adoc deleted file mode 100644 index 817b39f47b5a..000000000000 --- a/modules/network-observability-loki-install.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-loki-installation_{context}"] -= Installing the Loki Operator -It is recommended to install link:https://catalog.redhat.com/software/containers/openshift-logging/loki-rhel8-operator/622b46bcae289285d6fcda39[Loki Operator version 5.7]. This version provides the ability to create a LokiStack instance using the `openshift-network` tenant configuration mode. It also provides fully automatic, in-cluster authentication and authorization support for Network Observability. - -.Prerequisites - -* Supported Log Store (AWS S3, Google Cloud Storage, Azure, Swift, Minio, OpenShift Data Foundation) -* {product-title} 4.10+. -* Linux Kernel 4.18+. - -//* <Any Loki install prerequisites for using with Network Observability operator?> - -There are several ways you can install Loki. One way to install the Loki Operator is by using the {product-title} web console Operator Hub. - - -.Procedure - -. Install the Loki Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *Loki Operator* from the list of available Operators, and click *Install*. - -.. Under *Installation Mode*, select *All namespaces on the cluster*. - -.. Verify that you installed the Loki Operator. Visit the *Operators* → *Installed Operators* page and look for *Loki Operator*. - -.. Verify that *Loki Operator* is listed with *Status* as *Succeeded* in all the projects. -+ -. Create a `Secret` YAML file. You can create this secret in the web console or CLI. -.. Using the web console, navigate to the *Project* -> *All Projects* dropdown and select *Create Project*. Name the project `netobserv` and click *Create*. -.. Navigate to the Import icon, *+*, in the top right corner. Drop your YAML file into the editor. It is important to create this YAML file in the `netobserv` namespace, and to use the `access_key_id` and `access_key_secret` fields to specify your credentials. - -.. After you create the secret, you should see it listed under *Workloads* -> *Secrets* in the web console. -+ -The following shows an example secret YAML file: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: loki-s3 - namespace: netobserv -stringData: - access_key_id: QUtJQUlPU0ZPRE5ON0VYQU1QTEUK - access_key_secret: d0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQo= - bucketnames: s3-bucket-name - endpoint: https://s3.eu-central-1.amazonaws.com - region: eu-central-1 ----- - -[IMPORTANT] -==== -To uninstall Loki, refer to the uninstallation process that corresponds with the method you used to install Loki. You might have remaining `ClusterRoles` and `ClusterRoleBindings`, data stored in the object store, and a persistent volume that must be removed.
-==== \ No newline at end of file diff --git a/modules/network-observability-lokistack-create.adoc b/modules/network-observability-lokistack-create.adoc deleted file mode 100644 index 0c54fdeb9829..000000000000 --- a/modules/network-observability-lokistack-create.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-lokistack-create_{context}"] -= Create a LokiStack custom resource -It is recommended to deploy the LokiStack in the same namespace referenced by the FlowCollector specification, `spec.namespace`. You can use the web console or CLI to create a namespace, or new project. - -.Procedure - -. Navigate to *Operators* -> *Installed Operators*, viewing *All projects* from the *Project* dropdown. -. Look for *Loki Operator*. In the details, under *Provided APIs*, select *LokiStack*. -. Click *Create LokiStack*. -. Ensure the following fields are specified in either *Form View* or *YAML view*: -+ -[source,yaml] ----- - apiVersion: loki.grafana.com/v1 - kind: LokiStack - metadata: - name: loki - namespace: netobserv - spec: - size: 1x.small - storage: - schemas: - - version: v12 - effectiveDate: '2022-06-01' - secret: - name: loki-s3 - type: s3 - storageClassName: gp3 <1> - tenants: - mode: openshift-network ----- -<1> Use a storage class name that is available on the cluster for `ReadWriteOnce` access mode. You can use `oc get storageclasses` to see what is available on your cluster. -+ -[IMPORTANT] -==== -You must not reuse the same LokiStack that is used for cluster logging. -==== -. Click *Create*. - -[id="deployment-sizing_{context}"] -== Deployment Sizing -Sizing for Loki follows the format of `N<x>._<size>_` where the value `<N>` is the number of instances and `<size>` specifies performance capabilities. - -[NOTE] -==== -1x.extra-small is for demo purposes only, and is not supported. -==== - -.Loki Sizing -[options="header"] -|======================================================================================== -| | 1x.extra-small | 1x.small | 1x.medium -| *Data transfer* | Demo use only. | 500GB/day | 2TB/day -| *Queries per second (QPS)* | Demo use only. | 25-50 QPS at 200ms | 25-75 QPS at 200ms -| *Replication factor* | None | 2 | 3 -| *Total CPU requests* | 5 vCPUs | 36 vCPUs | 54 vCPUs -| *Total Memory requests* | 7.5Gi | 63Gi | 139Gi -| *Total Disk requests* | 150Gi | 300Gi | 450Gi -|======================================================================================== \ No newline at end of file diff --git a/modules/network-observability-lokistack-ingestion-query.adoc b/modules/network-observability-lokistack-ingestion-query.adoc deleted file mode 100644 index fadae2fef93f..000000000000 --- a/modules/network-observability-lokistack-ingestion-query.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc -:_content-type: CONCEPT -[id="network-observability-lokistack-configuring-ingestion{context}"] - -= LokiStack ingestion limits and health alerts -The LokiStack instance comes with default settings according to the configured size. It is possible to override some of these settings, such as the ingestion and query limits. You might want to update them if you get Loki errors showing up in the Console plugin, or in `flowlogs-pipeline` logs. An automatic alert in the web console notifies you when these limits are reached. 
- -Here is an example of configured limits: - -[source,yaml] ----- -spec: - limits: - global: - ingestion: - ingestionBurstSize: 40 - ingestionRate: 20 - maxGlobalStreamsPerTenant: 25000 - queries: - maxChunksPerQuery: 2000000 - maxEntriesLimitPerQuery: 10000 - maxQuerySeries: 3000 ----- -For more information about these settings, see the link:https://loki-operator.dev/docs/api.md/#loki-grafana-com-v1-IngestionLimitSpec[LokiStack API reference]. \ No newline at end of file diff --git a/modules/network-observability-multitenancy.adoc b/modules/network-observability-multitenancy.adoc deleted file mode 100644 index b5ca0e1445bb..000000000000 --- a/modules/network-observability-multitenancy.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-multi-tenancy{context}"] -= Enable multi-tenancy in Network Observability -Multi-tenancy in the Network Observability Operator allows and restricts individual user access, or group access, to the flows stored in Loki. Access is enabled for project admins. Project admins who have limited access to some namespaces can access flows for only those namespaces. - -.Prerequisite -* You have installed link:https://catalog.redhat.com/software/containers/openshift-logging/loki-rhel8-operator/622b46bcae289285d6fcda39[Loki Operator version 5.7] -* The `FlowCollector` `spec.loki.authToken` configuration must be set to `FORWARD`. -* You must be logged in as a project administrator - -.Procedure - -. Authorize reading permission to `user1` by running the following command: -+ -[source, terminal] ----- -$ oc adm policy add-cluster-role-to-user netobserv-reader user1 ----- -+ -Now, the data is restricted to only allowed user namespaces. For example, a user that has access to a single namespace can see all the flows internal to this namespace, as well as flows going from and to this namespace. -Project admins have access to the Administrator perspective in the {product-title} console to access the Network Flows Traffic page. \ No newline at end of file diff --git a/modules/network-observability-operator-install.adoc b/modules/network-observability-operator-install.adoc deleted file mode 100644 index cd3b90d4954d..000000000000 --- a/modules/network-observability-operator-install.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-operator-installation_{context}"] -= Installing the Network Observability Operator -You can install the Network Observability Operator using the {product-title} web console Operator Hub. When you install the Operator, it provides the `FlowCollector` custom resource definition (CRD). You can set specifications in the web console when you create the `FlowCollector`. - -.Prerequisites - -* Installed Loki. It is recommended to install Loki using the link:https://catalog.redhat.com/software/containers/openshift-logging/loki-rhel8-operator/622b46bcae289285d6fcda39[Loki Operator version 5.7]. -* One of the following supported architectures is required: `amd64`, `ppc64le`, `arm64`, or `s390x`. -* Any CPU supported by Red Hat Enterprise Linux (RHEL) 9 - -[NOTE] -==== -This documentation assumes that your `LokiStack` instance name is `loki`. Using a different name requires additional configuration. -==== - -.Procedure - -. 
In the {product-title} web console, click *Operators* -> *OperatorHub*. -. Choose *Network Observability Operator* from the list of available Operators in the *OperatorHub*, and click *Install*. -. Select the checkbox `Enable Operator recommended cluster monitoring on this Namespace`. -. Navigate to *Operators* -> *Installed Operators*. Under Provided APIs for Network Observability, select the *Flow Collector* link. -.. Navigate to the *Flow Collector* tab, and click *Create FlowCollector*. Make the following selections in the form view: -+ -* *spec.agent.ebpf.Sampling* : Specify a sampling size for flows. Lower sampling sizes will have higher impact on resource utilization. For more information, see the `FlowCollector` API reference, under spec.agent.ebpf. -* *spec.deploymentModel*: If you are using Kafka, verify Kafka is selected. -* *spec.exporters*: If you are using Kafka, you can optionally send network flows to Kafka, so that they can be consumed by any processor or storage that supports Kafka input, such as Splunk, Elasticsearch, or Fluentd. To do this, set the following specifications: -** Set the *type* to `KAFKA`. -** Set the *address* as `kafka-cluster-kafka-bootstrap.netobserv`. -** Set the *topic* as `netobserv-flows-export`. The Operator exports all flows to the configured Kafka topic. -** Set the following *tls* specifications: -*** *certFile*: `service-ca.crt`, *name*: `kafka-gateway-ca-bundle`, and *type*: `configmap`. -+ -You can also configure this option at a later time by directly editing the YAML. For more information, see _Export enriched network flow data_. -* *loki.url*: Since authentication is specified separately, this URL needs to be updated to `https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network`. The first part of the URL, "loki", should match the name of your LokiStack. -* *loki.statusUrl*: Set this to `https://loki-query-frontend-http.netobserv.svc:3100/`. The first part of the URL, "loki", should match the name of your LokiStack. -* *loki.authToken*: Select the `FORWARD` value. -* *tls.enable*: Verify that the box is checked so it is enabled. -* *statusTls*: The `enable` value is false by default. -+ -For the first part of the certificate reference names: `loki-gateway-ca-bundle`, `loki-ca-bundle`, and `loki-query-frontend-http`,`loki`, should match the name of your `LokiStack`. -.. Click *Create*. - -.Verification - -To confirm this was successful, when you navigate to *Observe* you should see *Network Traffic* listed in the options. - -In the absence of *Application Traffic* within the {product-title} cluster, default filters might show that there are "No results", which results in no visual flow. Beside the filter selections, select *Clear all filters* to see the flow. - -[IMPORTANT] -==== -If you installed Loki using the Loki Operator, it is advised not to use `querierUrl`, as it can break the console access to Loki. If you installed Loki using another type of Loki installation, this does not apply. 
-==== - diff --git a/modules/network-observability-operator-uninstall.adoc b/modules/network-observability-operator-uninstall.adoc deleted file mode 100644 index 03fcd54f59a3..000000000000 --- a/modules/network-observability-operator-uninstall.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-operator-uninstall_{context}"] -= Uninstalling the Network Observability Operator - -You can uninstall the Network Observability Operator using the {product-title} web console Operator Hub, working in the *Operators* -> *Installed Operators* area. - -.Procedure - -. Remove the `FlowCollector` custom resource. -.. Click *Flow Collector*, which is next to the *Network Observability Operator* in the *Provided APIs* column. -.. Click the options menu {kebab} for the *cluster* and select *Delete FlowCollector*. -. Uninstall the Network Observability Operator. -.. Navigate back to the *Operators* -> *Installed Operators* area. -.. Click the options menu {kebab} next to the *Network Observability Operator* and select *Uninstall Operator*. -.. *Home* -> *Projects* and select `openshift-netobserv-operator` -.. Navigate to *Actions* and select *Delete Project* -. Remove the `FlowCollector` custom resource definition (CRD). -.. Navigate to *Administration* -> *CustomResourceDefinitions*. -.. Look for *FlowCollector* and click the options menu {kebab}. -.. Select *Delete CustomResourceDefinition*. -+ -[IMPORTANT] -==== -The Loki Operator and Kafka remain if they were installed and must be removed separately. Additionally, you might have remaining data stored in an object store, and a persistent volume that must be removed. -==== diff --git a/modules/network-observability-overview.adoc b/modules/network-observability-overview.adoc deleted file mode 100644 index 7a0df2056ae7..000000000000 --- a/modules/network-observability-overview.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: CONCEPT -[id="network-observability-overview_{context}"] -= Observing the network traffic from the Overview view -The *Overview* view displays the overall aggregated metrics of the network traffic flow on the cluster. As an administrator, you can monitor the statistics with the available display options. \ No newline at end of file diff --git a/modules/network-observability-quickfilter.adoc b/modules/network-observability-quickfilter.adoc deleted file mode 100644 index 54510a71492a..000000000000 --- a/modules/network-observability-quickfilter.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: REFERENCE -[id="network-observability-quickfilter{context}"] -= Filtering the network traffic -By default, the Network Traffic page displays the traffic flow data in the cluster based on the default filters configured in the `FlowCollector` instance. You can use the filter options to observe the required data by changing the preset filter. 
- -Query Options:: -You can use *Query Options* to optimize the search results, as listed below: - -** *Log Type*: The available options *Conversation* and *Flows* provide the ability to query flows by log type, such as flow log, new conversation, completed conversation, and a heartbeat, which is a periodic record with updates for long conversations. A conversation is an aggregation of flows between the same peers. -** *Reporter Node*: Every flow can be reported from both source and destination nodes. For cluster ingress, the flow is reported from the destination node and for cluster egress, the flow is reported from the source node. You can select either *Source* or *Destination*. The option *Both* is disabled for the *Overview* and *Topology* view. The default selected value is *Destination*. -** *Match filters*: You can determine the relation between different filter parameters selected in the advanced filter. The available options are *Match all* and *Match any*. *Match all* provides results that match all the values, and *Match any* provides results that match any of the values entered. The default value is *Match all*. -** *Limit*: The data limit for internal backend queries. Depending upon the matching and the filter settings, the number of traffic flow data is displayed within the specified limit. - -Quick filters:: -The default values in *Quick filters* drop-down menu are defined in the `FlowCollector` configuration. You can modify the options from console. - -Advanced filters:: -You can set the advanced filters by providing the parameter to be filtered and its corresponding text value. The section *Common* in the parameter drop-down list filters the results that match either *Source* or *Destination*. To enable or disable the applied filter, you can click on the applied filter listed below the filter options. - -[NOTE] -==== -To understand the rules of specifying the text value, click *Learn More*. -==== - -You can click *Reset default filter* to remove the existing filters, and apply the filter defined in `FlowCollector` configuration. \ No newline at end of file diff --git a/modules/network-observability-roles-create.adoc b/modules/network-observability-roles-create.adoc deleted file mode 100644 index 3cd41e348a0e..000000000000 --- a/modules/network-observability-roles-create.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: - -// * networking/network_observability/installing-operators.adoc - -:_content-type: PROCEDURE -[id="network-observability-roles-create_{context}"] -= Create roles for authentication and authorization -Specify authentication and authorization configurations by defining `ClusterRole` and `ClusterRoleBinding`. You can create a YAML file to define these roles. - -.Procedure - -. Using the web console, click the Import icon, *+*. -. Drop your YAML file into the editor and click *Create*: -+ -[source, yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: loki-netobserv-tenant -rules: -- apiGroups: - - 'loki.grafana.com' - resources: - - network - resourceNames: - - logs - verbs: - - 'get' - - 'create' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: loki-netobserv-tenant -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: loki-netobserv-tenant -subjects: -- kind: ServiceAccount - name: flowlogs-pipeline <1> - namespace: netobserv ----- -<1> The `flowlogs-pipeline` writes to Loki. 
If you are using Kafka, this value is `flowlogs-pipeline-transformer`. diff --git a/modules/network-observability-topology.adoc deleted file mode 100644 index c8d2268f1335..000000000000 --- a/modules/network-observability-topology.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: CONCEPT -[id="network-observability-topology_{context}"] -= Observing the network traffic from the Topology view -The *Topology* view provides a graphical representation of the network flows and the amount of traffic. As an administrator, you can monitor the traffic data across the application by using the *Topology* view. \ No newline at end of file diff --git a/modules/network-observability-trafficflow.adoc deleted file mode 100644 index 25258977955e..000000000000 --- a/modules/network-observability-trafficflow.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: CONCEPT -[id="network-observability-trafficflow_{context}"] -= Observing the network traffic from the Traffic flows view -The *Traffic flows* view displays the data of the network flows and the amount of traffic in a table. As an administrator, you can monitor the amount of traffic across the application by using the traffic flow table. \ No newline at end of file diff --git a/modules/network-observability-viewing-alerts.adoc deleted file mode 100644 index f238f78928bb..000000000000 --- a/modules/network-observability-viewing-alerts.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * network_observability/network-observability-operator-monitoring.adoc - -:_content-type: PROCEDURE -[id="network-observability-alert-dashboard_{context}"] -= Viewing health information - -You can access metrics about health and resource usage of the Network Observability Operator from the *Dashboards* page in the web console. A health alert banner that directs you to the dashboard can appear on the *Network Traffic* and *Home* pages when an alert is triggered. Alerts are generated in the following cases: - -* The *NetObservLokiError* alert occurs if the `flowlogs-pipeline` workload is dropping flows because of Loki errors, such as if the Loki ingestion rate limit has been reached. -* The *NetObservNoFlows* alert occurs if no flows are ingested for a certain amount of time. - -.Prerequisites - -* You have the Network Observability Operator installed. -* You have access to the cluster as a user with the `cluster-admin` role or with view permissions for all projects. - -.Procedure - -. From the *Administrator* perspective in the web console, navigate to *Observe* → *Dashboards*. -. From the *Dashboards* dropdown, select *Netobserv/Health*. -Metrics about the health of the Operator are displayed on the page.
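When the *NetObservLokiError* alert fires, the underlying Loki error is usually visible in the `flowlogs-pipeline` logs. The following command is a hedged example: it assumes the default `netobserv` namespace and that the processor pods carry an `app=flowlogs-pipeline` label, which might differ in your deployment:

[source,terminal]
----
# Assumption: default namespace and pod label; adjust both if your install differs
$ oc logs -n netobserv -l app=flowlogs-pipeline --tail=50
----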
\ No newline at end of file diff --git a/modules/network-observability-working-with-conversations.adoc deleted file mode 100644 index 41e5411d5113..000000000000 --- a/modules/network-observability-working-with-conversations.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-working-with-conversations_{context}"] -= Working with conversation tracking -As an administrator, you can group network flows that are part of the same conversation. A conversation is defined as a grouping of peers that are identified by their IP addresses, ports, and protocols, resulting in a unique *Conversation Id*. You can query conversation events in the web console. These events are represented in the web console as follows: - -** *Conversation start*: This event happens when a connection is starting or a TCP flag is intercepted. -** *Conversation tick*: This event happens at each specified interval defined in the `FlowCollector` `spec.processor.conversationHeartbeatInterval` parameter while the connection is active. -** *Conversation end*: This event happens when the `FlowCollector` `spec.processor.conversationEndTimeout` parameter is reached or the TCP flag is intercepted. -** *Flow*: This is the network traffic flow that occurs within the specified interval. - - -.Procedure -. In the web console, navigate to *Operators* -> *Installed Operators*. -. Under the *Provided APIs* heading for the *NetObserv Operator*, select *Flow Collector*. -. Select *cluster* then select the *YAML* tab. -. Configure the `FlowCollector` custom resource so that the `spec.processor.logTypes`, `conversationEndTimeout`, and `conversationHeartbeatInterval` parameters are set according to your observation needs. A sample configuration is as follows: -+ -[id="network-observability-flowcollector-configuring-conversations_{context}"] -.Configure `FlowCollector` for conversation tracking -[source, yaml] ----- -apiVersion: flows.netobserv.io/v1alpha1 -kind: FlowCollector -metadata: - name: cluster -spec: - processor: - conversationEndTimeout: 10s <1> - logTypes: FLOWS <2> - conversationHeartbeatInterval: 30s <3> ----- -<1> The *Conversation end* event represents the point when the `conversationEndTimeout` is reached or the TCP flag is intercepted. -<2> When `logTypes` is set to `FLOWS`, only the *Flow* event is exported. If you set the value to `ALL`, both conversation and flow events are exported and visible in the *Network Traffic* page. To focus only on conversation events, you can specify `CONVERSATIONS`, which exports the *Conversation start*, *Conversation tick*, and *Conversation end* events, or `ENDED_CONVERSATIONS`, which exports only the *Conversation end* events. Storage requirements are highest for `ALL` and lowest for `ENDED_CONVERSATIONS`. -<3> The *Conversation tick* event represents each specified interval defined in the `FlowCollector` `conversationHeartbeatInterval` parameter while the network connection is active. -+ -[NOTE] -==== -If you update the `logType` option, the flows from the previous selection do not clear from the console plugin. For example, if you initially set `logType` to `CONVERSATIONS` for a span of time until 10 AM and then move to `ENDED_CONVERSATIONS`, the console plugin shows all conversation events before 10 AM and only ended conversations after 10 AM. -==== -. 
Refresh the *Network Traffic* page on the *Traffic flows* tab. Notice there are two new columns, *Event/Type* and *Conversation Id*. All the *Event/Type* fields are `Flow` when *Flow* is the selected query option. -. Select *Query Options* and choose the *Log Type*, *Conversation*. Now the *Event/Type* shows all of the desired conversation events. -. Next you can filter on a specific conversation ID or switch between the *Conversation* and *Flow* log type options from the side panel. - diff --git a/modules/network-observability-working-with-overview.adoc b/modules/network-observability-working-with-overview.adoc deleted file mode 100644 index 7c2ebd78b5b9..000000000000 --- a/modules/network-observability-working-with-overview.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-working-with-overview_{context}"] -= Working with the Overview view -As an administrator, you can navigate to the *Overview* view to see the graphical representation of the flow rate statistics. - -.Procedure -. Navigate to *Observe* → *Network Traffic*. -. In the *Network Traffic* page, click the *Overview* tab. - -You can configure the scope of each flow rate data by clicking the menu icon. \ No newline at end of file diff --git a/modules/network-observability-working-with-topology.adoc b/modules/network-observability-working-with-topology.adoc deleted file mode 100644 index 3bd72ebd242b..000000000000 --- a/modules/network-observability-working-with-topology.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-working-with-topology_{context}"] -= Working with the Topology view -As an administrator, you can navigate to the *Topology* view to see the details and metrics of the component. - -.Procedure -. Navigate to *Observe* → *Network Traffic*. -. In the *Network Traffic* page, click the *Topology* tab. - -You can click each component in the *Topology* to view the details and metrics of the component. \ No newline at end of file diff --git a/modules/network-observability-working-with-trafficflow.adoc b/modules/network-observability-working-with-trafficflow.adoc deleted file mode 100644 index bb9ca4b5082e..000000000000 --- a/modules/network-observability-working-with-trafficflow.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// network_observability/observing-network-traffic.adoc - -:_content-type: PROCEDURE -[id="network-observability-working-with-trafficflow_{context}"] -= Working with the Traffic flows view -As an administrator, you can navigate to *Traffic flows* table to see network flow information. - -.Procedure - -. Navigate to *Observe* → *Network Traffic*. -. In the *Network Traffic* page, click the *Traffic flows* tab. - -You can click on each row to get the corresponding flow information. 
\ No newline at end of file diff --git a/modules/networking-osp-enabling-metadata.adoc b/modules/networking-osp-enabling-metadata.adoc deleted file mode 100644 index 418982cdd2a5..000000000000 --- a/modules/networking-osp-enabling-metadata.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="networking-osp-enabling-metadata_{context}"] -= Enabling the {rh-openstack} metadata service as a mountable drive - -You can apply a machine config to your machine pool that makes the {rh-openstack-first} metadata service available as a mountable drive. - -[NOTE] -==== -The following machine config enables the display of {rh-openstack} network UUIDs from within the SR-IOV Network Operator. This configuration simplifies the association of SR-IOV resources to cluster SR-IOV resources. -==== - -.Procedure - -. Create a machine config file from the following template: -+ -.A mountable metadata service machine config file -[source,yaml] ----- -kind: MachineConfig -apiVersion: machineconfiguration.openshift.io/v1 -metadata: - name: 20-mount-config <1> - labels: - machineconfiguration.openshift.io/role: worker -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - name: create-mountpoint-var-config.service - enabled: true - contents: | - [Unit] - Description=Create mountpoint /var/config - Before=kubelet.service - - [Service] - ExecStart=/bin/mkdir -p /var/config - - [Install] - WantedBy=var-config.mount - - - name: var-config.mount - enabled: true - contents: | - [Unit] - Before=local-fs.target - [Mount] - Where=/var/config - What=/dev/disk/by-label/config-2 - [Install] - WantedBy=local-fs.target ----- -<1> You can substitute a name of your choice. - -. From a command line, apply the machine config: -+ -[source,terminal] ----- -$ oc apply -f <machine_config_file_name>.yaml ----- diff --git a/modules/networking-osp-enabling-vfio-noiommu.adoc b/modules/networking-osp-enabling-vfio-noiommu.adoc deleted file mode 100644 index 0e6058fcdf30..000000000000 --- a/modules/networking-osp-enabling-vfio-noiommu.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-user.adoc -// * installing/installing_openstack/installing-openstack-user-kuryr.adoc - -:_content-type: PROCEDURE -[id="networking-osp-enabling-vfio-noiommu_{context}"] -= Enabling the No-IOMMU feature for the {rh-openstack} VFIO driver - -You can apply a machine config to your machine pool that enables the No-IOMMU feature for the {rh-openstack-first} virtual function I/O (VFIO) driver. The {rh-openstack} vfio-pci driver requires this feature. - -.Procedure - -. Create a machine config file from the following template: -+ -.A No-IOMMU VFIO machine config file -[source,yaml] ----- -kind: MachineConfig -apiVersion: machineconfiguration.openshift.io/v1 -metadata: - name: 99-vfio-noiommu <1> - labels: - machineconfiguration.openshift.io/role: worker -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/modprobe.d/vfio-noiommu.conf - mode: 0644 - contents: - source: data:;base64,b3B0aW9ucyB2ZmlvIGVuYWJsZV91bnNhZmVfbm9pb21tdV9tb2RlPTEK ----- -<1> You can substitute a name of your choice. - -. 
From a command line, apply the machine config: -+ -[source,terminal] ----- -$ oc apply -f <machine_config_file_name>.yaml ----- diff --git a/modules/networking-osp-preparing-for-sr-iov.adoc b/modules/networking-osp-preparing-for-sr-iov.adoc deleted file mode 100644 index ddf436ddc004..000000000000 --- a/modules/networking-osp-preparing-for-sr-iov.adoc +++ /dev/null @@ -1,4 +0,0 @@ -[id="networking-osp-preparing-for-sr-iov_{context}"] -= Preparing a cluster that runs on {rh-openstack} for SR-IOV - -Before you use link:https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.1/html-single/network_functions_virtualization_planning_and_configuration_guide/index#assembly_sriov_parameters[single root I/O virtualization (SR-IOV)] on a cluster that runs on {rh-openstack-first}, make the {rh-openstack} metadata service mountable as a drive and enable the No-IOMMU Operator for the virtual function I/O (VFIO) driver. \ No newline at end of file diff --git a/modules/node-observability-create-custom-resource.adoc b/modules/node-observability-create-custom-resource.adoc deleted file mode 100644 index 2309ea140fe6..000000000000 --- a/modules/node-observability-create-custom-resource.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: PROCEDURE -[id="creating-node-observability-custom-resource_{context}"] -= Creating the Node Observability custom resource - -You must create and run the `NodeObservability` custom resource (CR) before you run the profiling query. When you run the `NodeObservability` CR, it creates the necessary machine config and machine config pool CRs to enable the CRI-O profiling on the worker nodes matching the `nodeSelector`. - -[IMPORTANT] -==== -If CRI-O profiling is not enabled on the worker nodes, the `NodeObservabilityMachineConfig` resource gets created. Worker nodes matching the `nodeSelector` specified in `NodeObservability` CR restarts. This might take 10 or more minutes to complete. -==== - -[NOTE] -==== -Kubelet profiling is enabled by default. -==== - -The CRI-O unix socket of the node is mounted on the agent pod, which allows the agent to communicate with CRI-O to run the pprof request. Similarly, the `kubelet-serving-ca` certificate chain is mounted on the agent pod, which allows secure communication between the agent and node's kubelet endpoint. - -.Prerequisites -* You have installed the Node Observability Operator. -* You have installed the OpenShift CLI (oc). -* You have access to the cluster with `cluster-admin` privileges. - -.Procedure - -. Log in to the {product-title} CLI by running the following command: -+ -[source,terminal] ----- -$ oc login -u kubeadmin https://<HOSTNAME>:6443 ----- - -. Switch back to the `node-observability-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc project node-observability-operator ----- - -. Create a CR file named `nodeobservability.yaml` that contains the following text: -+ -[source,yaml] ----- - apiVersion: nodeobservability.olm.openshift.io/v1alpha2 - kind: NodeObservability - metadata: - name: cluster <1> - spec: - nodeSelector: - kubernetes.io/hostname: <node_hostname> <2> - type: crio-kubelet ----- -<1> You must specify the name as `cluster` because there should be only one `NodeObservability` CR per cluster. -<2> Specify the nodes on which the Node Observability agent must be deployed. - -. 
Run the `NodeObservability` CR: -+ -[source,terminal] ----- -oc apply -f nodeobservability.yaml ----- - -+ -.Example output -[source,terminal] ----- -nodeobservability.olm.openshift.io/cluster created ----- - -. Review the status of the `NodeObservability` CR by running the following command: -+ -[source,terminal] ----- -$ oc get nob/cluster -o yaml | yq '.status.conditions' ----- - -+ -.Example output -[source,terminal] ----- -conditions: - conditions: - - lastTransitionTime: "2022-07-05T07:33:54Z" - message: 'DaemonSet node-observability-ds ready: true NodeObservabilityMachineConfig - ready: true' - reason: Ready - status: "True" - type: Ready ----- - -+ -`NodeObservability` CR run is completed when the reason is `Ready` and the status is `True`. diff --git a/modules/node-observability-high-level-workflow.adoc b/modules/node-observability-high-level-workflow.adoc deleted file mode 100644 index 2a4c68aa2d8e..000000000000 --- a/modules/node-observability-high-level-workflow.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: CONCEPT -[id="workflow-node-observability-operator_{context}"] -= Workflow of the Node Observability Operator - -The following workflow outlines on how to query the profiling data using the Node Observability Operator: - -. Install the Node Observability Operator in the {product-title} cluster. -. Create a NodeObservability custom resource to enable the CRI-O profiling on the worker nodes of your choice. -. Run the profiling query to generate the profiling data. diff --git a/modules/node-observability-install-cli.adoc b/modules/node-observability-install-cli.adoc deleted file mode 100644 index 14d4bcbb99b1..000000000000 --- a/modules/node-observability-install-cli.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: PROCEDURE -[id="install-node-observability-using-cli_{context}"] -= Installing the Node Observability Operator using the CLI - -You can install the Node Observability Operator by using the OpenShift CLI (oc). - -.Prerequisites - -* You have installed the OpenShift CLI (oc). -* You have access to the cluster with `cluster-admin` privileges. - -.Procedure - -. Confirm that the Node Observability Operator is available by running the following command: -+ -[source,terminal] ----- -$ oc get packagemanifests -n openshift-marketplace node-observability-operator ----- - -+ -.Example output -[source,terminal] ----- -NAME CATALOG AGE -node-observability-operator Red Hat Operators 9h ----- - -. Create the `node-observability-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc new-project node-observability-operator ----- - -. Create an `OperatorGroup` object YAML file: -+ -[source,yaml] ----- -cat <<EOF | oc apply -f - -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: node-observability-operator - namespace: node-observability-operator -spec: - targetNamespaces: [] -EOF ----- - -. 
Create a `Subscription` object YAML file to subscribe a namespace to an Operator: -+ -[source,yaml] ----- -cat <<EOF | oc apply -f - -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: node-observability-operator - namespace: node-observability-operator -spec: - channel: alpha - name: node-observability-operator - source: redhat-operators - sourceNamespace: openshift-marketplace -EOF ----- - -.Verification - -. View the install plan name by running the following command: -+ -[source,terminal] ----- -$ oc -n node-observability-operator get sub node-observability-operator -o yaml | yq '.status.installplan.name' ----- - -+ -.Example output -[source,terminal] ----- -install-dt54w ----- - -. Verify the install plan status by running the following command: -+ -[source,terminal] ----- -$ oc -n node-observability-operator get ip <install_plan_name> -o yaml | yq '.status.phase' ----- -+ -`<install_plan_name>` is the install plan name that you obtained from the output of the previous command. - -+ -.Example output -[source,terminal] ----- -COMPLETE ----- - -. Verify that the Node Observability Operator is up and running: -+ -[source,terminal] ----- -$ oc get deploy -n node-observability-operator ----- - -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -node-observability-operator-controller-manager 1/1 1 1 40h ----- diff --git a/modules/node-observability-install-web-console.adoc b/modules/node-observability-install-web-console.adoc deleted file mode 100644 index 40fd247c25bb..000000000000 --- a/modules/node-observability-install-web-console.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: PROCEDURE -[id="install-node-observability-using-web-console_{context}"] -= Installing the Node Observability Operator using the web console - -You can install the Node Observability Operator from the {product-title} web console. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. -. In the Administrator's navigation panel, expand *Operators* → *OperatorHub*. -. In the *All items* field, enter *Node Observability Operator* and select the *Node Observability Operator* tile. -. Click *Install*. -. On the *Install Operator* page, configure the following settings: -.. In the *Update channel* area, click *alpha*. -.. In the *Installation mode* area, click *A specific namespace on the cluster*. -.. From the *Installed Namespace* list, select *node-observability-operator* from the list. -.. In the *Update approval* area, select *Automatic*. -.. Click *Install*. - -.Verification -. In the Administrator's navigation panel, expand *Operators* → *Installed Operators*. -. Verify that the Node Observability Operator is listed in the Operators list. 
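You can also confirm the installation from the CLI. The following check is a sketch that assumes the Operator was installed into the `node-observability-operator` namespace selected in the previous steps:

[source,terminal]
----
# Lists the ClusterServiceVersion for the namespace; the phase should report Succeeded
$ oc get csv -n node-observability-operator
----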
diff --git a/modules/node-observability-installation.adoc b/modules/node-observability-installation.adoc deleted file mode 100644 index 245faa8664a4..000000000000 --- a/modules/node-observability-installation.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: CONCEPT -[id="install-node-observability-operator_{context}"] -= Installing the Node Observability Operator -The Node Observability Operator is not installed in {product-title} by default. You can install the Node Observability Operator by using the {product-title} CLI or the web console. diff --git a/modules/node-observability-run-profiling-query.adoc b/modules/node-observability-run-profiling-query.adoc deleted file mode 100644 index eaf91c4cf275..000000000000 --- a/modules/node-observability-run-profiling-query.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/understanding-node-observability-operator.adoc - -:_content-type: PROCEDURE -[id="running-profiling-query_{context}"] -= Running the profiling query - -To run the profiling query, you must create a `NodeObservabilityRun` resource. The profiling query is a blocking operation that fetches CRI-O and Kubelet profiling data for a duration of 30 seconds. After the profiling query is complete, you must retrieve the profiling data inside the container file system `/run/node-observability` directory. The lifetime of data is bound to the agent pod through the `emptyDir` volume, so you can access the profiling data while the agent pod is in the `running` status. - -[IMPORTANT] -==== -You can request only one profiling query at any point of time. -==== - -.Prerequisites -* You have installed the Node Observability Operator. -* You have created the `NodeObservability` custom resource (CR). -* You have access to the cluster with `cluster-admin` privileges. - -.Procedure - -. Create a `NodeObservabilityRun` resource file named `nodeobservabilityrun.yaml` that contains the following text: -+ -[source,yaml] ----- -apiVersion: nodeobservability.olm.openshift.io/v1alpha2 -kind: NodeObservabilityRun -metadata: - name: nodeobservabilityrun -spec: - nodeObservabilityRef: - name: cluster ----- - -. Trigger the profiling query by running the `NodeObservabilityRun` resource: -+ -[source,terminal] ----- -$ oc apply -f nodeobservabilityrun.yaml ----- - -. Review the status of the `NodeObservabilityRun` by running the following command: -+ -[source,terminal] ----- -$ oc get nodeobservabilityrun nodeobservabilityrun -o yaml | yq '.status.conditions' ----- - -+ -.Example output -[source,terminal] ----- -conditions: -- lastTransitionTime: "2022-07-07T14:57:34Z" - message: Ready to start profiling - reason: Ready - status: "True" - type: Ready -- lastTransitionTime: "2022-07-07T14:58:10Z" - message: Profiling query done - reason: Finished - status: "True" - type: Finished ----- - -+ -The profiling query is complete once the status is `True` and type is `Finished`. - -. 
Retrieve the profiling data from the container's `/run/node-observability` path by running the following bash script: -+ -[source,bash] ----- -for a in $(oc get nodeobservabilityrun nodeobservabilityrun -o yaml | yq .status.agents[].name); do - echo "agent ${a}" - mkdir -p "/tmp/${a}" - for p in $(oc exec "${a}" -c node-observability-agent -- bash -c "ls /run/node-observability/*.pprof"); do - f="$(basename ${p})" - echo "copying ${f} to /tmp/${a}/${f}" - oc exec "${a}" -c node-observability-agent -- cat "${p}" > "/tmp/${a}/${f}" - done -done ----- diff --git a/modules/node-tuning-hosted-cluster.adoc b/modules/node-tuning-hosted-cluster.adoc deleted file mode 100644 index d6e22bd5e8d2..000000000000 --- a/modules/node-tuning-hosted-cluster.adoc +++ /dev/null @@ -1,135 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc - -:_content-type: PROCEDURE -[id="node-tuning-hosted-cluster_{context}"] -= Configuring node tuning in a hosted cluster - -//# Manage node-level tuning with the Node Tuning Operator - -:FeatureName: Hosted control planes -include::snippets/technology-preview.adoc[] - -To set node-level tuning on the nodes in your hosted cluster, you can use the Node Tuning Operator. In hosted control planes, you can configure node tuning by creating config maps that contain `Tuned` objects and referencing those config maps in your node pools. - -.Procedure - -. Create a config map that contains a valid tuned manifest, and reference the manifest in a node pool. In the following example, a `Tuned` manifest defines a profile that sets `vm.dirty_ratio` to 55 on nodes that contain the `tuned-1-node-label` node label with any value. Save the following `ConfigMap` manifest in a file named `tuned-1.yaml`: -+ -[source,yaml] ----- - apiVersion: v1 - kind: ConfigMap - metadata: - name: tuned-1 - namespace: clusters - data: - tuning: | - apiVersion: tuned.openshift.io/v1 - kind: Tuned - metadata: - name: tuned-1 - namespace: openshift-cluster-node-tuning-operator - spec: - profile: - - data: | - [main] - summary=Custom OpenShift profile - include=openshift-node - [sysctl] - vm.dirty_ratio="55" - name: tuned-1-profile - recommend: - - priority: 20 - profile: tuned-1-profile ----- -+ -[NOTE] -==== -If you do not add any labels to an entry in the `spec.recommend` section of the Tuned spec, node-pool-based matching is assumed, so the highest priority profile in the `spec.recommend` section is applied to nodes in the pool. Although you can achieve more fine-grained node-label-based matching by setting a label value in the Tuned `.spec.recommend.match` section, node labels will not persist during an upgrade unless you set the `.spec.management.upgradeType` value of the node pool to `InPlace`. -==== - -. Create the `ConfigMap` object in the management cluster: -+ -[source, terminal] ----- -$ oc --kubeconfig="$MGMT_KUBECONFIG" create -f tuned-1.yaml ----- - -. Reference the `ConfigMap` object in the `spec.tuningConfig` field of the node pool, either by editing a node pool or creating one. In this example, assume that you have only one `NodePool`, named `nodepool-1`, which contains 2 nodes. -+ -[source,yaml] ----- - apiVersion: hypershift.openshift.io/v1alpha1 - kind: NodePool - metadata: - ... - name: nodepool-1 - namespace: clusters - ... - spec: - ... - tuningConfig: - - name: tuned-1 - status: - ... ----- -+ -[NOTE] -==== -You can reference the same config map in multiple node pools. 
In hosted control planes, the Node Tuning Operator appends a hash of the node pool name and namespace to the name of the Tuned CRs to distinguish them. Outside of this case, do not create multiple TuneD profiles of the same name in different Tuned CRs for the same hosted cluster. -==== - -.Verification - -Now that you have created the `ConfigMap` object that contains a `Tuned` manifest and referenced it in a `NodePool`, the Node Tuning Operator syncs the `Tuned` objects into the hosted cluster. You can verify which `Tuned` objects are defined and which TuneD profiles are applied to each node. - -. List the `Tuned` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Tuneds -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -default 7m36s -rendered 7m36s -tuned-1 65s ----- - -. List the `Profile` objects in the hosted cluster: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" get Profiles -n openshift-cluster-node-tuning-operator ----- -+ -.Example output -[source,terminal] ----- -NAME TUNED APPLIED DEGRADED AGE -nodepool-1-worker-1 tuned-1-profile True False 7m43s -nodepool-1-worker-2 tuned-1-profile True False 7m14s ----- -+ -[NOTE] -==== -If no custom profiles are created, the `openshift-node` profile is applied by default. -==== - -. To confirm that the tuning was applied correctly, start a debug shell on a node and check the sysctl values: -+ -[source,terminal] ----- -$ oc --kubeconfig="$HC_KUBECONFIG" debug node/nodepool-1-worker-1 -- chroot /host sysctl vm.dirty_ratio ----- -+ -.Example output -[source,terminal] ----- -vm.dirty_ratio = 55 ----- \ No newline at end of file diff --git a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc b/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc deleted file mode 100644 index 524e9480b7b5..000000000000 --- a/modules/node-tuning-operator-supported-tuned-daemon-plug-ins.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="supported-tuned-daemon-plug-ins_{context}"] -= Supported TuneD daemon plugins - -Excluding the `[main]` section, the following TuneD plugins are supported when -using custom profiles defined in the `profile:` section of the Tuned CR: - -* audio -* cpu -* disk -* eeepc_she -* modules -* mounts -* net -* scheduler -* scsi_host -* selinux -* sysctl -* sysfs -* usb -* video -* vm -* bootloader - -There is some dynamic tuning functionality provided by some of these plugins -that is not supported. The following TuneD plugins are currently not supported: - -* script -* systemd - - -[WARNING] -==== -The TuneD bootloader plugin is currently supported on {op-system-first} 8.x worker nodes. For {op-system-base-full} 7.x worker nodes, the TuneD bootloader plugin is currently not supported. 
-==== - -See -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/monitoring_and_managing_system_status_and_performance/customizing-tuned-profiles_monitoring-and-managing-system-status-and-performance#available-tuned-plug-ins_customizing-tuned-profiles[Available -TuneD Plugins] and -link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/monitoring_and_managing_system_status_and_performance/getting-started-with-tuned_monitoring-and-managing-system-status-and-performance[Getting -Started with TuneD] for more information. diff --git a/modules/node-tuning-operator.adoc b/modules/node-tuning-operator.adoc deleted file mode 100644 index 847043db7165..000000000000 --- a/modules/node-tuning-operator.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * scalability_and_performance/using-node-tuning-operator.adoc -// * operators/operator-reference.adoc -// * post_installation_configuration/node-tasks.adoc - -ifeval::["{context}" == "cluster-operators-ref"] -:operators: -endif::[] -ifeval::["{context}" == "node-tuning-operator"] -:perf: -endif::[] -ifeval::["{context}" == "cluster-capabilities"] -:cluster-caps: -endif::[] - -:_content-type: CONCEPT -[id="about-node-tuning-operator_{context}"] -ifdef::operators[] -= Node Tuning Operator -endif::operators[] -ifdef::perf[] -= About the Node Tuning Operator -endif::perf[] -ifdef::cluster-caps[= Node Tuning capability] - -ifndef::perf[] -[discrete] -== Purpose -endif::perf[] - -ifdef::cluster-caps[] -The Node Tuning Operator provides features for the `NodeTuning` capability. -endif::cluster-caps[] - -The Node Tuning Operator helps you manage node-level tuning by orchestrating the TuneD daemon and achieves low latency performance by using the Performance Profile controller. The majority of high-performance applications require some level of kernel tuning. The Node Tuning Operator provides a unified management interface to users of node-level sysctls and more flexibility to add custom tuning specified by user needs. - -ifdef::cluster-caps[] -If you disable the NodeTuning capability, some default tuning settings will not be applied to the control-plane nodes. This might limit the scalability and performance of large clusters with over 900 nodes or 900 routes. -endif::[] - -ifndef::cluster-caps[] -The Operator manages the containerized TuneD daemon for {product-title} as a Kubernetes daemon set. It ensures the custom tuning specification is passed to all containerized TuneD daemons running in the cluster in the format that the daemons understand. The daemons run on all nodes in the cluster, one per node. - -Node-level settings applied by the containerized TuneD daemon are rolled back on an event that triggers a profile change or when the containerized TuneD daemon is terminated gracefully by receiving and handling a termination signal. - -The Node Tuning Operator uses the Performance Profile controller to implement automatic tuning to achieve low latency performance for {product-title} applications. The cluster administrator configures a performance profile to define node-level settings such as the following: - -* Updating the kernel to kernel-rt. -* Choosing CPUs for housekeeping. -* Choosing CPUs for running workloads. - -[NOTE] -==== -Currently, disabling CPU load balancing is not supported by cgroup v2. As a result, you might not get the desired behavior from performance profiles if you have cgroup v2 enabled. 
Enabling cgroup v2 is not recommended if you are using performace profiles. -==== - -The Node Tuning Operator is part of a standard {product-title} installation in version 4.1 and later. - -[NOTE] -==== -In earlier versions of {product-title}, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance for OpenShift applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. -==== -endif::cluster-caps[] - -ifdef::operators[] -[discrete] -== Project - -link:https://github.com/openshift/cluster-node-tuning-operator[cluster-node-tuning-operator] -endif::operators[] diff --git a/modules/nodes-application-secrets-using.adoc b/modules/nodes-application-secrets-using.adoc deleted file mode 100644 index 58c7e03a9273..000000000000 --- a/modules/nodes-application-secrets-using.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-application-secrets-creating-using-sa_{context}"] -= Creating and using secrets - -As an administrator, you can create a service account token secret. This allows you to distribute a service account token to applications that must authenticate to the API. - -.Procedure - -. Create a service account in your namespace by running the following command: -+ -[source,terminal] ----- -$ oc create sa <service_account_name> -n <your_namespace> ----- - -. Save the following YAML example to a file named `service-account-token-secret.yaml`. The example includes a `Secret` object configuration that you can use to generate a service account token: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <secret_name> <1> - annotations: - kubernetes.io/service-account.name: "sa-name" <2> -type: kubernetes.io/service-account-token <3> ----- -<1> Replace `<secret_name>` with the name of your service token secret. -<2> Specifies an existing service account name. If you are creating both the `ServiceAccount` and the `Secret` objects, create the `ServiceAccount` object first. -<3> Specifies a service account token secret type. - -. Generate the service account token by applying the file: -+ -[source,terminal] ----- -$ oc apply -f service-account-token-secret.yaml ----- - -. Get the service account token from the secret by running the following command: -+ -[source,terminal] ------ -$ oc get secret <sa_token_secret> -o jsonpath='{.data.token}' | base64 --decode) <1> ------ -+ -.Example output -[source,terminal] ----- -ayJhbGciOiJSUzI1NiIsImtpZCI6IklOb2dtck1qZ3hCSWpoNnh5YnZhSE9QMkk3YnRZMVZoclFfQTZfRFp1YlUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImJ1aWxkZXItdG9rZW4tdHZrbnIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiYnVpbGRlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjNmZGU2MGZmLTA1NGYtNDkyZi04YzhjLTNlZjE0NDk3MmFmNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmJ1aWxkZXIifQ.OmqFTDuMHC_lYvvEUrjr1x453hlEEHYcxS9VKSzmRkP1SiVZWPNPkTWlfNRp6bIUZD3U6aN3N7dMSN0eI5hu36xPgpKTdvuckKLTCnelMx6cxOdAbrcw1mCmOClNscwjS1KO1kzMtYnnq8rXHiMJELsNlhnRyyIXRTtNBsy4t64T3283s3SLsancyx0gy0ujx-Ch3uKAKdZi5iT-I8jnnQ-ds5THDs2h65RJhgglQEmSxpHrLGZFmyHAQI-_SjvmHZPXEc482x3SkaQHNLqpmrpJorNqh1M8ZHKzlujhZgVooMvJmWPXTb2vnvi3DGn2XI-hZxl1yD2yGH1RBpYUHA ----- -<1> Replace <sa_token_secret> with the name of your service token secret. 
- -. Use your service account token to authenticate with the API of your cluster: -+ -[source,terminal] ----- -$ curl -X GET <openshift_cluster_api> --header "Authorization: Bearer <token>" <1> <2> ----- -<1> Replace `<openshift_cluster_api>` with the OpenShift cluster API. -<2> Replace `<token>` with the service account token that is output in the preceding command. diff --git a/modules/nodes-cluster-enabling-features-about.adoc b/modules/nodes-cluster-enabling-features-about.adoc deleted file mode 100644 index 88c1f845e48a..000000000000 --- a/modules/nodes-cluster-enabling-features-about.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// nodes/clusters/nodes-cluster-enabling-features.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-enabling-features-about_{context}"] -= Understanding feature gates - -You can use the `FeatureGate` custom resource (CR) to enable specific feature sets in your cluster. A feature set is a collection of {product-title} features that are not enabled by default. - -You can activate the following feature set by using the `FeatureGate` CR: - -* `TechPreviewNoUpgrade`. This feature set is a subset of the current Technology Preview features. This feature set allows you to enable these Technology Preview features on test clusters, where you can fully test them, while leaving the features disabled on production clusters. -+ -[WARNING] -==== -Enabling the `TechPreviewNoUpgrade` feature set on your cluster cannot be undone and prevents minor version updates. You should not enable this feature set on production clusters. -==== -+ -The following Technology Preview features are enabled by this feature set: -+ --- -** External cloud providers. Enables support for external cloud providers for clusters on vSphere, AWS, Azure, and GCP. Support for OpenStack is GA. This is an internal feature that most users do not need to interact with. (`ExternalCloudProvider`) -** Shared Resources CSI Driver and Build CSI Volumes in OpenShift Builds. Enables the Container Storage Interface (CSI). (`CSIDriverSharedResource`) -** CSI volumes. Enables CSI volume support for the {product-title} build system. (`BuildCSIVolumes`) -** Swap memory on nodes. Enables swap memory use for {product-title} workloads on a per-node basis. (`NodeSwap`) -** OpenStack Machine API Provider. This gate has no effect and is planned to be removed from this feature set in a future release. (`MachineAPIProviderOpenStack`) -** Insights Operator. Enables the Insights Operator, which gathers {product-title} configuration data and sends it to Red Hat. (`InsightsConfigAPI`) -** Retroactive Default Storage Class. Enables {product-title} to retroactively assign the default storage class to PVCs if there was no default storage class when the PVC was created.(`RetroactiveDefaultStorageClass`) -** Pod disruption budget (PDB) unhealthy pod eviction policy. Enables support for specifying how unhealthy pods are considered for eviction when using PDBs. (`PDBUnhealthyPodEvictionPolicy`) -** Dynamic Resource Allocation API. Enables a new API for requesting and sharing resources between pods and containers. This is an internal feature that most users do not need to interact with. (`DynamicResourceAllocation`) -** Pod security admission enforcement. Enables the restricted enforcement mode for pod security admission. Instead of only logging a warning, pods are rejected if they violate pod security standards. 
(`OpenShiftPodSecurityAdmission`) --- - -//// -Do not document per Derek Carr: https://github.com/openshift/api/pull/370#issuecomment-510632939 -|`CustomNoUpgrade` ^[2]^ -|Allows the enabling or disabling of any feature. Turning on this feature set on is not supported, cannot be undone, and prevents upgrades. - -[.small] --- -1. -2. If you use the `CustomNoUpgrade` feature set to disable a feature that appears in the web console, you might see that feature, but -no objects are listed. For example, if you disable builds, you can see the *Builds* tab in the web console, but there are no builds present. If you attempt to use commands associated with a disabled feature, such as `oc start-build`, {product-title} displays an error. - -[NOTE] -==== -If you disable a feature that any application in the cluster relies on, the application might not -function properly, depending upon the feature disabled and how the application uses that feature. -==== -//// diff --git a/modules/nodes-cluster-enabling-features-cli.adoc b/modules/nodes-cluster-enabling-features-cli.adoc deleted file mode 100644 index 8eddd396ca88..000000000000 --- a/modules/nodes-cluster-enabling-features-cli.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/nodes-cluster-enabling-features.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-enabling-features-cli_{context}"] -= Enabling feature sets using the CLI - -You can use the OpenShift CLI (`oc`) to enable feature sets for all of the nodes in a cluster by editing the `FeatureGate` custom resource (CR). - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -To enable feature sets: - -. Edit the `FeatureGate` CR named `cluster`: -+ -[source,terminal] ----- -$ oc edit featuregate cluster ----- -+ -[WARNING] -==== -Enabling the `TechPreviewNoUpgrade` feature set on your cluster cannot be undone and prevents minor version updates. You should not enable this feature set on production clusters. -==== - -+ -.Sample FeatureGate custom resource -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: FeatureGate -metadata: - name: cluster <1> -spec: - featureSet: TechPreviewNoUpgrade <2> ----- -+ --- -<1> The name of the `FeatureGate` CR must be `cluster`. -<2> Add the feature set that you want to enable: -* `TechPreviewNoUpgrade` enables specific Technology Preview features. --- -+ -After you save the changes, new machine configs are created, the machine config pools are updated, and scheduling on each node is disabled while the change is being applied. - -.Verification - -include::snippets/nodes-cluster-enabling-features-verification.adoc[] diff --git a/modules/nodes-cluster-enabling-features-console.adoc b/modules/nodes-cluster-enabling-features-console.adoc deleted file mode 100644 index 316ddf01d001..000000000000 --- a/modules/nodes-cluster-enabling-features-console.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-enabling-features.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-enabling-features-console_{context}"] -= Enabling feature sets using the web console - -You can use the {product-title} web console to enable feature sets for all of the nodes in a cluster by editing the `FeatureGate` custom resource (CR). - -.Procedure - -To enable feature sets: - -. In the {product-title} web console, switch to the *Administration* -> *Custom Resource Definitions* page. - -. 
On the *Custom Resource Definitions* page, click *FeatureGate*. - -. On the *Custom Resource Definition Details* page, click the *Instances* tab. - -. Click the *cluster* feature gate, then click the *YAML* tab. - -. Edit the *cluster* instance to add specific feature sets: -+ -[WARNING] -==== -Enabling the `TechPreviewNoUpgrade` feature set on your cluster cannot be undone and prevents minor version updates. You should not enable this feature set on production clusters. -==== - -+ -.Sample Feature Gate custom resource -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: FeatureGate -metadata: - name: cluster <1> -.... - -spec: - featureSet: TechPreviewNoUpgrade <2> ----- -+ --- -<1> The name of the `FeatureGate` CR must be `cluster`. -<2> Add the feature set that you want to enable: -* `TechPreviewNoUpgrade` enables specific Technology Preview features. --- -+ -After you save the changes, new machine configs are created, the machine config pools are updated, and scheduling on each node is disabled while the change is being applied. - -.Verification - -include::snippets/nodes-cluster-enabling-features-verification.adoc[] diff --git a/modules/nodes-cluster-enabling-features-install.adoc b/modules/nodes-cluster-enabling-features-install.adoc deleted file mode 100644 index c231b806ae54..000000000000 --- a/modules/nodes-cluster-enabling-features-install.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/nodes-cluster-enabling-features.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-enabling-features-install_{context}"] -= Enabling feature sets at installation - -You can enable feature sets for all nodes in the cluster by editing the `install-config.yaml` file before you deploy the cluster. - -.Prerequisites - -* You have an `install-config.yaml` file. - -.Procedure - -. Use the `featureSet` parameter to specify the name of the feature set you want to enable, such as `TechPreviewNoUpgrade`: -+ -[WARNING] -==== -Enabling the `TechPreviewNoUpgrade` feature set on your cluster cannot be undone and prevents minor version updates. You should not enable this feature set on production clusters. -==== -+ -.Sample `install-config.yaml` file with an enabled feature set - -[source,yaml] ----- -compute: -- hyperthreading: Enabled - name: worker - platform: - aws: - rootVolume: - iops: 2000 - size: 500 - type: io1 - metadataService: - authentication: Optional - type: c5.4xlarge - zones: - - us-west-2c - replicas: 3 -featureSet: TechPreviewNoUpgrade ----- - -. Save the file and reference it when using the installation program to deploy the cluster. - -.Verification - -include::snippets/nodes-cluster-enabling-features-verification.adoc[] diff --git a/modules/nodes-cluster-limit-ranges-about.adoc b/modules/nodes-cluster-limit-ranges-about.adoc deleted file mode 100644 index 83270f88e963..000000000000 --- a/modules/nodes-cluster-limit-ranges-about.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/limit-ranges.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-limit-ranges-about_{context}"] -= About limit ranges - -A limit range, defined by a `LimitRange` object, restricts resource -consumption in a project. In the project you can set specific resource -limits for a pod, container, image, image stream, or -persistent volume claim (PVC). - -All requests to create and modify resources are evaluated against each -`LimitRange` object in the project. 
If the resource violates any of the -enumerated constraints, the resource is rejected. - -ifdef::openshift-online[] -[IMPORTANT] -==== -For {product-title} Pro, the maximum pod memory is 3Gi. The minimum pod or -container memory that you can specify is 100Mi. - -==== -endif::[] - -The following shows a limit range object for all components: pod, container, -image, image stream, or PVC. You can configure limits for any or all of these -components in the same object. You create a different limit range object for -each project where you want to control resources. - -.Sample limit range object for a container - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" -spec: - limits: - - type: "Container" - max: - cpu: "2" - memory: "1Gi" - min: - cpu: "100m" - memory: "4Mi" - default: - cpu: "300m" - memory: "200Mi" - defaultRequest: - cpu: "200m" - memory: "100Mi" - maxLimitRequestRatio: - cpu: "10" ----- diff --git a/modules/nodes-cluster-limit-ranges-creating.adoc b/modules/nodes-cluster-limit-ranges-creating.adoc deleted file mode 100644 index 8899988e3947..000000000000 --- a/modules/nodes-cluster-limit-ranges-creating.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/limit-ranges.adoc - -[id="nodes-cluster-limit-creating_{context}"] -= Creating a Limit Range - -To apply a limit range to a project: - -. Create a `LimitRange` object with your required specifications: -+ -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: "Pod" <2> - max: - cpu: "2" - memory: "1Gi" - min: - cpu: "200m" - memory: "6Mi" - - type: "Container" <3> - max: - cpu: "2" - memory: "1Gi" - min: - cpu: "100m" - memory: "4Mi" - default: <4> - cpu: "300m" - memory: "200Mi" - defaultRequest: <5> - cpu: "200m" - memory: "100Mi" - maxLimitRequestRatio: <6> - cpu: "10" - - type: openshift.io/Image <7> - max: - storage: 1Gi - - type: openshift.io/ImageStream <8> - max: - openshift.io/image-tags: 20 - openshift.io/images: 30 - - type: "PersistentVolumeClaim" <9> - min: - storage: "2Gi" - max: - storage: "50Gi" ----- -<1> Specify a name for the `LimitRange` object. -<2> To set limits for a pod, specify the minimum and maximum CPU and memory requests as needed. -<3> To set limits for a container, specify the minimum and maximum CPU and memory requests as needed. -<4> Optional. For a container, specify the default amount of CPU or memory that a container can use, if not specified in the `Pod` spec. -<5> Optional. For a container, specify the default amount of CPU or memory that a container can request, if not specified in the `Pod` spec. -<6> Optional. For a container, specify the maximum limit-to-request ratio that can be specified in the `Pod` spec. -<7> To set limits for an Image object, set the maximum size of an image that can be pushed to an {product-registry}. -<8> To set limits for an image stream, set the maximum number of image tags and references that can be in the `ImageStream` object file, as needed. -<9> To set limits for a persistent volume claim, set the minimum and maximum amount of storage that can be requested. - -. Create the object: -+ ----- -$ oc create -f <limit_range_file> -n <project> <1> ----- -<1> Specify the name of the YAML file you created and the project where you want the limits to apply. 
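For orientation, the following is a minimal sketch of how the defaults from a limit range such as the one above are applied; the pod name, project name, and image are hypothetical. Because the container omits a `resources` block, the limit range defaults are filled in at admission: the `defaultRequest` values become the container requests and the `default` values become its limits.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: limits-demo                 # hypothetical pod name
  namespace: demo-project           # hypothetical project that contains the "resource-limits" LimitRange
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    # No resources block is set here. With the sample limit range in place,
    # the container is admitted with requests of cpu: 200m, memory: 100Mi
    # and limits of cpu: 300m, memory: 200Mi taken from defaultRequest and default.
----

After the pod is created, `oc describe pod limits-demo -n demo-project` should show the injected requests and limits in the container's resource section.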
diff --git a/modules/nodes-cluster-limit-ranges-deleting.adoc b/modules/nodes-cluster-limit-ranges-deleting.adoc deleted file mode 100644 index 24b63725ae3d..000000000000 --- a/modules/nodes-cluster-limit-ranges-deleting.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/limit-ranges.adoc - -[id="nodes-cluster-limit-ranges-deleting_{context}"] -= Deleting a Limit Range - - -To remove any active `LimitRange` object to no longer enforce the limits in a project: - -. Run the following command: -+ ----- -$ oc delete limits <limit_name> ----- diff --git a/modules/nodes-cluster-limit-ranges-limits.adoc b/modules/nodes-cluster-limit-ranges-limits.adoc deleted file mode 100644 index d232cbb849ea..000000000000 --- a/modules/nodes-cluster-limit-ranges-limits.adoc +++ /dev/null @@ -1,265 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/limit-ranges.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-limit-ranges-limits_{context}"] -= About component limits - -The following examples show limit range parameters for each component. The -examples are broken out for clarity. You can create a single `LimitRange` object -for any or all components as necessary. - -[id="nodes-cluster-limit-container-limits"] -== Container limits - -A limit range allows you to specify the minimum and maximum CPU and memory that each container -in a pod can request for a specific project. If a container is created in the project, -the container CPU and memory requests in the `Pod` spec must comply with the values set in the -`LimitRange` object. If not, the pod does not get created. - -* The container CPU or memory request and limit must be greater than or equal to the -`min` resource constraint for containers that are specified in the `LimitRange` object. - -* The container CPU or memory request and limit must be less than or equal to the -`max` resource constraint for containers that are specified in the `LimitRange` object. -+ -If the `LimitRange` object defines a `max` CPU, you do not need to define a CPU -`request` value in the `Pod` spec. But you must specify a CPU `limit` value that -satisfies the maximum CPU constraint specified in the limit range. - -* The ratio of the container limits to requests must be -less than or equal to the `maxLimitRequestRatio` value for containers that -is specified in the `LimitRange` object. -+ -If the `LimitRange` object defines a `maxLimitRequestRatio` constraint, any new -containers must have both a `request` and a `limit` value. {product-title} -calculates the limit-to-request ratio by dividing the `limit` by the -`request`. This value should be a non-negative integer greater than 1. -+ -For example, if a container has `cpu: 500` in the `limit` value, and -`cpu: 100` in the `request` value, the limit-to-request ratio for `cpu` is -`5`. This ratio must be less than or equal to the `maxLimitRequestRatio`. - -If the `Pod` spec does not specify a container resource memory or limit, -the `default` or `defaultRequest` CPU and memory values for containers -specified in the limit range object are assigned to the container. 
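To make these rules concrete, the following container spec is a sketch with illustrative values. Against a limit range whose container constraints are `min` cpu `100m`/memory `4Mi`, `max` cpu `2`/memory `1Gi`, and a cpu `maxLimitRequestRatio` of `10`, this pod is admitted: every value sits between `min` and `max`, and the cpu limit-to-request ratio is 500m/100m = 5.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: ratio-demo                  # hypothetical pod name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    resources:
      requests:
        cpu: 100m      # at or above the container min
        memory: 100Mi
      limits:
        cpu: 500m      # below the container max; 500m/100m gives a ratio of 5, within maxLimitRequestRatio: 10
        memory: 200Mi
----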
- -.Container `LimitRange` object definition - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: "Container" - max: - cpu: "2" <2> - memory: "1Gi" <3> - min: - cpu: "100m" <4> - memory: "4Mi" <5> - default: - cpu: "300m" <6> - memory: "200Mi" <7> - defaultRequest: - cpu: "200m" <8> - memory: "100Mi" <9> - maxLimitRequestRatio: - cpu: "10" <10> ----- -<1> The name of the LimitRange object. -<2> The maximum amount of CPU that a single container in a pod can request. -<3> The maximum amount of memory that a single container in a pod can request. -<4> The minimum amount of CPU that a single container in a pod can request. -<5> The minimum amount of memory that a single container in a pod can request. -<6> The default amount of CPU that a container can use if not specified in the `Pod` spec. -<7> The default amount of memory that a container can use if not specified in the `Pod` spec. -<8> The default amount of CPU that a container can request if not specified in the `Pod` spec. -<9> The default amount of memory that a container can request if not specified in the `Pod` spec. -<10> The maximum limit-to-request ratio for a container. - - -[id="nodes-cluster-limit-pod-limits"] -== Pod limits - -A limit range allows you to specify the minimum and maximum CPU and memory limits for all containers -across a pod in a given project. To create a container in the project, the container CPU and memory -requests in the `Pod` spec must comply with the values set in the `LimitRange` object. If not, -the pod does not get created. - -If the `Pod` spec does not specify a container resource memory or limit, -the `default` or `defaultRequest` CPU and memory values for containers -specified in the limit range object are assigned to the container. - -Across all containers in a pod, the following must hold true: - -* The container CPU or memory request and limit must be greater than or equal to the -`min` resource constraints for pods that are specified in the `LimitRange` object. - -* The container CPU or memory request and limit must be less than or equal to the -`max` resource constraints for pods that are specified in the `LimitRange` object. - -* The ratio of the container limits to requests must be less than or equal to -the `maxLimitRequestRatio` constraint specified in the `LimitRange` object. - -.Pod `LimitRange` object definition - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: "Pod" - max: - cpu: "2" <2> - memory: "1Gi" <3> - min: - cpu: "200m" <4> - memory: "6Mi" <5> - maxLimitRequestRatio: - cpu: "10" <6> ----- -<1> The name of the limit range object. -<2> The maximum amount of CPU that a pod can request across all containers. -<3> The maximum amount of memory that a pod can request across all containers. -<4> The minimum amount of CPU that a pod can request across all containers. -<5> The minimum amount of memory that a pod can request across all containers. -<6> The maximum limit-to-request ratio for a container. - -[id="nodes-cluster-limit-image-limits"] -== Image limits - -A `LimitRange` object allows you to specify the maximum size of an image -that can be pushed to an {product-registry}. - -When pushing images to an {product-registry}, the following must hold true: - -* The size of the image must be less than or equal to the `max` size for -images that is specified in the `LimitRange` object. 
- -.Image `LimitRange` object definition - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: openshift.io/Image - max: - storage: 1Gi <2> ----- -<1> The name of the `LimitRange` object. -<2> The maximum size of an image that can be pushed to an {product-registry}. - -ifdef::openshift-enterprise,openshift-origin[] -[NOTE] -==== -To prevent blobs that exceed the limit from being uploaded to the registry, the -registry must be configured to enforce quotas. -==== -endif::[] - -[WARNING] -==== -The image size is not always available in the manifest of an uploaded image. -This is especially the case for images built with Docker 1.10 or higher and -pushed to a v2 registry. If such an image is pulled with an older Docker daemon, -the image manifest is converted by the registry to schema v1 lacking all -the size information. No storage limit set on images prevent it from being -uploaded. - -link:https://github.com/openshift/origin/issues/7706[The issue] is being -addressed. -==== - -[id="nodes-cluster-limit-stream-limits"] -== Image stream limits - -A `LimitRange` object allows you to specify limits for image streams. - -For each image stream, the following must hold true: - -* The number of image tags in an `ImageStream` specification must be less -than or equal to the `openshift.io/image-tags` constraint in the `LimitRange` object. - -* The number of unique references to images in an `ImageStream` specification -must be less than or equal to the `openshift.io/images` constraint in the limit -range object. - -.Imagestream `LimitRange` object definition - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: openshift.io/ImageStream - max: - openshift.io/image-tags: 20 <2> - openshift.io/images: 30 <3> ----- -<1> The name of the `LimitRange` object. -<2> The maximum number of unique image tags in the `imagestream.spec.tags` -parameter in imagestream spec. -<3> The maximum number of unique image references in the `imagestream.status.tags` -parameter in the `imagestream` spec. - -The `openshift.io/image-tags` resource represents unique image -references. Possible references are an `*ImageStreamTag*`, an -`*ImageStreamImage*` and a `*DockerImage*`. Tags can be created using -the `oc tag` and `oc import-image` commands. No distinction -is made between internal and external references. However, each unique reference -tagged in an `ImageStream` specification is counted just once. It does not -restrict pushes to an internal container image registry in any way, but is useful for tag -restriction. - -The `openshift.io/images` resource represents unique image names recorded in -image stream status. It allows for restriction of a number of images that can be -pushed to the {product-registry}. Internal and external references are not -distinguished. - -[id="nodes-cluster-limit-pvc-limits"] -== Persistent volume claim limits - -A `LimitRange` object allows you to restrict the storage requested in a persistent volume claim (PVC). - -Across all persistent volume claims in a project, the following must hold true: - -* The resource request in a persistent volume claim (PVC) must be greater than or equal -the `min` constraint for PVCs that is specified in the `LimitRange` object. - -* The resource request in a persistent volume claim (PVC) must be less than or equal -the `max` constraint for PVCs that is specified in the `LimitRange` object. 
- -.PVC `LimitRange` object definition - -[source,yaml] ----- -apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "resource-limits" <1> -spec: - limits: - - type: "PersistentVolumeClaim" - min: - storage: "2Gi" <2> - max: - storage: "50Gi" <3> ----- -<1> The name of the `LimitRange` object. -<2> The minimum amount of storage that can be requested in a persistent volume claim. -<3> The maximum amount of storage that can be requested in a persistent volume claim. diff --git a/modules/nodes-cluster-limit-ranges-viewing.adoc b/modules/nodes-cluster-limit-ranges-viewing.adoc deleted file mode 100644 index 550e5b321b88..000000000000 --- a/modules/nodes-cluster-limit-ranges-viewing.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cluster/limit-ranges.adoc - -[id="nodes-cluster-limit-viewing_{context}"] -= Viewing a limit - -You can view any limits defined in a project by navigating in the web -console to the project's *Quota* page. - -You can also use the CLI to view limit range details: - -. Get the list of `LimitRange` object defined in the project. For example, for a -project called *demoproject*: -+ ----- -$ oc get limits -n demoproject ----- -+ ----- -NAME CREATED AT -resource-limits 2020-07-15T17:14:23Z ----- - -. Describe the `LimitRange` object you are interested in, for example the -`resource-limits` limit range: -+ ----- -$ oc describe limits resource-limits -n demoproject ----- -+ ----- -Name: resource-limits -Namespace: demoproject -Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio ----- -------- --- --- --------------- ------------- ----------------------- -Pod cpu 200m 2 - - - -Pod memory 6Mi 1Gi - - - -Container cpu 100m 2 200m 300m 10 -Container memory 4Mi 1Gi 100Mi 200Mi - -openshift.io/Image storage - 1Gi - - - -openshift.io/ImageStream openshift.io/image - 12 - - - -openshift.io/ImageStream openshift.io/image-tags - 10 - - - -PersistentVolumeClaim storage - 50Gi - - - ----- - diff --git a/modules/nodes-cluster-node-overcommit.adoc b/modules/nodes-cluster-node-overcommit.adoc deleted file mode 100644 index 4c09d4c871a1..000000000000 --- a/modules/nodes-cluster-node-overcommit.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="nodes-cluster-node-overcommit_{context}"] -= Node-level overcommit - -You can use various ways to control overcommit on specific nodes, such as quality of service (QOS) -guarantees, CPU limits, or reserve resources. You can also disable overcommit for specific nodes -and specific projects. diff --git a/modules/nodes-cluster-overcommit-about.adoc b/modules/nodes-cluster-overcommit-about.adoc deleted file mode 100644 index fe23c4e45218..000000000000 --- a/modules/nodes-cluster-overcommit-about.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-overcommit-about_{context}"] -= Understanding overcommitment - -Requests and limits enable administrators to allow and manage the overcommitment of resources on a node. The scheduler uses requests for scheduling your container and providing a minimum service guarantee. Limits constrain the amount of compute resource that may be consumed on your node. 
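As a minimal illustration (pod name, image, and sizes are arbitrary), the following container is scheduled against its 1Gi memory request but is allowed to consume up to its 2Gi limit, so a node filled with such pods can be committed to more memory than it physically has:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: overcommit-demo             # hypothetical pod name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest   # placeholder image
    resources:
      requests:
        memory: 1Gi    # the scheduler reserves this amount on the node
      limits:
        memory: 2Gi    # the container can consume up to this amount at run time
----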
- -{product-title} administrators can control the level of overcommit and manage container density on nodes by configuring masters to override the ratio between request and limit set on developer containers. In conjunction with a per-project `LimitRange` object specifying limits and defaults, this adjusts the container limit and request to achieve the desired level of overcommit. - -[NOTE] -==== -These overrides have no effect if no limits have been set on containers. Create a `LimitRange` object with default limits, per individual project, or in the project template, to ensure that the overrides apply. -==== - -After these overrides, the container limits and requests must still be validated by any `LimitRange` object in the project. It is possible, for example, for developers to specify a limit close to the minimum limit, and have the request then be overridden below the minimum limit, causing the pod to be forbidden. This unfortunate user experience should be addressed with future work, but for now, configure this capability and `LimitRange` objects with caution. - - diff --git a/modules/nodes-cluster-overcommit-buffer-chunk.adoc deleted file mode 100644 index ed6a6cc0204a..000000000000 --- a/modules/nodes-cluster-overcommit-buffer-chunk.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * cluster-logging-collector.adoc - - -[id="understandin-fluentd-buffering_{context}"] -= Understanding Buffer Chunk Limiting for Fluentd - -If the Fluentd logger is unable to keep up with a high number of logs, it will need -to switch to file buffering to reduce memory usage and prevent data loss. - -Fluentd file buffering stores records in _chunks_. Chunks are stored in _buffers_. - -[NOTE] -==== -To modify the `FILE_BUFFER_LIMIT` or `BUFFER_SIZE_LIMIT` parameters -in the Fluentd daemonset as described below, you must set OpenShift Logging to the unmanaged state. -Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades. -==== - -The Fluentd `buffer_chunk_limit` is determined by the environment variable -`BUFFER_SIZE_LIMIT`, which has the default value `8m`. The file buffer size per -output is determined by the environment variable `FILE_BUFFER_LIMIT`, which has -the default value `256Mi`. The permanent volume size must be larger than -`FILE_BUFFER_LIMIT` multiplied by the number of outputs. - -On the Fluentd pods, the permanent volume */var/lib/fluentd* should be -prepared by a PVC or hostmount, for example. That area is then used for the -file buffers. - -The `buffer_type` and `buffer_path` are configured in the Fluentd configuration files as -follows: - -[source,terminal] ----- -$ egrep "buffer_type|buffer_path" *.conf ----- - -.Example output -[source,text] ----- -output-es-config.conf: - buffer_type file - buffer_path `/var/lib/fluentd/buffer-output-es-config` -output-es-ops-config.conf: - buffer_type file - buffer_path `/var/lib/fluentd/buffer-output-es-ops-config` ----- - -The Fluentd `buffer_queue_limit` is the value of the variable `BUFFER_QUEUE_LIMIT`. This value is `32` by default. - -The environment variable `BUFFER_QUEUE_LIMIT` is calculated as `(FILE_BUFFER_LIMIT / (number_of_outputs * BUFFER_SIZE_LIMIT))`.
- -If the `BUFFER_QUEUE_LIMIT` variable has the default set of values: - -* `FILE_BUFFER_LIMIT = 256Mi` -* `number_of_outputs = 1` -* `BUFFER_SIZE_LIMIT = 8Mi` - -The value of `buffer_queue_limit` will be `32`. To change the `buffer_queue_limit`, you must change the value of `FILE_BUFFER_LIMIT`. - -In this formula, `number_of_outputs` is `1` if all the logs are sent to a single resource, and it is incremented by `1` for each additional resource. For example, the value of `number_of_outputs` is: - - * `1` - if all logs are sent to a single Elasticsearch pod - * `2` - if application logs are sent to an Elasticsearch pod and ops logs are sent to -another Elasticsearch pod - * `4` - if application logs are sent to an Elasticsearch pod, ops logs are sent to -another Elasticsearch pod, and both of them are forwarded to other Fluentd instances diff --git a/modules/nodes-cluster-overcommit-configure-nodes.adoc b/modules/nodes-cluster-overcommit-configure-nodes.adoc deleted file mode 100644 index 6e32a4e41fc6..000000000000 --- a/modules/nodes-cluster-overcommit-configure-nodes.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-overcommit-configure-nodes_{context}"] -= Understanding nodes overcommitment - -In an overcommitted environment, it is important to properly configure your node to provide best system behavior. - -When the node starts, it ensures that the kernel tunable flags for memory -management are set properly. The kernel should never fail memory allocations -unless it runs out of physical memory. - -To ensure this behavior, {product-title} configures the kernel to always overcommit -memory by setting the `vm.overcommit_memory` parameter to `1`, overriding the -default operating system setting. - -{product-title} also configures the kernel not to panic when it runs out of memory -by setting the `vm.panic_on_oom` parameter to `0`. A setting of 0 instructs the -kernel to call oom_killer in an Out of Memory (OOM) condition, which kills -processes based on priority - -You can view the current setting by running the following commands on your nodes: - -[source,terminal] ----- -$ sysctl -a |grep commit ----- - -.Example output -[source,terminal] ----- -vm.overcommit_memory = 1 ----- - -[source,terminal] ----- -$ sysctl -a |grep panic ----- - -.Example output -[source,terminal] ----- -vm.panic_on_oom = 0 ----- - -[NOTE] -==== -The above flags should already be set on nodes, and no further action is -required. -==== - -You can also perform the following configurations for each node: - -* Disable or enforce CPU limits using CPU CFS quotas - -* Reserve resources for system processes - -* Reserve memory across quality of service tiers diff --git a/modules/nodes-cluster-overcommit-node-disable.adoc b/modules/nodes-cluster-overcommit-node-disable.adoc deleted file mode 100644 index 5008a034fccd..000000000000 --- a/modules/nodes-cluster-overcommit-node-disable.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-overcommit-node-disable_{context}"] -= Disabling overcommitment for a node - -When enabled, overcommitment can be disabled on each node. 
- -.Procedure - -To disable overcommitment in a node run the following command on that node: - -[source,terminal] ----- -$ sysctl -w vm.overcommit_memory=0 ----- diff --git a/modules/nodes-cluster-overcommit-node-enforcing.adoc b/modules/nodes-cluster-overcommit-node-enforcing.adoc deleted file mode 100644 index 36eef517fc6e..000000000000 --- a/modules/nodes-cluster-overcommit-node-enforcing.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-overcommit-node-enforcing_{context}"] - -= Disabling or enforcing CPU limits using CPU CFS quotas - -Nodes by default enforce specified CPU limits using the Completely Fair Scheduler (CFS) quota support in the Linux kernel. - -If you disable CPU limit enforcement, it is important to understand the impact on your node: - -* If a container has a CPU request, the request continues to be enforced by CFS shares in the Linux kernel. -* If a container does not have a CPU request, but does have a CPU limit, the CPU request defaults to the specified CPU limit, and is enforced by CFS shares in the Linux kernel. -* If a container has both a CPU request and limit, the CPU request is enforced by CFS shares in the Linux kernel, and the CPU limit has no impact on the node. - -.Prerequisites - -. Obtain the label associated with the static `MachineConfigPool` CRD for the type of node you want to configure by entering the following command: -+ -[source,terminal] ----- -$ oc edit machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2022-11-16T15:34:25Z" - generation: 4 - labels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - name: worker ----- -<1> The label appears under Labels. -+ -[TIP] -==== -If the label is not present, add a key/value pair such as: - ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -==== - -.Procedure - -. Create a custom resource (CR) for your configuration change. -+ -.Sample configuration for a disabling CPU limits -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: disable-cpu-units <1> -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <2> - kubeletConfig: - cpuCfsQuota: <3> - - "true" ----- -<1> Assign a name to CR. -<2> Specify the label from the machine config pool. -<3> Set the `cpuCfsQuota` parameter to `true`. - -. 
Run the following command to create the CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/nodes-cluster-overcommit-node-resources.adoc b/modules/nodes-cluster-overcommit-node-resources.adoc deleted file mode 100644 index 2ad225ce529f..000000000000 --- a/modules/nodes-cluster-overcommit-node-resources.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-overcommit-node-resources_{context}"] - -= Reserving resources for system processes - -To provide more reliable scheduling and minimize node resource overcommitment, -each node can reserve a portion of its resources for use by system daemons -that are required to run on your node for your cluster to function. -In particular, it is recommended that you reserve resources for incompressible resources such as memory. - -.Procedure - -To explicitly reserve resources for non-pod processes, allocate node resources by specifying resources -available for scheduling. -For more details, see Allocating Resources for Nodes. diff --git a/modules/nodes-cluster-overcommit-project-disable.adoc b/modules/nodes-cluster-overcommit-project-disable.adoc deleted file mode 100644 index c6ed79327f1b..000000000000 --- a/modules/nodes-cluster-overcommit-project-disable.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-overcommit-project-disable_{context}"] -= Disabling overcommitment for a project - -When enabled, overcommitment can be disabled per-project. -For example, you can allow infrastructure components to be configured independently of overcommitment. - -.Procedure - -To disable overcommitment in a project: - -. Edit the project object file - -. Add the following annotation: -+ -[source,yaml] ----- -quota.openshift.io/cluster-resource-override-enabled: "false" ----- - -. Create the project object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- diff --git a/modules/nodes-cluster-overcommit-qos-about.adoc b/modules/nodes-cluster-overcommit-qos-about.adoc deleted file mode 100644 index 2ad8cb3fe5b9..000000000000 --- a/modules/nodes-cluster-overcommit-qos-about.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-overcommit-qos-about_{context}"] -= Understanding overcomitment and quality of service classes - -A node is _overcommitted_ when it has a pod scheduled that makes no request, or -when the sum of limits across all pods on that node exceeds available machine -capacity. - -In an overcommitted environment, it is possible that the pods on the node will -attempt to use more compute resource than is available at any given point in -time. When this occurs, the node must give priority to one pod over another. The -facility used to make this decision is referred to as a Quality of Service (QoS) -Class. 
- -A pod is designated as one of three QoS classes with decreasing order of priority: - -.Quality of Service Classes -[options="header",cols="1,1,5"] -|=== -|Priority |Class Name |Description - -|1 (highest) -|*Guaranteed* -|If limits and optionally requests are set (not equal to 0) for all resources -and they are equal, then the pod is classified as *Guaranteed*. - -|2 -|*Burstable* -|If requests and optionally limits are set (not equal to 0) for all resources, -and they are not equal, then the pod is classified as *Burstable*. - -|3 (lowest) -|*BestEffort* -|If requests and limits are not set for any of the resources, then the pod -is classified as *BestEffort*. -|=== - -Memory is an incompressible resource, so in low memory situations, containers -that have the lowest priority are terminated first: - -- *Guaranteed* containers are considered top priority, and are guaranteed to -only be terminated if they exceed their limits, or if the system is under memory -pressure and there are no lower priority containers that can be evicted. -- *Burstable* containers under system memory pressure are more likely to be -terminated once they exceed their requests and no other *BestEffort* containers -exist. -- *BestEffort* containers are treated with the lowest priority. Processes in -these containers are first to be terminated if the system runs out of memory. - -[id="qos-about-reserve_{context}"] -== Understanding how to reserve memory across quality of service tiers - -You can use the `qos-reserved` parameter to specify a percentage of memory to be reserved -by a pod in a particular QoS level. This feature attempts to reserve requested resources so that pods -in lower QoS classes cannot use resources requested by pods in higher QoS classes. - -{product-title} uses the `qos-reserved` parameter as follows: - -- A value of `qos-reserved=memory=100%` will prevent the `Burstable` and `BestEffort` QoS classes from consuming memory -that was requested by a higher QoS class. This increases the risk of inducing OOM -on `BestEffort` and `Burstable` workloads in favor of increasing memory resource guarantees -for `Guaranteed` and `Burstable` workloads. - -- A value of `qos-reserved=memory=50%` will allow the `Burstable` and `BestEffort` QoS classes -to consume half of the memory requested by a higher QoS class. - -- A value of `qos-reserved=memory=0%` -will allow the `Burstable` and `BestEffort` QoS classes to consume up to the full node -allocatable amount if available, but increases the risk that a `Guaranteed` workload -will not have access to requested memory. This condition effectively disables this feature. diff --git a/modules/nodes-cluster-overcommit-resource-requests.adoc deleted file mode 100644 index 13269625505c..000000000000 --- a/modules/nodes-cluster-overcommit-resource-requests.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="nodes-cluster-overcommit-resource-requests_{context}"] -= Resource requests and overcommitment - -For each compute resource, a container may specify a resource request and limit. -Scheduling decisions are made based on the request to ensure that a node has -enough capacity available to meet the requested value. If a container specifies -limits, but omits requests, the requests are defaulted to the limits.
A -container is not able to exceed the specified limit on the node. - -The enforcement of limits is dependent upon the compute resource type. If a -container makes no request or limit, the container is scheduled to a node with -no resource guarantees. In practice, the container is able to consume as much of -the specified resource as is available with the lowest local priority. In low -resource situations, containers that specify no resource requests are given the -lowest quality of service. - -Scheduling is based on resources requested, while quota and hard limits refer -to resource limits, which can be set higher than requested resources. The -difference between request and limit determines the level of overcommit; -for instance, if a container is given a memory request of 1Gi and a memory limit -of 2Gi, it is scheduled based on the 1Gi request being available on the node, -but could use up to 2Gi; so it is 200% overcommitted. diff --git a/modules/nodes-cluster-overcommit-resources-containers.adoc b/modules/nodes-cluster-overcommit-resources-containers.adoc deleted file mode 100644 index 63f5143808d2..000000000000 --- a/modules/nodes-cluster-overcommit-resources-containers.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-overcommit-reserving-memory_{context}"] -= Understanding compute resources and containers -The node-enforced behavior for compute resources is specific to the resource -type. - -[id="understanding-container-CPU-requests_{context}"] -== Understanding container CPU requests - -A container is guaranteed the amount of CPU it requests and is additionally able -to consume excess CPU available on the node, up to any limit specified by the -container. If multiple containers are attempting to use excess CPU, CPU time is -distributed based on the amount of CPU requested by each container. - -For example, if one container requested 500m of CPU time and another container -requested 250m of CPU time, then any extra CPU time available on the node is -distributed among the containers in a 2:1 ratio. If a container specified a -limit, it will be throttled not to use more CPU than the specified limit. -CPU requests are enforced using the CFS shares support in the Linux kernel. By -default, CPU limits are enforced using the CFS quota support in the Linux kernel -over a 100ms measuring interval, though this can be disabled. - -[id="understanding-memory-requests-container_{context}"] -== Understanding container memory requests - -A container is guaranteed the amount of memory it requests. A container can use -more memory than requested, but once it exceeds its requested amount, it could -be terminated in a low memory situation on the node. -If a container uses less memory than requested, it will not be terminated unless -system tasks or daemons need more memory than was accounted for in the node's -resource reservation. If a container specifies a limit on memory, it is -immediately terminated if it exceeds the limit amount. - -//// -Not in 4.1 -[id="containers-ephemeral_{context}"] -== Understanding containers and ephemeral storage - -[NOTE] -==== -The {product-title} cluster uses ephemeral storage to store information that does not have to persist after the cluster is destroyed. -==== - -A container is guaranteed the amount of ephemeral storage it requests. 
A -container can use more ephemeral storage than requested, but once it exceeds its -requested amount, it can be terminated if the available ephemeral disk space gets -too low. - -If a container uses less ephemeral storage than requested, it will not be -terminated unless system tasks or daemons need more local ephemeral storage than -was accounted for in the node's resource reservation. If a container specifies a -limit on ephemeral storage, it is immediately terminated if it exceeds the limit -amount. -//// diff --git a/modules/nodes-cluster-project-overcommit.adoc b/modules/nodes-cluster-project-overcommit.adoc deleted file mode 100644 index 62649200b576..000000000000 --- a/modules/nodes-cluster-project-overcommit.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="nodes-cluster-project-overcommit_{context}"] -= Project-level limits - -To help control overcommit, you can set per-project resource limit ranges, -specifying memory and CPU limits and defaults for a project that overcommit -cannot exceed. - -For information on project-level resource limits, see Additional resources. - -Alternatively, you can disable overcommitment for specific projects. diff --git a/modules/nodes-cluster-resource-configure-about.adoc b/modules/nodes-cluster-resource-configure-about.adoc deleted file mode 100644 index 4967ccf670ae..000000000000 --- a/modules/nodes-cluster-resource-configure-about.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-configure.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-resource-configure-about_{context}"] -= Understanding managing application memory - -It is recommended to fully read the overview of how {product-title} manages -Compute Resources before proceeding. - -For each kind of resource (memory, CPU, storage), {product-title} allows -optional *request* and *limit* values to be placed on each container in a -pod. - -Note the following about memory requests and memory limits: - -* *Memory request* - - - The memory request value, if specified, influences the {product-title} - scheduler. The scheduler considers the memory request when scheduling a - container to a node, then fences off the requested memory on the chosen node - for the use of the container. - - - If a node's memory is exhausted, {product-title} prioritizes evicting its - containers whose memory usage most exceeds their memory request. In serious - cases of memory exhaustion, the node OOM killer may select and kill a - process in a container based on a similar metric. - - - The cluster administrator can assign quota or assign default values for the memory request value. - - - The cluster administrator can override the memory request values that a developer specifies, to manage cluster overcommit. - -* *Memory limit* - - - The memory limit value, if specified, provides a hard limit on the memory - that can be allocated across all the processes in a container. - - - If the memory allocated by all of the processes in a container exceeds the - memory limit, the node Out of Memory (OOM) killer will immediately select and kill a - process in the container. - - - If both memory request and limit are specified, the memory limit value must - be greater than or equal to the memory request. - - - The cluster administrator can assign quota or assign default values for the memory limit value. 
- - - The minimum memory limit is 12 MB. If a container fails to start due to a `Cannot allocate memory` pod event, the memory limit is too low. -Either increase or remove the memory limit. Removing the limit allows pods to consume unbounded node resources. - -[id="nodes-cluster-resource-configure-about-memory_{context}"] -== Managing application memory strategy - -The steps for sizing application memory on {product-title} are as follows: - -. *Determine expected container memory usage* -+ -Determine expected mean and peak container memory usage, empirically if -necessary (for example, by separate load testing). Remember to consider all the -processes that may potentially run in parallel in the container: for example, -does the main application spawn any ancillary scripts? - -. *Determine risk appetite* -+ -Determine risk appetite for eviction. If the risk appetite is low, the -container should request memory according to the expected peak usage plus a -percentage safety margin. If the risk appetite is higher, it may be more -appropriate to request memory according to the expected mean usage. - -. *Set container memory request* -+ -Set container memory request based on the above. The more accurately the -request represents the application memory usage, the better. If the request is -too high, cluster and quota usage will be inefficient. If the request is too -low, the chances of application eviction increase. - -. *Set container memory limit, if required* -+ -Set container memory limit, if required. Setting a limit has the effect of -immediately killing a container process if the combined memory usage of all -processes in the container exceeds the limit, and is therefore a mixed blessing. -On the one hand, it may make unanticipated excess memory usage obvious early -("fail fast"); on the other hand it also terminates processes abruptly. -+ -Note that some {product-title} clusters may require a limit value to be set; -some may override the request based on the limit; and some application images -rely on a limit value being set as this is easier to detect than a request -value. -+ -If the memory limit is set, it should not be set to less than the expected peak -container memory usage plus a percentage safety margin. - -. *Ensure application is tuned* -+ -Ensure application is tuned with respect to configured request and limit values, -if appropriate. This step is particularly relevant to applications which pool -memory, such as the JVM. The rest of this page discusses this. diff --git a/modules/nodes-cluster-resource-configure-evicted.adoc b/modules/nodes-cluster-resource-configure-evicted.adoc deleted file mode 100644 index 54771fbab4d2..000000000000 --- a/modules/nodes-cluster-resource-configure-evicted.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-configure.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-resource-configure-evicted_{context}"] -= Understanding pod eviction - -{product-title} may evict a pod from its node when the node's memory is -exhausted. Depending on the extent of memory exhaustion, the eviction may or -may not be graceful. Graceful eviction implies the main process (PID 1) of each -container receiving a SIGTERM signal, then some time later a SIGKILL signal if -the process has not exited already. Non-graceful eviction implies the main -process of each container immediately receiving a SIGKILL signal. - -An evicted pod has phase *Failed* and reason *Evicted*. 
It will not be -restarted, regardless of the value of `restartPolicy`. However, controllers -such as the replication controller will notice the pod's failed status and create -a new pod to replace the old one. - -[source,terminal] ----- -$ oc get pod test ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -test 0/1 Evicted 0 1m ----- - -[source,terminal] ----- -$ oc get pod test -o yaml ----- - -.Example output -[source,terminal] ----- -... -status: - message: 'Pod The node was low on resource: [MemoryPressure].' - phase: Failed - reason: Evicted ----- diff --git a/modules/nodes-cluster-resource-configure-jdk.adoc b/modules/nodes-cluster-resource-configure-jdk.adoc deleted file mode 100644 index 40b55ff1e2a8..000000000000 --- a/modules/nodes-cluster-resource-configure-jdk.adoc +++ /dev/null @@ -1,116 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-configure.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-resource-configure-jdk_{context}"] -= Understanding OpenJDK settings for {product-title} - -The default OpenJDK settings do not work well with containerized -environments. As a result, some additional Java memory -settings must always be provided whenever running the OpenJDK in a container. - -The JVM memory layout is complex, version dependent, and describing it in detail -is beyond the scope of this documentation. However, as a starting point for -running OpenJDK in a container, at least the following three memory-related -tasks are key: - -. Overriding the JVM maximum heap size. - -. Encouraging the JVM to release unused memory to the operating system, if - appropriate. - -. Ensuring all JVM processes within a container are appropriately configured. - -Optimally tuning JVM workloads for running in a container is beyond the scope of -this documentation, and may involve setting multiple additional JVM options. - -[id="nodes-cluster-resource-configure-jdk-heap_{context}"] -== Understanding how to override the JVM maximum heap size - -For many Java workloads, the JVM heap is the largest single consumer of memory. -Currently, the OpenJDK defaults to allowing up to 1/4 (1/`-XX:MaxRAMFraction`) -of the compute node's memory to be used for the heap, regardless of whether the -OpenJDK is running in a container or not. It is therefore *essential* to -override this behavior, especially if a container memory limit is also set. - -There are at least two ways the above can be achieved: - -. If the container memory limit is set and the experimental options are - supported by the JVM, set `-XX:+UnlockExperimentalVMOptions - -XX:+UseCGroupMemoryLimitForHeap`. -+ -[NOTE] -==== -The `UseCGroupMemoryLimitForHeap` option has been removed in JDK 11. Use `-XX:+UseContainerSupport` instead. -==== -+ -This sets `-XX:MaxRAM` to the container memory limit, and the maximum heap size -(`-XX:MaxHeapSize` / `-Xmx`) to 1/`-XX:MaxRAMFraction` (1/4 by default). - -. Directly override one of `-XX:MaxRAM`, `-XX:MaxHeapSize` or `-Xmx`. -+ -This option involves hard-coding a value, but has the advantage of allowing a -safety margin to be calculated. - -[id="nodes-cluster-resource-configure-jdk-unused_{context}"] -== Understanding how to encourage the JVM to release unused memory to the operating system - -By default, the OpenJDK does not aggressively return unused memory to the -operating system. 
This may be appropriate for many containerized Java -workloads, but notable exceptions include workloads where additional active -processes co-exist with a JVM within a container, whether those additional -processes are native, additional JVMs, or a combination of the two. - -Java-based agents can use the following JVM arguments to encourage the JVM -to release unused memory to the operating system: - -[source,terminal] ----- --XX:+UseParallelGC --XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4 --XX:AdaptiveSizePolicyWeight=90. ----- - -These arguments are intended to return heap -memory to the operating system whenever allocated memory exceeds 110% of in-use -memory (`-XX:MaxHeapFreeRatio`), spending up to 20% of CPU time in the garbage -collector (`-XX:GCTimeRatio`). At no time will the application heap allocation -be less than the initial heap allocation (overridden by `-XX:InitialHeapSize` / -`-Xms`). Detailed additional information is available -link:https://developers.redhat.com/blog/2014/07/15/dude-wheres-my-paas-memory-tuning-javas-footprint-in-openshift-part-1/[Tuning Java's footprint in OpenShift (Part 1)], -link:https://developers.redhat.com/blog/2014/07/22/dude-wheres-my-paas-memory-tuning-javas-footprint-in-openshift-part-2/[Tuning Java's footprint in OpenShift (Part 2)], -and at -link:https://developers.redhat.com/blog/2017/04/04/openjdk-and-containers/[OpenJDK -and Containers]. - -[id="nodes-cluster-resource-configure-jdk-proc_{context}"] -== Understanding how to ensure all JVM processes within a container are appropriately configured - -In the case that multiple JVMs run in the same container, it is essential to -ensure that they are all configured appropriately. For many workloads it will -be necessary to grant each JVM a percentage memory budget, leaving a perhaps -substantial additional safety margin. - -Many Java tools use different environment variables (`JAVA_OPTS`, `GRADLE_OPTS`, and so on) to configure their JVMs and it can be challenging to ensure -that the right settings are being passed to the right JVM. - -The `JAVA_TOOL_OPTIONS` environment variable is always respected by the OpenJDK, -and values specified in `JAVA_TOOL_OPTIONS` will be overridden by other options -specified on the JVM command line. By default, to ensure that these options are -used by default for all JVM workloads run in the Java-based agent image, the {product-title} Jenkins -Maven agent image sets: - -[source,terminal] ----- -JAVA_TOOL_OPTIONS="-XX:+UnlockExperimentalVMOptions --XX:+UseCGroupMemoryLimitForHeap -Dsun.zip.disableMemoryMapping=true" ----- - -[NOTE] -==== -The `UseCGroupMemoryLimitForHeap` option has been removed in JDK 11. Use `-XX:+UseContainerSupport` instead. -==== - -This does not guarantee that additional options are not required, but is -intended to be a helpful starting point. diff --git a/modules/nodes-cluster-resource-configure-oom.adoc b/modules/nodes-cluster-resource-configure-oom.adoc deleted file mode 100644 index aa5ba1a93664..000000000000 --- a/modules/nodes-cluster-resource-configure-oom.adoc +++ /dev/null @@ -1,141 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-configure.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-resource-configure-oom_{context}"] -= Understanding OOM kill policy - -{product-title} can kill a process in a container if the total memory usage of -all the processes in the container exceeds the memory limit, or in serious cases -of node memory exhaustion. 
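As a minimal sketch of the limit-based case described above (the pod name, image, and values are placeholder assumptions, not taken from the module), a container with a deliberately low memory limit makes the behavior easy to observe: any process that pushes the container's total memory usage past the limit becomes a candidate for an OOM kill, which the memory-consuming command in the procedure below can provoke.

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: oom-demo                # placeholder name, for illustration only
spec:
  containers:
  - name: oom-demo
    image: fedora:latest        # same base image used by the request/limit example module
    command:
    - sleep
    - "3600"
    resources:
      requests:
        memory: 64Mi            # the scheduler places the pod based on this value
      limits:
        memory: 128Mi           # exceeding this total across all container processes triggers an OOM kill
----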
- -When a process is Out of Memory (OOM) killed, this might result in the container -exiting immediately. If the container PID 1 process receives the *SIGKILL*, the -container will exit immediately. Otherwise, the container behavior is -dependent on the behavior of the other processes. - -For example, a container process exited with code 137, indicating it received a SIGKILL signal. - -If the container does not exit immediately, an OOM kill is detectable as -follows: - -. Access the pod using a remote shell: -+ -[source,terminal] ----- -# oc rsh test ----- - -. Run the following command to see the current OOM kill count in `/sys/fs/cgroup/memory/memory.oom_control`: -+ -[source,terminal] ----- -$ grep '^oom_kill ' /sys/fs/cgroup/memory/memory.oom_control -oom_kill 0 ----- - -. Run the following command to provoke an OOM kill: -+ -[source,terminal] ----- -$ sed -e '' </dev/zero ----- -+ -.Example output -[source,terminal] ----- -Killed ----- - -. Run the following command to view the exit status of the `sed` command: -+ -[source,terminal] ----- -$ echo $? ----- -+ -.Example output -[source,terminal] ----- -137 ----- -+ -The `137` code indicates the container process exited with code 137, indicating it received a SIGKILL signal. - -. Run the following command to see that the OOM kill counter in `/sys/fs/cgroup/memory/memory.oom_control` incremented: -+ -[source,terminal] ----- -$ grep '^oom_kill ' /sys/fs/cgroup/memory/memory.oom_control -oom_kill 1 ----- -+ -If one or more processes in a pod are OOM killed, when the pod subsequently -exits, whether immediately or not, it will have phase *Failed* and reason -*OOMKilled*. An OOM-killed pod might be restarted depending on the value of -`restartPolicy`. If not restarted, controllers such as the -replication controller will notice the pod's failed status and create a new pod -to replace the old one. -+ -Use the follwing command to get the pod status: -+ -[source,terminal] ----- -$ oc get pod test ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -test 0/1 OOMKilled 0 1m ----- - -* If the pod has not restarted, run the following command to view the pod: -+ -[source,terminal] ----- -$ oc get pod test -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -status: - containerStatuses: - - name: test - ready: false - restartCount: 0 - state: - terminated: - exitCode: 137 - reason: OOMKilled - phase: Failed ----- - -* If restarted, run the following command to view the pod: -+ -[source,terminal] ----- -$ oc get pod test -o yaml ----- -+ -.Example output -[source,terminal] ----- -... -status: - containerStatuses: - - name: test - ready: true - restartCount: 1 - lastState: - terminated: - exitCode: 137 - reason: OOMKilled - state: - running: - phase: Running ----- diff --git a/modules/nodes-cluster-resource-configure-request-limit.adoc b/modules/nodes-cluster-resource-configure-request-limit.adoc deleted file mode 100644 index d32987fd14d3..000000000000 --- a/modules/nodes-cluster-resource-configure-request-limit.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-configure.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-configure-request-limit_{context}"] -= Finding the memory request and limit from within a pod - -An application wishing to dynamically discover its memory request and limit from -within a pod should use the Downward API. - -.Procedure - -. 
Configure the pod to add the `MEMORY_REQUEST` and `MEMORY_LIMIT` stanzas: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: test -spec: - containers: - - name: test - image: fedora:latest - command: - - sleep - - "3600" - env: - - name: MEMORY_REQUEST <1> - valueFrom: - resourceFieldRef: - containerName: test - resource: requests.memory - - name: MEMORY_LIMIT <2> - valueFrom: - resourceFieldRef: - containerName: test - resource: limits.memory - resources: - requests: - memory: 384Mi - limits: - memory: 512Mi ----- -<1> Add this stanza to discover the application memory request value. -<2> Add this stanza to discover the application memory limit value. - -. Create the pod: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. Access the pod using a remote shell: -+ -[source,terminal] ----- -$ oc rsh test ----- - -. Check that the requested values were applied: -+ -[source,terminal] ----- -$ env | grep MEMORY | sort ----- -+ -.Example output -[source,terminal] ----- -MEMORY_LIMIT=536870912 -MEMORY_REQUEST=402653184 ----- - -[NOTE] -==== -The memory limit value can also be read from inside the container by the -`/sys/fs/cgroup/memory/memory.limit_in_bytes` file. -==== diff --git a/modules/nodes-cluster-resource-configure.adoc b/modules/nodes-cluster-resource-configure.adoc deleted file mode 100644 index f9e449647264..000000000000 --- a/modules/nodes-cluster-resource-configure.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-configure_{context}"] -= Configuring cluster-level overcommit - - -The Cluster Resource Override Operator requires a `ClusterResourceOverride` custom resource (CR) -and a label for each project where you want the Operator to control overcommit. - -.Prerequisites - -* The Cluster Resource Override Operator has no effect if limits have not -been set on containers. You must specify default limits for a project using a `LimitRange` object or configure limits in `Pod` specs for the overrides to apply. - -.Procedure - -To modify cluster-level overcommit: - -. Edit the `ClusterResourceOverride` CR: -+ -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - name: cluster -spec: - podResourceOverride: - spec: - memoryRequestToLimitPercent: 50 <1> - cpuRequestToLimitPercent: 25 <2> - limitCPUToMemoryPercent: 200 <3> ----- -<1> Optional. Specify the percentage to override the container memory limit, if used, between 1-100. The default is 50. -<2> Optional. Specify the percentage to override the container CPU limit, if used, between 1-100. The default is 25. -<3> Optional. Specify the percentage to override the container memory limit, if used. Scaling 1Gi of RAM at 100 percent is equal to 1 CPU core. This is processed prior to overriding the CPU request, if configured. The default is 200. - -. Ensure the following label has been added to the Namespace object for each project where you want the Cluster Resource Override Operator to control overcommit: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - - ... - - labels: - clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: "true" <1> - - ... ----- -<1> Add this label to each project. 
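The prerequisite above refers to a `LimitRange` object without showing one. The following is a hedged sketch of per-project defaults (the object name, namespace, and values are illustrative assumptions); with defaults like these in place, every container in the project carries a limit for the override percentages to act on.

[source,yaml]
----
apiVersion: v1
kind: LimitRange
metadata:
  name: resource-defaults       # placeholder name
  namespace: my-project         # placeholder project name
spec:
  limits:
  - type: Container
    default:                    # limit applied when a container does not set one
      cpu: "1"
      memory: 1Gi
    defaultRequest:             # request applied when a container does not set one
      cpu: 500m
      memory: 512Mi
----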
diff --git a/modules/nodes-cluster-resource-levels-about.adoc b/modules/nodes-cluster-resource-levels-about.adoc deleted file mode 100644 index a259b7cccbc9..000000000000 --- a/modules/nodes-cluster-resource-levels-about.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-about.adoc - -:_content-type: CONCEPT -[id="nodes-cluster-resource-levels-about_{context}"] -= Understanding the {product-title} cluster capacity tool - -The cluster capacity tool simulates a sequence of scheduling decisions to -determine how many instances of an input pod can be scheduled on the cluster -before it is exhausted of resources to provide a more accurate estimation. - -[NOTE] -==== -The remaining allocatable capacity is a rough estimation, because it does not -count all of the resources being distributed among nodes. It analyzes only the -remaining resources and estimates the available capacity that is still -consumable in terms of a number of instances of a pod with given requirements -that can be scheduled in a cluster. - -Also, pods might only have scheduling support on particular sets of nodes based -on its selection and affinity criteria. As a result, the estimation of which -remaining pods a cluster can schedule can be difficult. -==== - -You can run the cluster capacity analysis tool as a stand-alone utility from -the command line, or as a job in a pod inside an {product-title} cluster. -Running it as job inside of a pod enables you to run it multiple times without intervention. diff --git a/modules/nodes-cluster-resource-levels-command.adoc b/modules/nodes-cluster-resource-levels-command.adoc deleted file mode 100644 index a23a24648dc1..000000000000 --- a/modules/nodes-cluster-resource-levels-command.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-levels.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-levels-command_{context}"] -= Running the cluster capacity tool on the command line - -You can run the {product-title} cluster capacity tool from the command line -to estimate the number of pods that can be scheduled onto your cluster. - -.Prerequisites - -* Run the link:https://catalog.redhat.com/software/containers/openshift4/ose-cluster-capacity/5cca0324d70cc57c44ae8eb6?container-tabs=overview[OpenShift Cluster Capacity Tool], which is available as a container image from the Red Hat Ecosystem Catalog. - -* Create a sample `Pod` spec file, which the tool uses for estimating resource usage. The `podspec` specifies its resource -requirements as `limits` or `requests`. The cluster capacity tool takes the -pod's resource requirements into account for its estimation analysis. -+ -An example of the `Pod` spec input is: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: small-pod - labels: - app: guestbook - tier: frontend -spec: - containers: - - name: php-redis - image: gcr.io/google-samples/gb-frontend:v4 - imagePullPolicy: Always - resources: - limits: - cpu: 150m - memory: 100Mi - requests: - cpu: 150m - memory: 100Mi ----- - - -.Procedure - -To use the cluster capacity tool on the command line: - -. From the terminal, log in to the Red Hat Registry: -+ -[source,terminal] ----- -$ podman login registry.redhat.io ----- - -. Pull the cluster capacity tool image: -+ -[source,terminal] ----- -$ podman pull registry.redhat.io/openshift4/ose-cluster-capacity ----- - -. 
Run the cluster capacity tool: -+ -[source,terminal] ----- -$ podman run -v $HOME/.kube:/kube:Z -v $(pwd):/cc:Z ose-cluster-capacity \ -/bin/cluster-capacity --kubeconfig /kube/config --podspec /cc/pod-spec.yaml \ ---verbose <1> ----- -<1> You can also add the `--verbose` option to output a detailed description of how many pods can be scheduled on each node in the cluster. -+ -.Example output -[source,terminal] ----- -small-pod pod requirements: - - CPU: 150m - - Memory: 100Mi - -The cluster can schedule 88 instance(s) of the pod small-pod. - -Termination reason: Unschedulable: 0/5 nodes are available: 2 Insufficient cpu, -3 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't -tolerate. - -Pod distribution among nodes: -small-pod - - 192.168.124.214: 45 instance(s) - - 192.168.124.120: 43 instance(s) ----- -+ -In the above example, the number of estimated pods that can be scheduled onto -the cluster is 88. diff --git a/modules/nodes-cluster-resource-levels-job.adoc b/modules/nodes-cluster-resource-levels-job.adoc deleted file mode 100644 index 8ef5171faa19..000000000000 --- a/modules/nodes-cluster-resource-levels-job.adoc +++ /dev/null @@ -1,177 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-resource-levels.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-levels-job_{context}"] -= Running the cluster capacity tool as a job inside a pod - -Running the cluster capacity tool as a job inside of a pod has the advantage of -being able to be run multiple times without needing user intervention. Running -the cluster capacity tool as a job involves using a `ConfigMap` object. - -.Prerequisites - -Download and install link:https://github.com/kubernetes-incubator/cluster-capacity[the cluster capacity tool]. - -.Procedure - -To run the cluster capacity tool: - -. Create the cluster role: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - ----- -+ -.Example output -[source,terminal] ----- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cluster-capacity-role -rules: -- apiGroups: [""] - resources: ["pods", "nodes", "persistentvolumeclaims", "persistentvolumes", "services", "replicationcontrollers"] - verbs: ["get", "watch", "list"] -- apiGroups: ["apps"] - resources: ["replicasets", "statefulsets"] - verbs: ["get", "watch", "list"] -- apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["get", "watch", "list"] -- apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "watch", "list"] -EOF ----- - -. Create the service account: -+ -[source,terminal] ----- -$ oc create sa cluster-capacity-sa ----- - -. Add the role to the service account: -+ -[source,terminal] ----- -$ oc adm policy add-cluster-role-to-user cluster-capacity-role \ - system:serviceaccount:default:cluster-capacity-sa ----- - -. Define and create the `Pod` spec: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: small-pod - labels: - app: guestbook - tier: frontend -spec: - containers: - - name: php-redis - image: gcr.io/google-samples/gb-frontend:v4 - imagePullPolicy: Always - resources: - limits: - cpu: 150m - memory: 100Mi - requests: - cpu: 150m - memory: 100Mi ----- - -. The cluster capacity analysis is mounted in a volume using a -`ConfigMap` object named `cluster-capacity-configmap` to mount input pod spec file -`pod.yaml` into a volume `test-volume` at the path `/test-pod`. 
-+ -If you haven't created a `ConfigMap` object, create one before creating the job: -+ ----- -$ oc create configmap cluster-capacity-configmap \ - --from-file=pod.yaml=pod.yaml ----- - -. Create the job using the below example of a job specification file: -+ -[source,yaml] ----- -apiVersion: batch/v1 -kind: Job -metadata: - name: cluster-capacity-job -spec: - parallelism: 1 - completions: 1 - template: - metadata: - name: cluster-capacity-pod - spec: - containers: - - name: cluster-capacity - image: openshift/origin-cluster-capacity - imagePullPolicy: "Always" - volumeMounts: - - mountPath: /test-pod - name: test-volume - env: - - name: CC_INCLUSTER <1> - value: "true" - command: - - "/bin/sh" - - "-ec" - - | - /bin/cluster-capacity --podspec=/test-pod/pod.yaml --verbose - restartPolicy: "Never" - serviceAccountName: cluster-capacity-sa - volumes: - - name: test-volume - configMap: - name: cluster-capacity-configmap ----- -<1> A required environment variable letting the cluster capacity tool - know that it is running inside a cluster as a pod. - + -The `pod.yaml` key of the `ConfigMap` object is the same as the `Pod` spec file -name, though it is not required. By doing this, the input pod spec file can be -accessed inside the pod as `/test-pod/pod.yaml`. - -. Run the cluster capacity image as a job in a pod: -+ -[source,terminal] ----- -$ oc create -f cluster-capacity-job.yaml ----- - -. Check the job logs to find the number of pods that can be scheduled in the - cluster: -+ -[source,terminal] ----- -$ oc logs jobs/cluster-capacity-job ----- -+ -.Example output -[source,terminal] ----- -small-pod pod requirements: - - CPU: 150m - - Memory: 100Mi - -The cluster can schedule 52 instance(s) of the pod small-pod. - -Termination reason: Unschedulable: No nodes are available that match all of the -following predicates:: Insufficient cpu (2). - -Pod distribution among nodes: -small-pod - - 192.168.124.214: 26 instance(s) - - 192.168.124.120: 26 instance(s) ----- diff --git a/modules/nodes-cluster-resource-override-deploy-cli.adoc b/modules/nodes-cluster-resource-override-deploy-cli.adoc deleted file mode 100644 index 787cb387cae9..000000000000 --- a/modules/nodes-cluster-resource-override-deploy-cli.adoc +++ /dev/null @@ -1,214 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-override-deploy-cli_{context}"] -= Installing the Cluster Resource Override Operator using the CLI - -You can use the {product-title} CLI to install the Cluster Resource Override Operator to help control overcommit in your cluster. - -.Prerequisites - -* The Cluster Resource Override Operator has no effect if limits have not been set on containers. You must specify default limits for a project using a `LimitRange` object or configure limits in `Pod` specs for the overrides to apply. - -.Procedure - -To install the Cluster Resource Override Operator using the CLI: - -. Create a namespace for the Cluster Resource Override Operator: - -.. Create a `Namespace` object YAML file (for example, `cro-namespace.yaml`) for the Cluster Resource Override Operator: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: clusterresourceoverride-operator ----- - -.. Create the namespace: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f cro-namespace.yaml ----- - -. 
Create an Operator group: - -.. Create an `OperatorGroup` object YAML file (for example, cro-og.yaml) for the Cluster Resource Override Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: clusterresourceoverride-operator - namespace: clusterresourceoverride-operator -spec: - targetNamespaces: - - clusterresourceoverride-operator ----- - -.. Create the Operator Group: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f cro-og.yaml ----- - -. Create a subscription: - -.. Create a `Subscription` object YAML file (for example, cro-sub.yaml) for the Cluster Resource Override Operator: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: clusterresourceoverride - namespace: clusterresourceoverride-operator -spec: - channel: "4.13" - name: clusterresourceoverride - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the subscription: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f cro-sub.yaml ----- - -. Create a `ClusterResourceOverride` custom resource (CR) object in the `clusterresourceoverride-operator` namespace: - -.. Change to the `clusterresourceoverride-operator` namespace. -+ -[source,terminal] ----- -$ oc project clusterresourceoverride-operator ----- - -.. Create a `ClusterResourceOverride` object YAML file (for example, cro-cr.yaml) for the Cluster Resource Override Operator: -+ -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - name: cluster <1> -spec: - podResourceOverride: - spec: - memoryRequestToLimitPercent: 50 <2> - cpuRequestToLimitPercent: 25 <3> - limitCPUToMemoryPercent: 200 <4> ----- -<1> The name must be `cluster`. -<2> Optional. Specify the percentage to override the container memory limit, if used, between 1-100. The default is 50. -<3> Optional. Specify the percentage to override the container CPU limit, if used, between 1-100. The default is 25. -<4> Optional. Specify the percentage to override the container memory limit, if used. Scaling 1Gi of RAM at 100 percent is equal to 1 CPU core. This is processed prior to overriding the CPU request, if configured. The default is 200. - -.. Create the `ClusterResourceOverride` object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f cro-cr.yaml ----- - -. Verify the current state of the admission webhook by checking the status of the cluster custom resource. -+ -[source,terminal] ----- -$ oc get clusterresourceoverride cluster -n clusterresourceoverride-operator -o yaml ----- -+ -The `mutatingWebhookConfigurationRef` section appears when the webhook is called. 
-+ -.Example output -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operator.autoscaling.openshift.io/v1","kind":"ClusterResourceOverride","metadata":{"annotations":{},"name":"cluster"},"spec":{"podResourceOverride":{"spec":{"cpuRequestToLimitPercent":25,"limitCPUToMemoryPercent":200,"memoryRequestToLimitPercent":50}}}} - creationTimestamp: "2019-12-18T22:35:02Z" - generation: 1 - name: cluster - resourceVersion: "127622" - selfLink: /apis/operator.autoscaling.openshift.io/v1/clusterresourceoverrides/cluster - uid: 978fc959-1717-4bd1-97d0-ae00ee111e8d -spec: - podResourceOverride: - spec: - cpuRequestToLimitPercent: 25 - limitCPUToMemoryPercent: 200 - memoryRequestToLimitPercent: 50 -status: - -.... - - mutatingWebhookConfigurationRef: <1> - apiVersion: admissionregistration.k8s.io/v1beta1 - kind: MutatingWebhookConfiguration - name: clusterresourceoverrides.admission.autoscaling.openshift.io - resourceVersion: "127621" - uid: 98b3b8ae-d5ce-462b-8ab5-a729ea8f38f3 - -.... ----- -<1> Reference to the `ClusterResourceOverride` admission webhook. - -//// -. When the webhook is called, you can add a label to any Namespaces where you want overrides enabled: -+ ----- -$ oc edit namespace <name> ----- -+ ----- -apiVersion: v1 -kind: Namespace -metadata: - -.... - - labels: - clusterresourceoverrides.admission.autoscaling.openshift.io: enabled <1> ----- -<1> Add the `clusterresourceoverrides.admission.autoscaling.openshift.io: enabled` label to the Namespace. -//// diff --git a/modules/nodes-cluster-resource-override-deploy-console.adoc b/modules/nodes-cluster-resource-override-deploy-console.adoc deleted file mode 100644 index 600c1ba5e840..000000000000 --- a/modules/nodes-cluster-resource-override-deploy-console.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc - -:_content-type: PROCEDURE -[id="nodes-cluster-resource-override-deploy-console_{context}"] -= Installing the Cluster Resource Override Operator using the web console - -You can use the {product-title} web console to install the Cluster Resource Override Operator to help control overcommit in your cluster. - -.Prerequisites - -* The Cluster Resource Override Operator has no effect if limits have not -been set on containers. You must specify default limits for a project using a `LimitRange` object or configure limits in `Pod` specs for the overrides to apply. - -.Procedure - -To install the Cluster Resource Override Operator using the {product-title} web console: - -. In the {product-title} web console, navigate to *Home* -> *Projects* - -.. Click *Create Project*. - -.. Specify `clusterresourceoverride-operator` as the name of the project. - -.. Click *Create*. - -. Navigate to *Operators* -> *OperatorHub*. - -.. Choose *ClusterResourceOverride Operator* from the list of available Operators and click *Install*. - -.. On the *Install Operator* page, make sure *A specific Namespace on the cluster* is selected for *Installation Mode*. - -.. Make sure *clusterresourceoverride-operator* is selected for *Installed Namespace*. - -.. Select an *Update Channel* and *Approval Strategy*. - -.. Click *Install*. - -. On the *Installed Operators* page, click *ClusterResourceOverride*. - -.. On the *ClusterResourceOverride Operator* details page, click *Create Instance*. - -.. 
On the *Create ClusterResourceOverride* page, edit the YAML template to set the overcommit values as needed: -+ -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - name: cluster <1> -spec: - podResourceOverride: - spec: - memoryRequestToLimitPercent: 50 <2> - cpuRequestToLimitPercent: 25 <3> - limitCPUToMemoryPercent: 200 <4> ----- -<1> The name must be `cluster`. -<2> Optional. Specify the percentage to override the container memory limit, if used, between 1-100. The default is 50. -<3> Optional. Specify the percentage to override the container CPU limit, if used, between 1-100. The default is 25. -<4> Optional. Specify the percentage to override the container memory limit, if used. Scaling 1Gi of RAM at 100 percent is equal to 1 CPU core. This is processed prior to overriding the CPU request, if configured. The default is 200. - -.. Click *Create*. - -. Check the current state of the admission webhook by checking the status of the cluster custom resource: - -.. On the *ClusterResourceOverride Operator* page, click *cluster*. - -.. On the *ClusterResourceOverride Details* page, click *YAML*. The `mutatingWebhookConfigurationRef` section appears when the webhook is called. -+ -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operator.autoscaling.openshift.io/v1","kind":"ClusterResourceOverride","metadata":{"annotations":{},"name":"cluster"},"spec":{"podResourceOverride":{"spec":{"cpuRequestToLimitPercent":25,"limitCPUToMemoryPercent":200,"memoryRequestToLimitPercent":50}}}} - creationTimestamp: "2019-12-18T22:35:02Z" - generation: 1 - name: cluster - resourceVersion: "127622" - selfLink: /apis/operator.autoscaling.openshift.io/v1/clusterresourceoverrides/cluster - uid: 978fc959-1717-4bd1-97d0-ae00ee111e8d -spec: - podResourceOverride: - spec: - cpuRequestToLimitPercent: 25 - limitCPUToMemoryPercent: 200 - memoryRequestToLimitPercent: 50 -status: - -.... - - mutatingWebhookConfigurationRef: <1> - apiVersion: admissionregistration.k8s.io/v1beta1 - kind: MutatingWebhookConfiguration - name: clusterresourceoverrides.admission.autoscaling.openshift.io - resourceVersion: "127621" - uid: 98b3b8ae-d5ce-462b-8ab5-a729ea8f38f3 - -.... - ----- -<1> Reference to the `ClusterResourceOverride` admission webhook. - -//// -. When the webhook is called, you can add a label to any Namespaces where you want overrides enabled: - -.. Click `Administration` -> `Namespaces`. - -.. Click the Namespace to edit then click *YAML*. - -.. Add the label under `metadata`: -+ ----- -apiVersion: v1 -kind: Namespace -metadata: - -.... - - labels: - clusterresourceoverrides.admission.autoscaling.openshift.io: enabled <1> ----- -<1> Add the `clusterresourceoverrides.admission.autoscaling.openshift.io: enabled` label to the Namespace. -//// diff --git a/modules/nodes-cluster-resource-override-deploy.adoc b/modules/nodes-cluster-resource-override-deploy.adoc deleted file mode 100644 index 31fc3f10c459..000000000000 --- a/modules/nodes-cluster-resource-override-deploy.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc - -[id="nodes-cluster-resource-override-deploy_{context}"] -= Installing the Cluster Resource Override Operator - -You can use the {product-title} console or CLI to install the Red Hat OpenShift Logging Operator. 
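As a quick, illustrative check that the overrides are active (a sketch under assumptions: `<project>` stands for any project you have labeled, and the pod name is a placeholder), the label can also be applied with `oc label namespace <project> clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true`, after which a pod that sets only limits should come back with requests injected by the admission webhook:

[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: override-check          # placeholder name
  namespace: <project>          # a project carrying the enabled label
spec:
  containers:
  - name: override-check
    image: fedora:latest
    command: ["sleep", "3600"]
    resources:
      limits:
        cpu: "1"
        memory: 1Gi             # only limits are set; the webhook is expected to fill in the requests
----

[source,terminal]
----
$ oc get pod override-check -n <project> -o jsonpath='{.spec.containers[0].resources.requests}'
----

With the sample percentages shown earlier, the injected memory request would be expected at half of the memory limit (512Mi for a 1Gi limit); the resulting CPU request and limit depend on how `cpuRequestToLimitPercent` and `limitCPUToMemoryPercent` interact, so treat any specific CPU figure as an assumption. If no requests appear, recheck the namespace label and the `mutatingWebhookConfigurationRef` status described above.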
diff --git a/modules/nodes-cluster-resource-override.adoc b/modules/nodes-cluster-resource-override.adoc deleted file mode 100644 index 21d2dd30723d..000000000000 --- a/modules/nodes-cluster-resource-override.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -[id="nodes-cluster-resource-override_{context}"] -= Cluster-level overcommit using the Cluster Resource Override Operator - -The Cluster Resource Override Operator is an admission webhook that allows you to control the level of overcommit and manage -container density across all the nodes in your cluster. The Operator controls how nodes in specific projects can exceed defined memory and CPU limits. - -You must install the Cluster Resource Override Operator using the {product-title} console or CLI as shown in the following sections. -During the installation, you create a `ClusterResourceOverride` custom resource (CR), where you set the level of overcommit, as shown in the -following example: - -[source,yaml] ----- -apiVersion: operator.autoscaling.openshift.io/v1 -kind: ClusterResourceOverride -metadata: - name: cluster <1> -spec: - podResourceOverride: - spec: - memoryRequestToLimitPercent: 50 <2> - cpuRequestToLimitPercent: 25 <3> - limitCPUToMemoryPercent: 200 <4> ----- -<1> The name must be `cluster`. -<2> Optional. If a container memory limit has been specified or defaulted, the memory request is overridden to this percentage of the limit, between 1-100. The default is 50. -<3> Optional. If a container CPU limit has been specified or defaulted, the CPU request is overridden to this percentage of the limit, between 1-100. The default is 25. -<4> Optional. If a container memory limit has been specified or defaulted, the CPU limit is overridden to a percentage of the memory limit, if specified. Scaling 1Gi of RAM at 100 percent is equal to 1 CPU core. This is processed prior to overriding the CPU request (if configured). The default is 200. - -[NOTE] -==== -The Cluster Resource Override Operator overrides have no effect if limits have not -been set on containers. Create a `LimitRange` object with default limits per individual project -or configure limits in `Pod` specs for the overrides to apply. -==== - -When configured, overrides can be enabled per-project by applying the following -label to the Namespace object for each project: - -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - -.... - - labels: - clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: "true" - -.... ----- - -The Operator watches for the `ClusterResourceOverride` CR and ensures that the `ClusterResourceOverride` admission webhook is installed into the same namespace as the operator. diff --git a/modules/nodes-cluster-worker-latency-profiles-about.adoc b/modules/nodes-cluster-worker-latency-profiles-about.adoc deleted file mode 100644 index f9a4e3ce7486..000000000000 --- a/modules/nodes-cluster-worker-latency-profiles-about.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-worker-latency-profiles -// * nodes/edge/nodes-edge-remote-workers. ?? -// * post_installation_configuration/cluster-tasks ?? 
-
-:_content-type: CONCEPT
-[id="nodes-cluster-worker-latency-profiles-about_{context}"]
-= Understanding worker latency profiles
-
-Worker latency profiles are multiple sets of carefully tuned values for the `node-status-update-frequency`, `node-monitor-grace-period`, `default-not-ready-toleration-seconds`, and `default-unreachable-toleration-seconds` parameters. These parameters let you control the reaction of the cluster to latency issues without needing to determine the best values manually.
-
-All worker latency profiles configure the following parameters:
-
---
-* `node-status-update-frequency`. Specifies the amount of time in seconds that a kubelet updates its status to the Kubernetes Controller Manager Operator.
-* `node-monitor-grace-period`. Specifies the amount of time in seconds that the Kubernetes Controller Manager Operator waits for an update from a kubelet before marking the node unhealthy and adding the `node.kubernetes.io/not-ready` or `node.kubernetes.io/unreachable` taint to the node.
-* `default-not-ready-toleration-seconds`. Specifies the amount of time in seconds after marking a node unhealthy that the Kubernetes Controller Manager Operator waits before evicting pods from that node.
-* `default-unreachable-toleration-seconds`. Specifies the amount of time in seconds after marking a node unreachable that the Kubernetes Controller Manager Operator waits before evicting pods from that node.
---
-
-[IMPORTANT]
-====
-Manually modifying the `node-monitor-grace-period` parameter is not supported.
-====
-
-The following Operators monitor the changes to the worker latency profiles and respond accordingly:
-
-* The Machine Config Operator (MCO) updates the `node-status-update-frequency` parameter on the worker nodes.
-* The Kubernetes Controller Manager Operator updates the `node-monitor-grace-period` parameter on the control plane nodes.
-* The Kubernetes API Server Operator updates the `default-not-ready-toleration-seconds` and `default-unreachable-toleration-seconds` parameters on the control plane nodes.
-
-While the default configuration works in most cases, {product-title} offers two other worker latency profiles for situations where the network is experiencing higher latency than usual. The three worker latency profiles are described in the following sections:
-
-Default worker latency profile:: With the `Default` profile, each kubelet reports its node status to the Kubernetes Controller Manager Operator (kube controller) every 10 seconds. The Kubernetes Controller Manager Operator checks the kubelet for a status every 5 seconds.
-+
-The Kubernetes Controller Manager Operator waits 40 seconds for a status update before considering that node unhealthy. It marks the node with the `node.kubernetes.io/not-ready` or `node.kubernetes.io/unreachable` taint and evicts the pods on that node. If a pod on that node has the `NoExecute` toleration, the pod gets evicted in 300 seconds. If the pod has the `tolerationSeconds` parameter, the eviction waits for the period specified by that parameter.
-+ -[cols="2,1,2,1"] -|=== -| Profile | Component | Parameter | Value - -.4+| Default -| kubelet -| `node-status-update-frequency` -| 10s - -| Kubelet Controller Manager -| `node-monitor-grace-period` -| 40s - -| Kubernetes API Server -| `default-not-ready-toleration-seconds` -| 300s - -| Kubernetes API Server -| `default-unreachable-toleration-seconds` -| 300s - -|=== - -Medium worker latency profile:: Use the `MediumUpdateAverageReaction` profile if the network latency is slightly higher than usual. -+ -The `MediumUpdateAverageReaction` profile reduces the frequency of kubelet updates to 20 seconds and changes the period that the Kubernetes Controller Manager Operator waits for those updates to 2 minutes. The pod eviction period for a pod on that node is reduced to 60 seconds. If the pod has the `tolerationSeconds` parameter, the eviction waits for the period specified by that parameter. -+ -The Kubernetes Controller Manager Operator waits for 2 minutes to consider a node unhealthy. In another minute, the eviction process starts. -+ -[cols="2,1,2,1"] -|=== -| Profile | Component | Parameter | Value - -.4+| MediumUpdateAverageReaction -| kubelet -| `node-status-update-frequency` -| 20s - -| Kubelet Controller Manager -| `node-monitor-grace-period` -| 2m - -| Kubernetes API Server -| `default-not-ready-toleration-seconds` -| 60s - -| Kubernetes API Server -| `default-unreachable-toleration-seconds` -| 60s - -|=== - -Low worker latency profile:: Use the `LowUpdateSlowReaction` profile if the network latency is extremely high. -+ -The `LowUpdateSlowReaction` profile reduces the frequency of kubelet updates to 1 minute and changes the period that the Kubernetes Controller Manager Operator waits for those updates to 5 minutes. The pod eviction period for a pod on that node is reduced to 60 seconds. If the pod has the `tolerationSeconds` parameter, the eviction waits for the period specified by that parameter. -+ -The Kubernetes Controller Manager Operator waits for 5 minutes to consider a node unhealthy. In another minute, the eviction process starts. -+ -[cols="2,1,2,1"] -|=== -| Profile | Component | Parameter | Value - -.4+| LowUpdateSlowReaction -| kubelet -| `node-status-update-frequency` -| 1m - -| Kubelet Controller Manager -| `node-monitor-grace-period` -| 5m - -| Kubernetes API Server -| `default-not-ready-toleration-seconds` -| 60s - -| Kubernetes API Server -| `default-unreachable-toleration-seconds` -| 60s - -|=== - diff --git a/modules/nodes-cluster-worker-latency-profiles-using.adoc b/modules/nodes-cluster-worker-latency-profiles-using.adoc deleted file mode 100644 index efd647903ae2..000000000000 --- a/modules/nodes-cluster-worker-latency-profiles-using.adoc +++ /dev/null @@ -1,138 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-worker-latency-profiles -// * Need to determine if these are good locations: -// * nodes/edge/nodes-edge-remote-workers -// * post_installation_configuration/cluster-tasks - -:_content-type: PROCEDURE -[id="nodes-cluster-worker-latency-profiles-using_{context}"] -= Using worker latency profiles - -To implement a worker latency profile to deal with network latency, edit the `node.config` object to add the name of the profile. You can change the profile at any time as latency increases or decreases. - -You must move one worker latency profile at a time. For example, you cannot move directly from the `Default` profile to the `LowUpdateSlowReaction` worker latency profile. 
You must move from the `default` worker latency profile to the `MediumUpdateAverageReaction` profile first, then to `LowUpdateSlowReaction`. Similarly, when returning to the default profile, you must move from the low profile to the medium profile first, then to the default. - -[NOTE] -==== -You can also configure worker latency profiles upon installing an {product-title} cluster. -==== - -.Procedure - -To move from the default worker latency profile: - -. Move to the medium worker latency profile: - -.. Edit the `node.config` object: -+ -[source,terminal] ----- -$ oc edit nodes.config/cluster ----- - -.. Add `spec.workerLatencyProfile: MediumUpdateAverageReaction`: -+ -.Example `node.config` object -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Node -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - creationTimestamp: "2022-07-08T16:02:51Z" - generation: 1 - name: cluster - ownerReferences: - - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - name: version - uid: 36282574-bf9f-409e-a6cd-3032939293eb - resourceVersion: "1865" - uid: 0c0f7a4c-4307-4187-b591-6155695ac85b -spec: - workerLatencyProfile: MediumUpdateAverageReaction <1> - - ... ----- -<1> Specifies the medium worker latency policy. -+ -Scheduling on each worker node is disabled as the change is being applied. -+ -When all nodes return to the `Ready` condition, you can use the following command to look in the Kubernetes Controller Manager to ensure it was applied: -+ -[source,terminal] ----- -$ oc get KubeControllerManager -o yaml | grep -i workerlatency -A 5 -B 5 ----- -+ -.Example output -[source,terminal] ----- - ... - - lastTransitionTime: "2022-07-11T19:47:10Z" - reason: ProfileUpdated - status: "False" - type: WorkerLatencyProfileProgressing - - lastTransitionTime: "2022-07-11T19:47:10Z" <1> - message: all static pod revision(s) have updated latency profile - reason: ProfileUpdated - status: "True" - type: WorkerLatencyProfileComplete - - lastTransitionTime: "2022-07-11T19:20:11Z" - reason: AsExpected - status: "False" - type: WorkerLatencyProfileDegraded - - lastTransitionTime: "2022-07-11T19:20:36Z" - status: "False" - ... ----- -<1> Specifies that the profile is applied and active. - -. Optional: Move to the low worker latency profile: - -.. Edit the `node.config` object: -+ -[source,terminal] ----- -$ oc edit nodes.config/cluster ----- - -.. Change the `spec.workerLatencyProfile` value to `LowUpdateSlowReaction`: -+ -.Example `node.config` object -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Node -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - creationTimestamp: "2022-07-08T16:02:51Z" - generation: 1 - name: cluster - ownerReferences: - - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - name: version - uid: 36282574-bf9f-409e-a6cd-3032939293eb - resourceVersion: "1865" - uid: 0c0f7a4c-4307-4187-b591-6155695ac85b -spec: - workerLatencyProfile: LowUpdateSlowReaction <1> - - ... ----- -<1> Specifies to use the low worker latency policy. -+ -Scheduling on each worker node is disabled as the change is being applied. 
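As a quick check after either move (a sketch, not a step from the original procedure), the profile that is currently requested can be read back from the same `node.config` object, and the `oc get KubeControllerManager` query shown for the medium profile applies equally after this step:

[source,terminal]
----
$ oc get nodes.config/cluster -o jsonpath='{.spec.workerLatencyProfile}{"\n"}'
----

The command should print the profile name you just set, for example `LowUpdateSlowReaction`.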
- -To change the low profile to medium or change the medium to low, edit the `node.config` object and set the `spec.workerLatencyProfile` parameter to the appropriate value. - diff --git a/modules/nodes-clusters-cgroups-2-install.adoc b/modules/nodes-clusters-cgroups-2-install.adoc deleted file mode 100644 index f4da4d2408b4..000000000000 --- a/modules/nodes-clusters-cgroups-2-install.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * install/install_config/enabling-cgroup-v2 - -:_content-type: PROCEDURE -[id="nodes-clusters-cgroups-2-install_{context}"] -= Enabling Linux cgroup v2 during installation - -You can enable Linux control group version 2 (cgroup v2) when you install a cluster by creating installation manifests. - -.Procedure - -. Create or edit the `node.config` object to specify the `v2` cgroup: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Node -metadata: - name: cluster - spec: - cgroupMode: "v2" ----- - -. Proceed with the installation as usual. - diff --git a/modules/nodes-clusters-cgroups-2.adoc b/modules/nodes-clusters-cgroups-2.adoc deleted file mode 100644 index 7e9498c0ffa4..000000000000 --- a/modules/nodes-clusters-cgroups-2.adoc +++ /dev/null @@ -1,261 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-cgroups-2.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -ifeval::["{context}" == "nodes-cluster-cgroups-2"] -:nodes: -endif::[] -ifeval::["{context}" == "post-install-cluster-tasks"] -:post: -endif::[] - -:_content-type: PROCEDURE -[id="nodes-clusters-cgroups-2_{context}"] -= Configuring Linux cgroup - -ifdef::post[] -link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 1] (cgroup v1) is enabled by default. You can enable link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) in your cluster by editing the `node.config` object. Enabling cgroup v2 in {product-title} disables all cgroup version 1 controllers and hierarchies in your cluster. - -cgroup v2 is the next version of the Linux cgroup API. cgroup v2 offers several improvements over cgroup v1, including a unified hierarchy, safer sub-tree delegation, new features such as link:https://www.kernel.org/doc/html/latest/accounting/psi.html[Pressure Stall Information], and enhanced resource management and isolation. - -You can change between cgroup v1 and cgroup v2, as needed. For more information, see "Configuring the Linux cgroup on your nodes" in the "Additional resources" of this section. -endif::post[] - -ifdef::nodes[] -You can enable link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1) or link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) by editing the `node.config` object. The default is cgroup v1. -endif::nodes[] - -[NOTE] -==== -Currently, disabling CPU load balancing is not supported by cgroup v2. As a result, you might not get the desired behavior from performance profiles if you have cgroup v2 enabled. Enabling cgroup v2 is not recommended if you are using performace profiles. -==== - -.Prerequisites -* You have a running {product-title} cluster that uses version 4.12 or later. -* You are logged in to the cluster as a user with administrative privileges. - -.Procedure - -. Enable cgroup v2 on nodes: - -.. 
Edit the `node.config` object: -+ -[source,terminal] ----- -$ oc edit nodes.config/cluster ----- - -ifdef::post[] -.. Add `spec.cgroupMode: "v2"`: -+ -.Example `node.config` object -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Node -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - creationTimestamp: "2022-07-08T16:02:51Z" - generation: 1 - name: cluster - ownerReferences: - - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - name: version - uid: 36282574-bf9f-409e-a6cd-3032939293eb - resourceVersion: "1865" - uid: 0c0f7a4c-4307-4187-b591-6155695ac85b -spec: - cgroupMode: "v2" <1> -... ----- -<1> Enables cgroup v2. -endif::post[] - -ifdef::nodes[] -.. Edit the `spec.cgroupMode` parameter: -+ -.Example `node.config` object -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Node -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/create-only: "true" - creationTimestamp: "2022-07-08T16:02:51Z" - generation: 1 - name: cluster - ownerReferences: - - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - name: version - uid: 36282574-bf9f-409e-a6cd-3032939293eb - resourceVersion: "1865" - uid: 0c0f7a4c-4307-4187-b591-6155695ac85b -spec: - cgroupMode: "v2" <1> -... ----- -<1> Specify `v2` to enable cgroup v2 or `v1` for cgroup v1. -endif::nodes[] - -.Verification - -. Check the machine configs to see that the new machine configs were added: -+ -[source,terminal] ----- -$ oc get mc ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -00-worker 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -97-master-generated-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-generated-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-ssh 3.2.0 40m -99-worker-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-ssh 3.2.0 40m -rendered-master-23d4317815a5f854bd3553d689cfe2e9 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 10s <1> -rendered-master-23e785de7587df95a4b517e0647e5ab7 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -rendered-worker-5d596d9293ca3ea80c896a1191735bb1 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -rendered-worker-dcc7f1b92892d34db74d6832bcc9ccd4 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 10s ----- -<1> New machine configs are created, as expected. - -. 
Check that the new `kernelArguments` were added to the new machine configs: -+ -[source,terminal] ----- -$ oc describe mc <name> ----- -+ -ifdef::nodes[] -.Example output for cgroup v1 -[source,terminal] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 05-worker-kernelarg-selinuxpermissive -spec: - kernelArguments: - systemd.unified_cgroup_hierarchy=0 <1> - systemd.legacy_systemd_cgroup_controller=1 <2> ----- -<1> Enables cgroup v1 in systemd. -<2> Disables cgroup v2. -+ -endif::nodes[] -.Example output for cgroup v2 -[source,terminal] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 05-worker-kernelarg-selinuxpermissive -spec: - kernelArguments: - - systemd_unified_cgroup_hierarchy=1 <1> - - cgroup_no_v1="all" <2> - - psi=1 <3> ----- -<1> Enables cgroup v2 in systemd. -<2> Disables cgroup v1. -<3> Enables the Linux Pressure Stall Information (PSI) feature. - -. Check the nodes to see that scheduling on the nodes is disabled. This indicates that the change is being applied: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-fm1qnwt-72292-99kt6-master-0 Ready,SchedulingDisabled master 58m v1.26.0 -ci-ln-fm1qnwt-72292-99kt6-master-1 Ready master 58m v1.26.0 -ci-ln-fm1qnwt-72292-99kt6-master-2 Ready master 58m v1.26.0 -ci-ln-fm1qnwt-72292-99kt6-worker-a-h5gt4 Ready,SchedulingDisabled worker 48m v1.26.0 -ci-ln-fm1qnwt-72292-99kt6-worker-b-7vtmd Ready worker 48m v1.26.0 -ci-ln-fm1qnwt-72292-99kt6-worker-c-rhzkv Ready worker 48m v1.26.0 ----- - -. After a node returns to the `Ready` state, start a debug session for that node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -ifdef::post[] -. Check that the `sys/fs/cgroup/cgroup2fs` file is present on your nodes. This file is created by cgroup v2: -+ -[source,terminal] ----- -$ stat -c %T -f /sys/fs/cgroup ----- -+ -.Example output -[source,terminal] ----- -cgroup2fs ----- -endif::post[] -ifdef::nodes[] -. 
Check that the `sys/fs/cgroup/cgroup2fs` or `sys/fs/cgroup/tmpfs` file is present on your nodes: -+ -[source,terminal] ----- -$ stat -c %T -f /sys/fs/cgroup ----- -+ -.Example output for cgroup v1 -[source,terminal] ----- -tmp2fs ----- -+ -.Example output for cgroup v2 -[source,terminal] ----- -cgroup2fs ----- -endif::nodes[] - -ifeval::["{context}" == "nodes-cluster-cgroups-2"] -:!nodes: -endif::[] -ifeval::["{context}" == "post-install-cluster-tasks"] -:!post: -endif::[] diff --git a/modules/nodes-clusters-cgroups-okd-configure.adoc b/modules/nodes-clusters-cgroups-okd-configure.adoc deleted file mode 100644 index 0067ab623339..000000000000 --- a/modules/nodes-clusters-cgroups-okd-configure.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/clusters/nodes-cluster-cgroups-okd.adoc -// * post_installation_configuration/cluster-tasks.adoc - - -ifeval::["{context}" == "nodes-cluster-cgroups-2"] -:node: -endif::[] -ifeval::["{context}" == "post-install-cluster-tasks"] -:post: -endif::[] - -ifdef::post[] -:_content-type: PROCEDURE -[id="nodes-clusters-cgroups-okd-configure_{context}"] -= Configuring the Linux cgroup version on your nodes - -By default, {product-title} uses link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2) in your cluster. You can switch to link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[Linux control group version 1] (cgroup v1), if needed, by using a machine config. Enabling cgroup v1 in {product-title} disables the cgroup v2 controllers and hierarchies in your cluster. - -cgroup v2 is the next version of the kernel link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01[control group] and offers multiple improvements. However, it can have some unwanted effects on your nodes. -endif::post[] - -ifdef::node[] -:_content-type: PROCEDURE -[id="nodes-clusters-cgroups-okd-configure_{context}"] -= Configuring Linux cgroup - -You can switch to Linux control group version 1 (cgroup v1), if needed, by using a machine config. Enabling cgroup v1 in {product-title} disables the cgroup v2 controllers and hierarchies in your cluster. -endif::node[] - -.Prerequisites -* Have administrative privilege to a working {product-title} cluster. - -.Procedure - -. Create a `MachineConfig` object file that identifies the kernel argument (for example, `worker-cgroup-v1.yaml`) -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker <1> - name: worker-cgroup-v1 <2> -spec: - config: - ignition: - version: 3.2.0 - kernelArguments: - - systemd.unified_cgroup_hierarchy=0 <3> ----- -+ -<1> Applies the new kernel argument only to worker nodes. -<2> Applies a name to the machine config. -<3> Configures cgroup v1 on the associated nodes. - -. Create the new machine config: -+ -[source,terminal] ----- -$ oc create -f 05-worker-cgroup-v1.yaml ----- - -. 
Check to see that the new machine config was added: -+ -[source,terminal] ----- -$ oc get MachineConfig ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -00-worker 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-cgroup-v1 3.2.0 105s -99-master-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-ssh 3.2.0 40m -99-worker-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-ssh 3.2.0 40m -rendered-master-23e785de7587df95a4b517e0647e5ab7 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -rendered-master-c5e92d98103061c4818cfcefcf462770 60746a843e7ef8855ae00f2ffcb655c53e0e8296 3.2.0 115s -rendered-worker-5d596d9293ca3ea80c896a1191735bb1 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m ----- - -. Check the nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-136-161.ec2.internal Ready worker 28m v1.26.0 -ip-10-0-136-243.ec2.internal Ready master 34m v1.26.0 -ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.26.0 -ip-10-0-142-249.ec2.internal Ready master 34m v1.26.0 -ip-10-0-153-11.ec2.internal Ready worker 28m v1.26.0 -ip-10-0-153-150.ec2.internal Ready master 34m v1.26.0 ----- -+ -You can see that the command disables scheduling on each worker node. - -. After a node returns to the `Ready` state, start a debug session for that node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -sh-4.4# chroot /host ----- - -. Check that the `sys/fs/cgroup/cgroup2fs` file has been moved to the `tmpfs` file system: -+ -[source,terminal] ----- -$ stat -c %T -f /sys/fs/cgroup ----- -+ -.Example output -+ -[source,terminal] ----- -tmpfs ----- - -ifeval::["{context}" == "nodes-cluster-cgroups-2"] -:!node: -endif::[] -ifeval::["{context}" == "post-install-cluster-tasks"] -:!post: -endif::[] diff --git a/modules/nodes-cma-autoscaling-custom-audit.adoc b/modules/nodes-cma-autoscaling-custom-audit.adoc deleted file mode 100644 index d2a89a9034ad..000000000000 --- a/modules/nodes-cma-autoscaling-custom-audit.adoc +++ /dev/null @@ -1,173 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-audit-log.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-audit_{context}"] -= Configuring audit logging - -You can configure auditing for the Custom Metrics Autoscaler Operator by editing the `KedaController` custom resource. The logs are sent to an audit log file on a volume that is secured by using a persistent volume claim in the `KedaController` CR. - -// You can view the audit log file directly or use the `oc adm must-gather` CLI. The `oc adm must-gather` CLI collects the log along with other information from your cluster that is most likely needed for debugging issues, such as resource definitions and service logs. - -.Prerequisites - -* The Custom Metrics Autoscaler Operator must be installed. - -.Procedure - -. 
Edit the `KedaController` custom resource to add the `auditConfig` stanza: -+ -[source,yaml] ----- -kind: KedaController -apiVersion: keda.sh/v1alpha1 -metadata: - name: keda - namespace: openshift-keda -spec: -# ... - metricsServer: -# ... - auditConfig: - logFormat: "json" <1> - logOutputVolumeClaim: "pvc-audit-log" <2> - policy: - rules: <3> - - level: Metadata - omitStages: "RequestReceived" <4> - omitManagedFields: false <5> - lifetime: <6> - maxAge: "2" - maxBackup: "1" - maxSize: "50" ----- -<1> Specifies the output format of the audit log, either `legacy` or `json`. -<2> Specifies an existing persistent volume claim for storing the log data. All requests coming to the API server are logged to this persistent volume claim. If you leave this field empty, the log data is sent to stdout. -<3> Specifies which events should be recorded and what data they should include: -+ -* `None`: Do not log events. -* `Metadata`: Log only the metadata for the request, such as user, timestamp, and so forth. Do not log the request text and the response text. This is the default. -* `Request`: Log only the metadata and the request text but not the response text. This option does not apply for non-resource requests. -* `RequestResponse`: Log event metadata, request text, and response text. This option does not apply for non-resource requests. -+ -<4> Specifies stages for which no event is created. -<5> Specifies whether to omit the managed fields of the request and response bodies from being written to the API audit log, either `true` to omit the fields or `false` to include the fields. -<6> Specifies the size and lifespan of the audit logs. -+ -* `maxAge`: The maximum number of days to retain audit log files, based on the timestamp encoded in their filename. -* `maxBackup`: The maximum number of audit log files to retain. Set to `0` to retain all audit log files. -* `maxSize`: The maximum size in megabytes of an audit log file before it gets rotated. - -.Verification - -//// -. Use the `oc adm must-gather` CLI to collect the audit log file: -+ -[source,terminal] ----- -oc adm must-gather -- /usr/bin/gather_audit_logs ----- -//// - -. View the audit log file directly: - -.. Obtain the name of the `keda-metrics-apiserver-*` pod: -+ -[source,terminal] ----- -oc get pod -n openshift-keda ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -custom-metrics-autoscaler-operator-5cb44cd75d-9v4lv 1/1 Running 0 8m20s -keda-metrics-apiserver-65c7cc44fd-rrl4r 1/1 Running 0 2m55s -keda-operator-776cbb6768-zpj5b 1/1 Running 0 2m55s ----- - -.. View the log data by using a command similar to the following: -+ -[source,terminal] ----- -$ oc logs keda-metrics-apiserver-<hash>|grep -i metadata <1> ----- -<1> Optional: You can use the `grep` command to specify the log level to display: `Metadata`, `Request`, `RequestResponse`. -+ -For example: -+ -[source,terminal] ----- -$ oc logs keda-metrics-apiserver-65c7cc44fd-rrl4r|grep -i metadata ----- -+ -.Example output -+ -[source,terminal] ----- - ... 
-{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"4c81d41b-3dab-4675-90ce-20b87ce24013","stage":"ResponseComplete","requestURI":"/healthz","verb":"get","user":{"username":"system:anonymous","groups":["system:unauthenticated"]},"sourceIPs":["10.131.0.1"],"userAgent":"kube-probe/1.26","responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2023-02-16T13:00:03.554567Z","stageTimestamp":"2023-02-16T13:00:03.555032Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} - ... ----- - -. Alternatively, you can view a specific log: -+ -.. Use a command similar to the following to log into the `keda-metrics-apiserver-*` pod: -+ -[source,terminal] ----- -$ oc rsh pod/keda-metrics-apiserver-<hash> -n openshift-keda ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rsh pod/keda-metrics-apiserver-65c7cc44fd-rrl4r -n openshift-keda ----- - -.. Change to the `/var/audit-policy/` directory: -+ -[source,terminal] ----- -sh-4.4$ cd /var/audit-policy/ ----- - -.. List the available logs: -+ -[source,terminal] ----- -sh-4.4$ ls ----- -+ -.Example output -+ -[source,terminal] ----- -log-2023.02.17-14:50 policy.yaml ----- - -.. View the log, as needed: -+ -[source,terminal] ----- -sh-4.4$ cat <log_name>/<pvc_name>|grep -i <log_level> <1> ----- -<1> Optional: You can use the `grep` command to specify the log level to display: `Metadata`, `Request`, `RequestResponse`. -+ -For example: -+ -[source,terminal] ----- -sh-4.4$ cat log-2023.02.17-14:50/pvc-audit-log|grep -i Request ----- -+ -.Example output ----- - ... -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"63e7f68c-04ec-4f4d-8749-bf1656572a41","stage":"ResponseComplete","requestURI":"/openapi/v2","verb":"get","user":{"username":"system:aggregator","groups":["system:authenticated"]},"sourceIPs":["10.128.0.1"],"responseStatus":{"metadata":{},"code":304},"requestReceivedTimestamp":"2023-02-17T13:12:55.035478Z","stageTimestamp":"2023-02-17T13:12:55.038346Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:discovery\" of ClusterRole \"system:discovery\" to Group \"system:authenticated\""}} - ... ----- diff --git a/modules/nodes-cma-autoscaling-custom-creating-job.adoc b/modules/nodes-cma-autoscaling-custom-creating-job.adoc deleted file mode 100644 index 4a512bcf4720..000000000000 --- a/modules/nodes-cma-autoscaling-custom-creating-job.adoc +++ /dev/null @@ -1,144 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-adding.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-creating-job_{context}"] -= Adding a custom metrics autoscaler to a job - -You can create a custom metrics autoscaler for any `Job` object. - -:FeatureName: Scaling by using a scaled job -include::snippets/technology-preview.adoc[] - -.Prerequisites - -* The Custom Metrics Autoscaler Operator must be installed. - -.Procedure - -. 
Create a YAML file similar to the following: -+ -[source,yaml,options="nowrap"] ----- -kind: ScaledJob -apiVersion: keda.sh/v1alpha1 -metadata: - name: scaledjob - namespace: my-namespace -spec: - failedJobsHistoryLimit: 5 - jobTargetRef: - activeDeadlineSeconds: 600 <1> - backoffLimit: 6 <2> - parallelism: 1 <3> - completions: 1 <4> - template: <5> - metadata: - name: pi - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - maxReplicaCount: 100 <6> - pollingInterval: 30 <7> - successfulJobsHistoryLimit: 5 <8> - failedJobsHistoryLimit: 5 <9> - envSourceContainerName: <10> - rolloutStrategy: gradual <11> - scalingStrategy: <12> - strategy: "custom" - customScalingQueueLengthDeduction: 1 - customScalingRunningJobPercentage: "0.5" - pendingPodConditions: - - "Ready" - - "PodScheduled" - - "AnyOtherCustomPodCondition" - multipleScalersCalculation : "max" - triggers: - - type: prometheus <13> - metadata: - serverAddress: https://thanos-querier.openshift-monitoring.svc.cluster.local:9092 - namespace: kedatest - metricName: http_requests_total - threshold: '5' - query: sum(rate(http_requests_total{job="test-app"}[1m])) - authModes: "bearer" - - authenticationRef: <14> - name: prom-triggerauthentication - metadata: - name: prom-triggerauthentication - type: object - - authenticationRef: <15> - name: prom-cluster-triggerauthentication - metadata: - name: prom-cluster-triggerauthentication - type: object ----- -<1> Specifies the maximum duration the job can run. -<2> Specifies the number of retries for a job. The default is `6`. -<3> Optional: Specifies how many pod replicas a job should run in parallel; defaults to `1`. -* For non-parallel jobs, leave unset. When unset, the default is `1`. -<4> Optional: Specifies how many successful pod completions are needed to mark a job completed. -* For non-parallel jobs, leave unset. When unset, the default is `1`. -* For parallel jobs with a fixed completion count, specify the number of completions. -* For parallel jobs with a work queue, leave unset. When unset the default is the value of the `parallelism` parameter. -<5> Specifies the template for the pod the controller creates. -<6> Optional: Specifies the maximum number of replicas when scaling up. The default is `100`. -<7> Optional: Specifies the interval in seconds to check each trigger on. The default is `30`. -<8> Optional: Specifies the number of successful finished jobs should be kept. The default is `100`. -<9> Optional: Specifies how many failed jobs should be kept. The default is `100`. -<10> Optional: Specifies the name of the container in the target resource, from which the custom autoscaler gets environment variables holding secrets and so forth. The default is `.spec.template.spec.containers[0]`. -<11> Optional: Specifies whether existing jobs are terminated whenever a scaled job is being updated: -+ --- -* `default`: The autoscaler terminates an existing job if its associated scaled job is updated. The autoscaler recreates the job with the latest specs. -* `gradual`: The autoscaler does not terminate an existing job if its associated scaled job is updated. The autoscaler creates new jobs with the latest specs. --- -+ -<12> Optional: Specifies a scaling strategy: `default`, `custom`, or `accurate`. The default is `default`. For more information, see the link in the "Additional resources" section that follows. 
-<13> Specifies the trigger to use as the basis for scaling, as described in the "Understanding the custom metrics autoscaler triggers" section. -<14> Optional: Specifies a trigger authentication, as described in the "Creating a custom metrics autoscaler trigger authentication" section. -<15> Optional: Specifies a cluster trigger authentication, as described in the "Creating a custom metrics autoscaler trigger authentication" section. -+ -[NOTE] -==== -It is not necessary to specify both a namespace trigger authentication and a cluster trigger authentication. -==== - -. Create the custom metrics autoscaler: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -.Verification - -* View the command output to verify that the custom metrics autoscaler was created: -+ -[source,terminal] ----- -$ oc get scaledjob <scaled_job_name> ----- -+ -.Example output -[source,terminal] ----- -NAME MAX TRIGGERS AUTHENTICATION READY ACTIVE AGE -scaledjob 100 prometheus prom-triggerauthentication True True 8s ----- -+ -Note the following fields in the output: -+ --- -* `TRIGGERS`: Indicates the trigger, or scaler, that is being used. -* `AUTHENTICATION`: Indicates the name of any trigger authentication being used. -* `READY`: Indicates whether the scaled object is ready to start scaling: -** If `True`, the scaled object is ready. -** If `False`, the scaled object is not ready because of a problem in one or more of the objects you created. -* `ACTIVE`: Indicates whether scaling is taking place: -** If `True`, scaling is taking place. -** If `False`, scaling is not taking place because there are no metrics or there is a problem in one or more of the objects you created. --- diff --git a/modules/nodes-cma-autoscaling-custom-creating-workload.adoc b/modules/nodes-cma-autoscaling-custom-creating-workload.adoc deleted file mode 100644 index f3f4b427e185..000000000000 --- a/modules/nodes-cma-autoscaling-custom-creating-workload.adoc +++ /dev/null @@ -1,201 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-adding.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-creating-workload_{context}"] -= Adding a custom metrics autoscaler to a workload - -You can create a custom metrics autoscaler for a workload that is created by a `Deployment`, `StatefulSet`, or `custom resource` object. - -.Prerequisites - -* The Custom Metrics Autoscaler Operator must be installed. - -* If you use a custom metrics autoscaler for scaling based on CPU or memory: - -** Your cluster administrator must have properly configured cluster metrics. You can use the `oc describe PodMetrics <pod-name>` command to determine if metrics are configured. If metrics are configured, the output appears similar to the following, with CPU and Memory displayed under Usage. 
-+ -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-135-131.ec2.internal ----- -+ -.Example output -[source,yaml,options="nowrap"] ----- -Name: openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Namespace: openshift-kube-scheduler -Labels: <none> -Annotations: <none> -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2019-05-23T18:47:56Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Timestamp: 2019-05-23T18:47:56Z -Window: 1m0s -Events: <none> ----- - -** The pods associated with the object you want to scale must include specified memory and CPU limits. For example: -+ -.Example pod spec -[source,yaml] ----- -apiVersion: v1 -kind: Pod -# ... -spec: - containers: - - name: app - image: images.my-company.example/app:v4 - resources: - limits: - memory: "128Mi" - cpu: "500m" -# ... ----- - -.Procedure - -. Create a YAML file similar to the following. Only the name `<2>`, object name `<4>`, and object kind `<5>` are required: -+ -.Example scaled object -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - annotations: - autoscaling.keda.sh/paused-replicas: "0" <1> - name: scaledobject <2> - namespace: my-namespace -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - name: example-deployment <4> - kind: Deployment <5> - envSourceContainerName: .spec.template.spec.containers[0] <6> - cooldownPeriod: 200 <7> - maxReplicaCount: 100 <8> - minReplicaCount: 0 <9> - metricsServer: <10> - auditConfig: - logFormat: "json" - logOutputVolumeClaim: "persistentVolumeClaimName" - policy: - rules: - - level: Metadata - omitStages: "RequestReceived" - omitManagedFields: false - lifetime: - maxAge: "2" - maxBackup: "1" - maxSize: "50" - fallback: <11> - failureThreshold: 3 - replicas: 6 - pollingInterval: 30 <12> - advanced: - restoreToOriginalReplicaCount: false <13> - horizontalPodAutoscalerConfig: - name: keda-hpa-scale-down <14> - behavior: <15> - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - triggers: - - type: prometheus <16> - metadata: - serverAddress: https://thanos-querier.openshift-monitoring.svc.cluster.local:9092 - namespace: kedatest - metricName: http_requests_total - threshold: '5' - query: sum(rate(http_requests_total{job="test-app"}[1m])) - authModes: basic - - authenticationRef: <17> - name: prom-triggerauthentication - metadata: - name: prom-triggerauthentication - type: object - - authenticationRef: <18> - name: prom-cluster-triggerauthentication - metadata: - name: prom-cluster-triggerauthentication - type: object ----- -<1> Optional: Specifies that the Custom Metrics Autoscaler Operator is to scale the replicas to the specified value and stop autoscaling, as described in the "Pausing the custom metrics autoscaler for a workload" section. -<2> Specifies a name for this custom metrics autoscaler. -<3> Optional: Specifies the API version of the target resource. The default is `apps/v1`. -<4> Specifies the name of the object that you want to scale. -<5> Specifies the `kind` as `Deployment`, `StatefulSet` or `CustomResource`. -<6> Optional: Specifies the name of the container in the target resource, from which the custom metrics autoscaler gets environment variables holding secrets and so forth. 
The default is `.spec.template.spec.containers[0]`. -<7> Optional. Specifies the period in seconds to wait after the last trigger is reported before scaling the deployment back to `0` if the `minReplicaCount` is set to `0`. The default is `300`. -<8> Optional: Specifies the maximum number of replicas when scaling up. The default is `100`. -<9> Optional: Specifies the minimum number of replicas when scaling down. -<10> Optional: Specifies the parameters for audit logs. as described in the "Configuring audit logging" section. -<11> Optional: Specifies the number of replicas to fall back to if a scaler fails to get metrics from the source for the number of times defined by the `failureThreshold` parameter. For more information on fallback behavior, see the link:https://keda.sh/docs/2.7/concepts/scaling-deployments/#fallback[KEDA documentation]. -<12> Optional: Specifies the interval in seconds to check each trigger on. The default is `30`. -<13> Optional: Specifies whether to scale back the target resource to the original replica count after the scaled object is deleted. The default is `false`, which keeps the replica count as it is when the scaled object is deleted. -<14> Optional: Specifies a name for the horizontal pod autoscaler. The default is `keda-hpa-{scaled-object-name}`. -<15> Optional: Specifies a scaling policy to use to control the rate to scale pods up or down, as described in the "Scaling policies" section. -<16> Specifies the trigger to use as the basis for scaling, as described in the "Understanding the custom metrics autoscaler triggers" section. This example uses {product-title} monitoring. -<17> Optional: Specifies a trigger authentication, as described in the "Creating a custom metrics autoscaler trigger authentication" section. -<18> Optional: Specifies a cluster trigger authentication, as described in the "Creating a custom metrics autoscaler trigger authentication" section. -+ -[NOTE] -==== -It is not necessary to specify both a namespace trigger authentication and a cluster trigger authentication. -==== - -. Create the custom metrics autoscaler: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -.Verification - -* View the command output to verify that the custom metrics autoscaler was created: -+ -[source,terminal] ----- -$ oc get scaledobject <scaled_object_name> ----- -+ -.Example output -[source,terminal] ----- -NAME SCALETARGETKIND SCALETARGETNAME MIN MAX TRIGGERS AUTHENTICATION READY ACTIVE FALLBACK AGE -scaledobject apps/v1.Deployment example-deployment 0 50 prometheus prom-triggerauthentication True True True 17s ----- -+ -Note the following fields in the output: -+ --- -* `TRIGGERS`: Indicates the trigger, or scaler, that is being used. -* `AUTHENTICATION`: Indicates the name of any trigger authentication being used. -* `READY`: Indicates whether the scaled object is ready to start scaling: -** If `True`, the scaled object is ready. -** If `False`, the scaled object is not ready because of a problem in one or more of the objects you created. -* `ACTIVE`: Indicates whether scaling is taking place: -** If `True`, scaling is taking place. -** If `False`, scaling is not taking place because there are no metrics or there is a problem in one or more of the objects you created. -* `FALLBACK`: Indicates whether the custom metrics autoscaler is able to get metrics from the source -** If `False`, the custom metrics autoscaler is getting metrics. 
-** If `True`, the custom metrics autoscaler is getting metrics because there are no metrics or there is a problem in one or more of the objects you created. --- - diff --git a/modules/nodes-cma-autoscaling-custom-gather.adoc b/modules/nodes-cma-autoscaling-custom-gather.adoc deleted file mode 100644 index 8ce3479d4a1f..000000000000 --- a/modules/nodes-cma-autoscaling-custom-gather.adoc +++ /dev/null @@ -1,173 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-debugging.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-debugging-gather_{context}"] -= Gathering debugging data - -The following command runs the `must-gather` tool for the Custom Metrics Autoscaler Operator: - -[source,terminal] ----- -$ oc adm must-gather --image="$(oc get packagemanifests openshift-custom-metrics-autoscaler-operator \ --n openshift-marketplace \ --o jsonpath='{.status.channels[?(@.name=="stable")].currentCSVDesc.annotations.containerImage}')" ----- - -[NOTE] -==== -The standard {product-title} `must-gather` command, `oc adm must-gather`, does not collect Custom Metrics Autoscaler Operator data. -==== - - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The {product-title} CLI (`oc`) installed. - -.Procedure - -. Navigate to the directory where you want to store the `must-gather` data. -+ -[NOTE] -==== -If your cluster is using a restricted network, you must take additional steps. If your mirror registry has a trusted CA, you must first add the trusted CA to the cluster. For all clusters on restricted networks, you must import the default `must-gather` image as an image stream by running the following command. - -[source,terminal] ----- -$ oc import-image is/must-gather -n openshift ----- -==== - -. Perform one of the following: -+ --- -* To get only the Custom Metrics Autoscaler Operator `must-gather` data, use the following command: -+ -[source,terminal] ----- -$ oc adm must-gather --image="$(oc get packagemanifests openshift-custom-metrics-autoscaler-operator \ --n openshift-marketplace \ --o jsonpath='{.status.channels[?(@.name=="stable")].currentCSVDesc.annotations.containerImage}')" ----- -+ -The custom image for the `must-gather` command is pulled directly from the Operator package manifests, so that it works on any cluster where the Custom Metric Autoscaler Operator is available. - -* To gather the default `must-gather` data in addition to the Custom Metric Autoscaler Operator information: - -.. Use the following command to obtain the Custom Metrics Autoscaler Operator image and set it as an environment variable: -+ -[source,terminal] ----- -$ IMAGE="$(oc get packagemanifests openshift-custom-metrics-autoscaler-operator \ - -n openshift-marketplace \ - -o jsonpath='{.status.channels[?(@.name=="stable")].currentCSVDesc.annotations.containerImage}')" ----- - -.. 
Use the `oc adm must-gather` with the Custom Metrics Autoscaler Operator image: -+ -[source,terminal] ----- -$ oc adm must-gather --image-stream=openshift/must-gather --image=${IMAGE} ----- --- -+ -.Example must-gather output for the Custom Metric Autoscaler: -[%collapsible] -==== -[source,terminal] ----- -└── openshift-keda - ├── apps - │ ├── daemonsets.yaml - │ ├── deployments.yaml - │ ├── replicasets.yaml - │ └── statefulsets.yaml - ├── apps.openshift.io - │ └── deploymentconfigs.yaml - ├── autoscaling - │ └── horizontalpodautoscalers.yaml - ├── batch - │ ├── cronjobs.yaml - │ └── jobs.yaml - ├── build.openshift.io - │ ├── buildconfigs.yaml - │ └── builds.yaml - ├── core - │ ├── configmaps.yaml - │ ├── endpoints.yaml - │ ├── events.yaml - │ ├── persistentvolumeclaims.yaml - │ ├── pods.yaml - │ ├── replicationcontrollers.yaml - │ ├── secrets.yaml - │ └── services.yaml - ├── discovery.k8s.io - │ └── endpointslices.yaml - ├── image.openshift.io - │ └── imagestreams.yaml - ├── k8s.ovn.org - │ ├── egressfirewalls.yaml - │ └── egressqoses.yaml - ├── keda.sh - │ ├── kedacontrollers - │ │ └── keda.yaml - │ ├── scaledobjects - │ │ └── example-scaledobject.yaml - │ └── triggerauthentications - │ └── example-triggerauthentication.yaml - ├── monitoring.coreos.com - │ └── servicemonitors.yaml - ├── networking.k8s.io - │ └── networkpolicies.yaml - ├── openshift-keda.yaml - ├── pods - │ ├── custom-metrics-autoscaler-operator-58bd9f458-ptgwx - │ │ ├── custom-metrics-autoscaler-operator - │ │ │ └── custom-metrics-autoscaler-operator - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ └── custom-metrics-autoscaler-operator-58bd9f458-ptgwx.yaml - │ ├── custom-metrics-autoscaler-operator-58bd9f458-thbsh - │ │ └── custom-metrics-autoscaler-operator - │ │ └── custom-metrics-autoscaler-operator - │ │ └── logs - │ ├── keda-metrics-apiserver-65c7cc44fd-6wq4g - │ │ ├── keda-metrics-apiserver - │ │ │ └── keda-metrics-apiserver - │ │ │ └── logs - │ │ │ ├── current.log - │ │ │ ├── previous.insecure.log - │ │ │ └── previous.log - │ │ └── keda-metrics-apiserver-65c7cc44fd-6wq4g.yaml - │ └── keda-operator-776cbb6768-fb6m5 - │ ├── keda-operator - │ │ └── keda-operator - │ │ └── logs - │ │ ├── current.log - │ │ ├── previous.insecure.log - │ │ └── previous.log - │ └── keda-operator-776cbb6768-fb6m5.yaml - ├── policy - │ └── poddisruptionbudgets.yaml - └── route.openshift.io - └── routes.yaml ----- -==== - -ifndef::openshift-origin[] -. Create a compressed file from the `must-gather` directory that was created in your working directory. For example, on a computer that uses a Linux -operating system, run the following command: -+ -[source,terminal] ----- -$ tar cvaf must-gather.tar.gz must-gather.local.5421342344627712289/ <1> ----- -<1> Replace `must-gather-local.5421342344627712289/` with the actual directory name. - -. Attach the compressed file to your support case on the link:https://access.redhat.com[Red Hat Customer Portal]. 
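For reference, the following is a condensed, non-authoritative sketch that strings together the gathering and packaging commands described above. Replace `<directory_name>` with the directory that the `must-gather` command actually creates; the archive name is illustrative only:

[source,terminal]
----
$ IMAGE="$(oc get packagemanifests openshift-custom-metrics-autoscaler-operator \
  -n openshift-marketplace \
  -o jsonpath='{.status.channels[?(@.name=="stable")].currentCSVDesc.annotations.containerImage}')"
$ oc adm must-gather --image-stream=openshift/must-gather --image=${IMAGE}
$ tar cvaf must-gather.tar.gz must-gather.local.<directory_name>/
----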
-endif::[] - diff --git a/modules/nodes-cma-autoscaling-custom-install.adoc b/modules/nodes-cma-autoscaling-custom-install.adoc deleted file mode 100644 index 43ae8c4b1142..000000000000 --- a/modules/nodes-cma-autoscaling-custom-install.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-install.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-install_{context}"] -= Installing the custom metrics autoscaler - -You can use the following procedure to install the Custom Metrics Autoscaler Operator. - -.Prerequisites -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. -endif::openshift-origin[] - -* Remove any previously-installed Technology Preview versions of the Cluster Metrics Autoscaler Operator. - -* Remove any versions of the community-based KEDA. -+ -Also, remove the KEDA 1.x custom resource definitions by running the following commands: -+ -[source,terminal] ----- -$ oc delete crd scaledobjects.keda.k8s.io ----- -+ -[source,terminal] ----- -$ oc delete crd triggerauthentications.keda.k8s.io ----- - -.Procedure - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -. Choose *Custom Metrics Autoscaler* from the list of available Operators, and click *Install*. - -. On the *Install Operator* page, ensure that the *All namespaces on the cluster (default)* option -is selected for *Installation Mode*. This installs the Operator in all namespaces. - -. Ensure that the *openshift-keda* namespace is selected for *Installed Namespace*. {product-title} creates the namespace, if not present in your cluster. - -. Click *Install*. - -. Verify the installation by listing the Custom Metrics Autoscaler Operator components: - -.. Navigate to *Workloads* -> *Pods*. - -.. Select the `openshift-keda` project from the drop-down menu and verify that the `custom-metrics-autoscaler-operator-*` pod is running. - -.. Navigate to *Workloads* -> *Deployments* to verify that the `custom-metrics-autoscaler-operator` deployment is running. - -. Optional: Verify the installation in the OpenShift CLI using the following commands: -+ -[source,terminal] ----- -$ oc get all -n openshift-keda ----- -+ -The output appears similar to the following: -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/custom-metrics-autoscaler-operator-5fd8d9ffd8-xt4xp 1/1 Running 0 18m - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/custom-metrics-autoscaler-operator 1/1 1 1 18m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/custom-metrics-autoscaler-operator-5fd8d9ffd8 1 1 1 18m ----- - -. Install the `KedaController` custom resource, which creates the required CRDs: - -.. In the {product-title} web console, click *Operators* -> *Installed Operators*. - -.. Click *Custom Metrics Autoscaler*. - -.. On the *Operator Details* page, click the *KedaController* tab. - -.. On the *KedaController* tab, click *Create KedaController* and edit the file. 
-+ -[source,yaml] ----- -kind: KedaController -apiVersion: keda.sh/v1alpha1 -metadata: - name: keda - namespace: openshift-keda -spec: - watchNamespace: '' <1> - operator: - logLevel: info <2> - logEncoder: console <3> - metricsServer: - logLevel: '0' <4> - auditConfig: <5> - logFormat: "json" - logOutputVolumeClaim: "persistentVolumeClaimName" - policy: - rules: - - level: Metadata - omitStages: "RequestReceived" - omitManagedFields: false - lifetime: - maxAge: "2" - maxBackup: "1" - maxSize: "50" - serviceAccount: {} ----- -<1> Specifies the namespaces that the custom autoscaler should watch. Enter names in a comma-separated list. Omit or set empty to watch all namespaces. The default is empty. -<2> Specifies the level of verbosity for the Custom Metrics Autoscaler Operator log messages. The allowed values are `debug`, `info`, `error`. The default is `info`. -<3> Specifies the logging format for the Custom Metrics Autoscaler Operator log messages. The allowed values are `console` or `json`. The default is `console`. -<4> Specifies the logging level for the Custom Metrics Autoscaler Metrics Server. The allowed values are `0` for `info` and `4` or `debug`. The default is `0`. -<5> Activates audit logging for the Custom Metrics Autoscaler Operator and specifies the audit policy to use, as described in the "Configuring audit logging" section. - -.. Click *Create* to create the KEDAController. diff --git a/modules/nodes-cma-autoscaling-custom-metrics-access.adoc b/modules/nodes-cma-autoscaling-custom-metrics-access.adoc deleted file mode 100644 index d50e1a435d6b..000000000000 --- a/modules/nodes-cma-autoscaling-custom-metrics-access.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-metrics.adoc -// Modeled after migration-accessing-performance-metrics.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-metrics-access_{context}"] -= Accessing performance metrics - -You can access the metrics and run queries by using the {product-title} web console. - -.Procedure - -. Select the *Administrator* perspective in the {product-title} web console. - -. Select *Observe* -> *Metrics*. - -. To create a custom query, add your PromQL query to the *Expression* field. - -. To add multiple queries, select *Add Query*. - -// Procedure copied from monitoring-querying-metrics-for-all-projects-as-an-administrator diff --git a/modules/nodes-cma-autoscaling-custom-metrics-provided.adoc b/modules/nodes-cma-autoscaling-custom-metrics-provided.adoc deleted file mode 100644 index cc384ff875e6..000000000000 --- a/modules/nodes-cma-autoscaling-custom-metrics-provided.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-metrics.adoc - -:_content-type: REFERENCE -[id="nodes-cma-autoscaling-custom-metrics-provided_{context}"] -= Provided Operator metrics - -The Custom Metrics Autoscaler Operator exposes the following metrics, which you can view by using the {product-title} web console. - -.Custom Metric Autoscaler Operator metrics - -[cols="3,7",options="header"] -|=== -|Metric name -|Description - -|`keda_scaler_activity` -|Whether the particular scaler is active or inactive. A value of `1` indicates the scaler is active; a value of `0` indicates the scaler is inactive. - -|`keda_scaler_metrics_value` -|The current value for each scaler’s metric, which is used by the Horizontal Pod Autoscaler (HPA) in computing the target average. 
- -|`keda_scaler_metrics_latency` -|The latency of retrieving the current metric from each scaler. - -|`keda_scaler_errors` -|The number of errors that have occurred for each scaler. - -|`keda_scaler_errors_total` -|The total number of errors encountered for all scalers. - -|`keda_scaled_object_errors` -|The number of errors that have occurred for each scaled obejct. - -|`keda_resource_totals` -|The total number of Custom Metrics Autoscaler custom resources in each namespace for each custom resource type. - -|`keda_trigger_totals` -|The total number of triggers by trigger type. - -|=== - -.Custom Metrics Autoscaler Admission webhook metrics - -The Custom Metrics Autoscaler Admission webhook also exposes the following Prometheus metrics. - -[cols="3,7"options="header"] -|=== -|Metric name -|Description - -|`keda_scaled_object_validation_total` -|The number of scaled object validations. - -|`keda_scaled_object_validation_errors` -|The number of validation errors. - -|=== diff --git a/modules/nodes-cma-autoscaling-custom-pausing-restart.adoc b/modules/nodes-cma-autoscaling-custom-pausing-restart.adoc deleted file mode 100644 index ad08c2afe605..000000000000 --- a/modules/nodes-cma-autoscaling-custom-pausing-restart.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-pausing.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-pausing-restart_{context}"] -= Restarting the custom metrics autoscaler for a scaled object - -You can restart a paused custom metrics autoscaler by removing the `autoscaling.keda.sh/paused-replicas` annotation for that `ScaledObject`. - -[source,yaml] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - annotations: - autoscaling.keda.sh/paused-replicas: "4" -# ... ----- - -.Procedure - -. Use the following command to edit the `ScaledObject` CR for your workload: -+ -[source,terminal] ----- -$ oc edit ScaledObject scaledobject ----- - -. Remove the `autoscaling.keda.sh/paused-replicas` annotation. -+ -[source,yaml] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - annotations: - autoscaling.keda.sh/paused-replicas: "4" <1> - creationTimestamp: "2023-02-08T14:41:01Z" - generation: 1 - name: scaledobject - namespace: my-project - resourceVersion: '65729' - uid: f5aec682-acdf-4232-a783-58b5b82f5dd0 ----- -<1> Remove this annotation to restart a paused custom metrics autoscaler. diff --git a/modules/nodes-cma-autoscaling-custom-pausing-workload.adoc b/modules/nodes-cma-autoscaling-custom-pausing-workload.adoc deleted file mode 100644 index 7d27b861cb24..000000000000 --- a/modules/nodes-cma-autoscaling-custom-pausing-workload.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-pausing.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-pausing-workload_{context}"] -= Pausing a custom metrics autoscaler - -You can pause the autoscaling of a scaled object by adding the `autoscaling.keda.sh/paused-replicas` annotation to the custom metrics autoscaler for that scaled object. The custom metrics autoscaler scales the replicas for that workload to the specified value and pauses autoscaling until the annotation is removed. - -[source,yaml] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - annotations: - autoscaling.keda.sh/paused-replicas: "4" -# ... ----- - -.Procedure - -. 
Use the following command to edit the `ScaledObject` CR for your workload: -+ -[source,terminal] ----- -$ oc edit ScaledObject scaledobject ----- - -. Add the `autoscaling.keda.sh/paused-replicas` annotation with any value: -+ -[source,yaml] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - annotations: - autoscaling.keda.sh/paused-replicas: "4" <1> - creationTimestamp: "2023-02-08T14:41:01Z" - generation: 1 - name: scaledobject - namespace: my-project - resourceVersion: '65729' - uid: f5aec682-acdf-4232-a783-58b5b82f5dd0 ----- -<1> Specifies that the Custom Metrics Autoscaler Operator is to scale the replicas to the specified value and stop autoscaling. - diff --git a/modules/nodes-cma-autoscaling-custom-prometheus-config.adoc b/modules/nodes-cma-autoscaling-custom-prometheus-config.adoc deleted file mode 100644 index 528a8f00134f..000000000000 --- a/modules/nodes-cma-autoscaling-custom-prometheus-config.adoc +++ /dev/null @@ -1,183 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-prometheus-config_{context}"] -= Configuring the custom metrics autoscaler to use {product-title} monitoring - -You can use the installed {product-title} Prometheus monitoring as a source for the metrics used by the custom metrics autoscaler. However, there are some additional configurations you must perform. - -[NOTE] -==== -These steps are not required for an external Prometheus source. -==== - -You must perform the following tasks, as described in this section: - -* Create a service account to get a token. -* Create a role. -* Add that role to the service account. -* Reference the token in the trigger authentication object used by Prometheus. - -.Prerequisites - -* {product-title} monitoring must be installed. - -* Monitoring of user-defined workloads must be enabled in {product-title} monitoring, as described in the *Creating a user-defined workload monitoring config map* section. - -* The Custom Metrics Autoscaler Operator must be installed. - -.Procedure - -. Change to the project with the object you want to scale: -+ -[source,terminal] ----- -$ oc project my-project ----- - -. Use the following command to create a service account, if your cluster does not have one: -+ -[source,terminal] ----- -$ oc create serviceaccount <service_account> ----- -+ -where: -+ -<service_account>:: Specifies the name of the service account. - -. Use the following command to locate the token assigned to the service account: -+ -[source,terminal] ----- -$ oc describe serviceaccount <service_account> ----- -+ --- -where: - -<service_account>:: Specifies the name of the service account. --- -+ -.Example output -[source,terminal] ----- -Name: thanos -Namespace: my-project -Labels: <none> -Annotations: <none> -Image pull secrets: thanos-dockercfg-nnwgj -Mountable secrets: thanos-dockercfg-nnwgj -Tokens: thanos-token-9g4n5 <1> -Events: <none> - ----- -<1> Use this token in the trigger authentication. - -. Create a trigger authentication with the service account token: - -.. Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-trigger-auth-prometheus -spec: - secretTargetRef: <1> - - parameter: bearerToken <2> - name: thanos-token-9g4n5 <3> - key: token <4> - - parameter: ca - name: thanos-token-9g4n5 - key: ca.crt ----- -<1> Specifies that this object uses a secret for authorization. 
-<2> Specifies the authentication parameter to supply by using the token. -<3> Specifies the name of the token to use. -<4> Specifies the key in the token to use with the specified parameter. - -.. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. Create a role for reading Thanos metrics: -+ -.. Create a YAML file with the following parameters: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: thanos-metrics-reader -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - metrics.k8s.io - resources: - - pods - - nodes - verbs: - - get - - list - - watch ----- - -.. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. Create a role binding for reading Thanos metrics: -+ -.. Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: thanos-metrics-reader <1> - namespace: my-project <2> -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: thanos-metrics-reader -subjects: -- kind: ServiceAccount - name: thanos <3> - namespace: my-project <4> ----- -<1> Specifies the name of the role you created. -<2> Specifies the namespace of the object you want to scale. -<3> Specifies the name of the service account to bind to the role. -<4> Specifies the namespace of the object you want to scale. -.. Create the CR object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -You can now deploy a scaled object or scaled job to enable autoscaling for your application, as described in "Understanding how to add custom metrics autoscalers". To use {product-title} monitoring as the source, in the trigger, or scaler, you must include the following parameters: - -* `triggers.type` must be `prometheus` -* `triggers.metadata.serverAddress` must be `\https://thanos-querier.openshift-monitoring.svc.cluster.local:9092` -* `triggers.metadata.authModes` must be `bearer` -* `triggers.metadata.namespace` must be set to the namespace of the object to scale -* `triggers.authenticationRef` must point to the trigger authentication resource specified in the previous step - diff --git a/modules/nodes-cma-autoscaling-custom-trigger-auth-using.adoc b/modules/nodes-cma-autoscaling-custom-trigger-auth-using.adoc deleted file mode 100644 index 31b7e260c870..000000000000 --- a/modules/nodes-cma-autoscaling-custom-trigger-auth-using.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-trigger-auth.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-trigger-auth-using_{context}"] -= Using trigger authentications - -You use trigger authentications and cluster trigger authentications by using a custom resource to create the authentication, then add a reference to a scaled object or scaled job. - -.Prerequisites - -* The Custom Metrics Autoscaler Operator must be installed. - -* If you are using a secret, the `Secret` object must exist, for example: -+ -.Example secret -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: my-secret -data: - user-name: <base64_USER_NAME> - password: <base64_USER_PASSWORD> ----- - -.Procedure - -. Create the `TriggerAuthentication` or `ClusterTriggerAuthentication` object. - -.. 
Create a YAML file that defines the object: -+ -.Example trigger authentication with a secret -[source,yaml] ----- -kind: TriggerAuthentication -apiVersion: keda.sh/v1alpha1 -metadata: - name: prom-triggerauthentication - namespace: my-namespace -spec: - secretTargetRef: - - parameter: user-name - name: my-secret - key: USER_NAME - - parameter: password - name: my-secret - key: USER_PASSWORD ----- - -.. Create the `TriggerAuthentication` object: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. Create or edit a `ScaledObject` YAML file: -+ -.Example scaled object -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: scaledobject - namespace: my-namespace -spec: - scaleTargetRef: - name: example-deployment - maxReplicaCount: 100 - minReplicaCount: 0 - pollingInterval: 30 - triggers: - - authenticationRef: - type: prometheus - metadata: - serverAddress: https://thanos-querier.openshift-monitoring.svc.cluster.local:9092 - namespace: kedatest # replace <NAMESPACE> - metricName: http_requests_total - threshold: '5' - query: sum(rate(http_requests_total{job="test-app"}[1m])) - authModes: "basic" - - authenticationRef: <1> - name: prom-triggerauthentication - metadata: - name: prom-triggerauthentication - type: object - - authenticationRef: <2> - name: prom-cluster-triggerauthentication - kind: ClusterTriggerAuthentication - metadata: - name: prom-cluster-triggerauthentication - type: object ----- -<1> Optional: Specify a trigger authentication. -<2> Optional: Specify a cluster trigger authentication. You must include the `kind: ClusterTriggerAuthentication` parameter. -+ -[NOTE] -==== -It is not necessary to specify both a namespace trigger authentication and a cluster trigger authentication. -==== - -. Create the object. For example: -+ -[source,terminal] ----- -$ oc apply -f <file-name> ----- diff --git a/modules/nodes-cma-autoscaling-custom-trigger-cpu.adoc b/modules/nodes-cma-autoscaling-custom-trigger-cpu.adoc deleted file mode 100644 index d9a61a8e7080..000000000000 --- a/modules/nodes-cma-autoscaling-custom-trigger-cpu.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-trigger.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-trigger-cpu_{context}"] -= Understanding the CPU trigger - -You can scale pods based on CPU metrics. This trigger uses cluster metrics as the source for metrics. - -The custom metrics autoscaler scales the pods associated with an object to maintain the CPU usage that you specify. The autoscaler increases or decreases the number of replicas between the minimum and maximum numbers to maintain the specified CPU utilization across all pods. The memory trigger considers the memory utilization of the entire pod. If the pod has multiple containers, the memory trigger considers the total memory utilization of all containers in the pod. - -[NOTE] -==== -* This trigger cannot be used with the `ScaledJob` custom resource. -* When using a memory trigger to scale an object, the object does not scale to `0`, even if you are using multiple triggers. -==== - -.Example scaled object with a CPU target -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: cpu-scaledobject - namespace: my-namespace -spec: -# ... 
- triggers: - - type: cpu <1> - metricType: Utilization <2> - metadata: - value: '60' <3> - containerName: api <4> - ----- -<1> Specifies CPU as the trigger type. -<2> Specifies the type of metric to use, either `Utilization` or `AverageValue`. -<3> Specifies the value that triggers scaling. Must be specified as a quoted string value. -* When using `Utilization`, the target value is the average of the resource metrics across all relevant pods, represented as a percentage of the requested value of the resource for the pods. -* When using `AverageValue`, the target value is the average of the metrics across all relevant pods. -<4> Optional: Specifies an individual container to scale, based on the memory utilization of only that container, rather than the entire pod. In this example, only the container named `api` is to be scaled. diff --git a/modules/nodes-cma-autoscaling-custom-trigger-kafka.adoc b/modules/nodes-cma-autoscaling-custom-trigger-kafka.adoc deleted file mode 100644 index 9babaced121a..000000000000 --- a/modules/nodes-cma-autoscaling-custom-trigger-kafka.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-trigger.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-trigger-kafka_{context}"] -= Understanding the Kafka trigger - -You can scale pods based on an Apache Kafka topic or other services that support the Kafka protocol. The custom metrics autoscaler does not scale higher than the number of Kafka partitions, unless you set the `allowIdleConsumers` parameter to `true` in the scaled object or scaled job. - -[NOTE] -==== -If the number of consumer groups exceeds the number of partitions in a topic, the extra consumer groups remain idle. To avoid this, by default the number of replicas does not exceed: - -* The number of partitions on a topic, if a topic is specified -* The number of partitions of all topics in the consumer group, if no topic is specified -* The `maxReplicaCount` specified in scaled object or scaled job CR - -You can use the `allowIdleConsumers` parameter to disable these default behaviors. -==== - -.Example scaled object with a Kafka target -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: kafka-scaledobject - namespace: my-namespace -spec: -# ... - triggers: - - type: kafka <1> - metadata: - topic: my-topic <2> - bootstrapServers: my-cluster-kafka-bootstrap.openshift-operators.svc:9092 <3> - consumerGroup: my-group <4> - lagThreshold: '10' <5> - activationLagThreshold: '5' <6> - offsetResetPolicy: latest <7> - allowIdleConsumers: true <8> - scaleToZeroOnInvalidOffset: false <9> - excludePersistentLag: false <10> - version: '1.0.0' <11> - partitionLimitation: '1,2,10-20,31' <12> ----- -<1> Specifies Kafka as the trigger type. -<2> Specifies the name of the Kafka topic on which Kafka is processing the offset lag. -<3> Specifies a comma-separated list of Kafka brokers to connect to. -<4> Specifies the name of the Kafka consumer group used for checking the offset on the topic and processing the related lag. -<5> Optional: Specifies the average target value that triggers scaling. Must be specified as a quoted string value. The default is `5`. -<6> Optional: Specifies the target value for the activation phase. Must be specified as a quoted string value. -<7> Optional: Specifies the Kafka offset reset policy for the Kafka consumer. The available values are: `latest` and `earliest`. The default is `latest`. 
-<8> Optional: Specifies whether the number of Kafka replicas can exceed the number of partitions on a topic. - * If `true`, the number of Kafka replicas can exceed the number of partitions on a topic. This allows for idle Kafka consumers. - * If `false`, the number of Kafka replicas cannot exceed the number of partitions on a topic. This is the default. -<9> Specifies how the trigger behaves when a Kafka partition does not have a valid offset. - * If `true`, the consumers are scaled to zero for that partition. - * If `false`, the scaler keeps a single consumer for that partition. This is the default. -<10> Optional: Specifies whether the trigger includes or excludes partition lag for partitions whose current offset is the same as the current offset of the previous polling cycle. - * If `true`, the scaler excludes partition lag in these partitions. - * If `false`, the trigger includes all consumer lag in all partitions. This is the default. -<11> Optional: Specifies the version of your Kafka brokers. Must be specified as a quoted string value. The default is `1.0.0`. -<12> Optional: Specifies a comma-separated list of partition IDs to scope the scaling on. If set, only the listed IDs are considered when calculating lag. Must be specified as a quoted string value. The default is to consider all partitions. - diff --git a/modules/nodes-cma-autoscaling-custom-trigger-memory.adoc b/modules/nodes-cma-autoscaling-custom-trigger-memory.adoc deleted file mode 100644 index 25ec0d2b2ea9..000000000000 --- a/modules/nodes-cma-autoscaling-custom-trigger-memory.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-trigger.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-trigger-memory_{context}"] -= Understanding the memory trigger - -You can scale pods based on memory metrics. This trigger uses cluster metrics as the source for metrics. - -The custom metrics autoscaler scales the pods associated with an object to maintain the average memory usage that you specify. The autoscaler increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified memory utilization across all pods. The memory trigger considers the memory utilization of entire pod. If the pod has multiple containers, the memory utilization is the sum of all of the containers. - -[NOTE] -==== -* This trigger cannot be used with the `ScaledJob` custom resource. -* When using a memory trigger to scale an object, the object does not scale to `0`, even if you are using multiple triggers. -==== - -.Example scaled object with a memory target -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: memory-scaledobject - namespace: my-namespace -spec: -# ... - triggers: - - type: memory <1> - metricType: Utilization <2> - metadata: - value: '60' <3> - containerName: api <4> ----- -<1> Specifies memory as the trigger type. -<2> Specifies the type of metric to use, either `Utilization` or `AverageValue`. -<3> Specifies the value that triggers scaling. Must be specified as a quoted string value. -* When using `Utilization`, the target value is the average of the resource metrics across all relevant pods, represented as a percentage of the requested value of the resource for the pods. -* When using `AverageValue`, the target value is the average of the metrics across all relevant pods. 
-<4> Optional: Specifies an individual container to scale, based on the memory utilization of only that container, rather than the entire pod. In this example, only the container named `api` is to be scaled. - diff --git a/modules/nodes-cma-autoscaling-custom-trigger-prom.adoc b/modules/nodes-cma-autoscaling-custom-trigger-prom.adoc deleted file mode 100644 index c1f432d698f1..000000000000 --- a/modules/nodes-cma-autoscaling-custom-trigger-prom.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-trigger.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-trigger-prom_{context}"] -= Understanding the Prometheus trigger - -You can scale pods based on Prometheus metrics, which can use the installed {product-title} monitoring or an external Prometheus server as the metrics source. See "Additional resources" for information on the configurations required to use the {product-title} monitoring as a source for metrics. - -[NOTE] -==== -If Prometheus is collecting metrics from the application that the custom metrics autoscaler is scaling, do not set the minimum replicas to `0` in the custom resource. If there are no application pods, the custom metrics autoscaler does not have any metrics to scale on. -==== - -.Example scaled object with a Prometheus target -[source,yaml,options="nowrap"] ----- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: prom-scaledobject - namespace: my-namespace -spec: -# ... - triggers: - - type: prometheus <1> - metadata: - serverAddress: https://thanos-querier.openshift-monitoring.svc.cluster.local:9092 <2> - namespace: kedatest <3> - metricName: http_requests_total <4> - threshold: '5' <5> - query: sum(rate(http_requests_total{job="test-app"}[1m])) <6> - authModes: basic <7> - cortexOrgID: my-org <8> - ignoreNullValues: false <9> - unsafeSsl: false <10> ----- -<1> Specifies Prometheus as the trigger type. -<2> Specifies the address of the Prometheus server. This example uses {product-title} monitoring. -<3> Optional: Specifies the namespace of the object you want to scale. This parameter is mandatory if using {product-title} monitoring as a source for the metrics. -<4> Specifies the name to identify the metric in the `external.metrics.k8s.io` API. If you are using more than one trigger, all metric names must be unique. -<5> Specifies the value that triggers scaling. Must be specified as a quoted string value. -<6> Specifies the Prometheus query to use. -<7> Specifies the authentication method to use. Prometheus scalers support bearer authentication (`bearer`), basic authentication (`basic`), or TLS authentication (`tls`). You configure the specific authentication parameters in a trigger authentication, as discussed in a following section. As needed, you can also use a secret. -<8> Optional: Passes the `X-Scope-OrgID` header to multi-tenant link:https://cortexmetrics.io/[Cortex] or link:https://grafana.com/oss/mimir/[Mimir] storage for Prometheus. This parameter is required only with multi-tenant Prometheus storage, to indicate which data Prometheus should return. -<9> Optional: Specifies how the trigger should proceed if the Prometheus target is lost. - * If `true`, the trigger continues to operate if the Prometheus target is lost. This is the default behavior. - * If `false`, the trigger returns an error if the Prometheus target is lost. -<10> Optional: Specifies whether the certificate check should be skipped. 
For example, you might skip the check if you use self-signed certificates at the Prometheus endpoint. - * If `true`, the certificate check is not performed. - * If `false`, the certificate check is performed. This is the default behavior. - diff --git a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc b/modules/nodes-cma-autoscaling-custom-uninstalling.adoc deleted file mode 100644 index da6fc74c9226..000000000000 --- a/modules/nodes-cma-autoscaling-custom-uninstalling.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/cma/nodes-cma-autoscaling-custom-uninstall.adoc - -:_content-type: PROCEDURE -[id="nodes-cma-autoscaling-custom-uninstalling_{context}"] -= Uninstalling the Custom Metrics Autoscaler Operator - -Use the following procedure to remove the custom metrics autoscaler from your {product-title} cluster. - -.Prerequisites - -* The Custom Metrics Autoscaler Operator must be installed. - -.Procedure - -. In the {product-title} web console, click *Operators* -> *Installed Operators*. - -. Switch to the *openshift-keda* project. - -. Remove the `KedaController` custom resource. - -.. Find the *CustomMetricsAutoscaler* Operator and click the *KedaController* tab. - -.. Find the custom resource, and then click *Delete KedaController*. - -.. Click *Uninstall*. - -. Remove the Custom Metrics Autoscaler Operator: - -.. Click *Operators* -> *Installed Operators*. - -.. Find the *CustomMetricsAutoscaler* Operator and click the *Options* menu {kebab} and select *Uninstall Operator*. - -.. Click *Uninstall*. - -. Optional: Use the OpenShift CLI to remove the custom metrics autoscaler components: - -.. Delete the custom metrics autoscaler CRDs: -+ --- -* `clustertriggerauthentications.keda.sh` -* `kedacontrollers.keda.sh` -* `scaledjobs.keda.sh` -* `scaledobjects.keda.sh` -* `triggerauthentications.keda.sh` --- -+ -[source,terminal] ----- -$ oc delete crd clustertriggerauthentications.keda.sh kedacontrollers.keda.sh scaledjobs.keda.sh scaledobjects.keda.sh triggerauthentications.keda.sh ----- -+ -Deleting the CRDs removes the associated roles, cluster roles, and role bindings. However, there might be a few cluster roles that must be manually deleted. - -.. List any custom metrics autoscaler cluster roles: -+ -[source,terminal] ----- -$ oc get clusterrole | grep keda.sh ----- - -.. Delete the listed custom metrics autoscaler cluster roles. For example: -+ -[source,terminal] ----- -$ oc delete clusterrole.keda.sh-v1alpha1-admin ----- - -.. List any custom metrics autoscaler cluster role bindings: -+ -[source,terminal] ----- -$ oc get clusterrolebinding | grep keda.sh ----- - -.. Delete the listed custom metrics autoscaler cluster role bindings. For example: -+ -[source,terminal] ----- -$ oc delete clusterrolebinding.keda.sh-v1alpha1-admin ----- - -. Delete the custom metrics autoscaler project: -+ -[source,terminal] ----- -$ oc delete project openshift-keda ----- - -. 
Delete the Custom Metrics Autoscaler Operator: -+ -[source,terminal] ----- -$ oc delete operator/openshift-custom-metrics-autoscaler-operator.openshift-keda ----- diff --git a/modules/nodes-containers-copying-files-about.adoc b/modules/nodes-containers-copying-files-about.adoc deleted file mode 100644 index c8d242210c82..000000000000 --- a/modules/nodes-containers-copying-files-about.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-copying-files.adoc - -:_content-type: CONCEPT -[id="nodes-containers-copying-files-about_{context}"] -= Understanding how to copy files - -The `oc rsync` command, or remote sync, is a useful tool for copying database archives to and from your pods for backup and restore purposes. -You can also use `oc rsync` to copy source code changes into a running pod for development debugging, when the running pod supports hot reload of source files. - -[source,terminal] ----- -$ oc rsync <source> <destination> [-c <container>] ----- - -== Requirements - -Specifying the Copy Source:: -The source argument of the `oc rsync` command must point to either a local -directory or a pod directory. Individual files are not supported. - -When specifying a pod directory, the directory name must be prefixed with the pod -name: - -[source,terminal] ----- -<pod name>:<dir> ----- - -If the directory name ends in a path separator (`/`), only the contents of the directory are copied to the destination. Otherwise, the -directory and its contents are copied to the destination. - -Specifying the Copy Destination:: -The destination argument of the `oc rsync` command must point to a directory. If -the directory does not exist and `rsync` is used for the copy, the directory is -created for you. - -Deleting Files at the Destination:: -The `--delete` flag may be used to delete any files in the remote directory that -are not in the local directory. - -Continuous Syncing on File Change:: -Using the `--watch` option causes the command to monitor the source path for any -file system changes, and synchronizes changes when they occur. With this -argument, the command runs forever. - -Synchronization occurs after short quiet periods to ensure a -rapidly changing file system does not result in continuous synchronization -calls. - -When using the `--watch` option, the behavior is effectively the same as -manually invoking `oc rsync` repeatedly, including any arguments normally passed -to `oc rsync`. Therefore, you can control the behavior via the same flags used -with manual invocations of `oc rsync`, such as `--delete`. diff --git a/modules/nodes-containers-copying-files-procedure.adoc b/modules/nodes-containers-copying-files-procedure.adoc deleted file mode 100644 index b8c343eaaf0a..000000000000 --- a/modules/nodes-containers-copying-files-procedure.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-copying-files.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-copying-files-procedure_{context}"] -= Copying files to and from containers - -Support for copying local files to or from a container is built into the CLI. - -.Prerequisites - -When working with `oc rsync`, note the following: - -rsync must be installed:: -The `oc rsync` command uses the local `rsync` tool if present on the client -machine and the remote container.
- -If `rsync` is not found locally or in the remote container, a *tar* archive -is created locally and sent to the container where the *tar* utility is used to -extract the files. If *tar* is not available in the remote container, the -copy will fail. - -The *tar* copy method does not provide the same functionality as `oc rsync`. For -example, `oc rsync` creates the destination directory if it does not exist and -only sends files that are different between the source and the destination. - -[NOTE] -==== -In Windows, the `cwRsync` client should be installed and added to the PATH for -use with the `oc rsync` command. -==== - -.Procedure - -* To copy a local directory to a pod directory: -+ -[source,terminal] ----- -$ oc rsync <local-dir> <pod-name>:/<remote-dir> -c <container-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc rsync /home/user/source devpod1234:/src -c user-container ----- - -* To copy a pod directory to a local directory: -+ -[source,terminal] ----- -$ oc rsync devpod1234:/src /home/user/source ----- -+ -.Example output -[source,terminal] ----- -$ oc rsync devpod1234:/src/status.txt /home/user/ ----- diff --git a/modules/nodes-containers-copying-files-rsync.adoc b/modules/nodes-containers-copying-files-rsync.adoc deleted file mode 100644 index fe98c17f0d06..000000000000 --- a/modules/nodes-containers-copying-files-rsync.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-copying-files.adoc - -[id="nodes-containers-copying-files-rsync_{context}"] -= Using advanced Rsync features - -The `oc rsync` command exposes fewer command line options than standard `rsync`. -In the case that you want to use a standard `rsync` command line option that is -not available in `oc rsync`, for example the `--exclude-from=FILE` option, it -might be possible to use standard `rsync` 's `--rsh` (`-e`) option or `RSYNC_RSH` -environment variable as a workaround, as follows: - -[source,terminal] ----- -$ rsync --rsh='oc rsh' --exclude-from=FILE SRC POD:DEST ----- - -or: - -Export the `RSYNC_RSH` variable: - -[source,terminal] ----- -$ export RSYNC_RSH='oc rsh' ----- - -Then, run the rsync command: - -[source,terminal] ----- -$ rsync --exclude-from=FILE SRC POD:DEST ----- - -Both of the above examples configure standard `rsync` to use `oc rsh` as its -remote shell program to enable it to connect to the remote pod, and are an -alternative to running `oc rsync`. diff --git a/modules/nodes-containers-downward-api-about.adoc b/modules/nodes-containers-downward-api-about.adoc deleted file mode 100644 index 60f308e18cba..000000000000 --- a/modules/nodes-containers-downward-api-about.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -[id="nodes-containers-projected-volumes-about_{context}"] -= Expose pod information to Containers using the Downward API - -The Downward API contains such information as the pod's name, project, and resource values. Containers can consume -information from the downward API using environment variables or a volume -plugin. - -Fields within the pod are selected using the `FieldRef` API type. `FieldRef` -has two fields: - -[options="header"] -|=== -|Field |Description - -|`fieldPath` -|The path of the field to select, relative to the pod. - -|`apiVersion` -|The API version to interpret the `fieldPath` selector within. 
-|=== - -Currently, the valid selectors in the v1 API include: - -[options="header"] -|=== -|Selector |Description - -|`metadata.name` -|The pod's name. This is supported in both environment variables and volumes. - -|`metadata.namespace` -|The pod's namespace.This is supported in both environment variables and volumes. - -|`metadata.labels` -|The pod's labels. This is only supported in volumes and not in environment variables. - -|`metadata.annotations` -|The pod's annotations. This is only supported in volumes and not in environment variables. - -|`status.podIP` -|The pod's IP. This is only supported in environment variables and not volumes. -|=== - -The `apiVersion` field, if not specified, defaults to the API version of the -enclosing pod template. diff --git a/modules/nodes-containers-downward-api-container-configmaps.adoc b/modules/nodes-containers-downward-api-container-configmaps.adoc deleted file mode 100644 index f348a38ca3b5..000000000000 --- a/modules/nodes-containers-downward-api-container-configmaps.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-configmaps_{context}"] -= Consuming configuration maps using the Downward API - -When creating pods, you can use the Downward API to inject configuration map values -so image and application authors can create an image for specific environments. - -.Procedure - -. Create a `*_configmap.yaml_*` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: myconfigmap -data: - mykey: myvalue ----- - -. Create a `ConfigMap` object from the `*_configmap.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f configmap.yaml ----- - -. Create a `*_pod.yaml_*` file that references the above `ConfigMap` object: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-env-test-pod -spec: - containers: - - name: env-test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: - - name: MY_CONFIGMAP_VALUE - valueFrom: - configMapKeyRef: - name: myconfigmap - key: mykey - restartPolicy: Always ----- - -. Create the pod from the `*_pod.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- - -. Check the container's logs for the `MY_CONFIGMAP_VALUE` value: -+ -[source,terminal] ----- -$ oc logs -p dapi-env-test-pod ----- diff --git a/modules/nodes-containers-downward-api-container-envars.adoc b/modules/nodes-containers-downward-api-container-envars.adoc deleted file mode 100644 index b8f8e5f88a43..000000000000 --- a/modules/nodes-containers-downward-api-container-envars.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-envars_{context}"] -= Referencing environment variables - -When creating pods, you can reference the value of a previously defined -environment variable by using the `$()` syntax. If the environment variable -reference can not be resolved, the value will be left as the provided -string. - -.Procedure - -. 
Create a `*_pod.yaml_*` file that references an existing `environment variable`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-env-test-pod -spec: - containers: - - name: env-test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: - - name: MY_EXISTING_ENV - value: my_value - - name: MY_ENV_VAR_REF_ENV - value: $(MY_EXISTING_ENV) - restartPolicy: Never ----- - -. Create the pod from the `*_pod.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- - -. Check the container's logs for the `MY_ENV_VAR_REF_ENV` value: -+ -[source,terminal] ----- -$ oc logs -p dapi-env-test-pod ----- diff --git a/modules/nodes-containers-downward-api-container-escaping.adoc b/modules/nodes-containers-downward-api-container-escaping.adoc deleted file mode 100644 index 8b097db421a2..000000000000 --- a/modules/nodes-containers-downward-api-container-escaping.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-escaping_{context}"] -= Escaping environment variable references - -When creating a pod, you can escape an environment variable reference by using -a double dollar sign. The value will then be set to a single dollar sign version -of the provided value. - -.Procedure - -. Create a `*_pod.yaml_*` file that references an existing `environment variable`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-env-test-pod -spec: - containers: - - name: env-test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: - - name: MY_NEW_ENV - value: $$(SOME_OTHER_ENV) - restartPolicy: Never ----- - -. Create the pod from the `*_pod.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- - -. Check the container's logs for the `MY_NEW_ENV` value: -+ -[source,terminal] ----- -$ oc logs -p dapi-env-test-pod ----- diff --git a/modules/nodes-containers-downward-api-container-resources-envars.adoc b/modules/nodes-containers-downward-api-container-resources-envars.adoc deleted file mode 100644 index 152fc70bad32..000000000000 --- a/modules/nodes-containers-downward-api-container-resources-envars.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-resources-envars_{context}"] -= Consuming container resources using environment variables - -When creating pods, you can use the Downward API to inject information about -computing resource requests and limits using environment variables. - -.Procedure - -To use environment variables: - -. When creating a pod configuration, specify environment variables that -correspond to the contents of the `resources` field in the `*spec.container*` -field: -+ -[source,yaml] ----- -.... 
-spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox:1.24 - command: [ "/bin/sh", "-c", "env" ] - resources: - requests: - memory: "32Mi" - cpu: "125m" - limits: - memory: "64Mi" - cpu: "250m" - env: - - name: MY_CPU_REQUEST - valueFrom: - resourceFieldRef: - resource: requests.cpu - - name: MY_CPU_LIMIT - valueFrom: - resourceFieldRef: - resource: limits.cpu - - name: MY_MEM_REQUEST - valueFrom: - resourceFieldRef: - resource: requests.memory - - name: MY_MEM_LIMIT - valueFrom: - resourceFieldRef: - resource: limits.memory -.... ----- -+ -If the resource limits are not included in the container configuration, the -downward API defaults to the node's CPU and memory allocatable values. - -. Create the pod from the `*_pod.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- diff --git a/modules/nodes-containers-downward-api-container-resources-plugin.adoc b/modules/nodes-containers-downward-api-container-resources-plugin.adoc deleted file mode 100644 index 2ad3bc32171d..000000000000 --- a/modules/nodes-containers-downward-api-container-resources-plugin.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-resources-plugin_{context}"] -= Consuming container resources using a volume plugin - -When creating pods, you can use the Downward API to inject information about -computing resource requests and limits using a volume plugin. - -.Procedure - -To use the Volume Plugin: - -. When creating a pod configuration, use the `spec.volumes.downwardAPI.items` -field to describe the desired resources that correspond to the -`spec.resources` field: -+ -[source,yaml] ----- -.... -spec: - containers: - - name: client-container - image: gcr.io/google_containers/busybox:1.24 - command: ["sh", "-c", "while true; do echo; if [[ -e /etc/cpu_limit ]]; then cat /etc/cpu_limit; fi; if [[ -e /etc/cpu_request ]]; then cat /etc/cpu_request; fi; if [[ -e /etc/mem_limit ]]; then cat /etc/mem_limit; fi; if [[ -e /etc/mem_request ]]; then cat /etc/mem_request; fi; sleep 5; done"] - resources: - requests: - memory: "32Mi" - cpu: "125m" - limits: - memory: "64Mi" - cpu: "250m" - volumeMounts: - - name: podinfo - mountPath: /etc - readOnly: false - volumes: - - name: podinfo - downwardAPI: - items: - - path: "cpu_limit" - resourceFieldRef: - containerName: client-container - resource: limits.cpu - - path: "cpu_request" - resourceFieldRef: - containerName: client-container - resource: requests.cpu - - path: "mem_limit" - resourceFieldRef: - containerName: client-container - resource: limits.memory - - path: "mem_request" - resourceFieldRef: - containerName: client-container - resource: requests.memory -.... ----- -+ -If the resource limits are not included in the container configuration, the -Downward API defaults to the node's CPU and memory allocatable values. - -. 
Create the pod from the `*_volume-pod.yaml_*` file: -+ -[source,terminal] ----- -$ oc create -f volume-pod.yaml ----- diff --git a/modules/nodes-containers-downward-api-container-resources.adoc b/modules/nodes-containers-downward-api-container-resources.adoc deleted file mode 100644 index 904a741cdc2e..000000000000 --- a/modules/nodes-containers-downward-api-container-resources.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: CONCEPT -[id="nodes-containers-downward-api-container-resources-api_{context}"] -= Understanding how to consume container resources using the Downward API - -When creating pods, you can use the Downward API to inject information about -computing resource requests and limits so that image and application authors can -correctly create an image for specific environments. - -You can do this using environment variable or a volume plugin. - diff --git a/modules/nodes-containers-downward-api-container-secrets.adoc b/modules/nodes-containers-downward-api-container-secrets.adoc deleted file mode 100644 index e03f16b8a414..000000000000 --- a/modules/nodes-containers-downward-api-container-secrets.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-secrets_{context}"] -= Consuming secrets using the Downward API - -When creating pods, you can use the downward API to inject secrets -so image and application authors can create an image -for specific environments. - -.Procedure - -. Create a `secret.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -data: - password: cGFzc3dvcmQ= - username: ZGV2ZWxvcGVy -type: kubernetes.io/basic-auth ----- - -. Create a `Secret` object from the `secret.yaml` file: -+ -[source,terminal] ----- -$ oc create -f secret.yaml ----- - -. Create a `pod.yaml` file that references the `username` field from the above `Secret` object: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-env-test-pod -spec: - containers: - - name: env-test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: - - name: MY_SECRET_USERNAME - valueFrom: - secretKeyRef: - name: mysecret - key: username - restartPolicy: Never ----- - -. Create the pod from the `pod.yaml` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- - -. Check the container's logs for the `MY_SECRET_USERNAME` value: -+ -[source,terminal] ----- -$ oc logs -p dapi-env-test-pod ----- diff --git a/modules/nodes-containers-downward-api-container-values-envars.adoc b/modules/nodes-containers-downward-api-container-values-envars.adoc deleted file mode 100644 index 5921710eaae4..000000000000 --- a/modules/nodes-containers-downward-api-container-values-envars.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-values-envars_{context}"] -= Consuming container values using environment variables - -When using a container's environment variables, use the `EnvVar` type's `valueFrom` field (of type `EnvVarSource`) -to specify that the variable's value should come from a `FieldRef` -source instead of the literal value specified by the `value` field. 
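- -For example, a literal value and a downward API reference differ only in whether you set `value` or `valueFrom`. The following fragment is illustrative only, and the variable names are arbitrary: - -[source,yaml] ----- -env: - - name: STATIC_GREETING <1> - value: hello - - name: MY_POD_NAME <2> - valueFrom: - fieldRef: - fieldPath: metadata.name ----- -<1> A literal value set with the `value` field. -<2> A value resolved from the pod's `metadata.name` field by using `valueFrom` and `fieldRef`.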
- -Only constant attributes of the pod can be consumed this way, as environment -variables cannot be updated once a process is started in a way that allows the -process to be notified that the value of a variable has changed. The fields -supported using environment variables are: - -- Pod name -- Pod project/namespace - -.Procedure - -To use environment variables - -. Create a `pod.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-env-test-pod -spec: - containers: - - name: env-test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - restartPolicy: Never ----- - -. Create the pod from the `pod.yaml` file: -+ -[source,terminal] ----- -$ oc create -f pod.yaml ----- - -. Check the container's logs for the `MY_POD_NAME` and `MY_POD_NAMESPACE` -values: -+ -[source,terminal] ----- -$ oc logs -p dapi-env-test-pod ----- diff --git a/modules/nodes-containers-downward-api-container-values-plugin.adoc b/modules/nodes-containers-downward-api-container-values-plugin.adoc deleted file mode 100644 index 4843dc15bd79..000000000000 --- a/modules/nodes-containers-downward-api-container-values-plugin.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-downward-api-container-values-plugin_{context}"] -= Consuming container values using a volume plugin - -You containers can consume API values using a volume plugin. - -Containers can consume: - -* Pod name - -* Pod project/namespace - -* Pod annotations - -* Pod labels - -.Procedure - -To use the volume plugin: - -. Create a `volume-pod.yaml` file: -+ -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - labels: - zone: us-east-coast - cluster: downward-api-test-cluster1 - rack: rack-123 - name: dapi-volume-test-pod - annotations: - annotation1: "345" - annotation2: "456" -spec: - containers: - - name: volume-test-container - image: gcr.io/google_containers/busybox - command: ["sh", "-c", "cat /tmp/etc/pod_labels /tmp/etc/pod_annotations"] - volumeMounts: - - name: podinfo - mountPath: /tmp/etc - readOnly: false - volumes: - - name: podinfo - downwardAPI: - defaultMode: 420 - items: - - fieldRef: - fieldPath: metadata.name - path: pod_name - - fieldRef: - fieldPath: metadata.namespace - path: pod_namespace - - fieldRef: - fieldPath: metadata.labels - path: pod_labels - - fieldRef: - fieldPath: metadata.annotations - path: pod_annotations - restartPolicy: Never ----- - -. Create the pod from the `volume-pod.yaml` file: -+ -[source,terminal] ----- -$ oc create -f volume-pod.yaml ----- - -. 
Check the container's logs and verify the presence of the configured fields: -+ -[source,terminal] ----- -$ oc logs -p dapi-volume-test-pod ----- -+ -.Example output -[source,terminal] ----- -cluster=downward-api-test-cluster1 -rack=rack-123 -zone=us-east-coast -annotation1=345 -annotation2=456 -kubernetes.io/config.source=api ----- diff --git a/modules/nodes-containers-downward-api-container-values.adoc b/modules/nodes-containers-downward-api-container-values.adoc deleted file mode 100644 index 0e2c16c77f6f..000000000000 --- a/modules/nodes-containers-downward-api-container-values.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-downward-api.adoc - -:_content-type: CONCEPT -[id="nodes-containers-downward-api-container-values_{context}"] -= Understanding how to consume container values using the downward API - -You containers can consume API values using environment variables or a volume plugin. -Depending on the method you choose, containers can consume: - -* Pod name - -* Pod project/namespace - -* Pod annotations - -* Pod labels - -Annotations and labels are available using only a volume plugin. - diff --git a/modules/nodes-containers-events-about.adoc b/modules/nodes-containers-events-about.adoc deleted file mode 100644 index 26a6578b82bf..000000000000 --- a/modules/nodes-containers-events-about.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-events.adoc - -:_content-type: CONCEPT -[id="nodes-containers-events-about_{context}"] -= Understanding events - -Events allow {product-title} to record -information about real-world events in a resource-agnostic manner. They also -allow developers and administrators to consume information about system -components in a unified way. - -ifdef::openshift-online[] -[id="event-failure-notifications_{context}"] -== Failure Notifications - -For each of your projects, you can choose to receive email notifications -about various failures, including dead or failed deployments, dead builds, and -dead or failed persistent volume claims (PVCs). - -See Notifications. -endif::[] diff --git a/modules/nodes-containers-events-list.adoc b/modules/nodes-containers-events-list.adoc deleted file mode 100644 index 0a16a534d050..000000000000 --- a/modules/nodes-containers-events-list.adoc +++ /dev/null @@ -1,477 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-events.adoc - -[id="nodes-containers-events-list_{context}"] -= List of events - -This section describes the events of {product-title}. - -.Configuration events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedValidation` -|Failed pod configuration validation. -|=== - -.Container events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`BackOff` -|Back-off restarting failed the container. - -|`Created` -|Container created. - -|`Failed` -|Pull/Create/Start failed. - -|`Killing` -|Killing the container. - -|`Started` -|Container started. - -|`Preempting` -|Preempting other pods. - -|`ExceededGracePeriod` -|Container runtime did not stop the pod within specified grace period. - -|=== - -.Health events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`Unhealthy` -|Container is unhealthy. -|=== - -.Image events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`BackOff` -|Back off Ctr Start, image pull. - -|`ErrImageNeverPull` -|The image's *NeverPull Policy* is violated. 
- -|`Failed` -|Failed to pull the image. - -|`InspectFailed` -|Failed to inspect the image. - -|`Pulled` -|Successfully pulled the image or the container image is already present on the machine. - -|`Pulling` -|Pulling the image. -|=== - -.Image Manager events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FreeDiskSpaceFailed` -|Free disk space failed. - -|`InvalidDiskCapacity` -|Invalid disk capacity. -|=== - -.Node events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedMount` -|Volume mount failed. - -|`HostNetworkNotSupported` -|Host network not supported. - -|`HostPortConflict` -|Host/port conflict. - -|`KubeletSetupFailed` -|Kubelet setup failed. - -|`NilShaper` -|Undefined shaper. - -|`NodeNotReady` -|Node is not ready. - -|`NodeNotSchedulable` -|Node is not schedulable. - -|`NodeReady` -|Node is ready. - -|`NodeSchedulable` -|Node is schedulable. - -|`NodeSelectorMismatching` -|Node selector mismatch. - -|`OutOfDisk` -|Out of disk. - -|`Rebooted` -|Node rebooted. - -|`Starting` -|Starting kubelet. - -|`FailedAttachVolume` -|Failed to attach volume. - -|`FailedDetachVolume` -|Failed to detach volume. - -|`VolumeResizeFailed` -|Failed to expand/reduce volume. - -|`VolumeResizeSuccessful` -| Successfully expanded/reduced volume. - -|`FileSystemResizeFailed` -|Failed to expand/reduce file system. - -|`FileSystemResizeSuccessful` -| Successfully expanded/reduced file system. - -|`FailedUnMount` -|Failed to unmount volume. - -|`FailedMapVolume` -|Failed to map a volume. - -|`FailedUnmapDevice` -|Failed unmaped device. - -|`AlreadyMountedVolume` -|Volume is already mounted. - -|`SuccessfulDetachVolume` -|Volume is successfully detached. - -|`SuccessfulMountVolume` -|Volume is successfully mounted. - -|`SuccessfulUnMountVolume` -|Volume is successfully unmounted. - -|`ContainerGCFailed` -|Container garbage collection failed. - -|`ImageGCFailed` -|Image garbage collection failed. - -|`FailedNodeAllocatableEnforcement` -|Failed to enforce System Reserved Cgroup limit. - -|`NodeAllocatableEnforced` -|Enforced System Reserved Cgroup limit. - -|`UnsupportedMountOption` -|Unsupported mount option. - -|`SandboxChanged` -|Pod sandbox changed. - -|`FailedCreatePodSandBox` -|Failed to create pod sandbox. - -|`FailedPodSandBoxStatus` -|Failed pod sandbox status. - -|=== - -.Pod worker events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedSync` -|Pod sync failed. -|=== - -.System Events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`SystemOOM` -|There is an OOM (out of memory) situation on the cluster. -|=== - -.Pod events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedKillPod` -|Failed to stop a pod. - -|`FailedCreatePodContainer` -|Failed to create a pod container. - -|`Failed` -|Failed to make pod data directories. - -|`NetworkNotReady` -|Network is not ready. - -|`FailedCreate` -|Error creating: `<error-msg>`. - -|`SuccessfulCreate` -|Created pod: `<pod-name>`. - -|`FailedDelete` -|Error deleting: `<error-msg>`. - -|`SuccessfulDelete` -|Deleted pod: `<pod-id>`. - -|=== - -.Horizontal Pod AutoScaler events -[cols="2,8",options="header"] -|=== -| Name | Description - -|SelectorRequired -|Selector is required. - -|`InvalidSelector` -|Could not convert selector into a corresponding internal selector object. - -|`FailedGetObjectMetric` -|HPA was unable to compute the replica count. - -|`InvalidMetricSourceType` -|Unknown metric source type. 
- -|`ValidMetricFound` -|HPA was able to successfully calculate a replica count. - -|`FailedConvertHPA` -|Failed to convert the given HPA. - -|`FailedGetScale` -|HPA controller was unable to get the target's current scale. - -|`SucceededGetScale` -|HPA controller was able to get the target's current scale. - -|`FailedComputeMetricsReplicas` -|Failed to compute desired number of replicas based on listed metrics. - -|`FailedRescale` -|New size: `<size>`; reason: `<msg>`; error: `<error-msg>`. - -|`SuccessfulRescale` -|New size: `<size>`; reason: `<msg>`. - -|`FailedUpdateStatus` -|Failed to update status. - -|=== - -.Network events (openshift-sdn) -[cols="2,8",options="header"] -|=== -| Name | Description - -|`Starting` -|Starting OpenShift SDN. - -|`NetworkFailed` -|The pod's network interface has been lost and the pod will be stopped. -|=== - -.Network events (kube-proxy) -[cols="2,8",options="header"] -|=== -| Name | Description - -|`NeedPods` -|The service-port `<serviceName>:<port>` needs pods. -|=== - -.Volume events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedBinding` -|There are no persistent volumes available and no storage class is set. - -|`VolumeMismatch` -|Volume size or class is different from what is requested in claim. - -|`VolumeFailedRecycle` -|Error creating recycler pod. - -|`VolumeRecycled` -|Occurs when volume is recycled. - -|`RecyclerPod` -|Occurs when pod is recycled. - -|`VolumeDelete` -|Occurs when volume is deleted. - -|`VolumeFailedDelete` -|Error when deleting the volume. - -|`ExternalProvisioning` -|Occurs when volume for the claim is provisioned either manually or via external software. - -|`ProvisioningFailed` -|Failed to provision volume. - -|`ProvisioningCleanupFailed` -|Error cleaning provisioned volume. - -|`ProvisioningSucceeded` -|Occurs when the volume is provisioned successfully. - -|`WaitForFirstConsumer` -|Delay binding until pod scheduling. - -|=== - -.Lifecycle hooks -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedPostStartHook` -|Handler failed for pod start. - -|`FailedPreStopHook` -|Handler failed for pre-stop. - -|`UnfinishedPreStopHook` -|Pre-stop hook unfinished. -|=== - -.Deployments -[cols="2,8",options="header"] -|=== -| Name | Description - -|`DeploymentCancellationFailed` -|Failed to cancel deployment. - -|`DeploymentCancelled` -|Canceled deployment. - -|`DeploymentCreated` -|Created new replication controller. - -|`IngressIPRangeFull` -|No available Ingress IP to allocate to service. - -|=== - -.Scheduler events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`FailedScheduling` -|Failed to schedule pod: `<pod-namespace>/<pod-name>`. This event is raised for -multiple reasons, for example: `AssumePodVolumes` failed, Binding rejected etc. - -|`Preempted` -|By `<preemptor-namespace>/<preemptor-name>` on node `<node-name>`. - -|`Scheduled` -|Successfully assigned `<pod-name>` to `<node-name>`. - -|=== - -.Daemon set events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`SelectingAll` -|This daemon set is selecting all pods. A non-empty selector is required. - -|`FailedPlacement` -|Failed to place pod on `<node-name>`. - -|`FailedDaemonPod` -|Found failed daemon pod `<pod-name>` on node `<node-name>`, will try to kill it. - -|=== - -.LoadBalancer service events -[cols="2,8",options="header"] -|=== -| Name | Description - -|`CreatingLoadBalancerFailed` -|Error creating load balancer. - -|`DeletingLoadBalancer` -|Deleting load balancer. 
- -|`EnsuringLoadBalancer` -|Ensuring load balancer. - -|`EnsuredLoadBalancer` -|Ensured load balancer. - -|`UnAvailableLoadBalancer` -|There are no available nodes for `LoadBalancer` service. - -|`LoadBalancerSourceRanges` -|Lists the new `LoadBalancerSourceRanges`. For example, `<old-source-range> -> <new-source-range>`. - -|`LoadbalancerIP` -|Lists the new IP address. For example, `<old-ip> -> <new-ip>`. - -|`ExternalIP` -|Lists external IP address. For example, `Added: <external-ip>`. - -|`UID` -|Lists the new UID. For example, `<old-service-uid> -> <new-service-uid>`. - -|`ExternalTrafficPolicy` -|Lists the new `ExternalTrafficPolicy`. For example, `<old-policy> -> <new-policy>`. - -|`HealthCheckNodePort` -|Lists the new `HealthCheckNodePort`. For example, `<old-node-port> -> new-node-port>`. - -|`UpdatedLoadBalancer` -|Updated load balancer with new hosts. - -|`LoadBalancerUpdateFailed` -|Error updating load balancer with new hosts. - -|`DeletingLoadBalancer` -|Deleting load balancer. - -|`DeletingLoadBalancerFailed` -|Error deleting load balancer. - -|`DeletedLoadBalancer` -|Deleted load balancer. - -|=== diff --git a/modules/nodes-containers-events-viewing.adoc b/modules/nodes-containers-events-viewing.adoc deleted file mode 100644 index 0566cb69a7ee..000000000000 --- a/modules/nodes-containers-events-viewing.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-events.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-events-viewing-cli_{context}"] -= Viewing events using the CLI - -You can get a list of events in a given project using the CLI. - -.Procedure - -* To view events in a project use the following command: -+ -[source,terminal] ----- -$ oc get events [-n <project>] <1> ----- -<1> The name of the project. -+ -For example: -+ -[source,terminal] ----- -$ oc get events -n openshift-config ----- -+ -.Example output -[source,terminal] ----- -LAST SEEN TYPE REASON OBJECT MESSAGE -97m Normal Scheduled pod/dapi-env-test-pod Successfully assigned openshift-config/dapi-env-test-pod to ip-10-0-171-202.ec2.internal -97m Normal Pulling pod/dapi-env-test-pod pulling image "gcr.io/google_containers/busybox" -97m Normal Pulled pod/dapi-env-test-pod Successfully pulled image "gcr.io/google_containers/busybox" -97m Normal Created pod/dapi-env-test-pod Created container -9m5s Warning FailedCreatePodSandBox pod/dapi-volume-test-pod Failed create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_dapi-volume-test-pod_openshift-config_6bc60c1f-452e-11e9-9140-0eec59c23068_0(748c7a40db3d08c07fb4f9eba774bd5effe5f0d5090a242432a73eee66ba9e22): Multus: Err adding pod to network "openshift-sdn": cannot set "openshift-sdn" ifname to "eth0": no netns: failed to Statfs "/proc/33366/ns/net": no such file or directory -8m31s Normal Scheduled pod/dapi-volume-test-pod Successfully assigned openshift-config/dapi-volume-test-pod to ip-10-0-171-202.ec2.internal ----- - - -* To view events in your project from the {product-title} console. -+ -. Launch the {product-title} console. -+ -. Click *Home* -> *Events* and select your project. - -. Move to resource that you want to see events. For example: *Home* -> *Projects* -> <project-name> -> <resource-name>. -+ -Many objects, such as pods and deployments, have their own -*Events* tab as well, which shows events related to that object. 
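- -You can also filter and watch events from the command line by using standard `oc get` options. The following commands are examples only; substitute your own project name. The first command lists only warning events in a project, and the second command streams new events as they occur: - -[source,terminal] ----- -$ oc get events -n openshift-config --field-selector type=Warning ----- - -[source,terminal] ----- -$ oc get events -n openshift-config --watch ----- 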
diff --git a/modules/nodes-containers-init-about.adoc b/modules/nodes-containers-init-about.adoc deleted file mode 100644 index d53885929513..000000000000 --- a/modules/nodes-containers-init-about.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-init.adoc - -:_content-type: CONCEPT -[id="nodes-containers-init-about_{context}"] -= Understanding Init Containers - -You can use an Init Container resource to perform tasks before the rest of a pod is deployed. - -A pod can have Init Containers in addition to application containers. Init -containers allow you to reorganize setup scripts and binding code. - -An Init Container can: - -* Contain and run utilities that are not desirable to include in the app Container image for security reasons. -* Contain utilities or custom code for setup that is not present in an app image. For example, there is no requirement to make an image FROM another image just to use a tool like sed, awk, python, or dig during setup. -* Use Linux namespaces so that they have different filesystem views from app containers, such as access to secrets that application containers are not able to access. - -Each Init Container must complete successfully before the next one is started. So, Init Containers provide an easy way to block or delay the startup of app containers until some set of preconditions are met. - -For example, the following are some ways you can use Init Containers: - -* Wait for a service to be created with a shell command like: -+ -[source,terminal] ----- -for i in {1..100}; do sleep 1; if dig myservice; then exit 0; fi; done; exit 1 ----- - -* Register this pod with a remote server from the downward API with a command like: -+ -[source,terminal] ----- -$ curl -X POST http://$MANAGEMENT_SERVICE_HOST:$MANAGEMENT_SERVICE_PORT/register -d ‘instance=$()&ip=$()’ ----- - -* Wait for some time before starting the app Container with a command like `sleep 60`. - -* Clone a git repository into a volume. - -* Place values into a configuration file and run a template tool to dynamically generate a configuration file for the main app Container. For example, place the POD_IP value in a configuration and generate the main app configuration file using Jinja. - -See the link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/[Kubernetes documentation] for more information. diff --git a/modules/nodes-containers-init-creating.adoc b/modules/nodes-containers-init-creating.adoc deleted file mode 100644 index b14fe6f26343..000000000000 --- a/modules/nodes-containers-init-creating.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-init.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-init-creating_{context}"] -= Creating Init Containers - -The following example outlines a simple pod which has two Init Containers. The first waits for `myservice` and the second waits for `mydb`. After both containers complete, the pod begins. - -.Procedure - -. Create a YAML file for the Init Container: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: myapp-pod - labels: - app: myapp -spec: - containers: - - name: myapp-container - image: registry.access.redhat.com/ubi9/ubi:latest - command: ['sh', '-c', 'echo The app is running! 
&& sleep 3600'] - initContainers: - - name: init-myservice - image: registry.access.redhat.com/ubi9/ubi:latest - command: ['sh', '-c', 'until getent hosts myservice; do echo waiting for myservice; sleep 2; done;'] - - name: init-mydb - image: registry.access.redhat.com/ubi9/ubi:latest - command: ['sh', '-c', 'until getent hosts mydb; do echo waiting for mydb; sleep 2; done;'] ----- - -. Create a YAML file for the `myservice` service. -+ -[source,yaml] ----- -kind: Service -apiVersion: v1 -metadata: - name: myservice -spec: - ports: - - protocol: TCP - port: 80 - targetPort: 9376 ----- - -. Create a YAML file for the `mydb` service. -+ -[source,yaml] ----- -kind: Service -apiVersion: v1 -metadata: - name: mydb -spec: - ports: - - protocol: TCP - port: 80 - targetPort: 9377 ----- - -. Run the following command to create the `myapp-pod`: -+ -[source,terminal] ----- -$ oc create -f myapp.yaml ----- -+ -.Example output -[source,terminal] ----- -pod/myapp-pod created ----- - -. View the status of the pod: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -myapp-pod 0/1 Init:0/2 0 5s ----- -+ -Note that the pod status indicates it is waiting - -. Run the following commands to create the services: -+ -[source,terminal] ----- -$ oc create -f mydb.yaml ----- -+ -[source,terminal] ----- -$ oc create -f myservice.yaml ----- - -. View the status of the pod: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -myapp-pod 1/1 Running 0 2m ----- diff --git a/modules/nodes-containers-port-forwarding-about.adoc b/modules/nodes-containers-port-forwarding-about.adoc deleted file mode 100644 index 194e3f392a49..000000000000 --- a/modules/nodes-containers-port-forwarding-about.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-port-forwarding.adoc - -:_content-type: CONCEPT -[id="nodes-containers-port-forwarding-about_{context}"] -= Understanding port forwarding - -You can use the CLI to forward one or more local ports to a pod. This allows you -to listen on a given or random port locally, and have data forwarded to and from -given ports in the pod. - -Support for port forwarding is built into the CLI: - -[source,terminal] ----- -$ oc port-forward <pod> [<local_port>:]<remote_port> [...[<local_port_n>:]<remote_port_n>] ----- - -The CLI listens on each local port specified by the user, forwarding using the protocol described below. - -Ports may be specified using the following formats: - -[horizontal] -`5000`:: The client listens on port 5000 locally and forwards to 5000 in the -pod. -`6000:5000`:: The client listens on port 6000 locally and forwards to 5000 in -the pod. -`:5000` or `0:5000`:: The client selects a free local port and forwards to 5000 -in the pod. - -{product-title} handles port-forward requests from clients. Upon receiving a request, {product-title} upgrades the response and waits for the client -to create port-forwarding streams. When {product-title} receives a new stream, it copies data between the stream and the pod's port. - -Architecturally, there are options for forwarding to a pod's port. The supported {product-title} implementation invokes `nsenter` directly on the node host -to enter the pod's network namespace, then invokes `socat` to copy data between the stream and the pod's port. 
However, a custom implementation could -include running a _helper_ pod that then runs `nsenter` and `socat`, so that those binaries are not required to be installed on the host. - diff --git a/modules/nodes-containers-port-forwarding-protocol.adoc b/modules/nodes-containers-port-forwarding-protocol.adoc deleted file mode 100644 index aaac02a2d582..000000000000 --- a/modules/nodes-containers-port-forwarding-protocol.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-port-forwarding.adoc - -[id="nodes-containers-port-forwarding-protocol_{context}"] -= Protocol for initiating port forwarding from a client - -Clients initiate port forwarding to a pod by issuing a request to the -Kubernetes API server: - ----- -/proxy/nodes/<node_name>/portForward/<namespace>/<pod> ----- - -In the above URL: - -- `<node_name>` is the FQDN of the node. -- `<namespace>` is the namespace of the target pod. -- `<pod>` is the name of the target pod. - -For example: - ----- -/proxy/nodes/node123.openshift.com/portForward/myns/mypod ----- - -After sending a port forward request to the API server, the client upgrades the -connection to one that supports multiplexed streams; the current implementation -uses link:https://httpwg.org/specs/rfc7540.html[*Hyptertext Transfer Protocol Version 2 (HTTP/2)*]. - -The client creates a stream with the `port` header containing the target port in -the pod. All data written to the stream is delivered via the kubelet to the -target pod and port. Similarly, all data sent from the pod for that forwarded -connection is delivered back to the same stream in the client. - -The client closes all streams, the upgraded connection, and the underlying -connection when it is finished with the port forwarding request. diff --git a/modules/nodes-containers-port-forwarding-using.adoc b/modules/nodes-containers-port-forwarding-using.adoc deleted file mode 100644 index 44d831f1a559..000000000000 --- a/modules/nodes-containers-port-forwarding-using.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-port-forwarding.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-port-forwarding-using_{context}"] -= Using port forwarding - -You can use the CLI to port-forward one or more local ports to a pod. 
- -.Procedure - -Use the following command to listen on the specified port in a pod: - -[source,terminal] ----- -$ oc port-forward <pod> [<local_port>:]<remote_port> [...[<local_port_n>:]<remote_port_n>] ----- - -For example: - -* Use the following command to listen on ports `5000` and `6000` locally and forward data to and from ports `5000` and `6000` in the pod: -+ -[source,terminal] ----- -$ oc port-forward <pod> 5000 6000 ----- -+ -.Example output -[source,terminal] ----- -Forwarding from 127.0.0.1:5000 -> 5000 -Forwarding from [::1]:5000 -> 5000 -Forwarding from 127.0.0.1:6000 -> 6000 -Forwarding from [::1]:6000 -> 6000 ----- - -* Use the following command to listen on port `8888` locally and forward to `5000` in the pod: -+ -[source,terminal] ----- -$ oc port-forward <pod> 8888:5000 ----- -+ -.Example output -[source,terminal] ----- -Forwarding from 127.0.0.1:8888 -> 5000 -Forwarding from [::1]:8888 -> 5000 ----- - -* Use the following command to listen on a free port locally and forward to `5000` in the pod: -+ -[source,terminal] ----- -$ oc port-forward <pod> :5000 ----- -+ -.Example output -[source,terminal] ----- -Forwarding from 127.0.0.1:42390 -> 5000 -Forwarding from [::1]:42390 -> 5000 ----- -+ -Or: -+ -[source,terminal] ----- -$ oc port-forward <pod> 0:5000 ----- - diff --git a/modules/nodes-containers-projected-volumes-about.adoc b/modules/nodes-containers-projected-volumes-about.adoc deleted file mode 100644 index 8a950d79a845..000000000000 --- a/modules/nodes-containers-projected-volumes-about.adoc +++ /dev/null @@ -1,188 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-projected-volumes.adoc - -:_content-type: CONCEPT -[id="nodes-containers-projected-volumes-about_{context}"] -= Understanding projected volumes - -Projected volumes can map any combination of these volume sources into a single directory, allowing the user to: - -* automatically populate a single volume with the keys from multiple secrets, config maps, and with downward API information, -so that I can synthesize a single directory with various sources of information; -* populate a single volume with the keys from multiple secrets, config maps, and with downward API information, -explicitly specifying paths for each item, so that I can have full control over the contents of that volume. - -[IMPORTANT] -==== -When the `RunAsUser` permission is set in the security context of a Linux-based pod, the projected files have the correct permissions set, including container user ownership. However, when the Windows equivalent `RunAsUsername` permission is set in a Windows pod, the kubelet is unable to correctly set ownership on the files in the projected volume. - -Therefore, the `RunAsUsername` permission set in the security context of a Windows pod is not honored for Windows projected volumes running in {product-title}. -==== - -The following general scenarios show how you can use projected volumes. - -*Config map, secrets, Downward API.*:: -Projected volumes allow you to deploy containers with configuration data that includes passwords. -An application using these resources could be deploying {rh-openstack-first} on Kubernetes. The configuration data might have to be assembled differently depending on if the services are going to be used for production or for testing. If a pod is labeled with production or testing, the downward API selector `metadata.labels` can be used to produce the correct {rh-openstack} configs. 
- -*Config map + secrets.*:: -Projected volumes allow you to deploy containers involving configuration data and passwords. -For example, you might execute a config map with some sensitive encrypted tasks that are decrypted using a vault password file. - -*ConfigMap + Downward API.*:: -Projected volumes allow you to generate a config including the pod name (available via the `metadata.name` selector). This application can then pass the pod name along with requests to easily determine the source without using IP tracking. - -*Secrets + Downward API.*:: -Projected volumes allow you to use a secret as a public key to encrypt the namespace of the pod (available via the `metadata.namespace` selector). -This example allows the Operator to use the application to deliver the namespace information securely without using an encrypted transport. - -[id="projected-volumes-examples_{context}"] -== Example Pod specs - -The following are examples of `Pod` specs for creating projected volumes. - -.Pod with a secret, a Downward API, and a config map - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: volume-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: <1> - - name: all-in-one - mountPath: "/projected-volume"<2> - readOnly: true <3> - volumes: <4> - - name: all-in-one <5> - projected: - defaultMode: 0400 <6> - sources: - - secret: - name: mysecret <7> - items: - - key: username - path: my-group/my-username <8> - - downwardAPI: <9> - items: - - path: "labels" - fieldRef: - fieldPath: metadata.labels - - path: "cpu_limit" - resourceFieldRef: - containerName: container-test - resource: limits.cpu - - configMap: <10> - name: myconfigmap - items: - - key: config - path: my-group/my-config - mode: 0777 <11> ----- - -<1> Add a `volumeMounts` section for each container that needs the secret. -<2> Specify a path to an unused directory where the secret will appear. -<3> Set `readOnly` to `true`. -<4> Add a `volumes` block to list each projected volume source. -<5> Specify any name for the volume. -<6> Set the execute permission on the files. -<7> Add a secret. Enter the name of the secret object. Each secret you want to use must be listed. -<8> Specify the path to the secrets file under the `mountPath`. Here, the secrets file is in *_/projected-volume/my-group/my-username_*. -<9> Add a Downward API source. -<10> Add a ConfigMap source. -<11> Set the mode for the specific projection - -[NOTE] -==== -If there are multiple containers in the pod, each container needs a `volumeMounts` section, but only one `volumes` section is needed. -==== - -.Pod with multiple secrets with a non-default permission mode set - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: volume-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: - - name: all-in-one - mountPath: "/projected-volume" - readOnly: true - volumes: - - name: all-in-one - projected: - defaultMode: 0755 - sources: - - secret: - name: mysecret - items: - - key: username - path: my-group/my-username - - secret: - name: mysecret2 - items: - - key: password - path: my-group/my-password - mode: 511 ----- - -[NOTE] -==== -The `defaultMode` can only be specified at the projected level and not for each -volume source. However, as illustrated above, you can explicitly set the `mode` -for each individual projection. 
-==== - -[id="projected-volumes-pathing_{context}"] -== Pathing Considerations - -*Collisions Between Keys when Configured Paths are Identical*:: If you configure any keys with the same path, the pod spec will not be accepted as valid. -In the following example, the specified path for `mysecret` and `myconfigmap` are the same: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: volume-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: - - name: all-in-one - mountPath: "/projected-volume" - readOnly: true - volumes: - - name: all-in-one - projected: - sources: - - secret: - name: mysecret - items: - - key: username - path: my-group/data - - configMap: - name: myconfigmap - items: - - key: config - path: my-group/data ----- - -Consider the following situations related to the volume file paths. - -*Collisions Between Keys without Configured Paths*:: The only run-time validation that can occur is when all the paths are known at pod creation, similar to the above scenario. Otherwise, when a conflict occurs the most recent specified resource will overwrite anything preceding it -(this is true for resources that are updated after pod creation as well). - -*Collisions when One Path is Explicit and the Other is Automatically Projected*:: In the event that there is a collision due to a user specified path matching data that is automatically projected, -the latter resource will overwrite anything preceding it as before diff --git a/modules/nodes-containers-projected-volumes-creating.adoc b/modules/nodes-containers-projected-volumes-creating.adoc deleted file mode 100644 index 23495c719b40..000000000000 --- a/modules/nodes-containers-projected-volumes-creating.adoc +++ /dev/null @@ -1,230 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-projected-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-projected-volumes-creating_{context}"] -= Configuring a Projected Volume for a Pod - -When creating projected volumes, consider the volume file path situations described in _Understanding projected volumes_. - -The following example shows how to use a projected volume to mount an existing secret volume source. The steps can be used to create a user name and password secrets from local files. You then create a pod that runs one container, using a projected volume to mount the secrets into the same shared directory. - -.Procedure - -To use a projected volume to mount an existing secret volume source. - -. Create files containing the secrets, entering the following, replacing the password and user information as appropriate: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque -data: - pass: MWYyZDFlMmU2N2Rm - user: YWRtaW4= ----- -+ -The `user` and `pass` values can be any valid string that is *base64* encoded. -+ -The following example shows `admin` in base64: -+ -[source,terminal] ----- -$ echo -n "admin" | base64 ----- -+ -.Example output -[source,terminal] ----- -YWRtaW4= ----- -+ -The following example shows the password `1f2d1e2e67df` in base64:. -+ -[source,terminal] ----- -$ echo -n "1f2d1e2e67df" | base64 ----- -+ -.Example output -[source,terminal] ----- -MWYyZDFlMmU2N2Rm ----- - -. 
Use the following command to create the secrets: -+ -[source,terminal] ----- -$ oc create -f <secrets-filename> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f secret.yaml ----- -+ -.Example output -[source,terminal] ----- -secret "mysecret" created ----- - -. You can check that the secret was created using the following commands: -+ -[source,terminal] ----- -$ oc get secret <secret-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get secret mysecret ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE DATA AGE -mysecret Opaque 2 17h ----- -+ -[source,terminal] ----- -$ oc get secret <secret-name> -o yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get secret mysecret -o yaml ----- -+ -[source,yaml] ----- -apiVersion: v1 -data: - pass: MWYyZDFlMmU2N2Rm - user: YWRtaW4= -kind: Secret -metadata: - creationTimestamp: 2017-05-30T20:21:38Z - name: mysecret - namespace: default - resourceVersion: "2107" - selfLink: /api/v1/namespaces/default/secrets/mysecret - uid: 959e0424-4575-11e7-9f97-fa163e4bd54c -type: Opaque ----- - -. Create a pod configuration file similar to the following that includes a `volumes` section: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: test-projected-volume -spec: - containers: - - name: test-projected-volume - image: busybox - args: - - sleep - - "86400" - volumeMounts: - - name: all-in-one - mountPath: "/projected-volume" - readOnly: true - volumes: - - name: all-in-one - projected: - sources: - - secret: <1> - name: user - - secret: <1> - name: pass ----- -<1> The name of the secret you created. - -. Create the pod from the configuration file: -+ -[source,terminal] ----- -$ oc create -f <your_yaml_file>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f secret-pod.yaml ----- -+ -.Example output -[source,terminal] ----- -pod "test-projected-volume" created ----- - -. Verify that the pod container is running, and then watch for changes to -the pod: -+ -[source,terminal] ----- -$ oc get pod <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get pod test-projected-volume ----- -+ -The output should appear similar to the following: -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -test-projected-volume 1/1 Running 0 14s ----- - -. In another terminal, use the `oc exec` command to open a shell to the running container: -+ -[source,terminal] ----- -$ oc exec -it <pod> <command> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc exec -it test-projected-volume -- /bin/sh ----- - -. In your shell, verify that the `projected-volumes` directory contains your projected sources: -+ -[source,terminal] ----- -/ # ls ----- -+ -.Example output -[source,terminal] ----- -bin home root tmp -dev proc run usr -etc projected-volume sys var ----- diff --git a/modules/nodes-containers-remote-commands-about.adoc b/modules/nodes-containers-remote-commands-about.adoc deleted file mode 100644 index 78b3c0edf252..000000000000 --- a/modules/nodes-containers-remote-commands-about.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-remote-commands.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-remote-commands-about_{context}"] -= Executing remote commands in containers - -Support for remote container command execution is built into the CLI. - -.Procedure - -To run a command in a container: - -[source,terminal] ----- -$ oc exec <pod> [-c <container>] <command> [<arg_1> ... 
<arg_n>] ----- - -For example: - -[source,terminal] ----- -$ oc exec mypod date ----- - -.Example output -[source,terminal] ----- -Thu Apr 9 02:21:53 UTC 2015 ----- - -[IMPORTANT] -==== -link:https://access.redhat.com/errata/RHSA-2015:1650[For security purposes], the -`oc exec` command does not work when accessing privileged containers except when -the command is executed by a `cluster-admin` user. -==== diff --git a/modules/nodes-containers-remote-commands-protocol.adoc b/modules/nodes-containers-remote-commands-protocol.adoc deleted file mode 100644 index 8767ff06e1c1..000000000000 --- a/modules/nodes-containers-remote-commands-protocol.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-remote-commands.adoc - -[id="nodes-containers-remote-commands-protocol_{context}"] -= Protocol for initiating a remote command from a client - -Clients initiate the execution of a remote command in a container by issuing a -request to the Kubernetes API server: - -[source, terminal] ----- -/proxy/nodes/<node_name>/exec/<namespace>/<pod>/<container>?command=<command> ----- - -In the above URL: - -- `<node_name>` is the FQDN of the node. -- `<namespace>` is the project of the target pod. -- `<pod>` is the name of the target pod. -- `<container>` is the name of the target container. -- `<command>` is the desired command to be executed. - -For example: - -[source, terminal] ----- -/proxy/nodes/node123.openshift.com/exec/myns/mypod/mycontainer?command=date ----- - -Additionally, the client can add parameters to the request to indicate if: - -- the client should send input to the remote container's command (stdin). -- the client's terminal is a TTY. -- the remote container's command should send output from stdout to the client. -- the remote container's command should send output from stderr to the client. - -After sending an `exec` request to the API server, the client upgrades the -connection to one that supports multiplexed streams; the current implementation -uses *HTTP/2*. - -The client creates one stream each for stdin, stdout, and stderr. To distinguish -among the streams, the client sets the `streamType` header on the stream to one -of `stdin`, `stdout`, or `stderr`. - -The client closes all streams, the upgraded connection, and the underlying -connection when it is finished with the remote command execution request. diff --git a/modules/nodes-containers-start-pod-safe-sysctls.adoc b/modules/nodes-containers-start-pod-safe-sysctls.adoc deleted file mode 100644 index 998363d5c84a..000000000000 --- a/modules/nodes-containers-start-pod-safe-sysctls.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: PROCEDURE -[id="nodes-starting-pod-safe-sysctls_{context}"] -= Starting a pod with safe sysctls - -You can set sysctls on pods using the pod's `securityContext`. The `securityContext` applies to all containers in the same pod. - -Safe sysctls are allowed by default. - -This example uses the pod `securityContext` to set the following safe sysctls: - -* `kernel.shm_rmid_forced` -* `net.ipv4.ip_local_port_range` -* `net.ipv4.tcp_syncookies` -* `net.ipv4.ping_group_range` - -[WARNING] -==== -To avoid destabilizing your operating system, modify sysctl parameters only after you understand their effects. -==== - -Use this procedure to start a pod with the configured sysctl settings. 
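The relevant addition is the `sysctls` list under the pod-level `securityContext`. The following minimal sketch shows only that fragment, using two of the safe sysctls listed above; the values are examples:

[source,yaml]
----
spec:
  securityContext:
    sysctls:
    - name: kernel.shm_rmid_forced
      value: "1"
    - name: net.ipv4.ip_local_port_range
      value: "32770 60666"
----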
-[NOTE] -==== -In most cases you modify an existing pod definition and add the `securityContext` spec. -==== - - -.Procedure - -. Create a YAML file `sysctl_pod.yaml` that defines an example pod and add the `securityContext` spec, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sysctl-example - namespace: default -spec: - containers: - - name: podexample - image: centos - command: ["bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 <1> - runAsGroup: 3000 <2> - allowPrivilegeEscalation: false <3> - capabilities: <4> - drop: ["ALL"] - securityContext: - runAsNonRoot: true <5> - seccompProfile: <6> - type: RuntimeDefault - sysctls: - - name: kernel.shm_rmid_forced - value: "1" - - name: net.ipv4.ip_local_port_range - value: "32770 60666" - - name: net.ipv4.tcp_syncookies - value: "0" - - name: net.ipv4.ping_group_range - value: "0 200000000" ----- -<1> `runAsUser` controls which user ID the container is run with. -<2> `runAsGroup` controls which primary group ID the containers is run with. -<3> `allowPrivilegeEscalation` determines if a pod can request to allow privilege escalation. If unspecified, it defaults to true. This boolean directly controls whether the `no_new_privs` flag gets set on the container process. -<4> `capabilities` permit privileged actions without giving full root access. This policy ensures all capabilities are dropped from the pod. -<5> `runAsNonRoot: true` requires that the container will run with a user with any UID other than 0. -<6> `RuntimeDefault` enables the default seccomp profile for a pod or container workload. - -. Create the pod by running the following command: -+ -[source,terminal] ----- -$ oc apply -f sysctl_pod.yaml ----- -+ -. Verify that the pod is created by running the following command: -+ -[source,terminal] ----- -$ oc get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -sysctl-example 1/1 Running 0 14s ----- - -. Log in to the pod by running the following command: -+ -[source,terminal] ----- -$ oc rsh sysctl-example ----- - -. Verify the values of the configured sysctl flags. For example, find the value `kernel.shm_rmid_forced` by running the following command: -+ -[source,terminal] ----- -sh-4.4# sysctl kernel.shm_rmid_forced ----- -+ -.Expected output -[source,terminal] ----- -kernel.shm_rmid_forced = 1 ----- diff --git a/modules/nodes-containers-sysctls-about.adoc b/modules/nodes-containers-sysctls-about.adoc deleted file mode 100644 index 0b61b954b1e8..000000000000 --- a/modules/nodes-containers-sysctls-about.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: CONCEPT -[id="nodes-containers-sysctls-about_{context}"] -= About sysctls - -In Linux, the sysctl interface allows an administrator to modify kernel parameters at runtime. Parameters are available from the `_/proc/sys/_` virtual process file system. The parameters cover various subsystems, such as: - -- kernel (common prefix: `_kernel._`) -- networking (common prefix: `_net._`) -- virtual memory (common prefix: `_vm._`) -- MDADM (common prefix: `_dev._`) - -More subsystems are described in link:https://www.kernel.org/doc/Documentation/sysctl/README[Kernel documentation]. 
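For example, you can inspect a single parameter either through the virtual file system or with the `sysctl` command. The parameter shown here is only an illustration:

[source,terminal]
----
$ cat /proc/sys/kernel/shm_rmid_forced
----

Or:

[source,terminal]
----
$ sysctl kernel.shm_rmid_forced
----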
-To get a list of all parameters, run: - -[source,terminal] ----- -$ sudo sysctl -a ----- \ No newline at end of file diff --git a/modules/nodes-containers-sysctls-setting.adoc b/modules/nodes-containers-sysctls-setting.adoc deleted file mode 100644 index a8865c964ccf..000000000000 --- a/modules/nodes-containers-sysctls-setting.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-starting-pod-with-unsafe-sysctls_{context}"] -= Starting a pod with unsafe sysctls - -A pod with unsafe sysctls fails to launch on any node unless the cluster administrator explicitly enables unsafe sysctls for that node. As with node-level sysctls, use the taints and toleration feature or labels on nodes to schedule those pods onto the right nodes. - -The following example uses the pod `securityContext` to set a safe sysctl `kernel.shm_rmid_forced` and two unsafe sysctls, `net.core.somaxconn` and `kernel.msgmax`. There is no distinction between _safe_ and _unsafe_ sysctls in the specification. - -[WARNING] -==== -To avoid destabilizing your operating system, modify sysctl parameters only after you understand their effects. -==== - -The following example illustrates what happens when you add safe and unsafe sysctls to a pod specification: - -.Procedure - -. Create a YAML file `sysctl-example-unsafe.yaml` that defines an example pod and add the `securityContext` specification, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sysctl-example-unsafe -spec: - containers: - - name: podexample - image: centos - command: ["bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 - runAsGroup: 3000 - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - sysctls: - - name: kernel.shm_rmid_forced - value: "0" - - name: net.core.somaxconn - value: "1024" - - name: kernel.msgmax - value: "65536" ----- - -. Create the pod using the following command: -+ -[source,terminal] ----- -$ oc apply -f sysctl-example-unsafe.yaml ----- - -. Verify that the pod is scheduled but does not deploy because unsafe sysctls are not allowed for the node using the following command: -+ -[source,terminal] ----- -$ oc get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -sysctl-example-unsafe 0/1 SysctlForbidden 0 14s ----- diff --git a/modules/nodes-containers-sysctls-unsafe.adoc b/modules/nodes-containers-sysctls-unsafe.adoc deleted file mode 100644 index 89d48cc9fbdd..000000000000 --- a/modules/nodes-containers-sysctls-unsafe.adoc +++ /dev/null @@ -1,173 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-sysctls-unsafe_{context}"] -= Enabling unsafe sysctls - -A cluster administrator can allow certain unsafe sysctls for very special -situations such as high performance or real-time application tuning. - -If you want to use unsafe sysctls, a cluster administrator must enable them -individually for a specific type of node. The sysctls must be namespaced. - -You can further control which sysctls are set in pods by specifying lists of sysctls or sysctl patterns in the `allowedUnsafeSysctls` field of the Security Context Constraints. 
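For illustration, the corresponding stanza in a `SecurityContextConstraints` object might look similar to the following hypothetical fragment; the procedure in this module configures the kubelet rather than an SCC:

[source,yaml]
----
allowedUnsafeSysctls:
- "kernel.msg*"
- "net.core.somaxconn"
----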
- -- The `allowedUnsafeSysctls` option controls specific needs such as high performance or real-time application tuning. - -[WARNING] -==== -Due to their nature of being unsafe, the use of unsafe sysctls is -at-your-own-risk and can lead to severe problems, such as improper behavior of -containers, resource shortage, or breaking a node. -==== - -.Procedure - -. List existing MachineConfig objects for your {product-title} cluster to decide how to label your machine config by running the following command: -+ -[source,terminal] ----- -$ oc get machineconfigpool ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -master rendered-master-bfb92f0cd1684e54d8e234ab7423cc96 True False False 3 3 3 0 42m -worker rendered-worker-21b6cb9a0f8919c88caf39db80ac1fce True False False 3 3 3 0 42m ----- - -. Add a label to the machine config pool where the containers with the unsafe sysctls will run by running the following command: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=sysctl ----- -. Create a YAML file `set-sysctl-worker.yaml` that defines a `KubeletConfig` custom resource (CR): -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: custom-kubelet -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: sysctl <1> - kubeletConfig: - allowedUnsafeSysctls: <2> - - "kernel.msg*" - - "net.core.somaxconn" ----- -<1> Specify the label from the machine config pool. -<2> List the unsafe sysctls you want to allow. - -. Create the object by running the following command: -+ -[source,terminal] ----- -$ oc apply -f set-sysctl-worker.yaml ----- - -. Wait for the Machine Config Operator to generate the new rendered configuration and apply it to the machines by running the following command: -+ -[source,terminal] ----- -$ oc get machineconfigpool worker -w ----- -+ -After some minutes the `UPDATING` status changes from True to False: -+ -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE -worker rendered-worker-f1704a00fc6f30d3a7de9a15fd68a800 False True False 3 2 2 0 71m -worker rendered-worker-f1704a00fc6f30d3a7de9a15fd68a800 False True False 3 2 3 0 72m -worker rendered-worker-0188658afe1f3a183ec8c4f14186f4d5 True False False 3 3 3 0 72m ----- -. Create a YAML file `sysctl-example-safe-unsafe.yaml` that defines an example pod and add the `securityContext` spec, as shown in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sysctl-example-safe-unsafe -spec: - containers: - - name: podexample - image: centos - command: ["bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 - runAsGroup: 3000 - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - sysctls: - - name: kernel.shm_rmid_forced - value: "0" - - name: net.core.somaxconn - value: "1024" - - name: kernel.msgmax - value: "65536" ----- - -. Create the pod by running the following command: -+ -[source,terminal] ----- -$ oc apply -f sysctl-example-safe-unsafe.yaml ----- -+ -.Expected output -+ -[source,terminal] ----- -Warning: would violate PodSecurity "restricted:latest": forbidden sysctls (net.core.somaxconn, kernel.msgmax) -pod/sysctl-example-safe-unsafe created ----- - -. 
Verify that the pod is created by running the following command: -+ -[source,terminal] ----- -$ oc get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -sysctl-example-safe-unsafe 1/1 Running 0 19s ----- - -. Log in to the pod by running the following command: -+ -[source,terminal] ----- -$ oc rsh sysctl-example-safe-unsafe ----- - -. Verify the values of the configured sysctl flags. For example, find the value `net.core.somaxconn` by running the following command: -+ -[source,terminal] ----- -sh-4.4# sysctl net.core.somaxconn ----- -+ -.Expected output -[source,terminal] ----- -net.core.somaxconn = 1024 ----- - -The unsafe sysctl is now allowed and the value is set as defined in the `securityContext` spec of the updated pod specification. diff --git a/modules/nodes-containers-volumes-about.adoc b/modules/nodes-containers-volumes-about.adoc deleted file mode 100644 index 6100524a7e93..000000000000 --- a/modules/nodes-containers-volumes-about.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: CONCEPT -[id="nodes-containers-volumes-about_{context}"] -= Understanding volumes - -Volumes are mounted file systems available to pods and their -containers which may be backed by a number of host-local or network attached -storage endpoints. Containers are not persistent by default; on restart, their contents are -cleared. - -To ensure that the file system on the volume contains no errors and, if errors -are present, to repair them when possible, {product-title} invokes the `fsck` -utility prior to the `mount` utility. This occurs when either adding a volume or -updating an existing volume. - -The simplest volume type is `emptyDir`, which is a temporary directory on a -single machine. Administrators may also allow you to request a persistent volume that is automatically attached -to your pods. - -[NOTE] -==== -`emptyDir` volume storage may be restricted by a quota based on the pod's -FSGroup, if the FSGroup parameter is enabled by your cluster administrator. -==== - diff --git a/modules/nodes-containers-volumes-adding.adoc b/modules/nodes-containers-volumes-adding.adoc deleted file mode 100644 index 3242c0f37187..000000000000 --- a/modules/nodes-containers-volumes-adding.adoc +++ /dev/null @@ -1,244 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-volumes-adding_{context}"] -= Adding volumes to a pod - -You can add volumes and volume mounts to a pod. - -.Procedure - -To add a volume, a volume mount, or both to pod templates: - -[source,terminal] ----- -$ oc set volume <object_type>/<name> --add [options] ----- - -.Supported Options for Adding Volumes -[cols="3a*",options="header"] -|=== - -|Option |Description |Default - -|`--name` -|Name of the volume. -|Automatically generated, if not specified. - -|`-t, --type` -|Name of the volume source. Supported values: `emptyDir`, `hostPath`, `secret`, -`configmap`, `persistentVolumeClaim` or `projected`. -|`emptyDir` - -|`-c, --containers` -|Select containers by name. It can also take wildcard `'*'` that matches any -character. -|`'*'` - -|`-m, --mount-path` -|Mount path inside the selected containers. Do not mount to the container root, `/`, or any path that is the same in the host and the container. This can corrupt your host system if the container is sufficiently privileged, such as the host `/dev/pts` files. 
It is safe to mount the host by using `/host`. -| - -|`--path` -|Host path. Mandatory parameter for `--type=hostPath`. Do not mount to the container root, `/`, or any path that is the same in the host and the container. This can corrupt your host system if the container is sufficiently privileged, such as the host `/dev/pts` files. It is safe to mount the host by using `/host`. -| - -|`--secret-name` -|Name of the secret. Mandatory parameter for `--type=secret`. -| - -|`--configmap-name` -|Name of the configmap. Mandatory parameter for `--type=configmap`. -| - -|`--claim-name` -|Name of the persistent volume claim. Mandatory parameter for -`--type=persistentVolumeClaim`. -| - -|`--source` -|Details of volume source as a JSON string. Recommended if the desired volume -source is not supported by `--type`. -| - -|`-o, --output` -|Display the modified objects instead of updating them on the server. Supported -values: `json`, `yaml`. -| - -|`--output-version` -|Output the modified objects with the given version. -|`api-version` -|=== - - -For example: - -* To add a new volume source *emptyDir* to the *registry* `DeploymentConfig` object: -+ -[source,terminal] ----- -$ oc set volume dc/registry --add ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the volume: - -.Sample deployment config with an added volume -[%collapsible] -===== -[source,yaml] ----- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: registry - namespace: registry -spec: - replicas: 3 - selector: - app: httpd - template: - metadata: - labels: - app: httpd - spec: - volumes: <1> - - name: volume-pppsw - emptyDir: {} - containers: - - name: httpd - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest - ports: - - containerPort: 8080 - protocol: TCP ----- -<1> Add the volume source *emptyDir*. -===== -==== - -* To add volume *v1* with secret *secret1* for replication controller *r1* and mount -inside the containers at *_/data_*: -+ -[source,terminal] ----- -$ oc set volume rc/r1 --add --name=v1 --type=secret --secret-name='secret1' --mount-path=/data ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the volume: - -.Sample replication controller with added volume and secret -[%collapsible] -===== -[source,yaml] ----- -kind: ReplicationController -apiVersion: v1 -metadata: - name: example-1 - namespace: example -spec: - replicas: 0 - selector: - app: httpd - deployment: example-1 - deploymentconfig: example - template: - metadata: - creationTimestamp: null - labels: - app: httpd - deployment: example-1 - deploymentconfig: example - spec: - volumes: <1> - - name: v1 - secret: - secretName: secret1 - defaultMode: 420 - containers: - - name: httpd - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest - volumeMounts: <2> - - name: v1 - mountPath: /data ----- -<1> Add the volume and secret. -<2> Add the container mount path. 
-===== -==== - -* To add existing persistent volume *v1* with claim name *pvc1* to deployment -configuration *_dc.json_* on disk, mount the volume on container *c1* at -*_/data_*, and update the `DeploymentConfig` object on the server: -+ -[source,terminal] ----- -$ oc set volume -f dc.json --add --name=v1 --type=persistentVolumeClaim \ - --claim-name=pvc1 --mount-path=/data --containers=c1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the volume: - -.Sample deployment config with persistent volume added -[%collapsible] -===== -[source,yaml] ----- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: example - namespace: example -spec: - replicas: 3 - selector: - app: httpd - template: - metadata: - labels: - app: httpd - spec: - volumes: - - name: volume-pppsw - emptyDir: {} - - name: v1 <1> - persistentVolumeClaim: - claimName: pvc1 - containers: - - name: httpd - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest - ports: - - containerPort: 8080 - protocol: TCP - volumeMounts: <2> - - name: v1 - mountPath: /data ----- -<1> Add the persistent volume claim named `pvc1. -<2> Add the container mount path. -===== -==== - -* To add a volume *v1* based on Git repository -*$$https://github.com/namespace1/project1$$* with revision *5125c45f9f563* for -all replication controllers: -+ -[source,terminal] ----- -$ oc set volume rc --all --add --name=v1 \ - --source='{"gitRepo": { - "repository": "https://github.com/namespace1/project1", - "revision": "5125c45f9f563" - }}' ----- diff --git a/modules/nodes-containers-volumes-cli.adoc b/modules/nodes-containers-volumes-cli.adoc deleted file mode 100644 index f922926e9b8e..000000000000 --- a/modules/nodes-containers-volumes-cli.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -[id="nodes-containers-volumes-cli_{context}"] -= Working with volumes using the {product-title} CLI - -You can use the CLI command `oc set volume` to add and remove volumes and -volume mounts for any object that has a pod template like replication controllers or -deployment configs. You can also list volumes in pods or any -object that has a pod template. - -The `oc set volume` command uses the following general syntax: - -[source,terminal] ----- -$ oc set volume <object_selection> <operation> <mandatory_parameters> <options> ----- - - -Object selection:: -Specify one of the following for the `object_selection` parameter in the `oc set volume` command: - -[id="vol-object-selection_{context}"] -.Object Selection -[cols="3a*",options="header"] -|=== - -|Syntax |Description |Example - -|`_<object_type>_ _<name>_` -|Selects `_<name>_` of type `_<object_type>_`. -|`deploymentConfig registry` - -|`_<object_type>_/_<name>_` -|Selects `_<name>_` of type `_<object_type>_`. -|`deploymentConfig/registry` - -|`_<object_type>_` -`--selector=_<object_label_selector>_` -|Selects resources of type `_<object_type>_` that matched the given label -selector. -|`deploymentConfig` -`--selector="name=registry"` - -|`_<object_type>_ --all` -|Selects all resources of type `_<object_type>_`. -|`deploymentConfig --all` - -|`-f` or -`--filename=_<file_name>_` -|File name, directory, or URL to file to use to edit the resource. -|`-f registry-deployment-config.json` -|=== - - -Operation:: -Specify `--add` or `--remove` for the `operation` parameter in the `oc set volume` command. 
- -Mandatory parameters:: -Any mandatory parameters are specific to the -selected operation and are discussed in later sections. - -Options:: -Any options are specific to the -selected operation and are discussed in later sections. diff --git a/modules/nodes-containers-volumes-listing.adoc b/modules/nodes-containers-volumes-listing.adoc deleted file mode 100644 index 2e5625f797f1..000000000000 --- a/modules/nodes-containers-volumes-listing.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-volumes-listing_{context}"] -= Listing volumes and volume mounts in a pod - -You can list volumes and volume mounts in pods or pod templates: - -.Procedure - -To list volumes: - -[source,terminal] ----- -$ oc set volume <object_type>/<name> [options] ----- - -List volume supported options: -[cols="3a*",options="header"] -|=== - -|Option |Description |Default - -|`--name` -|Name of the volume. -| - -|`-c, --containers` -|Select containers by name. It can also take wildcard `'*'` that matches any -character. -|`'*'` -|=== - -For example: - -* To list all volumes for pod *p1*: -+ -[source,terminal] ----- -$ oc set volume pod/p1 ----- - -* To list volume *v1* defined on all deployment configs: -+ -[source,terminal] ----- -$ oc set volume dc --all --name=v1 ----- diff --git a/modules/nodes-containers-volumes-removing.adoc b/modules/nodes-containers-volumes-removing.adoc deleted file mode 100644 index 3adf7683fb04..000000000000 --- a/modules/nodes-containers-volumes-removing.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-volumes-removing_{context}"] -= Removing volumes and volume mounts from a pod - -You can remove a volume or volume mount from a pod. - -.Procedure - -To remove a volume from pod templates: - -[source,terminal] ----- -$ oc set volume <object_type>/<name> --remove [options] ----- - -.Supported options for removing volumes -[cols="3a*",options="header"] -|=== - -|Option |Description |Default - -|`--name` -|Name of the volume. -| - -|`-c, --containers` -|Select containers by name. It can also take wildcard `'*'` that matches any character. -|`'*'` - -|`--confirm` -|Indicate that you want to remove multiple volumes at once. -| - -|`-o, --output` -|Display the modified objects instead of updating them on the server. Supported values: `json`, `yaml`. -| - -|`--output-version` -|Output the modified objects with the given version. 
-|`api-version` -|=== - -For example: - -* To remove a volume *v1* from the `DeploymentConfig` object *d1*: -+ -[source,terminal] ----- -$ oc set volume dc/d1 --remove --name=v1 ----- - -* To unmount volume *v1* from container *c1* for the `DeploymentConfig` object *d1* and remove the volume *v1* if it is not referenced by any containers on *d1*: -+ -[source,terminal] ----- -$ oc set volume dc/d1 --remove --name=v1 --containers=c1 ----- - -* To remove all volumes for replication controller *r1*: -+ -[source,terminal] ----- -$ oc set volume rc/r1 --remove --confirm ----- diff --git a/modules/nodes-containers-volumes-subpath.adoc b/modules/nodes-containers-volumes-subpath.adoc deleted file mode 100644 index 9ed550fe71f8..000000000000 --- a/modules/nodes-containers-volumes-subpath.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-volumes-subpath_{context}"] -= Configuring volumes for multiple uses in a pod - -You can configure a volume to allows you to share one volume for -multiple uses in a single pod using the `volumeMounts.subPath` property to specify a `subPath` value inside a volume -instead of the volume's root. - -.Procedure - -. View the list of files in the volume, run the `oc rsh` command: -+ -[source,terminal] ----- -$ oc rsh <pod> ----- -+ -.Example output -[source,terminal] ----- -sh-4.2$ ls /path/to/volume/subpath/mount -example_file1 example_file2 example_file3 ----- - -. Specify the `subPath`: -+ -.Example `Pod` spec with `subPath` parameter -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-site -spec: - containers: - - name: mysql - image: mysql - volumeMounts: - - mountPath: /var/lib/mysql - name: site-data - subPath: mysql <1> - - name: php - image: php - volumeMounts: - - mountPath: /var/www/html - name: site-data - subPath: html <2> - volumes: - - name: site-data - persistentVolumeClaim: - claimName: my-site-data ----- -<1> Databases are stored in the `mysql` folder. -<2> HTML content is stored in the `html` folder. diff --git a/modules/nodes-containers-volumes-updating.adoc b/modules/nodes-containers-volumes-updating.adoc deleted file mode 100644 index 5f082dcb0efc..000000000000 --- a/modules/nodes-containers-volumes-updating.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-containers-volumes.adoc - -:_content-type: PROCEDURE -[id="nodes-containers-volumes-updating_{context}"] -= Updating volumes and volume mounts in a pod - -You can modify the volumes and volume mounts in a pod. 
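Before you overwrite a volume, you can confirm which volumes the object currently defines. For example, for a hypothetical `DeploymentConfig` object `d1`:

[source,terminal]
----
$ oc set volume dc/d1
----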
- -.Procedure - -Updating existing volumes using the `--overwrite` option: - -[source,terminal] ----- -$ oc set volume <object_type>/<name> --add --overwrite [options] ----- - -For example: - -* To replace existing volume *v1* for replication controller *r1* with existing -persistent volume claim *pvc1*: -+ -[source,terminal] ----- -$ oc set volume rc/r1 --add --overwrite --name=v1 --type=persistentVolumeClaim --claim-name=pvc1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to replace the volume: - -.Sample replication controller with persistent volume claim named `pvc1` -[%collapsible] -===== -[source,yaml] ----- -kind: ReplicationController -apiVersion: v1 -metadata: - name: example-1 - namespace: example -spec: - replicas: 0 - selector: - app: httpd - deployment: example-1 - deploymentconfig: example - template: - metadata: - labels: - app: httpd - deployment: example-1 - deploymentconfig: example - spec: - volumes: - - name: v1 <1> - persistentVolumeClaim: - claimName: pvc1 - containers: - - name: httpd - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest - ports: - - containerPort: 8080 - protocol: TCP - volumeMounts: - - name: v1 - mountPath: /data ----- -<1> Set persistent volume claim to `pvc1`. -===== -==== - -* To change the `DeploymentConfig` object *d1* mount point to *_/opt_* for volume *v1*: -+ -[source,terminal] ----- -$ oc set volume dc/d1 --add --overwrite --name=v1 --mount-path=/opt ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to change the mount point: - -.Sample deployment config with mount point set to `opt`. -[%collapsible] -===== -[source,yaml] ----- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: example - namespace: example -spec: - replicas: 3 - selector: - app: httpd - template: - metadata: - labels: - app: httpd - spec: - volumes: - - name: volume-pppsw - emptyDir: {} - - name: v2 - persistentVolumeClaim: - claimName: pvc1 - - name: v1 - persistentVolumeClaim: - claimName: pvc1 - containers: - - name: httpd - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest - ports: - - containerPort: 8080 - protocol: TCP - volumeMounts: <1> - - name: v1 - mountPath: /opt ----- -<1> Set the mount point to `/opt`. -===== -==== diff --git a/modules/nodes-control-plane-osp-migrating.adoc b/modules/nodes-control-plane-osp-migrating.adoc deleted file mode 100644 index 0c75476b09dd..000000000000 --- a/modules/nodes-control-plane-osp-migrating.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-nodes-working.adoc - -:_content-type: PROCEDURE -[id="nodes-control-plane-osp-migrating_{context}"] -= Migrating control plane nodes from one RHOSP host to another - -You can run a script that moves a control plane node from one {rh-openstack-first} node to another. - -.Prerequisites - -* The environment variable `OS_CLOUD` refers to a `clouds` entry that has administrative credentials in a `clouds.yaml` file. -* The environment variable `KUBECONFIG` refers to a configuration that contains administrative {product-title} credentials. - -.Procedure - -* From a command line, run the following script: - -[source,bash] ----- -#!/usr/bin/env bash - -set -Eeuo pipefail - -if [ $# -lt 1 ]; then - echo "Usage: '$0 node_name'" - exit 64 -fi - -# Check for admin OpenStack credentials -openstack server list --all-projects >/dev/null || { >&2 echo "The script needs OpenStack admin credentials. 
Exiting"; exit 77; } - -# Check for admin OpenShift credentials -oc adm top node >/dev/null || { >&2 echo "The script needs OpenShift admin credentials. Exiting"; exit 77; } - -set -x - -declare -r node_name="$1" -declare server_id -server_id="$(openstack server list --all-projects -f value -c ID -c Name | grep "$node_name" | cut -d' ' -f1)" -readonly server_id - -# Drain the node -oc adm cordon "$node_name" -oc adm drain "$node_name" --delete-emptydir-data --ignore-daemonsets --force - -# Power off the server -oc debug "node/${node_name}" -- chroot /host shutdown -h 1 - -# Verify the server is shut off -until openstack server show "$server_id" -f value -c status | grep -q 'SHUTOFF'; do sleep 5; done - -# Migrate the node -openstack server migrate --wait "$server_id" - -# Resize the VM -openstack server resize confirm "$server_id" - -# Wait for the resize confirm to finish -until openstack server show "$server_id" -f value -c status | grep -q 'SHUTOFF'; do sleep 5; done - -# Restart the VM -openstack server start "$server_id" - -# Wait for the node to show up as Ready: -until oc get node "$node_name" | grep -q "^${node_name}[[:space:]]\+Ready"; do sleep 5; done - -# Uncordon the node -oc adm uncordon "$node_name" - -# Wait for cluster operators to stabilize -until oc get co -o go-template='statuses: {{ range .items }}{{ range .status.conditions }}{{ if eq .type "Degraded" }}{{ if ne .status "False" }}DEGRADED{{ end }}{{ else if eq .type "Progressing"}}{{ if ne .status "False" }}PROGRESSING{{ end }}{{ else if eq .type "Available"}}{{ if ne .status "True" }}NOTAVAILABLE{{ end }}{{ end }}{{ end }}{{ end }}' | grep -qv '\(DEGRADED\|PROGRESSING\|NOTAVAILABLE\)'; do sleep 5; done ----- - -If the script completes, the control plane machine is migrated to a new {rh-openstack} node. \ No newline at end of file diff --git a/modules/nodes-descheduler-about.adoc b/modules/nodes-descheduler-about.adoc deleted file mode 100644 index 0af8c240f1e0..000000000000 --- a/modules/nodes-descheduler-about.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -:_content-type: CONCEPT -[id="nodes-descheduler-about_{context}"] -= About the descheduler - -You can use the descheduler to evict pods based on specific strategies so that the pods can be rescheduled onto more appropriate nodes. - -You can benefit from descheduling running pods in situations such as the following: - -* Nodes are underutilized or overutilized. -* Pod and node affinity requirements, such as taints or labels, have changed and the original scheduling decisions are no longer appropriate for certain nodes. -* Node failure requires pods to be moved. -* New nodes are added to clusters. -* Pods have been restarted too many times. - -[IMPORTANT] -==== -The descheduler does not schedule replacement of evicted pods. The scheduler automatically performs this task for the evicted pods. -==== - -When the descheduler decides to evict pods from a node, it employs the following general mechanism: - -* Pods in the `openshift-*` and `kube-system` namespaces are never evicted. -* Critical pods with `priorityClassName` set to `system-cluster-critical` or `system-node-critical` are never evicted. -* Static, mirrored, or stand-alone pods that are not part of a replication controller, replica set, deployment, or job are never evicted because these pods will not be recreated. -* Pods associated with daemon sets are never evicted. -* Pods with local storage are never evicted. 
-* Best effort pods are evicted before burstable and guaranteed pods. -* All types of pods with the `descheduler.alpha.kubernetes.io/evict` annotation are eligible for eviction. This annotation is used to override checks that prevent eviction, and the user can select which pod is evicted. Users should know how and if the pod will be recreated. -* Pods subject to pod disruption budget (PDB) are not evicted if descheduling violates its pod disruption budget (PDB). The pods are evicted by using eviction subresource to handle PDB. diff --git a/modules/nodes-descheduler-configuring-interval.adoc b/modules/nodes-descheduler-configuring-interval.adoc deleted file mode 100644 index 49de51dd4c78..000000000000 --- a/modules/nodes-descheduler-configuring-interval.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -:_content-type: PROCEDURE -[id="nodes-descheduler-configuring-interval_{context}"] -= Configuring the descheduler interval - -You can configure the amount of time between descheduler runs. The default is 3600 seconds (one hour). - -.Prerequisites - -* Cluster administrator privileges - -.Procedure - -. Edit the `KubeDescheduler` object: -+ -[source,terminal] ----- -$ oc edit kubedeschedulers.operator.openshift.io cluster -n openshift-kube-descheduler-operator ----- - -. Update the `deschedulingIntervalSeconds` field to the desired value: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: KubeDescheduler -metadata: - name: cluster - namespace: openshift-kube-descheduler-operator -spec: - deschedulingIntervalSeconds: 3600 <1> -... ----- -<1> Set the number of seconds between descheduler runs. A value of `0` in this field runs the descheduler once and exits. - -. Save the file to apply the changes. diff --git a/modules/nodes-descheduler-configuring-profiles.adoc b/modules/nodes-descheduler-configuring-profiles.adoc deleted file mode 100644 index c3364dae5a2b..000000000000 --- a/modules/nodes-descheduler-configuring-profiles.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -:_content-type: PROCEDURE -[id="nodes-descheduler-configuring-profiles_{context}"] -= Configuring descheduler profiles - -You can configure which profiles the descheduler uses to evict pods. - -.Prerequisites - -* Cluster administrator privileges - -.Procedure - -. Edit the `KubeDescheduler` object: -+ -[source,terminal] ----- -$ oc edit kubedeschedulers.operator.openshift.io cluster -n openshift-kube-descheduler-operator ----- - -. Specify one or more profiles in the `spec.profiles` section. -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: KubeDescheduler -metadata: - name: cluster - namespace: openshift-kube-descheduler-operator -spec: - deschedulingIntervalSeconds: 3600 - logLevel: Normal - managementState: Managed - operatorLogLevel: Normal - mode: Predictive <1> - profileCustomizations: - namespaces: <2> - excluded: - - my-namespace - podLifetime: 48h <3> - thresholdPriorityClassName: my-priority-class-name <4> - profiles: <5> - - AffinityAndTaints - - TopologyAndDuplicates <6> - - LifecycleAndUtilization - - EvictPodsWithLocalStorage - - EvictPodsWithPVC ----- -+ --- -<1> Optional: By default, the descheduler does not evict pods. To evict pods, set `mode` to `Automatic`. -<2> Optional: Set a list of user-created namespaces to include or exclude from descheduler operations. 
Use `excluded` to set a list of namespaces to exclude or use `included` to set a list of namespaces to include. Note that protected namespaces (`openshift-*`, `kube-system`, `hypershift`) are excluded by default. -<3> Optional: Enable a custom pod lifetime value for the `LifecycleAndUtilization` profile. Valid units are `s`, `m`, or `h`. The default pod lifetime is 24 hours. -<4> Optional: Specify a priority threshold to consider pods for eviction only if their priority is lower than the specified level. Use the `thresholdPriority` field to set a numerical priority threshold (for example, `10000`) or use the `thresholdPriorityClassName` field to specify a certain priority class name (for example, `my-priority-class-name`). If you specify a priority class name, it must already exist or the descheduler will throw an error. Do not set both `thresholdPriority` and `thresholdPriorityClassName`. -<5> Add one or more profiles to enable. Available profiles: `AffinityAndTaints`, `TopologyAndDuplicates`, `LifecycleAndUtilization`, `SoftTopologyAndDuplicates`, `EvictPodsWithLocalStorage`, and `EvictPodsWithPVC`. -<6> Do not enable both `TopologyAndDuplicates` and `SoftTopologyAndDuplicates`. Enabling both results in a conflict. - -You can enable multiple profiles; the order that the profiles are specified in is not important. --- -+ -. Save the file to apply the changes. diff --git a/modules/nodes-descheduler-installing.adoc b/modules/nodes-descheduler-installing.adoc deleted file mode 100644 index f9af8364aa64..000000000000 --- a/modules/nodes-descheduler-installing.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -ifeval::["{context}" == "nodes-descheduler"] -:nodes: -endif::[] - -ifeval::["{context}" == "virt-enabling-descheduler-evictions"] -:virt: -endif::[] - -:_content-type: PROCEDURE -[id="nodes-descheduler-installing_{context}"] -= Installing the descheduler - -The descheduler is not available by default. To enable the descheduler, you must install the Kube Descheduler Operator from OperatorHub and enable one or more descheduler profiles. - -By default, the descheduler runs in predictive mode, which means that it only simulates pod evictions. You must change the mode to automatic for the descheduler to perform the pod evictions. - -[IMPORTANT] -==== -If you have enabled hosted control planes in your cluster, set a custom priority threshold to lower the chance that pods in the hosted control plane namespaces are evicted. Set the priority threshold class name to `hypershift-control-plane`, because it has the lowest priority value (`100000000`) of the hosted control plane priority classes. -==== - -.Prerequisites - -* Cluster administrator privileges. -* Access to the {product-title} web console. -ifdef::openshift-origin[] -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. -endif::[] - -.Procedure - -. Log in to the {product-title} web console. -. Create the required namespace for the Kube Descheduler Operator. -.. Navigate to *Administration* -> *Namespaces* and click *Create Namespace*. -.. 
Enter `openshift-kube-descheduler-operator` in the *Name* field, enter `openshift.io/cluster-monitoring=true` in the *Labels* field to enable descheduler metrics, and click *Create*. -. Install the Kube Descheduler Operator. -.. Navigate to *Operators* -> *OperatorHub*. -.. Type *Kube Descheduler Operator* into the filter box. -.. Select the *Kube Descheduler Operator* and click *Install*. -.. On the *Install Operator* page, select *A specific namespace on the cluster*. Select *openshift-kube-descheduler-operator* from the drop-down menu. -.. Adjust the values for the *Update Channel* and *Approval Strategy* to the desired values. -.. Click *Install*. -. Create a descheduler instance. -.. From the *Operators* -> *Installed Operators* page, click the *Kube Descheduler Operator*. -.. Select the *Kube Descheduler* tab and click *Create KubeDescheduler*. -.. Edit the settings as necessary. -... To evict pods instead of simulating the evictions, change the *Mode* field to *Automatic*. - -ifdef::virt[] -... Expand the *Profiles* section and select `DevPreviewLongLifecycle`. The `AffinityAndTaints` profile is enabled by default. -+ -[IMPORTANT] -==== -The only profile currently available for {VirtProductName} is `DevPreviewLongLifecycle`. -==== - -You can also configure the profiles and settings for the descheduler later using the OpenShift CLI (`oc`). -endif::virt[] -ifdef::nodes[] -... Expand the *Profiles* section to select one or more profiles to enable. The `AffinityAndTaints` profile is enabled by default. Click *Add Profile* to select additional profiles. -+ -[NOTE] -==== -Do not enable both `TopologyAndDuplicates` and `SoftTopologyAndDuplicates`. Enabling both results in a conflict. -==== -... Optional: Expand the *Profile Customizations* section to set optional configurations for the descheduler. -**** Set a custom pod lifetime value for the `LifecycleAndUtilization` profile. Use the *podLifetime* field to set a numerical value and a valid unit (`s`, `m`, or `h`). The default pod lifetime is 24 hours (`24h`). - -**** Set a custom priority threshold to consider pods for eviction only if their priority is lower than a specified priority level. Use the *thresholdPriority* field to set a numerical priority threshold or use the *thresholdPriorityClassName* field to specify a certain priority class name. -+ -[NOTE] -==== -Do not specify both *thresholdPriority* and *thresholdPriorityClassName* for the descheduler. -==== - -**** Set specific namespaces to exclude or include from descheduler operations. Expand the *namespaces* field and add namespaces to the *excluded* or *included* list. You can only either set a list of namespaces to exclude or a list of namespaces to include. Note that protected namespaces (`openshift-*`, `kube-system`, `hypershift`) are excluded by default. - -**** Experimental: Set thresholds for underutilization and overutilization for the `LowNodeUtilization` strategy. Use the *devLowNodeUtilizationThresholds* field to set one of the following values: -+ --- -***** `Low`: 10% underutilized and 30% overutilized -***** `Medium`: 20% underutilized and 50% overutilized (Default) -***** `High`: 40% underutilized and 70% overutilized --- -+ -[NOTE] -==== -This setting is experimental and should not be used in a production environment. -==== - -... Optional: Use the *Descheduling Interval Seconds* field to change the number of seconds between descheduler runs. The default is `3600` seconds. -.. Click *Create*. 
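The settings that you select in the web console are stored in the `KubeDescheduler` custom resource in the `openshift-kube-descheduler-operator` namespace. A resource created with automatic mode and the default profile might look similar to the following sketch; the interval and profile values are examples only:

[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: KubeDescheduler
metadata:
  name: cluster
  namespace: openshift-kube-descheduler-operator
spec:
  deschedulingIntervalSeconds: 3600
  mode: Automatic
  profiles:
  - AffinityAndTaints
----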
- -You can also configure the profiles and settings for the descheduler later using the OpenShift CLI (`oc`). If you did not adjust the profiles when creating the descheduler instance from the web console, the `AffinityAndTaints` profile is enabled by default. -endif::nodes[] - -ifeval::["{context}" == "nodes-descheduler"] -:!nodes: -endif::[] - -ifeval::["{context}" == "virt-enabling-descheduler-evictions"] -:!virt: -endif::[] diff --git a/modules/nodes-descheduler-profiles.adoc b/modules/nodes-descheduler-profiles.adoc deleted file mode 100644 index 4ed2771d7d58..000000000000 --- a/modules/nodes-descheduler-profiles.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -ifeval::["{context}" == "nodes-descheduler"] -:nodes: -endif::[] - -ifeval::["{context}" == "virt-enabling-descheduler-evictions"] -:virt: -endif::[] - -:_content-type: REFERENCE -[id="nodes-descheduler-profiles_{context}"] -= Descheduler profiles -ifdef::nodes[] -The following descheduler profiles are available: - -`AffinityAndTaints`:: This profile evicts pods that violate inter-pod anti-affinity, node affinity, and node taints. -+ -It enables the following strategies: -+ -* `RemovePodsViolatingInterPodAntiAffinity`: removes pods that are violating inter-pod anti-affinity. -* `RemovePodsViolatingNodeAffinity`: removes pods that are violating node affinity. -* `RemovePodsViolatingNodeTaints`: removes pods that are violating `NoSchedule` taints on nodes. -+ -Pods with a node affinity type of `requiredDuringSchedulingIgnoredDuringExecution` are removed. - -`TopologyAndDuplicates`:: This profile evicts pods in an effort to evenly spread similar pods, or pods of the same topology domain, among nodes. -+ -It enables the following strategies: -+ -* `RemovePodsViolatingTopologySpreadConstraint`: finds unbalanced topology domains and tries to evict pods from larger ones when `DoNotSchedule` constraints are violated. -* `RemoveDuplicates`: ensures that there is only one pod associated with a replica set, replication controller, deployment, or job running on same node. If there are more, those duplicate pods are evicted for better pod distribution in a cluster. - -`LifecycleAndUtilization`:: This profile evicts long-running pods and balances resource usage between nodes. -+ -It enables the following strategies: -+ -* `RemovePodsHavingTooManyRestarts`: removes pods whose containers have been restarted too many times. -+ -Pods where the sum of restarts over all containers (including Init Containers) is more than 100. - -* `LowNodeUtilization`: finds nodes that are underutilized and evicts pods, if possible, from overutilized nodes in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. -+ -A node is considered underutilized if its usage is below 20% for all thresholds (CPU, memory, and number of pods). -+ -A node is considered overutilized if its usage is above 50% for any of the thresholds (CPU, memory, and number of pods). - -* `PodLifeTime`: evicts pods that are too old. -+ -By default, pods that are older than 24 hours are removed. You can customize the pod lifetime value. - -`SoftTopologyAndDuplicates`:: This profile is the same as `TopologyAndDuplicates`, except that pods with soft topology constraints, such as `whenUnsatisfiable: ScheduleAnyway`, are also considered for eviction. -+ -[NOTE] -==== -Do not enable both `SoftTopologyAndDuplicates` and `TopologyAndDuplicates`. Enabling both results in a conflict. 
-==== - -`EvictPodsWithLocalStorage`:: This profile allows pods with local storage to be eligible for eviction. - -`EvictPodsWithPVC`:: This profile allows pods with persistent volume claims to be eligible for eviction. If you are using `Kubernetes NFS Subdir External Provisioner`, you must add an excluded namespace for the namespace where the provisioner is installed. -endif::nodes[] -ifdef::virt[] -Use the Technology Preview `DevPreviewLongLifecycle` profile to enable the descheduler on a virtual machine. This is the only descheduler profile currently available for {VirtProductName}. To ensure proper scheduling, create VMs with CPU and memory requests for the expected load. - -`DevPreviewLongLifecycle`:: This profile balances resource usage between nodes and enables the following strategies: -+ -* `RemovePodsHavingTooManyRestarts`: removes pods whose containers have been restarted too many times and pods where the sum of restarts over all containers (including Init Containers) is more than 100. Restarting the VM guest operating system does not increase this count. -* `LowNodeUtilization`: evicts pods from overutilized nodes when there are any underutilized nodes. The destination node for the evicted pod will be determined by the scheduler. -** A node is considered underutilized if its usage is below 20% for all thresholds (CPU, memory, and number of pods). -** A node is considered overutilized if its usage is above 50% for any of the thresholds (CPU, memory, and number of pods). -endif::virt[] - -ifeval::["{context}" == "nodes-descheduler"] -:!nodes: -endif::[] - -ifeval::["{context}" == "virt-enabling-descheduler-evictions"] -:!virt: -endif::[] diff --git a/modules/nodes-descheduler-uninstalling.adoc b/modules/nodes-descheduler-uninstalling.adoc deleted file mode 100644 index b36a732aac77..000000000000 --- a/modules/nodes-descheduler-uninstalling.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-descheduler.adoc - -:_content-type: PROCEDURE -[id="nodes-descheduler-uninstalling_{context}"] -= Uninstalling the descheduler - -You can remove the descheduler from your cluster by removing the descheduler instance and uninstalling the Kube Descheduler Operator. This procedure also cleans up the `KubeDescheduler` CRD and `openshift-kube-descheduler-operator` namespace. - -.Prerequisites - -* Cluster administrator privileges. -* Access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. -. Delete the descheduler instance. -.. From the *Operators* -> *Installed Operators* page, click *Kube Descheduler Operator*. -.. Select the *Kube Descheduler* tab. -.. Click the Options menu {kebab} next to the *cluster* entry and select *Delete KubeDescheduler*. -.. In the confirmation dialog, click *Delete*. -. Uninstall the Kube Descheduler Operator. -.. Navigate to *Operators* -> *Installed Operators*. -.. Click the Options menu {kebab} next to the *Kube Descheduler Operator* entry and select *Uninstall Operator*. -.. In the confirmation dialog, click *Uninstall*. -. Delete the `openshift-kube-descheduler-operator` namespace. -.. Navigate to *Administration* -> *Namespaces*. -.. Enter `openshift-kube-descheduler-operator` into the filter box. -.. Click the Options menu {kebab} next to the *openshift-kube-descheduler-operator* entry and select *Delete Namespace*. -.. In the confirmation dialog, enter `openshift-kube-descheduler-operator` and click *Delete*. -. Delete the `KubeDescheduler` CRD. 
-.. Navigate to *Administration* -> *Custom Resource Definitions*. -.. Enter `KubeDescheduler` into the filter box. -.. Click the Options menu {kebab} next to the *KubeDescheduler* entry and select *Delete CustomResourceDefinition*. -.. In the confirmation dialog, click *Delete*. diff --git a/modules/nodes-edge-remote-workers-network.adoc b/modules/nodes-edge-remote-workers-network.adoc deleted file mode 100644 index 6029aec4e808..000000000000 --- a/modules/nodes-edge-remote-workers-network.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/nodes-edge-remote-workers.adoc - -[id="nodes-edge-remote-workers-network_{context}"] -= Network separation with remote worker nodes - -All nodes send heartbeats to the Kubernetes Controller Manager Operator (kube controller) in the {product-title} cluster every 10 seconds. If the cluster does not receive heartbeats from a node, {product-title} responds using several default mechanisms. - -{product-title} is designed to be resilient to network partitions and other disruptions. You can mitigate some of the more common disruptions, such as interruptions from software upgrades, network splits, and routing issues. Mitigation strategies include ensuring that pods on remote worker nodes request the correct amount of CPU and memory resources, configuring an appropriate replication policy, using redundancy across zones, and using Pod Disruption Budgets on workloads. - -If the kube controller loses contact with a node after a configured period, the node controller on the control plane updates the node health to `Unhealthy` and marks the node `Ready` condition as `Unknown`. In response, the scheduler stops scheduling pods to that node. The on-premise node controller adds a `node.kubernetes.io/unreachable` taint with a `NoExecute` effect to the node and schedules pods on the node for eviction after five minutes, by default. - -If a workload controller, such as a `Deployment` object or `StatefulSet` object, is directing traffic to pods on the unhealthy node and other nodes can reach the cluster, {product-title} routes the traffic away from the pods on the node. Nodes that cannot reach the cluster do not get updated with the new traffic routing. As a result, the workloads on those nodes might continue to attempt to reach the unhealthy node. - -You can mitigate the effects of connection loss by: - -* using daemon sets to create pods that tolerate the taints -* using static pods that automatically restart if a node goes down -* using Kubernetes zones to control pod eviction -* configuring pod tolerations to delay or avoid pod eviction -* configuring the kubelet to control the timing of when it marks nodes as unhealthy. diff --git a/modules/nodes-edge-remote-workers-power.adoc b/modules/nodes-edge-remote-workers-power.adoc deleted file mode 100644 index b48d1311150b..000000000000 --- a/modules/nodes-edge-remote-workers-power.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/nodes-edge-remote-workers.adoc - -[id="nodes-edge-remote-workers-power_{context}"] -= Power loss on remote worker nodes - -If a remote worker node loses power or restarts ungracefully, {product-title} responds using several default mechanisms. - -If the Kubernetes Controller Manager Operator (kube controller) loses contact with a node after a configured period, the control plane updates the node health to `Unhealthy` and marks the node `Ready` condition as `Unknown`. 
In response, the scheduler stops scheduling pods to that node. The on-premise node controller adds a `node.kubernetes.io/unreachable` taint with a `NoExecute` effect to the node and schedules pods on the node for eviction after five minutes, by default. - -On the node, the pods must be restarted when the node recovers power and reconnects with the control plane. - -[NOTE] -==== -If you want the pods to restart immediately upon restart, use static pods. -==== - -After the node restarts, the kubelet also restarts and attempts to restart the pods that were scheduled on the node. If the connection to the control plane takes longer than the default five minutes, the control plane cannot update the node health and remove the `node.kubernetes.io/unreachable` taint. On the node, the kubelet terminates any running pods. When these conditions are cleared, the scheduler can start scheduling pods to that node. - -You can mitigate the effects of power loss by: - -* using daemon sets to create pods that tolerate the taints -* using static pods that automatically restart with a node -* configuring pods tolerations to delay or avoid pod eviction -* configuring the kubelet to control the timing of when the node controller marks nodes as unhealthy. - diff --git a/modules/nodes-edge-remote-workers-strategies.adoc b/modules/nodes-edge-remote-workers-strategies.adoc deleted file mode 100644 index 21b7f79513a3..000000000000 --- a/modules/nodes-edge-remote-workers-strategies.adoc +++ /dev/null @@ -1,170 +0,0 @@ -// Module included in the following assemblies: -// -// * logging/nodes-edge-remote-workers.adoc - -[id="nodes-edge-remote-workers-strategies_{context}"] -= Remote worker node strategies - -If you use remote worker nodes, consider which objects to use to run your applications. - -It is recommended to use daemon sets or static pods based on the behavior you want in the event of network issues or power loss. In addition, you can use Kubernetes zones and tolerations to control or avoid pod evictions if the control plane cannot reach remote worker nodes. - -[id="nodes-edge-remote-workers-strategies-daemonsets_{context}"] -Daemon sets:: -Daemon sets are the best approach to managing pods on remote worker nodes for the following reasons: --- -* Daemon sets do not typically need rescheduling behavior. If a node disconnects from the cluster, pods on the node can continue to run. {product-title} does not change the state of daemon set pods, and leaves the pods in the state they last reported. For example, if a daemon set pod is in the `Running` state, when a node stops communicating, the pod keeps running and is assumed to be running by {product-title}. - -* Daemon set pods, by default, are created with `NoExecute` tolerations for the `node.kubernetes.io/unreachable` and `node.kubernetes.io/not-ready` taints with no `tolerationSeconds` value. These default values ensure that daemon set pods are never evicted if the control plane cannot reach a node. 
For example: -+ -.Tolerations added to daemon set pods by default -[source,yaml] ----- - tolerations: - - key: node.kubernetes.io/not-ready - operator: Exists - effect: NoExecute - - key: node.kubernetes.io/unreachable - operator: Exists - effect: NoExecute - - key: node.kubernetes.io/disk-pressure - operator: Exists - effect: NoSchedule - - key: node.kubernetes.io/memory-pressure - operator: Exists - effect: NoSchedule - - key: node.kubernetes.io/pid-pressure - operator: Exists - effect: NoSchedule - - key: node.kubernetes.io/unschedulable - operator: Exists - effect: NoSchedule ----- - -* Daemon sets can use labels to ensure that a workload runs on a matching worker node. - -* You can use an {product-title} service endpoint to load balance daemon set pods. - -[NOTE] -==== -Daemon sets do not schedule pods after a reboot of the node if {product-title} cannot reach the node. -==== --- - -[id="nodes-edge-remote-workers-strategies-static_{context}"] -Static pods:: -If you want pods to restart if a node reboots, after a power loss for example, consider link:https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/[static pods]. The kubelet on a node automatically restarts static pods as the node restarts. - -[NOTE] -==== -Static pods cannot use secrets and config maps. -==== - -[id="nodes-edge-remote-workers-strategies-zones_{context}"] -Kubernetes zones:: -link:https://kubernetes.io/docs/setup/best-practices/multiple-zones/[Kubernetes zones] can slow down the rate of pod evictions or, in some cases, completely stop them. - -When the control plane cannot reach a node, the node controller, by default, applies `node.kubernetes.io/unreachable` taints and evicts pods at a rate of 0.1 nodes per second. However, in a cluster that uses Kubernetes zones, pod eviction behavior is altered. - -If a zone is fully disrupted, where all nodes in the zone have a `Ready` condition that is `False` or `Unknown`, the control plane does not apply the `node.kubernetes.io/unreachable` taint to the nodes in that zone. - -For partially disrupted zones, where more than 55% of the nodes have a `False` or `Unknown` condition, the pod eviction rate is reduced to 0.01 nodes per second. Nodes in smaller clusters, with fewer than 50 nodes, are not tainted. Your cluster must have more than three zones for this behavior to take effect. - -You assign a node to a specific zone by applying the `topology.kubernetes.io/region` label in the node specification. - -.Sample node labels for Kubernetes zones -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - labels: - topology.kubernetes.io/region: east ----- - -[id="nodes-edge-remote-workers-strategies-kubeconfig_{context}"] -`KubeletConfig` objects:: --- -You can adjust the amount of time that the kubelet checks the state of each node. - -To set the interval that affects the timing of when the on-premise node controller marks nodes with the `Unhealthy` or `Unreachable` condition, create a `KubeletConfig` object that contains the `node-status-update-frequency` and `node-status-report-frequency` parameters. - -The kubelet on each node determines the node status as defined by the `node-status-update-frequency` setting and reports that status to the cluster based on the `node-status-report-frequency` setting. By default, the kubelet determines the node status every 10 seconds and reports the status every minute. However, if the node state changes, the kubelet reports the change to the cluster immediately. 
{product-title} uses the `node-status-report-frequency` setting only when the Node Lease feature gate is enabled, which is the default state in {product-title} clusters. If the Node Lease feature gate is disabled, the node reports its status based on the `node-status-update-frequency` setting. - -.Example kubelet config -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: disable-cpu-units -spec: - machineConfigPoolSelector: - matchLabels: - machineconfiguration.openshift.io/role: worker <1> - kubeletConfig: - node-status-update-frequency: <2> - - "10s" - node-status-report-frequency: <3> - - "1m" ----- -<1> Specify the type of node type to which this `KubeletConfig` object applies using the label from the `MachineConfig` object. -<2> Specify the frequency that the kubelet checks the status of a node associated with this `MachineConfig` object. The default value is `10s`. If you change this default, the `node-status-report-frequency` value is changed to the same value. -<3> Specify the frequency that the kubelet reports the status of a node associated with this `MachineConfig` object. The default value is `1m`. - -The `node-status-update-frequency` parameter works with the `node-monitor-grace-period` and `pod-eviction-timeout` parameters. - -* The `node-monitor-grace-period` parameter specifies how long {product-title} waits after a node associated with a `MachineConfig` object is marked `Unhealthy` if the controller manager does not receive the node heartbeat. Workloads on the node continue to run after this time. If the remote worker node rejoins the cluster after `node-monitor-grace-period` expires, pods continue to run. New pods can be scheduled to that node. The `node-monitor-grace-period` interval is `40s`. The `node-status-update-frequency` value must be lower than the `node-monitor-grace-period` value. - -* The `pod-eviction-timeout` parameter specifies the amount of time {product-title} waits after marking a node that is associated with a `MachineConfig` object as `Unreachable` to start marking pods for eviction. Evicted pods are rescheduled on other nodes. If the remote worker node rejoins the cluster after `pod-eviction-timeout` expires, the pods running on the remote worker node are terminated because the node controller has evicted the pods on-premise. Pods can then be rescheduled to that node. The `pod-eviction-timeout` interval is `5m0s`. - -[NOTE] -==== -Modifying the `node-monitor-grace-period` and `pod-eviction-timeout` parameters is not supported. -==== - --- - -[id="nodes-edge-remote-workers-strategies-tolerations_{context}"] -Tolerations:: -You can use pod tolerations to mitigate the effects if the on-premise node controller adds a `node.kubernetes.io/unreachable` taint with a `NoExecute` effect to a node it cannot reach. - -A taint with the `NoExecute` effect affects pods that are running on the node in the following ways: - -* Pods that do not tolerate the taint are queued for eviction. -* Pods that tolerate the taint without specifying a `tolerationSeconds` value in their toleration specification remain bound forever. -* Pods that tolerate the taint with a specified `tolerationSeconds` value remain bound for the specified amount of time. After the time elapses, the pods are queued for eviction. - -You can delay or avoid pod eviction by configuring pods tolerations with the `NoExecute` effect for the `node.kubernetes.io/unreachable` and `node.kubernetes.io/not-ready` taints. 
- -.Example toleration in a pod spec -[source,yaml] ----- -... -tolerations: -- key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" <1> -- key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" <2> - tolerationSeconds: 600 -... ----- -<1> The `NoExecute` effect without `tolerationSeconds` lets pods remain forever if the control plane cannot reach the node. -<2> The `NoExecute` effect with `tolerationSeconds: 600` lets pods remain for 10 minutes if the control plane marks the node as `Unhealthy`. - -{product-title} uses the `tolerationSeconds` value after the `pod-eviction-timeout` value elapses. - -Other types of {product-title} objects:: -You can use replica sets, deployments, and replication controllers. The scheduler can reschedule these pods onto other nodes after the node is disconnected for five minutes. Rescheduling onto other nodes can be beneficial for some workloads, such as REST APIs, where an administrator can guarantee a specific number of pods are running and accessible. - -[NOTE] -==== -When working with remote worker nodes, rescheduling pods on different nodes might not be acceptable if remote worker nodes are intended to be reserved for specific functions. -==== - -[id="nodes-edge-remote-workers-strategies-statefulset_{context}"] -Stateful sets:: -link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[Stateful sets] do not get restarted when there is an outage. The pods remain in the `Terminating` state until the control plane can acknowledge that the pods are terminated. - -To avoid scheduling a pod to a node that does not have access to the same type of persistent storage, {product-title} cannot migrate pods that require persistent volumes to other zones in the case of network separation. diff --git a/modules/nodes-namespaced-nodelevel-sysctls.adoc deleted file mode 100644 index 7806fb8dffba..000000000000 --- a/modules/nodes-namespaced-nodelevel-sysctls.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: CONCEPT - -[id="namespaced-and-node-level-sysctls"] -= Namespaced and node-level sysctls - -A number of sysctls are _namespaced_ in the Linux kernel. This means that you can set them independently for each pod on a node. Being namespaced is a requirement for sysctls to be accessible in a pod context within Kubernetes. - -The following sysctls are known to be namespaced: - -- `_kernel.shm*_` -- `_kernel.msg*_` -- `_kernel.sem_` -- `_fs.mqueue.*_` - -Additionally, most of the sysctls in the `net.*` group are known to be namespaced. Their namespace adoption differs based on the kernel version and distributor. - -Sysctls that are not namespaced are called _node-level_ and must be set -manually by the cluster administrator, either by means of the underlying Linux -distribution of the nodes, such as by modifying the `_/etc/sysctl.conf_` file, -or by using a daemon set with privileged containers. You can use the Node Tuning Operator to set _node-level_ sysctls. - - -[NOTE] -==== -Consider marking nodes with special sysctls as tainted. Only schedule pods onto -them that need those sysctl settings. Use the taints and tolerations feature to mark the nodes. 
-==== diff --git a/modules/nodes-nodes-audit-config-about.adoc b/modules/nodes-nodes-audit-config-about.adoc deleted file mode 100644 index 4fc2c3a453bd..000000000000 --- a/modules/nodes-nodes-audit-config-about.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-policy-config.adoc - -:_content-type: CONCEPT -[id="about-audit-log-profiles_{context}"] -= About audit log policy profiles - -Audit log profiles define how to log requests that come to the OpenShift API server, Kubernetes API server, OpenShift OAuth API server, and OpenShift OAuth server. - -{product-title} provides the following predefined audit policy profiles: - -[cols="1,2a",options="header"] -|=== -|Profile -|Description - -|`Default` -|Logs only metadata for read and write requests; does not log request bodies except for OAuth access token requests. This is the default policy. - -|`WriteRequestBodies` -|In addition to logging metadata for all requests, logs request bodies for every write request to the API servers (`create`, `update`, `patch`). This profile has more resource overhead than the `Default` profile. ^[1]^ - -|`AllRequestBodies` -|In addition to logging metadata for all requests, logs request bodies for every read and write request to the API servers (`get`, `list`, `create`, `update`, `patch`). This profile has the most resource overhead. ^[1]^ - -|`None` -|No requests are logged; even OAuth access token requests and OAuth authorize token requests are not logged. - -[WARNING] -==== -It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. -==== - -|=== -[.small] --- -1. Sensitive resources, such as `Secret`, `Route`, and `OAuthClient` objects, are only ever logged at the metadata level. OpenShift OAuth server events are only ever logged at the metadata level. --- - -By default, {product-title} uses the `Default` audit log profile. You can use another audit policy profile that also logs request bodies, but be aware of the increased resource usage (CPU, memory, and I/O). diff --git a/modules/nodes-nodes-audit-log-basic-viewing.adoc b/modules/nodes-nodes-audit-log-basic-viewing.adoc deleted file mode 100644 index 8d62dbf17fd0..000000000000 --- a/modules/nodes-nodes-audit-log-basic-viewing.adoc +++ /dev/null @@ -1,175 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-view.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-audit-log-basic-viewing_{context}"] -= Viewing the audit logs - -You can view the logs for the OpenShift API server, Kubernetes API server, OpenShift OAuth API server, and OpenShift OAuth server for each control plane node. - -.Procedure - -To view the audit logs: - -* View the OpenShift API server audit logs: - -.. 
List the OpenShift API server audit logs that are available for each control plane node: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=openshift-apiserver/ ----- -+ -.Example output -[source,terminal] ----- -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit-2021-03-09T00-12-19.834.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit-2021-03-09T00-11-49.835.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit-2021-03-09T00-13-00.128.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit.log ----- - -.. View a specific OpenShift API server audit log by providing the node name and the log name: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> --path=openshift-apiserver/<log_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm node-logs ci-ln-m0wpfjb-f76d1-vnb5x-master-0 --path=openshift-apiserver/audit-2021-03-09T00-12-19.834.log ----- -+ -.Example output -[source,terminal] ----- -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"381acf6d-5f30-4c7d-8175-c9c317ae5893","stage":"ResponseComplete","requestURI":"/metrics","verb":"get","user":{"username":"system:serviceaccount:openshift-monitoring:prometheus-k8s","uid":"825b60a0-3976-4861-a342-3b2b561e8f82","groups":["system:serviceaccounts","system:serviceaccounts:openshift-monitoring","system:authenticated"]},"sourceIPs":["10.129.2.6"],"userAgent":"Prometheus/2.23.0","responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2021-03-08T18:02:04.086545Z","stageTimestamp":"2021-03-08T18:02:04.107102Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"prometheus-k8s\" of ClusterRole \"prometheus-k8s\" to ServiceAccount \"prometheus-k8s/openshift-monitoring\""}} ----- - -* View the Kubernetes API server audit logs: - -.. List the Kubernetes API server audit logs that are available for each control plane node: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=kube-apiserver/ ----- -+ -.Example output -[source,terminal] ----- -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit-2021-03-09T14-07-27.129.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit-2021-03-09T19-24-22.620.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit-2021-03-09T18-37-07.511.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit.log ----- - -.. 
View a specific Kubernetes API server audit log by providing the node name and the log name: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> --path=kube-apiserver/<log_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm node-logs ci-ln-m0wpfjb-f76d1-vnb5x-master-0 --path=kube-apiserver/audit-2021-03-09T14-07-27.129.log ----- -+ -.Example output -[source,terminal] ----- -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"cfce8a0b-b5f5-4365-8c9f-79c1227d10f9","stage":"ResponseComplete","requestURI":"/api/v1/namespaces/openshift-kube-scheduler/serviceaccounts/openshift-kube-scheduler-sa","verb":"get","user":{"username":"system:serviceaccount:openshift-kube-scheduler-operator:openshift-kube-scheduler-operator","uid":"2574b041-f3c8-44e6-a057-baef7aa81516","groups":["system:serviceaccounts","system:serviceaccounts:openshift-kube-scheduler-operator","system:authenticated"]},"sourceIPs":["10.128.0.8"],"userAgent":"cluster-kube-scheduler-operator/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"serviceaccounts","namespace":"openshift-kube-scheduler","name":"openshift-kube-scheduler-sa","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2021-03-08T18:06:42.512619Z","stageTimestamp":"2021-03-08T18:06:42.516145Z","annotations":{"authentication.k8s.io/legacy-token":"system:serviceaccount:openshift-kube-scheduler-operator:openshift-kube-scheduler-operator","authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:openshift:operator:cluster-kube-scheduler-operator\" of ClusterRole \"cluster-admin\" to ServiceAccount \"openshift-kube-scheduler-operator/openshift-kube-scheduler-operator\""}} ----- - -* View the OpenShift OAuth API server audit logs: - -.. List the OpenShift OAuth API server audit logs that are available for each control plane node: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=oauth-apiserver/ ----- -+ -.Example output -[source,terminal] ----- -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit-2021-03-09T13-06-26.128.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit-2021-03-09T18-23-21.619.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit-2021-03-09T17-36-06.510.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit.log ----- - -.. 
View a specific OpenShift OAuth API server audit log by providing the node name and the log name: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> --path=oauth-apiserver/<log_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm node-logs ci-ln-m0wpfjb-f76d1-vnb5x-master-0 --path=oauth-apiserver/audit-2021-03-09T13-06-26.128.log ----- -+ -.Example output -[source,terminal] ----- -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"dd4c44e2-3ea1-4830-9ab7-c91a5f1388d6","stage":"ResponseComplete","requestURI":"/apis/user.openshift.io/v1/users/~","verb":"get","user":{"username":"system:serviceaccount:openshift-monitoring:prometheus-k8s","groups":["system:serviceaccounts","system:serviceaccounts:openshift-monitoring","system:authenticated"]},"sourceIPs":["10.0.32.4","10.128.0.1"],"userAgent":"dockerregistry/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"users","name":"~","apiGroup":"user.openshift.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2021-03-08T17:47:43.653187Z","stageTimestamp":"2021-03-08T17:47:43.660187Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"basic-users\" of ClusterRole \"basic-user\" to Group \"system:authenticated\""}} ----- - -* View the OpenShift OAuth server audit logs: - -.. List the OpenShift OAuth server audit logs that are available for each control plane node: -+ -[source,terminal] ----- -$ oc adm node-logs --role=master --path=oauth-server/ ----- -+ -.Example output -[source,terminal] ----- -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit-2022-05-11T18-57-32.395.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-0 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit-2022-05-11T19-07-07.021.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-1 audit.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit-2022-05-11T19-06-51.844.log -ci-ln-m0wpfjb-f76d1-vnb5x-master-2 audit.log ----- - -.. View a specific OpenShift OAuth server audit log by providing the node name and the log name: -+ -[source,terminal] ----- -$ oc adm node-logs <node_name> --path=oauth-server/<log_name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm node-logs ci-ln-m0wpfjb-f76d1-vnb5x-master-0 --path=oauth-server/audit-2022-05-11T18-57-32.395.log ----- -+ -.Example output -[source,terminal] ----- -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"13c20345-f33b-4b7d-b3b6-e7793f805621","stage":"ResponseComplete","requestURI":"/login","verb":"post","user":{"username":"system:anonymous","groups":["system:unauthenticated"]},"sourceIPs":["10.128.2.6"],"userAgent":"Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0","responseStatus":{"metadata":{},"code":302},"requestReceivedTimestamp":"2022-05-11T17:31:16.280155Z","stageTimestamp":"2022-05-11T17:31:16.297083Z","annotations":{"authentication.openshift.io/decision":"error","authentication.openshift.io/username":"kubeadmin","authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} ----- -+ -The possible values for the `authentication.openshift.io/decision` annotation are `allow`, `deny`, or `error`. 
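The audit log files are newline-delimited JSON, so you can pipe the output of `oc adm node-logs` through a JSON processor to narrow it down to specific events. The following is a minimal sketch that assumes `jq` is available on your workstation and reuses the node and log file names from the earlier listings; it keeps only the OpenShift OAuth server events whose `authentication.openshift.io/decision` annotation is `error`:

[source,terminal]
----
$ oc adm node-logs ci-ln-m0wpfjb-f76d1-vnb5x-master-0 \
  --path=oauth-server/audit-2022-05-11T18-57-32.395.log \
  | jq 'select(.annotations["authentication.openshift.io/decision"] == "error")'
----

`jq` evaluates the filter against each JSON event in turn and prints only the matching events. You can add further `select` conditions, for example on `.user.username` or `.verb`, to narrow the output more.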
diff --git a/modules/nodes-nodes-audit-log-basic.adoc b/modules/nodes-nodes-audit-log-basic.adoc deleted file mode 100644 index fb011fcfa2b7..000000000000 --- a/modules/nodes-nodes-audit-log-basic.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-view.adoc - -:_content-type: CONCEPT -[id="nodes-pods-audit-log-basic_{context}"] -= About the API audit log - -Audit works at the API server level, logging all requests coming to the server. Each audit log contains the following information: - -.Audit log fields -[cols="1,2",options="header"] -|=== -|Field |Description -|`level` | The audit level at which the event was generated. -|`auditID` |A unique audit ID, generated for each request. -|`stage` |The stage of the request handling when this event instance was generated. -|`requestURI` |The request URI as sent by the client to a server. -|`verb` |The Kubernetes verb associated with the request. For non-resource requests, this is the lowercase HTTP method. -|`user` |The authenticated user information. -|`impersonatedUser` |Optional. The impersonated user information, if the request is impersonating another user. -|`sourceIPs` |Optional. The source IPs, from where the request originated and any intermediate proxies. -|`userAgent` |Optional. The user agent string reported by the client. Note that the user agent is provided by the client, and must not be trusted. -|`objectRef` |Optional. The object reference this request is targeted at. This does not apply for `List`-type requests, or non-resource requests. -|`responseStatus` |Optional. The response status, populated even when the `ResponseObject` is not a `Status` type. For successful responses, this will only include the code. For non-status type error responses, this will be auto-populated with the error message. -|`requestObject` |Optional. The API object from the request, in JSON format. The `RequestObject` is recorded as is in the request (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or merging. It is an external versioned object type, and might not be a valid object on its own. This is omitted for non-resource requests and is only logged at request level and higher. -|`responseObject` |Optional. The API object returned in the response, in JSON format. The `ResponseObject` is recorded after conversion to the external type, and serialized as JSON. This is omitted for non-resource requests and is only logged at response level. -|`requestReceivedTimestamp` |The time that the request reached the API server. -|`stageTimestamp` |The time that the request reached the current audit stage. -|`annotations` |Optional. An unstructured key value map stored with an audit event that may be set by plugins invoked in the request serving chain, including authentication, authorization and admission plugins. Note that these annotations are for the audit event, and do not correspond to the `metadata.annotations` of the submitted object. Keys should uniquely identify the informing component to avoid name collisions, for example `podsecuritypolicy.admission.k8s.io/policy`. Values should be short. Annotations are included in the metadata level. 
-|=== - -Example output for the Kubernetes API server: - -[source,json] ----- -{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"ad209ce1-fec7-4130-8192-c4cc63f1d8cd","stage":"ResponseComplete","requestURI":"/api/v1/namespaces/openshift-kube-controller-manager/configmaps/cert-recovery-controller-lock?timeout=35s","verb":"update","user":{"username":"system:serviceaccount:openshift-kube-controller-manager:localhost-recovery-client","uid":"dd4997e3-d565-4e37-80f8-7fc122ccd785","groups":["system:serviceaccounts","system:serviceaccounts:openshift-kube-controller-manager","system:authenticated"]},"sourceIPs":["::1"],"userAgent":"cluster-kube-controller-manager-operator/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"configmaps","namespace":"openshift-kube-controller-manager","name":"cert-recovery-controller-lock","uid":"5c57190b-6993-425d-8101-8337e48c7548","apiVersion":"v1","resourceVersion":"574307"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2020-04-02T08:27:20.200962Z","stageTimestamp":"2020-04-02T08:27:20.206710Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:openshift:operator:kube-controller-manager-recovery\" of ClusterRole \"cluster-admin\" to ServiceAccount \"localhost-recovery-client/openshift-kube-controller-manager\""}} ----- diff --git a/modules/nodes-nodes-audit-policy-custom.adoc b/modules/nodes-nodes-audit-policy-custom.adoc deleted file mode 100644 index 22068f1a196d..000000000000 --- a/modules/nodes-nodes-audit-policy-custom.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-policy-config.adoc - -:_content-type: PROCEDURE -[id="configuring-audit-policy-custom_{context}"] -= Configuring the audit log policy with custom rules - -You can configure an audit log policy that defines custom rules. You can specify multiple groups and define which profile to use for that group. - -These custom rules take precedence over the top-level profile field. The custom rules are evaluated from top to bottom, and the first that matches is applied. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `APIServer` resource: -+ -[source,terminal] ----- -$ oc edit apiserver cluster ----- - -. Add the `spec.audit.customRules` field: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: -... -spec: - audit: - customRules: <1> - - group: system:authenticated:oauth - profile: WriteRequestBodies - - group: system:authenticated - profile: AllRequestBodies - profile: Default <2> ----- -<1> Add one or more groups and specify the profile to use for that group. These custom rules take precedence over the top-level profile field. The custom rules are evaluated from top to bottom, and the first that matches is applied. -<2> Set to `Default`, `WriteRequestBodies`, `AllRequestBodies`, or `None`. If you do not set this top-level `audit.profile` field, it defaults to the `Default` profile. -+ -[WARNING] -==== -It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. -==== - -. Save the file to apply the changes. 
- -.Verification - -* Verify that a new revision of the Kubernetes API server pods is rolled out. It can take several minutes for all nodes to update to the new revision. -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for the Kubernetes API server to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 12 <1> ----- -<1> In this example, the latest revision number is `12`. -+ -If the output shows a message similar to one of the following messages, the update is still in progress. Wait a few minutes and try again. - -** `3 nodes are at revision 11; 0 nodes have achieved new revision 12` -** `2 nodes are at revision 11; 1 nodes are at revision 12` diff --git a/modules/nodes-nodes-audit-policy-disable.adoc b/modules/nodes-nodes-audit-policy-disable.adoc deleted file mode 100644 index 08720210ab3f..000000000000 --- a/modules/nodes-nodes-audit-policy-disable.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-policy-config.adoc - -:_content-type: PROCEDURE -[id="configuring-audit-policy-disable_{context}"] -= Disabling audit logging - -You can disable audit logging for {product-title}. When you disable audit logging, even OAuth access token requests and OAuth authorize token requests are not logged. - -[WARNING] -==== -It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `APIServer` resource: -+ -[source,terminal] ----- -$ oc edit apiserver cluster ----- - -. Set the `spec.audit.profile` field to `None`: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: -... -spec: - audit: - profile: None ----- -+ -[NOTE] -==== -You can also disable audit logging only for specific groups by specifying custom rules in the `spec.audit.customRules` field. -==== - -. Save the file to apply the changes. - -.Verification - -* Verify that a new revision of the Kubernetes API server pods is rolled out. It can take several minutes for all nodes to update to the new revision. -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for the Kubernetes API server to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 12 <1> ----- -<1> In this example, the latest revision number is `12`. -+ -If the output shows a message similar to one of the following messages, the update is still in progress. Wait a few minutes and try again. 
- -** `3 nodes are at revision 11; 0 nodes have achieved new revision 12` -** `2 nodes are at revision 11; 1 nodes are at revision 12` diff --git a/modules/nodes-nodes-audit-policy.adoc b/modules/nodes-nodes-audit-policy.adoc deleted file mode 100644 index f638a6b27ad3..000000000000 --- a/modules/nodes-nodes-audit-policy.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * security/audit-log-policy-config.adoc - -:_content-type: PROCEDURE -[id="configuring-audit-policy_{context}"] -= Configuring the audit log policy - -You can configure the audit log policy to use when logging requests that come to the API servers. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `APIServer` resource: -+ -[source,terminal] ----- -$ oc edit apiserver cluster ----- - -. Update the `spec.audit.profile` field: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: -... -spec: - audit: - profile: WriteRequestBodies <1> ----- -<1> Set to `Default`, `WriteRequestBodies`, `AllRequestBodies`, or `None`. The default profile is `Default`. -+ -[WARNING] -==== -It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. -==== - -. Save the file to apply the changes. - -.Verification - -* Verify that a new revision of the Kubernetes API server pods is rolled out. It can take several minutes for all nodes to update to the new revision. -+ -[source,terminal] ----- -$ oc get kubeapiserver -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}' ----- -+ -Review the `NodeInstallerProgressing` status condition for the Kubernetes API server to verify that all nodes are at the latest revision. The output shows `AllNodesAtLatestRevision` upon successful update: -+ -[source,terminal] ----- -AllNodesAtLatestRevision -3 nodes are at revision 12 <1> ----- -<1> In this example, the latest revision number is `12`. -+ -If the output shows a message similar to one of the following messages, the update is still in progress. Wait a few minutes and try again. - -** `3 nodes are at revision 11; 0 nodes have achieved new revision 12` -** `2 nodes are at revision 11; 1 nodes are at revision 12` diff --git a/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc b/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc deleted file mode 100644 index 57a1c19dd214..000000000000 --- a/modules/nodes-nodes-cluster-timeout-graceful-shutdown.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assembly: -// * nodes/nodes-nodes-graceful-shutdown - -:_content-type: CONCEPT -[id="nodes-nodes-cluster-timeout-graceful-shutdown_{context}"] -= About graceful node shutdown - -During a graceful node shutdown, the kubelet sends a termination signal to pods running on the node and postpones the node shutdown until all the pods evicted. If a node unexpectedly shuts down, the graceful node shutdown feature minimizes interruption to workloads running on these pods. 
- -During a graceful node shutdown, the kubelet stops pods in two phases: - -* Regular pod termination -* Critical pod termination - -You can define shutdown grace periods for regular and critical pods by configuring the following specifications in the `KubeletConfig` custom resource: - -* `shutdownGracePeriod`: Specifies the total duration for pod termination for regular and critical pods. -* `shutdownGracePeriodCriticalPods`: Specifies the duration for critical pod termination. This value must be less than the `shutdownGracePeriod` value. - -For example, if the `shutdownGracePeriod` value is `30s`, and the `shutdownGracePeriodCriticalPods` value is `10s`, the kubelet delays the node shutdown by 30 seconds. During the shutdown, the first 20 (30-10) seconds are reserved for gracefully shutting down regular pods, and the last 10 seconds are reserved for gracefully shutting down critical pods. - -To define a critical pod, assign a pod priority value greater than or equal to `2000000000`. To define a regular pod, assign a pod priority value of less than `2000000000`. - -For more information about how to define a priority value for pods, see the _Additional resources_ section. - diff --git a/modules/nodes-nodes-configuring-graceful-shutdown.adoc b/modules/nodes-nodes-configuring-graceful-shutdown.adoc deleted file mode 100644 index bbedc21481f3..000000000000 --- a/modules/nodes-nodes-configuring-graceful-shutdown.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assembly: -// * nodes/nodes-nodes-graceful-shutdown - -:_content-type: PROCEDURE -[id="nodes-nodes-activating-graceful-shutdown_{context}"] -= Configuring graceful node shutdown - -To configure graceful node shutdown, create a `KubeletConfig` custom resource (CR) to specify a shutdown grace period for pods on a set of nodes. The graceful node shutdown feature minimizes interruption to workloads that run on these pods. - -[NOTE] -==== -If you do not configure graceful node shutdown, the default grace period is `0` and the pod is forcefully evicted from the node. -==== - -.Prerequisites - -* You have access to the cluster with the `cluster-admin` role. -* You have defined priority classes for pods that require critical or regular classification. - -.Procedure - -. Define shutdown grace periods in the `KubeletConfig` CR by saving the following YAML in the `kubelet-gns.yaml` file: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: graceful-shutdown - namespace: openshift-machine-config-operator -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - kubeletConfig: - shutdownGracePeriod: "3m" <2> - shutdownGracePeriodCriticalPods: "2m" <3> ----- -<1> This example applies shutdown grace periods to nodes with the `worker` role. -<2> Define a time period for regular pods to shut down. -<3> Define a time period for critical pods to shut down. - -. Create the `KubeletConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f kubelet-gns.yaml ----- -+ -.Example output -[source,terminal] ----- -kubeletconfig.machineconfiguration.openshift.io/graceful-shutdown created ----- - -.Verification - -. View the kubelet logs for a node to verify the grace period configuration by using the command line or by viewing the `kubelet.conf` file. 
-+ -[NOTE] -==== -Ensure that the log messages for `shutdownGracePeriodRequested` and `shutdownGracePeriodCriticalPods` match the values set in the `KubeletConfig` CR. -==== - -.. To view the logs by using the command line, run the following command, replacing `<node_name>` with the name of the node: -+ -[source,bash] ----- -$ oc adm node-logs <node_name> -u kubelet ----- -+ -.Example output -[source,terminal] ----- -Sep 12 22:13:46 -ci-ln-qv5pvzk-72292-xvkd9-worker-a-dmbr4 -hyperkube[22317]: I0912 22:13:46.687472 -22317 nodeshutdown_manager_linux.go:134] -"Creating node shutdown manager" -shutdownGracePeriodRequested="3m0s" <1> -shutdownGracePeriodCriticalPods="2m0s" -shutdownGracePeriodByPodPriority=[ -{Priority:0 -ShutdownGracePeriodSeconds:1200} -{Priority:2000000000 -ShutdownGracePeriodSeconds:600}] -... ----- -+ -<1> Ensure that the log messages for `shutdownGracePeriodRequested` and `shutdownGracePeriodCriticalPods` match the values set in the `KubeletConfig` CR. -+ -.. To view the logs in the `kubelet.conf` file on a node, run the following commands to enter a debug session on the node: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- -+ -[source,terminal] ----- -$ chroot /host ----- -+ -[source,terminal] ----- -$ cat /etc/kubernetes/kubelet.conf ----- -+ -.Example output -[source,terminal] ----- -... -“memorySwap”: {}, - “containerLogMaxSize”: “50Mi”, - “logging”: { - “flushFrequency”: 0, - “verbosity”: 0, - “options”: { - “json”: { - “infoBufferSize”: “0” - } - } - }, - “shutdownGracePeriod”: “10m0s”, <1> - “shutdownGracePeriodCriticalPods”: “3m0s” -} ----- -+ -<1> Ensure that the log messages for `shutdownGracePeriodRequested` and `shutdownGracePeriodCriticalPods` match the values set in the `KubeletConfig` CR. - -. During a graceful node shutdown, you can verify that a pod was gracefully shut down by running the following command, replacing `<pod_name>` with the name of the pod: -+ -[source,terminal] ----- -$ oc describe pod <pod_name> ----- -+ -.Example output -[source,terminal] ----- -Reason: Terminated -Message: Pod was terminated in response to imminent node shutdown. ----- diff --git a/modules/nodes-nodes-garbage-collection-configuring.adoc b/modules/nodes-nodes-garbage-collection-configuring.adoc deleted file mode 100644 index 7c1f863c8c29..000000000000 --- a/modules/nodes-nodes-garbage-collection-configuring.adoc +++ /dev/null @@ -1,156 +0,0 @@ - -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-garbage-collection.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-garbage-collection-configuring_{context}"] -= Configuring garbage collection for containers and images - -As an administrator, you can configure how {product-title} performs garbage collection by creating a `kubeletConfig` object for each machine config pool. - -[NOTE] -==== -{product-title} supports only one `kubeletConfig` object for each machine config pool. -==== - -You can configure any combination of the following: - -* Soft eviction for containers -* Hard eviction for containers -* Eviction for images - -Container garbage collection removes terminated containers. Image garbage collection removes images that are not referenced by any running pods. - -.Prerequisites - -. 
Obtain the label associated with the static `MachineConfigPool` CRD for the type of node you want to configure by entering the following command: -+ -[source,terminal] ----- -$ oc edit machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2022-11-16T15:34:25Z" - generation: 4 - labels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - name: worker ----- -<1> The label appears under Labels. -+ -[TIP] -==== -If the label is not present, add a key/value pair such as: - ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -==== - -.Procedure - -. Create a custom resource (CR) for your configuration change. -+ -[IMPORTANT] -==== -If there is one file system, or if `/var/lib/kubelet` and `/var/lib/containers/` are in the same file system, the settings with the highest values trigger evictions, as those are met first. The file system triggers the eviction. -==== -+ -.Sample configuration for a container garbage collection CR: -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: worker-kubeconfig <1> -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <2> - kubeletConfig: - evictionSoft: <3> - memory.available: "500Mi" <4> - nodefs.available: "10%" - nodefs.inodesFree: "5%" - imagefs.available: "15%" - imagefs.inodesFree: "10%" - evictionSoftGracePeriod: <5> - memory.available: "1m30s" - nodefs.available: "1m30s" - nodefs.inodesFree: "1m30s" - imagefs.available: "1m30s" - imagefs.inodesFree: "1m30s" - evictionHard: <6> - memory.available: "200Mi" - nodefs.available: "5%" - nodefs.inodesFree: "4%" - imagefs.available: "10%" - imagefs.inodesFree: "5%" - evictionPressureTransitionPeriod: 0s <7> - imageMinimumGCAge: 5m <8> - imageGCHighThresholdPercent: 80 <9> - imageGCLowThresholdPercent: 75 <10> ----- -<1> Name for the object. -<2> Specify the label from the machine config pool. -<3> For container garbage collection: Type of eviction: `evictionSoft` or `evictionHard`. -<4> For container garbage collection: Eviction thresholds based on a specific eviction trigger signal. -<5> For container garbage collection: Grace periods for the soft eviction. This parameter does not apply to `eviction-hard`. -<6> For container garbage collection: Eviction thresholds based on a specific eviction trigger signal. -For `evictionHard` you must specify all of these parameters. If you do not specify all parameters, only the specified parameters are applied and the garbage collection will not function properly. -<7> For container garbage collection: The duration to wait before transitioning out of an eviction pressure condition. -<8> For image garbage collection: The minimum age for an unused image before the image is removed by garbage collection. -<9> For image garbage collection: The percent of disk usage (expressed as an integer) that triggers image garbage collection. -<10> For image garbage collection: The percent of disk usage (expressed as an integer) that image garbage collection attempts to free. - -. 
Run the following command to create the CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f gc-container.yaml ----- -+ -.Example output -[source,terminal] ----- -kubeletconfig.machineconfiguration.openshift.io/gc-container created ----- - -.Verification - -. Verify that garbage collection is active by entering the following command. The Machine Config Pool you specified in the custom resource appears with `UPDATING` as 'true` until the change is fully implemented: -+ -[source,terminal] ----- -$ oc get machineconfigpool ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING -master rendered-master-546383f80705bd5aeaba93 True False -worker rendered-worker-b4c51bb33ccaae6fc4a6a5 False True ----- diff --git a/modules/nodes-nodes-garbage-collection-containers.adoc b/modules/nodes-nodes-garbage-collection-containers.adoc deleted file mode 100644 index 197c41f49ab0..000000000000 --- a/modules/nodes-nodes-garbage-collection-containers.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-garbage-collection.adoc -// * post_installation_configuration/node-tasks.adoc - - -:_content-type: CONCEPT -[id="nodes-nodes-garbage-collection-containers_{context}"] -= Understanding how terminated containers are removed through garbage collection - -Container garbage collection removes terminated containers by using eviction thresholds. - -When eviction thresholds are set for garbage collection, the node tries to keep any container for any pod accessible from the API. If the pod has been deleted, the containers will be as well. Containers are preserved as long the pod is not deleted and the eviction threshold is not reached. If the node is under disk pressure, it will remove containers and their logs will no longer be accessible using `oc logs`. - -* *eviction-soft* - A soft eviction threshold pairs an eviction threshold with a required administrator-specified grace period. - -* *eviction-hard* - A hard eviction threshold has no grace period, and if observed, {product-title} takes immediate action. - -The following table lists the eviction thresholds: - -.Variables for configuring container garbage collection -|=== -| Node condition | Eviction signal | Description - -| MemoryPressure -| `memory.available` -| The available memory on the node. - -| DiskPressure -a| * `nodefs.available` - * `nodefs.inodesFree` - * `imagefs.available` - * `imagefs.inodesFree` -| The available disk space or inodes on the node root file system, `nodefs`, or image file system, `imagefs`. -|=== - -[NOTE] -==== -For `evictionHard` you must specify all of these parameters. If you do not specify all parameters, only the specified parameters are applied and the garbage collection will not function properly. -==== - -If a node is oscillating above and below a soft eviction threshold, but not exceeding its associated grace period, the corresponding node would constantly oscillate between `true` and `false`. As a consequence, the scheduler could make poor scheduling decisions. - -To protect against this oscillation, use the `eviction-pressure-transition-period` flag to control how long {product-title} must wait before transitioning out of a pressure condition. {product-title} will not set an eviction threshold as being met for the specified pressure condition for the period specified before toggling the condition back to false. 
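To see how a soft eviction threshold, its grace period, and the transition period fit together, the following is a minimal sketch of a `KubeletConfig` custom resource that sets only a memory-based soft eviction threshold. The object name and the threshold values are illustrative, and the hard eviction thresholds are omitted because, as noted earlier, `evictionHard` requires all of its signals to be specified together.

[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: worker-soft-eviction # illustrative name
spec:
  machineConfigPoolSelector:
    matchLabels:
      pools.operator.machineconfiguration.openshift.io/worker: ""
  kubeletConfig:
    evictionSoft:
      memory.available: "500Mi" # pressure is reported only if available memory stays below this value
    evictionSoftGracePeriod:
      memory.available: "1m30s" # for the full grace period
    evictionPressureTransitionPeriod: 1m0s # wait before transitioning out of the pressure condition
----

Because this object selects the `worker` machine config pool, the settings roll out to all worker nodes in the same way as the full garbage collection example shown earlier.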
diff --git a/modules/nodes-nodes-garbage-collection-images.adoc b/modules/nodes-nodes-garbage-collection-images.adoc deleted file mode 100644 index e10b1be67d93..000000000000 --- a/modules/nodes-nodes-garbage-collection-images.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-garbage-collection.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-garbage-collection-images_{context}"] -= Understanding how images are removed through garbage collection - -Image garbage collection removes images that are not referenced by any running pods. - -{product-title} determines which images to remove from a node based on the disk usage that is reported by *cAdvisor*. - -The policy for image garbage collection is based on two conditions: - -* The percent of disk usage (expressed as an integer) which triggers image -garbage collection. The default is *85*. - -* The percent of disk usage (expressed as an integer) that image garbage -collection attempts to free. The default is *80*. - -For image garbage collection, you can modify any of the following variables using -a custom resource. - -.Variables for configuring image garbage collection - -[options="header",cols="1,3"] -|=== - -|Setting |Description - -|`imageMinimumGCAge` -|The minimum age for an unused image before the image is removed by garbage collection. The default is *2m*. - -|`imageGCHighThresholdPercent` -|The percent of disk usage, expressed as an integer, which triggers image -garbage collection. The default is *85*. - -|`imageGCLowThresholdPercent` -|The percent of disk usage, expressed as an integer, that image garbage -collection attempts to free. The default is *80*. -|=== - -Two lists of images are retrieved in each garbage collector run: - -1. A list of images currently running in at least one pod. -2. A list of images available on a host. - -As new containers are run, new images appear. All images are marked with a time -stamp. If the image is running (the first list above) or is newly detected (the -second list above), it is marked with the current time. The remaining images are -already marked from the previous runs. All images are then sorted by the time -stamp. - -Once the collection starts, the oldest images are deleted first until the -stopping criterion is met. diff --git a/modules/nodes-nodes-jobs-about.adoc b/modules/nodes-nodes-jobs-about.adoc deleted file mode 100644 index bfee9acd2a29..000000000000 --- a/modules/nodes-nodes-jobs-about.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-jobs.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-jobs-about_{context}"] -= Understanding jobs and cron jobs - -A job tracks the overall progress of a task and updates its status with information -about active, succeeded, and failed pods. Deleting a job cleans up any pods it created. -Jobs are part of the Kubernetes API, which can be managed -with `oc` commands like other object types. - -There are two possible resource types that allow creating run-once objects in {product-title}: - -Job:: -A regular job is a run-once object that creates a task and ensures the job finishes. - -There are three main types of task suitable to run as a job: - -* Non-parallel jobs: -** A job that starts only one pod, unless the pod fails. -** The job is complete as soon as its pod terminates successfully.
- -* Parallel jobs with a fixed completion count: -** A job that starts multiple pods. -** The job represents the overall task and is complete when there is one successful pod for each value in the range `1` to the `completions` value. - -* Parallel jobs with a work queue: -** A job with multiple parallel worker processes in a given pod. -** {product-title} coordinates pods to determine what each should work on, or use an external queue service. -** Each pod is independently capable of determining whether or not all peer pods are complete and that the entire job is done. -** When any pod from the job terminates with success, no new pods are created. -** When at least one pod has terminated with success and all pods are terminated, the job is successfully completed. -** When any pod has exited with success, no other pod should be doing any work for this task or writing any output. Pods should all be in the process of exiting. - -For more information about how to make use of the different types of job, see link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-patterns[Job Patterns] in the Kubernetes documentation. - -Cron job:: - -A job can be scheduled to run multiple times by using a cron job. - -A _cron job_ builds on a regular job by allowing you to specify -how the job should be run. Cron jobs are part of the -link:http://kubernetes.io/docs/user-guide/cron-jobs[Kubernetes] API, which -can be managed with `oc` commands like other object types. - -Cron jobs are useful for creating periodic and recurring tasks, like running backups or sending emails. -Cron jobs can also schedule individual tasks for a specific time, such as if you want to schedule a job for a low activity period. A cron job creates a `Job` object based on the time zone configured on the control plane node that runs the cronjob controller. - -[WARNING] -==== -A cron job creates a `Job` object approximately once per execution time of its -schedule, but there are circumstances in which it fails to create a job or -two jobs might be created. Therefore, jobs must be idempotent and you must -configure history limits. -==== - -[id="jobs-create_{context}"] -== Understanding how to create jobs - -Both resource types require a job configuration that consists of the following key parts: - -- A pod template, which describes the pod that {product-title} creates. -- The `parallelism` parameter, which specifies how many pods running in parallel at any point in time should execute a job. -** For non-parallel jobs, leave unset. When unset, defaults to `1`. -- The `completions` parameter, specifying how many successful pod completions are needed to finish a job. -** For non-parallel jobs, leave unset. When unset, defaults to `1`. -** For parallel jobs with a fixed completion count, specify a value. -** For parallel jobs with a work queue, leave unset. When unset, defaults to the `parallelism` value. - -[id="jobs-set-max_{context}"] -== Understanding how to set a maximum duration for jobs - -When defining a job, you can define its maximum duration by setting -the `activeDeadlineSeconds` field. It is specified in seconds and is not -set by default. When not set, there is no maximum duration enforced. - -The maximum duration is counted from the time when the first pod gets scheduled in -the system, and defines how long a job can be active. It tracks the overall time of -an execution. After reaching the specified timeout, the job is terminated by {product-title}.
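For example, the following sketch reuses the `pi` image and command shown elsewhere in this documentation, with an illustrative job name and a deadline of `600` seconds; after that time elapses, {product-title} terminates the job and its running pods even if the computation has not finished:

[source,yaml]
----
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-with-deadline <1>
spec:
  activeDeadlineSeconds: 600 <2>
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: OnFailure
----
<1> Illustrative job name.
<2> The job is terminated 600 seconds after its first pod is scheduled, regardless of how many pods have completed.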
- -[id="jobs-set-backoff_{context}"] -== Understanding how to set a job back off policy for pod failure - -A job can be considered failed after a set number of retries due to a -logical error in configuration or other similar reasons. Failed pods associated with the job are recreated by the controller with -an exponential back off delay (`10s`, `20s`, `40s` …) capped at six minutes. The -limit is reset if no new failed pods appear between controller checks. - -Use the `spec.backoffLimit` parameter to set the number of retries for a job. - -[id="jobs-artifacts_{context}"] -== Understanding how to configure a cron job to remove artifacts - -Cron jobs can leave behind artifact resources such as jobs or pods. As a user it is important -to configure history limits so that old jobs and their pods are properly cleaned up. There are two fields within a cron job's spec responsible for that: - -* `.spec.successfulJobsHistoryLimit`. The number of successful finished jobs to retain (defaults to 3). - -* `.spec.failedJobsHistoryLimit`. The number of failed finished jobs to retain (defaults to 1). - -[TIP] -==== -* Delete cron jobs that you no longer need: -+ -[source,terminal] ----- -$ oc delete cronjob/<cron_job_name> ----- -+ -Doing this prevents them from generating unnecessary artifacts. - -* You can suspend further executions by setting `spec.suspend` to `true`. All subsequent executions are suspended until you reset it to `false`. -==== - -[id="jobs-limits_{context}"] -== Known limitations - -The job specification restart policy only applies to the _pods_, and not the _job controller_. However, the job controller is hard-coded to keep retrying jobs to completion. - -As such, `restartPolicy: Never` or `--restart=Never` results in the same behavior as `restartPolicy: OnFailure` or `--restart=OnFailure`. That is, when a job fails, it is restarted automatically until it succeeds (or is manually discarded). The policy only sets which subsystem performs the restart. - -With the `Never` policy, the _job controller_ performs the restart. With each attempt, the job controller increments the number of failures in the job status and creates new pods. This means that with each failed attempt, the number of pods increases. - -With the `OnFailure` policy, _kubelet_ performs the restart. Each attempt does not increment the number of failures in the job status. In addition, the kubelet retries failed jobs by starting pods on the same node. diff --git a/modules/nodes-nodes-jobs-creating-cron.adoc b/modules/nodes-nodes-jobs-creating-cron.adoc deleted file mode 100644 index 91b4f2e7f7cf..000000000000 --- a/modules/nodes-nodes-jobs-creating-cron.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-jobs.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-jobs-creating-cron_{context}"] -= Creating cron jobs - -You create a cron job in {product-title} by creating a `CronJob` object. - -.Procedure - -To create a cron job: - -. 
Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: batch/v1 -kind: CronJob -metadata: - name: pi -spec: - schedule: "*/1 * * * *" <1> - timeZone: Etc/UTC <2> - concurrencyPolicy: "Replace" <3> - startingDeadlineSeconds: 200 <4> - suspend: true <5> - successfulJobsHistoryLimit: 3 <6> - failedJobsHistoryLimit: 1 <7> - jobTemplate: <8> - spec: - template: - metadata: - labels: <9> - parent: "cronjobpi" - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: OnFailure <10> ----- -+ -<1> Schedule for the job specified in link:https://en.wikipedia.org/wiki/Cron[cron format]. In this example, the job will run every minute. -<2> An optional time zone for the schedule. See link:https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[List of tz database time zones] for valid options. If not specified, the Kubernetes controller manager interprets the schedule relative to its local time zone. This setting is offered as a link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. -<3> An optional concurrency policy, specifying how to treat concurrent jobs within a cron job. Only one of the following concurrent policies may be specified. If not specified, this defaults to allowing concurrent executions. -* `Allow` allows cron jobs to run concurrently. -* `Forbid` forbids concurrent runs, skipping the next run if the previous has not -finished yet. -* `Replace` cancels the currently running job and replaces -it with a new one. -<4> An optional deadline (in seconds) for starting the job if it misses its -scheduled time for any reason. Missed jobs executions will be counted as failed -ones. If not specified, there is no deadline. -<5> An optional flag allowing the suspension of a cron job. If set to `true`, -all subsequent executions will be suspended. -<6> The number of successful finished jobs to retain (defaults to 3). -<7> The number of failed finished jobs to retain (defaults to 1). -<8> Job template. This is similar to the job example. -<9> Sets a label for jobs spawned by this cron job. -<10> The restart policy of the pod. This does not apply to the job controller. -+ -[NOTE] -==== -The `.spec.successfulJobsHistoryLimit` and `.spec.failedJobsHistoryLimit` fields are optional. -These fields specify how many completed and failed jobs should be kept. By default, they are -set to `3` and `1` respectively. Setting a limit to `0` corresponds to keeping none of the corresponding -kind of jobs after they finish. -==== - -. Create the cron job: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -[NOTE] -==== -You can also create and launch a cron job from a single command using `oc create cronjob`. The following command creates and launches a cron job similar to the one specified in the previous example: - -[source,terminal] ----- -$ oc create cronjob pi --image=perl --schedule='*/1 * * * *' -- perl -Mbignum=bpi -wle 'print bpi(2000)' ----- - -With `oc create cronjob`, the `--schedule` option accepts schedules in link:https://en.wikipedia.org/wiki/Cron[cron format]. 
-==== diff --git a/modules/nodes-nodes-jobs-creating.adoc b/modules/nodes-nodes-jobs-creating.adoc deleted file mode 100644 index 1e69f002bcc1..000000000000 --- a/modules/nodes-nodes-jobs-creating.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-jobs.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-jobs-creating_{context}"] -= Creating jobs - -You create a job in {product-title} by creating a job object. - -.Procedure - -To create a job: - -. Create a YAML file similar to the following: -+ -[source,yaml] ----- -apiVersion: batch/v1 -kind: Job -metadata: - name: pi -spec: - parallelism: 1 <1> - completions: 1 <2> - activeDeadlineSeconds: 1800 <3> - backoffLimit: 6 <4> - template: <5> - metadata: - name: pi - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: OnFailure <6> ----- -<1> Optional: Specify how many pod replicas a job should run in parallel; defaults to `1`. -* For non-parallel jobs, leave unset. When unset, defaults to `1`. -<2> Optional: Specify how many successful pod completions are needed to mark a job completed. -* For non-parallel jobs, leave unset. When unset, defaults to `1`. -* For parallel jobs with a fixed completion count, specify the number of completions. -* For parallel jobs with a work queue, leave unset. When unset defaults to the `parallelism` value. -<3> Optional: Specify the maximum duration the job can run. -<4> Optional: Specify the number of retries for a job. This field defaults to six. -<5> Specify the template for the pod the controller creates. -<6> Specify the restart policy of the pod: -* `Never`. Do not restart the job. -* `OnFailure`. Restart the job only if it fails. -* `Always`. Always restart the job. -+ -For details on how {product-title} uses restart policy with failed containers, see -the link:https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#example-states[Example States] in the Kubernetes documentation. - -. Create the job: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -[NOTE] -==== -You can also create and launch a job from a single command using `oc create job`. The following command creates and launches a job similar to the one specified in the previous example: - -[source,terminal] ----- -$ oc create job pi --image=perl -- perl -Mbignum=bpi -wle 'print bpi(2000)' ----- -==== diff --git a/modules/nodes-nodes-kernel-arguments.adoc b/modules/nodes-nodes-kernel-arguments.adoc deleted file mode 100644 index 3e88677148c6..000000000000 --- a/modules/nodes-nodes-kernel-arguments.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-managing.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-kernel-arguments_{context}"] -= Adding kernel arguments to nodes - -In some special cases, you might want to add kernel arguments to a set of nodes in your cluster. This should only be done with caution and clear understanding of the implications of the arguments you set. - -[WARNING] -==== -Improper use of kernel arguments can result in your systems becoming unbootable. -==== - -Examples of kernel arguments you could set include: - -* **enforcing=0**: Configures Security Enhanced Linux (SELinux) to run in permissive mode. 
In permissive mode, the system acts as if SELinux is enforcing the loaded security policy, including labeling objects and emitting access denial entries in the logs, but it does not actually deny any operations. While not supported for production systems, permissive mode can be helpful for debugging. - -* **nosmt**: Disables symmetric multithreading (SMT) in the kernel. Multithreading allows multiple logical threads for each CPU. You could consider `nosmt` in multi-tenant environments to reduce risks from potential cross-thread attacks. By disabling SMT, you essentially choose security over performance. - -ifndef::openshift-origin[] -* **systemd.unified_cgroup_hierarchy**: Enables link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[Linux control group version 2] (cgroup v2). cgroup v2 is the next version of the kernel link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01[control group] and offers multiple improvements. -endif::openshift-origin[] - -ifdef::openshift-origin[] -* **systemd.unified_cgroup_hierarchy**: Configures the version of Linux control group that is installed on your nodes: link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1.html[cgroup v1] or link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[cgroup v2]. cgroup v2 is the next version of the kernel link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/ch01[control group] and offers multiple improvements. However, it can have some unwanted effects on your nodes. -+ -[NOTE] -==== -cgroup v2 is enabled by default. To disable cgroup v2, use the `systemd.unified_cgroup_hierarchy=0` kernel argument, as shown in the following procedure. -==== -endif::openshift-origin[] - -See link:https://www.kernel.org/doc/Documentation/admin-guide/kernel-parameters.txt[Kernel.org kernel parameters] for a list and descriptions of kernel arguments. - -In the following procedure, you create a `MachineConfig` object that identifies: - -* A set of machines to which you want to add the kernel argument. In this case, machines with a worker role. -* Kernel arguments that are appended to the end of the existing kernel arguments. -* A label that indicates where in the list of machine configs the change is applied. - -.Prerequisites -* Have administrative privilege to a working {product-title} cluster. - -.Procedure - -. 
List existing `MachineConfig` objects for your {product-title} cluster to determine how to -label your machine config: -+ -[source,terminal] ----- -$ oc get MachineConfig ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -00-worker 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-ssh 3.2.0 40m -99-worker-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-ssh 3.2.0 40m -rendered-master-23e785de7587df95a4b517e0647e5ab7 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -rendered-worker-5d596d9293ca3ea80c896a1191735bb1 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m ----- - -ifndef::openshift-origin[] -. Create a `MachineConfig` object file that identifies the kernel argument (for example, `05-worker-kernelarg-selinuxpermissive.yaml`) -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker<1> - name: 05-worker-kernelarg-selinuxpermissive<2> -spec: - kernelArguments: - - enforcing=0<3> ----- -+ -<1> Applies the new kernel argument only to worker nodes. -<2> Named to identify where it fits among the machine configs (05) and what it does (adds -a kernel argument to configure SELinux permissive mode). -<3> Identifies the exact kernel argument as `enforcing=0`. -. Create the new machine config: -+ -[source,terminal] ----- -$ oc create -f 05-worker-kernelarg-selinuxpermissive.yaml ----- -endif::openshift-origin[] -ifdef::openshift-origin[] -. Create a `MachineConfig` object file that identifies the kernel argument (for example, `05-worker-kernelarg-selinuxpermissive.yaml`) -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker <1> - name: 05-worker-kernelarg-selinuxpermissive <2> -spec: - config: - ignition: - version: 3.2.0 - kernelArguments: - - enforcing=0 <3> - systemd.unified_cgroup_hierarchy=0 <4> ----- -+ -<1> Applies the new kernel argument only to worker nodes. -<2> Named to identify where it fits among the machine configs (05) and what it does (adds -a kernel argument to configure SELinux permissive mode). -<3> Identifies the exact kernel argument as `enforcing=0`. -<4> Configures cgroup v1 on the associated nodes. cgroup v2 is the default. - -. Create the new machine config: -+ -[source,terminal] ----- -$ oc create -f 05-worker-kernelarg-selinuxpermissive.yaml ----- -endif::openshift-origin[] - -. 
Check the machine configs to see that the new one was added: -+ -[source,terminal] ----- -$ oc get MachineConfig ----- -+ -.Example output -[source,terminal] ----- -NAME GENERATEDBYCONTROLLER IGNITIONVERSION AGE -00-master 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -00-worker 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-master-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-container-runtime 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -01-worker-kubelet 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -05-worker-kernelarg-selinuxpermissive 3.2.0 105s -99-master-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-master-ssh 3.2.0 40m -99-worker-generated-registries 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -99-worker-ssh 3.2.0 40m -rendered-master-23e785de7587df95a4b517e0647e5ab7 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m -rendered-worker-5d596d9293ca3ea80c896a1191735bb1 52dd3ba6a9a527fc3ab42afac8d12b693534c8c9 3.2.0 33m ----- - -. Check the nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-136-161.ec2.internal Ready worker 28m v1.26.0 -ip-10-0-136-243.ec2.internal Ready master 34m v1.26.0 -ip-10-0-141-105.ec2.internal Ready,SchedulingDisabled worker 28m v1.26.0 -ip-10-0-142-249.ec2.internal Ready master 34m v1.26.0 -ip-10-0-153-11.ec2.internal Ready worker 28m v1.26.0 -ip-10-0-153-150.ec2.internal Ready master 34m v1.26.0 ----- -+ -You can see that scheduling on each worker node is disabled as the change is being applied. - -. Check that the kernel argument worked by going to one of the worker nodes and listing -the kernel command line arguments (in `/proc/cmdline` on the host): -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-141-105.ec2.internal ----- -+ -.Example output -[source,terminal] ----- -Starting pod/ip-10-0-141-105ec2internal-debug ... -To use host binaries, run `chroot /host` - -sh-4.2# cat /host/proc/cmdline -BOOT_IMAGE=/ostree/rhcos-... console=tty0 console=ttyS0,115200n8 -rootflags=defaults,prjquota rw root=UUID=fd0... ostree=/ostree/boot.0/rhcos/16... -coreos.oem.id=qemu coreos.oem.id=ec2 ignition.platform.id=ec2 enforcing=0 - -sh-4.2# exit ----- -+ -You should see the `enforcing=0` argument added to the other kernel arguments. diff --git a/modules/nodes-nodes-managing-about.adoc b/modules/nodes-nodes-managing-about.adoc deleted file mode 100644 index 69551ea5392c..000000000000 --- a/modules/nodes-nodes-managing-about.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-managing.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-managing-about_{context}"] -= Modifying nodes - -To make configuration changes to a cluster, or machine pool, you must create a custom resource definition (CRD), or `kubeletConfig` object. {product-title} uses the Machine Config Controller to watch for changes introduced through the CRD to apply the changes to the cluster. - -[NOTE] -==== -Because the fields in a `kubeletConfig` object are passed directly to the kubelet from upstream Kubernetes, the validation of those fields is handled directly by the kubelet itself. Please refer to the relevant Kubernetes documentation for the valid values for these fields. Invalid values in the `kubeletConfig` object can render cluster nodes unusable. -==== - -.Procedure - -. 
Obtain the label associated with the static CRD, Machine Config Pool, for the type of node you want to configure. -Perform one of the following steps: - -.. Check current labels of the desired machine config pool. -+ -For example: -+ -[source,terminal] ----- -$ oc get machineconfigpool --show-labels ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED LABELS -master rendered-master-e05b81f5ca4db1d249a1bf32f9ec24fd True False False operator.machineconfiguration.openshift.io/required-for-upgrade= -worker rendered-worker-f50e78e1bc06d8e82327763145bfcf62 True False False ----- - -.. Add a custom label to the desired machine config pool. -+ -For example: -+ -[source,terminal] ----- -$ oc label machineconfigpool worker custom-kubelet=enabled ----- - - -. Create a `kubeletconfig` custom resource (CR) for your configuration change. -+ -For example: -+ -.Sample configuration for a *custom-config* CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: custom-config <1> -spec: - machineConfigPoolSelector: - matchLabels: - custom-kubelet: enabled <2> - kubeletConfig: <3> - podsPerCore: 10 - maxPods: 250 - systemReserved: - cpu: 2000m - memory: 1Gi ----- -<1> Assign a name to CR. -<2> Specify the label to apply the configuration change, this is the label you added to the machine config pool. -<3> Specify the new value(s) you want to change. - -. Create the CR object. -+ -[source,terminal] ----- -$ oc create -f <file-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f master-kube-config.yaml ----- - -Most https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/[Kubelet Configuration options] can be set by the user. The following options are not allowed to be overwritten: - -* CgroupDriver -* ClusterDNS -* ClusterDomain -* StaticPodPath - -[NOTE] -==== -If a single node contains more than 50 images, pod scheduling might be imbalanced across nodes. This is because the list of images on a node is shortened to 50 by default. You can disable the image limit by editing the `KubeletConfig` object and setting the value of `nodeStatusMaxImages` to `-1`. -==== diff --git a/modules/nodes-nodes-managing-max-pods-proc.adoc b/modules/nodes-nodes-managing-max-pods-proc.adoc deleted file mode 100644 index 24581aa55e8b..000000000000 --- a/modules/nodes-nodes-managing-max-pods-proc.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-managing-max-pods.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-managing-max-pods-about_{context}"] -= Configuring the maximum number of pods per node - -Two parameters control the maximum number of pods that can be scheduled to a node: `podsPerCore` and `maxPods`. If you use both options, the lower of the two limits the number of pods on a node. - -For example, if `podsPerCore` is set to `10` on a node with 4 processor cores, the maximum number of pods allowed on the node will be 40. - -.Prerequisites - -. 
Obtain the label associated with the static `MachineConfigPool` CRD for the type of node you want to configure by entering the following command: -+ -[source,terminal] ----- -$ oc edit machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2022-11-16T15:34:25Z" - generation: 4 - labels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - name: worker ----- -<1> The label appears under Labels. -+ -[TIP] -==== -If the label is not present, add a key/value pair such as: - ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -==== - -.Procedure - -. Create a custom resource (CR) for your configuration change. -+ -.Sample configuration for a `max-pods` CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-max-pods <1> -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <2> - kubeletConfig: - podsPerCore: 10 <3> - maxPods: 250 <4> ----- -<1> Assign a name to CR. -<2> Specify the label from the machine config pool. -<3> Specify the number of pods the node can run based on the number of processor cores on the node. -<4> Specify the number of pods the node can run to a fixed value, regardless of the properties of the node. -+ -[NOTE] -==== -Setting `podsPerCore` to `0` disables this limit. -==== -+ -In the above example, the default value for `podsPerCore` is `10` and the default value for `maxPods` is `250`. This means that unless the node has 25 cores or more, by default, `podsPerCore` will be the limiting factor. - -. Run the following command to create the CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - -.Verification - -. List the `MachineConfigPool` CRDs to see if the change is applied. The `UPDATING` column reports `True` if the change is picked up by the Machine Config Controller: -+ -[source,terminal] ----- -$ oc get machineconfigpools ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED -master master-9cc2c72f205e103bb534 False False False -worker worker-8cecd1236b33ee3f8a5e False True False ----- -+ -Once the change is complete, the `UPDATED` column reports `True`. -+ -[source,terminal] ----- -$ oc get machineconfigpools ----- -+ -.Example output -[source,terminal] ----- -NAME CONFIG UPDATED UPDATING DEGRADED -master master-9cc2c72f205e103bb534 False True False -worker worker-8cecd1236b33ee3f8a5e True False False ----- diff --git a/modules/nodes-nodes-rebooting-affinity.adoc b/modules/nodes-nodes-rebooting-affinity.adoc deleted file mode 100644 index bef8c7ee5196..000000000000 --- a/modules/nodes-nodes-rebooting-affinity.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-rebooting.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-rebooting-affinity_{context}"] -= Rebooting a node using pod anti-affinity - -Pod anti-affinity is slightly different than node anti-affinity. Node anti-affinity can be -violated if there are no other suitable locations to deploy a pod. Pod -anti-affinity can be set to either required or preferred. - -With this in place, if only two infrastructure nodes are available and one is rebooted, the container image registry -pod is prevented from running on the other node. 
`*oc get pods*` reports the pod as unready until a suitable node is available. -Once a node is available and all pods are back in ready state, the next node can be restarted. - -.Procedure - -To reboot a node using pod anti-affinity: - -. Edit the node specification to configure pod anti-affinity: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: with-pod-antiaffinity -spec: - affinity: - podAntiAffinity: <1> - preferredDuringSchedulingIgnoredDuringExecution: <2> - - weight: 100 <3> - podAffinityTerm: - labelSelector: - matchExpressions: - - key: registry <4> - operator: In <5> - values: - - default - topologyKey: kubernetes.io/hostname ----- -<1> Stanza to configure pod anti-affinity. -<2> Defines a preferred rule. -<3> Specifies a weight for a preferred rule. The node with the highest weight is preferred. -<4> Description of the pod label that determines when the anti-affinity rule applies. Specify a key and value for the label. -<5> The operator represents the relationship between the label on the existing pod and the set of values in the `matchExpression` parameters in the specification for the new pod. Can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. -+ -This example assumes the container image registry pod has a label of -`registry=default`. Pod anti-affinity can use any Kubernetes match -expression. - -. Enable the `MatchInterPodAffinity` scheduler predicate in the scheduling policy file. -. Perform a graceful restart of the node. diff --git a/modules/nodes-nodes-rebooting-gracefully.adoc b/modules/nodes-nodes-rebooting-gracefully.adoc deleted file mode 100644 index 25a13768dd4a..000000000000 --- a/modules/nodes-nodes-rebooting-gracefully.adoc +++ /dev/null @@ -1,124 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-rebooting.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-rebooting-gracefully_{context}"] -= Rebooting a node gracefully - -Before rebooting a node, it is recommended to backup etcd data to avoid any data loss on the node. - -[NOTE] -==== -For {sno} clusters that require users to perform the `oc login` command rather than having the certificates in `kubeconfig` file to manage the cluster, the `oc adm` commands might not be available after cordoning and draining the node. This is because the `openshift-oauth-apiserver` pod is not running due to the cordon. You can use SSH to access the nodes as indicated in the following procedure. - -In a {sno} cluster, pods cannot be rescheduled when cordoning and draining. However, doing so gives the pods, especially your workload pods, time to properly stop and release associated resources. -==== - -.Procedure - -To perform a graceful restart of a node: - -. Mark the node as unschedulable: -+ -[source,terminal] ----- -$ oc adm cordon <node1> ----- - -. Drain the node to remove all the running pods: -+ -[source,terminal] ----- -$ oc adm drain <node1> --ignore-daemonsets --delete-emptydir-data --force ----- -+ -You might receive errors that pods associated with custom pod disruption budgets (PDB) cannot be evicted. -+ -.Example error -[source,terminal] ----- -error when evicting pods/"rails-postgresql-example-1-72v2w" -n "rails" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget. ----- -+ -In this case, run the drain command again, adding the `disable-eviction` flag, which bypasses the PDB checks: -+ -[source,terminal] ----- -$ oc adm drain <node1> --ignore-daemonsets --delete-emptydir-data --force --disable-eviction ----- - -. 
Access the node in debug mode: -+ -[source,terminal] ----- -$ oc debug node/<node1> ----- - -. Change your root directory to `/host`: -+ -[source,terminal] ----- -$ chroot /host ----- - -. Restart the node: -+ -[source,terminal] ----- -$ systemctl reboot ----- -+ -In a moment, the node enters the `NotReady` state. -+ -[NOTE] -==== -With some {sno} clusters, the `oc` commands might not be available after you cordon and drain the node because the `openshift-oauth-apiserver` pod is not running. You can use SSH to connect to the node and perform the reboot. - -[source,terminal] ----- -$ ssh core@<master-node>.<cluster_name>.<base_domain> ----- - -[source,terminal] ----- -$ sudo systemctl reboot ----- -==== - -. After the reboot is complete, mark the node as schedulable by running the following command: -+ -[source,terminal] ----- -$ oc adm uncordon <node1> ----- -+ -[NOTE] -==== -With some {sno} clusters, the `oc` commands might not be available after you cordon and drain the node because the `openshift-oauth-apiserver` pod is not running. You can use SSH to connect to the node and uncordon it. - -[source,terminal] ----- -$ ssh core@<target_node> ----- - -[source,terminal] ----- -$ sudo oc adm uncordon <node> --kubeconfig /etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig ----- -==== - -. Verify that the node is ready: -+ -[source,terminal] ----- -$ oc get node <node1> ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -<node1> Ready worker 6d22h v1.18.3+b0068a8 ----- - diff --git a/modules/nodes-nodes-rebooting-infrastructure.adoc b/modules/nodes-nodes-rebooting-infrastructure.adoc deleted file mode 100644 index 02fc4113dc8f..000000000000 --- a/modules/nodes-nodes-rebooting-infrastructure.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-rebooting.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-rebooting-infrastructure_{context}"] -= About rebooting nodes running critical infrastructure - -When rebooting nodes that host critical {product-title} infrastructure components, such as router pods, registry pods, and monitoring pods, ensure that there are at least three nodes available to run these components. - -The following scenario demonstrates how service interruptions can occur with applications running on {product-title} when only two nodes are available: - -- Node A is marked unschedulable and all pods are evacuated. -- The registry pod running on that node is now redeployed on node B. Node B is now running both registry pods. -- Node B is now marked unschedulable and is evacuated. -- The service exposing the two pod endpoints on node B loses all endpoints, for a brief period of time, until they are redeployed to node A. - -When using three nodes for infrastructure components, this process does not result in a service disruption. However, due to pod scheduling, the last node that is evacuated and brought back into rotation does not have a registry pod. One of the other nodes has two registry pods. To schedule the third registry pod on the last node, use pod anti-affinity to prevent the scheduler from locating two registry pods on the same node. 
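While restarting infrastructure nodes one at a time, you can check where the registry pods are currently scheduled before taking down the next node. This is a sketch only; it reuses the `registry=default` label assumed in the pod anti-affinity example, and `<registry_namespace>` is a placeholder for the project that runs your registry:

[source,terminal]
----
$ oc get pods -l registry=default -n <registry_namespace> -o wide
----

The `NODE` column in the output shows whether the registry pods are running on different nodes.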
diff --git a/modules/nodes-nodes-rebooting-router.adoc b/modules/nodes-nodes-rebooting-router.adoc deleted file mode 100644 index c0ec07e1c576..000000000000 --- a/modules/nodes-nodes-rebooting-router.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-rebooting.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-rebooting-router_{context}"] -= Understanding how to reboot nodes running routers - -In most cases, a pod running an {product-title} router exposes a host port. - -The `PodFitsPorts` scheduler predicate ensures that no router pods using the -same port can run on the same node, and pod anti-affinity is achieved. If the -routers are relying on IP failover for high availability, there is nothing else that is needed. - -For router pods relying on an external service such as AWS Elastic Load Balancing for high -availability, it is that service's responsibility to react to router pod restarts. - -In rare cases, a router pod may not have a host port configured. In those cases, -it is important to follow the recommended restart process for infrastructure nodes. diff --git a/modules/nodes-nodes-resources-configuring-about.adoc b/modules/nodes-nodes-resources-configuring-about.adoc deleted file mode 100644 index 30cb67cdc5fd..000000000000 --- a/modules/nodes-nodes-resources-configuring-about.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-resources-configuring.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-resources-configuring-about_{context}"] -= Understanding how to allocate resources for nodes - -CPU and memory resources reserved for node components in {product-title} are based on two node settings: - -[options="header",cols="1,2"] -|=== -|Setting |Description - -|`kube-reserved` -| This setting is not used with {product-title}. Add the CPU and memory resources that you planned to reserve to the `system-reserved` setting. - -|`system-reserved` -| This setting identifies the resources to reserve for the node components and system components, such as CRI-O and Kubelet. The default settings depend on the {product-title} and Machine Config Operator versions. Confirm the default `systemReserved` parameter on the `machine-config-operator` repository. -|=== - -If a flag is not set, the defaults are used. If none of the flags are set, the -allocated resource is set to the node's capacity as it was before the -introduction of allocatable resources. - -[NOTE] -==== -Any CPUs specifically reserved using the `reservedSystemCPUs` parameter are not available for allocation using `kube-reserved` or `system-reserved`. -==== - -[id="computing-allocated-resources_{context}"] -== How {product-title} computes allocated resources - -An allocated amount of a resource is computed based on the following formula: - ----- -[Allocatable] = [Node Capacity] - [system-reserved] - [Hard-Eviction-Thresholds] ----- - -[NOTE] -==== -The withholding of `Hard-Eviction-Thresholds` from `Allocatable` improves system reliability because the value for `Allocatable` is enforced for pods at the node level. -==== - -If `Allocatable` is negative, it is set to `0`. - -Each node reports the system resources that are used by the container runtime and kubelet. To simplify configuring the `system-reserved` parameter, view the resource use for the node by using the node summary API. The node summary is available at `/api/v1/nodes/<node>/proxy/stats/summary`. 
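For example, assuming a placeholder node name, one way to read the node summary is to request that path directly through the API server:

[source,terminal]
----
$ oc get --raw /api/v1/nodes/<node_name>/proxy/stats/summary
----

The response includes the memory, CPU, and file system usage reported for the node, which you can use as a starting point when sizing the `system-reserved` values.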
- -[id="allocate-node-enforcement_{context}"] -== How nodes enforce resource constraints - -The node is able to limit the total amount of resources that pods can consume based on the configured allocatable value. This feature significantly improves the reliability of the node by preventing pods from using CPU and memory resources that are needed by system services such as the container runtime and node agent. To improve node reliability, administrators should reserve resources based on a target for resource use. - -The node enforces resource constraints by using a new cgroup hierarchy that enforces quality of service. All pods are launched in a dedicated cgroup hierarchy that is separate from system daemons. - -Administrators should treat system daemons similar to pods that have a guaranteed quality of service. System daemons can burst within their bounding control groups and this behavior must be managed as part of cluster deployments. Reserve CPU and memory resources for system daemons by specifying the amount of CPU and memory resources in `system-reserved`. - -Enforcing `system-reserved` limits can prevent critical system services from receiving CPU and memory resources. As a result, a critical system service can be ended by the out-of-memory killer. The recommendation is to enforce `system-reserved` only if you have profiled the nodes exhaustively to determine precise estimates and you are confident that critical system services can recover if any process in that group is ended by the out-of-memory killer. - -[id="allocate-eviction-thresholds_{context}"] -== Understanding Eviction Thresholds - -If a node is under memory pressure, it can impact the entire node and all pods running on the node. For example, a system daemon that uses more than its reserved amount of memory can trigger an out-of-memory event. To avoid or reduce the probability of system out-of-memory events, the node provides out-of-resource handling. - -You can reserve some memory using the `--eviction-hard` flag. The node attempts to evict -pods whenever memory availability on the node drops below the absolute value or percentage. -If system daemons do not exist on a node, pods are limited to the memory -`capacity - eviction-hard`. For this reason, resources set aside as a buffer for eviction -before reaching out of memory conditions are not available for pods. - -The following is an example to illustrate the impact of node allocatable for memory: - -* Node capacity is `32Gi` -* --system-reserved is `3Gi` -* --eviction-hard is set to `100Mi`. - -For this node, the effective node allocatable value is `28.9Gi`. If the node and system components use all their reservation, the memory available for pods is `28.9Gi`, and kubelet evicts pods when it exceeds this threshold. - -If you enforce node allocatable, `28.9Gi`, with top-level cgroups, then pods can never exceed `28.9Gi`. Evictions are not performed unless system daemons consume more than `3.1Gi` of memory. - -If system daemons do not use up all their reservation, with the above example, -pods would face memcg OOM kills from their bounding cgroup before node evictions kick in. -To better enforce QoS under this situation, the node applies the hard eviction thresholds to -the top-level cgroup for all pods to be `Node Allocatable + Eviction Hard Thresholds`. - -If system daemons do not use up all their reservation, the node will evict pods whenever -they consume more than `28.9Gi` of memory. 
If eviction does not occur in time, a pod -will be OOM killed if pods consume `29Gi` of memory. - -[id="allocate-scheduler-policy_{context}"] -== How the scheduler determines resource availability - -The scheduler uses the value of `node.Status.Allocatable` instead of -`node.Status.Capacity` to decide if a node will become a candidate for pod -scheduling. - -By default, the node will report its machine capacity as fully schedulable by -the cluster. diff --git a/modules/nodes-nodes-resources-configuring-auto.adoc b/modules/nodes-nodes-resources-configuring-auto.adoc deleted file mode 100644 index 00920919f0a7..000000000000 --- a/modules/nodes-nodes-resources-configuring-auto.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-resources-configuring.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-resources-configuring-auto_{context}"] -= Automatically allocating resources for nodes - -{product-title} can automatically determine the optimal `system-reserved` CPU and memory resources for nodes associated with a specific machine config pool and update the nodes with those values when the nodes start. By default, the `system-reserved` CPU is `500m` and `system-reserved` memory is `1Gi`. - -To automatically determine and allocate the `system-reserved` resources on nodes, create a `KubeletConfig` custom resource (CR) to set the `autoSizingReserved: true` parameter. A script on each node calculates the optimal values for the respective reserved resources based on the installed CPU and memory capacity on each node. The script takes into account that increased capacity requires a corresponding increase in the reserved resources. - -Automatically determining the optimal `system-reserved` settings ensures that your cluster is running efficiently and prevents node failure due to resource starvation of system components, such as CRI-O and kubelet, without your needing to manually calculate and update the values. - -This feature is disabled by default. - -.Prerequisites - -. Obtain the label associated with the static `MachineConfigPool` object for the type of node you want to configure by entering the following command: -+ -[source,terminal] ----- -$ oc edit machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2022-11-16T15:34:25Z" - generation: 4 - labels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - name: worker - ... ----- -<1> The label appears under `Labels`. -+ -[TIP] -==== -If an appropriate label is not present, add a key/value pair such as: - ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -==== - -.Procedure - -. Create a custom resource (CR) for your configuration change: -+ -.Sample configuration for a resource allocation CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: dynamic-node <1> -spec: - autoSizingReserved: true <2> - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <3> ----- -<1> Assign a name to CR. -<2> Add the `autoSizingReserved` parameter set to `true` to allow {product-title} to automatically determine and allocate the `system-reserved` resources on the nodes associated with the specified label. 
To disable automatic allocation on those nodes, set this parameter to `false`. -<3> Specify the label from the machine config pool that you configured in the "Prerequisites" section. You can choose any desired labels for the machine config pool, such as `custom-kubelet: small-pods`, or the default label, `pools.operator.machineconfiguration.openshift.io/worker: ""`. -+ -The previous example enables automatic resource allocation on all worker nodes. {product-title} drains the nodes, applies the kubelet config, and restarts the nodes. - -. Create the CR by entering the following command: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - -.Verification - -. Log in to a node you configured by entering the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> ----- - -. Set `/host` as the root directory within the debug shell: -+ -[source,terminal] ----- -# chroot /host ----- - -. View the `/etc/node-sizing.env` file: -+ -.Example output -[source,terminal] ----- -SYSTEM_RESERVED_MEMORY=3Gi -SYSTEM_RESERVED_CPU=0.08 ----- -+ -The kubelet uses the `system-reserved` values in the `/etc/node-sizing.env` file. In the previous example, the worker nodes are allocated `0.08` CPU and 3 Gi of memory. It can take several minutes for the optimal values to appear. diff --git a/modules/nodes-nodes-resources-configuring-setting.adoc b/modules/nodes-nodes-resources-configuring-setting.adoc deleted file mode 100644 index 6f38c84fcd96..000000000000 --- a/modules/nodes-nodes-resources-configuring-setting.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-resources-configuring.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-resources-configuring-setting_{context}"] -= Manually allocating resources for nodes - -{product-title} supports the CPU and memory resource types for allocation. The `ephemeral-resource` resource type is also supported. For the `cpu` type, you specify the resource quantity in units of cores, such as `200m`, `0.5`, or `1`. For `memory` and `ephemeral-storage`, you specify the resource quantity in units of bytes, such as `200Ki`, `50Mi`, or `5Gi`. By default, the `system-reserved` CPU is `500m` and `system-reserved` memory is `1Gi`. - -As an administrator, you can set these values by using a kubelet config custom resource (CR) through a set of `<resource_type>=<resource_quantity>` pairs -(e.g., `cpu=200m,memory=512Mi`). - -[IMPORTANT] -==== -You must use a kubelet config CR to manually set resource values. You cannot use a machine config CR. -==== - -For details on the recommended `system-reserved` values, refer to the link:https://access.redhat.com/solutions/5843241[recommended system-reserved values]. - -.Prerequisites - -. Obtain the label associated with the static `MachineConfigPool` CRD for the type of node you want to configure by entering the following command: -+ -[source,terminal] ----- -$ oc edit machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc edit machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2022-11-16T15:34:25Z" - generation: 4 - labels: - pools.operator.machineconfiguration.openshift.io/worker: "" <1> - name: worker ----- -<1> The label appears under Labels. 
-+ -[TIP] -==== -If the label is not present, add a key/value pair such as: - ----- -$ oc label machineconfigpool worker custom-kubelet=small-pods ----- -==== - -.Procedure - -. Create a custom resource (CR) for your configuration change. -+ -.Sample configuration for a resource allocation CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-allocatable <1> -spec: - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <2> - kubeletConfig: - systemReserved: <3> - cpu: 1000m - memory: 1Gi ----- -<1> Assign a name to CR. -<2> Specify the label from the machine config pool. -<3> Specify the resources to reserve for the node components and system components. - -. Run the following command to create the CR: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- diff --git a/modules/nodes-nodes-resources-cpus-reserve.adoc b/modules/nodes-nodes-resources-cpus-reserve.adoc deleted file mode 100644 index ea660634fdbb..000000000000 --- a/modules/nodes-nodes-resources-cpus-reserve.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-resources-cpus - -:_content-type: PROCEDURE -[id="nodes-nodes-resources-cpus-reserve_{context}"] -= Reserving CPUs for nodes - -To explicitly define a list of CPUs that are reserved for specific nodes, create a `KubeletConfig` custom resource (CR) to define the `reservedSystemCPUs` parameter. This list supersedes the CPUs that might be reserved using the `systemReserved` and `kubeReserved` parameters. - -.Procedure - -. Obtain the label associated with the machine config pool (MCP) for the type of node you want to configure: -+ -[source,terminal] ----- -$ oc describe machineconfigpool <name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe machineconfigpool worker ----- -+ -.Example output -[source,yaml] ----- -Name: worker -Namespace: -Labels: machineconfiguration.openshift.io/mco-built-in= - pools.operator.machineconfiguration.openshift.io/worker= <1> -Annotations: <none> -API Version: machineconfiguration.openshift.io/v1 -Kind: MachineConfigPool -... ----- -<1> Get the MCP label. - -. Create a YAML file for the `KubeletConfig` CR: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: set-reserved-cpus <1> -spec: - kubeletConfig: - reservedSystemCPUs: "0,1,2,3" <2> - machineConfigPoolSelector: - matchLabels: - pools.operator.machineconfiguration.openshift.io/worker: "" <3> ----- -<1> Specify a name for the CR. -<2> Specify the core IDs of the CPUs you want to reserve for the nodes associated with the MCP. -<3> Specify the label from the MCP. - -. 
Create the CR object: -+ -[source,terminal] ----- -$ oc create -f <file_name>.yaml ----- - diff --git a/modules/nodes-nodes-rtkernel-arguments.adoc b/modules/nodes-nodes-rtkernel-arguments.adoc deleted file mode 100644 index d966d3c6e8ce..000000000000 --- a/modules/nodes-nodes-rtkernel-arguments.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-nodes-managing.adoc -// * post_installation_configuration/machine-configuration-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-rtkernel-arguments_{context}"] -= Adding a real-time kernel to nodes - -Some {product-title} workloads require a high degree of determinism. While Linux is not a real-time operating system, the Linux real-time -kernel includes a preemptive scheduler that provides the operating system with real-time characteristics. - -If your {product-title} workloads require these real-time characteristics, you can switch your machines to the Linux real-time kernel. For {product-title} {product-version}, you can make this switch by using a `MachineConfig` object. Although making the change is as simple as changing a machine config `kernelType` setting to `realtime`, there are a few other considerations before making the change: - -* Currently, the real-time kernel is supported only on worker nodes, and only for radio access network (RAN) use. -* The following procedure is fully supported with bare metal installations that use systems that are certified for Red Hat Enterprise Linux for Real Time 8. -* Real-time support in {product-title} is limited to specific subscriptions. -* The following procedure is also supported for use with Google Cloud Platform. - -.Prerequisites -* Have a running {product-title} cluster (version 4.4 or later). -* Log in to the cluster as a user with administrative privileges. - -.Procedure - -. Create a machine config for the real-time kernel: Create a YAML file (for example, `99-worker-realtime.yaml`) that contains a `MachineConfig` -object for the `realtime` kernel type. This example tells the cluster to use a real-time kernel for all worker nodes: -+ -[source,terminal] ----- -$ cat << EOF > 99-worker-realtime.yaml -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: "worker" - name: 99-worker-realtime -spec: - kernelType: realtime -EOF ----- - -. Add the machine config to the cluster by entering the following command: -+ -[source,terminal] ----- -$ oc create -f 99-worker-realtime.yaml ----- - -. Check the real-time kernel: After each affected node reboots, log in to the cluster and run the following commands to make sure that the real-time kernel has replaced the regular kernel for the set of nodes you configured: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-143-147.us-east-2.compute.internal Ready worker 103m v1.26.0 -ip-10-0-146-92.us-east-2.compute.internal Ready worker 101m v1.26.0 -ip-10-0-169-2.us-east-2.compute.internal Ready worker 102m v1.26.0 ----- -+ -[source,terminal] ----- -$ oc debug node/ip-10-0-143-147.us-east-2.compute.internal ----- -+ -.Example output -[source,terminal] ----- -Starting pod/ip-10-0-143-147us-east-2computeinternal-debug ...
-To use host binaries, run `chroot /host` - -sh-4.4# uname -a -Linux <worker_node> 4.18.0-147.3.1.rt24.96.el8_1.x86_64 #1 SMP PREEMPT RT - Wed Nov 27 18:29:55 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux ----- -+ -The kernel name contains `rt` and text “PREEMPT RT” indicates that this is a real-time kernel. - -. To go back to the regular kernel, delete the `MachineConfig` object: -+ -[source,terminal] ----- -$ oc delete -f 99-worker-realtime.yaml ----- diff --git a/modules/nodes-nodes-swap-memory.adoc b/modules/nodes-nodes-swap-memory.adoc deleted file mode 100644 index 2a964d22fe4f..000000000000 --- a/modules/nodes-nodes-swap-memory.adoc +++ /dev/null @@ -1,70 +0,0 @@ -:_content-type: PROCEDURE -[id="nodes-nodes-swap-memory_{context}"] - -= Enabling swap memory use on nodes - -:FeatureName: Enabling swap memory use on nodes -include::snippets/technology-preview.adoc[] - -You can enable swap memory use for {product-title} workloads on a per-node basis. - -[WARNING] -==== -Enabling swap memory can negatively impact workload performance and out-of-resource handling. Do not enable swap memory on control plane nodes. -==== - -To enable swap memory, create a `kubeletconfig` custom resource (CR) to set the `swapbehavior` parameter. You can set limited or unlimited swap memory: - -* Limited: Use the `LimitedSwap` value to limit how much swap memory workloads can use. Any workloads on the node that are not managed by {product-title} can still use swap memory. The `LimitedSwap` behavior depends on whether the node is running with Linux control groups link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html[version 1 (cgroups v1)] or link:https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html[version 2 (cgroup v2)]: -** cgroup v1: {product-title} workloads can use any combination of memory and swap, up to the pod's memory limit, if set. -** cgroup v2: {product-title} workloads cannot use swap memory. - -* Unlimited: Use the `UnlimitedSwap` value to allow workloads to use as much swap memory as they request, up to the system limit. - -Because the kubelet will not start in the presence of swap memory without this configuration, you must enable swap memory in {product-title} before enabling swap memory on the nodes. If there is no swap memory present on a node, enabling swap memory in {product-title} has no effect. - -.Prerequisites - -* You have a running {product-title} cluster that uses version 4.10 or later. - -* You are logged in to the cluster as a user with administrative privileges. - -* You have enabled the `TechPreviewNoUpgrade` feature set on the cluster (see _Nodes -> Working with clusters -> Enabling features using feature gates_). -+ -[NOTE] -==== -Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. -==== - -* If cgroup v2 is enabled on a node, you must enable swap accounting on the node, by setting the `swapaccount=1` kernel argument. - -.Procedure - -. Apply a custom label to the machine config pool where you want to allow swap memory. -+ -[source,terminal] ----- -$ oc label machineconfigpool worker kubelet-swap=enabled ----- - -. Create a custom resource (CR) to enable and configure swap settings. 
-+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: swap-config -spec: - machineConfigPoolSelector: - matchLabels: - kubelet-swap: enabled - kubeletConfig: - failSwapOn: false <1> - memorySwap: - swapBehavior: LimitedSwap <2> ----- -<1> Set to `false` to enable swap memory use on the associated nodes. Set to `true` to disable swap memory use. -<2> Specify the swap memory behavior. If unspecified, the default is `LimitedSwap`. - -. Enable swap memory on the machines. diff --git a/modules/nodes-nodes-viewing-listing-pods.adoc b/modules/nodes-nodes-viewing-listing-pods.adoc deleted file mode 100644 index e785bf067464..000000000000 --- a/modules/nodes-nodes-viewing-listing-pods.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-viewing.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-viewing-listing-pods_{context}"] -= Listing pods on a node in your cluster - -You can list all the pods on a specific node. - -.Procedure - -* To list all or selected pods on one or more nodes: -+ -[source,terminal] ----- -$ oc describe node <node1> <node2> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe node ip-10-0-128-218.ec2.internal ----- - -* To list all or selected pods on selected nodes: -+ -[source,terminal] ----- -$ oc describe --selector=<node_selector> ----- -+ -[source,terminal] ----- -$ oc describe node --selector=kubernetes.io/os ----- -+ -Or: -+ -[source,terminal] ----- -$ oc describe -l=<pod_selector> ----- -+ -[source,terminal] ----- -$ oc describe node -l node-role.kubernetes.io/worker ----- - -* To list all pods on a specific node, including terminated pods: -+ -[source,terminal] ----- -$ oc get pod --all-namespaces --field-selector=spec.nodeName=<nodename> ----- diff --git a/modules/nodes-nodes-viewing-listing.adoc b/modules/nodes-nodes-viewing-listing.adoc deleted file mode 100644 index c0aa76a8b151..000000000000 --- a/modules/nodes-nodes-viewing-listing.adoc +++ /dev/null @@ -1,245 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-viewing.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-viewing-listing_{context}"] -= About listing all the nodes in a cluster - -You can get detailed information on the nodes in the cluster. - -* The following command lists all nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -The following example is a cluster with healthy nodes: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master.example.com Ready master 7h v1.26.0 -node1.example.com Ready worker 7h v1.26.0 -node2.example.com Ready worker 7h v1.26.0 ----- -+ -The following example is a cluster with one unhealthy node: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master.example.com Ready master 7h v1.26.0 -node1.example.com NotReady,SchedulingDisabled worker 7h v1.26.0 -node2.example.com Ready worker 7h v1.26.0 ----- -+ -The conditions that trigger a `NotReady` status are shown later in this section. - -* The `-o wide` option provides additional information on nodes. 
-+ -[source,terminal] ----- -$ oc get nodes -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -master.example.com Ready master 171m v1.26.0 10.0.129.108 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.26.0-30.rhaos4.10.gitf2f339d.el8-dev -node1.example.com Ready worker 72m v1.26.0 10.0.129.222 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.26.0-30.rhaos4.10.gitf2f339d.el8-dev -node2.example.com Ready worker 164m v1.26.0 10.0.142.150 <none> Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.26.0-30.rhaos4.10.gitf2f339d.el8-dev ----- - -* The following command lists information about a single node: -+ -[source,terminal] ----- -$ oc get node <node> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get node node1.example.com ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -node1.example.com Ready worker 7h v1.26.0 ----- - -* The following command provides more detailed information about a specific node, including the reason for -the current condition: -+ -[source,terminal] ----- -$ oc describe node <node> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc describe node node1.example.com ----- -+ -.Example output -[source,text] ----- -Name: node1.example.com <1> -Roles: worker <2> -Labels: kubernetes.io/os=linux - kubernetes.io/hostname=ip-10-0-131-14 - kubernetes.io/arch=amd64 <3> - node-role.kubernetes.io/worker= - node.kubernetes.io/instance-type=m4.large - node.openshift.io/os_id=rhcos - node.openshift.io/os_version=4.5 - region=east - topology.kubernetes.io/region=us-east-1 - topology.kubernetes.io/zone=us-east-1a -Annotations: cluster.k8s.io/machine: openshift-machine-api/ahardin-worker-us-east-2a-q5dzc <4> - machineconfiguration.openshift.io/currentConfig: worker-309c228e8b3a92e2235edd544c62fea8 - machineconfiguration.openshift.io/desiredConfig: worker-309c228e8b3a92e2235edd544c62fea8 - machineconfiguration.openshift.io/state: Done - volumes.kubernetes.io/controller-managed-attach-detach: true -CreationTimestamp: Wed, 13 Feb 2019 11:05:57 -0500 -Taints: <none> <5> -Unschedulable: false -Conditions: <6> - Type Status LastHeartbeatTime LastTransitionTime Reason Message - ---- ------ ----------------- ------------------ ------ ------- - OutOfDisk False Wed, 13 Feb 2019 15:09:42 -0500 Wed, 13 Feb 2019 11:05:57 -0500 KubeletHasSufficientDisk kubelet has sufficient disk space available - MemoryPressure False Wed, 13 Feb 2019 15:09:42 -0500 Wed, 13 Feb 2019 11:05:57 -0500 KubeletHasSufficientMemory kubelet has sufficient memory available - DiskPressure False Wed, 13 Feb 2019 15:09:42 -0500 Wed, 13 Feb 2019 11:05:57 -0500 KubeletHasNoDiskPressure kubelet has no disk pressure - PIDPressure False Wed, 13 Feb 2019 15:09:42 -0500 Wed, 13 Feb 2019 11:05:57 -0500 KubeletHasSufficientPID kubelet has sufficient PID available - Ready True Wed, 13 Feb 2019 15:09:42 -0500 Wed, 13 Feb 2019 11:07:09 -0500 KubeletReady kubelet is posting ready status -Addresses: <7> - InternalIP: 10.0.140.16 - InternalDNS: ip-10-0-140-16.us-east-2.compute.internal - Hostname: ip-10-0-140-16.us-east-2.compute.internal -Capacity: <8> - attachable-volumes-aws-ebs: 39 - cpu: 2 - hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 8172516Ki - pods: 250 -Allocatable: - attachable-volumes-aws-ebs: 39 - cpu: 1500m - 
hugepages-1Gi: 0 - hugepages-2Mi: 0 - memory: 7558116Ki - pods: 250 -System Info: <9> - Machine ID: 63787c9534c24fde9a0cde35c13f1f66 - System UUID: EC22BF97-A006-4A58-6AF8-0A38DEEA122A - Boot ID: f24ad37d-2594-46b4-8830-7f7555918325 - Kernel Version: 3.10.0-957.5.1.el7.x86_64 - OS Image: Red Hat Enterprise Linux CoreOS 410.8.20190520.0 (Ootpa) - Operating System: linux - Architecture: amd64 - Container Runtime Version: cri-o://1.26.0-0.6.dev.rhaos4.3.git9ad059b.el8-rc2 - Kubelet Version: v1.26.0 - Kube-Proxy Version: v1.26.0 -PodCIDR: 10.128.4.0/24 -ProviderID: aws:///us-east-2a/i-04e87b31dc6b3e171 -Non-terminated Pods: (12 in total) <10> - Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits - --------- ---- ------------ ---------- --------------- ------------- - openshift-cluster-node-tuning-operator tuned-hdl5q 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-dns dns-default-l69zr 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-image-registry node-ca-9hmcg 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-ingress router-default-76455c45c-c5ptv 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-machine-config-operator machine-config-daemon-cvqw9 20m (1%) 0 (0%) 50Mi (0%) 0 (0%) - openshift-marketplace community-operators-f67fh 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-monitoring alertmanager-main-0 50m (3%) 50m (3%) 210Mi (2%) 10Mi (0%) - openshift-monitoring node-exporter-l7q8d 10m (0%) 20m (1%) 20Mi (0%) 40Mi (0%) - openshift-monitoring prometheus-adapter-75d769c874-hvb85 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-multus multus-kw8w5 0 (0%) 0 (0%) 0 (0%) 0 (0%) - openshift-sdn ovs-t4dsn 100m (6%) 0 (0%) 300Mi (4%) 0 (0%) - openshift-sdn sdn-g79hg 100m (6%) 0 (0%) 200Mi (2%) 0 (0%) -Allocated resources: - (Total limits may be over 100 percent, i.e., overcommitted.) - Resource Requests Limits - -------- -------- ------ - cpu 380m (25%) 270m (18%) - memory 880Mi (11%) 250Mi (3%) - attachable-volumes-aws-ebs 0 0 -Events: <11> - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeHasSufficientPID 6d (x5 over 6d) kubelet, m01.example.com Node m01.example.com status is now: NodeHasSufficientPID - Normal NodeAllocatableEnforced 6d kubelet, m01.example.com Updated Node Allocatable limit across pods - Normal NodeHasSufficientMemory 6d (x6 over 6d) kubelet, m01.example.com Node m01.example.com status is now: NodeHasSufficientMemory - Normal NodeHasNoDiskPressure 6d (x6 over 6d) kubelet, m01.example.com Node m01.example.com status is now: NodeHasNoDiskPressure - Normal NodeHasSufficientDisk 6d (x6 over 6d) kubelet, m01.example.com Node m01.example.com status is now: NodeHasSufficientDisk - Normal NodeHasSufficientPID 6d kubelet, m01.example.com Node m01.example.com status is now: NodeHasSufficientPID - Normal Starting 6d kubelet, m01.example.com Starting kubelet. - ... ----- -<1> The name of the node. -<2> The role of the node, either `master` or `worker`. -<3> The labels applied to the node. -<4> The annotations applied to the node. -<5> The taints applied to the node. -<6> The node conditions and status. The `conditions` stanza lists the `Ready`, `PIDPressure`, `PIDPressure`, `MemoryPressure`, `DiskPressure` and `OutOfDisk` status. These condition are described later in this section. -<7> The IP address and hostname of the node. -<8> The pod resources and allocatable resources. -<9> Information about the node host. -<10> The pods on the node. -<11> The events reported by the node. 
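If you only need the current condition values, rather than the full node description, you can read them directly from the node status. The following is a minimal sketch that uses standard JSONPath output and assumes the `node1.example.com` node from the previous example:

[source,terminal]
----
$ oc get node node1.example.com -o jsonpath='{range .status.conditions[*]}{.type}={.status}{"\n"}{end}'
----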
- -Among the information shown for nodes, the following node conditions appear in the output of the commands shown in this section: - -[id="machine-health-checks-resource-conditions"] -.Node Conditions -[cols="3a,8a",options="header"] -|=== - -|Condition |Description - -|`Ready` -|If `true`, the node is healthy and ready to accept pods. If `false`, the node is not healthy and is not accepting pods. If `unknown`, the node controller has not received a heartbeat from the node for the `node-monitor-grace-period` (the default is 40 seconds). - -|`DiskPressure` -|If `true`, the disk capacity is low. - -|`MemoryPressure` -|If `true`, the node memory is low. - -|`PIDPressure` -|If `true`, there are too many processes on the node. - -|`OutOfDisk` -|If `true`, the node has insufficient free space on the node for adding new pods. - -|`NetworkUnavailable` -| If `true`, the network for the node is not correctly configured. - -|`NotReady` -|If `true`, one of the underlying components, such as the container runtime or network, is experiencing issues or is not yet configured. - -|`SchedulingDisabled` -|Pods cannot be scheduled for placement on the node. - -|=== diff --git a/modules/nodes-nodes-viewing-memory.adoc b/modules/nodes-nodes-viewing-memory.adoc deleted file mode 100644 index a1934174f5cb..000000000000 --- a/modules/nodes-nodes-viewing-memory.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-viewing.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-viewing-memory_{context}"] -= Viewing memory and CPU usage statistics on your nodes - -You can display usage statistics about nodes, which provide the runtime -environments for containers. These usage statistics include CPU, memory, and -storage consumption. - -.Prerequisites - -* You must have `cluster-reader` permission to view the usage statistics. - -* Metrics must be installed to view the usage statistics. - -.Procedure - -* To view the usage statistics: -+ -[source,terminal] ----- -$ oc adm top nodes ----- -+ -.Example output -[source,terminal] ----- -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -ip-10-0-12-143.ec2.compute.internal 1503m 100% 4533Mi 61% -ip-10-0-132-16.ec2.compute.internal 76m 5% 1391Mi 18% -ip-10-0-140-137.ec2.compute.internal 398m 26% 2473Mi 33% -ip-10-0-142-44.ec2.compute.internal 656m 43% 6119Mi 82% -ip-10-0-146-165.ec2.compute.internal 188m 12% 3367Mi 45% -ip-10-0-19-62.ec2.compute.internal 896m 59% 5754Mi 77% -ip-10-0-44-193.ec2.compute.internal 632m 42% 5349Mi 72% ----- - -* To view the usage statistics for nodes with labels: -+ -[source,terminal] ----- -$ oc adm top node --selector='' ----- -+ -You must choose the selector (label query) to filter on. Supports `=`, `==`, and `!=`. diff --git a/modules/nodes-nodes-working-deleting-bare-metal.adoc b/modules/nodes-nodes-working-deleting-bare-metal.adoc deleted file mode 100644 index ba93251a1a83..000000000000 --- a/modules/nodes-nodes-working-deleting-bare-metal.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc -// * virt/virtual_machines/virt-triggering-vm-failover-resolving-failed-node.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-working-deleting-bare-metal_{context}"] -= Deleting nodes from a bare metal cluster - -When you delete a node using the CLI, the node object is deleted in Kubernetes, -but the pods that exist on the node are not deleted. 
Any bare pods not backed by -a replication controller become inaccessible to {product-title}. Pods backed by -replication controllers are rescheduled to other available nodes. You must -delete local manifest pods. - -.Procedure - -Delete a node from an {product-title} cluster running on bare metal by completing -the following steps: - -. Mark the node as unschedulable: -+ -[source,terminal] ----- -$ oc adm cordon <node_name> ----- - -. Drain all pods on the node: -+ -[source,terminal] ----- -$ oc adm drain <node_name> --force=true ----- -+ -This step might fail if the node is offline or unresponsive. Even if the node does not respond, it might still be running a workload that writes to shared storage. To avoid data corruption, power down the physical hardware before you proceed. - -. Delete the node from the cluster: -+ -[source,terminal] ----- -$ oc delete node <node_name> ----- -+ -Although the node object is now deleted from the cluster, it can still rejoin -the cluster after reboot or if the kubelet service is restarted. To permanently -delete the node and all its data, you must -link:https://access.redhat.com/solutions/84663[decommission the node]. - -. If you powered down the physical hardware, turn it back on so that the node can rejoin the cluster. diff --git a/modules/nodes-nodes-working-deleting.adoc b/modules/nodes-nodes-working-deleting.adoc deleted file mode 100644 index dab38934d229..000000000000 --- a/modules/nodes-nodes-working-deleting.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-working-deleting_{context}"] -= Deleting nodes from a cluster - -When you delete a node using the CLI, the node object is deleted in Kubernetes, -but the pods that exist on the node are not deleted. Any bare pods not -backed by a replication controller become inaccessible to {product-title}. -Pods backed by replication controllers are rescheduled to other available -nodes. You must delete local manifest pods. - -.Procedure - -To delete a node from the {product-title} cluster, edit the appropriate `MachineSet` object: - -[NOTE] -==== -If you are running cluster on bare metal, you cannot delete a node by editing -`MachineSet` objects. Compute machine sets are only available when a cluster is integrated with a cloud provider. Instead you must unschedule and drain the node before manually -deleting it. -==== - -. View the compute machine sets that are in the cluster: -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -The compute machine sets are listed in the form of <clusterid>-worker-<aws-region-az>. - -. 
Scale the compute machine set: -+ -[source,terminal] ----- -$ oc scale --replicas=2 machineset <machineset> -n openshift-machine-api ----- -+ -Or: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale the compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - replicas: 2 ----- -==== - diff --git a/modules/nodes-nodes-working-evacuating.adoc b/modules/nodes-nodes-working-evacuating.adoc deleted file mode 100644 index b8371f3c3fed..000000000000 --- a/modules/nodes-nodes-working-evacuating.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-working-evacuating_{context}"] -= Understanding how to evacuate pods on nodes - -Evacuating pods allows you to migrate all or selected pods from a given node or -nodes. - -You can only evacuate pods backed by a replication controller. The replication controller creates new pods on -other nodes and removes the existing pods from the specified node(s). - -Bare pods, meaning those not backed by a replication controller, are unaffected by default. -You can evacuate a subset of pods by specifying a pod-selector. Pod selectors are -based on labels, so all the pods with the specified label will be evacuated. - -.Procedure - -. Mark the nodes unschedulable before performing the pod evacuation. - -.. Mark the node as unschedulable: -+ -[source,terminal] ----- -$ oc adm cordon <node1> ----- -+ -.Example output -[source,terminal] ----- -node/<node1> cordoned ----- - -.. Check that the node status is `Ready,SchedulingDisabled`: -+ -[source,terminal] ----- -$ oc get node <node1> ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -<node1> Ready,SchedulingDisabled worker 1d v1.26.0 ----- - -. Evacuate the pods using one of the following methods: - -** Evacuate all or selected pods on one or more nodes: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> [--pod-selector=<pod_selector>] ----- - -** Force the deletion of bare pods using the `--force` option. When set to -`true`, deletion continues even if there are pods not managed by a replication -controller, replica set, job, daemon set, or stateful set: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --force=true ----- - -** Set a period of time in seconds for each pod to -terminate gracefully, use `--grace-period`. If negative, the default value specified in the pod will -be used: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --grace-period=-1 ----- - -** Ignore pods managed by daemon sets using the `--ignore-daemonsets` flag set to `true`: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --ignore-daemonsets=true ----- - -** Set the length of time to wait before giving up using the `--timeout` flag. A -value of `0` sets an infinite length of time: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --timeout=5s ----- - -** Delete pods even if there are pods using `emptyDir` volumes by setting the `--delete-emptydir-data` flag to `true`. 
Local data is deleted when the node -is drained: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --delete-emptydir-data=true ----- - -** List objects that will be migrated without actually performing the evacuation, -using the `--dry-run` option set to `true`: -+ -[source,terminal] ----- -$ oc adm drain <node1> <node2> --dry-run=true ----- -+ -Instead of specifying specific node names (for example, `<node1> <node2>`), you -can use the `--selector=<node_selector>` option to evacuate pods on selected -nodes. - -. Mark the node as schedulable when done. -+ -[source,terminal] ----- -$ oc adm uncordon <node1> ----- diff --git a/modules/nodes-nodes-working-marking.adoc b/modules/nodes-nodes-working-marking.adoc deleted file mode 100644 index ffae51ce3ae1..000000000000 --- a/modules/nodes-nodes-working-marking.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-working-marking_{context}"] -= Understanding how to mark nodes as unschedulable or schedulable - -By default, healthy nodes with a `Ready` status are -marked as schedulable, which means that you can place new pods on the -node. Manually marking a node as unschedulable blocks any new pods from being -scheduled on the node. Existing pods on the node are not affected. - -* The following command marks a node or nodes as unschedulable: -+ -.Example output -[source,terminal] ----- -$ oc adm cordon <node> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm cordon node1.example.com ----- -+ -.Example output -[source,terminal] ----- -node/node1.example.com cordoned - -NAME LABELS STATUS -node1.example.com kubernetes.io/hostname=node1.example.com Ready,SchedulingDisabled ----- - -* The following command marks a currently unschedulable node or nodes as schedulable: -+ -[source,terminal] ----- -$ oc adm uncordon <node1> ----- -+ -Alternatively, instead of specifying specific node names (for example, `<node>`), you can use the `--selector=<node_selector>` option to mark selected -nodes as schedulable or unschedulable. diff --git a/modules/nodes-nodes-working-master-schedulable.adoc b/modules/nodes-nodes-working-master-schedulable.adoc deleted file mode 100644 index c57aa4ac6461..000000000000 --- a/modules/nodes-nodes-working-master-schedulable.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-managing.adoc - -:_content-type: PROCEDURE -[id="nodes-nodes-working-master-schedulable_{context}"] -= Configuring control plane nodes as schedulable - -You can configure control plane nodes to be -schedulable, meaning that new pods are allowed for placement on the master -nodes. By default, control plane nodes are not schedulable. - -You can set the masters to be schedulable, but must retain the worker nodes. - -[NOTE] -==== -You can deploy {product-title} with no worker nodes on a bare metal cluster. -In this case, the control plane nodes are marked schedulable by default. -==== - -You can allow or disallow control plane nodes to be schedulable by configuring the `mastersSchedulable` field. - -[IMPORTANT] -==== -When you configure control plane nodes from the default unschedulable to schedulable, additional subscriptions are required. This is because control plane nodes then become worker nodes. -==== - -.Procedure - -. Edit the `schedulers.config.openshift.io` resource. -+ -[source,terminal] ----- -$ oc edit schedulers.config.openshift.io cluster ----- - -. 
Configure the `mastersSchedulable` field. -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - creationTimestamp: "2019-09-10T03:04:05Z" - generation: 1 - name: cluster - resourceVersion: "433" - selfLink: /apis/config.openshift.io/v1/schedulers/cluster - uid: a636d30a-d377-11e9-88d4-0a60097bee62 -spec: - mastersSchedulable: false <1> -status: {} ----- -<1> Set to `true` to allow control plane nodes to be schedulable, or `false` to -disallow control plane nodes to be schedulable. - -. Save the file to apply the changes. diff --git a/modules/nodes-nodes-working-setting-booleans.adoc b/modules/nodes-nodes-working-setting-booleans.adoc deleted file mode 100644 index 8b3956c7582d..000000000000 --- a/modules/nodes-nodes-working-setting-booleans.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes/nodes-nodes-managing.adoc - - -:_content-type: PROCEDURE -[id="nodes-nodes-working-setting-booleans"] - -= Setting SELinux booleans - -{product-title} allows you to enable and disable an SELinux boolean on a {op-system-first} node. The following procedure explains how to modify SELinux booleans on nodes using the Machine Config Operator (MCO). This procedure uses `container_manage_cgroup` as the example boolean. You can modify this value to whichever boolean you need. - -.Prerequisites - -* You have installed the OpenShift CLI (oc). - -.Procedure - -. Create a new YAML file with a `MachineConfig` object, displayed in the following example: -+ -[source, yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - labels: - machineconfiguration.openshift.io/role: worker - name: 99-worker-setsebool -spec: - config: - ignition: - version: 3.2.0 - systemd: - units: - - contents: | - [Unit] - Description=Set SELinux booleans - Before=kubelet.service - - [Service] - Type=oneshot - ExecStart=/sbin/setsebool container_manage_cgroup=on - RemainAfterExit=true - - [Install] - WantedBy=multi-user.target graphical.target - enabled: true - name: setsebool.service ----- -+ - -. Create the new `MachineConfig` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f 99-worker-setsebool.yaml ----- - -[NOTE] -==== -Applying any changes to the `MachineConfig` object causes all affected nodes to gracefully reboot after the change is applied. -==== diff --git a/modules/nodes-nodes-working-updating.adoc b/modules/nodes-nodes-working-updating.adoc deleted file mode 100644 index 96ad8dff6ffa..000000000000 --- a/modules/nodes-nodes-working-updating.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-nodes-working.adoc - -:_content-type: CONCEPT -[id="nodes-nodes-working-updating_{context}"] -= Understanding how to update labels on nodes - -You can update any label on a node. - -Node labels are not persisted after a node is deleted even if the node is backed up by a Machine. - -[NOTE] -==== -Any change to a `MachineSet` object is not applied to existing machines owned by the compute machine set. -For example, labels edited or added to an existing `MachineSet` object are not propagated to existing machines and nodes -associated with the compute machine set. -==== - -* The following command adds or updates labels on a node: -+ -[source,terminal] ----- -$ oc label node <node> <key_1>=<value_1> ... 
<key_n>=<value_n> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc label nodes webconsole-7f7f6 unhealthy=true ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to apply the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: webconsole-7f7f6 - labels: - unhealthy: 'true' ----- -==== - -* The following command updates all pods in the namespace: -+ -[source,terminal] ----- -$ oc label pods --all <key_1>=<value_1> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc label pods --all status=unhealthy ----- diff --git a/modules/nodes-pods-about.adoc b/modules/nodes-pods-about.adoc deleted file mode 100644 index 8eb2edf2bc25..000000000000 --- a/modules/nodes-pods-about.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-using.adoc - -:_content-type: CONCEPT -[id="nodes-pods-about_{context}"] -= About pods - -{product-title} leverages the Kubernetes concept of a _pod_, which is one or more containers deployed -together on one host, and the smallest compute unit that can be defined, -deployed, and managed. Pods are the rough equivalent of a machine instance (physical or virtual) to a container. - -You can view a list of pods associated with a specific project or view usage statistics about pods. diff --git a/modules/nodes-pods-autoscaling-about.adoc b/modules/nodes-pods-autoscaling-about.adoc deleted file mode 100644 index 7bbc20670e92..000000000000 --- a/modules/nodes-pods-autoscaling-about.adoc +++ /dev/null @@ -1,117 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-about_{context}"] -= Understanding horizontal pod autoscalers - -You can create a horizontal pod autoscaler to specify the minimum and maximum number of pods -you want to run, as well as the CPU utilization or memory utilization your pods should target. - -After you create a horizontal pod autoscaler, {product-title} begins to query the CPU and/or memory resource metrics on the pods. -When these metrics are available, the horizontal pod autoscaler computes -the ratio of the current metric utilization with the desired metric utilization, -and scales up or down accordingly. The query and scaling occurs at a regular interval, -but can take one to two minutes before metrics become available. - -For replication controllers, this scaling corresponds directly to the replicas -of the replication controller. For deployment configurations, scaling corresponds -directly to the replica count of the deployment configuration. Note that autoscaling -applies only to the latest deployment in the `Complete` phase. - -{product-title} automatically accounts for resources and prevents unnecessary autoscaling -during resource spikes, such as during start up. Pods in the `unready` state -have `0 CPU` usage when scaling up and the autoscaler ignores the pods when scaling down. -Pods without known metrics have `0% CPU` usage when scaling up and `100% CPU` when scaling down. -This allows for more stability during the HPA decision. To use this feature, you must configure -readiness checks to determine if a new pod is ready for use. - -ifdef::openshift-origin,openshift-enterprise,openshift-webscale[] -To use horizontal pod autoscalers, your cluster administrator must have -properly configured cluster metrics. 
-endif::openshift-origin,openshift-enterprise,openshift-webscale[] - -== Supported metrics - -The following metrics are supported by horizontal pod autoscalers: - -.Metrics -[cols="3a,5a,5a",options="header"] -|=== - -|Metric |Description |API version - -|CPU utilization -|Number of CPU cores used. Can be used to calculate a percentage of the pod's requested CPU. -|`autoscaling/v1`, `autoscaling/v2` - -|Memory utilization -|Amount of memory used. Can be used to calculate a percentage of the pod's requested memory. -|`autoscaling/v2` -|=== - -[IMPORTANT] -==== -For memory-based autoscaling, memory usage must increase and decrease -proportionally to the replica count. On average: - -* An increase in replica count must lead to an overall decrease in memory -(working set) usage per-pod. -* A decrease in replica count must lead to an overall increase in per-pod memory -usage. - -Use the {product-title} web console to check the memory behavior of your application -and ensure that your application meets these requirements before using -memory-based autoscaling. -==== - -The following example shows autoscaling for the `image-registry` `Deployment` object. The initial deployment requires 3 pods. The HPA object increases the minimum to 5. If CPU usage on the pods reaches 75%, the pods increase to 7: - -[source,terminal] ----- -$ oc autoscale deployment/image-registry --min=5 --max=7 --cpu-percent=75 ----- - -.Example output -[source,terminal] ----- -horizontalpodautoscaler.autoscaling/image-registry autoscaled ----- - -.Sample HPA for the `image-registry` `Deployment` object with `minReplicas` set to 3 -[source,yaml] ----- -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: image-registry - namespace: default -spec: - maxReplicas: 7 - minReplicas: 3 - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: image-registry - targetCPUUtilizationPercentage: 75 -status: - currentReplicas: 5 - desiredReplicas: 0 ----- - -. View the new state of the deployment: -+ -[source,terminal] ----- -$ oc get deployment image-registry ----- -+ -There are now 5 pods in the deployment: -+ -.Example output -[source,terminal] ----- -NAME REVISION DESIRED CURRENT TRIGGERED BY -image-registry 1 5 5 config ----- diff --git a/modules/nodes-pods-autoscaling-best-practices-hpa.adoc b/modules/nodes-pods-autoscaling-best-practices-hpa.adoc deleted file mode 100644 index 5dcd323b0680..000000000000 --- a/modules/nodes-pods-autoscaling-best-practices-hpa.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-best-practices-hpa_{context}"] -= Best practices - -.All pods must have resource requests configured -The HPA makes a scaling decision based on the observed CPU or memory utilization values of pods in an {product-title} cluster. Utilization values are calculated as a percentage of the resource requests of each pod. -Missing resource request values can affect the optimal performance of the HPA. - -.Configure the cool down period -During horizontal pod autoscaling, there might be a rapid scaling of events without a time gap. Configure the cool down period to prevent frequent replica fluctuations. -You can specify a cool down period by configuring the `stabilizationWindowSeconds` field. The stabilization window is used to restrict the fluctuation of replicas count when the metrics used for scaling keep fluctuating. 
-The autoscaling algorithm uses this window to infer a previous desired state and avoid unwanted changes to workload scale. - -For example, a stabilization window is specified for the `scaleDown` field: - -[source,yaml] ----- -behavior: - scaleDown: - stabilizationWindowSeconds: 300 ----- - -In the above example, all desired states for the past 5 minutes are considered. This approximates a rolling maximum, and avoids having the scaling algorithm frequently remove pods only to trigger recreating an equivalent pod just moments later. diff --git a/modules/nodes-pods-autoscaling-creating-cpu.adoc b/modules/nodes-pods-autoscaling-creating-cpu.adoc deleted file mode 100644 index aa68d7a75ccb..000000000000 --- a/modules/nodes-pods-autoscaling-creating-cpu.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-autoscaling-creating-cpu_{context}"] -= Creating a horizontal pod autoscaler for CPU utilization by using the CLI - -Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing `Deployment`, `DeploymentConfig`, `ReplicaSet`, `ReplicationController`, or `StatefulSet` object. The HPA scales the pods associated with that object to maintain the CPU usage you specify. - -[NOTE] -==== -It is recommended to use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. -==== - -The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified CPU utilization across all pods. - -When autoscaling for CPU utilization, you can use the `oc autoscale` command and specify the minimum and maximum number of pods you want to run at any given time and the average CPU utilization your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. - -To autoscale for a specific CPU value, create a `HorizontalPodAutoscaler` object with the target CPU and pod limits. - -.Prerequisites - -To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. -You can use the `oc describe PodMetrics <pod-name>` command to determine if metrics are configured. If metrics are -configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`. - -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-135-131.ec2.internal ----- - -.Example output -[source,text,options="nowrap"] ----- -Name: openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Namespace: openshift-kube-scheduler -Labels: <none> -Annotations: <none> -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2019-05-23T18:47:56Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Timestamp: 2019-05-23T18:47:56Z -Window: 1m0s -Events: <none> ----- - -.Procedure - -To create a horizontal pod autoscaler for CPU utilization: - -. 
Perform one of the following: - -** To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing object: -+ -[source,terminal] ----- -$ oc autoscale <object_type>/<name> \// <1> - --min <number> \// <2> - --max <number> \// <3> - --cpu-percent=<percent> <4> ----- -+ -<1> Specify the type and name of the object to autoscale. The object must exist and be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. -<2> Optionally, specify the minimum number of replicas when scaling down. -<3> Specify the maximum number of replicas when scaling up. -<4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. If not specified or negative, a default autoscaling policy is used. -+ -For example, the following command shows autoscaling for the `image-registry` `Deployment` object. The initial deployment requires 3 pods. The HPA object increases the minimum to 5. If CPU usage on the pods reaches 75%, the pods will increase to 7: -+ -[source,terminal] ----- -$ oc autoscale deployment/image-registry --min=5 --max=7 --cpu-percent=75 ----- - -** To scale for a specific CPU value, create a YAML file similar to the following for an existing object: -+ -.. Create a YAML file similar to the following: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: cpu-autoscale <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: cpu <9> - target: - type: AverageValue <10> - averageValue: 500m <11> ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. -<3> Specify the API version of the object to scale: -* For a `Deployment`, `ReplicaSet`, `Statefulset` object, use `apps/v1`. -* For a `ReplicationController`, use `v1`. -* For a `DeploymentConfig`, use `apps.openshift.io/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`/`dc`, `ReplicaSet`/`rs`, `ReplicationController`/`rc`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `cpu` for CPU utilization. -<10> Set to `AverageValue`. -<11> Set to `averageValue` with the targeted CPU value. - -.. Create the horizontal pod autoscaler: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. 
Verify that the horizontal pod autoscaler was created: -+ -[source,terminal] ----- -$ oc get hpa cpu-autoscale ----- -+ -.Example output -[source,terminal] ----- -NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE -cpu-autoscale Deployment/example 173m/500m 1 10 1 20m ----- - diff --git a/modules/nodes-pods-autoscaling-creating-memory.adoc b/modules/nodes-pods-autoscaling-creating-memory.adoc deleted file mode 100644 index 4867c2f42612..000000000000 --- a/modules/nodes-pods-autoscaling-creating-memory.adoc +++ /dev/null @@ -1,237 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-autoscaling-creating-memory_{context}"] - -= Creating a horizontal pod autoscaler object for memory utilization by using the CLI - -Using the {product-title} CLI, you can create a horizontal pod autoscaler (HPA) to automatically scale an existing -`Deployment`, `DeploymentConfig`, `ReplicaSet`, `ReplicationController`, or `StatefulSet` object. The HPA -scales the pods associated with that object to maintain the average memory utilization you specify, either a direct value or a percentage -of requested memory. - -[NOTE] -==== -It is recommended to use a `Deployment` object or `ReplicaSet` object unless you need a specific feature or behavior provided by other objects. -==== - -The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain -the specified memory utilization across all pods. - -For memory utilization, you can specify the minimum and maximum number of pods and the average memory utilization -your pods should target. If you do not specify a minimum, the pods are given default values from the {product-title} server. - -.Prerequisites - -To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. -You can use the `oc describe PodMetrics <pod-name>` command to determine if metrics are configured. If metrics are -configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`. - -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-129-223.compute.internal -n openshift-kube-scheduler ----- - -.Example output -[source,text,options="nowrap"] ----- -Name: openshift-kube-scheduler-ip-10-0-129-223.compute.internal -Namespace: openshift-kube-scheduler -Labels: <none> -Annotations: <none> -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Cpu: 0 - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2020-02-14T22:21:14Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-129-223.compute.internal -Timestamp: 2020-02-14T22:21:14Z -Window: 5m0s -Events: <none> ----- - -.Procedure - -To create a horizontal pod autoscaler for memory utilization: - -. 
Create a YAML file for one of the following: - -** To scale for a specific memory value, create a `HorizontalPodAutoscaler` object similar to the following for an existing object: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: hpa-resource-metrics-memory <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: memory <9> - target: - type: AverageValue <10> - averageValue: 500Mi <11> - behavior: <12> - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Pods - value: 4 - periodSeconds: 60 - - type: Percent - value: 10 - periodSeconds: 60 - selectPolicy: Max ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. -<3> Specify the API version of the object to scale: -* For a `Deployment`, `ReplicaSet`, or `Statefulset` object, use `apps/v1`. -* For a `ReplicationController`, use `v1`. -* For a `DeploymentConfig`, use `apps.openshift.io/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, -`ReplicaSet`, `ReplicationController`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `memory` for memory utilization. -<10> Set the type to `AverageValue`. -<11> Specify `averageValue` and a specific memory value. -<12> Optional: Specify a scaling policy to control the rate of scaling up or down. - -** To scale for a percentage, create a `HorizontalPodAutoscaler` object similar to the following for an existing object: -+ -[source,yaml,options="nowrap"] ----- -apiVersion: autoscaling/v2 <1> -kind: HorizontalPodAutoscaler -metadata: - name: memory-autoscale <2> - namespace: default -spec: - scaleTargetRef: - apiVersion: apps/v1 <3> - kind: Deployment <4> - name: example <5> - minReplicas: 1 <6> - maxReplicas: 10 <7> - metrics: <8> - - type: Resource - resource: - name: memory <9> - target: - type: Utilization <10> - averageUtilization: 50 <11> - behavior: <12> - scaleUp: - stabilizationWindowSeconds: 180 - policies: - - type: Pods - value: 6 - periodSeconds: 120 - - type: Percent - value: 10 - periodSeconds: 120 - selectPolicy: Max ----- -<1> Use the `autoscaling/v2` API. -<2> Specify a name for this horizontal pod autoscaler object. -<3> Specify the API version of the object to scale: -* For a ReplicationController, use `v1`. -* For a DeploymentConfig, use `apps.openshift.io/v1`. -* For a Deployment, ReplicaSet, Statefulset object, use `apps/v1`. -<4> Specify the type of object. The object must be a `Deployment`, `DeploymentConfig`, -`ReplicaSet`, `ReplicationController`, or `StatefulSet`. -<5> Specify the name of the object to scale. The object must exist. -<6> Specify the minimum number of replicas when scaling down. -<7> Specify the maximum number of replicas when scaling up. -<8> Use the `metrics` parameter for memory utilization. -<9> Specify `memory` for memory utilization. -<10> Set to `Utilization`. -<11> Specify `averageUtilization` and a target average memory utilization over all the pods, -represented as a percent of requested memory. The target pods must have memory requests configured. 
-<12> Optional: Specify a scaling policy to control the rate of scaling up or down. - -. Create the horizontal pod autoscaler: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f hpa.yaml ----- -+ -.Example output -[source,terminal] ----- -horizontalpodautoscaler.autoscaling/hpa-resource-metrics-memory created ----- - -. Verify that the horizontal pod autoscaler was created: -+ -[source,terminal] ----- -$ oc get hpa hpa-resource-metrics-memory ----- -+ -.Example output -[source,terminal] ----- -NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE -hpa-resource-metrics-memory Deployment/example 2441216/500Mi 1 10 1 20m ----- -+ -[source,terminal] ----- -$ oc describe hpa hpa-resource-metrics-memory ----- -+ -.Example output -[source,text] ----- -Name: hpa-resource-metrics-memory -Namespace: default -Labels: <none> -Annotations: <none> -CreationTimestamp: Wed, 04 Mar 2020 16:31:37 +0530 -Reference: Deployment/example -Metrics: ( current / target ) - resource memory on pods: 2441216 / 500Mi -Min replicas: 1 -Max replicas: 10 -ReplicationController pods: 1 current / 1 desired -Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale recommended size matches current size - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 6m34s horizontal-pod-autoscaler New size: 1; reason: All metrics below target ----- diff --git a/modules/nodes-pods-autoscaling-creating-web-console.adoc b/modules/nodes-pods-autoscaling-creating-web-console.adoc deleted file mode 100644 index 60b04bb7357e..000000000000 --- a/modules/nodes-pods-autoscaling-creating-web-console.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-autoscaling-creating-web-console_{context}"] -= Creating a horizontal pod autoscaler by using the web console - -From the web console, you can create a horizontal pod autoscaler (HPA) that specifies the minimum and maximum number of pods you want to run on a `Deployment` or `DeploymentConfig` object. You can also define the amount of CPU or memory usage that your pods should target. - -[NOTE] -==== -An HPA cannot be added to deployments that are part of an Operator-backed service, Knative service, or Helm chart. -==== - -.Procedure - -To create an HPA in the web console: - -. In the *Topology* view, click the node to reveal the side pane. -. From the *Actions* drop-down list, select *Add HorizontalPodAutoscaler* to open the *Add HorizontalPodAutoscaler* form. -+ -.Add HorizontalPodAutoscaler -image::node-add-hpa-action.png[Add HorizontalPodAutoscaler form] - -. From the *Add HorizontalPodAutoscaler* form, define the name, minimum and maximum pod limits, the CPU and memory usage, and click *Save*. -+ -[NOTE] -==== -If any of the values for CPU and memory usage are missing, a warning is displayed. -==== - -To edit an HPA in the web console: - -. In the *Topology* view, click the node to reveal the side pane. -. From the *Actions* drop-down list, select *Edit HorizontalPodAutoscaler* to open the *Edit Horizontal Pod Autoscaler* form. -. 
From the *Edit Horizontal Pod Autoscaler* form, edit the minimum and maximum pod limits and the CPU and memory usage, and click *Save*. - -[NOTE] -==== -While creating or editing the horizontal pod autoscaler in the web console, you can switch from *Form view* to *YAML view*. -==== - -To remove an HPA in the web console: - -. In the *Topology* view, click the node to reveal the side panel. -. From the *Actions* drop-down list, select *Remove HorizontalPodAutoscaler*. -. In the confirmation pop-up window, click *Remove* to remove the HPA. diff --git a/modules/nodes-pods-autoscaling-custom-rn-210.adoc b/modules/nodes-pods-autoscaling-custom-rn-210.adoc deleted file mode 100644 index 94169b4dcd2a..000000000000 --- a/modules/nodes-pods-autoscaling-custom-rn-210.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-custom.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-custom-rn_{context}"] -= Custom Metrics Autoscaler Operator release notes - -The release notes for the Custom Metrics Autoscaler Operator for Red Hat OpenShift describe new features and enhancements, deprecated features, and known issues. - -The Custom Metrics Autoscaler Operator uses the Kubernetes-based Event Driven Autoscaler (KEDA) and is built on top of the {product-title} horizontal pod autoscaler (HPA). - -[NOTE] -==== -The Custom Metrics Autoscaler Operator for Red Hat OpenShift is provided as an installable component, with a distinct release cycle from the core {product-title}. The link:https://access.redhat.com/support/policy/updates/openshift#cma[Red Hat OpenShift Container Platform Life Cycle Policy] outlines release compatibility. -==== - -[id="nodes-pods-autoscaling-custom-rn-versions_{context}"] -== Supported versions - -The following table defines the Custom Metrics Autoscaler Operator versions for each {product-title} version. - -[cols="3,7,3",options="header"] -|=== -|Version -|{product-title} version -|General availability - -|2.10 -|4.13 -|General availability - -|2.10 -|4.12 -|General availability - -|2.10 -|4.11 -|General availability - -|2.10 -|4.10 -|General availability -|=== - -[id="nodes-pods-autoscaling-custom-rn-210_{context}"] -== Custom Metrics Autoscaler Operator 2.10 release notes - -This release of the Custom Metrics Autoscaler Operator 2.10 provides new features and bug fixes for running the Operator in an {product-title} cluster. The components of the Custom Metrics Autoscaler Operator 2.10 were released in link:https://access.redhat.com/errata/RHEA-:[RHEA-:]. - -[id="nodes-pods-autoscaling-custom-rn-210-new_{context}"] -=== New features and enhancements - -[id="nodes-pods-autoscaling-custom-rn-210-ga_{context}"] -==== Custom Metrics Autoscaler Operator general availability - -The Custom Metrics Autoscaler Operator is now generally available as of Custom Metrics Autoscaler Operator version 2.10. - -:FeatureName: Scaling by using a scaled job -include::snippets/technology-preview.adoc[] - -[id="nodes-pods-autoscaling-custom-rn-210-metrics_{context}"] -==== Custom Metrics Autoscaler Operator metrics - -You can now use the Prometheus Query Language (PromQL) to query metrics from the Custom Metrics Autoscaler Operator. - -[id="nodes-pods-autoscaling-custom-rn-210-pause_{context}"] -==== Pausing the custom metrics autoscaling for scaled objects - -You can now pause the autoscaling of a scaled object, as needed, and resume autoscaling when ready. 
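For example, you can pause autoscaling for a scaled object by adding an annotation to it and resume autoscaling by removing the annotation. The following sketch assumes the upstream KEDA `autoscaling.keda.sh/paused-replicas` annotation and a scaled object named `my-scaledobject`; verify the annotation name against your installed Operator version:

[source,terminal]
----
$ oc annotate scaledobject my-scaledobject autoscaling.keda.sh/paused-replicas="4"
----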
- -[id="nodes-pods-autoscaling-custom-rn-210-fall-back_{context}"] -==== Replica fall back for scaled objects - -You can now specify the number of replicas to fall back to if a scaled object fails to get metrics from the source. - -[id="nodes-pods-autoscaling-custom-rn-210-hpa-name_{context}"] -==== Customizable HPA naming for scaled objects - -You can now specify a custom name for the horizontal pod autoscaler in scaled objects. - -[id="nodes-pods-autoscaling-custom-rn-210-activation_{context}"] -==== Activation and scaling thresholds - -Because the horizontal pod autoscaler (HPA) cannot scale to or from 0 replicas, the Custom Metrics Autoscaler Operator does that scaling, after which the HPA performs the scaling. You can now specify when the HPA takes over autoscaling, based on the number of replicas. This allows for more flexibility with your scaling policies. - diff --git a/modules/nodes-pods-autoscaling-policies.adoc b/modules/nodes-pods-autoscaling-policies.adoc deleted file mode 100644 index 8fd12144e3d6..000000000000 --- a/modules/nodes-pods-autoscaling-policies.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling.adoc - -[id="nodes-pods-autoscaling-policies_{context}"] -= Scaling policies - -The `autoscaling/v2` API allows you to add _scaling policies_ to a horizontal pod autoscaler. A scaling policy controls how the {product-title} horizontal pod autoscaler (HPA) scales pods. Scaling policies allow you to restrict the rate that HPAs scale pods up or down by setting a specific number or specific percentage to scale in a specified period of time. You can also define a _stabilization window_, which uses previously computed desired states to control scaling if the metrics are fluctuating. You can create multiple policies for the same scaling direction, and determine which policy is used, based on the amount of change. You can also restrict the scaling by timed iterations. The HPA scales pods during an iteration, then performs scaling, as needed, in further iterations. - -.Sample HPA object with a scaling policy -[source, yaml] ----- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: hpa-resource-metrics-memory - namespace: default -spec: - behavior: - scaleDown: <1> - policies: <2> - - type: Pods <3> - value: 4 <4> - periodSeconds: 60 <5> - - type: Percent - value: 10 <6> - periodSeconds: 60 - selectPolicy: Min <7> - stabilizationWindowSeconds: 300 <8> - scaleUp: <9> - policies: - - type: Pods - value: 5 <10> - periodSeconds: 70 - - type: Percent - value: 12 <11> - periodSeconds: 80 - selectPolicy: Max - stabilizationWindowSeconds: 0 -... ----- -<1> Specifies the direction for the scaling policy, either `scaleDown` or `scaleUp`. This example creates a policy for scaling down. -<2> Defines the scaling policy. -<3> Determines if the policy scales by a specific number of pods or a percentage of pods during each iteration. The default value is `pods`. -<4> Limits the amount of scaling, either the number of pods or percentage of pods, during each iteration. There is no default value for scaling down by number of pods. -<5> Determines the length of a scaling iteration. The default value is `15` seconds. -<6> The default value for scaling down by percentage is 100%. -<7> Determines which policy to use first, if multiple policies are defined. 
Specify `Max` to use the policy that allows the highest amount of change, `Min` to use the policy that allows the lowest amount of change, or `Disabled` to prevent the HPA from scaling in that policy direction. The default value is `Max`.
<8> Determines the time period the HPA should look back at desired states. The default value is `0` for scaling up and `300` for scaling down.
<9> This example creates a policy for scaling up.
<10> Limits the amount of scaling up by the number of pods. The default value for scaling up by number of pods is `4`.
<11> Limits the amount of scaling up by the percentage of pods. The default value for scaling up by percentage is 100%.

.Example policy for scaling down
[source,yaml]
----
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-resource-metrics-memory
  namespace: default
spec:
...
  minReplicas: 20
...
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
      - type: Pods
        value: 4
        periodSeconds: 30
      - type: Percent
        value: 10
        periodSeconds: 60
      selectPolicy: Max
    scaleUp:
      selectPolicy: Disabled
----

In this example, when the number of pods is greater than 40, the percent-based policy is used for scaling down, as that policy results in a larger change, as required by the `selectPolicy`.

If there are 80 pod replicas, in the first iteration the HPA reduces the pods by 8, which is 10% of the 80 pods (based on the `type: Percent` and `value: 10` parameters), over one minute (`periodSeconds: 60`). For the next iteration, the number of pods is 72. The HPA calculates that 10% of the remaining pods is 7.2, which it rounds up to 8 and scales down 8 pods. On each subsequent iteration, the number of pods to be scaled is re-calculated based on the number of remaining pods. When the number of pods falls below 40, the pods-based policy is applied, because the pod-based number is greater than the percent-based number. The HPA reduces 4 pods at a time (`type: Pods` and `value: 4`), over 30 seconds (`periodSeconds: 30`), until there are 20 replicas remaining (`minReplicas`).

The `selectPolicy: Disabled` parameter prevents the HPA from scaling up the pods. You can manually scale up by adjusting the number of replicas in the replica set or deployment, if needed.

If set, you can view the scaling policy by using the `oc edit` command:

[source,terminal]
----
$ oc edit hpa hpa-resource-metrics-memory
----

.Example output
[source,terminal]
----
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  annotations:
    autoscaling.alpha.kubernetes.io/behavior:\
'{"ScaleUp":{"StabilizationWindowSeconds":0,"SelectPolicy":"Max","Policies":[{"Type":"Pods","Value":4,"PeriodSeconds":15},{"Type":"Percent","Value":100,"PeriodSeconds":15}]},\
"ScaleDown":{"StabilizationWindowSeconds":300,"SelectPolicy":"Min","Policies":[{"Type":"Pods","Value":4,"PeriodSeconds":60},{"Type":"Percent","Value":10,"PeriodSeconds":60}]}}'
...
----- - diff --git a/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc b/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc deleted file mode 100644 index 6d301a7efc7b..000000000000 --- a/modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-requests-and-limits-hpa_{context}"] -= About requests and limits - -The scheduler uses the resource request that you specify for containers in a pod, to decide which node to place the pod on. The kubelet enforces the resource limit that you specify for a container to ensure that the container is not allowed to use more than the specified limit. -The kubelet also reserves the request amount of that system resource specifically for that container to use. - -.How to use resource metrics? - -In the pod specifications, you must specify the resource requests, such as CPU and memory. The HPA uses this specification to determine the resource utilization and then scales the target up or down. - -For example, the HPA object uses the following metric source: - -[source,yaml] ----- -type: Resource -resource: - name: cpu - target: - type: Utilization - averageUtilization: 60 ----- - -In this example, the HPA keeps the average utilization of the pods in the scaling target at 60%. Utilization is the ratio between the current resource usage to the requested resource of the pod. diff --git a/modules/nodes-pods-autoscaling-status-about.adoc b/modules/nodes-pods-autoscaling-status-about.adoc deleted file mode 100644 index 387d306c4f7d..000000000000 --- a/modules/nodes-pods-autoscaling-status-about.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-status-about_{context}"] - -= Understanding horizontal pod autoscaler status conditions by using the CLI - -You can use the status conditions set to determine -whether or not the horizontal pod autoscaler (HPA) is able to scale and whether or not it is currently restricted -in any way. - -The HPA status conditions are available with the `v2` version of the -autoscaling API. - -The HPA responds with the following status conditions: - -* The `AbleToScale` condition indicates whether HPA is able to fetch and update metrics, as well as whether any backoff-related conditions could prevent scaling. -** A `True` condition indicates scaling is allowed. -** A `False` condition indicates scaling is not allowed for the reason specified. - -* The `ScalingActive` condition indicates whether the HPA is enabled (for example, the replica count of the target is not zero) and is able to calculate desired metrics. -** A `True` condition indicates metrics is working properly. -** A `False` condition generally indicates a problem with fetching metrics. - -* The `ScalingLimited` condition indicates that the desired scale was capped by the maximum or minimum of the horizontal pod autoscaler. -** A `True` condition indicates that you need to raise or lower the minimum or maximum replica count in order to scale. -** A `False` condition indicates that the requested scaling is allowed. 
-+ -[source,terminal] ----- -$ oc describe hpa cm-test ----- -+ -.Example output -[source,text] ----- -Name: cm-test -Namespace: prom -Labels: <none> -Annotations: <none> -CreationTimestamp: Fri, 16 Jun 2017 18:09:22 +0000 -Reference: ReplicationController/cm-test -Metrics: ( current / target ) - "http_requests" on pods: 66m / 500m -Min replicas: 1 -Max replicas: 4 -ReplicationController pods: 1 current / 1 desired -Conditions: <1> - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from pods metric http_request - ScalingLimited False DesiredWithinRange the desired replica count is within the acceptable range -Events: ----- -<1> The horizontal pod autoscaler status messages. - -// The above output and bullets from https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#appendix-horizontal-pod-autoscaler-status-conditions - -The following is an example of a pod that is unable to scale: - -.Example output -[source,text] ----- -Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale False FailedGetScale the HPA controller was unable to get the target's current scale: no matches for kind "ReplicationController" in group "apps" -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning FailedGetScale 6s (x3 over 36s) horizontal-pod-autoscaler no matches for kind "ReplicationController" in group "apps" ----- - -The following is an example of a pod that could not obtain the needed metrics for scaling: - -.Example output -[source,text] ----- -Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededGetScale the HPA controller was able to get the target's current scale - ScalingActive False FailedGetResourceMetric the HPA was unable to compute the replica count: failed to get cpu utilization: unable to get metrics for resource cpu: no metrics returned from resource metrics API ----- - -The following is an example of a pod where the requested autoscaling was less than the required minimums: - -.Example output -[source,text] ----- -Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from pods metric http_request - ScalingLimited False DesiredWithinRange the desired replica count is within the acceptable range ----- diff --git a/modules/nodes-pods-autoscaling-status-viewing.adoc b/modules/nodes-pods-autoscaling-status-viewing.adoc deleted file mode 100644 index 423a4a71612a..000000000000 --- a/modules/nodes-pods-autoscaling-status-viewing.adoc +++ /dev/null @@ -1,91 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-autoscaling-status-viewing_{context}"] - -= Viewing horizontal pod autoscaler status conditions by using the CLI - -You can view the status conditions set on a pod by the horizontal pod autoscaler (HPA). - -[NOTE] -==== -The horizontal pod autoscaler status conditions are available with the `v2` version of the autoscaling API. 
-==== - -.Prerequisites - -To use horizontal pod autoscalers, your cluster administrator must have properly configured cluster metrics. -You can use the `oc describe PodMetrics <pod-name>` command to determine if metrics are configured. If metrics are -configured, the output appears similar to the following, with `Cpu` and `Memory` displayed under `Usage`. - -[source,terminal] ----- -$ oc describe PodMetrics openshift-kube-scheduler-ip-10-0-135-131.ec2.internal ----- - -.Example output -[source,terminal] ----- -Name: openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Namespace: openshift-kube-scheduler -Labels: <none> -Annotations: <none> -API Version: metrics.k8s.io/v1beta1 -Containers: - Name: wait-for-host-port - Usage: - Memory: 0 - Name: scheduler - Usage: - Cpu: 8m - Memory: 45440Ki -Kind: PodMetrics -Metadata: - Creation Timestamp: 2019-05-23T18:47:56Z - Self Link: /apis/metrics.k8s.io/v1beta1/namespaces/openshift-kube-scheduler/pods/openshift-kube-scheduler-ip-10-0-135-131.ec2.internal -Timestamp: 2019-05-23T18:47:56Z -Window: 1m0s -Events: <none> ----- - -.Procedure - -To view the status conditions on a pod, use the following command with the name of the pod: - -[source,terminal] ----- -$ oc describe hpa <pod-name> ----- - -For example: - -[source,terminal] ----- -$ oc describe hpa cm-test ----- - -The conditions appear in the `Conditions` field in the output. - -.Example output -[source,terminal] ----- -Name: cm-test -Namespace: prom -Labels: <none> -Annotations: <none> -CreationTimestamp: Fri, 16 Jun 2017 18:09:22 +0000 -Reference: ReplicationController/cm-test -Metrics: ( current / target ) - "http_requests" on pods: 66m / 500m -Min replicas: 1 -Max replicas: 4 -ReplicationController pods: 1 current / 1 desired -Conditions: <1> - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from pods metric http_request - ScalingLimited False DesiredWithinRange the desired replica count is within the acceptable range ----- diff --git a/modules/nodes-pods-autoscaling-workflow-hpa.adoc b/modules/nodes-pods-autoscaling-workflow-hpa.adoc deleted file mode 100644 index fd99c110bea9..000000000000 --- a/modules/nodes-pods-autoscaling-workflow-hpa.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-autoscaling-about.adoc - -:_content-type: CONCEPT -[id="nodes-pods-autoscaling-workflow-hpa_{context}"] -= How does the HPA work? - -The horizontal pod autoscaler (HPA) extends the concept of pod auto-scaling. The HPA lets you create and manage a group of load-balanced nodes. The HPA automatically increases or decreases the number of pods when a given CPU or memory threshold is crossed. - -.High level workflow of the HPA -image::HPAflow.png[workflow] - -The HPA is an API resource in the Kubernetes autoscaling API group. The autoscaler works as a control loop with a default of 15 seconds for the sync period. During this period, the controller manager queries the CPU, memory utilization, or both, against what is defined in the YAML file for the HPA. -The controller manager obtains the utilization metrics from the resource metrics API for per-pod resource metrics like CPU or memory, for each pod that is targeted by the HPA. 
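
As a rough way to see the raw data that this control loop consumes, you can query the resource metrics API directly. This is a hedged sketch, and `<namespace>` is a placeholder:

[source,terminal]
----
$ oc get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/<namespace>/pods"
----

The per-container `usage` values in the response are the numbers that the controller manager compares against the targets defined in the HPA, as described in the next paragraph.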
- -If a utilization value target is set, the controller calculates the utilization value as a percentage of the equivalent resource request on the containers in each pod. The controller then takes the average of utilization across all targeted pods and produces a ratio that is used to scale the number of desired replicas. -The HPA is configured to fetch metrics from `metrics.k8s.io`, which is provided by the metrics server. Because of the dynamic nature of metrics evaluation, the number of replicas can fluctuate during scaling for a group of replicas. - -[NOTE] -==== -To implement the HPA, all targeted pods must have a resource request set on their containers. -==== diff --git a/modules/nodes-pods-configmap-create-from-console.adoc b/modules/nodes-pods-configmap-create-from-console.adoc deleted file mode 100644 index e31dceade16e..000000000000 --- a/modules/nodes-pods-configmap-create-from-console.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmap-create-from-console_{context}"] -= Creating a config map in the {product-title} web console - -You can create a config map in the {product-title} web console. - -.Procedure - -* To create a config map as a cluster administrator: -+ -. In the Administrator perspective, select `Workloads` -> `Config Maps`. -+ -. At the top right side of the page, select *Create Config Map*. -+ -. Enter the contents of your config map. -+ -. Select *Create*. - -* To create a config map as a developer: -+ -. In the Developer perspective, select `Config Maps`. -+ -. At the top right side of the page, select *Create Config Map*. -+ -. Enter the contents of your config map. -+ -. Select *Create*. diff --git a/modules/nodes-pods-configmap-create.adoc b/modules/nodes-pods-configmap-create.adoc deleted file mode 100644 index a10fcdf60ef5..000000000000 --- a/modules/nodes-pods-configmap-create.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmap-create_{context}"] -= Creating a config map by using the CLI - -You can use the following command to create a config map from directories, specific files, or literal values. - -.Procedure - -* Create a config map: -+ -[source,terminal] ----- -$ oc create configmap <configmap_name> [options] ----- diff --git a/modules/nodes-pods-configmap-creating-from-directories.adoc b/modules/nodes-pods-configmap-creating-from-directories.adoc deleted file mode 100644 index f48e06346b9e..000000000000 --- a/modules/nodes-pods-configmap-creating-from-directories.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmap-creating-from-directories_{context}"] -= Creating a config map from a directory - -You can create a config map from a directory. This method allows you to use multiple files within a directory to create a config map. - -.Procedure - -The following example procedure outlines how to create a config map from a directory. - -. 
Start with a directory with some files that already contain the data with which you want to populate a config map: -+ -[source,terminal] ----- -$ ls example-files ----- -+ -.Example output -[source,terminal] ----- -game.properties -ui.properties ----- -+ -[source,terminal] ----- -$ cat example-files/game.properties ----- -+ -.Example output -[source,terminal] ----- -enemies=aliens -lives=3 -enemies.cheat=true -enemies.cheat.level=noGoodRotten -secret.code.passphrase=UUDDLRLRBABAS -secret.code.allowed=true -secret.code.lives=30 ----- -+ -[source,terminal] ----- -$ cat example-files/ui.properties ----- -+ -.Example output -[source,terminal] ----- -color.good=purple -color.bad=yellow -allow.textmode=true -how.nice.to.look=fairlyNice ----- - -. Create a config map holding the content of each file in this directory by entering the following command: -+ -[source,terminal] ----- -$ oc create configmap game-config \ - --from-file=example-files/ ----- -+ -When the `--from-file` option points to a directory, each file directly in that directory is used to populate a key in the config map, where the name of the key is the file name, and the value of the key is the content of the file. -+ -For example, the previous command creates the following config map: -+ -[source,terminal] ----- -$ oc describe configmaps game-config ----- -+ -.Example output -[source,terminal] ----- -Name: game-config -Namespace: default -Labels: <none> -Annotations: <none> - -Data - -game.properties: 158 bytes -ui.properties: 83 bytes ----- -+ -You can see that the two keys in the map are created from the file names in the directory specified in the command. Because the content of those keys might be large, the output of `oc describe` only shows the names of the keys and their sizes. -+ -. Enter the `oc get` command for the object with the `-o` option to see the values of the keys: -+ -[source,terminal] ----- -$ oc get configmaps game-config -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -data: - game.properties: |- - enemies=aliens - lives=3 - enemies.cheat=true - enemies.cheat.level=noGoodRotten - secret.code.passphrase=UUDDLRLRBABAS - secret.code.allowed=true - secret.code.lives=30 - ui.properties: | - color.good=purple - color.bad=yellow - allow.textmode=true - how.nice.to.look=fairlyNice -kind: ConfigMap -metadata: - creationTimestamp: 2016-02-18T18:34:05Z - name: game-config - namespace: default - resourceVersion: "407" - selflink: /api/v1/namespaces/default/configmaps/game-config - uid: 30944725-d66e-11e5-8cd0-68f728db1985 ----- diff --git a/modules/nodes-pods-configmap-creating-from-files.adoc b/modules/nodes-pods-configmap-creating-from-files.adoc deleted file mode 100644 index c4cd3cf355ec..000000000000 --- a/modules/nodes-pods-configmap-creating-from-files.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmap-creating-from-files_{context}"] -= Creating a config map from a file - -You can create a config map from a file. - -.Procedure - -The following example procedure outlines how to create a config map from a file. - -[NOTE] -==== -If you create a config map from a file, you can include files containing non-UTF8 data that are placed in this field without corrupting the non-UTF8 data. {product-title} detects binary files and transparently encodes the file as `MIME`. On the server, the `MIME` payload is decoded and stored without corrupting the data. 
-==== - -You can pass the `--from-file` option multiple times to the CLI. The following example yields equivalent results to the creating from directories example. - -. Create a config map by specifying a specific file: -+ -[source,terminal] ----- -$ oc create configmap game-config-2 \ - --from-file=example-files/game.properties \ - --from-file=example-files/ui.properties ----- -+ -. Verify the results: -+ -[source,terminal] ----- -$ oc get configmaps game-config-2 -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -data: - game.properties: |- - enemies=aliens - lives=3 - enemies.cheat=true - enemies.cheat.level=noGoodRotten - secret.code.passphrase=UUDDLRLRBABAS - secret.code.allowed=true - secret.code.lives=30 - ui.properties: | - color.good=purple - color.bad=yellow - allow.textmode=true - how.nice.to.look=fairlyNice -kind: ConfigMap -metadata: - creationTimestamp: 2016-02-18T18:52:05Z - name: game-config-2 - namespace: default - resourceVersion: "516" - selflink: /api/v1/namespaces/default/configmaps/game-config-2 - uid: b4952dc3-d670-11e5-8cd0-68f728db1985 ----- - -You can specify the key to set in a config map for content imported from a file. This can be set by passing a `key=value` expression to the `--from-file` option. For example: - -. Create a config map by specifying a key-value pair: -+ -[source,terminal] ----- -$ oc create configmap game-config-3 \ - --from-file=game-special-key=example-files/game.properties ----- - -. Verify the results: -+ -[source,terminal] ----- -$ oc get configmaps game-config-3 -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -data: - game-special-key: |- <1> - enemies=aliens - lives=3 - enemies.cheat=true - enemies.cheat.level=noGoodRotten - secret.code.passphrase=UUDDLRLRBABAS - secret.code.allowed=true - secret.code.lives=30 -kind: ConfigMap -metadata: - creationTimestamp: 2016-02-18T18:54:22Z - name: game-config-3 - namespace: default - resourceVersion: "530" - selflink: /api/v1/namespaces/default/configmaps/game-config-3 - uid: 05f8da22-d671-11e5-8cd0-68f728db1985 ----- -<1> This is the key that you set in the preceding step. diff --git a/modules/nodes-pods-configmap-creating-from-literal-values.adoc b/modules/nodes-pods-configmap-creating-from-literal-values.adoc deleted file mode 100644 index 7b2dfd0a8378..000000000000 --- a/modules/nodes-pods-configmap-creating-from-literal-values.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmap-creating-from-literal-values_{context}"] -= Creating a config map from literal values - -You can supply literal values for a config map. - -.Procedure - -The `--from-literal` option takes a `key=value` syntax that allows literal values to be supplied directly on the command line. - -. Create a config map by specifying a literal value: -+ -[source,terminal] ----- -$ oc create configmap special-config \ - --from-literal=special.how=very \ - --from-literal=special.type=charm ----- - -. 
Verify the results: -+ -[source,terminal] ----- -$ oc get configmaps special-config -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -data: - special.how: very - special.type: charm -kind: ConfigMap -metadata: - creationTimestamp: 2016-02-18T19:14:38Z - name: special-config - namespace: default - resourceVersion: "651" - selflink: /api/v1/namespaces/default/configmaps/special-config - uid: dadce046-d673-11e5-8cd0-68f728db1985 ----- diff --git a/modules/nodes-pods-configmap-overview.adoc b/modules/nodes-pods-configmap-overview.adoc deleted file mode 100644 index ccdb6ad98075..000000000000 --- a/modules/nodes-pods-configmap-overview.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configmaps.adoc - -:_content-type: CONCEPT -[id="nodes-pods-configmap-overview_{context}"] -= Understanding config maps - -Many applications require configuration by using some combination of configuration files, command line arguments, and environment variables. In {product-title}, these configuration artifacts are decoupled from image content to keep containerized applications portable. - -The `ConfigMap` object provides mechanisms to inject containers with configuration data while keeping containers agnostic of {product-title}. A config map can be used to store fine-grained information like individual properties or coarse-grained information like entire configuration files or JSON blobs. - -The `ConfigMap` object holds key-value pairs of configuration data that can be consumed in pods or used to store configuration data for system components such as controllers. For example: - -.`ConfigMap` Object Definition -[source,yaml] ----- -kind: ConfigMap -apiVersion: v1 -metadata: - creationTimestamp: 2016-02-18T19:14:38Z - name: example-config - namespace: my-namespace -data: <1> - example.property.1: hello - example.property.2: world - example.property.file: |- - property.1=value-1 - property.2=value-2 - property.3=value-3 -binaryData: - bar: L3Jvb3QvMTAw <2> ----- -<1> Contains the configuration data. -<2> Points to a file that contains non-UTF8 data, for example, a binary Java keystore file. Enter the file data in Base 64. - -[NOTE] -==== -You can use the `binaryData` field when you create a config map from a binary file, such as an image. -==== - -Configuration data can be consumed in pods in a variety of ways. A config map can be used to: - -* Populate environment variable values in containers -* Set command-line arguments in a container -* Populate configuration files in a volume - -Users and system components can store configuration data in a config map. - -A config map is similar to a secret, but designed to more conveniently support working with strings that do not contain sensitive information. - -[discrete] -== Config map restrictions - -*A config map must be created before its contents can be consumed in pods.* - -Controllers can be written to tolerate missing configuration data. Consult individual components configured by using config maps on a case-by-case basis. - -*`ConfigMap` objects reside in a project.* - -They can only be referenced by pods in the same project. - -*The Kubelet only supports the use of a config map for pods it gets from the API server.* - -This includes any pods created by using the CLI, or indirectly from a replication controller. 
It does not include pods created by using the {product-title} node's `--manifest-url` flag, its `--config` flag, or its REST API because these are not common ways to create pods. diff --git a/modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc b/modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc deleted file mode 100644 index a401baf54b83..000000000000 --- a/modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmaps-use-case-consuming-in-env-vars_{context}"] -= Populating environment variables in containers by using config maps - -You can use config maps to populate individual environment variables in containers or to populate environment variables in containers from all keys that form valid environment variable names. - -As an example, consider the following config map: - -.`ConfigMap` with two environment variables -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: special-config <1> - namespace: default <2> -data: - special.how: very <3> - special.type: charm <3> ----- -<1> Name of the config map. -<2> The project in which the config map resides. Config maps can only be referenced by pods in the same project. -<3> Environment variables to inject. - -.`ConfigMap` with one environment variable -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: env-config <1> - namespace: default -data: - log_level: INFO <2> ----- -<1> Name of the config map. -<2> Environment variable to inject. - -.Procedure - -* You can consume the keys of this `ConfigMap` in a pod using `configMapKeyRef` sections. -+ -.Sample `Pod` specification configured to inject specific environment variables -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-test-pod -spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "env" ] - env: <1> - - name: SPECIAL_LEVEL_KEY <2> - valueFrom: - configMapKeyRef: - name: special-config <3> - key: special.how <4> - - name: SPECIAL_TYPE_KEY - valueFrom: - configMapKeyRef: - name: special-config <3> - key: special.type <4> - optional: true <5> - envFrom: <6> - - configMapRef: - name: env-config <7> - restartPolicy: Never ----- -<1> Stanza to pull the specified environment variables from a `ConfigMap`. -<2> Name of a pod environment variable that you are injecting a key's value into. -<3> Name of the `ConfigMap` to pull specific environment variables from. -<4> Environment variable to pull from the `ConfigMap`. -<5> Makes the environment variable optional. As optional, the pod will be started even if the specified `ConfigMap` and keys do not exist. -<6> Stanza to pull all environment variables from a `ConfigMap`. -<7> Name of the `ConfigMap` to pull all environment variables from. -+ -When this pod is run, the pod logs will include the following output: -+ ----- -SPECIAL_LEVEL_KEY=very -log_level=INFO ----- - -[NOTE] -==== -`SPECIAL_TYPE_KEY=charm` is not listed in the example output because `optional: true` is set. 
-==== diff --git a/modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc b/modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc deleted file mode 100644 index de787a5373a5..000000000000 --- a/modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -//* authentication/configmaps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmaps-use-case-consuming-in-volumes_{context}"] -= Injecting content into a volume by using config maps - -You can inject content into a volume by using config maps. - -.Example `ConfigMap` custom resource (CR) -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: special-config - namespace: default -data: - special.how: very - special.type: charm ----- - -.Procedure - -You have a couple different options for injecting content into a volume by using config maps. - -* The most basic way to inject content into a volume by using a config map is to populate the volume with files where the key is the file name and the content of the file is the value of the key: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-test-pod -spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "cat", "/etc/config/special.how" ] - volumeMounts: - - name: config-volume - mountPath: /etc/config - volumes: - - name: config-volume - configMap: - name: special-config <1> - restartPolicy: Never ----- -<1> File containing key. -+ -When this pod is run, the output of the cat command will be: -+ ----- -very ----- - -* You can also control the paths within the volume where config map keys are projected: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-test-pod -spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "cat", "/etc/config/path/to/special-key" ] - volumeMounts: - - name: config-volume - mountPath: /etc/config - volumes: - - name: config-volume - configMap: - name: special-config - items: - - key: special.how - path: path/to/special-key <1> - restartPolicy: Never ----- -<1> Path to config map key. -+ -When this pod is run, the output of the cat command will be: -+ ----- -very ----- diff --git a/modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc b/modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc deleted file mode 100644 index 2308bed67823..000000000000 --- a/modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -//* nodes/pods/configmaps.adoc -//* applications/config-maps.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-configmaps-use-case-setting-command-line-arguments_{context}"] -= Setting command-line arguments for container commands with config maps - -You can use a config map to set the value of the commands or arguments in a container by using the Kubernetes substitution syntax `$(VAR_NAME)`. - -As an example, consider the following config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: special-config - namespace: default -data: - special.how: very - special.type: charm ----- - -.Procedure - -* To inject values into a command in a container, you must consume the keys you want to use as environment variables. 
Then you can refer to them in a container's command using the `$(VAR_NAME)` syntax. -+ -.Sample pod specification configured to inject specific environment variables -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dapi-test-pod -spec: - containers: - - name: test-container - image: gcr.io/google_containers/busybox - command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ] <1> - env: - - name: SPECIAL_LEVEL_KEY - valueFrom: - configMapKeyRef: - name: special-config - key: special.how - - name: SPECIAL_TYPE_KEY - valueFrom: - configMapKeyRef: - name: special-config - key: special.type - restartPolicy: Never ----- -<1> Inject the values into a command in a container using the keys you want to use as environment variables. -+ -When this pod is run, the output from the echo command run in the test-container container is as follows: -+ ----- -very charm ----- diff --git a/modules/nodes-pods-configuring-bandwidth.adoc b/modules/nodes-pods-configuring-bandwidth.adoc deleted file mode 100644 index 881466c105a3..000000000000 --- a/modules/nodes-pods-configuring-bandwidth.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-configuring.adoc -// * nodes/nodes-cluster-pods-configuring - -:_content-type: PROCEDURE -[id="nodes-pods-configuring-bandwidth_{context}"] -= Limiting the bandwidth available to pods - -You can apply quality-of-service traffic shaping to a pod and effectively limit -its available bandwidth. Egress traffic (from the pod) is handled by policing, -which simply drops packets in excess of the configured rate. Ingress traffic (to -the pod) is handled by shaping queued packets to effectively handle data. The -limits you place on a pod do not affect the bandwidth of other pods. - -.Procedure - -To limit the bandwidth on a pod: - -. Write an object definition JSON file, and specify the data traffic speed using -`kubernetes.io/ingress-bandwidth` and `kubernetes.io/egress-bandwidth` -annotations. For example, to limit both pod egress and ingress bandwidth to 10M/s: -+ -.Limited `Pod` object definition -[source,json] ----- -{ - "kind": "Pod", - "spec": { - "containers": [ - { - "image": "openshift/hello-openshift", - "name": "hello-openshift" - } - ] - }, - "apiVersion": "v1", - "metadata": { - "name": "iperf-slow", - "annotations": { - "kubernetes.io/ingress-bandwidth": "10M", - "kubernetes.io/egress-bandwidth": "10M" - } - } -} ----- - -. Create the pod using the object definition: -+ -[source,terminal] ----- -$ oc create -f <file_or_dir_path> ----- diff --git a/modules/nodes-pods-configuring-pod-critical.adoc b/modules/nodes-pods-configuring-pod-critical.adoc deleted file mode 100644 index 9506fbc651fa..000000000000 --- a/modules/nodes-pods-configuring-pod-critical.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-configuring.adoc -// * nodes/nodes-cluster-pods-configuring - -:_content-type: PROCEDURE -[id="nodes-pods-configuring-critical_{context}"] -= Preventing pod removal using critical pods - -There are a number of core components that are critical to a fully functional cluster, -but, run on a regular cluster node rather than the master. A cluster might stop working properly if a critical add-on is evicted. - -Pods marked as critical are not allowed to be evicted. - -.Procedure - -To make a pod critical: - -. 
Create a `Pod` spec or edit existing pods to include the `system-cluster-critical` priority class: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-pdb -spec: - template: - metadata: - name: critical-pod - priorityClassName: system-cluster-critical <1> ----- -<1> Default priority class for pods that should never be evicted from a node. -+ -Alternatively, you can specify `system-node-critical` for pods that are important to the cluster -but can be removed if necessary. - -. Create the pod: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- diff --git a/modules/nodes-pods-configuring-reducing.adoc b/modules/nodes-pods-configuring-reducing.adoc deleted file mode 100644 index c78a43bd24da..000000000000 --- a/modules/nodes-pods-configuring-reducing.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-configuring.adoc -// * nodes/nodes-cluster-pods-configuring - -:_content-type: REFERENCE -[id="nodes-pods-configuring-reducing_{context}"] -= Reducing pod timeouts when using persistent volumes with high file counts - -If a storage volume contains many files (~1,000,000 or greater), you might experience pod timeouts. - -This can occur because, when volumes are mounted, {product-title} recursively changes the ownership and permissions of the contents of each volume in order to match the `fsGroup` specified in a pod's `securityContext`. For large volumes, checking and changing the ownership and permissions can be time consuming, resulting in a very slow pod startup. - -You can reduce this delay by applying one of the following workarounds: - -* Use a security context constraint (SCC) to skip the SELinux relabeling for a volume. - -* Use the `fsGroupChangePolicy` field inside an SCC to control the way that {product-title} checks and manages ownership and permissions for a volume. - -* Use the Cluster Resource Override Operator to automatically apply an SCC to skip the SELinux relabeling. - -* Use a runtime class to skip the SELinux relabeling for a volume. - -For information, see link:https://access.redhat.com/solutions/6221251[When using Persistent Volumes with high file counts in OpenShift, why do pods fail to start or take an excessive amount of time to achieve "Ready" state?]. diff --git a/modules/nodes-pods-configuring-restart.adoc b/modules/nodes-pods-configuring-restart.adoc deleted file mode 100644 index 1f9bc53e5e59..000000000000 --- a/modules/nodes-pods-configuring-restart.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-configuring.adoc -// * nodes/nodes-cluster-pods-configuring - -[id="nodes-pods-configuring-restart_{context}"] -= Configuring how pods behave after restart - -A pod restart policy determines how {product-title} responds when Containers in that pod exit. -The policy applies to all Containers in that pod. - -The possible values are: - -* `Always` - Tries restarting a successfully exited Container on the pod continuously, with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes. The default is `Always`. -* `OnFailure` - Tries restarting a failed Container on the pod with an exponential back-off delay (10s, 20s, 40s) capped at 5 minutes. -* `Never` - Does not try to restart exited or failed Containers on the pod. Pods immediately fail and exit. - -After the pod is bound to a node, the pod will never be bound to another node. 
This means that a controller is necessary in order for a pod to survive node failure: - -[cols="3",options="header"] -|=== - -|Condition -|Controller Type -|Restart Policy - -|Pods that are expected to terminate (such as batch computations) -|Job -|`OnFailure` or `Never` - -|Pods that are expected to not terminate (such as web servers) -|Replication controller -| `Always`. - -|Pods that must run one-per-machine -|Daemon set -|Any -|=== - -If a Container on a pod fails and the restart policy is set to `OnFailure`, the pod stays on the node and the Container is restarted. If you do not want the Container to -restart, use a restart policy of `Never`. - -If an entire pod fails, {product-title} starts a new pod. Developers must address the possibility that applications might be restarted in a new pod. In particular, -applications must handle temporary files, locks, incomplete output, and so forth caused by previous runs. - -[NOTE] -==== -Kubernetes architecture expects reliable endpoints from cloud providers. When a cloud provider is down, the kubelet prevents {product-title} from restarting. - -If the underlying cloud provider endpoints are not reliable, do not install a cluster using cloud provider integration. Install the cluster as if it was in a no-cloud environment. It is not recommended to toggle cloud provider integration on or off in an installed cluster. -==== - -For details on how {product-title} uses restart policy with failed Containers, see -the link:https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#example-states[Example States] in the Kubernetes documentation. diff --git a/modules/nodes-pods-daemonsets-creating.adoc b/modules/nodes-pods-daemonsets-creating.adoc deleted file mode 100644 index f6085d5f64c9..000000000000 --- a/modules/nodes-pods-daemonsets-creating.adoc +++ /dev/null @@ -1,147 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-daemonsets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-daemonsets-creating_{context}"] -= Creating daemonsets - -When creating daemon sets, the `nodeSelector` field is used to indicate the -nodes on which the daemon set should deploy replicas. - -.Prerequisites - -* Before you start using daemon sets, disable the default project-wide node selector -in your namespace, by setting the namespace annotation `openshift.io/node-selector` to an empty string: -+ -[source,terminal] ----- -$ oc patch namespace myproject -p \ - '{"metadata": {"annotations": {"openshift.io/node-selector": ""}}}' ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to disable the default project-wide node selector for a namespace: - -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: <namespace> - annotations: - openshift.io/node-selector: '' ----- -==== - -* If you are creating a new project, overwrite the default node selector: -+ -[source,terminal] ----- -$ oc adm new-project <name> --node-selector="" ----- - -.Procedure - -To create a daemon set: - -. 
Define the daemon set yaml file: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: hello-daemonset -spec: - selector: - matchLabels: - name: hello-daemonset <1> - template: - metadata: - labels: - name: hello-daemonset <2> - spec: - nodeSelector: <3> - role: worker - containers: - - image: openshift/hello-openshift - imagePullPolicy: Always - name: registry - ports: - - containerPort: 80 - protocol: TCP - resources: {} - terminationMessagePath: /dev/termination-log - serviceAccount: default - terminationGracePeriodSeconds: 10 ----- -<1> The label selector that determines which pods belong to the daemon set. -<2> The pod template's label selector. Must match the label selector above. -<3> The node selector that determines on which nodes pod replicas should be deployed. -A matching label must be present on the node. - -. Create the daemon set object: -+ -[source,terminal] ----- -$ oc create -f daemonset.yaml ----- - -. To verify that the pods were created, and that each node has a pod replica: -+ -.. Find the daemonset pods: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -hello-daemonset-cx6md 1/1 Running 0 2m -hello-daemonset-e3md9 1/1 Running 0 2m ----- -+ -.. View the pods to verify the pod has been placed onto the node: -+ -[source,terminal] ----- -$ oc describe pod/hello-daemonset-cx6md|grep Node ----- -+ -.Example output -[source,terminal] ----- -Node: openshift-node01.hostname.com/10.14.20.134 ----- -+ -[source,terminal] ----- -$ oc describe pod/hello-daemonset-e3md9|grep Node ----- -+ -.Example output -[source,terminal] ----- -Node: openshift-node02.hostname.com/10.14.20.137 ----- - -[IMPORTANT] -==== -* If you update a daemon set pod template, the existing pod -replicas are not affected. - -* If you delete a daemon set and then create a new daemon set -with a different template but the same label selector, it recognizes any -existing pod replicas as having matching labels and thus does not update them or -create new replicas despite a mismatch in the pod template. - -* If you change node labels, the daemon set adds pods to nodes that match the new labels and deletes pods -from nodes that do not match the new labels. - -To update a daemon set, force new pod replicas to be created by deleting the old -replicas or nodes. -==== diff --git a/modules/nodes-pods-plugins-about.adoc b/modules/nodes-pods-plugins-about.adoc deleted file mode 100644 index 0169494a1e90..000000000000 --- a/modules/nodes-pods-plugins-about.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-plugin.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-pods-plugins-about_{context}"] -= Understanding device plugins - -The device plugin provides a consistent and portable solution to consume hardware -devices across clusters. The device plugin provides support for these devices -through an extension mechanism, which makes these devices available to -Containers, provides health checks of these devices, and securely shares them. - -[IMPORTANT] -==== -{product-title} supports the device plugin API, but the device plugin -Containers are supported by individual vendors. -==== - -A device plugin is a gRPC service running on the nodes (external to -the `kubelet`) that is responsible for managing specific -hardware resources. 
Any device plugin must support following remote procedure -calls (RPCs): - -[source,golang] ----- -service DevicePlugin { - // GetDevicePluginOptions returns options to be communicated with Device - // Manager - rpc GetDevicePluginOptions(Empty) returns (DevicePluginOptions) {} - - // ListAndWatch returns a stream of List of Devices - // Whenever a Device state change or a Device disappears, ListAndWatch - // returns the new list - rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {} - - // Allocate is called during container creation so that the Device - // Plug-in can run device specific operations and instruct Kubelet - // of the steps to make the Device available in the container - rpc Allocate(AllocateRequest) returns (AllocateResponse) {} - - // PreStartcontainer is called, if indicated by Device Plug-in during - // registration phase, before each container start. Device plug-in - // can run device specific operations such as reseting the device - // before making devices available to the container - rpc PreStartcontainer(PreStartcontainerRequest) returns (PreStartcontainerResponse) {} -} ----- - -[discrete] -=== Example device plugins -* link:https://github.com/GoogleCloudPlatform/Container-engine-accelerators/tree/master/cmd/nvidia_gpu[Nvidia GPU device plugin for COS-based operating system] -* link:https://github.com/NVIDIA/k8s-device-plugin[Nvidia official GPU device plugin] -* link:https://github.com/vikaschoudhary16/sfc-device-plugin[Solarflare device plugin] -* link:https://github.com/kubevirt/kubernetes-device-plugins[KubeVirt device plugins: vfio and kvm] -* link:https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin[Kubernetes device plugin for IBM Crypto Express (CEX) cards] - - -[NOTE] -==== -For easy device plugin reference implementation, there is a stub device plugin -in the Device Manager code: -*_vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go_*. -==== - -[id="methods-for-deploying-a-device-plugin_{context}"] -== Methods for deploying a device plugin - -* Daemon sets are the recommended approach for device plugin deployments. -* Upon start, the device plugin will try to create a UNIX domain socket at -*_/var/lib/kubelet/device-plugin/_* on the node to serve RPCs from Device Manager. -* Since device plugins must manage hardware resources, access to the host -file system, as well as socket creation, they must be run in a privileged -security context. -* More specific details regarding deployment steps can be found with each device -plugin implementation. diff --git a/modules/nodes-pods-plugins-device-mgr.adoc b/modules/nodes-pods-plugins-device-mgr.adoc deleted file mode 100644 index 33c13b467113..000000000000 --- a/modules/nodes-pods-plugins-device-mgr.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-plugins.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-pods-plugins-device-mgr_{context}"] -= Understanding the Device Manager - -Device Manager provides a mechanism for advertising specialized node hardware resources -with the help of plugins known as device plugins. - -You can advertise specialized hardware without requiring any upstream code changes. - -[IMPORTANT] -==== -{product-title} supports the device plugin API, but the device plugin -Containers are supported by individual vendors. -==== - -Device Manager advertises devices as *Extended Resources*. 
User pods can consume -devices, advertised by Device Manager, using the same *Limit/Request* mechanism, -which is used for requesting any other *Extended Resource*. - -Upon start, the device plugin registers itself with Device Manager invoking `Register` on the -*_/var/lib/kubelet/device-plugins/kubelet.sock_* and starts a gRPC service at -*_/var/lib/kubelet/device-plugins/<plugin>.sock_* for serving Device Manager -requests. - -Device Manager, while processing a new registration request, invokes -`ListAndWatch` remote procedure call (RPC) at the device plugin service. In -response, Device Manager gets a list of *Device* objects from the plugin over a -gRPC stream. Device Manager will keep watching on the stream for new updates -from the plugin. On the plugin side, the plugin will also keep the stream -open and whenever there is a change in the state of any of the devices, a new -device list is sent to the Device Manager over the same streaming connection. - -While handling a new pod admission request, Kubelet passes requested `Extended -Resources` to the Device Manager for device allocation. Device Manager checks in -its database to verify if a corresponding plugin exists or not. If the plugin exists -and there are free allocatable devices as well as per local cache, `Allocate` -RPC is invoked at that particular device plugin. - -Additionally, device plugins can also perform several other device-specific -operations, such as driver installation, device initialization, and device -resets. These functionalities vary from implementation to implementation. diff --git a/modules/nodes-pods-plugins-install.adoc b/modules/nodes-pods-plugins-install.adoc deleted file mode 100644 index 37159dc3490f..000000000000 --- a/modules/nodes-pods-plugins-install.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-plugins.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-plugins-install_{context}"] -= Enabling Device Manager - -Enable Device Manager to implement a device plugin to advertise specialized -hardware without any upstream code changes. - -Device Manager provides a mechanism for advertising specialized node hardware resources -with the help of plugins known as device plugins. - -. Obtain the label associated with the static `MachineConfigPool` CRD for the type of node you want to configure by entering the following command. -Perform one of the following steps: - -.. View the machine config: -+ -[source,terminal] ----- -# oc describe machineconfig <name> ----- -+ -For example: -+ -[source,terminal] ----- -# oc describe machineconfig 00-worker ----- -+ -.Example output -[source,terminal] ----- -Name: 00-worker -Namespace: -Labels: machineconfiguration.openshift.io/role=worker <1> ----- -<1> Label required for the Device Manager. - -.Procedure - -. Create a custom resource (CR) for your configuration change. -+ -.Sample configuration for a Device Manager CR -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - name: devicemgr <1> -spec: - machineConfigPoolSelector: - matchLabels: - machineconfiguration.openshift.io: devicemgr <2> - kubeletConfig: - feature-gates: - - DevicePlugins=true <3> ----- -<1> Assign a name to CR. -<2> Enter the label from the Machine Config Pool. -<3> Set `DevicePlugins` to 'true`. - -. 
Create the Device Manager: -+ -[source,terminal] ----- -$ oc create -f devicemgr.yaml ----- -+ -.Example output -[source,terminal] ----- -kubeletconfig.machineconfiguration.openshift.io/devicemgr created ----- - -. Ensure that Device Manager was actually enabled by confirming that -*_/var/lib/kubelet/device-plugins/kubelet.sock_* is created on the node. This is -the UNIX domain socket on which the Device Manager gRPC server listens for new -plugin registrations. This sock file is created when the Kubelet is started -only if Device Manager is enabled. diff --git a/modules/nodes-pods-pod-disruption-about.adoc b/modules/nodes-pods-pod-disruption-about.adoc deleted file mode 100644 index 26fd08de0956..000000000000 --- a/modules/nodes-pods-pod-disruption-about.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-configuring.adoc -// * nodes/nodes-cluster-pods-configuring -// * post_installation_configuration/cluster-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-pods-configuring-pod-distruption-about_{context}"] -= Understanding how to use pod disruption budgets to specify the number of pods that must be up - -A _pod disruption budget_ is part of the -link:http://kubernetes.io/docs/admin/disruptions/[Kubernetes] API, which can be -managed with `oc` commands like other object types. They -allow the specification of safety constraints on pods during operations, such as -draining a node for maintenance. - -`PodDisruptionBudget` is an API object that specifies the minimum number or -percentage of replicas that must be up at a time. Setting these in projects can -be helpful during node maintenance (such as scaling a cluster down or a cluster -upgrade) and is only honored on voluntary evictions (not on node failures). - -A `PodDisruptionBudget` object's configuration consists of the following key -parts: - -* A label selector, which is a label query over a set of pods. -* An availability level, which specifies the minimum number of pods that must be - available simultaneously, either: -** `minAvailable` is the number of pods must always be available, even during a disruption. -** `maxUnavailable` is the number of pods can be unavailable during a disruption. - -[NOTE] -==== -`Available` refers to the number of pods that has condition `Ready=True`. -`Ready=True` refers to the pod that is able to serve requests and should be added to the load balancing pools of all matching services. - -A `maxUnavailable` of `0%` or `0` or a `minAvailable` of `100%` or equal to the number of replicas -is permitted but can block nodes from being drained. -==== - -You can check for pod disruption budgets across all projects with the following: - -[source,terminal] ----- -$ oc get poddisruptionbudget --all-namespaces ----- - -.Example output -[source,terminal] ----- -NAMESPACE NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE -openshift-apiserver openshift-apiserver-pdb N/A 1 1 121m -openshift-cloud-controller-manager aws-cloud-controller-manager 1 N/A 1 125m -openshift-cloud-credential-operator pod-identity-webhook 1 N/A 1 117m -openshift-cluster-csi-drivers aws-ebs-csi-driver-controller-pdb N/A 1 1 121m -openshift-cluster-storage-operator csi-snapshot-controller-pdb N/A 1 1 122m -openshift-cluster-storage-operator csi-snapshot-webhook-pdb N/A 1 1 122m -openshift-console console N/A 1 1 116m -#... ----- - -The `PodDisruptionBudget` is considered healthy when there are at least -`minAvailable` pods running in the system. 
Every pod above that limit can be evicted.

[NOTE]
====
Depending on your pod priority and preemption settings,
lower-priority pods might be removed despite their pod disruption budget requirements.
====
diff --git a/modules/nodes-pods-pod-disruption-configuring.adoc b/modules/nodes-pods-pod-disruption-configuring.adoc deleted file mode 100644 index a02427df4f5f..000000000000 --- a/modules/nodes-pods-pod-disruption-configuring.adoc +++ /dev/null @@ -1,63 +0,0 @@
// Module included in the following assemblies:
//
// * nodes/nodes-pods-configuring.adoc
// * nodes/nodes-cluster-pods-configuring
// * post_installation_configuration/cluster-tasks.adoc

:_content-type: PROCEDURE
[id="nodes-pods-pod-disruption-configuring_{context}"]
= Specifying the number of pods that must be up with pod disruption budgets

You can use a `PodDisruptionBudget` object to specify the minimum number or percentage of replicas that must be up at a time.

.Procedure

To configure a pod disruption budget:

. Create a YAML file with an object definition similar to the following:
+
[source,yaml]
----
apiVersion: policy/v1 <1>
kind: PodDisruptionBudget
metadata:
  name: my-pdb
spec:
  minAvailable: 2 <2>
  selector: <3>
    matchLabels:
      name: my-pod
----
<1> `PodDisruptionBudget` is part of the `policy/v1` API group.
<2> The minimum number of pods that must be available simultaneously. This can be either an integer or a string specifying a percentage, for example, `20%`.
<3> A label query over a set of resources. The results of `matchLabels` and `matchExpressions` are logically conjoined. Leave this parameter blank, for example `selector {}`, to select all pods in the project.
+
Or:
+
[source,yaml]
----
apiVersion: policy/v1 <1>
kind: PodDisruptionBudget
metadata:
  name: my-pdb
spec:
  maxUnavailable: 25% <2>
  selector: <3>
    matchLabels:
      name: my-pod
----
<1> `PodDisruptionBudget` is part of the `policy/v1` API group.
<2> The maximum number of pods that can be unavailable simultaneously. This can be either an integer or a string specifying a percentage, for example, `20%`.
<3> A label query over a set of resources. The results of `matchLabels` and `matchExpressions` are logically conjoined. Leave this parameter blank, for example `selector {}`, to select all pods in the project.

. Run the following command to add the object to the project:
+
[source,terminal]
----
$ oc create -f </path/to/file> -n <project_name>
----
diff --git a/modules/nodes-pods-priority-about.adoc b/modules/nodes-pods-priority-about.adoc deleted file mode 100644 index 49de51bd3b16..000000000000 --- a/modules/nodes-pods-priority-about.adoc +++ /dev/null @@ -1,56 +0,0 @@
// Module included in the following assemblies:
//
// * nodes/nodes-pods-priority.adoc

:_content-type: CONCEPT
[id="nodes-pods-priority-about_{context}"]
= Understanding pod priority

When you use the Pod Priority and Preemption feature, the scheduler orders pending pods by their priority, and a pending pod is placed ahead of other pending pods with lower priority in the scheduling queue. As a result, the higher priority pod might be scheduled sooner than pods with lower priority if its scheduling requirements are met. If a pod cannot be scheduled, the scheduler continues to schedule other lower priority pods.
- -[id="admin-guide-priority-preemption-priority-class_{context}"] -== Pod priority classes - -You can assign pods a priority class, which is a non-namespaced object that defines a mapping from a name to the integer value of the priority. The higher the value, the higher the priority. - -A priority class object can take any 32-bit integer value smaller than or equal to 1000000000 (one billion). Reserve numbers larger than or equal to one billion for critical pods that must not be preempted or evicted. By default, {product-title} has two reserved priority classes for critical system pods to have guaranteed scheduling. - -[source,terminal] ----- -$ oc get priorityclasses ----- - -.Example output -[source,terminal] ----- -NAME VALUE GLOBAL-DEFAULT AGE -system-node-critical 2000001000 false 72m -system-cluster-critical 2000000000 false 72m -openshift-user-critical 1000000000 false 3d13h -cluster-logging 1000000 false 29s ----- - -* *system-node-critical* - This priority class has a value of 2000001000 and is used for all pods that should never be evicted from a node. Examples of pods that have this priority class are `sdn-ovs`, `sdn`, and so forth. A number of critical components include the `system-node-critical` priority class by default, for example: -+ -** master-api -** master-controller -** master-etcd -** sdn -** sdn-ovs -** sync - -* *system-cluster-critical* - This priority class has a value of 2000000000 (two billion) and is used with pods that are important for the cluster. Pods with this priority class can be evicted from a node in certain circumstances. For example, pods configured with the `system-node-critical` priority class can take priority. However, this priority class does ensure guaranteed scheduling. Examples of pods that can have this priority class are fluentd, add-on components like descheduler, and so forth. -A number of critical components include the `system-cluster-critical` priority class by default, for example: -+ -** fluentd -** metrics-server -** descheduler - -* *openshift-user-critical* - You can use the `priorityClassName` field with important pods that cannot bind their resource consumption and do not have predictable resource consumption behavior. Prometheus pods under the `openshift-monitoring` and `openshift-user-workload-monitoring` namespaces use the `openshift-user-critical` `priorityClassName`. Monitoring workloads use `system-critical` as their first `priorityClass`, but this causes problems when monitoring uses excessive memory and the nodes cannot evict them. As a result, monitoring drops priority to give the scheduler flexibility, moving heavy workloads around to keep critical nodes operating. - -* *cluster-logging* - This priority is used by Fluentd to make sure Fluentd pods are scheduled to nodes over other apps. - -[id="admin-guide-priority-preemption-names_{context}"] -== Pod priority names - -After you have one or more priority classes, you can create pods that specify a priority class name in a `Pod` spec. The priority admission controller uses the priority class name field to populate the integer value of the priority. If the named priority class is not found, the pod is rejected. 
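To confirm that the priority admission controller resolved a priority class name into an integer priority for a specific pod, you can inspect the pod's `priorityClassName` and `priority` fields. This is a minimal sketch; the pod name is a placeholder:

[source,terminal]
----
$ oc get pod <pod_name> -o jsonpath='{.spec.priorityClassName}{" "}{.spec.priority}{"\n"}'
----

The output shows the class name followed by the integer value that the admission controller populated.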
diff --git a/modules/nodes-pods-priority-configuring.adoc b/modules/nodes-pods-priority-configuring.adoc deleted file mode 100644 index 3e6dd2654328..000000000000 --- a/modules/nodes-pods-priority-configuring.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-priority.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-priority-configuring_{context}"] -= Configuring priority and preemption - -You apply pod priority and preemption by creating a priority class object and associating pods to the priority using the -`priorityClassName` in your `Pod` specs. - -.Sample priority class object -[source,yaml] ----- -apiVersion: scheduling.k8s.io/v1 -kind: PriorityClass -metadata: - name: high-priority <1> -value: 1000000 <2> -preemptionPolicy: PreemptLowerPriority <3> -globalDefault: false <4> -description: "This priority class should be used for XYZ service pods only." <5> ----- -<1> The name of the priority class object. -<2> The priority value of the object. -<3> Optional field that indicates whether this priority class is preempting or non-preempting. The preemption policy defaults to `PreemptLowerPriority`, which allows pods of that priority class to preempt lower-priority pods. If the preemption policy is set to `Never`, pods in that priority class are non-preempting. -<4> Optional field that indicates whether this priority class should be used for pods without a priority class name specified. This field is `false` by default. Only one priority class with `globalDefault` set to `true` can exist in the cluster. If there is no priority class with `globalDefault:true`, the priority of pods with no priority class name is zero. Adding a priority class with `globalDefault:true` affects only pods created after the priority class is added and does not change the priorities of existing pods. -<5> Optional arbitrary text string that describes which pods developers should use with this priority class. - -.Procedure - -To configure your cluster to use priority and preemption: - -. Create one or more priority classes: - -.. Specify a name and value for the priority. - -.. Optionally specify the `globalDefault` field in the priority class and a description. - -. Create a `Pod` spec or edit existing pods to include the name of a priority class, similar to the following: -+ -.Sample `Pod` spec with priority class name -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nginx - labels: - env: test -spec: - containers: - - name: nginx - image: nginx - imagePullPolicy: IfNotPresent - priorityClassName: high-priority <1> ----- -<1> Specify the priority class to use with this pod. - -. Create the pod: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -You can add the priority name directly to the pod configuration or to a pod template. diff --git a/modules/nodes-pods-priority-preempt-about.adoc b/modules/nodes-pods-priority-preempt-about.adoc deleted file mode 100644 index 664714e651b0..000000000000 --- a/modules/nodes-pods-priority-preempt-about.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-priority.adoc - -:_content-type: CONCEPT -[id="nodes-pods-priority-preempt-about_{context}"] -= Understanding pod preemption - -When a developer creates a pod, the pod goes into a queue. If the developer configured the pod for pod priority or preemption, the scheduler picks a pod from the queue and tries to schedule the pod on a node. 
If the scheduler cannot find space on an appropriate node that satisfies all the specified requirements of the pod, preemption logic is triggered for the pending pod. - -When the scheduler preempts one or more pods on a node, the `nominatedNodeName` field of the higher-priority `Pod` spec is set to the name of the node, along with the `nodeName` field. The scheduler uses the `nominatedNodeName` field to keep track of the resources reserved for pods and also provides information to the user about preemptions in the cluster. - -After the scheduler preempts a lower-priority pod, the scheduler honors the graceful termination period of the pod. If another node becomes available while the scheduler is waiting for the lower-priority pod to terminate, the scheduler can schedule the higher-priority pod on that node. As a result, the `nominatedNodeName` field and `nodeName` field of the `Pod` spec might be different. - -Also, if the scheduler preempts pods on a node and is waiting for termination, and a pod with a higher priority than the pending pod needs to be scheduled, the scheduler can schedule the higher-priority pod instead. In such a case, the scheduler clears the `nominatedNodeName` of the pending pod, making the pod eligible for another node. - -Preemption does not necessarily remove all lower-priority pods from a node. The scheduler can schedule a pending pod by removing a portion of the lower-priority pods. - -The scheduler considers a node for pod preemption only if the pending pod can be scheduled on the node. - -[id="non-preempting-priority-class_{context}"] -== Non-preempting priority classes - -Pods with the preemption policy set to `Never` are placed in the scheduling queue ahead of lower-priority pods, but they cannot preempt other pods. A non-preempting pod waiting to be scheduled stays in the scheduling queue until sufficient resources are free and it can be scheduled. Non-preempting pods, like other pods, are subject to scheduler back-off. This means that if the scheduler tries unsuccessfully to schedule these pods, they are retried with lower frequency, allowing other pods with lower priority to be scheduled before them. - -Non-preempting pods can still be preempted by other high-priority pods. A sample non-preempting priority class is shown after the scheduler settings discussion that follows. - -[id="priority-preemption-other_{context}"] -== Pod preemption and other scheduler settings - -If you enable pod priority and preemption, consider your other scheduler settings: - -Pod priority and pod disruption budget:: -A pod disruption budget specifies the minimum number or percentage of replicas that must be up at a time. If you specify pod disruption budgets, {product-title} respects them when preempting pods at a best effort level. The scheduler attempts to preempt pods without violating the pod disruption budget. If no such pods are found, lower-priority pods might be preempted despite their pod disruption budget requirements. - -Pod priority and pod affinity:: -Pod affinity requires a new pod to be scheduled on the same node as other pods with the same label. - -If a pending pod has inter-pod affinity with one or more of the lower-priority pods on a node, the scheduler cannot preempt the lower-priority pods without violating the affinity requirements. In this case, the scheduler looks for another node to schedule the pending pod. However, there is no guarantee that the scheduler can find an appropriate node and the pending pod might not be scheduled. - -To prevent this situation, carefully configure pod affinity with equal-priority pods.
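As referenced in the non-preempting priority classes discussion above, the following is a minimal sketch of a priority class with the preemption policy set to `Never`. The name, value, and description are examples only:

[source,yaml]
----
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority-nonpreempting <1>
value: 100000 <2>
preemptionPolicy: Never <3>
globalDefault: false
description: "High-priority workloads that must not preempt other pods."
----
<1> An example name for the non-preempting priority class.
<2> Pods in this class are still placed high in the scheduling queue.
<3> `Never` prevents pods in this class from preempting lower-priority pods.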
- -//// -Under consideration for future release -Pod priority and cross-node preemption:: -If the scheduler is considering preempting pods on a node so that a pending pod can be scheduled, the scheduler can preempt a pod on different node to schedule the pending pod. -//// - -[id="priority-preemption-graceful_{context}"] -== Graceful termination of preempted pods - -When preempting a pod, the scheduler waits for the pod graceful termination period to expire, allowing the pod to finish working and exit. If the pod does not exit after the period, the scheduler kills the pod. This graceful termination period creates a time gap between the point that the scheduler preempts the pod and the time when the pending pod can be scheduled on the node. - -To minimize this gap, configure a small graceful termination period for lower-priority pods. diff --git a/modules/nodes-pods-secrets-about.adoc b/modules/nodes-pods-secrets-about.adoc deleted file mode 100644 index f8a2a00616d8..000000000000 --- a/modules/nodes-pods-secrets-about.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: CONCEPT -[id="nodes-pods-secrets-about_{context}"] -= Understanding secrets - -The `Secret` object type provides a mechanism to hold sensitive information such -as passwords, {product-title} client configuration files, -private source repository credentials, and so on. Secrets decouple sensitive -content from the pods. You can mount secrets into containers using a volume -plugin or the system can use secrets to perform actions on behalf of a pod. - -Key properties include: - -- Secret data can be referenced independently from its definition. -- Secret data volumes are backed by temporary file-storage facilities (tmpfs) and never come to rest on a node. -- Secret data can be shared within a namespace. - -.YAML `Secret` object definition - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret - namespace: my-namespace -type: Opaque <1> -data: <2> - username: dmFsdWUtMQ0K <3> - password: dmFsdWUtMg0KDQo= -stringData: <4> - hostname: myapp.mydomain.com <5> ----- -<1> Indicates the structure of the secret's key names and values. -<2> The allowable format for the keys in the `data` field must meet the -guidelines in the *DNS_SUBDOMAIN* value in -link:https://github.com/kubernetes/kubernetes/blob/v1.0.0/docs/design/identifiers.md[the -Kubernetes identifiers glossary]. -<3> The value associated with keys in the `data` map must be base64 encoded. -<4> Entries in the `stringData` map are converted to base64 -and the entry will then be moved to the `data` map automatically. This field -is write-only; the value will only be returned via the `data` field. -<5> The value associated with keys in the `stringData` map is made up of -plain text strings. - -You must create a secret before creating the pods that depend on that secret. - -When creating secrets: - -- Create a secret object with secret data. -- Update the pod's service account to allow the reference to the secret. -- Create a pod, which consumes the secret as an environment variable or as a file -(using a `secret` volume). - -[id="nodes-pods-secrets-about-types_{context}"] -== Types of secrets - -The value in the `type` field indicates the structure of the secret's key names and values. The type can be used to -enforce the presence of user names and keys in the secret object. If you do not want validation, use the `opaque` type, -which is the default. 
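If you prefer not to write YAML for simple cases, the `oc create secret generic` command produces an `Opaque` secret directly. The following is a sketch with placeholder names and values:

[source,terminal]
----
$ oc create secret generic my-opaque-secret \
  --from-literal=username=myuser \
  --from-literal=password=mypassword
----

The command creates an `Opaque` secret unless you pass the `--type` option to set a different type.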
- -Specify one of the following types to trigger minimal server-side validation to ensure the presence of specific key names in the secret data: - -* `kubernetes.io/service-account-token`. Uses a service account token. -* `kubernetes.io/basic-auth`. Use with Basic Authentication. -* `kubernetes.io/ssh-auth`. Use with SSH Key Authentication. -* `kubernetes.io/tls`. Use with TLS certificate authorities. - -Specify `type: Opaque` if you do not want validation, which means the secret does not claim to conform to any convention for key names or values. -An _opaque_ secret, allows for unstructured `key:value` pairs that can contain arbitrary values. - -[NOTE] -==== -You can specify other arbitrary types, such as `example.com/my-secret-type`. These types are not enforced server-side, -but indicate that the creator of the secret intended to conform to the key/value requirements of that type. -==== - -For examples of different secret types, see the code samples in _Using Secrets_. - -[id="nodes-pods-secrets-about-keys_{context}"] -== Secret data keys - -Secret keys must be in a DNS subdomain. - -// remove this snippet for 4.12+ - -include::snippets/service-account-auto-secret-removed.adoc[] diff --git a/modules/nodes-pods-secrets-certificates-about.adoc b/modules/nodes-pods-secrets-certificates-about.adoc deleted file mode 100644 index 34d8b86b2dff..000000000000 --- a/modules/nodes-pods-secrets-certificates-about.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: CONCEPT -[id="nodes-pods-secrets-certificates-about_{context}"] -= About using signed certificates with secrets - -To secure communication to your service, you can configure {product-title} to generate a signed -serving certificate/key pair that you can add into a secret in a project. - -A _service serving certificate secret_ is intended to support complex middleware -applications that need out-of-the-box certificates. It has the same settings as -the server certificates generated by the administrator tooling for nodes and -masters. - -.Service `Pod` spec configured for a service serving certificates secret. - -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: registry - annotations: - service.beta.openshift.io/serving-cert-secret-name: registry-cert<1> -# ... ----- -<1> Specify the name for the certificate - -Other pods can trust cluster-created certificates (which are only signed for -internal DNS names), by using the CA bundle in the -*_/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt_* file that is -automatically mounted in their pod. - -The signature algorithm for this feature is `x509.SHA256WithRSA`. To manually -rotate, delete the generated secret. A new certificate is created. - diff --git a/modules/nodes-pods-secrets-certificates-creating.adoc b/modules/nodes-pods-secrets-certificates-creating.adoc deleted file mode 100644 index d242b1856d22..000000000000 --- a/modules/nodes-pods-secrets-certificates-creating.adoc +++ /dev/null @@ -1,130 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-certificates-creating_{context}"] -= Generating signed certificates for use with secrets - -To use a signed serving certificate/key pair with a pod, create or edit the service to add -the `service.beta.openshift.io/serving-cert-secret-name` annotation, then add the secret to the pod. 
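If the service already exists, one way to add the annotation is from the command line instead of editing the service YAML. The service and secret names in this sketch are placeholders:

[source,terminal]
----
$ oc annotate service <service_name> \
  service.beta.openshift.io/serving-cert-secret-name=<secret_name>
----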
- -.Procedure - -To create a _service serving certificate secret_: - -. Edit the `Pod` spec for your service. - -. Add the `service.beta.openshift.io/serving-cert-secret-name` annotation -with the name you want to use for your secret. -+ -[source,yaml] ----- -kind: Service -apiVersion: v1 -metadata: - name: my-service - annotations: - service.beta.openshift.io/serving-cert-secret-name: my-cert <1> -spec: - selector: - app: MyApp - ports: - - protocol: TCP - port: 80 - targetPort: 9376 ----- -+ -The certificate and key are in PEM format, stored in `tls.crt` and `tls.key` -respectively. - -. Create the service: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- - -. View the secret to make sure it was created: - -.. View a list of all secrets: -+ -[source,terminal] ----- -$ oc get secrets ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE DATA AGE -my-cert kubernetes.io/tls 2 9m ----- -+ -.. View details on your secret: -+ -[source,terminal] ----- -$ oc describe secret my-cert ----- -+ -.Example output -[source,terminal] ----- -Name: my-cert -Namespace: openshift-console -Labels: <none> -Annotations: service.beta.openshift.io/expiry: 2023-03-08T23:22:40Z - service.beta.openshift.io/originating-service-name: my-service - service.beta.openshift.io/originating-service-uid: 640f0ec3-afc2-4380-bf31-a8c784846a11 - service.beta.openshift.io/expiry: 2023-03-08T23:22:40Z - -Type: kubernetes.io/tls - -Data -==== -tls.key: 1679 bytes -tls.crt: 2595 bytes ----- - -. Edit your `Pod` spec with that secret. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-service-pod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: my-container - mountPath: "/etc/my-path" - volumes: - - name: my-volume - secret: - secretName: my-cert - items: - - key: username - path: my-group/my-username - mode: 511 ----- -+ -When it is available, your pod will run. -The certificate will be good for the internal service DNS name, -`<service.name>.<service.namespace>.svc`. -+ -The certificate/key pair is automatically replaced when it gets -close to expiration. View the expiration date in the -`service.beta.openshift.io/expiry` annotation on the secret, which is in -RFC3339 format. -+ -[NOTE] -==== -In most cases, the service DNS name -`<service.name>.<service.namespace>.svc` is not externally routable. The -primary use of `<service.name>.<service.namespace>.svc` is for intracluster or -intraservice communication, and with re-encrypt routes. -==== diff --git a/modules/nodes-pods-secrets-creating-basic.adoc b/modules/nodes-pods-secrets-creating-basic.adoc deleted file mode 100644 index 104c6c022921..000000000000 --- a/modules/nodes-pods-secrets-creating-basic.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-basic_{context}"] -= Creating a basic authentication secret - -As an administrator, you can create a basic authentication secret, which allows you to store the credentials needed for basic authentication. When using this secret type, the `data` parameter of the `Secret` object must contain the following keys encoded in the base64 format: - -* `username`: the user name for authentication -* `password`: the password or token for authentication - -[NOTE] -==== -You can use the `stringData` parameter to use clear text content. -==== - -.Procedure - -. 
Create a `Secret` object in a YAML file on a control plane node: -+ -.Example `secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-basic-auth -type: kubernetes.io/basic-auth <1> -stringData: <2> - username: admin - password: t0p-Secret ----- -<1> Specifies a basic authentication secret. -<2> Specifies the basic authentication values to use. - -. Use the following command to create the `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section. diff --git a/modules/nodes-pods-secrets-creating-docker.adoc b/modules/nodes-pods-secrets-creating-docker.adoc deleted file mode 100644 index c6aab4b8e53c..000000000000 --- a/modules/nodes-pods-secrets-creating-docker.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-docker_{context}"] -= Creating a Docker configuration secret - -As an administrator, you can create a Docker configuration secret, which allows you to store the credentials for accessing a container image registry. - -* `kubernetes.io/dockercfg`. Use this secret type to store your local Docker configuration file. The `data` parameter of the `secret` object must contain the contents of a `.dockercfg` file encoded in the base64 format. - -* `kubernetes.io/dockerconfigjson`. Use this secret type to store your local Docker configuration JSON file. The `data` parameter of the `secret` object must contain the contents of a `.docker/config.json` file encoded in the base64 format. - -.Procedure - -. Create a `Secret` object in a YAML file on a control plane node. -+ --- -.Example Docker configuration `secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-docker-cfg - namespace: my-project -type: kubernetes.io/dockercfg <1> -data: - .dockercfg: bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== <2> ----- -<1> Specifies that the secret is using a Docker configuration file. -<2> The base64-encoded output of a Docker configuration file --- -+ --- -.Example Docker configuration JSON `secret` object -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-docker-json - namespace: my-project -type: kubernetes.io/dockerconfigjson <1> -data: - .dockerconfigjson: bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== <2> ----- -<1> Specifies that the secret is using a Docker configuration JSON file. -<2> The base64-encoded output of a Docker configuration JSON file --- - -. Use the following command to create the `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section.
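As an alternative to base64-encoding a Docker configuration file by hand, the `oc create secret docker-registry` command builds a `kubernetes.io/dockerconfigjson` secret from credentials that you supply. The registry and credential values in this sketch are placeholders:

[source,terminal]
----
$ oc create secret docker-registry secret-docker-json \
  --docker-server=<registry_server> \
  --docker-username=<user_name> \
  --docker-password=<password> \
  --docker-email=<email>
----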
- diff --git a/modules/nodes-pods-secrets-creating-opaque.adoc b/modules/nodes-pods-secrets-creating-opaque.adoc deleted file mode 100644 index 7423b1881753..000000000000 --- a/modules/nodes-pods-secrets-creating-opaque.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-opaque_{context}"] -= Creating an opaque secret - -As an administrator, you can create an opaque secret, which allows you to store unstructured `key:value` pairs that can contain arbitrary values. - -.Procedure - -. Create a `Secret` object in a YAML file on a control plane node. -+ -For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque <1> -data: - username: dXNlci1uYW1l - password: cGFzc3dvcmQ= ----- -<1> Specifies an opaque secret. - -. Use the following command to create a `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section. diff --git a/modules/nodes-pods-secrets-creating-sa.adoc b/modules/nodes-pods-secrets-creating-sa.adoc deleted file mode 100644 index a20ea49ca11f..000000000000 --- a/modules/nodes-pods-secrets-creating-sa.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-sa_{context}"] -= Creating a service account token secret - -As an administrator, you can create a service account token secret, which allows you to distribute a service account token to applications that must authenticate to the API. - -[NOTE] -==== -It is recommended to obtain bound service account tokens using the TokenRequest API instead of using service account token secrets. The tokens obtained from the TokenRequest API are more secure than the tokens stored in secrets, because they have a bounded lifetime and are not readable by other API clients. - -You should create a service account token secret only if you cannot use the TokenRequest API and if the security exposure of a non-expiring token in a readable API object is acceptable to you. - -See the Additional resources section that follows for information on creating bound service account tokens. -==== - -.Procedure - -. Create a `Secret` object in a YAML file on a control plane node: -+ -.Example `secret` object: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-sa-sample - annotations: - kubernetes.io/service-account.name: "sa-name" <1> -type: kubernetes.io/service-account-token <2> ----- -<1> Specifies an existing service account name. If you are creating both the `ServiceAccount` and the `Secret` objects, create the `ServiceAccount` object first. -<2> Specifies a service account token secret. - -. Use the following command to create the `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. 
Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section. diff --git a/modules/nodes-pods-secrets-creating-ssh.adoc b/modules/nodes-pods-secrets-creating-ssh.adoc deleted file mode 100644 index 018067cd650a..000000000000 --- a/modules/nodes-pods-secrets-creating-ssh.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-ssh_{context}"] -= Creating an SSH authentication secret - -As an administrator, you can create an SSH authentication secret, which allows you to store data used for SSH authentication. When using this secret type, the `data` parameter of the `Secret` object must contain the SSH credential to use. - -.Procedure - -. Create a `Secret` object in a YAML file on a control plane node: -+ -.Example `secret` object: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-ssh-auth -type: kubernetes.io/ssh-auth <1> -data: - ssh-privatekey: | <2> - MIIEpQIBAAKCAQEAulqb/Y ... ----- -<1> Specifies an SSH authentication secret. -<2> Specifies the SSH key/value pair as the SSH credentials to use. - -. Use the following command to create the `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section. diff --git a/modules/nodes-pods-secrets-creating-tls.adoc b/modules/nodes-pods-secrets-creating-tls.adoc deleted file mode 100644 index 1ca4e88f6d5d..000000000000 --- a/modules/nodes-pods-secrets-creating-tls.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-secrets-creating-tls_{context}"] -= Creating a TLS secret - -As an administrator, you can create a Transport Layer Security (TLS) secret, which allows you to store a certificate and its associated key that are typically used for TLS. When using this type of secret, the `data` parameter of the `Secret` object must contain the `tls.key` and the `tls.crt` keys to use. The API server does not validate the values for each key. - -One common use for TLS secrets is to configure encryption in transit for ingress. You can also use a TLS secret with other resources or directly in your workload. - -[NOTE] -==== -You can use the `stringData` parameter to use clear text content. -==== - -.Procedure - -. Create a `Secret` object in a YAML file on a control plane node: -+ -.Example `secret` object: -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: secret-tls -type: kubernetes.io/tls <1> -data: - tls.crt: | <2> - MIIC2DCCAcCgAwIBAgIBATANBgkqh ... - tls.key: | - MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ ... - ----- -<1> Specifies a TLS secret. -<2> Specifies the `tls.key` and the `tls.crt` keys to use. - -. Use the following command to create the `Secret` object: -+ -[source,terminal] ----- -$ oc create -f <filename>.yaml ----- - -. To use the secret in a pod: - -.. Update the pod's service account to reference the secret, as shown in the "Understanding how to create secrets" section. - -.. 
Create the pod, which consumes the secret as an environment variable or as a file (using a `secret` volume), as shown in the "Understanding how to create secrets" section. diff --git a/modules/nodes-pods-secrets-creating.adoc b/modules/nodes-pods-secrets-creating.adoc deleted file mode 100644 index d7dd4b366adb..000000000000 --- a/modules/nodes-pods-secrets-creating.adoc +++ /dev/null @@ -1,154 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: CONCEPT -[id="nodes-pods-secrets-creating_{context}"] -= Understanding how to create secrets - -As an administrator you must create a secret before developers can create the pods that depend on that secret. - -When creating secrets: - -. Create a secret object that contains the data you want to keep secret. The specific data required for each secret type is descibed in the following sections. -+ -.Example YAML object that creates an opaque secret - -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: test-secret -type: Opaque <1> -data: <2> - username: dmFsdWUtMQ0K - password: dmFsdWUtMQ0KDQo= -stringData: <3> - hostname: myapp.mydomain.com - secret.properties: | - property1=valueA - property2=valueB ----- -<1> Specifies the type of secret. -<2> Specifies encoded string and data. -<3> Specifies decoded string and data. -+ -Use either the `data` or `stringdata` fields, not both. - -. Update the pod's service account to reference the secret: -+ -.YAML of a service account that uses a secret -+ -[source,yaml] ----- -apiVersion: v1 -kind: ServiceAccount - ... -secrets: -- name: test-secret ----- - -. Create a pod, which consumes the secret as an environment variable or as a file -(using a `secret` volume): -+ -.YAML of a pod populating files in a volume with secret data -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "cat /etc/secret-volume/*" ] - volumeMounts: <1> - - name: secret-volume - mountPath: /etc/secret-volume <2> - readOnly: true <3> - volumes: - - name: secret-volume - secret: - secretName: test-secret <4> - restartPolicy: Never ----- -<1> Add a `volumeMounts` field to each container that needs the secret. -<2> Specifies an unused directory name where you would like the secret to appear. Each key in the secret data map becomes the filename under `mountPath`. -<3> Set to `true`. If true, this instructs the driver to provide a read-only volume. -<4> Specifies the name of the secret. -+ -.YAML of a pod populating environment variables with secret data -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: secret-example-pod -spec: - containers: - - name: secret-test-container - image: busybox - command: [ "/bin/sh", "-c", "export" ] - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: <1> - name: test-secret - key: username - restartPolicy: Never ----- -<1> Specifies the environment variable that consumes the secret key. 
-+ -.YAML of a build config populating environment variables with secret data -+ -[source,yaml] ----- -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - name: secret-example-bc -spec: - strategy: - sourceStrategy: - env: - - name: TEST_SECRET_USERNAME_ENV_VAR - valueFrom: - secretKeyRef: <1> - name: test-secret - key: username - from: - kind: ImageStreamTag - namespace: openshift - name: 'cli:latest' ----- -<1> Specifies the environment variable that consumes the secret key. - -== Secret creation restrictions - -To use a secret, a pod needs to reference the secret. A secret can be used with -a pod in three ways: - -- To populate environment variables for containers. -- As files in a volume mounted on one or more of its containers. -- By kubelet when pulling images for the pod. - -Volume type secrets write data into the container as a file using the volume -mechanism. Image pull secrets use service accounts for the automatic injection of -the secret into all pods in a namespace. - -When a template contains a secret definition, the only way for the template to -use the provided secret is to ensure that the secret volume sources are -validated and that the specified object reference actually points to a `Secret` object. Therefore, a secret needs to be created before any pods that -depend on it. The most effective way to ensure this is to have it get injected -automatically through the use of a service account. - -Secret API objects reside in a namespace. They can only be referenced by pods in -that same namespace. - -Individual secrets are limited to 1MB in size. This is to discourage the -creation of large secrets that could exhaust apiserver and kubelet memory. -However, creation of a number of smaller secrets could also exhaust memory. diff --git a/modules/nodes-pods-secrets-troubleshooting.adoc b/modules/nodes-pods-secrets-troubleshooting.adoc deleted file mode 100644 index e20241c4d028..000000000000 --- a/modules/nodes-pods-secrets-troubleshooting.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -[id="nodes-pods-secrets-troubleshooting_{context}"] -= Troubleshooting secrets - -If a service certificate generation fails with (service's -`service.beta.openshift.io/serving-cert-generation-error` annotation -contains): - -[source,terminal] ----- -secret/ssl-key references serviceUID 62ad25ca-d703-11e6-9d6f-0e9c0057b608, which does not match 77b6dd80-d716-11e6-9d6f-0e9c0057b60 ----- - -The service that generated the certificate no longer exists, or has a different -`serviceUID`. You must force certificates regeneration by removing the old -secret, and clearing the following annotations on the service -`service.beta.openshift.io/serving-cert-generation-error`, -`service.beta.openshift.io/serving-cert-generation-error-num`: - -. Delete the secret: -+ -[source,terminal] ----- -$ oc delete secret <secret_name> ----- - -. Clear the annotations: -+ -[source,terminal] ----- -$ oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error- ----- -+ -[source,terminal] ----- -$ oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error-num- ----- - -[NOTE] -==== -The command removing annotation has a `-` after the annotation name to be -removed. 
-==== diff --git a/modules/nodes-pods-secrets-updating.adoc b/modules/nodes-pods-secrets-updating.adoc deleted file mode 100644 index a92dd475672d..000000000000 --- a/modules/nodes-pods-secrets-updating.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-secrets.adoc - -:_content-type: CONCEPT -[id="nodes-pods-secrets-updating_{context}"] -= Understanding how to update secrets - -When you modify the value of a secret, the value (used by an already running pod) will not dynamically change. To change a secret, you must delete the -original pod and create a new pod (perhaps with an identical PodSpec). - -Updating a secret follows the same workflow as deploying a new Container image. You can use the `kubectl rolling-update` command. - -The `resourceVersion` value in a secret is not specified when it is referenced. Therefore, if a secret is updated at the same time as pods are starting, the version of the secret that is used for the pod is not defined. - -[NOTE] -==== -Currently, it is not possible to check the resource version of a secret object that was used when a pod was created. It is planned that pods will report this information, so that a controller could restart ones using an old `resourceVersion`. In the interim, do not update the data of existing secrets, but create new ones with distinct names. -==== diff --git a/modules/nodes-pods-using-about.adoc b/modules/nodes-pods-using-about.adoc deleted file mode 100644 index 6e7070ac1ce7..000000000000 --- a/modules/nodes-pods-using-about.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-using.adoc - -:_content-type: CONCEPT -[id="nodes-pods-using-about_{context}"] -= Understanding pods - -Pods are the rough equivalent of a machine instance (physical or virtual) to a Container. Each pod is allocated its own internal IP address, therefore owning its entire port space, and containers within pods can share their local storage and networking. - -Pods have a lifecycle; they are defined, then they are assigned to run on -a node, then they run until their container(s) exit or they are removed -for some other reason. Pods, depending on policy and exit code, might be -removed after exiting, or can be retained to enable access to -the logs of their containers. - -{product-title} treats pods as largely immutable; changes cannot be made to -a pod definition while it is running. {product-title} implements changes by -terminating an existing pod and recreating it with modified configuration, -base image(s), or both. Pods are also treated as expendable, and do not -maintain state when recreated. Therefore pods should usually be managed by -higher-level controllers, rather than directly by users. - -ifdef::openshift-enterprise,openshift-webscale[] -[NOTE] -==== -For the maximum number of pods per {product-title} node host, see the Cluster Limits. -==== -endif::[] - -[WARNING] -==== -Bare pods that are not managed by a replication controller will be not rescheduled upon node disruption. 
-==== diff --git a/modules/nodes-pods-using-example.adoc b/modules/nodes-pods-using-example.adoc deleted file mode 100644 index 0f5748255e6f..000000000000 --- a/modules/nodes-pods-using-example.adoc +++ /dev/null @@ -1,122 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-using.adoc - -[id="nodes-pods-using-example_{context}"] -= Example pod configurations - -{product-title} leverages the Kubernetes concept of a _pod_, which is one or more containers deployed together on one host, and the smallest compute unit that can be defined, deployed, and managed. - -The following is an example definition of a pod from a Rails application. It demonstrates many features of pods, most of which are discussed in other topics and thus only briefly mentioned here: - -[id="example-pod-definition_{context}"] -.`Pod` object definition (YAML) - -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - name: example - namespace: default - selfLink: /api/v1/namespaces/default/pods/example - uid: 5cc30063-0265780783bc - resourceVersion: '165032' - creationTimestamp: '2019-02-13T20:31:37Z' - labels: - app: hello-openshift <1> - annotations: - openshift.io/scc: anyuid -spec: - restartPolicy: Always <2> - serviceAccountName: default - imagePullSecrets: - - name: default-dockercfg-5zrhb - priority: 0 - schedulerName: default-scheduler - terminationGracePeriodSeconds: 30 - nodeName: ip-10-0-140-16.us-east-2.compute.internal - securityContext: <3> - seLinuxOptions: - level: 's0:c11,c10' - containers: <4> - - resources: {} - terminationMessagePath: /dev/termination-log - name: hello-openshift - securityContext: - capabilities: - drop: - - MKNOD - procMount: Default - ports: - - containerPort: 8080 - protocol: TCP - imagePullPolicy: Always - volumeMounts: <5> - - name: default-token-wbqsl - readOnly: true - mountPath: /var/run/secrets/kubernetes.io/serviceaccount <6> - terminationMessagePolicy: File - image: registry.redhat.io/openshift4/ose-ogging-eventrouter:v4.3 <7> - serviceAccount: default <8> - volumes: <9> - - name: default-token-wbqsl - secret: - secretName: default-token-wbqsl - defaultMode: 420 - dnsPolicy: ClusterFirst -status: - phase: Pending - conditions: - - type: Initialized - status: 'True' - lastProbeTime: null - lastTransitionTime: '2019-02-13T20:31:37Z' - - type: Ready - status: 'False' - lastProbeTime: null - lastTransitionTime: '2019-02-13T20:31:37Z' - reason: ContainersNotReady - message: 'containers with unready status: [hello-openshift]' - - type: ContainersReady - status: 'False' - lastProbeTime: null - lastTransitionTime: '2019-02-13T20:31:37Z' - reason: ContainersNotReady - message: 'containers with unready status: [hello-openshift]' - - type: PodScheduled - status: 'True' - lastProbeTime: null - lastTransitionTime: '2019-02-13T20:31:37Z' - hostIP: 10.0.140.16 - startTime: '2019-02-13T20:31:37Z' - containerStatuses: - - name: hello-openshift - state: - waiting: - reason: ContainerCreating - lastState: {} - ready: false - restartCount: 0 - image: openshift/hello-openshift - imageID: '' - qosClass: BestEffort ----- - -<1> Pods can be "tagged" with one or more labels, which can then be used to select and manage groups of pods in a single operation. The labels are stored in key/value format in the `metadata` hash. -<2> The pod restart policy with possible values `Always`, `OnFailure`, and `Never`. The default value is `Always`. 
-<3> {product-title} defines a security context for containers which specifies whether they are allowed to run as privileged containers, run as a user of their choice, and more. The default context is very restrictive but administrators can modify this as needed. -<4> `containers` specifies an array of one or more container definitions. -<5> The container specifies where external storage volumes are mounted within the container. In this case, there is a volume for storing access to credentials the registry needs for making requests against the {product-title} API. -<6> Specify the volumes to provide for the pod. Volumes mount at the specified path. Do not mount to the container root, `/`, or any path that is the same in the host and the container. This can corrupt your host system if the container is sufficiently privileged, such as the host `/dev/pts` files. It is safe to mount the host by using `/host`. -<7> Each container in the pod is instantiated from its own container image. -<8> Pods making requests against the {product-title} API is a common enough pattern that there is a `serviceAccount` field for specifying which service account user the pod should authenticate as when making the requests. This enables fine-grained access control for custom infrastructure components. -<9> The pod defines storage volumes that are available to its container(s) to use. In this case, it provides an ephemeral volume for a `secret` volume containing the default service account tokens. -+ -If you attach persistent volumes that have high file counts to pods, those pods can fail or can take a long time to start. For -more information, see link:https://access.redhat.com/solutions/6221251[When using Persistent Volumes with high file counts in OpenShift, why do pods fail to start or take an excessive amount of time to achieve "Ready" state?]. - -[NOTE] -==== -This pod definition does not include attributes that are filled by {product-title} automatically after the pod is created and its lifecycle begins. The link:https://kubernetes.io/docs/concepts/workloads/pods/pod/[Kubernetes pod documentation] has details about the functionality and purpose of pods. -==== diff --git a/modules/nodes-pods-vertical-autoscaler-about.adoc b/modules/nodes-pods-vertical-autoscaler-about.adoc deleted file mode 100644 index 27217b1c6fa6..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-about.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: CONCEPT -[id="nodes-pods-vertical-autoscaler-about_{context}"] -= About the Vertical Pod Autoscaler Operator - -The Vertical Pod Autoscaler Operator (VPA) is implemented as an API resource and a custom resource (CR). The CR determines the actions the Vertical Pod Autoscaler Operator should take with the pods associated with a specific workload object, such as a daemon set, replication controller, and so forth, in a project. - -You can use the default recommender or use your own alternative recommender to autoscale based on your own algorithms. - -The default recommender automatically computes historic and current CPU and memory usage for the containers in those pods and uses this data to determine optimized resource limits and requests to ensure that these pods are operating efficiently at all times. 
For example, the default recommender suggests reduced resources for pods that are requesting more resources than they are using and increased resources for pods that are not requesting enough. - -The VPA then automatically deletes any pods that are out of alignment with these recommendations one at a time, so that your applications can continue to serve requests with no downtime. The workload objects then re-deploy the pods with the original resource limits and requests. The VPA uses a mutating admission webhook to update the pods with optimized resource limits and requests before the pods are admitted to a node. If you do not want the VPA to delete pods, you can view the VPA resource limits and requests and manually update the pods as needed. - -[NOTE] -==== -By default, workload objects must specify a minimum of two replicas in order for the VPA to automatically delete their pods. Workload objects that specify fewer replicas than this minimum are not deleted. If you manually delete these pods, when the workload object redeploys the pods, the VPA does update the new pods with its recommendations. You can change this minimum by modifying the `VerticalPodAutoscalerController` object as shown shown in _Changing the VPA minimum value_. -==== - -For example, if you have a pod that uses 50% of the CPU but only requests 10%, the VPA determines that the pod is consuming more CPU than requested and deletes the pod. The workload object, such as replica set, restarts the pods and the VPA updates the new pod with its recommended resources. - -For developers, you can use the VPA to help ensure your pods stay up during periods of high demand by scheduling pods onto nodes that have appropriate resources for each pod. - -Administrators can use the VPA to better utilize cluster resources, such as preventing pods from reserving more CPU resources than needed. The VPA monitors the resources that workloads are actually using and adjusts the resource requirements so capacity is available to other workloads. The VPA also maintains the ratios between limits and requests that are specified in initial container configuration. - -[NOTE] -==== -If you stop running the VPA or delete a specific VPA CR in your cluster, the resource requests for the pods already modified by the VPA do not change. Any new pods get the resources defined in the workload object, not the previous recommendations made by the VPA. -==== diff --git a/modules/nodes-pods-vertical-autoscaler-configuring.adoc b/modules/nodes-pods-vertical-autoscaler-configuring.adoc deleted file mode 100644 index ebc64fdc95c8..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-configuring.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-vertical-autoscaler-configuring_{context}"] -= Using the Vertical Pod Autoscaler Operator - -You can use the Vertical Pod Autoscaler Operator (VPA) by creating a VPA custom resource (CR). The CR indicates which pods it should analyze and determines the actions the VPA should take with those pods. - -.Prerequisites - -* The workload object that you want to autoscale must exist. - -* If you want to use an alternative recommender, a deployment including that recommender must exist. - -.Procedure - -To create a VPA CR for a specific workload object: - -. Change to the project where the workload object you want to scale is located. - -.. 
Create a VPA CR YAML file: -+ -[source,yaml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment <1> - name: frontend <2> - updatePolicy: - updateMode: "Auto" <3> - resourcePolicy: <4> - containerPolicies: - - containerName: my-opt-sidecar - mode: "Off" - recommenders: <5> - - name: my-recommender ----- -<1> Specify the type of workload object you want this VPA to manage: `Deployment`, `StatefulSet`, `Job`, `DaemonSet`, `ReplicaSet`, or `ReplicationController`. -<2> Specify the name of an existing workload object you want this VPA to manage. -<3> Specify the VPA mode: -* `Auto` to automatically apply the recommended resources on pods associated with the controller. The VPA terminates existing pods and creates new pods with the recommended resource limits and requests. -* `Recreate` to automatically apply the recommended resources on pods associated with the workload object. The VPA terminates existing pods and creates new pods with the recommended resource limits and requests. The `Recreate` mode should be used rarely, only if you need to ensure that the pods are restarted whenever the resource request changes. -* `Initial` to automatically apply the recommended resources when pods associated with the workload object are created. The VPA does not update the pods as it learns new resource recommendations. -* `Off` to only generate resource recommendations for the pods associated with the workload object. The VPA does not update the pods as it learns new resource recommendations and does not apply the recommendations to new pods. -<4> Optional. Specify the containers you want to opt out and set the mode to `Off`. -<5> Optional. Specify an alternative recommender. - -.. Create the VPA CR: -+ -[source,terminal] ----- -$ oc create -f <file-name>.yaml ----- -+ -After a few moments, the VPA learns the resource usage of the containers in the pods associated with the workload object. -+ -You can view the VPA recommendations using the following command: -+ -[source,terminal] ----- -$ oc get vpa <vpa-name> --output yaml ----- -+ -The output shows the recommendations for CPU and memory requests, similar to the following: -+ -.Example output -[source,yaml] ----- -... -status: - -... - - recommendation: - containerRecommendations: - - containerName: frontend - lowerBound: <1> - cpu: 25m - memory: 262144k - target: <2> - cpu: 25m - memory: 262144k - uncappedTarget: <3> - cpu: 25m - memory: 262144k - upperBound: <4> - cpu: 262m - memory: "274357142" - - containerName: backend - lowerBound: - cpu: 12m - memory: 131072k - target: - cpu: 12m - memory: 131072k - uncappedTarget: - cpu: 12m - memory: 131072k - upperBound: - cpu: 476m - memory: "498558823" - -... ----- -<1> `lowerBound` is the minimum recommended resource levels. -<2> `target` is the recommended resource levels. -<3> `uncappedTarget` is the most recent resource recommendations. -<4> `upperBound` is the highest recommended resource levels.
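After the VPA re-creates pods in `Auto` mode, one way to confirm that the new pods received the recommended requests is to inspect the container resources directly. This is a sketch only; the pod name is a placeholder:

[source,terminal]
----
$ oc get pod <pod_name> -o jsonpath='{range .spec.containers[*]}{.name}{": "}{.resources.requests}{"\n"}{end}'
----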
diff --git a/modules/nodes-pods-vertical-autoscaler-custom.adoc b/modules/nodes-pods-vertical-autoscaler-custom.adoc deleted file mode 100644 index 457a3b89da3a..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-custom.adoc +++ /dev/null @@ -1,165 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-vertical-autoscaler-custom_{context}"] -= Using an alternative recommender - -You can use your own recommender to autoscale based on your own algorithms. If you do not specify an alternative recommender, {product-title} uses the default recommender, which suggests CPU and memory requests based on historical usage. Because there is no universal recommendation policy that applies to all types of workloads, you might want to create and deploy different recommenders for specific workloads. - -For example, the default recommender might not accurately predict future resource usage when containers exhibit certain resource behaviors, such as cyclical patterns that alternate between usage spikes and idling as used by monitoring applications, or recurring and repeating patterns used with deep learning applications. Using the default recommender with these usage behaviors might result in significant over-provisioning and Out of Memory (OOM) kills for your applications. - -// intro paragraph based on https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler/enhancements/3919-customized-recommender-vpa - -[NOTE] -==== -Instructions for how to create a recommender are beyond the scope of this documentation, -==== - -.Procedure - -To use an alternative recommender for your pods: - -. Create a service account for the alternative recommender and bind that service account to the required cluster role: -+ -[source,yaml] ----- -apiVersion: v1 <1> -kind: ServiceAccount -metadata: - name: alt-vpa-recommender-sa - namespace: <namespace_name> ---- -apiVersion: rbac.authorization.k8s.io/v1 <2> -kind: ClusterRoleBinding -metadata: - name: system:example-metrics-reader -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-reader -subjects: -- kind: ServiceAccount - name: alt-vpa-recommender-sa - namespace: <namespace_name> ---- -apiVersion: rbac.authorization.k8s.io/v1 <3> -kind: ClusterRoleBinding -metadata: - name: system:example-vpa-actor -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:vpa-actor -subjects: -- kind: ServiceAccount - name: alt-vpa-recommender-sa - namespace: <namespace_name> ---- -apiVersion: rbac.authorization.k8s.io/v1 <4> -kind: ClusterRoleBinding -metadata: - name: system:example-vpa-target-reader-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:vpa-target-reader -subjects: -- kind: ServiceAccount - name: alt-vpa-recommender-sa - namespace: <namespace_name> ----- -<1> Creates a service accocunt for the recommender in the namespace where the recommender is deployed. -<2> Binds the recommender service account to the `metrics-reader` role. Specify the namespace where the recommender is to be deployed. -<3> Binds the recommender service account to the `vpa-actor` role. Specify the namespace where the recommender is to be deployed. -<4> Binds the recommender service account to the `vpa-target-reader` role. Specify the namespace where the recommender is to be deployed. - -. 
To add the alternative recommender to the cluster, create a Deployment object similar to the following: -+ -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: alt-vpa-recommender - namespace: <namespace_name> -spec: - replicas: 1 - selector: - matchLabels: - app: alt-vpa-recommender - template: - metadata: - labels: - app: alt-vpa-recommender - spec: - containers: <1> - - name: recommender - image: quay.io/example/alt-recommender:latest <2> - imagePullPolicy: Always - resources: - limits: - cpu: 200m - memory: 1000Mi - requests: - cpu: 50m - memory: 500Mi - ports: - - name: prometheus - containerPort: 8942 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - serviceAccountName: alt-vpa-recommender-sa <3> - securityContext: - runAsNonRoot: true ----- -+ --- -<1> Creates a container for your alternative recommender. -<2> Specifies your recommender image. -<3> Associates the service account that you created for the recommender. --- -+ -A new pod is created for the alternative recommender in the same namespace. -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -frontend-845d5478d-558zf 1/1 Running 0 4m25s -frontend-845d5478d-7z9gx 1/1 Running 0 4m25s -frontend-845d5478d-b7l4j 1/1 Running 0 4m25s -vpa-alt-recommender-55878867f9-6tp5v 1/1 Running 0 9s ----- - -. Configure a VPA CR that includes the name of the alternative recommender `Deployment` object. -+ -.Example VPA CR to include the alternative recommender -[source,yml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender - namespace: <namespace_name> -spec: - recommenders: - - name: alt-vpa-recommender <1> - targetRef: - apiVersion: "apps/v1" - kind: Deployment <2> - name: frontend ----- -<1> Specifies the name of the alternative recommender deployment. -<2> Specifies the name of an existing workload object you want this VPA to manage. diff --git a/modules/nodes-pods-vertical-autoscaler-install.adoc b/modules/nodes-pods-vertical-autoscaler-install.adoc deleted file mode 100644 index 30840e9e2f99..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-install.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-vertical-autoscaler-install_{context}"] -= Installing the Vertical Pod Autoscaler Operator - -You can use the {product-title} web console to install the Vertical Pod Autoscaler Operator (VPA). - -ifdef::openshift-origin[] -.Prerequisites - -* Ensure that you have downloaded the {cluster-manager-url-pull} as shown in _Obtaining the installation program_ in the installation documentation for your platform. -+ -If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in _Configuring {product-title} to use Red Hat Operators_. -endif::[] - -.Procedure - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -. Choose *VerticalPodAutoscaler* from the list of available Operators, and click *Install*. - -. On the *Install Operator* page, ensure that the *Operator recommended namespace* option -is selected. This installs the Operator in the mandatory `openshift-vertical-pod-autoscaler` namespace, which -is automatically created if it does not exist. - -. Click *Install*. - -. 
Verify the installation by listing the VPA Operator components: - -.. Navigate to *Workloads* -> *Pods*. - -.. Select the `openshift-vertical-pod-autoscaler` project from the drop-down menu and verify that there are four pods running. - -.. Navigate to *Workloads* -> *Deployments* to verify that there are four deployments running. - -. Optional. Verify the installation in the {product-title} CLI using the following command: -+ -[source,terminal] ----- -$ oc get all -n openshift-vertical-pod-autoscaler ----- -+ -The output shows four pods and four deployments: -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/vertical-pod-autoscaler-operator-85b4569c47-2gmhc 1/1 Running 0 3m13s -pod/vpa-admission-plugin-default-67644fc87f-xq7k9 1/1 Running 0 2m56s -pod/vpa-recommender-default-7c54764b59-8gckt 1/1 Running 0 2m56s -pod/vpa-updater-default-7f6cc87858-47vw9 1/1 Running 0 2m56s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/vpa-webhook ClusterIP 172.30.53.206 <none> 443/TCP 2m56s - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/vertical-pod-autoscaler-operator 1/1 1 1 3m13s -deployment.apps/vpa-admission-plugin-default 1/1 1 1 2m56s -deployment.apps/vpa-recommender-default 1/1 1 1 2m56s -deployment.apps/vpa-updater-default 1/1 1 1 2m56s - -NAME DESIRED CURRENT READY AGE -replicaset.apps/vertical-pod-autoscaler-operator-85b4569c47 1 1 1 3m13s -replicaset.apps/vpa-admission-plugin-default-67644fc87f 1 1 1 2m56s -replicaset.apps/vpa-recommender-default-7c54764b59 1 1 1 2m56s -replicaset.apps/vpa-updater-default-7f6cc87858 1 1 1 2m56s ----- diff --git a/modules/nodes-pods-vertical-autoscaler-uninstall.adoc b/modules/nodes-pods-vertical-autoscaler-uninstall.adoc deleted file mode 100644 index 0e1570adf43f..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-uninstall.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-vertical-autoscaler-uninstall_{context}"] -= Uninstalling the Vertical Pod Autoscaler Operator - -You can remove the Vertical Pod Autoscaler Operator (VPA) from your {product-title} cluster. After uninstalling, the resource requests for the pods already modified by an existing VPA CR do not change. Any new pods get the resources defined in the workload object, not the previous recommendations made by the Vertical Pod Autoscaler Operator. - -[NOTE] -==== -You can remove a specific VPA CR by using the `oc delete vpa <vpa-name>` command. Deleting a VPA CR affects resource requests in the same way as uninstalling the Vertical Pod Autoscaler Operator. -==== - -After removing the VPA Operator, it is recommended that you remove the other components associated with the Operator to avoid potential issues. - -.Prerequisites - -* The Vertical Pod Autoscaler Operator must be installed. - -.Procedure - -. In the {product-title} web console, click *Operators* -> *Installed Operators*. - -. Switch to the *openshift-vertical-pod-autoscaler* project. - -. For the *VerticalPodAutoscaler* Operator, click the Options menu {kebab} and select *Uninstall Operator*. - -. Optional: To remove all operands associated with the Operator, in the dialog box, select the *Delete all operand instances for this operator* checkbox. - -. Click *Uninstall*. - -. Optional: Use the OpenShift CLI to remove the VPA components: - -.. Delete the VPA namespace: -+ -[source,terminal] ----- -$ oc delete namespace openshift-vertical-pod-autoscaler ----- - -.. 
Delete the VPA custom resource definition (CRD) objects: -+ -[source,terminal] ----- -$ oc delete crd verticalpodautoscalercheckpoints.autoscaling.k8s.io ----- -+ -[source,terminal] ----- -$ oc delete crd verticalpodautoscalercontrollers.autoscaling.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd verticalpodautoscalers.autoscaling.k8s.io ----- -+ -Deleting the CRDs removes the associated roles, cluster roles, and role bindings. -+ -[NOTE] -==== -This action removes from the cluster all user-created VPA CRs. If you re-install the VPA, you must create these objects again. -==== - -.. Delete the VPA Operator: -+ -[source,terminal] ----- -$ oc delete operator/vertical-pod-autoscaler.openshift-vertical-pod-autoscaler ----- - diff --git a/modules/nodes-pods-vertical-autoscaler-using-about.adoc b/modules/nodes-pods-vertical-autoscaler-using-about.adoc deleted file mode 100644 index 1d891f767a25..000000000000 --- a/modules/nodes-pods-vertical-autoscaler-using-about.adoc +++ /dev/null @@ -1,317 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-vertical-autoscaler.adoc - -:_content-type: CONCEPT -[id="nodes-pods-vertical-autoscaler-using-about_{context}"] -= About Using the Vertical Pod Autoscaler Operator - -To use the Vertical Pod Autoscaler Operator (VPA), you create a VPA custom resource (CR) for a workload object in your cluster. The VPA learns and applies the optimal CPU and memory resources for the pods associated with that workload object. You can use a VPA with a deployment, stateful set, job, daemon set, replica set, or replication controller workload object. The VPA CR must be in the same project as the pods you want to monitor. - -You use the VPA CR to associate a workload object and specify which mode the VPA operates in: - -* The `Auto` and `Recreate` modes automatically apply the VPA CPU and memory recommendations throughout the pod lifetime. The VPA deletes any pods in the project that are out of alignment with its recommendations. When redeployed by the workload object, the VPA updates the new pods with its recommendations. -* The `Initial` mode automatically applies VPA recommendations only at pod creation. -* The `Off` mode only provides recommended resource limits and requests, allowing you to manually apply the recommendations. The `off` mode does not update pods. - -You can also use the CR to opt-out certain containers from VPA evaluation and updates. - -For example, a pod has the following limits and requests: - -[source,yaml] ----- -resources: - limits: - cpu: 1 - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi ----- - -After creating a VPA that is set to `auto`, the VPA learns the resource usage and deletes the pod. When redeployed, the pod uses the new resource limits and requests: - -[source,yaml] ----- -resources: - limits: - cpu: 50m - memory: 1250Mi - requests: - cpu: 25m - memory: 262144k ----- - -You can view the VPA recommendations using the following command: - -[source,terminal] ----- -$ oc get vpa <vpa-name> --output yaml ----- - -After a few minutes, the output shows the recommendations for CPU and memory requests, similar to the following: - -.Example output -[source,yaml] ----- -... -status: -... 
- recommendation: - containerRecommendations: - - containerName: frontend - lowerBound: - cpu: 25m - memory: 262144k - target: - cpu: 25m - memory: 262144k - uncappedTarget: - cpu: 25m - memory: 262144k - upperBound: - cpu: 262m - memory: "274357142" - - containerName: backend - lowerBound: - cpu: 12m - memory: 131072k - target: - cpu: 12m - memory: 131072k - uncappedTarget: - cpu: 12m - memory: 131072k - upperBound: - cpu: 476m - memory: "498558823" -... ----- - -The output shows the recommended resources, `target`, the minimum recommended resources, `lowerBound`, the highest recommended resources, `upperBound`, and the most recent resource recommendations, `uncappedTarget`. - -The VPA uses the `lowerBound` and `upperBound` values to determine if a pod needs to be updated. If a pod has resource requests below the `lowerBound` values or above the `upperBound` values, the VPA terminates and recreates the pod with the `target` values. - -[id="nodes-pods-vertical-autoscaler-using-one-pod_{context}"] -== Changing the VPA minimum value - -By default, workload objects must specify a minimum of two replicas in order for the VPA to automatically delete and update their pods. As a result, workload objects that specify fewer than two replicas are not automatically acted upon by the VPA. The VPA does update new pods from these workload objects if the pods are restarted by some process external to the VPA. You can change this cluster-wide minimum value by modifying the `minReplicas` parameter in the `VerticalPodAutoscalerController` custom resource (CR). - -For example, if you set `minReplicas` to `3`, the VPA does not delete and update pods for workload objects that specify fewer than three replicas. - -[NOTE] -==== -If you set `minReplicas` to `1`, the VPA can delete the only pod for a workload object that specifies only one replica. You should use this setting with one-replica objects only if your workload can tolerate downtime whenever the VPA deletes a pod to adjust its resources. To avoid unwanted downtime with one-replica objects, configure the VPA CRs with the `podUpdatePolicy` set to `Initial`, which automatically updates the pod only when it is restarted by some process external to the VPA, or `Off`, which allows you to update the pod manually at an appropriate time for your application. -==== - -.Example `VerticalPodAutoscalerController` object -[source,yaml] ----- -apiVersion: autoscaling.openshift.io/v1 -kind: VerticalPodAutoscalerController -metadata: - creationTimestamp: "2021-04-21T19:29:49Z" - generation: 2 - name: default - namespace: openshift-vertical-pod-autoscaler - resourceVersion: "142172" - uid: 180e17e9-03cc-427f-9955-3b4d7aeb2d59 -spec: - minReplicas: 3 <1> - podMinCPUMillicores: 25 - podMinMemoryMb: 250 - recommendationOnly: false - safetyMarginFraction: 0.15 ----- - -<1> Specify the minimum number of replicas in a workload object for the VPA to act on. Any objects with replicas fewer than the minimum are not automatically deleted by the VPA. - -[id="nodes-pods-vertical-autoscaler-using-auto_{context}"] -== Automatically applying VPA recommendations -To use the VPA to automatically update pods, create a VPA CR for a specific workload object with `updateMode` set to `Auto` or `Recreate`. - -When the pods are created for the workload object, the VPA constantly monitors the containers to analyze their CPU and memory needs. The VPA deletes any pods that do not meet the VPA recommendations for CPU and memory. 
When redeployed, the pods use the new resource limits and requests based on the VPA recommendations, honoring any pod disruption budget set for your applications. The recommendations are added to the `status` field of the VPA CR for reference. - -[NOTE] -==== -By default, workload objects must specify a minimum of two replicas in order for the VPA to automatically delete their pods. Workload objects that specify fewer replicas than this minimum are not deleted. If you manually delete these pods, when the workload object redeploys the pods, the VPA does update the new pods with its recommendations. You can change this minimum by modifying the `VerticalPodAutoscalerController` object as shown in _Changing the VPA minimum value_. -==== - -.Example VPA CR for the `Auto` mode -[source,yaml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment <1> - name: frontend <2> - updatePolicy: - updateMode: "Auto" <3> ----- -<1> The type of workload object you want this VPA CR to manage. -<2> The name of the workload object you want this VPA CR to manage. -<3> Set the mode to `Auto` or `Recreate`: -* `Auto`. The VPA assigns resource requests on pod creation and updates the existing pods by terminating them when the requested resources differ significantly from the new recommendation. -* `Recreate`. The VPA assigns resource requests on pod creation and updates the existing pods by terminating them when the requested resources differ significantly from the new recommendation. This mode should be used rarely, only if you need to ensure that the pods are restarted whenever the resource request changes. - -[NOTE] -==== -There must be operating pods in the project before the VPA can determine recommended resources and apply the recommendations to new pods. -==== - -[id="nodes-pods-vertical-autoscaler-using-pod_{context}"] -== Automatically applying VPA recommendations on pod creation -To use the VPA to apply the recommended resources only when a pod is first deployed, create a VPA CR for a specific workload object with `updateMode` set to `Initial`. - -Then, manually delete any pods associated with the workload object to which you want to apply the VPA recommendations. In the `Initial` mode, the VPA does not delete pods and does not update the pods as it learns new resource recommendations. - -.Example VPA CR for the `Initial` mode -[source,yaml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment <1> - name: frontend <2> - updatePolicy: - updateMode: "Initial" <3> ----- -<1> The type of workload object you want this VPA CR to manage. -<2> The name of the workload object you want this VPA CR to manage. -<3> Set the mode to `Initial`. The VPA assigns resources when pods are created and does not change the resources during the lifetime of the pod. - -[NOTE] -==== -There must be operating pods in the project before a VPA can determine recommended resources and apply the recommendations to new pods. -==== - -[id="nodes-pods-vertical-autoscaler-using-manual_{context}"] -== Manually applying VPA recommendations - -To use the VPA to only determine the recommended CPU and memory values, create a VPA CR for a specific workload object with `updateMode` set to `Off`. 
- -When the pods are created for that workload object, the VPA analyzes the CPU and memory needs of the containers and records those recommendations in the `status` field of the VPA CR. The VPA does not update the pods as it determines new resource recommendations. - -.Example VPA CR for the `Off` mode -[source,yaml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment <1> - name: frontend <2> - updatePolicy: - updateMode: "Off" <3> ----- -<1> The type of workload object you want this VPA CR to manage. -<2> The name of the workload object you want this VPA CR to manage. -<3> Set the mode to `Off`. - -You can view the recommendations using the following command. - -[source,terminal] ----- -$ oc get vpa <vpa-name> --output yaml ----- - -With the recommendations, you can edit the workload object to add CPU and memory requests, then delete and redeploy the pods using the recommended resources. - -[NOTE] -==== -There must be operating pods in the project before a VPA can determine recommended resources. -==== - -[id="nodes-pods-vertical-autoscaler-using-exempt_{context}"] -== Exempting containers from applying VPA recommendations - -If your workload object has multiple containers and you do not want the VPA to evaluate and act on all of the containers, create a VPA CR for a specific workload object and add a `resourcePolicy` to opt-out specific containers. - -When the VPA updates the pods with recommended resources, any containers with a `resourcePolicy` are not updated and the VPA does not present recommendations for those containers in the pod. - -[source,yaml] ----- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: vpa-recommender -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment <1> - name: frontend <2> - updatePolicy: - updateMode: "Auto" <3> - resourcePolicy: <4> - containerPolicies: - - containerName: my-opt-sidecar - mode: "Off" ----- -<1> The type of workload object you want this VPA CR to manage. -<2> The name of the workload object you want this VPA CR to manage. -<3> Set the mode to `Auto`, `Recreate`, or `Off`. The `Recreate` mode should be used rarely, only if you need to ensure that the pods are restarted whenever the resource request changes. -<4> Specify the containers you want to opt-out and set `mode` to `Off`. - -For example, a pod has two containers, the same resource requests and limits: - -[source,yaml] ----- -# ... -spec: - containers: - - name: frontend - resources: - limits: - cpu: 1 - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi - - name: backend - resources: - limits: - cpu: "1" - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi -# ... ----- - -After launching a VPA CR with the `backend` container set to opt-out, the VPA terminates and recreates the pod with the recommended resources applied only to the `frontend` container: - -[source,yaml] ----- -... -spec: - containers: - name: frontend - resources: - limits: - cpu: 50m - memory: 1250Mi - requests: - cpu: 25m - memory: 262144k -... - name: backend - resources: - limits: - cpu: "1" - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi -... 
---- diff --git a/modules/nodes-pods-viewing-project.adoc b/modules/nodes-pods-viewing-project.adoc deleted file mode 100644 index 7b2ec0af3efa..000000000000 --- a/modules/nodes-pods-viewing-project.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-viewing.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-viewing-project_{context}"] -= Viewing pods in a project - -You can view a list of pods associated with the current project, including the number of replicas, the current status, the number of restarts, and the age of the pod. - -.Procedure - -To view the pods in a project: - -. Change to the project: -+ -[source,terminal] ----- -$ oc project <project-name> ----- - -. Run the following command: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get pods ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -console-698d866b78-bnshf 1/1 Running 2 165m -console-698d866b78-m87pm 1/1 Running 2 165m ----- -+ -Add the `-o wide` flag to view the pod IP address and the node where the pod is located. -+ -[source,terminal] ----- -$ oc get pods -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE -console-698d866b78-bnshf 1/1 Running 2 166m 10.128.0.24 ip-10-0-152-71.ec2.internal <none> -console-698d866b78-m87pm 1/1 Running 2 166m 10.129.0.23 ip-10-0-173-237.ec2.internal <none> ----- diff --git a/modules/nodes-pods-viewing-usage.adoc b/modules/nodes-pods-viewing-usage.adoc deleted file mode 100644 index afbb45e5cf34..000000000000 --- a/modules/nodes-pods-viewing-usage.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-pods-viewing.adoc - -:_content-type: PROCEDURE -[id="nodes-pods-viewing-usage_{context}"] -= Viewing pod usage statistics - -You can display usage statistics about pods, which provide the runtime -environments for containers. These usage statistics include CPU, memory, and -storage consumption. - -.Prerequisites - -* You must have `cluster-reader` permission to view the usage statistics. - -* Metrics must be installed to view the usage statistics. - -.Procedure - -To view the usage statistics: - -. Run the following command: -+ -[source,terminal] ----- -$ oc adm top pods ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm top pods -n openshift-console ----- -+ -.Example output -[source,terminal] ----- -NAME CPU(cores) MEMORY(bytes) -console-7f58c69899-q8c8k 0m 22Mi -console-7f58c69899-xhbgg 0m 25Mi -downloads-594fcccf94-bcxk8 3m 18Mi -downloads-594fcccf94-kv4p6 2m 15Mi ----- - -. Run the following command to view the usage statistics for pods with labels: -+ -[source,terminal] ----- -$ oc adm top pod --selector='' ----- -+ -You must choose the selector (label query) to filter on. The selector supports `=`, `==`, and `!=`. 
-+ -For example: -+ -[source,terminal] ----- -$ oc adm top pod --selector='name=my-pod' ----- diff --git a/modules/nodes-qos-about-swap.adoc b/modules/nodes-qos-about-swap.adoc deleted file mode 100644 index 14baa3cb2460..000000000000 --- a/modules/nodes-qos-about-swap.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-cluster-overcommit.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: CONCEPT -[id="nodes-qos-about-swap_{context}"] -= Understanding swap memory and QOS - -You can disable swap by default on your nodes to preserve quality of -service (QOS) guarantees. Otherwise, physical resources on a node can oversubscribe, -affecting the resource guarantees the Kubernetes scheduler makes during pod -placement. - -For example, if two guaranteed pods have reached their memory limit, each -container could start using swap memory. Eventually, if there is not enough swap -space, processes in the pods can be terminated due to the system being -oversubscribed. - -Failing to disable swap results in nodes not recognizing that they are -experiencing *MemoryPressure*, resulting in pods not receiving the memory they -made in their scheduling request. As a result, additional pods are placed on the -node to further increase memory pressure, ultimately increasing your risk of -experiencing a system out of memory (OOM) event. - -[IMPORTANT] -==== -If swap is enabled, any out-of-resource handling eviction thresholds for available memory will not work as -expected. Take advantage of out-of-resource handling to allow pods to be evicted -from a node when it is under memory pressure, and rescheduled on an alternative -node that has no such pressure. -==== diff --git a/modules/nodes-safe-sysctls-list.adoc b/modules/nodes-safe-sysctls-list.adoc deleted file mode 100644 index c0e19fa3fcdd..000000000000 --- a/modules/nodes-safe-sysctls-list.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/containers/nodes-containers-sysctls.adoc - -:_content-type: REFERENCE -[id="safe_and_unsafe_sysctls_{context}"] -= Safe and unsafe sysctls - -Sysctls are grouped into _safe_ and _unsafe_ sysctls. - -For system-wide sysctls to be considered safe, they must be namespaced. A namespaced sysctl ensures there is isolation between namespaces and therefore pods. If you set a sysctl for one pod it must not add any of the following: - -- Influence any other pod on the node -- Harm the node health -- Gain CPU or memory resources outside of the resource limits of a pod - -[NOTE] -==== -Being namespaced alone is not sufficient for the sysctl to be considered safe. -==== -Any sysctl that is not added to the allowed list on {product-title} is considered unsafe for {product-title}. - -Unsafe sysctls are not allowed by default. For system-wide sysctls the cluster administrator must manually enable them on a per-node basis. Pods with disabled unsafe sysctls are scheduled but do not launch. - -[NOTE] -==== -You cannot manually enable interface-specific unsafe sysctls. -==== - -{product-title} adds the following system-wide and interface-specific safe sysctls to an allowed safe list: - -.System-wide safe sysctls -[cols="30%,70%",options="header"] -|=== -| sysctl | Description - -| `kernel.shm_rmid_forced` -a|When set to `1`, all shared memory objects in current IPC namespace are automatically forced to use IPC_RMID. 
For more information, see link:https://docs.kernel.org/admin-guide/sysctl/kernel.html?highlight=shm_rmid_forced#shm-rmid-forced[shm_rmid_forced]. - -| `net.ipv4.ip_local_port_range` -a| Defines the local port range that is used by TCP and UDP to choose the local port. The first number is the first port number, and the second number is the last local port number. If possible, it is better if these numbers have different parity (one even and one odd value). They must be greater than or equal to `ip_unprivileged_port_start`. The default values are `32768` and `60999` respectively. For more information, see link:https://docs.kernel.org/networking/ip-sysctl.html?highlight=ip_local_port_range#ip-variables[ip_local_port_range]. - -| `net.ipv4.tcp_syncookies` -|When `net.ipv4.tcp_syncookies` is set, the kernel handles TCP SYN packets normally until the -half-open connection queue is full, at which time, the SYN cookie functionality kicks in. This functionality allows the system to keep accepting valid connections, even if under a denial-of-service attack. For more information, see link:https://docs.kernel.org/networking/ip-sysctl.html?highlight=tcp_syncookies#tcp-variables[tcp_syncookies]. - -| `net.ipv4.ping_group_range` -a| This restricts `ICMP_PROTO` datagram sockets to users in the group range. The default is `1 0`, meaning that nobody, not even root, can create ping sockets. For more information, see link:https://docs.kernel.org/networking/ip-sysctl.html?highlight=ping_group_range#ip-variables[ping_group_range]. - -| `net.ipv4.ip_unprivileged_port_start` -| This defines the first unprivileged port in the network namespace. To disable all privileged ports, set this to `0`. Privileged ports must not overlap with the `ip_local_port_range`. For more information, see link:https://docs.kernel.org/networking/ip-sysctl.html?highlight=ip_unprivileged_port_start#ip-variables#ip-variables[ip_unprivileged_port_start]. -|=== - - -.Interface-specific safe sysctls -[cols="30%,70%",options="header"] -|=== -| sysctl | Description - -| `net.ipv4.conf.IFNAME.accept_redirects` -a| Accept IPv4 ICMP redirect messages. - -| `net.ipv4.conf.IFNAME.accept_source_route` -|Accept IPv4 packets with strict source route (SRR) option. - -| `net.ipv4.conf.IFNAME.arp_accept` -a| Define behavior for gratuitous ARP frames with an IPv4 address that is not already present in the ARP table: - -* `0` - Do not create new entries in the ARP table. - -* `1` - Create new entries in the ARP table. - -| `net.ipv4.conf.IFNAME.arp_notify` -| Define mode for notification of IPv4 address and device changes. - -| `net.ipv4.conf.IFNAME.disable_policy` -a| Disable IPSEC policy (SPD) for this IPv4 interface. - -| `net.ipv4.conf.IFNAME.secure_redirects` -a| Accept ICMP redirect messages only to gateways listed in the interface’s current gateway list. - -| `net.ipv4.conf.IFNAME.send_redirects` -| Send redirects is enabled only if the node acts as a router. That is, a host should not send an ICMP redirect message. It is used by routers to notify the host about a better routing path that is available for a particular destination. - -| `net.ipv6.conf.IFNAME.accept_ra` -a| Accept IPv6 Router advertisements; autoconfigure using them. It also determines whether or not to transmit router solicitations. Router solicitations are transmitted only if the functional setting is to accept router advertisements. - -| `net.ipv6.conf.IFNAME.accept_redirects` -a| Accept IPv6 ICMP redirect messages. 
- -| `net.ipv6.conf.IFNAME.accept_source_route` -a| Accept IPv6 packets with SRR option. - -| `net.ipv6.conf.IFNAME.arp_accept` -a| Define behavior for gratuitous ARP frames with an IPv6 address that is not already present in the ARP table: - -* `0` - Do not create new entries in the ARP table. - -* `1` - Create new entries in the ARP table. - -| `net.ipv6.conf.IFNAME.arp_notify` -| Define mode for notification of IPv6 address and device changes. - -| `net.ipv6.neigh.IFNAME.base_reachable_time_ms` -| This parameter controls the hardware address to IP mapping lifetime in the neighbour table for IPv6. - -| `net.ipv6.neigh.IFNAME.retrans_time_ms` -| Set the retransmit timer for neighbor discovery messages. - -|=== - -[NOTE] -==== -When setting these values using the `tuning` CNI plugin, use the value `IFNAME` literally. The interface name is represented by the `IFNAME` token, and is replaced with the actual name of the interface at runtime. -==== diff --git a/modules/nodes-scheduler-default-about.adoc b/modules/nodes-scheduler-default-about.adoc deleted file mode 100644 index 902766828b8a..000000000000 --- a/modules/nodes-scheduler-default-about.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-default.adoc - -:_content-type: CONCEPT -[id="nodes-scheduler-default-about_{context}"] -= Understanding default scheduling - -The existing generic scheduler is the default platform-provided scheduler -_engine_ that selects a node to host the pod in a three-step operation: - -Filters the nodes:: -The available nodes are filtered based on the constraints or requirements -specified. This is done by running each node through the list of filter -functions called _predicates_, or _filters_. - -Prioritizes the filtered list of nodes:: -This is achieved by passing each node through a series of _priority_, or _scoring_, functions -that assign it a score between 0 - 10, with 0 indicating a bad fit and 10 -indicating a good fit to host the pod. The scheduler configuration can also take -in a simple _weight_ (positive numeric value) for each scoring function. The -node score provided by each scoring function is multiplied by the weight -(default weight for most scores is 1) and then combined by adding the scores for each node -provided by all the scores. This weight attribute can be used by -administrators to give higher importance to some scores. - -Selects the best fit node:: -The nodes are sorted based on their scores and the node with the highest score -is selected to host the pod. If multiple nodes have the same high score, then -one of them is selected at random. diff --git a/modules/nodes-scheduler-node-affinity-about.adoc b/modules/nodes-scheduler-node-affinity-about.adoc deleted file mode 100644 index d702b6fb6633..000000000000 --- a/modules/nodes-scheduler-node-affinity-about.adoc +++ /dev/null @@ -1,100 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -:_content-type: CONCEPT -[id="nodes-scheduler-node-affinity-about_{context}"] -= Understanding node affinity - -Node affinity allows a pod to specify an affinity towards a group of nodes it can be placed on. The node does not have control over the placement. - -For example, you could configure a pod to only run on a node with a specific CPU or in a specific availability zone. - -There are two types of node affinity rules: _required_ and _preferred_. - -Required rules *must* be met before a pod can be scheduled on a node. 
Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rule, but does not guarantee enforcement. - -[NOTE] -==== -If labels on a node change at runtime such that a node affinity rule on a pod is no longer met, the pod continues to run on the node. -==== - -You configure node affinity through the `Pod` spec file. You can specify a required rule, a preferred rule, or both. If you specify both, the node must first meet the required rule, and then the scheduler attempts to meet the preferred rule. - -The following example is a `Pod` spec with a rule that requires the pod be placed on a node with a label whose key is `e2e-az-NorthSouth` and whose value is either `e2e-az-North` or `e2e-az-South`: - -.Example pod configuration file with a node affinity required rule -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: with-node-affinity -spec: - affinity: - nodeAffinity: <1> - requiredDuringSchedulingIgnoredDuringExecution: <2> - nodeSelectorTerms: - - matchExpressions: - - key: e2e-az-NorthSouth <3> - operator: In <4> - values: - - e2e-az-North <3> - - e2e-az-South <3> - containers: - - name: with-node-affinity - image: docker.io/ocpqe/hello-pod ----- - -<1> The stanza to configure node affinity. -<2> Defines a required rule. -<3> The key/value pair (label) that must be matched to apply the rule. -<4> The operator represents the relationship between the label on the node and the set of values in the `matchExpression` parameters in the `Pod` spec. This value can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. - -The following example is a `Pod` spec with a preferred rule that a node with a label whose key is `e2e-az-EastWest` and whose value is either `e2e-az-East` or `e2e-az-West` is preferred for the pod: - -.Example pod configuration file with a node affinity preferred rule -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: with-node-affinity -spec: - affinity: - nodeAffinity: <1> - preferredDuringSchedulingIgnoredDuringExecution: <2> - - weight: 1 <3> - preference: - matchExpressions: - - key: e2e-az-EastWest <4> - operator: In <5> - values: - - e2e-az-East <4> - - e2e-az-West <4> - containers: - - name: with-node-affinity - image: docker.io/ocpqe/hello-pod ----- - -<1> The stanza to configure node affinity. -<2> Defines a preferred rule. -<3> Specifies a weight for a preferred rule. The node with the highest weight is preferred. -<4> The key/value pair (label) that must be matched to apply the rule. -<5> The operator represents the relationship between the label on the node and -the set of values in the `matchExpression` parameters in the `Pod` spec. -This value can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. - -There is no explicit _node anti-affinity_ concept, but using the `NotIn` or `DoesNotExist` operator replicates that behavior. - -[NOTE] -==== -If you are using node affinity and node selectors in the same pod configuration, note the following: - -* If you configure both `nodeSelector` and `nodeAffinity`, both conditions must be satisfied for the pod to be scheduled onto a candidate node. - -* If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node if one of the `nodeSelectorTerms` is satisfied. - -* If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. 
-==== - diff --git a/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc b/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc deleted file mode 100644 index f7896d085a2f..000000000000 --- a/modules/nodes-scheduler-node-affinity-configuring-preferred.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-node-affinity-configuring-preferred_{context}"] -= Configuring a preferred node affinity rule - -Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rules, but does not guarantee enforcement. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler tries to place on the node. - -. Add a label to a node using the `oc label node` command: -+ -[source,terminal] ----- -$ oc label node node1 e2e-az-name=e2e-az3 ----- - -. In the `Pod` spec, use the `nodeAffinity` stanza to configure the `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify a weight for the node, as a number 1-100. The node with highest weight is preferred. -+ -.. Specify the key and values that must be met. If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node: -+ -[source,yaml] ----- -spec: - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az3 ----- -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. For example, use the Operator `In` to require the label to be in the node. - -. Create the pod. -+ -[source,terminal] ----- -$ oc create -f e2e-az3.yaml ----- diff --git a/modules/nodes-scheduler-node-affinity-configuring-required.adoc b/modules/nodes-scheduler-node-affinity-configuring-required.adoc deleted file mode 100644 index b110069c4aa2..000000000000 --- a/modules/nodes-scheduler-node-affinity-configuring-required.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-node-affinity-configuring-required_{context}"] -= Configuring a required node affinity rule - -Required rules *must* be met before a pod can be scheduled on a node. - -.Procedure - -The following steps demonstrate a simple configuration that creates a node and a pod that the scheduler is required to place on the node. - -. Add a label to a node using the `oc label node` command: -+ -[source,terminal] ----- -$ oc label node node1 e2e-az-name=e2e-az1 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - e2e-az-name: e2e-az1 ----- -==== - -. In the `Pod` spec, use the `nodeAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify the key and values that must be met. If you want the new pod to be scheduled on the node you edited, use the same `key` and `value` parameters as the label in the node. -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, `DoesNotExist`, `Lt`, or `Gt`. 
For example, use the operator `In` to require the label to be in the node: -+ -.Example output -[source,yaml] ----- -spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: e2e-az-name - operator: In - values: - - e2e-az1 - - e2e-az2 ----- - -. Create the pod: -+ -[source,terminal] ----- -$ oc create -f e2e-az2.yaml ----- diff --git a/modules/nodes-scheduler-node-affinity-example.adoc b/modules/nodes-scheduler-node-affinity-example.adoc deleted file mode 100644 index 9913b2a85349..000000000000 --- a/modules/nodes-scheduler-node-affinity-example.adoc +++ /dev/null @@ -1,152 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -[id="nodes-scheduler-node-affinity-examples_{context}"] -= Sample node affinity rules - -The following examples demonstrate node affinity. - -[id="admin-guide-sched-affinity-examples1_{context}"] -== Node affinity with matching labels - -The following example demonstrates node affinity for a node and pod with matching labels: - -* The Node1 node has the label `zone:us`: -+ -[source,terminal] ----- -$ oc label node node1 zone=us ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - zone: us ----- -==== - -* The pod-s1 pod has the `zone` and `us` key/value pair under a required node affinity rule: -+ -[source,terminal] ----- -$ cat pod-s1.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod-s1 -spec: - containers: - - image: "docker.io/ocpqe/hello-pod" - name: hello-pod - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "zone" - operator: In - values: - - us ----- - -* The pod-s1 pod can be scheduled on Node1: -+ -[source,terminal] ----- -$ oc get pod -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -pod-s1 1/1 Running 0 4m IP1 node1 ----- - -[id="admin-guide-sched-affinity-examples2_{context}"] -== Node affinity with no matching labels - -The following example demonstrates node affinity for a node and pod without matching labels: - -* The Node1 node has the label `zone:emea`: -+ -[source,terminal] ----- -$ oc label node node1 zone=emea ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - zone: emea ----- -==== - -* The pod-s1 pod has the `zone` and `us` key/value pair under a required node affinity rule: -+ -[source,terminal] ----- -$ cat pod-s1.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: pod-s1 -spec: - containers: - - image: "docker.io/ocpqe/hello-pod" - name: hello-pod - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "zone" - operator: In - values: - - us ----- - -* The pod-s1 pod cannot be scheduled on Node1: -+ -[source,terminal] ----- -$ oc describe pod pod-s1 ----- -+ -.Example output -[source,terminal] ----- -... 
- -Events: - FirstSeen LastSeen Count From SubObjectPath Type Reason - --------- -------- ----- ---- ------------- -------- ------ - 1m 33s 8 default-scheduler Warning FailedScheduling No nodes are available that match all of the following predicates:: MatchNodeSelector (1). ----- diff --git a/modules/nodes-scheduler-node-names-configuring.adoc b/modules/nodes-scheduler-node-names-configuring.adoc deleted file mode 100644 index e978a5beb440..000000000000 --- a/modules/nodes-scheduler-node-names-configuring.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-names.adoc - -[id="nodes-scheduler-node-name-configuring_{context}"] -= Configuring the Pod Node Constraints admission controller to use names - -You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with a specific name. - -.Prerequisites - -Ensure you have the desired labels -and node selector set up in your environment. - -For example, make sure that your pod configuration features the `nodeName` -value indicating the desired label: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -spec: - nodeName: <value> ----- - -.Procedure - -To configure the Pod Node Constraints admission controller: - -. Create a file containing the admission controller information: -+ -[source,yaml] ----- -podNodeSelectorPluginConfig: - clusterDefaultNodeSelector: name-of-node-selector - namespace1: name-of-node-selector - namespace2: name-of-node-selector ----- -+ -For example: -+ -[source,yaml] ----- -podNodeConstraintsPluginConfig: - clusterDefaultNodeSelector: ns1 - ns1: region=west,env=test,infra=fedora,os=fedora ----- - -. Create an *AdmissionConfiguration* object that references the file: -+ -[source,yaml] ----- -kind: AdmissionConfiguration -apiVersion: apiserver.k8s.io/v1alpha1 -plugins: -- name: PodNodeConstraints - path: podnodeconstraints.yaml ----- - diff --git a/modules/nodes-scheduler-node-projects-about.adoc b/modules/nodes-scheduler-node-projects-about.adoc deleted file mode 100644 index fe0fbb08d4df..000000000000 --- a/modules/nodes-scheduler-node-projects-about.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-projects.adoc - -[id="nodes-scheduler-node-projects-about_{context}"] -= Understanding how to constrain pods by project name - -The Pod Node Selector admission controller determines where a pod can be placed using labels on projects and node selectors specified in pods. A new pod will be placed on a node associated with a project only if the node selectors in the pod match the labels in the project. - -After the pod is created, the node selectors are merged into the pod so that the `Pod` spec includes the labels originally included in the specification and any new labels from the node selectors. The example below illustrates the merging effect. - -The Pod Node Selector admission controller also allows you to create a list of labels that are permitted in a specific project. This list acts as a whitelist that lets developers know what labels are acceptable to use in a project and gives administrators greater control over labeling in a cluster. - -The Pod Node Selector uses the annotation key `scheduler.alpha.kubernetes.io/node-selector` to assign node selectors to namespaces. 
- -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - annotations: - scheduler.alpha.kubernetes.io/node-selector: name-of-node-selector - name: namespace3 ----- - -This admission controller has the following behavior: - -. If the namespace has an annotation with the key `scheduler.alpha.kubernetes.io/node-selector`, use its value as the node selector. -. If the namespace lacks such an annotation, use the `clusterDefaultNodeSelector` defined in the `PodNodeSelector` plugin configuration file as the node selector. -. Evaluate the pod's node selector against the namespace node selector for conflicts. Conflicts result in rejection. -. Evaluate the pod's node selector against the namespace-specific whitelist defined in the plugin configuration file. Conflicts result in rejection. - - diff --git a/modules/nodes-scheduler-node-projects-configuring.adoc b/modules/nodes-scheduler-node-projects-configuring.adoc deleted file mode 100644 index fcc1f265e851..000000000000 --- a/modules/nodes-scheduler-node-projects-configuring.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-projects.adoc - -[id="nodes-scheduler-node-projects-configuring_{context}"] -= Configuring the Pod Node Selector admission controller to use projects - -You can configure the Pod Node Selector admission controller to ensure that pods are only placed onto nodes in specific projects. -The Pod Node Selector admission controller uses a configuration file to set options for the behavior of the backend. - -.Procedure - -. Create a file containing the admission controller information: -+ -[source,yaml] ----- -podNodeSelectorPluginConfig: - clusterDefaultNodeSelector: <node-selector> - namespace1: <node-selector> - namespace2: <node-selector> ----- -+ -For example: -+ -[source,yaml] ----- -podNodeSelectorPluginConfig: - clusterDefaultNodeSelector: region=west - ns1: os=centos,region=west ----- - -. Create an *AdmissionConfiguration* object that references the file: -+ -[source,yaml] ----- -kind: AdmissionConfiguration -apiVersion: apiserver.k8s.io/v1alpha1 -plugins: -- name: PodNodeSelector - path: podnodeselector.yaml ----- - - diff --git a/modules/nodes-scheduler-node-selectors-about.adoc b/modules/nodes-scheduler-node-selectors-about.adoc deleted file mode 100644 index 4bde277c6a16..000000000000 --- a/modules/nodes-scheduler-node-selectors-about.adoc +++ /dev/null @@ -1,252 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-selector.adoc - -:_content-type: CONCEPT -[id="nodes-scheduler-node-selectors-about_{context}"] -= About node selectors - -You can use node selectors on pods and labels on nodes to control where the pod is scheduled. With node selectors, {product-title} schedules the pods on nodes that contain matching labels. - -You can use a node selector to place specific pods on specific nodes, cluster-wide node selectors to place new pods on specific nodes anywhere in the cluster, and project node selectors to place new pods in a project on specific nodes. - -For example, as a cluster administrator, you can create an infrastructure where application developers can deploy pods only onto the nodes closest to their geographical location by including a node selector in every pod they create. In this example, the cluster consists of five data centers spread across two regions. In the U.S., label the nodes as `us-east`, `us-central`, or `us-west`. 
In the Asia-Pacific region (APAC), label the nodes as `apac-east` or `apac-west`. The developers can add a node selector to the pods they create to ensure the pods get scheduled on those nodes. - -A pod is not scheduled if the `Pod` object contains a node selector, but no node has a matching label. - -[IMPORTANT] -==== -If you are using node selectors and node affinity in the same pod configuration, the following rules control pod placement onto nodes: - -* If you configure both `nodeSelector` and `nodeAffinity`, both conditions must be satisfied for the pod to be scheduled onto a candidate node. - -* If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node if one of the `nodeSelectorTerms` is satisfied. - -* If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. -==== - -Node selectors on specific pods and nodes:: -+ -You can control which node a specific pod is scheduled on by using node selectors and labels. -+ -To use node selectors and labels, first label the node to avoid pods being descheduled, then add the node selector to the pod. -+ -[NOTE] -==== -You cannot add a node selector directly to an existing scheduled pod. You must label the object that controls the pod, such as deployment config. -==== -+ -For example, the following `Node` object has the `region: east` label: -+ -ifndef::openshift-origin[] -.Sample `Node` object with a label -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: ip-10-0-131-14.ec2.internal - selfLink: /api/v1/nodes/ip-10-0-131-14.ec2.internal - uid: 7bc2580a-8b8e-11e9-8e01-021ab4174c74 - resourceVersion: '478704' - creationTimestamp: '2019-06-10T14:46:08Z' - labels: - kubernetes.io/os: linux - topology.kubernetes.io/zone: us-east-1a - node.openshift.io/os_version: '4.5' - node-role.kubernetes.io/worker: '' - topology.kubernetes.io/region: us-east-1 - node.openshift.io/os_id: rhcos - node.kubernetes.io/instance-type: m4.large - kubernetes.io/hostname: ip-10-0-131-14 - kubernetes.io/arch: amd64 - region: east <1> - type: user-node ----- -<1> Labels to match the pod node selector. -endif::openshift-origin[] -ifdef::openshift-origin[] -.Sample `Node` object with a label -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: ip-10-0-131-14.ec2.internal - selfLink: /api/v1/nodes/ip-10-0-131-14.ec2.internal - uid: 7bc2580a-8b8e-11e9-8e01-021ab4174c74 - resourceVersion: '478704' - creationTimestamp: '2019-06-10T14:46:08Z' - labels: - kubernetes.io/os: linux - topology.kubernetes.io/zone: us-east-1a - node.openshift.io/os_version: '4.5' - node-role.kubernetes.io/worker: '' - topology.kubernetes.io/region: us-east-1 - node.openshift.io/os_id: fedora - node.kubernetes.io/instance-type: m4.large - kubernetes.io/hostname: ip-10-0-131-14 - kubernetes.io/arch: amd64 - region: east <1> - type: user-node ----- -<1> Labels to match the pod node selector. -endif::openshift-origin[] -+ -A pod has the `type: user-node,region: east` node selector: -+ -.Sample `Pod` object with node selectors -[source,yaml] ----- -apiVersion: v1 -kind: Pod - -.... - -spec: - nodeSelector: <1> - region: east - type: user-node ----- -<1> Node selectors to match the node label. The node must have a label for each node selector. -+ -When you create the pod using the example pod spec, it can be scheduled on the example node. 
- -Default cluster-wide node selectors:: -+ -With default cluster-wide node selectors, when you create a pod in that cluster, {product-title} adds the default node selectors to the pod and schedules -the pod on nodes with matching labels. -+ -For example, the following `Scheduler` object has the default cluster-wide `region=east` and `type=user-node` node selectors: -+ -.Example Scheduler Operator Custom Resource -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -... - -spec: - defaultNodeSelector: type=user-node,region=east -... ----- -+ -A node in that cluster has the `type=user-node,region=east` labels: -+ -.Example `Node` object -[source,yaml] ----- -apiVersion: v1 -kind: Node -metadata: - name: ci-ln-qg1il3k-f76d1-hlmhl-worker-b-df2s4 -... - labels: - region: east - type: user-node -... ----- -+ -.Example `Pod` object with a node selector -[source,yaml] ----- -apiVersion: v1 -kind: Pod -... - -spec: - nodeSelector: - region: east -... ----- -+ -When you create the pod using the example pod spec in the example cluster, the pod is created with the cluster-wide node selector and is scheduled on the labeled node: -+ -[source,terminal] -.Example pod list with the pod on the labeled node ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -pod-s1 1/1 Running 0 20s 10.131.2.6 ci-ln-qg1il3k-f76d1-hlmhl-worker-b-df2s4 <none> <none> ----- -+ -[NOTE] -==== -If the project where you create the pod has a project node selector, that selector takes preference over a cluster-wide node selector. Your pod is not created or scheduled if the pod does not have the project node selector. -==== - -[id="project-node-selectors_{context}"] -Project node selectors:: -+ -With project node selectors, when you create a pod in this project, {product-title} adds the node selectors to the pod and schedules the pods on a node with matching labels. If there is a cluster-wide default node selector, a project node selector takes preference. -+ -For example, the following project has the `region=east` node selector: -+ -.Example `Namespace` object -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: east-region - annotations: - openshift.io/node-selector: "region=east" -... ----- -+ -The following node has the `type=user-node,region=east` labels: -+ -.Example `Node` object -[source,yaml] ----- -apiVersion: v1 -kind: Node -metadata: - name: ci-ln-qg1il3k-f76d1-hlmhl-worker-b-df2s4 -... - labels: - region: east - type: user-node -... ----- -+ -When you create the pod using the example pod spec in this example project, the pod is created with the project node selectors and is scheduled on the labeled node: -+ -.Example `Pod` object -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - namespace: east-region -... -spec: - nodeSelector: - region: east - type: user-node -... ----- -+ -[source,terminal] -.Example pod list with the pod on the labeled node ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -pod-s1 1/1 Running 0 20s 10.131.2.6 ci-ln-qg1il3k-f76d1-hlmhl-worker-b-df2s4 <none> <none> ----- -+ -A pod in the project is not created or scheduled if the pod contains different node selectors. For example, if you deploy the following pod into the example project, it is not created: -+ -.Example `Pod` object with an invalid node selector -[source,yaml] ----- -apiVersion: v1 -kind: Pod -... - -spec: - nodeSelector: - region: west - -.... 
----- diff --git a/modules/nodes-scheduler-node-selectors-cluster.adoc b/modules/nodes-scheduler-node-selectors-cluster.adoc deleted file mode 100644 index fc067b819761..000000000000 --- a/modules/nodes-scheduler-node-selectors-cluster.adoc +++ /dev/null @@ -1,202 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-selector.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-node-selectors-cluster_{context}"] -= Creating default cluster-wide node selectors - -You can use default cluster-wide node selectors on pods together with labels on nodes to constrain all pods created in a cluster to specific nodes. - -With cluster-wide node selectors, when you create a pod in that cluster, {product-title} adds the default node selectors to the pod and schedules -the pod on nodes with matching labels. - -You configure cluster-wide node selectors by editing the Scheduler Operator custom resource (CR). You add labels to a node, a compute machine set, or a machine config. Adding the label to the compute machine set ensures that if the node or machine goes down, new nodes have the label. Labels added to a node or machine config do not persist if the node or machine goes down. - -[NOTE] -==== -You can add additional key/value pairs to a pod. But you cannot add a different value for a default key. -==== - -.Procedure - -To add a default cluster-wide node selector: - -. Edit the Scheduler Operator CR to add the default cluster-wide node selectors: -+ -[source,terminal] ----- -$ oc edit scheduler cluster ----- -+ -.Example Scheduler Operator CR with a node selector -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - name: cluster -... -spec: - defaultNodeSelector: type=user-node,region=east <1> - mastersSchedulable: false ----- -<1> Add a node selector with the appropriate `<key>:<value>` pairs. -+ -After making this change, wait for the pods in the `openshift-kube-apiserver` project to redeploy. This can take several minutes. The default cluster-wide node selector does not take effect until the pods redeploy. - -. Add labels to a node by using a compute machine set or editing the node directly: - -* Use a compute machine set to add labels to nodes managed by the compute machine set when a node is created: - -.. Run the following command to add labels to a `MachineSet` object: -+ -[source,terminal] ----- -$ oc patch MachineSet <name> --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"<key>"="<value>","<key>"="<value>"}}]' -n openshift-machine-api <1> ----- -<1> Add a `<key>/<value>` pair for each label. -+ -For example: -+ -[source,terminal] ----- -$ oc patch MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"type":"user-node","region":"east"}}]' -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - template: - spec: - metadata: - labels: - region: "east" - type: "user-node" ----- -==== - -.. 
Verify that the labels are added to the `MachineSet` object by using the `oc edit` command: -+ -For example: -+ -[source,terminal] ----- -$ oc edit MachineSet abc612-msrtw-worker-us-east-1c -n openshift-machine-api ----- -+ -.Example `MachineSet` object -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet - ... -spec: - ... - template: - metadata: - ... - spec: - metadata: - labels: - region: east - type: user-node - ... ----- - -.. Redeploy the nodes associated with that compute machine set by scaling down to `0` and scaling up the nodes: -+ -For example: -+ -[source,terminal] ----- -$ oc scale --replicas=0 MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c -n openshift-machine-api ----- -+ -[source,terminal] ----- -$ oc scale --replicas=1 MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c -n openshift-machine-api ----- - -.. When the nodes are ready and available, verify that the label is added to the nodes by using the `oc get` command: -+ -[source,terminal] ----- -$ oc get nodes -l <key>=<value> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get nodes -l type=user-node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.26.0 ----- - -* Add labels directly to a node: - -.. Edit the `Node` object for the node: -+ -[source,terminal] ----- -$ oc label nodes <name> <key>=<value> ----- -+ -For example, to label a node: -+ -[source,terminal] ----- -$ oc label nodes ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 type=user-node region=east ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a node: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - type: "user-node" - region: "east" ----- -==== - -.. Verify that the labels are added to the node using the `oc get` command: -+ -[source,terminal] ----- -$ oc get nodes -l <key>=<value>,<key>=<value> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get nodes -l type=user-node,region=east ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.26.0 ----- diff --git a/modules/nodes-scheduler-node-selectors-configuring.adoc b/modules/nodes-scheduler-node-selectors-configuring.adoc deleted file mode 100644 index fb10be5d3886..000000000000 --- a/modules/nodes-scheduler-node-selectors-configuring.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-selector.adoc - -[id="nodes-scheduler-node-selectors-configuring_{context}"] -= Configuring the Pod Node Constraints admission controller to use node selectors - -You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with specific labels. - -.Prerequisites - -. Ensure you have the desired labels -ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] -labels on your nodes. -endif::openshift-enterprise,openshift-webscale,openshift-origin[] -and node selector set up in your environment. -+ -For example, make sure that your pod configuration features the `nodeSelector` -value indicating the desired label: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -spec: - nodeSelector: - <key>: <value> -... ----- - -. 
Create a file containing the admission controller information: -+ -[source,yaml] ----- -podNodeSelectorPluginConfig: - clusterDefaultNodeSelector: name-of-node-selector - namespace1: name-of-node-selector - namespace2: name-of-node-selector ----- -+ -For example: -+ -[source,yaml] ----- -podNodeConstraintsPluginConfig: - clusterDefaultNodeSelector: ns1 - ns1: region=west,env=test,infra=fedora,os=fedora ----- - -. Create an *AdmissionConfiguration* object that references the file: -+ -[source,yaml] ----- -kind: AdmissionConfiguration -apiVersion: apiserver.k8s.io/v1alpha1 -plugins: -- name: PodNodeConstraints - path: podnodeconstraints.yaml - nodeSelectorLabelBlacklist: - kubernetes.io/hostname - - <label> ----- - -[NOTE] -==== -If you are using node selectors and node affinity in the same pod configuration, note the following: - -* If you configure both `nodeSelector` and `nodeAffinity`, both conditions must be satisfied for the pod to be scheduled onto a candidate node. - -* If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node if one of the `nodeSelectorTerms` is satisfied. - -* If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node only if all `matchExpressions` are satisfied. -==== - diff --git a/modules/nodes-scheduler-node-selectors-pod.adoc b/modules/nodes-scheduler-node-selectors-pod.adoc deleted file mode 100644 index 73226624edd3..000000000000 --- a/modules/nodes-scheduler-node-selectors-pod.adoc +++ /dev/null @@ -1,216 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-selector.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-node-selectors-pod_{context}"] -= Using node selectors to control pod placement - -You can use node selectors on pods and labels on nodes to control where the pod is scheduled. With node selectors, {product-title} schedules the pods on nodes that contain matching labels. - -You add labels to a node, a compute machine set, or a machine config. Adding the label to the compute machine set ensures that if the node or machine goes down, new nodes have the label. Labels added to a node or machine config do not persist if the node or machine goes down. - -To add node selectors to an existing pod, add a node selector to the controlling object for that pod, such as a `ReplicaSet` object, `DaemonSet` object, `StatefulSet` object, `Deployment` object, or `DeploymentConfig` object. -Any existing pods under that controlling object are recreated on a node with a matching label. If you are creating a new pod, you can add the node selector directly to the pod spec. If the pod does not have a controlling object, you must delete the pod, edit the pod spec, and recreate the pod. - -[NOTE] -==== -You cannot add a node selector directly to an existing scheduled pod. -==== - -.Prerequisites - -To add a node selector to existing pods, determine the controlling object for that pod. -For example, the `router-default-66d5cf9464-m2g75` pod is controlled by the `router-default-66d5cf9464` -replica set: - ----- -$ oc describe pod router-default-66d5cf9464-7pwkc - -Name: router-default-66d5cf9464-7pwkc -Namespace: openshift-ingress - -# ... - -Controlled By: ReplicaSet/router-default-66d5cf9464 -# ... ----- - -The web console lists the controlling object under `ownerReferences` in the pod YAML: - ----- -# ... 
- ownerReferences: - - apiVersion: apps/v1 - kind: ReplicaSet - name: router-default-66d5cf9464 - uid: d81dd094-da26-11e9-a48a-128e7edf0312 - controller: true - blockOwnerDeletion: true -# ... ----- - -.Procedure - -. Add labels to a node by using a compute machine set or editing the node directly: - -* Use a `MachineSet` object to add labels to nodes managed by the compute machine set when a node is created: - -.. Run the following command to add labels to a `MachineSet` object: -+ ----- -$ oc patch MachineSet <name> --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"<key>"="<value>","<key>"="<value>"}}]' -n openshift-machine-api ----- -+ -For example: -+ ----- -$ oc patch MachineSet abc612-msrtw-worker-us-east-1c --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"type":"user-node","region":"east"}}]' -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - template: - spec: - metadata: - labels: - region: "east" - type: "user-node" ----- -==== - -.. Verify that the labels are added to the `MachineSet` object by using the `oc edit` command: -+ -For example: -+ ----- -$ oc edit MachineSet abc612-msrtw-worker-us-east-1c -n openshift-machine-api ----- -+ -.Example `MachineSet` object -[source,yaml] -+ ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet - -# ... - -spec: -# ... - template: - metadata: -# ... - spec: - metadata: - labels: - region: east - type: user-node -# ... ----- - -* Add labels directly to a node: - -.. Edit the `Node` object for the node: -+ -[source,terminal] ----- -$ oc label nodes <name> <key>=<value> ----- -+ -For example, to label a node: -+ -[source,terminal] ----- -$ oc label nodes ip-10-0-142-25.ec2.internal type=user-node region=east ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a node: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - type: "user-node" - region: "east" ----- -==== - -.. Verify that the labels are added to the node: -+ -[source,terminal] ----- -$ oc get nodes -l type=user-node,region=east ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-142-25.ec2.internal Ready worker 17m v1.26.0 ----- - -. Add the matching node selector to a pod: -+ -* To add a node selector to existing and future pods, add a node selector to the controlling object for the pods: -+ -.Example `ReplicaSet` object with labels -[source,yaml] ----- -kind: ReplicaSet - -# ... - -spec: - -# ... - - template: - metadata: - creationTimestamp: null - labels: - ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - pod-template-hash: 66d5cf9464 - spec: - nodeSelector: - kubernetes.io/os: linux - node-role.kubernetes.io/worker: '' - type: user-node <1> ----- -<1> Add the node selector. - -* To add a node selector to a specific, new pod, add the selector to the `Pod` object directly: -+ -.Example `Pod` object with a node selector -[source,yaml] ----- -apiVersion: v1 -kind: Pod - -# ... - -spec: - nodeSelector: - region: east - type: user-node ----- -+ -[NOTE] -==== -You cannot add a node selector directly to an existing scheduled pod. 
-==== diff --git a/modules/nodes-scheduler-node-selectors-project.adoc b/modules/nodes-scheduler-node-selectors-project.adoc deleted file mode 100644 index 4591bf1505c8..000000000000 --- a/modules/nodes-scheduler-node-selectors-project.adoc +++ /dev/null @@ -1,218 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-selector.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-node-selectors-project_{context}"] -= Creating project-wide node selectors - -You can use node selectors in a project together with labels on nodes to constrain all pods created in that project to the labeled nodes. - -When you create a pod in this project, {product-title} adds the node selectors to the pods in the project and schedules the pods on a node with matching labels in the project. If there is a cluster-wide default node selector, a project node selector takes preference. - -You add node selectors to a project by editing the `Namespace` object to add the `openshift.io/node-selector` parameter. You add labels to a node, a compute machine set, or a machine config. Adding the label to the compute machine set ensures that if the node or machine goes down, new nodes have the label. Labels added to a node or machine config do not persist if the node or machine goes down. - -A pod is not scheduled if the `Pod` object contains a node selector, but no project has a matching node selector. When you create a pod from that spec, you receive an error similar to the following message: - -.Example error message -[source,terminal] ----- -Error from server (Forbidden): error when creating "pod.yaml": pods "pod-4" is forbidden: pod node label selector conflicts with its project node label selector ----- - -[NOTE] -==== -You can add additional key/value pairs to a pod. But you cannot add a different value for a project key. -==== - -.Procedure - -To add a default project node selector: - -. Create a namespace or edit an existing namespace to add the `openshift.io/node-selector` parameter: -+ -[source,terminal] ----- -$ oc edit namespace <name> ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - annotations: - openshift.io/node-selector: "type=user-node,region=east" <1> - openshift.io/description: "" - openshift.io/display-name: "" - openshift.io/requester: kube:admin - openshift.io/sa.scc.mcs: s0:c30,c5 - openshift.io/sa.scc.supplemental-groups: 1000880000/10000 - openshift.io/sa.scc.uid-range: 1000880000/10000 - creationTimestamp: "2021-05-10T12:35:04Z" - labels: - kubernetes.io/metadata.name: demo - name: demo - resourceVersion: "145537" - uid: 3f8786e3-1fcb-42e3-a0e3-e2ac54d15001 -spec: - finalizers: - - kubernetes ----- -<1> Add the `openshift.io/node-selector` with the appropriate `<key>:<value>` pairs. - -. Add labels to a node by using a compute machine set or editing the node directly: - -* Use a `MachineSet` object to add labels to nodes managed by the compute machine set when a node is created: - -.. 
Run the following command to add labels to a `MachineSet` object: -+ -[source,terminal] ----- -$ oc patch MachineSet <name> --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"<key>"="<value>","<key>"="<value>"}}]' -n openshift-machine-api ----- -+ -For example: -+ -[source,terminal] ----- -$ oc patch MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c --type='json' -p='[{"op":"add","path":"/spec/template/spec/metadata/labels", "value":{"type":"user-node","region":"east"}}]' -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - template: - spec: - metadata: - labels: - region: "east" - type: "user-node" ----- -==== - -.. Verify that the labels are added to the `MachineSet` object by using the `oc edit` command: -+ -For example: -+ -[source,terminal] ----- -$ oc edit MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c -n openshift-machine-api ----- -+ -.Example output -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: -... -spec: -... - template: - metadata: -... - spec: - metadata: - labels: - region: east - type: user-node ----- - -.. Redeploy the nodes associated with that compute machine set: -+ -For example: -+ -[source,terminal] ----- -$ oc scale --replicas=0 MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c -n openshift-machine-api ----- -+ -[source,terminal] ----- -$ oc scale --replicas=1 MachineSet ci-ln-l8nry52-f76d1-hl7m7-worker-c -n openshift-machine-api ----- - -.. When the nodes are ready and available, verify that the label is added to the nodes by using the `oc get` command: -+ -[source,terminal] ----- -$ oc get nodes -l <key>=<value> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get nodes -l type=user-node ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-l8nry52-f76d1-hl7m7-worker-c-vmqzp Ready worker 61s v1.26.0 ----- - -* Add labels directly to a node: - -.. Edit the `Node` object to add labels: -+ -[source,terminal] ----- -$ oc label <resource> <name> <key>=<value> ----- -+ -For example, to label a node: -+ -[source,terminal] ----- -$ oc label nodes ci-ln-l8nry52-f76d1-hl7m7-worker-c-tgq49 type=user-node region=east ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add labels to a node: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - type: "user-node" - region: "east" ----- -==== - -.. 
Verify that the labels are added to the `Node` object using the `oc get` command: -+ -[source,terminal] ----- -$ oc get nodes -l <key>=<value> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc get nodes -l type=user-node,region=east ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ci-ln-l8nry52-f76d1-hl7m7-worker-b-tgq49 Ready worker 17m v1.26.0 ----- diff --git a/modules/nodes-scheduler-pod-affinity-about.adoc b/modules/nodes-scheduler-pod-affinity-about.adoc deleted file mode 100644 index 5e6a7fb7715b..000000000000 --- a/modules/nodes-scheduler-pod-affinity-about.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-pod-affinity.adoc - -:_content-type: CONCEPT -[id="nodes-scheduler-pod-affinity-about_{context}"] -= Understanding pod affinity - -_Pod affinity_ and _pod anti-affinity_ allow you to constrain which nodes your pod is eligible to be scheduled on based on the key/value labels on other pods. - -* Pod affinity can tell the scheduler to locate a new pod on the same node as other pods if the label selector on the new pod matches the label on the current pod. -* Pod anti-affinity can prevent the scheduler from locating a new pod on the same node as pods with the same labels if the label selector on the new pod matches the label on the current pod. - -For example, using affinity rules, you could spread or pack pods within a service or relative to pods in other services. Anti-affinity rules allow you to prevent pods of a particular service from scheduling on the same nodes as pods of another service that are known to interfere with the performance of the pods of the first service. Or, you could spread the pods of a service across nodes, availability zones, or availability sets to reduce correlated failures. - -[NOTE] -==== -A label selector might match pods with multiple pod deployments. Use unique combinations of labels when configuring anti-affinity rules to avoid matching pods. -==== - -There are two types of pod affinity rules: _required_ and _preferred_. - -Required rules *must* be met before a pod can be scheduled on a node. Preferred rules specify that, if the rule is met, the scheduler tries to enforce the rules, but does not guarantee enforcement. - -[NOTE] -==== -Depending on your pod priority and preemption settings, the scheduler might not be able to find an appropriate node for a pod without violating affinity -requirements. If so, a pod might not be scheduled. - -To prevent this situation, carefully configure pod affinity with equal-priority pods. -==== - -You configure pod affinity/anti-affinity through the `Pod` spec files. You can specify a required rule, a preferred rule, or both. If you specify both, the node must first meet the required rule, then attempts to meet the preferred rule. - -The following example shows a `Pod` spec configured for pod affinity and anti-affinity. - -In this example, the pod affinity rule indicates that the pod can schedule onto a node only if that node has at least one already-running pod with a label that has the key `security` and value `S1`. The pod anti-affinity rule says that the pod prefers to not schedule onto a node if that node is already running a pod with label having key `security` and value `S2`. 
- -.Sample `Pod` config file with pod affinity -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: with-pod-affinity -spec: - affinity: - podAffinity: <1> - requiredDuringSchedulingIgnoredDuringExecution: <2> - - labelSelector: - matchExpressions: - - key: security <3> - operator: In <4> - values: - - S1 <3> - topologyKey: topology.kubernetes.io/zone - containers: - - name: with-pod-affinity - image: docker.io/ocpqe/hello-pod ----- - -<1> Stanza to configure pod affinity. -<2> Defines a required rule. -<3> The key and value (label) that must be matched to apply the rule. -<4> The operator represents the relationship between the label on the existing pod and the set of values in the `matchExpression` parameters in the specification for the new pod. Can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. - -.Sample `Pod` config file with pod anti-affinity -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: with-pod-antiaffinity -spec: - affinity: - podAntiAffinity: <1> - preferredDuringSchedulingIgnoredDuringExecution: <2> - - weight: 100 <3> - podAffinityTerm: - labelSelector: - matchExpressions: - - key: security <4> - operator: In <5> - values: - - S2 - topologyKey: kubernetes.io/hostname - containers: - - name: with-pod-affinity - image: docker.io/ocpqe/hello-pod ----- - -<1> Stanza to configure pod anti-affinity. -<2> Defines a preferred rule. -<3> Specifies a weight for a preferred rule. The node with the highest weight is preferred. -<4> Description of the pod label that determines when the anti-affinity rule applies. Specify a key and value for the label. -<5> The operator represents the relationship between the label on the existing pod and the set of values in the `matchExpression` parameters in the specification for the new pod. Can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. - -[NOTE] -==== -If labels on a node change at runtime such that the affinity rules on a pod are no longer met, the pod continues to run on the node. -==== diff --git a/modules/nodes-scheduler-pod-affinity-configuring.adoc b/modules/nodes-scheduler-pod-affinity-configuring.adoc deleted file mode 100644 index 9e61b1e5dd3f..000000000000 --- a/modules/nodes-scheduler-pod-affinity-configuring.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-pod-affinity.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-pod-affinity-configuring_{context}"] -= Configuring a pod affinity rule - -The following steps demonstrate a simple two-pod configuration that creates pod with a label and a pod that uses affinity to allow scheduling with that pod. - -.Procedure - -. Create a pod with a specific label in the `Pod` spec: -+ -[source,terminal] ----- -$ cat team4.yaml -apiVersion: v1 -kind: Pod -metadata: - name: security-s1 - labels: - security: S1 -spec: - containers: - - name: security-s1 - image: docker.io/ocpqe/hello-pod ----- - -. When creating other pods, edit the `Pod` spec as follows: -+ -.. Use the `podAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter or `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify the key and value that must be met. If you want the new pod to be scheduled with the other pod, use the same `key` and `value` parameters as the label on the first pod. 
-+ -[source,yaml] ----- - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - S1 - topologyKey: topology.kubernetes.io/zone ----- -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. For example, use the operator `In` to require the label to be in the node. -+ -.. Specify a `topologyKey`, which is a prepopulated link:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels[Kubernetes label] that the system uses to denote such a topology domain. - -. Create the pod. -+ -[source,terminal] ----- -$ oc create -f <pod-spec>.yaml ----- diff --git a/modules/nodes-scheduler-pod-affinity-example.adoc b/modules/nodes-scheduler-pod-affinity-example.adoc deleted file mode 100644 index 1773b2049479..000000000000 --- a/modules/nodes-scheduler-pod-affinity-example.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-node-affinity.adoc - -[id="nodes-scheduler-pod-affinity-example_{context}"] -= Sample pod affinity and anti-affinity rules - -The following examples demonstrate pod affinity and pod anti-affinity. - -[id="nodes-scheduler-pod-affinity-example-affinity_{context}"] -== Pod Affinity - -The following example demonstrates pod affinity for pods with matching labels and label selectors. - -* The pod *team4* has the label `team:4`. -+ -[source,terminal] ----- -$ cat team4.yaml -apiVersion: v1 -kind: Pod -metadata: - name: team4 - labels: - team: "4" -spec: - containers: - - name: ocp - image: docker.io/ocpqe/hello-pod ----- - -* The pod *team4a* has the label selector `team:4` under `podAffinity`. -+ -[source,yaml] ----- -$ cat pod-team4a.yaml -apiVersion: v1 -kind: Pod -metadata: - name: team4a -spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: team - operator: In - values: - - "4" - topologyKey: kubernetes.io/hostname - containers: - - name: pod-affinity - image: docker.io/ocpqe/hello-pod ----- - -* The *team4a* pod is scheduled on the same node as the *team4* pod. - -[id="nodes-scheduler-pod-affinity-example-antiaffinity_{context}"] -== Pod Anti-affinity - -The following example demonstrates pod anti-affinity for pods with matching labels and label selectors. - -* The pod *pod-s1* has the label `security:s1`. -+ -[source,terminal] ----- -cat pod-s1.yaml -apiVersion: v1 -kind: Pod -metadata: - name: pod-s1 - labels: - security: s1 -spec: - containers: - - name: ocp - image: docker.io/ocpqe/hello-pod ----- - -* The pod *pod-s2* has the label selector `security:s1` under `podAntiAffinity`. -+ -[source,yaml] ----- -cat pod-s2.yaml -apiVersion: v1 -kind: Pod -metadata: - name: pod-s2 -spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - s1 - topologyKey: kubernetes.io/hostname - containers: - - name: pod-antiaffinity - image: docker.io/ocpqe/hello-pod ----- - -* The pod *pod-s2* cannot be scheduled on the same node as `pod-s1`. - -[id="nodes-scheduler-pod-affinity-example-no-labels_{context}"] -== Pod Affinity with no Matching Labels - -The following example demonstrates pod affinity for pods without matching labels and label selectors. - -* The pod *pod-s1* has the label `security:s1`. 
-+ -[source,terminal] ----- -$ cat pod-s1.yaml -apiVersion: v1 -kind: Pod -metadata: - name: pod-s1 - labels: - security: s1 -spec: - containers: - - name: ocp - image: docker.io/ocpqe/hello-pod ----- - -* The pod *pod-s2* has the label selector `security:s2`. -+ -[source,terminal] ----- -$ cat pod-s2.yaml -apiVersion: v1 -kind: Pod -metadata: - name: pod-s2 -spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - s2 - topologyKey: kubernetes.io/hostname - containers: - - name: pod-affinity - image: docker.io/ocpqe/hello-pod ----- - -* The pod *pod-s2* is not scheduled unless there is a node with a pod that has the `security:s2` label. If there is no other pod with that label, the new pod remains in a pending state: -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -pod-s2 0/1 Pending 0 32s <none> ----- diff --git a/modules/nodes-scheduler-pod-anti-affinity-configuring.adoc b/modules/nodes-scheduler-pod-anti-affinity-configuring.adoc deleted file mode 100644 index 395a78424cef..000000000000 --- a/modules/nodes-scheduler-pod-anti-affinity-configuring.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/nodes-scheduler-pod-affinity.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-pod-anti-affinity-configuring_{context}"] -= Configuring a pod anti-affinity rule - -The following steps demonstrate a simple two-pod configuration that creates pod with a label and a pod that uses an anti-affinity preferred rule to attempt to prevent scheduling with that pod. - -.Procedure - -. Create a pod with a specific label in the `Pod` spec: -+ -[source,yaml] ----- -$ cat team4.yaml -apiVersion: v1 -kind: Pod -metadata: - name: security-s2 - labels: - security: S2 -spec: - containers: - - name: security-s2 - image: docker.io/ocpqe/hello-pod ----- - -. When creating other pods, edit the `Pod` spec to set the following parameters: - -. Use the `podAntiAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter or `preferredDuringSchedulingIgnoredDuringExecution` parameter: -+ -.. Specify a weight for the node, 1-100. The node that with highest weight is preferred. -+ -.. Specify the key and values that must be met. If you want the new pod to not be scheduled with the other pod, use the same `key` and `value` parameters as the label on the first pod. -+ -[source,yaml] ----- - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: security - operator: In - values: - - S2 - topologyKey: kubernetes.io/hostname ----- -+ -.. For a preferred rule, specify a weight, 1-100. -+ -.. Specify an `operator`. The operator can be `In`, `NotIn`, `Exists`, or `DoesNotExist`. For example, use the operator `In` to require the label to be in the node. - -. Specify a `topologyKey`, which is a prepopulated link:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels[Kubernetes label] that the system uses to denote such a topology domain. - -. Create the pod. 
-+ -[source,terminal] ----- -$ oc create -f <pod-spec>.yaml ----- diff --git a/modules/nodes-scheduler-pod-topology-spread-constraints-about.adoc b/modules/nodes-scheduler-pod-topology-spread-constraints-about.adoc deleted file mode 100644 index 205a0a621a23..000000000000 --- a/modules/nodes-scheduler-pod-topology-spread-constraints-about.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints - -:_content-type: CONCEPT -[id="nodes-scheduler-pod-topology-spread-constraints-about_{context}"] -= About pod topology spread constraints - -By using a _pod topology spread constraint_, you provide fine-grained control over the distribution of pods across failure domains to help achieve high availability and more efficient resource utilization. - -{product-title} administrators can label nodes to provide topology information, such as regions, zones, nodes, or other user-defined domains. After these labels are set on nodes, users can then define pod topology spread constraints to control the placement of pods across these topology domains. - -You specify which pods to group together, which topology domains they are spread among, and the acceptable skew. Only pods within the same namespace are matched and grouped together when spreading due to a constraint. - -// TODO Mention about relationship to affinity/anti-affinity? diff --git a/modules/nodes-scheduler-pod-topology-spread-constraints-configuring.adoc b/modules/nodes-scheduler-pod-topology-spread-constraints-configuring.adoc deleted file mode 100644 index ca4c36679621..000000000000 --- a/modules/nodes-scheduler-pod-topology-spread-constraints-configuring.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints - -:_content-type: PROCEDURE -[id="nodes-scheduler-pod-topology-spread-constraints-configuring_{context}"] -= Configuring pod topology spread constraints - -The following steps demonstrate how to configure pod topology spread constraints to distribute pods that match the specified labels based on their zone. - -You can specify multiple pod topology spread constraints, but you must ensure that they do not conflict with each other. All pod topology spread constraints must be satisfied for a pod to be placed. - -.Prerequisites - -* A cluster administrator has added the required labels to nodes. - -.Procedure - -. Create a `Pod` spec and specify a pod topology spread constraint: -+ -.Example `pod-spec.yaml` file -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: my-pod - labels: - foo: bar -spec: - topologySpreadConstraints: - - maxSkew: 1 <1> - topologyKey: topology.kubernetes.io/zone <2> - whenUnsatisfiable: DoNotSchedule <3> - labelSelector: <4> - matchLabels: - foo: bar <5> - matchLabelKeys: - - my-pod-label <6> - containers: - - image: "docker.io/ocpqe/hello-pod" - name: hello-pod ----- -<1> The maximum difference in number of pods between any two topology domains. The default is `1`, and you cannot specify a value of `0`. -<2> The key of a node label. Nodes with this key and identical value are considered to be in the same topology. -<3> How to handle a pod if it does not satisfy the spread constraint. The default is `DoNotSchedule`, which tells the scheduler not to schedule the pod. 
Set to `ScheduleAnyway` to still schedule the pod, but the scheduler prioritizes honoring the skew to not make the cluster more imbalanced. -<4> Pods that match this label selector are counted and recognized as a group when spreading to satisfy the constraint. Be sure to specify a label selector, otherwise no pods can be matched. -<5> Be sure that this `Pod` spec also sets its labels to match this label selector if you want it to be counted properly in the future. -<6> A list of pod label keys to select which pods to calculate spreading over. - -. Create the pod: -+ -[source,terminal] ----- -$ oc create -f pod-spec.yaml ----- diff --git a/modules/nodes-scheduler-pod-topology-spread-constraints-examples.adoc b/modules/nodes-scheduler-pod-topology-spread-constraints-examples.adoc deleted file mode 100644 index cc440e1bc29e..000000000000 --- a/modules/nodes-scheduler-pod-topology-spread-constraints-examples.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-pod-topology-spread-constraints - -[id="nodes-scheduler-pod-topology-spread-constraints-examples_{context}"] -= Example pod topology spread constraints - -The following examples demonstrate pod topology spread constraint configurations. - -[id="nodes-scheduler-pod-topology-spread-constraints-example-single_{context}"] -== Single pod topology spread constraint example - -// TODO: Add a diagram? - -This example `Pod` spec defines one pod topology spread constraint. It matches on pods labeled `foo:bar`, distributes among zones, specifies a skew of `1`, and does not schedule the pod if it does not meet these requirements. - -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - name: my-pod - labels: - foo: bar -spec: - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - foo: bar - containers: - - image: "docker.io/ocpqe/hello-pod" - name: hello-pod ----- - -[id="nodes-scheduler-pod-topology-spread-constraints-example-multiple_{context}"] -== Multiple pod topology spread constraints example - -// TODO: Add a diagram? - -This example `Pod` spec defines two pod topology spread constraints. Both match on pods labeled `foo:bar`, specify a skew of `1`, and do not schedule the pod if it does not meet these requirements. - -The first constraint distributes pods based on a user-defined label `node`, and the second constraint distributes pods based on a user-defined label `rack`. Both constraints must be met for the pod to be scheduled. 
- -[source,yaml] ----- -kind: Pod -apiVersion: v1 -metadata: - name: my-pod-2 - labels: - foo: bar -spec: - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: node - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - foo: bar - - maxSkew: 1 - topologyKey: rack - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - foo: bar - containers: - - image: "docker.io/ocpqe/hello-pod" - name: hello-pod ----- diff --git a/modules/nodes-scheduler-profiles-about.adoc b/modules/nodes-scheduler-profiles-about.adoc deleted file mode 100644 index 4bdf6e840b25..000000000000 --- a/modules/nodes-scheduler-profiles-about.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-profiles.adoc - -:_content-type: CONCEPT -[id="nodes-scheduler-profiles-about_{context}"] -= About scheduler profiles - -You can specify a scheduler profile to control how pods are scheduled onto nodes. - -The following scheduler profiles are available: - -`LowNodeUtilization`:: This profile attempts to spread pods evenly across nodes to get low resource usage per node. This profile provides the default scheduler behavior. - -`HighNodeUtilization`:: This profile attempts to place as many pods as possible on to as few nodes as possible. This minimizes node count and has high resource usage per node. - -`NoScoring`:: This is a low-latency profile that strives for the quickest scheduling cycle by disabling all score plugins. This might sacrifice better scheduling decisions for faster ones. diff --git a/modules/nodes-scheduler-profiles-configuring.adoc b/modules/nodes-scheduler-profiles-configuring.adoc deleted file mode 100644 index d0bdba1379fd..000000000000 --- a/modules/nodes-scheduler-profiles-configuring.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-profiles.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-profiles-configuring_{context}"] -= Configuring a scheduler profile - -You can configure the scheduler to use a scheduler profile. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Edit the `Scheduler` object: -+ -[source,terminal] ----- -$ oc edit scheduler cluster ----- - -. Specify the profile to use in the `spec.profile` field: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Scheduler -metadata: - ... - name: cluster - resourceVersion: "601" - selfLink: /apis/config.openshift.io/v1/schedulers/cluster - uid: b351d6d0-d06f-4a99-a26b-87af62e79f59 -spec: - mastersSchedulable: false - profile: HighNodeUtilization <1> ----- -<1> Set to `LowNodeUtilization`, `HighNodeUtilization`, or `NoScoring`. - -. Save the file to apply the changes. diff --git a/modules/nodes-scheduler-taints-tolerations-about.adoc b/modules/nodes-scheduler-taints-tolerations-about.adoc deleted file mode 100644 index b5d290f130e2..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-about.adoc +++ /dev/null @@ -1,290 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - - -:_content-type: CONCEPT -[id="nodes-scheduler-taints-tolerations-about_{context}"] -= Understanding taints and tolerations - -A _taint_ allows a node to refuse a pod to be scheduled unless that pod has a matching _toleration_. 
- -You apply taints to a node through the `Node` specification (`NodeSpec`) and apply tolerations to a pod through the `Pod` specification (`PodSpec`). When you apply a taint a node, the scheduler cannot place a pod on that node unless the pod can tolerate the taint. - -.Example taint in a node specification -[source,yaml] ----- -spec: - taints: - - effect: NoExecute - key: key1 - value: value1 -.... ----- - -.Example toleration in a `Pod` spec -[source,yaml] ----- -spec: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoExecute" - tolerationSeconds: 3600 -.... ----- - - -Taints and tolerations consist of a key, value, and effect. - -[id="taint-components-table_{context}"] -.Taint and toleration components -[cols="3a,8a",options="header"] -|=== - -|Parameter |Description - -|`key` -|The `key` is any string, up to 253 characters. The key must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. - -|`value` -| The `value` is any string, up to 63 characters. The value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores. - -|`effect` - -|The effect is one of the following: -[frame=none] -[cols="2a,3a"] -!==== -!`NoSchedule` ^[1]^ -!* New pods that do not match the taint are not scheduled onto that node. -* Existing pods on the node remain. -!`PreferNoSchedule` -!* New pods that do not match the taint might be scheduled onto that node, but the scheduler tries not to. -* Existing pods on the node remain. -!`NoExecute` -!* New pods that do not match the taint cannot be scheduled onto that node. -* Existing pods on the node that do not have a matching toleration are removed. -!==== - -|`operator` -|[frame=none] -[cols="2,3"] -!==== -!`Equal` -!The `key`/`value`/`effect` parameters must match. This is the default. -!`Exists` -!The `key`/`effect` parameters must match. You must leave a blank `value` parameter, which matches any. -!==== - -|=== -[.small] --- -1. If you add a `NoSchedule` taint to a control plane node, the node must have the `node-role.kubernetes.io/master=:NoSchedule` taint, which is added by default. -+ -For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: openshift-machine-api/ci-ln-62s7gtb-f76d1-v8jxv-master-0 - machineconfiguration.openshift.io/currentConfig: rendered-master-cdc1ab7da414629332cc4c3926e6e59c -... -spec: - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -... ----- --- - -A toleration matches a taint: - -* If the `operator` parameter is set to `Equal`: -** the `key` parameters are the same; -** the `value` parameters are the same; -** the `effect` parameters are the same. - -* If the `operator` parameter is set to `Exists`: -** the `key` parameters are the same; -** the `effect` parameters are the same. - -The following taints are built into {product-title}: - -* `node.kubernetes.io/not-ready`: The node is not ready. This corresponds to the node condition `Ready=False`. -* `node.kubernetes.io/unreachable`: The node is unreachable from the node controller. This corresponds to the node condition `Ready=Unknown`. -* `node.kubernetes.io/memory-pressure`: The node has memory pressure issues. This corresponds to the node condition `MemoryPressure=True`. -* `node.kubernetes.io/disk-pressure`: The node has disk pressure issues. This corresponds to the node condition `DiskPressure=True`. 
-* `node.kubernetes.io/network-unavailable`: The node network is unavailable. -* `node.kubernetes.io/unschedulable`: The node is unschedulable. -* `node.cloudprovider.kubernetes.io/uninitialized`: When the node controller is started with an external cloud provider, this taint is set on a node to mark it as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint. -* `node.kubernetes.io/pid-pressure`: The node has pid pressure. This corresponds to the node condition `PIDPressure=True`. -+ -[IMPORTANT] -==== -{product-title} does not set a default pid.available `evictionHard`. -==== - - -[id="nodes-scheduler-taints-tolerations-about-seconds_{context}"] -== Understanding how to use toleration seconds to delay pod evictions - -You can specify how long a pod can remain bound to a node before being evicted by specifying the `tolerationSeconds` parameter in the `Pod` specification or `MachineSet` object. If a taint with the `NoExecute` effect is added to a node, a pod that does tolerate the taint, which has the `tolerationSeconds` parameter, the pod is not evicted until that time period expires. - -.Example output -[source,yaml] ----- -spec: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoExecute" - tolerationSeconds: 3600 ----- - -Here, if this pod is running but does not have a matching toleration, the pod stays bound to the node for 3,600 seconds and then be evicted. If the taint is removed before that time, the pod is not evicted. - -[id="nodes-scheduler-taints-tolerations-about-multiple_{context}"] -== Understanding how to use multiple taints - -You can put multiple taints on the same node and multiple tolerations on the same pod. {product-title} processes multiple taints and tolerations as follows: - -. Process the taints for which the pod has a matching toleration. -. The remaining unmatched taints have the indicated effects on the pod: -+ -* If there is at least one unmatched taint with effect `NoSchedule`, {product-title} cannot schedule a pod onto that node. -* If there is no unmatched taint with effect `NoSchedule` but there is at least one unmatched taint with effect `PreferNoSchedule`, {product-title} tries to not schedule the pod onto the node. -* If there is at least one unmatched taint with effect `NoExecute`, {product-title} evicts the pod from the node if it is already running on the node, or the pod is not scheduled onto the node if it is not yet running on the node. -+ -** Pods that do not tolerate the taint are evicted immediately. -+ -** Pods that tolerate the taint without specifying `tolerationSeconds` in their `Pod` specification remain bound forever. -+ -** Pods that tolerate the taint with a specified `tolerationSeconds` remain bound for the specified amount of time. - -For example: - -* Add the following taints to the node: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 key1=value1:NoSchedule ----- -+ -[source,terminal] ----- -$ oc adm taint nodes node1 key1=value1:NoExecute ----- -+ -[source,terminal] ----- -$ oc adm taint nodes node1 key2=value2:NoSchedule ----- - -* The pod has the following tolerations: -+ -[source,yaml] ----- -spec: - tolerations: - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoSchedule" - - key: "key1" - operator: "Equal" - value: "value1" - effect: "NoExecute" ----- - -In this case, the pod cannot be scheduled onto the node, because there is no toleration matching the third taint. 
The pod continues running if it is already running on the node when the taint is added, because the third taint is the only -one of the three that is not tolerated by the pod. - -[id="nodes-scheduler-taints-tolerations-about-taintNodesByCondition_{context}"] -== Understanding pod scheduling and node conditions (taint node by condition) - -The Taint Nodes By Condition feature, which is enabled by default, automatically taints nodes that report conditions such as memory pressure and disk pressure. If a node reports a condition, a taint is added until the condition clears. The taints have the `NoSchedule` effect, which means no pod can be scheduled on the node unless the pod has a matching toleration. - -The scheduler checks for these taints on nodes before scheduling pods. If the taint is present, the pod is scheduled on a different node. Because the scheduler checks for taints and not the actual node conditions, you configure the scheduler to ignore some of these node conditions by adding appropriate pod tolerations. - -To ensure backward compatibility, the daemon set controller automatically adds the following tolerations to all daemons: - -* node.kubernetes.io/memory-pressure -* node.kubernetes.io/disk-pressure -* node.kubernetes.io/unschedulable (1.10 or later) -* node.kubernetes.io/network-unavailable (host network only) - -You can also add arbitrary tolerations to daemon sets. - -[NOTE] -==== -The control plane also adds the `node.kubernetes.io/memory-pressure` toleration on pods that have a QoS class. This is because Kubernetes manages pods in the `Guaranteed` or `Burstable` QoS classes. The new `BestEffort` pods do not get scheduled onto the affected node. -==== - -[id="nodes-scheduler-taints-tolerations-about-taintBasedEvictions_{context}"] -== Understanding evicting pods by condition (taint-based evictions) - -The Taint-Based Evictions feature, which is enabled by default, evicts pods from a node that experiences specific conditions, such as `not-ready` and `unreachable`. -When a node experiences one of these conditions, {product-title} automatically adds taints to the node, and starts evicting and rescheduling the pods on different nodes. - -Taint Based Evictions have a `NoExecute` effect, where any pod that does not tolerate the taint is evicted immediately and any pod that does tolerate the taint will never be evicted, unless the pod uses the `tolerationSeconds` parameter. - -The `tolerationSeconds` parameter allows you to specify how long a pod stays bound to a node that has a node condition. If the condition still exists after the `tolerationSeconds` period, the taint remains on the node and the pods with a matching toleration are evicted. If the condition clears before the `tolerationSeconds` period, pods with matching tolerations are not removed. - -If you use the `tolerationSeconds` parameter with no value, pods are never evicted because of the not ready and unreachable node conditions. - -[NOTE] -==== -{product-title} evicts pods in a rate-limited way to prevent massive pod evictions in scenarios such as the master becoming partitioned from the nodes. - -By default, if more than 55% of nodes in a given zone are unhealthy, the node lifecycle controller changes that zone's state to `PartialDisruption` and the rate of pod evictions is reduced. For small clusters (by default, 50 nodes or less) in this state, nodes in this zone are not tainted and evictions are stopped. 
- -For more information, see link:https://kubernetes.io/docs/concepts/architecture/nodes/#rate-limits-on-eviction[Rate limits on eviction] in the Kubernetes documentation. -==== - -{product-title} automatically adds a toleration for `node.kubernetes.io/not-ready` and `node.kubernetes.io/unreachable` with `tolerationSeconds=300`, unless the `Pod` configuration specifies either toleration. - -[source,yaml] ----- -spec: - tolerations: - - key: node.kubernetes.io/not-ready - operator: Exists - effect: NoExecute - tolerationSeconds: 300 <1> - - key: node.kubernetes.io/unreachable - operator: Exists - effect: NoExecute - tolerationSeconds: 300 ----- - -<1> These tolerations ensure that the default pod behavior is to remain bound for five minutes after one of these node conditions problems is detected. - -You can configure these tolerations as needed. For example, if you have an application with a lot of local state, you might want to keep the pods bound to node for a longer time in the event of network partition, allowing for the partition to recover and avoiding pod eviction. - -Pods spawned by a daemon set are created with `NoExecute` tolerations for the following taints with no `tolerationSeconds`: - -* `node.kubernetes.io/unreachable` -* `node.kubernetes.io/not-ready` - -As a result, daemon set pods are never evicted because of these node conditions. - -[id="nodes-scheduler-taints-tolerations-all_{context}"] -== Tolerating all taints - -You can configure a pod to tolerate all taints by adding an `operator: "Exists"` toleration with no `key` and `value` parameters. -Pods with this toleration are not removed from a node that has taints. - -.`Pod` spec for tolerating all taints -[source,yaml] ----- -spec: - tolerations: - - operator: "Exists" ----- diff --git a/modules/nodes-scheduler-taints-tolerations-adding-machineset.adoc b/modules/nodes-scheduler-taints-tolerations-adding-machineset.adoc deleted file mode 100644 index 73c88558d9fa..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-adding-machineset.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-adding-machineset_{context}"] -= Adding taints and tolerations using a compute machine set - -You can add taints to nodes using a compute machine set. All nodes associated with the `MachineSet` object are updated with the taint. Tolerations respond to taints added by a compute machine set in the same manner as taints added directly to the nodes. - -.Procedure - -. Add a toleration to a pod by editing the `Pod` spec to include a `tolerations` stanza: -+ -.Sample pod configuration file with `Equal` operator -[source,yaml] ----- -spec: - tolerations: - - key: "key1" <1> - value: "value1" - operator: "Equal" - effect: "NoExecute" - tolerationSeconds: 3600 <2> ----- -<1> The toleration parameters, as described in the *Taint and toleration components* table. -<2> The `tolerationSeconds` parameter specifies how long a pod is bound to a node before being evicted. -+ -For example: -+ -.Sample pod configuration file with `Exists` operator -[source,yaml] ----- -spec: - tolerations: - - key: "key1" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 3600 ----- - -. Add the taint to the `MachineSet` object: - -.. 
Edit the `MachineSet` YAML for the nodes you want to taint or you can create a new `MachineSet` object: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> ----- - -.. Add the taint to the `spec.template.spec` section: -+ -.Example taint in a compute machine set specification -[source,yaml] ----- -spec: -.... - template: -.... - spec: - taints: - - effect: NoExecute - key: key1 - value: value1 -.... ----- -+ -This example places a taint that has the key `key1`, value `value1`, and taint effect `NoExecute` on the nodes. - -.. Scale down the compute machine set to 0: -+ -[source,terminal] ----- -$ oc scale --replicas=0 machineset <machineset> -n openshift-machine-api ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale the compute machine set: - -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - name: <machineset> - namespace: openshift-machine-api -spec: - replicas: 0 ----- -==== -+ -Wait for the machines to be removed. - -.. Scale up the compute machine set as needed: -+ -[source,terminal] ----- -$ oc scale --replicas=2 machineset <machineset> -n openshift-machine-api ----- -+ -Or: -+ -[source,terminal] ----- -$ oc edit machineset <machineset> -n openshift-machine-api ----- -+ -Wait for the machines to start. The taint is added to the nodes associated with the `MachineSet` object. diff --git a/modules/nodes-scheduler-taints-tolerations-adding.adoc b/modules/nodes-scheduler-taints-tolerations-adding.adoc deleted file mode 100644 index 43f3c24f3f40..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-adding.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-adding_{context}"] -= Adding taints and tolerations - -You add tolerations to pods and taints to nodes to allow the node to control which pods should or should not be scheduled on them. For existing pods and nodes, you should add the toleration to the pod first, then add the taint to the node to avoid pods being removed from the node before you can add the toleration. - -.Procedure - -. Add a toleration to a pod by editing the `Pod` spec to include a `tolerations` stanza: -+ -.Sample pod configuration file with an Equal operator -[source,yaml] ----- -spec: - tolerations: - - key: "key1" <1> - value: "value1" - operator: "Equal" - effect: "NoExecute" - tolerationSeconds: 3600 <2> ----- -<1> The toleration parameters, as described in the *Taint and toleration components* table. -<2> The `tolerationSeconds` parameter specifies how long a pod can remain bound to a node before being evicted. -+ -For example: -+ -.Sample pod configuration file with an Exists operator -[source,yaml] ----- -spec: - tolerations: - - key: "key1" - operator: "Exists" <1> - effect: "NoExecute" - tolerationSeconds: 3600 ----- -<1> The `Exists` operator does not take a `value`. -+ -This example places a taint on `node1` that has key `key1`, value `value1`, and taint effect `NoExecute`. - -. 
Add a taint to a node by using the following command with the parameters described in the *Taint and toleration components* table: -+ -[source,terminal] ----- -$ oc adm taint nodes <node_name> <key>=<value>:<effect> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 key1=value1:NoExecute ----- -+ -This command places a taint on `node1` that has key `key1`, value `value1`, and effect `NoExecute`. -+ -[NOTE] -==== -If you add a `NoSchedule` taint to a control plane node, the node must have the `node-role.kubernetes.io/master=:NoSchedule` taint, which is added by default. - -For example: - -[source,yaml] ----- -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: openshift-machine-api/ci-ln-62s7gtb-f76d1-v8jxv-master-0 - machineconfiguration.openshift.io/currentConfig: rendered-master-cdc1ab7da414629332cc4c3926e6e59c -... -spec: - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -... ----- -==== -+ -The tolerations on the pod match the taint on the node. A pod with either toleration can be scheduled onto `node1`. - diff --git a/modules/nodes-scheduler-taints-tolerations-binding.adoc b/modules/nodes-scheduler-taints-tolerations-binding.adoc deleted file mode 100644 index b7fdfef3e8b8..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-binding.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-bindings_{context}"] -= Binding a user to a node using taints and tolerations - -If you want to dedicate a set of nodes for exclusive use by a particular set of users, add a toleration to their pods. Then, add a corresponding taint to those nodes. The pods with the tolerations are allowed to use the tainted nodes or any other nodes in the cluster. - -If you want ensure the pods are scheduled to only those tainted nodes, also add a label to the same set of nodes and add a node affinity to the pods so that the pods can only be scheduled onto nodes with that label. - -.Procedure - -To configure a node so that users can use only that node: - -. Add a corresponding taint to those nodes: -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes node1 dedicated=groupName:NoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - ... -spec: - taints: - - key: dedicated - value: groupName - effect: NoSchedule ----- -==== - -. Add a toleration to the pods by writing a custom admission controller. diff --git a/modules/nodes-scheduler-taints-tolerations-projects.adoc b/modules/nodes-scheduler-taints-tolerations-projects.adoc deleted file mode 100644 index a10398e03048..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-projects.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-projects_{context}"] -= Creating a project with a node selector and toleration - -You can create a project that uses a node selector and toleration, which are set as annotations, to control the placement of pods onto specific nodes. 
Any subsequent resources created in the project are then scheduled on nodes that have a taint matching the toleration. - -.Prerequisites - -* A label for node selection has been added to one or more nodes by using a compute machine set or editing the node directly. -* A taint has been added to one or more nodes by using a compute machine set or editing the node directly. - -.Procedure - -. Create a `Project` resource definition, specifying a node selector and toleration in the `metadata.annotations` section: -+ -.Example `project.yaml` file -[source,yaml] ----- -kind: Project -apiVersion: project.openshift.io/v1 -metadata: - name: <project_name> <1> - annotations: - openshift.io/node-selector: '<label>' <2> - scheduler.alpha.kubernetes.io/defaultTolerations: >- - [{"operator": "Exists", "effect": "NoSchedule", "key": - "<key_name>"} <3> - ] ----- -<1> The project name. -<2> The default node selector label. -<3> The toleration parameters, as described in the *Taint and toleration components* table. This example uses the `NoSchedule` effect, which allows existing pods on the node to remain, and the `Exists` operator, which does not take a value. - -. Use the `oc apply` command to create the project: -+ -[source,terminal] ----- -$ oc apply -f project.yaml ----- - -Any subsequent resources created in the `<project_name>` namespace should now be scheduled on the specified nodes. diff --git a/modules/nodes-scheduler-taints-tolerations-removing.adoc b/modules/nodes-scheduler-taints-tolerations-removing.adoc deleted file mode 100644 index 9abb6239580d..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-removing.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-removing_{context}"] -= Removing taints and tolerations - -You can remove taints from nodes and tolerations from pods as needed. You should add the toleration to the pod first, then add the taint to the node to avoid pods being removed from the node before you can add the toleration. - -.Procedure - -To remove taints and tolerations: - -. To remove a taint from a node: -+ -[source,terminal] ----- -$ oc adm taint nodes <node-name> <key>- ----- -+ -For example: -+ -[source,terminal] ----- -$ oc adm taint nodes ip-10-0-132-248.ec2.internal key1- ----- -+ -.Example output -[source,terminal] ----- -node/ip-10-0-132-248.ec2.internal untainted ----- - -. 
To remove a toleration from a pod, edit the `Pod` spec to remove the toleration: -+ -[source,yaml] ----- -spec: - tolerations: - - key: "key2" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 3600 ----- diff --git a/modules/nodes-scheduler-taints-tolerations-special.adoc b/modules/nodes-scheduler-taints-tolerations-special.adoc deleted file mode 100644 index 50e3ed0d2c2b..000000000000 --- a/modules/nodes-scheduler-taints-tolerations-special.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/nodes-scheduler-taints-tolerations.adoc -// * post_installation_configuration/node-tasks.adoc - -:_content-type: PROCEDURE -[id="nodes-scheduler-taints-tolerations-special_{context}"] -= Controlling nodes with special hardware using taints and tolerations - -In a cluster where a small subset of nodes have specialized hardware, you can use taints and tolerations to keep pods that do not need the specialized hardware off of those nodes, leaving the nodes for pods that do need the specialized hardware. You can also require pods that need specialized hardware to use specific nodes. - -You can achieve this by adding a toleration to pods that need the special hardware and tainting the nodes that have the specialized hardware. - -.Procedure - -To ensure nodes with specialized hardware are reserved for specific pods: - -. Add a toleration to pods that need the special hardware. -+ -For example: -+ -[source,yaml] ----- -spec: - tolerations: - - key: "disktype" - value: "ssd" - operator: "Equal" - effect: "NoSchedule" - tolerationSeconds: 3600 ----- - -. Taint the nodes that have the specialized hardware using one of the following commands: -+ -[source,terminal] ----- -$ oc adm taint nodes <node-name> disktype=ssd:NoSchedule ----- -+ -Or: -+ -[source,terminal] ----- -$ oc adm taint nodes <node-name> disktype=ssd:PreferNoSchedule ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the taint: - -[source,yaml] ----- -kind: Node -apiVersion: v1 -metadata: - name: <node_name> - labels: - ... -spec: - taints: - - key: disktype - value: ssd - effect: PreferNoSchedule ----- -==== diff --git a/modules/nodes-secondary-scheduler-about.adoc b/modules/nodes-secondary-scheduler-about.adoc deleted file mode 100644 index 7ba5e5df588f..000000000000 --- a/modules/nodes-secondary-scheduler-about.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/index.adoc - -:_content-type: CONCEPT -[id="nodes-secondary-scheduler-about_{context}"] -= About the {secondary-scheduler-operator} - -The {secondary-scheduler-operator-full} provides a way to deploy a custom secondary scheduler in {product-title}. The secondary scheduler runs alongside the default scheduler to schedule pods. Pod configurations can specify which scheduler to use. - -The custom scheduler must have the `/bin/kube-scheduler` binary and be based on the link:https://kubernetes.io/docs/concepts/scheduling-eviction/scheduling-framework/[Kubernetes scheduling framework]. - -[IMPORTANT] -==== -You can use the {secondary-scheduler-operator} to deploy a custom secondary scheduler in {product-title}, but Red Hat does not directly support the functionality of the custom secondary scheduler. -==== - -The {secondary-scheduler-operator} creates the default roles and role bindings required by the secondary scheduler. 
You can specify which scheduling plugins to enable or disable by configuring the `KubeSchedulerConfiguration` resource for the secondary scheduler. diff --git a/modules/nodes-secondary-scheduler-configuring-console.adoc b/modules/nodes-secondary-scheduler-configuring-console.adoc deleted file mode 100644 index bf62a7975660..000000000000 --- a/modules/nodes-secondary-scheduler-configuring-console.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/nodes-secondary-scheduler-configuring.adoc - -:_content-type: PROCEDURE -[id="nodes-secondary-scheduler-configuring-console_{context}"] -= Deploying a secondary scheduler - -After you have installed the {secondary-scheduler-operator}, you can deploy a secondary scheduler. - -.Prerequisities - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. -* The {secondary-scheduler-operator-full} is installed. - -.Procedure - -. Log in to the {product-title} web console. -. Create config map to hold the configuration for the secondary scheduler. -.. Navigate to *Workloads* -> *ConfigMaps*. -.. Click *Create ConfigMap*. -.. In the YAML editor, enter the config map definition that contains the necessary `KubeSchedulerConfiguration` configuration. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: "secondary-scheduler-config" <1> - namespace: "openshift-secondary-scheduler-operator" <2> -data: - "config.yaml": | - apiVersion: kubescheduler.config.k8s.io/v1beta3 - kind: KubeSchedulerConfiguration <3> - leaderElection: - leaderElect: false - profiles: - - schedulerName: secondary-scheduler <4> - plugins: <5> - score: - disabled: - - name: NodeResourcesBalancedAllocation - - name: NodeResourcesLeastAllocated ----- -<1> The name of the config map. This is used in the *Scheduler Config* field when creating the `SecondaryScheduler` CR. -<2> The config map must be created in the `openshift-secondary-scheduler-operator` namespace. -<3> The `KubeSchedulerConfiguration` resource for the secondary scheduler. For more information, see link:https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration[`KubeSchedulerConfiguration`] in the Kubernetes API documentation. -<4> The name of the secondary scheduler. Pods that set their `spec.schedulerName` field to this value are scheduled with this secondary scheduler. -<5> The plugins to enable or disable for the secondary scheduler. For a list default scheduling plugins, see link:https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins[Scheduling plugins] in the Kubernetes documentation. - -.. Click *Create*. - -. Create the `SecondaryScheduler` CR: -.. Navigate to *Operators* -> *Installed Operators*. -.. Select *{secondary-scheduler-operator-full}*. -.. Select the *Secondary Scheduler* tab and click *Create SecondaryScheduler*. -.. The *Name* field defaults to `cluster`; do not change this name. -.. The *Scheduler Config* field defaults to `secondary-scheduler-config`. Ensure that this value matches the name of the config map created earlier in this procedure. -.. In the *Scheduler Image* field, enter the image name for your custom scheduler. -+ -[IMPORTANT] -==== -Red Hat does not directly support the functionality of your custom secondary scheduler. -==== - -.. Click *Create*. 
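If you prefer to create the `SecondaryScheduler` custom resource from the CLI instead of the web console, the resource might look like the following minimal sketch. The API version and the `schedulerConfig` and `schedulerImage` field names are assumptions that mirror the *Scheduler Config* and *Scheduler Image* console fields, and the image reference is a placeholder; check the CRD installed in your cluster for the authoritative schema.

[source,yaml]
----
apiVersion: operator.openshift.io/v1 # assumed API group/version; verify with `oc explain secondaryscheduler`
kind: SecondaryScheduler
metadata:
  name: cluster # the name must remain "cluster"
  namespace: openshift-secondary-scheduler-operator
spec:
  schedulerConfig: secondary-scheduler-config # must match the config map created earlier
  schedulerImage: 'registry.example.com/my-scheduler:latest' # placeholder for your custom scheduler image
----

You can then apply the file with `oc apply -f <file_name>.yaml` and confirm that pods that set `spec.schedulerName: secondary-scheduler` are scheduled by the secondary scheduler.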
diff --git a/modules/nodes-secondary-scheduler-install-console.adoc b/modules/nodes-secondary-scheduler-install-console.adoc deleted file mode 100644 index 9ad09a354e1d..000000000000 --- a/modules/nodes-secondary-scheduler-install-console.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/nodes-secondary-scheduler-configuring.adoc - -:_content-type: PROCEDURE -[id="nodes-secondary-scheduler-install-console_{context}"] -= Installing the {secondary-scheduler-operator} - -You can use the web console to install the {secondary-scheduler-operator-full}. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Create the required namespace for the {secondary-scheduler-operator-full}. -.. Navigate to *Administration* -> *Namespaces* and click *Create Namespace*. -.. Enter `openshift-secondary-scheduler-operator` in the *Name* field and click *Create*. -+ -// There are no metrics to collect for the secondary scheduler operator as of now, so no need to add the metrics label - -. Install the {secondary-scheduler-operator-full}. -.. Navigate to *Operators* -> *OperatorHub*. -.. Enter *{secondary-scheduler-operator-full}* into the filter box. -.. Select the *{secondary-scheduler-operator-full}* and click *Install*. -.. On the *Install Operator* page: -... The *Update channel* is set to *stable*, which installs the latest stable release of the {secondary-scheduler-operator-full}. -... Select *A specific namespace on the cluster* and select *openshift-secondary-scheduler-operator* from the drop-down menu. -... Select an *Update approval* strategy. -+ -* The *Automatic* strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. -* The *Manual* strategy requires a user with appropriate credentials to approve the Operator update. -... Click *Install*. - -.Verification - -. Navigate to *Operators* -> *Installed Operators*. -. Verify that *{secondary-scheduler-operator-full}* is listed with a *Status* of *Succeeded*. diff --git a/modules/nodes-secondary-scheduler-pod-console.adoc b/modules/nodes-secondary-scheduler-pod-console.adoc deleted file mode 100644 index 5cee14ee1ffb..000000000000 --- a/modules/nodes-secondary-scheduler-pod-console.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/nodes-secondary-scheduler-configuring.adoc - -:_content-type: PROCEDURE -[id="nodes-secondary-scheduler-pod-console_{context}"] -= Scheduling a pod using the secondary scheduler - -To schedule a pod using the secondary scheduler, set the `schedulerName` field in the pod definition. - -.Prerequisities - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. -* The {secondary-scheduler-operator-full} is installed. -* A secondary scheduler is configured. - -.Procedure - -. Log in to the {product-title} web console. -. Navigate to *Workloads* -> *Pods*. -. Click *Create Pod*. -. 
In the YAML editor, enter the desired pod configuration and add the `schedulerName` field: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: nginx - namespace: default -spec: - containers: - - name: nginx - image: nginx:1.14.2 - ports: - - containerPort: 80 - schedulerName: secondary-scheduler <1> ----- -<1> The `schedulerName` field must match the name that is defined in the config map when you configured the secondary scheduler. - -. Click *Create*. - -.Verification - -. Log in to the OpenShift CLI. -. Describe the pod using the following command: -+ -[source,terminal] ----- -$ oc describe pod nginx -n default ----- -+ -.Example output -[source,text] ----- -Name: nginx -Namespace: default -Priority: 0 -Node: ci-ln-t0w4r1k-72292-xkqs4-worker-b-xqkxp/10.0.128.3 -... -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 12s secondary-scheduler Successfully assigned default/nginx to ci-ln-t0w4r1k-72292-xkqs4-worker-b-xqkxp -... ----- - -. In the events table, find the event with a message similar to `Successfully assigned <namespace>/<pod_name> to <node_name>`. -. In the "From" column, verify that the event was generated from the secondary scheduler and not the default scheduler. -+ -[NOTE] -==== -You can also check the `secondary-scheduler-*` pod logs in the `openshift-secondary-scheduler-namespace` to verify that the pod was scheduled by the secondary scheduler. -==== - -//// -Due to a UI bug, can't verify via console. Bug should be fixed in 4.11 hopefully, and if so, update to use the console steps: - -.Verification -. Navigate to the *Events* tab for the pod. -. Find the event with a message similar to `Successfully assigned <namespace>/<pod_name> to <node_name>`. -. Verify that the event was generated from the secondary scheduler and not the default scheduler. -//// diff --git a/modules/nodes-secondary-scheduler-remove-resources-console.adoc b/modules/nodes-secondary-scheduler-remove-resources-console.adoc deleted file mode 100644 index 96266181d9ac..000000000000 --- a/modules/nodes-secondary-scheduler-remove-resources-console.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/nodes-secondary-scheduler-uninstalling.adoc - -:_content-type: PROCEDURE -[id="nodes-secondary-scheduler-remove-resources-console_{context}"] -= Removing {secondary-scheduler-operator} resources - -Optionally, after uninstalling the {secondary-scheduler-operator-full}, you can remove its related resources from your cluster. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. - -.Procedure - -. Log in to the {product-title} web console. - -. Remove CRDs that were installed by the {secondary-scheduler-operator}: -.. Navigate to *Administration* -> *CustomResourceDefinitions*. -.. Enter `SecondaryScheduler` in the *Name* field to filter the CRDs. -.. Click the Options menu {kebab} next to the *SecondaryScheduler* CRD and select *Delete Custom Resource Definition*: - -. Remove the `openshift-secondary-scheduler-operator` namespace. -.. Navigate to *Administration* -> *Namespaces*. -.. Click the Options menu {kebab} next to the *openshift-secondary-scheduler-operator* and select *Delete Namespace*. -.. In the confirmation dialog, enter `openshift-secondary-scheduler-operator` in the field and click *Delete*. 
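The same cleanup can be performed from the CLI. The following is a sketch that assumes the CRD installed by the Operator follows the usual `<plural>.<group>` naming; list the CRDs first and substitute the exact name that your cluster reports.

[source,terminal]
----
$ oc get crd | grep -i secondaryscheduler <1>
$ oc delete crd <secondaryscheduler_crd_name> <2>
$ oc delete namespace openshift-secondary-scheduler-operator <3>
----
<1> Confirm the exact name of the CRD installed by the Operator.
<2> Delete the CRD by using the name returned by the previous command.
<3> Remove the Operator namespace and any remaining resources in it.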
diff --git a/modules/nodes-secondary-scheduler-uninstall-console.adoc b/modules/nodes-secondary-scheduler-uninstall-console.adoc deleted file mode 100644 index e5138ea2f7f4..000000000000 --- a/modules/nodes-secondary-scheduler-uninstall-console.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * nodes/scheduling/secondary_scheduler/nodes-secondary-scheduler-uninstalling.adoc - -:_content-type: PROCEDURE -[id="nodes-secondary-scheduler-uninstall-console_{context}"] -= Uninstalling the {secondary-scheduler-operator} - -You can uninstall the {secondary-scheduler-operator-full} by using the web console. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the {product-title} web console. -* The {secondary-scheduler-operator-full} is installed. - -.Procedure - -. Log in to the {product-title} web console. -. Uninstall the {secondary-scheduler-operator-full} Operator. -.. Navigate to *Operators* -> *Installed Operators*. -.. Click the Options menu {kebab} next to the *{secondary-scheduler-operator}* entry and click *Uninstall Operator*. -.. In the confirmation dialog, click *Uninstall*. diff --git a/modules/nutanix-entitlements.adoc b/modules/nutanix-entitlements.adoc deleted file mode 100644 index 309b7517b252..000000000000 --- a/modules/nutanix-entitlements.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// * installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc - -:_content-type: CONCEPT -[id="nutanix-entitlements_{context}"] -= Internet access for Prism Central - -Prism Central requires internet access to obtain the {op-system-first} image that is required to install the cluster. The {op-system} image for Nutanix is available at `rhcos.mirror.openshift.com`. - diff --git a/modules/nvidia-gpu-admin-dashboard-installing.adoc b/modules/nvidia-gpu-admin-dashboard-installing.adoc deleted file mode 100644 index f613931960a0..000000000000 --- a/modules/nvidia-gpu-admin-dashboard-installing.adoc +++ /dev/null @@ -1,136 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/nvidia-gpu-admin-dashboard.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-admin-dashboard-installing_{context}"] -= Installing the NVIDIA GPU administration dashboard - -Install the NVIDIA GPU plugin by using Helm on the OpenShift Container Platform (OCP) Console to add GPU capabilities. - -The OpenShift Console NVIDIA GPU plugin works as a remote bundle for the OCP console. To run the OpenShift Console NVIDIA GPU plugin -an instance of the OCP console must be running. - - -.Prerequisites - -* Red Hat OpenShift 4.11+ -* NVIDIA GPU operator -* link:https://helm.sh/docs/intro/install/[Helm] - - -.Procedure - -Use the following procedure to install the OpenShift Console NVIDIA GPU plugin. - -. Add the Helm repository: -+ -[source,terminal] ----- -$ helm repo add rh-ecosystem-edge https://rh-ecosystem-edge.github.io/console-plugin-nvidia-gpu ----- -+ -[source,terminal] ----- -$ helm repo update ----- - -. 
Install the Helm chart in the default NVIDIA GPU operator namespace: -+ -[source,terminal] ----- -$ helm install -n nvidia-gpu-operator console-plugin-nvidia-gpu rh-ecosystem-edge/console-plugin-nvidia-gpu ----- -+ -.Example output -+ -[source,terminal] ----- -NAME: console-plugin-nvidia-gpu -LAST DEPLOYED: Tue Aug 23 15:37:35 2022 -NAMESPACE: nvidia-gpu-operator -STATUS: deployed -REVISION: 1 -NOTES: -View the Console Plugin NVIDIA GPU deployed resources by running the following command: - -$ oc -n {{ .Release.Namespace }} get all -l app.kubernetes.io/name=console-plugin-nvidia-gpu - -Enable the plugin by running the following command: - -# Check if a plugins field is specified -$ oc get consoles.operator.openshift.io cluster --output=jsonpath="{.spec.plugins}" - -# if not, then run the following command to enable the plugin -$ oc patch consoles.operator.openshift.io cluster --patch '{ "spec": { "plugins": ["console-plugin-nvidia-gpu"] } }' --type=merge - -# if yes, then run the following command to enable the plugin -$ oc patch consoles.operator.openshift.io cluster --patch '[{"op": "add", "path": "/spec/plugins/-", "value": "console-plugin-nvidia-gpu" }]' --type=json - -# add the required DCGM Exporter metrics ConfigMap to the existing NVIDIA operator ClusterPolicy CR: -oc patch clusterpolicies.nvidia.com gpu-cluster-policy --patch '{ "spec": { "dcgmExporter": { "config": { "name": "console-plugin-nvidia-gpu" } } } }' --type=merge - ----- -+ -The dashboard relies mostly on Prometheus metrics exposed by the NVIDIA DCGM Exporter, but the default exposed metrics are not enough for the dashboard to render the required gauges. Therefore, the DGCM exporter is configured to expose a custom set of metrics, as shown here. -+ -[source,yaml] ----- -apiVersion: v1 -data: - dcgm-metrics.csv: | - DCGM_FI_PROF_GR_ENGINE_ACTIVE, gauge, gpu utilization. - DCGM_FI_DEV_MEM_COPY_UTIL, gauge, mem utilization. - DCGM_FI_DEV_ENC_UTIL, gauge, enc utilization. - DCGM_FI_DEV_DEC_UTIL, gauge, dec utilization. - DCGM_FI_DEV_POWER_USAGE, gauge, power usage. - DCGM_FI_DEV_POWER_MGMT_LIMIT_MAX, gauge, power mgmt limit. - DCGM_FI_DEV_GPU_TEMP, gauge, gpu temp. - DCGM_FI_DEV_SM_CLOCK, gauge, sm clock. - DCGM_FI_DEV_MAX_SM_CLOCK, gauge, max sm clock. - DCGM_FI_DEV_MEM_CLOCK, gauge, mem clock. - DCGM_FI_DEV_MAX_MEM_CLOCK, gauge, max mem clock. -kind: ConfigMap -metadata: - annotations: - meta.helm.sh/release-name: console-plugin-nvidia-gpu - meta.helm.sh/release-namespace: nvidia-gpu-operator - creationTimestamp: "2022-10-26T19:46:41Z" - labels: - app.kubernetes.io/component: console-plugin-nvidia-gpu - app.kubernetes.io/instance: console-plugin-nvidia-gpu - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: console-plugin-nvidia-gpu - app.kubernetes.io/part-of: console-plugin-nvidia-gpu - app.kubernetes.io/version: latest - helm.sh/chart: console-plugin-nvidia-gpu-0.2.3 - name: console-plugin-nvidia-gpu - namespace: nvidia-gpu-operator - resourceVersion: "19096623" - uid: 96cdf700-dd27-437b-897d-5cbb1c255068 ----- -+ -Install the ConfigMap and edit the NVIDIA Operator ClusterPolicy CR to add that ConfigMap in the DCGM exporter configuration. The installation of the ConfigMap is done by the new version of the Console Plugin NVIDIA GPU Helm Chart, but the ClusterPolicy CR editing is done by the user. - -. 
View the deployed resources: -+ -[source,terminal] ----- -$ oc -n nvidia-gpu-operator get all -l app.kubernetes.io/name=console-plugin-nvidia-gpu ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/console-plugin-nvidia-gpu-7dc9cfb5df-ztksx 1/1 Running 0 2m6s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/console-plugin-nvidia-gpu ClusterIP 172.30.240.138 <none> 9443/TCP 2m6s - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/console-plugin-nvidia-gpu 1/1 1 1 2m6s - -NAME DESIRED CURRENT READY AGE -replicaset.apps/console-plugin-nvidia-gpu-7dc9cfb5df 1 1 1 2m6s ----- diff --git a/modules/nvidia-gpu-admin-dashboard-introduction.adoc b/modules/nvidia-gpu-admin-dashboard-introduction.adoc deleted file mode 100644 index 8c3cb76a9ff3..000000000000 --- a/modules/nvidia-gpu-admin-dashboard-introduction.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/nvidia-gpu-admin-dashboard.adoc - -:_content-type: CONCEPT -[id="nvidia-gpu-admin-dashboard-introduction_{context}"] -= Introduction - -The OpenShift Console NVIDIA GPU plugin is a dedicated administration dashboard for NVIDIA GPU usage visualization -in the OpenShift Container Platform (OCP) Console. The visualizations in the administration dashboard provide guidance on how to -best optimize GPU resources in clusters, such as when a GPU is under- or over-utilized. - -The OpenShift Console NVIDIA GPU plugin works as a remote bundle for the OCP console. -To run the plugin the OCP console must be running. diff --git a/modules/nvidia-gpu-admin-dashboard-using.adoc b/modules/nvidia-gpu-admin-dashboard-using.adoc deleted file mode 100644 index 24179938c6e2..000000000000 --- a/modules/nvidia-gpu-admin-dashboard-using.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/nvidia-gpu-admin-dashboard.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-admin-dashboard-using_{context}"] -= Using the NVIDIA GPU administration dashboard - -After deploying the OpenShift Console NVIDIA GPU plugin, log in to the OpenShift Container Platform web console using your login credentials to access the *Administrator* perspective. - -To view the changes, you need to refresh the console to see the **GPUs** tab under **Compute**. - - -== Viewing the cluster GPU overview - -You can view the status of your cluster GPUs in the Overview page by selecting -Overview in the Home section. - -The Overview page provides information about the cluster GPUs, including: - -* Details about the GPU providers -* Status of the GPUs -* Cluster utilization of the GPUs - -== Viewing the GPUs dashboard - -You can view the NVIDIA GPU administration dashboard by selecting GPUs -in the Compute section of the OpenShift Console. - - -Charts on the GPUs dashboard include: - -* *GPU utilization*: Shows the ratio of time the graphics engine is active and is based on the ``DCGM_FI_PROF_GR_ENGINE_ACTIVE`` metric. - -* *Memory utilization*: Shows the memory being used by the GPU and is based on the ``DCGM_FI_DEV_MEM_COPY_UTIL`` metric. - -* *Encoder utilization*: Shows the video encoder rate of utilization and is based on the ``DCGM_FI_DEV_ENC_UTIL`` metric. - -* *Decoder utilization*: *Encoder utilization*: Shows the video decoder rate of utilization and is based on the ``DCGM_FI_DEV_DEC_UTIL`` metric. - -* *Power consumption*: Shows the average power usage of the GPU in Watts and is based on the ``DCGM_FI_DEV_POWER_USAGE`` metric. 
- -* *GPU temperature*: Shows the current GPU temperature and is based on the ``DCGM_FI_DEV_GPU_TEMP`` metric. The maximum is set to ``110``, which is an empirical number, as the actual number is not exposed via a metric. - -* *GPU clock speed*: Shows the average clock speed utilized by the GPU and is based on the ``DCGM_FI_DEV_SM_CLOCK`` metric. - -* *Memory clock speed*: Shows the average clock speed utilized by memory and is based on the ``DCGM_FI_DEV_MEM_CLOCK`` metric. - -== Viewing the GPU Metrics - -You can view the metrics for the GPUs by selecting the metric at the bottom of -each GPU to view the Metrics page. - -On the Metrics page, you can: - -* Specify a refresh rate for the metrics -* Add, run, disable, and delete queries -* Insert Metrics -* Reset the zoom view diff --git a/modules/nvidia-gpu-aws-adding-a-gpu-node.adoc b/modules/nvidia-gpu-aws-adding-a-gpu-node.adoc deleted file mode 100644 index 03dc70d96b73..000000000000 --- a/modules/nvidia-gpu-aws-adding-a-gpu-node.adoc +++ /dev/null @@ -1,199 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-machinesets/creating-machineset-aws.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-aws-adding-a-gpu-node_{context}"] -= Adding a GPU node to an existing {product-title} cluster - -You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the AWS EC2 cloud provider. - -The following table lists the validated instance types: - -[cols="1,1,1,1"] -|=== -|Instance type |NVIDIA GPU accelerator |Maximum number of GPUs |Architecture - -|`p4d.24xlarge` -|A100 -|8 -|x86 - -|`g4dn.xlarge` -|T4 -|1 -|x86 -|=== - -.Procedure - -. View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific AWS region and {product-title} role. -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-52-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0 -ip-10-0-58-24.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 -ip-10-0-68-148.us-east-2.compute.internal Ready worker 3d17h v1.26.0 -ip-10-0-68-68.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 -ip-10-0-72-170.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 -ip-10-0-74-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0 ----- - -. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the AWS region. The installer automatically load balances compute machines across availability zones. -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -preserve-dsoc12r4-ktjfc-worker-us-east-2a 1 1 1 1 3d11h -preserve-dsoc12r4-ktjfc-worker-us-east-2b 2 2 2 2 3d11h ----- - -. View the machines that exist in the `openshift-machine-api` namespace by running the following command. At this time, there is only one compute machine per machine set, though a compute machine set could be scaled to add a node in a particular region and zone. 
-+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api | grep worker ----- -+ -.Example output -+ -[source,terminal] ----- -preserve-dsoc12r4-ktjfc-worker-us-east-2a-dts8r Running m5.xlarge us-east-2 us-east-2a 3d11h -preserve-dsoc12r4-ktjfc-worker-us-east-2b-dkv7w Running m5.xlarge us-east-2 us-east-2b 3d11h -preserve-dsoc12r4-ktjfc-worker-us-east-2b-k58cw Running m5.xlarge us-east-2 us-east-2b 3d11h ----- - -. Make a copy of one of the existing compute `MachineSet` definitions and output the result to a JSON file by running the following command. This will be the basis for the GPU-enabled compute machine set definition. -+ -[source,terminal] ----- -$ oc get machineset preserve-dsoc12r4-ktjfc-worker-us-east-2a -n openshift-machine-api -o json > <output_file.json> ----- - -. Edit the JSON file and make the following changes to the new `MachineSet` definition: -+ -* Replace `worker` with `gpu`. This will be the name of the new machine set. -* Change the instance type of the new `MachineSet` definition to `g4dn`, which includes an NVIDIA Tesla T4 GPU. -To learn more about AWS `g4dn` instance types, see link:https://aws.amazon.com/ec2/instance-types/#Accelerated_Computing[Accelerated Computing]. -+ -[source,terminal] ----- -$ jq .spec.template.spec.providerSpec.value.instanceType preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json - -"g4dn.xlarge" ----- -+ -The `<output_file.json>` file is saved as `preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json`. - - . Update the following fields in `preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json`: -+ -* `.metadata.name` to a name containing `gpu`. - -* `.spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"]` to -match the new `.metadata.name`. - -* `.spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"]` -to match the new `.metadata.name`. - -* `.spec.template.spec.providerSpec.value.instanceType` to `g4dn.xlarge`. - -. To verify your changes, perform a `diff` of the original compute definition and the new GPU-enabled node definition by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get preserve-dsoc12r4-ktjfc-worker-us-east-2a -o json | diff preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json - ----- -+ -.Example output -+ -[source,terminal] ----- -10c10 - -< "name": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a", ---- -> "name": "preserve-dsoc12r4-ktjfc-worker-us-east-2a", - -21c21 - -< "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a" ---- -> "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-us-east-2a" - -31c31 - -< "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a" ---- -> "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-us-east-2a" - -60c60 - -< "instanceType": "g4dn.xlarge", ---- -> "instanceType": "m5.xlarge", ----- - -. Create the GPU-enabled compute machine set from the definition by running the following command: -+ -[source,terminal] ----- -$ oc create -f preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json ----- -+ -.Example output -+ -[source,terminal] ----- -machineset.machine.openshift.io/preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a created ----- - -.Verification - -. 
View the machine set you created by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get machinesets | grep gpu ----- -+ -The MachineSet replica count is set to `1` so a new `Machine` object is created automatically. - -+ -.Example output -+ -[source,terminal] ----- -preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a 1 1 1 1 4m21s ----- - -. View the `Machine` object that the machine set created by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get machines | grep gpu ----- -+ -.Example output -+ -[source,terminal] ----- -preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a running g4dn.xlarge us-east-2 us-east-2a 4m36s ----- - -Note that there is no need to specify a namespace for the node. The node definition is cluster scoped. diff --git a/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc b/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc deleted file mode 100644 index c9d6b00936c1..000000000000 --- a/modules/nvidia-gpu-aws-deploying-the-node-feature-discovery-operator.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating_machinesets/creating-machineset-aws.adoc -// * machine_management/creating_machinesets/creating-machineset-gcp.adoc -// * machine_management/creating_machinesets/creating-machineset-azure.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-aws-deploying-the-node-feature-discovery-operator_{context}"] -= Deploying the Node Feature Discovery Operator - -After the GPU-enabled node is created, you need to discover the GPU-enabled node so it can be scheduled. To do this, install the Node Feature Discovery (NFD) Operator. The NFD Operator identifies hardware device features in nodes. It solves the general problem of identifying and cataloging hardware resources in the infrastructure nodes so they can be made available to {product-title}. - -.Procedure - -. Install the Node Feature Discovery Operator from *OperatorHub* in the {product-title} console. - -. After installing the NFD Operator into *OperatorHub*, select *Node Feature Discovery* from the installed Operators list and select *Create instance*. This installs the `nfd-master` and `nfd-worker` pods, one `nfd-worker` pod for each compute node, in the `openshift-nfd` namespace. - -. Verify that the Operator is installed and running by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-nfd ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE - -nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d ----- - -. Browse to the installed Oerator in the console and select *Create Node Feature Discovery*. - -. Select *Create* to build a NFD custom resource. This creates NFD pods in the `openshift-nfd` namespace that poll the {product-title} nodes for hardware resources and catalogue them. - -.Verification - -. After a successful build, verify that a NFD pod is running on each nodes by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-nfd ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d -nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d -nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d -nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d ----- -+ -The NFD Operator uses vendor PCI IDs to identify hardware in a node. 
NVIDIA uses the PCI ID `10de`. - -. View the NVIDIA GPU discovered by the NFD Operator by running the following command: -+ -[source,terminal] ----- -$ oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci' ----- -+ -.Example output -[source,terminal] ----- -Roles: worker - -feature.node.kubernetes.io/pci-1013.present=true - -feature.node.kubernetes.io/pci-10de.present=true - -feature.node.kubernetes.io/pci-1d0f.present=true ----- -+ -`10de` appears in the node feature list for the GPU-enabled node. This mean the NFD Operator correctly identified the node from the GPU-enabled MachineSet. diff --git a/modules/nvidia-gpu-azure-adding-a-gpu-node.adoc b/modules/nvidia-gpu-azure-adding-a-gpu-node.adoc deleted file mode 100644 index 31d71d583eb9..000000000000 --- a/modules/nvidia-gpu-azure-adding-a-gpu-node.adoc +++ /dev/null @@ -1,435 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-machinesets/creating-machineset-azure.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-aws-adding-a-gpu-node_{context}"] -= Adding a GPU node to an existing {product-title} cluster - -You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the Azure cloud provider. - -The following table lists the validated instance types: - -[cols="1,1,1,1"] -|=== -|vmSize |NVIDIA GPU accelerator |Maximum number of GPUs |Architecture - -|`Standard_NC24s_v3` -|V100 -|4 -|x86 - -|`Standard_NC4as_T4_v3` -|T4 -|1 -|x86 - -|`ND A100 v4` -|A100 -|8 -|x86 -|=== - -[NOTE] -==== -By default, Azure subscriptions do not have a quota for the Azure instance types with GPU. Customers have to request a quota increase for the Azure instance families listed above. -==== - -.Procedure - -. View the machines and machine sets that exist in the `openshift-machine-api` namespace -by running the following command. Each compute machine set is associated with a different availability zone within the Azure region. -The installer automatically load balances compute machines across availability zones. -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -myclustername-worker-centralus1 1 1 1 1 6h9m -myclustername-worker-centralus2 1 1 1 1 6h9m -myclustername-worker-centralus3 1 1 1 1 6h9m ----- - -. Make a copy of one of the existing compute `MachineSet` definitions and output the result to a YAML file by running the following command. -This will be the basis for the GPU-enabled compute machine set definition. -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api myclustername-worker-centralus1 -o yaml > machineset-azure.yaml ----- - -. 
View the content of the machineset: -+ -[source,terminal] ----- -$ cat machineset-azure.yaml ----- -+ -.Example `machineset-azure.yaml` file -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - annotations: - machine.openshift.io/GPU: "0" - machine.openshift.io/memoryMb: "16384" - machine.openshift.io/vCPU: "4" - creationTimestamp: "2023-02-06T14:08:19Z" - generation: 1 - labels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: myclustername-worker-centralus1 - namespace: openshift-machine-api - resourceVersion: "23601" - uid: acd56e0c-7612-473a-ae37-8704f34b80de -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 - spec: - lifecycleHooks: {} - metadata: {} - providerSpec: - value: - acceleratedNetworking: true - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials - namespace: openshift-machine-api - diagnostics: {} - image: - offer: "" - publisher: "" - resourceID: /resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest - sku: "" - version: "" - kind: AzureMachineProviderSpec - location: centralus - managedIdentity: myclustername-identity - metadata: - creationTimestamp: null - networkResourceGroup: myclustername-rg - osDisk: - diskSettings: {} - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: myclustername - resourceGroup: myclustername-rg - spotVMOptions: {} - subnet: myclustername-worker-subnet - userDataSecret: - name: worker-user-data - vmSize: Standard_D4s_v3 - vnet: myclustername-vnet - zone: "1" -status: - availableReplicas: 1 - fullyLabeledReplicas: 1 - observedGeneration: 1 - readyReplicas: 1 - replicas: 1 ----- - -. Make a copy of the `machineset-azure.yaml` file by running the following command: -+ -[source,terminal] ----- -$ cp machineset-azure.yaml machineset-azure-gpu.yaml ----- - -. Update the following fields in `machineset-azure-gpu.yaml`: -+ -* Change `.metadata.name` to a name containing `gpu`. - -* Change `.spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"]` to match the new .metadata.name. - -* Change `.spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"]` to match the new `.metadata.name`. - -* Change `.spec.template.spec.providerSpec.value.vmSize` to `Standard_NC4as_T4_v3`. 
-+ -.Example `machineset-azure-gpu.yaml` file -+ -[source,yaml] ----- -apiVersion: machine.openshift.io/v1beta1 -kind: MachineSet -metadata: - annotations: - machine.openshift.io/GPU: "1" - machine.openshift.io/memoryMb: "28672" - machine.openshift.io/vCPU: "4" - creationTimestamp: "2023-02-06T20:27:12Z" - generation: 1 - labels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - name: myclustername-nc4ast4-gpu-worker-centralus1 - namespace: openshift-machine-api - resourceVersion: "166285" - uid: 4eedce7f-6a57-4abe-b529-031140f02ffa -spec: - replicas: 1 - selector: - matchLabels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 - template: - metadata: - labels: - machine.openshift.io/cluster-api-cluster: myclustername - machine.openshift.io/cluster-api-machine-role: worker - machine.openshift.io/cluster-api-machine-type: worker - machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 - spec: - lifecycleHooks: {} - metadata: {} - providerSpec: - value: - acceleratedNetworking: true - apiVersion: machine.openshift.io/v1beta1 - credentialsSecret: - name: azure-cloud-credentials - namespace: openshift-machine-api - diagnostics: {} - image: - offer: "" - publisher: "" - resourceID: /resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest - sku: "" - version: "" - kind: AzureMachineProviderSpec - location: centralus - managedIdentity: myclustername-identity - metadata: - creationTimestamp: null - networkResourceGroup: myclustername-rg - osDisk: - diskSettings: {} - diskSizeGB: 128 - managedDisk: - storageAccountType: Premium_LRS - osType: Linux - publicIP: false - publicLoadBalancer: myclustername - resourceGroup: myclustername-rg - spotVMOptions: {} - subnet: myclustername-worker-subnet - userDataSecret: - name: worker-user-data - vmSize: Standard_NC4as_T4_v3 - vnet: myclustername-vnet - zone: "1" -status: - availableReplicas: 1 - fullyLabeledReplicas: 1 - observedGeneration: 1 - readyReplicas: 1 - replicas: 1 ----- - -. To verify your changes, perform a `diff` of the original compute definition and the new GPU-enabled node definition by running the following command: -+ -[source,terminal] ----- -$ diff machineset-azure.yaml machineset-azure-gpu.yaml ----- -+ -.Example output -[source,terminal] ----- -14c14 -< name: myclustername-worker-centralus1 ---- -> name: myclustername-nc4ast4-gpu-worker-centralus1 -23c23 -< machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 ---- -> machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 -30c30 -< machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 ---- -> machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 -67c67 -< vmSize: Standard_D4s_v3 ---- -> vmSize: Standard_NC4as_T4_v3 ----- - -. Create the GPU-enabled compute machine set from the definition file by running the following command: -+ -[source,terminal] ----- -$ oc create -f machineset-azure-gpu.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -machineset.machine.openshift.io/myclustername-nc4ast4-gpu-worker-centralus1 created ----- - -. 
View the machines and machine sets that exist in the `openshift-machine-api` namespace -by running the following command. Each compute machine set is associated with a -different availability zone within the Azure region. -The installer automatically load balances compute machines across availability zones. -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -clustername-n6n4r-nc4ast4-gpu-worker-centralus1 1 1 1 1 122m -clustername-n6n4r-worker-centralus1 1 1 1 1 8h -clustername-n6n4r-worker-centralus2 1 1 1 1 8h -clustername-n6n4r-worker-centralus3 1 1 1 1 8h ----- - -. View the machines that exist in the `openshift-machine-api` namespace by running the following command. You can only configure one compute machine per set, although you can scale a compute machine set to add a node in a particular region and zone. -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME PHASE TYPE REGION ZONE AGE -myclustername-master-0 Running Standard_D8s_v3 centralus 2 6h40m -myclustername-master-1 Running Standard_D8s_v3 centralus 1 6h40m -myclustername-master-2 Running Standard_D8s_v3 centralus 3 6h40m -myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running centralus 1 21m -myclustername-worker-centralus1-rbh6b Running Standard_D4s_v3 centralus 1 6h38m -myclustername-worker-centralus2-dbz7w Running Standard_D4s_v3 centralus 2 6h38m -myclustername-worker-centralus3-p9b8c Running Standard_D4s_v3 centralus 3 6h38m ----- - -. View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific Azure region and {product-title} role. -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -myclustername-master-0 Ready control-plane,master 6h39m v1.26.0 -myclustername-master-1 Ready control-plane,master 6h41m v1.26.0 -myclustername-master-2 Ready control-plane,master 6h39m v1.26.0 -myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Ready worker 14m v1.26.0 -myclustername-worker-centralus1-rbh6b Ready worker 6h29m v1.26.0 -myclustername-worker-centralus2-dbz7w Ready worker 6h29m v1.26.0 -myclustername-worker-centralus3-p9b8c Ready worker 6h31m v1.26.0 ----- - -. View the list of compute machine sets: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -myclustername-worker-centralus1 1 1 1 1 8h -myclustername-worker-centralus2 1 1 1 1 8h -myclustername-worker-centralus3 1 1 1 1 8h ----- - -. Create the GPU-enabled compute machine set from the definition file by running the following command: -+ -[source,terminal] ----- -$ oc create -f machineset-azure-gpu.yaml ----- - -. View the list of compute machine sets: -+ -[source,terminal] ----- -oc get machineset -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m -myclustername-worker-centralus1 1 1 1 1 8h -myclustername-worker-centralus2 1 1 1 1 8h -myclustername-worker-centralus3 1 1 1 1 8h ----- - -.Verification - -. 
View the machine set you created by running the following command: -+ -[source,terminal] ----- -$ oc get machineset -n openshift-machine-api | grep gpu ----- -+ -The MachineSet replica count is set to `1` so a new `Machine` object is created automatically. -+ -.Example output -+ -[source,terminal] ----- -myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m ----- - -. View the `Machine` object that the machine set created by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-machine-api get machines | grep gpu ----- -+ -.Example output -+ -[source,terminal] ----- -myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running Standard_NC4as_T4_v3 centralus 1 21m ----- - -[NOTE] -==== -There is no need to specify a namespace for the node. The node definition is cluster scoped. -==== diff --git a/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc b/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc deleted file mode 100644 index 63b548621230..000000000000 --- a/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc +++ /dev/null @@ -1,323 +0,0 @@ -// Module included in the following assemblies: -// -// * machine_management/creating-machinesets/creating-machineset-aws.adoc - -:_content-type: PROCEDURE -[id="nvidia-gpu-gcp-adding-a-gpu-node_{context}"] -= Adding a GPU node to an existing {product-title} cluster - -You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the GCP cloud provider. - -The following table lists the validated instance types: - -[cols="1,1,1,1"] -|=== -|Instance type |NVIDIA GPU accelerator |Maximum number of GPUs |Architecture - -|`a2-highgpu-1g` -|A100 -|1 -|x86 - -|`n1-standard-4` -|T4 -|1 -|x86 -|=== - -.Procedure - -. Make a copy of an existing `MachineSet`. - -. In the new copy, change the machine set `name` in `metadata.name` and in both instances of `machine.openshift.io/cluster-api-machineset`. - -. 
Change the instance type to add the following two lines to the newly copied `MachineSet`: -+ ----- -machineType: a2-highgpu-1g -onHostMaintenance: Terminate ----- -+ -.Example `a2-highgpu-1g.json` file -+ -[source,json] ----- -{ - "apiVersion": "machine.openshift.io/v1beta1", - "kind": "MachineSet", - "metadata": { - "annotations": { - "machine.openshift.io/GPU": "0", - "machine.openshift.io/memoryMb": "16384", - "machine.openshift.io/vCPU": "4" - }, - "creationTimestamp": "2023-01-13T17:11:02Z", - "generation": 1, - "labels": { - "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p" - }, - "name": "myclustername-2pt9p-worker-gpu-a", - "namespace": "openshift-machine-api", - "resourceVersion": "20185", - "uid": "2daf4712-733e-4399-b4b4-d43cb1ed32bd" - }, - "spec": { - "replicas": 1, - "selector": { - "matchLabels": { - "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p", - "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" - } - }, - "template": { - "metadata": { - "labels": { - "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p", - "machine.openshift.io/cluster-api-machine-role": "worker", - "machine.openshift.io/cluster-api-machine-type": "worker", - "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" - } - }, - "spec": { - "lifecycleHooks": {}, - "metadata": {}, - "providerSpec": { - "value": { - "apiVersion": "machine.openshift.io/v1beta1", - "canIPForward": false, - "credentialsSecret": { - "name": "gcp-cloud-credentials" - }, - "deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "image": "projects/rhcos-cloud/global/images/rhcos-412-86-202212081411-0-gcp-x86-64", - "labels": null, - "sizeGb": 128, - "type": "pd-ssd" - } - ], - "kind": "GCPMachineProviderSpec", - "machineType": "a2-highgpu-1g", - "onHostMaintenance": "Terminate", - "metadata": { - "creationTimestamp": null - }, - "networkInterfaces": [ - { - "network": "myclustername-2pt9p-network", - "subnetwork": "myclustername-2pt9p-worker-subnet" - } - ], - "preemptible": true, - "projectID": "myteam", - "region": "us-central1", - "serviceAccounts": [ - { - "email": "myclustername-2pt9p-w@myteam.iam.gserviceaccount.com", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - ], - "tags": [ - "myclustername-2pt9p-worker" - ], - "userDataSecret": { - "name": "worker-user-data" - }, - "zone": "us-central1-a" - } - } - } - } - }, - "status": { - "availableReplicas": 1, - "fullyLabeledReplicas": 1, - "observedGeneration": 1, - "readyReplicas": 1, - "replicas": 1 - } -} ----- - -. View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific GCP region and {product-title} role. 
-+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -+ -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -myclustername-2pt9p-master-0.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 -myclustername-2pt9p-master-1.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 -myclustername-2pt9p-master-2.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 -myclustername-2pt9p-worker-a-mxtnz.c.openshift-qe.internal Ready worker 8h v1.26.0 -myclustername-2pt9p-worker-b-9pzzn.c.openshift-qe.internal Ready worker 8h v1.26.0 -myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready worker 8h v1.26.0 -myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.26.0 ----- - -. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the GCP region. The installer automatically load balances compute machines across availability zones. -+ -[source,terminal] ----- -$ oc get machinesets -n openshift-machine-api ----- -+ -.Example output -+ -[source,terminal] ----- -NAME DESIRED CURRENT READY AVAILABLE AGE -myclustername-2pt9p-worker-a 1 1 1 1 8h -myclustername-2pt9p-worker-b 1 1 1 1 8h -myclustername-2pt9p-worker-c 1 1 8h -myclustername-2pt9p-worker-f 0 0 8h ----- - -. View the machines that exist in the `openshift-machine-api` namespace by running the following command. You can only configure one compute machine per set, although you can scale a compute machine set to add a node in a particular region and zone. -+ -[source,terminal] ----- -$ oc get machines -n openshift-machine-api | grep worker ----- -+ -.Example output -+ -[source,terminal] ----- -myclustername-2pt9p-worker-a-mxtnz Running n2-standard-4 us-central1 us-central1-a 8h -myclustername-2pt9p-worker-b-9pzzn Running n2-standard-4 us-central1 us-central1-b 8h -myclustername-2pt9p-worker-c-6pbg6 Running n2-standard-4 us-central1 us-central1-c 8h ----- - -. Make a copy of one of the existing compute `MachineSet` definitions and output the result to a JSON file by running the following command. This will be the basis for the GPU-enabled compute machine set definition. -+ -[source,terminal] ----- -$ oc get machineset myclustername-2pt9p-worker-a -n openshift-machine-api -o json > <output_file.json> ----- - -. Edit the JSON file to make the following changes to the new `MachineSet` definition: -+ -* Rename the machine set `name` by inserting the substring `gpu` in `metadata.name` and in both instances of `machine.openshift.io/cluster-api-machineset`. -* Change the `machineType` of the new `MachineSet` definition to `a2-highgpu-1g`, which includes an NVIDIA A100 GPU. -+ -[source,terminal] ----- -jq .spec.template.spec.providerSpec.value.machineType ocp_4.13_machineset-a2-highgpu-1g.json - -"a2-highgpu-1g" ----- -+ -The `<output_file.json>` file is saved as `ocp_4.13_machineset-a2-highgpu-1g.json`. - -. Update the following fields in `ocp_4.13_machineset-a2-highgpu-1g.json`: -+ -* Change `.metadata.name` to a name containing `gpu`. - -* Change `.spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"]` to -match the new `.metadata.name`. - -* Change `.spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"]` -to match the new `.metadata.name`. - -* Change `.spec.template.spec.providerSpec.value.MachineType` to `a2-highgpu-1g`. 
-
-* Add the following line under `machineType`: `"onHostMaintenance": "Terminate"`. For example:
-+
-[source,json]
-----
-"machineType": "a2-highgpu-1g",
-"onHostMaintenance": "Terminate",
-----
-
-. To verify your changes, perform a `diff` of the original compute definition and the new GPU-enabled node definition by running the following command:
-+
-[source,terminal]
-----
-$ oc get machineset/myclustername-2pt9p-worker-a -n openshift-machine-api -o json | diff ocp_4.13_machineset-a2-highgpu-1g.json -
-----
-+
-.Example output
-+
-[source,terminal]
-----
-15c15
-< "name": "myclustername-2pt9p-worker-gpu-a",
----
-> "name": "myclustername-2pt9p-worker-a",
-25c25
-< "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a"
----
-> "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-a"
-34c34
-< "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a"
----
-> "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-a"
-59,60c59
-< "machineType": "a2-highgpu-1g",
-< "onHostMaintenance": "Terminate",
----
-> "machineType": "n2-standard-4",
-----
-
-. Create the GPU-enabled compute machine set from the definition file by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f ocp_4.13_machineset-a2-highgpu-1g.json
-----
-+
-.Example output
-+
-[source,terminal]
-----
-machineset.machine.openshift.io/myclustername-2pt9p-worker-gpu-a created
-----
-
-.Verification
-
-. View the machine set you created by running the following command:
-+
-[source,terminal]
-----
-$ oc -n openshift-machine-api get machinesets | grep gpu
-----
-+
-The MachineSet replica count is set to `1` so a new `Machine` object is created automatically.
-
-+
-.Example output
-+
-[source,terminal]
-----
-myclustername-2pt9p-worker-gpu-a 1 1 1 1 5h24m
-----
-
-. View the `Machine` object that the machine set created by running the following command:
-+
-[source,terminal]
-----
-$ oc -n openshift-machine-api get machines | grep gpu
-----
-+
-.Example output
-+
-[source,terminal]
-----
-myclustername-2pt9p-worker-gpu-a-wxcr6 Running a2-highgpu-1g us-central1 us-central1-a 5h25m
-----
-
-[NOTE]
-====
-There is no need to specify a namespace for the node. The node definition is cluster scoped.
-====
diff --git a/modules/nw-about-multicast.adoc b/modules/nw-about-multicast.adoc
deleted file mode 100644
index 1f4298e4b6df..000000000000
--- a/modules/nw-about-multicast.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/openshift_sdn/enabling-multicast.adoc
-// * networking/ovn_kubernetes_network_provider/enabling-multicast.adoc
-
-ifeval::["{context}" == "openshift-sdn-enabling-multicast"]
-:openshift-sdn:
-:sdn: OpenShift SDN
-endif::[]
-ifeval::["{context}" == "ovn-kubernetes-enabling-multicast"]
-:ovn:
-:sdn: OVN-Kubernetes
-endif::[]
-
-:_content-type: CONCEPT
-[id="nw-about-multicast_{context}"]
-= About multicast
-
-With IP multicast, data is broadcast to many IP addresses simultaneously.
-
-[IMPORTANT]
-====
-At this time, multicast is best used for low-bandwidth coordination or service
-discovery and not a high-bandwidth solution.
-====
-
-Multicast traffic between {product-title} pods is disabled by default. If you are using the {sdn} network plugin, you can enable multicast on a per-project basis.
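-
-As a minimal illustrative sketch (the exact command is covered in the enabling-multicast procedures, and `<project_name>` is a placeholder), enabling multicast for a project with the OVN-Kubernetes plugin generally amounts to annotating the project namespace:
-
-[source,terminal]
-----
-$ oc annotate namespace <project_name> k8s.ovn.org/multicast-enabled=true
-----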
- -ifdef::openshift-sdn[] -When using the OpenShift SDN network plugin in `networkpolicy` isolation mode: - -* Multicast packets sent by a pod will be delivered to all other pods in the project, regardless of `NetworkPolicy` objects. Pods might be able to communicate over multicast even when they cannot communicate over unicast. -* Multicast packets sent by a pod in one project will never be delivered to pods in any other project, even if there are `NetworkPolicy` objects that allow communication between the projects. - -When using the OpenShift SDN network plugin in `multitenant` isolation mode: - -* Multicast packets sent by a pod will be delivered to all other pods in the -project. -* Multicast packets sent by a pod in one project will be delivered to pods in -other projects only if each project is joined together and multicast is enabled -in each joined project. -endif::openshift-sdn[] - -ifeval::["{context}" == "openshift-sdn-enabling-multicast"] -:!openshift-sdn: -endif::[] -ifeval::["{context}" == "ovn-kubernetes-enabling-multicast"] -:!ovn: -endif::[] -ifdef::sdn[] -:!sdn: -endif::sdn[] diff --git a/modules/nw-annotating-a-route-with-a-cookie-name.adoc b/modules/nw-annotating-a-route-with-a-cookie-name.adoc deleted file mode 100644 index 8310c87042e1..000000000000 --- a/modules/nw-annotating-a-route-with-a-cookie-name.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module filename: nw-annotating-a-route-with-a-cookie-name.adoc -// Use module with the following module: -// nw-using-cookies-keep-route-statefulness.adoc -// -// Module included in the following assemblies: -// -// * networking/configuring-routing.adoc - -:_content-type: PROCEDURE -[id="nw-annotating-a-route-with-a-cookie-name_{context}"] -= Annotating a route with a cookie - -You can set a cookie name to overwrite the default, auto-generated one for the route. This allows the application receiving route traffic to know the cookie name. By deleting the cookie it can force the next request to re-choose an endpoint. So, if a server was overloaded it tries to remove the requests from the client and redistribute them. - -.Procedure - -. Annotate the route with the specified cookie name: -+ -[source,terminal] ----- -$ oc annotate route <route_name> router.openshift.io/cookie_name="<cookie_name>" ----- -+ --- -where: - -`<route_name>`:: Specifies the name of the route. -`<cookie_name>`:: Specifies the name for the cookie. --- -+ -For example, to annotate the route `my_route` with the cookie name `my_cookie`: -+ -[source,terminal] ----- -$ oc annotate route my_route router.openshift.io/cookie_name="my_cookie" ----- - -. Capture the route hostname in a variable: -+ -[source,terminal] ----- -$ ROUTE_NAME=$(oc get route <route_name> -o jsonpath='{.spec.host}') ----- -+ --- -where: - -`<route_name>`:: Specifies the name of the route. --- - -. 
Save the cookie, and then access the route: -+ -[source,terminal] ----- -$ curl $ROUTE_NAME -k -c /tmp/cookie_jar ----- -+ -Use the cookie saved by the previous command when connecting to the route: -+ -[source,terminal] ----- -$ curl $ROUTE_NAME -k -b /tmp/cookie_jar ----- diff --git a/modules/nw-autoscaling-ingress-controller.adoc b/modules/nw-autoscaling-ingress-controller.adoc deleted file mode 100644 index 345619e23666..000000000000 --- a/modules/nw-autoscaling-ingress-controller.adoc +++ /dev/null @@ -1,244 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-controller-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-autoscaling-ingress-controller_{context}"] -= Autoscaling an Ingress Controller - -Automatically scale an Ingress Controller to dynamically meet routing performance or availability requirements such as the requirement to increase throughput. The following procedure provides an example for scaling up the default `IngressController`. - -.Prerequisites -. You have the OpenShift CLI (`oc`) installed. -. You have access to an {product-title} cluster as a user with the `cluster-admin` role. -. You have the Custom Metrics Autoscaler Operator installed. - -.Procedure -. Create a project in the `openshift-ingress-operator` namespace by running the following command: -+ -[source,terminal] ----- -$ oc project openshift-ingress-operator ----- - -. Enable OpenShift monitoring for user-defined projects by creating and applying a config map: - -.. Create a new `ConfigMap` object, `cluster-monitoring-config.yaml`: -+ -.cluster-monitoring-config.yaml -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-monitoring-config - namespace: openshift-monitoring -data: - config.yaml: | - enableUserWorkload: true <1> ----- -+ -<1> When set to `true`, the `enableUserWorkload` parameter enables monitoring for user-defined projects in a cluster. - -.. Apply the config map by running the following command: -+ -[source,terminal] ----- -$ oc apply -f cluster-monitoring-config.yaml ----- - -. Create a service account to authenticate with Thanos by running the following command: -+ -[source,terminal] ----- -$ oc create serviceaccount thanos && oc describe serviceaccount thanos ----- -+ -.Example output -[source,terminal] ----- -Name: thanos -Namespace: openshift-ingress-operator -Labels: <none> -Annotations: <none> -Image pull secrets: thanos-dockercfg-b4l9s -Mountable secrets: thanos-dockercfg-b4l9s -Tokens: thanos-token-c422q -Events: <none> ----- - -. Define a `TriggerAuthentication` object within the `openshift-ingress-operator` namespace using the service account's token. - -.. Define the variable `secret` that contains the secret by running the following command: -+ -[source,terminal] ----- -$ secret=$(oc get secret | grep thanos-token | head -n 1 | awk '{ print $1 }') ----- - -.. Create the `TriggerAuthentication` object and pass the value of the `secret` variable to the `TOKEN` parameter: -+ -[source,terminal] ----- -$ oc process TOKEN="$secret" -f - <<EOF | oc apply -f - -apiVersion: template.openshift.io/v1 -kind: Template -parameters: -- name: TOKEN -objects: -- apiVersion: keda.sh/v1alpha1 - kind: TriggerAuthentication - metadata: - name: keda-trigger-auth-prometheus - spec: - secretTargetRef: - - parameter: bearerToken - name: \${TOKEN} - key: token - - parameter: ca - name: \${TOKEN} - key: ca.crt -EOF ----- - -. Create and apply a role for reading metrics from Thanos: - -.. 
Create a new role, `thanos-metrics-reader.yaml`, that reads metrics from pods and nodes:
-+
-.thanos-metrics-reader.yaml
-[source,yaml]
-----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: thanos-metrics-reader
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - nodes
-  verbs:
-  - get
-- apiGroups:
-  - metrics.k8s.io
-  resources:
-  - pods
-  - nodes
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - namespaces
-  verbs:
-  - get
-----
-
-.. Apply the new role by running the following command:
-+
-[source,terminal]
-----
-$ oc apply -f thanos-metrics-reader.yaml
-----
-
-. Add the new role to the service account by entering the following commands:
-+
-[source,terminal]
-----
-$ oc adm policy add-role-to-user thanos-metrics-reader -z thanos --role-namespace=openshift-ingress-operator
-----
-+
-[source,terminal]
-----
-$ oc adm policy -n openshift-ingress-operator add-cluster-role-to-user cluster-monitoring-view -z thanos
-----
-+
-[NOTE]
-====
-The argument `add-cluster-role-to-user` is only required if you use cross-namespace queries. The following step uses a query from the `kube-metrics` namespace, which requires this argument.
-====
-
-. Create a new `ScaledObject` YAML file, `ingress-autoscaler.yaml`, that targets the default Ingress Controller deployment:
-+
-.Example `ScaledObject` definition
-[source,yaml]
-----
-apiVersion: keda.sh/v1alpha1
-kind: ScaledObject
-metadata:
-  name: ingress-scaler
-spec:
-  scaleTargetRef: <1>
-    apiVersion: operator.openshift.io/v1
-    kind: IngressController
-    name: default
-    envSourceContainerName: ingress-operator
-  minReplicaCount: 1
-  maxReplicaCount: 20 <2>
-  cooldownPeriod: 1
-  pollingInterval: 1
-  triggers:
-  - type: prometheus
-    metricType: AverageValue
-    metadata:
-      serverAddress: https://<example-cluster>:9091 <3>
-      namespace: openshift-ingress-operator <4>
-      metricName: 'kube-node-role'
-      threshold: '1'
-      query: 'sum(kube_node_role{role="worker",service="kube-state-metrics"})' <5>
-      authModes: "bearer"
-    authenticationRef:
-      name: keda-trigger-auth-prometheus
-----
-<1> The custom resource that you are targeting. In this case, the Ingress Controller.
-<2> Optional: The maximum number of replicas. If you omit this field, the default maximum is set to 100 replicas.
-<3> The cluster address and port.
-<4> The Ingress Operator namespace.
-<5> This expression evaluates to however many worker nodes are present in the deployed cluster.
-+
-[IMPORTANT]
-====
-If you are using cross-namespace queries, you must target port 9091 and not port 9092 in the `serverAddress` field. You also must have elevated privileges to read metrics from this port.
-====
-
-. 
Apply the custom resource definition by running the following command: -+ -[source,terminal] ----- -$ oc apply -f ingress-autoscaler.yaml ----- - -.Verification -* Verify that the default Ingress Controller is scaled out to match the value returned by the `kube-state-metrics` query by running the following commands: - -** Use the `grep` command to search the Ingress Controller YAML file for replicas: -+ -[source,terminal] ----- -$ oc get ingresscontroller/default -o yaml | grep replicas: ----- -+ -.Example output -[source,terminal] ----- -replicas: 3 ----- - -** Get the pods in the `openshift-ingress` project: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ingress ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -router-default-7b5df44ff-l9pmm 2/2 Running 0 17h -router-default-7b5df44ff-s5sl5 2/2 Running 0 3d22h -router-default-7b5df44ff-wwsth 2/2 Running 0 66s ----- diff --git a/modules/nw-aws-load-balancer-logs.adoc b/modules/nw-aws-load-balancer-logs.adoc deleted file mode 100644 index 6d7617456654..000000000000 --- a/modules/nw-aws-load-balancer-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * networking/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc - -:_content-type: PROCEDURE -[id="nw-aws-load-balancer-operator-logs_{context}"] -= AWS Load Balancer Operator logs - -Use the `oc logs` command to view the AWS Load Balancer Operator logs. - -.Procedure - -* View the logs of the AWS Load Balancer Operator: -+ -[source,terminal] ----- -$ oc logs -n aws-load-balancer-operator deployment/aws-load-balancer-operator-controller-manager -c manager ----- diff --git a/modules/nw-aws-load-balancer-operator.adoc b/modules/nw-aws-load-balancer-operator.adoc deleted file mode 100644 index de98c601bbf4..000000000000 --- a/modules/nw-aws-load-balancer-operator.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// * networking/aws_load_balancer_operator/understanding-aws-load-balancer-operator.adoc - -:_content-type: PROCEDURE -[id="nw-aws-load-balancer-operator_{context}"] -= AWS Load Balancer Operator - -The AWS Load Balancer Operator can tag the public subnets if the `kubernetes.io/role/elb` tag is missing. Also, the AWS Load Balancer Operator detects the following from the underlying AWS cloud: - -* The ID of the virtual private cloud (VPC) on which the cluster hosting the Operator is deployed in. - -* Public and private subnets of the discovered VPC. - -The AWS Load Balancer Operator supports the Kubernetes service resource of type `LoadBalancer` by using Network Load Balancer (NLB) with the `instance` target type only. - -.Prerequisites - -* You must have the AWS credentials secret. The credentials are used to provide subnet tagging and VPC discovery. - -.Procedure - -. You can deploy the AWS Load Balancer Operator on demand from the OperatorHub, by creating a `Subscription` object: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get sub aws-load-balancer-operator --template='{{.status.installplan.name}}{{"\n"}}' ----- -+ -.Example output -[source,terminal] ----- -install-zlfbt ----- - -. Check the status of an install plan. The status of an install plan must be `Complete`: -+ -[source,terminal] ----- -$ oc -n aws-load-balancer-operator get ip <install_plan_name> --template='{{.status.phase}}{{"\n"}}' ----- -+ -.Example output -[source,terminal] ----- -Complete ----- - -. 
Use the `oc get` command to view the `Deployment` status: -+ -[source,terminal] ----- -$ oc get -n aws-load-balancer-operator deployment/aws-load-balancer-operator-controller-manager ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -aws-load-balancer-operator-controller-manager 1/1 1 1 23h ----- diff --git a/modules/nw-aws-nlb-existing-cluster.adoc b/modules/nw-aws-nlb-existing-cluster.adoc deleted file mode 100644 index 46e48635cf40..000000000000 --- a/modules/nw-aws-nlb-existing-cluster.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: PROCEDURE -[id="nw-aws-nlb-existing-cluster_{context}"] -= Configuring an Ingress Controller Network Load Balancer on an existing AWS cluster - -You can create an Ingress Controller backed by an AWS Network Load Balancer (NLB) on an existing cluster. - -.Prerequisites - -* You must have an installed AWS cluster. -* `PlatformStatus` of the infrastructure resource must be AWS. -** To verify that the `PlatformStatus` is AWS, run: -+ -[source,terminal] ----- -$ oc get infrastructure/cluster -o jsonpath='{.status.platformStatus.type}' -AWS ----- - -.Procedure - -Create an Ingress Controller backed by an AWS NLB on an existing cluster. - -. Create the Ingress Controller manifest: -+ -[source,terminal] ----- - $ cat ingresscontroller-aws-nlb.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: $my_ingress_controller<1> - namespace: openshift-ingress-operator -spec: - domain: $my_unique_ingress_domain<2> - endpointPublishingStrategy: - type: LoadBalancerService - loadBalancer: - scope: External<3> - providerParameters: - type: AWS - aws: - type: NLB ----- -<1> Replace `$my_ingress_controller` with a unique name for the Ingress Controller. -<2> Replace `$my_unique_ingress_domain` with a domain name that is unique among all Ingress Controllers in the cluster. This variable must be a subdomain of the DNS name `<clustername>.<domain>`. -<3> You can replace `External` with `Internal` to use an internal NLB. - -. Create the resource in the cluster: -+ -[source,terminal] ----- -$ oc create -f ingresscontroller-aws-nlb.yaml ----- diff --git a/modules/nw-aws-nlb-new-cluster.adoc b/modules/nw-aws-nlb-new-cluster.adoc deleted file mode 100644 index ab845fbadc59..000000000000 --- a/modules/nw-aws-nlb-new-cluster.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: PROCEDURE -[id="nw-aws-nlb-new-cluster_{context}"] -= Configuring an Ingress Controller Network Load Balancer on a new AWS cluster - -You can create an Ingress Controller backed by an AWS Network Load Balancer (NLB) on a new cluster. - -.Prerequisites - -* Create the `install-config.yaml` file and complete any modifications to it. - -.Procedure - -Create an Ingress Controller backed by an AWS NLB on a new cluster. - -. 
Change to the directory that contains the installation program and create the manifests: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the name of the directory that -contains the `install-config.yaml` file for your cluster. - -. Create a file that is named `cluster-ingress-default-ingresscontroller.yaml` in the `<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -$ touch <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml <1> ----- -<1> For `<installation_directory>`, specify the directory name that contains the -`manifests/` directory for your cluster. -+ -After creating the file, several network configuration files are in the -`manifests/` directory, as shown: -+ -[source,terminal] ----- -$ ls <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml ----- -+ -.Example output -[source,terminal] ----- -cluster-ingress-default-ingresscontroller.yaml ----- - -. Open the `cluster-ingress-default-ingresscontroller.yaml` file in an editor and enter a custom resource (CR) that describes the Operator configuration you want: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - creationTimestamp: null - name: default - namespace: openshift-ingress-operator -spec: - endpointPublishingStrategy: - loadBalancer: - scope: External - providerParameters: - type: AWS - aws: - type: NLB - type: LoadBalancerService ----- - -. Save the `cluster-ingress-default-ingresscontroller.yaml` file and quit the text editor. -. Optional: Back up the `manifests/cluster-ingress-default-ingresscontroller.yaml` file. The installation program deletes the `manifests/` directory when creating the cluster. diff --git a/modules/nw-aws-replacing-clb-with-nlb.adoc b/modules/nw-aws-replacing-clb-with-nlb.adoc deleted file mode 100644 index cbef3e3b62c2..000000000000 --- a/modules/nw-aws-replacing-clb-with-nlb.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: PROCEDURE -[id="nw-aws-replacing-clb-with-nlb_{context}"] -= Replacing Ingress Controller Classic Load Balancer with Network Load Balancer - -You can replace an Ingress Controller that is using a Classic Load Balancer (CLB) with one that uses a Network Load Balancer (NLB) on AWS. - -[WARNING] -==== -This procedure might cause the following issues: - -* An outage that can last several minutes due to new DNS records propagation, new load balancers provisioning, and other factors. IP addresses and canonical names of the Ingress Controller load balancer might change after applying this procedure. - -* Leaked load balancer resources due to a change in the annotation of the service. -==== - -.Procedure - -. Create a file with a new default Ingress Controller. 
The following example assumes that your default Ingress Controller has an `External` scope and no other customizations:
-+
-.Example `ingresscontroller.yml` file
-[source,yaml]
-----
-apiVersion: operator.openshift.io/v1
-kind: IngressController
-metadata:
-  creationTimestamp: null
-  name: default
-  namespace: openshift-ingress-operator
-spec:
-  endpointPublishingStrategy:
-    loadBalancer:
-      scope: External
-      providerParameters:
-        type: AWS
-        aws:
-          type: NLB
-    type: LoadBalancerService
-----
-+
-If your default Ingress Controller has other customizations, ensure that you modify the file accordingly.
-+
-[TIP]
-====
-If your Ingress Controller has no other customizations and you are only updating the load balancer type, consider following the procedure detailed in "Switching the Ingress Controller from using a Classic Load Balancer to a Network Load Balancer".
-====
-
-. Force replace the Ingress Controller YAML file:
-+
-[source,terminal]
-----
-$ oc replace --force --wait -f ingresscontroller.yml
-----
-+
-Wait until the Ingress Controller is replaced. Expect several minutes of outages.
diff --git a/modules/nw-aws-switching-clb-with-nlb.adoc b/modules/nw-aws-switching-clb-with-nlb.adoc
deleted file mode 100644
index 5816c64c1d6d..000000000000
--- a/modules/nw-aws-switching-clb-with-nlb.adoc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc
-
-:_content-type: PROCEDURE
-[id="nw-aws-switching-clb-with-nlb_{context}"]
-= Switching the Ingress Controller from using a Classic Load Balancer to a Network Load Balancer
-
-You can switch the Ingress Controller that is using a Classic Load Balancer (CLB) to one that uses a Network Load Balancer (NLB) on AWS.
-
-Switching between these load balancers will not delete the `IngressController` object.
-
-[WARNING]
-====
-This procedure might cause the following issues:
-
-* An outage that can last several minutes due to new DNS records propagation, new load balancers provisioning, and other factors. IP addresses and canonical names of the Ingress Controller load balancer might change after applying this procedure.
-
-* Leaked load balancer resources due to a change in the annotation of the service.
-====
-
-.Procedure
-
-. Modify the existing Ingress Controller that you want to switch to using an NLB. This example assumes that your default Ingress Controller has an `External` scope and no other customizations:
-+
-.Example `ingresscontroller.yaml` file
-[source,yaml]
-----
-apiVersion: operator.openshift.io/v1
-kind: IngressController
-metadata:
-  creationTimestamp: null
-  name: default
-  namespace: openshift-ingress-operator
-spec:
-  endpointPublishingStrategy:
-    loadBalancer:
-      scope: External
-      providerParameters:
-        type: AWS
-        aws:
-          type: NLB
-    type: LoadBalancerService
-----
-+
-[NOTE]
-====
-If you do not specify a value for the `spec.endpointPublishingStrategy.loadBalancer.providerParameters.aws.type` field, the Ingress Controller uses the `spec.loadBalancer.platform.aws.type` value from the cluster `Ingress` configuration that was set during installation.
-====
-+
-[TIP]
-====
-If your Ingress Controller has other customizations that you want to update, such as changing the domain, consider force replacing the Ingress Controller definition file instead.
-====
-
-. 
Apply the changes to the Ingress Controller YAML file by running the command: -+ -[source,terminal] ----- -$ oc apply -f ingresscontroller.yaml ----- -+ -Expect several minutes of outages while the Ingress Controller updates. diff --git a/modules/nw-aws-switching-nlb-with-clb.adoc b/modules/nw-aws-switching-nlb-with-clb.adoc deleted file mode 100644 index 81f988756e60..000000000000 --- a/modules/nw-aws-switching-nlb-with-clb.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: PROCEDURE -[id="nw-aws-switching-nlb-with-clb_{context}"] -= Switching the Ingress Controller from using a Network Load Balancer to a Classic Load Balancer - -You can switch the Ingress Controller that is using a Network Load Balancer (NLB) to one that uses a Classic Load Balancer (CLB) on AWS. - -Switching between these load balancers will not delete the `IngressController` object. - -[WARNING] -==== -This procedure might cause an outage that can last several minutes due to new DNS records propagation, new load balancers provisioning, and other factors. IP addresses and canonical names of the Ingress Controller load balancer might change after applying this procedure. -==== - -.Procedure - -. Modify the existing Ingress Controller that you want to switch to using a CLB. This example assumes that your default Ingress Controller has an `External` scope and no other customizations: -+ -.Example `ingresscontroller.yaml` file -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - creationTimestamp: null - name: default - namespace: openshift-ingress-operator -spec: - endpointPublishingStrategy: - loadBalancer: - scope: External - providerParameters: - type: AWS - aws: - type: Classic - type: LoadBalancerService ----- -+ -[NOTE] -==== -If you do not specify a value for the `spec.endpointPublishingStrategy.loadBalancer.providerParameters.aws.type` field, the Ingress Controller uses the `spec.loadBalancer.platform.aws.type` value from the cluster `Ingress` configuration that was set during installation. -==== -+ -[TIP] -==== -If your Ingress Controller has other customizations that you want to update, such as changing the domain, consider force replacing the Ingress Controller definition file instead. -==== - -. Apply the changes to the Ingress Controller YAML file by running the command: -+ -[source,terminal] ----- -$ oc apply -f ingresscontroller.yaml ----- -+ -Expect several minutes of outages while the Ingress Controller updates. diff --git a/modules/nw-cfg-tuning-interface-cni.adoc b/modules/nw-cfg-tuning-interface-cni.adoc deleted file mode 100644 index a74479ff4eee..000000000000 --- a/modules/nw-cfg-tuning-interface-cni.adoc +++ /dev/null @@ -1,158 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/setting-interface-level-network-sysctls.adoc -:_content-type: PROCEDURE -[id="nw-configuring-tuning-cni_{context}"] -= Configuring the tuning CNI - -The following procedure configures the tuning CNI to change the interface-level network `net.ipv4.conf.IFNAME.accept_redirects` sysctl. This example enables accepting and sending ICMP-redirected packets. - -.Procedure - -. 
Create a network attachment definition, such as `tuning-example.yaml`, with the following content: -+ -[source,yaml] ----- -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: <name> <1> - namespace: default <2> -spec: - config: '{ - "cniVersion": "0.4.0", <3> - "name": "<name>", <4> - "plugins": [{ - "type": "<main_CNI_plugin>" <5> - }, - { - "type": "tuning", <6> - "sysctl": { - "net.ipv4.conf.IFNAME.accept_redirects": "1" <7> - } - } - ] -} ----- -<1> Specifies the name for the additional network attachment to create. The name must be unique within the specified namespace. -<2> Specifies the namespace that the object is associated with. -<3> Specifies the CNI specification version. -<4> Specifies the name for the configuration. It is recommended to match the configuration name to the name value of the network attachment definition. -<5> Specifies the name of the main CNI plugin to configure. -<6> Specifies the name of the CNI meta plugin. -<7> Specifies the sysctl to set. -+ -An example yaml file is shown here: -+ -[source,yaml] ----- -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: tuningnad - namespace: default -spec: - config: '{ - "cniVersion": "0.4.0", - "name": "tuningnad", - "plugins": [{ - "type": "bridge" - }, - { - "type": "tuning", - "sysctl": { - "net.ipv4.conf.IFNAME.accept_redirects": "1" - } - } - ] -}' ----- - -. Apply the yaml by running the following command: -+ -[source,terminal] ----- -$ oc apply -f tuning-example.yaml ----- -+ -.Example output -[source,terminal] ----- -networkattachmentdefinition.k8.cni.cncf.io/tuningnad created ----- - -. Create a pod such as `examplepod.yaml` with the network attachment definition similar to the following: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: tunepod - namespace: default - annotations: - k8s.v1.cni.cncf.io/networks: tuningnad <1> -spec: - containers: - - name: podexample - image: centos - command: ["/bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 <2> - runAsGroup: 3000 <3> - allowPrivilegeEscalation: false <4> - capabilities: <5> - drop: ["ALL"] - securityContext: - runAsNonRoot: true <6> - seccompProfile: <7> - type: RuntimeDefault ----- -<1> Specify the name of the configured `NetworkAttachmentDefinition`. -<2> `runAsUser` controls which user ID the container is run with. -<3> `runAsGroup` controls which primary group ID the containers is run with. -<4> `allowPrivilegeEscalation` determines if a pod can request to allow privilege escalation. If unspecified, it defaults to true. This boolean directly controls whether the `no_new_privs` flag gets set on the container process. -<5> `capabilities` permit privileged actions without giving full root access. This policy ensures all capabilities are dropped from the pod. -<6> `runAsNonRoot: true` requires that the container will run with a user with any UID other than 0. -<7> `RuntimeDefault` enables the default seccomp profile for a pod or container workload. - -. Apply the yaml by running the following command: -+ -[source,terminal] ----- -$ oc apply -f examplepod.yaml ----- - -. Verify that the pod is created by running the following command: -+ -[source,terminal] ----- -$ oc get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -tunepod 1/1 Running 0 47s ----- - -. Log in to the pod by running the following command: -+ -[source,terminal] ----- -$ oc rsh tunepod ----- - -. Verify the values of the configured sysctl flags. 
For example, find the value `net.ipv4.conf.net1.accept_redirects` by running the following command: -+ -[source,terminal] ----- -sh-4.4# sysctl net.ipv4.conf.net1.accept_redirects ----- -+ -.Expected output -[source,terminal] ----- -net.ipv4.conf.net1.accept_redirects = 1 ----- diff --git a/modules/nw-cluster-mtu-change-about.adoc b/modules/nw-cluster-mtu-change-about.adoc deleted file mode 100644 index 0520651c6e2b..000000000000 --- a/modules/nw-cluster-mtu-change-about.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/changing-cluster-network-mtu.adoc - -:_content-type: CONCEPT -[id="nw-cluster-mtu-change-about_{context}"] -= About the cluster MTU - -During installation the maximum transmission unit (MTU) for the cluster network is detected automatically based on the MTU of the primary network interface of nodes in the cluster. You do not normally need to override the detected MTU. - -You might want to change the MTU of the cluster network for several reasons: - -* The MTU detected during cluster installation is not correct for your infrastructure -* Your cluster infrastructure now requires a different MTU, such as from the addition of nodes that need a different MTU for optimal performance - -You can change the cluster MTU for only the OVN-Kubernetes and OpenShift SDN cluster network plugins. - -// https://github.com/openshift/enhancements/blob/master/enhancements/network/allow-mtu-changes.md -[id="service-interruption-considerations_{context}"] -== Service interruption considerations - -When you initiate an MTU change on your cluster the following effects might impact service availability: - -* At least two rolling reboots are required to complete the migration to a new MTU. During this time, some nodes are not available as they restart. - -* Specific applications deployed to the cluster with shorter timeout intervals than the absolute TCP timeout interval might experience disruption during the MTU change. - -[id="mtu-value-selection_{context}"] -== MTU value selection - -When planning your MTU migration there are two related but distinct MTU values to consider. - -* *Hardware MTU*: This MTU value is set based on the specifics of your network infrastructure. -* *Cluster network MTU*: This MTU value is always less than your hardware MTU to account for the cluster network overlay overhead. The specific overhead is determined by your network plugin: -** *OVN-Kubernetes*: `100` bytes -** *OpenShift SDN*: `50` bytes - -If your cluster requires different MTU values for different nodes, you must subtract the overhead value for your network plugin from the lowest MTU value that is used by any node in your cluster. For example, if some nodes in your cluster have an MTU of `9001`, and some have an MTU of `1500`, you must set this value to `1400`. - -[id="how-the-migration-process-works_{context}"] -== How the migration process works - -The following table summarizes the migration process by segmenting between the user-initiated steps in the process and the actions that the migration performs in response. - -.Live migration of the cluster MTU -[cols="1a,1a",options="header"] -|=== - -|User-initiated steps|{product-title} activity - -| -Set the following values in the Cluster Network Operator configuration: - -- `spec.migration.mtu.machine.to` -- `spec.migration.mtu.network.from` -- `spec.migration.mtu.network.to` - -| -*Cluster Network Operator (CNO)*: Confirms that each field is set to a valid value. 
- -- The `mtu.machine.to` must be set to either the new hardware MTU or to the current hardware MTU if the MTU for the hardware is not changing. This value is transient and is used as part of the migration process. Separately, if you specify a hardware MTU that is different from your existing hardware MTU value, you must manually configure the MTU to persist by other means, such as with a machine config, DHCP setting, or a Linux kernel command line. -- The `mtu.network.from` field must equal the `network.status.clusterNetworkMTU` field, which is the current MTU of the cluster network. -- The `mtu.network.to` field must be set to the target cluster network MTU and must be lower than the hardware MTU to allow for the overlay overhead of the network plugin. For OVN-Kubernetes, the overhead is `100` bytes and for OpenShift SDN the overhead is `50` bytes. - -If the values provided are valid, the CNO writes out a new temporary configuration with the MTU for the cluster network set to the value of the `mtu.network.to` field. - -*Machine Config Operator (MCO)*: Performs a rolling reboot of each node in the cluster. - -|Reconfigure the MTU of the primary network interface for the nodes on the cluster. You can use a variety of methods to accomplish this, including: - -- Deploying a new NetworkManager connection profile with the MTU change -- Changing the MTU through a DHCP server setting -- Changing the MTU through boot parameters -|N/A - -|Set the `mtu` value in the CNO configuration for the network plugin and set `spec.migration` to `null`. - -| -*Machine Config Operator (MCO)*: Performs a rolling reboot of each node in the cluster with the new MTU configuration. - -|=== diff --git a/modules/nw-cluster-mtu-change.adoc b/modules/nw-cluster-mtu-change.adoc deleted file mode 100644 index 40fc583387c8..000000000000 --- a/modules/nw-cluster-mtu-change.adoc +++ /dev/null @@ -1,375 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/changing-cluster-network-mtu.adoc - -:_content-type: PROCEDURE -[id="nw-cluster-mtu-change_{context}"] -= Changing the cluster MTU - -As a cluster administrator, you can change the maximum transmission unit (MTU) for your cluster. The migration is disruptive and nodes in your cluster might be temporarily unavailable as the MTU update rolls out. - -The following procedure describes how to change the cluster MTU by using either machine configs, DHCP, or an ISO. If you use the DHCP or ISO approach, you must refer to configuration artifacts that you kept after installing your cluster to complete the procedure. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. -* You identified the target MTU for your cluster. The correct MTU varies depending on the network plugin that your cluster uses: -** *OVN-Kubernetes*: The cluster MTU must be set to `100` less than the lowest hardware MTU value in your cluster. -** *OpenShift SDN*: The cluster MTU must be set to `50` less than the lowest hardware MTU value in your cluster. - -.Procedure - -To increase or decrease the MTU for the cluster network complete the following procedure. - -. To obtain the current MTU for the cluster network, enter the following command: -+ -[source,terminal] ----- -$ oc describe network.config cluster ----- -+ -.Example output -[source,text] ----- -... 
-Status: - Cluster Network: - Cidr: 10.217.0.0/22 - Host Prefix: 23 - Cluster Network MTU: 1400 - Network Type: OpenShiftSDN - Service Network: - 10.217.4.0/23 -... ----- - -. Prepare your configuration for the hardware MTU: - -** If your hardware MTU is specified with DHCP, update your DHCP configuration such as with the following dnsmasq configuration: -+ -[source,text] ----- -dhcp-option-force=26,<mtu> ----- -+ --- -where: - -`<mtu>`:: Specifies the hardware MTU for the DHCP server to advertise. --- - -** If your hardware MTU is specified with a kernel command line with PXE, update that configuration accordingly. - -** If your hardware MTU is specified in a NetworkManager connection configuration, complete the following steps. This approach is the default for {product-title} if you do not explicitly specify your network configuration with DHCP, a kernel command line, or some other method. Your cluster nodes must all use the same underlying network configuration for the following procedure to work unmodified. - -... Find the primary network interface: - -**** If you are using the OpenShift SDN network plugin, enter the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> -- chroot /host ip route list match 0.0.0.0/0 | awk '{print $5 }' ----- -+ --- -where: - -`<node_name>`:: Specifies the name of a node in your cluster. --- - -**** If you are using the OVN-Kubernetes network plugin, enter the following command: -+ -[source,terminal] ----- -$ oc debug node/<node_name> -- chroot /host nmcli -g connection.interface-name c show ovs-if-phys0 ----- -+ --- -where: - -`<node_name>`:: Specifies the name of a node in your cluster. --- - -... Create the following NetworkManager configuration in the `<interface>-mtu.conf` file: -+ -.Example NetworkManager connection configuration -[source,ini] ----- -[connection-<interface>-mtu] -match-device=interface-name:<interface> -ethernet.mtu=<mtu> ----- -+ --- -where: - -`<mtu>`:: Specifies the new hardware MTU value. -`<interface>`:: Specifies the primary network interface name. --- - -... Create two `MachineConfig` objects, one for the control plane nodes and another for the worker nodes in your cluster: - -.... Create the following Butane config in the `control-plane-interface.bu` file: -+ -[source,yaml, subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: 01-control-plane-interface - labels: - machineconfiguration.openshift.io/role: master -storage: - files: - - path: /etc/NetworkManager/conf.d/99-<interface>-mtu.conf <1> - contents: - local: <interface>-mtu.conf <2> - mode: 0600 ----- -<1> Specify the NetworkManager connection name for the primary network interface. -<2> Specify the local filename for the updated NetworkManager configuration file from the previous step. - -.... Create the following Butane config in the `worker-interface.bu` file: -+ -[source,yaml, subs="attributes+"] ----- -variant: openshift -version: {product-version}.0 -metadata: - name: 01-worker-interface - labels: - machineconfiguration.openshift.io/role: worker -storage: - files: - - path: /etc/NetworkManager/conf.d/99-<interface>-mtu.conf <1> - contents: - local: <interface>-mtu.conf <2> - mode: 0600 ----- -<1> Specify the NetworkManager connection name for the primary network interface. -<2> Specify the local filename for the updated NetworkManager configuration file from the previous step. - -.... 
Create `MachineConfig` objects from the Butane configs by running the following command: -+ -[source,terminal] ----- -$ for manifest in control-plane-interface worker-interface; do - butane --files-dir . $manifest.bu > $manifest.yaml - done ----- - -. To begin the MTU migration, specify the migration configuration by entering the following command. The Machine Config Operator performs a rolling reboot of the nodes in the cluster in preparation for the MTU change. -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge --patch \ - '{"spec": { "migration": { "mtu": { "network": { "from": <overlay_from>, "to": <overlay_to> } , "machine": { "to" : <machine_to> } } } } }' ----- -+ --- -where: - -`<overlay_from>`:: Specifies the current cluster network MTU value. -`<overlay_to>`:: Specifies the target MTU for the cluster network. This value is set relative to the value for `<machine_to>` and for OVN-Kubernetes must be `100` less and for OpenShift SDN must be `50` less. -`<machine_to>`:: Specifies the MTU for the primary network interface on the underlying host network. --- -+ -.Example that increases the cluster MTU -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge --patch \ - '{"spec": { "migration": { "mtu": { "network": { "from": 1400, "to": 9000 } , "machine": { "to" : 9100} } } } }' ----- - -. As the MCO updates machines in each machine config pool, it reboots each node one by one. You must wait until all the nodes are updated. Check the machine config pool status by entering the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -A successfully updated node has the following status: `UPDATED=true`, `UPDATING=false`, `DEGRADED=false`. -+ -[NOTE] -==== -By default, the MCO updates one machine per pool at a time, causing the total time the migration takes to increase with the size of the cluster. -==== - -. Confirm the status of the new machine configuration on the hosts: - -.. To list the machine configuration state and the name of the applied machine configuration, enter the following command: -+ -[source,terminal] ----- -$ oc describe node | egrep "hostname|machineconfig" ----- -+ -.Example output -[source,text] ----- -kubernetes.io/hostname=master-0 -machineconfiguration.openshift.io/currentConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/desiredConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/reason: -machineconfiguration.openshift.io/state: Done ----- -+ -Verify that the following statements are true: -+ --- -* The value of `machineconfiguration.openshift.io/state` field is `Done`. -* The value of the `machineconfiguration.openshift.io/currentConfig` field is equal to the value of the `machineconfiguration.openshift.io/desiredConfig` field. --- - -.. To confirm that the machine config is correct, enter the following command: -+ -[source,terminal] ----- -$ oc get machineconfig <config_name> -o yaml | grep ExecStart ----- -+ -where `<config_name>` is the name of the machine config from the `machineconfiguration.openshift.io/currentConfig` field. -+ -The machine config must include the following update to the systemd configuration: -+ -[source,plain] ----- -ExecStart=/usr/local/bin/mtu-migration.sh ----- - -. Update the underlying network interface MTU value: - -** If you are specifying the new MTU with a NetworkManager connection configuration, enter the following command. 
The MachineConfig Operator automatically performs a rolling reboot of the nodes in your cluster. -+ -[source,terminal] ----- -$ for manifest in control-plane-interface worker-interface; do - oc create -f $manifest.yaml - done ----- - -** If you are specifying the new MTU with a DHCP server option or a kernel command line and PXE, make the necessary changes for your infrastructure. - -. As the MCO updates machines in each machine config pool, it reboots each node one by one. You must wait until all the nodes are updated. Check the machine config pool status by entering the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -A successfully updated node has the following status: `UPDATED=true`, `UPDATING=false`, `DEGRADED=false`. -+ -[NOTE] -==== -By default, the MCO updates one machine per pool at a time, causing the total time the migration takes to increase with the size of the cluster. -==== - -. Confirm the status of the new machine configuration on the hosts: - -.. To list the machine configuration state and the name of the applied machine configuration, enter the following command: -+ -[source,terminal] ----- -$ oc describe node | egrep "hostname|machineconfig" ----- -+ -.Example output -[source,text] ----- -kubernetes.io/hostname=master-0 -machineconfiguration.openshift.io/currentConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/desiredConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/reason: -machineconfiguration.openshift.io/state: Done ----- -+ -Verify that the following statements are true: -+ --- - * The value of `machineconfiguration.openshift.io/state` field is `Done`. - * The value of the `machineconfiguration.openshift.io/currentConfig` field is equal to the value of the `machineconfiguration.openshift.io/desiredConfig` field. --- - -.. To confirm that the machine config is correct, enter the following command: -+ -[source,terminal] ----- -$ oc get machineconfig <config_name> -o yaml | grep path: ----- -+ -where `<config_name>` is the name of the machine config from the `machineconfiguration.openshift.io/currentConfig` field. -+ -If the machine config is successfully deployed, the previous output contains the `/etc/NetworkManager/system-connections/<connection_name>` file path. -+ -The machine config must not contain the `ExecStart=/usr/local/bin/mtu-migration.sh` line. - -. To finalize the MTU migration, enter one of the following commands: -** If you are using the OVN-Kubernetes network plugin: -+ -[source,terminal] -+ ----- -$ oc patch Network.operator.openshift.io cluster --type=merge --patch \ - '{"spec": { "migration": null, "defaultNetwork":{ "ovnKubernetesConfig": { "mtu": <mtu> }}}}' ----- -+ --- -where: - -`<mtu>`:: Specifies the new cluster network MTU that you specified with `<overlay_to>`. --- - -** If you are using the OpenShift SDN network plugin: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge --patch \ - '{"spec": { "migration": null, "defaultNetwork":{ "openshiftSDNConfig": { "mtu": <mtu> }}}}' ----- -+ --- -where: - -`<mtu>`:: Specifies the new cluster network MTU that you specified with `<overlay_to>`. --- - -.Verification - -You can verify that a node in your cluster uses an MTU that you specified in the previous procedure. - -. To get the current MTU for the cluster network, enter the following command: -+ -[source,terminal] ----- -$ oc describe network.config cluster ----- - -. 
Get the current MTU for the primary network interface of a node. - -.. To list the nodes in your cluster, enter the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- - -.. To obtain the current MTU setting for the primary network interface on a node, enter the following command: -+ -[source,terminal] ----- -$ oc debug node/<node> -- chroot /host ip address show <interface> ----- -+ -where: -+ --- -`<node>`:: Specifies a node from the output from the previous step. -`<interface>`:: Specifies the primary network interface name for the node. --- -+ -.Example output -[source,text] ----- -ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 8051 ----- diff --git a/modules/nw-cluster-network-operator.adoc b/modules/nw-cluster-network-operator.adoc deleted file mode 100644 index 0b2433d77540..000000000000 --- a/modules/nw-cluster-network-operator.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// * networking/cluster-network-operator.adoc - -:_content-type: PROCEDURE -[id="nw-cluster-network-operator_{context}"] -= Cluster Network Operator - -The Cluster Network Operator implements the `network` API from the `operator.openshift.io` API group. -The Operator deploys the OVN-Kubernetes network plugin, or the network provider plugin that you selected during cluster installation, by using a daemon set. - -.Procedure - -The Cluster Network Operator is deployed during installation as a Kubernetes -`Deployment`. - -. Run the following command to view the Deployment status: -+ -[source,terminal] ----- -$ oc get -n openshift-network-operator deployment/network-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -network-operator 1/1 1 1 56m ----- - -. Run the following command to view the state of the Cluster Network Operator: -+ -[source,terminal] ----- -$ oc get clusteroperator/network ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -network 4.5.4 True False False 50m ----- -+ -The following fields provide information about the status of the operator: -`AVAILABLE`, `PROGRESSING`, and `DEGRADED`. The `AVAILABLE` field is `True` when -the Cluster Network Operator reports an available status condition. diff --git a/modules/nw-cluster-network-range-edit.adoc b/modules/nw-cluster-network-range-edit.adoc deleted file mode 100644 index d850af29408d..000000000000 --- a/modules/nw-cluster-network-range-edit.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-cluster-network-range.adoc - -:_content-type: PROCEDURE -[id="nw-cluster-network-range-edit_{context}"] -= Expanding the cluster network IP address range - -You can expand the IP address range for the cluster network. Because this change requires rolling out a new Operator configuration across the cluster, it can take up to 30 minutes to take effect. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. -* Ensure that the cluster uses the OVN-Kubernetes network plugin. - -.Procedure - -. To obtain the cluster network range and host prefix for your cluster, enter the following command: -+ -[source,terminal] ----- -$ oc get network.operator.openshift.io \ - -o jsonpath="{.items[0].spec.clusterNetwork}" ----- -+ -.Example output -[source,text] ----- -[{"cidr":"10.217.0.0/22","hostPrefix":23}] ----- - -. To expand the cluster network IP address range, enter the following command. 
Use the CIDR IP address range and host prefix returned from the output of the previous command. -+ -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster --type='merge' --patch \ - '{ - "spec":{ - "clusterNetwork": [ {"cidr":"<network>/<cidr>","hostPrefix":<prefix>} ], - "networkType": "OVNKubernetes" - } - }' ----- -+ --- -where: - -`<network>`:: Specifies the network part of the `cidr` field that you obtained from the previous step. You cannot change this value. -`<cidr>`:: Specifies the network prefix length. For example, `14`. Change this value to a smaller number than the value from the output in the previous step to expand the cluster network range. -`<prefix>`:: Specifies the current host prefix for your cluster. This value must be the same value for the `hostPrefix` field that you obtained from the previous step. --- -+ -.Example command -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster --type='merge' --patch \ - '{ - "spec":{ - "clusterNetwork": [ {"cidr":"10.217.0.0/14","hostPrefix": 23} ], - "networkType": "OVNKubernetes" - } - }' ----- -+ -.Example output -[source,text] ----- -network.config.openshift.io/cluster patched ----- - -. To confirm that the configuration is active, enter the following command. It can take up to 30 minutes for this change to take effect. -+ -[source,terminal] ----- -$ oc get network.operator.openshift.io \ - -o jsonpath="{.items[0].spec.clusterNetwork}" ----- -+ -.Example output -[source,text] ----- -[{"cidr":"10.217.0.0/14","hostPrefix":23}] ----- diff --git a/modules/nw-cno-logs.adoc b/modules/nw-cno-logs.adoc deleted file mode 100644 index 75b226149f44..000000000000 --- a/modules/nw-cno-logs.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/cluster-network-operator.adoc - -:_content-type: PROCEDURE -[id="nw-cno-logs_{context}"] -= Viewing Cluster Network Operator logs - -You can view Cluster Network Operator logs by using the `oc logs` command. - -.Procedure - -* Run the following command to view the logs of the Cluster Network Operator: -+ -[source,terminal] ----- -$ oc logs --namespace=openshift-network-operator deployment/network-operator ----- diff --git a/modules/nw-cno-status.adoc b/modules/nw-cno-status.adoc deleted file mode 100644 index d3d44e0869b7..000000000000 --- a/modules/nw-cno-status.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/cluster-network-operator.adoc - -:_content-type: PROCEDURE -[id="nw-cno-status_{context}"] -= Viewing Cluster Network Operator status - -You can inspect the status and view the details of the Cluster Network Operator -using the `oc describe` command. - -.Procedure - -* Run the following command to view the status of the Cluster Network Operator: -+ -[source,terminal] ----- -$ oc describe clusteroperators/network ----- diff --git a/modules/nw-cno-view.adoc b/modules/nw-cno-view.adoc deleted file mode 100644 index 8b449fbcf1c7..000000000000 --- a/modules/nw-cno-view.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/cluster-network-operator.adoc - -:_content-type: PROCEDURE -[id="nw-cno-view_{context}"] -= Viewing the cluster network configuration - -Every new {product-title} installation has a `network.config` object named -`cluster`. 
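If you only need a single field rather than the full output shown in the following procedure, a JSONPath query can be quicker. The following is a minimal sketch, assuming the default object name `cluster`; it prints only the configured network plugin type:

[source,terminal]
----
$ oc get network.config/cluster -o jsonpath='{.spec.networkType}{"\n"}'
----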
- -.Procedure - -* Use the `oc describe` command to view the cluster network configuration: -+ -[source,terminal] ----- -$ oc describe network.config/cluster ----- -+ -.Example output -[source,terminal] ----- -Name: cluster -Namespace: -Labels: <none> -Annotations: <none> -API Version: config.openshift.io/v1 -Kind: Network -Metadata: - Self Link: /apis/config.openshift.io/v1/networks/cluster -Spec: <1> - Cluster Network: - Cidr: 10.128.0.0/14 - Host Prefix: 23 - Network Type: OpenShiftSDN - Service Network: - 172.30.0.0/16 -Status: <2> - Cluster Network: - Cidr: 10.128.0.0/14 - Host Prefix: 23 - Cluster Network MTU: 8951 - Network Type: OpenShiftSDN - Service Network: - 172.30.0.0/16 -Events: <none> ----- -<1> The `Spec` field displays the configured state of the cluster network. -<2> The `Status` field displays the current state of the cluster network -configuration. - -//// -* Use the `oc describe` command to view the cluster network configuration: -+ -[source,terminal] ----- -$ oc describe network.operator/cluster - -Name: cluster -Namespace: -Labels: <none> -Annotations: <none> -API Version: operator.openshift.io/v1 -Kind: Network -Metadata: - Self Link: /apis/operator.openshift.io/v1/networks/cluster -Spec: - Cluster Network: - Cidr: 10.128.0.0/14 - Host Prefix: 23 - Default Network: - Type: OpenShiftSDN - Service Network: - 172.30.0.0/16 -Status: -Events: <none> ----- -//// diff --git a/modules/nw-columbiaville-ptp-config-refererence.adoc b/modules/nw-columbiaville-ptp-config-refererence.adoc deleted file mode 100644 index b0d297d7e54b..000000000000 --- a/modules/nw-columbiaville-ptp-config-refererence.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: REFERENCE -[id="nw-columbiaville-ptp-config-refererence_{context}"] -= Intel Columbiaville E800 series NIC as PTP ordinary clock reference - -The following table describes the changes that you must make to the reference PTP configuration in order to use Intel Columbiaville E800 series NICs as ordinary clocks. Make the changes in a `PtpConfig` custom resource (CR) that you apply to the cluster. - -.Recommended PTP settings for Intel Columbiaville NIC -[options="header"] -|==== -|PTP configuration|Recommended setting -|`phc2sysOpts`|`-a -r -m -n 24 -N 8 -R 16` -|`tx_timestamp_timeout`|`50` -|`boundary_clock_jbod`|`0` -|==== - -[NOTE] -==== -For `phc2sysOpts`, `-m` prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. -==== - - - diff --git a/modules/nw-configure-ingress-access-logging.adoc b/modules/nw-configure-ingress-access-logging.adoc deleted file mode 100644 index bbc1fb899516..000000000000 --- a/modules/nw-configure-ingress-access-logging.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-configure-ingress-access-logging_{context}"] -= Configuring Ingress access logging - -You can configure the Ingress Controller to enable access logs. If you have clusters that do not receive much traffic, then you can log to a sidecar. If you have high traffic clusters, to avoid exceeding the capacity of the logging stack or to integrate with a logging infrastructure outside of {product-title}, you can forward logs to a custom syslog endpoint. You can also specify the format for access logs. 
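Before you choose a destination, you might want to check whether access logging is already configured. As a rough check, assuming the default Ingress Controller, the following command prints the current `spec.logging` stanza, which is empty when access logging is disabled:

[source,terminal]
----
$ oc get ingresscontroller/default -n openshift-ingress-operator -o jsonpath='{.spec.logging}{"\n"}'
----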
- -Container logging is useful to enable access logs on low-traffic clusters when there is no existing Syslog logging infrastructure, or for short-term use while diagnosing problems with the Ingress Controller. - -Syslog is needed for high-traffic clusters where access logs could exceed the OpenShift Logging stack's capacity, or for environments where any logging solution needs to integrate with an existing Syslog logging infrastructure. The Syslog use-cases can overlap. - -.Prerequisites - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -Configure Ingress access logging to a sidecar. - -* To configure Ingress access logging, you must specify a destination using `spec.logging.access.destination`. To specify logging to a sidecar container, you must specify `Container` `spec.logging.access.destination.type`. The following example is an Ingress Controller definition that logs to a `Container` destination: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: 2 - logging: - access: - destination: - type: Container ----- - -* When you configure the Ingress Controller to log to a sidecar, the operator creates a container named `logs` inside the Ingress Controller Pod: -+ -[source,terminal] ----- -$ oc -n openshift-ingress logs deployment.apps/router-default -c logs ----- -+ -.Example output -[source,terminal] ----- -2020-05-11T19:11:50.135710+00:00 router-default-57dfc6cd95-bpmk6 router-default-57dfc6cd95-bpmk6 haproxy[108]: 174.19.21.82:39654 [11/May/2020:19:11:50.133] public be_http:hello-openshift:hello-openshift/pod:hello-openshift:hello-openshift:10.128.2.12:8080 0/0/1/0/1 200 142 - - --NI 1/1/0/0/0 0/0 "GET / HTTP/1.1" ----- - -Configure Ingress access logging to a Syslog endpoint. - -* To configure Ingress access logging, you must specify a destination using `spec.logging.access.destination`. To specify logging to a Syslog endpoint destination, you must specify `Syslog` for `spec.logging.access.destination.type`. If the destination type is `Syslog`, you must also specify a destination endpoint using `spec.logging.access.destination.syslog.endpoint` and you can specify a facility using `spec.logging.access.destination.syslog.facility`. The following example is an Ingress Controller definition that logs to a `Syslog` destination: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: 2 - logging: - access: - destination: - type: Syslog - syslog: - address: 1.2.3.4 - port: 10514 ----- -+ -[NOTE] -==== -The `syslog` destination port must be UDP. -==== - -Configure Ingress access logging with a specific log format. - -* You can specify `spec.logging.access.httpLogFormat` to customize the log format. The following example is an Ingress Controller definition that logs to a `syslog` endpoint with IP address 1.2.3.4 and port 10514: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: 2 - logging: - access: - destination: - type: Syslog - syslog: - address: 1.2.3.4 - port: 10514 - httpLogFormat: '%ci:%cp [%t] %ft %b/%s %B %bq %HM %HU %HV' ----- - -Disable Ingress access logging. 
- -* To disable Ingress access logging, leave `spec.logging` or `spec.logging.access` empty: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: 2 - logging: - access: null ----- diff --git a/modules/nw-configure-sysctl-interface-sriov-network-bonded.adoc b/modules/nw-configure-sysctl-interface-sriov-network-bonded.adoc deleted file mode 100644 index 19d7abe3373f..000000000000 --- a/modules/nw-configure-sysctl-interface-sriov-network-bonded.adoc +++ /dev/null @@ -1,229 +0,0 @@ -// Module included in the following assemblies: -// -//networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: PROCEDURE -[id="configuring-sysctl-on-bonded-sriov-network_{context}"] -= Configuring sysctl on a bonded SR-IOV network - -You can set interface specific `sysctl` settings on a bonded interface created from two SR-IOV interfaces. Do this by adding the tuning configuration to the optional `Plugins` parameter of the bond network attachment definition. - -[NOTE] -==== -Do not edit `NetworkAttachmentDefinition` custom resources that the SR-IOV Network Operator manages. Doing so might disrupt network traffic on your additional network. -==== - -To change specific interface-level network `sysctl` settings create the `SriovNetwork` custom resource (CR) with the Container Network Interface (CNI) tuning plugin by using the following procedure. - -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the {product-title} cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `SriovNetwork` custom resource (CR) for the bonded interface as in the following example CR. Save the YAML as the file `sriov-network-attachment.yaml`. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: allvalidflags <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: policyallflags <3> - networkNamespace: sysctl-tuning-test <4> - capabilities: '{ "mac": true, "ips": true }' <5> ----- -<1> A name for the object. The SR-IOV Network Operator creates a NetworkAttachmentDefinition object with same name. -<2> The namespace where the SR-IOV Network Operator is installed. -<3> The value for the `spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. -<4> The target namespace for the `SriovNetwork` object. Only pods in the target namespace can attach to the additional network. -<5> Optional: The capabilities to configure for this additional network. You can specify `"{ "ips": true }"` to enable IP address support or `"{ "mac": true }"` to enable MAC address support. - -. Create the `SriovNetwork` resource: -+ -[source,terminal] ----- -$ oc create -f sriov-network-attachment.yaml ----- - -. Create a bond network attachment definition as in the following example CR. Save the YAML as the file `sriov-bond-network-interface.yaml`. 
-+ -[source,yaml] ----- -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: bond-sysctl-network - namespace: sysctl-tuning-test -spec: - config: '{ - "cniVersion":"0.4.0", - "name":"bound-net", - "plugins":[ - { - "type":"bond", <1> - "mode": "active-backup", <2> - "failOverMac": 1, <3> - "linksInContainer": true, <4> - "miimon": "100", - "links": [ <5> - {"name": "net1"}, - {"name": "net2"} - ], - "ipam":{ <6> - "type":"static" - } - }, - { - "type":"tuning", <7> - "capabilities":{ - "mac":true - }, - "sysctl":{ - "net.ipv4.conf.IFNAME.accept_redirects": "0", - "net.ipv4.conf.IFNAME.accept_source_route": "0", - "net.ipv4.conf.IFNAME.disable_policy": "1", - "net.ipv4.conf.IFNAME.secure_redirects": "0", - "net.ipv4.conf.IFNAME.send_redirects": "0", - "net.ipv6.conf.IFNAME.accept_redirects": "0", - "net.ipv6.conf.IFNAME.accept_source_route": "1", - "net.ipv6.neigh.IFNAME.base_reachable_time_ms": "20000", - "net.ipv6.neigh.IFNAME.retrans_time_ms": "2000" - } - } - ] -}' ----- -<1> The type is `bond`. -<2> The `mode` attribute specifies the bonding mode. The bonding modes supported are: - - * `balance-rr` - 0 - * `active-backup` - 1 - * `balance-xor` - 2 -+ -For `balance-rr` or `balance-xor` modes, you must set the `trust` mode to `on` for the SR-IOV virtual function. -<3> The `failover` attribute is mandatory for active-backup mode. -<4> The `linksInContainer=true` flag informs the Bond CNI that the required interfaces are to be found inside the container. By default, Bond CNI looks for these interfaces on the host which does not work for integration with SRIOV and Multus. -<5> The `links` section defines which interfaces will be used to create the bond. By default, Multus names the attached interfaces as: "net", plus a consecutive number, starting with one. -<6> A configuration object for the IPAM CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. In this pod example IP addresses are configured manually, so in this case,`ipam` is set to static. -<7> Add additional capabilities to the device. For example, set the `type` field to `tuning`. Specify the interface-level network `sysctl` you want to set in the sysctl field. This example sets all interface-level network `sysctl` settings that can be set. - -. Create the bond network attachment resource: -+ -[source,terminal] ----- -$ oc create -f sriov-bond-network-interface.yaml ----- - -.Verifying that the `NetworkAttachmentDefinition` CR is successfully created - -* Confirm that the SR-IOV Network Operator created the `NetworkAttachmentDefinition` CR by running the following command: -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n <namespace> <1> ----- -<1> Replace `<namespace>` with the networkNamespace that you specified when configuring the network attachment, for example, `sysctl-tuning-test`. -+ -.Example output -[source,terminal] ----- -NAME AGE -bond-sysctl-network 22m -allvalidflags 47m ----- -+ -[NOTE] -==== -There might be a delay before the SR-IOV Network Operator creates the CR. -==== - -.Verifying that the additional SR-IOV network resource is successful - -To verify that the tuning CNI is correctly configured and the additional SR-IOV network attachment is attached, do the following: - -. Create a `Pod` CR. 
For example, save the following YAML as the file `examplepod.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: tunepod - namespace: sysctl-tuning-test - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - {"name": "allvalidflags"}, <1> - {"name": "allvalidflags"}, - { - "name": "bond-sysctl-network", - "interface": "bond0", - "mac": "0a:56:0a:83:04:0c", <2> - "ips": ["10.100.100.200/24"] <3> - } - ] -spec: - containers: - - name: podexample - image: centos - command: ["/bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 - runAsGroup: 3000 - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault ----- -<1> The name of the SR-IOV network attachment definition CR. -<2> Optional: The MAC address for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. To use this feature, you also must specify `{ "mac": true }` in the SriovNetwork object. -<3> Optional: IP addresses for the SR-IOV device that are allocated from the resource type defined in the SR-IOV network attachment definition CR. Both IPv4 and IPv6 addresses are supported. To use this feature, you also must specify `{ "ips": true }` in the `SriovNetwork` object. - -. Apply the YAML: -+ -[source,terminal] ----- -$ oc apply -f examplepod.yaml ----- - -. Verify that the pod is created by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n sysctl-tuning-test ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -tunepod 1/1 Running 0 47s ----- - -. Log in to the pod by running the following command: -+ -[source,terminal] ----- -$ oc rsh -n sysctl-tuning-test tunepod ----- - -. Verify the values of the configured `sysctl` flag. Find the value `net.ipv6.neigh.IFNAME.base_reachable_time_ms` by running the following command:: -+ -[source,terminal] ----- -$ sysctl net.ipv6.neigh.bond0.base_reachable_time_ms ----- -+ -.Example output -[source,terminal] ----- -net.ipv6.neigh.bond0.base_reachable_time_ms = 20000 ----- \ No newline at end of file diff --git a/modules/nw-configure-sysctl-interface-sriov-network.adoc b/modules/nw-configure-sysctl-interface-sriov-network.adoc deleted file mode 100644 index 0ca2aee4cb93..000000000000 --- a/modules/nw-configure-sysctl-interface-sriov-network.adoc +++ /dev/null @@ -1,171 +0,0 @@ -// Module included in the following assemblies: -// -//networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: PROCEDURE -[id="configuring-sysctl-on-sriov-network_{context}"] -= Configuring sysctl on a SR-IOV network - -You can set interface specific `sysctl` settings on virtual interfaces created by SR-IOV by adding the tuning configuration to the optional `metaPlugins` parameter of the `SriovNetwork` resource. - -The SR-IOV Network Operator manages additional network definitions. When you specify an additional SR-IOV network to create, the SR-IOV Network Operator creates the `NetworkAttachmentDefinition` custom resource (CR) automatically. - -[NOTE] -==== -Do not edit `NetworkAttachmentDefinition` custom resources that the SR-IOV Network Operator manages. Doing so might disrupt network traffic on your additional network. -==== - -To change the interface-level network `net.ipv4.conf.IFNAME.accept_redirects` `sysctl` settings, create an additional SR-IOV network with the Container Network Interface (CNI) tuning plugin. 
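As a quick preliminary check, you can confirm that the SR-IOV Network Operator components are running before you create the additional network. This is a minimal sketch that assumes the Operator is installed in the default `openshift-sriov-network-operator` namespace:

[source,terminal]
----
$ oc get pods -n openshift-sriov-network-operator
----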
- -.Prerequisites - -* Install the {product-title} CLI (oc). -* Log in to the {product-title} cluster as a user with cluster-admin privileges. - -.Procedure - -. Create the `SriovNetwork` custom resource (CR) for the additional SR-IOV network attachment and insert the `metaPlugins` configuration, as in the following example CR. Save the YAML as the file `sriov-network-interface-sysctl.yaml`. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: onevalidflag <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: policyoneflag <3> - networkNamespace: sysctl-tuning-test <4> - ipam: '{ "type": "static" }' <5> - capabilities: '{ "mac": true, "ips": true }' <6> - metaPlugins : | <7> - { - "type": "tuning", - "capabilities":{ - "mac":true - }, - "sysctl":{ - "net.ipv4.conf.IFNAME.accept_redirects": "1" - } - } ----- -<1> A name for the object. The SR-IOV Network Operator creates a NetworkAttachmentDefinition object with same name. -<2> The namespace where the SR-IOV Network Operator is installed. -<3> The value for the `spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. -<4> The target namespace for the `SriovNetwork` object. Only pods in the target namespace can attach to the additional network. -<5> A configuration object for the IPAM CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. -<6> Optional: Set capabilities for the additional network. You can specify `"{ "ips": true }"` to enable IP address support or `"{ "mac": true }"` to enable MAC address support. -<7> Optional: The metaPlugins parameter is used to add additional capabilities to the device. In this use case set the `type` field to `tuning`. Specify the interface-level network `sysctl` you want to set in the `sysctl` field. - -. Create the `SriovNetwork` resource: -+ -[source,terminal] ----- -$ oc create -f sriov-network-interface-sysctl.yaml ----- - -.Verifying that the `NetworkAttachmentDefinition` CR is successfully created - -* Confirm that the SR-IOV Network Operator created the `NetworkAttachmentDefinition` CR by running the following command: -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n <namespace> <1> ----- -<1> Replace `<namespace>` with the value for `networkNamespace` that you specified in the `SriovNetwork` object. For example, `sysctl-tuning-test`. -+ -.Example output -[source,terminal] ----- -NAME AGE -onevalidflag 14m ----- -+ -[NOTE] -==== -There might be a delay before the SR-IOV Network Operator creates the CR. -==== - -.Verifying that the additional SR-IOV network attachment is successful - -To verify that the tuning CNI is correctly configured and the additional SR-IOV network attachment is attached, do the following: - -. Create a `Pod` CR. 
Save the following YAML as the file `examplepod.yaml`: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: tunepod - namespace: sysctl-tuning-test - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "onevalidflag", <1> - "mac": "0a:56:0a:83:04:0c", <2> - "ips": ["10.100.100.200/24"] <3> - } - ] -spec: - containers: - - name: podexample - image: centos - command: ["/bin/bash", "-c", "sleep INF"] - securityContext: - runAsUser: 2000 - runAsGroup: 3000 - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault ----- -<1> The name of the SR-IOV network attachment definition CR. -<2> Optional: The MAC address for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. To use this feature, you also must specify `{ "mac": true }` in the SriovNetwork object. -<3> Optional: IP addresses for the SR-IOV device that are allocated from the resource type defined in the SR-IOV network attachment definition CR. Both IPv4 and IPv6 addresses are supported. To use this feature, you also must specify `{ "ips": true }` in the `SriovNetwork` object. - -. Create the `Pod` CR: -+ -[source,terminal] ----- -$ oc apply -f examplepod.yaml ----- - -. Verify that the pod is created by running the following command: -+ -[source,terminal] ----- -$ oc get pod -n sysctl-tuning-test ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -tunepod 1/1 Running 0 47s ----- - -. Log in to the pod by running the following command: -+ -[source,terminal] ----- -$ oc rsh -n sysctl-tuning-test tunepod ----- - -. Verify the values of the configured sysctl flag. Find the value `net.ipv4.conf.IFNAME.accept_redirects` by running the following command:: -+ -[source,terminal] ----- -$ sysctl net.ipv4.conf.net1.accept_redirects ----- -+ -.Example output -[source,terminal] ----- -net.ipv4.conf.net1.accept_redirects = 1 ----- diff --git a/modules/nw-configuring-clb-timeouts.adoc b/modules/nw-configuring-clb-timeouts.adoc deleted file mode 100644 index 0737e2c9d6e3..000000000000 --- a/modules/nw-configuring-clb-timeouts.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Modules included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-clb-timeouts_{context}"] -= Configuring Classic Load Balancer timeouts - -You can configure the default timeouts for a Classic Load Balancer (CLB) to extend idle connections. - -.Prerequisites - -* You must have a deployed Ingress Controller on a running cluster. - -.Procedure - -. Set an AWS connection idle timeout of five minutes for the default `ingresscontroller` by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default \ - --type=merge --patch='{"spec":{"endpointPublishingStrategy": \ - {"type":"LoadBalancerService", "loadBalancer": \ - {"scope":"External", "providerParameters":{"type":"AWS", "aws": \ - {"type":"Classic", "classicLoadBalancer": \ - {"connectionIdleTimeout":"5m"}}}}}}}' ----- - -. 
Optional: Restore the default value of the timeout by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default \ - --type=merge --patch='{"spec":{"endpointPublishingStrategy": \ - {"loadBalancer":{"providerParameters":{"aws":{"classicLoadBalancer": \ - {"connectionIdleTimeout":null}}}}}}}' ----- - -[NOTE] -==== -You must specify the `scope` field when you change the connection timeout value unless the current scope is already set. When you set the `scope` field, you do not need to do so again if you restore the default timeout value. -==== diff --git a/modules/nw-configuring-elb-timeouts-aws-classic.adoc b/modules/nw-configuring-elb-timeouts-aws-classic.adoc deleted file mode 100644 index 33f71564a00e..000000000000 --- a/modules/nw-configuring-elb-timeouts-aws-classic.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: CONCEPT -[id="nw-configuring-elb-timeouts-aws-classic_{context}"] -= Configuring Classic Load Balancer timeouts on AWS - -{product-title} provides a method for setting a custom timeout period for a specific route or Ingress Controller. Additionally, an AWS Classic Load Balancer (CLB) has its own timeout period with a default time of 60 seconds. - -If the timeout period of the CLB is shorter than the route timeout or Ingress Controller timeout, the load balancer can prematurely terminate the connection. You can prevent this problem by increasing both the timeout period of the route and CLB. diff --git a/modules/nw-configuring-high-performance-multicast-with-sriov.adoc b/modules/nw-configuring-high-performance-multicast-with-sriov.adoc deleted file mode 100644 index 9db8457b3a3d..000000000000 --- a/modules/nw-configuring-high-performance-multicast-with-sriov.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-sriov-multicast.adoc - -:_content-type: PROCEDURE -[id="nw-using-an-sriov-interface-for-multicast_{context}"] -= Configuring an SR-IOV interface for multicast - -The follow procedure creates an example SR-IOV interface for multicast. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` role. - -.Procedure - -. Create a `SriovNetworkNodePolicy` object: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policy-example - namespace: openshift-sriov-network-operator -spec: - resourceName: example - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 4 - nicSelector: - vendor: "8086" - pfNames: ['ens803f0'] - rootDevices: ['0000:86:00.0'] ----- - -. Create a `SriovNetwork` object: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: net-example - namespace: openshift-sriov-network-operator -spec: - networkNamespace: default - ipam: | <1> - { - "type": "host-local", <1> - "subnet": "10.56.217.0/24", - "rangeStart": "10.56.217.171", - "rangeEnd": "10.56.217.181", - "routes": [ - {"dst": "224.0.0.0/5"}, - {"dst": "232.0.0.0/5"} - ], - "gateway": "10.56.217.1" - } - resourceName: example ----- -<1> If you choose to configure DHCP as IPAM, ensure that you provision the following default routes through your DHCP server: `224.0.0.0/5` and `232.0.0.0/5`. 
This is to override the static multicast route set by the default network provider. - -. Create a pod with multicast application: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: testpmd - namespace: default - annotations: - k8s.v1.cni.cncf.io/networks: nic1 -spec: - containers: - - name: example - image: rhel7:latest - securityContext: - capabilities: - add: ["NET_ADMIN"] <1> - command: [ "sleep", "infinity"] ----- -<1> The `NET_ADMIN` capability is required only if your application needs to -assign the multicast IP address to the SR-IOV interface. Otherwise, it can be -omitted. diff --git a/modules/nw-configuring-ingress-cluster-traffic-aws-networking-load-balancer.adoc b/modules/nw-configuring-ingress-cluster-traffic-aws-networking-load-balancer.adoc deleted file mode 100644 index fd38c9e86b8f..000000000000 --- a/modules/nw-configuring-ingress-cluster-traffic-aws-networking-load-balancer.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc - -:_content-type: CONCEPT -[id="nw-configuring-ingress-cluster-traffic-aws-network-load-balancer_{context}"] -= Configuring ingress cluster traffic on AWS using a Network Load Balancer - -{product-title} provides methods for communicating from outside the cluster with services that run in the cluster. One such method uses a Network Load Balancer (NLB). You can configure an NLB on a new or existing AWS cluster. diff --git a/modules/nw-configuring-lb-allowed-source-ranges-migration.adoc b/modules/nw-configuring-lb-allowed-source-ranges-migration.adoc deleted file mode 100644 index e21d8121bcb2..000000000000 --- a/modules/nw-configuring-lb-allowed-source-ranges-migration.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Modules included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-load-balancer-allowed-source-ranges.adoc - -:_content-type: PROCEDURE -[id="nw-configuring-lb-allowed-source-ranges-migration_{context}"] -= Migrating to load balancer allowed source ranges - -If you have already set the annotation `service.beta.kubernetes.io/load-balancer-source-ranges`, you can migrate to load balancer allowed source ranges. When you set the `AllowedSourceRanges`, the Ingress Controller sets the `spec.loadBalancerSourceRanges` field based on the `AllowedSourceRanges` value and unsets the `service.beta.kubernetes.io/load-balancer-source-ranges` annotation. - -[NOTE] -==== -If you have already set the `spec.loadBalancerSourceRanges` field or the load balancer service anotation `service.beta.kubernetes.io/load-balancer-source-ranges` in a previous version of {product-title}, the Ingress Controller starts reporting `Progressing=True` after an upgrade. To fix this, set `AllowedSourceRanges` that overwrites the `spec.loadBalancerSourceRanges` field and clears the `service.beta.kubernetes.io/load-balancer-source-ranges` annotation. The Ingress Controller starts reporting `Progressing=False` again. -==== - -.Prerequisites - -* You have set the `service.beta.kubernetes.io/load-balancer-source-ranges` annotation. - -.Procedure - -. 
Ensure that the `service.beta.kubernetes.io/load-balancer-source-ranges` annotation is set:
-+
-[source,terminal]
-----
-$ oc get svc router-default -n openshift-ingress -o yaml
-----
-+
-.Example output
-[source,yaml]
-----
-apiVersion: v1
-kind: Service
-metadata:
-  annotations:
-    service.beta.kubernetes.io/load-balancer-source-ranges: 192.168.0.1/32
-----
-
-. Ensure that the `spec.loadBalancerSourceRanges` field is unset:
-+
-[source,terminal]
-----
-$ oc get svc router-default -n openshift-ingress -o yaml
-----
-+
-.Example output
-[source,yaml]
-----
-...
-spec:
-  loadBalancerSourceRanges:
-  - 0.0.0.0/0
-...
-----
-
-. Update your cluster to {product-title} 4.13.
-
-. Set the allowed source ranges API for the `ingresscontroller` by running the following command:
-+
-[source,terminal]
-----
-$ oc -n openshift-ingress-operator patch ingresscontroller/default \
-  --type=merge --patch='{"spec":{"endpointPublishingStrategy": \
-  {"loadBalancer":{"allowedSourceRanges":["0.0.0.0/0"]}}}}' <1>
-----
-<1> The example value `0.0.0.0/0` specifies the allowed source range.
diff --git a/modules/nw-configuring-lb-allowed-source-ranges.adoc b/modules/nw-configuring-lb-allowed-source-ranges.adoc
deleted file mode 100644
index c23cb56376ad..000000000000
--- a/modules/nw-configuring-lb-allowed-source-ranges.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Modules included in the following assemblies:
-//
-// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-load-balancer-allowed-source-ranges.adoc
-
-:_content-type: PROCEDURE
-[id="nw-configuring-lb-allowed-source-ranges_{context}"]
-= Configuring load balancer allowed source ranges
-
-You can enable and configure the `spec.endpointPublishingStrategy.loadBalancer.allowedSourceRanges` field. By configuring load balancer allowed source ranges, you can limit access to the load balancer for the Ingress Controller to a specified list of IP address ranges. The Ingress Operator reconciles the load balancer Service and sets the `spec.loadBalancerSourceRanges` field based on `AllowedSourceRanges`.
-
-[NOTE]
-====
-If you have already set the `spec.loadBalancerSourceRanges` field or the load balancer service annotation `service.beta.kubernetes.io/load-balancer-source-ranges` in a previous version of {product-title}, the Ingress Controller starts reporting `Progressing=True` after an upgrade. To fix this, set `AllowedSourceRanges`, which overwrites the `spec.loadBalancerSourceRanges` field and clears the `service.beta.kubernetes.io/load-balancer-source-ranges` annotation. The Ingress Controller starts reporting `Progressing=False` again.
-====
-
-.Prerequisites
-
-* You have a deployed Ingress Controller on a running cluster.
-
-.Procedure
-
-* Set the allowed source ranges API for the Ingress Controller by running the following command:
-+
-[source,terminal]
-----
-$ oc -n openshift-ingress-operator patch ingresscontroller/default \
-  --type=merge --patch='{"spec":{"endpointPublishingStrategy": \
-  {"loadBalancer":{"allowedSourceRanges":["0.0.0.0/0"]}}}}' <1>
-----
-<1> The example value `0.0.0.0/0` specifies the allowed source range.
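To confirm that the Ingress Operator reconciled the setting onto the load balancer Service, you can inspect the `spec.loadBalancerSourceRanges` field directly. The following is a minimal verification sketch, assuming the default `router-default` Service in the `openshift-ingress` namespace:

[source,terminal]
----
$ oc get svc router-default -n openshift-ingress \
    -o jsonpath='{.spec.loadBalancerSourceRanges}{"\n"}'
----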
diff --git a/modules/nw-configuring-route-timeouts.adoc b/modules/nw-configuring-route-timeouts.adoc
deleted file mode 100644
index 2a99c4cb1c69..000000000000
--- a/modules/nw-configuring-route-timeouts.adoc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Module filename: nw-configuring-route-timeouts.adoc
-// Module included in the following assemblies:
-// * networking/configuring-routing.adoc
-// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-aws.adoc
-
-:_content-type: PROCEDURE
-[id="nw-configuring-route-timeouts_{context}"]
-= Configuring route timeouts
-
-You can configure the default timeouts for an existing route when you
-have services in need of a low timeout, which is required for service-level
-agreement (SLA) purposes, or a high timeout, for cases with a slow
-back end.
-
-.Prerequisites
-* You need a deployed Ingress Controller on a running cluster.
-
-.Procedure
-. Using the `oc annotate` command, add the timeout to the route:
-+
-[source,terminal]
-----
-$ oc annotate route <route_name> \
-    --overwrite haproxy.router.openshift.io/timeout=<timeout><time_unit> <1>
-----
-<1> Supported time units are microseconds (us), milliseconds (ms), seconds (s),
-minutes (m), hours (h), or days (d).
-+
-The following example sets a timeout of two seconds on a route named `myroute`:
-+
-[source,terminal]
-----
-$ oc annotate route myroute --overwrite haproxy.router.openshift.io/timeout=2s
-----
diff --git a/modules/nw-configuring-router-compression.adoc b/modules/nw-configuring-router-compression.adoc
deleted file mode 100644
index 7d5c4964cd6f..000000000000
--- a/modules/nw-configuring-router-compression.adoc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ingress_operator.adoc
-
-:_content-type: PROCEDURE
-[id="nw-configuring-router-compression_{context}"]
-= Using router compression
-
-You can configure the HAProxy Ingress Controller to apply router compression globally for specific MIME types. You can use the `mimeTypes` variable to define the formats of MIME types to which compression is applied. The types are: application, image, message, multipart, text, video, or a custom type prefaced by "X-". To see the full notation for MIME types and subtypes, see link:https://datatracker.ietf.org/doc/html/rfc1341#page-7[RFC1341].
-
-[NOTE]
-====
-Memory allocated for compression can affect the maximum number of connections. Additionally, compression of large buffers can introduce latency, as can heavy use of regular expressions or long lists of regular expressions.
-
-Not all MIME types benefit from compression, but HAProxy still uses resources to try to compress if instructed to. Generally, text formats, such as html, css, and js, benefit from compression, but formats that are already compressed, such as image, audio, and video, benefit little in exchange for the time and resources spent on compression.
-====
-
-.Procedure
-
-. Configure the `httpCompression` field for the Ingress Controller.
-.. Use the following command to edit the `IngressController` resource:
-+
-[source,terminal]
-----
-$ oc edit -n openshift-ingress-operator ingresscontrollers/default
-----
-+
-..
Under `spec`, set the `httpCompression` policy field to `mimeTypes` and specify a list of MIME types that should have compression applied: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - httpCompression: - mimeTypes: - - "text/html" - - "text/css; charset=utf-8" - - "application/json" - ... ----- diff --git a/modules/nw-control-dns-records-public-hosted-zone-aws.adoc b/modules/nw-control-dns-records-public-hosted-zone-aws.adoc deleted file mode 100644 index f38880090381..000000000000 --- a/modules/nw-control-dns-records-public-hosted-zone-aws.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-creating-dns-records-on-aws.adoc - -:_content-type: PROCEDURE -[id="nw-control-dns-records-public-hosted-zone-aws_{context}"] -= Creating DNS records on an public hosted zone for AWS by using Red Hat External DNS Operator - -You can create DNS records on a public hosted zone for AWS by using the Red Hat External DNS Operator. You can use the same instructions to create DNS records on a hosted zone for AWS GovCloud. - -.Procedure - -. Check the user. The user must have access to the `kube-system` namespace. If you don’t have the credentials, as you can fetch the credentials from the `kube-system` namespace to use the cloud provider client: -+ -[source,terminal] ----- -$ oc whoami ----- -+ -.Example output -[source,terminal] ----- -system:admin ----- - -. Fetch the values from aws-creds secret present in `kube-system` namespace. -+ -[source,terminal] ----- -$ export AWS_ACCESS_KEY_ID=$(oc get secrets aws-creds -n kube-system --template={{.data.aws_access_key_id}} | base64 -d) -$ export AWS_SECRET_ACCESS_KEY=$(oc get secrets aws-creds -n kube-system --template={{.data.aws_secret_access_key}} | base64 -d) ----- - -. Get the routes to check the domain: -+ -[source,terminal] ----- -$ oc get routes --all-namespaces | grep console ----- -+ -.Example output -[source,terminal] ----- -openshift-console console console-openshift-console.apps.testextdnsoperator.apacshift.support console https reencrypt/Redirect None -openshift-console downloads downloads-openshift-console.apps.testextdnsoperator.apacshift.support downloads http edge/Redirect None ----- - -. Get the list of dns zones to find the one which corresponds to the previously found route's domain: -+ -[source,terminal] ----- -$ aws route53 list-hosted-zones | grep testextdnsoperator.apacshift.support ----- -+ -.Example output -[source,terminal] ----- -HOSTEDZONES terraform /hostedzone/Z02355203TNN1XXXX1J6O testextdnsoperator.apacshift.support. 5 ----- - -. Create `ExternalDNS` resource for `route` source: -+ -[source,yaml] ----- -$ cat <<EOF | oc create -f - -apiVersion: externaldns.olm.openshift.io/v1beta1 -kind: ExternalDNS -metadata: - name: sample-aws <1> -spec: - domains: - - filterType: Include <2> - matchType: Exact <3> - name: testextdnsoperator.apacshift.support <4> - provider: - type: AWS <5> - source: <6> - type: OpenShiftRoute <7> - openshiftRouteOptions: - routerName: default <8> -EOF ----- -<1> Defines the name of external DNS resource. -<2> By default all hosted zones are selected as potential targets. You can include a hosted zone that you need. -<3> The matching of the target zone's domain has to be exact (as opposed to regular expression match). -<4> Specify the exact domain of the zone you want to update. 
The hostname of the routes must be subdomains of the specified domain. -<5> Defines the `AWS Route53` DNS provider. -<6> Defines options for the source of DNS records. -<7> Defines OpenShift `route` resource as the source for the DNS records which gets created in the previously specified DNS provider. -<8> If the source is `OpenShiftRoute`, then you can pass the OpenShift Ingress Controller name. External DNS Operator selects the canonical hostname of that router as the target while creating CNAME record. - -. Check the records created for OCP routes using the following command: -+ -[source,terminal] ----- -$ aws route53 list-resource-record-sets --hosted-zone-id Z02355203TNN1XXXX1J6O --query "ResourceRecordSets[?Type == 'CNAME']" | grep console ----- diff --git a/modules/nw-control-dns-records-public-hosted-zone-azure.adoc b/modules/nw-control-dns-records-public-hosted-zone-azure.adoc deleted file mode 100644 index 930c4adbb38a..000000000000 --- a/modules/nw-control-dns-records-public-hosted-zone-azure.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-creating-dns-records-on-azure.adoc - -:_content-type: PROCEDURE -[id="nw-control-dns-records-public-hosted-zone-azure_{context}"] -= Creating DNS records on an public DNS zone for Azure by using Red Hat External DNS Operator - -You can create DNS records on a public DNS zone for Azure by using Red Hat External DNS Operator. - -.Procedure - -. Check the user. The user must have access to the `kube-system` namespace. If you don’t have the credentials, as you can fetch the credentials from the `kube-system` namespace to use the cloud provider client: -+ -[source,terminal] ----- -$ oc whoami ----- -+ -.Example output -[source,terminal] ----- -system:admin ----- - -. Fetch the values from azure-credentials secret present in `kube-system` namespace. -+ -[source,terminal] ----- -$ CLIENT_ID=$(oc get secrets azure-credentials -n kube-system --template={{.data.azure_client_id}} | base64 -d) -$ CLIENT_SECRET=$(oc get secrets azure-credentials -n kube-system --template={{.data.azure_client_secret}} | base64 -d) -$ RESOURCE_GROUP=$(oc get secrets azure-credentials -n kube-system --template={{.data.azure_resourcegroup}} | base64 -d) -$ SUBSCRIPTION_ID=$(oc get secrets azure-credentials -n kube-system --template={{.data.azure_subscription_id}} | base64 -d) -$ TENANT_ID=$(oc get secrets azure-credentials -n kube-system --template={{.data.azure_tenant_id}} | base64 -d) ----- - -. Login to azure with base64 decoded values: -+ -[source,terminal] ----- -$ az login --service-principal -u "${CLIENT_ID}" -p "${CLIENT_SECRET}" --tenant "${TENANT_ID}" ----- - -. Get the routes to check the domain: -+ -[source,terminal] ----- -$ oc get routes --all-namespaces | grep console ----- -+ -.Example output -[source,terminal] ----- -openshift-console console console-openshift-console.apps.test.azure.example.com console https reencrypt/Redirect None -openshift-console downloads downloads-openshift-console.apps.test.azure.example.com downloads http edge/Redirect None ----- - -. Get the list of dns zones to find the one which corresponds to the previously found route's domain: -+ -[source,terminal] ----- -$ az network dns zone list --resource-group "${RESOURCE_GROUP}" ----- - -. 
Create `ExternalDNS` resource for `route` source: -+ -[source,yaml] ----- -apiVersion: externaldns.olm.openshift.io/v1beta1 -kind: ExternalDNS -metadata: - name: sample-azure <1> -spec: - zones: - - "/subscriptions/1234567890/resourceGroups/test-azure-xxxxx-rg/providers/Microsoft.Network/dnszones/test.azure.example.com" <2> - provider: - type: Azure <3> - source: - openshiftRouteOptions: <4> - routerName: default <5> - type: OpenShiftRoute <6> -EOF ----- -<1> Specifies the name of External DNS CR. -<2> Define the zone ID. -<3> Defines the Azure DNS provider. -<4> You can define options for the source of DNS records. -<5> If the source is `OpenShiftRoute` then you can pass the OpenShift Ingress Controller name. External DNS selects the canonical hostname of that router as the target while creating CNAME record. -<6> Defines OpenShift `route` resource as the source for the DNS records which gets created in the previously specified DNS provider. - -. Check the records created for OCP routes using the following command: -+ -[source,terminal] ----- -$ az network dns record-set list -g "${RESOURCE_GROUP}" -z test.azure.example.com | grep console ----- -+ -[NOTE] -==== -To create records on private hosted zones on private Azure dns, you need to specify the private zone under `zones` which populates the provider type to `azure-private-dns` in the `ExternalDNS` container args. -==== diff --git a/modules/nw-control-dns-records-public-hosted-zone-infoblox.adoc b/modules/nw-control-dns-records-public-hosted-zone-infoblox.adoc deleted file mode 100644 index 5032b48c78bf..000000000000 --- a/modules/nw-control-dns-records-public-hosted-zone-infoblox.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-creating-dns-records-on-infoblox.adoc - -:_content-type: PROCEDURE -[id="nw-control-dns-records-public-dns-zone-infoblox_{context}"] -= Creating DNS records on a public DNS zone on Infoblox - -You can create DNS records on a public DNS zone on Infoblox by using the Red Hat External DNS Operator. - -.Prerequisites - -* You have access to the OpenShift CLI (`oc`). -* You have access to the Infoblox UI. - -.Procedure - -. Create a `secret` object with Infoblox credentials by running the following command: -+ -[source,terminal] ----- -$ oc -n external-dns-operator create secret generic infoblox-credentials --from-literal=EXTERNAL_DNS_INFOBLOX_WAPI_USERNAME=<infoblox_username> --from-literal=EXTERNAL_DNS_INFOBLOX_WAPI_PASSWORD=<infoblox_password> ----- - -. Get the routes objects to check your cluster domain by running the following command: -+ -[source,terminal] ----- -$ oc get routes --all-namespaces | grep console ----- -+ -.Example Output -[source,terminal] ----- -openshift-console console console-openshift-console.apps.test.example.com console https reencrypt/Redirect None -openshift-console downloads downloads-openshift-console.apps.test.example.com downloads http edge/Redirect None ----- - -. 
Create an `ExternalDNS` resource YAML file, for example, sample-infoblox.yaml, as follows: -+ -[source,yaml] ----- -apiVersion: externaldns.olm.openshift.io/v1beta1 -kind: ExternalDNS -metadata: - name: sample-infoblox -spec: - provider: - type: Infoblox - infoblox: - credentials: - name: infoblox-credentials - gridHost: ${INFOBLOX_GRID_PUBLIC_IP} - wapiPort: 443 - wapiVersion: "2.3.1" - domains: - - filterType: Include - matchType: Exact - name: test.example.com - source: - type: OpenShiftRoute - openshiftRouteOptions: - routerName: default ----- - -. Create an `ExternalDNS` resource on Infoblox by running the following command: -+ -[source,terminal] ----- -$ oc create -f sample-infoblox.yaml ----- - -. From the Infoblox UI, check the DNS records created for `console` routes: - -.. Click *Data Management* -> *DNS* -> *Zones*. -.. Select the zone name. diff --git a/modules/nw-control-dns-records-public-managed-zone-gcp.adoc b/modules/nw-control-dns-records-public-managed-zone-gcp.adoc deleted file mode 100644 index 6dafc2d07bc1..000000000000 --- a/modules/nw-control-dns-records-public-managed-zone-gcp.adoc +++ /dev/null @@ -1,112 +0,0 @@ - -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-creating-dns-records-on-gcp.adoc - -:_content-type: PROCEDURE -[id="nw-control-dns-records-public-managed-zone-gcp_{context}"] -= Creating DNS records on an public managed zone for GCP by using Red Hat External DNS Operator - -You can create DNS records on a public managed zone for GCP by using Red Hat External DNS Operator. - -.Procedure - -. Check the user. The user must have access to the `kube-system` namespace. If you don’t have the credentials, as you can fetch the credentials from the `kube-system` namespace to use the cloud provider client: -+ -[source,terminal] ----- -$ oc whoami ----- -+ -.Example output -[source,terminal] ----- -system:admin ----- - -. Copy the value of service_account.json in gcp-credentials secret in a file encoded-gcloud.json by running the following command: -+ -[source,terminal] ----- -$ oc get secret gcp-credentials -n kube-system --template='{{$v := index .data "service_account.json"}}{{$v}}' | base64 -d - > decoded-gcloud.json ----- - -. Export Google credentials: -+ -[source,terminal] ----- -$ export GOOGLE_CREDENTIALS=decoded-gcloud.json ----- - -. Activate your account by using the following command: -+ -[source,terminal] ----- -$ gcloud auth activate-service-account <client_email as per decoded-gcloud.json> --key-file=decoded-gcloud.json ----- - -. Set your project: -+ -[source,terminal] ----- -$ gcloud config set project <project_id as per decoded-gcloud.json> ----- - -. Get the routes to check the domain: -+ -[source,terminal] ----- -$ oc get routes --all-namespaces | grep console ----- -+ -.Example output -[source,terminal] ----- -openshift-console console console-openshift-console.apps.test.gcp.example.com console https reencrypt/Redirect None -openshift-console downloads downloads-openshift-console.apps.test.gcp.example.com downloads http edge/Redirect None ----- - -. Get the list of managed zones to find the zone which corresponds to the previously found route’s domain: -+ -[source,terminal] ----- -$ gcloud dns managed-zones list | grep test.gcp.example.com -qe-cvs4g-private-zone test.gcp.example.com ----- - -. 
Create `ExternalDNS` resource for `route` source: -+ -[source,yaml] ----- -apiVersion: externaldns.olm.openshift.io/v1beta1 -kind: ExternalDNS -metadata: - name: sample-gcp <1> -spec: - domains: - - filterType: Include <2> - matchType: Exact <3> - name: test.gcp.example.com <4> - provider: - type: GCP <5> - source: - openshiftRouteOptions: <6> - routerName: default <7> - type: OpenShiftRoute <8> -EOF ----- -<1> Specifies the name of External DNS CR. -<2> By default all hosted zones are selected as potential targets. You can include a hosted zone that you need. -<3> The matching of the target zone's domain has to be exact (as opposed to regular expression match). -<4> Specify the exact domain of the zone you want to update. The hostname of the routes must be subdomains of the specified domain. -<5> Defines Google Cloud DNS provider. -<6> You can define options for the source of DNS records. -<7> If the source is `OpenShiftRoute` then you can pass the OpenShift Ingress Controller name. External DNS selects the canonical hostname of that router as the target while creating CNAME record. -<8> Defines OpenShift `route` resource as the source for the DNS records which gets created in the previously specified DNS provider. - -. Check the records created for OCP routes using the following command: -+ -[source,terminal] ----- -$ gcloud dns record-sets list --zone=qe-cvs4g-private-zone | grep console ----- diff --git a/modules/nw-controlling-dns-pod-placement.adoc b/modules/nw-controlling-dns-pod-placement.adoc deleted file mode 100644 index 1f5545827549..000000000000 --- a/modules/nw-controlling-dns-pod-placement.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-controlling-dns-pod-placement_{context}"] -= Controlling DNS pod placement - -The DNS Operator has two daemon sets: one for CoreDNS and one for managing the `/etc/hosts` file. The daemon set for `/etc/hosts` must run on every node host to add an entry for the cluster image registry to support pulling images. Security policies can prohibit communication between pairs of nodes, which prevents the daemon set for CoreDNS from running on every node. - -As a cluster administrator, you can use a custom node selector to configure the daemon set for CoreDNS to run or not run on certain nodes. - - -.Prerequisites - -* You installed the `oc` CLI. -* You are logged in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -* To prevent communication between certain nodes, configure the `spec.nodePlacement.nodeSelector` API field: - -. Modify the DNS Operator object named `default`: -+ -[source, terminal] ----- -$ oc edit dns.operator/default ----- -+ -. Specify a node selector that includes only control plane nodes in the `spec.nodePlacement.nodeSelector` API field: -+ -[source,yaml] ----- - spec: - nodePlacement: - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -* To allow the daemon set for CoreDNS to run on nodes, configure a taint and toleration: -+ -. Modify the DNS Operator object named `default`: -+ -[source,terminal] ----- -$ oc edit dns.operator/default ----- -+ -. Specify a taint key and a toleration for the taint: -+ -[source,yaml] ----- - spec: - nodePlacement: - tolerations: - - effect: NoExecute - key: "dns-only" - operators: Equal - value: abc - tolerationSeconds: 3600 <1> ----- -<1> If the taint is `dns-only`, it can be tolerated indefinitely. You can omit `tolerationSeconds`. 
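After you adjust the node placement, you can check which nodes the CoreDNS pods are scheduled on. This is a minimal verification sketch that assumes the default `openshift-dns` namespace:

[source,terminal]
----
$ oc get pods -n openshift-dns -o wide
----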
diff --git a/modules/nw-create-load-balancer-service.adoc b/modules/nw-create-load-balancer-service.adoc deleted file mode 100644 index f25eb133e4a5..000000000000 --- a/modules/nw-create-load-balancer-service.adoc +++ /dev/null @@ -1,125 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/getting-traffic-cluster.adoc - -:_content-type: PROCEDURE -[id="nw-create-load-balancer-service_{context}"] -= Creating a load balancer service - -Use the following procedure to create a load balancer service. - -.Prerequisites - -* Make sure that the project and service you want to expose exist. - -.Procedure - -To create a load balancer service: - -. Log in to {product-title}. - -. Load the project where the service you want to expose is located. -+ -[source,terminal] ----- -$ oc project project1 ----- - -. Open a text file on the control plane node and paste the following text, editing the -file as needed: -+ -.Sample load balancer configuration file ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-2 <1> -spec: - ports: - - name: db - port: 3306 <2> - loadBalancerIP: - loadBalancerSourceRanges: <3> - - 10.0.0.0/8 - - 192.168.0.0/16 - type: LoadBalancer <4> - selector: - name: mysql <5> ----- -<1> Enter a descriptive name for the load balancer service. -<2> Enter the same port that the service you want to expose is listening on. -<3> Enter a list of specific IP addresses to restrict traffic through the load balancer. This field is ignored if the cloud-provider does not support the feature. -<4> Enter `Loadbalancer` as the type. -<5> Enter the name of the service. -+ -[NOTE] -==== -To restrict the traffic through the load balancer to specific IP addresses, it is recommended to use the Ingress Controller field `spec.endpointPublishingStrategy.loadBalancer.allowedSourceRanges`. Do not set the `loadBalancerSourceRanges` field. -==== -. Save and exit the file. - -. Run the following command to create the service: -+ -[source,terminal] ----- -$ oc create -f <file-name> ----- -+ -For example: -+ -[source,terminal] ----- -$ oc create -f mysql-lb.yaml ----- - -. Execute the following command to view the new service: -+ -[source,terminal] ----- -$ oc get svc ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -egress-2 LoadBalancer 172.30.22.226 ad42f5d8b303045-487804948.example.com 3306:30357/TCP 15m ----- -+ -The service has an external IP address automatically assigned if there is a cloud -provider enabled. - -. On the master, use a tool, such as cURL, to make sure you can reach the service -using the public IP address: -+ -[source,terminal] ----- -$ curl <public-ip>:<port> ----- -+ -For example: -+ -[source,terminal] ----- -$ curl 172.29.121.74:3306 ----- -+ -The examples in this section use a MySQL service, which requires a client application. -If you get a string of characters with the `Got packets out of order` message, -you are connecting with the service: -+ -If you have a MySQL client, log in with the standard CLI command: -+ -[source,terminal] ----- -$ mysql -h 172.30.131.89 -u admin -p ----- -+ -.Example output -[source,terminal] ----- -Enter password: -Welcome to the MariaDB monitor. Commands end with ; or \g. 
- -MySQL [(none)]> ----- diff --git a/modules/nw-creating-a-route.adoc b/modules/nw-creating-a-route.adoc deleted file mode 100644 index 5e0258482de3..000000000000 --- a/modules/nw-creating-a-route.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-creating-a-route_{context}"] -= Creating an HTTP-based route - -A route allows you to host your application at a public URL. It can either be secure or unsecured, depending on the network security configuration of your application. An HTTP-based route is an unsecured route that uses the basic HTTP routing protocol and exposes a service on an unsecured application port. - -The following procedure describes how to create a simple HTTP-based route to a web application, using the `hello-openshift` application as an example. -//link:https://github.com/openshift/origin/tree/master/examples/hello-openshift[hello-openshift] - -.Prerequisites - - -* You installed the OpenShift CLI (`oc`). -* You are logged in as an administrator. -* You have a web application that exposes a port and a TCP endpoint listening for traffic on the port. - -.Procedure - -. Create a project called `hello-openshift` by running the following command: -+ -[source,terminal] ----- -$ oc new-project hello-openshift ----- - -. Create a pod in the project by running the following command: -+ -[source,terminal] ----- -$ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json ----- - -. Create a service called `hello-openshift` by running the following command: -+ -[source,terminal] ----- -$ oc expose pod/hello-openshift ----- - -. Create an unsecured route to the `hello-openshift` application by running the following command: -+ -[source,terminal] ----- -$ oc expose svc hello-openshift ----- - -.Verification - -* To verify that the `route` resource that you created, run the following command: -+ -[source,terminal] ----- -$ oc get routes -o yaml <name of resource> <1> ----- -<1> In this example, the route is named `hello-openshift`. - -.Sample YAML definition of the created unsecured route: -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: hello-openshift -spec: - host: hello-openshift-hello-openshift.<Ingress_Domain> <1> - port: - targetPort: 8080 <2> - to: - kind: Service - name: hello-openshift ----- -<1> `<Ingress_Domain>` is the default ingress domain name. The `ingresses.config/cluster` object is created during the installation and cannot be changed. If you want to specify a different domain, you can specify an alternative cluster domain using the `appsDomain` option. -<2> `targetPort` is the target port on pods that is selected by the service that this route points to. 
- -+ -[NOTE] -==== -To display your default ingress domain, run the following command: -[source,terminal] ----- -$ oc get ingresses.config/cluster -o jsonpath={.spec.domain} ----- -==== diff --git a/modules/nw-creating-project-and-service.adoc b/modules/nw-creating-project-and-service.adoc deleted file mode 100644 index f4ee611e2dac..000000000000 --- a/modules/nw-creating-project-and-service.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc - -:_content-type: PROCEDURE -[id="nw-creating-project-and-service_{context}"] -= Creating a project and service - -If the project and service that you want to expose do not exist, first create -the project, then the service. - -If the project and service already exist, skip to the procedure on exposing the -service to create a route. - -.Prerequisites - -* Install the `oc` CLI and log in as a cluster administrator. - -.Procedure - -. Create a new project for your service by running the `oc new-project` command: -+ -[source,terminal] ----- -$ oc new-project myproject ----- - -. Use the `oc new-app` command to create your service: -+ -[source,terminal] ----- -$ oc new-app nodejs:12~https://github.com/sclorg/nodejs-ex.git ----- - -. To verify that the service was created, run the following command: -+ -[source,terminal] ----- -$ oc get svc -n myproject ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -nodejs-ex ClusterIP 172.30.197.157 <none> 8080/TCP 70s ----- -+ -By default, the new service does not have an external IP address. - - diff --git a/modules/nw-customize-ingress-error-pages.adoc b/modules/nw-customize-ingress-error-pages.adoc deleted file mode 100644 index 77f67256250e..000000000000 --- a/modules/nw-customize-ingress-error-pages.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module filename: nw-customize-ingress-error-pages.adoc -// Module included in the following assemblies: -// * networking/ingress-controller-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-customize-ingress-error-pages_{context}"] -= Customizing HAProxy error code response pages - -As a cluster administrator, you can specify a custom error code response page for either 503, 404, or both error pages. The HAProxy router serves a 503 error page when the application pod is not running or a 404 error page when the requested URL does not exist. For example, if you customize the 503 error code response page, then the page is served when the application pod is not running, and the default 404 error code HTTP response page is served by the HAProxy router for an incorrect route or a non-existing route. - -Custom error code response pages are specified in a config map then patched to the Ingress Controller. The config map keys have two available file names as follows: -`error-page-503.http` and `error-page-404.http`. - -Custom HTTP error code response pages must follow the link:https://www.haproxy.com/documentation/hapee/latest/configuration/config-sections/http-errors/[HAProxy HTTP error page configuration guidelines]. Here is an example of the default {product-title} HAProxy router link:https://raw.githubusercontent.com/openshift/router/master/images/router/haproxy/conf/error-page-503.http[http 503 error code response page]. You can use the default content as a template for creating your own custom page. 
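The patch command later in this procedure sets a single field on the Ingress Controller. For reference, the following YAML is a minimal sketch of that field in declarative form, assuming the `default` Ingress Controller and the config map name used in this procedure; all other Ingress Controller fields are omitted.

.Example IngressController reference to a custom error page config map (sketch)
[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  # Points to a config map in the openshift-config namespace whose keys are
  # error-page-503.http and error-page-404.http.
  httpErrorCodePages:
    name: my-custom-error-code-pages
----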
- -By default, the HAProxy router serves only a 503 error page when the application is not running or when the route is incorrect or non-existent. This default behavior is the same as the behavior on {product-title} 4.8 and earlier. If a config map for the customization of an HTTP error code response is not provided, and you are using a custom HTTP error code response page, the router serves a default 404 or 503 error code response page. - -[NOTE] -==== -If you use the {product-title} default 503 error code page as a template for your customizations, the headers in the file require an editor that can use CRLF line endings. -==== - -.Procedure - -. Create a config map named `my-custom-error-code-pages` in the `openshift-config` namespace: -+ -[source,terminal] ----- -$ oc -n openshift-config create configmap my-custom-error-code-pages \ ---from-file=error-page-503.http \ ---from-file=error-page-404.http ----- - -. Patch the Ingress Controller to reference the `my-custom-error-code-pages` config map by name: -+ -[source,terminal] ----- -$ oc patch -n openshift-ingress-operator ingresscontroller/default --patch '{"spec":{"httpErrorCodePages":{"name":"my-custom-error-code-pages"}}}' --type=merge ----- -+ -The Ingress Operator copies the `my-custom-error-code-pages` config map from the `openshift-config` namespace to the `openshift-ingress` namespace. The Operator names the config map according to the pattern, `<your_ingresscontroller_name>-errorpages`, in the `openshift-ingress` namespace. - -. Display the copy: -+ -[source,terminal] ----- -$ oc get cm default-errorpages -n openshift-ingress ----- -+ -.Example output ----- -NAME DATA AGE -default-errorpages 2 25s <1> ----- -<1> The example config map name is `default-errorpages` because the `default` Ingress Controller custom resource (CR) was patched. -+ - -. Confirm that the config map containing the custom error response page mounts on the router volume where the config map key is the filename that has the custom HTTP error code response: -+ -* For 503 custom HTTP custom error code response: -+ -[source,terminal] ----- -$ oc -n openshift-ingress rsh <router_pod> cat /var/lib/haproxy/conf/error_code_pages/error-page-503.http ----- -+ -* For 404 custom HTTP custom error code response: -+ -[source,terminal] ----- -$ oc -n openshift-ingress rsh <router_pod> cat /var/lib/haproxy/conf/error_code_pages/error-page-404.http ----- - -.Verification - -Verify your custom error code HTTP response: - -. Create a test project and application: -+ -[source,terminal] ----- - $ oc new-project test-ingress ----- -+ -[source,terminal] ----- -$ oc new-app django-psql-example ----- - -. For 503 custom http error code response: -.. Stop all the pods for the application. -.. Run the following curl command or visit the route hostname in the browser: -+ -[source,terminal] ----- -$ curl -vk <route_hostname> ----- -. For 404 custom http error code response: -.. Visit a non-existent route or an incorrect route. -.. Run the following curl command or visit the route hostname in the browser: -+ -[source,terminal] ----- -$ curl -vk <route_hostname> ----- - -. 
Check if the `errorfile` attribute is properly in the `haproxy.config` file: -+ -[source,terminal] ----- -$ oc -n openshift-ingress rsh <router> cat /var/lib/haproxy/conf/haproxy.config | grep errorfile ----- diff --git a/modules/nw-disabling-hsts.adoc b/modules/nw-disabling-hsts.adoc deleted file mode 100644 index 4e1a175445fc..000000000000 --- a/modules/nw-disabling-hsts.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// * networking/configuring-routing.adoc - -:_content-type: PROCEDURE -[id="nw-disabling-hsts_{context}"] -= Disabling HTTP Strict Transport Security per-route - -To disable HTTP strict transport security (HSTS) per-route, you can set the `max-age` value in the route annotation to `0`. - -.Prerequisites - -* You are logged in to the cluster with a user with administrator privileges for the project. -* You installed the `oc` CLI. - -.Procedure - -* To disable HSTS, set the `max-age` value in the route annotation to `0`, by entering the following command: -+ -[source,terminal] ----- -$ oc annotate route <route_name> -n <namespace> --overwrite=true "haproxy.router.openshift.io/hsts_header"="max-age=0" ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -.Example of disabling HSTS per-route -[source,yaml] ----- -metadata: - annotations: - haproxy.router.openshift.io/hsts_header: max-age=0 ----- -==== - -* To disable HSTS for every route in a namespace, enter the following command: -+ -[source,terminal] ----- -$ oc annotate route --all -n <namespace> --overwrite=true "haproxy.router.openshift.io/hsts_header"="max-age=0" ----- - -.Verification - -. To query the annotation for all routes, enter the following command: -+ -[source,terminal] ----- -$ oc get route --all-namespaces -o go-template='{{range .items}}{{if .metadata.annotations}}{{$a := index .metadata.annotations "haproxy.router.openshift.io/hsts_header"}}{{$n := .metadata.name}}{{with $a}}Name: {{$n}} HSTS: {{$a}}{{"\n"}}{{else}}{{""}}{{end}}{{end}}{{end}}' ----- -+ -.Example output -[source,terminal] ----- -Name: routename HSTS: max-age=0 ----- diff --git a/modules/nw-disabling-multicast.adoc b/modules/nw-disabling-multicast.adoc deleted file mode 100644 index 4e664ed33376..000000000000 --- a/modules/nw-disabling-multicast.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/disabling-multicast.adoc -// * networking/ovn_kubernetes_network_provider/disabling-multicast.adoc - -ifeval::["{context}" == "openshift-sdn-disabling-multicast"] -:namespace: netnamespace -:annotation: netnamespace.network.openshift.io/multicast-enabled- -endif::[] -ifeval::["{context}" == "ovn-kubernetes-disabling-multicast"] -:namespace: namespace -:annotation: k8s.ovn.org/multicast-enabled- -endif::[] - -:_content-type: PROCEDURE -[id="nw-disabling-multicast_{context}"] -= Disabling multicast between pods - -You can disable multicast between pods for your project. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` role. - -.Procedure - -* Disable multicast by running the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc annotate {namespace} <namespace> \ <1> - {annotation} ----- -+ -<1> The `namespace` for the project you want to disable multicast for. 
-ifeval::["{context}" == "ovn-kubernetes-disabling-multicast"] -+ -[TIP] -==== -You can alternatively apply the following YAML to delete the annotation: - -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: <namespace> - annotations: - k8s.ovn.org/multicast-enabled: null ----- -==== -endif::[] - -ifeval::["{context}" == "openshift-sdn-disabling-multicast"] -:!annotation: -:!namespace: -endif::[] -ifeval::["{context}" == "ovn-kubernetes-disabling-multicast"] -:!annotation: -:!namespace: -endif::[] diff --git a/modules/nw-dns-cache-tuning.adoc b/modules/nw-dns-cache-tuning.adoc deleted file mode 100644 index c32236b2173b..000000000000 --- a/modules/nw-dns-cache-tuning.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-cache-tuning_{context}"] -= Tuning the CoreDNS cache - -You can configure the maximum duration of both successful or unsuccessful caching, also known as positive or negative caching respectively, done by CoreDNS. Tuning the duration of caching of DNS query responses can reduce the load for any upstream DNS resolvers. - -.Procedure - -. Edit the DNS Operator object named `default` by running the following command: -+ -[source,terminal] ----- -$ oc edit dns.operator.openshift.io/default ----- - -. Modify the time-to-live (TTL) caching values: -+ -.Configuring DNS caching -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: DNS -metadata: - name: default -spec: - cache: - positiveTTL: 1h <1> - negativeTTL: 0.5h10m <2> ----- -+ -<1> The string value `1h` is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `900s` as a fallback. -<2> The string value can be a combination of units such as `0.5h10m` and is converted to its respective number of seconds by CoreDNS. If this field is omitted, the value is assumed to be `0s` and the cluster uses the internal default value of `30s` as a fallback. -+ -[WARNING] -==== -Setting TTL fields to low values could lead to an increased load on the cluster, any upstream resolvers, or both. -==== diff --git a/modules/nw-dns-forward.adoc b/modules/nw-dns-forward.adoc deleted file mode 100644 index bd25799c0ee3..000000000000 --- a/modules/nw-dns-forward.adoc +++ /dev/null @@ -1,191 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-forward_{context}"] -= Using DNS forwarding - -You can use DNS forwarding to override the default forwarding configuration in the `/etc/resolv.conf` file in the following ways: - -* Specify name servers for every zone. If the forwarded zone is the Ingress domain managed by {product-title}, then the upstream name server must be authorized for the domain. -+ -ifdef::openshift-rosa,openshift-dedicated[] -[IMPORTANT] -==== -You must specify at least one zone. Otherwise, your cluster can lose functionality. -==== -endif::[] -+ -* Provide a list of upstream DNS servers. -* Change the default forwarding policy. - -[NOTE] -==== -A DNS forwarding configuration for the default domain can have both the default servers specified in the `/etc/resolv.conf` file and the upstream DNS servers. -==== - -.Procedure - -. 
Modify the DNS Operator object named `default`: -+ -[source,terminal] ----- -$ oc edit dns.operator/default ----- -+ -This allows the Operator to create and update the config map named `dns-default` with additional server configuration blocks based on `Server`. If none of the servers have a zone that matches the query, then name resolution falls back to the upstream DNS servers. -+ -.Configuring DNS forwarding -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: DNS -metadata: - name: default -spec: - servers: - - name: example-server <1> - zones: <2> - - example.com - forwardPlugin: - policy: Random <3> - upstreams: <4> - - 1.1.1.1 - - 2.2.2.2:5353 - upstreamResolvers: <5> - policy: Random <6> - upstreams: <7> - - type: SystemResolvConf <8> - - type: Network - address: 1.2.3.4 <9> - port: 53 <10> ----- -<1> Must comply with the `rfc6335` service name syntax. -<2> Must conform to the definition of a subdomain in the `rfc1123` service name syntax. The cluster domain, `cluster.local`, is an invalid subdomain for the `zones` field. -ifdef::openshift-rosa,openshift-dedicated[] -+ -[IMPORTANT] -==== -Only forward to specific zones, such as your intranet. You must specify at least one zone. Otherwise, your cluster can lose functionality. -==== -+ -endif::[] -<3> Defines the policy to select upstream resolvers. Default value is `Random`. You can also use the values `RoundRobin`, and `Sequential`. -<4> A maximum of 15 `upstreams` is allowed per `forwardPlugin`. -<5> Optional. You can use it to override the default policy and forward DNS resolution to the specified DNS resolvers (upstream resolvers) for the default domain. If you do not provide any upstream resolvers, the DNS name queries go to the servers in `/etc/resolv.conf`. -<6> Determines the order in which upstream servers are selected for querying. You can specify one of these values: `Random`, `RoundRobin`, or `Sequential`. The default value is `Sequential`. -<7> Optional. You can use it to provide upstream resolvers. -<8> You can specify two types of `upstreams` - `SystemResolvConf` and `Network`. `SystemResolvConf` configures the upstream to use `/etc/resolv.conf` and `Network` defines a `Networkresolver`. You can specify one or both. -<9> If the specified type is `Network`, you must provide an IP address. The `address` field must be a valid IPv4 or IPv6 address. -<10> If the specified type is `Network`, you can optionally provide a port. The `port` field must have a value between `1` and `65535`. If you do not specify a port for the upstream, by default port 853 is tried. -+ -When working in a highly regulated environment, you might need the ability to secure DNS traffic when forwarding requests to upstream resolvers so that you can ensure additional DNS traffic and data privacy. Cluster administrators can configure transport layer security (TLS) for forwarded DNS queries. 
-+ -.Configuring DNS forwarding with TLS -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: DNS -metadata: - name: default -spec: - servers: - - name: example-server <1> - zones: <2> - - example.com - forwardPlugin: - transportConfig: - transport: TLS <3> - tls: - caBundle: - name: mycacert - serverName: dnstls.example.com <4> - policy: Random <5> - upstreams: <6> - - 1.1.1.1 - - 2.2.2.2:5353 - upstreamResolvers: <7> - transportConfig: - transport: TLS - tls: - caBundle: - name: mycacert - serverName: dnstls.example.com - upstreams: - - type: Network <8> - address: 1.2.3.4 <9> - port: 53 <10> ----- -<1> Must comply with the `rfc6335` service name syntax. -<2> Must conform to the definition of a subdomain in the `rfc1123` service name syntax. The cluster domain, `cluster.local`, is an invalid subdomain for the `zones` field. The cluster domain, `cluster.local`, is an invalid `subdomain` for `zones`. -ifdef::openshift-rosa,openshift-dedicated[] -+ -[IMPORTANT] -==== -Only forward to specific zones, such as your intranet. You must specify at least one zone. Otherwise, your cluster can lose functionality. -==== -+ -endif::[] -<3> When configuring TLS for forwarded DNS queries, set the `transport` field to have the value `TLS`. -By default, CoreDNS caches forwarded connections for 10 seconds. CoreDNS will hold a TCP connection open for those 10 seconds if no request is issued. With large clusters, ensure that your DNS server is aware that it might get many new connections to hold open because you can initiate a connection per node. Set up your DNS hierarchy accordingly to avoid performance issues. -<4> When configuring TLS for forwarded DNS queries, this is a mandatory server name used as part of the server name indication (SNI) to validate the upstream TLS server certificate. -<5> Defines the policy to select upstream resolvers. Default value is `Random`. You can also use the values `RoundRobin`, and `Sequential`. -<6> Required. You can use it to provide upstream resolvers. A maximum of 15 `upstreams` entries are allowed per `forwardPlugin` entry. -<7> Optional. You can use it to override the default policy and forward DNS resolution to the specified DNS resolvers (upstream resolvers) for the default domain. If you do not provide any upstream resolvers, the DNS name queries go to the servers in `/etc/resolv.conf`. -<8> `Network` type indicates that this upstream resolver should handle forwarded requests separately from the upstream resolvers listed in `/etc/resolv.conf`. Only the `Network` type is allowed when using TLS and you must provide an IP address. -<9> The `address` field must be a valid IPv4 or IPv6 address. -<10> You can optionally provide a port. The `port` must have a value between `1` and `65535`. If you do not specify a port for the upstream, by default port 853 is tried. -+ -[NOTE] -==== -If `servers` is undefined or invalid, the config map only contains the default server. -==== -+ -. View the config map: -+ -[source,terminal] ----- -$ oc get configmap/dns-default -n openshift-dns -o yaml ----- -+ -.Sample DNS ConfigMap based on previous sample DNS -[source,yaml] ----- -apiVersion: v1 -data: - Corefile: | - example.com:5353 { - forward . 1.1.1.1 2.2.2.2:5353 - } - bar.com:5353 example.com:5353 { - forward . 3.3.3.3 4.4.4.4:5454 <1> - } - .:5353 { - errors - health - kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - upstream - fallthrough in-addr.arpa ip6.arpa - } - prometheus :9153 - forward . 
/etc/resolv.conf 1.2.3.4:53 { - policy Random - } - cache 30 - reload - } -kind: ConfigMap -metadata: - labels: - dns.operator.openshift.io/owning-dns: default - name: dns-default - namespace: openshift-dns ----- -<1> Changes to the `forwardPlugin` triggers a rolling update of the CoreDNS daemon set. - -[role="_additional-resources"] -.Additional resources - -* For more information on DNS forwarding, see the link:https://coredns.io/plugins/forward/[CoreDNS forward documentation]. diff --git a/modules/nw-dns-loglevel.adoc b/modules/nw-dns-loglevel.adoc deleted file mode 100644 index b6c6690e201f..000000000000 --- a/modules/nw-dns-loglevel.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-loglevel_{context}"] -= Setting the CoreDNS log level - -You can configure the CoreDNS log level to determine the amount of detail in logged error messages. The valid values for CoreDNS log level are `Normal`, `Debug`, and `Trace`. The default `logLevel` is `Normal`. - -[NOTE] -==== -The errors plugin is always enabled. The following `logLevel` settings report different error responses: - -* `logLevel`: `Normal` enables the "errors" class: `log . { class error }`. - -* `logLevel`: `Debug` enables the "denial" class: `log . { class denial error }`. - -* `logLevel`: `Trace` enables the "all" class: `log . { class all }`. -==== - -.Procedure - -* To set `logLevel` to `Debug`, enter the following command: -+ -[source,terminal] ----- -$ oc patch dnses.operator.openshift.io/default -p '{"spec":{"logLevel":"Debug"}}' --type=merge ----- - -* To set `logLevel` to `Trace`, enter the following command: -+ -[source,terminal] ----- -$ oc patch dnses.operator.openshift.io/default -p '{"spec":{"logLevel":"Trace"}}' --type=merge ----- - -.Verification - -* To ensure the desired log level was set, check the config map: -+ -[source,terminal] ----- -$ oc get configmap/dns-default -n openshift-dns -o yaml ----- diff --git a/modules/nw-dns-operator-logs.adoc b/modules/nw-dns-operator-logs.adoc deleted file mode 100644 index e4eae4181953..000000000000 --- a/modules/nw-dns-operator-logs.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// -// * dns/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-operator-logs_{context}"] -= DNS Operator logs - -You can view DNS Operator logs by using the `oc logs` command. - -.Procedure - -View the logs of the DNS Operator: -[source,terminal] ----- -$ oc logs -n openshift-dns-operator deployment/dns-operator -c dns-operator ----- diff --git a/modules/nw-dns-operator-managementState.adoc b/modules/nw-dns-operator-managementState.adoc deleted file mode 100644 index 3d2b205442a9..000000000000 --- a/modules/nw-dns-operator-managementState.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-operator-managementState_{context}"] -= Changing the DNS Operator managementState - -DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster. The `managementState` of the DNS Operator is set to `Managed` by default, which means that the DNS Operator is actively managing its resources. You can change it to `Unmanaged`, which means the DNS Operator is not managing its resources. 
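The `managementState` setting is a single field on the same `DNS` object named `default`. The following YAML is a minimal sketch of that field in declarative form and mirrors the patch command shown later in this procedure; all other fields are omitted.

.Example DNS object with managementState set to Unmanaged (sketch)
[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: DNS
metadata:
  name: default
spec:
  # Managed (the default) means the DNS Operator actively reconciles its resources.
  # Unmanaged stops the Operator from overwriting manual changes, for example while
  # testing a configuration change or applying a workaround.
  managementState: Unmanaged
----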
- -The following are use cases for changing the DNS Operator `managementState`: - -* You are a developer and want to test a configuration change to see if it fixes an issue in CoreDNS. You can stop the DNS Operator from overwriting the fix by setting the `managementState` to `Unmanaged`. - -* You are a cluster administrator and have reported an issue with CoreDNS, but need to apply a workaround until the issue is fixed. You can set the `managementState` field of the DNS Operator to `Unmanaged` to apply the workaround. - -.Procedure - -* Change `managementState` DNS Operator: -+ -[source,terminal] ----- -oc patch dns.operator.openshift.io default --type merge --patch '{"spec":{"managementState":"Unmanaged"}}' ----- diff --git a/modules/nw-dns-operator-status.adoc b/modules/nw-dns-operator-status.adoc deleted file mode 100644 index afc53db18877..000000000000 --- a/modules/nw-dns-operator-status.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * dns/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-operator-status_{context}"] -= DNS Operator status - -You can inspect the status and view the details of the DNS Operator -using the `oc describe` command. - -.Procedure - -View the status of the DNS Operator: -[source,terminal] ----- -$ oc describe clusteroperators/dns ----- diff --git a/modules/nw-dns-operator.adoc b/modules/nw-dns-operator.adoc deleted file mode 100644 index 402f15955c3c..000000000000 --- a/modules/nw-dns-operator.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// * networking/dns/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-operator_{context}"] -= DNS Operator - -The DNS Operator implements the `dns` API from the `operator.openshift.io` API -group. The Operator deploys CoreDNS using a daemon set, creates a service for -the daemon set, and configures the kubelet to instruct pods to use the CoreDNS -service IP address for name resolution. - -.Procedure - -The DNS Operator is deployed during installation with a `Deployment` object. - -. Use the `oc get` command to view the deployment status: -+ -[source,terminal] ----- -$ oc get -n openshift-dns-operator deployment/dns-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -dns-operator 1/1 1 1 23h ----- - -. Use the `oc get` command to view the state of the DNS Operator: -+ -[source,terminal] ----- -$ oc get clusteroperator/dns ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -dns 4.1.0-0.11 True False False 92m ----- -+ -`AVAILABLE`, `PROGRESSING` and `DEGRADED` provide information about the status of the operator. `AVAILABLE` is `True` when at least 1 pod from the CoreDNS daemon set reports an `Available` status condition. diff --git a/modules/nw-dns-operatorloglevel.adoc b/modules/nw-dns-operatorloglevel.adoc deleted file mode 100644 index 59b2dc8b4e07..000000000000 --- a/modules/nw-dns-operatorloglevel.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// * networking/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-operatorloglevel_{context}"] -= Setting the CoreDNS Operator log level - -Cluster administrators can configure the Operator log level to more quickly track down OpenShift DNS issues. The valid values for `operatorLogLevel` are `Normal`, `Debug`, and `Trace`. `Trace` has the most detailed information. The default `operatorlogLevel` is `Normal`. 
There are seven logging levels for issues: Trace, Debug, Info, Warning, Error, Fatal and Panic. After the logging level is set, log entries with that severity or anything above it will be logged. - -* `operatorLogLevel: "Normal"` sets `logrus.SetLogLevel("Info")`. - -* `operatorLogLevel: "Debug"` sets `logrus.SetLogLevel("Debug")`. - -* `operatorLogLevel: "Trace"` sets `logrus.SetLogLevel("Trace")`. - -.Procedure - -* To set `operatorLogLevel` to `Debug`, enter the following command: -+ -[source,terminal] ----- -$ oc patch dnses.operator.openshift.io/default -p '{"spec":{"operatorLogLevel":"Debug"}}' --type=merge ----- - -* To set `operatorLogLevel` to `Trace`, enter the following command: -+ -[source,terminal] ----- -$ oc patch dnses.operator.openshift.io/default -p '{"spec":{"operatorLogLevel":"Trace"}}' --type=merge ----- diff --git a/modules/nw-dns-view.adoc b/modules/nw-dns-view.adoc deleted file mode 100644 index 3e6643c227aa..000000000000 --- a/modules/nw-dns-view.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * dns/dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-dns-view_{context}"] -= View the default DNS - -Every new {product-title} installation has a `dns.operator` named `default`. - -.Procedure - -. Use the `oc describe` command to view the default `dns`: -+ -[source,terminal] ----- -$ oc describe dns.operator/default ----- -+ -.Example output -[source,terminal] ----- -Name: default -Namespace: -Labels: <none> -Annotations: <none> -API Version: operator.openshift.io/v1 -Kind: DNS -... -Status: - Cluster Domain: cluster.local <1> - Cluster IP: 172.30.0.10 <2> -... ----- -<1> The Cluster Domain field is the base DNS domain used to construct fully -qualified pod and service domain names. -<2> The Cluster IP is the address pods query for name resolution. The IP is -defined as the 10th address in the service CIDR range. - -. To find the service CIDR of your cluster, -use the `oc get` command: -+ -[source,terminal] ----- -$ oc get networks.config/cluster -o jsonpath='{$.status.serviceNetwork}' ----- - -.Example output -[source,terminal] ----- -[172.30.0.0/16] ----- diff --git a/modules/nw-dual-stack-convert-back-single-stack.adoc b/modules/nw-dual-stack-convert-back-single-stack.adoc deleted file mode 100644 index 85061f3b5d4f..000000000000 --- a/modules/nw-dual-stack-convert-back-single-stack.adoc +++ /dev/null @@ -1,26 +0,0 @@ -:_content-type: PROCEDURE -[id="nw-dual-stack-convert-back-single-stack_{context}"] -= Converting to a single-stack cluster network - -As a cluster administrator, you can convert your dual-stack cluster network to a single-stack cluster network. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. -* Your cluster uses the OVN-Kubernetes network plugin. -* The cluster nodes have IPv6 addresses. -* You have enabled dual-stack networking. - -.Procedure - -. Edit the `networks.config.openshift.io` custom resource (CR) by running the -following command: -+ -[source,terminal] ----- -$ oc edit networks.config.openshift.io ----- - -. Remove the IPv6 specific configuration that you have added to the `cidr` and `hostPrefix` fields in the previous procedure. 
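For orientation, the following YAML is a sketch of roughly how the edited object looks after you remove the IPv6 entries, assuming the cluster-scoped `Network` configuration named `cluster` and the default IPv4 ranges shown in the related dual-stack conversion procedure; only the relevant fields are shown.

.Example single-stack cluster network configuration (sketch)
[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  # Only the original IPv4 entries remain after the IPv6 cidr, hostPrefix,
  # and serviceNetwork values are deleted.
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  serviceNetwork:
  - 172.30.0.0/16
----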
- diff --git a/modules/nw-dual-stack-convert.adoc b/modules/nw-dual-stack-convert.adoc deleted file mode 100644 index d409d8bca5a1..000000000000 --- a/modules/nw-dual-stack-convert.adoc +++ /dev/null @@ -1,86 +0,0 @@ -:_content-type: PROCEDURE -[id="nw-dual-stack-convert_{context}"] -= Converting to a dual-stack cluster network - -As a cluster administrator, you can convert your single-stack cluster network to a dual-stack cluster network. - -[NOTE] -==== -After converting to dual-stack networking only newly created pods are assigned IPv6 addresses. Any pods created before the conversion must be recreated to receive an IPv6 address. -==== - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. -* Your cluster uses the OVN-Kubernetes network plugin. -* The cluster nodes have IPv6 addresses. -* You have configured an IPv6-enabled router based on your infrastructure. - -.Procedure - -. To specify IPv6 address blocks for the cluster and service networks, create a file containing the following YAML: -+ --- -[source,yaml] ----- -- op: add - path: /spec/clusterNetwork/- - value: <1> - cidr: fd01::/48 - hostPrefix: 64 -- op: add - path: /spec/serviceNetwork/- - value: fd02::/112 <2> ----- -<1> Specify an object with the `cidr` and `hostPrefix` fields. The host prefix must be `64` or greater. The IPv6 CIDR prefix must be large enough to accommodate the specified host prefix. - -<2> Specify an IPv6 CIDR with a prefix of `112`. Kubernetes uses only the lowest 16 bits. For a prefix of `112`, IP addresses are assigned from `112` to `128` bits. --- - -. To patch the cluster network configuration, enter the following command: -+ -[source,terminal] ----- -$ oc patch network.config.openshift.io cluster \ - --type='json' --patch-file <file>.yaml ----- -+ --- -where: - -`file`:: Specifies the name of the file you created in the previous step. --- -+ -.Example output -[source,text] ----- -network.config.openshift.io/cluster patched ----- - -.Verification - -Complete the following step to verify that the cluster network recognizes the IPv6 address blocks that you specified in the previous procedure. - -. Display the network configuration: -+ -[source,terminal] ----- -$ oc describe network ----- -+ -.Example output -[source,text] ----- -Status: - Cluster Network: - Cidr: 10.128.0.0/14 - Host Prefix: 23 - Cidr: fd01::/48 - Host Prefix: 64 - Cluster Network MTU: 1400 - Network Type: OVNKubernetes - Service Network: - 172.30.0.0/16 - fd02::/112 ----- diff --git a/modules/nw-egress-ips-about.adoc b/modules/nw-egress-ips-about.adoc deleted file mode 100644 index f35cd4e235ac..000000000000 --- a/modules/nw-egress-ips-about.adoc +++ /dev/null @@ -1,325 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/assigning-egress-ips.adoc -// * networking/ovn_kubernetes_network_provider/configuring-egress-ips-ovn.adoc - -ifeval::["{context}" == "egress-ips"] -:openshift-sdn: -endif::[] -ifeval::["{context}" == "configuring-egress-ips-ovn"] -:ovn: -endif::[] - -[id="nw-egress-ips-about_{context}"] -= Egress IP address architectural design and implementation - -The {product-title} egress IP address functionality allows you to ensure that the traffic from one or more pods in one or more namespaces has a consistent source IP address for services outside the cluster network. - -For example, you might have a pod that periodically queries a database that is hosted on a server outside of your cluster. 
To enforce access requirements for the server, a packet filtering device is configured to allow traffic only from specific IP addresses. -To ensure that you can reliably allow access to the server from only that specific pod, you can configure a specific egress IP address for the pod that makes the requests to the server. - - -An egress IP address assigned to a namespace is different from an egress router, which is used to send traffic to specific destinations. - -In some cluster configurations, application pods and ingress router pods run on the same node. If you configure an egress IP address for an application project in this scenario, the IP address is not used when you send a request to a route from the application project. - -ifdef::openshift-sdn[] -An egress IP address is implemented as an additional IP address on the primary network interface of a node and must be in the same subnet as the primary IP address of the node. The additional IP address must not be assigned to any other node in the cluster. -endif::openshift-sdn[] - -[IMPORTANT] -==== -Egress IP addresses must not be configured in any Linux network configuration files, such as `ifcfg-eth0`. -==== - -[id="nw-egress-ips-platform-support_{context}"] -== Platform support - -Support for the egress IP address functionality on various platforms is summarized in the following table: - -[cols="1,1",options="header"] -|=== - -| Platform | Supported - -| Bare metal | Yes -| VMware vSphere | Yes -| {rh-openstack-first} | Yes -| Amazon Web Services (AWS) | Yes -| Google Cloud Platform (GCP) | Yes -| Microsoft Azure | Yes -| {ibmzProductName} and {linuxoneProductName} | Yes -| {ibmzProductName} and {linuxoneProductName} for {op-system-base-full} KVM | Yes -| {ibmpowerProductName} | Yes -| Nutanix | Yes - -|=== - -[IMPORTANT] -==== -The assignment of egress IP addresses to control plane nodes with the EgressIP feature is not supported on a cluster provisioned on Amazon Web Services (AWS). (link:https://bugzilla.redhat.com/show_bug.cgi?id=2039656[*BZ#2039656*]) -==== - -[id="nw-egress-ips-public-cloud-platform-considerations_{context}"] -== Public cloud platform considerations - -For clusters provisioned on public cloud infrastructure, there is a constraint on the absolute number of assignable IP addresses per node. The maximum number of assignable IP addresses per node, or the _IP capacity_, can be described in the following formula: - -[source,text] ----- -IP capacity = public cloud default capacity - sum(current IP assignments) ----- - -While the Egress IPs capability manages the IP address capacity per node, it is important to plan for this constraint in your deployments. For example, for a cluster installed on bare-metal infrastructure with 8 nodes you can configure 150 egress IP addresses. However, if a public cloud provider limits IP address capacity to 10 IP addresses per node, the total number of assignable IP addresses is only 80. To achieve the same IP address capacity in this example cloud provider, you would need to allocate 7 additional nodes. - -To confirm the IP capacity and subnets for any node in your public cloud environment, you can enter the `oc get node <node_name> -o yaml` command. The `cloud.network.openshift.io/egress-ipconfig` annotation includes capacity and subnet information for the node. 
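The annotation is ordinary node metadata, so it is visible in the output of the `oc get node <node_name> -o yaml` command. The following YAML is a trimmed sketch of where the annotation appears on the node object, reusing the AWS example value shown later in this section; the node name is a placeholder and all other node fields are omitted.

.Example node metadata with the egress-ipconfig annotation (sketch)
[source,yaml]
----
apiVersion: v1
kind: Node
metadata:
  name: <node_name>
  annotations:
    # The value shown here is the AWS example from this section.
    cloud.network.openshift.io/egress-ipconfig: '[{"interface":"eni-078d267045138e436","ifaddr":{"ipv4":"10.0.128.0/18"},"capacity":{"ipv4":14,"ipv6":15}}]'
----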
- -The annotation value is an array with a single object with fields that provide the following information for the primary network interface: - -* `interface`: Specifies the interface ID on AWS and Azure and the interface name on GCP. -* `ifaddr`: Specifies the subnet mask for one or both IP address families. -* `capacity`: Specifies the IP address capacity for the node. On AWS, the IP address capacity is provided per IP address family. On Azure and GCP, the IP address capacity includes both IPv4 and IPv6 addresses. - -Automatic attachment and detachment of egress IP addresses for traffic between nodes are available. This allows for traffic from many pods in namespaces to have a consistent source IP address to locations outside of the cluster. This also supports OpenShift SDN and OVN-Kubernetes, which is the default networking plugin in Red Hat OpenShift Networking in {product-title} {product-version}. - -ifdef::openshift-sdn[] -[NOTE] -==== -The {rh-openstack} egress IP address feature creates a Neutron reservation port called `egressip-<IP address>`. Using the same {rh-openstack} user as the one used for the {product-title} cluster installation, you can assign a floating IP address to this reservation port to have a predictable SNAT address for egress traffic. When an egress IP address on an {rh-openstack} network is moved from one node to another, because of a node failover, for example, the Neutron reservation port is removed and recreated. This means that the floating IP association is lost and you need to manually reassign the floating IP address to the new reservation port. -==== -endif::openshift-sdn[] - -[NOTE] -==== -When an {rh-openstack} cluster administrator assigns a floating IP to the reservation port, {product-title} cannot delete the reservation port. The `CloudPrivateIPConfig` object cannot perform delete and move operations until an {rh-openstack} cluster administrator unassigns the floating IP from the reservation port. -==== - -The following examples illustrate the annotation from nodes on several public cloud providers. The annotations are indented for readability. - -.Example `cloud.network.openshift.io/egress-ipconfig` annotation on AWS -[source,yaml] ----- -cloud.network.openshift.io/egress-ipconfig: [ - { - "interface":"eni-078d267045138e436", - "ifaddr":{"ipv4":"10.0.128.0/18"}, - "capacity":{"ipv4":14,"ipv6":15} - } -] ----- - -.Example `cloud.network.openshift.io/egress-ipconfig` annotation on GCP -[source,yaml] ----- -cloud.network.openshift.io/egress-ipconfig: [ - { - "interface":"nic0", - "ifaddr":{"ipv4":"10.0.128.0/18"}, - "capacity":{"ip":14} - } -] ----- - -The following sections describe the IP address capacity for supported public cloud environments for use in your capacity calculation. - -[id="nw-egress-ips-capacity-aws_{context}"] -=== Amazon Web Services (AWS) IP address capacity limits - -On AWS, constraints on IP address assignments depend on the instance type configured. For more information, see link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI[IP addresses per network interface per instance type] - -[id="nw-egress-ips-capacity-gcp_{context}"] -=== Google Cloud Platform (GCP) IP address capacity limits - -On GCP, the networking model implements additional node IP addresses through IP address aliasing, rather than IP address assignments. However, IP address capacity maps directly to IP aliasing capacity. 
- -The following capacity limits exist for IP aliasing assignment: - -- Per node, the maximum number of IP aliases, both IPv4 and IPv6, is 10. -- Per VPC, the maximum number of IP aliases is unspecified, but {product-title} scalability testing reveals the maximum to be approximately 15,000. - -For more information, see link:https://cloud.google.com/vpc/docs/quota#per_instance[Per instance] quotas and link:https://cloud.google.com/vpc/docs/alias-ip[Alias IP ranges overview]. - -[id="nw-egress-ips-capacity-azure_{context}"] -=== Microsoft Azure IP address capacity limits - -On Azure, the following capacity limits exist for IP address assignment: - -- Per NIC, the maximum number of assignable IP addresses, for both IPv4 and IPv6, is 256. -- Per virtual network, the maximum number of assigned IP addresses cannot exceed 65,536. - -For more information, see link:https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits?toc=/azure/virtual-network/toc.json#networking-limits[Networking limits]. - -ifdef::openshift-sdn[] -[id="nw-egress-ips-limitations_{context}"] -== Limitations - -The following limitations apply when using egress IP addresses with the OpenShift SDN network plugin: - -- You cannot use manually assigned and automatically assigned egress IP addresses on the same nodes. -- If you manually assign egress IP addresses from an IP address range, you must not make that range available for automatic IP assignment. -- You cannot share egress IP addresses across multiple namespaces using the OpenShift SDN egress IP address implementation. - -If you need to share IP addresses across namespaces, the OVN-Kubernetes network plugin egress IP address implementation allows you to span IP addresses across multiple namespaces. - -[NOTE] -==== -If you use OpenShift SDN in multitenant mode, you cannot use egress IP addresses with any namespace that is joined to another namespace by the projects that are associated with them. -For example, if `project1` and `project2` are joined by running the `oc adm pod-network join-projects --to=project1 project2` command, neither project can use an egress IP address. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1645577[BZ#1645577]. -==== -endif::openshift-sdn[] - -ifdef::ovn[] -[id="nw-egress-ips-considerations_{context}"] -== Assignment of egress IPs to pods - -To assign one or more egress IPs to a namespace or specific pods in a namespace, the following conditions must be satisfied: - -- At least one node in your cluster must have the `k8s.ovn.org/egress-assignable: ""` label. -- An `EgressIP` object exists that defines one or more egress IP addresses to use as the source IP address for traffic leaving the cluster from pods in a namespace. - -[IMPORTANT] -==== -If you create `EgressIP` objects prior to labeling any nodes in your cluster for egress IP assignment, {product-title} might assign every egress IP address to the first node with the `k8s.ovn.org/egress-assignable: ""` label. - -To ensure that egress IP addresses are widely distributed across nodes in the cluster, always apply the label to the nodes you intent to host the egress IP addresses before creating any `EgressIP` objects. 
-==== - -[id="nw-egress-ips-node-assignment_{context}"] -== Assignment of egress IPs to nodes - -When creating an `EgressIP` object, the following conditions apply to nodes that are labeled with the `k8s.ovn.org/egress-assignable: ""` label: - -- An egress IP address is never assigned to more than one node at a time. -- An egress IP address is equally balanced between available nodes that can host the egress IP address. -- If the `spec.EgressIPs` array in an `EgressIP` object specifies more than one IP address, the following conditions apply: -* No node will ever host more than one of the specified IP addresses. -* Traffic is balanced roughly equally between the specified IP addresses for a given namespace. -- If a node becomes unavailable, any egress IP addresses assigned to it are automatically reassigned, subject to the previously described conditions. - -When a pod matches the selector for multiple `EgressIP` objects, there is no guarantee which of the egress IP addresses that are specified in the `EgressIP` objects is assigned as the egress IP address for the pod. - -Additionally, if an `EgressIP` object specifies multiple egress IP addresses, there is no guarantee which of the egress IP addresses might be used. For example, if a pod matches a selector for an `EgressIP` object with two egress IP addresses, `10.10.20.1` and `10.10.20.2`, either might be used for each TCP connection or UDP conversation. - -[id="nw-egress-ips-node-architecture_{context}"] -== Architectural diagram of an egress IP address configuration - -The following diagram depicts an egress IP address configuration. The diagram describes four pods in two different namespaces running on three nodes in a cluster. The nodes are assigned IP addresses from the `192.168.126.0/18` CIDR block on the host network. - -// Source: https://github.com/redhataccess/documentation-svg-assets/blob/master/for-web/121_OpenShift/121_OpenShift_engress_IP_Topology_1020.svg -image::nw-egress-ips-diagram.svg[Architectural diagram for the egress IP feature.] - -Both Node 1 and Node 3 are labeled with `k8s.ovn.org/egress-assignable: ""` and thus available for the assignment of egress IP addresses. - -The dashed lines in the diagram depict the traffic flow from pod1, pod2, and pod3 traveling through the pod network to egress the cluster from Node 1 and Node 3. When an external service receives traffic from any of the pods selected by the example `EgressIP` object, the source IP address is either `192.168.126.10` or `192.168.126.102`. The traffic is balanced roughly equally between these two nodes. - -The following resources from the diagram are illustrated in detail: - -`Namespace` objects:: -+ --- -The namespaces are defined in the following manifest: - -.Namespace objects -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: namespace1 - labels: - env: prod ---- -apiVersion: v1 -kind: Namespace -metadata: - name: namespace2 - labels: - env: prod ----- --- - -`EgressIP` object:: -+ --- -The following `EgressIP` object describes a configuration that selects all pods in any namespace with the `env` label set to `prod`. The egress IP addresses for the selected pods are `192.168.126.10` and `192.168.126.102`. 
- -.`EgressIP` object -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP -metadata: - name: egressips-prod -spec: - egressIPs: - - 192.168.126.10 - - 192.168.126.102 - namespaceSelector: - matchLabels: - env: prod -status: - items: - - node: node1 - egressIP: 192.168.126.10 - - node: node3 - egressIP: 192.168.126.102 ----- - -For the configuration in the previous example, {product-title} assigns both egress IP addresses to the available nodes. The `status` field reflects whether and where the egress IP addresses are assigned. --- -endif::ovn[] - -ifdef::openshift-sdn[] -[id="automatic-manual-assignment-approaches"] -== IP address assignment approaches - -You can assign egress IP addresses to namespaces by setting the `egressIPs` parameter of the `NetNamespace` object. After an egress IP address is associated with a project, OpenShift SDN allows you to assign egress IP addresses to hosts in two ways: - -* In the _automatically assigned_ approach, an egress IP address range is assigned to a node. -* In the _manually assigned_ approach, a list of one or more egress IP address is assigned to a node. - -Namespaces that request an egress IP address are matched with nodes that can host those egress IP addresses, and then the egress IP addresses are assigned to those nodes. -If the `egressIPs` parameter is set on a `NetNamespace` object, but no node hosts that egress IP address, then egress traffic from the namespace will be dropped. - -High availability of nodes is automatic. -If a node that hosts an egress IP address is unreachable and there are nodes that are able to host that egress IP address, then the egress IP address will move to a new node. -When the unreachable node comes back online, the egress IP address automatically moves to balance egress IP addresses across nodes. - -[id="considerations-automatic-egress-ips"] -=== Considerations when using automatically assigned egress IP addresses - -When using the automatic assignment approach for egress IP addresses the following considerations apply: - -- You set the `egressCIDRs` parameter of each node's `HostSubnet` resource to indicate the range of egress IP addresses that can be hosted by a node. -{product-title} sets the `egressIPs` parameter of the `HostSubnet` resource based on the IP address range you specify. - -If the node hosting the namespace's egress IP address is unreachable, {product-title} will reassign the egress IP address to another node with a compatible egress IP address range. -The automatic assignment approach works best for clusters installed in environments with flexibility in associating additional IP addresses with nodes. - -[id="considerations-manual-egress-ips"] -=== Considerations when using manually assigned egress IP addresses - -This approach allows you to control which nodes can host an egress IP address. - -[NOTE] -==== -If your cluster is installed on public cloud infrastructure, you must ensure that each node that you assign egress IP addresses to has sufficient spare capacity to host the IP addresses. For more information, see "Platform considerations" in a previous section. -==== - -When using the manual assignment approach for egress IP addresses the following considerations apply: - -- You set the `egressIPs` parameter of each node's `HostSubnet` resource to indicate the IP addresses that can be hosted by a node. -- Multiple egress IP addresses per namespace are supported. 
- -If a namespace has multiple egress IP addresses and those addresses are hosted on multiple nodes, the following additional considerations apply: - -- If a pod is on a node that is hosting an egress IP address, that pod always uses the egress IP address on the node. -- If a pod is not on a node that is hosting an egress IP address, that pod uses an egress IP address at random. -endif::openshift-sdn[] - -ifdef::openshift-sdn[] -:!openshift-sdn: -endif::openshift-sdn[] -ifdef::ovn[] -:!ovn: -endif::ovn[] diff --git a/modules/nw-egress-ips-assign.adoc b/modules/nw-egress-ips-assign.adoc deleted file mode 100644 index 65016d0af160..000000000000 --- a/modules/nw-egress-ips-assign.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/assigning-egress-ips-ovn.adoc - -:_content-type: PROCEDURE -[id="nw-egress-ips-assign_{context}"] -= Assigning an egress IP address to a namespace - -You can assign one or more egress IP addresses to a namespace or to specific pods in a namespace. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster as a cluster administrator. -* Configure at least one node to host an egress IP address. - -.Procedure - -. Create an `EgressIP` object: -.. Create a `<egressips_name>.yaml` file where `<egressips_name>` is the name of the object. -.. In the file that you created, define an `EgressIP` object, as in the following example: -+ -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP -metadata: - name: egress-project1 -spec: - egressIPs: - - 192.168.127.10 - - 192.168.127.11 - namespaceSelector: - matchLabels: - env: qa ----- - -. To create the object, enter the following command. -+ -[source,terminal] ----- -$ oc apply -f <egressips_name>.yaml <1> ----- -<1> Replace `<egressips_name>` with the name of the object. -+ -.Example output -[source,terminal] ----- -egressips.k8s.ovn.org/<egressips_name> created ----- - -. Optional: Save the `<egressips_name>.yaml` file so that you can make changes later. -. Add labels to the namespace that requires egress IP addresses. To add a label to the namespace of an `EgressIP` object defined in step 1, run the following command: -+ -[source,terminal] ----- -$ oc label ns <namespace> env=qa <1> ----- -<1> Replace `<namespace>` with the namespace that requires egress IP addresses. diff --git a/modules/nw-egress-ips-automatic.adoc b/modules/nw-egress-ips-automatic.adoc deleted file mode 100644 index da546e003a08..000000000000 --- a/modules/nw-egress-ips-automatic.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/assigning-egress-ips.adoc - -:_content-type: PROCEDURE -[id="nw-egress-ips-automatic_{context}"] -= Configuring automatically assigned egress IP addresses for a namespace - -In {product-title} you can enable automatic assignment of an egress IP address -for a specific namespace across one or more nodes. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Update the `NetNamespace` object with the egress IP address using the -following JSON: -+ -[source,terminal] ----- - $ oc patch netnamespace <project_name> --type=merge -p \ - '{ - "egressIPs": [ - "<ip_address>" - ] - }' ----- -+ --- -where: - -`<project_name>`:: Specifies the name of the project. -`<ip_address>`:: Specifies one or more egress IP addresses for the `egressIPs` array. 
--- -+ -For example, to assign `project1` to an IP address of 192.168.1.100 and -`project2` to an IP address of 192.168.1.101: -+ -[source,terminal] ----- -$ oc patch netnamespace project1 --type=merge -p \ - '{"egressIPs": ["192.168.1.100"]}' -$ oc patch netnamespace project2 --type=merge -p \ - '{"egressIPs": ["192.168.1.101"]}' ----- -+ -[NOTE] -==== -Because OpenShift SDN manages the `NetNamespace` object, you can make changes only by modifying the existing `NetNamespace` object. Do not create a new `NetNamespace` object. -==== - -. Indicate which nodes can host egress IP addresses by setting the `egressCIDRs` -parameter for each host using the following JSON: -+ -[source,terminal] ----- -$ oc patch hostsubnet <node_name> --type=merge -p \ - '{ - "egressCIDRs": [ - "<ip_address_range>", "<ip_address_range>" - ] - }' ----- -+ --- -where: - -`<node_name>`:: Specifies a node name. -`<ip_address_range>`:: Specifies an IP address range in CIDR format. You can specify more than one address range for the `egressCIDRs` array. --- -+ -For example, to set `node1` and `node2` to host egress IP addresses -in the range 192.168.1.0 to 192.168.1.255: -+ -[source,terminal] ----- -$ oc patch hostsubnet node1 --type=merge -p \ - '{"egressCIDRs": ["192.168.1.0/24"]}' -$ oc patch hostsubnet node2 --type=merge -p \ - '{"egressCIDRs": ["192.168.1.0/24"]}' ----- -+ -{product-title} automatically assigns specific egress IP addresses to -available nodes in a balanced way. In this case, it assigns the egress IP -address 192.168.1.100 to `node1` and the egress IP address 192.168.1.101 to -`node2` or vice versa. diff --git a/modules/nw-egress-ips-config-object.adoc b/modules/nw-egress-ips-config-object.adoc deleted file mode 100644 index 80c7b3849276..000000000000 --- a/modules/nw-egress-ips-config-object.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/assigning-egress-ips-ovn.adoc - -[id="nw-egress-ips-config-object_{context}"] -= EgressIPconfig object -As a feature of egress IP, the `reachabilityTotalTimeoutSeconds` parameter configures the total timeout for checks that are sent by probes to egress IP nodes. The `egressIPConfig` object allows users to set the `reachabilityTotalTimeoutSeconds` `spec`. If the EgressIP node cannot be reached within this timeout, the node is declared down. - -You can increase this value if your network is not stable enough to handle the current default value of 1 second. - -The following YAML describes changing the `reachabilityTotalTimeoutSeconds` from the default 1 second probes to 5 second probes: - -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP - name: networks.operator.openshift.io - spec: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - defaultNetwork: - ovnKubernetesConfig: - egressIPConfig: <1> - reachabilityTotalTimeoutSeconds: 5 <2> - gatewayConfig: - routingViaHost: false - genevePort: 6081 ----- -<1> The `egressIPConfig` holds the configurations for the options of the `EgressIP` object. Changing these configurations allows you to extend the `EgressIP` object. - -<2> The value for `reachabilityTotalTimeoutSeconds` accepts integer values from `0` to `60`. A value of 0 disables the reachability check of the egressIP node. Values of `1` to `60` correspond to the duration in seconds between probes sending the reachability check for the node. 
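Because `egressIPConfig` is part of the OVN-Kubernetes configuration, the same value can be set declaratively on the cluster network operator configuration. The following YAML is a minimal sketch under the assumption that the field belongs to the cluster-scoped `Network` operator object named `cluster`, which the surrounding field names suggest; only the relevant fields are shown.

.Example reachability timeout on the cluster network operator configuration (sketch)
[source,yaml]
----
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      egressIPConfig:
        # 0 disables the reachability check; 1 to 60 is the probe interval in seconds.
        reachabilityTotalTimeoutSeconds: 5
----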
- - diff --git a/modules/nw-egress-ips-node.adoc b/modules/nw-egress-ips-node.adoc deleted file mode 100644 index 0ca5b534f2da..000000000000 --- a/modules/nw-egress-ips-node.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/configuring-egress-ips-ovn.adoc - -:_content-type: PROCEDURE -[id="nw-egress-ips-node_{context}"] -= Labeling a node to host egress IP addresses - -You can apply the `k8s.ovn.org/egress-assignable=""` label to a node in your cluster so that {product-title} can assign one or more egress IP addresses to the node. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster as a cluster administrator. - -.Procedure - -* To label a node so that it can host one or more egress IP addresses, enter the following command: -+ -[source,terminal] ----- -$ oc label nodes <node_name> k8s.ovn.org/egress-assignable="" <1> ----- -+ -<1> The name of the node to label. -+ -[TIP] -==== -You can alternatively apply the following YAML to add the label to a node: - -[source,yaml] ----- -apiVersion: v1 -kind: Node -metadata: - labels: - k8s.ovn.org/egress-assignable: "" - name: <node_name> ----- -==== diff --git a/modules/nw-egress-ips-object.adoc b/modules/nw-egress-ips-object.adoc deleted file mode 100644 index 8d6c006d1177..000000000000 --- a/modules/nw-egress-ips-object.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/assigning-egress-ips-ovn.adoc - -[id="nw-egress-ips-object_{context}"] -= EgressIP object - -The following YAML describes the API for the `EgressIP` object. The scope of the object is cluster-wide; it is not created in a namespace. - -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP -metadata: - name: <name> <1> -spec: - egressIPs: <2> - - <ip_address> - namespaceSelector: <3> - ... - podSelector: <4> - ... ----- -<1> The name for the `EgressIPs` object. - -<2> An array of one or more IP addresses. - -<3> One or more selectors for the namespaces to associate the egress IP addresses with. - -<4> Optional: One or more selectors for pods in the specified namespaces to associate egress IP addresses with. Applying these selectors allows for the selection of a subset of pods within a namespace. - -The following YAML describes the stanza for the namespace selector: - -.Namespace selector stanza -[source,yaml] ----- -namespaceSelector: <1> - matchLabels: - <label_name>: <label_value> ----- -<1> One or more matching rules for namespaces. If more than one match rule is provided, all matching namespaces are selected. - -The following YAML describes the optional stanza for the pod selector: - -.Pod selector stanza -[source,yaml] ----- -podSelector: <1> - matchLabels: - <label_name>: <label_value> ----- -<1> Optional: One or more matching rules for pods in the namespaces that match the specified `namespaceSelector` rules. If specified, only pods that match are selected. Others pods in the namespace are not selected. 
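If you use a `podSelector`, the matching label must be present on the pods that you want to use the egress IP addresses. As a sketch, using hypothetical pod and namespace names, you could apply the `app=web` label that the following examples rely on with a command such as:

[source,terminal]
----
$ oc label pod <pod_name> -n <namespace> app=web
----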
- -In the following example, the `EgressIP` object associates the `192.168.126.11` and `192.168.126.102` egress IP addresses with pods that have the `app` label set to `web` and are in the namespaces that have the `env` label set to `prod`: - -.Example `EgressIP` object -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP -metadata: - name: egress-group1 -spec: - egressIPs: - - 192.168.126.11 - - 192.168.126.102 - podSelector: - matchLabels: - app: web - namespaceSelector: - matchLabels: - env: prod ----- - -In the following example, the `EgressIP` object associates the `192.168.127.30` and `192.168.127.40` egress IP addresses with any pods that do not have the `environment` label set to `development`: - -.Example `EgressIP` object -[source,yaml] ----- -apiVersion: k8s.ovn.org/v1 -kind: EgressIP -metadata: - name: egress-group2 -spec: - egressIPs: - - 192.168.127.30 - - 192.168.127.40 - namespaceSelector: - matchExpressions: - - key: environment - operator: NotIn - values: - - development ----- diff --git a/modules/nw-egress-ips-static.adoc b/modules/nw-egress-ips-static.adoc deleted file mode 100644 index e6c9abb904e0..000000000000 --- a/modules/nw-egress-ips-static.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/assigning-egress-ips.adoc - -:_content-type: PROCEDURE -[id="nw-egress-ips-static_{context}"] -= Configuring manually assigned egress IP addresses for a namespace - -In {product-title} you can associate one or more egress IP addresses with a namespace. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Update the `NetNamespace` object by specifying the following JSON -object with the desired IP addresses: -+ -[source,terminal] ----- - $ oc patch netnamespace <project_name> --type=merge -p \ - '{ - "egressIPs": [ - "<ip_address>" - ] - }' ----- -+ --- -where: - -`<project_name>`:: Specifies the name of the project. -`<ip_address>`:: Specifies one or more egress IP addresses for the `egressIPs` array. --- -+ -For example, to assign the `project1` project to the IP addresses `192.168.1.100` and `192.168.1.101`: -+ -[source,terminal] ----- -$ oc patch netnamespace project1 --type=merge \ - -p '{"egressIPs": ["192.168.1.100","192.168.1.101"]}' ----- -+ -To provide high availability, set the `egressIPs` value to two or more IP addresses on different nodes. If multiple egress IP addresses are set, then pods use all egress IP addresses roughly equally. -+ -[NOTE] -==== -Because OpenShift SDN manages the `NetNamespace` object, you can make changes only by modifying the existing `NetNamespace` object. Do not create a new `NetNamespace` object. -==== - -. Manually assign the egress IP address to the node hosts. -+ -If your cluster is installed on public cloud infrastructure, you must confirm that the node has available IP address capacity. -+ -Set the `egressIPs` parameter on the `HostSubnet` object on the node host. Using the following JSON, include as many IP addresses as you want to assign to that node host: -+ -[source,terminal] ----- -$ oc patch hostsubnet <node_name> --type=merge -p \ - '{ - "egressIPs": [ - "<ip_address>", - "<ip_address>" - ] - }' ----- -+ --- -where: - -`<node_name>`:: Specifies a node name. -`<ip_address>`:: Specifies an IP address. You can specify more than one IP address for the `egressIPs` array. 
--- -+ -For example, to specify that `node1` should have the egress IPs `192.168.1.100`, -`192.168.1.101`, and `192.168.1.102`: -+ -[source,terminal] ----- -$ oc patch hostsubnet node1 --type=merge -p \ - '{"egressIPs": ["192.168.1.100", "192.168.1.101", "192.168.1.102"]}' ----- -+ -In the previous example, all egress traffic for `project1` will be routed to the node hosting the specified egress IP, and then connected through Network Address Translation (NAT) to that IP address. diff --git a/modules/nw-egress-router-about.adoc b/modules/nw-egress-router-about.adoc deleted file mode 100644 index 73489d748510..000000000000 --- a/modules/nw-egress-router-about.adoc +++ /dev/null @@ -1,192 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/using-an-egress-router.adoc -// * networking/ovn_kubernetes_network_provider/using-an-egress-router-ovn.adoc - -ifeval::["{context}" == "using-an-egress-router-ovn"] -:ovn: -:egress-pod-image-name: registry.redhat.io/openshift3/ose-pod - -// Image names are different for OKD -ifdef::openshift-origin[] -:egress-pod-image-name: quay.io/openshift/origin-pod -endif::[] - -endif::[] - -ifeval::["{context}" == "using-an-egress-router"] -:openshift-sdn: -endif::[] - -:_content-type: CONCEPT -[id="nw-egress-router-about_{context}"] -= About an egress router pod - -The {product-title} egress router pod redirects traffic to a specified remote server from a private source IP address that is not used for any other purpose. An egress router pod can send network traffic to servers that are set up to allow access only from specific IP addresses. - -[NOTE] -==== -The egress router pod is not intended for every outgoing connection. Creating large numbers of egress router pods can exceed the limits of your network hardware. For example, creating an egress router pod for every project or application could exceed the number of local MAC addresses that the network interface can handle before reverting to filtering MAC addresses in software. -==== - -[IMPORTANT] -==== -The egress router image is not compatible with Amazon AWS, Azure Cloud, or any other cloud platform that does not support layer 2 manipulations due to their incompatibility with macvlan traffic. -==== - -[id="nw-egress-router-about-modes_{context}"] -== Egress router modes - -In _redirect mode_, an egress router pod configures `iptables` rules to redirect traffic from its own IP address to one or more destination IP addresses. Client pods that need to use the reserved source IP address must be configured to access the service for the egress router rather than connecting directly to the destination IP. You can access the destination service and port from the application pod by using the `curl` command. For example: - -[source, terminal] ----- -$ curl <router_service_IP> <port> ----- - -ifdef::openshift-sdn[] -In _HTTP proxy mode_, an egress router pod runs as an HTTP proxy on port `8080`. This mode only works for clients that are connecting to HTTP-based or HTTPS-based services, but usually requires fewer changes to the client pods to get them to work. Many programs can be told to use an HTTP proxy by setting an environment variable. - -In _DNS proxy mode_, an egress router pod runs as a DNS proxy for TCP-based services from its own IP address to one or more destination IP addresses. To make use of the reserved, source IP address, client pods must be modified to connect to the egress router pod rather than connecting directly to the destination IP address. 
This modification ensures that external destinations treat traffic as though it were coming from a known source. - -Redirect mode works for all services except for HTTP and HTTPS. For HTTP and HTTPS services, use HTTP proxy mode. For TCP-based services with IP addresses or domain names, use DNS proxy mode. -endif::openshift-sdn[] - -ifdef::ovn[] -[NOTE] -==== -The egress router CNI plugin supports redirect mode only. This is a difference with the egress router implementation that you can deploy with OpenShift SDN. Unlike the egress router for OpenShift SDN, the egress router CNI plugin does not support HTTP proxy mode or DNS proxy mode. -==== -endif::ovn[] - -[id="nw-egress-router-about-router-pod-implementation_{context}"] -== Egress router pod implementation - -ifdef::openshift-sdn[] -The egress router pod setup is performed by an initialization container. That container runs in a privileged context so that it can configure the macvlan interface and set up `iptables` rules. After the initialization container finishes setting up the `iptables` rules, it exits. Next the egress router pod executes the container to handle the egress router traffic. The image used varies depending on the egress router mode. - -The environment variables determine which addresses the egress-router image uses. The image configures the macvlan interface to use `EGRESS_SOURCE` as its IP address, with `EGRESS_GATEWAY` as the IP address for the gateway. - -Network Address Translation (NAT) rules are set up so that connections to the cluster IP address of the pod on any TCP or UDP port are redirected to the same port on IP address specified by the `EGRESS_DESTINATION` variable. - -If only some of the nodes in your cluster are capable of claiming the specified source IP address and using the specified gateway, you can specify a `nodeName` or `nodeSelector` to identify which nodes are acceptable. -endif::openshift-sdn[] - -ifdef::ovn[] -The egress router implementation uses the egress router Container Network Interface (CNI) plugin. The plugin adds a secondary network interface to a pod. - -An egress router is a pod that has two network interfaces. For example, the pod can have `eth0` and `net1` network interfaces. The `eth0` interface is on the cluster network and the pod continues to use the interface for ordinary cluster-related network traffic. The `net1` interface is on a secondary network and has an IP address and gateway for that network. Other pods in the {product-title} cluster can access the egress router service and the service enables the pods to access external services. The egress router acts as a bridge between pods and an external system. - -Traffic that leaves the egress router exits through a node, but the packets -have the MAC address of the `net1` interface from the egress router pod. - -When you add an egress router custom resource, the Cluster Network Operator creates the following objects: - -* The network attachment definition for the `net1` secondary network interface of the pod. - -* A deployment for the egress router. - -If you delete an egress router custom resource, the Operator deletes the two objects in the preceding list that are associated with the egress router. -endif::ovn[] - -[id="nw-egress-router-about-deployments_{context}"] -== Deployment considerations - -An egress router pod adds an additional IP address and MAC address to the primary network interface of the node. As a result, you might need to configure your hypervisor or cloud provider to allow the additional address. 
- -{rh-openstack-first}:: - -If you deploy {product-title} on {rh-openstack}, you must allow traffic from the IP and MAC addresses of the egress router pod on your OpenStack environment. If you do not allow the traffic, then link:https://access.redhat.com/solutions/2803331[communication will fail]: -+ -[source,terminal] ----- -$ openstack port set --allowed-address \ - ip_address=<ip_address>,mac_address=<mac_address> <neutron_port_uuid> ----- - -{rh-virtualization-first}:: - -If you are using link:https://access.redhat.com/documentation/en-us/red_hat_virtualization/4.4/html/administration_guide/chap-logical_networks#Explanation_of_Settings_in_the_VM_Interface_Profile_Window[{rh-virtualization}], you must select *No Network Filter* for the Virtual network interface controller (vNIC). - -VMware vSphere:: - -If you are using VMware vSphere, see the link:https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-3507432E-AFEA-4B6B-B404-17A020575358.html[VMware documentation for securing vSphere standard switches]. View and change VMware vSphere default settings by selecting the host virtual switch from the vSphere Web Client. - -Specifically, ensure that the following are enabled: - -* https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-942BD3AA-731B-4A05-8196-66F2B4BF1ACB.html[MAC Address Changes] -* https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-7DC6486F-5400-44DF-8A62-6273798A2F80.html[Forged Transits] -* https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-92F3AB1F-B4C5-4F25-A010-8820D7250350.html[Promiscuous Mode Operation] - -[id="nw-egress-router-about-failover_{context}"] -== Failover configuration - -ifdef::openshift-sdn[] -To avoid downtime, you can deploy an egress router pod with a `Deployment` resource, as in the following example. To create a new `Service` object for the example deployment, use the `oc expose deployment/egress-demo-controller` command. - -[source,yaml,subs="attributes+"] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: egress-demo-controller -spec: - replicas: 1 <1> - selector: - matchLabels: - name: egress-router - template: - metadata: - name: egress-router - labels: - name: egress-router - annotations: - pod.network.openshift.io/assign-macvlan: "true" - spec: <2> - initContainers: - ... - containers: - ... ----- -<1> Ensure that replicas is set to `1`, because only one pod can use a given egress source IP address at any time. This means that only a single copy of the router runs on a node. - -<2> Specify the `Pod` object template for the egress router pod. -endif::openshift-sdn[] - -ifdef::ovn[] -To avoid downtime, the Cluster Network Operator deploys the egress router pod as a deployment resource. The deployment name is `egress-router-cni-deployment`. The pod that corresponds to the deployment has a label of `app=egress-router-cni`. 
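As a quick check, and assuming the default deployment name and pod label described in the preceding paragraph, you can confirm that the deployment and its pod exist:

[source,terminal]
----
$ oc get deployment egress-router-cni-deployment
$ oc get pods -l app=egress-router-cni
----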
- -To create a new service for the deployment, use the `oc expose deployment/egress-router-cni-deployment --port <port_number>` command or create a file like the following example: - -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Service -metadata: - name: app-egress -spec: - ports: - - name: tcp-8080 - protocol: TCP - port: 8080 - - name: tcp-8443 - protocol: TCP - port: 8443 - - name: udp-80 - protocol: UDP - port: 80 - type: ClusterIP - selector: - app: egress-router-cni ----- -endif::ovn[] - -// Clear temporary attributes -ifdef::openshift-sdn[] -:!openshift-sdn: -endif::[] - -ifdef::ovn[] -:!ovn: -:!egress-pod-image-name: -endif::[] diff --git a/modules/nw-egress-router-configmap.adoc b/modules/nw-egress-router-configmap.adoc deleted file mode 100644 index c2d2d7157195..000000000000 --- a/modules/nw-egress-router-configmap.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-egress-router-configmap.adoc - -:_content-type: PROCEDURE -[id="configuring-egress-router-configmap_{context}"] -= Configuring an egress router destination mappings with a config map - -For a large or frequently-changing set of destination mappings, you can use a config map to externally maintain the list. -An advantage of this approach is that permission to edit the config map can be delegated to users without `cluster-admin` privileges. Because the egress router pod requires a privileged container, it is not possible for users without `cluster-admin` privileges to edit the pod definition directly. - -[NOTE] -==== -The egress router pod does not automatically update when the config map changes. -You must restart the egress router pod to get updates. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a file containing the mapping data for the egress router pod, as in the following example: -+ ----- -# Egress routes for Project "Test", version 3 - -80 tcp 203.0.113.25 - -8080 tcp 203.0.113.26 80 -8443 tcp 203.0.113.26 443 - -# Fallback -203.0.113.27 ----- -+ -You can put blank lines and comments into this file. - -. Create a `ConfigMap` object from the file: -+ -[source,terminal] ----- -$ oc delete configmap egress-routes --ignore-not-found ----- -+ -[source,terminal] ----- -$ oc create configmap egress-routes \ - --from-file=destination=my-egress-destination.txt ----- -+ -In the previous command, the `egress-routes` value is the name of the `ConfigMap` object to create and `my-egress-destination.txt` is the name of the file that the data is read from. -+ -[TIP] -==== -You can alternatively apply the following YAML to create the config map: - -[source,yaml] ----- -apiVersion: v1 -kind: ConfigMap -metadata: - name: egress-routes -data: - destination: | - # Egress routes for Project "Test", version 3 - - 80 tcp 203.0.113.25 - - 8080 tcp 203.0.113.26 80 - 8443 tcp 203.0.113.26 443 - - # Fallback - 203.0.113.27 ----- -==== - -. Create an egress router pod definition and specify the `configMapKeyRef` stanza for the `EGRESS_DESTINATION` field in the environment stanza: -+ -[source,yaml] ----- -... -env: -- name: EGRESS_DESTINATION - valueFrom: - configMapKeyRef: - name: egress-routes - key: destination -... 
----- diff --git a/modules/nw-egress-router-cr.adoc b/modules/nw-egress-router-cr.adoc deleted file mode 100644 index 36df8c9627c0..000000000000 --- a/modules/nw-egress-router-cr.adoc +++ /dev/null @@ -1,110 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/deploying-egress-router-ovn-redirection.adoc - -ifeval::["{context}" == "deploying-egress-router-ovn-redirection"] -:redirect: -:router-type: redirect -endif::[] -:router-name: egress-router-{router-type} - -[id="nw-egress-router-ovn-cr_{context}"] -= Egress router custom resource - -Define the configuration for an egress router pod in an egress router custom resource. The following YAML describes the fields for the configuration of an egress router in {router-type} mode: - -// cluster-network-operator/manifests/0000_70_cluster-network-operator_01_egr_crd.yaml -[source,yaml,subs="attributes+"] ----- -apiVersion: network.operator.openshift.io/v1 -kind: EgressRouter -metadata: - name: <egress_router_name> - namespace: <namespace> <.> -spec: - addresses: [ <.> - { - ip: "<egress_router>", <.> - gateway: "<egress_gateway>" <.> - } - ] - mode: Redirect - redirect: { - redirectRules: [ <.> - { - destinationIP: "<egress_destination>", - port: <egress_router_port>, - targetPort: <target_port>, <.> - protocol: <network_protocol> <.> - }, - ... - ], - fallbackIP: "<egress_destination>" <.> - } ----- -// openshift/api:networkoperator/v1/001-egressrouter.crd.yaml -<.> Optional: The `namespace` field specifies the namespace to create the egress router in. If you do not specify a value in the file or on the command line, the `default` namespace is used. - -<.> The `addresses` field specifies the IP addresses to configure on the secondary network interface. - -<.> The `ip` field specifies the reserved source IP address and netmask from the physical network that the node is on to use with egress router pod. Use CIDR notation to specify the IP address and netmask. - -<.> The `gateway` field specifies the IP address of the network gateway. - -<.> Optional: The `redirectRules` field specifies a combination of egress destination IP address, egress router port, and protocol. Incoming connections to the egress router on the specified port and protocol are routed to the destination IP address. - -<.> Optional: The `targetPort` field specifies the network port on the destination IP address. If this field is not specified, traffic is routed to the same network port that it arrived on. - -<.> The `protocol` field supports TCP, UDP, or SCTP. - -<.> Optional: The `fallbackIP` field specifies a destination IP address. If you do not specify any redirect rules, the egress router sends all traffic to this fallback IP address. If you specify redirect rules, any connections to network ports that are not defined in the rules are sent by the egress router to this fallback IP address. If you do not specify this field, the egress router rejects connections to network ports that are not defined in the rules. 
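After you define the custom resource in a file, create it as you would any other resource. A minimal sketch, assuming a hypothetical file name:

[source,terminal]
----
$ oc create -f egress-router-redirect.yaml
----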
- -.Example egress router specification -[source,yaml,subs="attributes+"] ----- -apiVersion: network.operator.openshift.io/v1 -kind: EgressRouter -metadata: - name: {router-name} -spec: - networkInterface: { - macvlan: { - mode: "Bridge" - } - } - addresses: [ - { - ip: "192.168.12.99/24", - gateway: "192.168.12.1" - } - ] - mode: Redirect - redirect: { - redirectRules: [ - { - destinationIP: "10.0.0.99", - port: 80, - protocol: UDP - }, - { - destinationIP: "203.0.113.26", - port: 8080, - targetPort: 80, - protocol: TCP - }, - { - destinationIP: "203.0.113.27", - port: 8443, - targetPort: 443, - protocol: TCP - } - ] - } ----- - -// clear temporary attributes -:!router-name: -:!router-type: -ifdef::redirect[] -:!redirect: -endif::[] diff --git a/modules/nw-egress-router-dest-var.adoc b/modules/nw-egress-router-dest-var.adoc deleted file mode 100644 index 2579b721bc58..000000000000 --- a/modules/nw-egress-router-dest-var.adoc +++ /dev/null @@ -1,109 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/deploying-egress-router-layer3-redirection.adoc -// * networking/openshift_sdn/deploying-egress-router-http-redirection.adoc -// * networking/openshift_sdn/deploying-egress-router-dns-redirection.adoc - -// Every redirection mode supports an expanded environment variable - -// Conditional per flavor of Pod -ifeval::["{context}" == "deploying-egress-router-layer3-redirection"] -:redirect: -endif::[] -ifeval::["{context}" == "deploying-egress-router-http-redirection"] -:http: -endif::[] -ifeval::["{context}" == "deploying-egress-router-dns-redirection"] -:dns: -endif::[] - -[id="nw-egress-router-dest-var_{context}"] -= Egress destination configuration format - -ifdef::redirect[] -When an egress router pod is deployed in redirect mode, you can specify redirection rules by using one or more of the following formats: - -- `<port> <protocol> <ip_address>` - Incoming connections to the given `<port>` should be redirected to the same port on the given `<ip_address>`. `<protocol>` is either `tcp` or `udp`. -- `<port> <protocol> <ip_address> <remote_port>` - As above, except that the connection is redirected to a different `<remote_port>` on `<ip_address>`. -- `<ip_address>` - If the last line is a single IP address, then any connections on any other port will be redirected to the corresponding port on that IP address. If there is no fallback IP address then connections on other ports are rejected. - -In the example that follows several rules are defined: - -- The first line redirects traffic from local port `80` to port `80` on `203.0.113.25`. -- The second and third lines redirect local ports `8080` and `8443` to remote ports `80` and `443` on `203.0.113.26`. -- The last line matches traffic for any ports not specified in the previous rules. - -.Example configuration -[source,text] ----- -80 tcp 203.0.113.25 -8080 tcp 203.0.113.26 80 -8443 tcp 203.0.113.26 443 -203.0.113.27 ----- -endif::redirect[] - -ifdef::http[] -When an egress router pod is deployed in HTTP proxy mode, you can specify redirection rules by using one or more of the following formats. Each line in the configuration specifies one group of connections to allow or deny: - -- An IP address allows connections to that IP address, such as `192.168.1.1`. -- A CIDR range allows connections to that CIDR range, such as `192.168.1.0/24`. -- A hostname allows proxying to that host, such as `www.example.com`. 
-- A domain name preceded by `+*.+` allows proxying to that domain and all of its subdomains, such as `*.example.com`. -- A `!` followed by any of the previous match expressions denies the connection instead. -- If the last line is `*`, then anything that is not explicitly denied is allowed. Otherwise, anything that is not allowed is denied. - -You can also use `*` to allow connections to all remote destinations. - -.Example configuration -[source,text] ----- -!*.example.com -!192.168.1.0/24 -192.168.2.1 -* ----- -endif::http[] - -ifdef::dns[] -When the router is deployed in DNS proxy mode, you specify a list of port and destination mappings. A destination may be either an IP address or a DNS name. - -An egress router pod supports the following formats for specifying port and destination mappings: - -Port and remote address:: - -You can specify a source port and a destination host by using the two field format: `<port> <remote_address>`. - -The host can be an IP address or a DNS name. If a DNS name is provided, DNS resolution occurs at runtime. For a given host, the proxy connects to the specified source port on the destination host when connecting to the destination host IP address. - -.Port and remote address pair example -[source,text] ----- -80 172.16.12.11 -100 example.com ----- - -Port, remote address, and remote port:: - -You can specify a source port, a destination host, and a destination port by using the three field format: `<port> <remote_address> <remote_port>`. - -The three field format behaves identically to the two field version, with the exception that the destination port can be different than the source port. - -.Port, remote address, and remote port example -[source,text] ----- -8080 192.168.60.252 80 -8443 web.example.com 443 ----- -endif::dns[] - -// unload flavors -ifdef::redirect[] -:!redirect: -endif::[] -ifdef::http[] -:!http: -endif::[] -ifdef::dns[] -:!dns: -endif::[] diff --git a/modules/nw-egress-router-dns-mode.adoc b/modules/nw-egress-router-dns-mode.adoc deleted file mode 100644 index c96bcda547fe..000000000000 --- a/modules/nw-egress-router-dns-mode.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/deploying-egress-router-dns-redirection.adoc - -:_content-type: PROCEDURE -[id="nw-egress-router-dns-mode_{context}"] -= Deploying an egress router pod in DNS proxy mode - -In _DNS proxy mode_, an egress router pod acts as a DNS proxy for TCP-based services from its own IP address to one or more destination IP addresses. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an egress router pod. - -. Create a service for the egress router pod: - -.. Create a file named `egress-router-service.yaml` that contains the following YAML. Set `spec.ports` to the list of ports that you defined previously for the `EGRESS_DNS_PROXY_DESTINATION` environment variable. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-dns-svc -spec: - ports: - ... - type: ClusterIP - selector: - name: egress-dns-proxy ----- -+ -For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-dns-svc -spec: - ports: - - name: con1 - protocol: TCP - port: 80 - targetPort: 80 - - name: con2 - protocol: TCP - port: 100 - targetPort: 100 - type: ClusterIP - selector: - name: egress-dns-proxy ----- - -.. 
To create the service, enter the following command: -+ -[source,terminal] ----- -$ oc create -f egress-router-service.yaml ----- -+ -Pods can now connect to this service. The connections are proxied to the corresponding ports on the external server, using the reserved egress IP address. diff --git a/modules/nw-egress-router-http-proxy-mode.adoc b/modules/nw-egress-router-http-proxy-mode.adoc deleted file mode 100644 index d33192aa8231..000000000000 --- a/modules/nw-egress-router-http-proxy-mode.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/deploying-egress-router-http-redirection.adoc - -:_content-type: PROCEDURE -[id="nw-egress-router-http-proxy-mode_{context}"] -= Deploying an egress router pod in HTTP proxy mode - -In _HTTP proxy mode_, an egress router pod runs as an HTTP proxy on port `8080`. This mode only works for clients that are connecting to HTTP-based or HTTPS-based services, but usually requires fewer changes to the client pods to get them to work. Many programs can be told to use an HTTP proxy by setting an environment variable. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an egress router pod. - -. To ensure that other pods can find the IP address of the egress router pod, create a service to point to the egress router pod, as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-1 -spec: - ports: - - name: http-proxy - port: 8080 <1> - type: ClusterIP - selector: - name: egress-1 ----- -<1> Ensure the `http` port is set to `8080`. - -. To configure the client pod (not the egress proxy pod) to use the HTTP proxy, set the `http_proxy` or `https_proxy` variables: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: app-1 - labels: - name: app-1 -spec: - containers: - env: - - name: http_proxy - value: http://egress-1:8080/ <1> - - name: https_proxy - value: http://egress-1:8080/ - ... ----- -<1> The service created in the previous step. -+ -[NOTE] -==== -Using the `http_proxy` and `https_proxy` environment variables is not necessary for all setups. If the above does not create a working setup, then consult the documentation for the tool or software you are running in the pod. -==== diff --git a/modules/nw-egress-router-pod-ovn.adoc b/modules/nw-egress-router-pod-ovn.adoc deleted file mode 100644 index 5f73fef96a0a..000000000000 --- a/modules/nw-egress-router-pod-ovn.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/deploying-egress-router-ovn-redirection.adoc - -ifeval::["{context}" == "deploying-egress-router-ovn-redirection"] -:redirect: -:router-type: redirect -// Like nw-egress-router-pod, monitor bz-1896170 -:egress-pod-image-name: registry.redhat.com/openshift3/ose-pod -endif::[] - -// Images are different for OKD -ifdef::openshift-origin[] - -ifdef::redirect[] -:egress-pod-image-name: quay.io/openshift/origin-pod -endif::[] - -endif::[] - -[id="nw-egress-router-cni-pod_{context}"] -= Egress router pod specification for {router-type} mode - -After you create a network attachment definition, you add a pod that references the definition. 
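If you are unsure of the exact name to reference in the `k8s.v1.cni.cncf.io/networks` annotation, you can list the network attachment definitions in the namespace, for example:

[source,terminal]
----
$ oc get network-attachment-definitions -n <namespace>
----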
- -.Example egress router pod specification -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: egress-router-pod - annotations: - k8s.v1.cni.cncf.io/networks: egress-router-redirect <1> -spec: - containers: - - name: egress-router-pod - image: {egress-pod-image-name} ----- -<1> The specified network must match the name of the network attachment definition. You can specify a namespace, interface name, or both, by replacing the values in the following pattern: `<namespace>/<network>@<interface>`. By default, Multus adds a secondary network interface to the pod with a name such as `net1`, `net2`, and so on. - -// Clear temporary attributes -:!router-type: -:!egress-pod-image-name: -ifdef::redirect[] -:!redirect: -endif::[] diff --git a/modules/nw-egress-router-pod.adoc b/modules/nw-egress-router-pod.adoc deleted file mode 100644 index faf7021354f1..000000000000 --- a/modules/nw-egress-router-pod.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/deploying-egress-router-layer3-redirection.adoc -// * networking/openshift_sdn/deploying-egress-router-http-redirection.adoc -// * networking/openshift_sdn/deploying-egress-router-dns-redirection.adoc - -// Conditional per flavor of Pod -ifeval::["{context}" == "deploying-egress-router-layer3-redirection"] -:redirect: -:router-type: redirect -endif::[] -ifeval::["{context}" == "deploying-egress-router-http-redirection"] -:http: -:router-type: HTTP -endif::[] -ifeval::["{context}" == "deploying-egress-router-dns-redirection"] -:dns: -:router-type: DNS -endif::[] - -:egress-router-image-name: openshift4/ose-egress-router -:egress-router-image-url: registry.redhat.io/{egress-router-image-name} - -ifdef::http[] -:egress-http-proxy-image-name: openshift4/ose-egress-http-proxy -:egress-http-proxy-image-url: registry.redhat.io/{egress-http-proxy-image-name} -endif::[] -ifdef::dns[] -:egress-dns-proxy-image-name: openshift4/ose-egress-dns-proxy -:egress-dns-proxy-image-url: registry.redhat.io/{egress-dns-proxy-image-name} -endif::[] -ifdef::redirect[] -:egress-pod-image-name: openshift4/ose-pod -:egress-pod-image-url: registry.redhat.io/{egress-pod-image-name} -endif::[] - -// All the images are different for OKD -ifdef::openshift-origin[] - -:egress-router-image-name: openshift/origin-egress-router -:egress-router-image-url: {egress-router-image-name} - -ifdef::http[] -:egress-http-proxy-image-name: openshift/origin-egress-http-proxy -:egress-http-proxy-image-url: {egress-http-proxy-image-name} -endif::[] -ifdef::dns[] -:egress-dns-proxy-image-name: openshift/origin-egress-dns-proxy -:egress-dns-proxy-image-url: {egress-dns-proxy-image-name} -endif::[] -ifdef::redirect[] -:egress-pod-image-name: openshift/origin-pod -:egress-pod-image-url: {egress-pod-image-name} -endif::[] - -endif::openshift-origin[] - -[id="nw-egress-router-pod_{context}"] -= Egress router pod specification for {router-type} mode - -Define the configuration for an egress router pod in the `Pod` object. The following YAML describes the fields for the configuration of an egress router pod in {router-type} mode: - -// Because redirect needs privileged access to setup `EGRESS_DESTINATION` -// and the other modes do not, this ends up needing its own almost -// identical Pod. It's not possible to use conditionals for an unequal -// number of callouts. 
- -ifdef::redirect[] -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: egress-1 - labels: - name: egress-1 - annotations: - pod.network.openshift.io/assign-macvlan: "true" <1> -spec: - initContainers: - - name: egress-router - image: {egress-router-image-url} - securityContext: - privileged: true - env: - - name: EGRESS_SOURCE <2> - value: <egress_router> - - name: EGRESS_GATEWAY <3> - value: <egress_gateway> - - name: EGRESS_DESTINATION <4> - value: <egress_destination> - - name: EGRESS_ROUTER_MODE - value: init - containers: - - name: egress-router-wait - image: {egress-pod-image-url} ----- -<1> The annotation tells {product-title} to create a macvlan network interface on the primary network interface controller (NIC) and move that macvlan interface into the pod's network namespace. You must include the quotation marks around the `"true"` value. To have {product-title} create the macvlan interface on a different NIC interface, set the annotation value to the name of that interface. For example, `eth1`. -<2> IP address from the physical network that the node is on that is reserved for use by the egress router pod. Optional: You can include the subnet length, the `/24` suffix, so that a proper route to the local subnet is set. If you do not specify a subnet length, then the egress router can access only the host specified with the `EGRESS_GATEWAY` variable and no other hosts on the subnet. -<3> Same value as the default gateway used by the node. -<4> External server to direct traffic to. Using this example, connections to the pod are redirected to `203.0.113.25`, with a source IP address of `192.168.12.99`. - -.Example egress router pod specification -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: egress-multi - labels: - name: egress-multi - annotations: - pod.network.openshift.io/assign-macvlan: "true" -spec: - initContainers: - - name: egress-router - image: {egress-router-image-url} - securityContext: - privileged: true - env: - - name: EGRESS_SOURCE - value: 192.168.12.99/24 - - name: EGRESS_GATEWAY - value: 192.168.12.1 - - name: EGRESS_DESTINATION - value: | - 80 tcp 203.0.113.25 - 8080 tcp 203.0.113.26 80 - 8443 tcp 203.0.113.26 443 - 203.0.113.27 - - name: EGRESS_ROUTER_MODE - value: init - containers: - - name: egress-router-wait - image: {egress-pod-image-url} ----- -endif::redirect[] - -// Many conditionals because DNS offers one additional env variable. - -ifdef::dns,http[] -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: egress-1 - labels: - name: egress-1 - annotations: - pod.network.openshift.io/assign-macvlan: "true" <1> -spec: - initContainers: - - name: egress-router - image: {egress-router-image-url} - securityContext: - privileged: true - env: - - name: EGRESS_SOURCE <2> - value: <egress-router> - - name: EGRESS_GATEWAY <3> - value: <egress-gateway> - - name: EGRESS_ROUTER_MODE -ifdef::dns[] - value: dns-proxy -endif::dns[] -ifdef::http[] - value: http-proxy -endif::http[] - containers: - - name: egress-router-pod -ifdef::dns[] - image: {egress-dns-proxy-image-url} - securityContext: - privileged: true -endif::dns[] -ifdef::http[] - image: {egress-http-proxy-image-url} -endif::http[] - env: -ifdef::http[] - - name: EGRESS_HTTP_PROXY_DESTINATION <4> - value: |- - ... -endif::http[] -ifdef::dns[] - - name: EGRESS_DNS_PROXY_DESTINATION <4> - value: |- - ... - - name: EGRESS_DNS_PROXY_DEBUG <5> - value: "1" -endif::dns[] - ... 
----- -<1> The annotation tells {product-title} to create a macvlan network interface on the primary network interface controller (NIC) and move that macvlan interface into the pod's network namespace. You must include the quotation marks around the `"true"` value. To have {product-title} create the macvlan interface on a different NIC interface, set the annotation value to the name of that interface. For example, `eth1`. -<2> IP address from the physical network that the node is on that is reserved for use by the egress router pod. Optional: You can include the subnet length, the `/24` suffix, so that a proper route to the local subnet is set. If you do not specify a subnet length, then the egress router can access only the host specified with the `EGRESS_GATEWAY` variable and no other hosts on the subnet. -<3> Same value as the default gateway used by the node. -ifdef::http[] -<4> A string or YAML multi-line string specifying how to configure the proxy. Note that this is specified as an environment variable in the HTTP proxy container, not with the other environment variables in the init container. -endif::http[] -ifdef::dns[] -<4> Specify a list of one or more proxy destinations. -<5> Optional: Specify to output the DNS proxy log output to `stdout`. -endif::dns[] -endif::[] - -// unload flavors -ifdef::redirect[] -:!redirect: -endif::[] -ifdef::http[] -:!http: -endif::[] -ifdef::dns[] -:!dns: -endif::[] -ifdef::router-type[] -:!router-type: -endif::[] - -// unload images -ifdef::egress-router-image-name[] -:!egress-router-image-name: -endif::[] -ifdef::egress-router-image-url[] -:!egress-router-image-url: -endif::[] -ifdef::egress-pod-image-name[] -:!egress-pod-image-name: -endif::[] -ifdef::egress-pod-image-url[] -:!egress-pod-image-url: -endif::[] diff --git a/modules/nw-egress-router-redirect-mode-ovn.adoc b/modules/nw-egress-router-redirect-mode-ovn.adoc deleted file mode 100644 index b369240276f7..000000000000 --- a/modules/nw-egress-router-redirect-mode-ovn.adoc +++ /dev/null @@ -1,197 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/deploying-egress-router-ovn-redirection.adoc - -:_content-type: PROCEDURE -[id="nw-egress-router-redirect-mode-ovn_{context}"] -= Deploying an egress router in redirect mode - -You can deploy an egress router to redirect traffic from its own reserved source IP address to one or more destination IP addresses. - -After you add an egress router, the client pods that need to use the reserved source IP address must be modified to connect to the egress router rather than connecting directly to the destination IP. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an egress router definition. - -. To ensure that other pods can find the IP address of the egress router pod, create a service that uses the egress router, as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-1 -spec: - ports: - - name: web-app - protocol: TCP - port: 8080 - type: ClusterIP - selector: - app: egress-router-cni <.> ----- -<.> Specify the label for the egress router. The value shown is added by the Cluster Network Operator and is not configurable. -+ -After you create the service, your pods can connect to the service. The egress router pod redirects traffic to the corresponding port on the destination IP address. The connections originate from the reserved source IP address. 
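As a sketch, assuming a client pod in the same namespace whose image provides `curl`, you can exercise the service created in the previous step:

[source,terminal]
----
$ oc exec <client_pod> -- curl -s http://egress-1:8080
----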
- -.Verification - -To verify that the Cluster Network Operator started the egress router, complete the following procedure: - -. View the network attachment definition that the Operator created for the egress router: -+ -[source,terminal] ----- -$ oc get network-attachment-definition egress-router-cni-nad ----- -+ -The name of the network attachment definition is not configurable. -+ -.Example output -+ -[source,terminal] ----- -NAME AGE -egress-router-cni-nad 18m ----- - -. View the deployment for the egress router pod: -+ -[source,terminal] ----- -$ oc get deployment egress-router-cni-deployment ----- -+ -The name of the deployment is not configurable. -+ -.Example output -+ -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -egress-router-cni-deployment 1/1 1 1 18m ----- - -. View the status of the egress router pod: -+ -[source,terminal] ----- -$ oc get pods -l app=egress-router-cni ----- -+ -.Example output -+ -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -egress-router-cni-deployment-575465c75c-qkq6m 1/1 Running 0 18m ----- - -. View the logs and the routing table for the egress router pod. - -// Terminology from support-collecting-network-trace.adoc -.. Get the node name for the egress router pod: -+ -[source,terminal] ----- -$ POD_NODENAME=$(oc get pod -l app=egress-router-cni -o jsonpath="{.items[0].spec.nodeName}") ----- - -.. Enter into a debug session on the target node. This step instantiates a debug pod called `<node_name>-debug`: -+ -[source,terminal] ----- -$ oc debug node/$POD_NODENAME ----- - -.. Set `/host` as the root directory within the debug shell. The debug pod mounts the root file system of the host in `/host` within the pod. By changing the root directory to `/host`, you can run binaries from the executable paths of the host: -+ -[source,terminal] ----- -# chroot /host ----- - -.. From within the `chroot` environment console, display the egress router logs: -+ -[source,terminal] ----- -# cat /tmp/egress-router-log ----- -+ -.Example output -[source,terminal] ----- -2021-04-26T12:27:20Z [debug] Called CNI ADD -2021-04-26T12:27:20Z [debug] Gateway: 192.168.12.1 -2021-04-26T12:27:20Z [debug] IP Source Addresses: [192.168.12.99/24] -2021-04-26T12:27:20Z [debug] IP Destinations: [80 UDP 10.0.0.99/30 8080 TCP 203.0.113.26/30 80 8443 TCP 203.0.113.27/30 443] -2021-04-26T12:27:20Z [debug] Created macvlan interface -2021-04-26T12:27:20Z [debug] Renamed macvlan to "net1" -2021-04-26T12:27:20Z [debug] Adding route to gateway 192.168.12.1 on macvlan interface -2021-04-26T12:27:20Z [debug] deleted default route {Ifindex: 3 Dst: <nil> Src: <nil> Gw: 10.128.10.1 Flags: [] Table: 254} -2021-04-26T12:27:20Z [debug] Added new default route with gateway 192.168.12.1 -2021-04-26T12:27:20Z [debug] Added iptables rule: iptables -t nat PREROUTING -i eth0 -p UDP --dport 80 -j DNAT --to-destination 10.0.0.99 -2021-04-26T12:27:20Z [debug] Added iptables rule: iptables -t nat PREROUTING -i eth0 -p TCP --dport 8080 -j DNAT --to-destination 203.0.113.26:80 -2021-04-26T12:27:20Z [debug] Added iptables rule: iptables -t nat PREROUTING -i eth0 -p TCP --dport 8443 -j DNAT --to-destination 203.0.113.27:443 -2021-04-26T12:27:20Z [debug] Added iptables rule: iptables -t nat -o net1 -j SNAT --to-source 192.168.12.99 ----- -+ -The logging file location and logging level are not configurable when you start the egress router by creating an `EgressRouter` object as described in this procedure. - -.. 
From within the `chroot` environment console, get the container ID: -+ -[source,terminal] ----- -# crictl ps --name egress-router-cni-pod | awk '{print $1}' ----- -+ -.Example output -[source,terminal] ----- -CONTAINER -bac9fae69ddb6 ----- - -.. Determine the process ID of the container. In this example, the container ID is `bac9fae69ddb6`: -+ -[source,terminal] ----- -# crictl inspect -o yaml bac9fae69ddb6 | grep 'pid:' | awk '{print $2}' ----- -+ -.Example output -[source,terminal] ----- -68857 ----- - -.. Enter the network namespace of the container: -+ -[source,terminal] ----- -# nsenter -n -t 68857 ----- - -.. Display the routing table: -+ -[source,terminal] ----- -# ip route ----- -+ -In the following example output, the `net1` network interface is the default route. Traffic for the cluster network uses the `eth0` network interface. Traffic for the `192.168.12.0/24` network uses the `net1` network interface and originates from the reserved source IP address `192.168.12.99`. The pod routes all other traffic to the gateway at IP address `192.168.12.1`. Routing for the service network is not shown. -+ -.Example output -[source,terminal] ----- -default via 192.168.12.1 dev net1 -10.128.10.0/23 dev eth0 proto kernel scope link src 10.128.10.18 -192.168.12.0/24 dev net1 proto kernel scope link src 192.168.12.99 -192.168.12.1 dev net1 ----- diff --git a/modules/nw-egress-router-redirect-mode.adoc b/modules/nw-egress-router-redirect-mode.adoc deleted file mode 100644 index 242aa0f6f8e2..000000000000 --- a/modules/nw-egress-router-redirect-mode.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/deploying-egress-router-layer3-redirection.adoc - -:_content-type: PROCEDURE -[id="nw-egress-router-redirect-mode_{context}"] -= Deploying an egress router pod in redirect mode - -In _redirect mode_, an egress router pod sets up iptables rules to redirect traffic from its own IP address to one or more destination IP addresses. Client pods that need to use the reserved source IP address must be configured to access the service for the egress router rather than connecting directly to the destination IP. You can access the destination service and port from the application pod by using the `curl` command. For example: - -[source, terminal] ----- -$ curl <router_service_IP> <port> ----- - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an egress router pod. - -. To ensure that other pods can find the IP address of the egress router pod, create a service to point to the egress router pod, as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: egress-1 -spec: - ports: - - name: http - port: 80 - - name: https - port: 443 - type: ClusterIP - selector: - name: egress-1 ----- -+ -Your pods can now connect to this service. Their connections are redirected to -the corresponding ports on the external server, using the reserved egress IP -address. 
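For example, assuming that you saved the service manifest to a hypothetical file and that the client pod image provides `curl`, you could create the service and test the redirection as follows:

[source,terminal]
----
$ oc create -f egress-router-service.yaml
$ oc exec <client_pod> -- curl -s http://egress-1:80
----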
diff --git a/modules/nw-egressnetworkpolicy-about.adoc b/modules/nw-egressnetworkpolicy-about.adoc deleted file mode 100644 index 3285d07ec9d9..000000000000 --- a/modules/nw-egressnetworkpolicy-about.adoc +++ /dev/null @@ -1,160 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/configuring-egress-firewall-ovn.adoc - -ifeval::["{context}" == "configuring-egress-firewall-ovn"] -:ovn: -:kind: EgressFirewall -:api: k8s.ovn.org/v1 -endif::[] -ifeval::["{context}" == "openshift-sdn-egress-firewall"] -:openshift-sdn: -:kind: EgressNetworkPolicy -:api: network.openshift.io/v1 -endif::[] - -[id="nw-egressnetworkpolicy-about_{context}"] -= How an egress firewall works in a project - -As a cluster administrator, you can use an _egress firewall_ to -limit the external hosts that some or all pods can access from within the -cluster. An egress firewall supports the following scenarios: - -- A pod can only connect to internal hosts and cannot initiate connections to -the public internet. -- A pod can only connect to the public internet and cannot initiate connections -to internal hosts that are outside the {product-title} cluster. -- A pod cannot reach specified internal subnets or hosts outside the {product-title} cluster. -- A pod can connect to only specific external hosts. - -For example, you can allow one project access to a specified IP range but deny the same access to a different project. Or you can restrict application developers from updating from Python pip mirrors, and force updates to come only from approved sources. - -[NOTE] -==== -Egress firewall does not apply to the host network namespace. Pods with host networking enabled are unaffected by egress firewall rules. -==== - -You configure an egress firewall policy by creating an {kind} custom resource (CR) object. The egress firewall matches network traffic that meets any of the following criteria: - -- An IP address range in CIDR format -- A DNS name that resolves to an IP address -ifdef::ovn[] -- A port number -- A protocol that is one of the following protocols: TCP, UDP, and SCTP -endif::ovn[] - -[IMPORTANT] -==== -If your egress firewall includes a deny rule for `0.0.0.0/0`, access to your {product-title} API servers is blocked. You must either add allow rules for each IP address or use the `nodeSelector` type allow rule in your egress policy rules to connect to API servers. - -The following example illustrates the order of the egress firewall rules necessary to ensure API server access: - -[source,yaml,subs="attributes+"] ----- -apiVersion: {api} -kind: {kind} -metadata: - name: default - namespace: <namespace> <1> -spec: - egress: - - to: - cidrSelector: <api_server_address_range> <2> - type: Allow -# ... - - to: - cidrSelector: 0.0.0.0/0 <3> - type: Deny ----- -<1> The namespace for the egress firewall. -<2> The IP address range that includes your {product-title} API servers. -<3> A global deny rule prevents access to the {product-title} API servers. - -To find the IP address for your API servers, run `oc get ep kubernetes -n default`. - -For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1988324[BZ#1988324]. -==== - -ifdef::openshift-sdn[] -[IMPORTANT] -==== -You must have OpenShift SDN configured to use either the network policy or multitenant mode to configure an egress firewall. 
- -If you use network policy mode, an egress firewall is compatible with only one policy per namespace and will not work with projects that share a network, such as global projects. -==== -endif::openshift-sdn[] - -[WARNING] -==== -Egress firewall rules do not apply to traffic that goes through routers. Any user with permission to create a Route CR object can bypass egress firewall policy rules by creating a route that points to a forbidden destination. -==== - -[id="limitations-of-an-egress-firewall_{context}"] -== Limitations of an egress firewall - -An egress firewall has the following limitations: - -* No project can have more than one {kind} object. - -ifdef::ovn[] -* A maximum of one {kind} object with a maximum of 8,000 rules can be defined per project. - -* If you are using the OVN-Kubernetes network plugin with shared gateway mode in Red Hat OpenShift Networking, return ingress replies are affected by egress firewall rules. If the egress firewall rules drop the ingress reply destination IP, the traffic is dropped. -endif::ovn[] -ifdef::openshift-sdn[] -* A maximum of one {kind} object with a maximum of 1,000 rules can be defined per project. - -* The `default` project cannot use an egress firewall. - -* When using the OpenShift SDN network plugin in multitenant mode, the following limitations apply: - - - Global projects cannot use an egress firewall. You can make a project global by using the `oc adm pod-network make-projects-global` command. - - - Projects merged by using the `oc adm pod-network join-projects` command cannot use an egress firewall in any of the joined projects. -endif::openshift-sdn[] - -Violating any of these restrictions results in a broken egress firewall for the project, and might cause all external network traffic to be dropped. - -An Egress Firewall resource can be created in the `kube-node-lease`, `kube-public`, `kube-system`, `openshift` and `openshift-` projects. - -[id="policy-rule-order_{context}"] -== Matching order for egress firewall policy rules - -The egress firewall policy rules are evaluated in the order that they are defined, from first to last. The first rule that matches an egress connection from a pod applies. Any subsequent rules are ignored for that connection. - -[id="domain-name-server-resolution_{context}"] -== How Domain Name Server (DNS) resolution works - -If you use DNS names in any of your egress firewall policy rules, proper resolution of the domain names is subject to the following restrictions: - -ifdef::openshift-sdn[] -* Domain name updates are polled based on a time-to-live (TTL) duration. By default, the duration is 30 seconds. When the egress firewall controller queries the local name servers for a domain name, if the response includes a TTL that is less than 30 seconds, the controller sets the duration to the returned value. If the TTL in the response is greater than 30 minutes, the controller sets the duration to 30 minutes. If the TTL is between 30 seconds and 30 minutes, the controller ignores the value and sets the duration to 30 seconds. -endif::openshift-sdn[] -ifdef::ovn[] -* Domain name updates are polled based on a time-to-live (TTL) duration. By default, the duration is 30 minutes. When the egress firewall controller queries the local name servers for a domain name, if the response includes a TTL and the TTL is less than 30 minutes, the controller sets the duration for that DNS name to the returned value. Each DNS name is queried after the TTL for the DNS record expires. 
-endif::ovn[] - -* The pod must resolve the domain from the same local name servers when necessary. Otherwise the IP addresses for the domain known by the egress firewall controller and the pod can be different. If the IP addresses for a hostname differ, the egress firewall might not be enforced consistently. - -* Because the egress firewall controller and pods asynchronously poll the same local name server, the pod might obtain the updated IP address before the egress controller does, which causes a race condition. Due to this current limitation, domain name usage in {kind} objects is only recommended for domains with infrequent IP address changes. - -[NOTE] -==== -The egress firewall always allows pods access to the external interface of the node that the pod is on for DNS resolution. - -If you use domain names in your egress firewall policy and your DNS resolution is not handled by a DNS server on the local node, then you must add egress firewall rules that allow access to your DNS server's IP addresses. if you are using domain names in your pods. -==== - -ifdef::ovn[] -:!ovn: -endif::[] -ifdef::openshift-sdn[] -:!openshift-sdn: -endif::[] -ifdef::kind[] -:!kind: -endif::[] -ifdef::api[] -:!api: -endif::[] diff --git a/modules/nw-egressnetworkpolicy-create.adoc b/modules/nw-egressnetworkpolicy-create.adoc deleted file mode 100644 index c4b614187a4e..000000000000 --- a/modules/nw-egressnetworkpolicy-create.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/configuring-egress-firewall-ovn.adoc - -ifeval::["{context}" == "openshift-sdn-egress-firewall"] -:kind: EgressNetworkPolicy -:obj: egressnetworkpolicy.network.openshift.io/v1 -:cni: OpenShift SDN -endif::[] -ifeval::["{context}" == "configuring-egress-firewall-ovn"] -:kind: EgressFirewall -:obj: egressfirewall.k8s.ovn.org/v1 -:cni: OVN-Kubernetes -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-create_{context}"] -= Creating an egress firewall policy object - -As a cluster administrator, you can create an egress firewall policy object for a project. - -[IMPORTANT] -==== -If the project already has an {kind} object defined, you must edit the existing policy to make changes to the egress firewall rules. -==== - -.Prerequisites - -* A cluster that uses the {cni} network plugin. -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster as a cluster administrator. - -.Procedure - -. Create a policy rule: -.. Create a `<policy_name>.yaml` file where `<policy_name>` describes the egress -policy rules. -.. In the file you created, define an egress policy object. - -. Enter the following command to create the policy object. Replace `<policy_name>` with the name of the policy and `<project>` with the project that the rule applies to. -+ -[source,terminal] ----- -$ oc create -f <policy_name>.yaml -n <project> ----- -+ -In the following example, a new {kind} object is created in a project named `project1`: -+ -[source,terminal] ----- -$ oc create -f default.yaml -n project1 ----- -+ -.Example output -[source,terminal,subs="attributes"] ----- -{obj} created ----- - -. Optional: Save the `<policy_name>.yaml` file so that you can make changes later. 
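For reference, the following is a minimal sketch of what a `<policy_name>.yaml` file might contain. It assumes the OpenShift SDN `EgressNetworkPolicy` kind; for the OVN-Kubernetes `EgressFirewall` kind, use `apiVersion: k8s.ovn.org/v1` instead. The CIDR values are placeholders only.

[source,yaml]
----
# Minimal sketch of a policy file, for example default.yaml.
# For OVN-Kubernetes, use kind: EgressFirewall and apiVersion: k8s.ovn.org/v1.
apiVersion: network.openshift.io/v1
kind: EgressNetworkPolicy
metadata:
  name: default
spec:
  egress:
  - type: Allow              # allow traffic to one example external subnet
    to:
      cidrSelector: 1.2.3.0/24
  - type: Deny               # deny all other external traffic
    to:
      cidrSelector: 0.0.0.0/0
----

Because rules are evaluated in the order they are defined, the allow rule must precede the global deny rule.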
- -ifdef::kind[] -:!kind: -endif::[] -ifdef::obj[] -:!obj: -endif::[] -ifdef::cni[] -:!cni: -endif::[] diff --git a/modules/nw-egressnetworkpolicy-delete.adoc b/modules/nw-egressnetworkpolicy-delete.adoc deleted file mode 100644 index 06c8f09efd6f..000000000000 --- a/modules/nw-egressnetworkpolicy-delete.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift-sdn/removing-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/removing-egress-firewall-ovn.adoc - -ifeval::["{context}" == "openshift-sdn-egress-firewall"] -:kind: EgressNetworkPolicy -:res: egressnetworkpolicy -:cni: OpenShift SDN -endif::[] -ifeval::["{context}" == "removing-egress-firewall-ovn"] -:kind: EgressFirewall -:res: egressfirewall -:cni: OVN-Kubernetes -endif::[] - -:_content-type: PROCEDURE -[id="nw-egressnetworkpolicy-delete_{context}"] -= Removing an {kind} object - -As a cluster administrator, you can remove an egress firewall from a project. - -.Prerequisites - -* A cluster using the {cni} network plugin. -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster as a cluster administrator. - -.Procedure - -. Find the name of the {kind} object for the project. Replace `<project>` with the name of the project. -+ -[source,terminal,subs="attributes+"] ----- -$ oc get -n <project> {res} ----- - -. Enter the following command to delete the {kind} object. Replace `<project>` with the name of the project and `<name>` with the name of the object. -+ -[source,terminal,subs="attributes+"] ----- -$ oc delete -n <project> {res} <name> ----- - -ifdef::kind[] -:!kind: -endif::[] -ifdef::res[] -:!res: -endif::[] -ifdef::cni[] -:!cni: -endif::[] diff --git a/modules/nw-egressnetworkpolicy-edit.adoc b/modules/nw-egressnetworkpolicy-edit.adoc deleted file mode 100644 index 592d57846e53..000000000000 --- a/modules/nw-egressnetworkpolicy-edit.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/editing-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/editing-egress-firewall-ovn.adoc - -ifeval::["{context}" == "openshift-sdn-egress-firewall"] -:kind: EgressNetworkPolicy -:res: egressnetworkpolicy -:cni: OpenShift SDN -endif::[] -ifeval::["{context}" == "editing-egress-firewall-ovn"] -:kind: EgressFirewall -:res: egressfirewall -:cni: OVN-Kubernetes -endif::[] - -:_content-type: PROCEDURE -[id="nw-egressnetworkpolicy-edit_{context}"] -= Editing an {kind} object - -As a cluster administrator, you can update the egress firewall for a project. - -.Prerequisites - -* A cluster using the {cni} network plugin. -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster as a cluster administrator. - -.Procedure - -. Find the name of the {kind} object for the project. Replace `<project>` with the name of the project. -+ -[source,terminal,subs="attributes+"] ----- -$ oc get -n <project> {res} ----- - -. Optional: If you did not save a copy of the {kind} object when you created the egress network firewall, enter the following command to create a copy. -+ -[source,terminal,subs="attributes+"] ----- -$ oc get -n <project> {res} <name> -o yaml > <filename>.yaml ----- -+ -Replace `<project>` with the name of the project. Replace `<name>` with the name of the object. Replace `<filename>` with the name of the file to save the YAML to. - -. After making changes to the policy rules, enter the following command to replace the {kind} object. 
Replace `<filename>` with the name of the file containing the updated {kind} object. -+ -[source,terminal] ----- -$ oc replace -f <filename>.yaml ----- - -ifdef::kind[] -:!kind: -endif::[] -ifdef::res[] -:!res: -endif::[] -ifdef::cni[] -:!cni: -endif::[] diff --git a/modules/nw-egressnetworkpolicy-object.adoc b/modules/nw-egressnetworkpolicy-object.adoc deleted file mode 100644 index 8a7960606ac4..000000000000 --- a/modules/nw-egressnetworkpolicy-object.adoc +++ /dev/null @@ -1,186 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/configuring-egress-firewall-ovn.adoc - -ifeval::["{context}" == "openshift-sdn-egress-firewall"] -:kind: EgressNetworkPolicy -:api: network.openshift.io/v1 -:openshift-sdn: -endif::[] -ifeval::["{context}" == "configuring-egress-firewall-ovn"] -:kind: EgressFirewall -:api: k8s.ovn.org/v1 -:ovn: -endif::[] - -[id="nw-egressnetworkpolicy-object_{context}"] -= {kind} custom resource (CR) object - -You can define one or more rules for an egress firewall. A rule is either an `Allow` rule or a `Deny` rule, with a specification for the traffic that the rule applies to. - -The following YAML describes an {kind} CR object: - -.{kind} object -[source,yaml,subs="attributes+"] ----- -apiVersion: {api} -kind: {kind} -metadata: -ifdef::openshift-sdn[] - name: <name> <1> -endif::openshift-sdn[] -ifdef::ovn[] - name: <name> <1> -endif::ovn[] -spec: - egress: <2> - ... ----- -ifdef::openshift-sdn[] -<1> A name for your egress firewall policy. -<2> A collection of one or more egress network policy rules as described in the following section. -endif::openshift-sdn[] -ifdef::ovn[] -<1> The name for the object must be `default`. -<2> A collection of one or more egress network policy rules as described in the following section. -endif::ovn[] - -[id="egressnetworkpolicy-rules_{context}"] -== {kind} rules - -The following YAML describes an egress firewall rule object. The user can select either an IP address range in CIDR format, a domain name, or use the `nodeSelector` to allow or deny egress traffic. The `egress` stanza expects an array of one or more objects. - -// - OVN-Kubernetes does not support DNS -// - OpenShift SDN does not support port and protocol specification - -.Egress policy rule stanza -ifdef::openshift-sdn[] -[source,yaml] ----- -egress: -- type: <type> <1> - to: <2> - cidrSelector: <cidr> <3> - dnsName: <dns_name> <4> ----- -<1> The type of rule. The value must be either `Allow` or `Deny`. -<2> A stanza describing an egress traffic match rule. A value for either the `cidrSelector` field or the `dnsName` field for the rule. You cannot use both fields in the same rule. -<3> An IP address range in CIDR format. -<4> A domain name. -endif::openshift-sdn[] -ifdef::ovn[] -[source,yaml] ----- -egress: -- type: <type> <1> - to: <2> - cidrSelector: <cidr> <3> - dnsName: <dns_name> <4> - nodeSelector: <label_name>: <label_value> <5> - ports: <6> - ... ----- -<1> The type of rule. The value must be either `Allow` or `Deny`. -<2> A stanza describing an egress traffic match rule that specifies the `cidrSelector` field or the `dnsName` field. You cannot use both fields in the same rule. -<3> An IP address range in CIDR format. -<4> A DNS domain name. -<5> Labels are key/value pairs that the user defines. Labels are attached to objects, such as pods. The `nodeSelector` allows for one or more node labels to be selected and attached to pods. 
-<6> Optional: A stanza describing a collection of network ports and protocols for the rule. - -.Ports stanza -[source,yaml] ----- -ports: -- port: <port> <1> - protocol: <protocol> <2> ----- -<1> A network port, such as `80` or `443`. If you specify a value for this field, you must also specify a value for `protocol`. -<2> A network protocol. The value must be either `TCP`, `UDP`, or `SCTP`. -endif::ovn[] - -[id="egressnetworkpolicy-example_{context}"] -== Example {kind} CR objects - -The following example defines several egress firewall policy rules: - -[source,yaml,subs="attributes+"] ----- -apiVersion: {api} -kind: {kind} -metadata: - name: default -spec: - egress: <1> - - type: Allow - to: - cidrSelector: 1.2.3.0/24 -ifdef::openshift-sdn[] - - type: Allow - to: - dnsName: www.example.com -endif::openshift-sdn[] - - type: Deny - to: - cidrSelector: 0.0.0.0/0 ----- -<1> A collection of egress firewall policy rule objects. - -ifdef::ovn[] -The following example defines a policy rule that denies traffic to the host at the `172.16.1.1` IP address, if the traffic is using either the TCP protocol and destination port `80` or any protocol and destination port `443`. - -[source,yaml,subs="attributes+"] ----- -apiVersion: {api} -kind: {kind} -metadata: - name: default -spec: - egress: - - type: Deny - to: - cidrSelector: 172.16.1.1 - ports: - - port: 80 - protocol: TCP - - port: 443 ----- - -[id="configuringNodeSelector-example_{context}"] -== Example nodeSelector for {kind} - -As a cluster administrator, you can allow or deny egress traffic to nodes in your cluster by specifying a label using `nodeSelector`. Labels can be applied to one or more nodes. The following is an example with the `region=east` label: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: default -spec: - egress: - - to: - nodeSelector: - matchLabels: - region: east - type: Allow ----- - -[TIP] -==== -Instead of adding manual rules per node IP address, use node selectors to create a label that allows pods behind an egress firewall to access host network pods. -==== -endif::ovn[] - -ifdef::kind[] -:!kind: -endif::[] -ifdef::api[] -:!api: -endif::[] -ifdef::ovn[] -:!ovn: -endif::[] -ifdef::openshift-sdn[] -:!openshift-sdn: -endif::[] diff --git a/modules/nw-egressnetworkpolicy-view.adoc b/modules/nw-egressnetworkpolicy-view.adoc deleted file mode 100644 index 7b7605f034f9..000000000000 --- a/modules/nw-egressnetworkpolicy-view.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-egress-firewall.adoc -// * networking/ovn_kubernetes_network_provider/configuring-egress-firewall-ovn.adoc - -ifeval::["{context}" == "openshift-sdn-viewing-egress-firewall"] -:kind: EgressNetworkPolicy -:res: egressnetworkpolicy -:cni: OpenShift SDN -endif::[] -ifeval::["{context}" == "viewing-egress-firewall-ovn"] -:kind: EgressFirewall -:res: egressfirewall -:cni: OVN-Kubernetes -endif::[] - -:_content-type: PROCEDURE -[id="nw-egressnetworkpolicy-view_{context}"] -= Viewing an {kind} object - -You can view an {kind} object in your cluster. - -.Prerequisites - -* A cluster using the {cni} network plugin. -* Install the OpenShift Command-line Interface (CLI), commonly known as `oc`. -* You must log in to the cluster. - -.Procedure - -. Optional: To view the names of the {kind} objects defined in your cluster, -enter the following command: -+ -[source,terminal,subs="attributes"] ----- -$ oc get {res} --all-namespaces ----- - -. 
To inspect a policy, enter the following command. Replace `<policy_name>` with the name of the policy to inspect. -+ -[source,terminal,subs="attributes+"] ----- -$ oc describe {res} <policy_name> ----- -+ -[source,terminal] -.Example output ----- -Name: default -Namespace: project1 -Created: 20 minutes ago -Labels: <none> -Annotations: <none> -Rule: Allow to 1.2.3.0/24 -Rule: Allow to www.example.com -Rule: Deny to 0.0.0.0/0 ----- - -ifdef::kind[] -:!kind: -endif::[] -ifdef::res[] -:!res: -endif::[] -ifdef::cni[] -:!cni: -endif::[] diff --git a/modules/nw-enabling-a-provisioning-network-after-installation.adoc b/modules/nw-enabling-a-provisioning-network-after-installation.adoc deleted file mode 100644 index e023a49b0a70..000000000000 --- a/modules/nw-enabling-a-provisioning-network-after-installation.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// This is included in the following assemblies: -// -// ipi-install-post-installation-configuration.adoc - -:_content-type: PROCEDURE -[id="enabling-a-provisioning-network-after-installation_{context}"] - -= Enabling a provisioning network after installation - -The assisted installer and installer-provisioned installation for bare metal clusters provide the ability to deploy a cluster without a `provisioning` network. This capability is for scenarios such as proof-of-concept clusters or deploying exclusively with Redfish virtual media when each node's baseboard management controller is routable via the `baremetal` network. - -You can enable a `provisioning` network after installation using the Cluster Baremetal Operator (CBO). - -.Prerequisites - -* A dedicated physical network must exist, connected to all worker and control plane nodes. -* You must isolate the native, untagged physical network. -* The network cannot have a DHCP server when the `provisioningNetwork` configuration setting is set to `Managed`. -* You can omit the `provisioningInterface` setting in {product-title} 4.10 to use the `bootMACAddress` configuration setting. - -.Procedure - -. When setting the `provisioningInterface` setting, first identify the provisioning interface name for the cluster nodes. For example, `eth0` or `eno1`. - -. Enable the Preboot eXecution Environment (PXE) on the `provisioning` network interface of the cluster nodes. - -. Retrieve the current state of the `provisioning` network and save it to a provisioning custom resource (CR) file: -+ -[source,terminal] ----- -$ oc get provisioning -o yaml > enable-provisioning-nw.yaml ----- - -. Modify the provisioning CR file: -+ -[source,terminal] ----- -$ vim ~/enable-provisioning-nw.yaml ----- -+ -Scroll down to the `provisioningNetwork` configuration setting and change it from `Disabled` to `Managed`. Then, add the `provisioningIP`, `provisioningNetworkCIDR`, `provisioningDHCPRange`, `provisioningInterface`, and `watchAllNameSpaces` configuration settings after the `provisioningNetwork` setting. Provide appropriate values for each setting. -+ -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: metal3.io/v1alpha1 - kind: Provisioning - metadata: - name: provisioning-configuration - spec: - provisioningNetwork: <1> - provisioningIP: <2> - provisioningNetworkCIDR: <3> - provisioningDHCPRange: <4> - provisioningInterface: <5> - watchAllNameSpaces: <6> ----- -+ -<1> The `provisioningNetwork` is one of `Managed`, `Unmanaged`, or `Disabled`. When set to `Managed`, Metal3 manages the provisioning network and the CBO deploys the Metal3 pod with a configured DHCP server. 
When set to `Unmanaged`, the system administrator configures the DHCP server manually. -+ -<2> The `provisioningIP` is the static IP address that the DHCP server and ironic use to provision the network. This static IP address must be within the `provisioning` subnet, and outside of the DHCP range. If you configure this setting, it must have a valid IP address even if the `provisioning` network is `Disabled`. The static IP address is bound to the metal3 pod. If the metal3 pod fails and moves to another server, the static IP address also moves to the new server. -+ -<3> The Classless Inter-Domain Routing (CIDR) address. If you configure this setting, it must have a valid CIDR address even if the `provisioning` network is `Disabled`. For example: `192.168.0.1/24`. -+ -<4> The DHCP range. This setting is only applicable to a `Managed` provisioning network. Omit this configuration setting if the `provisioning` network is `Disabled`. For example: `192.168.0.64, 192.168.0.253`. -+ -<5> The NIC name for the `provisioning` interface on cluster nodes. The `provisioningInterface` setting is only applicable to `Managed` and `Unmanaged` provisioning networks. Omit the `provisioningInterface` configuration setting if the `provisioning` network is `Disabled`. Omit the `provisioningInterface` configuration setting to use the `bootMACAddress` configuration setting instead. -+ -<6> Set this setting to `true` if you want metal3 to watch namespaces other than the default `openshift-machine-api` namespace. The default value is `false`. - -. Save the changes to the provisioning CR file. - -. Apply the provisioning CR file to the cluster: -+ -[source,terminal] ----- -$ oc apply -f enable-provisioning-nw.yaml ----- diff --git a/modules/nw-enabling-hsts-per-route.adoc b/modules/nw-enabling-hsts-per-route.adoc deleted file mode 100644 index a804929a9bd5..000000000000 --- a/modules/nw-enabling-hsts-per-route.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// * networking/configuring-routing.adoc - -:_content-type: PROCEDURE -[id="nw-enabling-hsts-per-route_{context}"] -= Enabling HTTP Strict Transport Security per-route - -HTTP strict transport security (HSTS) is implemented in the HAProxy template and applied to edge and re-encrypt routes that have the `haproxy.router.openshift.io/hsts_header` annotation. - -.Prerequisites - -* You are logged in to the cluster with a user with administrator privileges for the project. -* You installed the `oc` CLI. - -.Procedure - -* To enable HSTS on a route, add the `haproxy.router.openshift.io/hsts_header` value to the edge-terminated or re-encrypt route. You can use the `oc annotate` tool to do this by running the following command: -+ -[source,terminal] ----- -$ oc annotate route <route_name> -n <namespace> --overwrite=true "haproxy.router.openshift.io/hsts_header"="max-age=31536000;\ <1> -includeSubDomains;preload" ----- -<1> In this example, the maximum age is set to `31536000` ms, which is approximately eight and a half hours. -+ -[NOTE] -==== -In this example, the equal sign (`=`) is in quotes. This is required to properly execute the annotate command. -==== -+ -.Example route configured with an annotation -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/hsts_header: max-age=31536000;includeSubDomains;preload <1> <2> <3> -... -spec: - host: def.abc.com - tls: - termination: "reencrypt" - ... - wildcardPolicy: "Subdomain" ----- -<1> Required. 
`max-age` measures the length of time, in seconds, that the HSTS policy is in effect. If set to `0`, it negates the policy. -<2> Optional. When included, `includeSubDomains` tells the client -that all subdomains of the host must have the same HSTS policy as the host. -<3> Optional. When `max-age` is greater than 0, you can add `preload` in `haproxy.router.openshift.io/hsts_header` to allow external services to include this site in their HSTS preload lists. For example, sites such as Google can construct a list of sites that have `preload` set. Browsers can then use these lists to determine which sites they can communicate with over HTTPS, even before they have interacted with the site. Without `preload` set, browsers must have interacted with the site over HTTPS, at least once, to get the header. diff --git a/modules/nw-enabling-hsts.adoc b/modules/nw-enabling-hsts.adoc deleted file mode 100644 index 3100cc303805..000000000000 --- a/modules/nw-enabling-hsts.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module filename: nw-enabling-hsts.adoc -// Module included in the following assemblies: -// * networking/configuring-routing.adoc - -[id="nw-enabling-hsts_{context}"] -= HTTP Strict Transport Security - -HTTP Strict Transport Security (HSTS) policy is a security enhancement, which signals to the browser client that only HTTPS traffic is allowed on the route host. HSTS also optimizes web traffic by signaling HTTPS transport is required, without using HTTP redirects. HSTS is useful for speeding up interactions with websites. - -When HSTS policy is enforced, HSTS adds a Strict Transport Security header to HTTP and HTTPS responses from the site. You can use the `insecureEdgeTerminationPolicy` value in a route to redirect HTTP to HTTPS. When HSTS is enforced, the client changes all requests from the HTTP URL to HTTPS before the request is sent, eliminating the need for a redirect. - -Cluster administrators can configure HSTS to do the following: - -* Enable HSTS per-route -* Disable HSTS per-route -* Enforce HSTS per-domain, for a set of domains, or use namespace labels in combination with domains - -[IMPORTANT] -==== -HSTS works only with secure routes, either edge-terminated or re-encrypt. The configuration is ineffective on HTTP or passthrough routes. -==== diff --git a/modules/nw-enabling-multicast.adoc b/modules/nw-enabling-multicast.adoc deleted file mode 100644 index 3b0fd7eb0a2b..000000000000 --- a/modules/nw-enabling-multicast.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/enabling-multicast.adoc -// * networking/ovn_kubernetes_network_provider/enabling-multicast.adoc - -ifeval::["{context}" == "openshift-sdn-enabling-multicast"] -:namespace: netnamespace -:annotation: netnamespace.network.openshift.io/multicast-enabled=true -endif::[] -ifeval::["{context}" == "ovn-kubernetes-enabling-multicast"] -:namespace: namespace -:annotation: k8s.ovn.org/multicast-enabled=true -endif::[] - -:_content-type: PROCEDURE -[id="nw-enabling-multicast_{context}"] -= Enabling multicast between pods - -You can enable multicast between pods for your project. - -.Prerequisites -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` -ifdef::openshift-rosa,openshift-dedicated[] -or the `dedicated-admin` -endif::[] -role. - -.Procedure - -* Run the following command to enable multicast for a project. Replace `<namespace>` with the namespace for the project you want to enable multicast for. 
-+ -[source,terminal,subs="attributes+"] ----- -$ oc annotate {namespace} <namespace> \ - {annotation} ----- -ifeval::["{context}" == "ovn-kubernetes-enabling-multicast"] -+ -[TIP] -==== -You can alternatively apply the following YAML to add the annotation: - -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: <namespace> - annotations: - k8s.ovn.org/multicast-enabled: "true" ----- -==== -endif::[] - -.Verification - -To verify that multicast is enabled for a project, complete the following procedure: - -. Change your current project to the project that you enabled multicast for. Replace `<project>` with the project name. -+ -[source,terminal] ----- -$ oc project <project> ----- - -. Create a pod to act as a multicast receiver: -+ -[source,terminal] ----- -$ cat <<EOF| oc create -f - -apiVersion: v1 -kind: Pod -metadata: - name: mlistener - labels: - app: multicast-verify -spec: - containers: - - name: mlistener - image: registry.access.redhat.com/ubi9 - command: ["/bin/sh", "-c"] - args: - ["dnf -y install socat hostname && sleep inf"] - ports: - - containerPort: 30102 - name: mlistener - protocol: UDP -EOF ----- - -. Create a pod to act as a multicast sender: -+ -[source,terminal] ----- -$ cat <<EOF| oc create -f - -apiVersion: v1 -kind: Pod -metadata: - name: msender - labels: - app: multicast-verify -spec: - containers: - - name: msender - image: registry.access.redhat.com/ubi9 - command: ["/bin/sh", "-c"] - args: - ["dnf -y install socat && sleep inf"] -EOF ----- - -. In a new terminal window or tab, start the multicast listener. - -.. Get the IP address for the Pod: -+ -[source,terminal] ----- -$ POD_IP=$(oc get pods mlistener -o jsonpath='{.status.podIP}') ----- - -.. Start the multicast listener by entering the following command: -+ -[source,terminal] ----- -$ oc exec mlistener -i -t -- \ - socat UDP4-RECVFROM:30102,ip-add-membership=224.1.0.1:$POD_IP,fork EXEC:hostname ----- - -. Start the multicast transmitter. - -.. Get the pod network IP address range: -+ -[source,terminal] ----- -$ CIDR=$(oc get Network.config.openshift.io cluster \ - -o jsonpath='{.status.clusterNetwork[0].cidr}') ----- - -.. To send a multicast message, enter the following command: -+ -[source,terminal] ----- -$ oc exec msender -i -t -- \ - /bin/bash -c "echo | socat STDIO UDP4-DATAGRAM:224.1.0.1:30102,range=$CIDR,ip-multicast-ttl=64" ----- -+ -If multicast is working, the previous command returns the following output: -+ -[source,text] ----- -mlistener ----- - -ifeval::["{context}" == "openshift-sdn-enabling-multicast"] -:!annotation: -:!namespace: -endif::[] -ifeval::["{context}" == "ovn-kubernetes-enabling-multicast"] -:!annotation: -:!namespace: -endif::[] diff --git a/modules/nw-endpoint-route53.adoc b/modules/nw-endpoint-route53.adoc deleted file mode 100644 index 99c99fbc33f1..000000000000 --- a/modules/nw-endpoint-route53.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-government-region.adoc - -[id="nw-endpoint-route53_{context}"] -= Ingress Operator endpoint configuration for AWS Route 53 - -If you install in either Amazon Web Services (AWS) GovCloud (US) US-West or US-East region, the Ingress Operator uses `us-gov-west-1` region for Route53 and tagging API clients. - -The Ingress Operator uses `https://tagging.us-gov-west-1.amazonaws.com` as the tagging API endpoint if a tagging custom endpoint is configured that includes the string 'us-gov-east-1'. 
- -For more information on AWS GovCloud (US) endpoints, see the link:https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/using-govcloud-endpoints.html[Service Endpoints] in the AWS documentation about GovCloud (US). - -[IMPORTANT] -==== -Private, disconnected installations are not supported for AWS GovCloud when you install in the `us-gov-east-1` region. -==== - -.Example Route 53 configuration -[source,yaml] ----- -platform: - aws: - region: us-gov-west-1 - serviceEndpoints: - - name: ec2 - url: https://ec2.us-gov-west-1.amazonaws.com - - name: elasticloadbalancing - url: https://elasticloadbalancing.us-gov-west-1.amazonaws.com - - name: route53 - url: https://route53.us-gov.amazonaws.com <1> - - name: tagging - url: https://tagging.us-gov-west-1.amazonaws.com <2> ----- -<1> Route 53 defaults to `https://route53.us-gov.amazonaws.com` for both AWS GovCloud (US) regions. -<2> Only the US-West region has endpoints for tagging. Omit this parameter if your cluster is in another region. diff --git a/modules/nw-enforcing-hsts-per-domain.adoc b/modules/nw-enforcing-hsts-per-domain.adoc deleted file mode 100644 index 0deb2bc0eede..000000000000 --- a/modules/nw-enforcing-hsts-per-domain.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// * networking/configuring-routing.adoc - -:_content-type: PROCEDURE -[id="nw-enforcing-hsts-per-domain_{context}"] -= Enforcing HTTP Strict Transport Security per-domain - -To enforce HTTP Strict Transport Security (HSTS) per-domain for secure routes, add a `requiredHSTSPolicies` record to the Ingress spec to capture the configuration of the HSTS policy. - -If you configure a `requiredHSTSPolicy` to enforce HSTS, then any newly created route must be configured with a compliant HSTS policy annotation. - -[NOTE] -==== -To handle upgraded clusters with non-compliant HSTS routes, you can update the manifests at the source and apply the updates. -==== - -[NOTE] -==== -You cannot use `oc expose route` or `oc create route` commands to add a route in a domain that enforces HSTS, because the API for these commands does not accept annotations. -==== - -[IMPORTANT] -==== -HSTS cannot be applied to insecure, or non-TLS routes, even if HSTS is requested for all routes globally. -==== - -.Prerequisites - -* You are logged in to the cluster with a user with administrator privileges for the project. -* You installed the `oc` CLI. - -.Procedure - -. Edit the Ingress config file: -+ -[source,terminal] ----- -$ oc edit ingresses.config.openshift.io/cluster ----- -+ -.Example HSTS policy -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - domain: 'hello-openshift-default.apps.username.devcluster.openshift.com' - requiredHSTSPolicies: <1> - - domainPatterns: <2> - - '*hello-openshift-default.apps.username.devcluster.openshift.com' - - '*hello-openshift-default2.apps.username.devcluster.openshift.com' - namespaceSelector: <3> - matchLabels: - myPolicy: strict - maxAge: <4> - smallestMaxAge: 1 - largestMaxAge: 31536000 - preloadPolicy: RequirePreload <5> - includeSubDomainsPolicy: RequireIncludeSubDomains <6> - - domainPatterns: <2> - - 'abc.example.com' - - '*xyz.example.com' - namespaceSelector: - matchLabels: {} - maxAge: {} - preloadPolicy: NoOpinion - includeSubDomainsPolicy: RequireNoIncludeSubDomains ----- -<1> Required. `requiredHSTSPolicies` are validated in order, and the first matching `domainPatterns` applies. -<2> Required. 
You must specify at least one `domainPatterns` hostname. Any number of domains can be listed. You can include multiple sections of enforcing options for different `domainPatterns`. -<3> Optional. If you include `namespaceSelector`, it must match the labels of the project where the routes reside, to enforce the set HSTS policy on the routes. Routes that only match the `namespaceSelector` and not the `domainPatterns` are not validated. -<4> Required. `max-age` measures the length of time, in seconds, that the HSTS policy is in effect. This policy setting allows for a smallest and largest `max-age` to be enforced. - -- The `largestMaxAge` value must be between `0` and `2147483647`. It can be left unspecified, which means no upper limit is enforced. -- The `smallestMaxAge` value must be between `0` and `2147483647`. Enter `0` to disable HSTS for troubleshooting, otherwise enter `1` if you never want HSTS to be disabled. It can be left unspecified, which means no lower limit is enforced. -<5> Optional. Including `preload` in `haproxy.router.openshift.io/hsts_header` allows external services to include this site in their HSTS preload lists. Browsers can then use these lists to determine which sites they can communicate with over HTTPS, before they have interacted with the site. Without `preload` set, browsers need to interact at least once with the site to get the header. `preload` can be set with one of the following: - -- `RequirePreload`: `preload` is required by the `RequiredHSTSPolicy`. -- `RequireNoPreload`: `preload` is forbidden by the `RequiredHSTSPolicy`. -- `NoOpinion`: `preload` does not matter to the `RequiredHSTSPolicy`. -<6> Optional. `includeSubDomainsPolicy` can be set with one of the following: - -- `RequireIncludeSubDomains`: `includeSubDomains` is required by the `RequiredHSTSPolicy`. -- `RequireNoIncludeSubDomains`: `includeSubDomains` is forbidden by the `RequiredHSTSPolicy`. -- `NoOpinion`: `includeSubDomains` does not matter to the `RequiredHSTSPolicy`. -+ -. You can apply HSTS to all routes in the cluster or in a particular namespace by entering the `oc annotate command`. -+ -* To apply HSTS to all routes in the cluster, enter the `oc annotate command`. For example: -+ -[source,terminal] ----- -$ oc annotate route --all --all-namespaces --overwrite=true "haproxy.router.openshift.io/hsts_header"="max-age=31536000" ----- -+ -* To apply HSTS to all routes in a particular namespace, enter the `oc annotate command`. For example: -+ -[source,terminal] ----- -$ oc annotate route --all -n my-namespace --overwrite=true "haproxy.router.openshift.io/hsts_header"="max-age=31536000" ----- - -.Verification - -You can review the HSTS policy you configured. 
For example: - -* To review the `maxAge` set for required HSTS policies, enter the following command: -+ -[source,terminal] ----- -$ oc get clusteroperator/ingress -n openshift-ingress-operator -o jsonpath='{range .spec.requiredHSTSPolicies[*]}{.spec.requiredHSTSPolicies.maxAgePolicy.largestMaxAge}{"\n"}{end}' ----- -+ -* To review the HSTS annotations on all routes, enter the following command: -+ -[source,terminal] ----- -$ oc get route --all-namespaces -o go-template='{{range .items}}{{if .metadata.annotations}}{{$a := index .metadata.annotations "haproxy.router.openshift.io/hsts_header"}}{{$n := .metadata.name}}{{with $a}}Name: {{$n}} HSTS: {{$a}}{{"\n"}}{{else}}{{""}}{{end}}{{end}}{{end}}' ----- -+ -.Example output -[source,terminal] ----- -Name: <_routename_> HSTS: max-age=31536000;preload;includeSubDomains ----- diff --git a/modules/nw-exposing-router-metrics.adoc b/modules/nw-exposing-router-metrics.adoc deleted file mode 100644 index 0c2e03fb2103..000000000000 --- a/modules/nw-exposing-router-metrics.adoc +++ /dev/null @@ -1,120 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress_operator.adoc - -:_content-type: PROCEDURE -[id="nw-exposing-router-metrics_{context}"] -= Exposing router metrics - -You can expose the HAProxy router metrics by default in Prometheus format on the default stats port, 1936. The external metrics collection and aggregation systems such as Prometheus can access the HAProxy router metrics. You can view the HAProxy router metrics in a browser in the HTML and comma separated values (CSV) format. - -.Prerequisites - -* You configured your firewall to access the default stats port, 1936. - -.Procedure - -. Get the router pod name by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ingress ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -router-default-76bfffb66c-46qwp 1/1 Running 0 11h ----- - -. Get the router's username and password, which the router pod stores in the `/var/lib/haproxy/conf/metrics-auth/statsUsername` and `/var/lib/haproxy/conf/metrics-auth/statsPassword` files: - -.. Get the username by running the following command: -+ -[source,terminal] ----- -$ oc rsh <router_pod_name> cat metrics-auth/statsUsername ----- - -.. Get the password by running the following command: -+ -[source,terminal] ----- -$ oc rsh <router_pod_name> cat metrics-auth/statsPassword ----- - -. Get the router IP and metrics certificates by running the following command: -+ -[source,terminal] ----- -$ oc describe pod <router_pod> ----- - -. Get the raw statistics in Prometheus format by running the following command: -+ -[source,terminal] ----- -$ curl -u <user>:<password> http://<router_IP>:<stats_port>/metrics ----- - -. Access the metrics securely by running the following command: -+ -[source,terminal] ----- -$ curl -u user:password https://<router_IP>:<stats_port>/metrics -k ----- - -. Access the default stats port, 1936, by running the following command: -+ -[source,terminal] ----- -$ curl -u <user>:<password> http://<router_IP>:<stats_port>/metrics ----- -+ --- -.Example output -[%collapsible] -==== -[source,terminal] -... -# HELP haproxy_backend_connections_total Total number of connections. 
-# TYPE haproxy_backend_connections_total gauge -haproxy_backend_connections_total{backend="http",namespace="default",route="hello-route"} 0 -haproxy_backend_connections_total{backend="http",namespace="default",route="hello-route-alt"} 0 -haproxy_backend_connections_total{backend="http",namespace="default",route="hello-route01"} 0 -... -# HELP haproxy_exporter_server_threshold Number of servers tracked and the current threshold value. -# TYPE haproxy_exporter_server_threshold gauge -haproxy_exporter_server_threshold{type="current"} 11 -haproxy_exporter_server_threshold{type="limit"} 500 -... -# HELP haproxy_frontend_bytes_in_total Current total of incoming bytes. -# TYPE haproxy_frontend_bytes_in_total gauge -haproxy_frontend_bytes_in_total{frontend="fe_no_sni"} 0 -haproxy_frontend_bytes_in_total{frontend="fe_sni"} 0 -haproxy_frontend_bytes_in_total{frontend="public"} 119070 -... -# HELP haproxy_server_bytes_in_total Current total of incoming bytes. -# TYPE haproxy_server_bytes_in_total gauge -haproxy_server_bytes_in_total{namespace="",pod="",route="",server="fe_no_sni",service=""} 0 -haproxy_server_bytes_in_total{namespace="",pod="",route="",server="fe_sni",service=""} 0 -haproxy_server_bytes_in_total{namespace="default",pod="docker-registry-5-nk5fz",route="docker-registry",server="10.130.0.89:5000",service="docker-registry"} 0 -haproxy_server_bytes_in_total{namespace="default",pod="hello-rc-vkjqx",route="hello-route",server="10.130.0.90:8080",service="hello-svc-1"} 0 -... -==== --- - -. Launch the stats window by entering the following URL in a browser: -+ -[source,terminal] ----- -http://<user>:<password>@<router_IP>:<stats_port> ----- - -. Optional: Get the stats in CSV format by entering the following URL in a browser: -+ -[source,terminal] ----- -http://<user>:<password>@<router_ip>:1936/metrics;csv ----- diff --git a/modules/nw-exposing-service.adoc b/modules/nw-exposing-service.adoc deleted file mode 100644 index 25d8ff7af9b9..000000000000 --- a/modules/nw-exposing-service.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc - -ifeval::["{context}" == "configuring-ingress-cluster-traffic-nodeport"] -:nodeport: -endif::[] - -:_content-type: PROCEDURE -[id="nw-exposing-service_{context}"] -= Exposing the service by creating a route - -You can expose the service as a route by using the `oc expose` command. - -.Procedure - -To expose the service: - -. Log in to {product-title}. - -. Log in to the project where the service you want to expose is located: -+ -[source,terminal] ----- -$ oc project myproject ----- - -ifndef::nodeport[] -. Run the `oc expose service` command to expose the route: -+ - -[source,terminal] ----- -$ oc expose service nodejs-ex ----- -+ -.Example output -[source,terminal] ----- -route.route.openshift.io/nodejs-ex exposed ----- - -. To verify that the service is exposed, you can use a tool, such as cURL, to make sure the service is accessible from outside the cluster. - -.. Use the `oc get route` command to find the route's host name: -+ -[source,terminal] ----- -$ oc get route ----- -+ -.Example output -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -nodejs-ex nodejs-ex-myproject.example.com nodejs-ex 8080-tcp None ----- - -.. 
Use cURL to check that the host responds to a GET request: -+ -[source,terminal] ----- -$ curl --head nodejs-ex-myproject.example.com ----- -+ -.Example output -[source,terminal] ----- -HTTP/1.1 200 OK -... ----- - -endif::nodeport[] -ifdef::nodeport[] -. To expose a node port for the application, modify the custom resource definition (CRD) of a service by entering the following command: -+ -[source,terminal] ----- -$ oc edit svc <service_name> ----- -+ -.Example output -[source,yaml] ----- -spec: - ports: - - name: 8443-tcp - nodePort: 30327 <1> - port: 8443 - protocol: TCP - targetPort: 8443 - sessionAffinity: None - type: NodePort <2> ----- -<1> Optional: Specify the node port range for the application. By default, {product-title} selects an available port in the `30000-32767` range. -<2> Define the service type. - -. Optional: To confirm the service is available with a node port exposed, enter the following command: -+ -[source,terminal] ----- -$ oc get svc -n myproject ----- -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -nodejs-ex ClusterIP 172.30.217.127 <none> 3306/TCP 9m44s -nodejs-ex-ingress NodePort 172.30.107.72 <none> 3306:31345/TCP 39s ----- - -. Optional: To remove the service created automatically by the `oc new-app` command, enter the following command: -+ -[source,terminal] ----- -$ oc delete svc nodejs-ex ----- - -.Verification - -* To check that the service node port is updated with a port in the `30000-32767` range, enter the following command: -+ -[source,terminal] ----- -$ oc get svc ----- -+ -In the following example output, the updated port is `30327`: -+ -.Example output -[source,terminal] ----- -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -httpd NodePort 172.xx.xx.xx <none> 8443:30327/TCP 109s ----- -endif::nodeport[] - -//Potentially add verification step, "If a verification step is needed, it would -//look something like oc get route mysql-55-rhel7 and curl with the host from the -//output of the oc get route command." - -ifdef::nodeport[] -:!nodeport: -endif::[] diff --git a/modules/nw-external-dns-operator-configuration-parameters.adoc b/modules/nw-external-dns-operator-configuration-parameters.adoc deleted file mode 100644 index 837c69723557..000000000000 --- a/modules/nw-external-dns-operator-configuration-parameters.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-configuration-parameters.adoc - -:_content-type: CONCEPT -[id="nw-external-dns-operator-configuration-parameters_{context}"] -= External DNS Operator configuration parameters - -The External DNS Operator includes the following configuration parameters: - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`spec` -|Enables the type of a cloud provider. - -[source,yaml] ----- -spec: - provider: - type: AWS <1> - aws: - credentials: - name: aws-access-key <2> ----- -<1> Defines available options such as AWS, GCP and Azure. -<2> Defines a name of the `secret` which contains credentials for your cloud provider. - -|`zones` -|Enables you to specify DNS zones by their domains. If you do not specify zones, `ExternalDNS` discovers all the zones present in your cloud provider account. - -[source,yaml] ----- -zones: -- "myzoneid" <1> ----- - -<1> Specifies the IDs of DNS zones. - -|`domains` -|Enables you to specify AWS zones by their domains. If you do not specify domains, `ExternalDNS` discovers all the zones present in your cloud provider account. 
- -[source,yaml] ----- -domains: -- filterType: Include <1> - matchType: Exact <2> - name: "myzonedomain1.com" <3> -- filterType: Include - matchType: Pattern <4> - pattern: ".*\\.otherzonedomain\\.com" <5> ----- -<1> Instructs `ExternalDNS` to include the domain specified. -<2> Instructs `ExtrnalDNS` that the domain matching has to be exact as opposed to regular expression match. -<3> Defines the exact domain name by which `ExternalDNS` filters. -<4> Sets `regex-domain-filter` flag in `ExternalDNS`. You can limit possible domains by using a Regex filter. -<5> Defines the regex pattern to be used by `ExternalDNS` to filter the domains of the target zones. - -|`source` -|Enables you to specify the source for the DNS records, `Service` or `Route`. - -[source,yaml] ----- -source: <1> - type: Service <2> - service: - serviceType:<3> - - LoadBalancer - - ClusterIP - labelFilter: <4> - matchLabels: - external-dns.mydomain.org/publish: "yes" - hostnameAnnotation: "Allow" <5> - fqdnTemplate: - - "{{.Name}}.myzonedomain.com" <6> ----- -<1> Defines the settings for the source of DNS records. -<2> The `ExternalDNS` uses `Service` type as source for creating dns records. -<3> Sets `service-type-filter` flag in `ExternalDNS`. The `serviceType` contains the following fields: -* `default`: `LoadBalancer` -* `expected`: `ClusterIP` -* `NodePort` -* `LoadBalancer` -* `ExternalName` -<4> Ensures that the controller considers only those resources which matches with label filter. -<5> The default value for `hostnameAnnotation` is `Ignore` which instructs `ExternalDNS` to generate DNS records using the templates specified in the field `fqdnTemplates`. When the value is `Allow` the DNS records get generated based on the value specified in the `external-dns.alpha.kubernetes.io/hostname` annotation. -<6> External DNS Operator uses a string to generate DNS names from sources that don't define a hostname, or to add a hostname suffix when paired with the fake source. - -[source,yaml] ----- -source: - type: OpenShiftRoute <1> - openshiftRouteOptions: - routerName: default <2> - labelFilter: - matchLabels: - external-dns.mydomain.org/publish: "yes" ----- - -<1> ExternalDNS` uses type `route` as source for creating dns records. -<2> If the source is `OpenShiftRoute`, then you can pass the Ingress Controller name. The `ExternalDNS` uses canonical name of Ingress Controller as the target for CNAME record. - -|=== diff --git a/modules/nw-external-dns-operator-logs.adoc b/modules/nw-external-dns-operator-logs.adoc deleted file mode 100644 index 2b9baa602187..000000000000 --- a/modules/nw-external-dns-operator-logs.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// * networking/understanding-external-dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-external-dns-operator-logs_{context}"] -= External DNS Operator logs - -You can view External DNS Operator logs by using the `oc logs` command. - -.Procedure - -. View the logs of the External DNS Operator: -+ -[source,terminal] ----- -$ oc logs -n external-dns-operator deployment/external-dns-operator -c external-dns-operator ----- - -== External DNS Operator domain name limitations - -External DNS Operator uses the TXT registry, which follows the new format and adds the prefix for the TXT records. This reduces the maximum length of the domain name for the TXT records. A DNS record cannot be present without a corresponding TXT record, so the domain name of the DNS record must follow the same limit as the TXT records. 
For example, DNS record is `<domain-name-from-source>` and the TXT record is `external-dns-<record-type>-<domain-name-from-source>`. - -The domain name of the DNS records generated by External DNS Operator has the following limitations: - -[cols="3a,8a",options="header"] -|=== -|Record type |Number of characters - -|CNAME -|44 - -|Wildcard CNAME records on AzureDNS -|42 - -|A -|48 - -|Wildcard A records on AzureDNS -|46 - -|=== - -If the domain name generated by External DNS exceeds the domain name limitation, the External DNS instance gives the following error: - -[source,terminal] ----- -$ oc -n external-dns-operator logs external-dns-aws-7ddbd9c7f8-2jqjh <1> ----- -<1> The `external-dns-aws-7ddbd9c7f8-2jqjh` parameter specifies the name of the External DNS pod. - -.Example output -[source,terminal] ----- -time="2022-09-02T08:53:57Z" level=info msg="Desired change: CREATE external-dns-cname-hello-openshift-aaaaaaaaaa-bbbbbbbbbb-ccccccc.test.example.io TXT [Id: /hostedzone/Z06988883Q0H0RL6UMXXX]" -time="2022-09-02T08:53:57Z" level=info msg="Desired change: CREATE external-dns-hello-openshift-aaaaaaaaaa-bbbbbbbbbb-ccccccc.test.example.io TXT [Id: /hostedzone/Z06988883Q0H0RL6UMXXX]" -time="2022-09-02T08:53:57Z" level=info msg="Desired change: CREATE hello-openshift-aaaaaaaaaa-bbbbbbbbbb-ccccccc.test.example.io A [Id: /hostedzone/Z06988883Q0H0RL6UMXXX]" -time="2022-09-02T08:53:57Z" level=error msg="Failure in zone test.example.io. [Id: /hostedzone/Z06988883Q0H0RL6UMXXX]" -time="2022-09-02T08:53:57Z" level=error msg="InvalidChangeBatch: [FATAL problem: DomainLabelTooLong (Domain label is too long) encountered with 'external-dns-a-hello-openshift-aaaaaaaaaa-bbbbbbbbbb-ccccccc']\n\tstatus code: 400, request id: e54dfd5a-06c6-47b0-bcb9-a4f7c3a4e0c6" ----- diff --git a/modules/nw-external-dns-operator.adoc b/modules/nw-external-dns-operator.adoc deleted file mode 100644 index 944dafed1667..000000000000 --- a/modules/nw-external-dns-operator.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// * networking/understanding-external-dns-operator.adoc - -:_content-type: PROCEDURE -[id="nw-external-dns-operator_{context}"] -= External DNS Operator - -The External DNS Operator implements the External DNS API from the `olm.openshift.io` API group. The External DNS Operator deploys the `ExternalDNS` using a deployment resource. The ExternalDNS deployment watches the resources such as services and routes in the cluster and updates the external DNS providers. - -.Procedure - -You can deploy the ExternalDNS Operator on demand from the OperatorHub, this creates a `Subscription` object. - -. Check the name of an install plan: -+ -[source,terminal] ----- -$ oc -n external-dns-operator get sub external-dns-operator -o yaml | yq '.status.installplan.name' ----- -+ -.Example output -[source,terminal] ----- -install-zcvlr ----- - -. Check the status of an install plan, the status of an install plan must be `Complete`: -+ -[source,terminal] ----- -$ oc -n external-dns-operator get ip <install_plan_name> -o yaml | yq .status.phase' ----- -+ -.Example output -[source,terminal] ----- -Complete ----- - -. 
Use the `oc get` command to view the `Deployment` status: -+ -[source,terminal] ----- -$ oc get -n external-dns-operator deployment/external-dns-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -external-dns-operator 1/1 1 1 23h ----- diff --git a/modules/nw-externalip-about.adoc b/modules/nw-externalip-about.adoc deleted file mode 100644 index 039f58c6344e..000000000000 --- a/modules/nw-externalip-about.adoc +++ /dev/null @@ -1,179 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-externalip.adoc - -:_content-type: CONCEPT -[id="nw-externalip-about_{context}"] -= About ExternalIP - -For non-cloud environments, {product-title} supports the assignment of external IP addresses to a `Service` object `spec.externalIPs[]` field through the *ExternalIP* facility. -By setting this field, {product-title} assigns an additional virtual IP address to the service. The IP address can be outside the service network defined for the cluster. -A service configured with an ExternalIP functions similarly to a service with `type=NodePort`, allowing you to direct traffic to a local node for load balancing. - -You must configure your networking infrastructure to ensure that the external IP address blocks that you define are routed to the cluster. - -{product-title} extends the ExternalIP functionality in Kubernetes by adding the following capabilities: - -- Restrictions on the use of external IP addresses by users through a configurable policy -- Allocation of an external IP address automatically to a service upon request - -[WARNING] -==== -Disabled by default, use of ExternalIP functionality can be a security risk, because in-cluster traffic to an external IP address is directed to that service. -This could allow cluster users to intercept sensitive traffic destined for external resources. -==== - -[IMPORTANT] -==== -This feature is supported only in non-cloud deployments. -For cloud deployments, use the load balancer services for automatic deployment of a cloud load balancer to target the endpoints of a service. -==== - -You can assign an external IP address in the following ways: - -Automatic assignment of an external IP:: -{product-title} automatically assigns an IP address from the `autoAssignCIDRs` CIDR block to the `spec.externalIPs[]` array when you create a `Service` object with `spec.type=LoadBalancer` set. -In this case, {product-title} implements a non-cloud version of the load balancer service type and assigns IP addresses to the services. -Automatic assignment is disabled by default and must be configured by a cluster administrator as described in the following section. - -Manual assignment of an external IP:: -{product-title} uses the IP addresses assigned to the `spec.externalIPs[]` array when you create a `Service` object. You cannot specify an IP address that is already in use by another service. - -[id="configuration-externalip_{context}"] -== Configuration for ExternalIP - -Use of an external IP address in {product-title} is governed by the following fields in the `Network.config.openshift.io` CR named `cluster`: - -* `spec.externalIP.autoAssignCIDRs` defines an IP address block used by the load balancer when choosing an external IP address for the service. {product-title} supports only a single IP address block for automatic assignment. 
This can be simpler than having to manage the port space of a limited number of shared IP addresses when manually assigning ExternalIPs to services. If automatic assignment is enabled, a `Service` object with `spec.type=LoadBalancer` is allocated an external IP address. -* `spec.externalIP.policy` defines the permissible IP address blocks when manually specifying an IP address. {product-title} does not apply policy rules to IP address blocks defined by `spec.externalIP.autoAssignCIDRs`. - -If routed correctly, external traffic from the configured external IP address block can reach service endpoints through any TCP or UDP port that the service exposes. - -[IMPORTANT] -==== -As a cluster administrator, you must configure routing to externalIPs on both OpenShiftSDN and OVN-Kubernetes network types. You must also ensure that the IP address block you assign terminates at one or more nodes in your cluster. For more information, see link:https://kubernetes.io/docs/concepts/services-networking/service/#external-ips[*Kubernetes External IPs*]. -==== - -{product-title} supports both the automatic and manual assignment of IP -addresses, and each address is guaranteed to be assigned to a maximum of one -service. This ensures that each service can expose its chosen ports regardless -of the ports exposed by other services. - -[NOTE] -==== -To use IP address blocks defined by `autoAssignCIDRs` in {product-title}, you must configure the necessary IP address assignment and routing for your host network. -==== - -The following YAML describes a service with an external IP address configured: - -.Example `Service` object with `spec.externalIPs[]` set -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: http-service -spec: - clusterIP: 172.30.163.110 - externalIPs: - - 192.168.132.253 - externalTrafficPolicy: Cluster - ports: - - name: highport - nodePort: 31903 - port: 30102 - protocol: TCP - targetPort: 30102 - selector: - app: web - sessionAffinity: None - type: LoadBalancer -status: - loadBalancer: - ingress: - - ip: 192.168.132.253 ----- - -[id="restrictions-on-ip-assignment_{context}"] -== Restrictions on the assignment of an external IP address - -As a cluster administrator, you can specify IP address blocks to allow and to reject. - -Restrictions apply only to users without `cluster-admin` privileges. A cluster administrator can always set the service `spec.externalIPs[]` field to any IP address. - -You configure IP address policy with a `policy` object defined by specifying the `spec.ExternalIP.policy` field. -The policy object has the following shape: - -[source,json] ----- -{ - "policy": { - "allowedCIDRs": [], - "rejectedCIDRs": [] - } -} ----- - -When configuring policy restrictions, the following rules apply: - -- If `policy={}` is set, then creating a `Service` object with `spec.ExternalIPs[]` set will fail. This is the default for {product-title}. The behavior when `policy=null` is set is identical. -- If `policy` is set and either `policy.allowedCIDRs[]` or `policy.rejectedCIDRs[]` is set, the following rules apply: - -* If `allowedCIDRs[]` and `rejectedCIDRs[]` are both set, then `rejectedCIDRs[]` has precedence over `allowedCIDRs[]`. -* If `allowedCIDRs[]` is set, creating a `Service` object with `spec.ExternalIPs[]` will succeed only if the specified IP addresses are allowed. -* If `rejectedCIDRs[]` is set, creating a `Service` object with `spec.ExternalIPs[]` will succeed only if the specified IP addresses are not rejected. 
-
-[id="example-policy-objects_{context}"]
-== Example policy objects
-
-The examples that follow demonstrate several different policy configurations.
-
-- In the following example, the policy prevents {product-title} from creating any service with an external IP address specified:
-+
-.Example policy to reject any value specified for `Service` object `spec.externalIPs[]`
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  externalIP:
-    policy: {}
-  ...
-----
-
-- In the following example, both the `allowedCIDRs` and `rejectedCIDRs` fields are set.
-+
-.Example policy that includes both allowed and rejected CIDR blocks
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  externalIP:
-    policy:
-      allowedCIDRs:
-      - 172.16.66.10/23
-      rejectedCIDRs:
-      - 172.16.66.10/24
-  ...
-----
-
-- In the following example, `policy` is set to `null`.
-If set to `null`, when inspecting the configuration object by entering `oc get networks.config.openshift.io -o yaml`, the `policy` field does not appear in the output.
-+
-.Example policy to allow any value specified for `Service` object `spec.externalIPs[]`
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  externalIP:
-    policy: null
-  ...
-----
diff --git a/modules/nw-externalip-configuring.adoc b/modules/nw-externalip-configuring.adoc
deleted file mode 100644
index 755310c6b36b..000000000000
--- a/modules/nw-externalip-configuring.adoc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/configuring_ingress_cluster_traffic/configuring-externalip.adoc
-
-:_content-type: PROCEDURE
-[id="nw-externalip-configuring_{context}"]
-= Configure external IP address blocks for your cluster
-
-As a cluster administrator, you can configure the following ExternalIP settings:
-
-- An ExternalIP address block used by {product-title} to automatically populate the `spec.externalIPs[]` field for a `Service` object.
-- A policy object to restrict what IP addresses may be manually assigned to the `spec.externalIPs[]` array of a `Service` object.
-
-.Prerequisites
-
-* Install the OpenShift CLI (`oc`).
-* Access to the cluster as a user with the `cluster-admin` role.
-
-.Procedure
-
-. Optional: To display the current external IP configuration, enter the following command:
-+
-[source,terminal]
-----
-$ oc describe networks.config cluster
-----
-
-. To edit the configuration, enter the following command:
-+
-[source,terminal]
-----
-$ oc edit networks.config cluster
-----
-
-. Modify the ExternalIP configuration, as in the following example:
-+
-[source,yaml]
-----
-apiVersion: config.openshift.io/v1
-kind: Network
-metadata:
-  name: cluster
-spec:
-  ...
-  externalIP: <1>
-  ...
-----
-<1> Specify the configuration for the `externalIP` stanza.
-
-. 
To confirm the updated ExternalIP configuration, enter the following command: -+ -[source,terminal] ----- -$ oc get networks.config cluster -o go-template='{{.spec.externalIP}}{{"\n"}}' ----- diff --git a/modules/nw-externalip-object.adoc b/modules/nw-externalip-object.adoc deleted file mode 100644 index 440cea43bac4..000000000000 --- a/modules/nw-externalip-object.adoc +++ /dev/null @@ -1,89 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-externalip.adoc - -[id="nw-externalip-object_{context}"] -= ExternalIP address block configuration - -The configuration for ExternalIP address blocks is defined by a Network custom resource (CR) named `cluster`. The Network CR is part of the `config.openshift.io` API group. - -[IMPORTANT] -==== -During cluster installation, the Cluster Version Operator (CVO) automatically creates a Network CR named `cluster`. -Creating any other CR objects of this type is not supported. -==== - -The following YAML describes the ExternalIP configuration: - -.Network.config.openshift.io CR named `cluster` -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - externalIP: - autoAssignCIDRs: [] <1> - policy: <2> - ... ----- -<1> Defines the IP address block in CIDR format that is available for automatic assignment of external IP addresses to a service. -Only a single IP address range is allowed. - -<2> Defines restrictions on manual assignment of an IP address to a service. -If no restrictions are defined, specifying the `spec.externalIP` field in a `Service` object is not allowed. -By default, no restrictions are defined. - -The following YAML describes the fields for the `policy` stanza: - -.Network.config.openshift.io `policy` stanza -[source,yaml] ----- -policy: - allowedCIDRs: [] <1> - rejectedCIDRs: [] <2> ----- -<1> A list of allowed IP address ranges in CIDR format. -<2> A list of rejected IP address ranges in CIDR format. - -[discrete] -== Example external IP configurations - -Several possible configurations for external IP address pools are displayed in the following examples: - -- The following YAML describes a configuration that enables automatically assigned external IP addresses: -+ -.Example configuration with `spec.externalIP.autoAssignCIDRs` set -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - ... - externalIP: - autoAssignCIDRs: - - 192.168.132.254/29 ----- - -- The following YAML configures policy rules for the allowed and rejected CIDR ranges: -+ -.Example configuration with `spec.externalIP.policy` set -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - ... 
- externalIP: - policy: - allowedCIDRs: - - 192.168.132.0/29 - - 192.168.132.8/29 - rejectedCIDRs: - - 192.168.132.7/32 ----- diff --git a/modules/nw-gcp-installing-global-access-configuration.adoc b/modules/nw-gcp-installing-global-access-configuration.adoc deleted file mode 100644 index a80654601917..000000000000 --- a/modules/nw-gcp-installing-global-access-configuration.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-gcp-vpc.adoc -// * installing/installing-restricted-networks-gcp - -:_content-type: PROCEDURE -[id="nw-gcp-global-access-configuration_{context}"] -= Create an Ingress Controller with global access on GCP -You can create an Ingress Controller that has global access to a Google Cloud Platform (GCP) cluster. Global access is only available to Ingress Controllers using internal load balancers. - -.Prerequisites - -* You created the `install-config.yaml` and complete any modifications to it. - -.Procedure - -Create an Ingress Controller with global access on a new GCP cluster. - -. Change to the directory that contains the installation program and create a manifest file: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -<1> For `<installation_directory>`, specify the name of the directory that -contains the `install-config.yaml` file for your cluster. -+ -. Create a file that is named `cluster-ingress-default-ingresscontroller.yaml` in the `<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -$ touch <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml <1> ----- -<1> For `<installation_directory>`, specify the directory name that contains the -`manifests/` directory for your cluster. -+ -After creating the file, several network configuration files are in the -`manifests/` directory, as shown: -+ -[source,terminal] ----- -$ ls <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml ----- -+ -.Example output -[source,terminal] ----- -cluster-ingress-default-ingresscontroller.yaml ----- - -. Open the `cluster-ingress-default-ingresscontroller.yaml` file in an editor and enter a custom resource (CR) that describes the Operator configuration you want: -+ -.Sample `clientAccess` configuration to `Global` -[source,yaml] ----- - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: default - namespace: openshift-ingress-operator - spec: - endpointPublishingStrategy: - loadBalancer: - providerParameters: - gcp: - clientAccess: Global <1> - type: GCP - scope: Internal <2> - type: LoadBalancerService ----- -<1> Set `gcp.clientAccess` to `Global`. -<2> Global access is only available to Ingress Controllers using internal load balancers. diff --git a/modules/nw-high-performance-multicast.adoc b/modules/nw-high-performance-multicast.adoc deleted file mode 100644 index 8bdf59f48dd6..000000000000 --- a/modules/nw-high-performance-multicast.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-sriov-multicast.adoc - -[id="nw-high-performance-multicast_{context}"] -= High performance multicast - -The OpenShift SDN network plugin supports multicast between pods on the default network. This is best used for low-bandwidth coordination or service discovery, and not high-bandwidth applications. 
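As a minimal sketch of the default-network case, and assuming the cluster uses the OpenShift SDN plugin with a hypothetical project named `my-project`, pod-to-pod multicast is typically enabled per project by annotating the project's existing `NetNamespace` object rather than creating a new one:

[source,yaml]
----
apiVersion: network.openshift.io/v1
kind: NetNamespace
metadata:
  name: my-project # hypothetical project; the NetNamespace object is managed by the SDN plugin
  annotations:
    netnamespace.network.openshift.io/multicast-enabled: "true" # enables multicast for pods in this project
----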
-For applications such as streaming media, like Internet Protocol television (IPTV) and multipoint videoconferencing, you can utilize Single Root I/O Virtualization (SR-IOV) hardware to provide near-native performance. - -When using additional SR-IOV interfaces for multicast: - -* Multicast packages must be sent or received by a pod through the additional SR-IOV interface. -* The physical network which connects the SR-IOV interfaces decides the -multicast routing and topology, which is not controlled by {product-title}. diff --git a/modules/nw-how-nw-iface-selected.adoc b/modules/nw-how-nw-iface-selected.adoc deleted file mode 100644 index 8ba5a4c8526c..000000000000 --- a/modules/nw-how-nw-iface-selected.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * support/troubleshooting/troubleshooting-network-issues.adoc - -:_content-type: CONCEPT -[id="nw-how-nw-iface-selected_{context}"] -= How the network interface is selected - -For installations on bare metal or with virtual machines that have more than one network interface controller (NIC), the NIC that {product-title} uses for communication with the Kubernetes API server is determined by the `nodeip-configuration.service` service unit that is run by systemd when the node boots. The `nodeip-configuration.service` selects the IP from the interface associated with the default route. - -After the `nodeip-configuration.service` service determines the correct NIC, the service creates the `/etc/systemd/system/kubelet.service.d/20-nodenet.conf` file. The `20-nodenet.conf` file sets the `KUBELET_NODE_IP` environment variable to the IP address that the service selected. - -When the kubelet service starts, it reads the value of the environment variable from the `20-nodenet.conf` file and sets the IP address as the value of the `--node-ip` kubelet command-line argument. As a result, the kubelet service uses the selected IP address as the node IP address. - -If hardware or networking is reconfigured after installation, or if there is a networking layout where the node IP should not come from the default route interface, it is possible for the `nodeip-configuration.service` service to select a different NIC after a reboot. In some cases, you might be able to detect that a different NIC is selected by reviewing the `INTERNAL-IP` column in the output from the `oc get nodes -o wide` command. - -If network communication is disrupted or misconfigured because a different NIC is selected, you might receive the following error: `EtcdCertSignerControllerDegraded`. You can create a hint file that includes the `NODEIP_HINT` variable to override the default IP selection logic. For more information, see Optional: Overriding the default node IP selection logic. - -// Link to info for creating a machine config. diff --git a/modules/nw-http2-haproxy.adoc b/modules/nw-http2-haproxy.adoc deleted file mode 100644 index 997646b4f340..000000000000 --- a/modules/nw-http2-haproxy.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-http2-haproxy_{context}"] -= Enabling HTTP/2 Ingress connectivity - -You can enable transparent end-to-end HTTP/2 connectivity in HAProxy. It allows application owners to make use of HTTP/2 protocol capabilities, including single connection, header compression, binary streams, and more. - -You can enable HTTP/2 connectivity for an individual Ingress Controller or for the entire cluster. 
- -To enable the use of HTTP/2 for the connection from the client to HAProxy, a route must specify a custom certificate. A route that uses the default certificate cannot use HTTP/2. This restriction is necessary to avoid problems from connection coalescing, where the client re-uses a connection for different routes that use the same certificate. - -The connection from HAProxy to the application pod can use HTTP/2 only for re-encrypt routes and not for edge-terminated or insecure routes. This restriction is because HAProxy uses Application-Level Protocol Negotiation (ALPN), which is a TLS extension, to negotiate the use of HTTP/2 with the back-end. The implication is that end-to-end HTTP/2 is possible with passthrough and re-encrypt and not with insecure or edge-terminated routes. - -[WARNING] -==== -Using WebSockets with a re-encrypt route and with HTTP/2 enabled on an Ingress Controller requires WebSocket support over HTTP/2. WebSockets over HTTP/2 is a feature of HAProxy 2.4, which is unsupported in {product-title} at this time. -==== - -[IMPORTANT] -==== -For non-passthrough routes, the Ingress Controller negotiates its connection to the application independently of the connection from the client. This means a client may connect to the Ingress Controller and negotiate HTTP/1.1, and the Ingress Controller may then connect to the application, negotiate HTTP/2, and forward the request from the client HTTP/1.1 connection using the HTTP/2 connection to the application. This poses a problem if the client subsequently tries to upgrade its connection from HTTP/1.1 to the WebSocket protocol, because the Ingress Controller cannot forward WebSocket to HTTP/2 and cannot upgrade its HTTP/2 connection to WebSocket. Consequently, if you have an application that is intended to accept WebSocket connections, it must not allow negotiating the HTTP/2 protocol or else clients will fail to upgrade to the WebSocket protocol. -==== - -.Procedure - -Enable HTTP/2 on a single Ingress Controller. - -* To enable HTTP/2 on an Ingress Controller, enter the `oc annotate` command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator annotate ingresscontrollers/<ingresscontroller_name> ingress.operator.openshift.io/default-enable-http2=true ----- -+ -Replace `<ingresscontroller_name>` with the name of the Ingress Controller to annotate. - -Enable HTTP/2 on the entire cluster. 
- -* To enable HTTP/2 for the entire cluster, enter the `oc annotate` command: -+ -[source,terminal] ----- -$ oc annotate ingresses.config/cluster ingress.operator.openshift.io/default-enable-http2=true ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to add the annotation: -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster - annotations: - ingress.operator.openshift.io/default-enable-http2: "true" ----- -==== diff --git a/modules/nw-infw-operator-config-object.adoc b/modules/nw-infw-operator-config-object.adoc deleted file mode 100644 index f628e32d14af..000000000000 --- a/modules/nw-infw-operator-config-object.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-node-firewall-operator.adoc - -:_content-type: CONCEPT -[id="nw-infw-operator-config-object_{context}"] -== Ingress Node Firewall configuration object - -The fields for the Ingress Node Firewall configuration object are described in the following table: - -.Ingress Node Firewall Configuration object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`metadata.name` -|`string` -|The name of the CR object. The name of the firewall rules object must be `ingressnodefirewallconfig`. - -|`metadata.namespace` -|`string` -|Namespace for the Ingress Firewall Operator CR object. The `IngressNodeFirewallConfig` CR must be created inside the `openshift-ingress-node-firewall` namespace. - -|`spec.nodeSelector` -|`string` -| -A node selection constraint used to target nodes through specified node labels. For example: - -[source,yaml] ----- -spec: - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -[NOTE] -==== -One label used in `nodeSelector` must match a label on the nodes in order for the daemon set to start. For example, if the node labels `node-role.kubernetes.io/worker` and `node-type.kubernetes.io/vm` are applied to a node, then at least one label must be set using `nodeSelector` for the daemon set to start. -==== - -|==== - -[NOTE] -==== -The Operator consumes the CR and creates an ingress node firewall daemon set on all the nodes that match the `nodeSelector`. -==== - -[discrete] -[id="nw-ingress-node-firewall-example-cr-2_{context}"] -== Ingress Node Firewall Operator example configuration - -A complete Ingress Node Firewall Configuration is specified in the following example: - -.Example Ingress Node Firewall Configuration object -[source,yaml] ----- -apiVersion: ingressnodefirewall.openshift.io/v1alpha1 -kind: IngressNodeFirewallConfig -metadata: - name: ingressnodefirewallconfig - namespace: openshift-ingress-node-firewall -spec: - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -[NOTE] -==== -The Operator consumes the CR and creates an ingress node firewall daemon set on all the nodes that match the `nodeSelector`. -==== \ No newline at end of file diff --git a/modules/nw-infw-operator-cr.adoc b/modules/nw-infw-operator-cr.adoc deleted file mode 100644 index 1c884bb8a4f0..000000000000 --- a/modules/nw-infw-operator-cr.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-node-firewall-operator.adoc - -:_content-type: CONCEPT -[id="nw-infw-operator-cr_{context}"] -= Ingress Node Firewall Operator - -The Ingress Node Firewall Operator provides ingress firewall rules at a node level by deploying the daemon set to nodes you specify and manage in the firewall configurations. 
To deploy the daemon set, you create an `IngressNodeFirewallConfig` custom resource (CR). The Operator applies the `IngressNodeFirewallConfig` CR to create an ingress node firewall daemon set, which runs on all nodes that match the `nodeSelector`.
-
-You configure the `rules` of the `IngressNodeFirewall` CR and apply them to nodes by using a `nodeSelector` whose matching label values are set to `"true"`.
-
-[IMPORTANT]
-====
-The Ingress Node Firewall Operator supports only stateless firewall rules.
-
-Network interface controllers (NICs) that do not support native XDP drivers run at lower performance.
-
-For {product-title} 4.14, you must run Ingress Node Firewall Operator on {op-system-base} 9.0 or later.
-====
diff --git a/modules/nw-infw-operator-deploying.adoc b/modules/nw-infw-operator-deploying.adoc
deleted file mode 100644
index 20ea6d25f2c5..000000000000
--- a/modules/nw-infw-operator-deploying.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ingress-node-firewall-operator.adoc
-
-:_content-type: PROCEDURE
-[id="nw-infw-operator-deploying_{context}"]
-= Deploying Ingress Node Firewall Operator
-
-.Prerequisites
-* The Ingress Node Firewall Operator is installed.
-
-.Procedure
-
-To deploy the Ingress Node Firewall Operator, create an `IngressNodeFirewallConfig` custom resource that deploys the Operator's daemon set. You can then deploy one or more `IngressNodeFirewall` CRs to apply firewall rules to nodes.
-
-. Create the `IngressNodeFirewallConfig` CR named `ingressnodefirewallconfig` inside the `openshift-ingress-node-firewall` namespace.
-
-. Run the following command to deploy Ingress Node Firewall Operator rules:
-+
-[source,terminal]
-----
-$ oc apply -f rule.yaml
----- 
\ No newline at end of file
diff --git a/modules/nw-infw-operator-installing.adoc b/modules/nw-infw-operator-installing.adoc
deleted file mode 100644
index 570437bda391..000000000000
--- a/modules/nw-infw-operator-installing.adoc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ingress-node-firewall-operator.adoc
-
-:_content-type: PROCEDURE
-[id="installing-infw-operator_{context}"]
-= Installing the Ingress Node Firewall Operator
-
-As a cluster administrator, you can install the Ingress Node Firewall Operator by using the {product-title} CLI or the web console.
-
-[id="install-operator-cli_{context}"]
-== Installing the Ingress Node Firewall Operator using the CLI
-
-As a cluster administrator, you can install the Operator using the CLI.
-
-.Prerequisites
-
-* You have installed the OpenShift CLI (`oc`).
-* You have an account with administrator privileges.
-
-.Procedure
-
-. To create the `openshift-ingress-node-firewall` namespace, enter the following command:
-+
-[source,terminal]
-----
-$ cat << EOF| oc create -f -
-apiVersion: v1
-kind: Namespace
-metadata:
-  labels:
-    pod-security.kubernetes.io/enforce: privileged
-    pod-security.kubernetes.io/enforce-version: v1.24
-  name: openshift-ingress-node-firewall
-EOF
-----
-
-. To create an `OperatorGroup` CR, enter the following command:
-+
-[source,terminal]
-----
-$ cat << EOF| oc create -f -
-apiVersion: operators.coreos.com/v1
-kind: OperatorGroup
-metadata:
-  name: ingress-node-firewall-operators
-  namespace: openshift-ingress-node-firewall
-EOF
-----
-
-. Subscribe to the Ingress Node Firewall Operator.
-
-.. 
To create a `Subscription` CR for the Ingress Node Firewall Operator, enter the following command: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: ingress-node-firewall-sub - namespace: openshift-ingress-node-firewall -spec: - name: ingress-node-firewall - channel: stable - source: redhat-operators - sourceNamespace: openshift-marketplace -EOF ----- - -. To verify that the Operator is installed, enter the following command: -+ -[source,terminal] ----- -$ oc get ip -n openshift-ingress-node-firewall ----- -+ -.Example output -[source,terminal] ----- -NAME CSV APPROVAL APPROVED -install-5cvnz ingress-node-firewall.4.13.0-202211122336 Automatic true ----- - -. To verify the version of the Operator, enter the following command: - -+ -[source,terminal] ----- -$ oc get csv -n openshift-ingress-node-firewall ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -ingress-node-firewall.4.13.0-202211122336 Ingress Node Firewall Operator 4.13.0-202211122336 ingress-node-firewall.4.13.0-202211102047 Succeeded ----- - -[id="install-operator-web-console_{context}"] -== Installing the Ingress Node Firewall Operator using the web console - -As a cluster administrator, you can install the Operator using the web console. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have an account with administrator privileges. - -.Procedure - - -. Install the Ingress Node Firewall Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Select *Ingress Node Firewall Operator* from the list of available Operators, and then click *Install*. - -.. On the *Install Operator* page, under *Installed Namespace*, select *Operator recommended Namespace*. - -.. Click *Install*. - -. Verify that the Ingress Node Firewall Operator is installed successfully: - -.. Navigate to the *Operators* -> *Installed Operators* page. - -.. Ensure that *Ingress Node Firewall Operator* is listed in the *openshift-ingress-node-firewall* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. -If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== - -+ -If the Operator does not have a *Status* of *InstallSucceeded*, troubleshoot using the following steps: - -+ -* Inspect the *Operator Subscriptions* and *Install Plans* tabs for any failures or errors under *Status*. -* Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-ingress-node-firewall` project. -* Check the namespace of the YAML file. If the annotation is missing, you can add the annotation `workload.openshift.io/allowed=management` to the Operator namespace with the following command: -+ -[source,terminal] ----- -$ oc annotate ns/openshift-ingress-node-firewall workload.openshift.io/allowed=management ----- -+ -[NOTE] -==== -For {sno} clusters, the `openshift-ingress-node-firewall` namespace requires the `workload.openshift.io/allowed=management` annotation. 
-==== diff --git a/modules/nw-infw-operator-rules-object.adoc b/modules/nw-infw-operator-rules-object.adoc deleted file mode 100644 index fda29d25c955..000000000000 --- a/modules/nw-infw-operator-rules-object.adoc +++ /dev/null @@ -1,155 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-node-firewall-operator.adoc - -:_content-type: CONCEPT -[id="nw-ingress-node-firewall-operator-rules-object_{context}"] -= Ingress Node Firewall rules object - -The fields for the Ingress Node Firewall rules object are described in the following table: - -.Ingress Node Firewall rules object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`metadata.name` -|`string` -|The name of the CR object. - -|`interfaces` -|`array` -|The fields for this object specify the interfaces to apply the firewall rules to. For example, `- en0` and -`- en1`. - -|`nodeSelector` -|`array` -|You can use `nodeSelector` to select the nodes to apply the firewall rules to. Set the value of your named `nodeselector` labels to `true` to apply the rule. - -|`ingress` -|`object` -|`ingress` allows you to configure the rules that allow outside access to the services on your cluster. -|==== - -[discrete] -[id="nw-infw-ingress-rules-object_{context}"] -=== Ingress object configuration - -The values for the `ingress` object are defined in the following table: - -.`ingress` object -[cols=".^3,.^2,.^5a",options="header"] -|==== -|Field|Type|Description - -|`sourceCIDRs` -|`array` -|Allows you to set the CIDR block. You can configure multiple CIDRs from different address families. - -[NOTE] -==== -Different CIDRs allow you to use the same order rule. In the case that there are multiple `IngressNodeFirewall` objects for the same nodes and interfaces with overlapping CIDRs, the `order` field will specify which rule is applied first. Rules are applied in ascending order. -==== - -|`rules` -|`array` -|Ingress firewall `rules.order` objects are ordered starting at `1` for each `source.CIDR` with up to 100 rules per CIDR. Lower order rules are executed first. - -`rules.protocolConfig.protocol` supports the following protocols: TCP, UDP, SCTP, ICMP and ICMPv6. ICMP and ICMPv6 rules can match against ICMP and ICMPv6 types or codes. TCP, UDP, and SCTP rules can match against a single destination port or a range of ports using `<start : end-1>` format. - -Set `rules.action` to `allow` to apply the rule or `deny` to disallow the rule. - -[NOTE] -==== -Ingress firewall rules are verified using a verification webhook that blocks any invalid configuration. The verification webhook prevents you from blocking any critical cluster services such as the API server or SSH. 
-==== -|==== - -[discrete] -[id="nw-ingress-node-firewall-example-cr_{context}"] -== Ingress Node Firewall rules object example - -A complete Ingress Node Firewall configuration is specified in the following example: - -.Example Ingress Node Firewall configuration -[source,yaml] ----- -apiVersion: ingressnodefirewall.openshift.io/v1alpha1 -kind: IngressNodeFirewall -metadata: - name: ingressnodefirewall -spec: - interfaces: - - eth0 - nodeSelector: - matchLabels: - <do_node_ingress_firewall>: 'true' - ingress: - - sourceCIDRs: - - 172.16.0.0/12 - rules: - - order: 10 - protocolConfig: - protocol: ICMP - icmp: - icmpType: 8 #ICMP Echo request - action: Deny - - order: 20 - protocolConfig: - protocol: TCP - tcp: - ports: "8000-9000" - action: Deny - - sourceCIDRs: - - fc00:f853:ccd:e793::0/64 - rules: - - order: 10 - protocolConfig: - protocol: ICMPv6 - icmpv6: - icmpType: 128 #ICMPV6 Echo request - action: Deny ----- - -[discrete] -[id="nw-ingress-node-firewall-zero-trust-example-cr_{context}"] -== Zero trust Ingress Node Firewall rules object example - -Zero trust Ingress Node Firewall rules can provide additional security to multi-interface clusters. For example, you can use zero trust Ingress Node Firewall rules to drop all traffic on a specific interface except for SSH. - -A complete configuration of a zero trust Ingress Node Firewall rule set is specified in the following example: - -[IMPORTANT] -==== -Users need to add all ports their application will use to their allowlist in the following case to ensure proper functionality. -==== - -.Example zero trust Ingress Node Firewall rules -[source,yaml] ----- -apiVersion: ingressnodefirewall.openshift.io/v1alpha1 -kind: IngressNodeFirewall -metadata: - name: ingressnodefirewall-zero-trust -spec: - interfaces: - - eth1 <1> - nodeSelector: - matchLabels: - <do_node_ingress_firewall>: 'true' - ingress: - - sourceCIDRs: - - 0.0.0.0/0 <2> - rules: - - order: 10 - protocolConfig: - protocol: TCP - tcp: - ports: 22 - action: Allow - - order: 20 - action: Deny <3> ----- -<1> Multi-interface cluster -<2> `0.0.0.0/0` set to match any CIDR -<3> `action` set to `deny` diff --git a/modules/nw-infw-operator-troubleshooting.adoc b/modules/nw-infw-operator-troubleshooting.adoc deleted file mode 100644 index 5e185d6f2e10..000000000000 --- a/modules/nw-infw-operator-troubleshooting.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-node-firewall-operator.adoc - -:_content-type: PROCEDURE -[id="nw-infw-operator-troubleshooting_{context}"] -= Troubleshooting the Ingress Node Firewall Operator - -* Run the following command to list installed Ingress Node Firewall custom resource definitions (CRD): -+ -[source,terminal] ----- -$ oc get crds | grep ingressnodefirewall ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -ingressnodefirewallconfigs.ingressnodefirewall.openshift.io 2022-08-25T10:03:01Z -ingressnodefirewallnodestates.ingressnodefirewall.openshift.io 2022-08-25T10:03:00Z -ingressnodefirewalls.ingressnodefirewall.openshift.io 2022-08-25T10:03:00Z ----- - -* Run the following command to view the state of the Ingress Node Firewall Operator: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ingress-node-firewall ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ingress-node-firewall-controller-manager 2/2 Running 0 5d21h -ingress-node-firewall-daemon-pqx56 3/3 Running 0 5d21h ----- -+ -The following fields provide 
information about the status of the Operator: -`READY`, `STATUS`, `AGE`, and `RESTARTS`. The `STATUS` field is `Running` when the Ingress Node Firewall Operator is deploying a daemon set to the assigned nodes. - -* Run the following command to collect all ingress firewall node pods' logs: -+ -[source,terminal] ----- -$ oc adm must-gather – gather_ingress_node_firewall ----- -+ -The logs are available in the sos node's report containing eBPF `bpftool` outputs at `/sos_commands/ebpf`. These reports include lookup tables used or updated as the ingress firewall XDP handles packet processing, updates statistics, and emits events. diff --git a/modules/nw-infw-operator-viewing.adoc b/modules/nw-infw-operator-viewing.adoc deleted file mode 100644 index 2f7faaec6fe6..000000000000 --- a/modules/nw-infw-operator-viewing.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-node-firewall-operator.adoc - -:_content-type: PROCEDURE -[id="nw-infw-operator-viewing_{context}"] -= Viewing Ingress Node Firewall Operator rules - -.Procedure - -. Run the following command to view all current rules : -+ -[source,terminal] ----- -$ oc get ingressnodefirewall ----- - -. Choose one of the returned `<resource>` names and run the following command to view the rules or configs: -+ -[source,terminal] ----- -$ oc get <resource> <name> -o yaml ----- diff --git a/modules/nw-ingress-configuring-application-domain.adoc b/modules/nw-ingress-configuring-application-domain.adoc deleted file mode 100644 index 28014dc2daf6..000000000000 --- a/modules/nw-ingress-configuring-application-domain.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc -// - -:_content-type: PROCEDURE -[id="nw-ingress-configuring-application-domain_{context}"] -= Specifying an alternative cluster domain using the appsDomain option - -//OpenShift Dedicated or Amazon RH OpenShift cluster administrator - -As a cluster administrator, you can specify an alternative to the default cluster domain for user-created routes by configuring the `appsDomain` field. The `appsDomain` field is an optional domain for {product-title} to use instead of the default, which is specified in the `domain` field. If you specify an alternative domain, it overrides the default cluster domain for the purpose of determining the default host for a new route. - -For example, you can use the DNS domain for your company as the default domain for routes and ingresses for applications running on your cluster. - -.Prerequisites - -//* You deployed an {OSD} cluster. -* You deployed an {product-title} cluster. -* You installed the `oc` command line interface. - -.Procedure - -. Configure the `appsDomain` field by specifying an alternative default domain for user-created routes. -+ -.. Edit the ingress `cluster` resource: -+ -[source,terminal] ----- -$ oc edit ingresses.config/cluster -o yaml ----- -+ -.. Edit the YAML file: -+ -.Sample `appsDomain` configuration to `test.example.com` -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - domain: apps.example.com <1> - appsDomain: <test.example.com> <2> ----- -<1> Specifies the default domain. You cannot modify the default domain after installation. -<2> Optional: Domain for {product-title} infrastructure to use for application routes. Instead of the default prefix, `apps`, you can use an alternative prefix like `test`. -+ -. 
Verify that an existing route contains the domain name specified in the `appsDomain` field by exposing the route and verifying the route domain change: -//+ -//.. Access the Ingress Controller Operator YAML file: -//+ -//[source,terminal] -//---- -//$ oc get ingresses.config/cluster -o yaml -//---- -+ -[NOTE] -==== -Wait for the `openshift-apiserver` finish rolling updates before exposing the route. -==== -+ -.. Expose the route: -+ -[source,terminal] ----- -$ oc expose service hello-openshift -route.route.openshift.io/hello-openshift exposed ----- -+ -.Example output: -+ -[source,terminal] ----- -$ oc get routes -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -hello-openshift hello_openshift-<my_project>.test.example.com -hello-openshift 8080-tcp None ----- diff --git a/modules/nw-ingress-controller-config-tuningoptions-healthcheckinterval.adoc b/modules/nw-ingress-controller-config-tuningoptions-healthcheckinterval.adoc deleted file mode 100644 index 24c909f3dbd7..000000000000 --- a/modules/nw-ingress-controller-config-tuningoptions-healthcheckinterval.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-controller-config-tuningoptions-healthcheckinterval_{context}"] -= Setting the Ingress Controller health check interval -A cluster administrator can set the health check interval to define how long the router waits between two consecutive health checks. This value is applied globally as a default for all routes. The default value is 5 seconds. - -.Prerequisites -* The following assumes that you already created an Ingress Controller. - -.Procedure -* Update the Ingress Controller to change the interval between back end health checks: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default --type=merge -p '{"spec":{"tuningOptions": {"healthCheckInterval": "8s"}}}' ----- -+ -[NOTE] -==== -To override the `healthCheckInterval` for a single route, use the route annotation `router.openshift.io/haproxy.health.check.interval` -==== diff --git a/modules/nw-ingress-controller-configuration-gcp-global-access.adoc b/modules/nw-ingress-controller-configuration-gcp-global-access.adoc deleted file mode 100644 index 009c1c521a3a..000000000000 --- a/modules/nw-ingress-controller-configuration-gcp-global-access.adoc +++ /dev/null @@ -1,61 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-controller-configuration-gcp-global-access_{context}"] -= Configuring global access for an Ingress Controller on GCP - -An Ingress Controller created on GCP with an internal load balancer generates an internal IP address for the service. A cluster administrator can specify the global access option, which enables clients in any region within the same VPC network and compute region as the load balancer, to reach the workloads running on your cluster. - -For more information, see the GCP documentation for link:https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access[global access]. - -.Prerequisites - -* You deployed an {product-title} cluster on GCP infrastructure. -* You configured an Ingress Controller to use an internal load balancer. -* You installed the OpenShift CLI (`oc`). - -.Procedure - -. Configure the Ingress Controller resource to allow global access. 
-+ -[NOTE] -==== -You can also create an Ingress Controller and specify the global access option. -==== -+ -.. Configure the Ingress Controller resource: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator edit ingresscontroller/default ----- -+ -.. Edit the YAML file: -+ -.Sample `clientAccess` configuration to `Global` -[source,yaml] ----- - spec: - endpointPublishingStrategy: - loadBalancer: - providerParameters: - gcp: - clientAccess: Global <1> - type: GCP - scope: Internal - type: LoadBalancerService ----- -<1> Set `gcp.clientAccess` to `Global`. - -.. Save the file to apply the changes. -+ -. Run the following command to verify that the service allows global access: -+ -[source,terminal] ----- -$ oc -n openshift-ingress edit svc/router-default -o yaml ----- -+ -The output shows that global access is enabled for GCP with the annotation, `networking.gke.io/internal-load-balancer-allow-global-access`. diff --git a/modules/nw-ingress-controller-configuration-parameters.adoc b/modules/nw-ingress-controller-configuration-parameters.adoc deleted file mode 100644 index 7f410ca4622d..000000000000 --- a/modules/nw-ingress-controller-configuration-parameters.adoc +++ /dev/null @@ -1,307 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc -:_content-type: REFERENCE -[id="nw-ingress-controller-configuration-parameters_{context}"] -= Ingress Controller configuration parameters - -The `ingresscontrollers.operator.openshift.io` resource offers the following -configuration parameters. - -[cols="3a,8a",options="header"] -|=== -|Parameter |Description - -|`domain` -|`domain` is a DNS name serviced by the Ingress Controller and is used to configure multiple features: - -* For the `LoadBalancerService` endpoint publishing strategy, `domain` is used to configure DNS records. See `endpointPublishingStrategy`. - -* When using a generated default certificate, the certificate is valid for `domain` and its `subdomains`. See `defaultCertificate`. - -* The value is published to individual Route statuses so that users know where to target external DNS records. - -The `domain` value must be unique among all Ingress Controllers and cannot be updated. - -If empty, the default value is `ingress.config.openshift.io/cluster` `.spec.domain`. - -|`replicas` -|`replicas` is the desired number of Ingress Controller replicas. If not set, the default value is `2`. - -|`endpointPublishingStrategy` -|`endpointPublishingStrategy` is used to publish the Ingress Controller endpoints to other networks, enable load balancer integrations, and provide access to other systems. - -On GCP, AWS, and Azure you can configure the following `endpointPublishingStrategy` fields: - -* `loadBalancer.scope` -* `loadBalancer.allowedSourceRanges` - -If not set, the default value is based on `infrastructure.config.openshift.io/cluster` `.status.platform`: - -* Amazon Web Services (AWS): `LoadBalancerService` (with External scope) -* Azure: `LoadBalancerService` (with External scope) -* Google Cloud Platform (GCP): `LoadBalancerService` (with External scope) -* Bare metal: `NodePortService` -* Other: `HostNetwork` -+ -[NOTE] -==== -`HostNetwork` has a `hostNetwork` field with the following default values for the optional binding ports: `httpPort: 80`, `httpsPort: 443`, and `statsPort: 1936`. -With the binding ports, you can deploy multiple Ingress Controllers on the same node for the `HostNetwork` strategy. 
- -.Example -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: internal - namespace: openshift-ingress-operator -spec: - domain: example.com - endpointPublishingStrategy: - type: HostNetwork - hostNetwork: - httpPort: 80 - httpsPort: 443 - statsPort: 1936 ----- -==== -+ -[NOTE] -==== -On {rh-openstack-first}, the `LoadBalancerService` endpoint publishing strategy is only supported if a cloud provider is configured to create health monitors. For {rh-openstack} 16.1 and 16.2, this strategy is only possible if you use the Amphora Octavia provider. - -For more information, see the "Setting cloud provider options" section of the {rh-openstack} installation documentation. -==== - -For most platforms, the `endpointPublishingStrategy` value can be updated. On GCP, you can configure the following `endpointPublishingStrategy` fields: - -* `loadBalancer.scope` -* `loadbalancer.providerParameters.gcp.clientAccess` -* `hostNetwork.protocol` -* `nodePort.protocol` - -|`defaultCertificate` -|The `defaultCertificate` value is a reference to a secret that contains the default certificate that is served by the Ingress Controller. When Routes do not specify their own certificate, `defaultCertificate` is used. - -The secret must contain the following keys and data: -* `tls.crt`: certificate file contents -* `tls.key`: key file contents - -If not set, a wildcard certificate is automatically generated and used. The certificate is valid for the Ingress Controller `domain` and `subdomains`, and -the generated certificate's CA is automatically integrated with the -cluster's trust store. - -The in-use certificate, whether generated or user-specified, is automatically integrated with {product-title} built-in OAuth server. - -|`namespaceSelector` -|`namespaceSelector` is used to filter the set of namespaces serviced by the -Ingress Controller. This is useful for implementing shards. - -|`routeSelector` -|`routeSelector` is used to filter the set of Routes serviced by the Ingress Controller. This is useful for implementing shards. - -|`nodePlacement` -|`nodePlacement` enables explicit control over the scheduling of the Ingress Controller. - -If not set, the defaults values are used. - -[NOTE] -==== -The `nodePlacement` parameter includes two parts, `nodeSelector` and `tolerations`. For example: - -[source,yaml] ----- -nodePlacement: - nodeSelector: - matchLabels: - kubernetes.io/os: linux - tolerations: - - effect: NoSchedule - operator: Exists ----- -==== - -|`tlsSecurityProfile` -|`tlsSecurityProfile` specifies settings for TLS connections for Ingress Controllers. - -If not set, the default value is based on the `apiservers.config.openshift.io/cluster` resource. - -When using the `Old`, `Intermediate`, and `Modern` profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the `Intermediate` profile deployed on release `X.Y.Z`, an upgrade to release `X.Y.Z+1` may cause a new profile configuration to be applied to the Ingress Controller, resulting in a rollout. - -The minimum TLS version for Ingress Controllers is `1.1`, and the maximum TLS version is `1.3`. - -[NOTE] -==== -Ciphers and the minimum TLS version of the configured security profile are reflected in the `TLSProfile` status. -==== - -[IMPORTANT] -==== -The Ingress Operator converts the TLS `1.0` of an `Old` or `Custom` profile to `1.1`. 
-==== - -|`clientTLS` -|`clientTLS` authenticates client access to the cluster and services; as a result, mutual TLS authentication is enabled. If not set, then client TLS is not enabled. - -`clientTLS` has the required subfields, `spec.clientTLS.clientCertificatePolicy` and `spec.clientTLS.ClientCA`. - -The `ClientCertificatePolicy` subfield accepts one of the two values: `Required` or `Optional`. The `ClientCA` subfield specifies a config map that is in the openshift-config namespace. The config map should contain a CA certificate bundle. - -The `AllowedSubjectPatterns` is an optional value that specifies a list of regular expressions, which are matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. At least one pattern must match a client certificate's distinguished name; otherwise, the Ingress Controller rejects the certificate and denies the connection. If not specified, the Ingress Controller does not reject certificates based on the distinguished name. - -|`routeAdmission` -|`routeAdmission` defines a policy for handling new route claims, such as allowing or denying claims across namespaces. - -`namespaceOwnership` describes how hostname claims across namespaces should be handled. The default is `Strict`. - -* `Strict`: does not allow routes to claim the same hostname across namespaces. -* `InterNamespaceAllowed`: allows routes to claim different paths of the same hostname across namespaces. - -`wildcardPolicy` describes how routes with wildcard policies are handled by the Ingress Controller. - -* `WildcardsAllowed`: Indicates routes with any wildcard policy are admitted by the Ingress Controller. - -* `WildcardsDisallowed`: Indicates only routes with a wildcard policy of `None` are admitted by the Ingress Controller. Updating `wildcardPolicy` from `WildcardsAllowed` to `WildcardsDisallowed` causes admitted routes with a wildcard policy of `Subdomain` to stop working. These routes must be recreated to a wildcard policy of `None` to be readmitted by the Ingress Controller. `WildcardsDisallowed` is the default setting. - -|`IngressControllerLogging` -|`logging` defines parameters for what is logged where. If this field is empty, operational logs are enabled but access logs are disabled. - -* `access` describes how client requests are logged. If this field is empty, access logging is disabled. -** `destination` describes a destination for log messages. -*** `type` is the type of destination for logs: -**** `Container` specifies that logs should go to a sidecar container. The Ingress Operator configures the container, named *logs*, on the Ingress Controller pod and configures the Ingress Controller to write logs to the container. The expectation is that the administrator configures a custom logging solution that reads logs from this container. Using container logs means that logs may be dropped if the rate of logs exceeds the container runtime capacity or the custom logging solution capacity. -**** `Syslog` specifies that logs are sent to a Syslog endpoint. The administrator must specify an endpoint that can receive Syslog messages. The expectation is that the administrator has configured a custom Syslog instance. -*** `container` describes parameters for the `Container` logging destination type. Currently there are no parameters for container logging, so this field must be empty. 
-*** `syslog` describes parameters for the `Syslog` logging destination type: -**** `address` is the IP address of the syslog endpoint that receives log messages. -**** `port` is the UDP port number of the syslog endpoint that receives log messages. -**** `maxLength` is the maximum length of the syslog message. It must be between `480` and `4096` bytes. If this field is empty, the maximum length is set to the default value of `1024` bytes. -**** `facility` specifies the syslog facility of log messages. If this field is empty, the facility is `local1`. Otherwise, it must specify a valid syslog facility: `kern`, `user`, `mail`, `daemon`, `auth`, `syslog`, `lpr`, `news`, `uucp`, `cron`, `auth2`, `ftp`, `ntp`, `audit`, `alert`, `cron2`, `local0`, `local1`, `local2`, `local3`. `local4`, `local5`, `local6`, or `local7`. -** `httpLogFormat` specifies the format of the log message for an HTTP request. If this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see link:http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3[the HAProxy documentation]. - -|`httpHeaders` -|`httpHeaders` defines the policy for HTTP headers. - -By setting the `forwardedHeaderPolicy` for the `IngressControllerHTTPHeaders`, you specify when and how the Ingress Controller sets the `Forwarded`, `X-Forwarded-For`, `X-Forwarded-Host`, `X-Forwarded-Port`, `X-Forwarded-Proto`, and `X-Forwarded-Proto-Version` HTTP headers. - -By default, the policy is set to `Append`. - -* `Append` specifies that the Ingress Controller appends the headers, preserving any existing headers. -* `Replace` specifies that the Ingress Controller sets the headers, removing any existing headers. -* `IfNone` specifies that the Ingress Controller sets the headers if they are not already set. -* `Never` specifies that the Ingress Controller never sets the headers, preserving any existing headers. - -By setting `headerNameCaseAdjustments`, you can specify case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying `X-Forwarded-For` indicates that the `x-forwarded-for` HTTP header should be adjusted to have the specified capitalization. - -These adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1. - -For request headers, these adjustments are applied only for routes that have the `haproxy.router.openshift.io/h1-adjust-case=true` annotation. For response headers, these adjustments are applied to all HTTP responses. If this field is empty, no request headers are adjusted. - -|`httpCompression` -|`httpCompression` defines the policy for HTTP traffic compression. - -* `mimeTypes` defines a list of MIME types to which compression should be applied. For example, `text/css; charset=utf-8`, `text/html`, `text/*`, `image/svg+xml`, `application/octet-stream`, `X-custom/customsub`, using the format pattern, `type/subtype; [;attribute=value]`. The `types` are: application, image, message, multipart, text, video, or a custom type prefaced by `X-`; e.g. To see the full notation for MIME types and subtypes, see link:https://datatracker.ietf.org/doc/html/rfc1341#page-7[RFC1341] - -|`httpErrorCodePages` -|`httpErrorCodePages` specifies custom HTTP error code response pages. By default, an IngressController uses error pages built into the IngressController image. 
-
-|`httpCaptureCookies`
-|`httpCaptureCookies` specifies HTTP cookies that you want to capture in access logs. If the `httpCaptureCookies` field is empty, the access logs do not capture the cookies.
-
-For any cookie that you want to capture, the following parameters must be in your `IngressController` configuration:
-
-* `name` specifies the name of the cookie.
-* `maxLength` specifies the maximum length of the cookie.
-* `matchType` specifies if the field `name` of the cookie exactly matches the capture cookie setting or is a prefix of the capture cookie setting. The `matchType` field uses the `Exact` and `Prefix` parameters.
-
-For example:
-[source,yaml]
-----
-  httpCaptureCookies:
-  - matchType: Exact
-    maxLength: 128
-    name: MYCOOKIE
-----
-
-|`httpCaptureHeaders`
-|`httpCaptureHeaders` specifies the HTTP headers that you want to capture in the access logs. If the `httpCaptureHeaders` field is empty, the access logs do not capture the headers.
-
-`httpCaptureHeaders` contains two lists of headers to capture in the access logs. The two lists of header fields are `request` and `response`. In both lists, the `name` field must specify the header name and the `maxLength` field must specify the maximum length of the header. For example:
-
-[source,yaml]
-----
-  httpCaptureHeaders:
-    request:
-    - maxLength: 256
-      name: Connection
-    - maxLength: 128
-      name: User-Agent
-    response:
-    - maxLength: 256
-      name: Content-Type
-    - maxLength: 256
-      name: Content-Length
-----
-|`tuningOptions`
-|`tuningOptions` specifies options for tuning the performance of Ingress Controller pods.
-
-* `clientFinTimeout` specifies how long a connection is held open while waiting for the client response to the server closing the connection. The default timeout is `1s`.
-
-* `clientTimeout` specifies how long a connection is held open while waiting for a client response. The default timeout is `30s`.
-
-* `headerBufferBytes` specifies how much memory is reserved, in bytes, for Ingress Controller connection sessions. This value must be at least `16384` if HTTP/2 is enabled for the Ingress Controller. If not set, the default value is `32768` bytes. Setting this field is not recommended because `headerBufferBytes` values that are too small can break the Ingress Controller, and `headerBufferBytes` values that are too large could cause the Ingress Controller to use significantly more memory than necessary.
-
-* `headerBufferMaxRewriteBytes` specifies how much memory should be reserved, in bytes, from `headerBufferBytes` for HTTP header rewriting and appending for Ingress Controller connection sessions. The minimum value for `headerBufferMaxRewriteBytes` is `4096`. `headerBufferBytes` must be greater than `headerBufferMaxRewriteBytes` for incoming HTTP requests. If not set, the default value is `8192` bytes. Setting this field is not recommended because `headerBufferMaxRewriteBytes` values that are too small can break the Ingress Controller and `headerBufferMaxRewriteBytes` values that are too large could cause the Ingress Controller to use significantly more memory than necessary.
-
-* `healthCheckInterval` specifies how long the router waits between health checks. The default is `5s`.
-
-* `serverFinTimeout` specifies how long a connection is held open while waiting for the server response to the client that is closing the connection. The default timeout is `1s`.
-
-* `serverTimeout` specifies how long a connection is held open while waiting for a server response. The default timeout is `30s`.
- -* `threadCount` specifies the number of threads to create per HAProxy process. Creating more threads allows each Ingress Controller pod to handle more connections, at the cost of more system resources being used. HAProxy -supports up to `64` threads. If this field is empty, the Ingress Controller uses the default value of `4` threads. The default value can change in future releases. Setting this field is not recommended because increasing the number of HAProxy threads allows Ingress Controller pods to use more CPU time under load, and prevent other pods from receiving the CPU resources they need to perform. Reducing the number of threads can cause the Ingress Controller to perform poorly. - -* `tlsInspectDelay` specifies how long the router can hold data to find a matching route. Setting this value too short can cause the router to fall back to the default certificate for edge-terminated, reencrypted, or passthrough routes, even when using a better matched certificate. The default inspect delay is `5s`. - -* `tunnelTimeout` specifies how long a tunnel connection, including websockets, remains open while the tunnel is idle. The default timeout is `1h`. - -* `maxConnections` specifies the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections at the cost of additional system resources. Permitted values are `0`, `-1`, any value within the range `2000` and `2000000`, or the field can be left empty. - -** If this field is left empty or has the value `0`, the Ingress Controller will use the default value of `50000`. This value is subject to change in future releases. - -** If the field has the value of `-1`, then HAProxy will dynamically compute a maximum value based on the available `ulimits` in the running container. This process results in a large computed value that will incur significant memory usage compared to the current default value of `50000`. - -** If the field has a value that is greater than the current operating system limit, the HAProxy process will not start. - -** If you choose a discrete value and the router pod is migrated to a new node, it is possible the new node does not have an identical `ulimit` configured. In such cases, the pod fails to start. - -** If you have nodes with different `ulimits` configured, and you choose a discrete value, it is recommended to use the value of `-1` for this field so that the maximum number of connections is calculated at runtime. - - -|`logEmptyRequests` -|`logEmptyRequests` specifies connections for which no request is received and logged. These empty requests come from load balancer health probes or web browser speculative connections (preconnect) and logging these requests can be undesirable. However, these requests can be caused by network errors, in which case logging empty requests can be useful for diagnosing the errors. These requests can be caused by port scans, and logging empty requests can aid in detecting intrusion attempts. Allowed values for this field are `Log` and `Ignore`. The default value is `Log`. - -The `LoggingPolicy` type accepts either one of two values: - -* `Log`: Setting this value to `Log` indicates that an event should be logged. -* `Ignore`: Setting this value to `Ignore` sets the `dontlognull` option in the HAproxy configuration. - -|`HTTPEmptyRequestsPolicy` -|`HTTPEmptyRequestsPolicy` describes how HTTP connections are handled if the connection times out before a request is received. 
Allowed values for this field are `Respond` and `Ignore`. The default value is `Respond`. - -The `HTTPEmptyRequestsPolicy` type accepts either one of two values: - -* `Respond`: If the field is set to `Respond`, the Ingress Controller sends an HTTP `400` or `408` response, logs the connection if access logging is enabled, and counts the connection in the appropriate metrics. -* `Ignore`: Setting this option to `Ignore` adds the `http-ignore-probes` parameter in the HAproxy configuration. If the field is set to `Ignore`, the Ingress Controller closes the connection without sending a response, then logs the connection, or incrementing metrics. - -These connections come from load balancer health probes or web browser speculative connections (preconnect) and can be safely ignored. However, these requests can be caused by network errors, so setting this field to `Ignore` can impede detection and diagnosis of problems. These requests can be caused by port scans, in which case logging empty requests can aid in detecting intrusion attempts. -|=== - - -[NOTE] -==== -All parameters are optional. -==== diff --git a/modules/nw-ingress-controller-configuration-proxy-protocol.adoc b/modules/nw-ingress-controller-configuration-proxy-protocol.adoc deleted file mode 100644 index 896bfc99334d..000000000000 --- a/modules/nw-ingress-controller-configuration-proxy-protocol.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-controller-configuration-proxy-protocol_{context}"] -= Configuring the PROXY protocol for an Ingress Controller - -A cluster administrator can configure https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt[the PROXY protocol] when an Ingress Controller uses either the `HostNetwork` or `NodePortService` endpoint publishing strategy types. The PROXY protocol enables the load balancer to preserve the original client addresses for connections that the Ingress Controller receives. The original client addresses are useful for logging, filtering, and injecting HTTP headers. In the default configuration, the connections that the Ingress Controller receives only contain the source address that is associated with the load balancer. - -This feature is not supported in cloud deployments. This restriction is because when {product-title} runs in a cloud platform, and an IngressController specifies that a service load balancer should be used, the Ingress Operator configures the load balancer service and enables the PROXY protocol based on the platform requirement for preserving source addresses. - -[IMPORTANT] -==== -You must configure both {product-title} and the external load balancer to either use the PROXY protocol or to use TCP. -==== - -[WARNING] -==== -The PROXY protocol is unsupported for the default Ingress Controller with installer-provisioned clusters on non-cloud platforms that use a Keepalived Ingress VIP. -==== - -.Prerequisites -* You created an Ingress Controller. - -.Procedure -. Edit the Ingress Controller resource: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator edit ingresscontroller/default ----- - -. 
Set the PROXY configuration: -+ -* If your Ingress Controller uses the hostNetwork endpoint publishing strategy type, set the `spec.endpointPublishingStrategy.hostNetwork.protocol` subfield to `PROXY`: -+ -.Sample `hostNetwork` configuration to `PROXY` -[source,yaml] ----- - spec: - endpointPublishingStrategy: - hostNetwork: - protocol: PROXY - type: HostNetwork ----- -* If your Ingress Controller uses the NodePortService endpoint publishing strategy type, set the `spec.endpointPublishingStrategy.nodePort.protocol` subfield to `PROXY`: -+ -.Sample `nodePort` configuration to `PROXY` -[source,yaml] ----- - spec: - endpointPublishingStrategy: - nodePort: - protocol: PROXY - type: NodePortService ----- diff --git a/modules/nw-ingress-controller-endpoint-publishing-strategies.adoc b/modules/nw-ingress-controller-endpoint-publishing-strategies.adoc deleted file mode 100644 index 05d06736e57b..000000000000 --- a/modules/nw-ingress-controller-endpoint-publishing-strategies.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -[id="nw-ingress-controller-endpoint-publishing-strategies_{context}"] -= Ingress Controller endpoint publishing strategy - -*`NodePortService` endpoint publishing strategy* - -The `NodePortService` endpoint publishing strategy publishes the Ingress Controller using a Kubernetes NodePort service. - -In this configuration, the Ingress Controller deployment uses container networking. A `NodePortService` is created to publish the deployment. The specific node ports are dynamically allocated by {product-title}; however, to support static port allocations, your changes to the node port field of the managed `NodePortService` are preserved. - -.Diagram of NodePortService -image::202_OpenShift_Ingress_0222_node_port.png[{product-title} Ingress NodePort endpoint publishing strategy] - -The preceding graphic shows the following concepts pertaining to {product-title} Ingress NodePort endpoint publishing strategy: - -* All the available nodes in the cluster have their own, externally accessible IP addresses. The service running in the cluster is bound to the unique NodePort for all the nodes. -* When the client connects to a node that is down, for example, by connecting the `10.0.128.4` IP address in the graphic, the node port directly connects the client to an available node that is running the service. In this scenario, no load balancing is required. As the image shows, the `10.0.128.4` address is down and another IP address must be used instead. - -[NOTE] -==== -The Ingress Operator ignores any updates to `.spec.ports[].nodePort` fields of the service. - -By default, ports are allocated automatically and you can access the port allocations for integrations. However, sometimes static port allocations are necessary to integrate with existing infrastructure which may not be easily reconfigured in response to dynamic ports. To achieve integrations with static node ports, you can update the managed service resource directly. -==== - -For more information, see the link:https://kubernetes.io/docs/concepts/services-networking/service/#nodeport[Kubernetes Services documentation on `NodePort`]. - -*`HostNetwork` endpoint publishing strategy* - -The `HostNetwork` endpoint publishing strategy publishes the Ingress Controller on node ports where the Ingress Controller is deployed. - -An Ingress Controller with the `HostNetwork` endpoint publishing strategy can have only one pod replica per node. 
If you want _n_ replicas, you must use at least _n_ nodes where those replicas can be scheduled. Because each pod replica requests ports `80` and `443` on the node host where it is scheduled, a replica cannot be scheduled to a node if another pod on the same node is using those ports.
diff --git a/modules/nw-ingress-controller-status.adoc b/modules/nw-ingress-controller-status.adoc
deleted file mode 100644
index c2b910b9784e..000000000000
--- a/modules/nw-ingress-controller-status.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Module included in the following assemblies:
-//
-// * ingress/configure-ingress-operator.adoc
-
-:_content-type: PROCEDURE
-[id="nw-ingress-controller-status_{context}"]
-= View Ingress Controller status
-
-You can view the status of a particular Ingress Controller.
-
-.Procedure
-
-* View the status of an Ingress Controller:
-+
-[source,terminal]
-----
-$ oc describe --namespace=openshift-ingress-operator ingresscontroller/<name>
-----
diff --git a/modules/nw-ingress-controller-tls-profiles.adoc b/modules/nw-ingress-controller-tls-profiles.adoc
deleted file mode 100644
index 75685f307c80..000000000000
--- a/modules/nw-ingress-controller-tls-profiles.adoc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Module included in the following assemblies:
-//
-// * ingress/configure-ingress-operator.adoc
-
-[id="nw-ingress-controller-tls-profiles_{context}"]
-= Ingress Controller TLS profiles
-
-The `tlsSecurityProfile` parameter defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.
-
-There are four TLS security profile types:
-
-* `Old`
-* `Intermediate`
-* `Modern`
-* `Custom`
-
-The `Old`, `Intermediate`, and `Modern` profiles are based on link:https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations[recommended configurations]. The `Custom` profile provides the ability to specify individual TLS security profile parameters.
-
-.Sample `Old` profile configuration
-[source,yaml]
-----
-spec:
-  tlsSecurityProfile:
-    type: Old
-----
-
-.Sample `Intermediate` profile configuration
-[source,yaml]
-----
-spec:
-  tlsSecurityProfile:
-    type: Intermediate
-----
-
-.Sample `Modern` profile configuration
-[source,yaml]
-----
-spec:
-  tlsSecurityProfile:
-    type: Modern
-----
-
-The `Custom` profile is a user-defined TLS security profile.
-
-[WARNING]
-====
-You must be careful using a `Custom` profile, because invalid configurations can cause problems.
-====
-
-.Sample `Custom` profile
-[source,yaml]
-----
-spec:
-  tlsSecurityProfile:
-    type: Custom
-    custom:
-      ciphers:
-      - ECDHE-ECDSA-AES128-GCM-SHA256
-      - ECDHE-RSA-AES128-GCM-SHA256
-      minTLSVersion: VersionTLS11
-----
diff --git a/modules/nw-ingress-converting-http-header-case.adoc b/modules/nw-ingress-converting-http-header-case.adoc
deleted file mode 100644
index 287aa22802fc..000000000000
--- a/modules/nw-ingress-converting-http-header-case.adoc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Module included in the following assemblies:
-//
-// * ingress/ingress-operator.adoc
-
-:_content-type: PROCEDURE
-[id="nw-ingress-converting-http-header-case_{context}"]
-= Converting HTTP header case
-
-HAProxy 2.2 lowercases HTTP header names by default, for example, changing `Host: xyz.com` to `host: xyz.com`. If legacy applications are sensitive to the capitalization of HTTP header names, use the Ingress Controller `spec.httpHeaders.headerNameCaseAdjustments` API field for a solution to accommodate legacy applications until they can be fixed.
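Before you add adjustments, it can help to see whether any are already configured. The following check is a minimal sketch that assumes you are inspecting the default Ingress Controller; an empty result means that no header-name case adjustments are currently set.

[source,terminal]
----
$ oc -n openshift-ingress-operator get ingresscontroller/default \
  -o jsonpath='{.spec.httpHeaders.headerNameCaseAdjustments}'
----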
- -[IMPORTANT] -==== -Because {product-title} includes HAProxy 2.2, make sure to add the necessary configuration by using `spec.httpHeaders.headerNameCaseAdjustments` before upgrading. -==== - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -As a cluster administrator, you can convert the HTTP header case by entering the `oc patch` command or by setting the `HeaderNameCaseAdjustments` field in the Ingress Controller YAML file. - -* Specify an HTTP header to be capitalized by entering the `oc patch` command. - -. Enter the `oc patch` command to change the HTTP `host` header to `Host`: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/default --type=merge --patch='{"spec":{"httpHeaders":{"headerNameCaseAdjustments":["Host"]}}}' ----- -+ -. Annotate the route of the application: -+ -[source,terminal] ----- -$ oc annotate routes/my-application haproxy.router.openshift.io/h1-adjust-case=true ----- -+ -The Ingress Controller then adjusts the `host` request header as specified. - -//Extra example if needed -//// -* This example changes the HTTP `cache-control` header to `Cache-Control`: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/default --type=json --patch='[{"op":"add","path":"/spec/httpHeaders/headerNameCaseAdjustments/-","value":"Cache-Control"}]' ----- -+ -The Ingress Controller adjusts the `cache-control` response header as specified. -//// - -* Specify adjustments using the `HeaderNameCaseAdjustments` field by configuring the Ingress Controller YAML file. - -. The following example Ingress Controller YAML adjusts the `host` header to `Host` for HTTP/1 requests to appropriately annotated routes: -+ -.Example Ingress Controller YAML -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - httpHeaders: - headerNameCaseAdjustments: - - Host ----- -+ -. The following example route enables HTTP response header name case adjustments using the `haproxy.router.openshift.io/h1-adjust-case` annotation: -+ -.Example route YAML -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/h1-adjust-case: true <1> - name: my-application - namespace: my-application -spec: - to: - kind: Service - name: my-application ----- -<1> Set `haproxy.router.openshift.io/h1-adjust-case` to true. diff --git a/modules/nw-ingress-creating-a-passthrough-route.adoc b/modules/nw-ingress-creating-a-passthrough-route.adoc deleted file mode 100644 index ec45c3be094e..000000000000 --- a/modules/nw-ingress-creating-a-passthrough-route.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/routes.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-creating-a-passthrough-route_{context}"] -= Creating a passthrough route - -You can configure a secure route using passthrough termination by using the `oc create route` command. With passthrough termination, encrypted traffic is sent straight to the destination without the router providing TLS termination. Therefore no key or certificate is required on the route. - -.Prerequisites - -* You must have a service that you want to expose. 
- -.Procedure - -* Create a `Route` resource: -+ -[source,terminal] ----- -$ oc create route passthrough route-passthrough-secured --service=frontend --port=8080 ----- -+ -If you examine the resulting `Route` resource, it should look similar to the following: -+ -.A Secured Route Using Passthrough Termination -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: route-passthrough-secured <1> -spec: - host: www.example.com - port: - targetPort: 8080 - tls: - termination: passthrough <2> - insecureEdgeTerminationPolicy: None <3> - to: - kind: Service - name: frontend ----- -<1> The name of the object, which is limited to 63 characters. -<2> The `*termination*` field is set to `passthrough`. This is the only required `tls` field. -<3> Optional `insecureEdgeTerminationPolicy`. The only valid values are `None`, `Redirect`, or empty for disabled. -+ -The destination pod is responsible for serving certificates for the -traffic at the endpoint. This is currently the only method that can support requiring client certificates, also known as two-way authentication. diff --git a/modules/nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate.adoc b/modules/nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate.adoc deleted file mode 100644 index d4bd84a054e8..000000000000 --- a/modules/nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/routes.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_{context}"] -= Creating a re-encrypt route with a custom certificate - -You can configure a secure route using reencrypt TLS termination with a custom -certificate by using the `oc create route` command. - -.Prerequisites - -* You must have a certificate/key pair in PEM-encoded files, where the certificate -is valid for the route host. - -* You may have a separate CA certificate in a PEM-encoded file that completes -the certificate chain. - -* You must have a separate destination CA certificate in a PEM-encoded file. - -* You must have a service that you want to expose. - -[NOTE] -==== -Password protected key files are not supported. To remove a passphrase from a -key file, use the following command: - -[source,terminal] ----- -$ openssl rsa -in password_protected_tls.key -out tls.key ----- -==== - -.Procedure - -This procedure creates a `Route` resource with a custom certificate and -reencrypt TLS termination. The following assumes that the certificate/key pair -are in the `tls.crt` and `tls.key` files in the current working directory. You -must also specify a destination CA certificate to enable the Ingress Controller -to trust the service's certificate. You may also specify a CA certificate if -needed to complete the certificate chain. Substitute the actual path names for -`tls.crt`, `tls.key`, `cacert.crt`, and (optionally) `ca.crt`. Substitute the -name of the `Service` resource that you want to expose for `frontend`. -Substitute the appropriate hostname for `www.example.com`. 
- -* Create a secure `Route` resource using reencrypt TLS termination and a custom -certificate: -+ -[source,terminal] ----- -$ oc create route reencrypt --service=frontend --cert=tls.crt --key=tls.key --dest-ca-cert=destca.crt --ca-cert=ca.crt --hostname=www.example.com ----- -+ -If you examine the resulting `Route` resource, it should look similar to the -following: -+ -.YAML Definition of the Secure Route -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: frontend -spec: - host: www.example.com - to: - kind: Service - name: frontend - tls: - termination: reencrypt - key: |- - -----BEGIN PRIVATE KEY----- - [...] - -----END PRIVATE KEY----- - certificate: |- - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- - caCertificate: |- - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- - destinationCACertificate: |- - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- ----- -+ -See `oc create route reencrypt --help` for more options. diff --git a/modules/nw-ingress-creating-a-route-via-an-ingress.adoc b/modules/nw-ingress-creating-a-route-via-an-ingress.adoc deleted file mode 100644 index 12546b2badd1..000000000000 --- a/modules/nw-ingress-creating-a-route-via-an-ingress.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-creating-a-route-via-an-ingress_{context}"] -= Creating a route through an Ingress object - -Some ecosystem components have an integration with Ingress resources but not with route resources. To cover this case, {product-title} automatically creates managed route objects when an Ingress object is created. These route objects are deleted when the corresponding Ingress objects are deleted. - -.Procedure - -. Define an Ingress object in the {product-title} console or by entering the `oc create` command: -+ -.YAML Definition of an Ingress -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: frontend - annotations: - route.openshift.io/termination: "reencrypt" <1> - route.openshift.io/destination-ca-certificate-secret: secret-ca-cert <3> -spec: - rules: - - host: www.example.com <2> - http: - paths: - - backend: - service: - name: frontend - port: - number: 443 - path: / - pathType: Prefix - tls: - - hosts: - - www.example.com - secretName: example-com-tls-certificate ----- -+ -<1> The `route.openshift.io/termination` annotation can be used to configure the `spec.tls.termination` field of the `Route` as `Ingress` has no field for this. The accepted values are `edge`, `passthrough` and `reencrypt`. All other values are silently ignored. When the annotation value is unset, `edge` is the default route. The TLS certificate details must be defined in the template file to implement the default edge route. -<2> When working with an `Ingress` object, you must specify an explicit hostname, unlike when working with routes. You can use the `<host_name>.<cluster_ingress_domain>` syntax, for example `apps.openshiftdemos.com`, to take advantage of the `*.<cluster_ingress_domain>` wildcard DNS record and serving certificate for the cluster. Otherwise, you must ensure that there is a DNS record for the chosen hostname. - -.. 
If you specify the `passthrough` value in the `route.openshift.io/termination` annotation, set `path` to `''` and `pathType` to `ImplementationSpecific` in the spec: -+ -[source,yaml] ----- - spec: - rules: - - host: www.example.com - http: - paths: - - path: '' - pathType: ImplementationSpecific - backend: - service: - name: frontend - port: - number: 443 ----- -+ -[source,terminal] ----- -$ oc apply -f ingress.yaml ----- -+ -<3> The `route.openshift.io/destination-ca-certificate-secret` can be used on an Ingress object to define a route with a custom destination certificate (CA). The annotation references a kubernetes secret, `secret-ca-cert` that will be inserted into the generated route. - -.. To specify a route object with a destination CA from an ingress object, you must create a `kubernetes.io/tls` or `Opaque` type secret with a certificate in PEM-encoded format in the `data.tls.crt` specifier of the secret. - -+ -. List your routes: -+ -[source,terminal] ----- -$ oc get routes ----- -+ -The result includes an autogenerated route whose name starts with `frontend-`: -+ -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -frontend-gnztq www.example.com frontend 443 reencrypt/Redirect None ----- -+ -If you inspect this route, it looks this: -+ -.YAML Definition of an autogenerated route -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: frontend-gnztq - ownerReferences: - - apiVersion: networking.k8s.io/v1 - controller: true - kind: Ingress - name: frontend - uid: 4e6c59cc-704d-4f44-b390-617d879033b6 -spec: - host: www.example.com - path: / - port: - targetPort: https - tls: - certificate: | - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- - insecureEdgeTerminationPolicy: Redirect - key: | - -----BEGIN RSA PRIVATE KEY----- - [...] - -----END RSA PRIVATE KEY----- - termination: reencrypt - destinationCACertificate: | - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- - to: - kind: Service - name: frontend ----- diff --git a/modules/nw-ingress-creating-an-edge-route-with-a-custom-certificate.adoc b/modules/nw-ingress-creating-an-edge-route-with-a-custom-certificate.adoc deleted file mode 100644 index 660238fe0ea1..000000000000 --- a/modules/nw-ingress-creating-an-edge-route-with-a-custom-certificate.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/routes.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-creating-an-edge-route-with-a-custom-certificate_{context}"] -= Creating an edge route with a custom certificate - -You can configure a secure route using edge TLS termination with a custom -certificate by using the `oc create route` command. With an edge route, the -Ingress Controller terminates TLS encryption before forwarding traffic to the -destination pod. The route specifies the TLS certificate and key that the -Ingress Controller uses for the route. - -.Prerequisites - -* You must have a certificate/key pair in PEM-encoded files, where the certificate -is valid for the route host. - -* You may have a separate CA certificate in a PEM-encoded file that completes -the certificate chain. - -* You must have a service that you want to expose. - -[NOTE] -==== -Password protected key files are not supported. 
To remove a passphrase from a -key file, use the following command: - -[source,terminal] ----- -$ openssl rsa -in password_protected_tls.key -out tls.key ----- -==== - -.Procedure - -This procedure creates a `Route` resource with a custom certificate and edge TLS -termination. The following assumes that the certificate/key pair are in the -`tls.crt` and `tls.key` files in the current working directory. You may also -specify a CA certificate if needed to complete the certificate chain. -Substitute the actual path names for `tls.crt`, `tls.key`, and (optionally) -`ca.crt`. Substitute the name of the service that you want to expose -for `frontend`. Substitute the appropriate hostname for `www.example.com`. - -* Create a secure `Route` resource using edge TLS termination and a custom certificate. -+ -[source,terminal] ----- -$ oc create route edge --service=frontend --cert=tls.crt --key=tls.key --ca-cert=ca.crt --hostname=www.example.com ----- -+ -If you examine the resulting `Route` resource, it should look similar to the -following: -+ -.YAML Definition of the Secure Route -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: frontend -spec: - host: www.example.com - to: - kind: Service - name: frontend - tls: - termination: edge - key: |- - -----BEGIN PRIVATE KEY----- - [...] - -----END PRIVATE KEY----- - certificate: |- - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- - caCertificate: |- - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- ----- -+ -See `oc create route edge --help` for more options. diff --git a/modules/nw-ingress-custom-default-certificate-remove.adoc b/modules/nw-ingress-custom-default-certificate-remove.adoc deleted file mode 100644 index d01aeec6a384..000000000000 --- a/modules/nw-ingress-custom-default-certificate-remove.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-custom-default-certificate-remove_{context}"] -= Removing a custom default certificate - -As an administrator, you can remove a custom certificate that you configured an Ingress Controller to use. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the OpenShift CLI (`oc`). -* You previously configured a custom default certificate for the Ingress Controller. - -.Procedure - -* To remove the custom certificate and restore the certificate that ships with {product-title}, enter the following command: -+ -[source,terminal] ----- -$ oc patch -n openshift-ingress-operator ingresscontrollers/default \ - --type json -p $'- op: remove\n path: /spec/defaultCertificate' ----- -+ -There can be a delay while the cluster reconciles the new certificate configuration. - -.Verification - -* To confirm that the original cluster certificate is restored, enter the following command: -+ -[source,terminal] ----- -$ echo Q | \ - openssl s_client -connect console-openshift-console.apps.<domain>:443 -showcerts 2>/dev/null | \ - openssl x509 -noout -subject -issuer -enddate ----- -+ -where: -+ --- -`<domain>`:: Specifies the base domain name for your cluster. 
--- -+ -.Example output -[source,text] ----- -subject=CN = *.apps.<domain> -issuer=CN = ingress-operator@1620633373 -notAfter=May 10 10:44:36 2023 GMT ----- diff --git a/modules/nw-ingress-default-internal.adoc b/modules/nw-ingress-default-internal.adoc deleted file mode 100644 index 63e8c4e84418..000000000000 --- a/modules/nw-ingress-default-internal.adoc +++ /dev/null @@ -1,45 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-default-internal_{context}"] -= Configuring the default Ingress Controller for your cluster to be internal - -You can configure the `default` Ingress Controller for your cluster to be internal by deleting and recreating it. - -[WARNING] -==== -If your cloud provider is Microsoft Azure, you must have at least one public load balancer that points to your nodes. -If you do not, all of your nodes will lose egress connectivity to the internet. -==== - -[IMPORTANT] -==== -If you want to change the `scope` for an `IngressController`, you can change the `.spec.endpointPublishingStrategy.loadBalancer.scope` parameter after the custom resource (CR) is created. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Configure the `default` Ingress Controller for your cluster to be internal by deleting and recreating it. -+ -[source,terminal] ----- -$ oc replace --force --wait --filename - <<EOF -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - namespace: openshift-ingress-operator - name: default -spec: - endpointPublishingStrategy: - type: LoadBalancerService - loadBalancer: - scope: Internal -EOF ----- diff --git a/modules/nw-ingress-edge-route-default-certificate.adoc b/modules/nw-ingress-edge-route-default-certificate.adoc deleted file mode 100644 index 196603364b24..000000000000 --- a/modules/nw-ingress-edge-route-default-certificate.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// This is included in the following assemblies: -// -// networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="creating-edge-route-with-default-certificate_{context}"] -= Creating a route using the default certificate through an Ingress object - -If you create an Ingress object without specifying any TLS configuration, {product-title} generates an insecure route. To create an Ingress object that generates a secure, edge-terminated route using the default ingress certificate, you can specify an empty TLS configuration as follows. - -.Prerequisites - -* You have a service that you want to expose. -* You have access to the OpenShift CLI (`oc`). - -.Procedure - -. Create a YAML file for the Ingress object. In this example, the file is called `example-ingress.yaml`: -+ -.YAML definition of an Ingress object -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: frontend - ... -spec: - rules: - ... - tls: - - {} <1> ----- -+ -<1> Use this exact syntax to specify TLS without specifying a custom certificate. - -. 
Create the Ingress object by running the following command: -+ -[source,terminal] ----- -$ oc create -f example-ingress.yaml ----- - -.Verification -* Verify that {product-title} has created the expected route for the Ingress object by running the following command: -+ -[source,terminal] ----- -$ oc get routes -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: route.openshift.io/v1 - kind: Route - metadata: - name: frontend-j9sdd <1> - ... - spec: - ... - tls: <2> - insecureEdgeTerminationPolicy: Redirect - termination: edge <3> - ... ----- -<1> The name of the route includes the name of the Ingress object followed by a random suffix. -<2> In order to use the default certificate, the route should not specify `spec.certificate`. -<3> The route should specify the `edge` termination policy. diff --git a/modules/nw-ingress-operator-logs.adoc b/modules/nw-ingress-operator-logs.adoc deleted file mode 100644 index aac450dba5a0..000000000000 --- a/modules/nw-ingress-operator-logs.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-operator-logs_{context}"] -= View Ingress Controller logs - -You can view your Ingress Controller logs. - -.Procedure - -* View your Ingress Controller logs: -+ -[source,terminal] ----- -$ oc logs --namespace=openshift-ingress-operator deployments/ingress-operator -c <container_name> ----- diff --git a/modules/nw-ingress-operator-status.adoc b/modules/nw-ingress-operator-status.adoc deleted file mode 100644 index 032c28538148..000000000000 --- a/modules/nw-ingress-operator-status.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-operator-status_{context}"] -= View Ingress Operator status - -You can view and inspect the status of your Ingress Operator. - -.Procedure - -* View your Ingress Operator status: -+ -[source,terminal] ----- -$ oc describe clusteroperators/ingress ----- diff --git a/modules/nw-ingress-reencrypt-route-custom-cert.adoc b/modules/nw-ingress-reencrypt-route-custom-cert.adoc deleted file mode 100644 index 07dd109a715c..000000000000 --- a/modules/nw-ingress-reencrypt-route-custom-cert.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// This is included in the following assemblies: -// -// networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="creating-re-encrypt-route-with-custom-certificate_{context}"] -= Creating a route using the destination CA certificate in the Ingress annotation - -The `route.openshift.io/destination-ca-certificate-secret` annotation can be used on an Ingress object to define a route with a custom destination CA certificate. - -.Prerequisites -* You may have a certificate/key pair in PEM-encoded files, where the certificate is valid for the route host. -* You may have a separate CA certificate in a PEM-encoded file that completes the certificate chain. -* You must have a separate destination CA certificate in a PEM-encoded file. -* You must have a service that you want to expose. - - -.Procedure - -. 
Add the `route.openshift.io/destination-ca-certificate-secret` to the Ingress annotations: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: frontend - annotations: - route.openshift.io/termination: "reencrypt" - route.openshift.io/destination-ca-certificate-secret: secret-ca-cert <1> -... ----- -<1> The annotation references a kubernetes secret. - -+ -. The secret referenced in this annotation will be inserted into the generated route. -+ -.Example output -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: frontend - annotations: - route.openshift.io/termination: reencrypt - route.openshift.io/destination-ca-certificate-secret: secret-ca-cert -spec: -... - tls: - insecureEdgeTerminationPolicy: Redirect - termination: reencrypt - destinationCACertificate: | - -----BEGIN CERTIFICATE----- - [...] - -----END CERTIFICATE----- -... ----- diff --git a/modules/nw-ingress-select-route.adoc b/modules/nw-ingress-select-route.adoc deleted file mode 100644 index 8e4fa812d6e8..000000000000 --- a/modules/nw-ingress-select-route.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress.adoc - -[id="nw-ingress-select-route_{context}"] -= Configure Ingress to use routes - -//PLACEHOLDER diff --git a/modules/nw-ingress-setting-a-custom-default-certificate.adoc b/modules/nw-ingress-setting-a-custom-default-certificate.adoc deleted file mode 100644 index 6202959a8ded..000000000000 --- a/modules/nw-ingress-setting-a-custom-default-certificate.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-setting-a-custom-default-certificate_{context}"] -= Setting a custom default certificate - -As an administrator, you can configure an Ingress Controller to use a custom -certificate by creating a Secret resource and editing the `IngressController` -custom resource (CR). - -.Prerequisites - -* You must have a certificate/key pair in PEM-encoded files, where the -certificate is signed by a trusted certificate authority or by a private trusted -certificate authority that you configured in a custom PKI. - -* Your certificate meets the following requirements: - -** The certificate is valid for the ingress domain. - -** The certificate uses the `subjectAltName` extension to specify a wildcard domain, such as `*.apps.ocp4.example.com`. - -* You must have an `IngressController` CR. You may use the default one: -+ -[source,terminal] ----- -$ oc --namespace openshift-ingress-operator get ingresscontrollers ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -default 10m ----- - -[NOTE] -==== -If you have intermediate certificates, they must be included in the `tls.crt` -file of the secret containing a custom default certificate. Order matters when -specifying a certificate; list your intermediate certificate(s) after any server -certificate(s). -==== - -.Procedure - -The following assumes that the custom certificate and key pair are in the -`tls.crt` and `tls.key` files in the current working directory. Substitute the -actual path names for `tls.crt` and `tls.key`. You also may substitute another -name for `custom-certs-default` when creating the Secret resource and -referencing it in the IngressController CR. - -[NOTE] -==== -This action will cause the Ingress Controller to be redeployed, using a rolling deployment strategy. -==== - -. 
Create a Secret resource containing the custom certificate in the -`openshift-ingress` namespace using the `tls.crt` and `tls.key` files. -+ -[source,terminal] ----- -$ oc --namespace openshift-ingress create secret tls custom-certs-default --cert=tls.crt --key=tls.key ----- -+ -. Update the IngressController CR to reference the new certificate secret: -+ -[source,terminal] ----- -$ oc patch --type=merge --namespace openshift-ingress-operator ingresscontrollers/default \ - --patch '{"spec":{"defaultCertificate":{"name":"custom-certs-default"}}}' ----- -+ -. Verify the update was effective: -+ -[source,terminal] ----- -$ echo Q |\ - openssl s_client -connect console-openshift-console.apps.<domain>:443 -showcerts 2>/dev/null |\ - openssl x509 -noout -subject -issuer -enddate ----- -+ -where: -+ --- -`<domain>`:: Specifies the base domain name for your cluster. --- -+ -.Example output -[source,text] ----- -subject=C = US, ST = NC, L = Raleigh, O = RH, OU = OCP4, CN = *.apps.example.com -issuer=C = US, ST = NC, L = Raleigh, O = RH, OU = OCP4, CN = example.com -notAfter=May 10 08:32:45 2022 GM ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to set a custom default certificate: - -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - defaultCertificate: - name: custom-certs-default ----- -==== -+ -The certificate secret name should match the value used to update the CR. - -Once the IngressController CR has been modified, the Ingress Operator -updates the Ingress Controller's deployment to use the custom certificate. diff --git a/modules/nw-ingress-setting-internal-lb.adoc b/modules/nw-ingress-setting-internal-lb.adoc deleted file mode 100644 index f1b0e3a79090..000000000000 --- a/modules/nw-ingress-setting-internal-lb.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-setting-internal-lb_{context}"] -= Configuring an Ingress Controller to use an internal load balancer - -When creating an Ingress Controller on cloud platforms, the Ingress Controller is published by a public cloud load balancer by default. -As an administrator, you can create an Ingress Controller that uses an internal cloud load balancer. - -[WARNING] -==== -If your cloud provider is Microsoft Azure, you must have at least one public load balancer that points to your nodes. -If you do not, all of your nodes will lose egress connectivity to the internet. -==== - -[IMPORTANT] -==== -If you want to change the `scope` for an `IngressController`, you can change the `.spec.endpointPublishingStrategy.loadBalancer.scope` parameter after the custom resource (CR) is created. -==== - -.Diagram of LoadBalancer -image::202_OpenShift_Ingress_0222_load_balancer.png[{product-title} Ingress LoadBalancerService endpoint publishing strategy] - -The preceding graphic shows the following concepts pertaining to {product-title} Ingress LoadBalancerService endpoint publishing strategy: - -* You can load balance externally, using the cloud provider load balancer, or internally, using the OpenShift Ingress Controller Load Balancer. -* You can use the single IP address of the load balancer and more familiar ports, such as 8080 and 4200 as shown on the cluster depicted in the graphic. 
-* Traffic from the external load balancer is directed at the pods, and managed by the load balancer, as depicted in the instance of a down node. -See the link:https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer[Kubernetes Services documentation] -for implementation details. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an `IngressController` custom resource (CR) in a file named `<name>-ingress-controller.yaml`, such as in the following example: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - namespace: openshift-ingress-operator - name: <name> <1> -spec: - domain: <domain> <2> - endpointPublishingStrategy: - type: LoadBalancerService - loadBalancer: - scope: Internal <3> ----- -<1> Replace `<name>` with a name for the `IngressController` object. -<2> Specify the `domain` for the application published by the controller. -<3> Specify a value of `Internal` to use an internal load balancer. - -. Create the Ingress Controller defined in the previous step by running the following command: -+ -[source,terminal] ----- -$ oc create -f <name>-ingress-controller.yaml <1> ----- -<1> Replace `<name>` with the name of the `IngressController` object. - -. Optional: Confirm that the Ingress Controller was created by running the following command: -+ -[source,terminal] ----- -$ oc --all-namespaces=true get ingresscontrollers ----- diff --git a/modules/nw-ingress-setting-max-connections.adoc b/modules/nw-ingress-setting-max-connections.adoc deleted file mode 100644 index 1d89fb64116a..000000000000 --- a/modules/nw-ingress-setting-max-connections.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Modules included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-setting-max-connections_{context}"] -= Setting the Ingress Controller maximum connections -A cluster administrator can set the maximum number of simultaneous connections for OpenShift router deployments. You can patch an existing Ingress Controller to increase the maximum number of connections. - -.Prerequisites -* The following assumes that you already created an Ingress Controller - -.Procedure -* Update the Ingress Controller to change the maximum number of connections for HAProxy: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default --type=merge -p '{"spec":{"tuningOptions": {"maxConnections": 7500}}}' ----- -+ -[WARNING] -==== -If you set the `spec.tuningOptions.maxConnections` value greater than the current operating system limit, the HAProxy process will not start. See the table in the "Ingress Controller configuration parameters" section for more information about this parameter. -==== diff --git a/modules/nw-ingress-setting-thread-count.adoc b/modules/nw-ingress-setting-thread-count.adoc deleted file mode 100644 index a6d8f475c170..000000000000 --- a/modules/nw-ingress-setting-thread-count.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-setting-thread-count_{context}"] -= Setting Ingress Controller thread count -A cluster administrator can set the thread count to increase the amount of incoming connections a cluster can handle. You can patch an existing Ingress Controller to increase the amount of threads. 
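After you apply the patch shown in the following procedure, you can confirm the configured value and watch the router pods roll out with the new setting. This is a minimal sketch that assumes the default Ingress Controller and the standard `openshift-ingress` router namespace.

[source,terminal]
----
$ oc -n openshift-ingress-operator get ingresscontroller/default \
  -o jsonpath='{.spec.tuningOptions.threadCount}'

$ oc -n openshift-ingress get pods
----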
- -.Prerequisites -* The following assumes that you already created an Ingress Controller. - -.Procedure -* Update the Ingress Controller to increase the number of threads: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default --type=merge -p '{"spec":{"tuningOptions": {"threadCount": 8}}}' ----- -+ -[NOTE] -==== -If you have a node that is capable of running large amounts of resources, you can configure `spec.nodePlacement.nodeSelector` with labels that match the capacity of the intended node, and configure `spec.tuningOptions.threadCount` to an appropriately high value. -==== diff --git a/modules/nw-ingress-sharding-default.adoc b/modules/nw-ingress-sharding-default.adoc deleted file mode 100644 index 67f213003d9c..000000000000 --- a/modules/nw-ingress-sharding-default.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module include in the following assemblies: -// -// * ingress-operator.adoc -// * networking/ingress-sharding.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-sharding-default_{context}"] -= Sharding the default Ingress Controller - -After creating a new Ingress shard, there might be routes that are admitted to your new Ingress shard that are also admitted by the default Ingress Controller. This is because the default Ingress Controller has no selectors and admits all routes by default. - -You can restrict an Ingress Controller from servicing routes with specific labels using either namespace selectors or route selectors. The following procedure restricts the default Ingress Controller from servicing your newly sharded `finance`, `ops`, and `dev`, routes using a namespace selector. This adds further isolation to Ingress shards. - -[IMPORTANT] -==== -You must keep all of {product-title}'s administration routes on the same Ingress Controller. Therefore, avoid adding additional selectors to the default Ingress Controller that exclude these essential routes. -==== - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in as a project administrator. - -.Procedure - -. Modify the default Ingress Controller by running the following command: -+ -[source,terminal] ----- -$ oc edit ingresscontroller -n openshift-ingress-operator default ----- - -. Edit the Ingress Controller to contain a `namespaceSelector` that excludes the routes with any of the `finance`, `ops`, and `dev` labels: -+ -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: default - namespace: openshift-ingress-operator - spec: - namespaceSelector: - matchExpressions: - - key: type - operator: NotIn - values: - - finance - - ops - - dev ----- - -The default Ingress Controller will no longer serve the namespaces labeled `name:finance`, `name:ops`, and `name:dev`. diff --git a/modules/nw-ingress-sharding-dns.adoc b/modules/nw-ingress-sharding-dns.adoc deleted file mode 100644 index 12f65ba26267..000000000000 --- a/modules/nw-ingress-sharding-dns.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-sharding.adoc - -:_content-type: CONCEPT -[id="nw-ingress-sharding-dns_{context}"] -= Ingress sharding and DNS - -The cluster administrator is responsible for making a separate DNS entry for each router in a project. A router will not forward unknown routes to another router. - -Consider the following example: - -* Router A lives on host 192.168.0.5 and has routes with `*.foo.com`. 
-* Router B lives on host 192.168.1.9 and has routes with `*.example.com`. - -Separate DNS entries must resolve `\*.foo.com` to the node hosting Router A and `*.example.com` to the node hosting Router B: - -* `*.foo.com A IN 192.168.0.5` -* `*.example.com A IN 192.168.1.9` diff --git a/modules/nw-ingress-sharding-namespace-labels.adoc b/modules/nw-ingress-sharding-namespace-labels.adoc deleted file mode 100644 index f459b3837b8b..000000000000 --- a/modules/nw-ingress-sharding-namespace-labels.adoc +++ /dev/null @@ -1,73 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc -// * ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-sharding-namespace-labels_{context}"] -= Configuring Ingress Controller sharding by using namespace labels - -Ingress Controller sharding by using namespace labels means that the Ingress -Controller serves any route in any namespace that is selected by the namespace -selector. - -.Ingress sharding using namespace labels -image::nw-sharding-namespace-labels.png[A diagram showing multiple Ingress Controllers with different namespace selectors serving routes that belong to the namespace containing a label that matches a given namespace selector] - -Ingress Controller sharding is useful when balancing incoming traffic load among -a set of Ingress Controllers and when isolating traffic to a specific Ingress -Controller. For example, company A goes to one Ingress Controller and company B -to another. - -.Procedure - -. Edit the `router-internal.yaml` file: -+ -[source,terminal] ----- -# cat router-internal.yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: sharded - namespace: openshift-ingress-operator - spec: - domain: <apps-sharded.basedomain.example.net> <1> - nodePlacement: - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" - namespaceSelector: - matchLabels: - type: sharded - status: {} -kind: List -metadata: - resourceVersion: "" - selfLink: "" ----- -<1> Specify a domain to be used by the Ingress Controller. This domain must be different from the default Ingress Controller domain. - -. Apply the Ingress Controller `router-internal.yaml` file: -+ -[source,terminal] ----- -# oc apply -f router-internal.yaml ----- -+ -The Ingress Controller selects routes in any namespace that is selected by the -namespace selector that have the label `type: sharded`. - -. Create a new route using the domain configured in the `router-internal.yaml`: -+ -[source,terminal] ----- -$ oc expose svc <service-name> --hostname <route-name>.apps-sharded.basedomain.example.net ----- diff --git a/modules/nw-ingress-sharding-route-configuration.adoc b/modules/nw-ingress-sharding-route-configuration.adoc deleted file mode 100644 index 43ebe274f459..000000000000 --- a/modules/nw-ingress-sharding-route-configuration.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc -// * networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-sharding-route-configuration_{context}"] -= Creating a route for Ingress Controller sharding - -A route allows you to host your application at a URL. In this case, the hostname is not set and the route uses a subdomain instead. 
When you specify a subdomain, you automatically use the domain of the Ingress Controller that exposes the route. For situations where a route is exposed by multiple Ingress Controllers, the route is hosted at multiple URLs. - -The following procedure describes how to create a route for Ingress Controller sharding, using the `hello-openshift` application as an example. - -Ingress Controller sharding is useful when balancing incoming traffic load among a set of Ingress Controllers and when isolating traffic to a specific Ingress Controller. For example, company A goes to one Ingress Controller and company B to another. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in as a project administrator. -* You have a web application that exposes a port and an HTTP or TLS endpoint listening for traffic on the port. -* You have configured the Ingress Controller for sharding. - -.Procedure - -. Create a project called `hello-openshift` by running the following command: -+ -[source,terminal] ----- -$ oc new-project hello-openshift ----- - -. Create a pod in the project by running the following command: -+ -[source,terminal] ----- -$ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json ----- - -. Create a service called `hello-openshift` by running the following command: -+ -[source,terminal] ----- -$ oc expose pod/hello-openshift ----- - -. Create a route definition called `hello-openshift-route.yaml`: -+ -.YAML definition of the created route for sharding: -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - labels: - type: sharded <1> - name: hello-openshift-edge - namespace: hello-openshift -spec: - subdomain: hello-openshift <2> - tls: - termination: edge - to: - kind: Service - name: hello-openshift ----- -<1> Both the label key and its corresponding label value must match the ones specified in the Ingress Controller. In this example, the Ingress Controller has the label key and value `type: sharded`. -<2> The route will be exposed using the value of the `subdomain` field. When you specify the `subdomain` field, you must leave the hostname unset. If you specify both the `host` and `subdomain` fields, then the route will use the value of the `host` field, and ignore the `subdomain` field. - -. Use `hello-openshift-route.yaml` to create a route to the `hello-openshift` application by running the following command: -+ -[source,terminal] ----- -$ oc -n hello-openshift create -f hello-openshift-route.yaml ----- - -.Verification -* Get the status of the route with the following command: -+ -[source,terminal] ----- -$ oc -n hello-openshift get routes/hello-openshift-edge -o yaml ----- -+ -The resulting `Route` resource should look similar to the following: -+ -.Example output -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - labels: - type: sharded - name: hello-openshift-edge - namespace: hello-openshift -spec: - subdomain: hello-openshift - tls: - termination: edge - to: - kind: Service - name: hello-openshift -status: - ingress: - - host: hello-openshift.<apps-sharded.basedomain.example.net> <1> - routerCanonicalHostname: router-sharded.<apps-sharded.basedomain.example.net> <2> - routerName: sharded <3> ----- -<1> The hostname the Ingress Controller, or router, uses to expose the route. The value of the `host` field is automatically determined by the Ingress Controller, and uses its domain. 
In this example, the domain of the Ingress Controller is `<apps-sharded.basedomain.example.net>`. -<2> The hostname of the Ingress Controller. -<3> The name of the Ingress Controller. In this example, the Ingress Controller has the name `sharded`. diff --git a/modules/nw-ingress-sharding-route-labels.adoc b/modules/nw-ingress-sharding-route-labels.adoc deleted file mode 100644 index 888a8b0d2598..000000000000 --- a/modules/nw-ingress-sharding-route-labels.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-ingress-controller.adoc -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-sharding-route-labels_{context}"] -= Configuring Ingress Controller sharding by using route labels - -Ingress Controller sharding by using route labels means that the Ingress -Controller serves any route in any namespace that is selected by the route -selector. - -.Ingress sharding using route labels -image::nw-sharding-route-labels.png[A diagram showing multiple Ingress Controllers with different route selectors serving any route containing a label that matches a given route selector regardless of the namespace a route belongs to] - -Ingress Controller sharding is useful when balancing incoming traffic load among -a set of Ingress Controllers and when isolating traffic to a specific Ingress -Controller. For example, company A goes to one Ingress Controller and company B -to another. - -.Procedure - -. Edit the `router-internal.yaml` file: -+ -[source,terminal] ----- -# cat router-internal.yaml -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: sharded - namespace: openshift-ingress-operator - spec: - domain: <apps-sharded.basedomain.example.net> <1> - nodePlacement: - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" - routeSelector: - matchLabels: - type: sharded - status: {} -kind: List -metadata: - resourceVersion: "" - selfLink: "" ----- -<1> Specify a domain to be used by the Ingress Controller. This domain must be different from the default Ingress Controller domain. - -. Apply the Ingress Controller `router-internal.yaml` file: -+ -[source,terminal] ----- -# oc apply -f router-internal.yaml ----- -+ -The Ingress Controller selects routes in any namespace that have the label -`type: sharded`. - -. Create a new route using the domain configured in the `router-internal.yaml`: -+ -[source,terminal] ----- -$ oc expose svc <service-name> --hostname <route-name>.apps-sharded.basedomain.example.net ----- \ No newline at end of file diff --git a/modules/nw-ingress-sharding.adoc b/modules/nw-ingress-sharding.adoc deleted file mode 100644 index 813aa5ff7095..000000000000 --- a/modules/nw-ingress-sharding.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress-operator.adoc -// * networking/ingress-sharding.adoc - -:_content-type: CONCEPT -[id="nw-ingress-sharding_{context}"] -= Ingress Controller sharding - -You can use Ingress sharding, also known as router sharding, to distribute a set of routes across multiple routers by adding labels to routes, namespaces, or both. The Ingress Controller uses a corresponding set of selectors to admit only the routes that have a specified label. Each Ingress shard comprises the routes that are filtered using a given selection expression. 
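For example, a shard that selects on labels only admits routes after the matching labels exist on the route, the namespace, or both. The following commands are a minimal sketch for adding such labels; the namespace `finance`, the route `myapp`, and the label values are hypothetical and must match the selectors that you configure on the Ingress Controller.

[source,terminal]
----
$ oc label namespace finance name=finance

$ oc -n finance label route myapp type=sharded
----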
- -As the primary mechanism for traffic to enter the cluster, the demands on the Ingress Controller can be significant. As a cluster administrator, you can shard the routes to: - -* Balance Ingress Controllers, or routers, with several routes to speed up responses to changes. -* Allocate certain routes to have different reliability guarantees than other routes. -* Allow certain Ingress Controllers to have different policies defined. -* Allow only specific routes to use additional features. -* Expose different routes on different addresses so that internal and external users can see different routes, for example. -* Transfer traffic from one version of an application to another during a blue green deployment. - -When Ingress Controllers are sharded, a given route is admitted to zero or more Ingress Controllers in the group. A route's status describes whether an Ingress Controller has admitted it or not. An Ingress Controller will only admit a route if it is unique to its shard. - -An Ingress Controller can use three sharding methods: - -* Adding only a namespace selector to the Ingress Controller, so that all routes in a namespace with labels that match the namespace selector are in the Ingress shard. - -* Adding only a route selector to the Ingress Controller, so that all routes with labels that match the route selector are in the Ingress shard. - -* Adding both a namespace selector and route selector to the Ingress Controller, so that routes with labels that match the route selector in a namespace with labels that match the namespace selector are in the Ingress shard. - -With sharding, you can distribute subsets of routes over multiple Ingress Controllers. These subsets can be non-overlapping, also called _traditional_ sharding, or overlapping, otherwise known as _overlapped_ sharding. - -== Traditional sharding example - -An Ingress Controller `finops-router` is configured with the label selector `spec.namespaceSelector.matchLabels.name` set to `finance` and `ops`: - -.Example YAML definition for `finops-router` -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: finops-router - namespace: openshift-ingress-operator - spec: - namespaceSelector: - matchLabels: - name: - - finance - - ops ----- - -A second Ingress Controller `dev-router` is configured with the label selector `spec.namespaceSelector.matchLabels.name` set to `dev`: - -.Example YAML definition for `dev-router` -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: dev-router - namespace: openshift-ingress-operator - spec: - namespaceSelector: - matchLabels: - name: dev ----- - -If all application routes are in separate namespaces, each labeled with `name:finance`, `name:ops`, and `name:dev` respectively, this configuration effectively distributes your routes between the two Ingress Controllers. {product-title} routes for console, authentication, and other purposes should not be handled. - -In the above scenario, sharding becomes a special case of partitioning, with no overlapping subsets. Routes are divided between router shards. - -[WARNING] -==== -The `default` Ingress Controller continues to serve all routes unless the `namespaceSelector` or `routeSelector` fields contain routes that are meant for exclusion. 
See this link:https://access.redhat.com/solutions/5097511[Red Hat Knowledgebase solution] and the section "Sharding the default Ingress Controller" for more information on how to exclude routes from the default Ingress Controller. -==== - -== Overlapped sharding example - -In addition to `finops-router` and `dev-router` in the example above, you also have `devops-router`, which is configured with the label selector `spec.namespaceSelector.matchLabels.name` set to `dev` and `ops`: - -.Example YAML definition for `devops-router` -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: devops-router - namespace: openshift-ingress-operator - spec: - namespaceSelector: - matchLabels: - name: - - dev - - ops ----- -The routes in the namespaces labeled `name:dev` and `name:ops` are now serviced by two different Ingress Controllers. With this configuration, you have overlapping subsets of routes. - -With overlapping subsets of routes you can create more complex routing rules. For example, you can divert higher priority traffic to the dedicated `finops-router` while sending lower priority traffic to `devops-router`. diff --git a/modules/nw-ingress-view.adoc b/modules/nw-ingress-view.adoc deleted file mode 100644 index 7573634821ba..000000000000 --- a/modules/nw-ingress-view.adoc +++ /dev/null @@ -1,24 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-view_{context}"] -= View the default Ingress Controller - -The Ingress Operator is a core feature of {product-title} and is enabled out of the -box. - -Every new {product-title} installation has an `ingresscontroller` named default. It -can be supplemented with additional Ingress Controllers. If the default -`ingresscontroller` is deleted, the Ingress Operator will automatically recreate it -within a minute. - -.Procedure - -* View the default Ingress Controller: -+ -[source,terminal] ----- -$ oc describe --namespace=openshift-ingress-operator ingresscontroller/default ----- diff --git a/modules/nw-ingresscontroller-change-external.adoc b/modules/nw-ingresscontroller-change-external.adoc deleted file mode 100644 index 04cd2752b777..000000000000 --- a/modules/nw-ingresscontroller-change-external.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -[id="nw-ingresscontroller-change-external_{context}"] -= Configuring the Ingress Controller endpoint publishing scope to External - -When a cluster administrator installs a new cluster without specifying that the cluster is private, the default Ingress Controller is created with a `scope` set to `External`. - -The Ingress Controller's scope can be configured to be `Internal` during installation or after, and cluster administrators can change an `Internal` Ingress Controller to `External`. - -[IMPORTANT] -==== -On some platforms, it is necessary to delete and recreate the service. - -Changing the scope can cause disruption to Ingress traffic, potentially for several minutes. This applies to platforms where it is necessary to delete and recreate the service, because the procedure can cause {product-title} to deprovision the existing service load balancer, provision a new one, and update DNS. -==== - -.Prerequisites - -* You installed the `oc` CLI. 
- -.Procedure - -* To change an `Internal` scoped Ingress Controller to `External`, enter the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/private --type=merge --patch='{"spec":{"endpointPublishingStrategy":{"type":"LoadBalancerService","loadBalancer":{"scope":"External"}}}}' ----- -+ -.Verification -+ -* To check the status of the Ingress Controller, enter the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator get ingresscontrollers/default -o yaml ----- -+ -** The `Progressing` status condition indicates whether you must take further action. For example, the status condition can indicate that you need to delete the service by entering the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress delete services/router-default ----- -+ -If you delete the service, the Ingress Operator recreates it as `External`. diff --git a/modules/nw-ingresscontroller-change-internal.adoc b/modules/nw-ingresscontroller-change-internal.adoc deleted file mode 100644 index 6f1e97c956bc..000000000000 --- a/modules/nw-ingresscontroller-change-internal.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -[id="nw-ingresscontroller-change-internal_{context}"] -= Configuring the Ingress Controller endpoint publishing scope to Internal - -When a cluster administrator installs a new cluster without specifying that the cluster is private, the default Ingress Controller is created with a `scope` set to `External`. Cluster administrators can change an `External` scoped Ingress Controller to `Internal`. - -.Prerequisites - -* You installed the `oc` CLI. - -.Procedure - -* To change an `External` scoped Ingress Controller to `Internal`, enter the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontrollers/default --type=merge --patch='{"spec":{"endpointPublishingStrategy":{"type":"LoadBalancerService","loadBalancer":{"scope":"Internal"}}}}' ----- -+ -.Verification -+ -* To check the status of the Ingress Controller, enter the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator get ingresscontrollers/default -o yaml ----- -+ -** The `Progressing` status condition indicates whether you must take further action. For example, the status condition can indicate that you need to delete the service by entering the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress delete services/router-default ----- -+ -If you delete the service, the Ingress Operator recreates it as `Internal`. diff --git a/modules/nw-installation-ingress-config-asset.adoc b/modules/nw-installation-ingress-config-asset.adoc deleted file mode 100644 index 4609568221e4..000000000000 --- a/modules/nw-installation-ingress-config-asset.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress/configuring_ingress_operator.adoc - - -[id="nw-installation-ingress-config-asset_{context}"] -= The Ingress configuration asset - -The installation program generates an asset with an `Ingress` resource in the `config.openshift.io` API group, `cluster-ingress-02-config.yml`. 
- -.YAML Definition of the `Ingress` resource -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - domain: apps.openshiftdemos.com ----- - -The installation program stores this asset in the `cluster-ingress-02-config.yml` file in the `manifests/` directory. This `Ingress` resource defines the cluster-wide configuration for Ingress. This Ingress configuration is used as follows: - -* The Ingress Operator uses the domain from the cluster Ingress configuration as the domain for the default Ingress Controller. - -* The OpenShift API Server Operator uses the domain from the cluster Ingress configuration. This domain is also used when generating a default host for a `Route` resource that does not specify an explicit host. diff --git a/modules/nw-installing-external-dns-operator.adoc b/modules/nw-installing-external-dns-operator.adoc deleted file mode 100644 index 712b943b775a..000000000000 --- a/modules/nw-installing-external-dns-operator.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.adoc - -:_content-type: PROCEDURE -[id="nw-installing-external-dns-operator_{context}"] -= Installing the External DNS Operator - -You can install the External DNS Operator using the {product-title} OperatorHub. - -.Procedure - -. Click *Operators* → *OperatorHub* in the {product-title} Web Console. -. Click *External DNS Operator*. - You can use the *Filter by keyword* text box or the filter list to search for External DNS Operator from the list of Operators. -. Select the `external-dns-operator` namespace. -. On the External DNS Operator page, click *Install*. -. On the *Install Operator* page, ensure that you selected the following options: -.. Update the channel as *stable-v1*. -.. Installation mode as *A specific name on the cluster*. -.. Installed namespace as `external-dns-operator`. If namespace `external-dns-operator` does not exist, it gets created during the Operator installation. -.. Select *Approval Strategy* as *Automatic* or *Manual*. Approval Strategy is set to *Automatic* by default. -.. Click *Install*. - -If you select *Automatic* updates, the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. - -If you select *Manual* updates, the OLM creates an update request. As a cluster administrator, you must then manually approve that update request to have the Operator updated to the new version. - - -.Verification - -Verify that External DNS Operator shows the *Status* as *Succeeded* on the Installed Operators dashboard. diff --git a/modules/nw-ipfailover-cluster-ha-ingress.adoc b/modules/nw-ipfailover-cluster-ha-ingress.adoc deleted file mode 100644 index e6dacf095527..000000000000 --- a/modules/nw-ipfailover-cluster-ha-ingress.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -[id="nw-ipfailover-cluster-ha-ingress_{context}"] -= High availability For ingressIP - -In non-cloud clusters, IP failover and `ingressIP` to a service can be combined. The result is high availability services for users that create services using `ingressIP`. - -The approach is to specify an `ingressIPNetworkCIDR` range and then use the same range in creating the ipfailover configuration. 
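To keep the two ranges aligned in practice, the following sketch assumes that `192.168.237.0/24` was chosen as the `ingressIPNetworkCIDR` and reuses addresses from that same `/24` as the failover VIPs. The addresses are illustrative placeholders only; the full IP failover deployment example appears later in this document:

[source,yaml]
----
# Environment variable excerpt from an IP failover deployment.
# The VIP range is assumed to fall inside the chosen ingressIPNetworkCIDR (192.168.237.0/24).
- name: OPENSHIFT_HA_VIRTUAL_IPS
  value: "192.168.237.200-215"
----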
- -Because IP failover can support a maximum of 255 VIPs for the entire cluster, the `ingressIPNetworkCIDR` needs to be `/24` or smaller. diff --git a/modules/nw-ipfailover-configuration.adoc b/modules/nw-ipfailover-configuration.adoc deleted file mode 100644 index 366ff4864b55..000000000000 --- a/modules/nw-ipfailover-configuration.adoc +++ /dev/null @@ -1,181 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: PROCEDURE -[id="nw-ipfailover-configuration_{context}"] -= Configuring IP failover - -As a cluster administrator, you can configure IP failover on an entire cluster, or on a subset of nodes, as defined by the label selector. You can also configure multiple IP failover deployment configurations in your cluster, where each one is independent of the others. - -The IP failover deployment configuration ensures that a failover pod runs on each of the nodes that match the constraints or the label that you use. - -This pod runs Keepalived, which can monitor an endpoint and use Virtual Router Redundancy Protocol (VRRP) to fail over the virtual IP (VIP) from one node to another if the first node cannot reach the service or endpoint. - -For production use, set a `selector` that selects at least two nodes, and set `replicas` equal to the number of selected nodes. - -.Prerequisites - -* You are logged in to the cluster as a user with `cluster-admin` privileges. -* You created a pull secret. - -.Procedure - -//. Create an {product-title} pull secret -//+ -. Create an IP failover service account: -+ -[source,terminal] ----- -$ oc create sa ipfailover ----- -+ -. Update security context constraints (SCC) for `hostNetwork`: -+ -[source,terminal] ----- -$ oc adm policy add-scc-to-user privileged -z ipfailover -$ oc adm policy add-scc-to-user hostnetwork -z ipfailover ----- -+ -. 
Create a deployment YAML file to configure IP failover: -+ -.Example deployment YAML for IP failover configuration -[source,yaml] ----- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ipfailover-keepalived <1> - labels: - ipfailover: hello-openshift -spec: - strategy: - type: Recreate - replicas: 2 - selector: - matchLabels: - ipfailover: hello-openshift - template: - metadata: - labels: - ipfailover: hello-openshift - spec: - serviceAccountName: ipfailover - privileged: true - hostNetwork: true - nodeSelector: - node-role.kubernetes.io/worker: "" - containers: - - name: openshift-ipfailover - image: quay.io/openshift/origin-keepalived-ipfailover - ports: - - containerPort: 63000 - hostPort: 63000 - imagePullPolicy: IfNotPresent - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: host-slash - mountPath: /host - readOnly: true - mountPropagation: HostToContainer - - name: etc-sysconfig - mountPath: /etc/sysconfig - readOnly: true - - name: config-volume - mountPath: /etc/keepalive - env: - - name: OPENSHIFT_HA_CONFIG_NAME - value: "ipfailover" - - name: OPENSHIFT_HA_VIRTUAL_IPS <2> - value: "1.1.1.1-2" - - name: OPENSHIFT_HA_VIP_GROUPS <3> - value: "10" - - name: OPENSHIFT_HA_NETWORK_INTERFACE <4> - value: "ens3" #The host interface to assign the VIPs - - name: OPENSHIFT_HA_MONITOR_PORT <5> - value: "30060" - - name: OPENSHIFT_HA_VRRP_ID_OFFSET <6> - value: "0" - - name: OPENSHIFT_HA_REPLICA_COUNT <7> - value: "2" #Must match the number of replicas in the deployment - - name: OPENSHIFT_HA_USE_UNICAST - value: "false" - #- name: OPENSHIFT_HA_UNICAST_PEERS - #value: "10.0.148.40,10.0.160.234,10.0.199.110" - - name: OPENSHIFT_HA_IPTABLES_CHAIN <8> - value: "INPUT" - #- name: OPENSHIFT_HA_NOTIFY_SCRIPT <9> - # value: /etc/keepalive/mynotifyscript.sh - - name: OPENSHIFT_HA_CHECK_SCRIPT <10> - value: "/etc/keepalive/mycheckscript.sh" - - name: OPENSHIFT_HA_PREEMPTION <11> - value: "preempt_delay 300" - - name: OPENSHIFT_HA_CHECK_INTERVAL <12> - value: "2" - livenessProbe: - initialDelaySeconds: 10 - exec: - command: - - pgrep - - keepalived - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: host-slash - hostPath: - path: / - - name: etc-sysconfig - hostPath: - path: /etc/sysconfig - # config-volume contains the check script - # created with `oc create configmap keepalived-checkscript --from-file=mycheckscript.sh` - - configMap: - defaultMode: 0755 - name: keepalived-checkscript - name: config-volume - imagePullSecrets: - - name: openshift-pull-secret <13> ----- -<1> The name of the IP failover deployment. -<2> The list of IP address ranges to replicate. This must be provided. For example, `1.2.3.4-6,1.2.3.9`. -<3> The number of groups to create for VRRP. If not set, a group is created for each virtual IP range specified with the `OPENSHIFT_HA_VIP_GROUPS` variable. -<4> The interface name that IP failover uses to send VRRP traffic. By default, `eth0` is used. -<5> The IP failover pod tries to open a TCP connection to this port on each VIP. If connection is established, the service is considered to be running. If this port is set to `0`, the test always passes. The default value is `80`. -<6> The offset value used to set the virtual router IDs. Using different offset values allows multiple IP failover configurations to exist within the same cluster. The default offset is `0`, and the allowed range is `0` through `255`. -<7> The number of replicas to create. 
This must match `spec.replicas` value in IP failover deployment configuration. The default value is `2`. -<8> The name of the `iptables` chain to automatically add an `iptables` rule to allow the VRRP traffic on. If the value is not set, an `iptables` rule is not added. If the chain does not exist, it is not created, and Keepalived operates in unicast mode. The default is `INPUT`. -<9> The full path name in the pod file system of a script that is run whenever the state changes. -<10> The full path name in the pod file system of a script that is periodically run to verify the application is operating. -<11> The strategy for handling a new higher priority host. The default value is `preempt_delay 300`, which causes a Keepalived instance to take over a VIP after 5 minutes if a lower-priority master is holding the VIP. -<12> The period, in seconds, that the check script is run. The default value is `2`. -<13> Create the pull secret before creating the deployment, otherwise you will get an error when creating the deployment. -//// -+ -.Example service YAML for IP failover configuration -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: ipfailover-keepalived-service -spec: - ports: - - port: 1985 - targetPort: 1985 - name: todo - - port: 112 - targetPort: 112 - name: vrrp - selector: - ipfailover: hello-openshift - externalIPs: - - 1.1.1.1 - - 1.1.1.2 ----- -//// diff --git a/modules/nw-ipfailover-configuring-check-notify-scripts.adoc b/modules/nw-ipfailover-configuring-check-notify-scripts.adoc deleted file mode 100644 index a234b69730fb..000000000000 --- a/modules/nw-ipfailover-configuring-check-notify-scripts.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: PROCEDURE -[id="nw-ipfailover-configuring-check-notify-scripts_{context}"] -= Configuring check and notify scripts - -Keepalived monitors the health of the application by periodically running an optional user supplied check script. For example, the script can test a web server by issuing a request and verifying the response. - -When a check script is not provided, a simple default script is run that tests the TCP connection. This default test is suppressed when the monitor port is `0`. - -Each IP failover pod manages a Keepalived daemon that manages one or more virtual IPs (VIP) on the node where the pod is running. The Keepalived daemon keeps the state of each VIP for that node. A particular VIP on a particular node may be in `master`, `backup`, or `fault` state. - -When the check script for that VIP on the node that is in `master` state fails, the VIP on that node enters the `fault` state, which triggers a renegotiation. During renegotiation, all VIPs on a node that are not in the `fault` state participate in deciding which node takes over the VIP. Ultimately, the VIP enters the `master` state on some node, and the VIP stays in the `backup` state on the other nodes. - -When a node with a VIP in `backup` state fails, the VIP on that node enters the `fault` state. When the check script passes again for a VIP on a node in the `fault` state, the VIP on that node exits the `fault` state and negotiates to enter the `master` state. The VIP on that node may then enter either the `master` or the `backup` state. - -As cluster administrator, you can provide an optional notify script, which is called whenever the state changes. 
Keepalived passes the following three parameters to the script: - -* `$1` - `group` or `instance` -* `$2` - Name of the `group` or `instance` -* `$3` - The new state: `master`, `backup`, or `fault` - -The check and notify scripts run in the IP failover pod and use the pod file system, not the host file system. However, the IP failover pod makes the host file system available under the `/hosts` mount path. When configuring a check or notify script, you must provide the full path to the script. The recommended approach for providing the scripts is to use a config map. - -The full path names of the check and notify scripts are added to the Keepalived configuration file, `_/etc/keepalived/keepalived.conf`, which is loaded every time Keepalived starts. The scripts can be added to the pod with a config map as follows. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. Create the desired script and create a config map to hold it. The script has no input arguments and must return `0` for `OK` and `1` for `fail`. -+ -The check script, `_mycheckscript.sh_`: -+ -[source,bash] ----- -#!/bin/bash - # Whatever tests are needed - # E.g., send request and verify response -exit 0 ----- - -. Create the config map: -+ -[source,terminal] ----- -$ oc create configmap mycustomcheck --from-file=mycheckscript.sh ----- -+ -. Add the script to the pod. The `defaultMode` for the mounted config map files must able to run by using `oc` commands or by editing the deployment configuration. A value of `0755`, `493` decimal, is typical: -+ -[source,terminal] ----- -$ oc set env deploy/ipfailover-keepalived \ - OPENSHIFT_HA_CHECK_SCRIPT=/etc/keepalive/mycheckscript.sh ----- -+ -[source,terminal] ----- -$ oc set volume deploy/ipfailover-keepalived --add --overwrite \ - --name=config-volume \ - --mount-path=/etc/keepalive \ - --source='{"configMap": { "name": "mycustomcheck", "defaultMode": 493}}' ----- -+ -[NOTE] -==== -The `oc set env` command is whitespace sensitive. There must be no whitespace on either side of the `=` sign. -==== -+ -[TIP] -==== -You can alternatively edit the `ipfailover-keepalived` deployment configuration: - -[source,terminal] ----- -$ oc edit deploy ipfailover-keepalived ----- - -[source,yaml] ----- - spec: - containers: - - env: - - name: OPENSHIFT_HA_CHECK_SCRIPT <1> - value: /etc/keepalive/mycheckscript.sh -... - volumeMounts: <2> - - mountPath: /etc/keepalive - name: config-volume - dnsPolicy: ClusterFirst -... - volumes: <3> - - configMap: - defaultMode: 0755 <4> - name: customrouter - name: config-volume -... ----- -<1> In the `spec.container.env` field, add the `OPENSHIFT_HA_CHECK_SCRIPT` environment variable to point to the mounted script file. -<2> Add the `spec.container.volumeMounts` field to create the mount point. -<3> Add a new `spec.volumes` field to mention the config map. -<4> This sets run permission on the files. When read back, it is displayed in decimal, `493`. - -Save the changes and exit the editor. This restarts `ipfailover-keepalived`. 
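After the deployment restarts, you can optionally confirm that the rollout finished before relying on the new check script. A minimal check, assuming the `ipfailover-keepalived` deployment name used in this example, is:

[source,terminal]
----
$ oc rollout status deploy/ipfailover-keepalived
----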
-==== diff --git a/modules/nw-ipfailover-configuring-keepalived-multicast.adoc b/modules/nw-ipfailover-configuring-keepalived-multicast.adoc deleted file mode 100644 index 5d3d488dcc94..000000000000 --- a/modules/nw-ipfailover-configuring-keepalived-multicast.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -[id="nw-ipfailover-configuring-keepalived-multicast_{context}"] -= Configuring Keepalived multicast - -{product-title} IP failover internally uses Keepalived. - -[IMPORTANT] -==== -Ensure that multicast is enabled on the nodes labeled above and they can accept network traffic for `224.0.0.18`, the Virtual Router Redundancy Protocol (VRRP) multicast IP address. -==== - -Before starting the Keepalived daemon, the startup script verifies the `iptables` rule that allows multicast traffic to flow. If there is no such rule, the startup script creates a new rule and adds it to the IP tables configuration. Where this new rule gets added to the IP tables configuration depends on the OPENSHIFT_HA_IPTABLES_CHAIN` variable. If there is an `OPENSHIFT_HA_IPTABLES_CHAIN` variable specified, the rule gets added to the specified chain. Otherwise, the rule is added to the `INPUT` chain. - -[IMPORTANT] -==== -The `iptables` rule must be present whenever there is one or more Keepalived daemon running on the node. -==== - -The `iptables` rule can be removed after the last Keepalived daemon terminates. The rule is not automatically removed. - -.Procedure - -* The `iptables` rule only gets created when it is not already present and the `OPENSHIFT_HA_IPTABLES_CHAIN` variable is specified. You can manually manage the `iptables` rule on each of the nodes if you unset the `OPENSHIFT_HA_IPTABLES_CHAIN` variable: -+ -[IMPORTANT] -==== -You must ensure that the manually added rules persist after a system restart. - -Be careful since every Keepalived daemon uses the VRRP protocol over multicast `224.0.0.18` to negotiate with its peers. There must be a different `VRRP-id`, in the range `0..255`, for each VIP. -==== -+ -[source,terminal] ----- -$ for node in openshift-node-{5,6,7,8,9}; do ssh $node <<EOF - -export interface=${interface:-"eth0"} -echo "Check multicast enabled ... "; -ip addr show $interface | grep -i MULTICAST - -echo "Check multicast groups ... " -ip maddr show $interface | grep 224.0.0 - -EOF -done; ----- diff --git a/modules/nw-ipfailover-configuring-more-than-254.adoc b/modules/nw-ipfailover-configuring-more-than-254.adoc deleted file mode 100644 index cf0617026015..000000000000 --- a/modules/nw-ipfailover-configuring-more-than-254.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: PROCEDURE -[id="nw-ipfailover-configuring-more-than-254_{context}"] -= Configuring IP failover for more than 254 addresses - -IP failover management is limited to 254 groups of Virtual IP (VIP) addresses. By default {product-title} assigns one IP address to each group. You can use the `OPENSHIFT_HA_VIP_GROUPS` variable to change this so multiple IP addresses are in each group and define the number of VIP groups available for each Virtual Router Redundancy Protocol (VRRP) instance when configuring IP failover. - -Grouping VIPs creates a wider range of allocation of VIPs per VRRP in the case of VRRP failover events, and is useful when all hosts in the cluster have access to a service locally. 
For example, when a service is being exposed with an `ExternalIP`. - -[NOTE] -==== -As a rule for failover, do not limit services, such as the router, to one specific host. Instead, services should be replicated to each host so that in the case of IP failover, the services do not have to be recreated on the new host. -==== - -[NOTE] -==== -If you are using {product-title} health checks, the nature of IP failover and groups means that all instances in the group are not checked. For that reason, link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/[the Kubernetes health checks] must be used to ensure that services are live. -==== - -.Prerequisites - -* You are logged in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -* To change the number of IP addresses assigned to each group, change the value for the `OPENSHIFT_HA_VIP_GROUPS` variable, for example: -+ -.Example `Deployment` YAML for IP failover configuration -[source,yaml] ----- -... - spec: - env: - - name: OPENSHIFT_HA_VIP_GROUPS <1> - value: "3" -... ----- -<1> If `OPENSHIFT_HA_VIP_GROUPS` is set to `3` in an environment with seven VIPs, it creates three groups, assigning three VIPs to the first group, and two VIPs to the two remaining groups. - -[NOTE] -==== -If the number of groups set by `OPENSHIFT_HA_VIP_GROUPS` is fewer than the number of IP addresses set to fail over, the group contains more than one IP address, and all of the addresses move as a single unit. -==== diff --git a/modules/nw-ipfailover-configuring-vrrp-preemption.adoc b/modules/nw-ipfailover-configuring-vrrp-preemption.adoc deleted file mode 100644 index 992aec10681e..000000000000 --- a/modules/nw-ipfailover-configuring-vrrp-preemption.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: PROCEDURE -[id="nw-ipfailover-configuring-vrrp-preemption_{context}"] -= Configuring VRRP preemption - -When a Virtual IP (VIP) on a node leaves the `fault` state by passing the check script, the VIP on the node enters the `backup` state if it has lower priority than the VIP on the node that is currently in the `master` state. However, if the VIP on the node that is leaving `fault` state has a higher priority, the preemption strategy determines its role in the cluster. - -The `nopreempt` strategy does not move `master` from the lower priority VIP on the host to the higher priority VIP on the host. With `preempt_delay 300`, the default, Keepalived waits the specified 300 seconds and moves `master` to the higher priority VIP on the host. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). - -.Procedure - -* To specify preemption enter `oc edit deploy ipfailover-keepalived` to edit the router deployment configuration: -+ -[source,terminal] ----- -$ oc edit deploy ipfailover-keepalived ----- -+ -[source,yaml] ----- -... - spec: - containers: - - env: - - name: OPENSHIFT_HA_PREEMPTION <1> - value: preempt_delay 300 -... ----- -<1> Set the `OPENSHIFT_HA_PREEMPTION` value: -- `preempt_delay 300`: Keepalived waits the specified 300 seconds and moves `master` to the higher priority VIP on the host. This is the default value. -- `nopreempt`: does not move `master` from the lower priority VIP on the host to the higher priority VIP on the host. 
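If you prefer not to open an editor, a hedged alternative is to set the variable directly with `oc set env`, assuming the example `ipfailover-keepalived` deployment name used throughout this document; the deployment restarts to pick up the change:

[source,terminal]
----
$ oc set env deploy/ipfailover-keepalived \
    OPENSHIFT_HA_PREEMPTION="preempt_delay 300"
----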
diff --git a/modules/nw-ipfailover-environment-variables.adoc b/modules/nw-ipfailover-environment-variables.adoc deleted file mode 100644 index 44c265fad7d8..000000000000 --- a/modules/nw-ipfailover-environment-variables.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -[id="nw-ipfailover-environment-variables_{context}"] -= IP failover environment variables - -The following table contains the variables used to configure IP failover. - -.IP failover environment variables -[cols="3a,1a,4a",options="header"] -|=== - -| Variable Name | Default | Description - -|`OPENSHIFT_HA_MONITOR_PORT` -|`80` -|The IP failover pod tries to open a TCP connection to this port on each Virtual IP (VIP). If a connection is established, the service is considered to be running. If this port is set to `0`, the test always passes. - -|`OPENSHIFT_HA_NETWORK_INTERFACE` -| -|The interface name that IP failover uses to send Virtual Router Redundancy Protocol (VRRP) traffic. The default value is `eth0`. - -|`OPENSHIFT_HA_REPLICA_COUNT` -|`2` -|The number of replicas to create. This must match the `spec.replicas` value in the IP failover deployment configuration. - -|`OPENSHIFT_HA_VIRTUAL_IPS` -| -|The list of IP address ranges to replicate. This must be provided. For example, `1.2.3.4-6,1.2.3.9`. - -|`OPENSHIFT_HA_VRRP_ID_OFFSET` -|`0` -|The offset value used to set the virtual router IDs. Using different offset values allows multiple IP failover configurations to exist within the same cluster. The default offset is `0`, and the allowed range is `0` through `255`. - -|`OPENSHIFT_HA_VIP_GROUPS` -| -|The number of groups to create for VRRP. If not set, a group is created for each virtual IP range specified with the `OPENSHIFT_HA_VIRTUAL_IPS` variable. - -|`OPENSHIFT_HA_IPTABLES_CHAIN` -|`INPUT` -|The name of the `iptables` chain to automatically add an `iptables` rule to allow the VRRP traffic on. If the value is not set, an `iptables` rule is not added. If the chain does not exist, it is not created. - -|`OPENSHIFT_HA_CHECK_SCRIPT` -| -|The full path name in the pod file system of a script that is periodically run to verify the application is operating. - -|`OPENSHIFT_HA_CHECK_INTERVAL` -|`2` -|The period, in seconds, that the check script is run. - -|`OPENSHIFT_HA_NOTIFY_SCRIPT` -| -|The full path name in the pod file system of a script that is run whenever the state changes. - -|`OPENSHIFT_HA_PREEMPTION` -|`preempt_delay 300` -|The strategy for handling a new higher priority host. The `nopreempt` strategy does not move `master` from the lower priority host to the higher priority host. -|=== diff --git a/modules/nw-ipfailover-remove.adoc b/modules/nw-ipfailover-remove.adoc deleted file mode 100644 index d25344a2f4bb..000000000000 --- a/modules/nw-ipfailover-remove.adoc +++ /dev/null @@ -1,129 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: PROCEDURE -[id="nw-ipfailover-remove_{context}"] -= Removing IP failover - -When IP failover is initially configured, the worker nodes in the cluster are modified with an `iptables` rule that explicitly allows multicast packets on `224.0.0.18` for Keepalived. Because of the change to the nodes, removing IP failover requires running a job to remove the `iptables` rule and to remove the virtual IP addresses used by Keepalived. - -.Procedure - -. Optional: Identify and delete any check and notify scripts that are stored as config maps: - -.. 
Identify whether any pods for IP failover use a config map as a volume: -+ -[source,terminal] ----- -$ oc get pod -l ipfailover \ - -o jsonpath="\ -{range .items[?(@.spec.volumes[*].configMap)]} -{'Namespace: '}{.metadata.namespace} -{'Pod: '}{.metadata.name} -{'Volumes that use config maps:'} -{range .spec.volumes[?(@.configMap)]} {'volume: '}{.name} - {'configMap: '}{.configMap.name}{'\n'}{end} -{end}" ----- -+ -.Example output ----- -Namespace: default -Pod: keepalived-worker-59df45db9c-2x9mn -Volumes that use config maps: - volume: config-volume - configMap: mycustomcheck ----- - -.. If the preceding step provided the names of config maps that are used as volumes, delete the config maps: -+ -[source,terminal] ----- -$ oc delete configmap <configmap_name> ----- - -. Identify an existing deployment for IP failover: -+ -[source,terminal] ----- -$ oc get deployment -l ipfailover ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE -default ipfailover 2/2 2 2 105d ----- - -. Delete the deployment: -+ -[source,terminal] ----- -$ oc delete deployment <ipfailover_deployment_name> ----- - -. Remove the `ipfailover` service account: -+ -[source,terminal] ----- -$ oc delete sa ipfailover ----- - -. Run a job that removes the IP tables rule that was added when IP failover was initially configured: - -.. Create a file such as `remove-ipfailover-job.yaml` with contents that are similar to the following example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: batch/v1 -kind: Job -metadata: - generateName: remove-ipfailover- - labels: - app: remove-ipfailover -spec: - template: - metadata: - name: remove-ipfailover - spec: - containers: - - name: remove-ipfailover - image: quay.io/openshift/origin-keepalived-ipfailover:{product-version} - command: ["/var/lib/ipfailover/keepalived/remove-failover.sh"] - nodeSelector: - kubernetes.io/hostname: <host_name> <.> - restartPolicy: Never ----- -<.> Run the job for each node in your cluster that was configured for IP failover and replace the hostname each time. - -.. Run the job: -+ -[source,terminal] ----- -$ oc create -f remove-ipfailover-job.yaml ----- -+ -.Example output ----- -job.batch/remove-ipfailover-2h8dm created ----- - -.Verification - -* Confirm that the job removed the initial configuration for IP failover. -+ -[source,terminal] ----- -$ oc logs job/remove-ipfailover-2h8dm ----- -+ -.Example output -[source,terminal] ----- -remove-failover.sh: OpenShift IP Failover service terminating. - - Removing ip_vs module ... - - Cleaning up ... - - Releasing VIPs (interface eth0) ... ----- diff --git a/modules/nw-ipfailover-virtual-ip-addresses-concept.adoc b/modules/nw-ipfailover-virtual-ip-addresses-concept.adoc deleted file mode 100644 index 2e86a9d792ad..000000000000 --- a/modules/nw-ipfailover-virtual-ip-addresses-concept.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: CONCEPT -[id="nw-ipfailover-virtual-ip-addresses-concept_{context}"] -= About virtual IP addresses - -Keepalived manages a set of virtual IP addresses (VIP). The administrator must make sure that all of these addresses: - -* Are accessible on the configured hosts from outside the cluster. -* Are not used for any other purpose within the cluster. - -Keepalived on each node determines whether the needed service is running. 
If it is, VIPs are supported and Keepalived participates in the negotiation to determine which node serves the VIP. For a node to participate, the service must be listening on the watch port on a VIP or the check must be disabled. - -[NOTE] -==== -Each VIP in the set may end up being served by a different node. -==== diff --git a/modules/nw-ipfailover-vrrp-ip-offset.adoc b/modules/nw-ipfailover-vrrp-ip-offset.adoc deleted file mode 100644 index 81ca434896c2..000000000000 --- a/modules/nw-ipfailover-vrrp-ip-offset.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ipfailover.adoc - -:_content-type: CONCEPT -[id="nw-ipfailover-vrrp-ip-offset_{context}"] -= About VRRP ID offset - -Each IP failover pod managed by the IP failover deployment configuration, `1` pod per node or replica, runs a Keepalived daemon. As more IP failover deployment configurations are configured, more pods are created and more daemons join into the common Virtual Router Redundancy Protocol (VRRP) negotiation. This negotiation is done by all the Keepalived daemons and it determines which nodes service which virtual IPs (VIP). - -Internally, Keepalived assigns a unique `vrrp-id` to each VIP. The negotiation uses this set of `vrrp-ids`, when a decision is made, the VIP corresponding to the winning `vrrp-id` is serviced on the winning node. - -Therefore, for every VIP defined in the IP failover deployment configuration, the IP failover pod must assign a corresponding `vrrp-id`. This is done by starting at `OPENSHIFT_HA_VRRP_ID_OFFSET` and sequentially assigning the `vrrp-ids` to the list of VIPs. The `vrrp-ids` can have values in the range `1..255`. - -When there are multiple IP failover deployment configurations, you must specify `OPENSHIFT_HA_VRRP_ID_OFFSET` so that there is room to increase the number of VIPs in the deployment configuration and none of the `vrrp-id` ranges overlap. diff --git a/modules/nw-kube-proxy-config.adoc b/modules/nw-kube-proxy-config.adoc deleted file mode 100644 index 29df949a3459..000000000000 --- a/modules/nw-kube-proxy-config.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-kube-proxy.adoc - -[id="nw-kube-proxy-config_{context}"] -= kube-proxy configuration parameters - -You can modify the following `kubeProxyConfig` parameters. - -[NOTE] -==== -Because of performance improvements introduced in {product-title} 4.3 and greater, adjusting the `iptablesSyncPeriod` parameter is no longer necessary. -==== - -.Parameters -[cols="30%,30%,30%,10%",options="header"] -|==== -|Parameter|Description|Values|Default - -|`iptablesSyncPeriod` -|The refresh period for `iptables` rules. -|A time interval, such as `30s` or `2m`. Valid -suffixes include `s`, `m`, and `h` and are described in the -link:https://golang.org/pkg/time/#ParseDuration[Go time package] documentation. -|`30s` - -|`proxyArguments.iptables-min-sync-period` -|The minimum duration before refreshing `iptables` rules. This parameter ensures -that the refresh does not happen too frequently. By default, a refresh starts as soon as a change that affects `iptables` rules occurs. -|A time interval, such as `30s` or `2m`. 
Valid suffixes include `s`, -`m`, and `h` and are described in the -link:https://golang.org/pkg/time/#ParseDuration[Go time package] -|`0s` - -|==== diff --git a/modules/nw-kube-proxy-configuring.adoc b/modules/nw-kube-proxy-configuring.adoc deleted file mode 100644 index 6f2e9e650a62..000000000000 --- a/modules/nw-kube-proxy-configuring.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-kube-proxy.adoc - -:_content-type: PROCEDURE -[id="nw-kube-proxy-configuring_{context}"] -= Modifying the kube-proxy configuration - -You can modify the Kubernetes network proxy configuration for your cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to a running cluster with the `cluster-admin` role. - -.Procedure - -. Edit the `Network.operator.openshift.io` custom resource (CR) by running the -following command: -+ -[source,terminal] ----- -$ oc edit network.operator.openshift.io cluster ----- - -. Modify the `kubeProxyConfig` parameter in the CR with your changes to the -kube-proxy configuration, such as in the following example CR: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - kubeProxyConfig: - iptablesSyncPeriod: 30s - proxyArguments: - iptables-min-sync-period: ["30s"] ----- - -. Save the file and exit the text editor. -+ -The syntax is validated by the `oc` command when you save the file and exit the -editor. If your modifications contain a syntax error, the editor opens the file -and displays an error message. - -. Enter the following command to confirm the configuration update: -+ -[source,terminal] ----- -$ oc get networks.operator.openshift.io -o yaml ----- -+ -.Example output -[source,yaml] ----- -apiVersion: v1 -items: -- apiVersion: operator.openshift.io/v1 - kind: Network - metadata: - name: cluster - spec: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - defaultNetwork: - type: OpenShiftSDN - kubeProxyConfig: - iptablesSyncPeriod: 30s - proxyArguments: - iptables-min-sync-period: - - 30s - serviceNetwork: - - 172.30.0.0/16 - status: {} -kind: List ----- - -. Optional: Enter the following command to confirm that the Cluster Network -Operator accepted the configuration change: -+ -[source,terminal] ----- -$ oc get clusteroperator network ----- -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -network 4.1.0-0.9 True False False 1m ----- -+ -The `AVAILABLE` field is `True` when the configuration update is applied -successfully. diff --git a/modules/nw-kube-proxy-sync.adoc b/modules/nw-kube-proxy-sync.adoc deleted file mode 100644 index 87c4eb5f1704..000000000000 --- a/modules/nw-kube-proxy-sync.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/configuring-kube-proxy.adoc - -:_content-type: CONCEPT -[id="nw-kube-proxy-sync_{context}"] -= About iptables rules synchronization - -The synchronization period determines how frequently the Kubernetes network -proxy (kube-proxy) syncs the iptables rules on a node. - -A sync begins when either of the following events occurs: - -* An event occurs, such as service or endpoint is added to or removed from the -cluster. -* The time since the last sync exceeds the sync period defined for kube-proxy. 
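To relate the sync period described above to a running cluster, the following optional check is a sketch that prints the configured `iptablesSyncPeriod` value from the `kubeProxyConfig` stanza shown earlier; the output is empty if the field has not been set:

[source,terminal]
----
$ oc get network.operator.openshift.io cluster \
    -o jsonpath='{.spec.kubeProxyConfig.iptablesSyncPeriod}'
----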
diff --git a/modules/nw-kuryr-cleanup.adoc b/modules/nw-kuryr-cleanup.adoc deleted file mode 100644 index 0b81aa753912..000000000000 --- a/modules/nw-kuryr-cleanup.adoc +++ /dev/null @@ -1,289 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-kuryr-sdn.adoc - -:_content-type: PROCEDURE -[id="nw-kuryr-cleanup_{context}"] -= Cleaning up resources after migration - -After migration from the Kuryr network plugin to the OVN-Kubernetes network -plugin, you must clean up the resources that Kuryr created previously. - -[NOTE] -==== -The clean up process relies on a Python virtual environment to ensure that the package versions that you use support tags for Octavia objects. You do not need a virtual environment if you are certain that your environment uses at minimum: -* `openstacksdk` version 0.54.0 -* `python-openstackclient` version 5.5.0 -* `python-octaviaclient` version 2.3.0 -==== - -.Prerequisites - -* You installed the {product-title} CLI (`oc`). -* You installed a Python interpreter. -* You installed the `openstacksdk` Python package. -* You installed the `openstack` CLI. -* You have access to the underlying {rh-openstack} cloud. -* You can access the cluster as a user with the `cluster-admin` role. - -.Procedure -. Create a clean-up Python virtual environment: -.. Create a temporary directory for your environment. For example: -+ -[source,terminal] ----- -$ python3 -m venv /tmp/venv ----- -+ -The virtual environment located in `/tmp/venv` directory is used in all clean up examples. -.. Enter the virtual environment. For example: -+ -[source,terminal] ----- -$ source /tmp/venv/bin/activate ----- -.. Upgrade the `pip` command in the virtual environment by running the following command: -+ -[source,terminal] ----- -(venv) $ pip install pip --upgrade ----- -.. Install the required Python packages by running the following command: -+ -[source,terminal] ----- -(venv) $ pip install openstacksdk==0.54.0 python-openstackclient==5.5.0 python-octaviaclient==2.3.0 ----- - -. In your terminal, set variables to cluster and Kuryr identifiers by running the following commands: - -.. Set the cluster ID: -+ -[source,terminal] ----- -(venv) $ CLUSTERID=$(oc get infrastructure.config.openshift.io cluster -o=jsonpath='{.status.infrastructureName}') ----- - -.. Set the cluster tag: -+ -[source,terminal] ----- -(venv) $ CLUSTERTAG="openshiftClusterID=${CLUSTERID}" ----- -.. Set the router ID: -+ -[source,terminal] ----- -(venv) $ ROUTERID=$(oc get kuryrnetwork -A --no-headers -o custom-columns=":status.routerId"|head -n 1) ----- - -. Create a Bash function that removes finalizers from specified resources by running the following command: -+ -[source,terminal] ----- -(venv) $ function REMFIN { - local resource=$1 - local finalizer=$2 - for res in $(oc get $resource -A --template='{{range $i,$p := .items}}{{ $p.metadata.name }}|{{ $p.metadata.namespace }}{{"\n"}}{{end}}'); do - name=${res%%|*} - ns=${res##*|} - yaml=$(oc get -n $ns $resource $name -o yaml) - if echo "${yaml}" | grep -q "${finalizer}"; then - echo "${yaml}" | grep -v "${finalizer}" | oc replace -n $ns $resource $name -f - - fi - done -} ----- -+ -The function takes two parameters: the first parameter is name of the resource, and the second parameter is the finalizer to remove. -The named resource is removed from the cluster and its definition is replaced with copied data, excluding the specified finalizer. - -. 
To remove Kuryr finalizers from services, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN services kuryr.openstack.org/service-finalizer ----- - -. To remove the Kuryr `service-subnet-gateway-ip` service, enter the following command: -+ -[source,terminal] ----- -(venv) $ if $(oc get -n openshift-kuryr service service-subnet-gateway-ip &>/dev/null); then - oc -n openshift-kuryr delete service service-subnet-gateway-ip -fi ----- - -. To remove all tagged {rh-openstack} load balancers from Octavia, enter the following command: -+ -[source,terminal] ----- -(venv) $ for lb in $(openstack loadbalancer list --tags $CLUSTERTAG -f value -c id); do - openstack loadbalancer delete --cascade $lb -done ----- - -. To remove Kuryr finalizers from all `KuryrLoadBalancer` CRs, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN kuryrloadbalancers.openstack.org kuryr.openstack.org/kuryrloadbalancer-finalizers ----- - -. To remove the `openshift-kuryr` namespace, enter the following command: -+ -[source,terminal] ----- -(venv) $ oc delete namespace openshift-kuryr ----- - -. To remove the Kuryr service subnet from the router, enter the following command: -+ -[source,terminal] ----- -(venv) $ openstack router remove subnet $ROUTERID ${CLUSTERID}-kuryr-service-subnet ----- - -. To remove the Kuryr service network, enter the following command: -+ -[source,terminal] ----- -(venv) $ openstack network delete ${CLUSTERID}-kuryr-service-network ----- - -. To remove Kuryr finalizers from all pods, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN pods kuryr.openstack.org/pod-finalizer ----- - -. To remove Kuryr finalizers from all `KuryrPort` CRs, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN kuryrports.openstack.org kuryr.openstack.org/kuryrport-finalizer ----- -This command deletes the `KuryrPort` CRs. - -. To remove Kuryr finalizers from network policies, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN networkpolicy kuryr.openstack.org/networkpolicy-finalizer ----- - -. To remove Kuryr finalizers from remaining network policies, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN kuryrnetworkpolicies.openstack.org kuryr.openstack.org/networkpolicy-finalizer ----- - -. To remove subports that Kuryr created from trunks, enter the following command: -+ -[source,terminal] ----- -(venv) $ read -ra trunks <<< $(python -c "import openstack; n = openstack.connect().network; print(' '.join([x.id for x in n.trunks(any_tags='$CLUSTERTAG')]))") && \ -i=0 && \ -for trunk in "${trunks[@]}"; do - i=$((i+1)) - echo "Processing trunk $trunk, ${i}/${#trunks[@]}." - subports=() - for subport in $(python -c "import openstack; n = openstack.connect().network; print(' '.join([x['port_id'] for x in n.get_trunk('$trunk').sub_ports if '$CLUSTERTAG' in n.get_port(x['port_id']).tags]))"); do - subports+=("$subport"); - done - args=() - for sub in "${subports[@]}" ; do - args+=("--subport $sub") - done - if [ ${#args[@]} -gt 0 ]; then - openstack network trunk unset ${args[*]} $trunk - fi -done ----- - -. 
To retrieve all networks and subnets from `KuryrNetwork` CRs and remove ports, router interfaces and the network itself, enter the following command: -+ -[source,terminal] ----- -(venv) $ mapfile -t kuryrnetworks < <(oc get kuryrnetwork -A --template='{{range $i,$p := .items}}{{ $p.status.netId }}|{{ $p.status.subnetId }}{{"\n"}}{{end}}') && \ -i=0 && \ -for kn in "${kuryrnetworks[@]}"; do - i=$((i+1)) - netID=${kn%%|*} - subnetID=${kn##*|} - echo "Processing network $netID, ${i}/${#kuryrnetworks[@]}" - # Remove all ports from the network. - for port in $(python -c "import openstack; n = openstack.connect().network; print(' '.join([x.id for x in n.ports(network_id='$netID') if x.device_owner != 'network:router_interface']))"); do - ( openstack port delete $port ) & - - # Only allow 20 jobs in parallel. - if [[ $(jobs -r -p | wc -l) -ge 20 ]]; then - wait -n - fi - done - wait - - # Remove the subnet from the router. - openstack router remove subnet $ROUTERID $subnetID - - # Remove the network. - openstack network delete $netID -done ----- - -. To remove the Kuryr security group, enter the following command: -+ -[source,terminal] ----- -(venv) $ openstack security group delete ${CLUSTERID}-kuryr-pods-security-group ----- - -. To remove all tagged subnet pools, enter the following command: -+ -[source,terminal] ----- -(venv) $ for subnetpool in $(openstack subnet pool list --tags $CLUSTERTAG -f value -c ID); do - openstack subnet pool delete $subnetpool -done ----- - -. To check that all of the networks based on `KuryrNetwork` CRs were removed, enter the following command: -+ -[source,terminal] ----- -(venv) $ networks=$(oc get kuryrnetwork -A --no-headers -o custom-columns=":status.netId") && \ -for existingNet in $(openstack network list --tags $CLUSTERTAG -f value -c ID); do - if [[ $networks =~ $existingNet ]]; then - echo "Network still exists: $existingNet" - fi -done ----- -+ -If the command returns any existing networks, intestigate and remove them before you continue. - -. To remove security groups that are related to network policy, enter the following command: -+ -[source,terminal] ----- -(venv) $ for sgid in $(openstack security group list -f value -c ID -c Description | grep 'Kuryr-Kubernetes Network Policy' | cut -f 1 -d ' '); do - openstack security group delete $sgid -done ----- - -. To remove finalizers from `KuryrNetwork` CRs, enter the following command: -+ -[source,terminal] ----- -(venv) $ REMFIN kuryrnetworks.openstack.org kuryrnetwork.finalizers.kuryr.openstack.org ----- - -. To remove the Kuryr router, enter the following command: -+ -[source,terminal] ----- -(venv) $ if $(python3 -c "import sys; import openstack; n = openstack.connect().network; r = n.get_router('$ROUTERID'); sys.exit(0) if r.description != 'Created By OpenShift Installer' else sys.exit(1)"); then - openstack router delete $ROUTERID -fi ----- diff --git a/modules/nw-kuryr-migration-about.adoc b/modules/nw-kuryr-migration-about.adoc deleted file mode 100644 index 9f6662c3274a..000000000000 --- a/modules/nw-kuryr-migration-about.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc - -:_content-type: CONCEPT -[id="nw-kuryr-ovn-kubernetes-migration-about_{context}"] -= Migration to the OVN-Kubernetes network provider - -You can manually migrate a cluster that runs on {rh-openstack-first} to the OVN-Kubernetes network provider. 
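Before you begin the migration, you can optionally confirm which network plugin the cluster currently uses. This minimal check is expected to print `Kuryr` on a cluster that is a candidate for this procedure:

[source,terminal]
----
$ oc get network.config.openshift.io cluster \
    -o jsonpath='{.status.networkType}'
----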
- -[IMPORTANT] -==== -Migration to OVN-Kubernetes is a one-way process. -During migration, your cluster will be unreachable for a brief time. -==== - -[id="considerations-kuryr-migrating-network-provider_{context}"] -== Considerations when migrating to the OVN-Kubernetes network provider - -Kubernetes namespaces are kept by Kuryr in separate {rh-openstack} networking service (Neutron) subnets. Those subnets and the IP addresses that are assigned to individual pods are not preserved during the migration. - -[id="how-the-kuryr-migration-process-works_{context}"] -== How the migration process works - -The following table summarizes the migration process by relating the steps that you perform with the actions that your cluster and Operators take. - -.The Kuryr to OVN-Kubernetes migration process -[cols="1,1a",options="header"] -|=== - -|User-initiated steps|Migration activity - -| -Set the `migration` field of the `Network.operator.openshift.io` custom resource (CR) named `cluster` to `OVNKubernetes`. Verify that the value of the `migration` field prints the `null` value before setting it to another value. -| -Cluster Network Operator (CNO):: Updates the status of the `Network.config.openshift.io` CR named `cluster` accordingly. -Machine Config Operator (MCO):: Deploys an update to the systemd configuration that is required by OVN-Kubernetes. By default, the MCO updates a single machine per pool at a time. As a result, large clusters have longer migration times. - -|Update the `networkType` field of the `Network.config.openshift.io` CR. -| -CNO:: Performs the following actions: -+ --- -* Destroys the Kuryr control plane pods: Kuryr CNIs and the Kuryr controller. -* Deploys the OVN-Kubernetes control plane pods. -* Updates the Multus objects to reflect the new network plugin. --- - -| -Reboot each node in the cluster. -| -Cluster:: As nodes reboot, the cluster assigns IP addresses to pods on the OVN-Kubernetes cluster network. - -| -Clean up remaining resources Kuryr controlled. -| -Cluster:: Holds {rh-openstack} resources that need to be freed, as well as {product-title} resources to configure. -|=== diff --git a/modules/nw-kuryr-migration.adoc b/modules/nw-kuryr-migration.adoc deleted file mode 100644 index 7f014a692da6..000000000000 --- a/modules/nw-kuryr-migration.adoc +++ /dev/null @@ -1,346 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-kuryr-sdn.adoc - -:_content-type: PROCEDURE -[id="nw-kuryr-migration_{context}"] -= Migrating to the OVN-Kubernetes network plugin - -As a cluster administrator, you can change the network plugin for your cluster to OVN-Kubernetes. - -[IMPORTANT] -==== -During the migration, you must reboot every node in your cluster. -Your cluster is unavailable and workloads might be interrupted. -Perform the migration only if an interruption in service is acceptable. -==== - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have a recent backup of the etcd database is available. -* You can manually reboot each node. -* The cluster you plan to migrate is in a known good state, without any errors. -* You installed the Python interpreter. -* You installed the `openstacksdk` python package. -* You installed the `openstack` CLI tool. -* You have access to the underlying {rh-openstack} cloud. - -.Procedure - -. 
Back up the configuration for the cluster network by running the following command: -+ -[source,terminal] ----- -$ oc get Network.config.openshift.io cluster -o yaml > cluster-kuryr.yaml ----- - -. To set the `CLUSTERID` variable, run the following command: -+ -[source,terminal] ----- -$ CLUSTERID=$(oc get infrastructure.config.openshift.io cluster -o=jsonpath='{.status.infrastructureName}') ----- - -. To prepare all the nodes for the migration, set the `migration` field on the Cluster Network Operator configuration object by running the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": { "networkType": "OVNKubernetes" } } }' ----- -+ -[NOTE] -==== -This step does not deploy OVN-Kubernetes immediately. Specifying the `migration` field triggers the Machine Config Operator (MCO) to apply new machine configs to all the nodes in the cluster. This prepares the cluster for the OVN-Kubernetes deployment. -==== - -. Optional: Customize the following settings for OVN-Kubernetes for your network infrastructure requirements: -+ --- -* Maximum transmission unit (MTU) -* Geneve (Generic Network Virtualization Encapsulation) overlay network port -* OVN-Kubernetes IPv4 internal subnet -* OVN-Kubernetes IPv6 internal subnet --- -+ -To customize these settings, enter and customize the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "ovnKubernetesConfig":{ - "mtu":<mtu>, - "genevePort":<port>, - "v4InternalSubnet":"<ipv4_subnet>", - "v6InternalSubnet":"<ipv6_subnet>" - }}}}' ----- -+ -where: -+ --- -`mtu`:: -Specifies the MTU for the Geneve overlay network. This value is normally configured automatically, but if the nodes in your cluster do not all use the same MTU, then you must set this explicitly to `100` less than the smallest node MTU value. -`port`:: -Specifies the UDP port for the Geneve overlay network. If a value is not specified, the default is `6081`. The port cannot be the same as the VXLAN port that is used by Kuryr. The default value for the VXLAN port is `4789`. -`ipv4_subnet`:: -Specifies an IPv4 address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. The default value is `100.64.0.0/16`. -`ipv6_subnet`:: -Specifies an IPv6 address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. The default value is `fd98::/48`. --- -+ -If you do not need to change the default value, omit the key from the patch. -+ -.Example patch command to update `mtu` field -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "ovnKubernetesConfig":{ - "mtu":1200 - }}}}' ----- - -. Check the machine config pool status by entering the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -While the MCO updates machines in each machine config pool, it reboots each node one by one. You must wait until all the nodes are updated before continuing. 
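+
Rather than polling `oc get mcp` manually, you can block until every pool reports as updated. The following command is a convenience sketch; adjust the timeout to the size of your cluster:
+
[source,terminal]
----
$ oc wait mcp --all --for=condition=Updated=True --timeout=60m
----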
-+ -A successfully updated node has the following status: `UPDATED=true`, `UPDATING=false`, `DEGRADED=false`. -+ -[NOTE] -==== -By default, the MCO updates one machine per pool at a time. Large clusters take more time to migrate than small clusters. -==== - -. Confirm the status of the new machine configuration on the hosts: - -.. To list the machine configuration state and the name of the applied machine configuration, enter the following command: -+ -[source,terminal] ----- -$ oc describe node | egrep "hostname|machineconfig" ----- -+ -.Example output -[source,terminal] ----- -kubernetes.io/hostname=master-0 -machineconfiguration.openshift.io/currentConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b <2> -machineconfiguration.openshift.io/desiredConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b <3> -machineconfiguration.openshift.io/reason: -machineconfiguration.openshift.io/state: Done ----- - -.. Review the output from the previous step. The following statements must be true: -+ --- - * The value of `machineconfiguration.openshift.io/state` field is `Done`. - * The value of the `machineconfiguration.openshift.io/currentConfig` field is equal to the value of the `machineconfiguration.openshift.io/desiredConfig` field. --- - -.. To confirm that the machine config is correct, enter the following command: -+ -[source,terminal] ----- -$ oc get machineconfig <config_name> -o yaml | grep ExecStart ----- -+ -where: - -<config_name>:: Specifies the name of the machine config from the `machineconfiguration.openshift.io/currentConfig` field. -+ -The machine config must include the following update to the systemd configuration: -+ -.Example output -[source,plain] ----- -ExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes ----- - -.. If a node is stuck in the `NotReady` state, investigate the machine config daemon pod logs and resolve any errors: - -... To list the pods, enter the following command: -+ -[source,terminal] ----- -$ oc get pod -n openshift-machine-config-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -machine-config-controller-75f756f89d-sjp8b 1/1 Running 0 37m -machine-config-daemon-5cf4b 2/2 Running 0 43h -machine-config-daemon-7wzcd 2/2 Running 0 43h -machine-config-daemon-fc946 2/2 Running 0 43h -machine-config-daemon-g2v28 2/2 Running 0 43h -machine-config-daemon-gcl4f 2/2 Running 0 43h -machine-config-daemon-l5tnv 2/2 Running 0 43h -machine-config-operator-79d9c55d5-hth92 1/1 Running 0 37m -machine-config-server-bsc8h 1/1 Running 0 43h -machine-config-server-hklrm 1/1 Running 0 43h -machine-config-server-k9rtx 1/1 Running 0 43h ----- -+ -The names for the config daemon pods are in the following format: `machine-config-daemon-<seq>`. The `<seq>` value is a random five character alphanumeric sequence. - -... Display the pod log for the first machine config daemon pod shown in the previous output by enter the following command: -+ -[source,terminal] ----- -$ oc logs <pod> -n openshift-machine-config-operator ----- -+ -where: - -<pod>:: Specifies the name of a machine config daemon pod. - -... Resolve any errors in the logs shown by the output from the previous command. - -. 
To start the migration, configure the OVN-Kubernetes network plugin by using one of the following commands: - -** To specify the network provider without changing the cluster network IP address block, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster \ - --type='merge' --patch '{ "spec": { "networkType": "OVNKubernetes" } }' ----- - -** To specify a different cluster network IP address block, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster \ - --type='merge' --patch '{ - "spec": { - "clusterNetwork": [ - { - "cidr": "<cidr>", - "hostPrefix": "<prefix>" - } - ], - "networkType": "OVNKubernetes" - } - }' ----- -+ -where: - -<cidr>:: Specifies a CIDR block. -<prefix>:: Specifies a slice of the CIDR block that is apportioned to each node in your cluster. -+ -[IMPORTANT] -==== -You cannot change the service network address block during the migration. - -You cannot use any CIDR block that overlaps with the `100.64.0.0/16` CIDR block because the OVN-Kubernetes network provider uses this block internally. -==== - -. Verify that the Multus daemon set rollout is complete by entering the following command: -+ -[source,terminal] ----- -$ oc -n openshift-multus rollout status daemonset/multus ----- -+ -The names of the Multus pods are in the form of `multus-<xxxxx>`, where `<xxxxx>` is a random sequence of letters. It might take several moments for the pods to restart. -+ -.Example output -[source,text] ----- -Waiting for daemon set "multus" rollout to finish: 1 out of 6 new pods have been updated... -... -Waiting for daemon set "multus" rollout to finish: 5 of 6 updated pods are available... -daemon set "multus" successfully rolled out ----- - -. To complete the migration, reboot each node in your cluster. For example, you can use a bash script similar to the following example. The script assumes that you can connect to each host by using `ssh` and that you have configured `sudo` to not prompt for a password: -+ -[source,bash] ----- -#!/bin/bash - -for ip in $(oc get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}') -do - echo "reboot node $ip" - ssh -o StrictHostKeyChecking=no core@$ip sudo shutdown -r -t 3 -done ----- -+ -[NOTE] -==== -If SSH access is not available, you can use the `openstack` command: -[source,terminal] ----- -$ for name in $(openstack server list --name ${CLUSTERID}\* -f value -c Name); do openstack server reboot $name; done ----- -Alternatively, you might be able to reboot each node through the management portal for -your infrastructure provider. Otherwise, contact the appropriate authority to -gain access to the virtual machines through either SSH or the management -portal and the OpenStack client. -==== - -.Verification -. Confirm that the migration succeeded, and then remove the migration resources: - -.. To confirm that the network plugin is OVN-Kubernetes, enter the following command: -+ -[source,terminal] ----- -$ oc get network.config/cluster -o jsonpath='{.status.networkType}{"\n"}' ----- -+ -The value of `status.networkType` must be `OVNKubernetes`. - -.. To confirm that the cluster nodes are in the `Ready` state, enter the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- - -.. 
To confirm that your pods are not in an error state, enter the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces -o wide --sort-by='{.spec.nodeName}' ----- -+ -If pods on a node are in an error state, reboot that node. - -.. To confirm that no cluster Operators are in an abnormal state, enter the following command: -+ -[source,terminal] ----- -$ oc get co ----- -+ -The status of every cluster Operator must be the following: `AVAILABLE="True"`, `PROGRESSING="False"`, `DEGRADED="False"`. If a cluster Operator is not available or degraded, check the logs for the cluster Operator for more information. -+ -[IMPORTANT] -==== -Do not proceed if any of the previous verification steps indicate errors. -You might encounter pods that have a `Terminating` state due to finalizers that are removed during clean up. They are not an error indication. -==== -+ -. If the migration completed and your cluster is in a good state, remove the migration configuration from the CNO configuration object by entering the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": null } }' ----- diff --git a/modules/nw-label-nodes-with-sriov.adoc b/modules/nw-label-nodes-with-sriov.adoc deleted file mode 100644 index 09a3d176ef25..000000000000 --- a/modules/nw-label-nodes-with-sriov.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-interface-sysctl-sriov-device.adoc - -:_content-type: PROCEDURE -[id="nw-labeling-sriov-enabled-nodes_{context}"] -= Labeling nodes with an SR-IOV enabled NIC - -If you want to enable SR-IOV only on SR-IOV capable nodes, there are a couple of ways to do this: - -. Install the Node Feature Discovery (NFD) Operator. NFD detects the presence of SR-IOV enabled NICs and labels the nodes with `node.alpha.kubernetes-incubator.io/nfd-network-sriov.capable = true`. - -. Examine the `SriovNetworkNodeState` CR for each node. The `interfaces` stanza includes a list of all of the SR-IOV devices discovered by the SR-IOV Network Operator on the worker node. Label each node with `feature.node.kubernetes.io/network-sriov.capable: "true"` by using the following command: -+ -[source,terminal] ----- -$ oc label node <node_name> feature.node.kubernetes.io/network-sriov.capable="true" ----- -+ -[NOTE] -==== -You can label the nodes with any label name that you want. -==== diff --git a/modules/nw-metalLB-basic-upgrade-operator.adoc b/modules/nw-metalLB-basic-upgrade-operator.adoc deleted file mode 100644 index 7f2dd5458892..000000000000 --- a/modules/nw-metalLB-basic-upgrade-operator.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-upgrading-operator.adoc - -:_content-type: PROCEDURE - -[id="upgrading-metallb-operator_{context}"] -= Upgrading the MetalLB Operator - - -.Prerequisites - -* Access the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Verify that the `metallb-system` namespace still exists: -+ -[source,terminal] ----- -$ oc get namespaces | grep metallb-system ----- -+ -.Example output -[source,terminal] ----- -metallb-system Active 31m ----- - -. Verify that the `metallb` custom resource still exists: -+ -[source,terminal] ----- -$ oc get metallb -n metallb-system ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -metallb 33m ----- - -. 
Follow the guidance in "Installing from OperatorHub using the CLI" to install the latest {product-version} version of the MetalLB Operator. -+ -[NOTE] -==== -When installing the latest {product-version} version of the MetalLB Operator, you must install the Operator to the same namespace it was previously installed to. -==== - -. Verify the upgraded version of the Operator is now the {product-version} version. -+ -[source,terminal] ----- -$ oc get csv -n metallb-system ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME DISPLAY VERSION REPLACES PHASE -metallb-operator.4.{product-version}.0-202207051316 MetalLB Operator 4.{product-version}.0-202207051316 Succeeded ----- diff --git a/modules/nw-metalLB-monitor-upgrading.adoc b/modules/nw-metalLB-monitor-upgrading.adoc deleted file mode 100644 index 7ae9ad826dba..000000000000 --- a/modules/nw-metalLB-monitor-upgrading.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-upgrading-operator.adoc - -:_content-type: PROCEDURE - -[id="metalLB-operator-monitoring-upgrade-status_{context}"] -= Monitoring upgrade status -The best way to monitor the MetalLB Operator upgrade status is to watch the `ClusterServiceVersion` (CSV) `PHASE`. -You can also monitor the CSV conditions in the web console or by running the `oc get csv` command. - -[NOTE] -==== -The `PHASE` and conditions values are approximations that are based on available information. -==== - -.Prerequisites - -* Access the cluster as a user with the `cluster-admin` role. - -* Install the OpenShift CLI (`oc`). - -.Procedure - -. Run the following command: -+ -[source,terminal] ----- -$ oc get csv ----- - -. Review the output, checking the `PHASE` field. For example: -+ -[source,terminal] ----- -VERSION REPLACES PHASE -4.13.0 metallb-operator.4.13-nnnnnnnnnnnn Installing -4.13.0 Replacing ----- - -. Run `get csv` again to verify the output: -+ -[source,terminal] ----- -$ oc get csv ----- -+ -.Example output -[source,terminal] ----- -NAME DISPLAY VERSION REPLACES PHASE -metallb-operator.4.13-nnnnnnnnnnnn MetalLB 4.13.0 metallb-operator.v4.13.0 Succeeded ----- diff --git a/modules/nw-metallb-addresspool-cr.adoc b/modules/nw-metallb-addresspool-cr.adoc deleted file mode 100644 index 4a2da073278e..000000000000 --- a/modules/nw-metallb-addresspool-cr.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-address-pools.adoc - -:_content-type: REFERENCE -[id="nw-metallb-ipaddresspool-cr_{context}"] -= About the IPAddressPool custom resource - -[NOTE] -==== -The address pool custom resource definition (CRD) and API documented in "Load balancing with MetalLB" in {product-title} 4.10 can still be used in {product-version}. However, the enhanced functionality associated with advertising an IP address from an `IPAddressPool` with layer 2 protocols, or the BGP protocol, is not supported when using the `AddressPool` CRD. -==== - -The fields for the `IPAddressPool` custom resource are described in the following tables. - -.MetalLB IPAddressPool pool custom resource -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the address pool. -When you add a service, you can specify this pool name in the `metallb.universe.tf/address-pool` annotation to select an IP address from a specific pool. -The names `doc-example`, `silver`, and `gold` are used throughout the documentation. 
- -|`metadata.namespace` -|`string` -|Specifies the namespace for the address pool. -Specify the same namespace that the MetalLB Operator uses. - -|`metadata.label` -|`string` -|Optional: Specifies the key value pair assigned to the `IPAddressPool`. This can be referenced by the `ipAddressPoolSelectors` in the `BGPAdvertisement` and `L2Advertisement` CRD to associate the `IPAddressPool` with the advertisement - -|`spec.addresses` -|`string` -|Specifies a list of IP addresses for MetalLB Operator to assign to services. -You can specify multiple ranges in a single pool; they will all share the same settings. -Specify each range in CIDR notation or as starting and ending IP addresses separated with a hyphen. - -|`spec.autoAssign` -|`boolean` -|Optional: Specifies whether MetalLB automatically assigns IP addresses from this pool. -Specify `false` if you want explicitly request an IP address from this pool with the `metallb.universe.tf/address-pool` annotation. -The default value is `true`. - -|=== - -You can assign IP addresses from an `IPAddressPool` to services and namespaces by configuring the `spec.serviceAllocation` specification. - -.MetalLB IPAddressPool custom resource spec.serviceAllocation subfields -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`priority` -|`int` -|Optional: Defines the priority between IP address pools when more than one IP address pool matches a service or namespace. A lower number indicates a higher priority. - -|`namespaces` -|`array (string)` -|Optional: Specifies a list of namespaces that you can assign to IP addresses in an IP address pool. - -|`namespaceSelectors` -|`array (LabelSelector)` -|Optional: Specifies namespace labels that you can assign to IP addresses from an IP address pool by using label selectors in a list format. - -|`serviceSelectors` -|`array (LabelSelector)` -|Optional: Specifies service labels that you can assign to IP addresses from an address pool by using label selectors in a list format. - -|=== diff --git a/modules/nw-metallb-advertise-address-pool-with-bgp-advanced.adoc b/modules/nw-metallb-advertise-address-pool-with-bgp-advanced.adoc deleted file mode 100644 index 9c34407fee78..000000000000 --- a/modules/nw-metallb-advertise-address-pool-with-bgp-advanced.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-advertise-an-advanced-address-pool-configuration-bgp_{context}"] -= Example: Advertise an advanced address pool configuration with BGP - -Configure MetalLB as follows so that the `IPAddressPool` is advertised with the BGP protocol. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool. - -.. Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-bgp-adv - labels: - zone: east -spec: - addresses: - - 203.0.113.200/30 - - fc00:f853:ccd:e799::/124 - autoAssign: false ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. Create a BGP advertisement. - -.. 
Create a file, such as `bgpadvertisement1.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgpadvertisement-adv-1 - namespace: metallb-system -spec: - ipAddressPools: - - doc-example-bgp-adv - communities: - - 65535:65282 - aggregationLength: 32 - localPref: 100 ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement1.yaml ----- - -.. Create a file, such as `bgpadvertisement2.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgpadvertisement-adv-2 - namespace: metallb-system -spec: - ipAddressPools: - - doc-example-bgp-adv - communities: - - 8000:800 - aggregationLength: 30 - aggregationLengthV6: 124 ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement2.yaml ----- diff --git a/modules/nw-metallb-advertise-address-pool-with-bgp.adoc b/modules/nw-metallb-advertise-address-pool-with-bgp.adoc deleted file mode 100644 index 28b6cb675f31..000000000000 --- a/modules/nw-metallb-advertise-address-pool-with-bgp.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE - -[id="nw-metallb-advertise-a-basic-address-pool-configuration-bgp_{context}"] -= Example: Advertise a basic address pool configuration with BGP - -Configure MetalLB as follows so that the `IPAddressPool` is advertised with the BGP protocol. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool. - -.. Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-bgp-basic -spec: - addresses: - - 203.0.113.200/30 - - fc00:f853:ccd:e799::/124 ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. Create a BGP advertisement. - -.. Create a file, such as `bgpadvertisement.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgpadvertisement-basic - namespace: metallb-system -spec: - ipAddressPools: - - doc-example-bgp-basic ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement.yaml ----- \ No newline at end of file diff --git a/modules/nw-metallb-advertise-ip-pools-from-node-subset.adoc b/modules/nw-metallb-advertise-ip-pools-from-node-subset.adoc deleted file mode 100644 index a2516e9e9b7b..000000000000 --- a/modules/nw-metallb-advertise-ip-pools-from-node-subset.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE - -[id="nw-metallb-advertise-ip-pools-to-node-subset_{context}"] -= Advertising an IP address pool from a subset of nodes - -To advertise an IP address from an IP addresses pool, from a specific set of nodes only, use the `.spec.nodeSelector` specification in the BGPAdvertisement custom resource. This specification associates a pool of IP addresses with a set of nodes in the cluster. 
This is useful when you have nodes on different subnets in a cluster and you want to advertise an IP addresses from an address pool from a specific subnet, for example a public-facing subnet only. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool by using a custom resource: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: pool1 -spec: - addresses: - - 4.4.4.100-4.4.4.200 - - 2001:100:4::200-2001:100:4::400 ----- - -. Control which nodes in the cluster the IP address from `pool1` advertises from by defining the `.spec.nodeSelector` value in the BGPAdvertisement custom resource: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: example -spec: - ipAddressPools: - - pool1 - nodeSelector: - - matchLabels: - kubernetes.io/hostname: NodeA - - matchLabels: - kubernetes.io/hostname: NodeB ----- - -In this example, the IP address from `pool1` advertises from `NodeA` and `NodeB` only. diff --git a/modules/nw-metallb-bfdprofile-cr.adoc b/modules/nw-metallb-bfdprofile-cr.adoc deleted file mode 100644 index aa82d97ffe30..000000000000 --- a/modules/nw-metallb-bfdprofile-cr.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bfd-profiles.adoc - -:_content-type: REFERENCE -[id="nw-metallb-bfdprofile-cr_{context}"] -= About the BFD profile custom resource - -The fields for the BFD profile custom resource are described in the following table. - -.BFD profile custom resource -[cols="1,1,3a",options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the BFD profile custom resource. - -|`metadata.namespace` -|`string` -|Specifies the namespace for the BFD profile custom resource. - -|`spec.detectMultiplier` -|`integer` -|Specifies the detection multiplier to determine packet loss. -The remote transmission interval is multiplied by this value to determine the connection loss detection timer. - -For example, when the local system has the detect multiplier set to `3` and the remote system has the transmission interval set to `300`, the local system detects failures only after `900` ms without receiving packets. - -The range is `2` to `255`. -The default value is `3`. - -|`spec.echoMode` -|`boolean` -|Specifies the echo transmission mode. -If you are not using distributed BFD, echo transmission mode works only when the peer is also FRR. -The default value is `false` and echo transmission mode is disabled. - -When echo transmission mode is enabled, consider increasing the transmission interval of control packets to reduce bandwidth usage. -For example, consider increasing the transmit interval to `2000` ms. - -|`spec.echoInterval` -|`integer` -|Specifies the minimum transmission interval, less jitter, that this system uses to send and receive echo packets. -The range is `10` to `60000`. -The default value is `50` ms. - -|`spec.minimumTtl` -|`integer` -|Specifies the minimum expected TTL for an incoming control packet. -This field applies to multi-hop sessions only. - -The purpose of setting a minimum TTL is to make the packet validation requirements more stringent and avoid receiving control packets from other sessions. - -The default value is `254` and indicates that the system expects only one hop between this system and the peer. 
- -|`spec.passiveMode` -|`boolean` -|Specifies whether a session is marked as active or passive. -A passive session does not attempt to start the connection. -Instead, a passive session waits for control packets from a peer before it begins to reply. - -Marking a session as passive is useful when you have a router that acts as the central node of a star network and you want to avoid sending control packets that you do not need the system to send. - -The default value is `false` and marks the session as active. - -|`spec.receiveInterval` -|`integer` -|Specifies the minimum interval that this system is capable of receiving control packets. -The range is `10` to `60000`. -The default value is `300` ms. - -|`spec.transmitInterval` -|`integer` -|Specifies the minimum transmission interval, less jitter, that this system uses to send control packets. -The range is `10` to `60000`. -The default value is `300` ms. - -|=== diff --git a/modules/nw-metallb-bgp-limitations.adoc b/modules/nw-metallb-bgp-limitations.adoc deleted file mode 100644 index 6c972156ade5..000000000000 --- a/modules/nw-metallb-bgp-limitations.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-bgp-limitations_{context}"] -= Limitations for BGP mode - -[id="nw-metallb-bgp-limitations-break-connections_{context}"] -== Node failure can break all active connections - -MetalLB shares a limitation that is common to BGP-based load balancing. -When a BGP session terminates, such as when a node fails or when a `speaker` pod restarts, the session termination might result in resetting all active connections. -End users can experience a `Connection reset by peer` message. - -The consequence of a terminated BGP session is implementation-specific for each router manufacturer. -However, you can anticipate that a change in the number of `speaker` pods affects the number of BGP sessions and that active connections with BGP peers will break. - -To avoid or reduce the likelihood of a service interruption, you can specify a node selector when you add a BGP peer. -By limiting the number of nodes that start BGP sessions, a fault on a node that does not have a BGP session has no affect on connections to the service. - -[id="nw-metallb-bgp-limitations-single-asn_{context}"] -== Support for a single ASN and a single router ID only - -When you add a BGP peer custom resource, you specify the `spec.myASN` field to identify the Autonomous System Number (ASN) that MetalLB belongs to. -{product-title} uses an implementation of BGP with MetalLB that requires MetalLB to belong to a single ASN. -If you attempt to add a BGP peer and specify a different value for `spec.myASN` than an existing BGP peer custom resource, you receive an error. - -Similarly, when you add a BGP peer custom resource, the `spec.routerID` field is optional. -If you specify a value for this field, you must specify the same value for all other BGP peer custom resources that you add. - -The limitation to support a single ASN and single router ID is a difference with the community-supported implementation of MetalLB. 
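As a quick check before you add another BGP peer, you can list the ASN and router ID values that the existing BGP peer custom resources already use. The following command is a sketch, assuming the MetalLB Operator namespace is `metallb-system`:

[source,terminal]
----
$ oc get bgppeers -n metallb-system \
    -o custom-columns=NAME:.metadata.name,MY_ASN:.spec.myASN,ROUTER_ID:.spec.routerID
----

Reuse the same `spec.myASN` value, and the same `spec.routerID` value if one is set, in any new `BGPPeer` resource that you create.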
diff --git a/modules/nw-metallb-bgp.adoc b/modules/nw-metallb-bgp.adoc deleted file mode 100644 index a7f6450a26f5..000000000000 --- a/modules/nw-metallb-bgp.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -:_content-type: CONCEPT -[id="nw-metallb-bgp_{context}"] -= MetalLB concepts for BGP mode - -In BGP mode, by default each `speaker` pod advertises the load balancer IP address for a service to each BGP peer. It is also possible to advertise the IPs coming from a given pool to a specific set of peers by adding an optional list of BGP peers. -BGP peers are commonly network routers that are configured to use the BGP protocol. -When a router receives traffic for the load balancer IP address, the router picks one of the nodes with a `speaker` pod that advertised the IP address. -The router sends the traffic to that node. -After traffic enters the node, the service proxy for the CNI network plugin distributes the traffic to all the pods for the service. - -The directly-connected router on the same layer 2 network segment as the cluster nodes can be configured as a BGP peer. -If the directly-connected router is not configured as a BGP peer, you need to configure your network so that packets for load balancer IP addresses are routed between the BGP peers and the cluster nodes that run the `speaker` pods. - -Each time a router receives new traffic for the load balancer IP address, it creates a new connection to a node. -Each router manufacturer has an implementation-specific algorithm for choosing which node to initiate the connection with. -However, the algorithms commonly are designed to distribute traffic across the available nodes for the purpose of balancing the network load. - -If a node becomes unavailable, the router initiates a new connection with another node that has a `speaker` pod that advertises the load balancer IP address. - -.MetalLB topology diagram for BGP mode -image::209_OpenShift_BGP_0122.png["Speaker pods on host network 10.0.1.0/24 use BGP to advertise the load balancer IP address, 203.0.113.200, to a router."] - -The preceding graphic shows the following concepts related to MetalLB: - -* An application is available through a service that has an IPv4 cluster IP on the `172.130.0.0/16` subnet. -That IP address is accessible from inside the cluster. -The service also has an external IP address that MetalLB assigned to the service, `203.0.113.200`. - -* Nodes 2 and 3 have a pod for the application. - -* The `speaker` daemon set runs a pod on each node. -The MetalLB Operator starts these pods. -You can configure MetalLB to specify which nodes run the `speaker` pods. - -* Each `speaker` pod is a host-networked pod. -The IP address for the pod is identical to the IP address for the node on the host network. - -* Each `speaker` pod starts a BGP session with all BGP peers and advertises the load balancer IP addresses or aggregated routes to the BGP peers. -The `speaker` pods advertise that they are part of Autonomous System 65010. -The diagram shows a router, R1, as a BGP peer within the same Autonomous System. -However, you can configure MetalLB to start BGP sessions with peers that belong to other Autonomous Systems. - -* All the nodes with a `speaker` pod that advertises the load balancer IP address can receive traffic for the service. 
- -** If the external traffic policy for the service is set to `cluster`, all the nodes where a speaker pod is running advertise the `203.0.113.200` load balancer IP address and all the nodes with a `speaker` pod can receive traffic for the service. The host prefix is advertised to the router peer only if the external traffic policy is set to cluster. - -** If the external traffic policy for the service is set to `local`, then all the nodes where a `speaker` pod is running and at least an endpoint of the service is running can advertise the `203.0.113.200` load balancer IP address. Only those nodes can receive traffic for the service. In the preceding graphic, nodes 2 and 3 would advertise `203.0.113.200`. - -* You can configure MetalLB to control which `speaker` pods start BGP sessions with specific BGP peers by specifying a node selector when you add a BGP peer custom resource. - -* Any routers, such as R1, that are configured to use BGP can be set as BGP peers. - -* Client traffic is routed to one of the nodes on the host network. -After traffic enters the node, the service proxy sends the traffic to the application pod on the same node or another node according to the external traffic policy that you set for the service. - -* If a node becomes unavailable, the router detects the failure and initiates a new connection with another node. -You can configure MetalLB to use a Bidirectional Forwarding Detection (BFD) profile for BGP peers. -BFD provides faster link failure detection so that routers can initiate new connections earlier than without BFD. diff --git a/modules/nw-metallb-bgpadvertisement-cr.adoc b/modules/nw-metallb-bgpadvertisement-cr.adoc deleted file mode 100644 index 8cbc3366d6fb..000000000000 --- a/modules/nw-metallb-bgpadvertisement-cr.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: REFERENCE -[id="nw-metallb-bgpadvertisement-cr_{context}"] -= About the BGPAdvertisement custom resource - -The fields for the `BGPAdvertisements` object are defined in the following table: - -.BGPAdvertisements configuration -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the BGP advertisement. - -|`metadata.namespace` -|`string` -|Specifies the namespace for the BGP advertisement. -Specify the same namespace that the MetalLB Operator uses. - -|`spec.aggregationLength` -|`integer` -|Optional: Specifies the number of bits to include in a 32-bit CIDR mask. -To aggregate the routes that the speaker advertises to BGP peers, the mask is applied to the routes for several service IP addresses and the speaker advertises the aggregated route. -For example, with an aggregation length of `24`, the speaker can aggregate several `10.0.1.x/32` service IP addresses and advertise a single `10.0.1.0/24` route. - -|`spec.aggregationLengthV6` -|`integer` -|Optional: Specifies the number of bits to include in a 128-bit CIDR mask. -For example, with an aggregation length of `124`, the speaker can aggregate several `fc00:f853:0ccd:e799::x/128` service IP addresses and advertise a single `fc00:f853:0ccd:e799::0/124` route. - -|`spec.communities` -|`string` -|Optional: Specifies one or more BGP communities. -Each community is specified as two 16-bit values separated by the colon character. 
-Well-known communities must be specified as 16-bit values: - -* `NO_EXPORT`: `65535:65281` -* `NO_ADVERTISE`: `65535:65282` -* `NO_EXPORT_SUBCONFED`: `65535:65283` -+ -[NOTE] -==== -You can also use community objects that are created along with the strings. -==== - -|`spec.localPref` -|`integer` -|Optional: Specifies the local preference for this advertisement. -This BGP attribute applies to BGP sessions within the Autonomous System. - -|`spec.ipAddressPools` -|`string` -|Optional: The list of `IPAddressPools` to advertise with this advertisement, selected by name. - -|`spec.ipAddressPoolSelectors` -|`string` -|Optional: A selector for the `IPAddressPools` that gets advertised with this advertisement. This is for associating the `IPAddressPool` to the advertisement based on the label assigned to the `IPAddressPool` instead of the name itself. If no `IPAddressPool` is selected by this or by the list, the advertisement is applied to all the `IPAddressPools`. - -|`spec.nodeSelectors` -|`string` -|Optional: `NodeSelectors` allows to limit the nodes to announce as next hops for the load balancer IP. When empty, all the nodes are announced as next hops. - -|`spec.peers` -|`string` -|Optional: Peers limits the BGP peer to advertise the IPs of the selected pools to. When empty, the load balancer IP is announced to all the BGP peers configured. -|=== diff --git a/modules/nw-metallb-bgppeer-cr.adoc b/modules/nw-metallb-bgppeer-cr.adoc deleted file mode 100644 index e9f69cdcc641..000000000000 --- a/modules/nw-metallb-bgppeer-cr.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bgp-peers.adoc - -:_content-type: REFERENCE -[id="nw-metallb-bgppeer-cr_{context}"] -= About the BGP peer custom resource - -The fields for the BGP peer custom resource are described in the following table. - -.MetalLB BGP peer custom resource -[cols="1,1,3",options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the BGP peer custom resource. - -|`metadata.namespace` -|`string` -|Specifies the namespace for the BGP peer custom resource. - -|`spec.myASN` -|`integer` -|Specifies the Autonomous System number for the local end of the BGP session. -Specify the same value in all BGP peer custom resources that you add. -The range is `0` to `4294967295`. - -|`spec.peerASN` -|`integer` -|Specifies the Autonomous System number for the remote end of the BGP session. -The range is `0` to `4294967295`. - -|`spec.peerAddress` -|`string` -|Specifies the IP address of the peer to contact for establishing the BGP session. - -|`spec.sourceAddress` -|`string` -|Optional: Specifies the IP address to use when establishing the BGP session. -The value must be an IPv4 address. - -|`spec.peerPort` -|`integer` -|Optional: Specifies the network port of the peer to contact for establishing the BGP session. -The range is `0` to `16384`. - -|`spec.holdTime` -|`string` -|Optional: Specifies the duration for the hold time to propose to the BGP peer. -The minimum value is 3 seconds (`3s`). -The common units are seconds and minutes, such as `3s`, `1m`, and `5m30s`. -To detect path failures more quickly, also configure BFD. - -|`spec.keepaliveTime` -|`string` -|Optional: Specifies the maximum interval between sending keep-alive messages to the BGP peer. -If you specify this field, you must also specify a value for the `holdTime` field. -The specified value must be less than the value for the `holdTime` field. 
- -|`spec.routerID` -|`string` -|Optional: Specifies the router ID to advertise to the BGP peer. -If you specify this field, you must specify the same value in every BGP peer custom resource that you add. - -|`spec.password` -|`string` -|Optional: Specifies the MD5 password to send to the peer for routers that enforce TCP MD5 authenticated BGP sessions. - -|`spec.passwordSecret` -|`string` -|Optional: Specifies name of the authentication secret for the BGP Peer. The secret must live in the `metallb` namespace and be of type basic-auth. - -|`spec.bfdProfile` -|`string` -|Optional: Specifies the name of a BFD profile. - -|`spec.nodeSelectors` -|`object[]` -|Optional: Specifies a selector, using match expressions and match labels, to control which nodes can connect to the BGP peer. - -|`spec.ebgpMultiHop` -|`boolean` -|Optional: Specifies that the BGP peer is multiple network hops away. -If the BGP peer is not directly connected to the same network, the speaker cannot establish a BGP session unless this field is set to `true`. -This field applies to _external BGP_. -External BGP is the term that is used to describe when a BGP peer belongs to a different Autonomous System. - -|=== - -[NOTE] -==== -The `passwordSecret` field is mutually exclusive with the `password` field, and contains a reference to a secret containing the password to use. Setting both fields results in a failure of the parsing. -==== diff --git a/modules/nw-metallb-collecting-data.adoc b/modules/nw-metallb-collecting-data.adoc deleted file mode 100644 index d3d8b26ce8a7..000000000000 --- a/modules/nw-metallb-collecting-data.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-troubleshoot-support.adoc - -[id="nw-metallb-collecting-data_{context}"] -= About collecting MetalLB data - -You can use the `oc adm must-gather` CLI command to collect information about your cluster, your MetalLB configuration, and the MetalLB Operator. -The following features and objects are associated with MetalLB and the MetalLB Operator: - -* The namespace and child objects that the MetalLB Operator is deployed in - -* All MetalLB Operator custom resource definitions (CRDs) - -The `oc adm must-gather` CLI command collects the following information from FRRouting (FRR) that Red Hat uses to implement BGP and BFD: - -* `/etc/frr/frr.conf` -* `/etc/frr/frr.log` -* `/etc/frr/daemons` configuration file -* `/etc/frr/vtysh.conf` - -The log and configuration files in the preceding list are collected from the `frr` container in each `speaker` pod. - -In addition to the log and configuration files, the `oc adm must-gather` CLI command collects the output from the following `vtysh` commands: - -* `show running-config` -* `show bgp ipv4` -* `show bgp ipv6` -* `show bgp neighbor` -* `show bfd peer` - -No additional configuration is required when you run the `oc adm must-gather` CLI command. diff --git a/modules/nw-metallb-community-cr.adoc b/modules/nw-metallb-community-cr.adoc deleted file mode 100644 index 9ab2aa679b21..000000000000 --- a/modules/nw-metallb-community-cr.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-community-alias.adoc - -:_content-type: REFERENCE -[id="nw-metallb-community-cr_{context}"] -= About the community custom resource - -The `community` custom resource is a collection of aliases for communities. 
Users can define named aliases to be used when advertising `ipAddressPools` using the `BGPAdvertisement`. The fields for the `community` custom resource are described in the following table. - -[NOTE] -==== -The `community` CRD applies only to BGPAdvertisement. -==== - - -.MetalLB community custom resource -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the `community`. - -|`metadata.namespace` -|`string` -|Specifies the namespace for the `community`. -Specify the same namespace that the MetalLB Operator uses. - -|`spec.communities` -|`string` -|Specifies a list of BGP community aliases that can be used in BGPAdvertisements. A community alias consists of a pair of name (alias) and value (number:number). Link the BGPAdvertisement to a community alias by referring to the alias name in its `spec.communities` field. - -|=== - -.CommunityAlias -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`name` -|`string` -|The name of the alias for the `community`. - -|`value` -|`string` -|The BGP `community` value corresponding to the given name. -|=== \ No newline at end of file diff --git a/modules/nw-metallb-configure-address-pool.adoc b/modules/nw-metallb-configure-address-pool.adoc deleted file mode 100644 index f18ea8cd3617..000000000000 --- a/modules/nw-metallb-configure-address-pool.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: PROCEDURE -[id="nw-metallb-configure-address-pool_{context}"] -= Configuring an address pool - -As a cluster administrator, you can add address pools to your cluster to control the IP addresses that MetalLB can assign to load-balancer services. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example - labels: <1> - zone: east -spec: - addresses: - - 203.0.113.1-203.0.113.10 - - 203.0.113.65-203.0.113.75 ----- -<1> This label assigned to the `IPAddressPool` can be referenced by the `ipAddressPoolSelectors` in the `BGPAdvertisement` CRD to associate the `IPAddressPool` with the advertisement. - -. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -.Verification - -* View the address pool: -+ -[source,terminal] ----- -$ oc describe -n metallb-system IPAddressPool doc-example ----- -+ -.Example output -[source,terminal] ----- -Name: doc-example -Namespace: metallb-system -Labels: zone=east -Annotations: <none> -API Version: metallb.io/v1beta1 -Kind: IPAddressPool -Metadata: - ... -Spec: - Addresses: - 203.0.113.1-203.0.113.10 - 203.0.113.65-203.0.113.75 - Auto Assign: true -Events: <none> ----- - -Confirm that the address pool name, such as `doc-example`, and the IP address ranges appear in the output. 
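As a further check, you can create a `LoadBalancer` service that explicitly requests an address from the pool by using the `metallb.universe.tf/address-pool` annotation. The following service definition is an illustrative sketch; the service name, selector, and ports are placeholders:

[source,terminal]
----
$ oc apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: test-doc-example
  annotations:
    metallb.universe.tf/address-pool: doc-example
spec:
  selector:
    app: test
  ports:
  - port: 80
    targetPort: 8080
  type: LoadBalancer
EOF
----

The `EXTERNAL-IP` value that `oc get service test-doc-example` reports must fall within one of the ranges that are defined in the `doc-example` pool.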
diff --git a/modules/nw-metallb-configure-bfdprofle.adoc b/modules/nw-metallb-configure-bfdprofle.adoc deleted file mode 100644 index 6421eaf4402f..000000000000 --- a/modules/nw-metallb-configure-bfdprofle.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bfd-profiles.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-bfdprofile_{context}"] -= Configuring a BFD profile - -As a cluster administrator, you can add a BFD profile and configure a BGP peer to use the profile. BFD provides faster path failure detection than BGP alone. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a file, such as `bfdprofile.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BFDProfile -metadata: - name: doc-example-bfd-profile-full - namespace: metallb-system -spec: - receiveInterval: 300 - transmitInterval: 300 - detectMultiplier: 3 - echoMode: false - passiveMode: true - minimumTtl: 254 ----- - -. Apply the configuration for the BFD profile: -+ -[source,terminal] ----- -$ oc apply -f bfdprofile.yaml ----- diff --git a/modules/nw-metallb-configure-bgp-advertisement-advanced.adoc b/modules/nw-metallb-configure-bgp-advertisement-advanced.adoc deleted file mode 100644 index 5855fedb6b05..000000000000 --- a/modules/nw-metallb-configure-bgp-advertisement-advanced.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: CONCEPT -[id="nw-metallb-configure-BGP-advertisement-advanced-use-case_{context}"] -= Configuring MetalLB with a BGP advertisement and an advanced use case - -Configure MetalLB as follows so that MetalLB assigns IP addresses to load-balancer services in the ranges between `203.0.113.200` and `203.0.113.203` and between `fc00:f853:ccd:e799::0` and `fc00:f853:ccd:e799::f`. - -To explain the two BGP advertisements, consider an instance when MetalLB assigns the IP address of `203.0.113.200` to a service. -With that IP address as an example, the speaker advertises two routes to BGP peers: - -* `203.0.113.200/32`, with `localPref` set to `100` and the community set to the numeric value of the `NO_ADVERTISE` community. -This specification indicates to the peer routers that they can use this route but they should not propagate information about this route to BGP peers. - -* `203.0.113.200/30`, aggregates the load-balancer IP addresses assigned by MetalLB into a single route. -MetalLB advertises the aggregated route to BGP peers with the community attribute set to `8000:800`. -BGP peers propagate the `203.0.113.200/30` route to other BGP peers. -When traffic is routed to a node with a speaker, the `203.0.113.200/32` route is used to forward the traffic into the cluster and to a pod that is associated with the service. - -As you add more services and MetalLB assigns more load-balancer IP addresses from the pool, peer routers receive one local route, `203.0.113.20x/32`, for each service, as well as the `203.0.113.200/30` aggregate route. -Each service that you add generates the `/30` route, but MetalLB deduplicates the routes to one BGP advertisement before communicating with peer routers. 
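If you want to see the routes exactly as a speaker advertises them, you can query FRRouting inside a `speaker` pod. The following command is a verification sketch; the pod name is a placeholder, and the `frr` container is the same container that the `oc adm must-gather` data is collected from:

[source,terminal]
----
$ oc -n metallb-system exec -it <speaker_pod_name> -c frr -- vtysh -c "show bgp ipv4"
----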
\ No newline at end of file diff --git a/modules/nw-metallb-configure-bgp-advertisement.adoc b/modules/nw-metallb-configure-bgp-advertisement.adoc deleted file mode 100644 index 4b9a49e2d642..000000000000 --- a/modules/nw-metallb-configure-bgp-advertisement.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: CONCEPT -[id="nw-metallb-configure-BGP-advertisement-basic-use-case_{context}"] -= Configuring MetalLB with a BGP advertisement and a basic use case - -Configure MetalLB as follows so that the peer BGP routers receive one `203.0.113.200/32` route and one `fc00:f853:ccd:e799::1/128` route for each load-balancer IP address that MetalLB assigns to a service. -Because the `localPref` and `communities` fields are not specified, the routes are advertised with `localPref` set to zero and no BGP communities. diff --git a/modules/nw-metallb-configure-bgppeer.adoc b/modules/nw-metallb-configure-bgppeer.adoc deleted file mode 100644 index 54c334d5d94b..000000000000 --- a/modules/nw-metallb-configure-bgppeer.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bgp-peers.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-bgppeer_{context}"] -= Configuring a BGP peer - -As a cluster administrator, you can add a BGP peer custom resource to exchange routing information with network routers and advertise the IP addresses for services. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Configure MetalLB with a BGP advertisement. - -.Procedure - -. Create a file, such as `bgppeer.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - namespace: metallb-system - name: doc-example-peer -spec: - peerAddress: 10.0.0.1 - peerASN: 64501 - myASN: 64500 - routerID: 10.10.10.10 ----- - -. Apply the configuration for the BGP peer: -+ -[source,terminal] ----- -$ oc apply -f bgppeer.yaml ----- diff --git a/modules/nw-metallb-configure-community-bgp-advertisement.adoc b/modules/nw-metallb-configure-community-bgp-advertisement.adoc deleted file mode 100644 index 839c0904dfd3..000000000000 --- a/modules/nw-metallb-configure-community-bgp-advertisement.adoc +++ /dev/null @@ -1,115 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-community-alias.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-BGP-advertisement-community-alias_{context}"] -= Configuring MetalLB with a BGP advertisement and community alias - -Configure MetalLB as follows so that the `IPAddressPool` is advertised with the BGP protocol and the community alias set to the numeric value of the NO_ADVERTISE community. - -In the following example, the peer BGP router `doc-example-peer-community` receives one `203.0.113.200/32` route and one `fc00:f853:ccd:e799::1/128` route for each load-balancer IP address that MetalLB assigns to a service. A community alias is configured with the `NO_ADVERTISE` community. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - - -.Procedure - -. Create an IP address pool. - -.. 
Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-bgp-community -spec: - addresses: - - 203.0.113.200/30 - - fc00:f853:ccd:e799::/124 ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. Create a community alias named `community1`. -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: Community -metadata: - name: community1 - namespace: metallb-system -spec: - communities: - - name: NO_ADVERTISE - value: '65535:65282' ----- - -. Create a BGP peer named `doc-example-bgp-peer`. - -.. Create a file, such as `bgppeer.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - namespace: metallb-system - name: doc-example-bgp-peer -spec: - peerAddress: 10.0.0.1 - peerASN: 64501 - myASN: 64500 - routerID: 10.10.10.10 ----- - -.. Apply the configuration for the BGP peer: -+ -[source,terminal] ----- -$ oc apply -f bgppeer.yaml ----- - -. Create a BGP advertisement with the community alias. - -.. Create a file, such as `bgpadvertisement.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgp-community-sample - namespace: metallb-system -spec: - aggregationLength: 32 - aggregationLengthV6: 128 - communities: - - NO_ADVERTISE <1> - ipAddressPools: - - doc-example-bgp-community - peers: - - doc-example-bgp-peer ----- -+ -<1> Specify the `CommunityAlias.name` here and not the community custom resource (CR) name. - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement.yaml ----- diff --git a/modules/nw-metallb-configure-l2-advertisement-interface.adoc b/modules/nw-metallb-configure-l2-advertisement-interface.adoc deleted file mode 100644 index 99b995dba647..000000000000 --- a/modules/nw-metallb-configure-l2-advertisement-interface.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-with-L2-advertisement-interface_{context}"] -= Configuring MetalLB with an L2 advertisement for selected interfaces - -By default, the IP addresses from the IP address pool that is assigned to a service are advertised from all the network interfaces. You can use the `interfaces` field in the `L2Advertisement` custom resource definition to restrict the network interfaces that advertise the IP address pool. - -This example shows how to configure MetalLB so that the IP address pool is advertised only from the network interfaces listed in the `interfaces` field of all nodes. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -* You are logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool. - -.. Create a file, such as `ipaddresspool.yaml`, and enter the configuration details like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-l2 -spec: - addresses: - - 4.4.4.0/24 - autoAssign: false ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. 
Create a L2 advertisement advertising the IP with `interfaces` selector. - -.. Create a YAML file, such as `l2advertisement.yaml`, and enter the configuration details like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: l2advertisement - namespace: metallb-system -spec: - ipAddressPools: - - doc-example-l2 - interfaces: - - interfaceA - - interfaceB ----- - -.. Apply the configuration for the advertisement like the following example: -+ -[source,terminal] ----- -$ oc apply -f l2advertisement.yaml ----- - -[IMPORTANT] -==== -The interface selector does not affect how MetalLB chooses the node to announce a given IP by using L2. The chosen node does not announce the service if the node does not have the selected interface. -==== \ No newline at end of file diff --git a/modules/nw-metallb-configure-l2-advertisement-label.adoc b/modules/nw-metallb-configure-l2-advertisement-label.adoc deleted file mode 100644 index be83f5fefc95..000000000000 --- a/modules/nw-metallb-configure-l2-advertisement-label.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-with-L2-advertisement-label_{context}"] -= Configuring MetalLB with a L2 advertisement and label - -The `ipAddressPoolSelectors` field in the `BGPAdvertisement` and `L2Advertisement` custom resource definitions is used to associate the `IPAddressPool` to the advertisement based on the label assigned to the `IPAddressPool` instead of the name itself. - -This example shows how to configure MetalLB so that the `IPAddressPool` is advertised with the L2 protocol by configuring the `ipAddressPoolSelectors` field. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool. - -.. Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-l2-label - labels: - zone: east -spec: - addresses: - - 172.31.249.87/32 ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. Create a L2 advertisement advertising the IP using `ipAddressPoolSelectors`. - -.. Create a file, such as `l2advertisement.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: l2advertisement-label - namespace: metallb-system -spec: - ipAddressPoolSelectors: - - matchExpressions: - - key: zone - operator: In - values: - - east ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f l2advertisement.yaml ----- diff --git a/modules/nw-metallb-configure-l2-advertisement.adoc b/modules/nw-metallb-configure-l2-advertisement.adoc deleted file mode 100644 index 973a1e509f6d..000000000000 --- a/modules/nw-metallb-configure-l2-advertisement.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-with-L2-advertisement_{context}"] -= Configuring MetalLB with an L2 advertisement - -Configure MetalLB as follows so that the `IPAddressPool` is advertised with the L2 protocol. 
- -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create an IP address pool. - -.. Create a file, such as `ipaddresspool.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: doc-example-l2 -spec: - addresses: - - 4.4.4.0/24 - autoAssign: false ----- - -.. Apply the configuration for the IP address pool: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool.yaml ----- - -. Create a L2 advertisement. - -.. Create a file, such as `l2advertisement.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: l2advertisement - namespace: metallb-system -spec: - ipAddressPools: - - doc-example-l2 ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f l2advertisement.yaml ----- diff --git a/modules/nw-metallb-configure-specificpools-to-bgppeer.adoc b/modules/nw-metallb-configure-specificpools-to-bgppeer.adoc deleted file mode 100644 index d8513b9af989..000000000000 --- a/modules/nw-metallb-configure-specificpools-to-bgppeer.adoc +++ /dev/null @@ -1,178 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bgp-peers.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-example-assign-specific-address-pools-specific-bgp-peers_{context}"] -= Configure a specific set of BGP peers for a given address pool - -This procedure illustrates how to: - -* Configure a set of address pools (`pool1` and `pool2`). -* Configure a set of BGP peers (`peer1` and `peer2`). -* Configure BGP advertisement to assign `pool1` to `peer1` and `pool2` to `peer2`. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create address pool `pool1`. - -.. Create a file, such as `ipaddresspool1.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: pool1 -spec: - addresses: - - 4.4.4.100-4.4.4.200 - - 2001:100:4::200-2001:100:4::400 ----- - -.. Apply the configuration for the IP address pool `pool1`: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool1.yaml ----- - -. Create address pool `pool2`. - -.. Create a file, such as `ipaddresspool2.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - namespace: metallb-system - name: pool2 -spec: - addresses: - - 5.5.5.100-5.5.5.200 - - 2001:100:5::200-2001:100:5::400 ----- - -.. Apply the configuration for the IP address pool `pool2`: -+ -[source,terminal] ----- -$ oc apply -f ipaddresspool2.yaml ----- -. Create BGP `peer1`. - -.. Create a file, such as `bgppeer1.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - namespace: metallb-system - name: peer1 -spec: - peerAddress: 10.0.0.1 - peerASN: 64501 - myASN: 64500 - routerID: 10.10.10.10 ----- - -.. Apply the configuration for the BGP peer: -+ -[source,terminal] ----- -$ oc apply -f bgppeer1.yaml ----- - -. Create BGP `peer2`. - -.. 
Create a file, such as `bgppeer2.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - namespace: metallb-system - name: peer2 -spec: - peerAddress: 10.0.0.2 - peerASN: 64501 - myASN: 64500 - routerID: 10.10.10.10 ----- - -.. Apply the configuration for the BGP peer2: -+ -[source,terminal] ----- -$ oc apply -f bgppeer2.yaml ----- - -. Create BGP advertisement 1. - -.. Create a file, such as `bgpadvertisement1.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgpadvertisement-1 - namespace: metallb-system -spec: - ipAddressPools: - - pool1 - peers: - - peer1 - communities: - - 65535:65282 - aggregationLength: 32 - aggregationLengthV6: 128 - localPref: 100 ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement1.yaml ----- - -. Create BGP advertisement 2. - -.. Create a file, such as `bgpadvertisement2.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: BGPAdvertisement -metadata: - name: bgpadvertisement-2 - namespace: metallb-system -spec: - ipAddressPools: - - pool2 - peers: - - peer2 - communities: - - 65535:65282 - aggregationLength: 32 - aggregationLengthV6: 128 - localPref: 100 ----- - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc apply -f bgpadvertisement2.yaml ----- diff --git a/modules/nw-metallb-configure-svc.adoc b/modules/nw-metallb-configure-svc.adoc deleted file mode 100644 index a13da551b649..000000000000 --- a/modules/nw-metallb-configure-svc.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/nw-metalb-configure-svc.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-configure-svc_{context}"] -= Configuring a service with MetalLB - -You can configure a load-balancing service to use an external IP address from an address pool. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Install the MetalLB Operator and start MetalLB. - -* Configure at least one address pool. - -* Configure your network to route traffic from the clients to the host network for the cluster. - -.Procedure - -. Create a `<service_name>.yaml` file. In the file, ensure that the `spec.type` field is set to `LoadBalancer`. -+ -Refer to the examples for information about how to request the external IP address that MetalLB assigns to the service. - -. 
Create the service: -+ -[source,terminal] ----- -$ oc apply -f <service_name>.yaml ----- -+ -.Example output -[source,terminal] ----- -service/<service_name> created ----- - -.Verification - -* Describe the service: -+ -[source,terminal] ----- -$ oc describe service <service_name> ----- -+ -.Example output ----- -Name: <service_name> -Namespace: default -Labels: <none> -Annotations: metallb.universe.tf/address-pool: doc-example <.> -Selector: app=service_name -Type: LoadBalancer <.> -IP Family Policy: SingleStack -IP Families: IPv4 -IP: 10.105.237.254 -IPs: 10.105.237.254 -LoadBalancer Ingress: 192.168.100.5 <.> -Port: <unset> 80/TCP -TargetPort: 8080/TCP -NodePort: <unset> 30550/TCP -Endpoints: 10.244.0.50:8080 -Session Affinity: None -External Traffic Policy: Cluster -Events: <.> - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal nodeAssigned 32m (x2 over 32m) metallb-speaker announcing from node "<node_name>" ----- -<.> The annotation is present if you request an IP address from a specific pool. -<.> The service type must indicate `LoadBalancer`. -<.> The load-balancer ingress field indicates the external IP address if the service is assigned correctly. -<.> The events field indicates the node name that is assigned to announce the external IP address. -If you experience an error, the events field indicates the reason for the error. diff --git a/modules/nw-metallb-example-addresspool.adoc b/modules/nw-metallb-example-addresspool.adoc deleted file mode 100644 index 4903bdbeffcf..000000000000 --- a/modules/nw-metallb-example-addresspool.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-address-pools.adoc - -[id="nw-metallb-example-addresspool_{context}"] -= Example address pool configurations - -== Example: IPv4 and CIDR ranges - -You can specify a range of IP addresses in CIDR notation. -You can combine CIDR notation with the notation that uses a hyphen to separate lower and upper bounds. - -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: doc-example-cidr - namespace: metallb-system -spec: - addresses: - - 192.168.100.0/24 - - 192.168.200.0/24 - - 192.168.255.1-192.168.255.5 ----- - -== Example: Reserve IP addresses - -You can set the `autoAssign` field to `false` to prevent MetalLB from automatically assigning the IP addresses from the pool. -When you add a service, you can request a specific IP address from the pool or you can specify the pool name in an annotation to request any IP address from the pool. - -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: doc-example-reserved - namespace: metallb-system -spec: - addresses: - - 10.0.100.0/28 - autoAssign: false ----- - -== Example: IPv4 and IPv6 addresses - -You can add address pools that use IPv4 and IPv6. -You can specify multiple ranges in the `addresses` list, just like several IPv4 examples. - -Whether the service is assigned a single IPv4 address, a single IPv6 address, or both is determined by how you add the service. -The `spec.ipFamilies` and `spec.ipFamilyPolicy` fields control how IP addresses are assigned to the service. 
- -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: doc-example-combined - namespace: metallb-system -spec: - addresses: - - 10.0.100.0/28 - - 2002:2:2::1-2002:2:2::100 ----- - -== Example: Assign IP address pools to services or namespaces -You can assign IP addresses from an `IPAddressPool` to services and namespaces that you specify. - -If you assign a service or namespace to more than one IP address pool, MetalLB uses an available IP address from the higher-priority IP address pool. If no IP addresses are available from the assigned IP address pools with a high priority, MetalLB uses available IP addresses from an IP address pool with lower priority or no priority. - -[NOTE] -==== -You can use the `matchLabels` label selector, the `matchExpressions` label selector, or both, for the `namespaceSelectors` and `serviceSelectors` specifications. This example demonstrates one label selector for each specification. -==== - -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: doc-example-service-allocation - namespace: metallb-system -spec: - addresses: - - 192.168.20.0/24 - serviceAllocation: - priority: 50 <1> - namespaces: <2> - - namespace-a - - namespace-b - namespaceSelectors: <3> - - matchLabels: - zone: east - serviceSelectors: <4> - - matchExpressions: - - key: security - operator: In - values: - - S1 ----- -<1> Assign a priority to the address pool. A lower number indicates a higher priority. -<2> Assign one or more namespaces to the IP address pool in a list format. -<3> Assign one or more namespace labels to the IP address pool by using label selectors in a list format. -<4> Assign one or more service labels to the IP address pool by using label selectors in a list format. - diff --git a/modules/nw-metallb-example-bgppeer.adoc b/modules/nw-metallb-example-bgppeer.adoc deleted file mode 100644 index 48d1bdb0325c..000000000000 --- a/modules/nw-metallb-example-bgppeer.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-configure-bgp-peers.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-example-bgppeer_{context}"] -= Example BGP peer configurations - -[id="nw-metallb-example-limit-nodes-bgppeer_{context}"] -== Example: Limit which nodes connect to a BGP peer - -You can specify the node selectors field to control which nodes can connect to a BGP peer. - -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - name: doc-example-nodesel - namespace: metallb-system -spec: - peerAddress: 10.0.20.1 - peerASN: 64501 - myASN: 64500 - nodeSelectors: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: [compute-1.example.com, compute-2.example.com] ----- - -[id="nw-metallb-example-specify-bfd-profile_{context}"] -== Example: Specify a BFD profile for a BGP peer - -You can specify a BFD profile to associate with BGP peers. -BFD compliments BGP by providing more rapid detection of communication failures between peers than BGP alone. - -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - name: doc-example-peer-bfd - namespace: metallb-system -spec: - peerAddress: 10.0.20.1 - peerASN: 64501 - myASN: 64500 - holdTime: "10s" - bfdProfile: doc-example-bfd-profile-full ----- -//Dependency on RHEL bug 2054160 being addressed.Remove note when fixed. 
-[NOTE] -==== -Deleting the bidirectional forwarding detection (BFD) profile and removing the `bfdProfile` added to the border gateway protocol (BGP) peer resource does not disable the BFD. Instead, the BGP peer starts using the default BFD profile. To disable BFD from a BGP peer resource, delete the BGP peer configuration and recreate it without a BFD profile. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=2050824[*BZ#2050824*]. -==== - -[id="nw-metallb-example-dual-stack_{context}"] -== Example: Specify BGP peers for dual-stack networking - -To support dual-stack networking, add one BGP peer custom resource for IPv4 and one BGP peer custom resource for IPv6. - -[source,yaml] ----- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - name: doc-example-dual-stack-ipv4 - namespace: metallb-system -spec: - peerAddress: 10.0.20.1 - peerASN: 64500 - myASN: 64500 ---- -apiVersion: metallb.io/v1beta2 -kind: BGPPeer -metadata: - name: doc-example-dual-stack-ipv6 - namespace: metallb-system -spec: - peerAddress: 2620:52:0:88::104 - peerASN: 64500 - myASN: 64500 ----- diff --git a/modules/nw-metallb-extern-traffic-pol.adoc b/modules/nw-metallb-extern-traffic-pol.adoc deleted file mode 100644 index 2089879e0a9e..000000000000 --- a/modules/nw-metallb-extern-traffic-pol.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-extern-traffic-pol_{context}"] -= MetalLB and external traffic policy - -With layer 2 mode, one node in your cluster receives all the traffic for the service IP address. -With BGP mode, a router on the host network opens a connection to one of the nodes in the cluster for a new client connection. -How your cluster handles the traffic after it enters the node is affected by the external traffic policy. - -`cluster`:: -This is the default value for `spec.externalTrafficPolicy`. -+ -With the `cluster` traffic policy, after the node receives the traffic, the service proxy distributes the traffic to all the pods in your service. -This policy provides uniform traffic distribution across the pods, but it obscures the client IP address and it can appear to the application in your pods that the traffic originates from the node rather than the client. - -`local`:: -With the `local` traffic policy, after the node receives the traffic, the service proxy only sends traffic to the pods on the same node. -For example, if the `speaker` pod on node A announces the external service IP, then all traffic is sent to node A. -After the traffic enters node A, the service proxy only sends traffic to pods for the service that are also on node A. -Pods for the service that are on additional nodes do not receive any traffic from node A. -Pods for the service on additional nodes act as replicas in case failover is needed. -+ -This policy does not affect the client IP address. -Application pods can determine the client IP address from the incoming connections. - -[NOTE] -==== -The following information is important when configuring the external traffic policy in BGP mode. - -Although MetalLB advertises the load balancer IP address from all the eligible nodes, the number of nodes loadbalancing the service can be limited by the capacity of the router to establish equal-cost multipath (ECMP) routes. If the number of nodes advertising the IP is greater than the ECMP group limit of the router, the router will use less nodes than the ones advertising the IP. 
- -For example, if the external traffic policy is set to `local` and the router has an ECMP group limit set to 16 and the pods implementing a LoadBalancer service are deployed on 30 nodes, this would result in pods deployed on 14 nodes not receiving any traffic. In this situation, it would be preferable to set the external traffic policy for the service to `cluster`. -==== - diff --git a/modules/nw-metallb-infra-considerations.adoc b/modules/nw-metallb-infra-considerations.adoc deleted file mode 100644 index 3f4457a945a5..000000000000 --- a/modules/nw-metallb-infra-considerations.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-infra-considerations_{context}"] -= Infrastructure considerations for MetalLB - -MetalLB is primarily useful for on-premise, bare metal installations because these installations do not include a native load-balancer capability. -In addition to bare metal installations, installations of {product-title} on some infrastructures might not include a native load-balancer capability. -For example, the following infrastructures can benefit from adding the MetalLB Operator: - -* Bare metal - -* VMware vSphere - -* {ibmzProductName} and {linuxoneProductName} - -* {ibmzProductName} and {linuxoneProductName} for {op-system-base-full} KVM - -* {ibmpowerProductName} - -MetalLB Operator and MetalLB are supported with the OpenShift SDN and OVN-Kubernetes network providers. - diff --git a/modules/nw-metallb-installing-operator-cli.adoc b/modules/nw-metallb-installing-operator-cli.adoc deleted file mode 100644 index fb7712863de7..000000000000 --- a/modules/nw-metallb-installing-operator-cli.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-installing-operator-cli_{context}"] -= Installing from OperatorHub using the CLI - -Instead of using the {product-title} web console, you can install an Operator from OperatorHub using the CLI. You can use the OpenShift CLI (`oc`) to install the MetalLB Operator. - -It is recommended that when using the CLI you install the Operator in the `metallb-system` namespace. - -.Prerequisites - -* A cluster installed on bare-metal hardware. -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the MetalLB Operator by entering the following command: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: v1 -kind: Namespace -metadata: - name: metallb-system -EOF ----- - -. Create an Operator group custom resource (CR) in the namespace: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: metallb-operator - namespace: metallb-system -EOF ----- - -. Confirm the Operator group is installed in the namespace: -+ -[source,terminal] ----- -$ oc get operatorgroup -n metallb-system ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -metallb-operator 14m ----- - -. Create a `Subscription` CR: -.. 
Define the `Subscription` CR and save the YAML file, for example, `metallb-sub.yaml`: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: metallb-operator-sub - namespace: metallb-system -spec: - channel: stable - name: metallb-operator - source: redhat-operators <1> - sourceNamespace: openshift-marketplace ----- -<1> You must specify the `redhat-operators` value. - -.. To create the `Subscription` CR, run the following command: -+ -[source,terminal] ----- -$ oc create -f metallb-sub.yaml ----- - -. Optional: To ensure BGP and BFD metrics appear in Prometheus, you can label the namespace as in the following command: -+ -[source,terminal] ----- -$ oc label ns metallb-system "openshift.io/cluster-monitoring=true" ----- - -.Verification - -The verification steps assume the MetalLB Operator is installed in the `metallb-system` namespace. - -. Confirm the install plan is in the namespace: -+ -[source,terminal] ----- -$ oc get installplan -n metallb-system ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -NAME CSV APPROVAL APPROVED -install-wzg94 metallb-operator.{product-version}.0-nnnnnnnnnnnn Automatic true ----- -+ -[NOTE] -==== -Installation of the Operator might take a few seconds. -==== - -. To verify that the Operator is installed, enter the following command: -+ -[source,terminal] ----- -$ oc get clusterserviceversion -n metallb-system \ - -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- -+ -.Example output -[source,terminal,subs="attributes+"] ----- -Name Phase -metallb-operator.{product-version}.0-nnnnnnnnnnnn Succeeded ----- \ No newline at end of file diff --git a/modules/nw-metallb-l2padvertisement-cr.adoc b/modules/nw-metallb-l2padvertisement-cr.adoc deleted file mode 100644 index 740cd40de8cc..000000000000 --- a/modules/nw-metallb-l2padvertisement-cr.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-advertising-ipaddresspool.adoc - -:_content-type: REFERENCE -[id="nw-metallb-l2padvertisement-cr_{context}"] -= About the L2Advertisement custom resource - -The fields for the `l2Advertisements` object are defined in the following table: - -.L2 advertisements configuration -[cols="1,1,3a", options="header"] -|=== - -|Field -|Type -|Description - -|`metadata.name` -|`string` -|Specifies the name for the L2 advertisement. - -|`metadata.namespace` -|`string` -|Specifies the namespace for the L2 advertisement. -Specify the same namespace that the MetalLB Operator uses. - -|`spec.ipAddressPools` -|`string` -|Optional: The list of `IPAddressPools` to advertise with this advertisement, selected by name. - -|`spec.ipAddressPoolSelectors` -|`string` -|Optional: A selector for the `IPAddressPools` that gets advertised with this advertisement. This is for associating the `IPAddressPool` to the advertisement based on the label assigned to the `IPAddressPool` instead of the name itself. If no `IPAddressPool` is selected by this or by the list, the advertisement is applied to all the `IPAddressPools`. - -|`spec.nodeSelectors` -|`string` -|Optional: `NodeSelectors` limits the nodes to announce as next hops for the load balancer IP. When empty, all the nodes are announced as next hops. - -:FeatureName: Limiting the nodes to announce as next hops -include::snippets/technology-preview.adoc[leveloffset=+1] - -|`spec.interfaces` -|`string` -|Optional: The list of `interfaces` that are used to announce the load balancer IP. 
- -|=== diff --git a/modules/nw-metallb-layer2-limitations.adoc b/modules/nw-metallb-layer2-limitations.adoc deleted file mode 100644 index 058df875bdb8..000000000000 --- a/modules/nw-metallb-layer2-limitations.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-layer2-limitations_{context}"] -= Limitations for layer 2 mode - -[id="nw-metallb-layer2-limitations-bottleneck_{context}"] -== Single-node bottleneck - -MetalLB routes all traffic for a service through a single node, the node can become a bottleneck and limit performance. - -Layer 2 mode limits the ingress bandwidth for your service to the bandwidth of a single node. -This is a fundamental limitation of using ARP and NDP to direct traffic. - -[id="nw-metallb-layer2-limitations-failover_{context}"] -== Slow failover performance - -Failover between nodes depends on cooperation from the clients. -When a failover occurs, MetalLB sends gratuitous ARP packets to notify clients that the MAC address associated with the service IP has changed. - -Most client operating systems handle gratuitous ARP packets correctly and update their neighbor caches promptly. -When clients update their caches quickly, failover completes within a few seconds. -Clients typically fail over to a new node within 10 seconds. -However, some client operating systems either do not handle gratuitous ARP packets at all or have outdated implementations that delay the cache update. - -Recent versions of common operating systems such as Windows, macOS, and Linux implement layer 2 failover correctly. -Issues with slow failover are not expected except for older and less common client operating systems. - -// FIXME: I think "leadership" is from an old algorithm. -// If there is a way to perform a planned failover, let's cover it. `oc drain`? -To minimize the impact from a planned failover on outdated clients, keep the old node running for a few minutes after flipping leadership. -The old node can continue to forward traffic for outdated clients until their caches refresh. - -During an unplanned failover, the service IPs are unreachable until the outdated clients refresh their cache entries. - diff --git a/modules/nw-metallb-layer2.adoc b/modules/nw-metallb-layer2.adoc deleted file mode 100644 index b2162982b7b8..000000000000 --- a/modules/nw-metallb-layer2.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -:_content-type: CONCEPT - -[id="nw-metallb-layer2_{context}"] -= MetalLB concepts for layer 2 mode - -In layer 2 mode, the `speaker` pod on one node announces the external IP address for a service to the host network. -From a network perspective, the node appears to have multiple IP addresses assigned to a network interface. - -[NOTE] -==== -In layer 2 mode, MetalLB relies on ARP and NDP. These protocols implement local address resolution within a specific subnet. In this context, the client must be able to reach the VIP assigned by MetalLB that exists on the same subnet as the nodes announcing the service in order for MetalLB to work. -==== - -The `speaker` pod responds to ARP requests for IPv4 services and NDP requests for IPv6. - -In layer 2 mode, all traffic for a service IP address is routed through one node. -After traffic enters the node, the service proxy for the CNI network provider distributes the traffic to all the pods for the service. 
- -Because all traffic for a service enters through a single node in layer 2 mode, in a strict sense, MetalLB does not implement a load balancer for layer 2. -Rather, MetalLB implements a failover mechanism for layer 2 so that when a `speaker` pod becomes unavailable, a `speaker` pod on a different node can announce the service IP address. - -When a node becomes unavailable, failover is automatic. -The `speaker` pods on the other nodes detect that a node is unavailable and a new `speaker` pod and node take ownership of the service IP address from the failed node. - -image::nw-metallb-layer2.png[Conceptual diagram for MetalLB and layer 2 mode] - -The preceding graphic shows the following concepts related to MetalLB: - -* An application is available through a service that has a cluster IP on the `172.130.0.0/16` subnet. -That IP address is accessible from inside the cluster. -The service also has an external IP address that MetalLB assigned to the service, `192.168.100.200`. - -* Nodes 1 and 3 have a pod for the application. - -* The `speaker` daemon set runs a pod on each node. -The MetalLB Operator starts these pods. - -* Each `speaker` pod is a host-networked pod. -The IP address for the pod is identical to the IP address for the node on the host network. - -* The `speaker` pod on node 1 uses ARP to announce the external IP address for the service, `192.168.100.200`. -The `speaker` pod that announces the external IP address must be on the same node as an endpoint for the service and the endpoint must be in the `Ready` condition. - -* Client traffic is routed to the host network and connects to the `192.168.100.200` IP address. -After traffic enters the node, the service proxy sends the traffic to the application pod on the same node or another node according to the external traffic policy that you set for the service. - -** If the external traffic policy for the service is set to `cluster`, the node that advertises the `192.168.100.200` load balancer IP address is selected from the nodes where a `speaker` pod is running. Only that node can receive traffic for the service. - -** If the external traffic policy for the service is set to `local`, the node that advertises the `192.168.100.200` load balancer IP address is selected from the nodes where a `speaker` pod is running and at least an endpoint of the service. Only that node can receive traffic for the service. In the preceding graphic, either node 1 or 3 would advertise `192.168.100.200`. - -* If node 1 becomes unavailable, the external IP address fails over to another node. -On another node that has an instance of the application pod and service endpoint, the `speaker` pod begins to announce the external IP address, `192.168.100.200` and the new node receives the client traffic. -In the diagram, the only candidate is node 3. - diff --git a/modules/nw-metallb-levels.adoc b/modules/nw-metallb-levels.adoc deleted file mode 100644 index 8d3fc6975cc1..000000000000 --- a/modules/nw-metallb-levels.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// Epic CNF-3274 (4.11) -// * networking/metallb/metallb-troubleshoot-support.adoc - -:_content-type: REFERENCE - -[id="frr-log-levels_{context}"] -= FRRouting (FRR) log levels - -The following table describes the FRR logging levels. - -.Log levels -[cols="30%,70%",options="header"] -|=== -| Log level | Description - -| `all` -a| -Supplies all logging information for all logging levels. -| `debug` -a| -Information that is diagnostically helpful to people. 
Set to `debug` to give detailed troubleshooting information. -| `info` -| -Provides information that should always be logged but, under normal circumstances, does not require user intervention. This is the default logging level. -| `warn` -| -Anything that can cause inconsistent `MetalLB` behavior. Usually `MetalLB` automatically recovers from this type of error. - -| `error` -a| -Any error that is fatal to the functioning of `MetalLB`. These errors usually require administrator intervention to fix. - -| `none` -|Turn off all logging. -|=== diff --git a/modules/nw-metallb-loglevel.adoc deleted file mode 100644 index 799c21b15e2b..000000000000 --- a/modules/nw-metallb-loglevel.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-troubleshoot-support.adoc - -:_content-type: PROCEDURE - -[id="nw-metallb-setting-metalb-logging-levels_{context}"] -= Setting the MetalLB logging levels - -MetalLB uses FRRouting (FRR) in a container, and the default `info` logging level generates a lot of logging. You can control the verbosity of the generated logs by setting the `logLevel` field, as illustrated in this example. - -Gain deeper insight into MetalLB by setting the `logLevel` to `debug` as follows: - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Create a file, such as `setdebugloglevel.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -spec: - logLevel: debug - nodeSelector: - node-role.kubernetes.io/worker: "" ----- - -. Apply the configuration: -+ -[source,terminal] ----- -$ oc replace -f setdebugloglevel.yaml ----- -+ -[NOTE] -==== -Use `oc replace` because the `metallb` CR already exists and you are changing only the log level. -==== - -. Display the names of the `speaker` pods: -+ -[source,terminal] ----- -$ oc get -n metallb-system pods -l component=speaker ----- -+ -.Example output
[source,text] ----- -NAME READY STATUS RESTARTS AGE -speaker-2m9pm 4/4 Running 0 9m19s -speaker-7m4qw 3/4 Running 0 19s -speaker-szlmx 4/4 Running 0 9m19s ----- -+ -[NOTE] -==== -Speaker and controller pods are recreated to ensure the updated logging level is applied. The logging level is modified for all the components of MetalLB. -==== - -. 
View the `speaker` logs: -+ -[source,terminal] ----- -$ oc logs -n metallb-system speaker-7m4qw -c speaker ----- -+ -.Example output ----- -{"branch":"main","caller":"main.go:92","commit":"3d052535","goversion":"gc / go1.17.1 / amd64","level":"info","msg":"MetalLB speaker starting (commit 3d052535, branch main)","ts":"2022-05-17T09:55:05Z","version":""} -{"caller":"announcer.go:110","event":"createARPResponder","interface":"ens4","level":"info","msg":"created ARP responder for interface","ts":"2022-05-17T09:55:05Z"} -{"caller":"announcer.go:119","event":"createNDPResponder","interface":"ens4","level":"info","msg":"created NDP responder for interface","ts":"2022-05-17T09:55:05Z"} -{"caller":"announcer.go:110","event":"createARPResponder","interface":"tun0","level":"info","msg":"created ARP responder for interface","ts":"2022-05-17T09:55:05Z"} -{"caller":"announcer.go:119","event":"createNDPResponder","interface":"tun0","level":"info","msg":"created NDP responder for interface","ts":"2022-05-17T09:55:05Z"} -I0517 09:55:06.515686 95 request.go:665] Waited for 1.026500832s due to client-side throttling, not priority and fairness, request: GET:https://172.30.0.1:443/apis/operators.coreos.com/v1alpha1?timeout=32s -{"Starting Manager":"(MISSING)","caller":"k8s.go:389","level":"info","ts":"2022-05-17T09:55:08Z"} -{"caller":"speakerlist.go:310","level":"info","msg":"node event - forcing sync","node addr":"10.0.128.4","node event":"NodeJoin","node name":"ci-ln-qb8t3mb-72292-7s7rh-worker-a-vvznj","ts":"2022-05-17T09:55:08Z"} -{"caller":"service_controller.go:113","controller":"ServiceReconciler","enqueueing":"openshift-kube-controller-manager-operator/metrics","epslice":"{\"metadata\":{\"name\":\"metrics-xtsxr\",\"generateName\":\"metrics-\",\"namespace\":\"openshift-kube-controller-manager-operator\",\"uid\":\"ac6766d7-8504-492c-9d1e-4ae8897990ad\",\"resourceVersion\":\"9041\",\"generation\":4,\"creationTimestamp\":\"2022-05-17T07:16:53Z\",\"labels\":{\"app\":\"kube-controller-manager-operator\",\"endpointslice.kubernetes.io/managed-by\":\"endpointslice-controller.k8s.io\",\"kubernetes.io/service-name\":\"metrics\"},\"annotations\":{\"endpoints.kubernetes.io/last-change-trigger-time\":\"2022-05-17T07:21:34Z\"},\"ownerReferences\":[{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"name\":\"metrics\",\"uid\":\"0518eed3-6152-42be-b566-0bd00a60faf8\",\"controller\":true,\"blockOwnerDeletion\":true}],\"managedFields\":[{\"manager\":\"kube-controller-manager\",\"operation\":\"Update\",\"apiVersion\":\"discovery.k8s.io/v1\",\"time\":\"2022-05-17T07:20:02Z\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:addressType\":{},\"f:endpoints\":{},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:endpoints.kubernetes.io/last-change-trigger-time\":{}},\"f:generateName\":{},\"f:labels\":{\".\":{},\"f:app\":{},\"f:endpointslice.kubernetes.io/managed-by\":{},\"f:kubernetes.io/service-name\":{}},\"f:ownerReferences\":{\".\":{},\"k:{\\\"uid\\\":\\\"0518eed3-6152-42be-b566-0bd00a60faf8\\\"}\":{}}},\"f:ports\":{}}}]},\"addressType\":\"IPv4\",\"endpoints\":[{\"addresses\":[\"10.129.0.7\"],\"conditions\":{\"ready\":true,\"serving\":true,\"terminating\":false},\"targetRef\":{\"kind\":\"Pod\",\"namespace\":\"openshift-kube-controller-manager-operator\",\"name\":\"kube-controller-manager-operator-6b98b89ddd-8d4nf\",\"uid\":\"dd5139b8-e41c-4946-a31b-1a629314e844\",\"resourceVersion\":\"9038\"},\"nodeName\":\"ci-ln-qb8t3mb-72292-7s7rh-master-0\",\"zone\":\"us-central1-a\"}],\"ports\":[{\"name\":\"https\",\"protocol\":\"TCP\",\"port\":844
3}]}","level":"debug","ts":"2022-05-17T09:55:08Z"} ----- - -. View the FRR logs: -+ -[source,terminal] ----- -$ oc logs -n metallb-system speaker-7m4qw -c frr ----- -+ -.Example output ----- -Started watchfrr -2022/05/17 09:55:05 ZEBRA: client 16 says hello and bids fair to announce only bgp routes vrf=0 -2022/05/17 09:55:05 ZEBRA: client 31 says hello and bids fair to announce only vnc routes vrf=0 -2022/05/17 09:55:05 ZEBRA: client 38 says hello and bids fair to announce only static routes vrf=0 -2022/05/17 09:55:05 ZEBRA: client 43 says hello and bids fair to announce only bfd routes vrf=0 -2022/05/17 09:57:25.089 BGP: Creating Default VRF, AS 64500 -2022/05/17 09:57:25.090 BGP: dup addr detect enable max_moves 5 time 180 freeze disable freeze_time 0 -2022/05/17 09:57:25.090 BGP: bgp_get: Registering BGP instance (null) to zebra -2022/05/17 09:57:25.090 BGP: Registering VRF 0 -2022/05/17 09:57:25.091 BGP: Rx Router Id update VRF 0 Id 10.131.0.1/32 -2022/05/17 09:57:25.091 BGP: RID change : vrf VRF default(0), RTR ID 10.131.0.1 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF br0 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF ens4 -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF ens4 addr 10.0.128.4/32 -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF ens4 addr fe80::c9d:84da:4d86:5618/64 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF lo -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF ovs-system -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF tun0 -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF tun0 addr 10.131.0.1/23 -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF tun0 addr fe80::40f1:d1ff:feb6:5322/64 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF veth2da49fed -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF veth2da49fed addr fe80::24bd:d1ff:fec1:d88/64 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF veth2fa08c8c -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF veth2fa08c8c addr fe80::6870:ff:fe96:efc8/64 -2022/05/17 09:57:25.091 BGP: Rx Intf add VRF 0 IF veth41e356b7 -2022/05/17 09:57:25.091 BGP: Rx Intf address add VRF 0 IF veth41e356b7 addr fe80::48ff:37ff:fede:eb4b/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF veth1295c6e2 -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF veth1295c6e2 addr fe80::b827:a2ff:feed:637/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF veth9733c6dc -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF veth9733c6dc addr fe80::3cf4:15ff:fe11:e541/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF veth336680ea -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF veth336680ea addr fe80::94b1:8bff:fe7e:488c/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF vetha0a907b7 -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF vetha0a907b7 addr fe80::3855:a6ff:fe73:46c3/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF vethf35a4398 -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF vethf35a4398 addr fe80::40ef:2fff:fe57:4c4d/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF vethf831b7f4 -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF vethf831b7f4 addr fe80::f0d9:89ff:fe7c:1d32/64 -2022/05/17 09:57:25.092 BGP: Rx Intf add VRF 0 IF vxlan_sys_4789 -2022/05/17 09:57:25.092 BGP: Rx Intf address add VRF 0 IF vxlan_sys_4789 addr fe80::80c1:82ff:fe4b:f078/64 -2022/05/17 09:57:26.094 BGP: 10.0.0.1 [FSM] Timer (start timer expire). 
-2022/05/17 09:57:26.094 BGP: 10.0.0.1 [FSM] BGP_Start (Idle->Connect), fd -1 -2022/05/17 09:57:26.094 BGP: Allocated bnc 10.0.0.1/32(0)(VRF default) peer 0x7f807f7631a0 -2022/05/17 09:57:26.094 BGP: sendmsg_zebra_rnh: sending cmd ZEBRA_NEXTHOP_REGISTER for 10.0.0.1/32 (vrf VRF default) -2022/05/17 09:57:26.094 BGP: 10.0.0.1 [FSM] Waiting for NHT -2022/05/17 09:57:26.094 BGP: bgp_fsm_change_status : vrf default(0), Status: Connect established_peers 0 -2022/05/17 09:57:26.094 BGP: 10.0.0.1 went from Idle to Connect -2022/05/17 09:57:26.094 BGP: 10.0.0.1 [FSM] TCP_connection_open_failed (Connect->Active), fd -1 -2022/05/17 09:57:26.094 BGP: bgp_fsm_change_status : vrf default(0), Status: Active established_peers 0 -2022/05/17 09:57:26.094 BGP: 10.0.0.1 went from Connect to Active -2022/05/17 09:57:26.094 ZEBRA: rnh_register msg from client bgp: hdr->length=8, type=nexthop vrf=0 -2022/05/17 09:57:26.094 ZEBRA: 0: Add RNH 10.0.0.1/32 type Nexthop -2022/05/17 09:57:26.094 ZEBRA: 0:10.0.0.1/32: Evaluate RNH, type Nexthop (force) -2022/05/17 09:57:26.094 ZEBRA: 0:10.0.0.1/32: NH has become unresolved -2022/05/17 09:57:26.094 ZEBRA: 0: Client bgp registers for RNH 10.0.0.1/32 type Nexthop -2022/05/17 09:57:26.094 BGP: VRF default(0): Rcvd NH update 10.0.0.1/32(0) - metric 0/0 #nhops 0/0 flags 0x6 -2022/05/17 09:57:26.094 BGP: NH update for 10.0.0.1/32(0)(VRF default) - flags 0x6 chgflags 0x0 - evaluate paths -2022/05/17 09:57:26.094 BGP: evaluate_paths: Updating peer (10.0.0.1(VRF default)) status with NHT -2022/05/17 09:57:30.081 ZEBRA: Event driven route-map update triggered -2022/05/17 09:57:30.081 ZEBRA: Event handler for route-map: 10.0.0.1-out -2022/05/17 09:57:30.081 ZEBRA: Event handler for route-map: 10.0.0.1-in -2022/05/17 09:57:31.104 ZEBRA: netlink_parse_info: netlink-listen (NS 0) type RTM_NEWNEIGH(28), len=76, seq=0, pid=0 -2022/05/17 09:57:31.104 ZEBRA: Neighbor Entry received is not on a VLAN or a BRIDGE, ignoring -2022/05/17 09:57:31.105 ZEBRA: netlink_parse_info: netlink-listen (NS 0) type RTM_NEWNEIGH(28), len=76, seq=0, pid=0 -2022/05/17 09:57:31.105 ZEBRA: Neighbor Entry received is not on a VLAN or a BRIDGE, ignoring ----- diff --git a/modules/nw-metallb-metrics.adoc b/modules/nw-metallb-metrics.adoc deleted file mode 100644 index fc72248f188e..000000000000 --- a/modules/nw-metallb-metrics.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-troubleshoot-support.adoc - -[id="nw-metallb-metrics_{context}"] -= MetalLB metrics for BGP and BFD - -{product-title} captures the following metrics that are related to MetalLB and BGP peers and BFD profiles: - -* `metallb_bfd_control_packet_input` counts the number of BFD control packets received from each BFD peer. - -* `metallb_bfd_control_packet_output` counts the number of BFD control packets sent to each BFD peer. - -* `metallb_bfd_echo_packet_input` counts the number of BFD echo packets received from each BFD peer. - -* `metallb_bfd_echo_packet_output` counts the number of BFD echo packets sent to each BFD peer. - -* `metallb_bfd_session_down_events` counts the number of times the BFD session with a peer entered the `down` state. - -* `metallb_bfd_session_up` indicates the connection state with a BFD peer. `1` indicates the session is `up` and `0` indicates the session is `down`. - -* `metallb_bfd_session_up_events` counts the number of times the BFD session with a peer entered the `up` state. 
- -* `metallb_bfd_zebra_notifications` counts the number of BFD Zebra notifications for each BFD peer. - -* `metallb_bgp_announced_prefixes_total` counts the number of load balancer IP address prefixes that are advertised to BGP peers. The terms _prefix_ and _aggregated route_ have the same meaning. - -* `metallb_bgp_session_up` indicates the connection state with a BGP peer. `1` indicates the session is `up` and `0` indicates the session is `down`. - -* `metallb_bgp_updates_total` counts the number of BGP `update` messages that were sent to a BGP peer. diff --git a/modules/nw-metallb-operator-custom-resources.adoc b/modules/nw-metallb-operator-custom-resources.adoc deleted file mode 100644 index fafda7961e86..000000000000 --- a/modules/nw-metallb-operator-custom-resources.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-operator-custom-resources_{context}"] -= MetalLB Operator custom resources - -The MetalLB Operator monitors its own namespace for the following custom resources: - -`MetalLB`:: -When you add a `MetalLB` custom resource to the cluster, the MetalLB Operator deploys MetalLB on the cluster. -The Operator only supports a single instance of the custom resource. -If the instance is deleted, the Operator removes MetalLB from the cluster. - -`IPAddressPool`:: -MetalLB requires one or more pools of IP addresses that it can assign to a service when you add a service of type `LoadBalancer`. -An `IPAddressPool` includes a list of IP addresses. -The list can be a single IP address that is set using a range, such as 1.1.1.1-1.1.1.1, a range specified in CIDR notation, a range specified as a starting and ending address separated by a hyphen, or a combination of the three. -An `IPAddressPool` requires a name. -The documentation uses names like `doc-example`, `doc-example-reserved`, and `doc-example-ipv6`. -The MetalLB `controller` assigns IP addresses from a pool of addresses in an `IPAddressPool`. -`L2Advertisement` and `BGPAdvertisement` custom resources enable the advertisement of a given IP from a given pool. -You can assign IP addresses from an `IPAddressPool` to services and namespaces by using the `spec.serviceAllocation` specification in the `IPAddressPool` custom resource. -+ -[NOTE] -==== -A single `IPAddressPool` can be referenced by a L2 advertisement and a BGP advertisement. -==== - -`BGPPeer`:: -The BGP peer custom resource identifies the BGP router for MetalLB to communicate with, the AS number of the router, the AS number for MetalLB, and customizations for route advertisement. -MetalLB advertises the routes for service load-balancer IP addresses to one or more BGP peers. - -`BFDProfile`:: -The BFD profile custom resource configures Bidirectional Forwarding Detection (BFD) for a BGP peer. -BFD provides faster path failure detection than BGP alone provides. - -`L2Advertisement`:: -The L2Advertisement custom resource advertises an IP coming from an `IPAddressPool` using the L2 protocol. - -`BGPAdvertisement`:: -The BGPAdvertisement custom resource advertises an IP coming from an `IPAddressPool` using the BGP protocol. - -After you add the `MetalLB` custom resource to the cluster and the Operator deploys MetalLB, the `controller` and `speaker` MetalLB software components begin running. - -MetalLB validates all relevant custom resources. 
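The following minimal sketch shows how these custom resources relate to each other. It assumes the default `metallb-system` namespace and uses hypothetical resource names rather than values from any specific procedure: an `IPAddressPool` supplies the addresses, and an `L2Advertisement` that references the pool by name causes MetalLB to announce those addresses with the L2 protocol.

[source,yaml]
----
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: doc-example-sketch # hypothetical pool name
  namespace: metallb-system
spec:
  addresses:
  - 192.168.10.0/24 # any range that is reachable on the host network
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement-sketch # hypothetical advertisement name
  namespace: metallb-system
spec:
  ipAddressPools:
  - doc-example-sketch # references the pool above by name
----

A `BGPAdvertisement` references a pool in the same way, with one or more `BGPPeer` resources identifying the routers that receive the routes.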
diff --git a/modules/nw-metallb-operator-deployment-specifications-for-metallb.adoc b/modules/nw-metallb-operator-deployment-specifications-for-metallb.adoc deleted file mode 100644 index 3f91c567ec9a..000000000000 --- a/modules/nw-metallb-operator-deployment-specifications-for-metallb.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -[id="nw-metallb-operator-deployment-specifications-for-metallb_{context}"] -= Deployment specifications for MetalLB - -When you start an instance of MetalLB using the `MetalLB` custom resource, you can configure deployment specifications in the `MetalLB` custom resource to manage how the `controller` or `speaker` pods deploy and run in your cluster. Use these deployment specifications to manage the following tasks: - -* Select nodes for MetalLB pod deployment. -* Manage scheduling by using pod priority and pod affinity. -* Assign CPU limits for MetalLB pods. -* Assign a container RuntimeClass for MetalLB pods. -* Assign metadata for MetalLB pods. \ No newline at end of file diff --git a/modules/nw-metallb-operator-initial-config.adoc b/modules/nw-metallb-operator-initial-config.adoc deleted file mode 100644 index 523c06feec9f..000000000000 --- a/modules/nw-metallb-operator-initial-config.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -:_content-type: PROCEDURE -[id="nw-metallb-operator-initial-config_{context}"] -= Starting MetalLB on your cluster - -After you install the Operator, you need to configure a single instance of a MetalLB custom resource. After you configure the custom resource, the Operator starts MetalLB on your cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the MetalLB Operator. - - -.Procedure - -This procedure assumes the MetalLB Operator is installed in the `metallb-system` namespace. If you installed using the web console substitute `openshift-operators` for the namespace. - -. Create a single instance of a MetalLB custom resource: -+ -[source,terminal] ----- -$ cat << EOF | oc apply -f - -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -EOF ----- - -.Verification - -Confirm that the deployment for the MetalLB controller and the daemon set for the MetalLB speaker are running. - -. Verify that the deployment for the controller is running: -+ -[source,terminal] ----- -$ oc get deployment -n metallb-system controller ----- -+ -.Example output -[source,terminal] ----- -NAME READY UP-TO-DATE AVAILABLE AGE -controller 1/1 1 1 11m ----- - -. Verify that the daemon set for the speaker is running: -+ -[source,terminal] ----- -$ oc get daemonset -n metallb-system speaker ----- -+ -.Example output -[source,terminal] ----- -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -speaker 6 6 6 6 6 kubernetes.io/os=linux 18m ----- -+ -The example output indicates 6 speaker pods. The number of speaker pods in your cluster might differ from the example output. Make sure the output indicates one pod for each node in your cluster. 
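As an optional extra check, and assuming the default `metallb-system` namespace, you can list all of the pods in the namespace to confirm that the `controller` pod and one `speaker` pod for each node report a `Running` status:

[source,terminal]
----
$ oc get pods -n metallb-system
----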
- diff --git a/modules/nw-metallb-operator-limit-speaker-to-nodes.adoc b/modules/nw-metallb-operator-limit-speaker-to-nodes.adoc deleted file mode 100644 index 00c37a2d9229..000000000000 --- a/modules/nw-metallb-operator-limit-speaker-to-nodes.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -[id="nw-metallb-operator-limit-speaker-to-nodes_{context}"] -= Limit speaker pods to specific nodes - -By default, when you start MetalLB with the MetalLB Operator, the Operator starts an instance of a `speaker` pod on each node in the cluster. -Only the nodes with a `speaker` pod can advertise a load balancer IP address. -You can configure the `MetalLB` custom resource with a node selector to specify which nodes run the `speaker` pods. - -The most common reason to limit the `speaker` pods to specific nodes is to ensure that only nodes with network interfaces on specific networks advertise load balancer IP addresses. -Only the nodes with a running `speaker` pod are advertised as destinations of the load balancer IP address. - -If you limit the `speaker` pods to specific nodes and specify `local` for the external traffic policy of a service, then you must ensure that the application pods for the service are deployed to the same nodes. - -.Example configuration to limit speaker pods to worker nodes -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -spec: - nodeSelector: <.> - node-role.kubernetes.io/worker: "" - speakerTolerations: <.> - - key: "Example" - operator: "Exists" - effect: "NoExecute" ----- -<.> The example configuration specifies to assign the speaker pods to worker nodes, but you can specify labels that you assigned to nodes or any valid node selector. -<.> In this example configuration, the pod that this toleration is attached to tolerates any taint that matches the `key` value and `effect` value using the `operator`. - -After you apply a manifest with the `spec.nodeSelector` field, you can check the number of pods that the Operator deployed with the `oc get daemonset -n metallb-system speaker` command. -Similarly, you can display the nodes that match your labels with a command like `oc get nodes -l node-role.kubernetes.io/worker=`. - -You can optionally allow the node to control which speaker pods should, or should not, be scheduled on them by using affinity rules. You can also limit these pods by applying a list of tolerations. For more information about affinity rules, taints, and tolerations, see the additional resources. diff --git a/modules/nw-metallb-operator-setting-pod-CPU-limits.adoc b/modules/nw-metallb-operator-setting-pod-CPU-limits.adoc deleted file mode 100644 index d3616c93f4b5..000000000000 --- a/modules/nw-metallb-operator-setting-pod-CPU-limits.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -[id="nw-metallb-operator-setting-pod-CPU-limits_{context}"] -= Configuring pod CPU limits in a MetalLB deployment - -You can optionally assign pod CPU limits to `controller` and `speaker` pods by configuring the `MetalLB` custom resource. Defining CPU limits for the `controller` or `speaker` pods helps you to manage compute resources on the node. This ensures all pods on the node have the necessary compute resources to manage workloads and cluster housekeeping. 
- -.Prerequisites - -* You are logged in as a user with `cluster-admin` privileges. - -* You have installed the MetalLB Operator. - -.Procedure -. Create a `MetalLB` custom resource file, such as `CPULimits.yaml`, to specify the `cpu` value for the `controller` and `speaker` pods: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -spec: - logLevel: debug - controllerConfig: - resources: - limits: - cpu: "200m" - speakerConfig: - resources: - limits: - cpu: "300m" ----- - -. Apply the `MetalLB` custom resource configuration: -+ -[source,bash] ----- -$ oc apply -f CPULimits.yaml ----- - -.Verification -* To view compute resources for a pod, run the following command, replacing `<pod_name>` with your target pod: -+ -[source,bash] ----- -$ oc describe pod <pod_name> ----- diff --git a/modules/nw-metallb-operator-setting-pod-priority-affinity.adoc b/modules/nw-metallb-operator-setting-pod-priority-affinity.adoc deleted file mode 100644 index b8f032176ec3..000000000000 --- a/modules/nw-metallb-operator-setting-pod-priority-affinity.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -[id="nw-metallb-operator-setting-pod-priority-affinity_{context}"] -= Configuring pod priority and pod affinity in a MetalLB deployment - -You can optionally assign pod priority and pod affinity rules to `controller` and `speaker` pods by configuring the `MetalLB` custom resource. The pod priority indicates the relative importance of a pod on a node and schedules the pod based on this priority. Set a high priority on your `controller` or `speaker` pod to ensure scheduling priority over other pods on the node. - -Pod affinity manages relationships among pods. Assign pod affinity to the `controller` or `speaker` pods to control on what node the scheduler places the pod in the context of pod relationships. For example, you can allow pods with logically related workloads on the same node, or ensure pods with conflicting workloads are on separate nodes. - -.Prerequisites - -* You are logged in as a user with `cluster-admin` privileges. - -* You have installed the MetalLB Operator. - -.Procedure -. Create a `PriorityClass` custom resource, such as `myPriorityClass.yaml`, to configure the priority level. This example uses a high-priority class: -+ -[source,yaml] ----- -apiVersion: scheduling.k8s.io/v1 -kind: PriorityClass -metadata: - name: high-priority -value: 1000000 ----- - -. Apply the `PriorityClass` custom resource configuration: -+ -[source,bash] ----- -$ oc apply -f myPriorityClass.yaml ----- - -. Create a `MetalLB` custom resource, such as `MetalLBPodConfig.yaml`, to specify the `priorityClassName` and `podAffinity` values: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -spec: - logLevel: debug - controllerConfig: - priorityClassName: high-priority - runtimeClassName: myclass - speakerConfig: - priorityClassName: high-priority - runtimeClassName: myclass - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app: metallb - topologyKey: kubernetes.io/hostname ----- - -. 
Apply the `MetalLB` custom resource configuration: -+ -[source,bash] ----- -$ oc apply -f MetalLBPodConfig.yaml ----- - -.Verification -* To view the priority class that you assigned to pods in a namespace, run the following command, replacing `<namespace>` with your target namespace: -+ -[source,bash] ----- -$ oc get pods -n <namespace> -o custom-columns=NAME:.metadata.name,PRIORITY:.spec.priorityClassName ----- - -* To verify that the scheduler placed pods according to pod affinity rules, view the metadata for the pod's node by running the following command, replacing `<namespace>` with your target namespace: -+ -[source,bash] ----- -$ oc get pod -o=custom-columns=NODE:.spec.nodeName,NAME:.metadata.name -n <namespace> ----- diff --git a/modules/nw-metallb-operator-setting-runtimeclass.adoc b/modules/nw-metallb-operator-setting-runtimeclass.adoc deleted file mode 100644 index fd93d9e475a9..000000000000 --- a/modules/nw-metallb-operator-setting-runtimeclass.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-operator-install.adoc - -[id="nw-metallb-operator-setting-runtimeclass_{context}"] -= Configuring a container runtime class in a MetalLB deployment - -You can optionally assign a container runtime class to `controller` and `speaker` pods by configuring the `MetalLB` custom resource. For example, for Windows workloads, you can assign a Windows runtime class to the pod, which uses this runtime class for all containers in the pod. - -.Prerequisites - -* You are logged in as a user with `cluster-admin` privileges. - -* You have installed the MetalLB Operator. - -.Procedure -. Create a `RuntimeClass` custom resource, such as `myRuntimeClass.yaml`, to define your runtime class: -+ -[source,yaml,options="nowrap",role="white-space-pre"] ----- -apiVersion: node.k8s.io/v1 -kind: RuntimeClass -metadata: - name: myclass -handler: myconfiguration ----- - -. Apply the `RuntimeClass` custom resource configuration: -+ -[source,bash] ----- -$ oc apply -f myRuntimeClass.yaml ----- - -. Create a `MetalLB` custom resource, such as `MetalLBRuntime.yaml`, to specify the `runtimeClassName` value: -+ -[source,yaml] ----- -apiVersion: metallb.io/v1beta1 -kind: MetalLB -metadata: - name: metallb - namespace: metallb-system -spec: - logLevel: debug - controllerConfig: - runtimeClassName: myclass - annotations: <1> - controller: demo - speakerConfig: - runtimeClassName: myclass - annotations: <1> - speaker: demo ----- -<1> This example uses `annotations` to add metadata such as build release information or GitHub pull request information. You can populate annotations with characters not permitted in labels. However, you cannot use annotations to identify or select objects. - -. 
Apply the `MetalLB` custom resource configuration: -+ -[source,bash,options="nowrap",role="white-space-pre"] ----- -$ oc apply -f MetalLBRuntime.yaml ----- - -.Verification -* To view the container runtime for a pod, run the following command: -+ -[source,bash,options="nowrap",role="white-space-pre"] ----- -$ oc get pod -o custom-columns=NAME:metadata.name,STATUS:.status.phase,RUNTIME_CLASS:.spec.runtimeClassName ----- diff --git a/modules/nw-metallb-software-components.adoc b/modules/nw-metallb-software-components.adoc deleted file mode 100644 index 4c651ad5528b..000000000000 --- a/modules/nw-metallb-software-components.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -[id="nw-metallb-software-components_{context}"] -= MetalLB software components - -When you install the MetalLB Operator, the `metallb-operator-controller-manager` deployment starts a pod. The pod is the implementation of the Operator. The pod monitors for changes to all the relevant resources. - -When the Operator starts an instance of MetalLB, it starts a `controller` deployment and a `speaker` daemon set. - -[NOTE] -==== -You can configure deployment specifications in the MetalLB custom resource to manage how `controller` and `speaker` pods deploy and run in your cluster. For more information about these deployment specifications, see the _Additional Resources_ section. -==== - -`controller`:: -The Operator starts the deployment and a single pod. When you add a service of type `LoadBalancer`, Kubernetes uses the `controller` to allocate an IP address from an address pool. -In case of a service failure, verify you have the following entry in your `controller` pod logs: -+ -.Example output -[source,terminal] ----- -"event":"ipAllocated","ip":"172.22.0.201","msg":"IP address assigned by controller ----- - -`speaker`:: -The Operator starts a daemon set for `speaker` pods. By default, a pod is started on each node in your cluster. You can limit the pods to specific nodes by specifying a node selector in the `MetalLB` custom resource when you start MetalLB. If the `controller` allocated the IP address to the service and service is still unavailable, read the `speaker` pod logs. If the `speaker` pod is unavailable, run the `oc describe pod -n` command. -+ -For layer 2 mode, after the `controller` allocates an IP address for the service, the `speaker` pods use an algorithm to determine which `speaker` pod on which node will announce the load balancer IP address. -The algorithm involves hashing the node name and the load balancer IP address. For more information, see "MetalLB and external traffic policy". -// IETF treats protocol names as proper nouns. -The `speaker` uses Address Resolution Protocol (ARP) to announce IPv4 addresses and Neighbor Discovery Protocol (NDP) to announce IPv6 addresses. - -For Border Gateway Protocol (BGP) mode, after the `controller` allocates an IP address for the service, each `speaker` pod advertises the load balancer IP address with its BGP peers. You can configure which nodes start BGP sessions with BGP peers. - -Requests for the load balancer IP address are routed to the node with the `speaker` that announces the IP address. After the node receives the packets, the service proxy routes the packets to an endpoint for the service. The endpoint can be on the same node in the optimal case, or it can be on another node. The service proxy chooses an endpoint each time a connection is established. 
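For illustration, the following minimal service of type `LoadBalancer` is the kind of object that prompts the `controller` to allocate an IP address from an address pool and the `speaker` pods to announce it. The service name, namespace, selector, and ports are hypothetical.

.Example service of type `LoadBalancer`
[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: example-lb          # hypothetical name
  namespace: example-ns     # hypothetical namespace
spec:
  type: LoadBalancer        # MetalLB acts on services of this type
  selector:
    app: example-app        # hypothetical selector for the backing pods
  ports:
  - port: 443
    targetPort: 8443
    protocol: TCP
----

After the `controller` assigns an address, the address appears in the `status.loadBalancer.ingress` field of the service, which corresponds to the `ipAllocated` log entry shown earlier.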
diff --git a/modules/nw-metallb-troubleshoot-bfd.adoc b/modules/nw-metallb-troubleshoot-bfd.adoc deleted file mode 100644 index bea4dee7979d..000000000000 --- a/modules/nw-metallb-troubleshoot-bfd.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-troubleshoot-support.adoc - -[id="nw-metallb-troubleshoot-bfd_{context}"] -= Troubleshooting BFD issues - -The Bidirectional Forwarding Detection (BFD) implementation that Red Hat supports uses FRRouting (FRR) in a container in the `speaker` pods. -The BFD implementation relies on BFD peers also being configured as BGP peers with an established BGP session. -As a cluster administrator, if you need to troubleshoot BFD configuration issues, you need to run commands in the FRR container. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Display the names of the `speaker` pods: -+ -[source,terminal] ----- -$ oc get -n metallb-system pods -l component=speaker ----- -+ -.Example output -[source,text] ----- -NAME READY STATUS RESTARTS AGE -speaker-66bth 4/4 Running 0 26m -speaker-gvfnf 4/4 Running 0 26m -... ----- - -. Display the BFD peers: -+ -[source,terminal] ----- -$ oc exec -n metallb-system speaker-66bth -c frr -- vtysh -c "show bfd peers brief" ----- -+ -.Example output ----- -Session count: 2 -SessionId LocalAddress PeerAddress Status -========= ============ =========== ====== -3909139637 10.0.1.2 10.0.2.3 up <.> ----- -<.> Confirm that the `PeerAddress` column includes each BFD peer. -If the output does not list a BFD peer IP address that you expected the output to include, troubleshoot BGP connectivity with the peer. -If the status field indicates `down`, check for connectivity on the links and equipment between the node and the peer. -You can determine the node name for the speaker pod with a command like `oc get pods -n metallb-system speaker-66bth -o jsonpath='{.spec.nodeName}'`. diff --git a/modules/nw-metallb-troubleshoot-bgp.adoc b/modules/nw-metallb-troubleshoot-bgp.adoc deleted file mode 100644 index 3823796b86dc..000000000000 --- a/modules/nw-metallb-troubleshoot-bgp.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/metallb-troubleshoot-support.adoc - -[id="nw-metallb-troubleshoot-bgp_{context}"] -= Troubleshooting BGP issues - -The BGP implementation that Red Hat supports uses FRRouting (FRR) in a container in the `speaker` pods. -As a cluster administrator, if you need to troubleshoot BGP configuration issues, you need to run commands in the FRR container. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Display the names of the `speaker` pods: -+ -[source,terminal] ----- -$ oc get -n metallb-system pods -l component=speaker ----- -+ -.Example output -[source,text] ----- -NAME READY STATUS RESTARTS AGE -speaker-66bth 4/4 Running 0 56m -speaker-gvfnf 4/4 Running 0 56m -... ----- - -. Display the running configuration for FRR: -+ -[source,terminal] ----- -$ oc exec -n metallb-system speaker-66bth -c frr -- vtysh -c "show running-config" ----- -+ -.Example output ----- -Building configuration... - -Current configuration: -! 
-frr version 7.5.1_git -frr defaults traditional -hostname some-hostname -log file /etc/frr/frr.log informational -log timestamp precision 3 -service integrated-vtysh-config -! -router bgp 64500 <1> - bgp router-id 10.0.1.2 - no bgp ebgp-requires-policy - no bgp default ipv4-unicast - no bgp network import-check - neighbor 10.0.2.3 remote-as 64500 <2> - neighbor 10.0.2.3 bfd profile doc-example-bfd-profile-full <3> - neighbor 10.0.2.3 timers 5 15 - neighbor 10.0.2.4 remote-as 64500 <2> - neighbor 10.0.2.4 bfd profile doc-example-bfd-profile-full <3> - neighbor 10.0.2.4 timers 5 15 - ! - address-family ipv4 unicast - network 203.0.113.200/30 <4> - neighbor 10.0.2.3 activate - neighbor 10.0.2.3 route-map 10.0.2.3-in in - neighbor 10.0.2.4 activate - neighbor 10.0.2.4 route-map 10.0.2.4-in in - exit-address-family - ! - address-family ipv6 unicast - network fc00:f853:ccd:e799::/124 <4> - neighbor 10.0.2.3 activate - neighbor 10.0.2.3 route-map 10.0.2.3-in in - neighbor 10.0.2.4 activate - neighbor 10.0.2.4 route-map 10.0.2.4-in in - exit-address-family -! -route-map 10.0.2.3-in deny 20 -! -route-map 10.0.2.4-in deny 20 -! -ip nht resolve-via-default -! -ipv6 nht resolve-via-default -! -line vty -! -bfd - profile doc-example-bfd-profile-full <3> - transmit-interval 35 - receive-interval 35 - passive-mode - echo-mode - echo-interval 35 - minimum-ttl 10 - ! -! -end ----- -<.> The `router bgp` section indicates the ASN for MetalLB. -<.> Confirm that a `neighbor <ip-address> remote-as <peer-ASN>` line exists for each BGP peer custom resource that you added. -<.> If you configured BFD, confirm that the BFD profile is associated with the correct BGP peer and that the BFD profile appears in the command output. -<.> Confirm that the `network <ip-address-range>` lines match the IP address ranges that you specified in address pool custom resources that you added. - -. Display the BGP summary: -+ -[source,terminal] ----- -$ oc exec -n metallb-system speaker-66bth -c frr -- vtysh -c "show bgp summary" ----- -+ -.Example output ----- -IPv4 Unicast Summary: -BGP router identifier 10.0.1.2, local AS number 64500 vrf-id 0 -BGP table version 1 -RIB entries 1, using 192 bytes of memory -Peers 2, using 29 KiB of memory - -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt -10.0.2.3 4 64500 387 389 0 0 0 00:32:02 0 1 <1> -10.0.2.4 4 64500 0 0 0 0 0 never Active 0 <2> - -Total number of neighbors 2 - -IPv6 Unicast Summary: -BGP router identifier 10.0.1.2, local AS number 64500 vrf-id 0 -BGP table version 1 -RIB entries 1, using 192 bytes of memory -Peers 2, using 29 KiB of memory - -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt -10.0.2.3 4 64500 387 389 0 0 0 00:32:02 NoNeg <1> -10.0.2.4 4 64500 0 0 0 0 0 never Active 0 <2> - -Total number of neighbors 2 ----- -<1> Confirm that the output includes a line for each BGP peer custom resource that you added. -<2> Output that shows `0` messages received and messages sent indicates a BGP peer that does not have a BGP session. -Check network connectivity and the BGP configuration of the BGP peer. - -. Display the BGP peers that received an address pool: -+ -[source,terminal] ----- -$ oc exec -n metallb-system speaker-66bth -c frr -- vtysh -c "show bgp ipv4 unicast 203.0.113.200/30" ----- -+ -Replace `ipv4` with `ipv6` to display the BGP peers that received an IPv6 address pool. -Replace `203.0.113.200/30` with an IPv4 or IPv6 IP address range from an address pool. 
-+ -.Example output ----- -BGP routing table entry for 203.0.113.200/30 -Paths: (1 available, best #1, table default) - Advertised to non peer-group peers: - 10.0.2.3 <.> - Local - 0.0.0.0 from 0.0.0.0 (10.0.1.2) - Origin IGP, metric 0, weight 32768, valid, sourced, local, best (First path received) - Last update: Mon Jan 10 19:49:07 2022 ----- -<.> Confirm that the output includes an IP address for a BGP peer. diff --git a/modules/nw-metallb-when-metallb.adoc b/modules/nw-metallb-when-metallb.adoc deleted file mode 100644 index 812c15707c7b..000000000000 --- a/modules/nw-metallb-when-metallb.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/metallb/about-metallb.adoc - -:_content-type: CONCEPT -[id="nw-metallb-when-metallb_{context}"] -= When to use MetalLB - -Using MetalLB is valuable when you have a bare-metal cluster, or an infrastructure that is like bare metal, and you want fault-tolerant access to an application through an external IP address. - -You must configure your networking infrastructure to ensure that network traffic for the external IP address is routed from clients to the host network for the cluster. - -After deploying MetalLB with the MetalLB Operator, when you add a service of type `LoadBalancer`, MetalLB provides a platform-native load balancer. - -MetalLB operating in layer2 mode provides support for failover by utilizing a mechanism similar to IP failover. However, instead of relying on the virtual router redundancy protocol (VRRP) and keepalived, MetalLB leverages a gossip-based protocol to identify instances of node failure. When a failover is detected, another node assumes the role of the leader node, and a gratuitous ARP message is dispatched to broadcast this change. - -MetalLB operating in layer3 or border gateway protocol (BGP) mode delegates failure detection to the network. The BGP router or routers that the {product-title} nodes have established a connection with will identify any node failure and terminate the routes to that node. - -Using MetalLB instead of IP failover is preferable for ensuring high availability of pods and services. 
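As a hedged illustration of what BGP mode relies on, a router is typically described to MetalLB with a `BGPPeer` resource along the following lines. The field names follow the upstream MetalLB `BGPPeer` API and are shown for orientation only; the AS numbers and peer address are placeholders.

[source,yaml]
----
apiVersion: metallb.io/v1beta2   # upstream MetalLB BGPPeer API, shown for illustration
kind: BGPPeer
metadata:
  name: example-peer             # hypothetical name
  namespace: metallb-system
spec:
  myASN: 64500                   # placeholder local autonomous system number
  peerASN: 64500                 # placeholder AS number of the external router
  peerAddress: 10.0.2.3          # placeholder router address
----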
\ No newline at end of file diff --git a/modules/nw-modifying-operator-install-config.adoc b/modules/nw-modifying-operator-install-config.adoc deleted file mode 100644 index 785c88635cbd..000000000000 --- a/modules/nw-modifying-operator-install-config.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc - -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:ignition-config: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:ignition-config: -:vsphere: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud: -endif::[] - -:_content-type: PROCEDURE -[id="modifying-nwoperator-config-startup_{context}"] -= Specifying advanced network configuration - -You can use advanced network configuration for your network plugin to integrate your cluster into your existing network environment. -You can specify advanced network configuration only before you install the cluster. - -[IMPORTANT] -==== -Customizing your network configuration by modifying the {product-title} manifest files created by the installation program is not supported. Applying a manifest file that you create, as in the following procedure, is supported. -==== - -.Prerequisites - -* You have created the `install-config.yaml` file and completed any modifications to it. - -.Procedure - -. Change to the directory that contains the installation program and create the manifests: -+ -[source,terminal] ----- -$ ./openshift-install create manifests --dir <installation_directory> <1> ----- -<1> `<installation_directory>` specifies the name of the directory that contains the `install-config.yaml` file for your cluster. - -. Create a stub manifest file for the advanced network configuration that is named `cluster-network-03-config.yml` in the `<installation_directory>/manifests/` directory: -+ -[source,terminal] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: ----- - -. Specify the advanced network configuration for your cluster in the `cluster-network-03-config.yml` file, such as in the following -ifndef::ibm-cloud[examples:] -ifdef::ibm-cloud[example:] -+ --- -.Specify a different VXLAN port for the OpenShift SDN network provider -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - openshiftSDNConfig: - vxlanPort: 4800 ----- - -ifndef::ibm-cloud[] -.Enable IPsec for the OVN-Kubernetes network provider -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - ipsecConfig: {} ----- -endif::ibm-cloud[] --- - -. Optional: Back up the `manifests/cluster-network-03-config.yml` file. The -installation program consumes the `manifests/` directory when you create the -Ignition config files. - -ifdef::vsphere[] -. 
Remove the Kubernetes manifest files that define the control plane machines and compute machineSets: -+ -[source,terminal] ----- -$ rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml openshift/99_openshift-cluster-api_worker-machineset-*.yaml ----- -+ -Because you create and manage these resources yourself, you do not have -to initialize them. -+ -* You can preserve the MachineSet files to create compute machines by using the machine API, but you must update references to them to match your environment. -endif::vsphere[] - -ifeval::["{context}" == "installing-bare-metal-network-customizations"] -:!ignition-config: -endif::[] -ifeval::["{context}" == "installing-vsphere-network-customizations"] -:!ignition-config: -:!vsphere: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud: -endif::[] diff --git a/modules/nw-multi-network-policy-differences.adoc b/modules/nw-multi-network-policy-differences.adoc deleted file mode 100644 index d4197775b2ba..000000000000 --- a/modules/nw-multi-network-policy-differences.adoc +++ /dev/null @@ -1,31 +0,0 @@ -[id="nw-multi-network-policy-differences_{context}"] -= Differences between multi-network policy and network policy - -Although the `MultiNetworkPolicy` API implements the `NetworkPolicy` API, there are several important differences: - -* You must use the `MultiNetworkPolicy` API: -+ -[source,yaml] ----- -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy ----- - -* You must use the `multi-networkpolicy` resource name when using the CLI to interact with multi-network policies. For example, you can view a multi-network policy object with the `oc get multi-networkpolicy <name>` command where `<name>` is the name of a multi-network policy. - -* You must specify an annotation with the name of the network attachment definition that defines the macvlan or SR-IOV additional network: -+ -[source,yaml] ----- -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -metadata: - annotations: - k8s.v1.cni.cncf.io/policy-for: <network_name> ----- -+ --- -where: - -`<network_name>`:: Specifies the name of a network attachment definition. --- diff --git a/modules/nw-multi-network-policy-enable.adoc b/modules/nw-multi-network-policy-enable.adoc deleted file mode 100644 index ae2d77ca2dc3..000000000000 --- a/modules/nw-multi-network-policy-enable.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc - -:_content-type: PROCEDURE -[id="nw-multi-network-policy-enable_{context}"] -= Enabling multi-network policy for the cluster - -As a cluster administrator, you can enable multi-network policy support on your cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. Create the `multinetwork-enable-patch.yaml` file with the following YAML: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - useMultiNetworkPolicy: true ----- - -. 
Configure the cluster to enable multi-network policy: -+ -[source,terminal] ----- -$ oc patch network.operator.openshift.io cluster --type=merge --patch-file=multinetwork-enable-patch.yaml ----- -+ -.Example output -[source,text] ----- -network.operator.openshift.io/cluster patched ----- diff --git a/modules/nw-multitenant-global.adoc b/modules/nw-multitenant-global.adoc deleted file mode 100644 index d3642b8f4533..000000000000 --- a/modules/nw-multitenant-global.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// * networking/multitenant-isolation.adoc - -:_content-type: PROCEDURE -[id="nw-multitenant-global_{context}"] -= Disabling network isolation for a project - -You can disable network isolation for a project. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` role. - -.Procedure - -* Run the following command for the project: -+ -[source,terminal] ----- -$ oc adm pod-network make-projects-global <project1> <project2> ----- -+ -Alternatively, instead of specifying specific project names, you can use the -`--selector=<project_selector>` option to specify projects based upon an -associated label. diff --git a/modules/nw-multitenant-isolation.adoc b/modules/nw-multitenant-isolation.adoc deleted file mode 100644 index e8fdcfc0058d..000000000000 --- a/modules/nw-multitenant-isolation.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// * networking/multitenant-isolation.adoc - -:_content-type: PROCEDURE -[id="nw-multitenant-isolation_{context}"] -= Isolating a project - -You can isolate a project so that pods and services in other projects cannot -access its pods and services. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` role. - -.Procedure - -* To isolate the projects in the cluster, run the following command: -+ -[source,terminal] ----- -$ oc adm pod-network isolate-projects <project1> <project2> ----- -+ -Alternatively, instead of specifying specific project names, you can use the -`--selector=<project_selector>` option to specify projects based upon an -associated label. diff --git a/modules/nw-multitenant-joining.adoc b/modules/nw-multitenant-joining.adoc deleted file mode 100644 index 692527438a7b..000000000000 --- a/modules/nw-multitenant-joining.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// * networking/multitenant-isolation.adoc - -:_content-type: PROCEDURE -[id="nw-multitenant-joining_{context}"] -= Joining projects - -You can join two or more projects to allow network traffic between pods and -services in different projects. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster with a user that has the `cluster-admin` role. - -.Procedure - -. Use the following command to join projects to an existing project network: -+ -[source,terminal] ----- -$ oc adm pod-network join-projects --to=<project1> <project2> <project3> ----- -+ -Alternatively, instead of specifying specific project names, you can use the -`--selector=<project_selector>` option to specify projects based upon an -associated label. - -. Optional: Run the following command to view the pod networks that you have -joined together: -+ -[source,terminal] ----- -$ oc get netnamespaces ----- -+ -Projects in the same pod-network have the same network ID in the *NETID* column. 
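For reference, joined projects share a single virtual network ID in their `NetNamespace` objects. The following sketch shows the general shape only; the project names and the numeric ID are placeholders.

[source,yaml]
----
# Two joined projects reporting the same network ID; names and ID are placeholders.
apiVersion: network.openshift.io/v1
kind: NetNamespace
metadata:
  name: project1
netname: project1
netid: 5001       # matches the NETID column for both projects after joining
---
apiVersion: network.openshift.io/v1
kind: NetNamespace
metadata:
  name: project2
netname: project2
netid: 5001
----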
diff --git a/modules/nw-multus-add-pod.adoc b/modules/nw-multus-add-pod.adoc deleted file mode 100644 index 9c76da35cfaf..000000000000 --- a/modules/nw-multus-add-pod.adoc +++ /dev/null @@ -1,153 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/attaching-pod.adoc -// * networking/hardware_networks/add-pod.adoc - -ifeval::["{context}" == "configuring-sr-iov"] -:sriov: -endif::[] - -ifeval::["{product-version}" == "4.3"] -:bz: -endif::[] -ifeval::["{product-version}" == "4.4"] -:bz: -endif::[] -ifeval::["{product-version}" == "4.5"] -:bz: -endif::[] - -:_content-type: PROCEDURE -[id="nw-multus-add-pod_{context}"] -= Adding a pod to an additional network - -You can add a pod to an additional network. The pod continues to send normal cluster-related network traffic over the default network. - -When a pod is created additional networks are attached to it. However, if a pod already exists, you cannot attach additional networks to it. - -The pod must be in the same namespace as the additional network. - -ifdef::sriov[] -[NOTE] -===== -The SR-IOV Network Resource Injector adds the `resource` field to the first container in a pod automatically. - -If you are using an Intel network interface controller (NIC) in Data Plane Development Kit (DPDK) mode, only the first container in your pod is configured to access the NIC. Your SR-IOV additional network is configured for DPDK mode if the `deviceType` is set to `vfio-pci` in the `SriovNetworkNodePolicy` object. - -You can work around this issue by either ensuring that the container that needs access to the NIC is the first container defined in the `Pod` object or by disabling the Network Resource Injector. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1990953[BZ#1990953]. -===== - -ifdef::bz[] -[IMPORTANT] -==== -When specifying an SR-IOV hardware network for a `Deployment` object or a `ReplicationController` object, you must specify the namespace of the `NetworkAttachmentDefinition` object. For more information, see the following bugs: link:https://bugzilla.redhat.com/show_bug.cgi?id=1846333[BZ#1846333] and link:https://bugzilla.redhat.com/show_bug.cgi?id=1840962[BZ#1840962]. -==== -endif::bz[] -endif::sriov[] - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster. -ifdef::sriov[] -* Install the SR-IOV Operator. -* Create either an `SriovNetwork` object or an `SriovIBNetwork` object to attach the pod to. -endif::sriov[] - -.Procedure - -. Add an annotation to the `Pod` object. Only one of the following annotation formats can be used: - -.. To attach an additional network without any customization, add an annotation with the following format. Replace `<network>` with the name of the additional network to associate with the pod: -+ -[source,yaml] ----- -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: <network>[,<network>,...] <1> ----- -<1> To specify more than one additional network, separate each network -with a comma. Do not include whitespace between the comma. If you specify -the same additional network multiple times, that pod will have multiple network -interfaces attached to that network. - -.. 
To attach an additional network with customizations, add an annotation with the following format: -+ -[source,yaml] ----- -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "<network>", <1> - "namespace": "<namespace>", <2> - "default-route": ["<default-route>"] <3> - } - ] ----- -<1> Specify the name of the additional network defined by a `NetworkAttachmentDefinition` object. -<2> Specify the namespace where the `NetworkAttachmentDefinition` object is defined. -<3> Optional: Specify an override for the default route, such as `192.168.17.1`. - -. To create the pod, enter the following command. Replace `<name>` with the name of the pod. -+ -[source,terminal] ----- -$ oc create -f <name>.yaml ----- - -. Optional: To Confirm that the annotation exists in the `Pod` CR, enter the following command, replacing `<name>` with the name of the pod. -+ -[source,terminal] ----- -$ oc get pod <name> -o yaml ----- -+ -In the following example, the `example-pod` pod is attached to the `net1` -additional network: -+ -[source,terminal] ----- -$ oc get pod example-pod -o yaml -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: macvlan-bridge - k8s.v1.cni.cncf.io/network-status: |- <1> - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.128.2.14" - ], - "default": true, - "dns": {} - },{ - "name": "macvlan-bridge", - "interface": "net1", - "ips": [ - "20.2.2.100" - ], - "mac": "22:2f:60:a5:f8:00", - "dns": {} - }] - name: example-pod - namespace: default -spec: - ... -status: - ... ----- -<1> The `k8s.v1.cni.cncf.io/network-status` parameter is a JSON array of -objects. Each object describes the status of an additional network attached -to the pod. The annotation value is stored as a plain text value. - -ifeval::["{context}" == "configuring-sr-iov"] -:!sriov: -endif::[] - -ifdef::bz[] -:!bz: -endif::bz[] diff --git a/modules/nw-multus-advanced-annotations.adoc b/modules/nw-multus-advanced-annotations.adoc deleted file mode 100644 index e6bbe5d2738e..000000000000 --- a/modules/nw-multus-advanced-annotations.adoc +++ /dev/null @@ -1,205 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/attaching-pod.adoc - -:_content-type: PROCEDURE -[id="nw-multus-advanced-annotations_{context}"] -= Specifying pod-specific addressing and routing options - -When attaching a pod to an additional network, you may want to specify further properties -about that network in a particular pod. This allows you to change some aspects of routing, as well -as specify static IP addresses and MAC addresses. To accomplish this, you can use the JSON formatted annotations. - -.Prerequisites - -* The pod must be in the same namespace as the additional network. -* Install the OpenShift CLI (`oc`). -* You must log in to the cluster. -ifdef::sriov[] -* You must have the SR-IOV Operator installed and a `SriovNetwork` object defined. -endif::sriov[] - -.Procedure - -To add a pod to an additional network while specifying addressing and/or routing options, complete the following steps: - -. Edit the `Pod` resource definition. If you are editing an existing `Pod` resource, run the -following command to edit its definition in the default editor. Replace `<name>` -with the name of the `Pod` resource to edit. -+ -[source,terminal] ----- -$ oc edit pod <name> ----- - -. In the `Pod` resource definition, add the `k8s.v1.cni.cncf.io/networks` -parameter to the pod `metadata` mapping. 
The `k8s.v1.cni.cncf.io/networks` -accepts a JSON string of a list of objects that reference the name of `NetworkAttachmentDefinition` custom resource (CR) names -in addition to specifying additional properties. -+ -[source,yaml] ----- -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: '[<network>[,<network>,...]]' <1> ----- -<1> Replace `<network>` with a JSON object as shown in the following examples. The single quotes are required. - -. In the following example the annotation specifies which network attachment will have the default route, -using the `default-route` parameter. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: example-pod - annotations: - k8s.v1.cni.cncf.io/networks: ' - { - "name": "net1" - }, - { - "name": "net2", <1> - "default-route": ["192.0.2.1"] <2> - }' -spec: - containers: - - name: example-pod - command: ["/bin/bash", "-c", "sleep 2000000000000"] - image: centos/tools ----- -<1> The `name` key is the name of the additional network to associate -with the pod. -<2> The `default-route` key specifies a value of a gateway for traffic to be routed over if no other -routing entry is present in the routing table. If more than one `default-route` key is specified, -this will cause the pod to fail to become active. - -The default route will cause any traffic that is not specified in other routes to be routed to the gateway. - -[IMPORTANT] -==== -Setting the default route to an interface other than the default network interface for {product-title} -may cause traffic that is anticipated for pod-to-pod traffic to be routed over another interface. -==== - -To verify the routing properties of a pod, the `oc` command may be used to execute the `ip` command within a pod. - -[source,terminal] ----- -$ oc exec -it <pod_name> -- ip route ----- - -[NOTE] -==== -You may also reference the pod's `k8s.v1.cni.cncf.io/network-status` to see which additional network has been -assigned the default route, by the presence of the `default-route` key in the JSON-formatted list of objects. -==== - -To set a static IP address or MAC address for a pod you can use the JSON formatted annotations. This requires you create networks that specifically allow for this functionality. This can be specified in a rawCNIConfig for the CNO. - -. Edit the CNO CR by running the following command: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -The following YAML describes the configuration parameters for the CNO: - -.Cluster Network Operator YAML configuration -[source,yaml] ----- -name: <name> <1> -namespace: <namespace> <2> -rawCNIConfig: '{ <3> - ... -}' -type: Raw ----- -<1> Specify a name for the additional network attachment that you are -creating. The name must be unique within the specified `namespace`. - -<2> Specify the namespace to create the network attachment in. If -you do not specify a value, then the `default` namespace is used. - -<3> Specify the CNI plugin configuration in JSON format, which -is based on the following template. 
- -The following object describes the configuration parameters for utilizing static MAC address and IP address using the macvlan CNI plugin: - -.macvlan CNI plugin JSON configuration object using static IP and MAC address -[source,json] ----- -{ - "cniVersion": "0.3.1", - "name": "<name>", <1> - "plugins": [{ <2> - "type": "macvlan", - "capabilities": { "ips": true }, <3> - "master": "eth0", <4> - "mode": "bridge", - "ipam": { - "type": "static" - } - }, { - "capabilities": { "mac": true }, <5> - "type": "tuning" - }] -} ----- - -<1> Specifies the name for the additional network attachment to create. The name must be unique within the specified `namespace`. - -<2> Specifies an array of CNI plugin configurations. The first object specifies a macvlan plugin configuration and the second object specifies a tuning plugin configuration. - -<3> Specifies that a request is made to enable the static IP address functionality of the CNI plugin runtime configuration capabilities. - -<4> Specifies the interface that the macvlan plugin uses. - -<5> Specifies that a request is made to enable the static MAC address functionality of a CNI plugin. - -The above network attachment can be referenced in a JSON formatted annotation, along with keys to specify which static IP and MAC address will be assigned to a given pod. - -Edit the pod with: - -[source,terminal] ----- -$ oc edit pod <name> ----- - -.macvlan CNI plugin JSON configuration object using static IP and MAC address - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: example-pod - annotations: - k8s.v1.cni.cncf.io/networks: '[ - { - "name": "<name>", <1> - "ips": [ "192.0.2.205/24" ], <2> - "mac": "CA:FE:C0:FF:EE:00" <3> - } - ]' ----- - -<1> Use the `<name>` as provided when creating the `rawCNIConfig` above. - -<2> Provide an IP address including the subnet mask. - -<3> Provide the MAC address. - -[NOTE] -==== -Static IP addresses and MAC addresses do not have to be used at the same time, you may use them individually, or together. -==== - -To verify the IP address and MAC properties of a pod with additional networks, use the `oc` command to execute the ip command within a pod. - -[source,terminal] ----- -$ oc exec -it <pod_name> -- ip a ----- diff --git a/modules/nw-multus-bridge-object.adoc b/modules/nw-multus-bridge-object.adoc deleted file mode 100644 index 3829d4ec17b5..000000000000 --- a/modules/nw-multus-bridge-object.adoc +++ /dev/null @@ -1,119 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc -:_content-type: REFERENCE -[id="nw-multus-bridge-object_{context}"] -= Configuration for a bridge additional network - -The following object describes the configuration parameters for the bridge CNI -plugin: - -.Bridge CNI plugin JSON configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The `0.3.1` value is required. - -|`name` -|`string` -|The value for the `name` parameter you provided previously for the CNO configuration. - -|`type` -|`string` -|The name of the CNI plugin to configure: `bridge`. - -|`ipam` -|`object` -|The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. - -|`bridge` -|`string` -|Optional: Specify the name of the virtual bridge to use. If the bridge interface does not exist on the host, it is created. The default value is `cni0`. 
- -|`ipMasq` -|`boolean` -|Optional: Set to `true` to enable IP masquerading for traffic that leaves the virtual network. The source IP address for all traffic is rewritten to the bridge's IP address. If the bridge does not have an IP address, this setting has no effect. The default value is `false`. - -|`isGateway` -|`boolean` -|Optional: Set to `true` to assign an IP address to the bridge. The default value is `false`. - -|`isDefaultGateway` -|`boolean` -|Optional: Set to `true` to configure the bridge as the default gateway for the virtual network. The default value is `false`. If `isDefaultGateway` is set to `true`, then `isGateway` is also set to `true` automatically. - -|`forceAddress` -|`boolean` -|Optional: Set to `true` to allow assignment of a previously assigned IP address to the virtual bridge. When set to `false`, if an IPv4 address or an IPv6 address from overlapping subsets is assigned to the virtual bridge, an error occurs. The default value is `false`. - -|`hairpinMode` -|`boolean` -|Optional: Set to `true` to allow the virtual bridge to send an Ethernet frame back through the virtual port it was received on. This mode is also known as _reflective relay_. The default value is `false`. - -|`promiscMode` -|`boolean` -|Optional: Set to `true` to enable promiscuous mode on the bridge. The default value is `false`. - -|`vlan` -|`string` -|Optional: Specify a virtual LAN (VLAN) tag as an integer value. By default, no VLAN tag is assigned. - -|`preserveDefaultVlan` -|`string` -|Optional: Indicates whether the default vlan must be preserved on the `veth` end connected to the bridge. Defaults to true. - -|`vlanTrunk` -|`list` -|Optional: Assign a VLAN trunk tag. The default value is `none`. - -|`mtu` -|`string` -|Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. - -|`enabledad` -|`boolean` -|Optional: Enables duplicate address detection for the container side `veth`. The default value is `false`. - -|`macspoofchk` -|`boolean` -|Optional: Enables mac spoof check, limiting the traffic originating from the container to the mac address of the interface. The default value is `false`. -|==== - -[NOTE] -==== -The VLAN parameter configures the VLAN tag on the host end of the `veth` and also enables the `vlan_filtering` feature on the bridge interface. -==== - -[NOTE] -==== -To configure uplink for a L2 network you need to allow the vlan on the uplink interface by using the following command: - -[source,terminal] ----- -$ bridge vlan add vid VLAN_ID dev DEV ----- - -==== - - -[id="nw-multus-bridge-config-example_{context}"] -== bridge configuration example - -The following example configures an additional network named `bridge-net`: - -[source,json] ----- -{ - "cniVersion": "0.3.1", - "name": "bridge-net", - "type": "bridge", - "isGateway": true, - "vlan": 2, - "ipam": { - "type": "dhcp" - } -} ----- diff --git a/modules/nw-multus-create-network-apply.adoc b/modules/nw-multus-create-network-apply.adoc deleted file mode 100644 index 8c6103480ee9..000000000000 --- a/modules/nw-multus-create-network-apply.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// - -:_content-type: PROCEDURE -[id="nw-multus-create-network-apply_{context}"] -= Creating an additional network attachment by applying a YAML manifest - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. 
Create a YAML file with your additional network configuration, such as in the following example: -+ -[source,yaml] ----- -apiVersion: k8s.cni.cncf.io/v1 -kind: NetworkAttachmentDefinition -metadata: - name: next-net -spec: - config: |- - { - "cniVersion": "0.3.1", - "name": "work-network", - "type": "host-device", - "device": "eth1", - "ipam": { - "type": "dhcp" - } - } ----- - -. To create the additional network, enter the following command: -+ -[source,terminal] ----- -$ oc apply -f <file>.yaml ----- -+ --- -where: - -`<file>`:: Specifies the name of the file contained the YAML manifest. --- diff --git a/modules/nw-multus-create-network.adoc b/modules/nw-multus-create-network.adoc deleted file mode 100644 index b5b2177a7ddf..000000000000 --- a/modules/nw-multus-create-network.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -:_content-type: PROCEDURE -[id="nw-multus-create-network_{context}"] -= Creating an additional network attachment with the Cluster Network Operator - -The Cluster Network Operator (CNO) manages additional network definitions. When -you specify an additional network to create, the CNO creates the -`NetworkAttachmentDefinition` object automatically. - -[IMPORTANT] -==== -Do not edit the `NetworkAttachmentDefinition` objects that the Cluster Network -Operator manages. Doing so might disrupt network traffic on your additional -network. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Optional: Create the namespace for the additional networks: -+ -[source,terminal] ----- -$ oc create namespace <namespace_name> ----- - -. To edit the CNO configuration, enter the following command: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. Modify the CR that you are creating by adding the configuration for the -additional network that you are creating, as in the following example CR. -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - # ... - additionalNetworks: - - name: tertiary-net - namespace: namespace2 - type: Raw - rawCNIConfig: |- - { - "cniVersion": "0.3.1", - "name": "tertiary-net", - "type": "ipvlan", - "master": "eth1", - "mode": "l2", - "ipam": { - "type": "static", - "addresses": [ - { - "address": "192.168.1.23/24" - } - ] - } - } ----- - -. Save your changes and quit the text editor to commit your changes. - -.Verification - -* Confirm that the CNO created the `NetworkAttachmentDefinition` object by running the following command. There might be a delay before the CNO creates the object. -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -n <namespace> ----- -+ --- -where: - -`<namespace>`:: Specifies the namespace for the network attachment that you added to the CNO configuration. 
--- -+ -.Example output -[source,terminal] ----- -NAME AGE -test-network-1 14m ----- diff --git a/modules/nw-multus-delete-network.adoc b/modules/nw-multus-delete-network.adoc deleted file mode 100644 index 857ad601aaa1..000000000000 --- a/modules/nw-multus-delete-network.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/remove-additional-network.adoc - -:_content-type: PROCEDURE -[id="nw-multus-delete-network_{context}"] -= Removing an additional network attachment definition - -As a cluster administrator, you can remove an additional network from your -{product-title} cluster. The additional network is not removed from any pods it -is attached to. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -To remove an additional network from your cluster, complete the following steps: - -. Edit the Cluster Network Operator (CNO) in your default text editor by running -the following command: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. Modify the CR by removing the configuration from the `additionalNetworks` -collection for the network attachment definition you are removing. -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - additionalNetworks: [] <1> ----- -<1> If you are removing the configuration mapping for the only additional -network attachment definition in the `additionalNetworks` collection, you must -specify an empty collection. - -. Save your changes and quit the text editor to commit your changes. - -. Optional: Confirm that the additional network CR was deleted by running the following command: -+ -[source,terminal] ----- -$ oc get network-attachment-definition --all-namespaces ----- diff --git a/modules/nw-multus-edit-network.adoc b/modules/nw-multus-edit-network.adoc deleted file mode 100644 index 457177ba2039..000000000000 --- a/modules/nw-multus-edit-network.adoc +++ /dev/null @@ -1,51 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/edit-additional-network.adoc - -:_content-type: PROCEDURE -[id="nw-multus-edit-network_{context}"] -= Modifying an additional network attachment definition - -As a cluster administrator, you can make changes to an existing additional -network. Any existing pods attached to the additional network will not be updated. - -.Prerequisites - -* You have configured an additional network for your cluster. -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -To edit an additional network for your cluster, complete the following steps: - -. Run the following command to edit the Cluster Network Operator (CNO) CR in -your default text editor: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. In the `additionalNetworks` collection, update the additional network with -your changes. - -. Save your changes and quit the text editor to commit your changes. - -. Optional: Confirm that the CNO updated the `NetworkAttachmentDefinition` object by running the following command. Replace `<network-name>` with the name of the additional network to display. There might be a delay before the CNO updates the `NetworkAttachmentDefinition` object to reflect your changes. 
-+ -[source,terminal] ----- -$ oc get network-attachment-definitions <network-name> -o yaml ----- -+ -For example, the following console output displays a `NetworkAttachmentDefinition` object that is named `net1`: -+ -[source,terminal] ----- -$ oc get network-attachment-definitions net1 -o go-template='{{printf "%s\n" .spec.config}}' -{ "cniVersion": "0.3.1", "type": "macvlan", -"master": "ens5", -"mode": "bridge", -"ipam": {"type":"static","routes":[{"dst":"0.0.0.0/0","gw":"10.128.2.1"}],"addresses":[{"address":"10.128.2.100/23","gateway":"10.128.2.1"}],"dns":{"nameservers":["172.30.0.10"],"domain":"us-west-2.compute.internal","search":["us-west-2.compute.internal"]}} } ----- diff --git a/modules/nw-multus-host-device-object.adoc b/modules/nw-multus-host-device-object.adoc deleted file mode 100644 index 1e70eb1e4145..000000000000 --- a/modules/nw-multus-host-device-object.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc -:_content-type: REFERENCE -[id="nw-multus-host-device-object_{context}"] -= Configuration for a host device additional network - -[NOTE] -==== -Specify your network device by setting only one of the following parameters: `device`,`hwaddr`, `kernelpath`, or `pciBusID`. -==== - -The following object describes the configuration parameters for the host-device CNI plugin: - -// containernetworking/plugins/.../host-device.go#L50 -.Host device CNI plugin JSON configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The `0.3.1` value is required. - -|`name` -|`string` -|The value for the `name` parameter you provided previously for the CNO configuration. - -|`type` -|`string` -|The name of the CNI plugin to configure: `host-device`. - -|`device` -|`string` -|Optional: The name of the device, such as `eth0`. - -|`hwaddr` -|`string` -|Optional: The device hardware MAC address. - -|`kernelpath` -|`string` -|Optional: The Linux kernel device path, such as `/sys/devices/pci0000:00/0000:00:1f.6`. - -|`pciBusID` -|`string` -|Optional: The PCI address of the network device, such as `0000:00:1f.6`. -|==== - -[id="nw-multus-hostdev-config-example_{context}"] -== host-device configuration example - -The following example configures an additional network named `hostdev-net`: - -[source,json] ----- -{ - "cniVersion": "0.3.1", - "name": "hostdev-net", - "type": "host-device", - "device": "eth1" -} ----- \ No newline at end of file diff --git a/modules/nw-multus-ipam-object.adoc b/modules/nw-multus-ipam-object.adoc deleted file mode 100644 index b6d21ba540a4..000000000000 --- a/modules/nw-multus-ipam-object.adoc +++ /dev/null @@ -1,242 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc -// * networking/hardware_networks/configuring-sriov-net-attach.adoc -// * virt/virtual_machines/vm_networking/virt-defining-an-sriov-network.adoc - -// Because the Cluster Network Operator abstracts the configuration for -// Macvlan, including IPAM configuration, this must be provided as YAML -// for the Macvlan CNI plugin only. In the future other Multus plugins -// might be managed the same way by the CNO. 
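Because the host device parameters above also include `pciBusID`, the same plugin can select a device by PCI address rather than by interface name. The following sketch follows the `NetworkAttachmentDefinition` pattern shown earlier; the attachment name is hypothetical and the PCI address is the placeholder value from the parameter table.

[source,yaml]
----
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: hostdev-pci-net        # hypothetical attachment name
spec:
  config: |-
    {
      "cniVersion": "0.3.1",
      "name": "hostdev-pci-net",
      "type": "host-device",
      "pciBusID": "0000:00:1f.6",
      "ipam": {
        "type": "dhcp"
      }
    }
----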
- -ifeval::["{context}" == "configuring-sriov-net-attach"] -:sr-iov: -endif::[] - -:_content-type: CONCEPT -[id="nw-multus-ipam-object_{context}"] -= Configuration of IP address assignment for an additional network - -The IP address management (IPAM) Container Network Interface (CNI) plugin provides IP addresses for other CNI plugins. - -You can use the following IP address assignment types: - -- Static assignment. -- Dynamic assignment through a DHCP server. The DHCP server you specify must be reachable from the additional network. -- Dynamic assignment through the Whereabouts IPAM CNI plugin. - -//// -IMPORTANT: If you set the `type` parameter to the `DHCP` value, you cannot set -any other parameters. -//// - -[id="nw-multus-static_{context}"] -== Static IP address assignment configuration - -The following table describes the configuration for static IP address assignment: - -.`ipam` static configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`type` -|`string` -|The IPAM address type. The value `static` is required. - -|`addresses` -|`array` -|An array of objects specifying IP addresses to assign to the virtual interface. Both IPv4 and IPv6 IP addresses are supported. - -|`routes` -|`array` -|An array of objects specifying routes to configure inside the pod. - -|`dns` -|`array` -|Optional: An array of objects specifying the DNS configuration. - -|==== - -The `addresses` array requires objects with the following fields: - -.`ipam.addresses[]` array -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`address` -|`string` -|An IP address and network prefix that you specify. For example, if you specify `10.10.21.10/24`, then the additional network is assigned an IP address of `10.10.21.10` and the netmask is `255.255.255.0`. - -|`gateway` -|`string` -|The default gateway to route egress network traffic to. - -|==== - -.`ipam.routes[]` array -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`dst` -|`string` -|The IP address range in CIDR format, such as `192.168.17.0/24` or `0.0.0.0/0` for the default route. - -|`gw` -|`string` -|The gateway where network traffic is routed. - -|==== - -.`ipam.dns` object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`nameservers` -|`array` -|An array of one or more IP addresses for to send DNS queries to. - -|`domain` -|`array` -|The default domain to append to a hostname. For example, if the -domain is set to `example.com`, a DNS lookup query for `example-host` is -rewritten as `example-host.example.com`. - -|`search` -|`array` -|An array of domain names to append to an unqualified hostname, -such as `example-host`, during a DNS lookup query. - -|==== - -.Static IP address assignment configuration example -[source,json] ----- -{ - "ipam": { - "type": "static", - "addresses": [ - { - "address": "191.168.1.7/24" - } - ] - } -} ----- - -[id="nw-multus-dhcp_{context}"] -== Dynamic IP address (DHCP) assignment configuration - -The following JSON describes the configuration for dynamic IP address address assignment with DHCP. - -.Renewal of DHCP leases -[IMPORTANT] -==== -A pod obtains its original DHCP lease when it is created. The lease must be periodically renewed by a minimal DHCP server deployment running on the cluster. - -ifdef::sr-iov[] -The SR-IOV Network Operator does not create a DHCP server deployment; The Cluster Network Operator is responsible for creating the minimal DHCP server deployment. 
-endif::sr-iov[] - -To trigger the deployment of the DHCP server, you must create a shim network attachment by editing the Cluster Network Operator configuration, as in the following example: - -.Example shim network attachment definition -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - additionalNetworks: - - name: dhcp-shim - namespace: default - type: Raw - rawCNIConfig: |- - { - "name": "dhcp-shim", - "cniVersion": "0.3.1", - "type": "bridge", - "ipam": { - "type": "dhcp" - } - } - # ... ----- -==== - -.`ipam` DHCP configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`type` -|`string` -|The IPAM address type. The value `dhcp` is required. - -|==== - -.Dynamic IP address (DHCP) assignment configuration example -[source,json] ----- -{ - "ipam": { - "type": "dhcp" - } -} ----- - -[id="nw-multus-whereabouts_{context}"] -== Dynamic IP address assignment configuration with Whereabouts - -The Whereabouts CNI plugin allows the dynamic assignment of an IP address to an additional network without the use of a DHCP server. - -The following table describes the configuration for dynamic IP address assignment with Whereabouts: - -.`ipam` whereabouts configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`type` -|`string` -|The IPAM address type. The value `whereabouts` is required. - -|`range` -|`string` -|An IP address and range in CIDR notation. IP addresses are assigned from within this range of addresses. - -|`exclude` -|`array` -|Optional: A list of zero or more IP addresses and ranges in CIDR notation. IP addresses within an excluded address range are not assigned. - -|==== - -//// -[NOTE] -===== -Whereabouts can be used for both IPv4 and IPv6 addresses. -===== -//// - -.Dynamic IP address assignment configuration example that uses Whereabouts -[source,json] ----- -{ - "ipam": { - "type": "whereabouts", - "range": "192.0.2.192/27", - "exclude": [ - "192.0.2.192/30", - "192.0.2.196/32" - ] - } -} ----- - -ifdef::sr-iov[] -:!sr-iov: -endif::[] diff --git a/modules/nw-multus-ipvlan-object.adoc b/modules/nw-multus-ipvlan-object.adoc deleted file mode 100644 index bc71af986bd7..000000000000 --- a/modules/nw-multus-ipvlan-object.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -//37.1. IPVLAN overview -// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_networking/getting-started-with-ipvlan_configuring-and-managing-networking#ipvlan-overview_getting-started-with-ipvlan -:_content-type: REFERENCE - -[id="nw-multus-ipvlan-object_{context}"] -= Configuration for an IPVLAN additional network - -The following object describes the configuration parameters for the IPVLAN CNI plugin: - -.IPVLAN CNI plugin JSON configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The `0.3.1` value is required. - -|`name` -|`string` -|The value for the `name` parameter you provided previously for the CNO configuration. - -|`type` -|`string` -|The name of the CNI plugin to configure: `ipvlan`. - -|`ipam` -|`object` -|The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. This is required unless the plugin is chained. 
- -|`mode` -|`string` -|Optional: The operating mode for the virtual network. The value must be `l2`, `l3`, or `l3s`. The default value is `l2`. - -|`master` -|`string` -|Optional: The Ethernet interface to associate with the network attachment. If a `master` is not specified, the interface for the default network route is used. - -|`mtu` -|`integer` -|Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. - -|==== - -[NOTE] -==== -* The `ipvlan` object does not allow virtual interfaces to communicate with the `master` interface. Therefore the container will not be able to reach the host by using the `ipvlan` interface. Be sure that the container joins a network that provides connectivity to the host, such as a network supporting the Precision Time Protocol (`PTP`). -* A single `master` interface cannot simultaneously be configured to use both `macvlan` and `ipvlan`. -* For IP allocation schemes that cannot be interface agnostic, the `ipvlan` plugin can be chained with an earlier plugin that handles this logic. If the `master` is omitted, then the previous result must contain a single interface name for the `ipvlan` plugin to enslave. If `ipam` is omitted, then the previous result is used to configure the `ipvlan` interface. -==== - -[id="nw-multus-ipvlan-config-example_{context}"] -== ipvlan configuration example - -The following example configures an additional network named `ipvlan-net`: - -[source,json] ----- -{ - "cniVersion": "0.3.1", - "name": "ipvlan-net", - "type": "ipvlan", - "master": "eth1", - "mode": "l3", - "ipam": { - "type": "static", - "addresses": [ - { - "address": "192.168.10.10/24" - } - ] - } -} ----- \ No newline at end of file diff --git a/modules/nw-multus-macvlan-object.adoc b/modules/nw-multus-macvlan-object.adoc deleted file mode 100644 index b9a75cbb8b23..000000000000 --- a/modules/nw-multus-macvlan-object.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc -:_content-type: REFERENCE -[id="nw-multus-macvlan-object_{context}"] -= Configuration for a MACVLAN additional network - -The following object describes the configuration parameters for the macvlan CNI plugin: - -.MACVLAN CNI plugin JSON configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The `0.3.1` value is required. - -|`name` -|`string` -|The value for the `name` parameter you provided previously for the CNO configuration. - -|`type` -|`string` -|The name of the CNI plugin to configure: `macvlan`. - -|`ipam` -|`object` -|The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. - -|`mode` -|`string` -|Optional: Configures traffic visibility on the virtual network. Must be either `bridge`, `passthru`, `private`, or `vepa`. If a value is not provided, the default value is `bridge`. - -|`master` -|`string` -|Optional: The host network interface to associate with the newly created macvlan interface. If a value is not specified, then the default route interface is used. - -|`mtu` -|`string` -|Optional: The maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. 
- -|==== - -[NOTE] -==== -If you specify the `master` key for the plugin configuration, use a different physical network interface than the one that is associated with your primary network plugin to avoid possible conflicts. -==== - -[id="nw-multus-macvlan-config-example_{context}"] -== macvlan configuration example - -The following example configures an additional network named `macvlan-net`: - -[source,json] ----- -{ - "cniVersion": "0.3.1", - "name": "macvlan-net", - "type": "macvlan", - "master": "eth1", - "mode": "bridge", - "ipam": { - "type": "dhcp" - } -} ----- diff --git a/modules/nw-multus-remove-pod.adoc b/modules/nw-multus-remove-pod.adoc deleted file mode 100644 index 4fbe7471a544..000000000000 --- a/modules/nw-multus-remove-pod.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/removing-pod.adoc - -:_content-type: PROCEDURE -[id="nw-multus-remove-pod_{context}"] -= Removing a pod from an additional network - -You can remove a pod from an additional network only by deleting the pod. - -.Prerequisites - -* An additional network is attached to the pod. -* Install the OpenShift CLI (`oc`). -* Log in to the cluster. - -.Procedure - -* To delete the pod, enter the following command: -+ -[source,terminal] ----- -$ oc delete pod <name> -n <namespace> ----- -+ --- -* `<name>` is the name of the pod. -* `<namespace>` is the namespace that contains the pod. --- diff --git a/modules/nw-multus-vlan-object.adoc b/modules/nw-multus-vlan-object.adoc deleted file mode 100644 index d1c23a3b753a..000000000000 --- a/modules/nw-multus-vlan-object.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-additional-network.adoc - -//37.1. VLAN overview -// -:_content-type: REFERENCE -[id="nw-multus-vlan-object_{context}"] -= Configuration for an VLAN additional network - -The following object describes the configuration parameters for the VLAN CNI plugin: - -.VLAN CNI plugin JSON configuration object -[cols=".^2,.^2,.^6",options="header"] -|==== -|Field|Type|Description - -|`cniVersion` -|`string` -|The CNI specification version. The `0.3.1` value is required. - -|`name` -|`string` -|The value for the `name` parameter you provided previously for the CNO configuration. - -|`type` -|`string` -|The name of the CNI plugin to configure: `vlan`. - -|`master` -|`string` -|The Ethernet interface to associate with the network attachment. If a `master` is not specified, the interface for the default network route is used. - -|`vlanId` -|`integer` -|Set the id of the vlan. - -|`ipam` -|`object` -|The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. - -|`mtu` -|`integer` -|Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. - -|`dns` -|`integer` -|Optional: DNS information to return, for example, a priority-ordered list of DNS nameservers. - -|`linkInContainer` -|`boolean` -|Optional: Specifies if the master interface is in the container network namespace or the main network namespace. 
- -|==== - -[id="nw-multus-vlan-config-example_{context}"] -== vlan configuration example - -The following example configures an additional network named `vlan-net`: - -[source,json] ----- -{ - "name": "vlan-net", - "cniVersion": "0.3.1", - "type": "vlan", - "master": "eth0", - "mtu": 1500, - "vlanId": 5, - "linkInContainer": false, - "ipam": { - "type": "host-local", - "subnet": "10.1.1.0/24" - }, - "dns": { - "nameservers": [ "10.1.1.1", "8.8.8.8" ] - } -} ----- \ No newline at end of file diff --git a/modules/nw-mutual-tls-auth.adoc b/modules/nw-mutual-tls-auth.adoc deleted file mode 100644 index 407b4789be94..000000000000 --- a/modules/nw-mutual-tls-auth.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ingress-operator.adoc - -:_content-type: PROCEDURE -[id=nw-mutual-tls-auth_{context}] -= Configuring mutual TLS authentication - -You can configure the Ingress Controller to enable mutual TLS (mTLS) authentication by setting a `spec.clientTLS` value. The `clientTLS` value configures the Ingress Controller to verify client certificates. This configuration includes setting a `clientCA` value, which is a reference to a config map. The config map contains the PEM-encoded CA certificate bundle that is used to verify a client's certificate. Optionally, you can also configure a list of certificate subject filters. - -If the `clientCA` value specifies an X509v3 certificate revocation list (CRL) distribution point, the Ingress Operator downloads and manages a CRL config map based on the HTTP URI X509v3 `CRL Distribution Point` specified in each provided certificate. The Ingress Controller uses this config map during mTLS/TLS negotiation. Requests that do not provide valid certificates are rejected. - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have a PEM-encoded CA certificate bundle. -* If your CA bundle references a CRL distribution point, you must have also included the end-entity or leaf certificate to the client CA bundle. This certificate must have included an HTTP URI under `CRL Distribution Points`, as described in RFC 5280. For example: -+ -[source,terminal] ----- - Issuer: C=US, O=Example Inc, CN=Example Global G2 TLS RSA SHA256 2020 CA1 - Subject: SOME SIGNED CERT X509v3 CRL Distribution Points: - Full Name: - URI:http://crl.example.com/example.crl ----- - -.Procedure -. In the `openshift-config` namespace, create a config map from your CA bundle: -+ -[source,terminal] ----- -$ oc create configmap \ - router-ca-certs-default \ - --from-file=ca-bundle.pem=client-ca.crt \// <1> - -n openshift-config ----- -<1> The config map data key must be `ca-bundle.pem`, and the data value must be a CA certificate in PEM format. - -. Edit the `IngressController` resource in the `openshift-ingress-operator` project: -+ -[source,terminal] ----- -$ oc edit IngressController default -n openshift-ingress-operator ----- - -. 
Add the `spec.clientTLS` field and subfields to configure mutual TLS: -+ -.Sample `IngressController` CR for a `clientTLS` profile that specifies filtering patterns -[source,yaml] ----- - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: default - namespace: openshift-ingress-operator - spec: - clientTLS: - clientCertificatePolicy: Required - clientCA: - name: router-ca-certs-default - allowedSubjectPatterns: - - "^/CN=example.com/ST=NC/C=US/O=Security/OU=OpenShift$" ----- diff --git a/modules/nw-ne-comparing-ingress-route.adoc b/modules/nw-ne-comparing-ingress-route.adoc deleted file mode 100644 index cc4def642704..000000000000 --- a/modules/nw-ne-comparing-ingress-route.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/understanding-networking.adoc - -[id="nw-ne-comparing-ingress-route_{context}"] -= Comparing routes and Ingress -The Kubernetes Ingress resource in {product-title} implements the Ingress Controller with a shared router service that runs as a pod inside the cluster. The most common way to manage Ingress traffic is with the Ingress Controller. You can scale and replicate this pod like any other regular pod. This router service is based on link:http://www.haproxy.org/[HAProxy], which is an open source load balancer solution. - -The {product-title} route provides Ingress traffic to services in the cluster. Routes provide advanced features that might not be supported by standard Kubernetes Ingress Controllers, such as TLS re-encryption, TLS passthrough, and split traffic for blue-green deployments. - -Ingress traffic accesses services in the cluster through a route. Routes and Ingress are the main resources for handling Ingress traffic. Ingress provides features similar to a route, such as accepting external requests and delegating them based on the route. However, with Ingress you can only allow certain types of connections: HTTP/2, HTTPS and server name identification (SNI), and TLS with certificate. In {product-title}, routes are generated to meet the conditions specified by the Ingress resource. diff --git a/modules/nw-ne-openshift-dns.adoc b/modules/nw-ne-openshift-dns.adoc deleted file mode 100644 index 3bf03d6c308f..000000000000 --- a/modules/nw-ne-openshift-dns.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// * understanding-networking.adoc - - -[id="nw-ne-openshift-dns_{context}"] -= {product-title} DNS - -If you are running multiple services, such as front-end and back-end services for -use with multiple pods, environment variables are created for user names, -service IPs, and more so the front-end pods can communicate with the back-end -services. If the service is deleted and recreated, a new IP address can be -assigned to the service, and requires the front-end pods to be recreated to pick -up the updated values for the service IP environment variable. Additionally, the -back-end service must be created before any of the front-end pods to ensure that -the service IP is generated properly, and that it can be provided to the -front-end pods as an environment variable. - -For this reason, {product-title} has a built-in DNS so that the services can be -reached by the service DNS as well as the service IP/port. 
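For illustration only, the following minimal sketch (not part of the original module) assumes a back-end service named `backend` in a namespace named `my-app`; both names are placeholders. Because the cluster DNS resolves the service name on every lookup, front-end pods can connect by name and do not need to be recreated when the service IP address changes.

[source,yaml]
----
# Hypothetical Service manifest used only to illustrate service DNS names.
# Pods in the same namespace can reach it as "backend"; pods in other
# namespaces can use "backend.my-app.svc.cluster.local".
apiVersion: v1
kind: Service
metadata:
  name: backend     # placeholder name
  namespace: my-app # placeholder namespace
spec:
  selector:
    app: backend
  ports:
  - protocol: TCP
    port: 8080        # port exposed by the service
    targetPort: 8080  # port the back-end pods listen on
----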
diff --git a/modules/nw-ne-openshift-ingress.adoc b/modules/nw-ne-openshift-ingress.adoc deleted file mode 100644 index e7daf123f7fb..000000000000 --- a/modules/nw-ne-openshift-ingress.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// * understanding-networking.adoc - - -[id="nw-ne-openshift-ingress_{context}"] -= {product-title} Ingress Operator -When you create your {product-title} cluster, pods and services running on the cluster are each allocated their own IP addresses. The IP addresses are accessible to other pods and services running nearby but are not accessible to outside clients. The Ingress Operator implements the `IngressController` API and is the component responsible for enabling external access to {product-title} cluster services. - -ifndef::openshift-rosa,openshift-dedicated[] -The Ingress Operator makes it possible for external clients to access your service by deploying and managing one or more HAProxy-based -link:https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/[Ingress Controllers] to handle routing. You can use the Ingress Operator to route traffic by specifying {product-title} `Route` and Kubernetes `Ingress` resources. Configurations within the Ingress Controller, such as the ability to define `endpointPublishingStrategy` type and internal load balancing, provide ways to publish Ingress Controller endpoints. -endif::[] - -ifdef::openshift-rosa,openshift-dedicated[] -The Ingress Operator makes it possible for external clients to access your service by deploying and managing one or more HAProxy-based link:https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/[Ingress Controllers] to handle routing. Red Hat Site Reliability Engineers (SRE) manage the Ingress Operator for {product-title} clusters. While you cannot alter the settings for the Ingress Operator, you may view the default Ingress Controller configurations, status, and logs as well as the Ingress Operator status. -endif::[] \ No newline at end of file diff --git a/modules/nw-ne-ways-to-manage-ingress-traffic.adoc b/modules/nw-ne-ways-to-manage-ingress-traffic.adoc deleted file mode 100644 index a6b551352a98..000000000000 --- a/modules/nw-ne-ways-to-manage-ingress-traffic.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/understanding-networking.adoc - -[id="nw-ne-ways-to-manage-ingress-traffic_{context}"] -= Ways to manage Ingress Controller traffic -The following table provides an overview of ways that administrators can manage their Ingress traffic: -[cols="1,2",options="header"] -|=== -|Method |Description - -|Ingress -|Ingress accept external requests and proxy them based on the route. An Ingress is a Kubernetes resource that provides some of the same features as routes (which is an {product-title} resource). You can only allow certain types of traffic: HTTP, HTTPS and server name identification (SNI), and TLS with SNI. In {product-title}, routes are generated to meet the conditions specified by the Ingress object. - -|Route -|Routes provide Ingress traffic to services in the cluster. The concept of Routes in {product-title} serve a similar purpose as Kubernetes Ingress objects and provide more features. Routes provide advanced features that may not be supported by Kubernetes Ingress Controllers through a standard interface, such as TLS re-encryption, TLS passthrough, and split traffic for blue-green deployments. 
- -|External load balancer -|This resource instructs {product-title} to create a load balancer in a cloud environment. - -|Service external IP -|This method instructs {product-title} to set NAT rules to redirect traffic from one of the cluster IPs to the container. - -|Node Port -|With this method, {product-title} exposes a service on a static port on the node IP address. You must ensure that the external IP addresses are properly routed to the nodes. - -|=== diff --git a/modules/nw-network-config.adoc b/modules/nw-network-config.adoc deleted file mode 100644 index 39e913bed207..000000000000 --- a/modules/nw-network-config.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// TODO - possibly delete this file -// Or does it add actual value? - -// Module included in the following assemblies: -// -// * networking/cluster-network-operator.adoc -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc - -[id="nw-network-config_{context}"] -= Network configuration phases - -There are two phases prior to {product-title} installation where you can customize the network configuration. - -Phase 1:: You can customize the following network-related fields in the `install-config.yaml` file before you create the manifest files: -+ -* `networking.networkType` -* `networking.clusterNetwork` -* `networking.serviceNetwork` -* `networking.machineNetwork` -+ -For more information on these fields, refer to _Installation configuration parameters_. -+ -[NOTE] -==== -Set the `networking.machineNetwork` to match the CIDR that the preferred NIC resides in. -==== -+ -[IMPORTANT] -==== -The CIDR range `172.17.0.0/16` is reserved by libVirt. You cannot use this range or any range that overlaps with this range for any networks in your cluster. -==== - -Phase 2:: After creating the manifest files by running `openshift-install create manifests`, you can define a customized Cluster Network Operator manifest with only the fields you want to modify. You can use the manifest to specify advanced network configuration. - -You cannot override the values specified in phase 1 in the `install-config.yaml` file during phase 2. However, you can further customize the network plugin during phase 2. diff --git a/modules/nw-network-flows-create.adoc b/modules/nw-network-flows-create.adoc deleted file mode 100644 index 0b95c1d4debd..000000000000 --- a/modules/nw-network-flows-create.adoc +++ /dev/null @@ -1,95 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/tracking-network-flows.adoc - -:_content-type: PROCEDURE -[id="nw-network-flows-create_{context}"] -= Adding destinations for network flows collectors - -As a cluster administrator, you can configure the Cluster Network Operator (CNO) to send network flows metadata about the pod network to a network flows collector. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). 
-* You are logged in to the cluster with a user with `cluster-admin` privileges. -* You have a network flows collector and know the IP address and port that it listens on. - -.Procedure - -. Create a patch file that specifies the network flows collector type and the IP address and port information of the collectors: -+ -[source,terminal] ----- -spec: - exportNetworkFlows: - netFlow: - collectors: - - 192.168.1.99:2056 ----- - -. Configure the CNO with the network flows collectors: -+ -[source,terminal] ----- -$ oc patch network.operator cluster --type merge -p "$(cat <file_name>.yaml)" ----- -+ -.Example output -[source,terminal] ----- -network.operator.openshift.io/cluster patched ----- - -.Verification - -Verification is not typically necessary. You can run the following command to confirm that Open vSwitch (OVS) on each node is configured to send network flows records to one or more collectors. - -. View the Operator configuration to confirm that the `exportNetworkFlows` field is configured: -+ -[source,terminal] ----- -$ oc get network.operator cluster -o jsonpath="{.spec.exportNetworkFlows}" ----- -+ -.Example output -[source,terminal] ----- -{"netFlow":{"collectors":["192.168.1.99:2056"]}} ----- - -. View the network flows configuration in OVS from each node: -+ -[source,terminal] ----- -$ for pod in $(oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-node -o jsonpath='{range@.items[*]}{.metadata.name}{"\n"}{end}'); - do ; - echo; - echo $pod; - oc -n openshift-ovn-kubernetes exec -c ovnkube-node $pod \ - -- bash -c 'for type in ipfix sflow netflow ; do ovs-vsctl find $type ; done'; -done ----- -+ -.Example output -[source,terminal] ----- -ovnkube-node-xrn4p -_uuid : a4d2aaca-5023-4f3d-9400-7275f92611f9 -active_timeout : 60 -add_id_to_interface : false -engine_id : [] -engine_type : [] -external_ids : {} -targets : ["192.168.1.99:2056"] - -ovnkube-node-z4vq9 -_uuid : 61d02fdb-9228-4993-8ff5-b27f01a29bd6 -active_timeout : 60 -add_id_to_interface : false -engine_id : [] -engine_type : [] -external_ids : {} -targets : ["192.168.1.99:2056"]- - -... ----- diff --git a/modules/nw-network-flows-delete.adoc b/modules/nw-network-flows-delete.adoc deleted file mode 100644 index 3581197329ed..000000000000 --- a/modules/nw-network-flows-delete.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/tracking-network-flows.adoc - -:_content-type: PROCEDURE -[id="nw-network-flows-delete_{context}"] -= Deleting all destinations for network flows collectors - -As a cluster administrator, you can configure the Cluster Network Operator (CNO) to stop sending network flows metadata to a network flows collector. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. 
Remove all network flows collectors: -+ -[source,terminal] ----- -$ oc patch network.operator cluster --type='json' \ - -p='[{"op":"remove", "path":"/spec/exportNetworkFlows"}]' ----- -+ -.Example output -[source,terminal] ----- -network.operator.openshift.io/cluster patched ----- diff --git a/modules/nw-network-flows-object.adoc b/modules/nw-network-flows-object.adoc deleted file mode 100644 index c9f82c13f3fb..000000000000 --- a/modules/nw-network-flows-object.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/tracking-network-flows.adoc - -[id="nw-network-flows-object_{context}"] -= Network object configuration for tracking network flows - -The fields for configuring network flows collectors in the Cluster Network Operator (CNO) are shown in the following table: - -.Network flows configuration -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`metadata.name` -|`string` -|The name of the CNO object. This name is always `cluster`. - -|`spec.exportNetworkFlows` -|`object` -|One or more of `netFlow`, `sFlow`, or `ipfix`. - -|`spec.exportNetworkFlows.netFlow.collectors` -|`array` -|A list of IP address and network port pairs for up to 10 collectors. - -|`spec.exportNetworkFlows.sFlow.collectors` -|`array` -|A list of IP address and network port pairs for up to 10 collectors. - -|`spec.exportNetworkFlows.ipfix.collectors` -|`array` -|A list of IP address and network port pairs for up to 10 collectors. -|==== - -After applying the following manifest to the CNO, the Operator configures Open vSwitch (OVS) on each node in the cluster to send network flows records to the NetFlow collector that is listening at `192.168.1.99:2056`. - -.Example configuration for tracking network flows -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - exportNetworkFlows: - netFlow: - collectors: - - 192.168.1.99:2056 ----- diff --git a/modules/nw-network-observability-operator.adoc b/modules/nw-network-observability-operator.adoc deleted file mode 100644 index 8fb8ec331221..000000000000 --- a/modules/nw-network-observability-operator.adoc +++ /dev/null @@ -1,90 +0,0 @@ -// Module included in the following assemblies: -// * networking/network_observability/understanding-network-observability-operator.adoc - -:_content-type: PROCEDURE -[id="nw-network-observability-operator_{context}"] -= Viewing statuses - -The Network Observability Operator provides the Flow Collector API. When a Flow Collector resource is created, it deploys pods and services to create and store network flows in the Loki log store, as well as to display dashboards, metrics, and flows in the {product-title} web console. - -.Procedure - -. Run the following command to view the state of `FlowCollector`: -+ -[source,terminal] ----- -$ oc get flowcollector/cluster ----- -+ -.Example output ----- -NAME AGENT SAMPLING (EBPF) DEPLOYMENT MODEL STATUS -cluster EBPF 50 DIRECT Ready ----- - -. 
Check the status of pods running in the `netobserv` namespace by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n netobserv ----- -+ -.Example output ----- -NAME READY STATUS RESTARTS AGE -flowlogs-pipeline-56hbp 1/1 Running 0 147m -flowlogs-pipeline-9plvv 1/1 Running 0 147m -flowlogs-pipeline-h5gkb 1/1 Running 0 147m -flowlogs-pipeline-hh6kf 1/1 Running 0 147m -flowlogs-pipeline-w7vv5 1/1 Running 0 147m -netobserv-plugin-cdd7dc6c-j8ggp 1/1 Running 0 147m ----- - -`flowlogs-pipeline` pods collect flows, enriches the collected flows, then send flows to the Loki storage. -`netobserv-plugin` pods create a visualization plugin for the {product-title} Console. - -. Check the status of pods running in the namespace `netobserv-privileged` by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n netobserv-privileged ----- -+ -.Example output ----- -NAME READY STATUS RESTARTS AGE -netobserv-ebpf-agent-4lpp6 1/1 Running 0 151m -netobserv-ebpf-agent-6gbrk 1/1 Running 0 151m -netobserv-ebpf-agent-klpl9 1/1 Running 0 151m -netobserv-ebpf-agent-vrcnf 1/1 Running 0 151m -netobserv-ebpf-agent-xf5jh 1/1 Running 0 151m ----- - -`netobserv-ebpf-agent` pods monitor network interfaces of the nodes to get flows and send them to `flowlogs-pipeline` pods. - -. If you are using a Loki Operator, check the status of pods running in the `openshift-operators-redhat` namespace by entering the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-operators-redhat ----- -+ -.Example output ----- -NAME READY STATUS RESTARTS AGE -loki-operator-controller-manager-5f6cff4f9d-jq25h 2/2 Running 0 18h -lokistack-compactor-0 1/1 Running 0 18h -lokistack-distributor-654f87c5bc-qhkhv 1/1 Running 0 18h -lokistack-distributor-654f87c5bc-skxgm 1/1 Running 0 18h -lokistack-gateway-796dc6ff7-c54gz 2/2 Running 0 18h -lokistack-index-gateway-0 1/1 Running 0 18h -lokistack-index-gateway-1 1/1 Running 0 18h -lokistack-ingester-0 1/1 Running 0 18h -lokistack-ingester-1 1/1 Running 0 18h -lokistack-ingester-2 1/1 Running 0 18h -lokistack-querier-66747dc666-6vh5x 1/1 Running 0 18h -lokistack-querier-66747dc666-cjr45 1/1 Running 0 18h -lokistack-querier-66747dc666-xh8rq 1/1 Running 0 18h -lokistack-query-frontend-85c6db4fbd-b2xfb 1/1 Running 0 18h -lokistack-query-frontend-85c6db4fbd-jm94f 1/1 Running 0 18h ----- \ No newline at end of file diff --git a/modules/nw-network-plugin-migration-process.adoc b/modules/nw-network-plugin-migration-process.adoc deleted file mode 100644 index 97f5465b0c6e..000000000000 --- a/modules/nw-network-plugin-migration-process.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc -// * networking/openshift_sdn/migrate-to-openshift-sdn.adoc - -ifeval::["{context}" == "migrate-to-openshift-sdn"] -:sdn: OpenShift SDN -:previous-sdn: OVN-Kubernetes -:type: OpenShiftSDN -endif::[] -ifeval::["{context}" == "migrate-from-openshift-sdn"] -:sdn: OVN-Kubernetes -:previous-sdn: OpenShift SDN -:type: OVNKubernetes -endif::[] - -[id="how-the-migration-process-works_{context}"] -= How the migration process works - -The following table summarizes the migration process by segmenting between the user-initiated steps in the process and the actions that the migration performs in response. 
- -.Migrating to {sdn} from {previous-sdn} -[cols="1,1a",options="header"] -|=== - -|User-initiated steps|Migration activity - -| -Set the `migration` field of the `Network.operator.openshift.io` custom resource (CR) named `cluster` to `{type}`. Make sure the `migration` field is `null` before setting it to a value. -| -Cluster Network Operator (CNO):: Updates the status of the `Network.config.openshift.io` CR named `cluster` accordingly. -Machine Config Operator (MCO):: Rolls out an update to the systemd configuration necessary for {sdn}; the MCO updates a single machine per pool at a time by default, causing the total time the migration takes to increase with the size of the cluster. - -|Update the `networkType` field of the `Network.config.openshift.io` CR. -| -CNO:: Performs the following actions: -+ --- -* Destroys the {previous-sdn} control plane pods. -* Deploys the {sdn} control plane pods. -* Updates the Multus objects to reflect the new network plugin. --- - -| -Reboot each node in the cluster. -| -Cluster:: As nodes reboot, the cluster assigns IP addresses to pods on the {sdn} cluster network. - -|=== - -ifeval::["{context}" == "migrate-from-openshift-sdn"] -If a rollback to OpenShift SDN is required, the following table describes the process. - -.Performing a rollback to OpenShift SDN -[cols="1,1a",options="header"] -|=== - -|User-initiated steps|Migration activity - -|Suspend the MCO to ensure that it does not interrupt the migration. -|The MCO stops. - -| -Set the `migration` field of the `Network.operator.openshift.io` custom resource (CR) named `cluster` to `OpenShiftSDN`. Make sure the `migration` field is `null` before setting it to a value. -| -CNO:: Updates the status of the `Network.config.openshift.io` CR named `cluster` accordingly. - -|Update the `networkType` field. -| -CNO:: Performs the following actions: -+ --- -* Destroys the OVN-Kubernetes control plane pods. -* Deploys the OpenShift SDN control plane pods. -* Updates the Multus objects to reflect the new network plugin. --- - -| -Reboot each node in the cluster. -| -Cluster:: As nodes reboot, the cluster assigns IP addresses to pods on the OpenShift-SDN network. - -| -Enable the MCO after all nodes in the cluster reboot. -| -MCO:: Rolls out an update to the systemd configuration necessary for OpenShift SDN; the MCO updates a single machine per pool at a time by default, so the total time the migration takes increases with the size of the cluster. - -|=== -endif::[] - -ifdef::sdn[] -:!sdn: -endif::[] -ifdef::previous-sdn[] -:!previous-sdn: -endif::[] -ifdef::type[] -:!type: -endif::[] diff --git a/modules/nw-networking-glossary-terms.adoc b/modules/nw-networking-glossary-terms.adoc deleted file mode 100644 index 1acfc747451e..000000000000 --- a/modules/nw-networking-glossary-terms.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/understanding-networking.adoc - -:_content-type: REFERENCE -[id="nw-networking-glossary-terms_{context}"] -= Glossary of common terms for {product-title} networking - -This glossary defines common terms that are used in the networking content. - -authentication:: -To control access to an {product-title} cluster, a cluster administrator can configure user authentication and ensure only approved users access the cluster. To interact with an {product-title} cluster, you must authenticate to the {product-title} API. 
You can authenticate by providing an OAuth access token or an X.509 client certificate in your requests to the {product-title} API. - -AWS Load Balancer Operator:: -The AWS Load Balancer (ALB) Operator deploys and manages an instance of the `aws-load-balancer-controller`. - -Cluster Network Operator:: -The Cluster Network Operator (CNO) deploys and manages the cluster network components in an {product-title} cluster. This includes deployment of the Container Network Interface (CNI) network plugin selected for the cluster during installation. - -config map:: -A config map provides a way to inject configuration data into pods. You can reference the data stored in a config map in a volume of type `ConfigMap`. Applications running in a pod can use this data. - -custom resource (CR):: -A CR is extension of the Kubernetes API. You can create custom resources. - -DNS:: -Cluster DNS is a DNS server which serves DNS records for Kubernetes services. Containers started by Kubernetes automatically include this DNS server in their DNS searches. - -DNS Operator:: -The DNS Operator deploys and manages CoreDNS to provide a name resolution service to pods. This enables DNS-based Kubernetes Service discovery in {product-title}. - -deployment:: -A Kubernetes resource object that maintains the life cycle of an application. - -domain:: -Domain is a DNS name serviced by the Ingress Controller. - -egress:: -The process of data sharing externally through a network’s outbound traffic from a pod. - -External DNS Operator:: -The External DNS Operator deploys and manages ExternalDNS to provide the name resolution for services and routes from the external DNS provider to {product-title}. - -HTTP-based route:: -An HTTP-based route is an unsecured route that uses the basic HTTP routing protocol and exposes a service on an unsecured application port. - -Ingress:: -The Kubernetes Ingress resource in {product-title} implements the Ingress Controller with a shared router service that runs as a pod inside the cluster. - -Ingress Controller:: -The Ingress Operator manages Ingress Controllers. Using an Ingress Controller is the most common way to allow external access to an {product-title} cluster. - -installer-provisioned infrastructure:: -The installation program deploys and configures the infrastructure that the cluster runs on. - -kubelet:: -A primary node agent that runs on each node in the cluster to ensure that containers are running in a pod. - -Kubernetes NMState Operator:: -The Kubernetes NMState Operator provides a Kubernetes API for performing state-driven network configuration across the {product-title} cluster’s nodes with NMState. - -kube-proxy:: -Kube-proxy is a proxy service which runs on each node and helps in making services available to the external host. It helps in forwarding the request to correct containers and is capable of performing primitive load balancing. - -load balancers:: -{product-title} uses load balancers for communicating from outside the cluster with services running in the cluster. - -MetalLB Operator:: -As a cluster administrator, you can add the MetalLB Operator to your cluster so that when a service of type `LoadBalancer` is added to the cluster, MetalLB can add an external IP address for the service. - -multicast:: -With IP multicast, data is broadcast to many IP addresses simultaneously. - -namespaces:: -A namespace isolates specific system resources that are visible to all processes. Inside a namespace, only processes that are members of that namespace can see those resources. 
- -networking:: -Network information of a {product-title} cluster. - -node:: -A worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. - -{product-title} Ingress Operator:: -The Ingress Operator implements the `IngressController` API and is the component responsible for enabling external access to {product-title} services. - -pod:: -One or more containers with shared resources, such as volume and IP addresses, running in your {product-title} cluster. -A pod is the smallest compute unit defined, deployed, and managed. - -PTP Operator:: -The PTP Operator creates and manages the `linuxptp` services. - -route:: -The {product-title} route provides Ingress traffic to services in the cluster. Routes provide advanced features that might not be supported by standard Kubernetes Ingress Controllers, such as TLS re-encryption, TLS passthrough, and split traffic for blue-green deployments. - -scaling:: -Increasing or decreasing the resource capacity. - -service:: -Exposes a running application on a set of pods. - -Single Root I/O Virtualization (SR-IOV) Network Operator:: -The Single Root I/O Virtualization (SR-IOV) Network Operator manages the SR-IOV network devices and network attachments in your cluster. - -software-defined networking (SDN):: -{product-title} uses a software-defined networking (SDN) approach to provide a unified cluster network that enables communication between pods across the {product-title} cluster. - -Stream Control Transmission Protocol (SCTP):: -SCTP is a reliable message based protocol that runs on top of an IP network. - -taint:: -Taints and tolerations ensure that pods are scheduled onto appropriate nodes. You can apply one or more taints on a node. - -toleration:: -You can apply tolerations to pods. Tolerations allow the scheduler to schedule pods with matching taints. - -web console:: -A user interface (UI) to manage {product-title}. diff --git a/modules/nw-networkpolicy-about.adoc b/modules/nw-networkpolicy-about.adoc deleted file mode 100644 index eaa91fd2b69b..000000000000 --- a/modules/nw-networkpolicy-about.adoc +++ /dev/null @@ -1,175 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/about-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: CONCEPT -[id="nw-networkpolicy-about_{context}"] -= About network policy - -In a cluster using a network plugin that supports Kubernetes network policy, network isolation is controlled entirely by `NetworkPolicy` objects. -In {product-title} {product-version}, OpenShift SDN supports using network policy in its default network isolation mode. - -[WARNING] -==== -Network policy does not apply to the host network namespace. Pods with host networking enabled are unaffected by network policy rules. However, pods connecting to the host-networked pods might be affected by the network policy rules. - -Network policies cannot block traffic from localhost or from their resident nodes. -==== - -By default, all pods in a project are accessible from other pods and network endpoints. To isolate one or more pods in a project, you can create `NetworkPolicy` objects in that project to indicate the allowed incoming connections. Project administrators can create and delete `NetworkPolicy` objects within their own project. 
- -If a pod is matched by selectors in one or more `NetworkPolicy` objects, then the pod will accept only connections that are allowed by at least one of those `NetworkPolicy` objects. A pod that is not selected by any `NetworkPolicy` objects is fully accessible. - -A network policy applies to only the TCP, UDP, and SCTP protocols. Other protocols are not affected. - -The following example `NetworkPolicy` objects demonstrate supporting different scenarios: - -* Deny all traffic: -+ -To make a project deny by default, add a `NetworkPolicy` object that matches all pods but accepts no traffic: -+ -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: deny-by-default -spec: - podSelector: {} - ingress: [] ----- - -* Only allow connections from the {product-title} Ingress Controller: -+ -To make a project allow only connections from the {product-title} Ingress Controller, add the following `NetworkPolicy` object. -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-openshift-ingress -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress ----- - -* Only accept connections from pods within a project: -+ -To make pods accept connections from other pods in the same project, but reject all other connections from pods in other projects, add the following `NetworkPolicy` object: -+ -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-same-namespace -spec: - podSelector: {} - ingress: - - from: - - podSelector: {} ----- - -* Only allow HTTP and HTTPS traffic based on pod labels: -+ -To enable only HTTP and HTTPS access to the pods with a specific label (`role=frontend` in following example), add a `NetworkPolicy` object similar to the following: -+ -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-http-and-https -spec: - podSelector: - matchLabels: - role: frontend - ingress: - - ports: - - protocol: TCP - port: 80 - - protocol: TCP - port: 443 ----- - -* Accept connections by using both namespace and pod selectors: -+ -To match network traffic by combining namespace and pod selectors, you can use a `NetworkPolicy` object similar to the following: -+ -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-pod-and-namespace-both -spec: - podSelector: - matchLabels: - name: test-pods - ingress: - - from: - - namespaceSelector: - matchLabels: - project: project_name - podSelector: - matchLabels: - name: test-pods ----- - -`NetworkPolicy` objects are additive, which means you can combine multiple `NetworkPolicy` objects together to satisfy complex network requirements. - -For example, for the `NetworkPolicy` objects defined in previous samples, you can define both `allow-same-namespace` and `allow-http-and-https` policies within the same project. Thus allowing the pods with the label `role=frontend`, to accept any connection allowed by each policy. That is, connections on any port from pods in the same namespace, and connections on ports `80` and `443` from pods in any namespace. 
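As a minimal sketch of this additive behavior, the two example policies shown above can be applied together in one project; the namespace name `my-project` is a placeholder and is not part of the original examples:

[source,yaml]
----
# Both policies coexist in the same namespace. A pod labeled role=frontend
# accepts any connection allowed by either policy: any port from pods in
# my-project, plus ports 80 and 443 from pods in any namespace.
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-same-namespace
  namespace: my-project
spec:
  podSelector: {}
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-http-and-https
  namespace: my-project
spec:
  podSelector:
    matchLabels:
      role: frontend
  ingress:
  - ports:
    - protocol: TCP
      port: 80
    - protocol: TCP
      port: 443
----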
- -[id="nw-networkpolicy-allow-from-router_{context}"] -== Using the allow-from-router network policy - -Use the following `NetworkPolicy` to allow external traffic regardless of the router configuration: - -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-router -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - policy-group.network.openshift.io/ingress:""<1> - podSelector: {} - policyTypes: - - Ingress ----- -<1> `policy-group.network.openshift.io/ingress:""` label supports both Openshift-SDN and OVN-Kubernetes. - - -[id="nw-networkpolicy-allow-from-hostnetwork_{context}"] -== Using the allow-from-hostnetwork network policy - -Add the following `allow-from-hostnetwork` `NetworkPolicy` object to direct traffic from the host network pods: - -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-hostnetwork -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - policy-group.network.openshift.io/host-network:"" - podSelector: {} - policyTypes: - - Ingress ----- \ No newline at end of file diff --git a/modules/nw-networkpolicy-allow-application-all-namespaces.adoc b/modules/nw-networkpolicy-allow-application-all-namespaces.adoc deleted file mode 100644 index 0e8181e1862d..000000000000 --- a/modules/nw-networkpolicy-allow-application-all-namespaces.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-allow-traffic-from-all-applications_{context}"] -= Creating a {name} policy allowing traffic to an application from all namespaces - - -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can create a network policy in any namespace in the cluster. -==== - -Follow this procedure to configure a policy that allows traffic from all pods in all namespaces to a particular application. - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace that the {name} policy applies to. - -.Procedure - -. Create a policy that allows traffic from all pods in all namespaces to a particular application. Save the YAML in the `web-allow-all-namespaces.yaml` file: -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: web-allow-all-namespaces - namespace: default -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: <network_name> -endif::multi[] -spec: - podSelector: - matchLabels: - app: web <1> - policyTypes: - - Ingress - ingress: - - from: - - namespaceSelector: {} <2> ----- -<1> Applies the policy only to `app:web` pods in default namespace. 
-<2> Selects all pods in all namespaces. -+ -[NOTE] -==== -By default, if you omit specifying a `namespaceSelector` it does not select any namespaces, which means the policy allows traffic only from the namespace the network policy is deployed to. -==== - -. Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f web-allow-all-namespaces.yaml ----- -+ -.Example output -[source,terminal] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/web-allow-all-namespaces created -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/web-allow-all-namespaces created -endif::multi[] ----- - -.Verification - -. Start a web service in the `default` namespace by entering the following command: -+ -[source,terminal] ----- -$ oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 ----- - -. Run the following command to deploy an `alpine` image in the `secondary` namespace and to start a shell: -+ -[source,terminal] ----- -$ oc run test-$RANDOM --namespace=secondary --rm -i -t --image=alpine -- sh ----- - -. Run the following command in the shell and observe that the request is allowed: -+ -[source,terminal] ----- -# wget -qO- --timeout=2 http://web.default ----- -+ -.Expected output -+ -[source,terminal] ----- -<!DOCTYPE html> -<html> -<head> -<title>Welcome to nginx! - - - -

-<h1>Welcome to nginx!</h1>
-<p>If you see this page, the nginx web server is successfully installed and
-working. Further configuration is required.</p>
-
-<p>For online documentation and support please refer to
-<a href="http://nginx.org/">nginx.org</a>.<br/>
-Commercial support is available at
-<a href="http://nginx.com/">nginx.com</a>.</p>
-
-<p><em>Thank you for using nginx.</em></p>
- - ----- \ No newline at end of file diff --git a/modules/nw-networkpolicy-allow-application-particular-namespace.adoc b/modules/nw-networkpolicy-allow-application-particular-namespace.adoc deleted file mode 100644 index 72ca4ed291ed..000000000000 --- a/modules/nw-networkpolicy-allow-application-particular-namespace.adoc +++ /dev/null @@ -1,198 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-allow-traffic-from-a-namespace_{context}"] -= Creating a {name} policy allowing traffic to an application from a namespace - -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can create a network policy in any namespace in the cluster. -==== - -Follow this procedure to configure a policy that allows traffic to a pod with the label `app=web` from a particular namespace. You might want to do this to: - -* Restrict traffic to a production database only to namespaces where production workloads are deployed. -* Enable monitoring tools deployed to a particular namespace to scrape metrics from the current namespace. - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace that the {name} policy applies to. - -.Procedure - -. Create a policy that allows traffic from all pods in a particular namespaces with a label `purpose=production`. Save the YAML in the `web-allow-prod.yaml` file: -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: web-allow-prod - namespace: default -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - podSelector: - matchLabels: - app: web <1> - policyTypes: - - Ingress - ingress: - - from: - - namespaceSelector: - matchLabels: - purpose: production <2> ----- -<1> Applies the policy only to `app:web` pods in the default namespace. -<2> Restricts traffic to only pods in namespaces that have the label `purpose=production`. - -. Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f web-allow-prod.yaml ----- -+ -.Example output -[source,terminal] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/web-allow-prod created -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/web-allow-prod created -endif::multi[] ----- - -.Verification - -. Start a web service in the `default` namespace by entering the following command: -+ -[source,terminal] ----- -$ oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 ----- - -. Run the following command to create the `prod` namespace: -+ -[source,terminal] ----- -$ oc create namespace prod ----- - -. 
Run the following command to label the `prod` namespace: -+ -[source,terminal] ----- -$ oc label namespace/prod purpose=production ----- - -. Run the following command to create the `dev` namespace: -+ -[source,terminal] ----- -$ oc create namespace dev ----- - -. Run the following command to label the `dev` namespace: -+ -[source,terminal] ----- -$ oc label namespace/dev purpose=testing ----- - -. Run the following command to deploy an `alpine` image in the `dev` namespace and to start a shell: -+ -[source,terminal] ----- -$ oc run test-$RANDOM --namespace=dev --rm -i -t --image=alpine -- sh ----- - -. Run the following command in the shell and observe that the request is blocked: -+ -[source,terminal] ----- -# wget -qO- --timeout=2 http://web.default ----- -+ -.Expected output -+ -[source,terminal] ----- -wget: download timed out ----- - -. Run the following command to deploy an `alpine` image in the `prod` namespace and start a shell: -+ -[source,terminal] ----- -$ oc run test-$RANDOM --namespace=prod --rm -i -t --image=alpine -- sh ----- - -. Run the following command in the shell and observe that the request is allowed: -+ -[source,terminal] ----- -# wget -qO- --timeout=2 http://web.default ----- -+ -.Expected output -+ -[source,terminal] ----- - - - -Welcome to nginx! - - - -

-<h1>Welcome to nginx!</h1>
-<p>If you see this page, the nginx web server is successfully installed and
-working. Further configuration is required.</p>
-
-<p>For online documentation and support please refer to
-<a href="http://nginx.org/">nginx.org</a>.<br/>
-Commercial support is available at
-<a href="http://nginx.com/">nginx.com</a>.</p>
-
-<p><em>Thank you for using nginx.</em></p>
- - ----- - - diff --git a/modules/nw-networkpolicy-allow-external-clients.adoc b/modules/nw-networkpolicy-allow-external-clients.adoc deleted file mode 100644 index 1b69f0d6ea5a..000000000000 --- a/modules/nw-networkpolicy-allow-external-clients.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-allow-external-clients_{context}"] -= Creating a {name} policy to allow traffic from external clients - -With the `deny-by-default` policy in place you can proceed to configure a policy that allows traffic from external clients to a pod with the label `app=web`. - -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can create a network policy in any namespace in the cluster. -==== - -Follow this procedure to configure a policy that allows external service from the public Internet directly or by using a Load Balancer to access the pod. Traffic is only allowed to a pod with the label `app=web`. - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace that the {name} policy applies to. - -.Procedure - -. Create a policy that allows traffic from the public Internet directly or by using a load balancer to access the pod. Save the YAML in the `web-allow-external.yaml` file: -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: web-allow-external - namespace: default -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - policyTypes: - - Ingress - podSelector: - matchLabels: - app: web - ingress: - - {} ----- - -. Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f web-allow-external.yaml ----- -+ -.Example output -+ -[source,terminal] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/web-allow-external created -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/web-allow-external created -endif::multi[] ----- - -This policy allows traffic from all resources, including external traffic as illustrated in the following diagram: - -image::292_OpenShift_Configuring_multi-network_policy_1122.png[Allow traffic from external clients] \ No newline at end of file diff --git a/modules/nw-networkpolicy-audit-concept.adoc b/modules/nw-networkpolicy-audit-concept.adoc deleted file mode 100644 index 5b133c262735..000000000000 --- a/modules/nw-networkpolicy-audit-concept.adoc +++ /dev/null @@ -1,48 +0,0 @@ -[id="nw-networkpolicy-audit-concept_{context}"] -= Audit logging - -The OVN-Kubernetes network plugin uses Open Virtual Network (OVN) ACLs to manage egress firewalls and network policies. 
Audit logging exposes allow and deny ACL events. - -You can configure the destination for audit logs, such as a syslog server or a UNIX domain socket. -Regardless of any additional configuration, an audit log is always saved to `/var/log/ovn/acl-audit-log.log` on each OVN-Kubernetes pod in the cluster. - -Audit logging is enabled per namespace by annotating the namespace with the `k8s.ovn.org/acl-logging` key as in the following example: - -.Example namespace annotation -[source,yaml] ----- -kind: Namespace -apiVersion: v1 -metadata: - name: example1 - annotations: - k8s.ovn.org/acl-logging: |- - { - "deny": "info", - "allow": "info" - } ----- - -The logging format is compatible with syslog as defined by RFC5424. The syslog facility is configurable and defaults to `local0`. An example log entry might resemble the following: - -.Example ACL deny log entry for a network policy -[source,text] ----- -2021-06-13T19:33:11.590Z|00005|acl_log(ovn_pinctrl0)|INFO|name="verify-audit-logging_deny-all", verdict=drop, severity=alert: icmp,vlan_tci=0x0000,dl_src=0a:58:0a:80:02:39,dl_dst=0a:58:0a:80:02:37,nw_src=10.128.2.57,nw_dst=10.128.2.55,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 ----- - -The following table describes namespace annotation values: - -.Audit logging namespace annotation -[cols=".^4,.^6a",options="header"] -|==== -|Annotation|Value - -|`k8s.ovn.org/acl-logging` -| -You must specify at least one of `allow`, `deny`, or both to enable audit logging for a namespace. - -`deny`:: Optional: Specify `alert`, `warning`, `notice`, `info`, or `debug`. -`allow`:: Optional: Specify `alert`, `warning`, `notice`, `info`, or `debug`. - -|==== diff --git a/modules/nw-networkpolicy-audit-configure.adoc b/modules/nw-networkpolicy-audit-configure.adoc deleted file mode 100644 index 2578b105c5f3..000000000000 --- a/modules/nw-networkpolicy-audit-configure.adoc +++ /dev/null @@ -1,229 +0,0 @@ -:_content-type: PROCEDURE -[id="nw-networkpolicy-audit-configure_{context}"] -= Configuring egress firewall and network policy auditing for a cluster - -As a cluster administrator, you can customize audit logging for your cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -* To customize the audit logging configuration, enter the following command: -+ -[source,terminal] ----- -$ oc edit network.operator.openshift.io/cluster ----- -+ -[TIP] -==== -You can alternatively customize and apply the following YAML to configure audit logging: - -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - defaultNetwork: - ovnKubernetesConfig: - policyAuditConfig: - destination: "null" - maxFileSize: 50 - rateLimit: 20 - syslogFacility: local0 ----- -==== - -.Verification - -. To create a namespace with network policies complete the following steps: -.. Create a namespace for verification: -+ -[source,terminal] ----- -$ cat < k8s.ovn.org/acl-logging- ----- -+ --- -where: - -``:: Specifies the name of the namespace. 
--- -+ -[TIP] -==== -You can alternatively apply the following YAML to disable audit logging: - -[source,yaml] ----- -kind: Namespace -apiVersion: v1 -metadata: - name: - annotations: - k8s.ovn.org/acl-logging: null ----- -==== -+ -.Example output -[source,terminal] ----- -namespace/verify-audit-logging annotated ----- diff --git a/modules/nw-networkpolicy-audit-enable.adoc b/modules/nw-networkpolicy-audit-enable.adoc deleted file mode 100644 index 844f1e9ee099..000000000000 --- a/modules/nw-networkpolicy-audit-enable.adoc +++ /dev/null @@ -1,68 +0,0 @@ -:_content-type: PROCEDURE -[id="nw-networkpolicy-audit-enable_{context}"] -= Enabling egress firewall and network policy audit logging for a namespace - -As a cluster administrator, you can enable audit logging for a namespace. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -* To enable audit logging for a namespace, enter the following command: -+ -[source,terminal] ----- -$ oc annotate namespace \ - k8s.ovn.org/acl-logging='{ "deny": "alert", "allow": "notice" }' ----- -+ --- -where: - -``:: Specifies the name of the namespace. --- -+ -[TIP] -==== -You can alternatively apply the following YAML to enable audit logging: - -[source,yaml] ----- -kind: Namespace -apiVersion: v1 -metadata: - name: - annotations: - k8s.ovn.org/acl-logging: |- - { - "deny": "alert", - "allow": "notice" - } ----- -==== -+ -.Example output -[source,terminal] ----- -namespace/verify-audit-logging annotated ----- - -.Verification - -* Display the latest entries in the audit log: -+ -[source,terminal] ----- -$ for pod in $(oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-node --no-headers=true | awk '{ print $1 }') ; do - oc exec -it $pod -n openshift-ovn-kubernetes -- tail -4 /var/log/ovn/acl-audit-log.log - done ----- -+ -.Example output -[source,text] ----- -2021-06-13T19:33:11.590Z|00005|acl_log(ovn_pinctrl0)|INFO|name="verify-audit-logging_deny-all", verdict=drop, severity=alert: icmp,vlan_tci=0x0000,dl_src=0a:58:0a:80:02:39,dl_dst=0a:58:0a:80:02:37,nw_src=10.128.2.57,nw_dst=10.128.2.55,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 ----- diff --git a/modules/nw-networkpolicy-create-cli.adoc b/modules/nw-networkpolicy-create-cli.adoc deleted file mode 100644 index 91202ad4ad12..000000000000 --- a/modules/nw-networkpolicy-create-cli.adoc +++ /dev/null @@ -1,254 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc -// * networking/network_policy/creating-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-create-cli_{context}"] -= Creating a {name} policy using the CLI - -To define granular rules describing ingress or egress network traffic allowed for namespaces in your cluster, you can create a {name} policy. - -ifndef::multi[] -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can create a network policy in any namespace in the cluster. -==== -endif::multi[] - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. 
-endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace that the {name} policy applies to. - -.Procedure - -. Create a policy rule: -.. Create a `.yaml` file: -+ -[source,terminal] ----- -$ touch .yaml ----- -+ --- -where: - -``:: Specifies the {name} policy file name. --- - -.. Define a {name} policy in the file that you just created, such as in the following examples: -+ -.Deny ingress from all pods in all namespaces -This is a fundamental policy, blocking all cross-pod networking other than cross-pod traffic allowed by the configuration of other Network Policies. -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: deny-by-default -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - podSelector: - ingress: [] ----- -+ -ifdef::multi[] --- -where: - -``:: Specifies the name of a network attachment definition. --- -endif::multi[] -+ -.Allow ingress from all pods in the same namespace -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: allow-same-namespace -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - podSelector: - ingress: - - from: - - podSelector: {} ----- -ifdef::multi[] -+ --- -where: - -``:: Specifies the name of a network attachment definition. --- -endif::multi[] -+ -.Allow ingress traffic to one pod from a particular namespace -+ -This policy allows traffic to pods labelled `pod-a` from pods running in `namespace-y`. -+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: allow-traffic-pod -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - podSelector: - matchLabels: - pod: pod-a - policyTypes: - - Ingress - ingress: - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: namespace-y ----- -ifdef::multi[] -+ --- -where: - -``:: Specifies the name of a network attachment definition. --- -endif::multi[] -+ -ifdef::multi[] -.Restrict traffic to a service -+ -This policy when applied ensures every pod with both labels `app=bookstore` and `role=api` can only be accessed by pods with label `app=bookstore`. In this example the application could be a REST API server, marked with labels `app=bookstore` and `role=api`. -+ -This example addresses the following use cases: - -* Restricting the traffic to a service to only the other microservices that need to use it. -* Restricting the connections to a database to only permit the application using it. 
-+ -[source,yaml] ----- -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -endif::multi[] -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -endif::multi[] -metadata: - name: api-allow -ifdef::multi[] - annotations: - k8s.v1.cni.cncf.io/policy-for: -endif::multi[] -spec: - podSelector: - matchLabels: - app: bookstore - role: api - ingress: - - from: - - podSelector: - matchLabels: - app: bookstore ----- -ifdef::multi[] -+ --- - -where: - -``:: Specifies the name of a network attachment definition. --- -endif::multi[] -endif::multi[] - -. To create the {name} policy object, enter the following command: -+ -[source,terminal] ----- -$ oc apply -f .yaml -n ----- -+ --- -where: - -``:: Specifies the {name} policy file name. -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. --- -+ -.Example output -[source,terminal] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/deny-by-default created -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created -endif::multi[] ----- - -ifdef::ovn[] -:!ovn: -endif::ovn[] -ifdef::multi[] -:!multi: -endif::multi[] -:!name: -:!role: - -[NOTE] -==== -If you log in to the web console with `cluster-admin` privileges, you have a choice of creating a network policy in any namespace in the cluster directly in YAML or from a form in the web console. -==== diff --git a/modules/nw-networkpolicy-create-ocm.adoc b/modules/nw-networkpolicy-create-ocm.adoc deleted file mode 100644 index ada7415a1bcd..000000000000 --- a/modules/nw-networkpolicy-create-ocm.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/creating-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-networkpolicy-create-ocm_{context}"] -= Creating a network policy using {cluster-manager} - -To define granular rules describing the ingress or egress network traffic allowed for namespaces in your cluster, you can create a network policy. - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. -* You created a project within your {product-title} cluster. - -.Procedure - -. From {cluster-manager-url}, click on the cluster you want to access. - -. Click *Open console* to navigate to the OpenShift web console. - -. Click on your identity provider and provide your credentials to log in to the cluster. - -. From the administrator perspective, under *Networking*, click *NetworkPolicies*. - -. Click *Create NetworkPolicy*. - -. Provide a name for the policy in the *Policy name* field. - -. Optional: You can provide the label and selector for a specific pod if this policy applies only to one or more specific pods. If you do not select a specific pod, then this policy will be applicable to all pods on the cluster. - -. Optional: You can block all ingress and egress traffic by using the *Deny all ingress traffic* or *Deny all egress traffic* checkboxes. - -. You can also add any combination of ingress and egress rules, allowing you to specify the port, namespace, or IP blocks you want to approve. - -. Add ingress rules to your policy: - -.. Select *Add ingress rule* to configure a new rule. 
This action creates a new *Ingress rule* row with an *Add allowed source* drop-down menu that enables you to specify how you want to limit inbound traffic. The drop-down menu offers three options to limit your ingress traffic: -+ -*** *Allow pods from the same namespace* limits traffic to pods within the same namespace. You can specify the pods in a namespace, but leaving this option blank allows all of the traffic from pods in the namespace. - -*** *Allow pods from inside the cluster* limits traffic to pods within the same cluster as the policy. You can specify namespaces and pods from which you want to allow inbound traffic. Leaving this option blank allows inbound traffic from all namespaces and pods within this cluster. - -*** *Allow peers by IP block* limits traffic from a specified Classless Inter-Domain Routing (CIDR) IP block. You can block certain IPs with the exceptions option. Leaving the CIDR field blank allows all inbound traffic from all external sources. - -.. You can restrict all of your inbound traffic to a port. If you do not add any ports then all ports are accessible to traffic. - -. Add egress rules to your network policy: - -.. Select *Add egress rule* to configure a new rule. This action creates a new *Egress rule* row with an *Add allowed destination*"* drop-down menu that enables you to specify how you want to limit outbound traffic. The drop-down menu offers three options to limit your egress traffic: -+ -*** *Allow pods from the same namespace* limits outbound traffic to pods within the same namespace. You can specify the pods in a namespace, but leaving this option blank allows all of the traffic from pods in the namespace. - -*** *Allow pods from inside the cluster* limits traffic to pods within the same cluster as the policy. You can specify namespaces and pods from which you want to allow outbound traffic. Leaving this option blank allows outbound traffic from all namespaces and pods within this cluster. - -*** *Allow peers by IP block* limits traffic from a specified CIDR IP block. You can block certain IPs with the exceptions option. Leaving the CIDR field blank allows all outbound traffic from all external sources. - -.. You can restrict all of your outbound traffic to a port. If you do not add any ports then all ports are accessible to traffic. \ No newline at end of file diff --git a/modules/nw-networkpolicy-delete-cli.adoc b/modules/nw-networkpolicy-delete-cli.adoc deleted file mode 100644 index c082bf78a5e0..000000000000 --- a/modules/nw-networkpolicy-delete-cli.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/deleting-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-delete-cli_{context}"] -= Deleting a {name} policy using the CLI - -You can delete a {name} policy in a namespace. - -ifndef::multi[] -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can delete any network policy in the cluster. -==== -endif::multi[] - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. 
-endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace where the {name} policy exists. - -.Procedure - -* To delete a {name} policy object, enter the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc delete {name}policy -n ----- -+ --- -where: - -``:: Specifies the name of the {name} policy. -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. --- -+ -.Example output -[source,text] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/default-deny deleted -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/default-deny deleted -endif::multi[] ----- - -ifdef::ovn[] -:!ovn: -endif::ovn[] -ifdef::multi[] -:!multi: -endif::multi[] -:!name: -:!role: - -[NOTE] -==== -If you log in to the web console with `cluster-admin` privileges, you have a choice of deleting a network policy in any namespace in the cluster directly in YAML or from the policy in the web console through the *Actions* menu. -==== diff --git a/modules/nw-networkpolicy-delete-ocm.adoc b/modules/nw-networkpolicy-delete-ocm.adoc deleted file mode 100644 index f38af5d7523c..000000000000 --- a/modules/nw-networkpolicy-delete-ocm.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/deleting-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-networkpolicy-delete-ocm_{context}"] -= Deleting a network policy using {cluster-manager} - -You can delete a network policy in a namespace. - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. - -.Procedure - -. From the *Administrator* perspective in the {cluster-manager} web console, under *Networking*, click *NetworkPolicies*. - -. Use one of the following methods for deleting your network policy: - -** Delete the policy from the *Network Policies* table: -.. From the *Network Policies* table, select the stack menu on the row of the network policy you want to delete and then, click *Delete NetworkPolicy*. - -** Delete the policy using the *Actions* drop-down menu from the individual network policy details: -.. Click on *Actions* drop-down menu for your network policy. -.. Select *Delete NetworkPolicy* from the menu. 
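Whichever deletion method you use, you can confirm from the CLI that the policy is gone. The following is a minimal sketch, assuming a hypothetical policy named `web-allow-external` in a hypothetical project named `ns1`:

[source,terminal]
----
$ oc get networkpolicy web-allow-external -n ns1
----

A `NotFound` error from this command indicates that the network policy was deleted.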
\ No newline at end of file diff --git a/modules/nw-networkpolicy-deny-all-allowed.adoc b/modules/nw-networkpolicy-deny-all-allowed.adoc deleted file mode 100644 index 6dee79f875c8..000000000000 --- a/modules/nw-networkpolicy-deny-all-allowed.adoc +++ /dev/null @@ -1,97 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/multiple_networks/configuring-multi-network-policy.adoc -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-deny-all-multi-network-policy_{context}"] -= Creating a default deny all {name} policy - -This is a fundamental policy, blocking all cross-pod networking other than network traffic allowed by the configuration of other deployed network policies. This procedure enforces a default `deny-by-default` policy. - -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can create a network policy in any namespace in the cluster. -==== - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace that the {name} policy applies to. - -.Procedure - -. Create the following YAML that defines a `deny-by-default` policy to deny ingress from all pods in all namespaces. Save the YAML in the `deny-by-default.yaml` file: -+ -[source,yaml] ----- -ifdef::multi[] -apiVersion: k8s.cni.cncf.io/v1beta1 -kind: MultiNetworkPolicy -metadata: - name: deny-by-default - namespace: default <1> - annotations: - k8s.v1.cni.cncf.io/policy-for: <2> -spec: - podSelector: {} <3> - ingress: [] <4> -endif::multi[] -ifndef::multi[] -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: deny-by-default - namespace: default <1> -spec: - podSelector: {} <2> - ingress: [] <3> -endif::multi[] ----- -ifdef::multi[] -<1> `namespace: default` deploys this policy to the `default` namespace. -<2> `podSelector:` is empty, this means it matches all the pods. Therefore, the policy applies to all pods in the default namespace. -<3> `network_name`: specifies the name of a network attachment definition. -<4> There are no `ingress` rules specified. This causes incoming traffic to be dropped to all pods. -endif::multi[] -ifndef::multi[] -<1> `namespace: default` deploys this policy to the `default` namespace. -<2> `podSelector:` is empty, this means it matches all the pods. Therefore, the policy applies to all pods in the default namespace. -<3> There are no `ingress` rules specified. This causes incoming traffic to be dropped to all pods. -endif::multi[] -+ -. 
Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f deny-by-default.yaml ----- -+ -.Example output -[source,terminal] ----- -ifndef::multi[] -networkpolicy.networking.k8s.io/deny-by-default created -endif::multi[] -ifdef::multi[] -multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created -endif::multi[] ----- \ No newline at end of file diff --git a/modules/nw-networkpolicy-edit.adoc b/modules/nw-networkpolicy-edit.adoc deleted file mode 100644 index 85d3c0397ab9..000000000000 --- a/modules/nw-networkpolicy-edit.adoc +++ /dev/null @@ -1,114 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/editing-network-policy.adoc - -:name: network -:role: admin -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-edit_{context}"] -= Editing a {name} policy - -You can edit a {name} policy in a namespace. - -ifndef::multi[] -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can edit a network policy in any namespace in the cluster. -==== -endif::multi[] - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace where the {name} policy exists. - -.Procedure - -. Optional: To list the {name} policy objects in a namespace, enter the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc get {name}policy ----- -+ --- -where: - -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. --- - -. Edit the {name} policy object. - -** If you saved the {name} policy definition in a file, edit the file and make any necessary changes, and then enter the following command. -+ -[source,terminal] ----- -$ oc apply -n -f .yaml ----- -+ --- -where: - -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. -``:: Specifies the name of the file containing the network policy. --- - -** If you need to update the {name} policy object directly, enter the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc edit {name}policy -n ----- -+ --- -where: - -``:: Specifies the name of the network policy. -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. --- - -. Confirm that the {name} policy object is updated. -+ -[source,terminal,subs="attributes+"] ----- -$ oc describe {name}policy -n ----- -+ --- -where: - -``:: Specifies the name of the {name} policy. -``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. 
--- - -ifdef::ovn[] -:!ovn: -endif::ovn[] -ifdef::multi[] -:!multi: -endif::multi[] -:!name: -:!role: - -[NOTE] -==== -If you log in to the web console with `cluster-admin` privileges, you have a choice of editing a network policy in any namespace in the cluster directly in YAML or from the policy in the web console through the *Actions* menu. -==== diff --git a/modules/nw-networkpolicy-multitenant-isolation.adoc b/modules/nw-networkpolicy-multitenant-isolation.adoc deleted file mode 100644 index a32e080e9301..000000000000 --- a/modules/nw-networkpolicy-multitenant-isolation.adoc +++ /dev/null @@ -1,163 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/multitenant-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -ifeval::[{product-version} >= 4.6] -:ovn: -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-multitenant-isolation_{context}"] -= Configuring multitenant isolation by using network policy - -You can configure your project to isolate it from pods and services in other -project namespaces. - -.Prerequisites - -* Your cluster uses a network plugin that supports `NetworkPolicy` objects, such as -ifndef::ovn[] -the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -ifdef::ovn[] -the OVN-Kubernetes network provider or the OpenShift SDN network provider with `mode: NetworkPolicy` set. -endif::ovn[] -This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `admin` privileges. - -.Procedure - -. Create the following `NetworkPolicy` objects: -.. A policy named `allow-from-openshift-ingress`. -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-openshift-ingress -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - policy-group.network.openshift.io/ingress: "" - podSelector: {} - policyTypes: - - Ingress -EOF ----- -+ -[NOTE] -==== -`policy-group.network.openshift.io/ingress: ""` is the preferred namespace selector label for OpenShift SDN. You can use the `network.openshift.io/policy-group: ingress` namespace selector label, but this is a legacy label. -==== -.. A policy named `allow-from-openshift-monitoring`: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-openshift-monitoring -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: monitoring - podSelector: {} - policyTypes: - - Ingress -EOF ----- - -.. A policy named `allow-same-namespace`: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-same-namespace -spec: - podSelector: - ingress: - - from: - - podSelector: {} -EOF ----- - -.. 
A policy named `allow-from-kube-apiserver-operator`: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-from-kube-apiserver-operator -spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: openshift-kube-apiserver-operator - podSelector: - matchLabels: - app: kube-apiserver-operator - policyTypes: - - Ingress -EOF ----- -+ -For more details, see link:https://access.redhat.com/solutions/6964520[New `kube-apiserver-operator` webhook controller validating health of webhook]. - -. Optional: To confirm that the network policies exist in your current project, enter the following command: -+ -[source,terminal] ----- -$ oc describe networkpolicy ----- -+ -.Example output -[source,text] ----- -Name: allow-from-openshift-ingress -Namespace: example1 -Created on: 2020-06-09 00:28:17 -0400 EDT -Labels: -Annotations: -Spec: - PodSelector: (Allowing the specific traffic to all pods in this namespace) - Allowing ingress traffic: - To Port: (traffic allowed to all ports) - From: - NamespaceSelector: network.openshift.io/policy-group: ingress - Not affecting egress traffic - Policy Types: Ingress - - -Name: allow-from-openshift-monitoring -Namespace: example1 -Created on: 2020-06-09 00:29:57 -0400 EDT -Labels: -Annotations: -Spec: - PodSelector: (Allowing the specific traffic to all pods in this namespace) - Allowing ingress traffic: - To Port: (traffic allowed to all ports) - From: - NamespaceSelector: network.openshift.io/policy-group: monitoring - Not affecting egress traffic - Policy Types: Ingress ----- - -ifdef::ovn[] -:!ovn: -endif::ovn[] diff --git a/modules/nw-networkpolicy-object.adoc b/modules/nw-networkpolicy-object.adoc deleted file mode 100644 index bd324ecf3cf1..000000000000 --- a/modules/nw-networkpolicy-object.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/creating-network-policy.adoc -// * networking/network_policy/viewing-network-policy.adoc -// * networking/network_policy/editing-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -[id="nw-networkpolicy-object_{context}"] - -= Example NetworkPolicy object - -The following annotates an example NetworkPolicy object: - -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-27107 <1> -spec: - podSelector: <2> - matchLabels: - app: mongodb - ingress: - - from: - - podSelector: <3> - matchLabels: - app: app - ports: <4> - - protocol: TCP - port: 27017 ----- -<1> The name of the NetworkPolicy object. -<2> A selector that describes the pods to which the policy applies. The policy object can -only select pods in the project that defines the NetworkPolicy object. -<3> A selector that matches the pods from which the policy object allows ingress traffic. The selector matches pods in the same namespace as the NetworkPolicy. -<4> A list of one or more destination ports on which to accept traffic. 
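Because the `ports` field is a list, the example object can accept traffic on more than one destination port. The following is a minimal sketch, not part of the original example, that extends the object with a second, hypothetical TCP port:

[source,yaml]
----
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-27107
spec:
  podSelector:
    matchLabels:
      app: mongodb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: app
    ports:
    - protocol: TCP
      port: 27017
    - protocol: TCP
      port: 27018 <1>
----
<1> A hypothetical second port; replace it with a port that your workload actually serves.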
diff --git a/modules/nw-networkpolicy-optimize-ovn.adoc b/modules/nw-networkpolicy-optimize-ovn.adoc deleted file mode 100644 index b917316a3772..000000000000 --- a/modules/nw-networkpolicy-optimize-ovn.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/about-network-policy.adoc - -[id="nw-networkpolicy-optimize-ovn_{context}"] -= Optimizations for network policy with OpenShift OVN - -When designing your network policy, refer to the following guidelines: - -* For network policies with the same `spec.podSelector` spec, it is more efficient to use one network policy with multiple `ingress` or `egress` rules, than multiple network policies with subsets of `ingress` or `egress` rules. - -* Every `ingress` or `egress` rule based on the `podSelector` or `namespaceSelector` spec generates the number of OVS flows proportional to `number of pods selected by network policy + number of pods selected by ingress or egress rule`. Therefore, it is preferable to use the `podSelector` or `namespaceSelector` spec that can select as many pods as you need in one rule, instead of creating individual rules for every pod. -+ -For example, the following policy contains two rules: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: test-network-policy -spec: - podSelector: {} - ingress: - - from: - - podSelector: - matchLabels: - role: frontend - - from: - - podSelector: - matchLabels: - role: backend ----- -+ -The following policy expresses those same two rules as one: -+ -[source,yaml] ----- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: test-network-policy -spec: - podSelector: {} - ingress: - - from: - - podSelector: - matchExpressions: - - {key: role, operator: In, values: [frontend, backend]} ----- -+ -The same guideline applies to the `spec.podSelector` spec. If you have the same `ingress` or `egress` rules for different network policies, it might be more efficient to create one network policy with a common `spec.podSelector` spec. For example, the following two policies have different rules: -+ -[source,yaml] ----- -metadata: - name: policy1 -spec: - podSelector: - matchLabels: - role: db - ingress: - - from: - - podSelector: - matchLabels: - role: frontend - -metadata: - name: policy2 -spec: - podSelector: - matchLabels: - role: client - ingress: - - from: - - podSelector: - matchLabels: - role: frontend ----- -+ -The following network policy expresses those same two rules as one: -+ -[source,yaml] ----- -metadata: - name: policy3 -spec: - podSelector: - matchExpressions: - - {key: role, operator: In, values: [db, client]} - ingress: - - from: - - podSelector: - matchLabels: - role: frontend ----- -+ -You can apply this optimization when only multiple selectors are expressed as one. In cases where selectors are based on different labels, it may not be possible to apply this optimization. In those cases, consider applying some new labels for network policy optimization specifically. 
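If the selectors in your existing policies are based on different labels, one way to apply the previous optimization is to first add a shared label to the affected pods. The following is a minimal sketch, assuming hypothetical `role=db` and `role=client` pods in the current project and a hypothetical label key `policy-group`:

[source,terminal]
----
$ oc label pods -l role=db policy-group=frontend-clients
$ oc label pods -l role=client policy-group=frontend-clients
----

A single network policy can then select both groups of pods with `matchLabels: {policy-group: frontend-clients}` instead of a `matchExpressions` list.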
diff --git a/modules/nw-networkpolicy-optimize.adoc b/modules/nw-networkpolicy-optimize.adoc deleted file mode 100644 index 477809d9be56..000000000000 --- a/modules/nw-networkpolicy-optimize.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/about-network-policy.adoc - -[id="nw-networkpolicy-optimize-sdn_{context}"] -= Optimizations for network policy with OpenShift SDN - -Use a network policy to isolate pods that are differentiated from one another by labels within a namespace. - -It is inefficient to apply `NetworkPolicy` objects to large numbers of individual pods in a single namespace. Pod labels do not exist at the IP address level, so a network policy generates a separate Open vSwitch (OVS) flow rule for every possible link between every pod selected with a `podSelector`. - -For example, if the spec `podSelector` and the ingress `podSelector` within a `NetworkPolicy` object each match 200 pods, then 40,000 (200*200) OVS flow rules are generated. This might slow down a node. - -When designing your network policy, refer to the following guidelines: - -* Reduce the number of OVS flow rules by using namespaces to contain groups of pods that need to be isolated. -+ -`NetworkPolicy` objects that select a whole namespace, by using the `namespaceSelector` or an empty `podSelector`, generate only a single OVS flow rule that matches the VXLAN virtual network ID (VNID) of the namespace. - -* Keep the pods that do not need to be isolated in their original namespace, and move the pods that require isolation into one or more different namespaces. - -* Create additional targeted cross-namespace network policies to allow the specific traffic that you do want to allow from the isolated pods. diff --git a/modules/nw-networkpolicy-project-defaults.adoc b/modules/nw-networkpolicy-project-defaults.adoc deleted file mode 100644 index c5bc56cd1347..000000000000 --- a/modules/nw-networkpolicy-project-defaults.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/default-network-policy.adoc -// * networking/configuring-networkpolicy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-networkpolicy-project-defaults_{context}"] -= Adding network policies to the new project template - -As a cluster administrator, you can add network policies to the default template for new projects. -{product-title} will automatically create all the `NetworkPolicy` objects specified in the template in the project. - -.Prerequisites - -* Your cluster uses a default CNI network provider that supports `NetworkPolicy` objects, such as the OpenShift SDN network provider with `mode: NetworkPolicy` set. This mode is the default for OpenShift SDN. -* You installed the OpenShift CLI (`oc`). -* You must log in to the cluster with a user with `cluster-admin` privileges. -* You must have created a custom default project template for new projects. - -.Procedure - -. Edit the default template for a new project by running the following command: -+ -[source,terminal] ----- -$ oc edit template -n openshift-config ----- -+ -Replace `` with the name of the default template that you -configured for your cluster. The default template name is `project-request`. - -. In the template, add each `NetworkPolicy` object as an element to the `objects` parameter. The `objects` parameter accepts a collection of one or more objects. 
-+ -In the following example, the `objects` parameter collection includes several `NetworkPolicy` objects. -+ -[source,yaml] ----- -objects: -- apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-from-same-namespace - spec: - podSelector: {} - ingress: - - from: - - podSelector: {} -- apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-from-openshift-ingress - spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress -- apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-from-kube-apiserver-operator - spec: - ingress: - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: openshift-kube-apiserver-operator - podSelector: - matchLabels: - app: kube-apiserver-operator - policyTypes: - - Ingress -... ----- - -. Optional: Create a new project to confirm that your network policy objects are created successfully by running the following commands: - -.. Create a new project: -+ -[source,terminal] ----- -$ oc new-project <1> ----- -<1> Replace `` with the name for the project you are creating. - -.. Confirm that the network policy objects in the new project template exist in the new project: -+ -[source,terminal] ----- -$ oc get networkpolicy -NAME POD-SELECTOR AGE -allow-from-openshift-ingress 7s -allow-from-same-namespace 7s ----- diff --git a/modules/nw-networkpolicy-view-cli.adoc b/modules/nw-networkpolicy-view-cli.adoc deleted file mode 100644 index 49c5ab312f29..000000000000 --- a/modules/nw-networkpolicy-view-cli.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/viewing-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:name: network -:role: admin -ifeval::["{context}" == "configuring-multi-network-policy"] -:multi: -:name: multi-network -:role: cluster-admin -endif::[] - -:_content-type: PROCEDURE -[id="nw-networkpolicy-view-cli_{context}"] -= Viewing {name} policies using the CLI - -You can examine the {name} policies in a namespace. - -ifndef::multi[] -[NOTE] -==== -If you log in with a user with the `cluster-admin` role, then you can view any network policy in the cluster. -==== -endif::multi[] - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `{role}` privileges. -* You are working in the namespace where the {name} policy exists. - -.Procedure - -* List {name} policies in a namespace: - -** To view {name} policy objects defined in a namespace, enter the following -command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc get {name}policy ----- - -** Optional: To examine a specific {name} policy, enter the following command: -+ -[source,terminal,subs="attributes+"] ----- -$ oc describe {name}policy -n ----- -+ --- -where: - - ``:: Specifies the name of the {name} policy to inspect. - ``:: Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. 
--- -ifndef::multi[] -+ -For example: -+ -[source,terminal] ----- -$ oc describe networkpolicy allow-same-namespace ----- -+ -.Output for `oc describe` command -[source,text] ----- -Name: allow-same-namespace -Namespace: ns1 -Created on: 2021-05-24 22:28:56 -0400 EDT -Labels: -Annotations: -Spec: - PodSelector: (Allowing the specific traffic to all pods in this namespace) - Allowing ingress traffic: - To Port: (traffic allowed to all ports) - From: - PodSelector: - Not affecting egress traffic - Policy Types: Ingress ----- -endif::multi[] - -ifdef::multi[] -:!multi: -endif::multi[] -:!name: -:!role: - - -[NOTE] -==== -If you log in to the web console with `cluster-admin` privileges, you have a choice of viewing a network policy in any namespace in the cluster directly in YAML or from a form in the web console. -==== diff --git a/modules/nw-networkpolicy-view-ocm.adoc b/modules/nw-networkpolicy-view-ocm.adoc deleted file mode 100644 index cf86706fd5cb..000000000000 --- a/modules/nw-networkpolicy-view-ocm.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/network_policy/viewing-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-networkpolicy-view-ocm_{context}"] -= Viewing network policies using {cluster-manager} - -You can view the configuration details of your network policy in {cluster-manager-first}. - -.Prerequisites - -* You logged in to {cluster-manager-url}. -* You created an {product-title} cluster. -* You configured an identity provider for your cluster. -* You added your user account to the configured identity provider. -* You created a network policy. - -.Procedure - -. From the *Administrator* perspective in the {cluster-manager} web console, under *Networking*, click *NetworkPolicies*. -. Select the desired network policy to view. -. In the *Network Policy* details page, you can view all of the associated ingress and egress rules. -. Select *YAML* on the network policy details to view the policy configuration in YAML format. -ifdef::openshift-rosa,openshift-dedicated[] -+ -[NOTE] -==== -You can only view the details of these policies. You cannot edit these policies. -==== -endif::[] \ No newline at end of file diff --git a/modules/nw-nodeport-service-range-edit.adoc b/modules/nw-nodeport-service-range-edit.adoc deleted file mode 100644 index d070c441744a..000000000000 --- a/modules/nw-nodeport-service-range-edit.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-node-port-service-range.adoc - -:_content-type: PROCEDURE -[id="nw-nodeport-service-range-edit_{context}"] -= Expanding the node port range - -You can expand the node port range for the cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. To expand the node port range, enter the following command. Replace `` with the largest port number in the new range. 
-+ -[source,terminal] ----- -$ oc patch network.config.openshift.io cluster --type=merge -p \ - '{ - "spec": - { "serviceNodePortRange": "30000-" } - }' ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to update the node port range: - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - serviceNodePortRange: "30000-" ----- -==== -+ -.Example output -[source,terminal] ----- -network.config.openshift.io/cluster patched ----- - -. To confirm that the configuration is active, enter the following command. It can take several minutes for the update to apply. -+ -[source,terminal] ----- -$ oc get configmaps -n openshift-kube-apiserver config \ - -o jsonpath="{.data['config\.yaml']}" | \ - grep -Eo '"service-node-port-range":["[[:digit:]]+-[[:digit:]]+"]' ----- -+ -.Example output -[source,terminal] ----- -"service-node-port-range":["30000-33000"] ----- diff --git a/modules/nw-openshift-sdn-modes.adoc b/modules/nw-openshift-sdn-modes.adoc deleted file mode 100644 index ccb6789635a2..000000000000 --- a/modules/nw-openshift-sdn-modes.adoc +++ /dev/null @@ -1,15 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/openshift_sdn/about-openshift-sdn.adoc - -[id="nw-openshift-sdn-modes_{context}"] -= OpenShift SDN network isolation modes - -OpenShift SDN provides three SDN modes for configuring the pod network: - -* _Network policy_ mode allows project administrators to configure their own -isolation policies using `NetworkPolicy` objects. Network policy is the default mode in {product-title} {product-version}. - -* _Multitenant_ mode provides project-level isolation for pods and services. Pods from different projects cannot send packets to or receive packets from pods and services of a different project. You can disable isolation for a project, allowing it to send network traffic to all pods and services in the entire cluster and receive network traffic from those pods and services. - -* _Subnet_ mode provides a flat pod network where every pod can communicate with every other pod and service. The network policy mode provides the same functionality as subnet mode. diff --git a/modules/nw-openstack-external-ccm.adoc b/modules/nw-openstack-external-ccm.adoc deleted file mode 100644 index 0e2bee7135f6..000000000000 --- a/modules/nw-openstack-external-ccm.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_openstack/installing-openstack-cloud-config-reference.adoc -// TODO: GitHub link clearance. - -:_content-type: CONCEPT -[id="nw-openstack-external-ccm_{context}"] -= The OpenStack Cloud Controller Manager - -Beginning with {product-title} 4.12, clusters that run on {rh-openstack-first} were switched from the legacy OpenStack cloud provider to the external OpenStack Cloud Controller Manager (CCM). This change follows the move in Kubernetes from in-tree, legacy cloud providers to external cloud providers that are implemented by using the link:https://kubernetes.io/docs/concepts/architecture/cloud-controller/[Cloud Controller Manager]. - -To preserve user-defined configurations for the legacy cloud provider, existing configurations are mapped to new ones as part of the migration process. It searches for a configuration called `cloud-provider-config` in the `openshift-config` namespace. - -NOTE: The config map name `cloud-provider-config` is not statically configured. 
It is derived from the `spec.cloudConfig.name` value in the `infrastructure/cluster` CRD. - -Found configurations are synchronized to the `cloud-conf` config map in the `openshift-cloud-controller-manager` namespace. - -// To synchronize the configuration to a different namespace, you can override the default `openshift-cloud-controller-manager` namespace by passing the name of the namespace with the `--namespace` flag to the operator binary. - -As part of this synchronization, the OpenStack CCM Operator alters the new config map such that its properties are compatible with the external cloud provider. The file is changed in the following ways: - -* The `[Global] secret-name`, `[Global] secret-namespace`, and `[Global] kubeconfig-path` options are removed. They do not apply to the external cloud provider. - -* The `[Global] use-clouds`, `[Global] clouds-file`, and `[Global] cloud` options are added. - -* The entire `[BlockStorage]` section is removed. External cloud providers no longer perform storage operations. Block storage configuration is managed by the Cinder CSI driver. - -Additionally, the CCM Operator enforces a number of default options. Values for these options are always overriden as follows: - -[source,txt] ----- -[Global] -use-clouds = true -clouds-file = /etc/openstack/secret/clouds.yaml -cloud = openstack -... - -[LoadBalancer] -use-octavia = true -enabled = true <1> ----- -<1> If the network is configured to use Kuryr, this value is `false`. - -The `clouds-value` value, `/etc/openstack/secret/clouds.yaml`, is mapped to the `openstack-cloud-credentials` config in the `openshift-cloud-controller-manager` namespace. You can modify the {rh-openstack} cloud in this file as you do any other `clouds.yaml` file. diff --git a/modules/nw-openstack-hw-offload-testpmd-pod.adoc b/modules/nw-openstack-hw-offload-testpmd-pod.adoc deleted file mode 100644 index b0fc3353da4b..000000000000 --- a/modules/nw-openstack-hw-offload-testpmd-pod.adoc +++ /dev/null @@ -1,50 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: REFERENCE -[id="nw-openstack-hw-offload-testpmd-pod_{context}"] -= A test pod template for clusters that use OVS hardware offloading on OpenStack - -The following `testpmd` pod demonstrates Open vSwitch (OVS) hardware offloading on {rh-openstack-first}. - -.An example `testpmd` pod -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: testpmd-sriov - namespace: mynamespace - annotations: - k8s.v1.cni.cncf.io/networks: hwoffload1 -spec: - runtimeClassName: performance-cnf-performanceprofile <1> - containers: - - name: testpmd - command: ["sleep", "99999"] - image: registry.redhat.io/openshift4/dpdk-base-rhel8:v4.9 - securityContext: - capabilities: - add: ["IPC_LOCK","SYS_ADMIN"] - privileged: true - runAsUser: 0 - resources: - requests: - memory: 1000Mi - hugepages-1Gi: 1Gi - cpu: '2' - limits: - hugepages-1Gi: 1Gi - cpu: '2' - memory: 1000Mi - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - readOnly: False - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> If your performance profile is not named `cnf-performance profile`, replace that string with the correct performance profile name. 
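As a usage sketch, assuming that the template above is saved to a hypothetical file named `testpmd-sriov.yaml`, you might create the pod and open a shell in it as follows:

[source,terminal]
----
$ oc apply -f testpmd-sriov.yaml
$ oc rsh -n mynamespace testpmd-sriov
----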
diff --git a/modules/nw-openstack-ovs-dpdk-testpmd-pod.adoc b/modules/nw-openstack-ovs-dpdk-testpmd-pod.adoc deleted file mode 100644 index 01cb9021e1b7..000000000000 --- a/modules/nw-openstack-ovs-dpdk-testpmd-pod.adoc +++ /dev/null @@ -1,55 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: REFERENCE -[id="nw-openstack-ovs-dpdk-testpmd-pod_{context}"] -= A test pod template for clusters that use OVS-DPDK on OpenStack - -The following `testpmd` pod demonstrates container creation with huge pages, reserved CPUs, and the SR-IOV port. - -.An example `testpmd` pod -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: testpmd-dpdk - namespace: mynamespace - annotations: - cpu-load-balancing.crio.io: "disable" - cpu-quota.crio.io: "disable" -# ... -spec: - containers: - - name: testpmd - command: ["sleep", "99999"] - image: registry.redhat.io/openshift4/dpdk-base-rhel8:v4.9 - securityContext: - capabilities: - add: ["IPC_LOCK","SYS_ADMIN"] - privileged: true - runAsUser: 0 - resources: - requests: - memory: 1000Mi - hugepages-1Gi: 1Gi - cpu: '2' - openshift.io/dpdk1: 1 <1> - limits: - hugepages-1Gi: 1Gi - cpu: '2' - memory: 1000Mi - openshift.io/dpdk1: 1 - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - readOnly: False - runtimeClassName: performance-cnf-performanceprofile <2> - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> The name `dpdk1` in this example is a user-created `SriovNetworkNodePolicy` resource. You can substitute this name for that of a resource that you create. -<2> If your performance profile is not named `cnf-performance profile`, replace that string with the correct performance profile name. \ No newline at end of file diff --git a/modules/nw-openstack-sr-iov-testpmd-pod.adoc b/modules/nw-openstack-sr-iov-testpmd-pod.adoc deleted file mode 100644 index 28dc31f2a6c5..000000000000 --- a/modules/nw-openstack-sr-iov-testpmd-pod.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/add-pod.adoc - -:_content-type: REFERENCE -[id="nw-openstack-ovs-sr-iov-testpmd-pod_{context}"] -= A test pod template for clusters that use SR-IOV on OpenStack - -The following `testpmd` pod demonstrates container creation with huge pages, reserved CPUs, and the SR-IOV port. - -.An example `testpmd` pod -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: testpmd-sriov - namespace: mynamespace - annotations: - cpu-load-balancing.crio.io: "disable" - cpu-quota.crio.io: "disable" -# ... -spec: - containers: - - name: testpmd - command: ["sleep", "99999"] - image: registry.redhat.io/openshift4/dpdk-base-rhel8:v4.9 - securityContext: - capabilities: - add: ["IPC_LOCK","SYS_ADMIN"] - privileged: true - runAsUser: 0 - resources: - requests: - memory: 1000Mi - hugepages-1Gi: 1Gi - cpu: '2' - openshift.io/sriov1: 1 - limits: - hugepages-1Gi: 1Gi - cpu: '2' - memory: 1000Mi - openshift.io/sriov1: 1 - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - readOnly: False - runtimeClassName: performance-cnf-performanceprofile <1> - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> This example assumes that the name of the performance profile is `cnf-performance profile`. 
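To confirm that the SR-IOV resource request in the template was honored, you might inspect the scheduled pod. The following is a minimal sketch, assuming the pod was created from the template above:

[source,terminal]
----
$ oc get pod testpmd-sriov -n mynamespace \
    -o jsonpath='{.spec.containers[0].resources.limits}'
----

The output should include the `openshift.io/sriov1` resource along with the CPU, memory, and huge page limits defined in the template.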
\ No newline at end of file diff --git a/modules/nw-operator-cr.adoc b/modules/nw-operator-cr.adoc deleted file mode 100644 index 06f301bc3ab2..000000000000 --- a/modules/nw-operator-cr.adoc +++ /dev/null @@ -1,461 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing_aws/installing-aws-network-customizations.adoc -// * installing/installing_azure/installing-azure-network-customizations.adoc -// * installing/installing_bare_metal/installing-bare-metal-network-customizations.adoc -// * installing/installing_gcp/installing-gcp-network-customizations.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_vsphere/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing_vsphere/installing-vsphere-network-customizations.adoc -// * networking/cluster-network-operator.adoc -// * networking/network_policy/logging-network-policy.adoc -// * post_installation_configuration/network-configuration.adoc -// * installing/installing_ibm_cloud_public/installing-ibm-cloud-network-customizations.adoc -// * installing/installing_ibm_z/installing-ibm-z.adoc -// * installing/installing_ibm_z/installing-ibm-z-kvm.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc -// * installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc -// * installing/installing_ibm_power/installing-ibm-power.adoc -// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc -// * installing/installing_azure_stack_hub/installing-azure-stack-hub-network-customizations.adoc - -// Installation assemblies need different details than the CNO operator does -ifeval::["{context}" == "cluster-network-operator"] -:operator: -endif::[] - -ifeval::["{context}" == "post-install-network-configuration"] -:post-install-network-configuration: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:ibm-cloud: -endif::[] - -:_content-type: CONCEPT -[id="nw-operator-cr_{context}"] -= Cluster Network Operator configuration - -The configuration for the cluster network is specified as part of the Cluster Network Operator (CNO) configuration and stored in a custom resource (CR) object that is named `cluster`. The CR specifies the fields for the `Network` API in the `operator.openshift.io` API group. - -The CNO configuration inherits the following fields during cluster installation from the `Network` API in the `Network.config.openshift.io` API group and these fields cannot be changed: - -`clusterNetwork`:: IP address pools from which pod IP addresses are allocated. -`serviceNetwork`:: IP address pool for services. -`defaultNetwork.type`:: Cluster network plugin, such as OpenShift SDN or OVN-Kubernetes. - -// For the post installation assembly, no further content is provided. -ifdef::post-install-network-configuration,operator[] -[NOTE] -==== -After cluster installation, you cannot modify the fields listed in the previous section. 
-==== -endif::[] -ifndef::post-install-network-configuration[] -You can specify the cluster network plugin configuration for your cluster by setting the fields for the `defaultNetwork` object in the CNO object named `cluster`. - -[id="nw-operator-cr-cno-object_{context}"] -== Cluster Network Operator configuration object - -The fields for the Cluster Network Operator (CNO) are described in the following table: - -.Cluster Network Operator configuration object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`metadata.name` -|`string` -|The name of the CNO object. This name is always `cluster`. - -|`spec.clusterNetwork` -|`array` -|A list specifying the blocks of IP addresses from which pod IP addresses are -allocated and the subnet prefix length assigned to each individual node in the cluster. For example: - -[source,yaml] ----- -spec: - clusterNetwork: - - cidr: 10.128.0.0/19 - hostPrefix: 23 - - cidr: 10.128.32.0/19 - hostPrefix: 23 ----- - -ifdef::operator[] -This value is ready-only and inherited from the `Network.config.openshift.io` object named `cluster` during cluster installation. -endif::operator[] -ifndef::operator[] -You can customize this field only in the `install-config.yaml` file before you create the manifests. The value is read-only in the manifest file. -endif::operator[] - -|`spec.serviceNetwork` -|`array` -|A block of IP addresses for services. The OpenShift SDN and OVN-Kubernetes network plugins support only a single IP address block for the service network. For example: - -[source,yaml] ----- -spec: - serviceNetwork: - - 172.30.0.0/14 ----- - -ifdef::operator[] -This value is ready-only and inherited from the `Network.config.openshift.io` object named `cluster` during cluster installation. -endif::operator[] -ifndef::operator[] -You can customize this field only in the `install-config.yaml` file before you create the manifests. The value is read-only in the manifest file. -endif::operator[] - -|`spec.defaultNetwork` -|`object` -|Configures the network plugin for the cluster network. - -|`spec.kubeProxyConfig` -|`object` -| -The fields for this object specify the kube-proxy configuration. -If you are using the OVN-Kubernetes cluster network plugin, the kube-proxy configuration has no effect. - -|==== - -[discrete] -[id="nw-operator-cr-defaultnetwork_{context}"] -=== defaultNetwork object configuration - -The values for the `defaultNetwork` object are defined in the following table: - -.`defaultNetwork` object -[cols=".^3,.^2,.^5a",options="header"] -|==== -|Field|Type|Description - -|`type` -|`string` -|Either `OpenShiftSDN` or `OVNKubernetes`. The {openshift-networking} network plugin is selected during installation. This value cannot be changed after cluster installation. -[NOTE] -==== -{product-title} uses the OVN-Kubernetes network plugin by default. -==== - -|`openshiftSDNConfig` -|`object` -|This object is only valid for the OpenShift SDN network plugin. - -|`ovnKubernetesConfig` -|`object` -|This object is only valid for the OVN-Kubernetes network plugin. - -|==== - -[discrete] -[id="nw-operator-configuration-parameters-for-openshift-sdn_{context}"] -==== Configuration for the OpenShift SDN network plugin - -The following table describes the configuration fields for the OpenShift SDN network plugin: - -.`openshiftSDNConfig` object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`mode` -|`string` -| -ifndef::operator[] -Configures the network isolation mode for OpenShift SDN. 
The default value is `NetworkPolicy`. - -The values `Multitenant` and `Subnet` are available for backwards compatibility with {product-title} 3.x but are not recommended. This value cannot be changed after cluster installation. -endif::operator[] -ifdef::operator[] -The network isolation mode for OpenShift SDN. -endif::operator[] - -|`mtu` -|`integer` -| -ifndef::operator[] -The maximum transmission unit (MTU) for the VXLAN overlay network. This is detected automatically based on the MTU of the primary network interface. You do not normally need to override the detected MTU. - -If the auto-detected value is not what you expect it to be, confirm that the MTU on the primary network interface on your nodes is correct. You cannot use this option to change the MTU value of the primary network interface on the nodes. - -If your cluster requires different MTU values for different nodes, you must set this value to `50` less than the lowest MTU value in your cluster. For example, if some nodes in your cluster have an MTU of `9001`, and some have an MTU of `1500`, you must set this value to `1450`. - -This value cannot be changed after cluster installation. -endif::operator[] -ifdef::operator[] -The maximum transmission unit (MTU) for the VXLAN overlay network. This value is normally configured automatically. -endif::operator[] - -|`vxlanPort` -|`integer` -| -ifndef::operator[] -The port to use for all VXLAN packets. The default value is `4789`. This value cannot be changed after cluster installation. - -If you are running in a virtualized environment with existing nodes that are part of another VXLAN network, then you might be required to change this. For example, when running an OpenShift SDN overlay on top of VMware NSX-T, you must select an alternate port for the VXLAN, because both SDNs use the same default VXLAN port number. - -On Amazon Web Services (AWS), you can select an alternate port for the VXLAN between port `9000` and port `9999`. -endif::operator[] -ifdef::operator[] -The port to use for all VXLAN packets. The default value is `4789`. -endif::operator[] - -|==== - -ifdef::operator[] -[NOTE] -==== -You can only change the configuration for your cluster network plugin during cluster installation. -==== -endif::operator[] - -.Example OpenShift SDN configuration -[source,yaml] ----- -defaultNetwork: - type: OpenShiftSDN - openshiftSDNConfig: - mode: NetworkPolicy - mtu: 1450 - vxlanPort: 4789 ----- - -[discrete] -[id="nw-operator-configuration-parameters-for-ovn-sdn_{context}"] -==== Configuration for the OVN-Kubernetes network plugin - -The following table describes the configuration fields for the OVN-Kubernetes network plugin: - -.`ovnKubernetesConfig` object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`mtu` -|`integer` -| -ifndef::operator[] -The maximum transmission unit (MTU) for the Geneve (Generic Network Virtualization Encapsulation) overlay network. This is detected automatically based on the MTU of the primary network interface. You do not normally need to override the detected MTU. - -If the auto-detected value is not what you expect it to be, confirm that the MTU on the primary network interface on your nodes is correct. You cannot use this option to change the MTU value of the primary network interface on the nodes. - -If your cluster requires different MTU values for different nodes, you must set this value to `100` less than the lowest MTU value in your cluster. 
For example, if some nodes in your cluster have an MTU of `9001`, and some have an MTU of `1500`, you must set this value to `1400`. -endif::operator[] -ifdef::operator[] -The maximum transmission unit (MTU) for the Geneve (Generic Network Virtualization Encapsulation) overlay network. This value is normally configured automatically. -endif::operator[] - -|`genevePort` -|`integer` -| -ifndef::operator[] -The port to use for all Geneve packets. The default value is `6081`. This value cannot be changed after cluster installation. -endif::operator[] -ifdef::operator[] -The UDP port for the Geneve overlay network. -endif::operator[] - -ifndef::ibm-cloud[] -|`ipsecConfig` -|`object` -| -ifndef::operator[] -Specify an empty object to enable IPsec encryption. -endif::operator[] -ifdef::operator[] -If the field is present, IPsec is enabled for the cluster. -endif::operator[] -endif::ibm-cloud[] - -|`policyAuditConfig` -|`object` -|Specify a configuration object for customizing network policy audit logging. If unset, the defaults audit log settings are used. - -|`gatewayConfig` -|`object` -|Optional: Specify a configuration object for customizing how egress traffic is sent to the node gateway. - -[NOTE] -==== - While migrating egress traffic, you can expect some disruption to workloads and service traffic until the Cluster Network Operator (CNO) successfully rolls out the changes. -==== - -|`v4InternalSubnet` -| -If your existing network infrastructure overlaps with the `100.64.0.0/16` IPv4 subnet, you can specify a different IP address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. - -For example, if the `clusterNetwork.cidr` is `10.128.0.0/14` and the `clusterNetwork.hostPrefix` is `/23`, then the maximum number of nodes is `2^(23-14)=512`. An IP address is also required for the gateway, network, and broadcast addresses. Therefore the internal IP address range must be at least a `/24`. - -This field cannot be changed after installation. -|The default value is `100.64.0.0/16`. - -|`v6InternalSubnet` -| -If your existing network infrastructure overlaps with the `fd98::/48` IPv6 subnet, you can specify a different IP address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. - -This field cannot be changed after installation. -| The default value is `fd98::/48`. -|==== - -ifdef::ibm-cloud[] -[NOTE] -==== -IPsec for the OVN-Kubernetes network plugin is not supported when installing a cluster on IBM Cloud. -==== -endif::ibm-cloud[] - -// tag::policy-audit[] -.`policyAuditConfig` object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`rateLimit` -|integer -|The maximum number of messages to generate every second per node. The default value is `20` messages per second. - -|`maxFileSize` -|integer -|The maximum size for the audit log in bytes. The default value is `50000000` or 50 MB. - -|`destination` -|string -| -One of the following additional audit log targets: - -`libc`:: The libc `syslog()` function of the journald process on the host. -`udp::`:: A syslog server. Replace `:` with the host and port of the syslog server. 
-`unix:`:: A Unix Domain Socket file specified by ``. -`null`:: Do not send the audit logs to any additional target. - -|`syslogFacility` -|string -|The syslog facility, such as `kern`, as defined by RFC5424. The default value is `local0`. - -|==== -// end::policy-audit[] - -[id="gatewayConfig-object_{context}"] -.`gatewayConfig` object -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`routingViaHost` -|`boolean` -|Set this field to `true` to send egress traffic from pods to the host networking stack. -For highly-specialized installations and applications that rely on manually configured routes in the kernel routing table, you might want to route egress traffic to the host networking stack. -By default, egress traffic is processed in OVN to exit the cluster and is not affected by specialized routes in the kernel routing table. -The default value is `false`. - -This field has an interaction with the Open vSwitch hardware offloading feature. -If you set this field to `true`, you do not receive the performance benefits of the offloading because egress traffic is processed by the host networking stack. - -|==== - -ifdef::operator[] -[NOTE] -==== -You can only change the configuration for your cluster network plugin during cluster installation, except for the `gatewayConfig` field that can be changed at runtime as a post-installation activity. -==== -endif::operator[] - -.Example OVN-Kubernetes configuration with IPSec enabled -[source,yaml] ----- -defaultNetwork: - type: OVNKubernetes - ovnKubernetesConfig: - mtu: 1400 - genevePort: 6081 -ifndef::ibm-cloud[] - ipsecConfig: {} -endif::ibm-cloud[] ----- - -[discrete] -[id="nw-operator-cr-kubeproxyconfig_{context}"] -=== kubeProxyConfig object configuration - -The values for the `kubeProxyConfig` object are defined in the following table: - -.`kubeProxyConfig` object -[cols=".^3,.^2,.^5a",options="header"] -|==== -|Field|Type|Description - -|`iptablesSyncPeriod` -|`string` -| -The refresh period for `iptables` rules. The default value is `30s`. Valid suffixes include `s`, `m`, and `h` and are described in the link:https://golang.org/pkg/time/#ParseDuration[Go `time` package] documentation. - -[NOTE] -==== -Because of performance improvements introduced in {product-title} 4.3 and greater, adjusting the `iptablesSyncPeriod` parameter is no longer necessary. -==== - -|`proxyArguments.iptables-min-sync-period` -|`array` -| -The minimum duration before refreshing `iptables` rules. This field ensures that the refresh does not happen too frequently. Valid suffixes include `s`, `m`, and `h` and are described in the link:https://golang.org/pkg/time/#ParseDuration[Go `time` package]. The default value is: - -[source,yaml] ----- -kubeProxyConfig: - proxyArguments: - iptables-min-sync-period: - - 0s ----- -|==== - -ifdef::operator[] -[id="nw-operator-example-cr_{context}"] -== Cluster Network Operator example configuration - -A complete CNO configuration is specified in the following example: - -.Example Cluster Network Operator object -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Network -metadata: - name: cluster -spec: - clusterNetwork: <1> - - cidr: 10.128.0.0/14 - hostPrefix: 23 - serviceNetwork: <1> - - 172.30.0.0/16 - defaultNetwork: <1> - type: OpenShiftSDN - openshiftSDNConfig: - mode: NetworkPolicy - mtu: 1450 - vxlanPort: 4789 - kubeProxyConfig: - iptablesSyncPeriod: 30s - proxyArguments: - iptables-min-sync-period: - - 0s ----- -<1> Configured only during cluster installation. 
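Because `gatewayConfig` is described above as the only part of the plugin configuration that can change after installation, a runtime update is typically applied as a patch to the `Network` operator object. The following minimal sketch assumes the same merge-patch pattern that the IPsec procedures elsewhere in this documentation use; the `routingViaHost: true` value is only an illustration of sending egress traffic through the host networking stack, not a recommended setting.

.Example patch for the `gatewayConfig` field (illustrative sketch)
[source,terminal]
----
$ oc patch networks.operator.openshift.io cluster --type=merge \
  -p '{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"gatewayConfig":{"routingViaHost":true}}}}}'
----

You can review the resulting configuration afterward with `oc get networks.operator.openshift.io cluster -o yaml`.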
-endif::operator[] -endif::post-install-network-configuration[] - -ifeval::["{context}" == "cluster-network-operator"] -:!operator: -endif::[] - -ifdef::post-install-network-configuration[] -:!post-install-network-configuration: -endif::[] -ifeval::["{context}" == "installing-ibm-cloud-network-customizations"] -:!ibm-cloud: -endif::[] diff --git a/modules/nw-osp-configuring-external-load-balancer.adoc b/modules/nw-osp-configuring-external-load-balancer.adoc deleted file mode 100644 index ed66a61487a2..000000000000 --- a/modules/nw-osp-configuring-external-load-balancer.adoc +++ /dev/null @@ -1,164 +0,0 @@ -// Module included in the following assemblies: -// TODO -// * networking/TBD -// * networking/load-balancing-openstack.adoc -// * installing/installing_bare_metal_ipi/ipi-install-post-installation-configuration.adoc jowilkin -// * installing/installing-vsphere-installer-provisioned.adoc -// * installing/installing-vsphere-installer-provisioned-customizations.adoc -// * installing/installing-vsphere-installer-provisioned-network-customizations.adoc -// * installing/installing-restricted-networks-installer-provisioned-vsphere.adoc - - -ifeval::["{context}" == "installing-vsphere-installer-provisioned"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == "installing-vsphere-installer-provisioned-network-customizations"] -:vsphere: -endif::[] -ifeval::["{context}" == installing-restricted-networks-installer-provisioned-vsphere] -:vsphere: -endif::[] - -:_content-type: PROCEDURE -[id="nw-osp-configuring-external-load-balancer_{context}"] -= Configuring an external load balancer - -You can configure an {product-title} cluster -ifeval::["{context}" == "load-balancing-openstack"] -on {rh-openstack-first} -endif::[] -to use an external load balancer in place of the default load balancer. - -You can also configure an {product-title} cluster to use an external load balancer that supports multiple subnets. If you use multiple subnets, you can explicitly list all the IP addresses in any networks that are used by your load balancer targets. This configuration can reduce maintenance overhead because you can create and destroy nodes within those networks without reconfiguring the load balancer targets. - -If you deploy your ingress pods by using a machine set on a smaller network, such as a `/27` or `/28`, you can simplify your load balancer targets. - -[NOTE] -==== -You do not need to specify API and Ingress static addresses for your installation program. If you choose this configuration, you must take additional actions to define network targets that accept an IP address from each referenced vSphere subnet. -==== - -.Prerequisites - -* On your load balancer, TCP over ports 6443, 443, and 80 must be reachable by all users of your system that are located outside the cluster. - -* Load balance the application ports, 443 and 80, between all the compute nodes. - -* Load balance the API port, 6443, between each of the control plane nodes. - -* On your load balancer, port 22623, which is used to serve ignition startup configurations to nodes, is not exposed outside of the cluster. - -* Your load balancer can access the required ports on each node in your cluster. You can ensure this level of access by completing the following actions: -** The API load balancer can access ports 22623 and 6443 on the control plane nodes. 
-** The ingress load balancer can access ports 443 and 80 on the nodes where the ingress pods are located. - -ifdef::vsphere[] -* Optional: If you are using multiple networks, you can create targets for every IP address in the network that can host nodes. This configuration can reduce the maintenance overhead of your cluster. -endif::vsphere[] - -[IMPORTANT] -==== -External load balancing services and the control plane nodes must run on the same L2 network, and on the same VLAN when using VLANs to route traffic between the load balancing services and the control plane nodes. -==== - -.Procedure - -. Enable access to the cluster from your load balancer on ports 6443, 443, and 80. -+ -As an example, note this HAProxy configuration: -+ -.A section of a sample HAProxy configuration -[source,text] ----- -... -listen my-cluster-api-6443 - bind 0.0.0.0:6443 - mode tcp - balance roundrobin - server my-cluster-master-2 192.0.2.2:6443 check - server my-cluster-master-0 192.0.2.3:6443 check - server my-cluster-master-1 192.0.2.1:6443 check -listen my-cluster-apps-443 - bind 0.0.0.0:443 - mode tcp - balance roundrobin - server my-cluster-worker-0 192.0.2.6:443 check - server my-cluster-worker-1 192.0.2.5:443 check - server my-cluster-worker-2 192.0.2.4:443 check -listen my-cluster-apps-80 - bind 0.0.0.0:80 - mode tcp - balance roundrobin - server my-cluster-worker-0 192.0.2.7:80 check - server my-cluster-worker-1 192.0.2.9:80 check - server my-cluster-worker-2 192.0.2.8:80 check ----- - -. Add records to your DNS server for the cluster API and apps over the load balancer. For example: -+ -[source,dns] ----- - api.. - apps.. ----- - -. From a command line, use `curl` to verify that the external load balancer and DNS configuration are operational. - -.. Verify that the cluster API is accessible: -+ -[source,terminal] ----- -$ curl https://:6443/version --insecure ----- -+ -If the configuration is correct, you receive a JSON object in response: -+ -[source,json] ----- -{ - "major": "1", - "minor": "11+", - "gitVersion": "v1.11.0+ad103ed", - "gitCommit": "ad103ed", - "gitTreeState": "clean", - "buildDate": "2019-01-09T06:44:10Z", - "goVersion": "go1.10.3", - "compiler": "gc", - "platform": "linux/amd64" -} ----- - -.. Verify that cluster applications are accessible: -+ -[NOTE] -==== -You can also verify application accessibility by opening the {product-title} console in a web browser. -==== -+ -[source, terminal] ----- -$ curl http://console-openshift-console.apps.. 
-I -L --insecure ----- -+ -If the configuration is correct, you receive an HTTP response: -+ -[source,terminal] ----- -HTTP/1.1 302 Found -content-length: 0 -location: https://console-openshift-console.apps../ -cache-control: no-cacheHTTP/1.1 200 OK -referrer-policy: strict-origin-when-cross-origin -set-cookie: csrf-token=39HoZgztDnzjJkq/JuLJMeoKNXlfiVv2YgZc09c3TBOBU4NI6kDXaJH1LdicNhN1UsQWzon4Dor9GWGfopaTEQ==; Path=/; Secure -x-content-type-options: nosniff -x-dns-prefetch-control: off -x-frame-options: DENY -x-xss-protection: 1; mode=block -date: Tue, 17 Nov 2020 08:42:10 GMT -content-type: text/html; charset=utf-8 -set-cookie: 1e2670d92730b515ce3a1bb65da45062=9b714eb87e93cf34853e87a92d6894be; path=/; HttpOnly; Secure; SameSite=None -cache-control: private ----- diff --git a/modules/nw-osp-enabling-ovs-offload.adoc b/modules/nw-osp-enabling-ovs-offload.adoc deleted file mode 100644 index 16630d619b8f..000000000000 --- a/modules/nw-osp-enabling-ovs-offload.adoc +++ /dev/null @@ -1,121 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-osp-enabling-ovs-offload_{context}"] -= Enabling OVS hardware offloading - -For clusters that run on {rh-openstack-first}, you can enable link:https://www.openvswitch.org/[Open vSwitch (OVS)] hardware offloading. - -OVS is a multi-layer virtual switch that enables large-scale, multi-server network virtualization. - -.Prerequisites - -* You installed a cluster on {rh-openstack} that is configured for single-root input/output virtualization (SR-IOV). -* You installed the SR-IOV Network Operator on your cluster. -* You created two `hw-offload` type virtual function (VF) interfaces on your cluster. - -.Procedure - -. Create an `SriovNetworkNodePolicy` policy for the two `hw-offload` type VF interfaces that are on your cluster: -+ -.The first virtual function interface -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy <1> -metadata: - name: "hwoffload9" - namespace: openshift-sriov-network-operator -spec: - deviceType: netdevice - isRdma: true - nicSelector: - pfNames: <2> - - ens6 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: 'true' - numVfs: 1 - priority: 99 - resourceName: "hwoffload9" ----- -<1> Insert the `SriovNetworkNodePolicy` value here. -<2> Both interfaces must include physical function (PF) names. -+ -.The second virtual function interface -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy <1> -metadata: - name: "hwoffload10" - namespace: openshift-sriov-network-operator -spec: - deviceType: netdevice - isRdma: true - nicSelector: - pfNames: <2> - - ens5 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: 'true' - numVfs: 1 - priority: 99 - resourceName: "hwoffload10" ----- -<1> Insert the `SriovNetworkNodePolicy` value here. -<2> Both interfaces must include physical function (PF) names. - -. 
Create `NetworkAttachmentDefinition` resources for the two interfaces: -+ -.A `NetworkAttachmentDefinition` resource for the first interface -[source,yaml] ----- -apiVersion: k8s.cni.cncf.io/v1 -kind: NetworkAttachmentDefinition -metadata: - annotations: - k8s.v1.cni.cncf.io/resourceName: openshift.io/hwoffload9 - name: hwoffload9 - namespace: default -spec: - config: '{ "cniVersion":"0.3.1", "name":"hwoffload9","type":"host-device","device":"ens6" - }' ----- -+ -.A `NetworkAttachmentDefinition` resource for the second interface -[source,yaml] ----- -apiVersion: k8s.cni.cncf.io/v1 -kind: NetworkAttachmentDefinition -metadata: - annotations: - k8s.v1.cni.cncf.io/resourceName: openshift.io/hwoffload10 - name: hwoffload10 - namespace: default -spec: - config: '{ "cniVersion":"0.3.1", "name":"hwoffload10","type":"host-device","device":"ens5" - }' ----- - -. Use the interfaces that you created with a pod. For example: -+ -.A pod that uses the two OVS offload interfaces -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dpdk-testpmd - namespace: default - annotations: - irq-load-balancing.crio.io: disable - cpu-quota.crio.io: disable - k8s.v1.cni.cncf.io/resourceName: openshift.io/hwoffload9 - k8s.v1.cni.cncf.io/resourceName: openshift.io/hwoffload10 -spec: - restartPolicy: Never - containers: - - name: dpdk-testpmd - image: quay.io/krister/centos8_nfv-container-dpdk-testpmd:latest ----- diff --git a/modules/nw-osp-hardware-offload-attaching-network.adoc b/modules/nw-osp-hardware-offload-attaching-network.adoc deleted file mode 100644 index d70666274acf..000000000000 --- a/modules/nw-osp-hardware-offload-attaching-network.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-osp-hardware-offload-attaching-network_{context}"] -= Attaching an OVS hardware offloading network - -You can attach an Open vSwitch (OVS) hardware offloading network to your cluster. - -.Prerequisites - -* Your cluster is installed and running. -* You provisioned an OVS hardware offloading network on {rh-openstack-first} to use with your cluster. - -.Procedure - -. Create a file named `network.yaml` from the following template: -+ -[source,yaml] ----- -spec: - additionalNetworks: - - name: hwoffload1 - namespace: cnf - rawCNIConfig: '{ "cniVersion": "0.3.1", "name": "hwoffload1", "type": "host-device","pciBusId": "0000:00:05.0", "ipam": {}}' <1> - type: Raw ----- -+ -where: -+ -`pciBusId`:: Specifies the device that is connected to the offloading network. If you do not have it, you can find this value by running the following command: -+ -[source,terminal] ----- -$ oc describe SriovNetworkNodeState -n openshift-sriov-network-operator ----- - -. 
From a command line, enter the following command to patch your cluster with the file: -+ -[source,terminal] ----- -$ oc apply -f network.yaml ----- \ No newline at end of file diff --git a/modules/nw-osp-loadbalancer-etp-local.adoc b/modules/nw-osp-loadbalancer-etp-local.adoc deleted file mode 100644 index a6e09f5fcb9c..000000000000 --- a/modules/nw-osp-loadbalancer-etp-local.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// * networking/nw-osp-loadbalancer-limitations.adoc - -:_content-type: CONCEPT -[id="nw-osp-loadbalancer-etp-local_{context}"] -= Local external traffic policies - -You can set the external traffic policy (ETP) parameter, `.spec.externalTrafficPolicy`, on a load balancer service to preserve the source IP address of incoming traffic when it reaches service endpoint pods. However, if your cluster uses the Amphora Octavia provider, the source IP of the traffic is replaced with the IP address of the Amphora VM. This behavior does not occur if your cluster uses the OVN Octavia provider. - -Having the `ETP` option set to `Local` requires that health monitors be created for the load balancer. Without health monitors, traffic can be routed to a node that doesn't have a functional endpoint, which causes the connection to drop. To force Cloud Provider OpenStack to create health monitors, you must set the value of the `create-monitor` option in the cloud provider configuration to `true`. - -In {rh-openstack} 16.1 and 16.2, the OVN Octavia provider does not support health monitors. Therefore, setting the ETP to local is unsupported. - -In {rh-openstack} 16.1 and 16.2, the Amphora Octavia provider does not support HTTP monitors on UDP pools. As a result, UDP load balancer services have `UDP-CONNECT` monitors created instead. Due to implementation details, this configuration only functions properly with the OVN-Kubernetes CNI plugin. When the OpenShift SDN CNI plugin is used, the UDP services alive nodes are detected unreliably. \ No newline at end of file diff --git a/modules/nw-osp-loadbalancer-limitations.adoc b/modules/nw-osp-loadbalancer-limitations.adoc deleted file mode 100644 index 463840ba836e..000000000000 --- a/modules/nw-osp-loadbalancer-limitations.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// * networking/load-balancing-openstack.adoc -// For thinking and reviewing, adding to networking/load-balancing-openstack.adoc - -:_content-type: CONCEPT -[id="nw-osp-loadbalancer-limitations_{context}"] -= Limitations of load balancer services - -{product-title} clusters on {rh-openstack-first} use Octavia to handle load balancer services. As a result of this choice, such clusters have a number of functional limitations. - -{rh-openstack} Octavia has two supported providers: Amphora and OVN. These providers differ in terms of available features as well as implementation details. These distinctions affect load balancer services that are created on your cluster. 
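For illustration, the following minimal `Service` manifest sets `.spec.externalTrafficPolicy` to `Local` on a `LoadBalancer` service. The name, namespace, selector, and ports are placeholder values; whether the client source IP is actually preserved, and whether health monitors are created, still depends on the Octavia provider and the `create-monitor` setting described above.

.Example `LoadBalancer` service with a local external traffic policy (illustrative sketch)
[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: example-lb        # placeholder name
  namespace: example      # placeholder namespace
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local  # preserve the client source IP
  selector:
    app: example          # placeholder pod selector
  ports:
  - protocol: TCP
    port: 80
    targetPort: 8080
----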
\ No newline at end of file diff --git a/modules/nw-osp-loadbalancer-source-ranges.adoc b/modules/nw-osp-loadbalancer-source-ranges.adoc deleted file mode 100644 index a82b94c8bb6a..000000000000 --- a/modules/nw-osp-loadbalancer-source-ranges.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// * networking/nw-osp-loadbalancer-limitations.adoc - -:_content-type: CONCEPT -[id="nw-osp-loadbalancer-source-ranges_{context}"] -= Load balancer source ranges - -Use the `.spec.loadBalancerSourceRanges` property to restrict the traffic that can pass through the load balancer according to source IP. This property is supported for use with the Amphora Octavia provider only. If your cluster uses the OVN Octavia provider, the option is ignored and traffic is unrestricted. \ No newline at end of file diff --git a/modules/nw-osp-pod-adding-connections-ipv6.adoc b/modules/nw-osp-pod-adding-connections-ipv6.adoc deleted file mode 100644 index 8c7914e2e1c4..000000000000 --- a/modules/nw-osp-pod-adding-connections-ipv6.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-osp-pod-adding-connections-ipv6_{context}"] -= Adding IPv6 connectivity to pods on {rh-openstack} - -After you enable IPv6 connectivity in pods, add connectivity to them by using a Container Network Interface (CNI) configuration. - -.Procedure - -. To edit the Cluster Network Operator (CNO), enter the following command: -+ -[source,terminal] ----- -$ oc edit networks.operator.openshift.io cluster ----- - -. Specify your CNI configuration under the `spec` field. For example, the following configuration uses a SLAAC address mode with MACVLAN: -+ -[source,yaml] ----- -... -spec: - additionalNetworks: - - name: ipv6 - namespace: ipv6 <1> - rawCNIConfig: '{ "cniVersion": "0.3.1", "name": "ipv6", "type": "macvlan", "master": "ens4"}' <2> - type: Raw ----- -<1> Be sure to create pods in the same namespace. -<2> The interface in the network attachment `"master"` field can differ from `"ens4"` when more networks are configured or when a different kernel driver is used. -+ -[NOTE] -==== -If you are using stateful address mode, include the IP Address Management (IPAM) in the CNI configuration. - -DHCPv6 is not supported by Multus. -==== - -. Save your changes and quit the text editor to commit your changes. - -.Verification - -* On a command line, enter the following command: -+ -[source,terminal] ----- -$ oc get network-attachment-definitions -A ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME AGE -ipv6 ipv6 21h ----- - -You can now create pods that have secondary IPv6 connections. diff --git a/modules/nw-osp-pod-connections-ipv6.adoc b/modules/nw-osp-pod-connections-ipv6.adoc deleted file mode 100644 index fe724dd67aa9..000000000000 --- a/modules/nw-osp-pod-connections-ipv6.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-osp-pod-connections-ipv6_{context}"] -= Enabling IPv6 connectivity to pods on {rh-openstack} - -To enable IPv6 connectivity between pods that have additional networks that are on different nodes, disable port security for the IPv6 port of the server. 
Disabling port security obviates the need to create allowed address pairs for each IPv6 address that is assigned to pods and enables traffic on the security group. - -[IMPORTANT] -==== -Only the following IPv6 additional network configurations are supported: - -* SLAAC and host-device -* SLAAC and MACVLAN -* DHCP stateless and host-device -* DHCP stateless and MACVLAN -==== - -.Procedure - -* On a command line, enter the following command: -+ -[source,terminal] ----- -$ openstack port set --no-security-group --disable-port-security ----- -+ -IMPORTANT: This command removes security groups from the port and disables port security. Traffic restrictions are removed entirely from the port. - -where: - -:: Specifies the IPv6 port of the compute server. \ No newline at end of file diff --git a/modules/nw-osp-pod-creating-ipv6.adoc b/modules/nw-osp-pod-creating-ipv6.adoc deleted file mode 100644 index 2669ccbce67e..000000000000 --- a/modules/nw-osp-pod-creating-ipv6.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-osp-pod-creating-ipv6_{context}"] -= Create pods that have IPv6 connectivity on {rh-openstack} - -After you enable IPv6 connectivty for pods and add it to them, create pods that have secondary IPv6 connections. - -.Procedure - -. Define pods that use your IPv6 namespace and the annotation `k8s.v1.cni.cncf.io/networks: `, where ` ----- - -where: - -:: Specifies the file that contains your resource definition. diff --git a/modules/nw-ovn-ipsec-certificates.adoc b/modules/nw-ovn-ipsec-certificates.adoc deleted file mode 100644 index 224b3c9bc0c8..000000000000 --- a/modules/nw-ovn-ipsec-certificates.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ipsec-ovn.adoc - -:_content-type: CONCEPT -[id="nw-ovn-ipsec-certificates_{context}"] -= Security certificate generation and rotation - -The Cluster Network Operator (CNO) generates a self-signed X.509 certificate authority (CA) that is used by IPsec for encryption. Certificate signing requests (CSRs) from each node are automatically fulfilled by the CNO. - -The CA is valid for 10 years. The individual node certificates are valid for 5 years and are automatically rotated after 4 1/2 years elapse. diff --git a/modules/nw-ovn-ipsec-disable.adoc b/modules/nw-ovn-ipsec-disable.adoc deleted file mode 100644 index 916716850484..000000000000 --- a/modules/nw-ovn-ipsec-disable.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/configuring-ipsec-ovn.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-ipsec-disable_{context}"] -= Disabling IPsec encryption - -As a cluster administrator, you can disable IPsec encryption only if you enabled IPsec after cluster installation. - -[NOTE] -==== -If you enabled IPsec when you installed your cluster, you cannot disable IPsec with this procedure. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. To disable IPsec encryption, enter the following command: -+ -[source,terminal] ----- -$ oc patch networks.operator.openshift.io/cluster --type=json \ - -p='[{"op":"remove", "path":"/spec/defaultNetwork/ovnKubernetesConfig/ipsecConfig"}]' ----- - -. 
Optional: You can increase the size of your cluster MTU by `46` bytes because there is no longer any overhead from the IPsec ESP header in IP packets. diff --git a/modules/nw-ovn-ipsec-enable.adoc b/modules/nw-ovn-ipsec-enable.adoc deleted file mode 100644 index c8a3ce4f92f8..000000000000 --- a/modules/nw-ovn-ipsec-enable.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/configuring-ipsec-ovn.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-ipsec-enable_{context}"] -= Enabling IPsec encryption - -As a cluster administrator, you can enable IPsec encryption after cluster installation. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in to the cluster with a user with `cluster-admin` privileges. -* You have reduced the size of your cluster MTU by `46` bytes to allow for the overhead of the IPsec ESP header. - -.Procedure - -* To enable IPsec encryption, enter the following command: -+ -[source,terminal] ----- -$ oc patch networks.operator.openshift.io cluster --type=merge \ --p '{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"ipsecConfig":{ }}}}}' ----- diff --git a/modules/nw-ovn-ipsec-encryption.adoc b/modules/nw-ovn-ipsec-encryption.adoc deleted file mode 100644 index 162622914bdc..000000000000 --- a/modules/nw-ovn-ipsec-encryption.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ipsec-ovn.adoc - -:_content-type: CONCEPT -[id="nw-ovn-ipsec-encryption_{context}"] -= Encryption protocol and IPsec mode - -The encrypt cipher used is `AES-GCM-16-256`. The integrity check value (ICV) is `16` bytes. The key length is `256` bits. - -The IPsec mode used is _Transport mode_, a mode that encrypts end-to-end communication by adding an Encapsulated Security Payload (ESP) header to the IP header of the original packet and encrypts the packet data. {product-title} does not currently use or support IPsec _Tunnel mode_ for pod-to-pod communication. diff --git a/modules/nw-ovn-ipsec-traffic.adoc b/modules/nw-ovn-ipsec-traffic.adoc deleted file mode 100644 index 2711b2260132..000000000000 --- a/modules/nw-ovn-ipsec-traffic.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ipsec-ovn.adoc - -:_content-type: CONCEPT -[id="nw-ovn-ipsec-traffic_{context}"] -= Types of network traffic flows encrypted by IPsec - -With IPsec enabled, only the following network traffic flows between pods are encrypted: - -* Traffic between pods on different nodes on the cluster network -* Traffic from a pod on the host network to a pod on the cluster network - -The following traffic flows are not encrypted: - -* Traffic between pods on the same node on the cluster network -* Traffic between pods on the host network -* Traffic from a pod on the cluster network to a pod on the host network - -The encrypted and unencrypted flows are illustrated in the following diagram: - -image::nw-ipsec-encryption.png[IPsec encrypted and unencrypted traffic flows] - -== Network connectivity requirements when IPsec is enabled - -You must configure the network connectivity between machines to allow {product-title} cluster -components to communicate. Each machine must be able to resolve the hostnames -of all other machines in the cluster. 
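If a host-level or intermediate firewall separates cluster machines, the protocols and ports in the following table must be allowed. As a hedged sketch only, assuming a machine that is managed with `firewalld`, which is not the case on every platform, the traffic could be permitted as follows:

.Example `firewalld` rules for IPsec traffic (illustrative sketch, assumes firewalld is in use)
[source,terminal]
----
$ sudo firewall-cmd --permanent --add-port=500/udp   # IPsec IKE
$ sudo firewall-cmd --permanent --add-port=4500/udp  # IPsec NAT-T
$ sudo firewall-cmd --permanent --add-protocol=esp   # IPsec ESP
$ sudo firewall-cmd --reload
----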
- -.Ports used for all-machine to all-machine communications -[cols="2a,2a,5a",options="header"] -|=== - -|Protocol -|Port -|Description - -.2+|UDP -|`500` -|IPsec IKE packets - -|`4500` -|IPsec NAT-T packets - -|ESP -|N/A -|IPsec Encapsulating Security Payload (ESP) - -|=== diff --git a/modules/nw-ovn-ipsec-verification.adoc b/modules/nw-ovn-ipsec-verification.adoc deleted file mode 100644 index ae43a1a3f762..000000000000 --- a/modules/nw-ovn-ipsec-verification.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ipsec-ovn.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-ipsec-verification_{context}"] -= Verifying that IPsec is enabled - -As a cluster administrator, you can verify that IPsec is enabled. - -.Verification - -. To find the names of the OVN-Kubernetes control plane pods, enter the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ovn-kubernetes | grep ovnkube-master ----- -+ -.Example output -[source,terminal] ----- -ovnkube-master-4496s 1/1 Running 0 6h39m -ovnkube-master-d6cht 1/1 Running 0 6h42m -ovnkube-master-skblc 1/1 Running 0 6h51m -ovnkube-master-vf8rf 1/1 Running 0 6h51m -ovnkube-master-w7hjr 1/1 Running 0 6h51m -ovnkube-master-zsk7x 1/1 Running 0 6h42m ----- - -. Verify that IPsec is enabled on your cluster: -+ -[source,terminal] ----- -$ oc -n openshift-ovn-kubernetes -c nbdb rsh ovnkube-master- \ - ovn-nbctl --no-leader-only get nb_global . ipsec ----- -+ --- -where: - -``:: Specifies the random sequence of letters for a pod from the previous step. --- -+ -.Example output -[source,text] ----- -true ----- diff --git a/modules/nw-ovn-kuberentes-limitations.adoc b/modules/nw-ovn-kuberentes-limitations.adoc deleted file mode 100644 index 9b91b6f4c022..000000000000 --- a/modules/nw-ovn-kuberentes-limitations.adoc +++ /dev/null @@ -1,34 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc - -[id="nw-ovn-kubernetes-limitations_{context}"] -= OVN-Kubernetes IPv6 and dual-stack limitations - -The OVN-Kubernetes network plugin has the following limitations: - -// The foll limitation is also recorded in the installation section. -* For clusters configured for dual-stack networking, both IPv4 and IPv6 traffic must use the same network interface as the default gateway. -If this requirement is not met, pods on the host in the `ovnkube-node` daemon set enter the `CrashLoopBackOff` state. -If you display a pod with a command such as `oc get pod -n openshift-ovn-kubernetes -l app=ovnkube-node -o yaml`, the `status` field contains more than one message about the default gateway, as shown in the following output: -+ -[source,terminal] ----- -I1006 16:09:50.985852 60651 helper_linux.go:73] Found default gateway interface br-ex 192.168.127.1 -I1006 16:09:50.985923 60651 helper_linux.go:73] Found default gateway interface ens4 fe80::5054:ff:febe:bcd4 -F1006 16:09:50.985939 60651 ovnkube.go:130] multiple gateway interfaces detected: br-ex ens4 ----- -+ -The only resolution is to reconfigure the host networking so that both IP families use the same network interface for the default gateway. - -* For clusters configured for dual-stack networking, both the IPv4 and IPv6 routing tables must contain the default gateway. -If this requirement is not met, pods on the host in the `ovnkube-node` daemon set enter the `CrashLoopBackOff` state. 
-If you display a pod with a command such as `oc get pod -n openshift-ovn-kubernetes -l app=ovnkube-node -o yaml`, the `status` field contains more than one message about the default gateway, as shown in the following output: -+ -[source,terminal] ----- -I0512 19:07:17.589083 108432 helper_linux.go:74] Found default gateway interface br-ex 192.168.123.1 -F0512 19:07:17.589141 108432 ovnkube.go:133] failed to get default gateway interface ----- -+ -The only resolution is to reconfigure the host networking so that both IP families contain the default gateway. diff --git a/modules/nw-ovn-kubernetes-alerts-cli.adoc b/modules/nw-ovn-kubernetes-alerts-cli.adoc deleted file mode 100644 index 29b4cd4a1f50..000000000000 --- a/modules/nw-ovn-kubernetes-alerts-cli.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-alerts-cli_{context}"] -= Viewing OVN-Kubernetes alerts in the CLI - -You can get information about alerts and their governing alerting rules and silences from the command line. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The OpenShift CLI (`oc`) installed. -* You have installed `jq`. - -.Procedure - -. View active or firing alerts by running the following commands. - -.. Set the alert manager route environment variable by running the following command: -+ -[source,terminal] ----- -$ ALERT_MANAGER=$(oc get route alertmanager-main -n openshift-monitoring \ --o jsonpath='{@.spec.host}') ----- - -.. Issue a `curl` request to the alert manager route API with the correct authorization details requesting specific fields by running the following command: -+ -[source,terminal] ----- -$ curl -s -k -H "Authorization: Bearer \ -$(oc create token prometheus-k8s -n openshift-monitoring)" \ -https://$ALERT_MANAGER/api/v1/alerts \ -| jq '.data[] | "\(.labels.severity) \(.labels.alertname) \(.labels.pod) \(.labels.container) \(.labels.endpoint) \(.labels.instance)"' ----- - -. View alerting rules by running the following command: -+ -[source,terminal] ----- -$ oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -s 'http://localhost:9090/api/v1/rules' | jq '.data.groups[].rules[] | select(((.name|contains("ovn")) or (.name|contains("OVN")) or (.name|contains("Ovn")) or (.name|contains("North")) or (.name|contains("South"))) and .type=="alerting")' ----- \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-alerts-console.adoc b/modules/nw-ovn-kubernetes-alerts-console.adoc deleted file mode 100644 index 8103a483643b..000000000000 --- a/modules/nw-ovn-kubernetes-alerts-console.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-alerts-console_{context}"] -= Viewing OVN-Kubernetes alerts in the console - -The Alerting UI provides detailed information about alerts and their governing alerting rules and silences. - -.Prerequisites - -* You have access to the cluster as a developer or as a user with view permissions for the project that you are viewing metrics for. - -.Procedure (UI) - -. In the *Administrator* perspective, select *Observe* -> *Alerting*. The three main pages in the Alerting UI in this perspective are the *Alerts*, *Silences*, and *Alerting Rules* pages. - -. 
View the rules for OVN-Kubernetes alerts by selecting *Observe* -> *Alerting* -> *Alerting Rules*. \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-change-log-levels.adoc b/modules/nw-ovn-kubernetes-change-log-levels.adoc deleted file mode 100644 index 15e71013da3f..000000000000 --- a/modules/nw-ovn-kubernetes-change-log-levels.adoc +++ /dev/null @@ -1,101 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-change-log-levels_{context}"] -= Changing the OVN-Kubernetes log levels - -The default log level for OVN-Kubernetes is 2. To debug OVN-Kubernetes set the log level to 5. -Follow this procedure to increase the log level of the OVN-Kubernetes to help you debug an issue. - -.Prerequisites - -* You have access to the cluster with `cluster-admin` privileges. -* You have access to the OpenShift Container Platform web console. - -.Procedure - -. Run the following command to get detailed information for all pods in the OVN-Kubernetes project: -+ -[source,terminal] ----- -$ oc get po -o wide -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -ovnkube-master-84nc9 6/6 Running 0 50m 10.0.134.156 ip-10-0-134-156.ec2.internal -ovnkube-master-gmlqv 6/6 Running 0 50m 10.0.209.180 ip-10-0-209-180.ec2.internal -ovnkube-master-nhts2 6/6 Running 1 (48m ago) 50m 10.0.147.31 ip-10-0-147-31.ec2.internal -ovnkube-node-2cbh8 5/5 Running 0 43m 10.0.217.114 ip-10-0-217-114.ec2.internal -ovnkube-node-6fvzl 5/5 Running 0 50m 10.0.147.31 ip-10-0-147-31.ec2.internal -ovnkube-node-f4lzz 5/5 Running 0 24m 10.0.146.76 ip-10-0-146-76.ec2.internal -ovnkube-node-jf67d 5/5 Running 0 50m 10.0.209.180 ip-10-0-209-180.ec2.internal -ovnkube-node-np9mf 5/5 Running 0 40m 10.0.165.191 ip-10-0-165-191.ec2.internal -ovnkube-node-qjldg 5/5 Running 0 50m 10.0.134.156 ip-10-0-134-156.ec2.internal ----- - -. Create a `ConfigMap` file similar to the following example and use a filename such as `env-overrides.yaml`: -+ -[source,yaml] -.Example `ConfigMap` file ----- -kind: ConfigMap -apiVersion: v1 -metadata: - name: env-overrides - namespace: openshift-ovn-kubernetes -data: - ip-10-0-217-114.ec2.internal: | <1> - # This sets the log level for the ovn-kubernetes node process: - OVN_KUBE_LOG_LEVEL=5 - # You might also/instead want to enable debug logging for ovn-controller: - OVN_LOG_LEVEL=dbg - ip-10-0-209-180.ec2.internal: | - # This sets the log level for the ovn-kubernetes node process: - OVN_KUBE_LOG_LEVEL=5 - # You might also/instead want to enable debug logging for ovn-controller: - OVN_LOG_LEVEL=dbg - _master: | <2> - # This sets the log level for the ovn-kubernetes master process as well as the ovn-dbchecker: - OVN_KUBE_LOG_LEVEL=5 - # You might also/instead want to enable debug logging for northd, nbdb and sbdb on all masters: - OVN_LOG_LEVEL=dbg ----- -<1> Specify the name of the node you want to set the debug log level on. -<2> Specify `_master` to set the log levels of `ovnkube-master` components. - -. Apply the `ConfigMap` file by using the following command: -+ -[source,terminal] ----- -$ oc create configmap env-overrides.yaml -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -configmap/env-overrides.yaml created ----- - -. 
Restart the `ovnkube` pods to apply the new log level by using the following commands: -+ -[source,terminal] ----- -$ oc delete pod -n openshift-ovn-kubernetes \ ---field-selector spec.nodeName=ip-10-0-217-114.ec2.internal -l app=ovnkube-node ----- -+ -[source,terminal] ----- -$ oc delete pod -n openshift-ovn-kubernetes \ ---field-selector spec.nodeName=ip-10-0-209-180.ec2.internal -l app=ovnkube-node ----- -+ -[source,terminal] ----- -$ oc delete pod -n openshift-ovn-kubernetes -l app=ovnkube-master ----- \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc b/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc deleted file mode 100644 index 41646644ddaa..000000000000 --- a/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: REFERENCE -[id="nw-ovn-kubernetes-examine-nb-database-contents-ref_{context}"] -= Command line arguments for ovn-nbctl to examine northbound database contents - -The following table describes the command line arguments that can be used with `ovn-nbctl` to examine the contents of the northbound database. - -.Command line arguments to examine northbound database contents -[cols="30%,70%",options="header"] -|=== -|Argument |Description - -|`ovn-nbctl show` -|An overview of the northbound database contents. - -|`ovn-nbctl show ` -|Show the details associated with the specified switch or router. - -|`ovn-nbctl lr-list` -|Show the logical routers. - -|`ovn-nbctl lrp-list ` -|Using the router information from `ovn-nbctl lr-list` to show the router ports. - -|`ovn-nbctl lr-nat-list ` -|Show network address translation details for the specified router. - -|`ovn-nbctl ls-list` -|Show the logical switches - -|`ovn-nbctl lsp-list ` -|Using the switch information from `ovn-nbctl ls-list` to show the switch port. - -|`ovn-nbctl lsp-get-type ` -|Get the type for the logical port. - -|`ovn-nbctl lb-list` -|Show the load balancers. -|=== \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc b/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc deleted file mode 100644 index 33fdf440df87..000000000000 --- a/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: REFERENCE -[id="nw-ovn-kubernetes-examine-sb-database-contents-ref_{context}"] -= Command line arguments for ovn-sbctl to examine southbound database contents - -The following table describes the command line arguments that can be used with `ovn-sbctl` to examine the contents of the southbound database. - -.Command line arguments to examine southbound database contents -[cols="30%,70%",options="header"] -|=== -|Argument |Description - -|`ovn-sbctl show` -|Overview of the southbound database contents. - -|`ovn-sbctl list Port_Binding ` -|List the contents of southbound database for a the specified port . - -|`ovn-sbctl dump-flows` -|List the logical flows. 
- -|=== \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-features.adoc b/modules/nw-ovn-kubernetes-features.adoc deleted file mode 100644 index 8ea45e798775..000000000000 --- a/modules/nw-ovn-kubernetes-features.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc - -[id="nw-ovn-kubernetes-purpose_{context}"] -= OVN-Kubernetes purpose - -The OVN-Kubernetes network plugin is an open-source, fully-featured Kubernetes CNI plugin that uses Open Virtual Network (OVN) to manage network traffic flows. OVN is a community developed, vendor-agnostic network virtualization solution. The OVN-Kubernetes network plugin: - -// OVN (Open Virtual Network) is consistent with upstream usage. - -* Uses OVN (Open Virtual Network) to manage network traffic flows. OVN is a community developed, vendor-agnostic network virtualization solution. -* Implements Kubernetes network policy support, including ingress and egress rules. -* Uses the Geneve (Generic Network Virtualization Encapsulation) protocol rather than VXLAN to create an overlay network between nodes. - -The OVN-Kubernetes network plugin provides the following advantages over OpenShift SDN. - -* Full support for IPv6 single-stack and IPv4/IPv6 dual-stack networking on supported platforms -* Support for hybrid clusters with both Linux and Microsoft Windows workloads -* Optional IPsec encryption of intra-cluster communications -* Offload of network data processing from host CPU to compatible network cards and data processing units (DPUs) diff --git a/modules/nw-ovn-kubernetes-install-ovnkube-trace-local.adoc b/modules/nw-ovn-kubernetes-install-ovnkube-trace-local.adoc deleted file mode 100644 index 322df391a71b..000000000000 --- a/modules/nw-ovn-kubernetes-install-ovnkube-trace-local.adoc +++ /dev/null @@ -1,86 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-install-ovnkube-trace-local_{context}"] -= Installing the ovnkube-trace on local host - -The `ovnkube-trace` tool traces packet simulations for arbitrary UDP or TCP traffic between points in an OVN-Kubernetes driven {product-title} cluster. Copy the `ovnkube-trace` binary to your local host making it available to run against the cluster. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. - -.Procedure - -. Create a pod variable by using the following command: -+ -[source,terminal] ----- -$ POD=$(oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-master -o name | head -1 | awk -F '/' '{print $NF}') ----- - -. Run the following command on your local host to copy the binary from the `ovnkube-master` pods: -+ -[source,terminal] ----- -$ oc cp -n openshift-ovn-kubernetes $POD:/usr/bin/ovnkube-trace ovnkube-trace ----- - -. Make `ovnkube-trace` executable by running the following command: -+ -[source,terminal] ----- -$ chmod +x ovnkube-trace ----- - -. 
Display the options available with `ovnkube-trace` by running the following command: -+ -[source,terminal] ----- -$ ./ovnkube-trace -help ----- -+ -.Expected output -+ -[source,terminal] ----- -I0111 15:05:27.973305 204872 ovs.go:90] Maximum command line arguments set to: 191102 -Usage of ./ovnkube-trace: - -dst string - dest: destination pod name - -dst-ip string - destination IP address (meant for tests to external targets) - -dst-namespace string - k8s namespace of dest pod (default "default") - -dst-port string - dst-port: destination port (default "80") - -kubeconfig string - absolute path to the kubeconfig file - -loglevel string - loglevel: klog level (default "0") - -ovn-config-namespace string - namespace used by ovn-config itself - -service string - service: destination service name - -skip-detrace - skip ovn-detrace command - -src string - src: source pod name - -src-namespace string - k8s namespace of source pod (default "default") - -tcp - use tcp transport protocol - -udp - use udp transport protocol ----- -+ -The command-line arguments supported are familiar Kubernetes constructs, such as namespaces, pods, services so you do not need to find the MAC address, the IP address of the destination nodes, or the ICMP type. -+ -The log levels are: - -* 0 (minimal output) -* 2 (more verbose output showing results of trace commands) -* 5 (debug output) diff --git a/modules/nw-ovn-kubernetes-installing-network-tools.adoc b/modules/nw-ovn-kubernetes-installing-network-tools.adoc deleted file mode 100644 index 755207cc1ee3..000000000000 --- a/modules/nw-ovn-kubernetes-installing-network-tools.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-installing-network-tools_{context}"] -= Installing network-tools on local host - -Install `network-tools` on your local host to make a collection of tools available for debugging {product-title} cluster network issues. - -.Procedure - -. Clone the `network-tools` repository onto your workstation with the following command: -+ -[source,terminal] ----- -$ git clone git@github.com:openshift/network-tools.git ----- - -. Change into the directory for the repository you just cloned: -+ -[source,terminal] ----- -$ cd network-tools ----- - -. Optional: List all available commands: -+ -[source,terminal] ----- -$ ./debug-scripts/network-tools -h ----- \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-list-database-contents.adoc b/modules/nw-ovn-kubernetes-list-database-contents.adoc deleted file mode 100644 index a2f7f08f5c47..000000000000 --- a/modules/nw-ovn-kubernetes-list-database-contents.adoc +++ /dev/null @@ -1,233 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-list-database-contents_{context}"] -= Listing the OVN-Kubernetes northbound database contents - -To understand logic flow rules you need to examine the northbound database and understand what objects are there to see how they are translated into logic flow rules. -The up to date information is present on the OVN Raft leader and this procedure describes how to find the Raft leader and subsequently query it to list the OVN northbound database contents. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. 
-* The OpenShift CLI (`oc`) installed. - -.Procedure - -. Find the OVN Raft leader for the northbound database. -+ -[NOTE] -==== -The Raft leader stores the most up to date information. -==== - -.. List the pods by running the following command: -+ -[source,terminal] ----- -$ oc get po -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ovnkube-master-7j97q 6/6 Running 2 (148m ago) 149m -ovnkube-master-gt4ms 6/6 Running 1 (140m ago) 147m -ovnkube-master-mk6p6 6/6 Running 0 148m -ovnkube-node-8qvtr 5/5 Running 0 149m -ovnkube-node-fqdc9 5/5 Running 0 149m -ovnkube-node-tlfwv 5/5 Running 0 149m -ovnkube-node-wlwkn 5/5 Running 0 142m ----- - -.. Choose one of the master pods at random and run the following command: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes ovnkube-master-7j97q \ --- /usr/bin/ovn-appctl -t /var/run/ovn/ovnnb_db.ctl \ ---timeout=3 cluster/status OVN_Northbound ----- -+ -.Example output -[source,terminal] ----- -Defaulted container "northd" out of: northd, nbdb, kube-rbac-proxy, sbdb, ovnkube-master, ovn-dbchecker -1c57 -Name: OVN_Northbound -Cluster ID: c48a (c48aa5c0-a704-4c77-a066-24fe99d9b338) -Server ID: 1c57 (1c57b6fc-2849-49b7-8679-fbf18bafe339) -Address: ssl:10.0.147.219:9643 -Status: cluster member -Role: follower <1> -Term: 5 -Leader: 2b4f <2> -Vote: unknown - -Election timer: 10000 -Log: [2, 3018] -Entries not yet committed: 0 -Entries not yet applied: 0 -Connections: ->0000 ->0000 <-8844 <-2b4f -Disconnections: 0 -Servers: - 1c57 (1c57 at ssl:10.0.147.219:9643) (self) - 8844 (8844 at ssl:10.0.163.212:9643) last msg 8928047 ms ago - 2b4f (2b4f at ssl:10.0.242.240:9643) last msg 620 ms ago <3> ----- -+ -<1> This pod is identified as a follower -<2> The leader is identified as `2b4f` -<3> The `2b4f` is on IP address `10.0.242.240` - -.. Find the `ovnkube-master` pod running on IP Address `10.0.242.240` using the following command: -+ -[source,terminal] ----- -$ oc get po -o wide -n openshift-ovn-kubernetes | grep 10.0.242.240 | grep -v ovnkube-node ----- -+ -.Example output -[source,terminal] ----- -ovnkube-master-gt4ms 6/6 Running 1 (143m ago) 150m 10.0.242.240 ip-10-0-242-240.ec2.internal ----- -+ -The `ovnkube-master-gt4ms` pod runs on IP Address 10.0.242.240. - -. Run the following command to show all the objects in the northbound database: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-gt4ms \ --c northd -- ovn-nbctl show ----- -+ -The output is too long to list here. The list includes the NAT rules, logical switches, load balancers and so on. -+ -Run the following command to display the options available with the command `ovn-nbctl`: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-mk6p6 \ --c northd ovn-nbctl --help ----- -+ -You can narrow down and focus on specific components by using some of the following commands: - -. 
Run the following command to show the list of logical routers: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-gt4ms \ --c northd -- ovn-nbctl lr-list ----- -+ -.Example output -[source,terminal] ----- -f971f1f3-5112-402f-9d1e-48f1d091ff04 (GR_ip-10-0-145-205.ec2.internal) -69c992d8-a4cf-429e-81a3-5361209ffe44 (GR_ip-10-0-147-219.ec2.internal) -7d164271-af9e-4283-b84a-48f2a44851cd (GR_ip-10-0-163-212.ec2.internal) -111052e3-c395-408b-97b2-8dd0a20a29a5 (GR_ip-10-0-165-9.ec2.internal) -ed50ce33-df5d-48e8-8862-2df6a59169a0 (GR_ip-10-0-209-170.ec2.internal) -f44e2a96-8d1e-4a4d-abae-ed8728ac6851 (GR_ip-10-0-242-240.ec2.internal) -ef3d0057-e557-4b1a-b3c6-fcc3463790b0 (ovn_cluster_router) ----- -+ -[NOTE] -==== -From this output you can see there is router on each node plus an `ovn_cluster_router`. -==== - -. Run the following command to show the list of logical switches: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-gt4ms \ --c northd -- ovn-nbctl ls-list ----- -+ -.Example output -[source,terminal] ----- -82808c5c-b3bc-414a-bb59-8fec4b07eb14 (ext_ip-10-0-145-205.ec2.internal) -3d22444f-0272-4c51-afc6-de9e03db3291 (ext_ip-10-0-147-219.ec2.internal) -bf73b9df-59ab-4c58-a456-ce8205b34ac5 (ext_ip-10-0-163-212.ec2.internal) -bee1e8d0-ec87-45eb-b98b-63f9ec213e5e (ext_ip-10-0-165-9.ec2.internal) -812f08f2-6476-4abf-9a78-635f8516f95e (ext_ip-10-0-209-170.ec2.internal) -f65e710b-32f9-482b-8eab-8d96a44799c1 (ext_ip-10-0-242-240.ec2.internal) -84dad700-afb8-4129-86f9-923a1ddeace9 (ip-10-0-145-205.ec2.internal) -1b7b448b-e36c-4ca3-9f38-4a2cf6814bfd (ip-10-0-147-219.ec2.internal) -d92d1f56-2606-4f23-8b6a-4396a78951de (ip-10-0-163-212.ec2.internal) -6864a6b2-de15-4de3-92d8-f95014b6f28f (ip-10-0-165-9.ec2.internal) -c26bf618-4d7e-4afd-804f-1a2cbc96ec6d (ip-10-0-209-170.ec2.internal) -ab9a4526-44ed-4f82-ae1c-e20da04947d9 (ip-10-0-242-240.ec2.internal) -a8588aba-21da-4276-ba0f-9d68e88911f0 (join) ----- -+ -[NOTE] -==== -From this output you can see there is an ext switch for each node plus switches with the node name itself and a join switch. -==== - -. 
Run the following command to show the list of load balancers: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-gt4ms \ --c northd -- ovn-nbctl lb-list ----- -+ -.Example output -[source,terminal] ----- -UUID LB PROTO VIP IPs -f0fb50f9-4968-4b55-908c-616bae4db0a2 Service_default/ tcp 172.30.0.1:443 10.0.147.219:6443,10.0.163.212:6443,169.254.169.2:6443 -0dc42012-4f5b-432e-ae01-2cc4bfe81b00 Service_default/ tcp 172.30.0.1:443 10.0.147.219:6443,169.254.169.2:6443,10.0.242.240:6443 -f7fff5d5-5eff-4a40-98b1-3a4ba8f7f69c Service_default/ tcp 172.30.0.1:443 169.254.169.2:6443,10.0.163.212:6443,10.0.242.240:6443 -12fe57a0-50a4-4a1b-ac10-5f288badee07 Service_default/ tcp 172.30.0.1:443 10.0.147.219:6443,10.0.163.212:6443,10.0.242.240:6443 -3f137fbf-0b78-4875-ba44-fbf89f254cf7 Service_openshif tcp 172.30.23.153:443 10.130.0.14:8443 -174199fe-0562-4141-b410-12094db922a7 Service_openshif tcp 172.30.69.51:50051 10.130.0.84:50051 -5ee2d4bd-c9e2-4d16-a6df-f54cd17c9ac3 Service_openshif tcp 172.30.143.87:9001 10.0.145.205:9001,10.0.147.219:9001,10.0.163.212:9001,10.0.165.9:9001,10.0.209.170:9001,10.0.242.240:9001 -a056ae3d-83f8-45bc-9c80-ef89bce7b162 Service_openshif tcp 172.30.164.74:443 10.0.147.219:6443,10.0.163.212:6443,10.0.242.240:6443 -bac51f3d-9a6f-4f5e-ac02-28fd343a332a Service_openshif tcp 172.30.0.10:53 10.131.0.6:5353 - tcp 172.30.0.10:9154 10.131.0.6:9154 -48105bbc-51d7-4178-b975-417433f9c20a Service_openshif tcp 172.30.26.159:2379 10.0.147.219:2379,169.254.169.2:2379,10.0.242.240:2379 - tcp 172.30.26.159:9979 10.0.147.219:9979,169.254.169.2:9979,10.0.242.240:9979 -7de2b8fc-342a-415f-ac13-1a493f4e39c0 Service_openshif tcp 172.30.53.219:443 10.128.0.7:8443 - tcp 172.30.53.219:9192 10.128.0.7:9192 -2cef36bc-d720-4afb-8d95-9350eff1d27a Service_openshif tcp 172.30.81.66:443 10.128.0.23:8443 -365cb6fb-e15e-45a4-a55b-21868b3cf513 Service_openshif tcp 172.30.96.51:50051 10.130.0.19:50051 -41691cbb-ec55-4cdb-8431-afce679c5e8d Service_openshif tcp 172.30.98.218:9099 169.254.169.2:9099 -82df10ba-8143-400b-977a-8f5f416a4541 Service_openshif tcp 172.30.26.159:2379 10.0.147.219:2379,10.0.163.212:2379,169.254.169.2:2379 - tcp 172.30.26.159:9979 10.0.147.219:9979,10.0.163.212:9979,169.254.169.2:9979 -debe7f3a-39a8-490e-bc0a-ebbfafdffb16 Service_openshif tcp 172.30.23.244:443 10.128.0.48:8443,10.129.0.27:8443,10.130.0.45:8443 -8a749239-02d9-4dc2-8737-716528e0da7b Service_openshif tcp 172.30.124.255:8443 10.128.0.14:8443 -880c7c78-c790-403d-a3cb-9f06592717a3 Service_openshif tcp 172.30.0.10:53 10.130.0.20:5353 - tcp 172.30.0.10:9154 10.130.0.20:9154 -d2f39078-6751-4311-a161-815bbaf7f9c7 Service_openshif tcp 172.30.26.159:2379 169.254.169.2:2379,10.0.163.212:2379,10.0.242.240:2379 - tcp 172.30.26.159:9979 169.254.169.2:9979,10.0.163.212:9979,10.0.242.240:9979 -30948278-602b-455c-934a-28e64c46de12 Service_openshif tcp 172.30.157.35:9443 10.130.0.43:9443 -2cc7e376-7c02-4a82-89e8-dfa1e23fb003 Service_openshif tcp 172.30.159.212:17698 10.128.0.48:17698,10.129.0.27:17698,10.130.0.45:17698 -e7d22d35-61c2-40c2-bc30-265cff8ed18d Service_openshif tcp 172.30.143.87:9001 10.0.145.205:9001,10.0.147.219:9001,10.0.163.212:9001,10.0.165.9:9001,10.0.209.170:9001,169.254.169.2:9001 -75164e75-e0c5-40fb-9636-bfdbf4223a02 Service_openshif tcp 172.30.150.68:1936 10.129.4.8:1936,10.131.0.10:1936 - tcp 172.30.150.68:443 10.129.4.8:443,10.131.0.10:443 - tcp 172.30.150.68:80 10.129.4.8:80,10.131.0.10:80 -7bc4ee74-dccf-47e9-9149-b011f09aff39 Service_openshif tcp 172.30.164.74:443 
10.0.147.219:6443,10.0.163.212:6443,169.254.169.2:6443 -0db59e74-1cc6-470c-bf44-57c520e0aa8f Service_openshif tcp 10.0.163.212:31460 - tcp 10.0.163.212:32361 -c300e134-018c-49af-9f84-9deb1d0715f8 Service_openshif tcp 172.30.42.244:50051 10.130.0.47:50051 -5e352773-429b-4881-afb3-a13b7ba8b081 Service_openshif tcp 172.30.244.66:443 10.129.0.8:8443,10.130.0.8:8443 -54b82d32-1939-4465-a87d-f26321442a7a Service_openshif tcp 172.30.12.9:8443 10.128.0.35:8443 ----- -+ -[NOTE] -==== -From this truncated output you can see there are many OVN-Kubernetes load balancers. Load balancers in OVN-Kubernetes are representations of services. -==== - - diff --git a/modules/nw-ovn-kubernetes-list-resources.adoc b/modules/nw-ovn-kubernetes-list-resources.adoc deleted file mode 100644 index 022860dcb204..000000000000 --- a/modules/nw-ovn-kubernetes-list-resources.adoc +++ /dev/null @@ -1,99 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-list-resources_{context}"] -= Listing all resources in the OVN-Kubernetes project - -Finding the resources and containers that run in the OVN-Kubernetes project is important to help you understand the OVN-Kubernetes networking implementation. - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* The OpenShift CLI (`oc`) installed. - -.Procedure - -. Run the following command to get all resources, endpoints, and `ConfigMaps` in the OVN-Kubernetes project: -+ -[source,terminal] ----- -$ oc get all,ep,cm -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -pod/ovnkube-master-9g7zt 6/6 Running 1 (48m ago) 57m -pod/ovnkube-master-lqs4v 6/6 Running 0 57m -pod/ovnkube-master-vxhtq 6/6 Running 0 57m -pod/ovnkube-node-9k9kc 5/5 Running 0 57m -pod/ovnkube-node-jg52r 5/5 Running 0 51m -pod/ovnkube-node-k8wf7 5/5 Running 0 57m -pod/ovnkube-node-tlwk6 5/5 Running 0 47m -pod/ovnkube-node-xsvnk 5/5 Running 0 57m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/ovn-kubernetes-master ClusterIP None 9102/TCP 57m -service/ovn-kubernetes-node ClusterIP None 9103/TCP,9105/TCP 57m -service/ovnkube-db ClusterIP None 9641/TCP,9642/TCP 57m - -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/ovnkube-master 3 3 3 3 3 beta.kubernetes.io/os=linux,node-role.kubernetes.io/master= 57m -daemonset.apps/ovnkube-node 5 5 5 5 5 beta.kubernetes.io/os=linux 57m - -NAME ENDPOINTS AGE -endpoints/ovn-kubernetes-master 10.0.132.11:9102,10.0.151.18:9102,10.0.192.45:9102 57m -endpoints/ovn-kubernetes-node 10.0.132.11:9105,10.0.143.72:9105,10.0.151.18:9105 + 7 more... 57m -endpoints/ovnkube-db 10.0.132.11:9642,10.0.151.18:9642,10.0.192.45:9642 + 3 more... 57m - -NAME DATA AGE -configmap/control-plane-status 1 55m -configmap/kube-root-ca.crt 1 57m -configmap/openshift-service-ca.crt 1 57m -configmap/ovn-ca 1 57m -configmap/ovn-kubernetes-master 0 55m -configmap/ovnkube-config 1 57m -configmap/signer-ca 1 57m ----- -+ -There are three `ovnkube-masters` that run on the control plane nodes, and two daemon sets used to deploy the `ovnkube-master` and `ovnkube-node` pods. -There is one `ovnkube-node` pod for each node in the cluster. -In this example, there are 5, and since there is one `ovnkube-node` per node in the cluster, there are five nodes in the cluster. 
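A quick way to confirm the one-pod-per-node relationship is to compare the two counts directly. This is a minimal sketch, and it assumes that the `ovnkube-node` pods carry an `app=ovnkube-node` label, mirroring the `app=ovnkube-master` selector that other commands in these modules use:

[source,bash]
----
# Count the ovnkube-node pods (assumes the app=ovnkube-node label).
oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-node --no-headers | wc -l

# Count the nodes. The two numbers should match because ovnkube-node is a daemon set
# that places exactly one pod on every node.
oc get nodes --no-headers | wc -l
----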
-The `ovnkube-config` `ConfigMap` has the {product-title} OVN-Kubernetes configuration that `ovnkube-master` and `ovnkube-node` read at startup.
-The `ovn-kubernetes-master` `ConfigMap` has the information about the current `ovnkube-master` leader.
-
-. List all the containers in the `ovnkube-master` pods by running the following command:
-+
-[source,terminal]
-----
-$ oc get pods ovnkube-master-9g7zt \
--o jsonpath='{.spec.containers[*].name}' -n openshift-ovn-kubernetes
-----
-.Expected output
-+
-[source,terminal]
-----
-northd nbdb kube-rbac-proxy sbdb ovnkube-master ovn-dbchecker
-----
-+
-The `ovnkube-master` pod is made up of several containers.
-It hosts the northbound database (`nbdb` container) and the southbound database (`sbdb` container), watches for cluster events for pods, egress IPs, namespaces, services, endpoints, egress firewalls, and network policies and writes them to the northbound database (`ovnkube-master` container), and manages pod subnet allocation to nodes.
-
-. List all the containers in the `ovnkube-node` pods by running the following command:
-+
-[source,terminal]
-----
-$ oc get pods ovnkube-node-jg52r \
--o jsonpath='{.spec.containers[*].name}' -n openshift-ovn-kubernetes
-----
-.Expected output
-+
-[source,terminal]
-----
-ovn-controller ovn-acl-logging kube-rbac-proxy kube-rbac-proxy-ovn-metrics ovnkube-node
-----
-+
-The `ovnkube-node` pod has a container (`ovn-controller`) that resides on each {product-title} node. Each node’s `ovn-controller` connects the OVN northbound to the OVN southbound database to learn about the OVN configuration. The `ovn-controller` connects southbound to `ovs-vswitchd` as an OpenFlow controller, for control over network traffic, and to the local `ovsdb-server` to allow it to monitor and control Open vSwitch configuration.
\ No newline at end of file
diff --git a/modules/nw-ovn-kubernetes-list-southbound-database-contents.adoc b/modules/nw-ovn-kubernetes-list-southbound-database-contents.adoc
deleted file mode 100644
index edb4541048cf..000000000000
--- a/modules/nw-ovn-kubernetes-list-southbound-database-contents.adoc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc
-
-:_content-type: PROCEDURE
-[id="nw-ovn-kubernetes-list-southbound-database-contents_{context}"]
-= Listing the OVN-Kubernetes southbound database contents
-
-Logic flow rules are stored in the southbound database, which is a representation of your infrastructure.
-The most up-to-date information is present on the OVN Raft leader. This procedure describes how to find the Raft leader and then query it to list the OVN southbound database contents.
-
-.Prerequisites
-
-* Access to the cluster as a user with the `cluster-admin` role.
-* The OpenShift CLI (`oc`) installed.
-
-.Procedure
-
-. Find the OVN Raft leader for the southbound database.
-+
-[NOTE]
-====
-The Raft leader stores the most up-to-date information.
-====
-
-..
List the pods by running the following command: -+ -[source,terminal] ----- -$ oc get po -n openshift-ovn-kubernetes ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -ovnkube-master-7j97q 6/6 Running 2 (134m ago) 135m -ovnkube-master-gt4ms 6/6 Running 1 (126m ago) 133m -ovnkube-master-mk6p6 6/6 Running 0 134m -ovnkube-node-8qvtr 5/5 Running 0 135m -ovnkube-node-bqztb 5/5 Running 0 117m -ovnkube-node-fqdc9 5/5 Running 0 135m -ovnkube-node-tlfwv 5/5 Running 0 135m -ovnkube-node-wlwkn 5/5 Running 0 128m ----- - -.. Choose one of the master pods at random and run the following command to find the OVN southbound Raft leader: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes ovnkube-master-7j97q \ --- /usr/bin/ovn-appctl -t /var/run/ovn/ovnsb_db.ctl \ ---timeout=3 cluster/status OVN_Southbound ----- -+ -.Example output -[source,terminal] ----- -Defaulted container "northd" out of: northd, nbdb, kube-rbac-proxy, sbdb, ovnkube-master, ovn-dbchecker -1930 -Name: OVN_Southbound -Cluster ID: f772 (f77273c0-7986-42dd-bd3c-a9f18e25701f) -Server ID: 1930 (1930f4b7-314b-406f-9dcb-b81fe2729ae1) -Address: ssl:10.0.147.219:9644 -Status: cluster member -Role: follower <1> -Term: 3 -Leader: 7081 <2> -Vote: unknown - -Election timer: 16000 -Log: [2, 2423] -Entries not yet committed: 0 -Entries not yet applied: 0 -Connections: ->0000 ->7145 <-7081 <-7145 -Disconnections: 0 -Servers: - 7081 (7081 at ssl:10.0.163.212:9644) last msg 59 ms ago <3> - 1930 (1930 at ssl:10.0.147.219:9644) (self) - 7145 (7145 at ssl:10.0.242.240:9644) last msg 7871735 ms ago ----- -+ -<1> This pod is identified as a follower -<2> The leader is identified as `7081` -<3> The `7081` is on IP address `10.0.163.212` - -.. Find the `ovnkube-master` pod running on IP Address `10.0.163.212` using the following command: -+ -[source,terminal] ----- -$ oc get po -o wide -n openshift-ovn-kubernetes | grep 10.0.163.212 | grep -v ovnkube-node ----- -+ -.Example output -[source,terminal] ----- -ovnkube-master-mk6p6 6/6 Running 0 136m 10.0.163.212 ip-10-0-163-212.ec2.internal ----- -+ -The `ovnkube-master-mk6p6` pod runs on IP Address 10.0.163.212. - -. 
Run the following command to show all the information stored in the southbound database: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-mk6p6 \ --c northd -- ovn-sbctl show ----- -+ -.Example output -+ -[source,terminal] ----- -Chassis "8ca57b28-9834-45f0-99b0-96486c22e1be" - hostname: ip-10-0-156-16.ec2.internal - Encap geneve - ip: "10.0.156.16" - options: {csum="true"} - Port_Binding k8s-ip-10-0-156-16.ec2.internal - Port_Binding etor-GR_ip-10-0-156-16.ec2.internal - Port_Binding jtor-GR_ip-10-0-156-16.ec2.internal - Port_Binding openshift-ingress-canary_ingress-canary-hsblx - Port_Binding rtoj-GR_ip-10-0-156-16.ec2.internal - Port_Binding openshift-monitoring_prometheus-adapter-658fc5967-9l46x - Port_Binding rtoe-GR_ip-10-0-156-16.ec2.internal - Port_Binding openshift-multus_network-metrics-daemon-77nvz - Port_Binding openshift-ingress_router-default-64fd8c67c7-df598 - Port_Binding openshift-dns_dns-default-ttpcq - Port_Binding openshift-monitoring_alertmanager-main-0 - Port_Binding openshift-e2e-loki_loki-promtail-g2pbh - Port_Binding openshift-network-diagnostics_network-check-target-m6tn4 - Port_Binding openshift-monitoring_thanos-querier-75b5cf8dcb-qf8qj - Port_Binding cr-rtos-ip-10-0-156-16.ec2.internal - Port_Binding openshift-image-registry_image-registry-7b7bc44566-mp9b8 ----- -+ -This detailed output shows the chassis and the ports that are attached to the chassis which in this case are all of the router ports and anything that runs like host networking. -Any pods communicate out to the wider network using source network address translation (SNAT). -Their IP address is translated into the IP address of the node that the pod is running on and then sent out into the network. -+ -In addition to the chassis information the southbound database has all the logic flows and those logic flows are then sent to the `ovn-controller` running on each of the nodes. -The `ovn-controller` translates the logic flows into open flow rules and ultimately programs `OpenvSwitch` so that your pods can then follow open flow rules and make it out of the network. -+ -Run the following command to display the options available with the command `ovn-sbctl`: -+ -[source,terminal] ----- -$ oc exec -n openshift-ovn-kubernetes -it ovnkube-master-mk6p6 \ --c northd -- ovn-sbctl --help ----- diff --git a/modules/nw-ovn-kubernetes-logs-cli.adoc b/modules/nw-ovn-kubernetes-logs-cli.adoc deleted file mode 100644 index d1856121d7f9..000000000000 --- a/modules/nw-ovn-kubernetes-logs-cli.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-logs-cli_{context}"] -= Viewing the OVN-Kubernetes logs using the CLI - -You can view the logs for each of the pods in the `ovnkube-master` and `ovnkube-node` pods using the OpenShift CLI (`oc`). - -.Prerequisites - -* Access to the cluster as a user with the `cluster-admin` role. -* Access to the OpenShift CLI (`oc`). -* You have installed `jq`. - -.Procedure - -. View the log for a specific pod: -+ -[source,terminal] ----- -$ oc logs -f -c -n ----- -+ --- -where: - -`-f`:: Optional: Specifies that the output follows what is being written into the logs. -``:: Specifies the name of the pod. -``:: Optional: Specifies the name of a container. When a pod has more than one container, you must specify the container name. -``:: Specify the namespace the pod is running in. 
--
-+
-For example:
-+
-[source,terminal]
-----
-$ oc logs ovnkube-master-7h4q7 -n openshift-ovn-kubernetes
-----
-+
-[source,terminal]
-----
-$ oc logs -f ovnkube-master-7h4q7 -n openshift-ovn-kubernetes -c ovn-dbchecker
-----
-+
-The contents of log files are printed out.
-
-. Examine the most recent entries in all the containers in the `ovnkube-master` pods:
-+
-[source,terminal]
-----
-$ for p in $(oc get pods --selector app=ovnkube-master -n openshift-ovn-kubernetes \
--o jsonpath='{range.items[*]}{" "}{.metadata.name}'); \
-do echo === $p ===; for container in $(oc get pods -n openshift-ovn-kubernetes $p \
--o json | jq -r '.status.containerStatuses[] | .name');do echo ---$container---; \
-oc logs -c $container $p -n openshift-ovn-kubernetes --tail=5; done; done
----

-. View the last 5 lines of every log in every container in an `ovnkube-master` pod using the following command:
-+
-[source,terminal]
-----
-$ oc logs -l app=ovnkube-master -n openshift-ovn-kubernetes --all-containers --tail 5
-----
-
-
-
diff --git a/modules/nw-ovn-kubernetes-logs-console.adoc b/modules/nw-ovn-kubernetes-logs-console.adoc
deleted file mode 100644
index ee59160b01cc..000000000000
--- a/modules/nw-ovn-kubernetes-logs-console.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc
-
-:_content-type: PROCEDURE
-[id="nw-ovn-kubernetes-logs-console_{context}"]
-= Viewing the OVN-Kubernetes logs using the web console
-
-You can view the logs for each of the containers in the `ovnkube-master` and `ovnkube-node` pods in the web console.
-
-.Prerequisites
-
-* Access to the OpenShift CLI (`oc`).
-
-.Procedure
-
-. In the {product-title} console, navigate to *Workloads* -> *Pods* or navigate to the pod through the resource you want to investigate.
-
-. Select the `openshift-ovn-kubernetes` project from the drop-down menu.
-
-. Click the name of the pod you want to investigate.
-
-. Click *Logs*. By default, for the `ovnkube-master` pods, the logs associated with the `northd` container are displayed.
-
-. Use the drop-down menu to select logs for each container in turn.
\ No newline at end of file
diff --git a/modules/nw-ovn-kubernetes-matrix.adoc b/modules/nw-ovn-kubernetes-matrix.adoc
deleted file mode 100644
index 3008cc157118..000000000000
--- a/modules/nw-ovn-kubernetes-matrix.adoc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc
-
-:_content-type: REFERENCE
-[id="nw-ovn-kubernetes-matrix_{context}"]
-= Supported network plugin feature matrix
-
-{openshift-networking} offers two options for the network plugin: OpenShift SDN and OVN-Kubernetes.
The following table summarizes the current feature support for both network plugins: - -.Default CNI network plugin feature comparison -[cols="50%,25%,25%",options="header"] -|=== -ifeval::["{context}" == "about-ovn-kubernetes"] -|Feature|OVN-Kubernetes|OpenShift SDN - -|Egress IPs|Supported|Supported - -|Egress firewall ^[1]^|Supported|Supported - -|Egress router|Supported ^[2]^|Supported - -|Hybrid networking|Supported|Not supported - -|IPsec encryption for intra-cluster communication|Supported|Not supported - -|IPv6|Supported ^[3]^|Not supported - -|Kubernetes network policy|Supported|Supported - -|Kubernetes network policy logs|Supported|Not supported - -|Hardware offloading|Supported|Not supported - -|Multicast|Supported|Supported -endif::[] -ifeval::["{context}" == "about-openshift-sdn"] -|Feature|OpenShift SDN|OVN-Kubernetes - -|Egress IPs|Supported|Supported - -|Egress firewall ^[1]^|Supported|Supported - -|Egress router|Supported|Supported ^[2]^ - -|Hybrid networking|Not supported|Supported - -|IPsec encryption|Not supported|Supported - -|IPv6|Not supported|Supported ^[3]^ - -|Kubernetes network policy|Supported|Supported - -|Kubernetes network policy logs|Not supported|Supported - -|Multicast|Supported|Supported - -|Hardware offloading|Not supported|Supported -endif::[] -|=== -[.small] --- -1. Egress firewall is also known as egress network policy in OpenShift SDN. This is not the same as network policy egress. - -2. Egress router for OVN-Kubernetes supports only redirect mode. - -3. IPv6 is supported only on bare metal, IBM Power, and {ibmzProductName} clusters. --- diff --git a/modules/nw-ovn-kubernetes-metrics.adoc b/modules/nw-ovn-kubernetes-metrics.adoc deleted file mode 100644 index 50555328efb3..000000000000 --- a/modules/nw-ovn-kubernetes-metrics.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc - -[id="nw-ovn-kubernetes-metrics_{context}"] -= Exposed metrics for OVN-Kubernetes - -The OVN-Kubernetes network plugin exposes certain metrics for use by the Prometheus-based {product-title} cluster monitoring stack. - -// openshift/ovn-kubernetes => go-controller/pkg/metrics/master.go - -.Metrics exposed by OVN-Kubernetes -[cols="2a,8a",options="header"] -|=== -|Name |Description - -|`ovnkube_master_pod_creation_latency_seconds` -|The latency between when a pod is created and when the pod is annotated by OVN-Kubernetes. The higher the latency, the more time that elapses before a pod is available for network connectivity. - -|=== - -//// -|`ovnkube_master_nb_e2e_timestamp` -|A timestamp persisted to the OVN (Open Virtual Network) northbound database and updated frequently. -//// diff --git a/modules/nw-ovn-kubernetes-migration-about.adoc b/modules/nw-ovn-kubernetes-migration-about.adoc deleted file mode 100644 index 86b8506759c1..000000000000 --- a/modules/nw-ovn-kubernetes-migration-about.adoc +++ /dev/null @@ -1,145 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc - -[id="nw-ovn-kubernetes-migration-about_{context}"] -= Migration to the OVN-Kubernetes network plugin - -Migrating to the OVN-Kubernetes network plugin is a manual process that includes some downtime during which your cluster is unreachable. Although a rollback procedure is provided, the migration is intended to be a one-way process. 
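Before you plan a migration, it can help to confirm which network plugin the cluster currently reports. The following sketch reuses the verification command that appears later in this document and assumes only `cluster-admin` access with the OpenShift CLI (`oc`):

[source,bash]
----
# Print the network plugin currently recorded in the cluster network configuration.
# Expect "OpenShiftSDN" before the migration and "OVNKubernetes" after it completes.
oc get network.config/cluster -o jsonpath='{.status.networkType}{"\n"}'
----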
- -A migration to the OVN-Kubernetes network plugin is supported on the following platforms: - -* Bare metal hardware -* Amazon Web Services (AWS) -* Google Cloud Platform (GCP) -* IBM Cloud -* Microsoft Azure -* {rh-openstack-first} -* {rh-virtualization-first} -* VMware vSphere - -[IMPORTANT] -==== -Migrating to or from the OVN-Kubernetes network plugin is not supported for managed OpenShift cloud services such as {product-dedicated}, Azure Red Hat OpenShift(ARO), and Red Hat OpenShift Service on AWS (ROSA). -==== - -[id="considerations-migrating-ovn-kubernetes-network-provider_{context}"] -== Considerations for migrating to the OVN-Kubernetes network plugin - -If you have more than 150 nodes in your {product-title} cluster, then open a support case for consultation on your migration to the OVN-Kubernetes network plugin. - -The subnets assigned to nodes and the IP addresses assigned to individual pods are not preserved during the migration. - -While the OVN-Kubernetes network plugin implements many of the capabilities present in the OpenShift SDN network plugin, the configuration is not the same. - -* If your cluster uses any of the following OpenShift SDN network plugin capabilities, you must manually configure the same capability in the OVN-Kubernetes network plugin: -+ --- -* Namespace isolation -* Egress router pods --- - -* If your cluster or surrounding network uses any part of the `100.64.0.0/16` address range, you must choose another unused IP range by specifying the `v4InternalSubnet` spec under the `spec.defaultNetwork.ovnKubernetesConfig` object definition. OVN-Kubernetes uses the IP range `100.64.0.0/16` internally by default. - -The following sections highlight the differences in configuration between the aforementioned capabilities in OVN-Kubernetes and OpenShift SDN network plugins. - -[discrete] -[id="namespace-isolation_{context}"] -=== Namespace isolation - -OVN-Kubernetes supports only the network policy isolation mode. - -[IMPORTANT] -==== -If your cluster uses OpenShift SDN configured in either the multitenant or subnet isolation modes, you cannot migrate to the OVN-Kubernetes network plugin. -==== - -[discrete] -[id="egress-ip-addresses_{context}"] -=== Egress IP addresses - -OpenShift SDN supports two different Egress IP modes: - -* In the _automatically assigned_ approach, an egress IP address range is assigned to a node. -* In the _manually assigned_ approach, a list of one or more egress IP addresses is assigned to a node. - -The migration process supports migrating Egress IP configurations that use the automatically assigned mode. - -The differences in configuring an egress IP address between OVN-Kubernetes and OpenShift SDN is described in the following table: - -.Differences in egress IP address configuration -[cols="1a,1a",options="header"] -|=== -|OVN-Kubernetes|OpenShift SDN - -| -* Create an `EgressIPs` object -* Add an annotation on a `Node` object - -| -* Patch a `NetNamespace` object -* Patch a `HostSubnet` object -|=== - -For more information on using egress IP addresses in OVN-Kubernetes, see "Configuring an egress IP address". 
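To make the OVN-Kubernetes column of the preceding table concrete, a minimal sketch of the workflow might look like the following. The `k8s.ovn.org/egress-assignable` key, the use of a node label for the marking, the sample IP address, and the namespace selector are assumptions made for this example only; follow "Configuring an egress IP address" for the authoritative steps and syntax.

[source,bash]
----
# Mark a node as eligible to host egress IP addresses.
# The exact key and the use of a label are assumptions in this sketch.
oc label node <node_name> k8s.ovn.org/egress-assignable=""

# Create an EgressIPs object that applies an egress IP to namespaces matching a label.
# The IP address and selector below are placeholders.
cat <<EOF | oc apply -f -
apiVersion: k8s.ovn.org/v1
kind: EgressIP
metadata:
  name: egressip-sample
spec:
  egressIPs:
  - 192.0.2.10
  namespaceSelector:
    matchLabels:
      env: qa
EOF
----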
- -[discrete] -[id="egress-network-policies_{context}"] -=== Egress network policies - -The difference in configuring an egress network policy, also known as an egress firewall, between OVN-Kubernetes and OpenShift SDN is described in the following table: - -.Differences in egress network policy configuration -[cols="1a,1a",options="header"] -|=== -|OVN-Kubernetes|OpenShift SDN - -| -* Create an `EgressFirewall` object in a namespace - -| -* Create an `EgressNetworkPolicy` object in a namespace -|=== - -[NOTE] -==== -Because the name of an `EgressFirewall` object can only be set to `default`, after the migration all migrated `EgressNetworkPolicy` objects are named `default`, regardless of what the name was under OpenShift SDN. - -If you subsequently rollback to OpenShift SDN, all `EgressNetworkPolicy` objects are named `default` as the prior name is lost. - -For more information on using an egress firewall in OVN-Kubernetes, see "Configuring an egress firewall for a project". -==== - -[discrete] -[id="egress-router-pods_{context}"] -=== Egress router pods - -OVN-Kubernetes supports egress router pods in redirect mode. OVN-Kubernetes does not support egress router pods in HTTP proxy mode or DNS proxy mode. - -When you deploy an egress router with the Cluster Network Operator, you cannot specify a node selector to control which node is used to host the egress router pod. - -[discrete] -[id="multicast_{context}"] -=== Multicast - -The difference between enabling multicast traffic on OVN-Kubernetes and OpenShift SDN is described in the following table: - -.Differences in multicast configuration -[cols="1a,1a",options="header"] -|=== -|OVN-Kubernetes|OpenShift SDN - -| -* Add an annotation on a `Namespace` object - -| -* Add an annotation on a `NetNamespace` object -|=== - -For more information on using multicast in OVN-Kubernetes, see "Enabling multicast for a project". - -[discrete] -[id="network-policies_{context}"] -=== Network policies - -OVN-Kubernetes fully supports the Kubernetes `NetworkPolicy` API in the `networking.k8s.io/v1` API group. No changes are necessary in your network policies when migrating from OpenShift SDN. diff --git a/modules/nw-ovn-kubernetes-migration.adoc b/modules/nw-ovn-kubernetes-migration.adoc deleted file mode 100644 index 7e843bd4d5ae..000000000000 --- a/modules/nw-ovn-kubernetes-migration.adoc +++ /dev/null @@ -1,370 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/migrate-from-openshift-sdn.adoc -// * networking/openshift_sdn/rollback-to-ovn-kubernetes.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-migration_{context}"] -= Migrating to the OVN-Kubernetes network plugin - -As a cluster administrator, you can change the network plugin for your cluster to OVN-Kubernetes. -During the migration, you must reboot every node in your cluster. - -[IMPORTANT] -==== -While performing the migration, your cluster is unavailable and workloads might be interrupted. -Perform the migration only when an interruption in service is acceptable. -==== - -.Prerequisites - -* A cluster configured with the OpenShift SDN CNI network plugin in the network policy isolation mode. -* Install the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. -* A recent backup of the etcd database is available. -* A reboot can be triggered manually for each node. -* The cluster is in a known good state, without any errors. - -.Procedure - -. 
To backup the configuration for the cluster network, enter the following command: -+ -[source,terminal] ----- -$ oc get Network.config.openshift.io cluster -o yaml > cluster-openshift-sdn.yaml ----- - -. To prepare all the nodes for the migration, set the `migration` field on the Cluster Network Operator configuration object by entering the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": { "networkType": "OVNKubernetes" } } }' ----- -+ -[NOTE] -==== -This step does not deploy OVN-Kubernetes immediately. Instead, specifying the `migration` field triggers the Machine Config Operator (MCO) to apply new machine configs to all the nodes in the cluster in preparation for the OVN-Kubernetes deployment. -==== - -. Optional: You can disable automatic migration of several OpenShift SDN capabilities to the OVN-Kubernetes equivalents: -+ --- -* Egress IPs -* Egress firewall -* Multicast --- -+ -To disable automatic migration of the configuration for any of the previously noted OpenShift SDN features, specify the following keys: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ - "spec": { - "migration": { - "networkType": "OVNKubernetes", - "features": { - "egressIP": , - "egressFirewall": , - "multicast": - } - } - } - }' ----- -+ -where: -+ --- -`bool`: Specifies whether to enable migration of the feature. The default is `true`. --- - -. Optional: You can customize the following settings for OVN-Kubernetes to meet your network infrastructure requirements: -+ --- -* Maximum transmission unit (MTU) -* Geneve (Generic Network Virtualization Encapsulation) overlay network port -* OVN-Kubernetes IPv4 internal subnet -* OVN-Kubernetes IPv6 internal subnet --- -+ -To customize either of the previously noted settings, enter and customize the following command. If you do not need to change the default value, omit the key from the patch. -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "ovnKubernetesConfig":{ - "mtu":, - "genevePort":, - "v4InternalSubnet":"", - "v6InternalSubnet":"" - }}}}' ----- -+ -where: -+ --- -`mtu`:: -The MTU for the Geneve overlay network. This value is normally configured automatically, but if the nodes in your cluster do not all use the same MTU, then you must set this explicitly to `100` less than the smallest node MTU value. -`port`:: -The UDP port for the Geneve overlay network. If a value is not specified, the default is `6081`. The port cannot be the same as the VXLAN port that is used by OpenShift SDN. The default value for the VXLAN port is `4789`. -`ipv4_subnet`:: -An IPv4 address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. The default value is `100.64.0.0/16`. -`ipv6_subnet`:: -An IPv6 address range for internal use by OVN-Kubernetes. You must ensure that the IP address range does not overlap with any other subnet used by your {product-title} installation. The IP address range must be larger than the maximum number of nodes that can be added to the cluster. The default value is `fd98::/48`. 
--- -+ -.Example patch command to update `mtu` field -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "ovnKubernetesConfig":{ - "mtu":1200 - }}}}' ----- - -. As the MCO updates machines in each machine config pool, it reboots each node one by one. You must wait until all the nodes are updated. Check the machine config pool status by entering the following command: -+ -[source,terminal] ----- -$ oc get mcp ----- -+ -A successfully updated node has the following status: `UPDATED=true`, `UPDATING=false`, `DEGRADED=false`. -+ -[NOTE] -==== -By default, the MCO updates one machine per pool at a time, causing the total time the migration takes to increase with the size of the cluster. -==== - -. Confirm the status of the new machine configuration on the hosts: - -.. To list the machine configuration state and the name of the applied machine configuration, enter the following command: -+ -[source,terminal] ----- -$ oc describe node | egrep "hostname|machineconfig" ----- -+ -.Example output -[source,terminal] ----- -kubernetes.io/hostname=master-0 -machineconfiguration.openshift.io/currentConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/desiredConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/reason: -machineconfiguration.openshift.io/state: Done ----- -+ -Verify that the following statements are true: -+ --- - * The value of `machineconfiguration.openshift.io/state` field is `Done`. - * The value of the `machineconfiguration.openshift.io/currentConfig` field is equal to the value of the `machineconfiguration.openshift.io/desiredConfig` field. --- - -.. To confirm that the machine config is correct, enter the following command: -+ -[source,terminal] ----- -$ oc get machineconfig -o yaml | grep ExecStart ----- -+ -where `` is the name of the machine config from the `machineconfiguration.openshift.io/currentConfig` field. -+ -The machine config must include the following update to the systemd configuration: -+ -[source,plain] ----- -ExecStart=/usr/local/bin/configure-ovs.sh OVNKubernetes ----- - -.. If a node is stuck in the `NotReady` state, investigate the machine config daemon pod logs and resolve any errors. - -... To list the pods, enter the following command: -+ -[source,terminal] ----- -$ oc get pod -n openshift-machine-config-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -machine-config-controller-75f756f89d-sjp8b 1/1 Running 0 37m -machine-config-daemon-5cf4b 2/2 Running 0 43h -machine-config-daemon-7wzcd 2/2 Running 0 43h -machine-config-daemon-fc946 2/2 Running 0 43h -machine-config-daemon-g2v28 2/2 Running 0 43h -machine-config-daemon-gcl4f 2/2 Running 0 43h -machine-config-daemon-l5tnv 2/2 Running 0 43h -machine-config-operator-79d9c55d5-hth92 1/1 Running 0 37m -machine-config-server-bsc8h 1/1 Running 0 43h -machine-config-server-hklrm 1/1 Running 0 43h -machine-config-server-k9rtx 1/1 Running 0 43h ----- -+ -The names for the config daemon pods are in the following format: `machine-config-daemon-`. The `` value is a random five character alphanumeric sequence. - -... Display the pod log for the first machine config daemon pod shown in the previous output by enter the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-machine-config-operator ----- -+ -where `pod` is the name of a machine config daemon pod. - -... 
Resolve any errors in the logs shown by the output from the previous command. - -. To start the migration, configure the OVN-Kubernetes network plugin by using one of the following commands: - -** To specify the network provider without changing the cluster network IP address block, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster \ - --type='merge' --patch '{ "spec": { "networkType": "OVNKubernetes" } }' ----- - -** To specify a different cluster network IP address block, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.config.openshift.io cluster \ - --type='merge' --patch '{ - "spec": { - "clusterNetwork": [ - { - "cidr": "", - "hostPrefix": - } - ], - "networkType": "OVNKubernetes" - } - }' ----- -+ -where `cidr` is a CIDR block and `prefix` is the slice of the CIDR block apportioned to each node in your cluster. You cannot use any CIDR block that overlaps with the `100.64.0.0/16` CIDR block because the OVN-Kubernetes network provider uses this block internally. -+ -[IMPORTANT] -==== -You cannot change the service network address block during the migration. -==== - -. Verify that the Multus daemon set rollout is complete before continuing with subsequent steps: -+ -[source,terminal] ----- -$ oc -n openshift-multus rollout status daemonset/multus ----- -+ -The name of the Multus pods is in the form of `multus-` where `` is a random sequence of letters. It might take several moments for the pods to restart. -+ -.Example output -[source,text] ----- -Waiting for daemon set "multus" rollout to finish: 1 out of 6 new pods have been updated... -... -Waiting for daemon set "multus" rollout to finish: 5 of 6 updated pods are available... -daemon set "multus" successfully rolled out ----- - -. To complete changing the network plugin, reboot each node in your cluster. You can reboot the nodes in your cluster with either of the following approaches: - -** With the `oc rsh` command, you can use a bash script similar to the following: -+ -[source,bash] ----- -#!/bin/bash -readarray -t POD_NODES <<< "$(oc get pod -n openshift-machine-config-operator -o wide| grep daemon|awk '{print $1" "$7}')" - -for i in "${POD_NODES[@]}" -do - read -r POD NODE <<< "$i" - until oc rsh -n openshift-machine-config-operator "$POD" chroot /rootfs shutdown -r +1 - do - echo "cannot reboot node $NODE, retry" && sleep 3 - done -done ----- - -** With the `ssh` command, you can use a bash script similar to the following. The script assumes that you have configured sudo to not prompt for a password. -+ -[source,bash] ----- -#!/bin/bash - -for ip in $(oc get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}') -do - echo "reboot node $ip" - ssh -o StrictHostKeyChecking=no core@$ip sudo shutdown -r -t 3 -done ----- - -. Confirm that the migration succeeded: - -.. To confirm that the network plugin is OVN-Kubernetes, enter the following command. The value of `status.networkType` must be `OVNKubernetes`. -+ -[source,terminal] ----- -$ oc get network.config/cluster -o jsonpath='{.status.networkType}{"\n"}' ----- - -.. To confirm that the cluster nodes are in the `Ready` state, enter the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- - -.. To confirm that your pods are not in an error state, enter the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces -o wide --sort-by='{.spec.nodeName}' ----- -+ -If pods on a node are in an error state, reboot that node. - -.. 
To confirm that all of the cluster Operators are not in an abnormal state, enter the following command: -+ -[source,terminal] ----- -$ oc get co ----- -+ -The status of every cluster Operator must be the following: `AVAILABLE="True"`, `PROGRESSING="False"`, `DEGRADED="False"`. If a cluster Operator is not available or degraded, check the logs for the cluster Operator for more information. - -. Complete the following steps only if the migration succeeds and your cluster is in a good state: - -.. To remove the migration configuration from the CNO configuration object, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": null } }' ----- - -.. To remove custom configuration for the OpenShift SDN network provider, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "defaultNetwork": { "openshiftSDNConfig": null } } }' ----- - -.. To remove the OpenShift SDN network provider namespace, enter the following command: -+ -[source,terminal] ----- -$ oc delete namespace openshift-sdn ----- diff --git a/modules/nw-ovn-kubernetes-pod-connectivity-checks.adoc b/modules/nw-ovn-kubernetes-pod-connectivity-checks.adoc deleted file mode 100644 index 4ca2c16a5dcb..000000000000 --- a/modules/nw-ovn-kubernetes-pod-connectivity-checks.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-pod-connectivity-checks_{context}"] -= Checking the OVN-Kubernetes pod network connectivity - -The connectivity check controller, in {product-title} 4.10 and later, orchestrates connection verification checks in your cluster. These include Kubernetes API, OpenShift API and individual nodes. The results for the connection tests are stored in `PodNetworkConnectivity` objects in the `openshift-network-diagnostics` namespace. Connection tests are performed every minute in parallel. - -.Prerequisites - -* Access to the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. -* You have installed `jq`. - -.Procedure - -. To list the current `PodNetworkConnectivityCheck` objects, enter the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitychecks -n openshift-network-diagnostics ----- - -. View the most recent success for each connection object by using the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitychecks -n openshift-network-diagnostics \ --o json | jq '.items[]| .spec.targetEndpoint,.status.successes[0]' ----- - -. View the most recent failures for each connection object by using the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitychecks -n openshift-network-diagnostics \ --o json | jq '.items[]| .spec.targetEndpoint,.status.failures[0]' ----- - -. View the most recent outages for each connection object by using the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitychecks -n openshift-network-diagnostics \ --o json | jq '.items[]| .spec.targetEndpoint,.status.outages[0]' ----- -+ -The connectivity check controller also logs metrics from these checks into Prometheus. - -. 
View all the metrics by running the following command:
-+
-[source,terminal]
-----
-$ oc exec prometheus-k8s-0 -n openshift-monitoring -- \
-promtool query instant http://localhost:9090 \
-'{component="openshift-network-diagnostics"}'
-----
-
-. View the latency between the source pod and the OpenShift API service for the last 5 minutes:
-+
-[source,terminal]
-----
-$ oc exec prometheus-k8s-0 -n openshift-monitoring -- \
-promtool query instant http://localhost:9090 \
-'{component="openshift-network-diagnostics"}'
-----
-
-
-
-
diff --git a/modules/nw-ovn-kubernetes-readiness-probes.adoc b/modules/nw-ovn-kubernetes-readiness-probes.adoc
deleted file mode 100644
index ad9945939059..000000000000
--- a/modules/nw-ovn-kubernetes-readiness-probes.adoc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-troubleshooting-sources.adoc
-
-:_content-type: PROCEDURE
-[id="nw-ovn-kubernetes-readiness-probes_{context}"]
-= Monitoring OVN-Kubernetes health by using readiness probes
-
-The `ovnkube-master` and `ovnkube-node` pods have containers configured with readiness probes.
-
-.Prerequisites
-
-* Access to the OpenShift CLI (`oc`).
-* You have access to the cluster with `cluster-admin` privileges.
-* You have installed `jq`.
-
-.Procedure
-
-. Review the details of the `ovnkube-master` readiness probe by running the following command:
-+
-[source,terminal]
-----
-$ oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-master \
--o json | jq '.items[0].spec.containers[] | .name,.readinessProbe'
-----
-+
-The readiness probe for the northbound and southbound database containers in the `ovnkube-master` pod checks for the health of the Raft cluster hosting the databases.
-
-. Review the details of the `ovnkube-node` readiness probe by running the following command:
-+
-[source,terminal]
-----
-$ oc get pods -n openshift-ovn-kubernetes -l app=ovnkube-node \
--o json | jq '.items[0].spec.containers[] | .name,.readinessProbe'
-----
-+
-The `ovnkube-node` container in the `ovnkube-node` pod has a readiness probe to verify the presence of the ovn-kubernetes CNI configuration file, the absence of which would indicate that the pod is not running or is not ready to accept requests to configure pods.
-
-. Show all events, including the probe failures, for the namespace by using the following command:
-+
-[source,terminal]
-----
-$ oc get events -n openshift-ovn-kubernetes
-----
-
-. Show the events for just this pod:
-+
-[source,terminal]
-----
-$ oc describe pod ovnkube-master-tp2z8 -n openshift-ovn-kubernetes
-----
-
-. Show the messages and statuses from the Cluster Network Operator:
-+
-[source,terminal]
-----
-$ oc get co/network -o json | jq '.status.conditions[]'
-----
-
-. Show the `ready` status of each container in `ovnkube-master` pods by running the following script:
-+
-[source,terminal]
-----
-$ for p in $(oc get pods --selector app=ovnkube-master -n openshift-ovn-kubernetes \
--o jsonpath='{range.items[*]}{" "}{.metadata.name}'); do echo === $p ===; \
-oc get pods -n openshift-ovn-kubernetes $p -o json | jq '.status.containerStatuses[] | .name, .ready'; \
-done
-----
-+
-[NOTE]
-====
-The expectation is that all container statuses report `true`. Failure of a readiness probe sets the status to `false`.
-==== \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-resources-con.adoc b/modules/nw-ovn-kubernetes-resources-con.adoc deleted file mode 100644 index 3de27a8dc9cf..000000000000 --- a/modules/nw-ovn-kubernetes-resources-con.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: Procedure -[id="nw-kubernetes-resources-con_{context}"] -= Resources in the OVN-Kubernetes project - -The OVN-Kubernetes Container Network Interface (CNI) cluster network provider - -.Procedure - -. Run the following command to get all resources in the OVN-Kubernetes project -+ -[source,terminal] ----- -$ oc get all -n openshift-ovn-kubernetes ----- - -.Example output -[source,terminal] ----- -$ NAME READY STATUS RESTARTS AGE -pod/ovnkube-master-cpdxx 6/6 Running 0 157m -pod/ovnkube-master-kcbb5 6/6 Running 0 157m -pod/ovnkube-master-lqhsf 6/6 Running 0 157m -pod/ovnkube-node-2gj7j 5/5 Running 0 147m -pod/ovnkube-node-4kjhv 0/5 ContainerCreating 0 35s -pod/ovnkube-node-f567p 5/5 Running 0 157m -pod/ovnkube-node-lvswl 5/5 Running 0 157m -pod/ovnkube-node-z5dfx 5/5 Running 0 157m -pod/ovnkube-node-zpsn4 5/5 Running 0 134m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/ovn-kubernetes-master ClusterIP None 9102/TCP 157m -service/ovn-kubernetes-node ClusterIP None 9103/TCP,9105/TCP 157m -service/ovnkube-db ClusterIP None 9641/TCP,9642/TCP 157m - -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/ovnkube-master 3 3 3 3 3 beta.kubernetes.io/os=linux,node-role.kubernetes.io/master= 157m -daemonset.apps/ovnkube-node 6 6 5 6 5 beta.kubernetes.io/os=linux 157m ----- diff --git a/modules/nw-ovn-kubernetes-rollback.adoc b/modules/nw-ovn-kubernetes-rollback.adoc deleted file mode 100644 index 09d2e0e1b41f..000000000000 --- a/modules/nw-ovn-kubernetes-rollback.adoc +++ /dev/null @@ -1,337 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/rollback-to-openshift-sdn.adoc -// * networking/openshift_sdn/migrate-to-openshift-sdn.adoc - -// This procedure applies to both a roll back and a migration -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-rollback_{context}"] -= Migrating to the OpenShift SDN network plugin - -As a cluster administrator, you can migrate to the OpenShift SDN Container Network Interface (CNI) network plugin. -During the migration you must reboot every node in your cluster. - -ifeval::["{context}" == "rollback-to-openshift-sdn"] -[IMPORTANT] -==== -Only rollback to OpenShift SDN if the migration to OVN-Kubernetes fails. -==== -endif::[] - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. -* A cluster installed on infrastructure configured with the OVN-Kubernetes network plugin. -* A recent backup of the etcd database is available. -* A reboot can be triggered manually for each node. -* The cluster is in a known good state, without any errors. - -.Procedure - -. 
Stop all of the machine configuration pools managed by the Machine Config Operator (MCO): - -** Stop the master configuration pool: -+ -[source,terminal] ----- -$ oc patch MachineConfigPool master --type='merge' --patch \ - '{ "spec": { "paused": true } }' ----- - -** Stop the worker machine configuration pool: -+ -[source,terminal] ----- -$ oc patch MachineConfigPool worker --type='merge' --patch \ - '{ "spec":{ "paused": true } }' ----- - -. To prepare for the migration, set the migration field to `null` by entering the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": null } }' ----- - -. To start the migration, set the network plugin back to OpenShift SDN by entering the following commands: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": { "networkType": "OpenShiftSDN" } } }' - -$ oc patch Network.config.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "networkType": "OpenShiftSDN" } }' ----- - -. Optional: You can disable automatic migration of several OVN-Kubernetes capabilities to the OpenShift SDN equivalents: -+ --- -* Egress IPs -* Egress firewall -* Multicast --- -+ -To disable automatic migration of the configuration for any of the previously noted OpenShift SDN features, specify the following keys: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ - "spec": { - "migration": { - "networkType": "OpenShiftSDN", - "features": { - "egressIP": , - "egressFirewall": , - "multicast": - } - } - } - }' ----- -+ -where: -+ --- -`bool`: Specifies whether to enable migration of the feature. The default is `true`. --- - -. Optional: You can customize the following settings for OpenShift SDN to meet your network infrastructure requirements: -+ --- -* Maximum transmission unit (MTU) -* VXLAN port --- -+ -To customize either or both of the previously noted settings, customize and enter the following command. If you do not need to change the default value, omit the key from the patch. -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "openshiftSDNConfig":{ - "mtu":, - "vxlanPort": - }}}}' ----- -+ --- -`mtu`:: -The MTU for the VXLAN overlay network. This value is normally configured automatically, but if the nodes in your cluster do not all use the same MTU, then you must set this explicitly to `50` less than the smallest node MTU value. -`port`:: -The UDP port for the VXLAN overlay network. If a value is not specified, the default is `4789`. The port cannot be the same as the Geneve port that is used by OVN-Kubernetes. The default value for the Geneve port is `6081`. --- -+ -.Example patch command -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type=merge \ - --patch '{ - "spec":{ - "defaultNetwork":{ - "openshiftSDNConfig":{ - "mtu":1200 - }}}}' ----- - -. Wait until the Multus daemon set rollout completes. -+ -[source,terminal] ----- -$ oc -n openshift-multus rollout status daemonset/multus ----- -+ -The name of the Multus pods is in form of `multus-` where `` is a random sequence of letters. It might take several moments for the pods to restart. -+ -.Example output -[source,text] ----- -Waiting for daemon set "multus" rollout to finish: 1 out of 6 new pods have been updated... -... 
-Waiting for daemon set "multus" rollout to finish: 5 of 6 updated pods are available... -daemon set "multus" successfully rolled out ----- - -. To complete changing the network plugin, reboot each node in your cluster. You can reboot the nodes in your cluster with either of the following approaches: - -** With the `oc rsh` command, you can use a bash script similar to the following: -+ -[source,bash] ----- -#!/bin/bash -readarray -t POD_NODES <<< "$(oc get pod -n openshift-machine-config-operator -o wide| grep daemon|awk '{print $1" "$7}')" - -for i in "${POD_NODES[@]}" -do - read -r POD NODE <<< "$i" - until oc rsh -n openshift-machine-config-operator "$POD" chroot /rootfs shutdown -r +1 - do - echo "cannot reboot node $NODE, retry" && sleep 3 - done -done ----- - -** With the `ssh` command, you can use a bash script similar to the following. The script assumes that you have configured sudo to not prompt for a password. -+ -[source,bash] ----- -#!/bin/bash - -for ip in $(oc get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}') -do - echo "reboot node $ip" - ssh -o StrictHostKeyChecking=no core@$ip sudo shutdown -r -t 3 -done ----- - -. After the nodes in your cluster have rebooted, start all of the machine configuration pools: -+ --- -* Start the master configuration pool: -+ -[source,terminal] ----- -$ oc patch MachineConfigPool master --type='merge' --patch \ - '{ "spec": { "paused": false } }' ----- - -* Start the worker configuration pool: -+ -[source,terminal] ----- -$ oc patch MachineConfigPool worker --type='merge' --patch \ - '{ "spec": { "paused": false } }' ----- --- -+ -As the MCO updates machines in each config pool, it reboots each node. -+ -By default the MCO updates a single machine per pool at a time, so the time that the migration requires to complete grows with the size of the cluster. - -. Confirm the status of the new machine configuration on the hosts: -.. To list the machine configuration state and the name of the applied machine configuration, enter the following command: -+ -[source,terminal] ----- -$ oc describe node | egrep "hostname|machineconfig" ----- -+ -.Example output -[source,terminal] ----- -kubernetes.io/hostname=master-0 -machineconfiguration.openshift.io/currentConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/desiredConfig: rendered-master-c53e221d9d24e1c8bb6ee89dd3d8ad7b -machineconfiguration.openshift.io/reason: -machineconfiguration.openshift.io/state: Done ----- -+ -Verify that the following statements are true: -+ --- - * The value of `machineconfiguration.openshift.io/state` field is `Done`. - * The value of the `machineconfiguration.openshift.io/currentConfig` field is equal to the value of the `machineconfiguration.openshift.io/desiredConfig` field. --- - -.. To confirm that the machine config is correct, enter the following command: -+ -[source,terminal] ----- -$ oc get machineconfig -o yaml ----- -+ -where `` is the name of the machine config from the `machineconfiguration.openshift.io/currentConfig` field. - -. Confirm that the migration succeeded: - -.. To confirm that the network plugin is OpenShift SDN, enter the following command. The value of `status.networkType` must be `OpenShiftSDN`. -+ -[source,terminal] ----- -$ oc get network.config/cluster -o jsonpath='{.status.networkType}{"\n"}' ----- - -.. To confirm that the cluster nodes are in the `Ready` state, enter the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- - -.. 
If a node is stuck in the `NotReady` state, investigate the machine config daemon pod logs and resolve any errors. - -... To list the pods, enter the following command: -+ -[source,terminal] ----- -$ oc get pod -n openshift-machine-config-operator ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -machine-config-controller-75f756f89d-sjp8b 1/1 Running 0 37m -machine-config-daemon-5cf4b 2/2 Running 0 43h -machine-config-daemon-7wzcd 2/2 Running 0 43h -machine-config-daemon-fc946 2/2 Running 0 43h -machine-config-daemon-g2v28 2/2 Running 0 43h -machine-config-daemon-gcl4f 2/2 Running 0 43h -machine-config-daemon-l5tnv 2/2 Running 0 43h -machine-config-operator-79d9c55d5-hth92 1/1 Running 0 37m -machine-config-server-bsc8h 1/1 Running 0 43h -machine-config-server-hklrm 1/1 Running 0 43h -machine-config-server-k9rtx 1/1 Running 0 43h ----- -+ -The names for the config daemon pods are in the following format: `machine-config-daemon-`. The `` value is a random five character alphanumeric sequence. - -... To display the pod log for each machine config daemon pod shown in the previous output, enter the following command: -+ -[source,terminal] ----- -$ oc logs -n openshift-machine-config-operator ----- -+ -where `pod` is the name of a machine config daemon pod. - -... Resolve any errors in the logs shown by the output from the previous command. - -.. To confirm that your pods are not in an error state, enter the following command: -+ -[source,terminal] ----- -$ oc get pods --all-namespaces -o wide --sort-by='{.spec.nodeName}' ----- -+ -If pods on a node are in an error state, reboot that node. - -. Complete the following steps only if the migration succeeds and your cluster is in a good state: - -.. To remove the migration configuration from the Cluster Network Operator configuration object, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "migration": null } }' ----- - -.. To remove the OVN-Kubernetes configuration, enter the following command: -+ -[source,terminal] ----- -$ oc patch Network.operator.openshift.io cluster --type='merge' \ - --patch '{ "spec": { "defaultNetwork": { "ovnKubernetesConfig":null } } }' ----- - -.. To remove the OVN-Kubernetes network provider namespace, enter the following command: -+ -[source,terminal] ----- -$ oc delete namespace openshift-ovn-kubernetes ----- diff --git a/modules/nw-ovn-kubernetes-running-network-tools.adoc b/modules/nw-ovn-kubernetes-running-network-tools.adoc deleted file mode 100644 index 09dd8b749d82..000000000000 --- a/modules/nw-ovn-kubernetes-running-network-tools.adoc +++ /dev/null @@ -1,223 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-running-network-tools_{context}"] -= Running network-tools - -Get information about the logical switches and routers by running `network-tools`. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster as a user with `cluster-admin` privileges. -* You have installed `network-tools` on local host. - -.Procedure - -. 
List the routers by running the following command: -+ -[source,terminal] ----- -$ ./debug-scripts/network-tools ovn-db-run-command ovn-nbctl lr-list ----- -+ -.Example output -+ -[source,terminal] ----- -Leader pod is ovnkube-master-vslqm -5351ddd1-f181-4e77-afc6-b48b0a9df953 (GR_helix13.lab.eng.tlv2.redhat.com) -ccf9349e-1948-4df8-954e-39fb0c2d4d06 (GR_helix14.lab.eng.tlv2.redhat.com) -e426b918-75a8-4220-9e76-20b7758f92b7 (GR_hlxcl7-master-0.hlxcl7.lab.eng.tlv2.redhat.com) -dded77c8-0cc3-4b99-8420-56cd2ae6a840 (GR_hlxcl7-master-1.hlxcl7.lab.eng.tlv2.redhat.com) -4f6747e6-e7ba-4e0c-8dcd-94c8efa51798 (GR_hlxcl7-master-2.hlxcl7.lab.eng.tlv2.redhat.com) -52232654-336e-4952-98b9-0b8601e370b4 (ovn_cluster_router) ----- - -. List the localnet ports by running the following command: -+ -[source,terminal] ----- -$ ./debug-scripts/network-tools ovn-db-run-command \ -ovn-sbctl find Port_Binding type=localnet ----- -+ -.Example output -+ -[source,terminal] ----- -Leader pod is ovnkube-master-vslqm -_uuid : 3de79191-cca8-4c28-be5a-a228f0f9ebfc -additional_chassis : [] -additional_encap : [] -chassis : [] -datapath : 3f1a4928-7ff5-471f-9092-fe5f5c67d15c -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : br-ex_helix13.lab.eng.tlv2.redhat.com -mac : [unknown] -nat_addresses : [] -options : {network_name=physnet} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 2 -type : localnet -up : false -virtual_parent : [] - -_uuid : dbe21daf-9594-4849-b8f0-5efbfa09a455 -additional_chassis : [] -additional_encap : [] -chassis : [] -datapath : db2a6067-fe7c-4d11-95a7-ff2321329e11 -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : br-ex_hlxcl7-master-2.hlxcl7.lab.eng.tlv2.redhat.com -mac : [unknown] -nat_addresses : [] -options : {network_name=physnet} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 2 -type : localnet -up : false -virtual_parent : [] - -[...] ----- - -. 
List the `l3gateway` ports by running the following command: -+ -[source,terminal] ----- -$ ./debug-scripts/network-tools ovn-db-run-command \ -ovn-sbctl find Port_Binding type=l3gateway ----- -+ -.Example output -+ -[source,terminal] ----- -Leader pod is ovnkube-master-vslqm -_uuid : 9314dc80-39e1-4af7-9cc0-ae8a9708ed59 -additional_chassis : [] -additional_encap : [] -chassis : 336a923d-99e8-4e71-89a6-12564fde5760 -datapath : db2a6067-fe7c-4d11-95a7-ff2321329e11 -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : etor-GR_hlxcl7-master-2.hlxcl7.lab.eng.tlv2.redhat.com -mac : ["52:54:00:3e:95:d3"] -nat_addresses : ["52:54:00:3e:95:d3 10.46.56.77"] -options : {l3gateway-chassis="7eb1f1c3-87c2-4f68-8e89-60f5ca810971", peer=rtoe-GR_hlxcl7-master-2.hlxcl7.lab.eng.tlv2.redhat.com} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 1 -type : l3gateway -up : true -virtual_parent : [] - -_uuid : ad7eb303-b411-4e9f-8d36-d07f1f268e27 -additional_chassis : [] -additional_encap : [] -chassis : f41453b8-29c5-4f39-b86b-e82cf344bce4 -datapath : 082e7a60-d9c7-464b-b6ec-117d3426645a -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : etor-GR_helix14.lab.eng.tlv2.redhat.com -mac : ["34:48:ed:f3:e2:2c"] -nat_addresses : ["34:48:ed:f3:e2:2c 10.46.56.14"] -options : {l3gateway-chassis="2e8abe3a-cb94-4593-9037-f5f9596325e2", peer=rtoe-GR_helix14.lab.eng.tlv2.redhat.com} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 1 -type : l3gateway -up : true -virtual_parent : [] - -[...] ----- - -. List the patch ports by running the following command: -+ -[source,terminal] ----- -$ ./debug-scripts/network-tools ovn-db-run-command \ -ovn-sbctl find Port_Binding type=patch ----- -+ -.Example output -+ -[source,terminal] ----- -Leader pod is ovnkube-master-vslqm -_uuid : c48b1380-ff26-4965-a644-6bd5b5946c61 -additional_chassis : [] -additional_encap : [] -chassis : [] -datapath : 72734d65-fae1-4bd9-a1ee-1bf4e085a060 -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : jtor-ovn_cluster_router -mac : [router] -nat_addresses : [] -options : {peer=rtoj-ovn_cluster_router} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 4 -type : patch -up : false -virtual_parent : [] - -_uuid : 5df51302-f3cd-415b-a059-ac24389938f7 -additional_chassis : [] -additional_encap : [] -chassis : [] -datapath : 0551c90f-e891-4909-8e9e-acc7909e06d0 -encap : [] -external_ids : {} -gateway_chassis : [] -ha_chassis_group : [] -logical_port : rtos-hlxcl7-master-1.hlxcl7.lab.eng.tlv2.redhat.com -mac : ["0a:58:0a:82:00:01 10.130.0.1/23"] -nat_addresses : [] -options : {chassis-redirect-port=cr-rtos-hlxcl7-master-1.hlxcl7.lab.eng.tlv2.redhat.com, peer=stor-hlxcl7-master-1.hlxcl7.lab.eng.tlv2.redhat.com} -parent_port : [] -port_security : [] -requested_additional_chassis: [] -requested_chassis : [] -tag : [] -tunnel_key : 4 -type : patch -up : false -virtual_parent : [] - -[...] 
----- \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-running-ovnkube-trace.adoc b/modules/nw-ovn-kubernetes-running-ovnkube-trace.adoc deleted file mode 100644 index 32892372ed7a..000000000000 --- a/modules/nw-ovn-kubernetes-running-ovnkube-trace.adoc +++ /dev/null @@ -1,307 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/ovn-kubernetes-architecture.adoc - -:_content-type: PROCEDURE -[id="nw-ovn-kubernetes-running-ovnkube-trace_{context}"] -= Running ovnkube-trace - -Run `ovn-trace` to simulate packet forwarding within an OVN logical network. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You are logged in to the cluster with a user with `cluster-admin` privileges. -* You have installed `ovnkube-trace` on local host - -.Example: Testing that DNS resolution works from a deployed pod - -This example illustrates how to test the DNS resolution from a deployed pod to the core DNS pod that runs in the cluster. - -.Procedure - -. Start a web service in the default namespace by entering the following command: -+ -[source,terminal] ----- -$ oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 ----- - -. List the pods running in the `openshift-dns` namespace: -+ -[source,terminal] ----- -oc get pods -n openshift-dns ----- -+ -.Example output - -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -dns-default-467qw 2/2 Running 0 49m -dns-default-6prvx 2/2 Running 0 53m -dns-default-fkqr8 2/2 Running 0 53m -dns-default-qv2rg 2/2 Running 0 49m -dns-default-s29vr 2/2 Running 0 49m -dns-default-vdsbn 2/2 Running 0 53m -node-resolver-6thtt 1/1 Running 0 53m -node-resolver-7ksdn 1/1 Running 0 49m -node-resolver-8sthh 1/1 Running 0 53m -node-resolver-c5ksw 1/1 Running 0 50m -node-resolver-gbvdp 1/1 Running 0 53m -node-resolver-sxhkd 1/1 Running 0 50m ----- - -. Run the following `ovn-kube-trace` command to verify DNS resolution is working: -+ -[source,terminal] ----- -$ ./ovnkube-trace \ - -src-namespace default \ <1> - -src web \ <2> - -dst-namespace openshift-dns \ <3> - -dst dns-default-467qw \ <4> - -udp -dst-port 53 \ <5> - -loglevel 0 <6> ----- -+ -<1> Namespace of the source pod -<2> Source pod name -<3> Namespace of destination pod -<4> Destination pod name -<5> Use the `udp` transport protocol. Port 53 is the port the DNS service uses. -<6> Set the log level to 1 (0 is minimal and 5 is debug) -+ -.Expected output -[source,terminal] ----- -I0116 10:19:35.601303 17900 ovs.go:90] Maximum command line arguments set to: 191102 -ovn-trace source pod to destination pod indicates success from web to dns-default-467qw -ovn-trace destination pod to source pod indicates success from dns-default-467qw to web -ovs-appctl ofproto/trace source pod to destination pod indicates success from web to dns-default-467qw -ovs-appctl ofproto/trace destination pod to source pod indicates success from dns-default-467qw to web -ovn-detrace source pod to destination pod indicates success from web to dns-default-467qw -ovn-detrace destination pod to source pod indicates success from dns-default-467qw to web ----- -+ -The ouput indicates success from the deployed pod to the DNS port and also indicates that it is -successful going back in the other direction. So you know bi-directional traffic is supported on UDP port 53 if my web pod wants to do dns resolution from core DNS. 
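To extend this spot check to every DNS pod instead of a single one, the same `ovnkube-trace` invocation can be wrapped in a small loop. The following is a minimal sketch rather than part of the documented procedure: it assumes that `ovnkube-trace` sits in the current working directory, that the `web` pod created earlier is still running in the `default` namespace, and that the DNS pods keep the `dns-default-` name prefix shown in the listing above.

[source,bash]
----
#!/bin/bash
# Sketch: verify UDP/53 reachability from the web pod to every dns-default pod.
# Assumes ./ovnkube-trace is present locally and `oc` is logged in to the cluster.

for dns_pod in $(oc get pods -n openshift-dns --no-headers | awk '/^dns-default/ {print $1}')
do
  echo "Tracing UDP/53 from default/web to openshift-dns/${dns_pod}"
  if ! ./ovnkube-trace \
      -src-namespace default -src web \
      -dst-namespace openshift-dns -dst "${dns_pod}" \
      -udp -dst-port 53 -loglevel 0
  then
    echo "Trace to ${dns_pod} reported a failure; rerun with -loglevel 2 for details" >&2
  fi
done
----

Any pod that reports a failure can then be investigated individually at a higher log level, as described next.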
- -If for example that did not work and you wanted to get the `ovn-trace`, the `ovs-appctl ofproto/trace` and `ovn-detrace`, and more debug type information increase the log level to 2 and run the command again as follows: - -[source,terminal] ----- -$ ./ovnkube-trace \ - -src-namespace default \ - -src web \ - -dst-namespace openshift-dns \ - -dst dns-default-467qw \ - -udp -dst-port 53 \ - -loglevel 2 ----- - -The output from this increased log level is too much to list here. In a failure situation the output of this command shows which flow is dropping that traffic. For example an egress or ingress network policy may be configured on the cluster that does not allow that traffic. - -.Example: Verifying by using debug output a configured default deny - -This example illustrates how to identify by using the debug output that an ingress default deny policy blocks traffic. - -.Procedure - -. Create the following YAML that defines a `deny-by-default` policy to deny ingress from all pods in all namespaces. Save the YAML in the `deny-by-default.yaml` file: -+ -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: deny-by-default - namespace: default -spec: - podSelector: {} - ingress: [] ----- - -. Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f deny-by-default.yaml ----- -+ -.Example output -[source,terminal] ----- -networkpolicy.networking.k8s.io/deny-by-default created ----- - -. Start a web service in the `default` namespace by entering the following command: -+ -[source,terminal] ----- -$ oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 ----- - -. Run the following command to create the `prod` namespace: -+ -[source,terminal] ----- -$ oc create namespace prod ----- - -. Run the following command to label the `prod` namespace: -+ -[source,terminal] ----- -$ oc label namespace/prod purpose=production ----- - -. Run the following command to deploy an `alpine` image in the `prod` namespace and start a shell: -+ -[source,terminal] ----- -$ oc run test-6459 --namespace=prod --rm -i -t --image=alpine -- sh ----- - -. Open another terminal session. - -. In this new terminal session run `ovn-trace` to verify the failure in communication between the source pod `test-6459` running in namespace `prod` and destination pod running in the `default` namespace: -+ -[source,terminal] ----- -$ ./ovnkube-trace \ - -src-namespace prod \ - -src test-6459 \ - -dst-namespace default \ - -dst web \ - -tcp -dst-port 80 \ - -loglevel 0 ----- -+ -.Expected output - -[source,terminal] ----- -I0116 14:20:47.380775 50822 ovs.go:90] Maximum command line arguments set to: 191102 -ovn-trace source pod to destination pod indicates failure from test-6459 to web ----- - -. Increase the log level to 2 to expose the reason for the failure by running the following command: -+ -[source,terminal] ----- -$ ./ovnkube-trace \ - -src-namespace prod \ - -src test-6459 \ - -dst-namespace default \ - -dst web \ - -tcp -dst-port 80 \ - -loglevel 2 ----- -+ -.Expected output - -[source,terminal] ----- -ct_lb_mark /* default (use --ct to customize) */ ------------------------------------------------- - 3. ls_out_acl_hint (northd.c:6092): !ct.new && ct.est && !ct.rpl && ct_mark.blocked == 0, priority 4, uuid 32d45ad4 - reg0[8] = 1; - reg0[10] = 1; - next; - 4. 
ls_out_acl (northd.c:6435): reg0[10] == 1 && (outport == @a16982411286042166782_ingressDefaultDeny), priority 2000, uuid f730a887 <1> - ct_commit { ct_mark.blocked = 1; }; ----- -+ -<1> Ingress traffic is blocked due to the default deny policy being in place - -. Create a policy that allows traffic from all pods in a particular namespaces with a label `purpose=production`. Save the YAML in the `web-allow-prod.yaml` file: -+ -[source,terminal] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: web-allow-prod - namespace: default -spec: - podSelector: - matchLabels: - app: web - policyTypes: - - Ingress - ingress: - - from: - - namespaceSelector: - matchLabels: - purpose: production ----- - -. Apply the policy by entering the following command: -+ -[source,terminal] ----- -$ oc apply -f web-allow-prod.yaml ----- - -. Run `ovnkube-trace` to verify that traffic is now allowed by entering the following command: -+ -[source,terminal] ----- -$ ./ovnkube-trace \ - -src-namespace prod \ - -src test-6459 \ - -dst-namespace default \ - -dst web \ - -tcp -dst-port 80 \ - -loglevel 0 ----- -+ -.Expected output -[source,terminal] ----- -I0116 14:25:44.055207 51695 ovs.go:90] Maximum command line arguments set to: 191102 -ovn-trace source pod to destination pod indicates success from test-6459 to web -ovn-trace destination pod to source pod indicates success from web to test-6459 -ovs-appctl ofproto/trace source pod to destination pod indicates success from test-6459 to web -ovs-appctl ofproto/trace destination pod to source pod indicates success from web to test-6459 -ovn-detrace source pod to destination pod indicates success from test-6459 to web -ovn-detrace destination pod to source pod indicates success from web to test-6459 ----- - -. In the open shell run the following command: -+ -[source,terminal] ----- - wget -qO- --timeout=2 http://web.default ----- -+ -.Expected output - -[source,terminal] ----- - - - -Welcome to nginx! - - - -

-<h1>Welcome to nginx!</h1>
-<p>If you see this page, the nginx web server is successfully installed and
-working. Further configuration is required.</p>
-
-<p>For online documentation and support please refer to
-<a href="http://nginx.org/">nginx.org</a>.<br/>
-Commercial support is available at
-<a href="http://nginx.com/">nginx.com</a>.</p>
-
-<p><em>Thank you for using nginx.</em></p>
-</body>
-</html>
- - ----- \ No newline at end of file diff --git a/modules/nw-ovn-kubernetes-session-affinity.adoc b/modules/nw-ovn-kubernetes-session-affinity.adoc deleted file mode 100644 index f1a1f5a669f8..000000000000 --- a/modules/nw-ovn-kubernetes-session-affinity.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/ovn_kubernetes_network_provider/about-ovn-kubernetes.adoc - -:_content-type: CONCEPT -[id="nw-ovn-kubernetes-session-affinity_{context}"] -= Session affinity -Session affinity is a feature that applies to Kubernetes `Service` objects. You can use _session affinity_ if you want to ensure that each time you connect to a :, the traffic is always load balanced to the same back end. For more information, including how to set session affinity based on a client's IP address, see link:https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity[Session affinity]. - -[discrete] -[id="nw-ovn-kubernetes-session-affinity-stickyness-timeout_{context}"] -== Stickiness timeout for session affinity -The OVN-Kubernetes network plugin for {product-title} calculates the stickiness timeout for a session from a client based on the last packet. For example, if you run a `curl` command 10 times, the sticky session timer starts from the tenth packet not the first. As a result, if the client is continuously contacting the service, then the session never times out. The timeout starts when the service has not received a packet for the amount of time set by the link:https://kubernetes.io/docs/reference/networking/virtual-ips/#session-stickiness-timeout[`timeoutSeconds`] parameter. diff --git a/modules/nw-path-based-routes.adoc b/modules/nw-path-based-routes.adoc deleted file mode 100644 index 95cf147a6d7e..000000000000 --- a/modules/nw-path-based-routes.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module filename: nw-path-based-routes.adoc -// Module included in the following assemblies: -// * networking/routes/route-configuration.adoc - -[id="nw-path-based-routes_{context}"] -= Path-based routes - -Path-based routes specify a path component that can be compared against a URL, which requires that the traffic for the route be HTTP based. Thus, multiple routes can be served using the same hostname, each with a different path. Routers should match routes based on the most specific path to the least. However, this depends on the router implementation. - -The following table shows example routes and their accessibility: - -.Route availability -[cols="3*", options="header"] -|=== -|Route | When Compared to | Accessible -.2+|_www.example.com/test_ |_www.example.com/test_|Yes -|_www.example.com_|No -.2+|_www.example.com/test_ and _www.example.com_ | _www.example.com/test_|Yes -|_www.example.com_|Yes -.2+|_www.example.com_|_www.example.com/text_|Yes (Matched by the host, not the route) -|_www.example.com_|Yes -|=== - -.An unsecured route with a path - -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: route-unsecured -spec: - host: www.example.com - path: "/test" <1> - to: - kind: Service - name: service-name ----- -<1> The path is the only added attribute for a path-based route. - -[NOTE] -==== -Path-based routing is not available when using passthrough TLS, as the router does not terminate TLS in that case and cannot read the contents of the request. 
-==== diff --git a/modules/nw-pod-network-connectivity-check-object.adoc b/modules/nw-pod-network-connectivity-check-object.adoc deleted file mode 100644 index 429832253b68..000000000000 --- a/modules/nw-pod-network-connectivity-check-object.adoc +++ /dev/null @@ -1,162 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/verifying-connectivity-endpoint.adoc - -[id="nw-pod-network-connectivity-check-object_{context}"] -= PodNetworkConnectivityCheck object fields - -The `PodNetworkConnectivityCheck` object fields are described in the following tables. - -.PodNetworkConnectivityCheck object fields -[cols="2,1,3a",options="header"] -|=== - -|Field|Type|Description - -|`metadata.name` -|`string` -|The name of the object in the following format: `-to-`. The destination described by `` includes one of following strings: - -* `load-balancer-api-external` -* `load-balancer-api-internal` -* `kubernetes-apiserver-endpoint` -* `kubernetes-apiserver-service-cluster` -* `network-check-target` -* `openshift-apiserver-endpoint` -* `openshift-apiserver-service-cluster` - -|`metadata.namespace` -|`string` -|The namespace that the object is associated with. This value is always `openshift-network-diagnostics`. - -|`spec.sourcePod` -|`string` -|The name of the pod where the connection check originates, such as `network-check-source-596b4c6566-rgh92`. - -|`spec.targetEndpoint` -|`string` -|The target of the connection check, such as `api.devcluster.example.com:6443`. - -|`spec.tlsClientCert` -|`object` -|Configuration for the TLS certificate to use. - -|`spec.tlsClientCert.name` -|`string` -|The name of the TLS certificate used, if any. The default value is an empty string. - -|`status` -|`object` -|An object representing the condition of the connection test and logs of recent connection successes and failures. - -|`status.conditions` -|`array` -|The latest status of the connection check and any previous statuses. - -|`status.failures` -|`array` -|Connection test logs from unsuccessful attempts. - -|`status.outages` -|`array` -|Connect test logs covering the time periods of any outages. - -|`status.successes` -|`array` -|Connection test logs from successful attempts. - -|=== - -The following table describes the fields for objects in the `status.conditions` array: - -.status.conditions -[cols="2,1,3",options="header"] -|=== -|Field |Type |Description - -|`lastTransitionTime` -|`string` -|The time that the condition of the connection transitioned from one status to another. - -|`message` -|`string` -|The details about last transition in a human readable format. - -|`reason` -|`string` -|The last status of the transition in a machine readable format. - -|`status` -|`string` -|The status of the condition. - -|`type` -|`string` -|The type of the condition. - -|=== - -The following table describes the fields for objects in the `status.conditions` array: - -.status.outages -[cols="2,1,3",options="header"] -|=== -|Field |Type |Description - -|`end` -|`string` -|The timestamp from when the connection failure is resolved. - -|`endLogs` -|`array` -|Connection log entries, including the log entry related to the successful end of the outage. - -|`message` -|`string` -|A summary of outage details in a human readable format. - -|`start` -|`string` -|The timestamp from when the connection failure is first detected. - -|`startLogs` -|`array` -|Connection log entries, including the original failure. 
- -|=== - -[discrete] -== Connection log fields - -The fields for a connection log entry are described in the following table. The object is used in the following fields: - -* `status.failures[]` -* `status.successes[]` -* `status.outages[].startLogs[]` -* `status.outages[].endLogs[]` - -.Connection log object -[cols="2,1,3",options="header"] -|=== -|Field |Type |Description - -|`latency` -|`string` -|Records the duration of the action. - -|`message` -|`string` -|Provides the status in a human readable format. - -|`reason` -|`string` -|Provides the reason for status in a machine readable format. The value is one of `TCPConnect`, `TCPConnectError`, `DNSResolve`, `DNSError`. - -|`success` -|`boolean` -|Indicates if the log entry is a success or failure. - -|`time` -|`string` -|The start time of connection check. -|=== diff --git a/modules/nw-pod-network-connectivity-checks.adoc b/modules/nw-pod-network-connectivity-checks.adoc deleted file mode 100644 index 26909fffd0c4..000000000000 --- a/modules/nw-pod-network-connectivity-checks.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/verifying-connectivity-endpoint.adoc - -[id="nw-pod-network-connectivity-checks_{context}"] -= Connection health checks performed - -To verify that cluster resources are reachable, a TCP connection is made to each of the following cluster API services: - -* Kubernetes API server service -* Kubernetes API server endpoints -* OpenShift API server service -* OpenShift API server endpoints -* Load balancers - -To verify that services and service endpoints are reachable on every node in the cluster, a TCP connection is made to each of the following targets: - -* Health check target service -* Health check target endpoints diff --git a/modules/nw-pod-network-connectivity-implementation.adoc b/modules/nw-pod-network-connectivity-implementation.adoc deleted file mode 100644 index 66c86af6a9d3..000000000000 --- a/modules/nw-pod-network-connectivity-implementation.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/verifying-connectivity-endpoint.adoc - -[id="nw-pod-network-connectivity-implementation_{context}"] -= Implementation of connection health checks - -The connectivity check controller orchestrates connection verification checks in your cluster. The results for the connection tests are stored in `PodNetworkConnectivity` objects in the `openshift-network-diagnostics` namespace. Connection tests are performed every minute in parallel. - -The Cluster Network Operator (CNO) deploys several resources to the cluster to send and receive connectivity health checks: - -Health check source:: This program deploys in a single pod replica set managed by a `Deployment` object. The program consumes `PodNetworkConnectivity` objects and connects to the `spec.targetEndpoint` specified in each object. - -Health check target:: A pod deployed as part of a daemon set on every node in the cluster. The pod listens for inbound health checks. The presence of this pod on every node allows for the testing of connectivity to each node. 
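To see these components in a running cluster, you can list the workloads and check objects that the Cluster Network Operator creates in the `openshift-network-diagnostics` namespace. This is an illustrative sketch rather than part of the documented procedure; the exact pod and object names vary from cluster to cluster.

[source,bash]
----
#!/bin/bash
# Sketch: inspect the connectivity check machinery deployed by the CNO.
# Assumes `oc` is logged in as a user with the cluster-admin role.

# List the health check source and target workloads and their pods.
oc get deployments,daemonsets,pods -n openshift-network-diagnostics -o wide

# Count the generated check objects; each records one source-to-target pair.
oc get podnetworkconnectivitycheck -n openshift-network-diagnostics --no-headers | wc -l
----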
diff --git a/modules/nw-pod-network-connectivity-verify.adoc b/modules/nw-pod-network-connectivity-verify.adoc deleted file mode 100644 index c70d2d42dae0..000000000000 --- a/modules/nw-pod-network-connectivity-verify.adoc +++ /dev/null @@ -1,206 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/verifying-connectivity-endpoint.adoc - -:_content-type: PROCEDURE -[id="nw-pod-network-connectivity-verify_{context}"] -= Verifying network connectivity for an endpoint - -As a cluster administrator, you can verify the connectivity of an endpoint, such as an API server, load balancer, service, or pod. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. To list the current `PodNetworkConnectivityCheck` objects, enter the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitycheck -n openshift-network-diagnostics ----- -+ -.Example output -[source,terminal] ----- -NAME                                                                                                                                AGE -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0   75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-1   73m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-2   75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-apiserver-service-cluster                               75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-default-service-cluster                                 75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-load-balancer-api-external                                         75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-load-balancer-api-internal                                         75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-master-0            75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-master-1            75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-master-2            75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh      74m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-worker-c-n8mbf      74m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-ci-ln-x5sv9rb-f76d1-4rzrp-worker-d-4hnrz      74m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-network-check-target-service-cluster                               75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-openshift-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0    75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-openshift-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-1    75m -network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-openshift-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-2    74m 
-network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-openshift-apiserver-service-cluster                                75m ----- - -. View the connection test logs: -.. From the output of the previous command, identify the endpoint that you want to review the connectivity logs for. -.. To view the object, enter the following command: -+ -[source,terminal] ----- -$ oc get podnetworkconnectivitycheck \ - -n openshift-network-diagnostics -o yaml ----- -+ -where `` specifies the name of the `PodNetworkConnectivityCheck` object. -+ -.Example output -[source,terminal] ----- -apiVersion: controlplane.operator.openshift.io/v1alpha1 -kind: PodNetworkConnectivityCheck -metadata: -  name: network-check-source-ci-ln-x5sv9rb-f76d1-4rzrp-worker-b-6xdmh-to-kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0 -  namespace: openshift-network-diagnostics - ... -spec: -  sourcePod: network-check-source-7c88f6d9f-hmg2f -  targetEndpoint: 10.0.0.4:6443 -  tlsClientCert: -    name: "" -status: -  conditions: -  - lastTransitionTime: "2021-01-13T20:11:34Z" -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnectSuccess -    status: "True" -    type: Reachable -  failures: -  - latency: 2.241775ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: failed -      to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: connect: -      connection refused' -    reason: TCPConnectError -    success: false -    time: "2021-01-13T20:10:34Z" -  - latency: 2.582129ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: failed -      to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: connect: -      connection refused' -    reason: TCPConnectError -    success: false -    time: "2021-01-13T20:09:34Z" -  - latency: 3.483578ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: failed -      to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: connect: -      connection refused' -    reason: TCPConnectError -    success: false -    time: "2021-01-13T20:08:34Z" -  outages: -  - end: "2021-01-13T20:11:34Z" -    endLogs: -    - latency: 2.032018ms -      message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: -        tcp connection to 10.0.0.4:6443 succeeded' -      reason: TCPConnect -      success: true -      time: "2021-01-13T20:11:34Z" -    - latency: 2.241775ms -      message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: -        failed to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: -        connect: connection refused' -      reason: TCPConnectError -      success: false -      time: "2021-01-13T20:10:34Z" -    - latency: 2.582129ms -      message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: -        failed to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: -        connect: connection refused' -      reason: TCPConnectError -      success: false -      time: "2021-01-13T20:09:34Z" -    - latency: 3.483578ms -      message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: -        failed to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: -        connect: connection refused' -      reason: TCPConnectError -      success: false -      time: "2021-01-13T20:08:34Z" -    message: Connectivity restored after 
2m59.999789186s -    start: "2021-01-13T20:08:34Z" -    startLogs: -    - latency: 3.483578ms -      message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: -        failed to establish a TCP connection to 10.0.0.4:6443: dial tcp 10.0.0.4:6443: -        connect: connection refused' -      reason: TCPConnectError -      success: false -      time: "2021-01-13T20:08:34Z" -  successes: -  - latency: 2.845865ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:14:34Z" -  - latency: 2.926345ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:13:34Z" -  - latency: 2.895796ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:12:34Z" -  - latency: 2.696844ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:11:34Z" -  - latency: 1.502064ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:10:34Z" -  - latency: 1.388857ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:09:34Z" -  - latency: 1.906383ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:08:34Z" -  - latency: 2.089073ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:07:34Z" -  - latency: 2.156994ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:06:34Z" -  - latency: 1.777043ms -    message: 'kubernetes-apiserver-endpoint-ci-ln-x5sv9rb-f76d1-4rzrp-master-0: tcp -      connection to 10.0.0.4:6443 succeeded' -    reason: TCPConnect -    success: true -    time: "2021-01-13T21:05:34Z" ----- diff --git a/modules/nw-proxy-configure-object.adoc b/modules/nw-proxy-configure-object.adoc deleted file mode 100644 index 656153fa2844..000000000000 --- a/modules/nw-proxy-configure-object.adoc +++ /dev/null @@ -1,111 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-a-custom-pki.adoc -// * networking/enable-cluster-wide-proxy.adoc -// * post_installation_configuration/network-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-proxy-configure-object_{context}"] -= Enabling the cluster-wide proxy - -The `Proxy` object is used to manage the cluster-wide egress proxy. When a cluster is installed or upgraded without the proxy configured, a `Proxy` object is still generated but it will have a nil `spec`. 
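Before making any changes, it can be helpful to dump the current `cluster` `Proxy` object. The commands below are a quick illustrative check, not part of the original procedure; they assume the `oc` CLI is logged in with cluster administrator permissions. On a cluster where no proxy has been configured, the returned object has an essentially empty `spec`.

[source,bash]
----
#!/bin/bash
# Sketch: inspect the cluster-wide proxy configuration.
# Assumes `oc` is logged in with cluster administrator permissions.

# Dump the full object; only the Proxy named "cluster" exists.
oc get proxy/cluster -o yaml

# Print just the configured proxy endpoints, if any.
oc get proxy/cluster -o jsonpath='{.spec.httpProxy}{"\n"}{.spec.httpsProxy}{"\n"}'
----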
For example: - -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -metadata: - name: cluster -spec: - trustedCA: - name: "" -status: ----- - -A cluster administrator can configure the proxy for {product-title} by modifying this `cluster` `Proxy` object. - -[NOTE] -==== -Only the `Proxy` object named `cluster` is supported, and no additional proxies can be created. -==== - -.Prerequisites - -* Cluster administrator permissions -* {product-title} `oc` CLI tool installed - -.Procedure - -. Create a config map that contains any additional CA certificates required for proxying HTTPS connections. -+ -[NOTE] -==== -You can skip this step if the proxy's identity certificate is signed by an authority from the RHCOS trust bundle. -==== - -.. Create a file called `user-ca-bundle.yaml` with the following contents, and provide the values of your PEM-encoded certificates: -+ -[source,yaml] ----- -apiVersion: v1 -data: - ca-bundle.crt: | <1> - <2> -kind: ConfigMap -metadata: - name: user-ca-bundle <3> - namespace: openshift-config <4> ----- -<1> This data key must be named `ca-bundle.crt`. -<2> One or more PEM-encoded X.509 certificates used to sign the proxy's -identity certificate. -<3> The config map name that will be referenced from the `Proxy` object. -<4> The config map must be in the `openshift-config` namespace. - -.. Create the config map from this file: -+ -[source,terminal] ----- -$ oc create -f user-ca-bundle.yaml ----- - -. Use the `oc edit` command to modify the `Proxy` object: -+ -[source,terminal] ----- -$ oc edit proxy/cluster ----- - -. Configure the necessary fields for the proxy: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -metadata: - name: cluster -spec: - httpProxy: http://:@: <1> - httpsProxy: https://:@: <2> - noProxy: example.com <3> - readinessEndpoints: - - http://www.google.com <4> - - https://www.google.com - trustedCA: - name: user-ca-bundle <5> ----- -+ --- -<1> A proxy URL to use for creating HTTP connections outside the cluster. The URL scheme must be `http`. -<2> A proxy URL to use for creating HTTPS connections outside the cluster. The URL scheme must be either `http` or `https`. Specify a URL for the proxy that supports the URL scheme. For example, most proxies will report an error if they are configured to use `https` but they only support `http`. This failure message may not propagate to the logs and can appear to be a network connection failure instead. If using a proxy that listens for `https` connections from the cluster, you may need to configure the cluster to accept the CAs and certificates that the proxy uses. -<3> A comma-separated list of destination domain names, domains, IP addresses or other network CIDRs to exclude proxying. -+ -Preface a domain with `.` to match subdomains only. For example, `.y.com` matches `x.y.com`, but not `y.com`. Use `*` to bypass proxy for all destinations. -If you scale up workers that are not included in the network defined by the `networking.machineNetwork[].cidr` field from the installation configuration, you must add them to this list to prevent connection issues. -+ -This field is ignored if neither the `httpProxy` or `httpsProxy` fields are set. -<4> One or more URLs external to the cluster to use to perform a readiness check before writing the `httpProxy` and `httpsProxy` values to status. -<5> A reference to the config map in the `openshift-config` namespace that contains additional CA certificates required for proxying HTTPS connections. 
Note that the config map must already exist before referencing it here. This field is required unless the proxy's identity certificate is signed by an authority from the RHCOS trust bundle. --- - -. Save the file to apply the changes. diff --git a/modules/nw-proxy-remove.adoc b/modules/nw-proxy-remove.adoc deleted file mode 100644 index de59671acf90..000000000000 --- a/modules/nw-proxy-remove.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/enable-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="nw-proxy-remove_{context}"] -= Removing the cluster-wide proxy - -The `cluster` Proxy object cannot be deleted. To remove the proxy from a cluster, remove all `spec` fields from the Proxy object. - -.Prerequisites - -* Cluster administrator permissions -* {product-title} `oc` CLI tool installed - -.Procedure - -. Use the `oc edit` command to modify the proxy: -+ -[source,terminal] ----- -$ oc edit proxy/cluster ----- - -. Remove all `spec` fields from the Proxy object. For example: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Proxy -metadata: - name: cluster -spec: {} ----- - -. Save the file to apply the changes. \ No newline at end of file diff --git a/modules/nw-ptp-configuring-linuxptp-services-as-boundary-clock.adoc b/modules/nw-ptp-configuring-linuxptp-services-as-boundary-clock.adoc deleted file mode 100644 index e9e748c7541a..000000000000 --- a/modules/nw-ptp-configuring-linuxptp-services-as-boundary-clock.adoc +++ /dev/null @@ -1,272 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="configuring-linuxptp-services-as-boundary-clock_{context}"] -= Configuring linuxptp services as a boundary clock - -You can configure the `linuxptp` services (`ptp4l`, `phc2sys`) as boundary clock by creating a `PtpConfig` custom resource (CR) object. - -[NOTE] -==== -Use the following example `PtpConfig` CR as the basis to configure `linuxptp` services as the boundary clock for your particular hardware and environment. -This example CR does not configure PTP fast events. To configure PTP fast events, set appropriate values for `ptp4lOpts`, `ptp4lConf`, and `ptpClockThreshold`. -`ptpClockThreshold` is used only when events are enabled. -See "Configuring the PTP fast event notifications publisher" for more information. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the PTP Operator. - -.Procedure - -. Create the following `PtpConfig` CR, and then save the YAML in the `boundary-clock-ptp-config.yaml` file. 
-+ -.Recommended PTP boundary clock configuration -[source,yaml] ----- ---- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: boundary-clock-ptp-config - namespace: openshift-ptp -spec: - profile: - - name: boundary-clock - phc2sysOpts: "-a -r -n 24" - ptp4lOpts: "-2" - ptpSchedulingPolicy: SCHED_FIFO - ptpSchedulingPriority: 10 - ptp4lConf: | - [] - masterOnly 0 - [] - masterOnly 1 - [] - masterOnly 1 - [] - masterOnly 1 - [global] - # - # Default Data Set - # - twoStepFlag 1 - slaveOnly 0 - priority1 128 - priority2 128 - domainNumber 24 - clockClass 248 - clockAccuracy 0xFE - offsetScaledLogVariance 0xFFFF - free_running 0 - freq_est_interval 1 - dscp_event 0 - dscp_general 0 - dataset_comparison G.8275.x - G.8275.defaultDS.localPriority 128 - # - # Port Data Set - # - logAnnounceInterval -3 - logSyncInterval -4 - logMinDelayReqInterval -4 - logMinPdelayReqInterval -4 - announceReceiptTimeout 3 - syncReceiptTimeout 0 - delayAsymmetry 0 - fault_reset_interval 4 - neighborPropDelayThresh 20000000 - masterOnly 0 - G.8275.portDS.localPriority 128 - # - # Run time options - # - assume_two_step 0 - logging_level 6 - path_trace_enabled 0 - follow_up_info 0 - hybrid_e2e 0 - inhibit_multicast_service 0 - net_sync_monitor 0 - tc_spanning_tree 0 - tx_timestamp_timeout 50 - unicast_listen 0 - unicast_master_table 0 - unicast_req_duration 3600 - use_syslog 1 - verbose 0 - summary_interval 0 - kernel_leap 1 - check_fup_sync 0 - # - # Servo Options - # - pi_proportional_const 0.0 - pi_integral_const 0.0 - pi_proportional_scale 0.0 - pi_proportional_exponent -0.3 - pi_proportional_norm_max 0.7 - pi_integral_scale 0.0 - pi_integral_exponent 0.4 - pi_integral_norm_max 0.3 - step_threshold 2.0 - first_step_threshold 0.00002 - max_frequency 900000000 - clock_servo pi - sanity_freq_limit 200000000 - ntpshm_segment 0 - # - # Transport options - # - transportSpecific 0x0 - ptp_dst_mac 01:1B:19:00:00:00 - p2p_dst_mac 01:80:C2:00:00:0E - udp_ttl 1 - udp6_scope 0x0E - uds_address /var/run/ptp4l - # - # Default interface options - # - clock_type BC - network_transport L2 - delay_mechanism E2E - time_stamping hardware - tsproc_mode filter - delay_filter moving_median - delay_filter_length 10 - egressLatency 0 - ingressLatency 0 - boundary_clock_jbod 0 - # - # Clock description - # - productDescription ;; - revisionData ;; - manufacturerIdentity 00:00:00 - userDescription ; - timeSource 0xA0 - recommend: - - profile: boundary-clock - priority: 4 - match: - - nodeLabel: node-role.kubernetes.io/master - nodeName: ----- -+ -.PTP boundary clock CR configuration options -[cols="1,3" options="header"] -|==== -|Custom resource field -|Description - -|`name` -|The name of the `PtpConfig` CR. - -|`profile` -|Specify an array of one or more `profile` objects. - -|`name` -|Specify the name of a profile object which uniquely identifies a profile object. - -|`ptp4lOpts` -|Specify system config options for the `ptp4l` service. The options should not include the network interface name `-i ` and service config file `-f /etc/ptp4l.conf` because the network interface name and the service config file are automatically appended. - -|`ptp4lConf` -|Specify the required configuration to start `ptp4l` as boundary clock. For example, `ens1f0` synchronizes from a grandmaster clock and `ens1f3` synchronizes connected devices. - -|`` -|The interface that receives the synchronization clock. - -|`` -|The interface that sends the synchronization clock. 
- -|`tx_timestamp_timeout` -|For Intel Columbiaville 800 Series NICs, set `tx_timestamp_timeout` to `50`. - -|`boundary_clock_jbod` -|For Intel Columbiaville 800 Series NICs, ensure `boundary_clock_jbod` is set to `0`. For Intel Fortville X710 Series NICs, ensure `boundary_clock_jbod` is set to `1`. - -|`phc2sysOpts` -|Specify system config options for the `phc2sys` service. If this field is empty, the PTP Operator does not start the `phc2sys` service. - -|`ptpSchedulingPolicy` -|Scheduling policy for ptp4l and phc2sys processes. Default value is `SCHED_OTHER`. Use `SCHED_FIFO` on systems that support FIFO scheduling. - -|`ptpSchedulingPriority` -|Integer value from 1-65 used to set FIFO priority for `ptp4l` and `phc2sys` processes when `ptpSchedulingPolicy` is set to `SCHED_FIFO`. The `ptpSchedulingPriority` field is not used when `ptpSchedulingPolicy` is set to `SCHED_OTHER`. - -|`ptpClockThreshold` -|Optional. If `ptpClockThreshold` is not present, default values are used for the `ptpClockThreshold` fields. `ptpClockThreshold` configures how long after the PTP master clock is disconnected before PTP events are triggered. `holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. - -|`recommend` -|Specify an array of one or more `recommend` objects that define rules on how the `profile` should be applied to nodes. - -|`.recommend.profile` -|Specify the `.recommend.profile` object name defined in the `profile` section. - -|`.recommend.priority` -|Specify the `priority` with an integer value between `0` and `99`. A larger number gets lower priority, so a priority of `99` is lower than a priority of `10`. If a node can be matched with multiple profiles according to rules defined in the `match` field, the profile with the higher priority is applied to that node. - -|`.recommend.match` -|Specify `.recommend.match` rules with `nodeLabel` or `nodeName`. - -|`.recommend.match.nodeLabel` -|Update `nodeLabel` with the `key` of `node.Labels` from the node object by using the `oc get nodes --show-labels` command. For example: `node-role.kubernetes.io/worker`. - -|`.recommend.match.nodeLabel` -|Update `nodeName` with value of `node.Name` from the node object by using the `oc get nodes` command. For example: `compute-0.example.com`. -|==== - -. Create the CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f boundary-clock-ptp-config.yaml ----- - -.Verification - -. Check that the `PtpConfig` profile is applied to the node. - -.. Get the list of pods in the `openshift-ptp` namespace by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-4xkbb 1/1 Running 0 43m 10.1.196.24 compute-0.example.com -linuxptp-daemon-tdspf 1/1 Running 0 43m 10.1.196.25 compute-1.example.com -ptp-operator-657bbb64c8-2f8sj 1/1 Running 0 43m 10.129.0.61 control-plane-1.example.com ----- - -.. Check that the profile is correct. 
Examine the logs of the `linuxptp` daemon that corresponds to the node you specified in the `PtpConfig` profile. Run the following command: -+ -[source,terminal] ----- -$ oc logs linuxptp-daemon-4xkbb -n openshift-ptp -c linuxptp-daemon-container ----- -+ -.Example output -[source,terminal] ----- -I1115 09:41:17.117596 4143292 daemon.go:107] in applyNodePTPProfile -I1115 09:41:17.117604 4143292 daemon.go:109] updating NodePTPProfile to: -I1115 09:41:17.117607 4143292 daemon.go:110] ------------------------------------ -I1115 09:41:17.117612 4143292 daemon.go:102] Profile Name: profile1 -I1115 09:41:17.117616 4143292 daemon.go:102] Interface: -I1115 09:41:17.117620 4143292 daemon.go:102] Ptp4lOpts: -2 -I1115 09:41:17.117623 4143292 daemon.go:102] Phc2sysOpts: -a -r -n 24 -I1115 09:41:17.117626 4143292 daemon.go:116] ------------------------------------ ----- diff --git a/modules/nw-ptp-configuring-linuxptp-services-as-grandmaster-clock.adoc b/modules/nw-ptp-configuring-linuxptp-services-as-grandmaster-clock.adoc deleted file mode 100644 index d8d1f7833833..000000000000 --- a/modules/nw-ptp-configuring-linuxptp-services-as-grandmaster-clock.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="configuring-linuxptp-services-as-grandmaster-clock_{context}"] -= Configuring linuxptp services as a grandmaster clock - -You can configure the `linuxptp` services (`ptp4l`, `phc2sys`, `ts2phc`) as grandmaster clock by creating a `PtpConfig` custom resource (CR) that configures the host NIC. - -The `ts2phc` utility allows you to synchronize the system clock with the PTP grandmaster clock so that the node can stream precision clock signal to downstream PTP ordinary clocks and boundary clocks. - -[NOTE] -==== -Use the following example `PtpConfig` CR as the basis to configure `linuxptp` services as the grandmaster clock for your particular hardware and environment. -This example CR does not configure PTP fast events. To configure PTP fast events, set appropriate values for `ptp4lOpts`, `ptp4lConf`, and `ptpClockThreshold`. -`ptpClockThreshold` is used only when events are enabled. -See "Configuring the PTP fast event notifications publisher" for more information. -==== - -.Prerequisites - -* Install an Intel Westport Channel network interface in the bare-metal cluster host. - -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Install the PTP Operator. - -.Procedure - -. Create the `PtpConfig` resource. For example: - -.. Save the following YAML in the `grandmaster-clock-ptp-config.yaml` file: -+ -include::snippets/grandmaster-clock-ptp-config.adoc[] - -.. Create the CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f grandmaster-clock-ptp-config.yaml ----- - -.Verification - -. Check that the `PtpConfig` profile is applied to the node. - -.. Get the list of pods in the `openshift-ptp` namespace by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-74m2g 3/3 Running 3 4d15h 10.16.230.7 compute-1.example.com -ptp-operator-5f4f48d7c-x7zkf 1/1 Running 1 4d15h 10.128.1.145 compute-1.example.com ----- - -.. Check that the profile is correct. Examine the logs of the `linuxptp` daemon that corresponds to the node you specified in the `PtpConfig` profile. 
-Run the following command: -+ -[source,terminal] ----- -$ oc logs linuxptp-daemon-74m2g -n openshift-ptp -c linuxptp-daemon-container ----- -+ -.Example output -[source,terminal] ----- -ts2phc[94980.334]: [ts2phc.0.config] nmea delay: 98690975 ns -ts2phc[94980.334]: [ts2phc.0.config] ens3f0 extts index 0 at 1676577329.999999999 corr 0 src 1676577330.901342528 diff -1 -ts2phc[94980.334]: [ts2phc.0.config] ens3f0 master offset -1 s2 freq -1 -ts2phc[94980.441]: [ts2phc.0.config] nmea sentence: GNRMC,195453.00,A,4233.24427,N,07126.64420,W,0.008,,160223,,,A,V -phc2sys[94980.450]: [ptp4l.0.config] CLOCK_REALTIME phc offset 943 s2 freq -89604 delay 504 -phc2sys[94980.512]: [ptp4l.0.config] CLOCK_REALTIME phc offset 1000 s2 freq -89264 delay 474 ----- diff --git a/modules/nw-ptp-configuring-linuxptp-services-as-ordinary-clock.adoc b/modules/nw-ptp-configuring-linuxptp-services-as-ordinary-clock.adoc deleted file mode 100644 index 3687d309b47f..000000000000 --- a/modules/nw-ptp-configuring-linuxptp-services-as-ordinary-clock.adoc +++ /dev/null @@ -1,257 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="configuring-linuxptp-services-as-ordinary-clock_{context}"] -= Configuring linuxptp services as an ordinary clock - -You can configure `linuxptp` services (`ptp4l`, `phc2sys`) as ordinary clock by creating a `PtpConfig` custom resource (CR) object. - -[NOTE] -==== -Use the following example `PtpConfig` CR as the basis to configure `linuxptp` services as an ordinary clock for your particular hardware and environment. -This example CR does not configure PTP fast events. -To configure PTP fast events, set appropriate values for `ptp4lOpts`, `ptp4lConf`, and `ptpClockThreshold`. `ptpClockThreshold` is required only when events are enabled. -See "Configuring the PTP fast event notifications publisher" for more information. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* Install the PTP Operator. - -.Procedure - -. Create the following `PtpConfig` CR, and then save the YAML in the `ordinary-clock-ptp-config.yaml` file. 
-+ -[[ptp-ordinary-clock]] -.Recommended PTP ordinary clock configuration -[source,yaml] ----- -apiVersion: ptp.openshift.io/v1 -kind: PtpConfig -metadata: - name: ordinary-clock-ptp-config - namespace: openshift-ptp - spec: - profile: - - name: ordinary-clock - interface: "" - phc2sysOpts: "-a -r -n 24" - ptp4lOpts: "-2 -s" - ptpSchedulingPolicy: SCHED_FIFO - ptpSchedulingPriority: 10 - ptp4lConf: | - [global] - # - # Default Data Set - # - twoStepFlag 1 - slaveOnly 1 - priority1 128 - priority2 128 - domainNumber 24 - clockClass 255 - clockAccuracy 0xFE - offsetScaledLogVariance 0xFFFF - free_running 0 - freq_est_interval 1 - dscp_event 0 - dscp_general 0 - dataset_comparison G.8275.x - G.8275.defaultDS.localPriority 128 - # - # Port Data Set - # - logAnnounceInterval -3 - logSyncInterval -4 - logMinDelayReqInterval -4 - logMinPdelayReqInterval -4 - announceReceiptTimeout 3 - syncReceiptTimeout 0 - delayAsymmetry 0 - fault_reset_interval 4 - neighborPropDelayThresh 20000000 - masterOnly 0 - G.8275.portDS.localPriority 128 - # - # Run time options - # - assume_two_step 0 - logging_level 6 - path_trace_enabled 0 - follow_up_info 0 - hybrid_e2e 0 - inhibit_multicast_service 0 - net_sync_monitor 0 - tc_spanning_tree 0 - tx_timestamp_timeout 50 - unicast_listen 0 - unicast_master_table 0 - unicast_req_duration 3600 - use_syslog 1 - verbose 0 - summary_interval 0 - kernel_leap 1 - check_fup_sync 0 - # - # Servo Options - # - pi_proportional_const 0.0 - pi_integral_const 0.0 - pi_proportional_scale 0.0 - pi_proportional_exponent -0.3 - pi_proportional_norm_max 0.7 - pi_integral_scale 0.0 - pi_integral_exponent 0.4 - pi_integral_norm_max 0.3 - step_threshold 2.0 - first_step_threshold 0.00002 - max_frequency 900000000 - clock_servo pi - sanity_freq_limit 200000000 - ntpshm_segment 0 - # - # Transport options - # - transportSpecific 0x0 - ptp_dst_mac 01:1B:19:00:00:00 - p2p_dst_mac 01:80:C2:00:00:0E - udp_ttl 1 - udp6_scope 0x0E - uds_address /var/run/ptp4l - # - # Default interface options - # - clock_type OC - network_transport L2 - delay_mechanism E2E - time_stamping hardware - tsproc_mode filter - delay_filter moving_median - delay_filter_length 10 - egressLatency 0 - ingressLatency 0 - boundary_clock_jbod 0 - # - # Clock description - # - productDescription ;; - revisionData ;; - manufacturerIdentity 00:00:00 - userDescription ; - timeSource 0xA0 - recommend: - - profile: ordinary-clock - priority: 4 - match: - - nodeLabel: "node-role.kubernetes.io/worker" - nodeName: "" ----- -+ -.PTP ordinary clock CR configuration options -[cols="1,3" options="header"] -|==== -|Custom resource field -|Description - -|`name` -|The name of the `PtpConfig` CR. - -|`profile` -|Specify an array of one or more `profile` objects. Each profile must be uniquely named. - -|`interface` -|Specify the network interface to be used by the `ptp4l` service, for example `ens787f1`. - -|`ptp4lOpts` -|Specify system config options for the `ptp4l` service, for example `-2` to select the IEEE 802.3 network transport. The options should not include the network interface name `-i ` and service config file `-f /etc/ptp4l.conf` because the network interface name and the service config file are automatically appended. Append `--summary_interval -4` to use PTP fast events with this interface. - -|`phc2sysOpts` -|Specify system config options for the `phc2sys` service. If this field is empty, the PTP Operator does not start the `phc2sys` service. 
For Intel Columbiaville 800 Series NICs, set `phc2sysOpts` options to `-a -r -m -n 24 -N 8 -R 16`. `-m` prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. - -|`ptp4lConf` -|Specify a string that contains the configuration to replace the default `/etc/ptp4l.conf` file. To use the default configuration, leave the field empty. - -|`tx_timestamp_timeout` -|For Intel Columbiaville 800 Series NICs, set `tx_timestamp_timeout` to `50`. - -|`boundary_clock_jbod` -|For Intel Columbiaville 800 Series NICs, set `boundary_clock_jbod` to `0`. - -|`ptpSchedulingPolicy` -|Scheduling policy for `ptp4l` and `phc2sys` processes. Default value is `SCHED_OTHER`. Use `SCHED_FIFO` on systems that support FIFO scheduling. - -|`ptpSchedulingPriority` -|Integer value from 1-65 used to set FIFO priority for `ptp4l` and `phc2sys` processes when `ptpSchedulingPolicy` is set to `SCHED_FIFO`. The `ptpSchedulingPriority` field is not used when `ptpSchedulingPolicy` is set to `SCHED_OTHER`. - -|`ptpClockThreshold` -|Optional. If `ptpClockThreshold` is not present, default values are used for the `ptpClockThreshold` fields. `ptpClockThreshold` configures how long after the PTP master clock is disconnected before PTP events are triggered. `holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. - -|`recommend` -|Specify an array of one or more `recommend` objects that define rules on how the `profile` should be applied to nodes. - -|`.recommend.profile` -|Specify the `.recommend.profile` object name defined in the `profile` section. - -|`.recommend.priority` -|Set `.recommend.priority` to `0` for an ordinary clock. - -|`.recommend.match` -|Specify `.recommend.match` rules with `nodeLabel` or `nodeName`. - -|`.recommend.match.nodeLabel` -|Update `nodeLabel` with the `key` of `node.Labels` from the node object by using the `oc get nodes --show-labels` command. For example: `node-role.kubernetes.io/worker`. - -|`.recommend.match.nodeName` -|Update `nodeName` with the value of `node.Name` from the node object by using the `oc get nodes` command. For example: `compute-0.example.com`. -|==== - -. Create the `PtpConfig` CR by running the following command: -+ -[source,terminal] ----- -$ oc create -f ordinary-clock-ptp-config.yaml ----- - -.Verification - -. Check that the `PtpConfig` profile is applied to the node. - -.. Get the list of pods in the `openshift-ptp` namespace by running the following command: -+ -[source,terminal] ----- -$ oc get pods -n openshift-ptp -o wide ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE -linuxptp-daemon-4xkbb 1/1 Running 0 43m 10.1.196.24 compute-0.example.com -linuxptp-daemon-tdspf 1/1 Running 0 43m 10.1.196.25 compute-1.example.com -ptp-operator-657bbb64c8-2f8sj 1/1 Running 0 43m 10.129.0.61 control-plane-1.example.com ----- - -.. Check that the profile is correct. Examine the logs of the `linuxptp` daemon that corresponds to the node you specified in the `PtpConfig` profile.
Run the following command: -+ -[source,terminal] ----- -$ oc logs linuxptp-daemon-4xkbb -n openshift-ptp -c linuxptp-daemon-container ----- -+ -.Example output -[source,terminal] ----- -I1115 09:41:17.117596 4143292 daemon.go:107] in applyNodePTPProfile -I1115 09:41:17.117604 4143292 daemon.go:109] updating NodePTPProfile to: -I1115 09:41:17.117607 4143292 daemon.go:110] ------------------------------------ -I1115 09:41:17.117612 4143292 daemon.go:102] Profile Name: profile1 -I1115 09:41:17.117616 4143292 daemon.go:102] Interface: ens787f1 -I1115 09:41:17.117620 4143292 daemon.go:102] Ptp4lOpts: -2 -s -I1115 09:41:17.117623 4143292 daemon.go:102] Phc2sysOpts: -a -r -n 24 -I1115 09:41:17.117626 4143292 daemon.go:116] ------------------------------------ ----- diff --git a/modules/nw-ptp-device-discovery.adoc b/modules/nw-ptp-device-discovery.adoc deleted file mode 100644 index 8f9a54706b0d..000000000000 --- a/modules/nw-ptp-device-discovery.adoc +++ /dev/null @@ -1,43 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="discover-ptp-devices_{context}"] -= Discovering PTP capable network devices in your cluster - -* To return a complete list of PTP capable network devices in your cluster, run the following command: -+ -[source,terminal] ----- -$ oc get NodePtpDevice -n openshift-ptp -o yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: v1 -items: -- apiVersion: ptp.openshift.io/v1 - kind: NodePtpDevice - metadata: - creationTimestamp: "2022-01-27T15:16:28Z" - generation: 1 - name: dev-worker-0 <1> - namespace: openshift-ptp - resourceVersion: "6538103" - uid: d42fc9ad-bcbf-4590-b6d8-b676c642781a - spec: {} - status: - devices: <2> - - name: eno1 - - name: eno2 - - name: eno3 - - name: eno4 - - name: enp5s0f0 - - name: enp5s0f1 -... ----- -<1> The value for the `name` parameter is the same as the name of the parent node. -<2> The `devices` collection includes a list of the PTP capable devices that the PTP Operator discovers for the node. - diff --git a/modules/nw-ptp-grandmaster-clock-configuration-reference.adoc b/modules/nw-ptp-grandmaster-clock-configuration-reference.adoc deleted file mode 100644 index c21198686e4b..000000000000 --- a/modules/nw-ptp-grandmaster-clock-configuration-reference.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: REFERENCE -[id="nw-ptp-grandmaster-clock-configuration-reference_{context}"] -= Grandmaster clock PtpConfig configuration reference - -The following reference information describes the configuration options for the `PtpConfig` custom resource (CR) that configures the `linuxptp` services (`ptp4l`, `phc2sys`, `ts2phc`) as grandmaster clock. - -.PtpConfig configuration options for PTP Grandmaster clock -[cols="1,3" options="header"] -|==== -|PtpConfig CR field -|Description - -|`plugins` -|Specify an array of `.exec.cmdline` options that configure the NIC for grandmaster clock operation. Grandmaster clock configuration requires certain PTP pins to be disabled. - -The plugin mechanism allows the PTP Operator to do automated hardware configuration. -For the Intel Westport Channel NIC, when `enableDefaultConfig` is true, The PTP Operator runs a hard-coded script to do the required configuration for the NIC. - -|`ptp4lOpts` -|Specify system configuration options for the `ptp4l` service. 
-The options should not include the network interface name `-i ` and service config file `-f /etc/ptp4l.conf` because the network interface name and the service config file are automatically appended. - -|`ptp4lConf` -|Specify the required configuration to start `ptp4l` as grandmaster clock. -For example, the `ens2f1` interface synchronizes downstream connected devices. -For grandmaster clocks, set `clockClass` to `6` and set `clockAccuracy` to `0x27`. -Set `timeSource` to `0x20` for when receiving the timing signal from a Global navigation satellite system (GNSS). - -|`tx_timestamp_timeout` -|Specify the maximum amount of time to wait for the transmit (TX) timestamp from the sender before discarding the data. - -|`boundary_clock_jbod` -|Specify the JBOD boundary clock time delay value. -This value is used to correct the time values that are passed between the network time devices. - -|`phc2sysOpts` -a|Specify system config options for the `phc2sys` service. -If this field is empty the PTP Operator does not start the `phc2sys` service. -[NOTE] -==== -Ensure that the network interface listed here is configured as grandmaster and is referenced as required in the `ts2phcConf` and `ptp4lConf` fields. -==== - -|`ptpSchedulingPolicy` -|Configure the scheduling policy for `ptp4l` and `phc2sys` processes. -Default value is `SCHED_OTHER`. -Use `SCHED_FIFO` on systems that support FIFO scheduling. - -|`ptpSchedulingPriority` -|Set an integer value from 1-65 to configure FIFO priority for `ptp4l` and `phc2sys` processes when `ptpSchedulingPolicy` is set to `SCHED_FIFO`. -The `ptpSchedulingPriority` field is not used when `ptpSchedulingPolicy` is set to `SCHED_OTHER`. - -|`ptpClockThreshold` -|Optional. -If `ptpClockThreshold` stanza is not present, default values are used for `ptpClockThreshold` fields. -Stanza shows default `ptpClockThreshold` values. `ptpClockThreshold` values configure how long after the PTP master clock is disconnected before PTP events are triggered. -`holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. -The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). -When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. - -|`ts2phcConf` -a|Sets the configuration for the `ts2phc` command. - -`leapfile` is the default path to the current leap seconds definition file in the PTP Operator container image. - -`ts2phc.nmea_serialport` is the serial port device that is connected to the NMEA GPS clock source. -When configured, the GNSS receiver is accessible on `/dev/gnss`. -If the host has multiple GNSS receivers, you can find the correct device by enumerating either of the following devices: - -* `/sys/class/net//device/gnss/` -* `/sys/class/gnss/gnss/device/` - -|`ts2phcOpts` -|Set options for the `ts2phc` command. - -|`recommend` -|Specify an array of one or more `recommend` objects that define rules on how the `profile` should be applied to nodes. - -|`.recommend.profile` -|Specify the `.recommend.profile` object name that is defined in the `profile` section. - -|`.recommend.priority` -|Specify the `priority` with an integer value between `0` and `99`. 
-A larger number gets lower priority, so a priority of `99` is lower than a priority of `10`. -If a node can be matched with multiple profiles according to rules defined in the `match` field, the profile with the higher priority is applied to that node. - -|`.recommend.match` -|Specify `.recommend.match` rules with `nodeLabel` or `nodeName`. - -|`.recommend.match.nodeLabel` -|Set `nodeLabel` with the `key` of `node.Labels` from the node object by using the `oc -get nodes --show-labels` command. -For example: `node-role.kubernetes.io/worker`. - -|`.recommend.match.nodeName` -|Set `nodeName` with value of `node.Name` from the node object by using the `oc get nodes` command. -For example: `compute-1.example.com`. -|==== diff --git a/modules/nw-ptp-installing-operator-cli.adoc b/modules/nw-ptp-installing-operator-cli.adoc deleted file mode 100644 index b3acec10665a..000000000000 --- a/modules/nw-ptp-installing-operator-cli.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="install-ptp-operator-cli_{context}"] -= Installing the PTP Operator using the CLI - -As a cluster administrator, you can install the Operator by using the CLI. - -.Prerequisites - -* A cluster installed on bare-metal hardware with nodes that have hardware that supports PTP. -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the PTP Operator. - -.. Save the following YAML in the `ptp-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-ptp - annotations: - workload.openshift.io/allowed: management - labels: - name: openshift-ptp - openshift.io/cluster-monitoring: "true" ----- - -.. Create the `Namespace` CR: -+ -[source,terminal] ----- -$ oc create -f ptp-namespace.yaml ----- - -. Create an Operator group for the PTP Operator. - -.. Save the following YAML in the `ptp-operatorgroup.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: ptp-operators - namespace: openshift-ptp -spec: - targetNamespaces: - - openshift-ptp ----- - -.. Create the `OperatorGroup` CR: -+ -[source,terminal] ----- -$ oc create -f ptp-operatorgroup.yaml ----- - -. Subscribe to the PTP Operator. - -.. Save the following YAML in the `ptp-sub.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: ptp-operator-subscription - namespace: openshift-ptp -spec: - channel: "stable" - name: ptp-operator - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. Create the `Subscription` CR: -+ -[source,terminal] ----- -$ oc create -f ptp-sub.yaml ----- - -. 
To verify that the Operator is installed, enter the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-ptp -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- -+ -.Example output -[source,terminal] ----- -Name Phase -4.13.0-202301261535 Succeeded ----- diff --git a/modules/nw-ptp-installing-operator-web-console.adoc b/modules/nw-ptp-installing-operator-web-console.adoc deleted file mode 100644 index 262b28af6ecc..000000000000 --- a/modules/nw-ptp-installing-operator-web-console.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: PROCEDURE -[id="install-ptp-operator-web-console_{context}"] -= Installing the PTP Operator using the web console - -As a cluster administrator, you can install the PTP Operator using the web console. - -[NOTE] -==== -You have to create the namespace and Operator group as mentioned -in the previous section. -==== - -.Procedure - -. Install the PTP Operator using the {product-title} web console: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *PTP Operator* from the list of available Operators, and then click *Install*. - -.. On the *Install Operator* page, under *A specific namespace on the cluster* select *openshift-ptp*. Then, click *Install*. - -. Optional: Verify that the PTP Operator installed successfully: - -.. Switch to the *Operators* -> *Installed Operators* page. - -.. Ensure that *PTP Operator* is listed in the *openshift-ptp* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. -If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== - -+ -If the Operator does not appear as installed, to troubleshoot further: - -+ -* Go to the *Operators* -> *Installed Operators* page and inspect -the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors -under *Status*. -* Go to the *Workloads* -> *Pods* page and check the logs for pods in the -`openshift-ptp` project. diff --git a/modules/nw-ptp-introduction.adoc b/modules/nw-ptp-introduction.adoc deleted file mode 100644 index ec5f06993b1e..000000000000 --- a/modules/nw-ptp-introduction.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-ptp.adoc - -:_content-type: CONCEPT -[id="ptp-introduction_{context}"] -= About PTP - -Precision Time Protocol (PTP) is used to synchronize clocks in a network. When used in conjunction with hardware support, PTP is capable of sub-microsecond accuracy, and is more accurate than Network Time Protocol (NTP). - -[id="ptp-elements_{context}"] -== Elements of a PTP domain - -PTP is used to synchronize multiple nodes connected in a network, with clocks for each node. The clocks synchronized by PTP are organized in a source-destination hierarchy. -The hierarchy is created and updated automatically by the best master clock (BMC) algorithm, which runs on every clock. Destination clocks are synchronized to source clocks, and destination clocks can themselves be the source for other downstream clocks. -The three primary types of PTP clocks are described below. - -Grandmaster clock:: The grandmaster clock provides standard time information to other clocks across the network and ensures accurate and stable synchronisation. It writes time stamps and responds to time requests from other clocks. 
Grandmaster clocks synchronize to a Global Navigation Satellite System (GNSS) time source. The Grandmaster clock is the authoritative source of time in the network and is responsible for providing time synchronization to all other devices. - -Ordinary clock:: The ordinary clock has a single port connection that can play the role of source or destination clock, depending on its position in the network. The ordinary clock can read and write time stamps. - -Boundary clock:: The boundary clock has ports in two or more communication paths and can be a source and a destination to other destination clocks at the same time. The boundary clock works as a destination clock upstream. The destination clock receives the timing message, adjusts for delay, and then creates a new source time signal to pass down the network. The boundary clock produces a new timing packet that is still correctly synced with the source clock and can reduce the number of connected devices reporting directly to the source clock. - -[id="ptp-advantages-over-ntp_{context}"] -== Advantages of PTP over NTP - -One of the main advantages that PTP has over NTP is the hardware support present in various network interface controllers (NIC) and network switches. The specialized hardware allows PTP to account for delays in message transfer and improves the accuracy of time synchronization. To achieve the best possible accuracy, it is recommended that all networking components between PTP clocks are PTP hardware enabled. - -Hardware-based PTP provides optimal accuracy, since the NIC can time stamp the PTP packets at the exact moment they are sent and received. Compare this to software-based PTP, which requires additional processing of the PTP packets by the operating system. diff --git a/modules/nw-rfhe-creating-bmc-event-sub.adoc b/modules/nw-rfhe-creating-bmc-event-sub.adoc deleted file mode 100644 index 78edf2a58473..000000000000 --- a/modules/nw-rfhe-creating-bmc-event-sub.adoc +++ /dev/null @@ -1,168 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="nw-rfhe-creating-bmc-event-sub_{context}"] -= Subscribing to bare-metal events - -You can configure the baseboard management controller (BMC) to send bare-metal events to subscribed applications running in an {product-title} cluster. Example Redfish bare-metal events include an increase in device temperature, or removal of a device. You subscribe applications to bare-metal events using a REST API. - -[IMPORTANT] -==== -You can only create a `BMCEventSubscription` custom resource (CR) for physical hardware that supports Redfish and has a vendor interface set to `redfish` or `idrac-redfish`. -==== - -[NOTE] -==== -Use the `BMCEventSubscription` CR to subscribe to predefined Redfish events. The Redfish standard does not provide an option to create specific alerts and thresholds. For example, to receive an alert event when an enclosure's temperature exceeds 40° Celsius, you must manually configure the event according to the vendor's recommendations. -==== - -Perform the following procedure to subscribe to bare-metal events for the node using a `BMCEventSubscription` CR. - -.Prerequisites -* Install the OpenShift CLI (`oc`). - -* Log in as a user with `cluster-admin` privileges. - -* Get the user name and password for the BMC. - -* Deploy a bare-metal node with a Redfish-enabled Baseboard Management Controller (BMC) in your cluster, and enable Redfish events on the BMC. 
-+ -[NOTE] -==== -Enabling Redfish events on specific hardware is outside the scope of this information. For more information about enabling Redfish events for your specific hardware, consult the BMC manufacturer documentation. -==== - -.Procedure - -. Confirm that the node hardware has the Redfish `EventService` enabled by running the following `curl` command: -+ -[source,terminal] ----- -$ curl https:///redfish/v1/EventService --insecure -H 'Content-Type: application/json' -u ":" ----- -+ -where: -+ --- -bmc_ip_address:: is the IP address of the BMC where the Redfish events are generated. --- -+ -.Example output -[source,terminal] ----- -{ - "@odata.context": "/redfish/v1/$metadata#EventService.EventService", - "@odata.id": "/redfish/v1/EventService", - "@odata.type": "#EventService.v1_0_2.EventService", - "Actions": { - "#EventService.SubmitTestEvent": { - "EventType@Redfish.AllowableValues": ["StatusChange", "ResourceUpdated", "ResourceAdded", "ResourceRemoved", "Alert"], - "target": "/redfish/v1/EventService/Actions/EventService.SubmitTestEvent" - } - }, - "DeliveryRetryAttempts": 3, - "DeliveryRetryIntervalSeconds": 30, - "Description": "Event Service represents the properties for the service", - "EventTypesForSubscription": ["StatusChange", "ResourceUpdated", "ResourceAdded", "ResourceRemoved", "Alert"], - "EventTypesForSubscription@odata.count": 5, - "Id": "EventService", - "Name": "Event Service", - "ServiceEnabled": true, - "Status": { - "Health": "OK", - "HealthRollup": "OK", - "State": "Enabled" - }, - "Subscriptions": { - "@odata.id": "/redfish/v1/EventService/Subscriptions" - } -} ----- - -. Get the {redfish-operator} service route for the cluster by running the following command: -+ -[source,terminal] ----- -$ oc get route -n openshift-bare-metal-events ----- -+ -.Example output -[source,terminal] ----- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -hw-event-proxy hw-event-proxy-openshift-bare-metal-events.apps.compute-1.example.com hw-event-proxy-service 9087 edge None ----- - -. Create a `BMCEventSubscription` resource to subscribe to the Redfish events: - -.. Save the following YAML in the `bmc_sub.yaml` file: -+ -[source,yaml] ----- -apiVersion: metal3.io/v1alpha1 -kind: BMCEventSubscription -metadata: - name: sub-01 - namespace: openshift-machine-api -spec: - hostName: <1> - destination: <2> - context: '' ----- -<1> Specifies the name or UUID of the worker node where the Redfish events are generated. -<2> Specifies the bare-metal event proxy service, for example, `https://hw-event-proxy-openshift-bare-metal-events.apps.compute-1.example.com/webhook`. - -.. Create the `BMCEventSubscription` CR: -+ -[source,terminal] ----- -$ oc create -f bmc_sub.yaml ----- - -. Optional: To delete the BMC event subscription, run the following command: -+ -[source,terminal] ----- -$ oc delete -f bmc_sub.yaml ----- - -. Optional: To manually create a Redfish event subscription without creating a `BMCEventSubscription` CR, run the following `curl` command, specifying the BMC username and password. -+ -[source,terminal] ----- -$ curl -i -k -X POST -H "Content-Type: application/json" -d '{"Destination": "https://", "Protocol" : "Redfish", "EventTypes": ["Alert"], "Context": "root"}' -u : 'https:///redfish/v1/EventService/Subscriptions' –v ----- -+ -where: -+ --- -proxy_service_url:: is the bare-metal event proxy service, for example, `https://hw-event-proxy-openshift-bare-metal-events.apps.compute-1.example.com/webhook`. 
--- -+ --- -bmc_ip_address:: is the IP address of the BMC where the Redfish events are generated. --- -+ -.Example output -[source,terminal] ----- -HTTP/1.1 201 Created -Server: AMI MegaRAC Redfish Service -Location: /redfish/v1/EventService/Subscriptions/1 -Allow: GET, POST -Access-Control-Allow-Origin: * -Access-Control-Expose-Headers: X-Auth-Token -Access-Control-Allow-Headers: X-Auth-Token -Access-Control-Allow-Credentials: true -Cache-Control: no-cache, must-revalidate -Link: ; rel=describedby -Link: -Link: ; path= -ETag: "1651135676" -Content-Type: application/json; charset=UTF-8 -OData-Version: 4.0 -Content-Length: 614 -Date: Thu, 28 Apr 2022 08:47:57 GMT ----- diff --git a/modules/nw-rfhe-creating-hardware-event.adoc b/modules/nw-rfhe-creating-hardware-event.adoc deleted file mode 100644 index d654e240afa9..000000000000 --- a/modules/nw-rfhe-creating-hardware-event.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="nw-rfhe-creating-hardware-event_{context}"] -= Creating the bare-metal event and Secret CRs - -To start using bare-metal events, create the `HardwareEvent` custom resource (CR) for the host where the Redfish hardware is present. Hardware events and faults are reported in the `hw-event-proxy` logs. - -.Prerequisites - -* You have installed the {product-title} CLI (`oc`). - -* You have logged in as a user with `cluster-admin` privileges. - -* You have installed the {redfish-operator}. - -* You have created a `BMCEventSubscription` CR for the BMC Redfish hardware. - -* You have configured dynamic volume provisioning in the cluster or you have manually created `StorageClass`, `LocalVolume`, and `PersistentVolume` CRs to persist the events subscription. -+ -[NOTE] -==== -When you enable dynamic volume provisioning in the cluster, a `PersistentVolume` resource is automatically created for the `PersistentVolumeClaim` that the {redfish-operator} deploys. - -For more information about manually creating persistent storage in the cluster, see "Persistent storage using local volumes". -==== - -.Procedure - -. Create the `HardwareEvent` custom resource (CR): -+ -[NOTE] -==== -Multiple `HardwareEvent` resources are not permitted. -==== - -.. Save the following YAML in the `hw-event.yaml` file: -+ -[source,yaml] ----- -apiVersion: "event.redhat-cne.org/v1alpha1" -kind: "HardwareEvent" -metadata: - name: "hardware-event" -spec: - nodeSelector: - node-role.kubernetes.io/hw-event: "" <1> - storageType: "example-storage-class" <2> - logLevel: "debug" <3> - msgParserTimeout: "10" <4> ----- -+ --- -<1> Required. Use the `nodeSelector` field to target nodes with the specified label, for example, `node-role.kubernetes.io/hw-event: ""`. -<2> The value of `storageType` is used to populate the `StorageClassName` field for the `PersistentVolumeClaim` (`PVC`) resource that the {redfish-operator} automatically deploys. -The `PVC` resource is used to persist consumer event subscriptions. -+ -[NOTE] -==== -In {product-title} 4.13 or later, you do not need to set the `spec.transportHost` field in the `HardwareEvent` resource when you use HTTP transport for bare-metal events. -Set `transportHost` only when you use AMQP transport for bare-metal events. - -The value that you set for `.spec.storageType` in the `HardwareEvent` CR must match the `storageClassName` that is set in the `PersistentVolume` CR. -==== -<3> Optional. The default value is `debug`. Sets the log level in `hw-event-proxy` logs. 
The following log levels are available: `fatal`, `error`, `warning`, `info`, `debug`, `trace`. -<4> Optional. Sets the timeout value in milliseconds for the Message Parser. If a message parsing request is not responded to within the timeout duration, the original hardware event message is passed to the cloud native event framework. The default value is 10. --- - -.. Apply the `HardwareEvent` CR in the cluster: -+ -[source,terminal] ----- -$ oc create -f hardware-event.yaml ----- - -. Create a BMC username and password `Secret` CR that enables the hardware events proxy to access the Redfish message registry for the bare-metal host. -+ -.. Save the following YAML in the `hw-event-bmc-secret.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: redfish-basic-auth -type: Opaque -stringData: <1> - username: - password: - # BMC host DNS or IP address - hostaddr: ----- -<1> Enter plain text values for the various items under `stringData`. -+ -.. Create the `Secret` CR: -+ -[source,terminal] ----- -$ oc create -f hw-event-bmc-secret.yaml ----- diff --git a/modules/nw-rfhe-installing-operator-cli.adoc b/modules/nw-rfhe-installing-operator-cli.adoc deleted file mode 100644 index 32acbe7aa69e..000000000000 --- a/modules/nw-rfhe-installing-operator-cli.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="nw-rfhe-installing-operator-cli_{context}"] -= Installing the {redfish-operator} using the CLI - -As a cluster administrator, you can install the {redfish-operator} Operator by using the CLI. - -.Prerequisites - -* A cluster that is installed on bare-metal hardware with nodes that have a RedFish-enabled Baseboard Management Controller (BMC). -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a namespace for the {redfish-operator}. - -.. Save the following YAML in the `bare-metal-events-namespace.yaml` file: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-bare-metal-events - labels: - name: openshift-bare-metal-events - openshift.io/cluster-monitoring: "true" ----- - -.. Create the `Namespace` CR: -+ -[source,terminal] ----- -$ oc create -f bare-metal-events-namespace.yaml ----- - -. Create an Operator group for the {redfish-operator} Operator. - -.. Save the following YAML in the `bare-metal-events-operatorgroup.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: bare-metal-event-relay-group - namespace: openshift-bare-metal-events -spec: - targetNamespaces: - - openshift-bare-metal-events ----- - -.. Create the `OperatorGroup` CR: -+ -[source,terminal] ----- -$ oc create -f bare-metal-events-operatorgroup.yaml ----- - -. Subscribe to the {redfish-operator}. - -.. Save the following YAML in the `bare-metal-events-sub.yaml` file: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: bare-metal-event-relay-subscription - namespace: openshift-bare-metal-events -spec: - channel: "stable" - name: bare-metal-event-relay - source: redhat-operators - sourceNamespace: openshift-marketplace ----- - -.. 
Create the `Subscription` CR: -+ -[source,terminal] ----- -$ oc create -f bare-metal-events-sub.yaml ----- - -.Verification - -To verify that the {redfish-operator} Operator is installed, run the following command: - -[source,terminal] ----- -$ oc get csv -n openshift-bare-metal-events -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- diff --git a/modules/nw-rfhe-installing-operator-web-console.adoc b/modules/nw-rfhe-installing-operator-web-console.adoc deleted file mode 100644 index 02a43f89cdb7..000000000000 --- a/modules/nw-rfhe-installing-operator-web-console.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: PROCEDURE -[id="nw-rfhe-installing-operator-web-console_{context}"] -= Installing the {redfish-operator} using the web console - -As a cluster administrator, you can install the {redfish-operator} Operator using the web console. - -.Prerequisites - -* A cluster that is installed on bare-metal hardware with nodes that have a RedFish-enabled Baseboard Management Controller (BMC). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Install the {redfish-operator} using the {product-title} web console: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Choose *{redfish-operator}* from the list of available Operators, and then click *Install*. - -.. On the *Install Operator* page, select or create a *Namespace*, select *openshift-bare-metal-events*, and then click *Install*. - -.Verification - -Optional: You can verify that the Operator installed successfully by performing the following check: - -. Switch to the *Operators* -> *Installed Operators* page. - -. Ensure that *{redfish-operator}* is listed in the project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== - -If the Operator does not appear as installed, to troubleshoot further: - -* Go to the *Operators* -> *Installed Operators* page and inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -* Go to the *Workloads* -> *Pods* page and check the logs for pods in the project namespace. diff --git a/modules/nw-rfhe-introduction.adoc b/modules/nw-rfhe-introduction.adoc deleted file mode 100644 index 24428b7c0f62..000000000000 --- a/modules/nw-rfhe-introduction.adoc +++ /dev/null @@ -1,52 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_content-type: CONCEPT -[id="nw-rfhe-introduction_{context}"] -= How bare-metal events work - -The {redfish-operator} enables applications running on bare-metal clusters to respond quickly to Redfish hardware changes and failures such as breaches of temperature thresholds, fan failure, disk loss, power outages, and memory failure. These hardware events are delivered over a reliable low-latency transport channel based on Advanced Message Queuing Protocol (AMQP). The latency of the messaging service is between 10 to 20 milliseconds. - -The {redfish-operator} provides a publish-subscribe service for the hardware events. Applications can use a REST API to subscribe to the events. The {redfish-operator} supports hardware that complies with Redfish OpenAPI v1.8 or later. 
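For illustration only, the following sketch shows roughly what a delivered hardware event can look like once the proxy wraps it in the CNCF CloudEvents envelope that is described later in this section. Every value shown is a made-up placeholder, and the exact `type`, `source`, and `data` schema depend on the {redfish-operator} version; consult the bare-metal events REST API reference for the authoritative format.

.Example hardware event payload (illustrative placeholder values)
[source,json]
----
{
  "specversion": "1.0",
  "id": "6c2e6a50-0000-0000-0000-000000000001",
  "source": "/cluster/node/compute-1.example.com/redfish/event",
  "type": "event.redfish.temperature-above-upper-critical",
  "time": "2023-04-28T08:47:57Z",
  "data": {
    "Message": "Temperature of chassis inlet is above the upper critical threshold.",
    "MessageId": "TMP0100",
    "Severity": "Critical"
  }
}
----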
- -[id="rfhe-elements_{context}"] -== {redfish-operator} data flow - -The following figure illustrates an example bare-metal events data flow: - -.{redfish-operator} data flow -image::319_OpenShift_redfish_bare-metal_OCP_nodes_0323.png[Bare-metal events data flow] - -=== Operator-managed pod - -The Operator uses custom resources to manage the pod containing the {redfish-operator} and its components using the `HardwareEvent` CR. - -=== {redfish-operator} - -At startup, the {redfish-operator} queries the Redfish API and downloads all the message registries, including custom registries. The {redfish-operator} then begins to receive subscribed events from the Redfish hardware. - -The {redfish-operator} enables applications running on bare-metal clusters to respond quickly to Redfish hardware changes and failures such as breaches of temperature thresholds, fan failure, disk loss, power outages, and memory failure. The events are reported using the `HardwareEvent` CR. - -=== Cloud native event - -Cloud native events (CNE) is a REST API specification for defining the format of event data. - -=== CNCF CloudEvents - -link:https://cloudevents.io/[CloudEvents] is a vendor-neutral specification developed by the Cloud Native Computing Foundation (CNCF) for defining the format of event data. - -=== HTTP transport or AMQP dispatch router - -The HTTP transport or AMQP dispatch router is responsible for the message delivery service between publisher and subscriber. - -include::snippets/ptp-amq-interconnect-eol.adoc[] - -=== Cloud event proxy sidecar - -The cloud event proxy sidecar container image is based on the O-RAN API specification and provides a publish-subscribe event framework for hardware events. - -[id="rfhe-data-flow_{context}"] -== Redfish message parsing service - -In addition to handling Redfish events, the {redfish-operator} provides message parsing for events without a `Message` property. The proxy downloads all the Redfish message registries including vendor specific registries from the hardware when it starts. If an event does not contain a `Message` property, the proxy uses the Redfish message registries to construct the `Message` and `Resolution` properties and add them to the event before passing the event to the cloud events framework. This service allows Redfish events to have smaller message size and lower transmission latency. diff --git a/modules/nw-rfhe-quering-redfish-hardware-event-subs.adoc b/modules/nw-rfhe-quering-redfish-hardware-event-subs.adoc deleted file mode 100644 index e71d0deaf97f..000000000000 --- a/modules/nw-rfhe-quering-redfish-hardware-event-subs.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * monitoring/using-rfhe.adoc - -:_module-type: PROCEDURE -[id="nw-rfhe-querying-redfish-hardware-event-subs_{context}"] -= Querying Redfish bare-metal event subscriptions with curl - -Some hardware vendors limit the amount of Redfish hardware event subscriptions. You can query the number of Redfish event subscriptions by using `curl`. - -.Prerequisites -* Get the user name and password for the BMC. -* Deploy a bare-metal node with a Redfish-enabled Baseboard Management Controller (BMC) in your cluster, and enable Redfish hardware events on the BMC. - -.Procedure - -. 
Check the current subscriptions for the BMC by running the following `curl` command: -+ -[source,terminal] ----- -$ curl --globoff -H "Content-Type: application/json" -k -X GET --user : https:///redfish/v1/EventService/Subscriptions ----- -+ -where: -+ --- -bmc_ip_address:: is the IP address of the BMC where the Redfish events are generated. --- -+ -.Example output -[source,terminal] ----- -% Total % Received % Xferd Average Speed Time Time Time Current -Dload Upload Total Spent Left Speed -100 435 100 435 0 0 399 0 0:00:01 0:00:01 --:--:-- 399 -{ - "@odata.context": "/redfish/v1/$metadata#EventDestinationCollection.EventDestinationCollection", - "@odata.etag": "" - 1651137375 "", - "@odata.id": "/redfish/v1/EventService/Subscriptions", - "@odata.type": "#EventDestinationCollection.EventDestinationCollection", - "Description": "Collection for Event Subscriptions", - "Members": [ - { - "@odata.id": "/redfish/v1/EventService/Subscriptions/1" - }], - "Members@odata.count": 1, - "Name": "Event Subscriptions Collection" -} ----- -+ -In this example, a single subscription is configured: `/redfish/v1/EventService/Subscriptions/1`. - -. Optional: To remove the `/redfish/v1/EventService/Subscriptions/1` subscription with `curl`, run the following command, specifying the BMC username and password: -+ -[source,terminal] ----- -$ curl --globoff -L -w "%{http_code} %{url_effective}\n" -k -u :-H "Content-Type: application/json" -d '{}' -X DELETE https:///redfish/v1/EventService/Subscriptions/1 ----- -+ -where: -+ --- -bmc_ip_address:: is the IP address of the BMC where the Redfish events are generated. --- - - - diff --git a/modules/nw-rosa-proxy-remove-cli.adoc b/modules/nw-rosa-proxy-remove-cli.adoc deleted file mode 100644 index 07a878b0a033..000000000000 --- a/modules/nw-rosa-proxy-remove-cli.adoc +++ /dev/null @@ -1,102 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/enable-cluster-wide-proxy.adoc - -:_content-type: PROCEDURE -[id="nw-rosa-proxy-remove-cli_{context}"] -= Removing the cluster-wide proxy using CLI - -You must use the {product-title} (ROSA) CLI, `rosa`, to remove the proxy's address from your cluster. - -.Prerequisites - -* You must have cluster administrator privileges. -* You have installed the ROSA CLI (`rosa`). - -.Procedure - -* Use the `rosa edit` command to modify the proxy. You must pass empty strings to the `--http-proxy` and `--https-proxy` arguments to clear the proxy from the cluster: -+ -[source,terminal] ----- -$ rosa edit cluster -c --http-proxy "" --https-proxy "" ----- -+ -[NOTE] -==== -While your proxy might only use one of the proxy arguments, the empty fields are ignored, so passing empty strings to both the `--http-proxy` and `--https-proxy` arguments do not cause any issues. 
-==== -+ -.Example Output -+ -[source,yaml] ----- -I: Updated cluster ----- - -.Verification - -* You can verify that the proxy has been removed from the cluster by using the `rosa describe` command: -+ -[source,yaml] ----- -$ rosa describe cluster -c ----- -+ -Before removal, the proxy IP displays in a proxy section: -+ -[source,yaml] ----- -Name: -ID: -External ID: -OpenShift Version: 4.13.0 -Channel Group: stable -DNS: -AWS Account: -API URL: -Console URL: -Region: us-east-1 -Multi-AZ: false -Nodes: - - Control plane: 3 - - Infra: 2 - - Compute: 2 -Network: - - Type: OVNKubernetes - - Service CIDR: - - Machine CIDR: - - Pod CIDR: - - Host Prefix: -Proxy: - - HTTPProxy: -Additional trust bundle: REDACTED ----- -+ -After removing the proxy, the proxy section is removed: -+ -[source,yaml] ----- -Name: -ID: -External ID: -OpenShift Version: 4.13.0 -Channel Group: stable -DNS: -AWS Account: -API URL: -Console URL: -Region: us-east-1 -Multi-AZ: false -Nodes: - - Control plane: 3 - - Infra: 2 - - Compute: 2 -Network: - - Type: OVNKubernetes - - Service CIDR: - - Machine CIDR: - - Pod CIDR: - - Host Prefix: -Additional trust bundle: REDACTED ----- diff --git a/modules/nw-route-admission-policy.adoc b/modules/nw-route-admission-policy.adoc deleted file mode 100644 index de2fe5ceaa72..000000000000 --- a/modules/nw-route-admission-policy.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configure-ingress-operator.adoc -// * networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-route-admission-policy_{context}"] -= Configuring the route admission policy - -Administrators and application developers can run applications in multiple namespaces with the same domain name. This is for organizations where multiple teams develop microservices that are exposed on the same hostname. - -[WARNING] -==== -Allowing claims across namespaces should only be enabled for clusters with trust between namespaces, otherwise a malicious user could take over a hostname. For this reason, the default admission policy disallows hostname claims across namespaces. -==== - -.Prerequisites - -* Cluster administrator privileges. - -.Procedure - -* Edit the `.spec.routeAdmission` field of the `ingresscontroller` resource variable using the following command: -+ -[source,terminal] ----- -$ oc -n openshift-ingress-operator patch ingresscontroller/default --patch '{"spec":{"routeAdmission":{"namespaceOwnership":"InterNamespaceAllowed"}}}' --type=merge ----- -+ -.Sample Ingress Controller configuration -[source,yaml] ----- -spec: - routeAdmission: - namespaceOwnership: InterNamespaceAllowed -... ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to configure the route admission policy: -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - routeAdmission: - namespaceOwnership: InterNamespaceAllowed ----- -==== diff --git a/modules/nw-route-specific-annotations.adoc b/modules/nw-route-specific-annotations.adoc deleted file mode 100644 index 24d4b645f282..000000000000 --- a/modules/nw-route-specific-annotations.adoc +++ /dev/null @@ -1,181 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/routes/route-configuration.adoc - -[id="nw-route-specific-annotations_{context}"] -= Route-specific annotations - -The Ingress Controller can set the default options for all the routes it exposes. 
An individual route can override some of these defaults by providing specific configurations in its annotations. Red Hat does not support adding a route annotation to an operator-managed route. - -[IMPORTANT] -==== -To create a whitelist with multiple source IPs or subnets, use a space-delimited list. Any other delimiter type causes the list to be ignored without a warning or error message. -==== - -//For all the variables outlined in this section, you can set annotations on the -//*route definition* for the route to alter its configuration. - -.Route annotations -[cols="3*", options="header"] -|=== -|Variable | Description | Environment variable used as default -|`haproxy.router.openshift.io/balance`| Sets the load-balancing algorithm. Available options are `random`, `source`, `roundrobin`, and `leastconn`. The default value is `random`.| `ROUTER_TCP_BALANCE_SCHEME` for passthrough routes. Otherwise, use `ROUTER_LOAD_BALANCE_ALGORITHM`. -|`haproxy.router.openshift.io/disable_cookies`| Disables the use of cookies to track related connections. If set to `'true'` or `'TRUE'`, the balance algorithm is used to choose which back-end serves connections for each incoming HTTP request. | -|`router.openshift.io/cookie_name`| Specifies an optional cookie to use for -this route. The name must consist of any combination of upper and lower case letters, digits, "_", -and "-". The default is the hashed internal key name for the route. | -|`haproxy.router.openshift.io/pod-concurrent-connections`| Sets the maximum number of connections that are allowed to a backing pod from a router. + -Note: If there are multiple pods, each can have this many connections. If you have multiple routers, there is no coordination among them, each may connect this many times. If not set, or set to 0, there is no limit. | -|`haproxy.router.openshift.io/rate-limit-connections`| Setting `'true'` or `'TRUE'` enables rate limiting functionality which is implemented through stick-tables on the specific backend per route. + -Note: Using this annotation provides basic protection against distributed denial-of-service (DDoS) attacks. | -|`haproxy.router.openshift.io/rate-limit-connections.concurrent-tcp`| Limits the number of concurrent TCP connections made through the same source IP address. It accepts a numeric value. + -Note: Using this annotation provides basic protection against distributed denial-of-service (DDoS) attacks. | -|`haproxy.router.openshift.io/rate-limit-connections.rate-http`| Limits the rate at which a client with the same source IP address can make HTTP requests. It accepts a numeric value. + -Note: Using this annotation provides basic protection against distributed denial-of-service (DDoS) attacks. | -|`haproxy.router.openshift.io/rate-limit-connections.rate-tcp`| Limits the rate at which a client with the same source IP address can make TCP connections. It accepts a numeric value. + -Note: Using this annotation provides basic protection against distributed denial-of-service (DDoS) attacks. | -|`haproxy.router.openshift.io/timeout` | Sets a server-side timeout for the route. (TimeUnits) | `ROUTER_DEFAULT_SERVER_TIMEOUT` -|`haproxy.router.openshift.io/timeout-tunnel` | This timeout applies to a tunnel connection, for example, WebSocket over cleartext, edge, reencrypt, or passthrough routes. With cleartext, edge, or reencrypt route types, this annotation is applied as a timeout tunnel with the existing timeout value. 
For the passthrough route types, the annotation takes precedence over any existing timeout value set. | `ROUTER_DEFAULT_TUNNEL_TIMEOUT` -|`ingresses.config/cluster ingress.operator.openshift.io/hard-stop-after` | You can set either an IngressController or the ingress config . This annotation redeploys the router and configures the HA proxy to emit the haproxy `hard-stop-after` global option, which defines the maximum time allowed to perform a clean soft-stop. | `ROUTER_HARD_STOP_AFTER` -|`router.openshift.io/haproxy.health.check.interval`| Sets the interval for the back-end health checks. (TimeUnits) | `ROUTER_BACKEND_CHECK_INTERVAL` -|`haproxy.router.openshift.io/ip_whitelist` -| Sets a whitelist for the route. The whitelist is a space-separated list of IP addresses and CIDR ranges for the approved source addresses. Requests from IP addresses that are not in the whitelist are dropped. - -The maximum number of IP addresses and CIDR ranges allowed in a whitelist is 61.| -|`haproxy.router.openshift.io/hsts_header` | Sets a Strict-Transport-Security header for the edge terminated or re-encrypt route. | -|`haproxy.router.openshift.io/log-send-hostname` | Sets the `hostname` field in the Syslog header. Uses the hostname of the system. `log-send-hostname` is enabled by default if any Ingress API logging method, such as sidecar or Syslog facility, is enabled for the router. | -|`haproxy.router.openshift.io/rewrite-target` | Sets the rewrite path of the request on the backend. | -|`router.openshift.io/cookie-same-site` | Sets a value to restrict cookies. The values are: - -`Lax`: cookies are transferred between the visited site and third-party sites. - -`Strict`: cookies are restricted to the visited site. - -`None`: cookies are restricted to the visited site. - -This value is applicable to re-encrypt and edge routes only. For more information, see the link:https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite[SameSite cookies documentation].| - -|`haproxy.router.openshift.io/set-forwarded-headers` | Sets the policy for handling the `Forwarded` and `X-Forwarded-For` HTTP headers per route. The values are: - -`append`: appends the header, preserving any existing header. This is the default value. - -`replace`: sets the header, removing any existing header. - -`never`: never sets the header, but preserves any existing header. - -`if-none`: sets the header if it is not already set.| `ROUTER_SET_FORWARDED_HEADERS` - -|=== - -[NOTE] -==== -Environment variables cannot be edited. -==== - -.Router timeout variables - -`TimeUnits` are represented by a number followed by the unit: `us` *(microseconds), `ms` (milliseconds, default), `s` (seconds), `m` (minutes), `h` *(hours), `d` (days). - -The regular expression is: [1-9][0-9]*(`us`\|`ms`\|`s`\|`m`\|`h`\|`d`). -[cols="2,1,2a", options="header"] -|=== -|Variable | Default | Description -| `ROUTER_BACKEND_CHECK_INTERVAL` | `5000ms` | Length of time between subsequent liveness checks on back ends. -| `ROUTER_CLIENT_FIN_TIMEOUT` | `1s` | Controls the TCP FIN timeout period for the client connecting to the route. If the FIN sent to close the connection does not answer within the given time, HAProxy closes the connection. This is harmless if set to a low value and uses fewer resources on the router. -| `ROUTER_DEFAULT_CLIENT_TIMEOUT` | `30s` | Length of time that a client has to acknowledge or send data. -| `ROUTER_DEFAULT_CONNECT_TIMEOUT` | `5s` | The maximum connection time. 
-| `ROUTER_DEFAULT_SERVER_FIN_TIMEOUT` | `1s` | Controls the TCP FIN timeout from the router to the pod backing the route. -| `ROUTER_DEFAULT_SERVER_TIMEOUT` | `30s` | Length of time that a server has to acknowledge or send data. -| `ROUTER_DEFAULT_TUNNEL_TIMEOUT` | `1h` | Length of time for TCP or WebSocket connections to remain open. This timeout period resets whenever HAProxy reloads. -| `ROUTER_SLOWLORIS_HTTP_KEEPALIVE` | `300s` | Set the maximum time to wait for a new HTTP request to appear. If this is set too low, it can cause problems with browsers and applications not expecting a small `keepalive` value. - -Some effective timeout values can be the sum of certain variables, rather than the specific expected timeout. For example, `ROUTER_SLOWLORIS_HTTP_KEEPALIVE` adjusts `timeout http-keep-alive`. It is set to `300s` by default, but HAProxy also waits on `tcp-request inspect-delay`, which is set to `5s`. In this case, the overall timeout would be `300s` plus `5s`. -| `ROUTER_SLOWLORIS_TIMEOUT` | `10s` | Length of time the transmission of an HTTP request can take. -| `RELOAD_INTERVAL` | `5s` | Allows the minimum frequency for the router to reload and accept new changes. -| `ROUTER_METRICS_HAPROXY_TIMEOUT` | `5s` | Timeout for the gathering of HAProxy metrics. - -|=== - -.A route setting custom timeout -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/timeout: 5500ms <1> -... ----- -<1> Specifies the new timeout with HAProxy supported units (`us`, `ms`, `s`, `m`, `h`, `d`). If the unit is not provided, `ms` is the default. - -[NOTE] -==== -Setting a server-side timeout value for passthrough routes too low can cause -WebSocket connections to time out frequently on that route. -==== - -.A route that allows only one specific IP address -[source,yaml] ----- -metadata: - annotations: - haproxy.router.openshift.io/ip_whitelist: 192.168.1.10 ----- - -.A route that allows several IP addresses -[source,yaml] ----- -metadata: - annotations: - haproxy.router.openshift.io/ip_whitelist: 192.168.1.10 192.168.1.11 192.168.1.12 ----- - -.A route that allows an IP address CIDR network -[source,yaml] ----- -metadata: - annotations: - haproxy.router.openshift.io/ip_whitelist: 192.168.1.0/24 ----- - -.A route that allows both an IP address and IP address CIDR networks -[source,yaml] ----- -metadata: - annotations: - haproxy.router.openshift.io/ip_whitelist: 180.5.61.153 192.168.1.0/24 10.0.0.0/8 ----- - -.A route specifying a rewrite target -[source,yaml] ----- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - annotations: - haproxy.router.openshift.io/rewrite-target: / <1> -... ----- -<1> Sets `/` as the rewrite path of the request on the backend. - -Setting the `haproxy.router.openshift.io/rewrite-target` annotation on a route specifies that the Ingress Controller should rewrite paths in HTTP requests using this route before forwarding the requests to the backend application. -The part of the request path that matches the path specified in `spec.path` is replaced with the rewrite target specified in the annotation. - -The following table provides examples of the path rewriting behavior for various combinations of `spec.path`, request path, and rewrite target.
- -.rewrite-target examples: -[cols="4*", options="header"] -|=== -|Route.spec.path|Request path|Rewrite target| Forwarded request path -|/foo|/foo|/|/ -|/foo|/foo/|/|/ -|/foo|/foo/bar|/|/bar -|/foo|/foo/bar/|/|/bar/ -|/foo|/foo|/bar|/bar -|/foo|/foo/|/bar|/bar/ -|/foo|/foo/bar|/baz|/baz/bar -|/foo|/foo/bar/|/baz|/baz/bar/ -|/foo/|/foo|/|N/A (request path does not match route path) -|/foo/|/foo/|/|/ -|/foo/|/foo/bar|/|/bar -|=== diff --git a/modules/nw-router-configuring-dual-stack.adoc b/modules/nw-router-configuring-dual-stack.adoc deleted file mode 100644 index 2f4cf741dbf6..000000000000 --- a/modules/nw-router-configuring-dual-stack.adoc +++ /dev/null @@ -1,77 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/routes/route-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-router-configuring-dual-stack_{context}"] -= Configuring the {product-title} Ingress Controller for dual-stack networking - -If your {product-title} cluster is configured for IPv4 and IPv6 dual-stack networking, your cluster is externally reachable by {product-title} routes. - -The Ingress Controller automatically serves services that have both IPv4 and IPv6 endpoints, but you can configure the Ingress Controller for single-stack or dual-stack services. - -.Prerequisites - -* You deployed an {product-title} cluster on bare metal. -* You installed the OpenShift CLI (`oc`). - -.Procedure - -. To have the Ingress Controller serve traffic over IPv4/IPv6 to a workload, you can create a service YAML file or modify an existing service YAML file by setting the `ipFamilies` and `ipFamilyPolicy` fields. For example: -+ -.Sample service YAML file -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - creationTimestamp: yyyy-mm-ddT00:00:00Z - labels: - name: - manager: kubectl-create - operation: Update - time: yyyy-mm-ddT00:00:00Z - name: - namespace: - resourceVersion: "" - selfLink: "/api/v1/namespaces//services/" - uid: -spec: - clusterIP: 172.30.0.0/16 - clusterIPs: <1> - - 172.30.0.0/16 - - - ipFamilies: <2> - - IPv4 - - IPv6 - ipFamilyPolicy: RequireDualStack <3> - ports: - - port: 8080 - protocol: TCP - targetport: 8080 - selector: - name: - sessionAffinity: None - type: ClusterIP -status: - loadbalancer: {} ----- -<1> In a dual-stack instance, there are two different `clusterIPs` provided. -<2> For a single-stack instance, enter `IPv4` or `IPv6`. For a dual-stack instance, enter both `IPv4` and `IPv6`. -<3> For a single-stack instance, enter `SingleStack`. For a dual-stack instance, enter `RequireDualStack`. -+ -These resources generate corresponding `endpoints`. The Ingress Controller now watches `endpointslices`. -+ -. To view `endpoints`, enter the following command: -+ -[source,terminal] ----- -$ oc get endpoints ----- -+ -. 
To view `endpointslices`, enter the following command: -+ -[source,terminal] ----- -$ oc get endpointslices ----- diff --git a/modules/nw-scaling-ingress-controller.adoc b/modules/nw-scaling-ingress-controller.adoc deleted file mode 100644 index be3ffd99fe76..000000000000 --- a/modules/nw-scaling-ingress-controller.adoc +++ /dev/null @@ -1,76 +0,0 @@ -// Module filename: nw-scaling-ingress-controller.adoc -// Module included in the following assemblies: -// * networking/ingress-controller-configuration.adoc - -:_content-type: PROCEDURE -[id="nw-ingress-controller-configuration_{context}"] -= Scaling an Ingress Controller - -Manually scale an Ingress Controller to meet routing performance or -availability requirements, such as the requirement to increase throughput. `oc` -commands are used to scale the `IngressController` resource. The following -procedure provides an example for scaling up the default `IngressController`. - -[NOTE] -==== -Scaling is not an immediate action, as it takes time to create the desired number of replicas. -==== - -.Procedure -. View the current number of available replicas for the default `IngressController`: -+ -[source,terminal] ----- -$ oc get -n openshift-ingress-operator ingresscontrollers/default -o jsonpath='{$.status.availableReplicas}' ----- -+ -.Example output -[source,terminal] ----- -2 ----- - -. Scale the default `IngressController` to the desired number of replicas using -the `oc patch` command. The following example scales the default `IngressController` -to 3 replicas: -+ -[source,terminal] ----- -$ oc patch -n openshift-ingress-operator ingresscontroller/default --patch '{"spec":{"replicas": 3}}' --type=merge ----- -+ -.Example output -[source,terminal] ----- -ingresscontroller.operator.openshift.io/default patched ----- - -. Verify that the default `IngressController` scaled to the number of replicas -that you specified: -+ -[source,terminal] ----- -$ oc get -n openshift-ingress-operator ingresscontrollers/default -o jsonpath='{$.status.availableReplicas}' ----- -+ -.Example output -[source,terminal] ----- -3 ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to scale an Ingress Controller to three replicas: -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - replicas: 3 <1> ----- -==== -<1> If you need a different number of replicas, change the `replicas` value. diff --git a/modules/nw-sctp-about.adoc b/modules/nw-sctp-about.adoc deleted file mode 100644 index 8e399516ddbd..000000000000 --- a/modules/nw-sctp-about.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-sctp.adoc - -[id="nw-sctp-about_{context}"] -= Support for Stream Control Transmission Protocol (SCTP) on {product-title} - -As a cluster administrator, you can enable SCTP on the hosts in the cluster. -On {op-system-first}, the SCTP module is disabled by default. - -SCTP is a reliable, message-based protocol that runs on top of an IP network. - -When enabled, you can use SCTP as a protocol with pods, services, and network policy. -A `Service` object must be defined with the `type` parameter set to either the `ClusterIP` or `NodePort` value. - -[id="example_configurations_{context}"] -== Example configurations using SCTP protocol - -You can configure a pod or service to use SCTP by setting the `protocol` parameter to the `SCTP` value in the pod or service object.
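A `NodePort` service uses the same `protocol: SCTP` setting as the `ClusterIP` examples that follow. The following is a minimal sketch of a `NodePort` variant; the service name, selector, and port number are illustrative assumptions only, and the standard pod, service, and network policy examples follow this sketch.

[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  namespace: project1
  name: sctpnodeport        # illustrative name
spec:
  type: NodePort            # exposes the SCTP port on every node
  selector:
    app: sctpserver         # assumes the server pod carries this label
  ports:
  - name: sctpserver
    protocol: SCTP
    port: 30100
    targetPort: 30100
----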
- -In the following example, a pod is configured to use SCTP: - -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - namespace: project1 - name: example-pod -spec: - containers: - - name: example-pod -... - ports: - - containerPort: 30100 - name: sctpserver - protocol: SCTP ----- - -In the following example, a service is configured to use SCTP: - -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - namespace: project1 - name: sctpserver -spec: -... - ports: - - name: sctpserver - protocol: SCTP - port: 30100 - targetPort: 30100 - type: ClusterIP ----- - -In the following example, a `NetworkPolicy` object is configured to apply to SCTP network traffic on port `80` from any pods with a specific label: - -[source,yaml] ----- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: allow-sctp-on-http -spec: - podSelector: - matchLabels: - role: web - ingress: - - ports: - - protocol: SCTP - port: 80 ----- diff --git a/modules/nw-sctp-enabling.adoc b/modules/nw-sctp-enabling.adoc deleted file mode 100644 index e8ebb7cd8ced..000000000000 --- a/modules/nw-sctp-enabling.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-sctp.adoc - -:_content-type: PROCEDURE -[id="nw-sctp-enabling_{context}"] -= Enabling Stream Control Transmission Protocol (SCTP) - -As a cluster administrator, you can load and enable the blacklisted SCTP kernel module on worker nodes in your cluster. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Create a file named `load-sctp-module.yaml` that contains the following YAML definition: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - name: load-sctp-module - labels: - machineconfiguration.openshift.io/role: worker -spec: - config: - ignition: - version: 3.2.0 - storage: - files: - - path: /etc/modprobe.d/sctp-blacklist.conf - mode: 0644 - overwrite: true - contents: - source: data:, - - path: /etc/modules-load.d/sctp-load.conf - mode: 0644 - overwrite: true - contents: - source: data:,sctp ----- - -. To create the `MachineConfig` object, enter the following command: -+ -[source,terminal] ----- -$ oc create -f load-sctp-module.yaml ----- - -. Optional: To watch the status of the nodes while the MachineConfig Operator applies the configuration change, enter the following command. When the status of a node transitions to `Ready`, the configuration update is applied. -+ -[source,terminal] ----- -$ oc get nodes ----- diff --git a/modules/nw-sctp-verifying.adoc b/modules/nw-sctp-verifying.adoc deleted file mode 100644 index 7b48290232b6..000000000000 --- a/modules/nw-sctp-verifying.adoc +++ /dev/null @@ -1,154 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/using-sctp.adoc - -:image: registry.access.redhat.com/ubi9/ubi - -ifdef::openshift-origin[] -:image: fedora:31 -endif::[] - -:_content-type: PROCEDURE -[id="nw-sctp-verifying_{context}"] -= Verifying Stream Control Transmission Protocol (SCTP) is enabled - -You can verify that SCTP is working on a cluster by creating a pod with an application that listens for SCTP traffic, associating it with a service, and then connecting to the exposed service. - -.Prerequisites - -* Access to the internet from the cluster to install the `nc` package. -* Install the OpenShift CLI (`oc`). -* Access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. 
Create a pod starts an SCTP listener: - -.. Create a file named `sctp-server.yaml` that defines a pod with the following YAML: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sctpserver - labels: - app: sctpserver -spec: - containers: - - name: sctpserver - image: {image} - command: ["/bin/sh", "-c"] - args: - ["dnf install -y nc && sleep inf"] - ports: - - containerPort: 30102 - name: sctpserver - protocol: SCTP ----- - -.. Create the pod by entering the following command: -+ -[source,terminal] ----- -$ oc create -f sctp-server.yaml ----- - -. Create a service for the SCTP listener pod. - -.. Create a file named `sctp-service.yaml` that defines a service with the following YAML: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: sctpservice - labels: - app: sctpserver -spec: - type: NodePort - selector: - app: sctpserver - ports: - - name: sctpserver - protocol: SCTP - port: 30102 - targetPort: 30102 ----- - -.. To create the service, enter the following command: -+ -[source,terminal] ----- -$ oc create -f sctp-service.yaml ----- - -. Create a pod for the SCTP client. - -.. Create a file named `sctp-client.yaml` with the following YAML: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sctpclient - labels: - app: sctpclient -spec: - containers: - - name: sctpclient - image: {image} - command: ["/bin/sh", "-c"] - args: - ["dnf install -y nc && sleep inf"] ----- - -.. To create the `Pod` object, enter the following command: -+ -[source,terminal] ----- -$ oc apply -f sctp-client.yaml ----- - -. Run an SCTP listener on the server. - -.. To connect to the server pod, enter the following command: -+ -[source,terminal] ----- -$ oc rsh sctpserver ----- - -.. To start the SCTP listener, enter the following command: -+ -[source,terminal] ----- -$ nc -l 30102 --sctp ----- - -. Connect to the SCTP listener on the server. - -.. Open a new terminal window or tab in your terminal program. - -.. Obtain the IP address of the `sctpservice` service. Enter the following command: -+ -[source,terminal] ----- -$ oc get services sctpservice -o go-template='{{.spec.clusterIP}}{{"\n"}}' ----- - -.. To connect to the client pod, enter the following command: -+ -[source,terminal] ----- -$ oc rsh sctpclient ----- - -.. To start the SCTP client, enter the following command. Replace `` with the cluster IP address of the `sctpservice` service. -+ -[source,terminal] ----- -# nc 30102 --sctp ----- diff --git a/modules/nw-service-externalip-create.adoc b/modules/nw-service-externalip-create.adoc deleted file mode 100644 index a742680278d4..000000000000 --- a/modules/nw-service-externalip-create.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-service-external-ip.adoc - -:_content-type: PROCEDURE -[id="nw-service-externalip-create_{context}"] -= Attaching an ExternalIP to a service - -You can attach an ExternalIP to a service. If your cluster is configured to allocate an ExternalIP automatically, you might not need to manually attach an ExternalIP to the service. - -.Procedure - -. 
Optional: To confirm what IP address ranges are configured for use with ExternalIP, enter the following command: -+ -[source,terminal] ----- -$ oc get networks.config cluster -o jsonpath='{.spec.externalIP}{"\n"}' ----- -+ -If `autoAssignCIDRs` is set, {product-title} automatically assigns an ExternalIP to a new `Service` object if the `spec.externalIPs` field is not specified. - -. Attach an ExternalIP to the service. - -.. If you are creating a new service, specify the `spec.externalIPs` field and provide an array of one or more valid IP addresses. For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Service -metadata: - name: svc-with-externalip -spec: - ... - externalIPs: - - 192.174.120.10 ----- - -.. If you are attaching an ExternalIP to an existing service, enter the following command. Replace `` with the service name. Replace `` with a valid ExternalIP address. You can provide multiple IP addresses separated by commas. -+ -[source,terminal] ----- -$ oc patch svc -p \ - '{ - "spec": { - "externalIPs": [ "" ] - } - }' ----- -+ -For example: -+ -[source,terminal] ----- -$ oc patch svc mysql-55-rhel7 -p '{"spec":{"externalIPs":["192.174.120.10"]}}' ----- -+ -.Example output -[source,terminal] ----- -"mysql-55-rhel7" patched ----- - -. To confirm that an ExternalIP address is attached to the service, enter the following command. If you specified an ExternalIP for a new service, you must create the service first. -+ -[source,terminal] ----- -$ oc get svc ----- -+ -.Example output -[source,terminal] ----- -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -mysql-55-rhel7 172.30.131.89 192.174.120.10 3306/TCP 13m ----- diff --git a/modules/nw-sriov-add-pod-runtimeconfig.adoc b/modules/nw-sriov-add-pod-runtimeconfig.adoc deleted file mode 100644 index e470d091654c..000000000000 --- a/modules/nw-sriov-add-pod-runtimeconfig.adoc +++ /dev/null @@ -1,103 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/node_network/virt-configuring-sr-iov-network.adoc -// * virt/virtual_machines/vm_networking/virt-defining-an-sriov-network.adoc - -// Deprecating in OCP; This is identical in practice to adding a pod -// to an additional network. - -[id="nw-sriov-add-pod-runtimeconfig_{context}"] -= Configuring static MAC and IP addresses on additional SR-IOV networks - -You can configure static MAC and IP addresses on an SR-IOV network by specifying Container Network Interface (CNI) `runtimeConfig` data in a pod annotation. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges when creating the `SriovNetwork` object. - -.Procedure - -. Create the following `SriovNetwork` object, and then save the YAML in the `-sriov-network.yaml` file. Replace `` with a name for this additional network. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - networkNamespace: <3> - ipam: '{ "type": "static" }' <4> - capabilities: '{ "mac": true, "ips": true }' <5> - resourceName: <6> ----- -<1> Replace `` with a name for the object. The SR-IOV Network Operator creates a `NetworkAttachmentDefinition` object with same name. -<2> Specify the namespace where the SR-IOV Network Operator is installed. -<3> Replace `` with the namespace where the `NetworkAttachmentDefinition` object is created. -<4> Specify static type for the ipam CNI plugin as a YAML block scalar. -<5> Specify `mac` and `ips` `capabilities` to `true`. 
-<6> Replace `` with the value for the `spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. - -. Create the object by running the following command: -+ -[source,terminal] ----- -$ oc create -f <1> ----- -<1> Replace `` with the name of the file you created in the previous step. - -. Optional: Confirm that the NetworkAttachmentDefinition CR associated with the `SriovNetwork` object that you created in the previous step exists by running the following command. Replace `` with the namespace you specified in the `SriovNetwork` object. -+ -[source,terminal] ----- -$ oc get net-attach-def -n ----- - -[NOTE] -===== -Do not modify or delete a `SriovNetwork` custom resource (CR) if it is attached to any pods in the `running` state. -===== - -. Create the following SR-IOV pod spec, and then save the YAML in the `-sriov-pod.yaml` file. Replace `` with a name for this pod. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sample-pod - annotations: - k8s.v1.cni.cncf.io/networks: '[ - { - "name": "", <1> - "mac": "20:04:0f:f1:88:01", <2> - "ips": ["192.168.10.1/24", "2001::1/64"] <3> - } -]' -spec: - containers: - - name: sample-container - image: - imagePullPolicy: IfNotPresent - command: ["sleep", "infinity"] ----- -<1> Specify the name of the SR-IOV network attachment definition CR. -<2> Specify the MAC address for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. -<3> Specify addresses for the SR-IOV device which is allocated from the resource type defined in the SR-IOV network attachment definition CR. Both IPv4 and IPv6 addresses are supported. - -. Create the sample SR-IOV pod by running the following command: -+ -[source,terminal] ----- -$ oc create -f <1> ----- -<1> Replace `` with the name of the file you created in the previous step. - -. Optional: Confirm that `mac` and `ips` addresses are applied to the SR-IOV device by running the following command. Replace `` with the namespace you specified in the `SriovNetwork` object. -+ -[source,terminal] ----- -$ oc exec sample-pod -n -- ip addr show ----- diff --git a/modules/nw-sriov-app-netutil.adoc b/modules/nw-sriov-app-netutil.adoc deleted file mode 100644 index 5b7d29069f0e..000000000000 --- a/modules/nw-sriov-app-netutil.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc - -[id="nw-sriov-app-netutil_{context}"] -= DPDK library for use with container applications - -An link:https://github.com/openshift/app-netutil[optional library], `app-netutil`, provides several API methods for gathering network information about a pod from within a container running within that pod. - -This library can assist with integrating SR-IOV virtual functions (VFs) in Data Plane Development Kit (DPDK) mode into the container. -The library provides both a Golang API and a C API. - -Currently there are three API methods implemented: - -`GetCPUInfo()`:: This function determines which CPUs are available to the container and returns the list. - -`GetHugepages()`:: This function determines the amount of huge page memory requested in the `Pod` spec for each container and returns the values. - -`GetInterfaces()`:: This function determines the set of interfaces in the container and returns the list. The return value includes the interface type and type-specific data for each interface. 
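For example, the huge page values that `GetHugepages()` reports are the ones declared in each container's resource block. The following pod spec fragment is a minimal, illustrative sketch of such a declaration; the container name, sizes, CPU count, and mount path are examples borrowed from the DPDK workload examples later in this document, not part of the library itself.

[source,yaml]
----
# Illustrative pod spec fragment: GetHugepages() reports the hugepage
# requests and limits declared in a block like this one.
containers:
- name: dpdk-app            # illustrative container name
  image: registry.redhat.io/openshift4/dpdk-base-rhel8
  resources:
    requests:
      cpu: "4"
      memory: 1Gi
      hugepages-1Gi: 4Gi
    limits:
      cpu: "4"
      memory: 1Gi
      hugepages-1Gi: 4Gi
  volumeMounts:
  - mountPath: /dev/hugepages
    name: hugepage
volumes:
- name: hugepage
  emptyDir:
    medium: HugePages
----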
- -The repository for the library includes a sample Dockerfile to build a container image, `dpdk-app-centos`. The container image can run one of the following DPDK sample applications, depending on an environment variable in the pod specification: `l2fwd`, `l3wd` or `testpmd`. The container image provides an example of integrating the `app-netutil` library into the container image itself. The library can also integrate into an init container. The init container can collect the required data and pass the data to an existing DPDK workload. diff --git a/modules/nw-sriov-cfg-bond-interface-with-virtual-functions.adoc b/modules/nw-sriov-cfg-bond-interface-with-virtual-functions.adoc deleted file mode 100644 index a029417056b6..000000000000 --- a/modules/nw-sriov-cfg-bond-interface-with-virtual-functions.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-operator.adoc -:_content-type: PROCEDURE -[id="nw-sriov-cfg-bond-interface-with-virtual-functions_{context}"] -= Configuring a bond interface from two SR-IOV interfaces - -Bonding enables multiple network interfaces to be aggregated into a single logical "bonded" interface. Bond Container Network Interface (Bond-CNI) brings bond capability into containers. - -Bond-CNI can be created using Single Root I/O Virtualization (SR-IOV) virtual functions and placing them in the container network namespace. - -{product-title} only supports Bond-CNI using SR-IOV virtual functions. The SR-IOV Network Operator provides the SR-IOV CNI plugin needed to manage the virtual functions. Other CNIs or types of interfaces are not supported. - -.Prerequisites - -* The SR-IOV Network Operator must be installed and configured to obtain virtual functions in a container. -* To configure SR-IOV interfaces, an SR-IOV network and policy must be created for each interface. -* The SR-IOV Network Operator creates a network attachment definition for each SR-IOV interface, based on the SR-IOV network and policy defined. -* The `linkState` is set to the default value `auto` for the SR-IOV virtual function. - -[id="nw-sriov-cfg-creating-bond-network-attachment-definition_{context}"] -== Creating a bond network attachment definition - -Now that the SR-IOV virtual functions are available, you can create a bond network attachment definition. - -[source,yaml] ----- -apiVersion: "k8s.cni.cncf.io/v1" - kind: NetworkAttachmentDefinition - metadata: - name: bond-net1 - namespace: demo - spec: - config: '{ - "type": "bond", <1> - "cniVersion": "0.3.1", - "name": "bond-net1", - "mode": "active-backup", <2> - "failOverMac": 1, <3> - "linksInContainer": true, <4> - "miimon": "100", - "mtu": 1500, - "links": [ <5> - {"name": "net1"}, - {"name": "net2"} - ], - "ipam": { - "type": "host-local", - "subnet": "10.56.217.0/24", - "routes": [{ - "dst": "0.0.0.0/0" - }], - "gateway": "10.56.217.1" - } - }' ----- -<1> The cni-type is always set to `bond`. -<2> The `mode` attribute specifies the bonding mode. -+ -[NOTE] -==== -The bonding modes supported are: - -* `balance-rr` - 0 -* `active-backup` - 1 -* `balance-xor` - 2 - -For `balance-rr` or `balance-xor` modes, you must set the `trust` mode to `on` for the SR-IOV virtual function. -==== -<3> The `failover` attribute is mandatory for active-backup mode and must be set to 1. -<4> The `linksInContainer=true` flag informs the Bond CNI that the required interfaces are to be found inside the container. 
By default, Bond CNI looks for these interfaces on the host which does not work for integration with SRIOV and Multus. -<5> The `links` section defines which interfaces will be used to create the bond. By default, Multus names the attached interfaces as: "net", plus a consecutive number, starting with one. - -[id="nw-sriov-cfg-creating-pod-using-interface_{context}"] -== Creating a pod using a bond interface - -. Test the setup by creating a pod with a YAML file named for example `podbonding.yaml` with content similar to the following: -+ -[source,yaml] ----- -apiVersion: v1 - kind: Pod - metadata: - name: bondpod1 - namespace: demo - annotations: - k8s.v1.cni.cncf.io/networks: demo/sriovnet1, demo/sriovnet2, demo/bond-net1 <1> - spec: - containers: - - name: podexample - image: quay.io/openshift/origin-network-interface-bond-cni:4.11.0 - command: ["/bin/bash", "-c", "sleep INF"] ----- -<1> Note the network annotation: it contains two SR-IOV network attachments, and one bond network attachment. The bond attachment uses the two SR-IOV interfaces as bonded port interfaces. - -. Apply the yaml by running the following command: -+ -[source,terminal] ----- -$ oc apply -f podbonding.yaml ----- - -. Inspect the pod interfaces with the following command: -+ -[source,yaml] ----- -$ oc rsh -n demo bondpod1 -sh-4.4# -sh-4.4# ip a -1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1000 -link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 -inet 127.0.0.1/8 scope host lo -valid_lft forever preferred_lft forever -3: eth0@if150: mtu 1450 qdisc noqueue state UP -link/ether 62:b1:b5:c8:fb:7a brd ff:ff:ff:ff:ff:ff -inet 10.244.1.122/24 brd 10.244.1.255 scope global eth0 -valid_lft forever preferred_lft forever -4: net3: mtu 1500 qdisc noqueue state UP qlen 1000 -link/ether 9e:23:69:42:fb:8a brd ff:ff:ff:ff:ff:ff <1> -inet 10.56.217.66/24 scope global bond0 -valid_lft forever preferred_lft forever -43: net1: mtu 1500 qdisc mq master bond0 state UP qlen 1000 -link/ether 9e:23:69:42:fb:8a brd ff:ff:ff:ff:ff:ff <2> -44: net2: mtu 1500 qdisc mq master bond0 state UP qlen 1000 -link/ether 9e:23:69:42:fb:8a brd ff:ff:ff:ff:ff:ff <3> ----- -<1> The bond interface is automatically named `net3`. To set a specific interface name add `@name` suffix to the pod’s `k8s.v1.cni.cncf.io/networks` annotation. -<2> The `net1` interface is based on an SR-IOV virtual function. -<3> The `net2` interface is based on an SR-IOV virtual function. -+ -[NOTE] -==== -If no interface names are configured in the pod annotation, interface names are assigned automatically as `net`, with `` starting at `1`. -==== - -. 
Optional: If you want to set a specific interface name for example `bond0`, edit the `k8s.v1.cni.cncf.io/networks` annotation and set `bond0` as the interface name as follows: -+ -[source,terminal] ----- -annotations: - k8s.v1.cni.cncf.io/networks: demo/sriovnet1, demo/sriovnet2, demo/bond-net1@bond0 ----- \ No newline at end of file diff --git a/modules/nw-sriov-concept-dpdk-line-rate.adoc b/modules/nw-sriov-concept-dpdk-line-rate.adoc deleted file mode 100644 index a6698c796aeb..000000000000 --- a/modules/nw-sriov-concept-dpdk-line-rate.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: CONCEPT -[id="nw-sriov-example-dpdk-line-rate_{context}"] -= Overview of achieving a specific DPDK line rate - -To achieve a specific Data Plane Development Kit (DPDK) line rate, deploy a Node Tuning Operator and configure Single Root I/O Virtualization (SR-IOV). You must also tune the DPDK settings for the following resources: - -- Isolated CPUs -- Hugepages -- The topology scheduler - -[NOTE] -==== -In previous versions of {product-title}, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance for {product-title} applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. -==== - -.DPDK test environment -The following diagram shows the components of a traffic-testing environment: - -image::261_OpenShift_DPDK_0722.png[DPDK test environment] - -- **Traffic generator**: An application that can generate high-volume packet traffic. -- **SR-IOV-supporting NIC**: A network interface card compatible with SR-IOV. The card runs a number of virtual functions on a physical interface. -- **Physical Function (PF)**: A PCI Express (PCIe) function of a network adapter that supports the SR-IOV interface. -- **Virtual Function (VF)**: A lightweight PCIe function on a network adapter that supports SR-IOV. The VF is associated with the PCIe PF on the network adapter. The VF represents a virtualized instance of the network adapter. -- **Switch**: A network switch. Nodes can also be connected back-to-back. -- **`testpmd`**: An example application included with DPDK. The `testpmd` application can be used to test the DPDK in a packet-forwarding mode. The `testpmd` application is also an example of how to build a fully-fledged application using the DPDK Software Development Kit (SDK). -- **worker 0** and **worker 1**: {product-title} nodes. diff --git a/modules/nw-sriov-configure-exclude-topology-manager.adoc b/modules/nw-sriov-configure-exclude-topology-manager.adoc deleted file mode 100644 index 28a9bcb5b6b2..000000000000 --- a/modules/nw-sriov-configure-exclude-topology-manager.adoc +++ /dev/null @@ -1,216 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: PROCEDURE -[id="nw-sriov-configure-exclude-topology-manager_{context}"] -= Excluding the SR-IOV network topology for NUMA-aware scheduling - -To exclude advertising the SR-IOV network resource's Non-Uniform Memory Access (NUMA) node to the Topology Manager, you can configure the `excludeTopology` specification in the `SriovNetworkNodePolicy` custom resource. Use this configuration for more flexible SR-IOV network deployments during NUMA-aware pod scheduling. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). 
-* You have configured the CPU Manager policy to `static`. For more information about CPU Manager, see the _Additional resources_ section. -* You have configured the Topology Manager policy to `single-numa-node`. -* You have installed the SR-IOV Network Operator. - -.Procedure - -. Create the `SriovNetworkNodePolicy` CR: - -.. Save the following YAML in the `sriov-network-node-policy.yaml` file, replacing values in the YAML to match your environment: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: - namespace: openshift-sriov-network-operator -spec: - resourceName: sriovnuma0 <1> - nodeSelector: - kubernetes.io/hostname: - numVfs: - nicSelector: <2> - vendor: "" - deviceID: "" - deviceType: netdevice - excludeTopology: true <3> ----- -<1> The resource name of the SR-IOV network device plugin. This YAML uses a sample `resourceName` value. -<2> Identify the device for the Operator to configure by using the NIC selector. -<3> To exclude advertising the NUMA node for the SR-IOV network resource to the Topology Manager, set the value to `true`. The default value is `false`. -+ -[NOTE] -==== -If multiple `SriovNetworkNodePolicy` resources target the same SR-IOV network resource, the `SriovNetworkNodePolicy` resources must have the same value as the `excludeTopology` specification. Otherwise, the conflicting policy is rejected. -==== - -.. Create the `SriovNetworkNodePolicy` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sriov-network-node-policy.yaml ----- -+ -.Example output -[source,terminal] ----- -sriovnetworknodepolicy.sriovnetwork.openshift.io/policy-for-numa-0 created ----- - -. Create the `SriovNetwork` CR: - -.. Save the following YAML in the `sriov-network.yaml` file, replacing values in the YAML to match your environment: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: sriov-numa-0-network <1> - namespace: openshift-sriov-network-operator -spec: - resourceName: sriovnuma0 <2> - networkNamespace: <3> - ipam: |- <4> - { - "type": "", - } ----- -<1> Replace `sriov-numa-0-network` with the name for the SR-IOV network resource. -<2> Specify the resource name for the `SriovNetworkNodePolicy` CR from the previous step. This YAML uses a sample `resourceName` value. -<3> Enter the namespace for your SR-IOV network resource. -<4> Enter the IP address management configuration for the SR-IOV network. - -.. Create the `SriovNetwork` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sriov-network.yaml ----- -+ -.Example output -[source,terminal] ----- -sriovnetwork.sriovnetwork.openshift.io/sriov-numa-0-network created ----- - -. Create a pod and assign the SR-IOV network resource from the previous step: - -.. Save the following YAML in the `sriov-network-pod.yaml` file, replacing values in the YAML to match your environment: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "sriov-numa-0-network", <1> - } - ] -spec: - containers: - - name: - image: - imagePullPolicy: IfNotPresent - command: ["sleep", "infinity"] ----- -<1> This is the name of the `SriovNetwork` resource that uses the `SriovNetworkNodePolicy` resource. - -.. 
Create the `Pod` resource by running the following command: -+ -[source,terminal] ----- -$ oc create -f sriov-network-pod.yaml ----- -+ -.Example output -[source,terminal] ----- -pod/example-pod created ----- - -.Verification - -. Verify the status of the pod by running the following command, replacing `` with the name of the pod: -+ -[source,terminal] ----- -$ oc get pod ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -test-deployment-sriov-76cbbf4756-k9v72 1/1 Running 0 45h ----- - -. Open a debug session with the target pod to verify that the SR-IOV network resources are deployed to a different node than the memory and CPU resources. - -.. Open a debug session with the pod by running the follow command, replacing with the target pod name. -+ -[source,terminal] ----- -$ oc debug pod/ ----- - -.. Set `/host` as the root directory within the debug shell. The debug pod mounts the root file system from the host in `/host` within the pod. By changing the root directory to `/host`, you can run binaries from the host file system: -+ -[source,terminal] ----- -$ chroot /host ----- - -.. View information about the CPU allocation by running the following commands: -+ -[source,terminal] ----- -$ lscpu | grep NUMA ----- -+ -.Example output -[source,terminal] ----- -NUMA node(s): 2 -NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16,18,... -NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17,19,... ----- -+ -[source,terminal] ----- -$ cat /proc/self/status | grep Cpus ----- -+ -.Example output -[source,terminal] ----- -Cpus_allowed: aa -Cpus_allowed_list: 1,3,5,7 ----- -+ -[source,terminal] ----- -$ cat /sys/class/net/net1/device/numa_node ----- -+ -.Example output -[source,terminal] ----- -0 ----- -+ -In this example, CPUs 1,3,5, and 7 are allocated to `NUMA node1` but the SR-IOV network resource can use the NIC in `NUMA node0`. - -[NOTE] -==== -If the `excludeTopology` specification is set to `True`, it is possible that the required resources exist in the same NUMA node. -==== - diff --git a/modules/nw-sriov-configuring-device.adoc b/modules/nw-sriov-configuring-device.adoc deleted file mode 100644 index 1d88803c2485..000000000000 --- a/modules/nw-sriov-configuring-device.adoc +++ /dev/null @@ -1,107 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc -// * virt/vm_networking/virt-attaching-vm-to-sriov-network.adoc - -ifeval::["{context}" == "configuring-sriov-device"] -:ocp-sriov: -endif::[] - -ifeval::["{context}" == "virt-attaching-vm-to-sriov-network"] -:virt-sriov: -endif::[] - -:_content-type: PROCEDURE -[id="nw-sriov-configuring-device_{context}"] -= Configuring SR-IOV network devices - -The SR-IOV Network Operator adds the `SriovNetworkNodePolicy.sriovnetwork.openshift.io` CustomResourceDefinition to {product-title}. -You can configure an SR-IOV network device by creating a SriovNetworkNodePolicy custom resource (CR). - -[NOTE] -===== -When applying the configuration specified in a `SriovNetworkNodePolicy` object, the SR-IOV Operator might drain the nodes, and in some cases, reboot nodes. - -It might take several minutes for a configuration change to apply. -===== - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. -* You have installed the SR-IOV Network Operator. -* You have enough available nodes in your cluster to handle the evicted workload from drained nodes. 
-* You have not selected any control plane nodes for SR-IOV network device configuration. - -.Procedure - -. Create an `SriovNetworkNodePolicy` object, and then save the YAML in the `-sriov-node-network.yaml` file. Replace `` with the name for this configuration. -ifdef::virt-sriov[] -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: <3> - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" <4> - priority: <5> - mtu: <6> - numVfs: <7> - nicSelector: <8> - vendor: "" <9> - deviceID: "" <10> - pfNames: ["", ...] <11> - rootDevices: ["", "..."] <12> - deviceType: vfio-pci <13> - isRdma: false <14> ----- -<1> Specify a name for the CR object. -<2> Specify the namespace where the SR-IOV Operator is installed. -<3> Specify the resource name of the SR-IOV device plugin. You can create multiple `SriovNetworkNodePolicy` objects for a resource name. -<4> Specify the node selector to select which nodes are configured. -Only SR-IOV network devices on selected nodes are configured. The SR-IOV -Container Network Interface (CNI) plugin and device plugin are deployed only on selected nodes. -<5> Optional: Specify an integer value between `0` and `99`. A smaller number gets higher priority, so a priority of `10` is higher than a priority of `99`. The default value is `99`. -<6> Optional: Specify a value for the maximum transmission unit (MTU) of the virtual function. The maximum MTU value can vary for different NIC models. -<7> Specify the number of the virtual functions (VF) to create for the SR-IOV physical network device. For an Intel network interface controller (NIC), the number of VFs cannot be larger than the total VFs supported by the device. For a Mellanox NIC, the number of VFs cannot be larger than `128`. -<8> The `nicSelector` mapping selects the Ethernet device for the Operator to configure. You do not need to specify values for all the parameters. It is recommended to identify the Ethernet adapter with enough precision to minimize the possibility of selecting an Ethernet device unintentionally. -If you specify `rootDevices`, you must also specify a value for `vendor`, `deviceID`, or `pfNames`. -If you specify both `pfNames` and `rootDevices` at the same time, ensure that they point to an identical device. -<9> Optional: Specify the vendor hex code of the SR-IOV network device. The only allowed values are either `8086` or `15b3`. -<10> Optional: Specify the device hex code of SR-IOV network device. The only allowed values are `158b`, `1015`, `1017`. -<11> Optional: The parameter accepts an array of one or more physical function (PF) names for the Ethernet device. -<12> The parameter accepts an array of one or more PCI bus addresses for the physical function of the Ethernet device. Provide the address in the following format: `0000:02:00.1`. -<13> The `vfio-pci` driver type is required for virtual functions in {VirtProductName}. -<14> Optional: Specify whether to enable remote direct memory access (RDMA) mode. For a Mellanox card, set `isRdma` to `false`. The default value is `false`. -+ -[NOTE] -==== -If `isRDMA` flag is set to `true`, you can continue to use the RDMA enabled VF as a normal network device. -A device can be used in either mode. -==== -endif::virt-sriov[] - -. Optional: Label the SR-IOV capable cluster nodes with `SriovNetworkNodePolicy.Spec.NodeSelector` if they are not already labeled. 
For more information about labeling nodes, see "Understanding how to update labels on nodes". - -. Create the `SriovNetworkNodePolicy` object: -+ -[source,terminal] ----- -$ oc create -f -sriov-node-network.yaml ----- -+ -where `` specifies the name for this configuration. -+ -After applying the configuration update, all the pods in `sriov-network-operator` namespace transition to the `Running` status. - -. To verify that the SR-IOV network device is configured, enter the following command. Replace `` with the name of a node with the SR-IOV network device that you just configured. -+ -[source,terminal] ----- -$ oc get sriovnetworknodestates -n openshift-sriov-network-operator -o jsonpath='{.status.syncStatus}' ----- diff --git a/modules/nw-sriov-configuring-operator.adoc b/modules/nw-sriov-configuring-operator.adoc deleted file mode 100644 index a2a886c010dc..000000000000 --- a/modules/nw-sriov-configuring-operator.adoc +++ /dev/null @@ -1,308 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-operator.adoc - -:_content-type: PROCEDURE -[id="nw-sriov-configuring-operator_{context}"] -= Configuring the SR-IOV Network Operator - -[IMPORTANT] -==== -Modifying the SR-IOV Network Operator configuration is not normally necessary. -The default configuration is recommended for most use cases. -Complete the steps to modify the relevant configuration only if the default behavior of the Operator is not compatible with your use case. -==== - -The SR-IOV Network Operator adds the `SriovOperatorConfig.sriovnetwork.openshift.io` CustomResourceDefinition resource. -The Operator automatically creates a SriovOperatorConfig custom resource (CR) named `default` in the `openshift-sriov-network-operator` namespace. - -[NOTE] -===== -The `default` CR contains the SR-IOV Network Operator configuration for your cluster. -To change the Operator configuration, you must modify this CR. -===== - -[id="nw-sriov-operator-cr_{context}"] -== SR-IOV Network Operator config custom resource - -The fields for the `sriovoperatorconfig` custom resource are described in the following table: - -.SR-IOV Network Operator config custom resource -[cols=".^2,.^2,.^6a",options="header"] -|==== -|Field|Type|Description - -|`metadata.name` -|`string` -|Specifies the name of the SR-IOV Network Operator instance. -The default value is `default`. -Do not set a different value. - -|`metadata.namespace` -|`string` -|Specifies the namespace of the SR-IOV Network Operator instance. -The default value is `openshift-sriov-network-operator`. -Do not set a different value. - -|`spec.configDaemonNodeSelector` -|`string` -|Specifies the node selection to control scheduling the SR-IOV Network Config Daemon on selected nodes. -By default, this field is not set and the Operator deploys the SR-IOV Network Config daemon set on worker nodes. - -|`spec.disableDrain` -|`boolean` -|Specifies whether to disable the node draining process or enable the node draining process when you apply a new policy to configure the NIC on a node. -Setting this field to `true` facilitates software development and installing {product-title} on a single node. By default, this field is not set. - -For single-node clusters, set this field to `true` after installing the Operator. This field must remain set to `true`. - -|`spec.enableInjector` -|`boolean` -|Specifies whether to enable or disable the Network Resources Injector daemon set. -By default, this field is set to `true`. 
- -|`spec.enableOperatorWebhook` -|`boolean` -|Specifies whether to enable or disable the Operator Admission Controller webhook daemon set. -By default, this field is set to `true`. - -|`spec.logLevel` -|`integer` -|Specifies the log verbosity level of the Operator. -Set to `0` to show only the basic logs. Set to `2` to show all the available logs. -By default, this field is set to `2`. - -|==== - -[id="about-network-resource-injector_{context}"] -== About the Network Resources Injector - -The Network Resources Injector is a Kubernetes Dynamic Admission Controller -application. It provides the following capabilities: - -* Mutation of resource requests and limits in a pod specification to add an SR-IOV resource name according to an SR-IOV network attachment definition annotation. -* Mutation of a pod specification with a Downward API volume to expose pod annotations, labels, and huge pages requests and limits. Containers that run in the pod can access the exposed information as files under the `/etc/podnetinfo` path. - -By default, the Network Resources Injector is enabled by the SR-IOV Network Operator and runs as a daemon set on all control plane nodes. The following is an example of Network Resources Injector pods running in a cluster with three control plane nodes: - -[source,terminal] ----- -$ oc get pods -n openshift-sriov-network-operator ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -network-resources-injector-5cz5p 1/1 Running 0 10m -network-resources-injector-dwqpx 1/1 Running 0 10m -network-resources-injector-lktz5 1/1 Running 0 10m ----- - -[id="about-sr-iov-operator-admission-control-webhook_{context}"] -== About the SR-IOV Network Operator admission controller webhook - -The SR-IOV Network Operator Admission Controller webhook is a Kubernetes Dynamic -Admission Controller application. It provides the following capabilities: - -* Validation of the `SriovNetworkNodePolicy` CR when it is created or updated. -* Mutation of the `SriovNetworkNodePolicy` CR by setting the default value for the `priority` and `deviceType` fields when the CR is created or updated. - -By default the SR-IOV Network Operator Admission Controller webhook is enabled by the Operator and runs as a daemon set on all control plane nodes. - -NOTE: Use caution when disabling the SR-IOV Network Operator Admission Controller webhook. You can disable the webhook under specific circumstances, such as troubleshooting, or if you want to use unsupported devices. - - -The following is an example of the Operator Admission Controller webhook pods running in a cluster with three control plane nodes: - -[source,terminal] ----- -$ oc get pods -n openshift-sriov-network-operator ----- - -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -operator-webhook-9jkw6 1/1 Running 0 16m -operator-webhook-kbr5p 1/1 Running 0 16m -operator-webhook-rpfrl 1/1 Running 0 16m ----- - -[id="about-custom-node-selectors_{context}"] -== About custom node selectors - -The SR-IOV Network Config daemon discovers and configures the SR-IOV network devices on cluster nodes. -By default, it is deployed to all the `worker` nodes in the cluster. -You can use node labels to specify on which nodes the SR-IOV Network Config daemon runs. - -[id="disable-enable-network-resource-injector_{context}"] -== Disabling or enabling the Network Resources Injector - -To disable or enable the Network Resources Injector, which is enabled by default, complete the following procedure. 
- -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* You must have installed the SR-IOV Network Operator. - -.Procedure - -- Set the `enableInjector` field. Replace `` with `false` to disable the feature or `true` to enable the feature. -+ -[source,terminal] ----- -$ oc patch sriovoperatorconfig default \ - --type=merge -n openshift-sriov-network-operator \ - --patch '{ "spec": { "enableInjector": } }' ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to update the Operator: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovOperatorConfig -metadata: - name: default - namespace: openshift-sriov-network-operator -spec: - enableInjector: ----- -==== - -[id="disable-enable-sr-iov-operator-admission-control-webhook_{context}"] -== Disabling or enabling the SR-IOV Network Operator admission controller webhook - -To disable or enable the admission controller webhook, which is enabled by default, complete the following procedure. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* You must have installed the SR-IOV Network Operator. - -.Procedure - -- Set the `enableOperatorWebhook` field. Replace `` with `false` to disable the feature or `true` to enable it: -+ -[source,terminal] ----- -$ oc patch sriovoperatorconfig default --type=merge \ - -n openshift-sriov-network-operator \ - --patch '{ "spec": { "enableOperatorWebhook": } }' ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to update the Operator: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovOperatorConfig -metadata: - name: default - namespace: openshift-sriov-network-operator -spec: - enableOperatorWebhook: ----- -==== - -[id="configuring-custom-nodeselector_{context}"] -== Configuring a custom NodeSelector for the SR-IOV Network Config daemon - -The SR-IOV Network Config daemon discovers and configures the SR-IOV network devices on cluster nodes. By default, it is deployed to all the `worker` nodes in the cluster. You can use node labels to specify on which nodes the SR-IOV Network Config daemon runs. - -To specify the nodes where the SR-IOV Network Config daemon is deployed, complete the following procedure. - -[IMPORTANT] -===== -When you update the `configDaemonNodeSelector` field, the SR-IOV Network Config daemon is recreated on each selected node. -While the daemon is recreated, cluster users are unable to apply any new SR-IOV Network node policy or create new SR-IOV pods. -===== - -.Procedure - -- To update the node selector for the operator, enter the following command: -+ -[source,terminal] ----- -$ oc patch sriovoperatorconfig default --type=json \ - -n openshift-sriov-network-operator \ - --patch '[{ - "op": "replace", - "path": "/spec/configDaemonNodeSelector", - "value": {} - }]' ----- -+ -Replace `` with a label to apply as in the following example: -`"node-role.kubernetes.io/worker": ""`. 
-+ -[TIP] -==== -You can alternatively apply the following YAML to update the Operator: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovOperatorConfig -metadata: - name: default - namespace: openshift-sriov-network-operator -spec: - configDaemonNodeSelector: - ----- -==== - -[id="configure-sr-iov-operator-single-node_{context}"] -== Configuring the SR-IOV Network Operator for single node installations - -By default, the SR-IOV Network Operator drains workloads from a node before every policy change. -The Operator performs this action to ensure that there no workloads using the virtual functions before the reconfiguration. - -For installations on a single node, there are no other nodes to receive the workloads. -As a result, the Operator must be configured not to drain the workloads from the single node. - -[IMPORTANT] -==== -After performing the following procedure to disable draining workloads, you must remove any workload that uses an SR-IOV network interface before you change any SR-IOV network node policy. -==== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. -* You must have installed the SR-IOV Network Operator. - -.Procedure - -- To set the `disableDrain` field to `true`, enter the following command: -+ -[source,terminal] ----- -$ oc patch sriovoperatorconfig default --type=merge \ - -n openshift-sriov-network-operator \ - --patch '{ "spec": { "disableDrain": true } }' ----- -+ -[TIP] -==== -You can alternatively apply the following YAML to update the Operator: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovOperatorConfig -metadata: - name: default - namespace: openshift-sriov-network-operator -spec: - disableDrain: true ----- -==== diff --git a/modules/nw-sriov-create-object.adoc b/modules/nw-sriov-create-object.adoc deleted file mode 100644 index 16aa95b9d685..000000000000 --- a/modules/nw-sriov-create-object.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: REFERENCE -[id="nw-sriov-create-object_{context}"] -= Example SR-IOV network operator - -The following is an example definition of an `sriovNetwork` object. In this case, Intel and Mellanox configurations are identical: -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: dpdk-network-1 - namespace: openshift-sriov-network-operator -spec: - ipam: '{"type": "host-local","ranges": [[{"subnet": "10.0.1.0/24"}]],"dataDir": - "/run/my-orchestrator/container-ipam-state-1"}' <1> - networkNamespace: dpdk-test <2> - spoofChk: "off" - trust: "on" - resourceName: dpdk_nic_1 <3> ---- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: dpdk-network-2 - namespace: openshift-sriov-network-operator -spec: - ipam: '{"type": "host-local","ranges": [[{"subnet": "10.0.2.0/24"}]],"dataDir": - "/run/my-orchestrator/container-ipam-state-1"}' - networkNamespace: dpdk-test - spoofChk: "off" - trust: "on" - resourceName: dpdk_nic_2 ----- -<1> You can use a different IP Address Management (IPAM) implementation, such as Whereabouts. For more information, see _Dynamic IP address assignment configuration with Whereabouts_. -<2> You must request the `networkNamespace` where the network attachment definition will be created. You must create the `sriovNetwork` CR under the `openshift-sriov-network-operator` namespace. 
-<3> The `resourceName` value must match that of the `resourceName` created under the `sriovNetworkNodePolicy`. diff --git a/modules/nw-sriov-device-discovery.adoc b/modules/nw-sriov-device-discovery.adoc deleted file mode 100644 index 3bed8380c191..000000000000 --- a/modules/nw-sriov-device-discovery.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc -// * virt/virtual_machines/vm_networking/virt-configuring-sriov-device-for-vms.adoc - -[id="discover-sr-iov-devices_{context}"] -= Automated discovery of SR-IOV network devices - -The SR-IOV Network Operator searches your cluster for SR-IOV capable network devices on worker nodes. -The Operator creates and updates a SriovNetworkNodeState custom resource (CR) for each worker node that provides a compatible SR-IOV network device. - -The CR is assigned the same name as the worker node. -The `status.interfaces` list provides information about the network devices on a node. - -[IMPORTANT] -==== -Do not modify a `SriovNetworkNodeState` object. -The Operator creates and manages these resources automatically. -==== - -[id="example-sriovnetworknodestate_{context}"] -== Example SriovNetworkNodeState object - -The following YAML is an example of a `SriovNetworkNodeState` object created by the SR-IOV Network Operator: - -.An SriovNetworkNodeState object -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodeState -metadata: - name: node-25 <1> - namespace: openshift-sriov-network-operator - ownerReferences: - - apiVersion: sriovnetwork.openshift.io/v1 - blockOwnerDeletion: true - controller: true - kind: SriovNetworkNodePolicy - name: default -spec: - dpConfigVersion: "39824" -status: - interfaces: <2> - - deviceID: "1017" - driver: mlx5_core - mtu: 1500 - name: ens785f0 - pciAddress: "0000:18:00.0" - totalvfs: 8 - vendor: 15b3 - - deviceID: "1017" - driver: mlx5_core - mtu: 1500 - name: ens785f1 - pciAddress: "0000:18:00.1" - totalvfs: 8 - vendor: 15b3 - - deviceID: 158b - driver: i40e - mtu: 1500 - name: ens817f0 - pciAddress: 0000:81:00.0 - totalvfs: 64 - vendor: "8086" - - deviceID: 158b - driver: i40e - mtu: 1500 - name: ens817f1 - pciAddress: 0000:81:00.1 - totalvfs: 64 - vendor: "8086" - - deviceID: 158b - driver: i40e - mtu: 1500 - name: ens803f0 - pciAddress: 0000:86:00.0 - totalvfs: 64 - vendor: "8086" - syncStatus: Succeeded ----- -<1> The value of the `name` field is the same as the name of the worker node. -<2> The `interfaces` stanza includes a list of all of the SR-IOV devices discovered by the Operator on the worker node. 
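As a hedged illustration of how this discovered data is typically used, the following `SriovNetworkNodePolicy` sketch selects the `ens817f0` interface reported in the example object above by copying its `vendor`, `deviceID`, and interface name into the `nicSelector`. The policy name, resource name, and `numVfs` value are illustrative only.

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: policy-ens817f0                  # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: intelnics                # illustrative resource name
  nodeSelector:
    feature.node.kubernetes.io/network-sriov.capable: "true"
  numVfs: 8                              # must not exceed the reported totalvfs (64)
  nicSelector:
    vendor: "8086"                       # values copied from status.interfaces
    deviceID: "158b"
    pfNames: ["ens817f0"]
  deviceType: netdevice
----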
diff --git a/modules/nw-sriov-dpdk-base-workload.adoc b/modules/nw-sriov-dpdk-base-workload.adoc deleted file mode 100644 index 0ab92f1e769c..000000000000 --- a/modules/nw-sriov-dpdk-base-workload.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: REFERENCE -[id="nw-sriov-dpdk-base-workload_{context}"] -= Example DPDK base workload - -The following is an example of a Data Plane Development Kit (DPDK) container: -[source,yaml] ----- -apiVersion: v1 -kind: Namespace -metadata: - name: dpdk-test ---- -apiVersion: v1 -kind: Pod -metadata: - annotations: - k8s.v1.cni.cncf.io/networks: '[ <1> - { - "name": "dpdk-network-1", - "namespace": "dpdk-test" - }, - { - "name": "dpdk-network-2", - "namespace": "dpdk-test" - } - ]' - irq-load-balancing.crio.io: "disable" <2> - cpu-load-balancing.crio.io: "disable" - cpu-quota.crio.io: "disable" - labels: - app: dpdk - name: testpmd - namespace: dpdk-test -spec: - runtimeClassName: performance-performance <3> - containers: - - command: - - /bin/bash - - -c - - sleep INF - image: registry.redhat.io/openshift4/dpdk-base-rhel8 - imagePullPolicy: Always - name: dpdk - resources: <4> - limits: - cpu: "16" - hugepages-1Gi: 8Gi - memory: 2Gi - requests: - cpu: "16" - hugepages-1Gi: 8Gi - memory: 2Gi - securityContext: - capabilities: - add: - - IPC_LOCK - - SYS_RESOURCE - - NET_RAW - - NET_ADMIN - runAsUser: 0 - volumeMounts: - - mountPath: /mnt/huge - name: hugepages - terminationGracePeriodSeconds: 5 - volumes: - - emptyDir: - medium: HugePages - name: hugepages ----- -<1> Request the SR-IOV networks you need. Resources for the devices will be injected automatically. -<2> Disable the CPU and IRQ load balancing base. See _Disabling interrupt processing for individual pods_ for more information. -<3> Set the `runtimeClass` to `performance-performance`. Do not set the `runtimeClass` to `HostNetwork` or `privileged`. -<4> Request an equal number of resources for requests and limits to start the pod with `Guaranteed` Quality of Service (QoS). - -[NOTE] -==== -Do not start the pod with `SLEEP` and then exec into the pod to start the testpmd or the DPDK workload. This can add additional interrupts as the `exec` process is not pinned to any CPU. -==== diff --git a/modules/nw-sriov-dpdk-example-intel.adoc b/modules/nw-sriov-dpdk-example-intel.adoc deleted file mode 100644 index 053d79ad4960..000000000000 --- a/modules/nw-sriov-dpdk-example-intel.adoc +++ /dev/null @@ -1,143 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: PROCEDURE -[id="example-vf-use-in-dpdk-mode-intel_{context}"] -= Using a virtual function in DPDK mode with an Intel NIC - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Install the SR-IOV Network Operator. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create the following `SriovNetworkNodePolicy` object, and then save the YAML in the `intel-dpdk-node-policy.yaml` file. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: intel-dpdk-node-policy - namespace: openshift-sriov-network-operator -spec: - resourceName: intelnics - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - priority: - numVfs: - nicSelector: - vendor: "8086" - deviceID: "158b" - pfNames: ["", ...] 
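    # Illustrative note: pfNames accepts physical function interface names
    # (for example, ens817f0) and rootDevices accepts PCI bus addresses in the
    # 0000:02:00.1 format; the empty quoted entries above and below are
    # placeholders to replace with values discovered on your nodes.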
- rootDevices: ["", "..."] - deviceType: vfio-pci <1> ----- -<1> Specify the driver type for the virtual functions to `vfio-pci`. -+ -[NOTE] -===== -See the `Configuring SR-IOV network devices` section for a detailed explanation on each option in `SriovNetworkNodePolicy`. - -When applying the configuration specified in a `SriovNetworkNodePolicy` object, the SR-IOV Operator may drain the nodes, and in some cases, reboot nodes. -It may take several minutes for a configuration change to apply. -Ensure that there are enough available nodes in your cluster to handle the evicted workload beforehand. - -After the configuration update is applied, all the pods in `openshift-sriov-network-operator` namespace will change to a `Running` status. -===== - -. Create the `SriovNetworkNodePolicy` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f intel-dpdk-node-policy.yaml ----- - -. Create the following `SriovNetwork` object, and then save the YAML in the `intel-dpdk-network.yaml` file. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: intel-dpdk-network - namespace: openshift-sriov-network-operator -spec: - networkNamespace: - ipam: |- -# ... <1> - vlan: - resourceName: intelnics ----- -<1> Specify a configuration object for the ipam CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. -+ -[NOTE] -===== -See the "Configuring SR-IOV additional network" section for a detailed explanation on each option in `SriovNetwork`. -===== -+ -An optional library, app-netutil, provides several API methods for gathering network information about a container's parent pod. - -. Create the `SriovNetwork` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f intel-dpdk-network.yaml ----- - -. Create the following `Pod` spec, and then save the YAML in the `intel-dpdk-pod.yaml` file. -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dpdk-app - namespace: <1> - annotations: - k8s.v1.cni.cncf.io/networks: intel-dpdk-network -spec: - containers: - - name: testpmd - image: <2> - securityContext: - runAsUser: 0 - capabilities: - add: ["IPC_LOCK","SYS_RESOURCE","NET_RAW"] <3> - volumeMounts: - - mountPath: /dev/hugepages <4> - name: hugepage - resources: - limits: - openshift.io/intelnics: "1" <5> - memory: "1Gi" - cpu: "4" <6> - hugepages-1Gi: "4Gi" <7> - requests: - openshift.io/intelnics: "1" - memory: "1Gi" - cpu: "4" - hugepages-1Gi: "4Gi" - command: ["sleep", "infinity"] - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> Specify the same `target_namespace` where the `SriovNetwork` object `intel-dpdk-network` is created. If you would like to create the pod in a different namespace, change `target_namespace` in both the `Pod` spec and the `SriovNetwork` object. -<2> Specify the DPDK image which includes your application and the DPDK library used by application. -<3> Specify additional capabilities required by the application inside the container for hugepage allocation, system resource allocation, and network interface access. -<4> Mount a hugepage volume to the DPDK pod under `/dev/hugepages`. The hugepage volume is backed by the emptyDir volume type with the medium being `Hugepages`. -<5> Optional: Specify the number of DPDK devices allocated to DPDK pod. This resource request and limit, if not explicitly specified, will be automatically added by the SR-IOV network resource injector. 
The SR-IOV network resource injector is an admission controller component managed by the SR-IOV Operator. It is enabled by default and can be disabled by setting `enableInjector` option to `false` in the default `SriovOperatorConfig` CR. -<6> Specify the number of CPUs. The DPDK pod usually requires exclusive CPUs to be allocated from the kubelet. This is achieved by setting CPU Manager policy to `static` and creating a pod with `Guaranteed` QoS. -<7> Specify hugepage size `hugepages-1Gi` or `hugepages-2Mi` and the quantity of hugepages that will be allocated to the DPDK pod. Configure `2Mi` and `1Gi` hugepages separately. Configuring `1Gi` hugepage requires adding kernel arguments to Nodes. For example, adding kernel arguments `default_hugepagesz=1GB`, `hugepagesz=1G` and `hugepages=16` will result in `16*1Gi` hugepages be allocated during system boot. - -. Create the DPDK pod by running the following command: -+ -[source,terminal] ----- -$ oc create -f intel-dpdk-pod.yaml ----- diff --git a/modules/nw-sriov-dpdk-example-mellanox.adoc b/modules/nw-sriov-dpdk-example-mellanox.adoc deleted file mode 100644 index 0ccf1d350e4f..000000000000 --- a/modules/nw-sriov-dpdk-example-mellanox.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: PROCEDURE -[id="example-vf-use-in-dpdk-mode-mellanox_{context}"] -= Using a virtual function in DPDK mode with a Mellanox NIC - -You can create a network node policy and create a Data Plane Development Kit (DPDK) pod using a virtual function in DPDK mode with a Mellanox NIC. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have installed the Single Root I/O Virtualization (SR-IOV) Network Operator. -* You have logged in as a user with `cluster-admin` privileges. - -.Procedure - -. Save the following `SriovNetworkNodePolicy` YAML configuration to an `mlx-dpdk-node-policy.yaml` file: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: mlx-dpdk-node-policy - namespace: openshift-sriov-network-operator -spec: - resourceName: mlxnics - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - priority: - numVfs: - nicSelector: - vendor: "15b3" - deviceID: "1015" <1> - pfNames: ["", ...] - rootDevices: ["", "..."] - deviceType: netdevice <2> - isRdma: true <3> ----- -<1> Specify the device hex code of the SR-IOV network device. -<2> Specify the driver type for the virtual functions to `netdevice`. A Mellanox SR-IOV Virtual Function (VF) can work in DPDK mode without using the `vfio-pci` device type. The VF device appears as a kernel network interface inside a container. -<3> Enable Remote Direct Memory Access (RDMA) mode. This is required for Mellanox cards to work in DPDK mode. -+ -[NOTE] -===== -See _Configuring an SR-IOV network device_ for a detailed explanation of each option in the `SriovNetworkNodePolicy` object. - -When applying the configuration specified in an `SriovNetworkNodePolicy` object, the SR-IOV Operator might drain the nodes, and in some cases, reboot nodes. -It might take several minutes for a configuration change to apply. -Ensure that there are enough available nodes in your cluster to handle the evicted workload beforehand. - -After the configuration update is applied, all the pods in the `openshift-sriov-network-operator` namespace will change to a `Running` status. -===== - -. 
Create the `SriovNetworkNodePolicy` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f mlx-dpdk-node-policy.yaml ----- - -. Save the following `SriovNetwork` YAML configuration to an `mlx-dpdk-network.yaml` file: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: mlx-dpdk-network - namespace: openshift-sriov-network-operator -spec: - networkNamespace: - ipam: |- <1> -... - vlan: - resourceName: mlxnics ----- -<1> Specify a configuration object for the IP Address Management (IPAM) Container Network Interface (CNI) plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. -+ -[NOTE] -===== -See _Configuring an SR-IOV network device_ for a detailed explanation on each option in the `SriovNetwork` object. -===== -+ -The `app-netutil` option library provides several API methods for gathering network information about the parent pod of a container. - -. Create the `SriovNetwork` object by running the following command: -+ -[source,terminal] ----- -$ oc create -f mlx-dpdk-network.yaml ----- -. Save the following `Pod` YAML configuration to an `mlx-dpdk-pod.yaml` file: - -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dpdk-app - namespace: <1> - annotations: - k8s.v1.cni.cncf.io/networks: mlx-dpdk-network -spec: - containers: - - name: testpmd - image: <2> - securityContext: - runAsUser: 0 - capabilities: - add: ["IPC_LOCK","SYS_RESOURCE","NET_RAW"] <3> - volumeMounts: - - mountPath: /dev/hugepages <4> - name: hugepage - resources: - limits: - openshift.io/mlxnics: "1" <5> - memory: "1Gi" - cpu: "4" <6> - hugepages-1Gi: "4Gi" <7> - requests: - openshift.io/mlxnics: "1" - memory: "1Gi" - cpu: "4" - hugepages-1Gi: "4Gi" - command: ["sleep", "infinity"] - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- -<1> Specify the same `target_namespace` where `SriovNetwork` object `mlx-dpdk-network` is created. To create the pod in a different namespace, change `target_namespace` in both the `Pod` spec and `SriovNetwork` object. -<2> Specify the DPDK image which includes your application and the DPDK library used by the application. -<3> Specify additional capabilities required by the application inside the container for hugepage allocation, system resource allocation, and network interface access. -<4> Mount the hugepage volume to the DPDK pod under `/dev/hugepages`. The hugepage volume is backed by the `emptyDir` volume type with the medium being `Hugepages`. -<5> Optional: Specify the number of DPDK devices allocated for the DPDK pod. If not explicitly specified, this resource request and limit is automatically added by the SR-IOV network resource injector. The SR-IOV network resource injector is an admission controller component managed by SR-IOV Operator. It is enabled by default and can be disabled by setting the `enableInjector` option to `false` in the default `SriovOperatorConfig` CR. -<6> Specify the number of CPUs. The DPDK pod usually requires that exclusive CPUs be allocated from the kubelet. To do this, set the CPU Manager policy to `static` and create a pod with `Guaranteed` Quality of Service (QoS). -<7> Specify hugepage size `hugepages-1Gi` or `hugepages-2Mi` and the quantity of hugepages that will be allocated to the DPDK pod. Configure `2Mi` and `1Gi` hugepages separately. Configuring `1Gi` hugepages requires adding kernel arguments to Nodes. - -. 
Create the DPDK pod by running the following command: -+ -[source,terminal] ----- -$ oc create -f mlx-dpdk-pod.yaml ----- diff --git a/modules/nw-sriov-dpdk-running-testpmd.adoc b/modules/nw-sriov-dpdk-running-testpmd.adoc deleted file mode 100644 index 8f01f461904a..000000000000 --- a/modules/nw-sriov-dpdk-running-testpmd.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: REFERENCE -[id="nw-sriov-dpdk-running-testpmd_{context}"] -= Example testpmd script - -The following is an example script for running `testpmd`: - -[source,terminal] ----- -#!/bin/bash -set -ex -export CPU=$(cat /sys/fs/cgroup/cpuset/cpuset.cpus) -echo ${CPU} - -dpdk-testpmd -l ${CPU} -a ${PCIDEVICE_OPENSHIFT_IO_DPDK_NIC_1} -a ${PCIDEVICE_OPENSHIFT_IO_DPDK_NIC_2} -n 4 -- -i --nb-cores=15 --rxd=4096 --txd=4096 --rxq=7 --txq=7 --forward-mode=mac --eth-peer=0,50:00:00:00:00:01 --eth-peer=1,50:00:00:00:00:02 ----- -This example uses two different `SriovNetwork` CRs. Each environment variable contains the Virtual Function (VF) PCI address that was allocated for the pod. If you use the same network in the pod definition, you must split the `pciAddress`. -It is important to configure the correct MAC addresses of the traffic generator. This example uses custom MAC addresses. diff --git a/modules/nw-sriov-dual-nic-con.adoc b/modules/nw-sriov-dual-nic-con.adoc deleted file mode 100644 index 98bf49fc615e..000000000000 --- a/modules/nw-sriov-dual-nic-con.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: CONCEPT -[id="nw-sriov-dual-nic-con_{context}"] -= NIC partitioning for SR-IOV devices (Technology Preview) - -{product-title} can be deployed on a server with a dual port network interface card (NIC). -You can partition a single, high-speed dual port NIC into multiple virtual functions (VFs) and enable SR-IOV. - -[NOTE] -==== -Currently, it is not possible to assign virtual functions (VFs) to system services such as OVN-Kubernetes and also assign other VFs created from the same physical function (PF) to pods connected to the SR-IOV Network Operator. -==== - -This feature supports the use of bonds for high availability with the Link Aggregation Control Protocol (LACP). - -[NOTE] -==== -Only one LACP can be declared per physical NIC. 
-==== - -An {product-title} cluster can be deployed on a bond interface with 2 VFs on 2 physical functions (PFs) using the following methods: - -* Agent-based installer -+ -[NOTE] -==== -The minimum required version of `nmstate` is: - -* `1.4.2-4` for RHEL 8 versions -* `2.2.7` for RHEL 9 versions -==== - -* Installer-provisioned infrastructure installation -* User-provisioned infrastructure installation - -:FeatureName: Support for Day 1 operations associated with enabling NIC partitioning for SR-IOV devices -include::snippets/technology-preview.adoc[leveloffset=+1] \ No newline at end of file diff --git a/modules/nw-sriov-example-dpdk-line-rate.adoc b/modules/nw-sriov-example-dpdk-line-rate.adoc deleted file mode 100644 index f4944d09d551..000000000000 --- a/modules/nw-sriov-example-dpdk-line-rate.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: PROCEDURE -[id="nw-example-dpdk-line-rate_{context}"] -= Using SR-IOV and the Node Tuning Operator to achieve a DPDK line rate -You can use the Node Tuning Operator to configure isolated CPUs, hugepages, and a topology scheduler. -You can then use the Node Tuning Operator with Single Root I/O Virtualization (SR-IOV) to achieve a specific Data Plane Development Kit (DPDK) line rate. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have installed the SR-IOV Network Operator. -* You have logged in as a user with `cluster-admin` privileges. -* You have deployed a standalone Node Tuning Operator. -+ -[NOTE] -==== -In previous versions of {product-title}, the Performance Addon Operator was used to implement automatic tuning to achieve low latency performance for OpenShift applications. In {product-title} 4.11 and later, this functionality is part of the Node Tuning Operator. -==== - -.Procedure -. Create a `PerformanceProfile` object based on the following example: -+ -[source,yaml] ----- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - globallyDisableIrqLoadBalancing: true - cpu: - isolated: 21-51,73-103 <1> - reserved: 0-20,52-72 <2> - hugepages: - defaultHugepagesSize: 1G <3> - pages: - - count: 32 - size: 1G - net: - userLevelNetworking: true - numa: - topologyPolicy: "single-numa-node" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" ----- -<1> If hyperthreading is enabled on the system, allocate the relevant symbolic links to the `isolated` and `reserved` CPU groups. If the system contains multiple non-uniform memory access nodes (NUMAs), allocate CPUs from both NUMAs to both groups. You can also use the Performance Profile Creator for this task. For more information, see _Creating a performance profile_. -<2> You can also specify a list of devices that will have their queues set to the reserved CPU count. For more information, see _Reducing NIC queues using the Node Tuning Operator_. -<3> Allocate the number and size of hugepages needed. You can specify the NUMA configuration for the hugepages. By default, the system allocates an even number to every NUMA node on the system. If needed, you can request the use of a realtime kernel for the nodes. See _Provisioning a worker with real-time capabilities_ for more information. -. Save the `yaml` file as `mlx-dpdk-perfprofile-policy.yaml`. -. 
Apply the performance profile using the following command: -+ -[source,terminal] ----- -$ oc create -f mlx-dpdk-perfprofile-policy.yaml ----- diff --git a/modules/nw-sriov-example-vf-function-in-pod.adoc b/modules/nw-sriov-example-vf-function-in-pod.adoc deleted file mode 100644 index ef82f935b7ac..000000000000 --- a/modules/nw-sriov-example-vf-function-in-pod.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc - -[id="example-vf-use-in-pod_{context}"] -= Example use of a virtual function in a pod - -You can run a remote direct memory access (RDMA) or a Data Plane Development Kit (DPDK) application in a pod with SR-IOV VF attached. - -This example shows a pod using a virtual function (VF) in RDMA mode: - -.`Pod` spec that uses RDMA mode -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: rdma-app - annotations: - k8s.v1.cni.cncf.io/networks: sriov-rdma-mlnx -spec: - containers: - - name: testpmd - image: - imagePullPolicy: IfNotPresent - securityContext: - runAsUser: 0 - capabilities: - add: ["IPC_LOCK","SYS_RESOURCE","NET_RAW"] - command: ["sleep", "infinity"] ----- - -The following example shows a pod with a VF in DPDK mode: - -.`Pod` spec that uses DPDK mode -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: dpdk-app - annotations: - k8s.v1.cni.cncf.io/networks: sriov-dpdk-net -spec: - containers: - - name: testpmd - image: - securityContext: - runAsUser: 0 - capabilities: - add: ["IPC_LOCK","SYS_RESOURCE","NET_RAW"] - volumeMounts: - - mountPath: /dev/hugepages - name: hugepage - resources: - limits: - memory: "1Gi" - cpu: "2" - hugepages-1Gi: "4Gi" - requests: - memory: "1Gi" - cpu: "2" - hugepages-1Gi: "4Gi" - command: ["sleep", "infinity"] - volumes: - - name: hugepage - emptyDir: - medium: HugePages ----- diff --git a/modules/nw-sriov-exclude-topology-manager.adoc b/modules/nw-sriov-exclude-topology-manager.adoc deleted file mode 100644 index 891aaa4c33c6..000000000000 --- a/modules/nw-sriov-exclude-topology-manager.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: CONCEPT -[id="nw-sriov-exclude-topology-manager_{context}"] -= Exclude the SR-IOV network topology for NUMA-aware scheduling - -You can exclude advertising the Non-Uniform Memory Access (NUMA) node for the SR-IOV network to the Topology Manager for more flexible SR-IOV network deployments during NUMA-aware pod scheduling. - -In some scenarios, it is a priority to maximize CPU and memory resources for a pod on a single NUMA node. By not providing a hint to the Topology Manager about the NUMA node for the pod's SR-IOV network resource, the Topology Manager can deploy the SR-IOV network resource and the pod CPU and memory resources to different NUMA nodes. This can add to network latency because of the data transfer between NUMA nodes. However, it is acceptable in scenarios when workloads require optimal CPU and memory performance. - -For example, consider a compute node, `compute-1`, that features two NUMA nodes: `numa0` and `numa1`. The SR-IOV-enabled NIC is present on `numa0`. The CPUs available for pod scheduling are present on `numa1` only. By setting the `excludeTopology` specification to `true`, the Topology Manager can assign CPU and memory resources for the pod to `numa1` and can assign the SR-IOV network resource for the same pod to `numa0`. 
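For illustration only, a `SriovNetworkNodePolicy` that opts an SR-IOV resource out of Topology Manager alignment might look like the following sketch. The policy name, resource name, VF count, and NIC selector are hypothetical placeholders; only the `excludeTopology` field and the `compute-1` node from the scenario above are taken from this description:

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: sriov-numa-0-policy                  # hypothetical policy name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: sriovnuma0                   # hypothetical resource name
  nodeSelector:
    kubernetes.io/hostname: compute-1        # the example compute node described above
  numVfs: 6                                  # hypothetical VF count
  nicSelector:
    pfNames: ["ens5f0"]                      # hypothetical physical function name
  deviceType: netdevice
  excludeTopology: true                      # do not advertise this resource's NUMA node to the Topology Manager
----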
This is only possible when you set the `excludeTopology` specification to `true`. Otherwise, the Topology Manager attempts to place all resources on the same NUMA node. diff --git a/modules/nw-sriov-huge-pages.adoc b/modules/nw-sriov-huge-pages.adoc deleted file mode 100644 index 06913a25882b..000000000000 --- a/modules/nw-sriov-huge-pages.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc - -[id="nw-sriov-hugepages_{context}"] -= Huge pages resource injection for Downward API - -When a pod specification includes a resource request or limit for huge pages, the Network Resources Injector automatically adds Downward API fields to the pod specification to provide the huge pages information to the container. - -The Network Resources Injector adds a volume that is named `podnetinfo` and is mounted at `/etc/podnetinfo` for each container in the pod. The volume uses the Downward API and includes a file for huge pages requests and limits. The file naming convention is as follows: - -* `/etc/podnetinfo/hugepages_1G_request_` -* `/etc/podnetinfo/hugepages_1G_limit_` -* `/etc/podnetinfo/hugepages_2M_request_` -* `/etc/podnetinfo/hugepages_2M_limit_` - -The paths specified in the previous list are compatible with the `app-netutil` library. By default, the library is configured to search for resource information in the `/etc/podnetinfo` directory. If you choose to specify the Downward API path items yourself manually, the `app-netutil` library searches for the following paths in addition to the paths in the previous list. - -* `/etc/podnetinfo/hugepages_request` -* `/etc/podnetinfo/hugepages_limit` -* `/etc/podnetinfo/hugepages_1G_request` -* `/etc/podnetinfo/hugepages_1G_limit` -* `/etc/podnetinfo/hugepages_2M_request` -* `/etc/podnetinfo/hugepages_2M_limit` - -As with the paths that the Network Resources Injector can create, the paths in the preceding list can optionally end with a `_` suffix. diff --git a/modules/nw-sriov-hwol-about-hardware-offloading.adoc b/modules/nw-sriov-hwol-about-hardware-offloading.adoc deleted file mode 100644 index 4989903ba665..000000000000 --- a/modules/nw-sriov-hwol-about-hardware-offloading.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: CONCEPT -[id="about-hardware-offloading_{context}"] -= About hardware offloading - -Open vSwitch hardware offloading is a method of processing network tasks by diverting them away from the CPU and offloading them to a dedicated processor on a network interface controller. -As a result, clusters can benefit from faster data transfer speeds, reduced CPU workloads, and lower computing costs. - -The key element for this feature is a modern class of network interface controllers known as SmartNICs. -A SmartNIC is a network interface controller that is able to handle computationally-heavy network processing tasks. -In the same way that a dedicated graphics card can improve graphics performance, a SmartNIC can improve network performance. -In each case, a dedicated processor improves performance for a specific type of processing task. - -In {product-title}, you can configure hardware offloading for bare metal nodes that have a compatible SmartNIC. -Hardware offloading is configured and enabled by the SR-IOV Network Operator. - -Hardware offloading is not compatible with all workloads or application types. 
-Only the following two communication types are supported: - -* pod-to-pod -* pod-to-service, where the service is a ClusterIP service backed by a regular pod - -In all cases, hardware offloading takes place only when those pods and services are assigned to nodes that have a compatible SmartNIC. -Suppose, for example, that a pod on a node with hardware offloading tries to communicate with a service on a regular node. -On the regular node, all the processing takes place in the kernel, so the overall performance of the pod-to-service communication is limited to the maximum performance of that regular node. -Hardware offloading is not compatible with DPDK applications. - -Enabling hardware offloading on a node, but not configuring pods to use it, can result in decreased throughput performance for pod traffic. You cannot configure hardware offloading for pods that are managed by {product-title}. \ No newline at end of file diff --git a/modules/nw-sriov-hwol-adding-network-attachment-definitions-to-pods.adoc b/modules/nw-sriov-hwol-adding-network-attachment-definitions-to-pods.adoc deleted file mode 100644 index ad6f68c0c61e..000000000000 --- a/modules/nw-sriov-hwol-adding-network-attachment-definitions-to-pods.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: PROCEDURE -[id="adding-network-attachment-definition-to-pods_{context}"] -= Adding the network attachment definition to your pods - -After you create the machine config pool, the `SriovNetworkPoolConfig` and `SriovNetworkNodePolicy` custom resources, and the network attachment definition, you can apply these configurations to your pods by adding the network attachment definition to your pod specifications. - -.Procedure - -* In the pod specification, add the `.metadata.annotations.k8s.v1.cni.cncf.io/networks` field and specify the network attachment definition you created for hardware offloading: -+ -[source,yaml] ----- -.... -metadata: - annotations: - v1.multus-cni.io/default-network: net-attach-def/net-attach-def <.> ----- -<.> The value must be the name and namespace of the network attachment definition you created for hardware offloading. \ No newline at end of file diff --git a/modules/nw-sriov-hwol-configuring-machine-config-pool.adoc b/modules/nw-sriov-hwol-configuring-machine-config-pool.adoc deleted file mode 100644 index 639e56cb8175..000000000000 --- a/modules/nw-sriov-hwol-configuring-machine-config-pool.adoc +++ /dev/null @@ -1,104 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: PROCEDURE -[id="configuring-machine-config-pool_{context}"] -= Configuring a machine config pool for hardware offloading - -To enable hardware offloading, you must first create a dedicated machine config pool and configure it to work with the SR-IOV Network Operator. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Create a machine config pool for machines you want to use hardware offloading on. - -.. 
Create a file, such as `mcp-offloading.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: mcp-offloading <1> -spec: - machineConfigSelector: - matchExpressions: - - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,mcp-offloading]} <1> - nodeSelector: - matchLabels: - node-role.kubernetes.io/mcp-offloading: "" <2> ----- -<1> The name of your machine config pool for hardware offloading. -<2> This node role label is used to add nodes to the machine config pool. - -.. Apply the configuration for the machine config pool: -+ -[source,terminal] ----- -$ oc create -f mcp-offloading.yaml ----- - -. Add nodes to the machine config pool. Label each node with the node role label of your pool: -+ -[source,terminal] ----- -$ oc label node worker-2 node-role.kubernetes.io/mcp-offloading="" ----- - -. Optional: To verify that the new pool is created, run the following command: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ --- -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -master-0 Ready master 2d v1.26.0 -master-1 Ready master 2d v1.26.0 -master-2 Ready master 2d v1.26.0 -worker-0 Ready worker 2d v1.26.0 -worker-1 Ready worker 2d v1.26.0 -worker-2 Ready mcp-offloading,worker 47h v1.26.0 -worker-3 Ready mcp-offloading,worker 47h v1.26.0 ----- --- - -. Add this machine config pool to the `SriovNetworkPoolConfig` custom resource: - -.. Create a file, such as `sriov-pool-config.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkPoolConfig -metadata: - name: sriovnetworkpoolconfig-offload - namespace: openshift-sriov-network-operator -spec: - ovsHardwareOffloadConfig: - name: mcp-offloading <1> ----- -<1> The name of your machine config pool for hardware offloading. - -.. Apply the configuration: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -[NOTE] -===== -When you apply the configuration specified in a `SriovNetworkPoolConfig` object, the SR-IOV Operator drains and restarts the nodes in the machine config pool. - -It might take several minutes for a configuration changes to apply. -===== diff --git a/modules/nw-sriov-hwol-creating-network-attachment-definition.adoc b/modules/nw-sriov-hwol-creating-network-attachment-definition.adoc deleted file mode 100644 index 013856ed5644..000000000000 --- a/modules/nw-sriov-hwol-creating-network-attachment-definition.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: PROCEDURE -[id="create-network-attachment-definition_{context}"] -= Creating a network attachment definition - -After you define the machine config pool and the SR-IOV network node policy, you can create a network attachment definition for the network interface card you specified. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. 
Create a file, such as `net-attach-def.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: net-attach-def <.> - namespace: net-attach-def <.> - annotations: - k8s.v1.cni.cncf.io/resourceName: openshift.io/mlxnics <.> -spec: - config: '{"cniVersion":"0.3.1","name":"ovn-kubernetes","type":"ovn-k8s-cni-overlay","ipam":{},"dns":{}}' ----- -<.> The name for your network attachment definition. -<.> The namespace for your network attachment definition. -<.> This is the value of the `spec.resourceName` field you specified in the `SriovNetworkNodePolicy` object. - -. Apply the configuration for the network attachment definition: -+ -[source,terminal] ----- -$ oc create -f net-attach-def.yaml ----- - -.Verification - -* Run the following command to see whether the new definition is present: -+ -[source,terminal] ----- -$ oc get net-attach-def -A ----- -+ -.Example output -[source,terminal] ----- -NAMESPACE NAME AGE -net-attach-def net-attach-def 43h ----- diff --git a/modules/nw-sriov-hwol-creating-sriov-policy.adoc b/modules/nw-sriov-hwol-creating-sriov-policy.adoc deleted file mode 100644 index 3acf04825546..000000000000 --- a/modules/nw-sriov-hwol-creating-sriov-policy.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: PROCEDURE -[id="configure-sriov-node-policy_{context}"] -= Configuring the SR-IOV network node policy - -You can create an SR-IOV network device configuration for a node by creating an SR-IOV network node policy. -To enable hardware offloading, you must define the `.spec.eSwitchMode` field with the value `"switchdev"`. - -The following procedure creates an SR-IOV interface for a network interface controller with hardware offloading. - -.Prerequisites - -* You installed the OpenShift CLI (`oc`). -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Create a file, such as `sriov-node-policy.yaml`, with content like the following example: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: sriov-node-policy <.> - namespace: openshift-sriov-network-operator -spec: - deviceType: netdevice <.> - eSwitchMode: "switchdev" <.> - nicSelector: - deviceID: "1019" - rootDevices: - - 0000:d8:00.0 - vendor: "15b3" - pfNames: - - ens8f0 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 6 - priority: 5 - resourceName: mlxnics ----- -<.> The name for the custom resource object. -<.> Required. Hardware offloading is not supported with `vfio-pci`. -<.> Required. - -. Apply the configuration for the policy: -+ -[source,terminal] ----- -$ oc create -f sriov-node-policy.yaml ----- -+ -[NOTE] -===== -When you apply the configuration specified in a `SriovNetworkPoolConfig` object, the SR-IOV Operator drains and restarts the nodes in the machine config pool. - -It might take several minutes for a configuration change to apply. 
-===== diff --git a/modules/nw-sriov-hwol-ref-openstack-sriov-policy.adoc b/modules/nw-sriov-hwol-ref-openstack-sriov-policy.adoc deleted file mode 100644 index c0c04fb8dabf..000000000000 --- a/modules/nw-sriov-hwol-ref-openstack-sriov-policy.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: PROCEDURE -[id="nw-sriov-hwol-ref-openstack-sriov-policy_{context}"] -= An example SR-IOV network node policy for OpenStack - -The following example describes an SR-IOV interface for a network interface controller (NIC) with hardware offloading on {rh-openstack-first}. - -.An SR-IOV interface for a NIC with hardware offloading on {rh-openstack} -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: ${name} - namespace: openshift-sriov-network-operator -spec: - deviceType: switchdev - isRdma: true - nicSelector: - netFilter: openstack/NetworkID:${net_id} - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: 'true' - numVfs: 1 - priority: 99 - resourceName: ${name} ----- \ No newline at end of file diff --git a/modules/nw-sriov-hwol-supported-devices.adoc b/modules/nw-sriov-hwol-supported-devices.adoc deleted file mode 100644 index 1999212c8080..000000000000 --- a/modules/nw-sriov-hwol-supported-devices.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-hardware-offloading.adoc - -:_content-type: REFERENCE -[id="supported_devices_{context}"] -= Supported devices - -Hardware offloading is supported on the following network interface controllers: - -.Supported network interface controllers -[cols="1,2,1,1"] -|=== -|Manufacturer |Model |Vendor ID | Device ID - -|Mellanox -|MT27800 Family [ConnectX‑5] -|15b3 -|1017 - -|Mellanox -|MT28880 Family [ConnectX‑5{nbsp}Ex] -|15b3 -|1019 - -|Mellanox -|MT2892 Family [ConnectX‑6 Dx] -|15b3 -|101d -|=== - -.Technology Preview network interface controllers -[cols="1,2,1,1"] -|=== -|Manufacturer |Model |Vendor ID | Device ID - -|Mellanox -|MT2894 Family [ConnectX-6 Lx] -|15b3 -|101f - -|Mellanox -|MT42822 BlueField-2 in ConnectX-6 NIC mode -|15b3 -|a2d6 -|=== - -:FeatureName: Using a ConnectX-6 Lx or BlueField-2 in ConnectX-6 NIC mode device -include::snippets/technology-preview.adoc[] diff --git a/modules/nw-sriov-ibnetwork-object.adoc b/modules/nw-sriov-ibnetwork-object.adoc deleted file mode 100644 index 14127e07c5f8..000000000000 --- a/modules/nw-sriov-ibnetwork-object.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-ib-attach.adoc - -[id="nw-sriov-ibnetwork-object_{context}"] -= InfiniBand device configuration object - -You can configure an InfiniBand (IB) network device by defining an `SriovIBNetwork` object. - -The following YAML describes an `SriovIBNetwork` object: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovIBNetwork -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: <3> - networkNamespace: <4> - ipam: |- <5> - {} - linkState: <6> - capabilities: <7> ----- -<1> A name for the object. The SR-IOV Network Operator creates a `NetworkAttachmentDefinition` object with same name. - -<2> The namespace where the SR-IOV Operator is installed. 
- -<3> The value for the `spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. - -<4> The target namespace for the `SriovIBNetwork` object. Only pods in the target namespace can attach to the network device. - -<5> Optional: A configuration object for the IPAM CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. - -<6> Optional: The link state of virtual function (VF). Allowed values are `enable`, `disable` and `auto`. - -<7> Optional: The capabilities to configure for this network. You can specify `"{ "ips": true }"` to enable IP address support or `"{ "infinibandGUID": true }"` to enable IB Global Unique Identifier (GUID) support. diff --git a/modules/nw-sriov-installing-operator.adoc b/modules/nw-sriov-installing-operator.adoc deleted file mode 100644 index aea6af35b4c8..000000000000 --- a/modules/nw-sriov-installing-operator.adoc +++ /dev/null @@ -1,150 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/installing-sriov-operator.adoc - -:_content-type: PROCEDURE -[id="installing-sr-iov-operator_{context}"] -= Installing SR-IOV Network Operator - -As a cluster administrator, you can install the SR-IOV Network Operator by using the {product-title} CLI or the web console. - -[id="install-operator-cli_{context}"] -== CLI: Installing the SR-IOV Network Operator - -As a cluster administrator, you can install the Operator using the CLI. - -.Prerequisites - -* A cluster installed on bare-metal hardware with nodes that have hardware that supports SR-IOV. -* Install the OpenShift CLI (`oc`). -* An account with `cluster-admin` privileges. - -.Procedure - -. To create the `openshift-sriov-network-operator` namespace, enter the following command: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: v1 -kind: Namespace -metadata: - name: openshift-sriov-network-operator - annotations: - workload.openshift.io/allowed: management -EOF ----- - -. To create an OperatorGroup CR, enter the following command: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: sriov-network-operators - namespace: openshift-sriov-network-operator -spec: - targetNamespaces: - - openshift-sriov-network-operator -EOF ----- - -. Subscribe to the SR-IOV Network Operator. - -.. Run the following command to get the {product-title} major and minor version. It is required for the `channel` value in the next -step. -+ -[source,terminal] ----- -$ OC_VERSION=$(oc version -o yaml | grep openshiftVersion | \ - grep -o '[0-9]*[.][0-9]*' | head -1) ----- - -.. To create a Subscription CR for the SR-IOV Network Operator, enter the following command: -+ -[source,terminal] ----- -$ cat << EOF| oc create -f - -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: sriov-network-operator-subscription - namespace: openshift-sriov-network-operator -spec: - channel: "${OC_VERSION}" - name: sriov-network-operator - source: redhat-operators - sourceNamespace: openshift-marketplace -EOF ----- - -. 
To verify that the Operator is installed, enter the following command: -+ -[source,terminal] ----- -$ oc get csv -n openshift-sriov-network-operator \ - -o custom-columns=Name:.metadata.name,Phase:.status.phase ----- -+ -.Example output -[source,terminal] ----- -Name Phase -sriov-network-operator.4.13.0-202310121402 Succeeded ----- - -[id="install-operator-web-console_{context}"] -== Web console: Installing the SR-IOV Network Operator - -As a cluster administrator, you can install the Operator using the web console. - -.Prerequisites - -* A cluster installed on bare-metal hardware with nodes that have hardware that supports SR-IOV. -* Install the OpenShift CLI (`oc`). -* An account with `cluster-admin` privileges. - -.Procedure - - -. Install the SR-IOV Network Operator: - -.. In the {product-title} web console, click *Operators* -> *OperatorHub*. - -.. Select *SR-IOV Network Operator* from the list of available Operators, and then click *Install*. - -.. On the *Install Operator* page, under *Installed Namespace*, select *Operator recommended Namespace*. - -.. Click *Install*. - -. Verify that the SR-IOV Network Operator is installed successfully: - -.. Navigate to the *Operators* -> *Installed Operators* page. - -.. Ensure that *SR-IOV Network Operator* is listed in the *openshift-sriov-network-operator* project with a *Status* of *InstallSucceeded*. -+ -[NOTE] -==== -During installation an Operator might display a *Failed* status. -If the installation later succeeds with an *InstallSucceeded* message, you can ignore the *Failed* message. -==== - -+ -If the Operator does not appear as installed, to troubleshoot further: - -+ -* Inspect the *Operator Subscriptions* and *Install Plans* tabs for any failure or errors under *Status*. -* Navigate to the *Workloads* -> *Pods* page and check the logs for pods in the `openshift-sriov-network-operator` project. -* Check the namespace of the YAML file. If the annotation is missing, you can add the annotation `workload.openshift.io/allowed=management` to the Operator namespace with the following command: -+ -[source,terminal] ----- -$ oc annotate ns/openshift-sriov-network-operator workload.openshift.io/allowed=management ----- -+ -[NOTE] -==== -For {sno} clusters, the annotation `workload.openshift.io/allowed=management` is required for the namespace. -==== diff --git a/modules/nw-sriov-interface-level-sysctl-basic-node-policy.adoc b/modules/nw-sriov-interface-level-sysctl-basic-node-policy.adoc deleted file mode 100644 index a93feb2806c2..000000000000 --- a/modules/nw-sriov-interface-level-sysctl-basic-node-policy.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-interface-sysctl-sriov-device.adoc - -:_content-type: PROCEDURE -[id="nw-basic-example-setting-one-sysctl-flag-node-policy_{context}"] -= Setting one sysctl flag on nodes with SR-IOV network devices - -The SR-IOV Network Operator adds the `SriovNetworkNodePolicy.sriovnetwork.openshift.io` custom resource definition (CRD) to {product-title}. You can configure an SR-IOV network device by creating a `SriovNetworkNodePolicy` custom resource (CR). - -[NOTE] -==== -When applying the configuration specified in a `SriovNetworkNodePolicy` object, the SR-IOV Operator might drain and reboot the nodes. - -It can take several minutes for a configuration change to apply. -==== - -Follow this procedure to create a `SriovNetworkNodePolicy` custom resource (CR). - -.Procedure - -. 
Create an `SriovNetworkNodePolicy` custom resource (CR). For example, save the following YAML as the file `policyoneflag-sriov-node-network.yaml`: -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policyoneflag <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: policyoneflag <3> - nodeSelector: <4> - feature.node.kubernetes.io/network-sriov.capable="true" - priority: 10 <5> - numVfs: 5 <6> - nicSelector: <7> - pfNames: ["ens5"] <8> - deviceType: "netdevice" <9> - isRdma: false <10> ----- -+ -<1> The name for the custom resource object. -<2> The namespace where the SR-IOV Network Operator is installed. -<3> The resource name of the SR-IOV network device plugin. You can create multiple SR-IOV network node policies for a resource name. -<4> The node selector specifies the nodes to configure. Only SR-IOV network devices on the selected nodes are configured. The SR-IOV Container Network Interface (CNI) plugin and device plugin are deployed on selected nodes only. -<5> Optional: The priority is an integer value between `0` and `99`. A smaller value receives higher priority. For example, a priority of `10` is a higher priority than `99`. The default value is `99`. -<6> The number of the virtual functions (VFs) to create for the SR-IOV physical network device. For an Intel network interface controller (NIC), the number of VFs cannot be larger than the total VFs supported by the device. For a Mellanox NIC, the number of VFs cannot be larger than `128`. -<7> The NIC selector identifies the device for the Operator to configure. You do not have to specify values for all the parameters. It is recommended to identify the network device with enough precision to avoid selecting a device unintentionally. -If you specify `rootDevices`, you must also specify a value for `vendor`, `deviceID`, or `pfNames`. If you specify both `pfNames` and `rootDevices` at the same time, ensure that they refer to the same device. If you specify a value for `netFilter`, then you do not need to specify any other parameter because a network ID is unique. -<8> Optional: An array of one or more physical function (PF) names for the device. -<9> Optional: The driver type for the virtual functions. The only allowed value is `netdevice`. -For a Mellanox NIC to work in DPDK mode on bare metal nodes, set `isRdma` to `true`. -<10> Optional: Configures whether to enable remote direct memory access (RDMA) mode. The default value is `false`. -If the `isRdma` parameter is set to `true`, you can continue to use the RDMA-enabled VF as a normal network device. A device can be used in either mode. -Set `isRdma` to `true` and additionally set `needVhostNet` to `true` to configure a Mellanox NIC for use with Fast Datapath DPDK applications. -+ -[NOTE] -==== -The `vfio-pci` driver type is not supported. -==== -+ -. Create the `SriovNetworkNodePolicy` object: -+ -[source,terminal] ----- -$ oc create -f policyoneflag-sriov-node-network.yaml ----- -+ -After applying the configuration update, all the pods in `sriov-network-operator` namespace change to the `Running` status. -+ -. To verify that the SR-IOV network device is configured, enter the following command. Replace `` with the name of a node with the SR-IOV network device that you just configured. 
-+ -[source,terminal] ----- -$ oc get sriovnetworknodestates -n openshift-sriov-network-operator -o jsonpath='{.status.syncStatus}' ----- -+ -.Example output -[source,terminal] ----- -Succeeded ----- \ No newline at end of file diff --git a/modules/nw-sriov-interface-level-sysctl-basic.adoc b/modules/nw-sriov-interface-level-sysctl-basic.adoc deleted file mode 100644 index 2e1f2eb494cb..000000000000 --- a/modules/nw-sriov-interface-level-sysctl-basic.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-interface-sysctl-sriov-device.adoc - -:_content-type: CONCEPT -[id="nw-setting-one-sysctl-flag_{context}"] -= Setting one sysctl flag - -You can set interface-level network `sysctl` settings for a pod connected to a SR-IOV network device. - -In this example, `net.ipv4.conf.IFNAME.accept_redirects` is set to `1` on the created virtual interfaces. - -The `sysctl-tuning-test` is a namespace used in this example. - -* Use the following command to create the `sysctl-tuning-test` namespace: -+ ----- -$ oc create namespace sysctl-tuning-test ----- - diff --git a/modules/nw-sriov-interface-level-sysctl-bonded-node-policy.adoc b/modules/nw-sriov-interface-level-sysctl-bonded-node-policy.adoc deleted file mode 100644 index a2609fe3ea73..000000000000 --- a/modules/nw-sriov-interface-level-sysctl-bonded-node-policy.adoc +++ /dev/null @@ -1,84 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-interface-sysctl-sriov-device.adoc - -:_content-type: CONCEPT -[id="nw-setting-all-sysctls-flag-node-policy-bonded_{context}"] -= Setting all sysctl flag on nodes with bonded SR-IOV network devices - -The SR-IOV Network Operator adds the `SriovNetworkNodePolicy.sriovnetwork.openshift.io` custom resource definition (CRD) to {product-title}. You can configure an SR-IOV network device by creating a `SriovNetworkNodePolicy` custom resource (CR). - -[NOTE] -==== -When applying the configuration specified in a SriovNetworkNodePolicy object, the SR-IOV Operator might drain the nodes, and in some cases, reboot nodes. - -It might take several minutes for a configuration change to apply. -==== - -Follow this procedure to create a `SriovNetworkNodePolicy` custom resource (CR). - -.Procedure - -. Create an `SriovNetworkNodePolicy` custom resource (CR). Save the following YAML as the file `policyallflags-sriov-node-network.yaml`. Replace `policyallflags` with the name for the configuration. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policyallflags <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: policyallflags <3> - nodeSelector: <4> - node.alpha.kubernetes-incubator.io/nfd-network-sriov.capable = `true` - priority: 10 <5> - numVfs: 5 <6> - nicSelector: <7> - pfNames: ["ens1f0"] <8> - deviceType: "netdevice" <9> - isRdma: false <10> ----- -+ -<1> The name for the custom resource object. -<2> The namespace where the SR-IOV Network Operator is installed. -<3> The resource name of the SR-IOV network device plugin. You can create multiple SR-IOV network node policies for a resource name. -<4> The node selector specifies the nodes to configure. Only SR-IOV network devices on the selected nodes are configured. The SR-IOV Container Network Interface (CNI) plugin and device plugin are deployed on selected nodes only. -<5> Optional: The priority is an integer value between `0` and `99`. 
A smaller value receives higher priority. For example, a priority of `10` is a higher priority than `99`. The default value is `99`. -<6> The number of virtual functions (VFs) to create for the SR-IOV physical network device. For an Intel network interface controller (NIC), the number of VFs cannot be larger than the total VFs supported by the device. For a Mellanox NIC, the number of VFs cannot be larger than `128`. -<7> The NIC selector identifies the device for the Operator to configure. You do not have to specify values for all the parameters. It is recommended to identify the network device with enough precision to avoid selecting a device unintentionally. -If you specify `rootDevices`, you must also specify a value for `vendor`, `deviceID`, or `pfNames`. If you specify both `pfNames` and `rootDevices` at the same time, ensure that they refer to the same device. If you specify a value for `netFilter`, then you do not need to specify any other parameter because a network ID is unique. -<8> Optional: An array of one or more physical function (PF) names for the device. -<9> Optional: The driver type for the virtual functions. The only allowed value is `netdevice`. -For a Mellanox NIC to work in DPDK mode on bare metal nodes, set `isRdma` to `true`. -<10> Optional: Configures whether to enable remote direct memory access (RDMA) mode. The default value is `false`. -If the `isRdma` parameter is set to `true`, you can continue to use the RDMA-enabled VF as a normal network device. A device can be used in either mode. -Set `isRdma` to `true` and additionally set `needVhostNet` to `true` to configure a Mellanox NIC for use with Fast Datapath DPDK applications. -+ -[NOTE] -==== -The `vfio-pci` driver type is not supported. -==== -+ -. Create the SriovNetworkNodePolicy object: -+ -[source,terminal] ----- -$ oc create -f policyallflags-sriov-node-network.yaml ----- -+ -After applying the configuration update, all the pods in sriov-network-operator namespace change to the `Running` status. -+ -. To verify that the SR-IOV network device is configured, enter the following command. Replace `` with the name of a node with the SR-IOV network device that you just configured. -+ -[source,terminal] ----- -$ oc get sriovnetworknodestates -n openshift-sriov-network-operator -o jsonpath='{.status.syncStatus}' ----- -+ -.Example output -+ -[source,terminal] ----- -Succeeded ----- \ No newline at end of file diff --git a/modules/nw-sriov-interface-level-sysctl-bonded.adoc b/modules/nw-sriov-interface-level-sysctl-bonded.adoc deleted file mode 100644 index 4a7943f79f0e..000000000000 --- a/modules/nw-sriov-interface-level-sysctl-bonded.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-interface-sysctl-sriov-device.adoc - -:_content-type: CONCEPT -[id="nw-configure-sysctl-settings-flag-bonded_{context}"] -= Configuring sysctl settings for pods associated with bonded SR-IOV interface flag - -You can set interface-level network `sysctl` settings for a pod connected to a bonded SR-IOV network device. - -In this example, the specific network interface-level `sysctl` settings that can be configured are set on the bonded interface. - -The `sysctl-tuning-test` is a namespace used in this example. 
- -* Use the following command to create the `sysctl-tuning-test` namespace: -+ ----- -$ oc create namespace sysctl-tuning-test ----- \ No newline at end of file diff --git a/modules/nw-sriov-network-attachment.adoc b/modules/nw-sriov-network-attachment.adoc deleted file mode 100644 index e54e7a2a42c9..000000000000 --- a/modules/nw-sriov-network-attachment.adoc +++ /dev/null @@ -1,200 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-net-attach.adoc -// * virt/virtual_machines/vm_networking/virt-attaching-vm-to-sriov-network.adoc - -// Note: IB does not support ipam with `type=dhcp`. - -ifeval::["{context}" == "configuring-sriov-net-attach"] -:rs: SriovNetwork -:ocp-sriov-net: -:object: pods -endif::[] - -ifeval::["{context}" == "configuring-sriov-ib-attach"] -:rs: SriovIBNetwork -:ocp-sriov-net: -:object: pods -endif::[] - -ifeval::["{context}" == "virt-attaching-vm-to-sriov-network"] -:rs: SriovNetwork -:virt-sriov-net: -:object: pods or virtual machines -endif::[] - -:_content-type: PROCEDURE -ifdef::ocp-sriov-net[] -[id="nw-sriov-network-attachment_{context}"] -= Configuring SR-IOV additional network - -You can configure an additional network that uses SR-IOV hardware by creating an `{rs}` object. -When you create an `{rs}` object, the SR-IOV Network Operator automatically creates a `NetworkAttachmentDefinition` object. - -[NOTE] -===== -Do not modify or delete an `{rs}` object if it is attached to any {object} in a `running` state. -===== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create a `{rs}` object, and then save the YAML in the `.yaml` file, where `` is a name for this additional network. The object specification might resemble the following example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: {rs} -metadata: - name: attach1 - namespace: openshift-sriov-network-operator -spec: - resourceName: net1 - networkNamespace: project2 - ipam: |- - { - "type": "host-local", - "subnet": "10.56.217.0/24", - "rangeStart": "10.56.217.171", - "rangeEnd": "10.56.217.181", - "gateway": "10.56.217.1" - } ----- - -. To create the object, enter the following command: -+ -[source,terminal] ----- -$ oc create -f .yaml ----- -+ -where `` specifies the name of the additional network. - -. Optional: To confirm that the `NetworkAttachmentDefinition` object that is associated with the `{rs}` object that you created in the previous step exists, enter the following command. Replace `` with the networkNamespace you specified in the `{rs}` object. -+ -[source,terminal] ----- -$ oc get net-attach-def -n ----- -endif::ocp-sriov-net[] - -// LEGACY -ifdef::virt-sriov-net[] -[id="nw-sriov-network-attachment_{context}"] -= Configuring SR-IOV additional network - -You can configure an additional network that uses SR-IOV hardware by creating an `{rs}` object. - -When you create an `{rs}` object, the SR-IOV Network Operator automatically creates a `NetworkAttachmentDefinition` object. - -[NOTE] -===== -Do not modify or delete an `{rs}` object if it is attached to {object} in a `running` state. -===== - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create the following `SriovNetwork` object, and then save the YAML in the `-sriov-network.yaml` file. Replace `` with a name for this additional network. 
- -// The list breaks because of the [NOTE] -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: <3> - networkNamespace: <4> - vlan: <5> - spoofChk: "" <6> - linkState: <7> - maxTxRate: <8> - minTxRate: <9> - vlanQoS: <10> - trust: "" <11> - capabilities: <12> -ifdef::ocp-sriov-net[] - ipam: {} <7> - linkState: <8> - maxTxRate: <9> - minTxRate: <10> - vlanQoS: <11> - trust: "" <12> - capabilities: <13> -endif::ocp-sriov-net[] ----- -<1> Replace `` with a name for the object. The SR-IOV Network Operator creates a `NetworkAttachmentDefinition` object with same name. -<2> Specify the namespace where the SR-IOV Network Operator is installed. -<3> Replace `` with the value for the `.spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. -<4> Replace `` with the target namespace for the SriovNetwork. Only {object} in the target namespace can attach to the SriovNetwork. -<5> Optional: Replace `` with a Virtual LAN (VLAN) ID for the additional network. The integer value must be from `0` to `4095`. The default value is `0`. -<6> Optional: Replace `` with the spoof check mode of the VF. The allowed values are the strings `"on"` and `"off"`. -+ -[IMPORTANT] -==== -You must enclose the value you specify in quotes or the CR is rejected by the SR-IOV Network Operator. -==== -<7> Optional: Replace `` with the link state of virtual function (VF). Allowed value are `enable`, `disable` and `auto`. -<8> Optional: Replace `` with a maximum transmission rate, in Mbps, for the VF. -<9> Optional: Replace `` with a minimum transmission rate, in Mbps, for the VF. This value should always be less than or equal to Maximum transmission rate. -+ -[NOTE] -==== -Intel NICs do not support the `minTxRate` parameter. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1772847[BZ#1772847]. -==== -<10> Optional: Replace `` with an IEEE 802.1p priority level for the VF. The default value is `0`. -<11> Optional: Replace `` with the trust mode of the VF. The allowed values are the strings `"on"` and `"off"`. -+ -[IMPORTANT] -==== -You must enclose the value you specify in quotes or the CR is rejected by the SR-IOV Network Operator. -==== -<12> Optional: Replace `` with the capabilities to configure for this network. -ifdef::ocp-sriov-net[] -You can specify `"{ "ips": true }"` to enable IP address support or `"{ "mac": true }"` to enable MAC address support. -<13> A configuration object for the IPAM CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. -endif::ocp-sriov-net[] - -[start=2] -. To create the object, enter the following command. Replace `` with a name for this additional network. -+ -[source,terminal] ----- -$ oc create -f -sriov-network.yaml ----- - -. Optional: To confirm that the `NetworkAttachmentDefinition` object associated with the `SriovNetwork` object that you created in the previous step exists, enter the following command. Replace `` with the namespace you specified in the `SriovNetwork` object. 
-+ -[source,terminal] ----- -$ oc get net-attach-def -n ----- -// LEGACY -endif::virt-sriov-net[] - -ifdef::object[] -:!object: -endif::[] - -ifdef::rs[] -:!rs: -endif::[] - -ifdef::virt-sriov-net[] -:!virt-sriov-net: -endif::[] - -ifdef::ocp-sriov-net[] -:!ocp-sriov-net: -endif::[] diff --git a/modules/nw-sriov-network-object.adoc b/modules/nw-sriov-network-object.adoc deleted file mode 100644 index 95479a491ded..000000000000 --- a/modules/nw-sriov-network-object.adoc +++ /dev/null @@ -1,93 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-net-attach.adoc - -// Because of an existing issue in go-yaml, the strings 'on' and 'off' -// are interpreted as booleans, not strings. The SR-IOV admission controller -// will reject 'spoofCheck' and 'trust' if the values are not strings. -// So these values must be explicitly quoted in the YAML. -// https://github.com/go-yaml/yaml/issues/214 - -ifeval::["{context}" == "configuring-sriov-net-attach"] -:ocp-sriov-net: -:object: pods -endif::[] - -ifeval::["{context}" == "virt-defining-an-sriov-network"] -:virt-sriov-net: -:object: pods or virtual machines -endif::[] - -[id="nw-sriov-network-object_{context}"] -= Ethernet device configuration object - -You can configure an Ethernet network device by defining an `SriovNetwork` object. - -The following YAML describes an `SriovNetwork` object: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetwork -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: <3> - networkNamespace: <4> - vlan: <5> - spoofChk: "" <6> -ifdef::ocp-sriov-net[] - ipam: |- <7> - {} - linkState: <8> - maxTxRate: <9> - minTxRate: <10> - vlanQoS: <11> - trust: "" <12> - capabilities: <13> -endif::ocp-sriov-net[] ----- -<1> A name for the object. The SR-IOV Network Operator creates a `NetworkAttachmentDefinition` object with same name. -<2> The namespace where the SR-IOV Network Operator is installed. -<3> The value for the `spec.resourceName` parameter from the `SriovNetworkNodePolicy` object that defines the SR-IOV hardware for this additional network. -<4> The target namespace for the `SriovNetwork` object. Only {object} in the target namespace can attach to the additional network. -<5> Optional: A Virtual LAN (VLAN) ID for the additional network. The integer value must be from `0` to `4095`. The default value is `0`. -<6> Optional: The spoof check mode of the VF. The allowed values are the strings `"on"` and `"off"`. -+ -[IMPORTANT] -==== -You must enclose the value you specify in quotes or the object is rejected by the SR-IOV Network Operator. -==== -+ -ifdef::ocp-sriov-net[] -<7> A configuration object for the IPAM CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition. -<8> Optional: The link state of virtual function (VF). Allowed value are `enable`, `disable` and `auto`. -<9> Optional: A maximum transmission rate, in Mbps, for the VF. -<10> Optional: A minimum transmission rate, in Mbps, for the VF. This value must be less than or equal to the maximum transmission rate. -+ -[NOTE] -==== -Intel NICs do not support the `minTxRate` parameter. For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1772847[BZ#1772847]. -==== -+ -<11> Optional: An IEEE 802.1p priority level for the VF. The default value is `0`. -<12> Optional: The trust mode of the VF. The allowed values are the strings `"on"` and `"off"`. 
-+
-[IMPORTANT]
-====
-You must enclose the value that you specify in quotes, or the SR-IOV Network Operator rejects the object.
-====
-+
-<13> Optional: The capabilities to configure for this additional network. You can specify `"{ "ips": true }"` to enable IP address support or `"{ "mac": true }"` to enable MAC address support.
-endif::ocp-sriov-net[]
-
-ifdef::object[]
-:object!:
-endif::[]
-ifdef::ocp-sriov-net[]
-:ocp-sriov-net!:
-endif::[]
-ifdef::virt-sriov-net[]
-:virt-sriov-net!:
-endif::[]
diff --git a/modules/nw-sriov-network-operator.adoc b/modules/nw-sriov-network-operator.adoc
deleted file mode 100644
index f5ba2b29c69d..000000000000
--- a/modules/nw-sriov-network-operator.adoc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Module included in the following assemblies:
-//
-// * networking/hardware_networks/using-dpdk-and-rdma.adoc
-
-:_content-type: REFERENCE
-[id="nw-sriov-network-operator_{context}"]
-= Example SR-IOV Network Operator for virtual functions
-
-You can use the Single Root I/O Virtualization (SR-IOV) Network Operator to allocate and configure Virtual Functions (VFs) from SR-IOV-supporting Physical Function NICs on the nodes.
-
-For more information on deploying the Operator, see _Installing the SR-IOV Network Operator_.
-For more information on configuring an SR-IOV network device, see _Configuring an SR-IOV network device_.
-
-There are some differences between running Data Plane Development Kit (DPDK) workloads on Intel VFs and Mellanox VFs. This section provides object configuration examples for both VF types.
-The following is an example of an `SriovNetworkNodePolicy` object used to run DPDK applications on Intel NICs:
-[source,yaml]
-----
-apiVersion: sriovnetwork.openshift.io/v1
-kind: SriovNetworkNodePolicy
-metadata:
-  name: dpdk-nic-1
-  namespace: openshift-sriov-network-operator
-spec:
-  deviceType: vfio-pci <1>
-  needVhostNet: true <2>
-  nicSelector:
-    pfNames: ["ens3f0"]
-  nodeSelector:
-    node-role.kubernetes.io/worker-cnf: ""
-  numVfs: 10
-  priority: 99
-  resourceName: dpdk_nic_1
----
-apiVersion: sriovnetwork.openshift.io/v1
-kind: SriovNetworkNodePolicy
-metadata:
-  name: dpdk-nic-2
-  namespace: openshift-sriov-network-operator
-spec:
-  deviceType: vfio-pci
-  needVhostNet: true
-  nicSelector:
-    pfNames: ["ens3f1"]
-  nodeSelector:
-    node-role.kubernetes.io/worker-cnf: ""
-  numVfs: 10
-  priority: 99
-  resourceName: dpdk_nic_2
-
-----
-<1> For Intel NICs, `deviceType` must be `vfio-pci`.
-<2> If kernel communication with DPDK workloads is required, add `needVhostNet: true`. This mounts the `/dev/net/tun` and `/dev/vhost-net` devices into the container so the application can create a tap device and connect the tap device to the DPDK workload.
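For context, the VFs that a policy such as `dpdk-nic-1` exposes are typically consumed by pairing the policy with an `SriovNetwork` object that references the same `resourceName`. The following sketch is illustrative only; the object name, target namespace, and IPAM subnet are assumptions, not values taken from the deleted module:

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: dpdk-network-1            # assumed name for this sketch
  namespace: openshift-sriov-network-operator
spec:
  resourceName: dpdk_nic_1        # matches spec.resourceName in the policy above
  networkNamespace: dpdk-apps     # assumed namespace where the DPDK pods run
  ipam: |-
    {
      "type": "host-local",
      "subnet": "10.60.0.0/24"
    }
----

Pods in the target namespace can then attach to those VFs through the `NetworkAttachmentDefinition` that the SR-IOV Network Operator generates from this object.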
- -The following is an example of an `sriovNetworkNodePolicy` object for Mellanox NICs: -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: dpdk-nic-1 - namespace: openshift-sriov-network-operator -spec: - deviceType: netdevice <1> - isRdma: true <2> - nicSelector: - rootDevices: - - "0000:5e:00.1" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numVfs: 5 - priority: 99 - resourceName: dpdk_nic_1 ---- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: dpdk-nic-2 - namespace: openshift-sriov-network-operator -spec: - deviceType: netdevice - isRdma: true - nicSelector: - rootDevices: - - "0000:5e:00.0" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numVfs: 5 - priority: 99 - resourceName: dpdk_nic_2 ----- -<1> For Mellanox devices the `deviceType` must be `netdevice`. -<2> For Mellanox devices `isRdma` must be `true`. -Mellanox cards are connected to DPDK applications using Flow Bifurcation. This mechanism splits traffic between Linux user space and kernel space, and can enhance line rate processing capability. diff --git a/modules/nw-sriov-networknodepolicy-object.adoc b/modules/nw-sriov-networknodepolicy-object.adoc deleted file mode 100644 index 22ef6effcc72..000000000000 --- a/modules/nw-sriov-networknodepolicy-object.adoc +++ /dev/null @@ -1,142 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -:_content-type: REFERENCE -[id="nw-sriov-networknodepolicy-object_{context}"] -= SR-IOV network node configuration object - -You specify the SR-IOV network device configuration for a node by creating an SR-IOV network node policy. The API object for the policy is part of the `sriovnetwork.openshift.io` API group. - -The following YAML describes an SR-IOV network node policy: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: <1> - namespace: openshift-sriov-network-operator <2> -spec: - resourceName: <3> - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" <4> - priority: <5> - mtu: <6> - needVhostNet: false <7> - numVfs: <8> - nicSelector: <9> - vendor: "" <10> - deviceID: "" <11> - pfNames: ["", ...] <12> - rootDevices: ["", ...] <13> - netFilter: "" <14> - deviceType: <15> - isRdma: false <16> - linkType: <17> - eSwitchMode: "switchdev" <18> - excludeTopology: false <19> ----- -<1> The name for the custom resource object. - -<2> The namespace where the SR-IOV Network Operator is installed. - -<3> The resource name of the SR-IOV network device plugin. You can create multiple SR-IOV network node policies for a resource name. -+ -When specifying a name, be sure to use the accepted syntax expression `^[a-zA-Z0-9_]+$` in the `resourceName`. - -<4> The node selector specifies the nodes to configure. Only SR-IOV network devices on the selected nodes are configured. The SR-IOV Container Network Interface (CNI) plugin and device plugin are deployed on selected nodes only. - -<5> Optional: The priority is an integer value between `0` and `99`. A smaller value receives higher priority. For example, a priority of `10` is a higher priority than `99`. The default value is `99`. - -<6> Optional: The maximum transmission unit (MTU) of the virtual function. The maximum MTU value can vary for different network interface controller (NIC) models. 
- -<7> Optional: Set `needVhostNet` to `true` to mount the `/dev/vhost-net` device in the pod. Use the mounted `/dev/vhost-net` device with Data Plane Development Kit (DPDK) to forward traffic to the kernel network stack. - -<8> The number of the virtual functions (VF) to create for the SR-IOV physical network device. For an Intel network interface controller (NIC), the number of VFs cannot be larger than the total VFs supported by the device. For a Mellanox NIC, the number of VFs cannot be larger than `128`. - -<9> The NIC selector identifies the device for the Operator to configure. You do not have to specify values for all the parameters. It is recommended to identify the network device with enough precision to avoid selecting a device unintentionally. -+ -If you specify `rootDevices`, you must also specify a value for `vendor`, `deviceID`, or `pfNames`. If you specify both `pfNames` and `rootDevices` at the same time, ensure that they refer to the same device. If you specify a value for `netFilter`, then you do not need to specify any other parameter because a network ID is unique. - -<10> Optional: The vendor hexadecimal code of the SR-IOV network device. The only allowed values are `8086` and `15b3`. - -<11> Optional: The device hexadecimal code of the SR-IOV network device. For example, `101b` is the device ID for a Mellanox ConnectX-6 device. - -<12> Optional: An array of one or more physical function (PF) names for the device. - -<13> Optional: An array of one or more PCI bus addresses for the PF of the device. Provide the address in the following format: `0000:02:00.1`. - -<14> Optional: The platform-specific network filter. The only supported platform is {rh-openstack-first}. Acceptable values use the following format: `openstack/NetworkID:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. Replace `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` with the value from the `/var/config/openstack/latest/network_data.json` metadata file. - -<15> Optional: The driver type for the virtual functions. The only allowed values are `netdevice` and `vfio-pci`. The default value is `netdevice`. -+ -For a Mellanox NIC to work in DPDK mode on bare metal nodes, use the `netdevice` driver type and set `isRdma` to `true`. - -<16> Optional: Configures whether to enable remote direct memory access (RDMA) mode. The default value is `false`. -+ -If the `isRdma` parameter is set to `true`, you can continue to use the RDMA-enabled VF as a normal network device. A device can be used in either mode. -+ -Set `isRdma` to `true` and additionally set `needVhostNet` to `true` to configure a Mellanox NIC for use with Fast Datapath DPDK applications. - -<17> Optional: The link type for the VFs. The default value is `eth` for Ethernet. Change this value to 'ib' for InfiniBand. -+ -When `linkType` is set to `ib`, `isRdma` is automatically set to `true` by the SR-IOV Network Operator webhook. When `linkType` is set to `ib`, `deviceType` should not be set to `vfio-pci`. -+ -Do not set linkType to 'eth' for SriovNetworkNodePolicy, because this can lead to an incorrect number of available devices reported by the device plugin. - -<18> Optional: To enable hardware offloading, the 'eSwitchMode' field must be set to `"switchdev"`. - -<19> Optional: To exclude advertising an SR-IOV network resource's NUMA node to the Topology Manager, set the value to `true`. The default value is `false`. 
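The configuration examples in the next section cover an InfiniBand device and an SR-IOV device in a virtual machine. As an additional illustrative sketch only, a minimal policy for a plain Ethernet device on bare metal, using the fields described in the callouts above, might look like the following; the policy name, resource name, and PF interface name are assumptions:

[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: policy-ethernet-example        # assumed name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: ethnic1                # must match the ^[a-zA-Z0-9_]+$ syntax noted above
  nodeSelector:
    feature.node.kubernetes.io/network-sriov.capable: "true"
  numVfs: 4
  nicSelector:
    pfNames: ["ens1f0"]                # assumed PF name; identify the device precisely
  deviceType: netdevice                # the default driver type
----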
- -[id="sr-iov-network-node-configuration-examples_{context}"] -== SR-IOV network node configuration examples - -The following example describes the configuration for an InfiniBand device: - -.Example configuration for an InfiniBand device -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policy-ib-net-1 - namespace: openshift-sriov-network-operator -spec: - resourceName: ibnic1 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 4 - nicSelector: - vendor: "15b3" - deviceID: "101b" - rootDevices: - - "0000:19:00.0" - linkType: ib - isRdma: true ----- - -The following example describes the configuration for an SR-IOV network device in a {rh-openstack} virtual machine: - -.Example configuration for an SR-IOV device in a virtual machine -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policy-sriov-net-openstack-1 - namespace: openshift-sriov-network-operator -spec: - resourceName: sriovnic1 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 1 <1> - nicSelector: - vendor: "15b3" - deviceID: "101b" - netFilter: "openstack/NetworkID:ea24bd04-8674-4f69-b0ee-fa0b3bd20509" <2> ----- - -<1> The `numVfs` field is always set to `1` when configuring the node network policy for a virtual machine. - -<2> The `netFilter` field must refer to a network ID when the virtual machine is deployed on {rh-openstack}. Valid values for `netFilter` are available from an `SriovNetworkNodeState` object. diff --git a/modules/nw-sriov-nic-partitioning.adoc b/modules/nw-sriov-nic-partitioning.adoc deleted file mode 100644 index 2681c0d60b2c..000000000000 --- a/modules/nw-sriov-nic-partitioning.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -[id="nw-sriov-nic-partitioning_{context}"] -= Virtual function (VF) partitioning for SR-IOV devices - -In some cases, you might want to split virtual functions (VFs) from the same physical function (PF) into multiple resource pools. -For example, you might want some of the VFs to load with the default driver and the remaining VFs load with the `vfio-pci` driver. -In such a deployment, the `pfNames` selector in your SriovNetworkNodePolicy custom resource (CR) can be used to specify a range of VFs for a pool using the following format: `#-`. - -For example, the following YAML shows the selector for an interface named `netpf0` with VF `2` through `7`: - -[source,yaml] ----- -pfNames: ["netpf0#2-7"] ----- - -* `netpf0` is the PF interface name. -* `2` is the first VF index (0-based) that is included in the range. -* `7` is the last VF index (0-based) that is included in the range. - -You can select VFs from the same PF by using different policy CRs if the following requirements are met: - -* The `numVfs` value must be identical for policies that select the same PF. -* The VF index must be in the range of `0` to `-1`. For example, if you have a policy with `numVfs` set to `8`, then the `` value must not be smaller than `0`, and the `` must not be larger than `7`. -* The VFs ranges in different policies must not overlap. -* The `` must not be larger than the ``. - -The following example illustrates NIC partitioning for an SR-IOV device. - -The policy `policy-net-1` defines a resource pool `net-1` that contains the VF `0` of PF `netpf0` with the default VF driver. 
-The policy `policy-net-1-dpdk` defines a resource pool `net-1-dpdk` that contains the VF `8` to `15` of PF `netpf0` with the `vfio` VF driver. - -Policy `policy-net-1`: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policy-net-1 - namespace: openshift-sriov-network-operator -spec: - resourceName: net1 - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 16 - nicSelector: - pfNames: ["netpf0#0-0"] - deviceType: netdevice ----- - -Policy `policy-net-1-dpdk`: - -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: policy-net-1-dpdk - namespace: openshift-sriov-network-operator -spec: - resourceName: net1dpdk - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - numVfs: 16 - nicSelector: - pfNames: ["netpf0#8-15"] - deviceType: vfio-pci ----- - -.Verifying that the interface is successfully partitioned -Confirm that the interface partitioned to virtual functions (VFs) for the SR-IOV device by running the following command. - -[source,terminal] ----- -$ ip link show <1> ----- - -<1> Replace `` with the interface that you specified when partitioning to VFs for the SR-IOV device, for example, `ens3f1`. - -.Example output -[source,terminal] ----- -5: ens3f1: mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000 -link/ether 3c:fd:fe:d1:bc:01 brd ff:ff:ff:ff:ff:ff - -vf 0 link/ether 5a:e7:88:25:ea:a0 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off -vf 1 link/ether 3e:1d:36:d7:3d:49 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off -vf 2 link/ether ce:09:56:97:df:f9 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off -vf 3 link/ether 5e:91:cf:88:d1:38 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off -vf 4 link/ether e6:06:a1:96:2f:de brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state auto, trust off ----- diff --git a/modules/nw-sriov-operator-uninstall.adoc b/modules/nw-sriov-operator-uninstall.adoc deleted file mode 100644 index 6fdd82688b1c..000000000000 --- a/modules/nw-sriov-operator-uninstall.adoc +++ /dev/null @@ -1,92 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/uninstalling-sriov-operator.adoc - -:_content-type: PROCEDURE -[id="nw-sriov-operator-uninstall_{context}"] -= Uninstalling the SR-IOV Network Operator - -As a cluster administrator, you can uninstall the SR-IOV Network Operator. - -.Prerequisites - -* You have access to an {product-title} cluster using an account with `cluster-admin` permissions. -* You have the SR-IOV Network Operator installed. - -.Procedure - -. Delete all SR-IOV custom resources (CRs): -+ -[source,terminal] ----- -$ oc delete sriovnetwork -n openshift-sriov-network-operator --all ----- -+ -[source,terminal] ----- -$ oc delete sriovnetworknodepolicy -n openshift-sriov-network-operator --all ----- -+ -[source,terminal] ----- -$ oc delete sriovibnetwork -n openshift-sriov-network-operator --all ----- - -. Follow the instructions in the "Deleting Operators from a cluster" section to remove the SR-IOV Network Operator from your cluster. - -. 
Delete the SR-IOV custom resource definitions that remain in the cluster after the SR-IOV Network Operator is uninstalled: -+ -[source,terminal] ----- -$ oc delete crd sriovibnetworks.sriovnetwork.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd sriovnetworknodepolicies.sriovnetwork.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd sriovnetworknodestates.sriovnetwork.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd sriovnetworkpoolconfigs.sriovnetwork.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd sriovnetworks.sriovnetwork.openshift.io ----- -+ -[source,terminal] ----- -$ oc delete crd sriovoperatorconfigs.sriovnetwork.openshift.io ----- - -. Delete the SR-IOV webhooks: -+ -[source,terminal] ----- -$ oc delete mutatingwebhookconfigurations network-resources-injector-config ----- -+ -[source,terminal] ----- -$ oc delete MutatingWebhookConfiguration sriov-operator-webhook-config ----- -+ -[source,terminal] ----- -$ oc delete ValidatingWebhookConfiguration sriov-operator-webhook-config ----- - -. Delete the SR-IOV Network Operator namespace: -+ -[source,terminal] ----- -$ oc delete namespace openshift-sriov-network-operator ----- - diff --git a/modules/nw-sriov-rdma-example-mellanox.adoc b/modules/nw-sriov-rdma-example-mellanox.adoc deleted file mode 100644 index 0708321b94e5..000000000000 --- a/modules/nw-sriov-rdma-example-mellanox.adoc +++ /dev/null @@ -1,148 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/using-dpdk-and-rdma.adoc - -:_content-type: PROCEDURE -[id="example-vf-use-in-rdma-mode-mellanox_{context}"] -= Using a virtual function in RDMA mode with a Mellanox NIC -// Extract content for TP notice -// tag::content[] -RDMA over Converged Ethernet (RoCE) is the only supported mode when using RDMA -on {product-title}. - -.Prerequisites - -* Install the OpenShift CLI (`oc`). -* Install the SR-IOV Network Operator. -* Log in as a user with `cluster-admin` privileges. - -.Procedure - -. Create the following `SriovNetworkNodePolicy` object, and then save the YAML in the `mlx-rdma-node-policy.yaml` file. -+ -[source,yaml] ----- -apiVersion: sriovnetwork.openshift.io/v1 -kind: SriovNetworkNodePolicy -metadata: - name: mlx-rdma-node-policy - namespace: openshift-sriov-network-operator -spec: - resourceName: mlxnics - nodeSelector: - feature.node.kubernetes.io/network-sriov.capable: "true" - priority: - numVfs: - nicSelector: - vendor: "15b3" - deviceID: "1015" <1> - pfNames: ["", ...] - rootDevices: ["", "..."] - deviceType: netdevice <2> - isRdma: true <3> ----- -<1> Specify the device hex code of the SR-IOV network device. -<2> Specify the driver type for the virtual functions to `netdevice`. -<3> Enable RDMA mode. -+ -[NOTE] -===== -See the `Configuring SR-IOV network devices` section for a detailed explanation on each option in `SriovNetworkNodePolicy`. - -When applying the configuration specified in a `SriovNetworkNodePolicy` object, the SR-IOV Operator may drain the nodes, and in some cases, reboot nodes. -It may take several minutes for a configuration change to apply. -Ensure that there are enough available nodes in your cluster to handle the evicted workload beforehand. - -After the configuration update is applied, all the pods in the `openshift-sriov-network-operator` namespace will change to a `Running` status. -===== - -. 
Create the `SriovNetworkNodePolicy` object by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f mlx-rdma-node-policy.yaml
-----
-
-. Create the following `SriovNetwork` object, and then save the YAML in the `mlx-rdma-network.yaml` file.
-+
-[source,yaml]
-----
-apiVersion: sriovnetwork.openshift.io/v1
-kind: SriovNetwork
-metadata:
-  name: mlx-rdma-network
-  namespace: openshift-sriov-network-operator
-spec:
-  networkNamespace:
-  ipam: |- <1>
-# ...
-  vlan:
-  resourceName: mlxnics
-----
-<1> Specify a configuration object for the ipam CNI plugin as a YAML block scalar. The plugin manages IP address assignment for the attachment definition.
-+
-[NOTE]
-=====
-See the "Configuring SR-IOV additional network" section for a detailed explanation on each option in `SriovNetwork`.
-=====
-+
-An optional library, app-netutil, provides several API methods for gathering network information about a container's parent pod.
-
-. Create the `SriovNetwork` object by running the following command:
-+
-[source,terminal]
-----
-$ oc create -f mlx-rdma-network.yaml
-----
-
-. Create the following `Pod` spec, and then save the YAML in the `mlx-rdma-pod.yaml` file.
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: rdma-app
-  namespace: <1>
-  annotations:
-    k8s.v1.cni.cncf.io/networks: mlx-rdma-network
-spec:
-  containers:
-  - name: testpmd
-    image: <2>
-    securityContext:
-      runAsUser: 0
-      capabilities:
-        add: ["IPC_LOCK","SYS_RESOURCE","NET_RAW"] <3>
-    volumeMounts:
-    - mountPath: /dev/hugepages <4>
-      name: hugepage
-    resources:
-      limits:
-        memory: "1Gi"
-        cpu: "4" <5>
-        hugepages-1Gi: "4Gi" <6>
-      requests:
-        memory: "1Gi"
-        cpu: "4"
-        hugepages-1Gi: "4Gi"
-    command: ["sleep", "infinity"]
-  volumes:
-  - name: hugepage
-    emptyDir:
-      medium: HugePages
-----
-<1> Specify the same `target_namespace` where the `SriovNetwork` object `mlx-rdma-network` is created. If you would like to create the pod in a different namespace, change `target_namespace` in both the `Pod` spec and the `SriovNetwork` object.
-<2> Specify the RDMA image, which includes your application and the RDMA library used by the application.
-<3> Specify additional capabilities required by the application inside the container for hugepage allocation, system resource allocation, and network interface access.
-<4> Mount the hugepage volume to the RDMA pod under `/dev/hugepages`. The hugepage volume is backed by the `emptyDir` volume type with the medium being `Hugepages`.
-<5> Specify the number of CPUs. The RDMA pod usually requires exclusive CPUs to be allocated from the kubelet. This is achieved by setting the CPU Manager policy to `static` and creating a pod with `Guaranteed` QoS.
-<6> Specify the hugepage size, `hugepages-1Gi` or `hugepages-2Mi`, and the quantity of hugepages that will be allocated to the RDMA pod. Configure `2Mi` and `1Gi` hugepages separately. Configuring `1Gi` hugepages requires adding kernel arguments to nodes.
-
-. 
Create the RDMA pod by running the following command: -+ -[source,terminal] ----- -$ oc create -f mlx-rdma-pod.yaml ----- -// end::content[] \ No newline at end of file diff --git a/modules/nw-sriov-runtime-config.adoc b/modules/nw-sriov-runtime-config.adoc deleted file mode 100644 index 07b34087c3f2..000000000000 --- a/modules/nw-sriov-runtime-config.adoc +++ /dev/null @@ -1,96 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/add-pod.adoc - -[id="nw-sriov-runtime-config_{context}"] -= Runtime configuration for a network attachment - -When attaching a pod to an additional network, you can specify a runtime configuration to make specific customizations for the pod. For example, you can request a specific MAC hardware address. - -You specify the runtime configuration by setting an annotation in the pod specification. The annotation key is `k8s.v1.cni.cncf.io/networks`, and it accepts a JSON object that describes the runtime configuration. - -[id="runtime-config-ethernet_{context}"] -== Runtime configuration for an Ethernet-based SR-IOV attachment - -The following JSON describes the runtime configuration options for an Ethernet-based SR-IOV network attachment. - -[source,json] ----- -[ - { - "name": "", <1> - "mac": "", <2> - "ips": [""] <3> - } -] ----- -<1> The name of the SR-IOV network attachment definition CR. -<2> Optional: The MAC address for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. To use this feature, you also must specify `{ "mac": true }` in the `SriovNetwork` object. -<3> Optional: IP addresses for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. Both IPv4 and IPv6 addresses are supported. To use this feature, you also must specify `{ "ips": true }` in the `SriovNetwork` object. - -.Example runtime configuration -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sample-pod - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "net1", - "mac": "20:04:0f:f1:88:01", - "ips": ["192.168.10.1/24", "2001::1/64"] - } - ] -spec: - containers: - - name: sample-container - image: - imagePullPolicy: IfNotPresent - command: ["sleep", "infinity"] ----- - -[id="runtime-config-infiniband_{context}"] -== Runtime configuration for an InfiniBand-based SR-IOV attachment - -The following JSON describes the runtime configuration options for an InfiniBand-based SR-IOV network attachment. - -[source,json] ----- -[ - { - "name": "", <1> - "infiniband-guid": "", <2> - "ips": [""] <3> - } -] ----- -<1> The name of the SR-IOV network attachment definition CR. -<2> The InfiniBand GUID for the SR-IOV device. To use this feature, you also must specify `{ "infinibandGUID": true }` in the `SriovIBNetwork` object. -<3> The IP addresses for the SR-IOV device that is allocated from the resource type defined in the SR-IOV network attachment definition CR. Both IPv4 and IPv6 addresses are supported. To use this feature, you also must specify `{ "ips": true }` in the `SriovIBNetwork` object. 
- -.Example runtime configuration -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sample-pod - annotations: - k8s.v1.cni.cncf.io/networks: |- - [ - { - "name": "ib1", - "infiniband-guid": "c2:11:22:33:44:55:66:77", - "ips": ["192.168.10.1/24", "2001::1/64"] - } - ] -spec: - containers: - - name: sample-container - image: - imagePullPolicy: IfNotPresent - command: ["sleep", "infinity"] ----- diff --git a/modules/nw-sriov-supported-devices.adoc b/modules/nw-sriov-supported-devices.adoc deleted file mode 100644 index 5e32c692686a..000000000000 --- a/modules/nw-sriov-supported-devices.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc - -[id="supported-devices_{context}"] -= Supported devices - -{product-title} supports the following network interface controllers: - -.Supported network interface controllers -[cols="1,2,1,1"] -|=== -|Manufacturer |Model |Vendor ID | Device ID - -|Broadcom -|BCM57414 -|14e4 -|16d7 - -|Broadcom -|BCM57508 -|14e4 -|1750 - -|Intel -|X710 -|8086 -|1572 - -|Intel -|XL710 -|8086 -|1583 - -|Intel -|XXV710 -|8086 -|158b - -|Intel -|E810-CQDA2 -|8086 -|1592 - -|Intel -|E810-2CQDA2 -|8086 -|1592 - -|Intel -|E810-XXVDA2 -|8086 -|159b - -|Intel -|E810-XXVDA4 -|8086 -|1593 - -|Intel -|E810-XXVDA4T -|8086 -|1593 - -|Mellanox -|MT27700 Family [ConnectX‑4] -|15b3 -|1013 - -|Mellanox -|MT27710 Family [ConnectX‑4{nbsp}Lx] -|15b3 -|1015 - -|Mellanox -|MT27800 Family [ConnectX‑5] -|15b3 -|1017 - -|Mellanox -|MT28880 Family [ConnectX‑5{nbsp}Ex] -|15b3 -|1019 - -|Mellanox -|MT28908 Family [ConnectX‑6] -|15b3 -|101b - -|Mellanox -|MT2892 Family [ConnectX‑6{nbsp}Dx] -|15b3 -|101d - -|Mellanox -|MT2894 Family [ConnectX‑6{nbsp}Lx] -|15b3 -|101f - -|Mellanox -|MT42822 BlueField‑2 in ConnectX‑6 NIC mode -|15b3 -|a2d6 - -|Pensando ^[1]^ -|DSC-25 dual-port 25G distributed services card for ionic driver -|0x1dd8 -|0x1002 - -|Pensando ^[1]^ -|DSC-100 dual-port 100G distributed services card for ionic driver -|0x1dd8 -|0x1003 - -|Silicom -|STS Family -|8086 -|1591 -|=== -[.small] --- -1. OpenShift SR-IOV is supported, but you must set a static, Virtual Function (VF) media access control (MAC) address using the SR-IOV CNI config file when using SR-IOV. --- - -[NOTE] -==== -For the most up-to-date list of supported cards and compatible {product-title} versions available, see link:https://access.redhat.com/articles/6954499[Openshift Single Root I/O Virtualization (SR-IOV) and PTP hardware networks Support Matrix]. 
-==== diff --git a/modules/nw-sriov-supported-platforms.adoc b/modules/nw-sriov-supported-platforms.adoc deleted file mode 100644 index 898c736a175d..000000000000 --- a/modules/nw-sriov-supported-platforms.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/about-sriov.adoc - -[id="nw-sriov-supported-platforms_{context}"] -= Supported platforms - -The SR-IOV Network Operator is supported on the following platforms: - -- Bare metal -- {rh-openstack-first} diff --git a/modules/nw-sriov-topology-manager.adoc b/modules/nw-sriov-topology-manager.adoc deleted file mode 100644 index c0cab370957b..000000000000 --- a/modules/nw-sriov-topology-manager.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/add-pod.adoc - -:_content-type: PROCEDURE -[id="nw-sriov-topology-manager_{context}"] -= Creating a non-uniform memory access (NUMA) aligned SR-IOV pod - -You can create a NUMA aligned SR-IOV pod by restricting SR-IOV and the CPU resources allocated from the same NUMA node with `restricted` or `single-numa-node` Topology Manager polices. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). -* You have configured the CPU Manager policy to `static`. For more information on CPU Manager, see the "Additional resources" section. -* You have configured the Topology Manager policy to `single-numa-node`. -+ -[NOTE] -==== -When `single-numa-node` is unable to satisfy the request, you can configure the Topology Manager policy to `restricted`. For more flexible SR-IOV network resource scheduling, see _Excluding SR-IOV network topology during NUMA-aware scheduling_ in the _Additional resources_ section. -==== - -.Procedure - -. Create the following SR-IOV pod spec, and then save the YAML in the `-sriov-pod.yaml` file. Replace `` with a name for this pod. -+ -The following example shows an SR-IOV pod spec: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Pod -metadata: - name: sample-pod - annotations: - k8s.v1.cni.cncf.io/networks: <1> -spec: - containers: - - name: sample-container - image: <2> - command: ["sleep", "infinity"] - resources: - limits: - memory: "1Gi" <3> - cpu: "2" <4> - requests: - memory: "1Gi" - cpu: "2" ----- -<1> Replace `` with the name of the SR-IOV network attachment definition CR. -<2> Replace `` with the name of the `sample-pod` image. -<3> To create the SR-IOV pod with guaranteed QoS, set `memory limits` equal to `memory requests`. -<4> To create the SR-IOV pod with guaranteed QoS, set `cpu limits` equals to `cpu requests`. - -. Create the sample SR-IOV pod by running the following command: -+ -[source,terminal] ----- -$ oc create -f <1> ----- -<1> Replace `` with the name of the file you created in the previous step. - -. Confirm that the `sample-pod` is configured with guaranteed QoS. -+ -[source,terminal] ----- -$ oc describe pod sample-pod ----- - -. Confirm that the `sample-pod` is allocated with exclusive CPUs. -+ -[source,terminal] ----- -$ oc exec sample-pod -- cat /sys/fs/cgroup/cpuset/cpuset.cpus ----- - -. Confirm that the SR-IOV device and CPUs that are allocated for the `sample-pod` are on the same NUMA node. 
-+ -[source,terminal] ----- -$ oc exec sample-pod -- cat /sys/fs/cgroup/cpuset/cpuset.cpus ----- diff --git a/modules/nw-sriov-troubleshooting.adoc b/modules/nw-sriov-troubleshooting.adoc deleted file mode 100644 index 1811667362cb..000000000000 --- a/modules/nw-sriov-troubleshooting.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/hardware_networks/configuring-sriov-device.adoc - -[id="nw-sriov-troubleshooting_{context}"] -= Troubleshooting SR-IOV configuration - -After following the procedure to configure an SR-IOV network device, the following sections address some error conditions. - -To display the state of nodes, run the following command: - -[source,terminal] ----- -$ oc get sriovnetworknodestates -n openshift-sriov-network-operator ----- - -where: `` specifies the name of a node with an SR-IOV network device. - -.Error output: Cannot allocate memory -[source,terminal] ----- -"lastSyncError": "write /sys/bus/pci/devices/0000:3b:00.1/sriov_numvfs: cannot allocate memory" ----- - -When a node indicates that it cannot allocate memory, check the following items: - -* Confirm that global SR-IOV settings are enabled in the BIOS for the node. - -* Confirm that VT-d is enabled in the BIOS for the node. diff --git a/modules/nw-throughput-troubleshoot.adoc b/modules/nw-throughput-troubleshoot.adoc deleted file mode 100644 index 75c4295341d0..000000000000 --- a/modules/nw-throughput-troubleshoot.adoc +++ /dev/null @@ -1,39 +0,0 @@ -// Module filename: nw-throughput-troubleshoot.adoc -// Module included in the following assemblies: -// * networking/routes/route-configuration.adoc - -:_content-type: CONCEPT -[id="nw-throughput-troubleshoot_{context}"] -= Throughput issue troubleshooting methods - -Sometimes applications deployed by using {product-title} can cause network throughput issues, such as unusually high latency between specific services. - -If pod logs do not reveal any cause of the problem, use the following methods to analyze performance issues: - -* Use a packet analyzer, such as `ping` or `tcpdump` to analyze traffic between a pod and its node. -+ -For example, link:https://access.redhat.com/solutions/4569211[run the `tcpdump` tool on each pod] while reproducing the behavior that led to the issue. Review the captures on both sides to compare send and receive timestamps to analyze the latency of traffic to and from a pod. Latency can occur in {product-title} if a node interface is overloaded with traffic from other pods, storage devices, or the data plane. -+ -[source,terminal] ----- -$ tcpdump -s 0 -i any -w /tmp/dump.pcap host && host <1> ----- -+ -<1> `podip` is the IP address for the pod. Run the `oc get pod -o wide` command to get the IP address of a pod. -+ -The `tcpdump` command generates a file at `/tmp/dump.pcap` containing all traffic between these two pods. You can run the analyzer shortly before the issue is reproduced and stop the analyzer shortly after the issue is finished reproducing to minimize the size of the file. You can also link:https://access.redhat.com/solutions/5074041[run a packet analyzer between the nodes] (eliminating the SDN from the equation) with: -+ -[source,terminal] ----- -$ tcpdump -s 0 -i any -w /tmp/dump.pcap port 4789 ----- - -* Use a bandwidth measuring tool, such as link:https://access.redhat.com/solutions/6129701[`iperf`], to measure streaming throughput and UDP throughput. Locate any bottlenecks by running the tool from the pods first, and then running it from the nodes. 
- -ifdef::openshift-enterprise,openshift-webscale[] -** For information on installing and using `iperf`, see this link:https://access.redhat.com/solutions/33103[Red Hat Solution]. -endif::openshift-enterprise,openshift-webscale[] - -* In some cases, the cluster may mark the node with the router pod as unhealthy due to latency issues. Use worker latency profiles to adjust the frequency that the cluster waits for a status update from the node before taking action. - -* If your cluster has designated lower-latency and higher-latency nodes, configure the `spec.nodePlacement` field in the Ingress Controller to control the placement of the router pod. diff --git a/modules/nw-troubleshoot-ovs.adoc b/modules/nw-troubleshoot-ovs.adoc deleted file mode 100644 index 309bae0abbe3..000000000000 --- a/modules/nw-troubleshoot-ovs.adoc +++ /dev/null @@ -1,14 +0,0 @@ -[id="nw-troubleshoot-ovs_{context}"] -= Troubleshooting Open vSwitch issues - -To troubleshoot some Open vSwitch (OVS) issues, you might need to configure the log level to include more information. - -If you modify the log level on a node temporarily, be aware that you can receive log messages from the machine config daemon on the node like the following example: - -[source,terminal] ----- -E0514 12:47:17.998892 2281 daemon.go:1350] content mismatch for file /etc/systemd/system/ovs-vswitchd.service: [Unit] ----- - -To avoid the log messages related to the mismatch, revert the log level change after you complete your troubleshooting. - diff --git a/modules/nw-using-cookies-keep-route-statefulness.adoc b/modules/nw-using-cookies-keep-route-statefulness.adoc deleted file mode 100644 index 62cbafdd8884..000000000000 --- a/modules/nw-using-cookies-keep-route-statefulness.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module filename: nw-using-cookies-keep-route-statefulness.adoc -// Use module with the following module: -// nw-annotating-a-route-with-a-cookie-name.adoc -// -// Module included in the following assemblies: -// -// * networking/configuring-routing.adoc -[id="nw-using-cookies-keep-route-statefulness_{context}"] -= Using cookies to keep route statefulness - -{product-title} provides sticky sessions, which enables stateful application -traffic by ensuring all traffic hits the same endpoint. However, if the endpoint -pod terminates, whether through restart, scaling, or a change in configuration, -this statefulness can disappear. - -{product-title} can use cookies to configure session persistence. The Ingress -controller selects an endpoint to handle any user requests, and creates a cookie -for the session. The cookie is passed back in the response to the request and -the user sends the cookie back with the next request in the session. The cookie -tells the Ingress Controller which endpoint is handling the session, ensuring -that client requests use the cookie so that they are routed to the same pod. - -[NOTE] -==== -Cookies cannot be set on passthrough routes, because the HTTP traffic cannot be seen. Instead, a number is calculated based on the source IP address, which determines the backend. - -If backends change, the traffic can be directed to the wrong server, making it less sticky. If you are using a load balancer, which hides source IP, the same number is set for all connections and traffic is sent to the same pod. 
-==== diff --git a/modules/nw-using-ingress-and-routes.adoc b/modules/nw-using-ingress-and-routes.adoc deleted file mode 100644 index 90793167f59b..000000000000 --- a/modules/nw-using-ingress-and-routes.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/configuring-ingress-cluster-traffic-ingress-controller.adoc - -[id="nw-using-ingress-and-routes_{context}"] -= Using Ingress Controllers and routes - -The Ingress Operator manages Ingress Controllers and wildcard DNS. - -Using an Ingress Controller is the most common way to allow external access to -an {product-title} cluster. - -An Ingress Controller is configured to accept external requests and proxy them -based on the configured routes. This is limited to HTTP, HTTPS using SNI, and -TLS using SNI, which is sufficient for web applications and services that work -over TLS with SNI. - -Work with your administrator to configure an Ingress Controller -to accept external requests and proxy them based on the -configured routes. - -The administrator can create a wildcard DNS entry and then set up an Ingress -Controller. Then, you can work with the edge Ingress Controller without -having to contact the administrators. - -By default, every Ingress Controller in the cluster can admit any route created in any project in the cluster. - -The Ingress Controller: - -* Has two replicas by default, which means it should be running on two worker nodes. -* Can be scaled up to have more replicas on more nodes. diff --git a/modules/nw-using-ingress-forwarded.adoc b/modules/nw-using-ingress-forwarded.adoc deleted file mode 100644 index db2a0f0950e1..000000000000 --- a/modules/nw-using-ingress-forwarded.adoc +++ /dev/null @@ -1,59 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring-ingress-controller - -:_content-type: PROCEDURE -[id="nw-using-ingress-forwarded_{context}"] -= Using X-Forwarded headers - -You configure the HAProxy Ingress Controller to specify a policy for how to handle HTTP headers including `Forwarded` and `X-Forwarded-For`. The Ingress Operator uses the `HTTPHeaders` field to configure the `ROUTER_SET_FORWARDED_HEADERS` environment variable of the Ingress Controller. - -.Procedure - -. Configure the `HTTPHeaders` field for the Ingress Controller. -.. Use the following command to edit the `IngressController` resource: -+ -[source,terminal] ----- -$ oc edit IngressController ----- -+ -.. Under `spec`, set the `HTTPHeaders` policy field to `Append`, `Replace`, `IfNone`, or `Never`: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: IngressController -metadata: - name: default - namespace: openshift-ingress-operator -spec: - httpHeaders: - forwardedHeaderPolicy: Append ----- - - -[discrete] -== Example use cases - -*As a cluster administrator, you can:* - -* Configure an external proxy that injects the `X-Forwarded-For` header into each request before forwarding it to an Ingress Controller. -+ -To configure the Ingress Controller to pass the header through unmodified, you specify the `never` policy. The Ingress Controller then never sets the headers, and applications receive only the headers that the external proxy provides. - - -* Configure the Ingress Controller to pass the `X-Forwarded-For` header that your external proxy sets on external cluster requests through unmodified. 
-+ -To configure the Ingress Controller to set the `X-Forwarded-For` header on internal cluster requests, which do not go through the external proxy, specify the `if-none` policy. If an HTTP request already has the header set through the external proxy, then the Ingress Controller preserves it. If the header is absent because the request did not come through the proxy, then the Ingress Controller adds the header. - -*As an application developer, you can:* - -* Configure an application-specific external proxy that injects the `X-Forwarded-For` header. -+ -To configure an Ingress Controller to pass the header through unmodified for an application's Route, without affecting the policy for other Routes, add an annotation `haproxy.router.openshift.io/set-forwarded-headers: if-none` or `haproxy.router.openshift.io/set-forwarded-headers: never` on the Route for the application. -+ -[NOTE] -==== -You can set the `haproxy.router.openshift.io/set-forwarded-headers` annotation on a per route basis, independent from the globally set value for the Ingress Controller. -==== diff --git a/modules/nw-using-load-balancer-getting-traffic.adoc b/modules/nw-using-load-balancer-getting-traffic.adoc deleted file mode 100644 index 7e37be14ecf3..000000000000 --- a/modules/nw-using-load-balancer-getting-traffic.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * ingress/getting-traffic-cluster.adoc - -[id="nw-using-load-balancer-getting-traffic_{context}"] -= Using a load balancer to get traffic into the cluster - -If you do not need a specific external IP address, you can configure a load -balancer service to allow external access to an {product-title} cluster. - -A load balancer service allocates a unique IP. The load balancer has a single -edge router IP, which can be a virtual IP (VIP), but is still a single machine -for initial load balancing. - -[NOTE] -==== -If a pool is configured, it is done at the infrastructure level, not by a cluster -administrator. -==== diff --git a/modules/nw-using-nodeport.adoc b/modules/nw-using-nodeport.adoc deleted file mode 100644 index d5f4b53cf188..000000000000 --- a/modules/nw-using-nodeport.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc - -[id="nw-using-nodeport_{context}"] -= Using a NodePort to get traffic into the cluster - -Use a `NodePort`-type `Service` resource to expose a service on a specific port -on all nodes in the cluster. The port is specified in the `Service` resource's -`.spec.ports[*].nodePort` field. - -[IMPORTANT] -==== -Using a node port requires additional port resources. -==== - -A `NodePort` exposes the service on a static port on the node's IP address. -``NodePort``s are in the `30000` to `32767` range by default, which means a -`NodePort` is unlikely to match a service's intended port. For example, port -`8080` may be exposed as port `31020` on the node. - -The administrator must ensure the external IP addresses are routed to the nodes. - -``NodePort``s and external IPs are independent and both can be used -concurrently. 
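As a concrete illustration of the `.spec.ports[*].nodePort` field described above, a `NodePort`-type `Service` might look like the following sketch. The service name, selector label, and port numbers are assumptions chosen to mirror the 8080/31020 example in the text:

[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: example-nodeport-service   # assumed name
spec:
  type: NodePort
  selector:
    app: example                   # assumed pod label
  ports:
  - port: 8080                     # service port inside the cluster
    targetPort: 8080               # container port on the pod
    nodePort: 31020                # static port exposed on every node (30000-32767 by default)
----

If `nodePort` is omitted, the cluster assigns a free port from the default range automatically.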
diff --git a/modules/nw-using-service-external-ip.adoc b/modules/nw-using-service-external-ip.adoc deleted file mode 100644 index ad18f1bf02d3..000000000000 --- a/modules/nw-using-service-external-ip.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-service-external-ip.adoc - -[id="nw-service-external-ip_{context}"] -= Using a service external IP to get traffic into the cluster - -One method to expose a service is to assign an external IP address directly to -the service you want to make accessible from outside the cluster. - -The external IP address that you use must be provisioned on your infrastructure -platform and attached to a cluster node. - -With an external IP on the service, {product-title} sets up NAT rules to -allow traffic arriving at any cluster node attached to that IP address to be -sent to one of the internal pods. This is similar to the internal -service IP addresses, but the external IP tells {product-title} that this -service should also be exposed externally at the given IP. The administrator -must assign the IP address to a host (node) interface on one of the nodes in the -cluster. Alternatively, the address can be used as a virtual IP (VIP). - -These IP addresses are not managed by {product-title}. The cluster administrator is responsible for ensuring that traffic arrives at a node with this IP address. diff --git a/modules/nw-view-status-configuration-network-observability-operator.adoc b/modules/nw-view-status-configuration-network-observability-operator.adoc deleted file mode 100644 index 6f68da51b9f5..000000000000 --- a/modules/nw-view-status-configuration-network-observability-operator.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Module included in the following assemblies: -// * networking/network_observability/understanding-network-observability-operator.adoc - -:_content-type: PROCEDURE -[id="nw-status-configuration-network-observability-operator_{context}"] -= Viewing Network Observability Operator status and configuration - -You can inspect the status and view the details of the `FlowCollector` using the `oc describe` command. - -.Procedure - -. Run the following command to view the status and configuration of the Network Observability Operator: -+ -[source,terminal] ----- -$ oc describe flowcollector/cluster ----- \ No newline at end of file diff --git a/modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc b/modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc deleted file mode 100644 index aa913c89d17e..000000000000 --- a/modules/oadp-about-backing-and-restoring-from-cluster-to-cluster.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/advanced-topics.adoc - - -:_content-type: CONCEPT -[id="oadp-about-backing-and-restoring-from-cluster-to-cluster_{context}"] -= About backing up data from one cluster and restoring it on another cluster - -{oadp-first} is designed to back up and restore application data in the same {product-title} cluster. {mtc-full} ({mtc-short}) is designed to migrate containers, including application data, from one {product-title} cluster to another cluster. - -You can use OADP to back up application data from one {product-title} cluster and restore it on another cluster. However, doing so is more complicated than using {mtc-short} or using OADP to back up and restore on the same cluster. 
- -To successfully use OADP to back up data from one cluster and restore it to another cluster, you must take into account the following factors, in addition to the prerequisites and procedures that apply to using OADP to back up and restore data on the same cluster: - -* Operators -* Use of Velero -* UID and GID ranges - -[id="oadp-cluster-to-cluster-operators_{context}"] -== Operators -You must exclude Operators from the backup of an application for backup and restore to succeed. - -[id="oadp-cluster-to-cluster-velero_{context}"] -== Use of Velero - -Velero, which OADP is built upon, does not natively support migrating persistent volume snapshots across cloud providers. To migrate volume snapshot data between cloud platforms, you must _either_ enable the Velero Restic file system backup option, which backs up volume contents at the filesystem level, _or_ use the OADP Data Mover for CSI snapshots. - -[NOTE] -==== -In OADP 1.1 and earlier, the Velero Restic file system backup option is called `restic`. -In OADP 1.2 and later, the Velero Restic file system backup option is called `file-system-backup`. -==== - -[NOTE] -==== -Velero's file system backup feature supports both Kopia and Restic, but currently OADP supports only Restic. -==== - -* You must also use Velero's link:https://velero.io/docs/main/file-system-backup/[File System Backup] to migrate data between AWS regions or between Microsoft Azure regions. -* Velero does not support restoring data to a cluster with an _earlier_ Kubernetes version than the source cluster. -* It is theoretically possible to migrate workloads to a destination with a _later_ Kubernetes version than the source, but you must consider the compatibility of API groups between clusters for each custom resource. If a Kubernetes version upgrade breaks the compatibility of core or native API groups, you must first update the impacted custom resources. - -[id="oadp-cluster-to-cluster-uid-and-gid-ranges_{context}"] -== UID and GID ranges - -When you back up data from one cluster and restore it to another cluster, there are potential issues that might arise with UID (User ID) and GID (Group ID) ranges. The following section explains these potential issues and mitigations: - -Summary of issues:: -The UID and GID ranges of the namespace might change on the destination cluster. OADP does not back up and restore OpenShift UID range metadata. If the backed application requires a specific UID, ensure the range is available when restored. For more information about OpenShift's UID and GID ranges, see link:https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids[A Guide to OpenShift and UIDs]. - -Detailed description of issues:: -When you create a namespace in {product-title} by using the shell command `oc create namespace`, {product-title} assigns the namespace a unique User ID (UID) range from its available pool of UIDs, a Supplemental Group (GID) range, and unique SELinux MCS labels. This information is stored in the `metadata.annotations` field of the cluster. This information is part of the Security Context Constraints (SCC) annotations, which comprise the following components: - -* `openshift.io/sa.scc.mcs` -* `openshift.io/sa.scc.supplemental-groups` -* `openshift.io/sa.scc.uid-range` - -+ -When you use OADP to restore the namespace, it automatically uses the information in `metadata.annotations` without resetting it for the destination cluster. 
As a result, the workload might not have access to the backed up data if one of the following is true: - -* There is a pre-existing namespace with different SCC annotations, for example, on a different cluster. In this case, at backup time, OADP reuses the pre-existing namespace instead of the namespace you are trying to restore. -* The backup used a label selector, but the namespace where workloads run on does not have the label on it. In this case, OADP does not back up the namespace, but instead creates a new namespace during restore that does not include the annotations of the namespace you backed up. This causes a new UID range to be assigned to the namespace. -+ -This might be an issue for customer workloads if {product-title} assigns a pod a `securityContext` UID based on namespace annotations that have changed from the time the persistent volume data was backed up. -* The container UID no longer matches the UID of the file owner. -* An error occurs because {product-title} did not modify the UID range of the destination cluster to match the data of the backup cluster. As a result, the backup cluster has a different UID than the destination cluster, which means the application cannot read or write data to the destination cluster. - -Mitigations:: - -You can use one or more of the following mitigations to resolve the UID and GID range issues: - -* Simple mitigations: - -** If you use a label selector in the `Backup` CR to filter the objects to include in the backup, be sure to add this label selector to the namespace that contains the workspace. -** Remove any pre-existing version of a namespace on the destination cluster before attempting to restore a namespace with the same name. - -* Advanced mitigations: -** Fix UID ranges after migration by performing steps 1-4 of link:https://access.redhat.com/articles/6844071[Fixing UID ranges after migration]. Step 1 is optional. - -For an in-depth discussion of UID and GID ranges in {product-title} with an emphasis on overcoming issues in backing up data on one cluster and restoring it on another, see link:https://cloud.redhat.com/blog/a-guide-to-openshift-and-uids[A Guide to OpenShift and UIDs]. diff --git a/modules/oadp-about-backup-snapshot-locations-secrets.adoc b/modules/oadp-about-backup-snapshot-locations-secrets.adoc deleted file mode 100644 index 777f8d7da208..000000000000 --- a/modules/oadp-about-backup-snapshot-locations-secrets.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: CONCEPT -[id="oadp-about-backup-snapshot-locations_{context}"] -= About backup and snapshot locations and their secrets - -You specify backup and snapshot locations and their secrets in the `DataProtectionApplication` custom resource (CR). - -[id="backup-locations_{context}"] -[discrete] -== Backup locations - -You specify S3-compatible object storage, such as Multicloud Object Gateway, Noobaa, or Minio, as a backup location. - -Velero backs up {product-title} resources, Kubernetes objects, and internal images as an archive file on object storage. 
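As an illustrative sketch of how a backup location is declared, the relevant portion of a `DataProtectionApplication` CR might resemble the following. This fragment is trimmed to the backup location only, and the bucket name, prefix, region, and Secret name are assumptions rather than required values:

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample                   # assumed name
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      provider: aws                  # an S3-compatible provider
      default: true
      objectStorage:
        bucket: example-bucket       # assumed bucket name
        prefix: velero               # assumed prefix within the bucket
      config:
        region: us-east-1            # assumed region
      credential:
        name: cloud-credentials      # assumed Secret holding the credentials-velero file
        key: cloud
----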
- -[id="snapshot-locations_{context}"] -[discrete] -== Snapshot locations - -If you use your cloud provider's native snapshot API to back up persistent volumes, you must specify the cloud provider as the snapshot location. - -If you use Container Storage Interface (CSI) snapshots, you do not need to specify a snapshot location because you will create a `VolumeSnapshotClass` CR to register the CSI driver. - -If you use Restic, you do not need to specify a snapshot location because Restic backs up the file system on object storage. - -[id="secrets_{context}"] -[discrete] -== Secrets - -If the backup and snapshot locations use the same credentials or if you do not require a snapshot location, you create a default `Secret`. - -If the backup and snapshot locations use different credentials, you create two secret objects: - -* Custom `Secret` for the backup location, which you specify in the `DataProtectionApplication` CR. -* Default `Secret` for the snapshot location, which is not referenced in the `DataProtectionApplication` CR. - -[IMPORTANT] -==== -The Data Protection Application requires a default `Secret`. Otherwise, the installation will fail. - -If you do not want to specify backup or snapshot locations during the installation, you can create a default `Secret` with an empty `credentials-velero` file. -==== diff --git a/modules/oadp-about-enable-api-group-versions.adoc b/modules/oadp-about-enable-api-group-versions.adoc deleted file mode 100644 index 62b1df888c38..000000000000 --- a/modules/oadp-about-enable-api-group-versions.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/advanced-topics.adoc - - -:_content-type: CONCEPT -[id="oadp-about-enable-api-group-versions_{context}"] -= About Enable API Group Versions - -By default, Velero only backs up resources that use the preferred version of the Kubernetes API. However, Velero also includes a feature, link:https://velero.io/docs/v1.9/enable-api-group-versions-feature/[Enable API Group Versions], that overcomes this limitation. When enabled on the source cluster, this feature causes Velero to back up _all_ Kubernetes API group versions that are supported on the cluster, not only the preferred one. After the versions are stored in the backup .tar file, they are available to be restored on the destination cluster. - -For example, a source cluster with an API named `Example` might be available in the `example.com/v1` and `example.com/v1beta2` API groups, with `example.com/v1` being the preferred API. - -Without the Enable API Group Versions feature enabled, Velero backs up only the preferred API group version for `Example`, which is `example.com/v1`. With the feature enabled, Velero also backs up `example.com/v1beta2`. - -When the Enable API Group Versions feature is enabled on the destination cluster, Velero selects the version to restore on the basis of the order of priority of API group versions. - -[NOTE] -==== -Enable API Group Versions is still in beta. -==== - -Velero uses the following algorithm to assign priorities to API versions, with `1` as the top priority: - -. Preferred version of the _destination_ cluster -. Preferred version of the _source_ cluster -.
Common non-preferred supported version with the highest Kubernetes version priority - -[role="_additional-resources"] -.Additional resources -* link:https://velero.io/docs/v1.9/enable-api-group-versions-feature/[Enable API Group Versions Feature] diff --git a/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc b/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc deleted file mode 100644 index 83edbc71bdd7..000000000000 --- a/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/advanced-topics.adoc - - -:_content-type: CONCEPT -[id="oadp-backing-and-restoring-from-cluster-to-cluster_{context}"] -= Backing up data from one cluster and restoring it to another cluster - -In general, you back up data from one {product-title} cluster and restore it on another {product-title} cluster in the same way that you back up and restore data to the same cluster. However, there are some additional prerequisites and differences in the procedure when backing up data from one {product-title} cluster and restoring it on another. - -.Prerequisites - -* All relevant prerequisites for backing up and restoring on your platform (for example, AWS, Microsoft Azure, GCP, and so on), especially the prerequisites for the Data Protection Application (DPA), are described in the relevant sections of this guide. - -.Procedure - -* Make the following additions to the procedures given for your platform: - -** Ensure that the backup storage location (BSL) and volume snapshot location have the same names and paths to restore resources to another cluster. -** Share the same object storage location credentials across the clusters. -** For best results, use OADP to create the namespace on the destination cluster. -** If you use the Velero `file-system-backup` option, enable the `--default-volumes-to-fs-backup` flag for use during backup by running the following command: -+ -[source,terminal] ----- -$ velero backup create --default-volumes-to-fs-backup ----- - -[NOTE] -==== -In OADP 1.2 and later, the Velero Restic option is called `file-system-backup`. -==== diff --git a/modules/oadp-backing-up-applications-restic.adoc b/modules/oadp-backing-up-applications-restic.adoc deleted file mode 100644 index 01777409bc5d..000000000000 --- a/modules/oadp-backing-up-applications-restic.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-backing-up-applications-restic_{context}"] -= Backing up applications with Restic - -You back up Kubernetes resources, internal images, and persistent volumes with Restic by editing the `Backup` custom resource (CR). - -You do not need to specify a snapshot location in the `DataProtectionApplication` CR. - -[IMPORTANT] -==== -Restic does not support backing up `hostPath` volumes. For more information, see link:https://{velero-domain}/docs/v{velero-version}/restic/#limitations[additional Restic limitations]. -==== - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. -* You must not disable the default Restic installation by setting `spec.configuration.restic.enable` to `false` in the `DataProtectionApplication` CR (see the sketch after this list). -* The `DataProtectionApplication` CR must be in a `Ready` state.
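The prerequisite about not disabling Restic corresponds to the `spec.configuration.restic.enable` field of the `DataProtectionApplication` CR. The following is a minimal sketch of the relevant fragment, based on the DPA examples elsewhere in this guide; the plugin list is illustrative only.

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: velero-sample
  namespace: openshift-adp
spec:
  configuration:
    restic:
      enable: true # leave enabled, or omit the field; setting false disables the default Restic installation
    velero:
      defaultPlugins:
      - openshift
      - aws
----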
- -.Procedure - -* Edit the `Backup` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - labels: - velero.io/storage-location: default - namespace: openshift-adp -spec: - defaultVolumesToRestic: true <1> -... ----- -<1> Add `defaultVolumesToRestic: true` to the `spec` block. diff --git a/modules/oadp-backing-up-pvs-csi.adoc b/modules/oadp-backing-up-pvs-csi.adoc deleted file mode 100644 index d3a6aed2e121..000000000000 --- a/modules/oadp-backing-up-pvs-csi.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-backing-up-pvs-csi_{context}"] -= Backing up persistent volumes with CSI snapshots - -You back up persistent volumes with Container Storage Interface (CSI) snapshots by editing the `VolumeSnapshotClass` custom resource (CR) of the cloud storage before you create the `Backup` CR. - -.Prerequisites - -* The cloud provider must support CSI snapshots. -* You must enable CSI in the `DataProtectionApplication` CR. - -.Procedure - -* Add the `metadata.labels.velero.io/csi-volumesnapshot-class: "true"` key-value pair to the `VolumeSnapshotClass` CR: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: snapshot.storage.k8s.io/v1 -kind: VolumeSnapshotClass -metadata: - name: - labels: - velero.io/csi-volumesnapshot-class: "true" -driver: -deletionPolicy: Retain ----- - -You can now create a `Backup` CR. diff --git a/modules/oadp-backup-restore-cr-issues.adoc b/modules/oadp-backup-restore-cr-issues.adoc deleted file mode 100644 index 472e692dfea3..000000000000 --- a/modules/oadp-backup-restore-cr-issues.adoc +++ /dev/null @@ -1,87 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: CONCEPT -[id="oadp-backup-restore-cr-issues_{context}"] -= Backup and Restore CR issues - -You might encounter these common issues with `Backup` and `Restore` custom resources (CRs). - -[id="backup-cannot-retrieve-volume_{context}"] -== Backup CR cannot retrieve volume - -The `Backup` CR displays the error message, `InvalidVolume.NotFound: The volume ‘vol-xxxx’ does not exist`. - -.Cause - -The persistent volume (PV) and the snapshot locations are in different regions. - -.Solution - -. Edit the value of the `spec.snapshotLocations.velero.config.region` key in the `DataProtectionApplication` manifest so that the snapshot location is in the same region as the PV. -. Create a new `Backup` CR. - -[id="backup-cr-remains-in-progress_{context}"] -== Backup CR status remains in progress - -The status of a `Backup` CR remains in the `InProgress` phase and does not complete. - -.Cause - -If a backup is interrupted, it cannot be resumed. - -.Solution - -. Retrieve the details of the `Backup` CR: -+ -[source,terminal] ----- -$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \ - backup describe ----- - -. Delete the `Backup` CR: -+ -[source,terminal] ----- -$ oc delete backup -n openshift-adp ----- -+ -You do not need to clean up the backup location because a `Backup` CR in progress has not uploaded files to object storage. - -. Create a new `Backup` CR. 
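Before you delete a stuck backup, you can confirm that its phase is still `InProgress`. The following is a minimal sketch that reuses the jsonpath pattern used elsewhere in this guide; `<backup_name>` is a placeholder.

[source,terminal]
----
$ oc get backup <backup_name> -n openshift-adp -o jsonpath='{.status.phase}'
----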
- -[id="backup-cr-remains-partiallyfailed_{context}"] -== Backup CR status remains in PartiallyFailed - -The status of a `Backup` CR without Restic in use remains in the `PartiallyFailed` phase and does not complete. A snapshot of the affiliated PVC is not created. - -.Cause - -If the backup is created based on the CSI snapshot class, but the label is missing, CSI snapshot plugin fails to create a snapshot. As a result, the `Velero` pod logs an error similar to the following: -+ -[source,text] ----- -time="2023-02-17T16:33:13Z" level=error msg="Error backing up item" backup=openshift-adp/user1-backup-check5 error="error executing custom action (groupResource=persistentvolumeclaims, namespace=busy1, name=pvc1-user1): rpc error: code = Unknown desc = failed to get volumesnapshotclass for storageclass ocs-storagecluster-ceph-rbd: failed to get volumesnapshotclass for provisioner openshift-storage.rbd.csi.ceph.com, ensure that the desired volumesnapshot class has the velero.io/csi-volumesnapshot-class label" logSource="/remote-source/velero/app/pkg/backup/backup.go:417" name=busybox-79799557b5-vprq ----- - -.Solution - -. Delete the `Backup` CR: -+ -[source,terminal] ----- -$ oc delete backup -n openshift-adp ----- - -. If required, clean up the stored data on the `BackupStorageLocation` to free up space. - -. Apply label `velero.io/csi-volumesnapshot-class=true` to the `VolumeSnapshotClass` object: -+ -[source,terminal] ----- -$ oc label volumesnapshotclass/ velero.io/csi-volumesnapshot-class=true ----- - -. Create a new `Backup` CR. \ No newline at end of file diff --git a/modules/oadp-ceph-cephfs-back-up-dba.adoc b/modules/oadp-ceph-cephfs-back-up-dba.adoc deleted file mode 100644 index c403e45515ab..000000000000 --- a/modules/oadp-ceph-cephfs-back-up-dba.adoc +++ /dev/null @@ -1,82 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-ceph-cephfs-back-up-dba_{context}"] -= Creating a DPA for use with CephFS storage - -You must create a Data Protection Application (DPA) CR before you use the OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using CephFS storage. - -.Procedure - -. Verify that the `deletionPolicy` field of the `VolumeSnapshotClass` CR is set to `Retain` by running the following command: -+ -[source,terminal] ----- -$ oc get volumesnapshotclass -A -o jsonpath='{range .items[*]}{"Name: "}{.metadata.name}{" "}{"Retention Policy: "}{.deletionPolicy}{"\n"}{end}' ----- - -. Verify that the labels of the `VolumeSnapshotClass` CR are set to `true` by running the following command: -+ -[source,terminal] ----- -$ oc get volumesnapshotclass -A -o jsonpath='{range .items[*]}{"Name: "}{.metadata.name}{" "}{"labels: "}{.metadata.labels}{"\n"}{end}' ----- - -. Verify that the `storageclass.kubernetes.io/is-default-class` annotation of the `StorageClass` CR is set to `true` by running the following command: -+ -[source,terminal] ----- -$ oc get storageClass -A -o jsonpath='{range .items[*]}{"Name: "}{.metadata.name}{" "}{"annotations: "}{.metadata.annotations}{"\n"}{end}' ----- - -. 
Create a Data Protection Application (DPA) CR similar to the following example: -+ -.Example DPA CR -+ -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample - namespace: openshift-adp -spec: - backupLocations: - - velero: - config: - profile: default - region: us-east-1 - credential: - key: cloud - name: cloud-credentials - default: true - objectStorage: - bucket: - prefix: velero - provider: aws - configuration: - restic: - enable: false <1> - velero: - defaultPlugins: - - openshift - - aws - - csi - - vsm - features: - dataMover: - credentialName: <2> - enable: true <3> - volumeOptionsForStorageClasses: - ocs-storagecluster-cephfs: - sourceVolumeOptions: - accessMode: ReadOnlyMany - cacheAccessMode: ReadWriteMany - cacheStorageClassName: ocs-storagecluster-cephfs - storageClassName: ocs-storagecluster-cephfs-shallow ----- -<1> There is no default value for the `enable` field. Valid values are `true` or `false`. -<2> Use the Restic `Secret` that you created when you prepared your environment for working with OADP 1.2 Data Mover and Ceph. If you do not use your Restic `Secret`, the CR uses the default value `dm-credential` for this parameter. -<3> There is no default value for the `enable` field. Valid values are `true` or `false`. diff --git a/modules/oadp-ceph-cephfs-back-up.adoc b/modules/oadp-ceph-cephfs-back-up.adoc deleted file mode 100644 index 5cb6eac7277f..000000000000 --- a/modules/oadp-ceph-cephfs-back-up.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -ifeval::["{context}" == "cephfs"] -:cephfs: -endif::[] -ifeval::["{context}" == "split"] -:split: -endif::[] - -:_content-type: PROCEDURE -[id="oadp-ceph-cephfs-back-up_{context}"] - -ifdef::cephfs[] -= Backing up data using OADP 1.2 Data Mover and CephFS storage -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up data using CephFS storage by enabling the shallow copy feature of CephFS storage. -endif::cephfs[] - -ifdef::split[] -= Backing up data using OADP 1.2 Data Mover and split volumes -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up data in an environment that has split volumes. -endif::split[] - -.Procedure - -. Create a `Backup` CR as in the following example: -+ -.Example `Backup` CR -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - namespace: -spec: - includedNamespaces: - - - storageLocation: velero-sample-1 ----- - -. Monitor the progress of the `VolumeSnapshotBackup` CRs by completing the following steps: -.. To check the progress of all the `VolumeSnapshotBackup` CRs, run the following command: -+ -[source, terminal] ----- -$ oc get vsb -n ----- - -.. To check the progress of a specific `VolumeSnapshotBackup` CR, run the following command: -+ -[source,terminal] ----- -$ oc get vsb -n -ojsonpath="{.status.phase}` ----- - -. Wait several minutes until the `VolumeSnapshotBackup` CR has the status `Completed`. -. Verify that there is at least one snapshot in the object store that is given in the Restic `Secret`. You can check for this snapshot in your targeted `BackupStorageLocation` storage provider that has a prefix of `/`. 
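One possible way to verify the snapshot is to list the backup bucket prefix directly. The following sketch assumes an AWS S3 backup location and a configured `aws` CLI; the bucket name and prefix are placeholders taken from your `BackupStorageLocation`.

[source,terminal]
----
$ aws s3 ls s3://<bucket_name>/<prefix>/ --recursive
----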
- -ifeval::["{context}" == "cephfs"] -:!cephfs: -endif::[] -ifeval::["{context}" == "split"] -:!split: -endif::[] diff --git a/modules/oadp-ceph-cephfs-restore.adoc b/modules/oadp-ceph-cephfs-restore.adoc deleted file mode 100644 index f87a3e875565..000000000000 --- a/modules/oadp-ceph-cephfs-restore.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -ifeval::["{context}" == "cephfs"] -:cephfs: -endif::[] -ifeval::["{context}" == "split"] -:split: -endif::[] - -:_content-type: PROCEDURE -[id="oadp-ceph-cephfs-restore_{context}"] - -ifdef::cephfs[] -= Restoring data using OADP 1.2 Data Mover and CephFS storage -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to restore data using CephFS storage if the shallow copy feature of CephFS storage was enabled for the backup procedure. The shallow copy feature is not used in the restore procedure. -endif::cephfs[] - -ifdef::split[] -= Restoring data using OADP 1.2 Data Mover and split volumes -You can use OpenShift API for Data Protection (OADP) 1.2 Data Mover to restore data in an environment that has split volumes, if the shallow copy feature of CephFS storage was enabled for the backup procedure. The shallow copy feature is not used in the restore procedure. -endif::split[] - -.Procedure - -. Delete the `VolumeSnapshotBackup` CRs in the application namespace by running the following command: -+ -[source,terminal] ----- -$ oc delete vsb -n --all ----- - -. Delete any `VolumeSnapshotContent` CRs that were created during backup by running the following command: -+ -[source,terminal] ----- -$ oc delete volumesnapshotcontent --all ----- - -. Create a `Restore` CR as in the following example: -+ -.Example `Restore` CR -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - name: - namespace: -spec: - backupName: ----- - -. Monitor the progress of the `VolumeSnapshotRestore` CRs by doing the following: -.. To check the progress of all the `VolumeSnapshotRestore` CRs, run the following command: -+ -[source, terminal] ----- -$ oc get vsr -n ----- - -.. To check the progress of a specific `VolumeSnapshotRestore` CR, run the following command: -+ -[source,terminal] ----- -$ oc get vsr -n -ojsonpath="{.status.phase}" ----- - -. Verify that your application data has been restored by running the following command: -+ -[source,terminal] ----- -$ oc get route -n -ojsonpath="{.spec.host}" ----- - -ifeval::["{context}" == "cephfs"] -:!cephfs: -endif::[] -ifeval::["{context}" == "split"] -:!split: -endif::[] diff --git a/modules/oadp-ceph-preparing-cephfs-crs.adoc b/modules/oadp-ceph-preparing-cephfs-crs.adoc deleted file mode 100644 index 7b5effec9ce2..000000000000 --- a/modules/oadp-ceph-preparing-cephfs-crs.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-ceph-preparing-cephfs-crs_{context}"] -= Defining CephFS custom resources for use with OADP 1.2 Data Mover - -When you install {rh-storage-first}, it automatically creates a default CephFS `StorageClass` custom resource (CR) and a default CephFS `VolumeSnapshotClass` CR. You can define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover. - -.Procedure - -.
Define the `VolumeSnapshotClass` CR as in the following example: -+ -.Example `VolumeSnapshotClass` CR -+ -[source,yaml] ----- -apiVersion: snapshot.storage.k8s.io/v1 -deletionPolicy: Retain <1> -driver: openshift-storage.cephfs.csi.ceph.com -kind: VolumeSnapshotClass -metadata: - annotations: - snapshot.storage.kubernetes.io/is-default-class: true <2> - labels: - velero.io/csi-volumesnapshot-class: true <3> - name: ocs-storagecluster-cephfsplugin-snapclass -parameters: - clusterID: openshift-storage - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: openshift-storage ----- -<1> Must be set to `Retain`. -<2> Must be set to `true`. -<3> Must be set to `true`. - -. Define the `StorageClass` CR as in the following example: -+ -.Example `StorageClass` CR -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ocs-storagecluster-cephfs - annotations: - description: Provides RWO and RWX Filesystem volumes - storageclass.kubernetes.io/is-default-class: true <1> -provisioner: openshift-storage.cephfs.csi.ceph.com -parameters: - clusterID: openshift-storage - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage - fsName: ocs-storagecluster-cephfilesystem -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: Immediate ----- -<1> Must be set to `true`. diff --git a/modules/oadp-ceph-preparing-cephrbd-crs.adoc b/modules/oadp-ceph-preparing-cephrbd-crs.adoc deleted file mode 100644 index 65d287de3e82..000000000000 --- a/modules/oadp-ceph-preparing-cephrbd-crs.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-ceph-preparing-cephrbd-crs_{context}"] -= Defining CephRBD custom resources for use with OADP 1.2 Data Mover - -When you install {rh-storage-first}, it automatically creates a default CephRBD `StorageClass` custom resource (CR) and a default CephRBD `VolumeSnapshotClass` CR. You can define these CRs for use with OpenShift API for Data Protection (OADP) 1.2 Data Mover. - -.Procedure - -. Define the `VolumeSnapshotClass` CR as in the following example: -+ -.Example `VolumeSnapshotClass` CR -+ -[source,yaml] ----- -apiVersion: snapshot.storage.k8s.io/v1 -deletionPolicy: Retain <1> -driver: openshift-storage.rbd.csi.ceph.com -kind: VolumeSnapshotClass -metadata: - labels: - velero.io/csi-volumesnapshot-class: true <2> - name: ocs-storagecluster-rbdplugin-snapclass -parameters: - clusterID: openshift-storage - csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/snapshotter-secret-namespace: openshift-storage ----- -<1> Must be set to `Retain`. -<2> Must be set to `true`. - -. 
Define the `StorageClass` CR as in the following example: -+ -.Example `StorageClass` CR -+ -[source,yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ocs-storagecluster-ceph-rbd - annotations: - description: 'Provides RWO Filesystem volumes, and RWO and RWX Block volumes' -provisioner: openshift-storage.rbd.csi.ceph.com -parameters: - csi.storage.k8s.io/fstype: ext4 - csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - imageFormat: '2' - clusterID: openshift-storage - imageFeatures: layering - csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage - pool: ocs-storagecluster-cephblockpool - csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: Immediate ----- diff --git a/modules/oadp-ceph-preparing-crs-additional.adoc b/modules/oadp-ceph-preparing-crs-additional.adoc deleted file mode 100644 index 80f77b37c533..000000000000 --- a/modules/oadp-ceph-preparing-crs-additional.adoc +++ /dev/null @@ -1,65 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-ceph-preparing-crs-additional_{context}"] -= Defining additional custom resources for use with OADP 1.2 Data Mover - -After you redefine the default `StorageClass` and CephRBD `VolumeSnapshotClass` custom resources (CRs), you must create the following CRs: - -* A CephFS `StorageClass` CR defined to use the shallow copy feature -* A Restic `Secret` CR - -.Procedure - -. Create a CephFS `StorageClass` CR and set the `backingSnapshot` parameter to `true` as in the following example: -+ -.Example CephFS `StorageClass` CR with `backingSnapshot` set to `true` -+ -[source, yaml] ----- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ocs-storagecluster-cephfs-shallow - annotations: - description: Provides RWO and RWX Filesystem volumes - storageclass.kubernetes.io/is-default-class: false -provisioner: openshift-storage.cephfs.csi.ceph.com -parameters: - csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - clusterID: openshift-storage - fsName: ocs-storagecluster-cephfilesystem - csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage - backingSnapshot: true <1> - csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage -reclaimPolicy: Delete -allowVolumeExpansion: true -volumeBindingMode: Immediate ----- -<1> Must be set to `true`. -+ -[IMPORTANT] -==== -Ensure that the CephFS `VolumeSnapshotClass` and `StorageClass` CRs have the same value for `provisioner`. -==== - -.
Configure a Restic `Secret` CR as in the following example: -+ -.Example Restic `Secret` CR -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: - namespace: -type: Opaque -stringData: - RESTIC_PASSWORD: ----- diff --git a/modules/oadp-ceph-prerequisites.adoc b/modules/oadp-ceph-prerequisites.adoc deleted file mode 100644 index 487fce00ec29..000000000000 --- a/modules/oadp-ceph-prerequisites.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - - -:_content-type: CONCEPT -[id="oadp-ceph-prerequisites_{context}"] -= Prerequisites for using OADP 1.2 Data Mover with Ceph storage - -The following prerequisites apply to all back up and restore operations of data using {oadp-first} 1.2 Data Mover in a cluster that uses Ceph storage: - -* You have installed {product-title} 4.12 or later. -* You have installed the OADP Operator. -* You have created a secret `cloud-credentials` in the namespace `openshift-adp.` -* You have installed {rh-storage-first}. -* You have installed the latest VolSync Operator using the Operator Lifecycle Manager. diff --git a/modules/oadp-ceph-split-back-up-dba.adoc b/modules/oadp-ceph-split-back-up-dba.adoc deleted file mode 100644 index 4b6a71446db1..000000000000 --- a/modules/oadp-ceph-split-back-up-dba.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-ceph-split-back-up-dba_{context}"] -= Creating a DPA for use with split volumes - -You must create a Data Protection Application (DPA) CR before you use the OpenShift API for Data Protection (OADP) 1.2 Data Mover to back up and restore data using split volumes. - -.Procedure - -* Create a Data Protection Application (DPA) CR as in the following example: -+ -.Example DPA CR for environment with split volumes - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample - namespace: openshift-adp -spec: - backupLocations: - - velero: - config: - profile: default - region: us-east-1 - credential: - key: cloud - name: cloud-credentials - default: true - objectStorage: - bucket: - prefix: velero - provider: aws - configuration: - restic: - enable: false - velero: - defaultPlugins: - - openshift - - aws - - csi - - vsm - features: - dataMover: - credentialName: <1> - enable: true - volumeOptionsForStorageClasses: <2> - ocs-storagecluster-cephfs: - sourceVolumeOptions: - accessMode: ReadOnlyMany - cacheAccessMode: ReadWriteMany - cacheStorageClassName: ocs-storagecluster-cephfs - storageClassName: ocs-storagecluster-cephfs-shallow - ocs-storagecluster-ceph-rbd: - sourceVolumeOptions: - storageClassName: ocs-storagecluster-ceph-rbd - cacheStorageClassName: ocs-storagecluster-ceph-rbd - destinationVolumeOptions: - storageClassName: ocs-storagecluster-ceph-rbd - cacheStorageClassName: ocs-storagecluster-ceph-rbd ----- -<1> Use the Restic `Secret` that you created when you prepared your environment for working with OADP 1.2 Data Mover and Ceph. If you do not, then the CR will use the default value `dm-credential` for this parameter. -<2> A different set of `VolumeOptionsForStorageClass` labels can be defined for each `storageClass` volume, thus allowing a backup to volumes with different providers. 
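As an alternative to applying the YAML manifest, you can create an equivalent Restic `Secret` from the command line. This is a sketch only; the secret name, namespace, and password are placeholders and must match the value that you reference in the `credentialName` field under `features.dataMover` in the DPA.

[source,terminal]
----
$ oc create secret generic <secret_name> -n <namespace> --from-literal=RESTIC_PASSWORD=<password>
----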
diff --git a/modules/oadp-checking-api-group-versions.adoc b/modules/oadp-checking-api-group-versions.adoc deleted file mode 100644 index 06f947e437b5..000000000000 --- a/modules/oadp-checking-api-group-versions.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/advanced-topics.adoc - - -:_content-type: PROCEDURE -[id="oadp-checking-api-group-versions_{context}"] -= Listing the Kubernetes API group versions on a cluster - -A source cluster might offer multiple versions of an API, where one of these versions is the preferred API version. For example, a source cluster with an API named `Example` might be available in the `example.com/v1` and `example.com/v1beta2` API groups. - -If you use Velero to back up and restore such a source cluster, Velero backs up only the version of that resource that uses the preferred version of its Kubernetes API. - -To return to the above example, if `example.com/v1` is the preferred API, then Velero only backs up the version of a resource that uses `example.com/v1`. Moreover, the target cluster needs to have `example.com/v1` registered in its set of available API resources in order for Velero to restore the resource on the target cluster. - -Therefore, you need to generate a list of the Kubernetes API group versions on your target cluster to be sure the preferred API version is registered in its set of available API resources. - -.Procedure - -* Enter the following command: - -[source,terminal] ----- -$ oc api-resources ----- diff --git a/modules/oadp-cleaning-up-after-data-mover-snapshots.adoc b/modules/oadp-cleaning-up-after-data-mover-snapshots.adoc deleted file mode 100644 index 9a989255968d..000000000000 --- a/modules/oadp-cleaning-up-after-data-mover-snapshots.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-cleaning-up-after-data-mover-snapshots_{context}"] -= Deleting snapshots in a bucket - -OADP 1.1 Data Mover might leave one or more snapshots in a bucket after a backup. You can either delete all the snapshots or delete individual snapshots. - -.Procedure - -* To delete all snapshots in your bucket, delete the `/` folder that is specified in the Data Protection Application (DPA) `.spec.backupLocation.objectStorage.bucket` resource. -* To delete an individual snapshot: -. Browse to the `/` folder that is specified in the DPA `.spec.backupLocation.objectStorage.bucket` resource. -. Delete the appropriate folders that are prefixed with `/-pvc` where `` is the `VolumeSnapshotContent` created by Data Mover per PVC. diff --git a/modules/oadp-configuring-noobaa-for-dr.adoc b/modules/oadp-configuring-noobaa-for-dr.adoc deleted file mode 100644 index 43282d529270..000000000000 --- a/modules/oadp-configuring-noobaa-for-dr.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-configuring-noobaa-for-dr_{context}"] -= Configuring NooBaa for disaster recovery on {rh-storage} - -If you use cluster storage for your NooBaa bucket `backupStorageLocation` on {rh-storage}, configure NooBaa as an external object store. 
- -[WARNING] -==== -Failure to configure NooBaa as an external object store might lead to backups not being available. -==== - -.Procedure - -* Configure NooBaa as an external object store as described in link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.11/html/managing_hybrid_and_multicloud_resources/adding-storage-resources-for-hybrid-or-multicloud_rhodf#doc-wrapper[Adding storage resources for hybrid or Multicloud]. diff --git a/modules/oadp-configuring-velero-plugins.adoc b/modules/oadp-configuring-velero-plugins.adoc deleted file mode 100644 index 0bbf58a055cc..000000000000 --- a/modules/oadp-configuring-velero-plugins.adoc +++ /dev/null @@ -1,75 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc - -:_content-type: CONCEPT -[id="oadp-configuring-velero-plugins_{context}"] -= About OADP Velero plugins - -You can configure two types of plugins when you install Velero: - -* Default cloud provider plugins -* Custom plugins - -Both types of plugin are optional, but most users configure at least one cloud provider plugin. - -== Default Velero cloud provider plugins - -You can install any of the following default Velero cloud provider plugins when you configure the `oadp_v1alpha1_dpa.yaml` file during deployment: - -* `aws` (Amazon Web Services) -* `gcp` (Google Cloud Platform) -* `azure` (Microsoft Azure) -* `openshift` (OpenShift Velero plugin) -* `csi` (Container Storage Interface) -* `kubevirt` (KubeVirt) - -You specify the desired default plugins in the `oadp_v1alpha1_dpa.yaml` file during deployment. - -.Example file - -The following `.yaml` file installs the `openshift`, `aws`, `azure`, and `gcp` plugins: - -[source,yaml] ----- - apiVersion: oadp.openshift.io/v1alpha1 - kind: DataProtectionApplication - metadata: - name: dpa-sample - spec: - configuration: - velero: - defaultPlugins: - - openshift - - aws - - azure - - gcp ----- - -== Custom Velero plugins - -You can install a custom Velero plugin by specifying the plugin `image` and `name` when you configure the `oadp_v1alpha1_dpa.yaml` file during deployment. - -You specify the desired custom plugins in the `oadp_v1alpha1_dpa.yaml` file during deployment. - -.Example file - -The following `.yaml` file installs the default `openshift`, `azure`, and `gcp` plugins and a custom plugin that has the name `custom-plugin-example` and the image `quay.io/example-repo/custom-velero-plugin`: - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: dpa-sample -spec: - configuration: - velero: - defaultPlugins: - - openshift - - azure - - gcp - customPlugins: - - name: custom-plugin-example - image: quay.io/example-repo/custom-velero-plugin ----- diff --git a/modules/oadp-creating-backup-cr.adoc b/modules/oadp-creating-backup-cr.adoc deleted file mode 100644 index 1dcb826fdd40..000000000000 --- a/modules/oadp-creating-backup-cr.adoc +++ /dev/null @@ -1,83 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-creating-backup-cr_{context}"] -= Creating a Backup CR - -You back up Kubernetes images, internal images, and persistent volumes (PVs) by creating a `Backup` custom resource (CR). - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. 
-* The `DataProtectionApplication` CR must be in a `Ready` state. -* Backup location prerequisites: -** You must have S3 object storage configured for Velero. -** You must have a backup location configured in the `DataProtectionApplication` CR. -* Snapshot location prerequisites: -** Your cloud provider must have a native snapshot API or support Container Storage Interface (CSI) snapshots. -** For CSI snapshots, you must create a `VolumeSnapshotClass` CR to register the CSI driver. -** You must have a volume location configured in the `DataProtectionApplication` CR. - -.Procedure - -. Retrieve the `backupStorageLocations` CRs by entering the following command: - -+ -[source,terminal] ----- -$ oc get backupStorageLocations -n openshift-adp ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME PHASE LAST VALIDATED AGE DEFAULT -openshift-adp velero-sample-1 Available 11s 31m ----- - -. Create a `Backup` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - labels: - velero.io/storage-location: default - namespace: openshift-adp -spec: - hooks: {} - includedNamespaces: - - <1> - includedResources: [] <2> - excludedResources: [] <3> - storageLocation: <4> - ttl: 720h0m0s - labelSelector: <5> - matchLabels: - app= - app= - app= - orLabelSelectors: <6> - - matchLabels: - app= - app= - app= ----- -<1> Specify an array of namespaces to back up. -<2> Optional: Specify an array of resources to include in the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified. If unspecified, all resources are included. -<3> Optional: Specify an array of resources to exclude from the backup. Resources might be shortcuts (for example, 'po' for 'pods') or fully-qualified. -<4> Specify the name of the `backupStorageLocations` CR. -<5> Map of {key,value} pairs of backup resources that have *all* of the specified labels. -<6> Map of {key,value} pairs of backup resources that have *one or more* of the specified labels. - -. Verify that the status of the `Backup` CR is `Completed`: -+ -[source,terminal] ----- -$ oc get backup -n openshift-adp -o jsonpath='{.status.phase}' ----- diff --git a/modules/oadp-creating-backup-hooks.adoc b/modules/oadp-creating-backup-hooks.adoc deleted file mode 100644 index e3b25887f2a7..000000000000 --- a/modules/oadp-creating-backup-hooks.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-creating-backup-hooks_{context}"] -= Creating backup hooks - -You create backup hooks to run commands in a container in a pod by editing the `Backup` custom resource (CR). - -_Pre_ hooks run before the pod is backed up. _Post_ hooks run after the backup. - -.Procedure - -* Add a hook to the `spec.hooks` block of the `Backup` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - namespace: openshift-adp -spec: - hooks: - resources: - - name: - includedNamespaces: - - <1> - excludedNamespaces: <2> - - - includedResources: [] - - pods <3> - excludedResources: [] <4> - labelSelector: <5> - matchLabels: - app: velero - component: server - pre: <6> - - exec: - container: <7> - command: - - /bin/uname <8> - - -a - onError: Fail <9> - timeout: 30s <10> - post: <11> -... ----- -<1> Optional: You can specify namespaces to which the hook applies. 
If this value is not specified, the hook applies to all namespaces. -<2> Optional: You can specify namespaces to which the hook does not apply. -<3> Currently, pods are the only supported resource that hooks can apply to. -<4> Optional: You can specify resources to which the hook does not apply. -<5> Optional: This hook only applies to objects matching the label. If this value is not specified, the hook applies to all namespaces. -<6> Array of hooks to run before the backup. -<7> Optional: If the container is not specified, the command runs in the first container in the pod. -<8> This is the entrypoint for the init container being added. -<9> Allowed values for error handling are `Fail` and `Continue`. The default is `Fail`. -<10> Optional: How long to wait for the commands to run. The default is `30s`. -<11> This block defines an array of hooks to run after the backup, with the same parameters as the pre-backup hooks. diff --git a/modules/oadp-creating-default-secret.adoc b/modules/oadp-creating-default-secret.adoc deleted file mode 100644 index b50694a4de03..000000000000 --- a/modules/oadp-creating-default-secret.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-creating-default-secret_{context}"] -= Creating a default Secret - -You create a default `Secret` if your backup and snapshot locations use the same credentials or if you do not require a snapshot location. - -ifdef::installing-oadp-aws,installing-oadp-azure,installing-oadp-gcp,installing-oadp-mcg[] -The default name of the `Secret` is `{credentials}`. -endif::[] -ifdef::installing-oadp-ocs[] -The default name of the `Secret` is `{credentials}`, unless your backup storage provider has a default plugin, such as `aws`, `azure`, or `gcp`. In that case, the default name is specified in the provider-specific OADP installation procedure. -endif::[] - -[NOTE] -==== -The `DataProtectionApplication` custom resource (CR) requires a default `Secret`. Otherwise, the installation will fail. If the name of the backup location `Secret` is not specified, the default name is used. - -If you do not want to use the backup location credentials during the installation, you can create a `Secret` with the default name by using an empty `credentials-velero` file. -==== - -.Prerequisites - -* Your object storage and cloud storage, if any, must use the same credentials. -* You must configure object storage for Velero. -* You must create a `credentials-velero` file for the object storage in the appropriate format. - -.Procedure - -* Create a `Secret` with the default name: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create secret generic {credentials} -n openshift-adp --from-file cloud=credentials-velero ----- - -The `Secret` is referenced in the `spec.backupLocations.credential` block of the `DataProtectionApplication` CR when you install the Data Protection Application. 
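The following is a minimal sketch of how that reference looks in the `DataProtectionApplication` CR, assuming the AWS-style default Secret name `cloud-credentials`; substitute your custom Secret name if you created a custom backup location Secret.

[source,yaml]
----
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud # key in the Secret that contains the credentials file
        name: cloud-credentials # default Secret name, or your custom backup location Secret
----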
diff --git a/modules/oadp-creating-restore-cr.adoc b/modules/oadp-creating-restore-cr.adoc deleted file mode 100644 index 13b807f4bab3..000000000000 --- a/modules/oadp-creating-restore-cr.adoc +++ /dev/null @@ -1,126 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-creating-restore-cr_{context}"] -= Creating a Restore CR - -You restore a `Backup` custom resource (CR) by creating a `Restore` CR. - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. -* The `DataProtectionApplication` CR must be in a `Ready` state. -* You must have a Velero `Backup` CR. -* Adjust the requested size so the persistent volume (PV) capacity matches the requested size at backup time. - -.Procedure - -. Create a `Restore` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - name: - namespace: openshift-adp -spec: - backupName: <1> - includedResources: [] <2> - excludedResources: - - nodes - - events - - events.events.k8s.io - - backups.velero.io - - restores.velero.io - - resticrepositories.velero.io - restorePVs: true <3> ----- -<1> Name of the `Backup` CR. -<2> Optional: Specify an array of resources to include in the restore process. Resources might be shortcuts (for example, `po` for `pods`) or fully-qualified. If unspecified, all resources are included. -<3> Optional: The `restorePVs` parameter can be set to `false` in order to turn off restore of `PersistentVolumes` from `VolumeSnapshot` of Container Storage Interface (CSI) snapshots, or from native snapshots when `VolumeSnaphshotLocation` is configured. - -. Verify that the status of the `Restore` CR is `Completed` by entering the following command: -+ -[source,terminal] ----- -$ oc get restore -n openshift-adp -o jsonpath='{.status.phase}' ----- - -. Verify that the backup resources have been restored by entering the following command: -+ -[source,terminal] ----- -$ oc get all -n <1> ----- -<1> Namespace that you backed up. - -. If you use Restic to restore `DeploymentConfig` objects or if you use post-restore hooks, run the `dc-restic-post-restore.sh` cleanup script by entering the following command: -+ -[source,terminal] ----- -$ bash dc-restic-post-restore.sh ----- -+ -[NOTE] -==== -In the course of the restore process, the OADP Velero plug-ins scale down the `DeploymentConfig` objects and restore the pods as standalone pods to prevent the cluster from deleting the restored `DeploymentConfig` pods immediately on restore and to allow Restic and post-restore hooks to complete their actions on the restored pods. The cleanup script removes these disconnected pods and scale any `DeploymentConfig` objects back up to the appropriate number of replicas. 
-==== -+ -.`dc-restic-post-restore.sh` cleanup script -[%collapsible] -==== -[source,bash] ----- -#!/bin/bash -set -e - -# if sha256sum exists, use it to check the integrity of the file -if command -v sha256sum >/dev/null 2>&1; then - CHECKSUM_CMD="sha256sum" -else - CHECKSUM_CMD="shasum -a 256" -fi - -label_name () { - if [ "${#1}" -le "63" ]; then - echo $1 - return - fi - sha=$(echo -n $1|$CHECKSUM_CMD) - echo "${1:0:57}${sha:0:6}" -} - -OADP_NAMESPACE=${OADP_NAMESPACE:=openshift-adp} - -if [[ $# -ne 1 ]]; then - echo "usage: ${BASH_SOURCE} restore-name" - exit 1 -fi - -echo using OADP Namespace $OADP_NAMESPACE -echo restore: $1 - -label=$(label_name $1) -echo label: $label - -echo Deleting disconnected restore pods -oc delete pods -l oadp.openshift.io/disconnected-from-dc=$label - -for dc in $(oc get dc --all-namespaces -l oadp.openshift.io/replicas-modified=$label -o jsonpath='{range .items[*]}{.metadata.namespace}{","}{.metadata.name}{","}{.metadata.annotations.oadp\.openshift\.io/original-replicas}{","}{.metadata.annotations.oadp\.openshift\.io/original-paused}{"\n"}') -do - IFS=',' read -ra dc_arr <<< "$dc" - if [ ${#dc_arr[0]} -gt 0 ]; then - echo Found deployment ${dc_arr[0]}/${dc_arr[1]}, setting replicas: ${dc_arr[2]}, paused: ${dc_arr[3]} - cat < - namespace: openshift-adp -spec: - hooks: - resources: - - name: - includedNamespaces: - - <1> - excludedNamespaces: - - - includedResources: - - pods <2> - excludedResources: [] - labelSelector: <3> - matchLabels: - app: velero - component: server - postHooks: - - init: - initContainers: - - name: restore-hook-init - image: alpine:latest - volumeMounts: - - mountPath: /restores/pvc1-vm - name: pvc1-vm - command: - - /bin/ash - - -c - timeout: <4> - - exec: - container: <5> - command: - - /bin/bash <6> - - -c - - "psql < /backup/backup.sql" - waitTimeout: 5m <7> - execTimeout: 1m <8> - onError: Continue <9> ----- -<1> Optional: Array of namespaces to which the hook applies. If this value is not specified, the hook applies to all namespaces. -<2> Currently, pods are the only supported resource that hooks can apply to. -<3> Optional: This hook only applies to objects matching the label selector. -<4> Optional: Timeout specifies the maximum amount of time Velero waits for `initContainers` to complete. -<5> Optional: If the container is not specified, the command runs in the first container in the pod. -<6> This is the entrypoint for the init container being added. -<7> Optional: How long to wait for a container to become ready. This should be long enough for the container to start and for any preceding hooks in the same container to complete. If not set, the restore process waits indefinitely. -<8> Optional: How long to wait for the commands to run. The default is `30s`. -<9> Allowed values for error handling are `Fail` and `Continue`: -** `Continue`: Only command failures are logged. -** `Fail`: No more restore hooks run in any container in any pod. The status of the `Restore` CR will be `PartiallyFailed`. 
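To check whether the restore hooks ran, one option is to inspect the restore details with the Velero CLI through the Velero deployment, following the `oc exec` pattern used elsewhere in this guide; `<restore_name>` is a placeholder.

[source,terminal]
----
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero restore describe <restore_name> --details
----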
diff --git a/modules/oadp-debugging-oc-cli.adoc b/modules/oadp-debugging-oc-cli.adoc deleted file mode 100644 index caab87d62170..000000000000 --- a/modules/oadp-debugging-oc-cli.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: REFERENCE -[id="oadp-debugging-oc-cli_{context}"] -= Debugging Velero resources with the OpenShift CLI tool - -You can debug a failed backup or restore by checking Velero custom resources (CRs) and the `Velero` pod log with the OpenShift CLI tool. - -[discrete] -[id="oc-velero-cr_{context}"] -== Velero CRs - -Use the `oc describe` command to retrieve a summary of warnings and errors associated with a `Backup` or `Restore` CR: - -[source,terminal] ----- -$ oc describe ----- - -[discrete] -[id="oc-velero-pod-logs_{context}"] -== Velero pod logs - -Use the `oc logs` command to retrieve the `Velero` pod logs: - -[source,terminal] ----- -$ oc logs pod/ ----- - -[discrete] -[id="oc-velero-debug-logs_{context}"] -== Velero pod debug logs - -You can specify the Velero log level in the `DataProtectionApplication` resource as shown in the following example. - -[NOTE] -==== -This option is available starting from OADP 1.0.3. -==== - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample -spec: - configuration: - velero: - logLevel: warning ----- - -The following `logLevel` values are available: - -* `trace` -* `debug` -* `info` -* `warning` -* `error` -* `fatal` -* `panic` - -It is recommended to use `debug` for most logs. diff --git a/modules/oadp-deleting-backups.adoc b/modules/oadp-deleting-backups.adoc deleted file mode 100644 index a57a3ceec11a..000000000000 --- a/modules/oadp-deleting-backups.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-deleting-backups_{context}"] -= Deleting backups - -You can remove backup files by deleting the `Backup` custom resource (CR). - -[WARNING] -==== -After you delete the `Backup` CR and the associated object storage data, you cannot recover the deleted data. -==== - -.Prerequisites - -* You created a `Backup` CR. -* You know the name of the `Backup` CR and the namespace that contains it. -* You downloaded the Velero CLI tool. -* You can access the Velero binary in your cluster. - -.Procedure - -* Choose one of the following actions to delete the `Backup` CR: - -** To delete the `Backup` CR and keep the associated object storage data, issue the following command: -+ -[source,terminal] ----- -$ oc delete backup -n ----- - -** To delete the `Backup` CR and delete the associated object storage data, issue the following command: -+ -[source,terminal] ----- -$ velero backup delete -n ----- -+ -Where: -+ -:: Specifies the name of the `Backup` custom resource. -:: Specifies the namespace that contains the `Backup` custom resource. 
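If the Velero CLI is not installed on your workstation, you can run the same deletion through the Velero deployment in the cluster, following the `oc exec` pattern used elsewhere in this guide; `<backup_name>` is a placeholder.

[source,terminal]
----
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero backup delete <backup_name> --confirm
----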
\ No newline at end of file diff --git a/modules/oadp-deleting-cluster-resources-following-failure.adoc b/modules/oadp-deleting-cluster-resources-following-failure.adoc deleted file mode 100644 index 7969abd70531..000000000000 --- a/modules/oadp-deleting-cluster-resources-following-failure.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-deleting-cluster-resources-following-failure_{context}"] -= Deleting cluster resources following a partially successful or a failed backup and restore that used Data Mover - -If your backup and restore operation that uses Data Mover either fails or only partially succeeds, you must clean up any `VolumeSnapshotBackup` (VSB) or `VolumeSnapshotRestore` custom resource definitions (CRDs) that exist in the application namespace, and clean up any extra resources created by these controllers. - -.Procedure - -. Clean up cluster resources that remain after a backup operation where you used Data Mover by entering the following commands: - -.. Delete VSB CRDs on the application namespace, the namespace with the application PVCs to backup and restore: -+ -[source,terminal] ----- -$ oc delete vsb -n --all ----- - -.. Delete `VolumeSnapshot` CRs: -+ -[source,terminal] ----- -$ oc delete volumesnapshot -A --all ----- - -.. Delete `VolumeSnapshotContent` CRs: -+ -[source,terminal] ----- -$ oc delete volumesnapshotcontent --all ----- - -.. Delete any PVCs on the protected namespace, the namespace the Operator is installed on. -+ -[source,terminal] ----- -$ oc delete pvc -n --all ----- - -.. Delete any `ReplicationSource` resources on the namespace. -+ -[source,terminal] ----- -$ oc delete replicationsource -n --all ----- - -. Clean up cluster resources that remain after a restore operation using Data Mover by entering the following commands: - -.. Delete VSR CRDs: -+ -[source,terminal] ----- -$ oc delete vsr -n --all ----- - -.. Delete `VolumeSnapshot` CRs: -+ -[source,terminal] ----- -$ oc delete volumesnapshot -A --all ----- - -.. Delete `VolumeSnapshotContent` CRs: -+ -[source,terminal] ----- -$ oc delete volumesnapshotcontent --all ----- - -.. Delete any `ReplicationDestination` resources on the namespace. -+ -[source,terminal] ----- -$ oc delete replicationdestination -n --all ----- \ No newline at end of file diff --git a/modules/oadp-deleting-cluster-resources-following-success.adoc b/modules/oadp-deleting-cluster-resources-following-success.adoc deleted file mode 100644 index 51684fd00652..000000000000 --- a/modules/oadp-deleting-cluster-resources-following-success.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-deleting-cluster-resources-following-success_{context}"] -= Deleting cluster resources following a successful backup and restore that used Data Mover - -You can delete any `VolumeSnapshotBackup` or `VolumeSnapshotRestore` CRs that remain in your application namespace after a successful backup and restore where you used Data Mover. - -.Procedure - -. Delete cluster resources that remain on the application namespace, the namespace with the application PVCs to backup and restore, after a backup where you use Data Mover: -+ -[source,terminal] ----- -$ oc delete vsb -n --all ----- - -. 
Delete cluster resources that remain after a restore where you use Data Mover: -+ -[source,terminal] ----- -$ oc delete vsr -n --all ----- - -. If needed, delete any `VolumeSnapshotContent` resources that remain after a backup and restore where you use Data Mover: -+ -[source,terminal] ----- -$ oc delete volumesnapshotcontent --all ----- diff --git a/modules/oadp-enabling-csi-dpa.adoc b/modules/oadp-enabling-csi-dpa.adoc deleted file mode 100644 index 6ce140f0d1fb..000000000000 --- a/modules/oadp-enabling-csi-dpa.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-enabling-csi-dpa_{context}"] -= Enabling CSI in the DataProtectionApplication CR - -You enable the Container Storage Interface (CSI) in the `DataProtectionApplication` custom resource (CR) in order to back up persistent volumes with CSI snapshots. - -.Prerequisites - -* The cloud provider must support CSI snapshots. - -.Procedure - -* Edit the `DataProtectionApplication` CR, as in the following example: -+ -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -... -spec: - configuration: - velero: - defaultPlugins: - - openshift - - csi <1> ----- -<1> Add the `csi` default plugin. diff --git a/modules/oadp-features.adoc b/modules/oadp-features.adoc deleted file mode 100644 index 6bbfd22b7d86..000000000000 --- a/modules/oadp-features.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc - -:_content-type: CONCEPT -[id="oadp-features_{context}"] -= OADP features - -OpenShift API for Data Protection (OADP) supports the following features: - -Backup:: -You can use OADP to back up all applications on the OpenShift Platform, or you can filter the resources by type, namespace, or label. -+ -OADP backs up Kubernetes objects and internal images by saving them as an archive file on object storage. OADP backs up persistent volumes (PVs) by creating snapshots with the native cloud snapshot API or with the Container Storage Interface (CSI). For cloud providers that do not support snapshots, OADP backs up resources and PV data with Restic. - -+ -[NOTE] -==== -You must exclude Operators from the backup of an application for backup and restore to succeed. -==== - - -Restore:: -You can restore resources and PVs from a backup. You can restore all objects in a backup or filter the restored objects by namespace, PV, or label. - -+ -[NOTE] -==== -You must exclude Operators from the backup of an application for backup and restore to succeed. -==== - -Schedule:: -You can schedule backups at specified intervals. - -Hooks:: -You can use hooks to run commands in a container on a pod, for example, `fsfreeze` to freeze a file system. You can configure a hook to run before or after a backup or restore. Restore hooks can run in an init container or in the application container. 
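For the Schedule feature, the following is a minimal sketch of a Velero `Schedule` CR; the name, namespace to back up, and cron expression are placeholders, and the `template` block accepts the same fields as a `Backup` CR spec.

[source,yaml]
----
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: <schedule_name>
  namespace: openshift-adp
spec:
  schedule: '0 7 * * *' # cron expression; here, daily at 07:00 UTC
  template:
    includedNamespaces:
    - <namespace>
    storageLocation: default
    ttl: 720h0m0s
----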
diff --git a/modules/oadp-ibm-power-test-support.adoc b/modules/oadp-ibm-power-test-support.adoc deleted file mode 100644 index 6db93c853eba..000000000000 --- a/modules/oadp-ibm-power-test-support.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc - -:_content-type: CONCEPT -[id="oadp-ibm-power-test-matrix_{context}"] -= OADP support for target backup locations using IBM Power - -IBM Power running with {product-title} 4.11 and 4.12, and OpenShift API for Data Protection (OADP) 1.1.2 was tested successfully against an AWS S3 backup location target. Although the test involved only an AWS S3 target, Red Hat supports running IBM Power with {product-title} 4.11 and 4.12, and OADP 1.1.2 against all non-AWS S3 backup location targets as well. diff --git a/modules/oadp-ibm-z-test-support.adoc b/modules/oadp-ibm-z-test-support.adoc deleted file mode 100644 index 72e6855c95aa..000000000000 --- a/modules/oadp-ibm-z-test-support.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc - -:_content-type: CONCEPT -[id="oadp-ibm-z-test-support_{context}"] -= OADP testing and support for target backup locations using {ibmzProductName} - -{ibmzProductName} running with {product-title} 4.11 and 4.12, and OpenShift API for Data Protection (OADP) 1.1.2 was tested successfully against an AWS S3 backup location target. Although the test involved only an AWS S3 target, Red Hat supports running {ibmzProductName} with {product-title} 4.11 and 4.12, and OADP 1.1.2 against all non-AWS S3 backup location targets as well. diff --git a/modules/oadp-installation-issues.adoc b/modules/oadp-installation-issues.adoc deleted file mode 100644 index e9dfe57965ed..000000000000 --- a/modules/oadp-installation-issues.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: CONCEPT -[id="oadp-installation-issues_{context}"] -= Installation issues - -You might encounter issues caused by using invalid directories or incorrect credentials when you install the Data Protection Application. - -[id="oadp-backup-location-contains-invalid-directories_{context}"] -== Backup storage contains invalid directories - -The `Velero` pod log displays the error message, `Backup storage contains invalid top-level directories`. - -.Cause - -The object storage contains top-level directories that are not Velero directories. - -.Solution - -If the object storage is not dedicated to Velero, you must specify a prefix for the bucket by setting the `spec.backupLocations.velero.objectStorage.prefix` parameter in the `DataProtectionApplication` manifest. - -[id="oadp-incorrect-aws-credentials_{context}"] -== Incorrect AWS credentials - -The `oadp-aws-registry` pod log displays the error message, `InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records.` - -The `Velero` pod log displays the error message, `NoCredentialProviders: no valid providers in chain`. - -.Cause - -The `credentials-velero` file used to create the `Secret` object is incorrectly formatted. 
- -.Solution - -Ensure that the `credentials-velero` file is correctly formatted, as in the following example: - -.Example `credentials-velero` file ----- -[default] <1> -aws_access_key_id=AKIAIOSFODNN7EXAMPLE <2> -aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ----- -<1> AWS default profile. -<2> Do not enclose the values with quotation marks (`"`, `'`). diff --git a/modules/oadp-installing-dpa.adoc b/modules/oadp-installing-dpa.adoc deleted file mode 100644 index dc49f3b9ead5..000000000000 --- a/modules/oadp-installing-dpa.adoc +++ /dev/null @@ -1,366 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-installing-dpa_{context}"] -= Installing the Data Protection Application - -You install the Data Protection Application (DPA) by creating an instance of the `DataProtectionApplication` API. - -.Prerequisites - -* You must install the OADP Operator. -* You must configure object storage as a backup location. -* If you use snapshots to back up PVs, your cloud provider must support either a native snapshot API or Container Storage Interface (CSI) snapshots. -* If the backup and snapshot locations use the same credentials, you must create a `Secret` with the default name, `{credentials}`. -ifdef::installing-oadp-azure,installing-oadp-gcp,installing-oadp-mcg,installing-oadp-ocs,virt-installing-configuring-oadp[] -* If the backup and snapshot locations use different credentials, you must create two `Secrets`: - -** `Secret` with a custom name for the backup location. You add this `Secret` to the `DataProtectionApplication` CR. -** `Secret` with the default name, `{credentials}`, for the snapshot location. This `Secret` is not referenced in the `DataProtectionApplication` CR. -endif::[] -ifdef::installing-oadp-aws[] -* If the backup and snapshot locations use different credentials, you must create a `Secret` with the default name, `{credentials}`, which contains separate profiles for the backup and snapshot location credentials. -endif::[] -+ -[NOTE] -==== -If you do not want to specify backup or snapshot locations during the installation, you can create a default `Secret` with an empty `credentials-velero` file. If there is no default `Secret`, the installation will fail. -==== - -.Procedure - -. Click *Operators* -> *Installed Operators* and select the OADP Operator. -. Under *Provided APIs*, click *Create instance* in the *DataProtectionApplication* box. - -. 
Click *YAML View* and update the parameters of the `DataProtectionApplication` manifest: -ifdef::installing-oadp-aws[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - openshift <1> - - aws - resourceTimeout: 10m <2> - restic: - enable: true <3> - podConfig: - nodeSelector: <4> - backupLocations: - - name: default - velero: - provider: {provider} - default: true - objectStorage: - bucket: <5> - prefix: <6> - config: - region: - profile: "default" - credential: - key: cloud - name: {credentials} <7> - snapshotLocations: <8> - - name: default - velero: - provider: {provider} - config: - region: <9> - profile: "default" ----- -<1> The `openshift` plugin is mandatory. -<2> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<3> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<4> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<5> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<6> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -<7> Specify the name of the `Secret` object that you created. If you do not specify this value, the default name, `{credentials}`, is used. If you specify a custom name, the custom name is used for the backup location. -<8> Specify a snapshot location, unless you use CSI snapshots or Restic to back up PVs. -<9> The snapshot location must be in the same region as the PVs. -endif::[] -ifdef::installing-oadp-azure[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - azure - - openshift <1> - resourceTimeout: 10m <2> - restic: - enable: true <3> - podConfig: - nodeSelector: <4> - backupLocations: - - velero: - config: - resourceGroup: <5> - storageAccount: <6> - subscriptionId: <7> - storageAccountKeyEnvVar: AZURE_STORAGE_ACCOUNT_ACCESS_KEY - credential: - key: cloud - name: {credentials} <8> - provider: {provider} - default: true - objectStorage: - bucket: <9> - prefix: <10> - snapshotLocations: <11> - - velero: - config: - resourceGroup: - subscriptionId: - incremental: "true" - name: default - provider: {provider} ----- -<1> The `openshift` plugin is mandatory. -<2> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<3> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<4> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<5> Specify the Azure resource group. -<6> Specify the Azure storage account ID. 
-<7> Specify the Azure subscription ID. -<8> If you do not specify this value, the default name, `{credentials}`, is used. If you specify a custom name, the custom name is used for the backup location. -<9> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<10> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -<11> You do not need to specify a snapshot location if you use CSI snapshots or Restic to back up PVs. -endif::[] -ifdef::installing-oadp-gcp[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - gcp - - openshift <1> - resourceTimeout: 10m <2> - restic: - enable: true <3> - podConfig: - nodeSelector: <4> - backupLocations: - - velero: - provider: {provider} - default: true - credential: - key: cloud - name: {credentials} <5> - objectStorage: - bucket: <6> - prefix: <7> - snapshotLocations: <8> - - velero: - provider: {provider} - default: true - config: - project: - snapshotLocation: us-west1 <9> ----- -<1> The `openshift` plugin is mandatory. -<2> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<3> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<4> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<5> If you do not specify this value, the default name, `{credentials}`, is used. If you specify a custom name, the custom name is used for the backup location. -<6> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<7> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -<8> Specify a snapshot location, unless you use CSI snapshots or Restic to back up PVs. -<9> The snapshot location must be in the same region as the PVs. -endif::[] -ifdef::installing-oadp-mcg[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - aws - - openshift <1> - resourceTimeout: 10m <2> - restic: - enable: true <3> - podConfig: - nodeSelector: <4> - backupLocations: - - velero: - config: - profile: "default" - region: minio - s3Url: <5> - insecureSkipTLSVerify: "true" - s3ForcePathStyle: "true" - provider: {provider} - default: true - credential: - key: cloud - name: {credentials} <6> - objectStorage: - bucket: <7> - prefix: <8> ----- -<1> The `openshift` plugin is mandatory. -<2> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<3> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. 
You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<4> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<5> Specify the URL of the S3 endpoint. -<6> If you do not specify this value, the default name, `{credentials}`, is used. If you specify a custom name, the custom name is used for the backup location. -<7> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<8> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -endif::[] -ifdef::installing-oadp-ocs[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - kubevirt <1> - - gcp <2> - - csi <3> - - openshift <4> - resourceTimeout: 10m <5> - restic: - enable: true <6> - podConfig: - nodeSelector: <7> - backupLocations: - - velero: - provider: {provider} <8> - default: true - credential: - key: cloud - name: <9> - objectStorage: - bucket: <10> - prefix: <11> ----- -<1> Optional: The `kubevirt` plugin is used with {VirtProductName}. -<2> Specify the default plugin for the backup provider, for example, `gcp`, if appropriate. -<3> Specify the `csi` default plugin if you use CSI snapshots to back up PVs. The `csi` plugin uses the link:https://{velero-domain}/docs/main/csi/[Velero CSI beta snapshot APIs]. You do not need to configure a snapshot location. -<4> The `openshift` plugin is mandatory. -<5> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<6> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<7> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<8> Specify the backup provider. -<9> Specify the correct default name for the `Secret`, for example, `cloud-credentials-gcp`, if you use a default plugin for the backup provider. If specifying a custom name, then the custom name is used for the backup location. If you do not specify a `Secret` name, the default name is used. -<10> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<11> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -endif::[] -ifdef::virt-installing-configuring-oadp[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: - configuration: - velero: - defaultPlugins: - - kubevirt <1> - - gcp <2> - - csi <3> - - openshift <4> - resourceTimeout: 10m <5> - restic: - enable: true <6> - podConfig: - nodeSelector: <7> - backupLocations: - - velero: - provider: {provider} <8> - default: true - credential: - key: cloud - name: <9> - objectStorage: - bucket: <10> - prefix: <11> ----- -<1> The `kubevirt` plugin is mandatory for {VirtProductName}. -<2> Specify the plugin for the backup provider, for example, `gcp`, if it exists. 
-<3> The `csi` plugin is mandatory for backing up PVs with CSI snapshots. The `csi` plugin uses the link:https://{velero-domain}/docs/main/csi/[Velero CSI beta snapshot APIs]. You do not need to configure a snapshot location. -<4> The `openshift` plugin is mandatory. -<5> Specify how many minutes to wait for several Velero resources before timeout occurs, such as Velero CRD availability, volumeSnapshot deletion, and backup repository availability. The default is 10m. -<6> Set to `false`, if you want to disable the Restic installation. Restic deploys a daemon set, which means that each worker node has `Restic` pods running. You can configure Restic for backups by adding `spec.defaultVolumesToRestic: true` to the `Backup` CR. -<7> Specify on which nodes Restic is available. By default, Restic runs on all nodes. -<8> Specify the backup provider. -<9> Specify the correct default name for the `Secret`, for example, `cloud-credentials-gcp`, if you use a default plugin for the backup provider. If specifying a custom name, then the custom name is used for the backup location. If you do not specify a `Secret` name, the default name is used. -<10> Specify a bucket as the backup storage location. If the bucket is not a dedicated bucket for Velero backups, you must specify a prefix. -<11> Specify a prefix for Velero backups, for example, `velero`, if the bucket is used for multiple purposes. -endif::[] - -. Click *Create*. -. Verify the installation by viewing the OADP resources: -+ -[source,terminal] ----- -$ oc get all -n openshift-adp ----- -+ -.Example output -+ ----- -NAME READY STATUS RESTARTS AGE -pod/oadp-operator-controller-manager-67d9494d47-6l8z8 2/2 Running 0 2m8s -pod/restic-9cq4q 1/1 Running 0 94s -pod/restic-m4lts 1/1 Running 0 94s -pod/restic-pv4kr 1/1 Running 0 95s -pod/velero-588db7f655-n842v 1/1 Running 0 95s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oadp-operator-controller-manager-metrics-service ClusterIP 172.30.70.140 8443/TCP 2m8s - -NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE -daemonset.apps/restic 3 3 3 3 3 96s - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oadp-operator-controller-manager 1/1 1 1 2m9s -deployment.apps/velero 1/1 1 1 96s - -NAME DESIRED CURRENT READY AGE -replicaset.apps/oadp-operator-controller-manager-67d9494d47 1 1 1 2m9s -replicaset.apps/velero-588db7f655 1 1 1 96s ----- \ No newline at end of file diff --git a/modules/oadp-installing-oadp-rosa-sts.adoc b/modules/oadp-installing-oadp-rosa-sts.adoc deleted file mode 100644 index fab13ae93ece..000000000000 --- a/modules/oadp-installing-oadp-rosa-sts.adoc +++ /dev/null @@ -1,127 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_backing_up_and_restoring_applications/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-installing-oadp-rosa-sts_{context}"] -= Installing OADP on {product-title} with AWS STS - -AWS Security Token Service (AWS STS) is a global web service that provides short-term credentials for IAM or federated users. {product-title} (ROSA) with STS is the recommended credential mode for ROSA clusters. This document describes how to install OpenShift API for Data Protection (OADP) on (ROSA) with AWS STS. - -[IMPORTANT] -==== -Restic is not supported in the OADP on ROSA with AWS STS environment. Ensure the Restic service is disabled. Use native snapshots to backup volumes. See _Known Issues_ for more information. -==== - -.Prerequisites - -* A ROSA OpenShift Cluster with the required access and tokens. 
-* link:https://docs.openshift.com/container-platform/4.13/backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.html#oadp-creating-default-secret_installing-oadp-aws[A default Secret], if your backup and snapshot locations use the same credentials, or if you do not require a snapshot location. - -.Procedure - -. Create an Openshift secret from your AWS token file by entering the following commands: - -.. Create the credentials file: -+ -[source, terminal] ----- -$ cat < ${SCRATCH}/credentials -[default] -role_arn = ${ROLE_ARN} -web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token -EOF ----- - -.. Create the OpenShift secret: -+ -[source, terminal] ----- -$ oc -n openshift-adp create secret generic cloud-credentials \ - --from-file=${SCRATCH}/credentials ----- - -. Install the OADP Operator. -.. In the {product-title} web console, navigate to Operators *->* OperatorHub. -.. Search for the OADP Operator, then click *Install*. - -. Create AWS cloud storage using your AWS credentials: -+ -[source,terminal] ----- -$ cat << EOF | oc create -f - -apiVersion: oadp.openshift.io/v1alpha1 -kind: CloudStorage -metadata: - name: ${CLUSTER_NAME}-oadp - namespace: openshift-adp -spec: - creationSecret: - key: credentials - name: cloud-credentials - enableSharedConfig: true - name: ${CLUSTER_NAME}-oadp - provider: aws - region: $REGION -EOF ----- - -. Create the `DataProtectionApplication resource`, which is used to configure the connection to the storage where the backups and volume snapshots will be stored: -+ -[source,terminal] ----- -$ cat << EOF | oc create -f - -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: ${CLUSTER_NAME}-dpa - namespace: openshift-adp -spec: - backupLocations: - - bucket: - cloudStorageRef: - name: ${CLUSTER_NAME}-oadp - credential: - key: credentials - name: cloud-credentials - default: true - configuration: - velero: - defaultPlugins: - - openshift - - aws - restic: - enable: false - volumeSnapshots: - - velero: - config: - credentialsFile: /tmp/credentials/openshift-adp/cloud-credentials-credentials - enableSharedConfig: "true" - region: ${REGION} - provider: aws -EOF ----- -+ -[NOTE] -==== -The `enable` parameter of `restic` is set to `false` in this configuration because OADP does not support Restic in ROSA environments. -==== -+ -You are now ready to backup and restore OpenShift applications, as described in the link:https://docs.openshift.com/container-platform/4.11/backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.html[OADP documentation]. 
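As a quick smoke test of the configuration above, you can confirm that the backup storage location is `Available` and then back up a single application namespace. This is only a sketch: the backup name `test-backup` and the `my-app` namespace are placeholders.

[source,terminal]
----
# Verify that the backup storage location created by the DPA is Available.
$ oc get backupStorageLocations -n openshift-adp

# Back up one application namespace. Because Restic is disabled in this
# environment, persistent volumes are expected to be backed up with native
# snapshots, as noted above.
$ cat << EOF | oc create -f -
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - my-app
EOF

# Watch the backup phase until it reports Completed.
$ oc get backup test-backup -n openshift-adp -o jsonpath='{.status.phase}'
----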
- -== Known Issues -.Restic is not supported or recommended - -* link:https://issues.redhat.com/browse/OADP-1054[CloudStorage: openshift-adp-controller-manager crashloop seg fault with Restic enabled] -* link:https://issues.redhat.com/browse/OADP-1057[Cloudstorage API: CSI Backup of an app with internal images partially fails with plugin panicked error] -* (Affects OADP 1.1.x_ only): link:https://issues.redhat.com/browse/OADP-1055[CloudStorage: bucket is removed on CS CR delete, although it doesn't have "oadp.openshift.io/cloudstorage-delete": "true"] - -[role="_additional-resources"] -.Additional resources - -* link:https://docs.openshift.com/rosa/rosa_architecture/rosa-understanding.html[Understanding ROSA with STS] -* link:https://docs.openshift.com/rosa/rosa_getting_started/rosa-sts-getting-started-workflow.html[Getting started with ROSA STS] -* link:https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-quickly.html[Creating a ROSA cluster with STS] -* link:https://docs.openshift.com/container-platform/4.13/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.html[About installing OADP] -* link:https://docs.openshift.com/container-platform/4.13/storage/container_storage_interface/persistent-storage-csi.html[Configuring CSI volumes] -* link:https://docs.openshift.com/rosa/rosa_architecture/rosa_policy_service_definition/rosa-service-definition.html#rosa-sdpolicy-storage_rosa-service-definition[ROSA storage options] diff --git a/modules/oadp-installing-operator.adoc b/modules/oadp-installing-operator.adoc deleted file mode 100644 index 1e0b2f944133..000000000000 --- a/modules/oadp-installing-operator.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-installing-operator_{context}"] -= Installing the OADP Operator - -You install the OpenShift API for Data Protection (OADP) Operator on {product-title} {product-version} by using Operator Lifecycle Manager (OLM). - -The OADP Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -.Prerequisites - -* You must be logged in as a user with `cluster-admin` privileges. - -.Procedure - -. In the {product-title} web console, click *Operators* -> *OperatorHub*. -. Use the *Filter by keyword* field to find the *OADP Operator*. -. Select the *OADP Operator* and click *Install*. -. Click *Install* to install the Operator in the `openshift-adp` project. -. Click *Operators* -> *Installed Operators* to verify the installation. 
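If you also want to confirm the installation from the command line, a check along the following lines (not part of the original procedure) shows whether the Operator's ClusterServiceVersion succeeded and its pods are running:

[source,terminal]
----
# The CSV phase should report Succeeded and the OADP controller manager
# pod should be Running in the openshift-adp project.
$ oc get csv -n openshift-adp
$ oc get pods -n openshift-adp
----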
diff --git a/modules/oadp-plugins.adoc b/modules/oadp-plugins.adoc deleted file mode 100644 index 0fbed2e6f5bb..000000000000 --- a/modules/oadp-plugins.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc - -:_content-type: CONCEPT -[id="oadp-plugins_{context}"] -= OADP plugins - -The OpenShift API for Data Protection (OADP) provides default Velero plugins that are integrated with storage providers to support backup and snapshot operations. You can create link:https://{velero-domain}/docs/v{velero-version}/custom-plugins/[custom plugins] based on the Velero plugins. - -OADP also provides plugins for {product-title} resource backups, OpenShift Virtualization resource backups, and Container Storage Interface (CSI) snapshots. - -[cols="3", options="header"] -.OADP plugins -|=== -|OADP plugin |Function |Storage location - -.2+|`aws` |Backs up and restores Kubernetes objects. |AWS S3 -|Backs up and restores volumes with snapshots. |AWS EBS - -.2+|`azure` |Backs up and restores Kubernetes objects. |Microsoft Azure Blob storage -|Backs up and restores volumes with snapshots. |Microsoft Azure Managed Disks - -.2+|`gcp` |Backs up and restores Kubernetes objects. |Google Cloud Storage -|Backs up and restores volumes with snapshots. |Google Compute Engine Disks - -|`openshift` |Backs up and restores {product-title} resources. ^[1]^ |Object store - -|`kubevirt` |Backs up and restores OpenShift Virtualization resources. ^[2]^ |Object store - -|`csi` |Backs up and restores volumes with CSI snapshots. ^[3]^ |Cloud storage that supports CSI snapshots -|=== -[.small] --- -1. Mandatory. -2. Virtual machine disks are backed up with CSI snapshots or Restic. -3. The `csi` plugin uses the link:https://velero.io/docs/main/csi/[Velero CSI beta snapshot API]. --- diff --git a/modules/oadp-pod-crash-set-resource-request-restic.adoc b/modules/oadp-pod-crash-set-resource-request-restic.adoc deleted file mode 100644 index 94043e3463d8..000000000000 --- a/modules/oadp-pod-crash-set-resource-request-restic.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: PROCEDURE -[id="oadp-pod-crash-resource-request-retics_{context}"] -= Setting resource requests for a Restic pod - -You can use the `configuration.restic.podConfig.resourceAllocations` specification field to set specific resource requests for a `Restic` pod. - -.Procedure - -* Set the `cpu` and `memory` resource requests in the YAML file: -+ -.Example Restic file - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -... 
-configuration: - restic: - podConfig: - resourceAllocations: - requests: - cpu: 500m - memory: 256Mi ----- diff --git a/modules/oadp-pod-crash-set-resource-request-velero.adoc b/modules/oadp-pod-crash-set-resource-request-velero.adoc deleted file mode 100644 index 23d8427e4106..000000000000 --- a/modules/oadp-pod-crash-set-resource-request-velero.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: PROCEDURE -[id="oadp-pod-crash-resource-request-velero_{context}"] -= Setting resource requests for a Velero pod - -You can use the `configuration.velero.podConfig.resourceAllocations` specification field in the `oadp_v1alpha1_dpa.yaml` file to set specific resource requests for a `Velero` pod. - -.Procedure - -* Set the `cpu` and `memory` resource requests in the YAML file: -+ -.Example Velero file - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -... -configuration: - velero: - podConfig: - resourceAllocations: - requests: - cpu: 500m - memory: 256Mi ----- diff --git a/modules/oadp-release-notes-1-1-1.adoc b/modules/oadp-release-notes-1-1-1.adoc deleted file mode 100644 index da19f92d1c28..000000000000 --- a/modules/oadp-release-notes-1-1-1.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/oadp-release-notes.adoc -:_content-type: REFERENCE -[id="migration-oadp-release-notes-1-1-1_{context}"] -= OADP 1.1.1 release notes - -The OADP 1.1.1 release notes include product recommendations and descriptions of known issues. - -== Product recommendations - -Before you install OADP 1.1.1, it is recommended to either install VolSync 0.5.1 or to upgrade to it. - -== Known issues - -This release has the following known issues: - -* OADP currently does not support backup and restore of AWS EFS volumes using restic in Velero (link:https://issues.redhat.com/browse/OADP-778[*OADP-778*]). - -* CSI backups might fail due to a Ceph limitation of `VolumeSnapshotContent` snapshots per PVC. -+ -You can create many snapshots of the same persistent volume claim (PVC) but cannot schedule periodic creation of snapshots: -+ -** For CephFS, you can create up to 100 snapshots per PVC. -** For RADOS Block Device (RBD), you can create up to 512 snapshots for each PVC. (link:https://issues.redhat.com/browse/OADP-804[*OADP-804*]) and (link:https://issues.redhat.com/browse/OADP-975[*OADP-975*]) -+ -For more information, see https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.11/html/managing_and_allocating_storage_resources/volume-snapshots_rhodf[Volume Snapshots]. diff --git a/modules/oadp-release-notes-1-1-2.adoc b/modules/oadp-release-notes-1-1-2.adoc deleted file mode 100644 index 25cb4b935c87..000000000000 --- a/modules/oadp-release-notes-1-1-2.adoc +++ /dev/null @@ -1,56 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/oadp-release-notes.adoc - -:_content-type: REFERENCE -[id="migration-oadp-release-notes-1-1-2_{context}"] -= OADP 1.1.2 release notes - -The OADP 1.1.2 release notes include product recommendations, a list of fixed bugs and descriptions of known issues. 
- -[id="product-recommendations_{context}"] -== Product recommendations - -.VolSync - -To prepare for the upgrade from VolSync 0.5.1 to the latest version available from the VolSync *stable* channel, you must add this annotation in the `openshift-adp` namespace by running the following command: - -[source,terminal] ----- -$ oc annotate --overwrite namespace/openshift-adp volsync.backube/privileged-movers='true' ----- - -.Velero - -In this release, Velero has been upgraded from version 1.9.2 to version link:https://github.com/vmware-tanzu/velero/releases/tag/v1.9.5[1.9.5]. - -.Restic - -In this release, Restic has been upgraded from version 0.13.1 to version link:https://github.com/restic/restic/releases/tag/v0.14.0[0.14.0]. - -[id="fixed-bugs_{context}"] -== Fixed bugs - -The following bugs have been fixed in this release: - -* link:https://issues.redhat.com/browse/OADP-1150[OADP-1150] -* link:https://issues.redhat.com/browse/OADP-290[OADP-290] -* link:https://issues.redhat.com/browse/OADP-1056[OADP-1056] - -[id="known-issues_{context}"] -== Known issues - -This release has the following known issues: - -* OADP currently does not support backup and restore of AWS EFS volumes using restic in Velero (link:https://issues.redhat.com/browse/OADP-778[*OADP-778*]). - -* CSI backups might fail due to a Ceph limitation of `VolumeSnapshotContent` snapshots per PVC. -+ -You can create many snapshots of the same persistent volume claim (PVC) but cannot schedule periodic creation of snapshots: -+ --- -** For CephFS, you can create up to 100 snapshots per PVC. (link:https://issues.redhat.com/browse/OADP-804[*OADP-804*]) -** For RADOS Block Device (RBD), you can create up to 512 snapshots for each PVC. (link:https://issues.redhat.com/browse/OADP-975[*OADP-975*]) --- -+ -For more information, see https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.11/html/managing_and_allocating_storage_resources/volume-snapshots_rhodf[Volume Snapshots]. diff --git a/modules/oadp-release-notes-1-1-4.adoc b/modules/oadp-release-notes-1-1-4.adoc deleted file mode 100644 index 21343b5453a3..000000000000 --- a/modules/oadp-release-notes-1-1-4.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/oadp-release-notes.adoc - -:_content-type: REFERENCE -[id="migration-oadp-release-notes-1-1-4_{context}"] -= OADP 1.1.4 release notes - -The OADP 1.1.4 release notes lists any new features, resolved issues and bugs, and known issues. - -[id="new-features1.1.4_{context}"] -== New features - -This version of OADP is a service release. No new features are added to this version. 
- -[id="resolved-issues1.1.4_{context}"] -== Fixed bugs - -The following bugs have been fixed in this release: - -* link:https://issues.redhat.com/browse/OADP-1557[OADP-1557] -* link:https://issues.redhat.com/browse/OADP-1822[OADP-1822] -* link:https://issues.redhat.com/browse/OADP-1511[OADP-1511] -* link:https://issues.redhat.com/browse/OADP-1642[OADP-1642] -* link:https://issues.redhat.com/browse/OADP-1398[OADP-1398] -* link:https://issues.redhat.com/browse/OADP-1267[OADP-1267] -* link:https://issues.redhat.com/browse/OADP-1390[OADP-1390] -* link:https://issues.redhat.com/browse/OADP-1650[OADP-1650] -* link:https://issues.redhat.com/browse/OADP-1487[OADP-1487] - - -[id="known-issues1.1.4_{context}"] -== Known issues - -This release has the following known issues: - -* OADP backups might fail because a UID/GID range might have changed on the cluster where the application has been restored, with the result that OADP does not back up and restore {product-title} UID/GID range metadata. To avoid the issue, if the backed application requires a specific UUID, ensure the range is available when restored. An additional workaround is to allow OADP to create the namespace in the restore operation. - -* A restoration might fail if ArgoCD is used during the process due to a label used by ArgoCD, `app.kubernetes.io/instance`. This label identifies which resources ArgoCD needs to manage, which can create a conflict with OADP's procedure for managing resources on restoration. To work around this issue, set `.spec.resourceTrackingMethod` on the ArgoCD YAML to `annotation+label` or `annotation`. If the issue continues to persist, then disable ArgoCD before beginning to restore, and enable it again when restoration is finished. - diff --git a/modules/oadp-release-notes-1-2-0.adoc b/modules/oadp-release-notes-1-2-0.adoc deleted file mode 100644 index 7403883eede7..000000000000 --- a/modules/oadp-release-notes-1-2-0.adoc +++ /dev/null @@ -1,81 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/oadp-release-notes.adoc - -:_content-type: REFERENCE -[id="migration-oadp-release-notes-1-2-0_{context}"] -= OADP 1.2.0 release notes - -The OADP 1.2.0 release notes include information about new features, bug fixes, and known issues. - -[id="new-features_{context}"] -== New features - -.link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.12/html/backup_and_restore/application-backup-and-restore#installing-oadp-aws[Resource timeouts] -The new `resourceTimeout` option specifies the timeout duration in minutes for waiting on various Velero resources. This option applies to resources such as Velero CRD availability, `volumeSnapshot` deletion, and backup repository availability. The default duration is ten minutes. - -.link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.11/html/backup_and_restore/application-backup-and-restore#oadp-s3-compatible-backup-storage-providers_about-installing-oadp[AWS S3 compatible backup storage providers] -You can back up objects and snapshots on AWS S3 compatible providers. - -[id="new-features-tech-preview-1-2-0_{context}"] -=== Technical preview features - -.link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.9/html/backup_and_restore/application-backup-and-restore#installing-and-configuring-oadp[Data Mover] -The OADP Data Mover enables you to back up Container Storage Interface (CSI) volume snapshots to a remote object store. 
When you enable Data Mover, you can restore stateful applications using CSI volume snapshots pulled from the object store in case of accidental cluster deletion, cluster failure, or data corruption. - -:FeatureName: OADP Data Mover -include::snippets/technology-preview.adoc[] - -[id="fixed-bugs-1-2-0_{context}"] -== Fixed bugs - -The following bugs have been fixed in this release: - -* link:https://issues.redhat.com/browse/OADP-144[OADP-144] -* link:https://issues.redhat.com/browse/OADP-639[OADP-639] -* link:https://issues.redhat.com/browse/OADP-1741[OADP-1741] -* link:https://issues.redhat.com/browse/OADP-1152[OADP-1152] -* link:https://issues.redhat.com/browse/OADP-1143[OADP-1143] -* link:https://issues.redhat.com/browse/OADP-1931[OADP-1931] -* link:https://issues.redhat.com/browse/OADP-148[OADP-148] -* link:https://issues.redhat.com/browse/OADP-1067[OADP-1067] -* link:https://issues.redhat.com/browse/OADP-1332[OADP-1332] -* link:https://issues.redhat.com/browse/OADP-1164[OADP-1164] -* link:https://issues.redhat.com/browse/OADP-1105[OADP-1105] -* link:https://issues.redhat.com/browse/OADP-2009[OADP-2009] -* link:https://issues.redhat.com/browse/OADP-1370[OADP-1370] -* link:https://issues.redhat.com/browse/OADP-969[OADP-969] -* link:https://issues.redhat.com/browse/OADP-1672[OADP-1672] -* link:https://issues.redhat.com/browse/OADP-1151[OADP-1151] -* link:https://issues.redhat.com/browse/OADP-988[OADP-988] -* link:https://issues.redhat.com/browse/OADP-1941[OADP-1941] -* link:https://issues.redhat.com/browse/OADP-1830[OADP-1830] -* link:https://issues.redhat.com/browse/OADP-1821[OADP-1821] -* link:https://issues.redhat.com/browse/OADP-1783[OADP-1783] -* link:https://issues.redhat.com/browse/OADP-1719[OADP-1719] -* link:https://issues.redhat.com/browse/OADP-1833[OADP-1833] -* link:https://issues.redhat.com/browse/OADP-1872[OADP-1872] -* link:https://issues.redhat.com/browse/OADP-2047[OADP-2047] -* link:https://issues.redhat.com/browse/OADP-1932[OADP-1932] -* link:https://issues.redhat.com/browse/OADP-1844[OADP-1844] -* link:https://issues.redhat.com/browse/OADP-1182[OADP-1182] -* link:https://issues.redhat.com/browse/OADP-1183[OADP-1183] -* link:https://issues.redhat.com/browse/OADP-1798[OADP-1798] -* link:https://issues.redhat.com/browse/OADP-1726[OADP-1726] -* link:https://issues.redhat.com/browse/OADP-821[OADP-821] -* link:https://issues.redhat.com/browse/OADP-1833[OADP-1781] -* link:https://issues.redhat.com/browse/OADP-697[OADP-697] -* link:https://issues.redhat.com/browse/OADP-1281[OADP-1281] -* link:https://issues.redhat.com/browse/OADP-1077[OADP-1077] -* link:https://issues.redhat.com/browse/OADP-1076[OADP-1076] -* link:https://issues.redhat.com/browse/OADP-1670[OADP-1670] -* link:https://issues.redhat.com/browse/OADP-1307[OADP-1307] -* link:https://issues.redhat.com/browse/OADP-1640[OADP-1640] -* link:https://issues.redhat.com/browse/OADP-1987[OADP-1987] -* link:https://issues.redhat.com/browse/OADP-1934[OADP-1934] - -[id="known-issues-1-2-0_{context}"] -== Known issues - -This release does not have any known issues. 
- diff --git a/modules/oadp-restic-issues.adoc b/modules/oadp-restic-issues.adoc deleted file mode 100644 index 4d272712e7c8..000000000000 --- a/modules/oadp-restic-issues.adoc +++ /dev/null @@ -1,80 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/troubleshooting.adoc - -:_content-type: CONCEPT -[id="oadp-restic-issues_{context}"] -= Restic issues - -You might encounter these issues when you back up applications with Restic. - -[id="restic-permission-error-nfs-root-squash-enabled_{context}"] -== Restic permission error for NFS data volumes with root_squash enabled - -The `Restic` pod log displays the error message: `controller=pod-volume-backup error="fork/exec/usr/bin/restic: permission denied"`. - -.Cause - -If your NFS data volumes have `root_squash` enabled, `Restic` maps to `nfsnobody` and does not have permission to create backups. - -.Solution - -You can resolve this issue by creating a supplemental group for `Restic` and adding the group ID to the `DataProtectionApplication` manifest: - -. Create a supplemental group for `Restic` on the NFS data volume. -. Set the `setgid` bit on the NFS directories so that group ownership is inherited. -. Add the `spec.configuration.restic.supplementalGroups` parameter and the group ID to the `DataProtectionApplication` manifest, as in the following example: -+ -[source,yaml] ----- -spec: - configuration: - restic: - enable: true - supplementalGroups: - - <1> ----- -<1> Specify the supplemental group ID. - -. Wait for the `Restic` pods to restart so that the changes are applied. - -[id="restic-backup-cannot-be-recreated-after-s3-bucket-emptied_{context}"] -== Restic Backup CR cannot be recreated after bucket is emptied - -If you create a Restic `Backup` CR for a namespace, empty the object storage bucket, and then recreate the `Backup` CR for the same namespace, the recreated `Backup` CR fails. - -The `velero` pod log displays the following error message: `stderr=Fatal: unable to open config file: Stat: The specified key does not exist.\nIs there a repository at the following location?`. - -.Cause - -Velero does not recreate or update the Restic repository from the `ResticRepository` manifest if the Restic directories are deleted from object storage. See link:https://github.com/vmware-tanzu/velero/issues/4421[Velero issue 4421] for more information. - -.Solution - -* Remove the related Restic repository from the namespace by running the following command: -+ -[source,terminal] ----- -$ oc delete resticrepository openshift-adp ----- -+ - -In the following error log, `mysql-persistent` is the problematic Restic repository. The name of the repository appears in italics for clarity. 
-+ -[source,text,options="nowrap",subs="+quotes,verbatim"] ----- - time="2021-12-29T18:29:14Z" level=info msg="1 errors - encountered backup up item" backup=velero/backup65 - logSource="pkg/backup/backup.go:431" name=mysql-7d99fc949-qbkds - time="2021-12-29T18:29:14Z" level=error msg="Error backing up item" - backup=velero/backup65 error="pod volume backup failed: error running - restic backup, stderr=Fatal: unable to open config file: Stat: The - specified key does not exist.\nIs there a repository at the following - location?\ns3:http://minio-minio.apps.mayap-oadp- - veleo-1234.qe.devcluster.openshift.com/mayapvelerooadp2/velero1/ - restic/_mysql-persistent_\n: exit status 1" error.file="/remote-source/ - src/github.com/vmware-tanzu/velero/pkg/restic/backupper.go:184" - error.function="github.com/vmware-tanzu/velero/ - pkg/restic.(*backupper).BackupPodVolumes" - logSource="pkg/backup/backup.go:435" name=mysql-7d99fc949-qbkds ----- diff --git a/modules/oadp-s3-compatible-backup-storage-providers.adoc b/modules/oadp-s3-compatible-backup-storage-providers.adoc deleted file mode 100644 index d75b6191ac29..000000000000 --- a/modules/oadp-s3-compatible-backup-storage-providers.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: CONCEPT -[id="oadp-s3-compatible-backup-storage-providers_{context}"] -= AWS S3 compatible backup storage providers - -OADP is compatible with many object storage providers for use with different backup and snapshot operations. Several object storage providers are fully supported, several are unsupported but known to work, and some have known limitations. - -[id="oadp-s3-compatible-backup-storage-providers-supported"] -== Supported backup storage providers - -The following AWS S3 compatible object storage providers, are fully supported by OADP through the AWS plugin for use as backup storage locations: - -* MinIO -* Multicloud Object Gateway (MCG) with NooBaa -* Amazon Web Services (AWS) S3 - -[NOTE] -==== -The following compatible object storage providers are supported and have their own Velero object store plugins: - -* Google Cloud Platform (GCP) -* Microsoft Azure -==== - -[id="oadp-s3-compatible-backup-storage-providers-unsupported"] -== Unsupported backup storage providers - -The following AWS S3 compatible object storage providers, are known to work with Velero through the AWS plugin, for use as backup storage locations, however, they are unsupported and have not been tested by Red Hat: - -* IBM Cloud -* Oracle Cloud -* DigitalOcean -* NooBaa -* Tencent Cloud -* Ceph RADOS v12.2.7 -* Quobyte -* Cloudian HyperStore - -[id="oadp-s3-compatible-backup-storage-providers-known-limitations"] -== Backup storage providers with known limitations - -The following AWS S3 compatible object storage providers are known to work with Velero through the AWS plugin with a limited feature set: - -* Swift - It works for use as a backup storage location for backup storage, but is not compatible with Restic for filesystem-based volume backup and restore. 
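Whichever S3 compatible provider you choose, it is consumed through the `aws` plugin, so the provider-specific pieces are the credentials `Secret` and the `s3Url`, `s3ForcePathStyle`, and `insecureSkipTLSVerify` settings shown in the Multicloud Object Gateway example. The following is a minimal sketch of creating that `Secret`, assuming the default `cloud-credentials` name; the key values are placeholders.

[source,terminal]
----
# Write the S3 credentials in AWS profile format. Use the access key pair
# issued by your object storage provider.
$ cat << EOF > credentials-velero
[default]
aws_access_key_id=<access_key_id>
aws_secret_access_key=<secret_access_key>
EOF

# Create the Secret that the DataProtectionApplication CR references.
$ oc create secret generic cloud-credentials -n openshift-adp \
    --from-file cloud=credentials-velero
----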
diff --git a/modules/oadp-scheduling-backups.adoc b/modules/oadp-scheduling-backups.adoc deleted file mode 100644 index 6cebfd5e977c..000000000000 --- a/modules/oadp-scheduling-backups.adoc +++ /dev/null @@ -1,71 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-scheduling-backups_{context}"] -= Scheduling backups - -You schedule backups by creating a `Schedule` custom resource (CR) instead of a `Backup` CR. - -[WARNING] -==== -Leave enough time in your backup schedule for a backup to finish before another backup is created. - -For example, if a backup of a namespace typically takes 10 minutes, do not schedule backups more frequently than every 15 minutes. -==== - -.Prerequisites - -* You must install the OpenShift API for Data Protection (OADP) Operator. -* The `DataProtectionApplication` CR must be in a `Ready` state. - -.Procedure - -. Retrieve the `backupStorageLocations` CRs: -+ -[source,terminal] ----- -$ oc get backupStorageLocations -n openshift-adp ----- -+ -.Example output -+ -[source,terminal] ----- -NAMESPACE NAME PHASE LAST VALIDATED AGE DEFAULT -openshift-adp velero-sample-1 Available 11s 31m ----- - -. Create a `Schedule` CR, as in the following example: -+ -[source,yaml] ----- -$ cat << EOF | oc apply -f - -apiVersion: velero.io/v1 -kind: Schedule -metadata: - name: - namespace: openshift-adp -spec: - schedule: 0 7 * * * <1> - template: - hooks: {} - includedNamespaces: - - <2> - storageLocation: <3> - defaultVolumesToRestic: true <4> - ttl: 720h0m0s -EOF ----- -<1> `cron` expression to schedule the backup, for example, `0 7 * * *` to perform a backup every day at 7:00. -<2> Array of namespaces to back up. -<3> Name of the `backupStorageLocations` CR. -<4> Optional: Add the `defaultVolumesToRestic: true` key-value pair if you are backing up volumes with Restic. - -. Verify that the status of the `Schedule` CR is `Completed` after the scheduled backup runs: -+ -[source,terminal] ----- -$ oc get schedule -n openshift-adp -o jsonpath='{.status.phase}' ----- diff --git a/modules/oadp-secrets-for-different-credentials.adoc b/modules/oadp-secrets-for-different-credentials.adoc deleted file mode 100644 index eabe57192b09..000000000000 --- a/modules/oadp-secrets-for-different-credentials.adoc +++ /dev/null @@ -1,223 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc -// * backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc - -:_content-type: PROCEDURE -[id="oadp-secrets-for-different-credentials_{context}"] -ifdef::installing-oadp-aws[] -= Creating profiles for different credentials - -If your backup and snapshot locations use different credentials, you create separate profiles in the `credentials-velero` file. - -Then, you create a `Secret` object and specify the profiles in the `DataProtectionApplication` custom resource (CR). - -.Procedure - -. 
Create a `credentials-velero` file with separate profiles for the backup and snapshot locations, as in the following example: -+ -[source,terminal] ----- -[backupStorage] -aws_access_key_id= -aws_secret_access_key= - -[volumeSnapshot] -aws_access_key_id= -aws_secret_access_key= ----- - -. Create a `Secret` object with the `credentials-velero` file: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create secret generic {credentials} -n openshift-adp --from-file cloud=credentials-velero <1> ----- - -. Add the profiles to the `DataProtectionApplication` CR, as in the following example: -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: -... - backupLocations: - - name: default - velero: - provider: {provider} - default: true - objectStorage: - bucket: - prefix: - config: - region: us-east-1 - profile: "backupStorage" - credential: - key: cloud - name: {credentials} - snapshotLocations: - - name: default - velero: - provider: {provider} - config: - region: us-west-2 - profile: "volumeSnapshot" ----- -endif::[] -ifdef::installing-oadp-azure,installing-oadp-gcp,installing-oadp-ocs,installing-oadp-mcg[] -= Creating secrets for different credentials - -If your backup and snapshot locations use different credentials, you must create two `Secret` objects: - -* Backup location `Secret` with a custom name. The custom name is specified in the `spec.backupLocations` block of the `DataProtectionApplication` custom resource (CR). -* Snapshot location `Secret` with the default name, `{credentials}`. This `Secret` is not specified in the `DataProtectionApplication` CR. - -.Procedure - -. Create a `credentials-velero` file for the snapshot location in the appropriate format for your cloud provider. -. Create a `Secret` for the snapshot location with the default name: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create secret generic {credentials} -n openshift-adp --from-file cloud=credentials-velero ----- - -. Create a `credentials-velero` file for the backup location in the appropriate format for your object storage. -. Create a `Secret` for the backup location with a custom name: -+ -[source,terminal,subs="attributes+"] ----- -$ oc create secret generic -n openshift-adp --from-file cloud=credentials-velero ----- - -. Add the `Secret` with the custom name to the `DataProtectionApplication` CR, as in the following example: -endif::[] -ifdef::installing-oadp-azure[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: -... - backupLocations: - - velero: - config: - resourceGroup: - storageAccount: - subscriptionId: - storageAccountKeyEnvVar: AZURE_STORAGE_ACCOUNT_ACCESS_KEY - credential: - key: cloud - name: <1> - provider: azure - default: true - objectStorage: - bucket: - prefix: - snapshotLocations: - - velero: - config: - resourceGroup: - subscriptionId: - incremental: "true" - name: default - provider: {provider} ----- -<1> Backup location `Secret` with custom name. -endif::[] -ifdef::installing-oadp-gcp[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: -... 
- backupLocations: - - velero: - provider: {provider} - default: true - credential: - key: cloud - name: <1> - objectStorage: - bucket: - prefix: - snapshotLocations: - - velero: - provider: {provider} - default: true - config: - project: - snapshotLocation: us-west1 ----- -<1> Backup location `Secret` with custom name. -endif::[] -ifdef::installing-oadp-mcg[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: -... - backupLocations: - - velero: - config: - profile: "default" - region: minio - s3Url: - insecureSkipTLSVerify: "true" - s3ForcePathStyle: "true" - provider: {provider} - default: true - credential: - key: cloud - name: <1> - objectStorage: - bucket: - prefix: ----- -<1> Backup location `Secret` with custom name. -endif::[] -ifdef::installing-oadp-ocs[] -+ -[source,yaml,subs="attributes+"] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: - namespace: openshift-adp -spec: -... - backupLocations: - - velero: - provider: - default: true - credential: - key: cloud - name: <1> - objectStorage: - bucket: - prefix: ----- -<1> Backup location `Secret` with custom name. -endif::[] diff --git a/modules/oadp-self-signed-certificate.adoc b/modules/oadp-self-signed-certificate.adoc deleted file mode 100644 index 3472eb4b234b..000000000000 --- a/modules/oadp-self-signed-certificate.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/configuring-oadp.adoc - -:_content-type: PROCEDURE -[id="oadp-self-signed-certificate_{context}"] -= Enabling self-signed CA certificates - -You must enable a self-signed CA certificate for object storage by editing the `DataProtectionApplication` custom resource (CR) manifest to prevent a `certificate signed by unknown authority` error. - -.Prerequisites - -* You must have the OpenShift API for Data Protection (OADP) Operator installed. - -.Procedure - -* Edit the `spec.backupLocations.velero.objectStorage.caCert` parameter and `spec.backupLocations.velero.config` parameters of the `DataProtectionApplication` CR manifest: -+ -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: -spec: -... - backupLocations: - - name: default - velero: - provider: aws - default: true - objectStorage: - bucket: - prefix: - caCert: <1> - config: - insecureSkipTLSVerify: "false" <2> -... ----- -<1> Specify the Base64-encoded CA certificate string. -<2> The `insecureSkipTLSVerify` configuration can be set to either `"true"` or `"false"`. If set to `"true"`, SSL/TLS security is disabled. If set to `"false"`, SSL/TLS security is enabled. 
diff --git a/modules/oadp-setting-resource-limits-and-requests.adoc b/modules/oadp-setting-resource-limits-and-requests.adoc deleted file mode 100644 index f1f376705e56..000000000000 --- a/modules/oadp-setting-resource-limits-and-requests.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/configuring-oadp.adoc -// * virt/backup_restore/virt-installing-configuring-oadp.adoc - -:_content-type: PROCEDURE -[id="oadp-setting-resource-limits-and-requests_{context}"] -= Setting Velero CPU and memory resource allocations - -You set the CPU and memory resource allocations for the `Velero` pod by editing the `DataProtectionApplication` custom resource (CR) manifest. - -.Prerequisites - -* You must have the OpenShift API for Data Protection (OADP) Operator installed. - -.Procedure - -* Edit the values in the `spec.configuration.velero.podConfig.ResourceAllocations` block of the `DataProtectionApplication` CR manifest, as in the following example: -+ -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1beta1 -kind: DataProtectionApplication -metadata: - name: -spec: -... - configuration: - velero: - podConfig: - nodeSelector: <1> - resourceAllocations: - limits: - cpu: "1" - memory: 512Mi - requests: - cpu: 500m - memory: 256Mi ----- -<1> Specify the node selector to be supplied to Velero podSpec. diff --git a/modules/oadp-using-data-mover-for-csi-snapshots.adoc b/modules/oadp-using-data-mover-for-csi-snapshots.adoc deleted file mode 100644 index d029d7dda7a8..000000000000 --- a/modules/oadp-using-data-mover-for-csi-snapshots.adoc +++ /dev/null @@ -1,275 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_content-type: PROCEDURE -[id="oadp-using-data-mover-for-csi-snapshots_{context}"] -= Using Data Mover for CSI snapshots - -:FeatureName: Data Mover for CSI snapshots - -The OADP Data Mover enables customers to back up Container Storage Interface (CSI) volume snapshots to a remote object store. When Data Mover is enabled, you can restore stateful applications, using CSI volume snapshots pulled from the object store if a failure, accidental deletion, or corruption of the cluster occurs. - -The Data Mover solution uses the Restic option of VolSync. - -Data Mover supports backup and restore of CSI volume snapshots only. - -In OADP 1.2 Data Mover `VolumeSnapshotBackups` (VSBs) and `VolumeSnapshotRestores` (VSRs) are queued using the VolumeSnapshotMover (VSM). The VSM's performance is improved by specifying a concurrent number of VSBs and VSRs simultaneously `InProgress`. After all async plugin operations are complete, the backup is marked as complete. - - -[NOTE] -==== -The OADP 1.1 Data Mover is a Technology Preview feature. - -The OADP 1.2 Data Mover has significantly improved features and performances, but is still a Technology Preview feature. -==== -:FeatureName: The OADP Data Mover -include::snippets/technology-preview.adoc[leveloffset=+1] - -[NOTE] -==== -Red Hat recommends that customers who use OADP 1.2 Data Mover in order to back up and restore ODF CephFS volumes, upgrade or install {product-title} version 4.12 or later for improved performance. OADP Data Mover can leverage CephFS shallow volumes in {product-title} version 4.12 or later, which based on our testing, can improve the performance of backup times. 
- -* https://issues.redhat.com/browse/RHSTOR-4287[CephFS ROX details] -//* https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md[Provisioning and mounting CephFS snapshot-backed volumes] - - -//For more information about OADP 1.2 with CephS [name of topic], see ___. - -==== - -.Prerequisites - -* You have verified that the `StorageClass` and `VolumeSnapshotClass` custom resources (CRs) support CSI. - -* You have verified that only one `volumeSnapshotClass` CR has the annotation `snapshot.storage.kubernetes.io/is-default-class: true`. -+ -[NOTE] -==== -In {product-title} version 4.12 or later, verify that this is the only default `volumeSnapshotClass`. -==== - -* You have verified that `deletionPolicy` of the `VolumeSnapshotClass` CR is set to `Retain`. - -* You have verified that only one `storageClass` CR has the annotation `storageclass.kubernetes.io/is-default-class: true`. - -* You have included the label `{velero-domain}/csi-volumesnapshot-class: 'true'` in your `VolumeSnapshotClass` CR. - -* You have verified that the `OADP namespace` has the annotation `oc annotate --overwrite namespace/openshift-adp volsync.backube/privileged-movers='true'`. -+ -[NOTE] -==== -In OADP 1.1 the above setting is mandatory. - -In OADP 1.2 the `privileged-movers` setting is not required in most scenarios. The restoring container permissions should be adequate for the Volsync copy. In some user scenarios, there may be permission errors that the `privileged-mover`= `true` setting should resolve. -==== - -* You have installed the VolSync Operator by using the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -The VolSync Operator is required for using OADP Data Mover. -==== - -* You have installed the OADP operator by using OLM. - -.Procedure - -. Configure a Restic secret by creating a `.yaml` file as following: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: - namespace: openshift-adp -type: Opaque -stringData: - RESTIC_PASSWORD: ----- -+ -[NOTE] -==== -By default, the Operator looks for a secret named `dm-credential`. If you are using a different name, you need to specify the name through a Data Protection Application (DPA) CR using `dpa.spec.features.dataMover.credentialName`. -==== - -. Create a DPA CR similar to the following example. The default plugins include CSI. -+ -.Example Data Protection Application (DPA) CR -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample - namespace: openshift-adp -spec: - backupLocations: - - velero: - config: - profile: default - region: us-east-1 - credential: - key: cloud - name: cloud-credentials - default: true - objectStorage: - bucket: - prefix: - provider: aws - configuration: - restic: - enable: - velero: - itemOperationSyncFrequency: "10s" - defaultPlugins: - - openshift - - aws - - csi - - vsm <1> - features: - dataMover: - credentialName: restic-secret - enable: true - maxConcurrentBackupVolumes: "3" <2> - maxConcurrentRestoreVolumes: "3" <3> - pruneInterval: "14" <4> - volumeOptions: <5> - sourceVolumeOptions: - accessMode: ReadOnlyMany - cacheAccessMode: ReadWriteOnce - cacheCapacity: 2Gi - destinationVolumeOptions: - storageClass: other-storageclass-name - cacheAccessMode: ReadWriteMany - snapshotLocations: - - velero: - config: - profile: default - region: us-west-2 - provider: aws - ----- -<1> OADP 1.2 only. -<2> OADP 1.2 only. Optional: Specify the upper limit of the number of snapshots allowed to be queued for backup. 
The default value is 10. -<3> OADP 1.2 only. Optional: Specify the upper limit of the number of snapshots allowed to be queued for restore. The default value is 10. -<4> OADP 1.2 only. Optional: Specify the number of days between running Restic pruning on the repository. The prune operation repacks the data to free space, but it can also generate significant I/O traffic as a part of the process. Setting this option allows a trade-off between storage consumption, from no longer referenced data, and access costs. -<5> OADP 1.2 only. Optional: Specify VolSync volume options for backup and restore. - -+ -The OADP Operator installs two custom resource definitions (CRDs), `VolumeSnapshotBackup` and `VolumeSnapshotRestore`. -+ -.Example `VolumeSnapshotBackup` CRD -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotBackup -metadata: - name: - namespace: <1> -spec: - volumeSnapshotContent: - name: - protectedNamespace: - resticSecretRef: - name: ----- -<1> Specify the namespace where the volume snapshot exists. -+ -.Example `VolumeSnapshotRestore` CRD -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotRestore -metadata: - name: - namespace: <1> -spec: - protectedNamespace: <2> - resticSecretRef: - name: - volumeSnapshotMoverBackupRef: - sourcePVCData: - name: - size: - resticrepository: - volumeSnapshotClassName: ----- -<1> Specify the namespace where the volume snapshot exists. -<2> Specify the namespace where the Operator is installed. The default is `openshift-adp`. - -. You can back up a volume snapshot by performing the following steps: - -.. Create a backup CR: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: - namespace: <1> -spec: - includedNamespaces: - - - storageLocation: velero-sample-1 ----- -<1> Specify the namespace where the Operator is installed. The default namespace is `openshift-adp`. - -.. Wait up to 10 minutes and check whether the `VolumeSnapshotBackup` CR status is `Completed` by entering the following commands: -+ -[source,terminal] ----- -$ oc get vsb -n ----- -+ -[source,terminal] ----- -$ oc get vsb -n -o jsonpath="{.status.phase}" ----- -+ -A snapshot is created in the object store that was configured in the DPA. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotBackup` CR becomes `Failed`, refer to the Velero logs for troubleshooting. -==== - -. You can restore a volume snapshot by performing the following steps: - -.. Delete the application namespace and the `volumeSnapshotContent` that was created by the Velero CSI plugin. - -.. Create a `Restore` CR and set `restorePVs` to `true`. -+ -.Example `Restore` CR -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - name: - namespace: -spec: - backupName: - restorePVs: true ----- - -.. Wait up to 10 minutes and check whether the `VolumeSnapshotRestore` CR status is `Completed` by entering the following command: -+ -[source,terminal] ----- -$ oc get vsr -n ----- -+ -[source,terminal] ----- -$ oc get vsr -n -o jsonpath="{.status.phase}" ----- - -.. Check whether your application data and resources have been restored. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotRestore` CR becomes `Failed`, refer to the Velero logs for troubleshooting.
-==== diff --git a/modules/oadp-using-enable-api-group-versions.adoc b/modules/oadp-using-enable-api-group-versions.adoc deleted file mode 100644 index b8a5d589ace4..000000000000 --- a/modules/oadp-using-enable-api-group-versions.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/advanced-topics.adoc - - -:_content-type: PROCEDURE -[id="oadp-using-enable-api-group-versions_{context}"] -= Using Enable API Group Versions - -You can use Velero's Enable API Group Versions feature to back up _all_ Kubernetes API group versions that are supported on a cluster, not only the preferred one. - -[NOTE] -==== -Enable API Group Versions is still in beta. -==== - -.Procedure - -* Configure the `EnableAPIGroupVersions` feature flag: - -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -... -spec: - configuration: - velero: - featureFlags: - - EnableAPIGroupVersions ----- - -[role="_additional-resources"] .Additional resources -* link:https://velero.io/docs/v1.9/enable-api-group-versions-feature/[Enable API Group Versions Feature] diff --git a/modules/oadp-vsb-cleanup-after-scheduler.adoc b/modules/oadp-vsb-cleanup-after-scheduler.adoc deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/modules/oauth-configuring-internal-oauth.adoc b/modules/oauth-configuring-internal-oauth.adoc deleted file mode 100644 index e135a3fcd417..000000000000 --- a/modules/oauth-configuring-internal-oauth.adoc +++ /dev/null @@ -1,68 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-internal-oauth.adoc - -:_content-type: PROCEDURE -[id="oauth-configuring-internal-oauth_{context}"] -= Configuring the internal OAuth server's token duration - -You can configure default options for the internal OAuth server's -token duration. - -[IMPORTANT] -==== -By default, tokens are only valid for 24 hours. Existing sessions -expire after this time elapses. -==== - -If the default time is insufficient, you can modify it by using -the following procedure. - -.Procedure - -. Create a configuration file that contains the token duration options. The -following file sets this to 48 hours, twice the default. -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: - name: cluster -spec: - tokenConfig: - accessTokenMaxAgeSeconds: 172800 <1> ----- -<1> Set `accessTokenMaxAgeSeconds` to control the lifetime of access tokens. -The default lifetime is 24 hours, or 86400 seconds. This attribute cannot -be negative. If set to zero, the default lifetime is used. - -. Apply the new configuration file: -+ -[NOTE] -==== -Because you update the existing OAuth server, you must use the `oc apply` -command to apply the change. -==== -+ -[source,terminal] ----- -$ oc apply -f ----- - -. Confirm that the changes are in effect: -+ -[source,terminal] ----- -$ oc describe oauth.config.openshift.io/cluster ----- -+ -.Example output -[source,terminal] ----- -... -Spec: - Token Config: - Access Token Max Age Seconds: 172800 -...
----- diff --git a/modules/oauth-configuring-token-inactivity-timeout-clients.adoc b/modules/oauth-configuring-token-inactivity-timeout-clients.adoc deleted file mode 100644 index ff38749a940a..000000000000 --- a/modules/oauth-configuring-token-inactivity-timeout-clients.adoc +++ /dev/null @@ -1,58 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-oauth-clients.adoc - -:_content-type: PROCEDURE -[id="oauth-token-inactivity-timeout_{context}"] -= Configuring token inactivity timeout for an OAuth client - -You can configure OAuth clients to expire OAuth tokens after a set period of inactivity. By default, no token inactivity timeout is set. - -[NOTE] -==== -If the token inactivity timeout is also configured in the internal OAuth server configuration, the timeout that is set in the OAuth client overrides that value. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have configured an identity provider (IDP). - -.Procedure - -* Update the `OAuthClient` configuration to set a token inactivity timeout. - -.. Edit the `OAuthClient` object: -+ -[source,terminal] ----- -$ oc edit oauthclient <1> ----- -<1> Replace `` with the OAuth client to configure, for example, `console`. -+ -Add the `accessTokenInactivityTimeoutSeconds` field and set your timeout value: -+ -[source,yaml] ----- -apiVersion: oauth.openshift.io/v1 -grantMethod: auto -kind: OAuthClient -metadata: -... -accessTokenInactivityTimeoutSeconds: 600 <1> ----- -<1> The minimum allowed timeout value in seconds is `300`. - -.. Save the file to apply the changes. - -.Verification - -. Log in to the cluster with an identity from your IDP. Be sure to use the OAuth client that you just configured. - -. Perform an action and verify that it was successful. - -. Wait longer than the configured timeout without using the identity. In this procedure's example, wait longer than 600 seconds. - -. Try to perform an action from the same identity's session. -+ -This attempt should fail because the token should have expired due to inactivity longer than the configured timeout. diff --git a/modules/oauth-configuring-token-inactivity-timeout.adoc b/modules/oauth-configuring-token-inactivity-timeout.adoc deleted file mode 100644 index 6c693ce59bbb..000000000000 --- a/modules/oauth-configuring-token-inactivity-timeout.adoc +++ /dev/null @@ -1,98 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-internal-oauth.adoc - -:_content-type: PROCEDURE -[id="oauth-token-inactivity-timeout_{context}"] -= Configuring token inactivity timeout for the internal OAuth server - -You can configure OAuth tokens to expire after a set period of inactivity. By default, no token inactivity timeout is set. - -[NOTE] -==== -If the token inactivity timeout is also configured in your OAuth client, that value overrides the timeout that is set in the internal OAuth server configuration. -==== - -.Prerequisites - -* You have access to the cluster as a user with the `cluster-admin` role. -* You have configured an identity provider (IDP). - -.Procedure - -. Update the `OAuth` configuration to set a token inactivity timeout. - -.. Edit the `OAuth` object: -+ -[source,terminal] ----- -$ oc edit oauth cluster ----- -+ -Add the `spec.tokenConfig.accessTokenInactivityTimeout` field and set your timeout value: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: OAuth -metadata: -... 
-spec: - tokenConfig: - accessTokenInactivityTimeout: 400s <1> ----- -<1> Set a value with the appropriate units, for example `400s` for 400 seconds, or `30m` for 30 minutes. The minimum allowed timeout value is `300s`. - -.. Save the file to apply the changes. - -. Check that the OAuth server pods have restarted: -+ -[source,terminal] ----- -$ oc get clusteroperators authentication ----- -+ -Do not continue to the next step until `PROGRESSING` is listed as `False`, as shown in the following output: -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.13.0 True False False 145m ----- - -. Check that a new revision of the Kubernetes API server pods has rolled out. This will take several minutes. -+ -[source,terminal] ----- -$ oc get clusteroperators kube-apiserver ----- -+ -Do not continue to the next step until `PROGRESSING` is listed as `False`, as shown in the following output: -+ -.Example output -[source,terminal] ----- -NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -kube-apiserver 4.13.0 True False False 145m ----- -+ -If `PROGRESSING` is showing `True`, wait a few minutes and try again. - -.Verification - -. Log in to the cluster with an identity from your IDP. - -. Execute a command and verify that it was successful. - -. Wait longer than the configured timeout without using the identity. In this procedure's example, wait longer than 400 seconds. - -. Try to execute a command from the same identity's session. -+ -This command should fail because the token should have expired due to inactivity longer than the configured timeout. -+ -.Example output -[source,terminal] ----- -error: You must be logged in to the server (Unauthorized) ----- diff --git a/modules/oauth-customizing-the-oauth-server-URL.adoc b/modules/oauth-customizing-the-oauth-server-URL.adoc deleted file mode 100755 index 668c24a36370..000000000000 --- a/modules/oauth-customizing-the-oauth-server-URL.adoc +++ /dev/null @@ -1,62 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-internal-oauth.adoc - -:_content-type: PROCEDURE -[id="customizing-the-oauth-server-url_{context}"] -= Customizing the internal OAuth server URL - -You can customize the internal OAuth server URL by setting the custom hostname and TLS certificate in the `spec.componentRoutes` field of the cluster `Ingress` configuration. - -[WARNING] -==== -If you update the internal OAuth server URL, you might break trust from components in the cluster that need to communicate with the OpenShift OAuth server to retrieve OAuth access tokens. Components that need to trust the OAuth server will need to include the proper CA bundle when calling OAuth endpoints. For example: - -[source,terminal] ----- -$ oc login -u -p --certificate-authority= <1> ----- -<1> For self-signed certificates, the `ca.crt` file must contain the custom CA certificate, otherwise the login will not succeed. - -The Cluster Authentication Operator publishes the OAuth server's serving certificate in the `oauth-serving-cert` config map in the `openshift-config-managed` namespace. You can find the certificate in the `data.ca-bundle.crt` key of the config map. -==== - -.Prerequisites - -* You have logged in to the cluster as a user with administrative privileges. -* You have created a secret in the `openshift-config` namespace containing the TLS certificate and key. This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. 
The secret is optional if the suffix matches. -+ -[TIP] -==== -You can create a TLS secret by using the `oc create secret tls` command. -==== - -.Procedure - -. Edit the cluster `Ingress` configuration: -+ -[source,terminal] ----- -$ oc edit ingress.config.openshift.io cluster ----- - -. Set the custom hostname and optionally the serving certificate and key: -+ -[source,yaml] ----- -apiVersion: config.openshift.io/v1 -kind: Ingress -metadata: - name: cluster -spec: - componentRoutes: - - name: oauth-openshift - namespace: openshift-authentication - hostname: <1> - servingCertKeyPairSecret: - name: <2> ----- -<1> The custom hostname. -<2> Reference to a secret in the `openshift-config` namespace that contains a TLS certificate (`tls.crt`) and key (`tls.key`). This is required if the domain for the custom hostname suffix does not match the cluster domain suffix. The secret is optional if the suffix matches. - -. Save the file to apply the changes. diff --git a/modules/oauth-default-clients.adoc b/modules/oauth-default-clients.adoc deleted file mode 100644 index 33a59aa0558f..000000000000 --- a/modules/oauth-default-clients.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-oauth-clients.adoc - -[id="oauth-default-clients_{context}"] -= Default OAuth clients - -The following OAuth clients are automatically created when starting the {product-title} API: - -[cols="2,3",options="header"] -|=== - -|OAuth client |Usage - -|`openshift-browser-client` -|Requests tokens at `/oauth/token/request` with a user-agent that can handle interactive logins. ^[1]^ - -|`openshift-challenging-client` -|Requests tokens with a user-agent that can handle `WWW-Authenticate` challenges. - -|=== -[.small] --- -1. `` refers to the namespace route. This is found by -running the following command: -+ -[source,terminal] ----- -$ oc get route oauth-openshift -n openshift-authentication -o json | jq .spec.host ----- --- diff --git a/modules/oauth-delete-tokens.adoc b/modules/oauth-delete-tokens.adoc deleted file mode 100644 index 351f27570856..000000000000 --- a/modules/oauth-delete-tokens.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing-oauth-access-tokens.adoc - -:_content-type: PROCEDURE -[id="oauth-delete-tokens_{context}"] -= Deleting user-owned OAuth access tokens - -The `oc logout` command only invalidates the OAuth token for the active session. You can use the following procedure to delete any user-owned OAuth tokens that are no longer needed. - -Deleting an OAuth access token logs out the user from all sessions that use the token. - -.Procedure - -* Delete the user-owned OAuth access token: -+ -[source,terminal] ----- -$ oc delete useroauthaccesstokens ----- -+ -.Example output -[source,terminal] ----- -useroauthaccesstoken.oauth.openshift.io "" deleted ----- diff --git a/modules/oauth-internal-options.adoc b/modules/oauth-internal-options.adoc deleted file mode 100644 index b0944f99c583..000000000000 --- a/modules/oauth-internal-options.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-internal-oauth.adoc - -[id="oauth-internal-options_{context}"] -= Options for the internal OAuth server - -Several configuration options are available for the internal OAuth server. 
- -[id="oauth-token-duration_{context}"] -== OAuth token duration options - -The internal OAuth server generates two kinds of tokens: - -[cols="1,2",options="header"] -|=== - -|Token -|Description - -|Access tokens -|Longer-lived tokens that grant access to the API. - -|Authorize codes -|Short-lived tokens whose only use is to be exchanged for -an access token. - -|=== - -You can configure the default duration for both types of token. If necessary, -you can override the duration of the access token by using an `OAuthClient` -object definition. - -[id="oauth-grant-options_{context}"] -== OAuth grant options - -When the OAuth server receives token requests for a client to which the user -has not previously granted permission, the action that the OAuth server -takes is dependent on the OAuth client's grant strategy. - -The OAuth client requesting token must provide its own grant strategy. - -You can apply the following default methods: - -[cols="1,2",options="header"] -|=== - -|Grant option -|Description - -|`auto` -|Auto-approve the grant and retry the request. - -|`prompt` -|Prompt the user to approve or deny the grant. - -|=== diff --git a/modules/oauth-internal-tokens.adoc b/modules/oauth-internal-tokens.adoc deleted file mode 100644 index d023b76163ea..000000000000 --- a/modules/oauth-internal-tokens.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-internal-oauth.adoc - -[id="oauth-token-request-flows_{context}"] -= OAuth token request flows and responses - -The OAuth server supports standard -link:https://tools.ietf.org/html/rfc6749#section-4.1[authorization code grant] -and the link:https://tools.ietf.org/html/rfc6749#section-4.2[implicit grant] -OAuth authorization flows. - -When requesting an OAuth token using the implicit grant flow -(`response_type=token`) with a client_id configured to request `WWW-Authenticate challenges` -(like `openshift-challenging-client`), these are the possible server -responses from `/oauth/authorize`, and how they should be handled: - -[cols="2a,8a,8a",options="header"] -|=== -|Status | Content | Client response -|302 | `Location` header containing an `access_token` parameter in the URL fragment (link:https://tools.ietf.org/html/rfc6749#section-4.2.2[RFC 6749 section 4.2.2]) | Use the `access_token` value as the OAuth token. -|302 | `Location` header containing an `error` query parameter (link:https://tools.ietf.org/html/rfc6749#section-4.1.2.1[RFC 6749 section 4.1.2.1]) | Fail, optionally surfacing the `error` (and optional `error_description`) query values to the user. -|302 | Other `Location` header | Follow the redirect, and process the result using these rules. -|401 | `WWW-Authenticate` header present | Respond to challenge if type is recognized (e.g. `Basic`, `Negotiate`, etc), resubmit request, and process the result using these rules. -|401 | `WWW-Authenticate` header missing | No challenge authentication is possible. Fail and show response body (which might contain links or details on alternate methods to obtain an OAuth token). -|Other | Other | Fail, optionally surfacing response body to the user. 
-|=== diff --git a/modules/oauth-list-tokens.adoc b/modules/oauth-list-tokens.adoc deleted file mode 100644 index 908b6da10680..000000000000 --- a/modules/oauth-list-tokens.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing-oauth-access-tokens.adoc - -:_content-type: PROCEDURE -[id="oauth-list-tokens_{context}"] -= Listing user-owned OAuth access tokens - -You can list your user-owned OAuth access tokens. Token names are not sensitive and cannot be used to log in. - -.Procedure - -* List all user-owned OAuth access tokens: -+ -[source,terminal] ----- -$ oc get useroauthaccesstokens ----- -+ -.Example output -[source,terminal] ----- -NAME CLIENT NAME CREATED EXPIRES REDIRECT URI SCOPES - openshift-challenging-client 2021-01-11T19:25:35Z 2021-01-12 19:25:35 +0000 UTC https://oauth-openshift.apps.example.com/oauth/token/implicit user:full - openshift-browser-client 2021-01-11T19:27:06Z 2021-01-12 19:27:06 +0000 UTC https://oauth-openshift.apps.example.com/oauth/token/display user:full - console 2021-01-11T19:26:29Z 2021-01-12 19:26:29 +0000 UTC https://console-openshift-console.apps.example.com/auth/callback user:full ----- - -* List user-owned OAuth access tokens for a particular OAuth client: -+ -[source,terminal] ----- -$ oc get useroauthaccesstokens --field-selector=clientName="console" ----- -+ -.Example output -[source,terminal] ----- -NAME CLIENT NAME CREATED EXPIRES REDIRECT URI SCOPES - console 2021-01-11T19:26:29Z 2021-01-12 19:26:29 +0000 UTC https://console-openshift-console.apps.example.com/auth/callback user:full ----- diff --git a/modules/oauth-register-additional-client.adoc b/modules/oauth-register-additional-client.adoc deleted file mode 100644 index 9ee0b80f974a..000000000000 --- a/modules/oauth-register-additional-client.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-oauth-clients.adoc - -:_content-type: PROCEDURE -[id="oauth-register-additional-client_{context}"] -= Registering an additional OAuth client - -If you need an additional OAuth client to manage authentication for your -{product-title} cluster, you can register one. - -.Procedure - -* To register additional OAuth clients: -+ -[source,terminal] ----- -$ oc create -f <(echo ' -kind: OAuthClient -apiVersion: oauth.openshift.io/v1 -metadata: - name: demo <1> -secret: "..." <2> -redirectURIs: - - "http://www.example.com/" <3> -grantMethod: prompt <4> -') ----- -<1> The `name` of the OAuth client is used as the `client_id` parameter when -making requests to `/oauth/authorize` and -`/oauth/token`. -<2> The `secret` is used as the `client_secret` parameter when making requests -to `/oauth/token`. -<3> The `redirect_uri` parameter specified in requests to -`/oauth/authorize` and `/oauth/token` - must be equal to or prefixed by one of the URIs listed in the -`redirectURIs` parameter value. -<4> The `grantMethod` is used to determine what action to take when this -client requests tokens and has not yet been granted access by the user. -Specify `auto` to automatically approve the grant and retry the request, -or `prompt` to prompt the user to approve or deny the grant. 
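Not part of the original module, but a quick way to confirm the registration, assuming the example client name `demo` used in the snippet above:

[source,terminal]
----
# Verify that the new OAuth client exists and inspect its grant method and redirect URIs
$ oc get oauthclient demo -o yaml
----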
diff --git a/modules/oauth-server-metadata.adoc b/modules/oauth-server-metadata.adoc deleted file mode 100644 index dbb02a71e6c8..000000000000 --- a/modules/oauth-server-metadata.adoc +++ /dev/null @@ -1,72 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-internal-oauth.adoc - -[id="oauth-server-metadata_{context}"] -= OAuth server metadata - -Applications running in {product-title} might have to discover information -about the built-in OAuth server. For example, they might have to discover -what the address of the `` is without manual -configuration. To aid in this, {product-title} implements the IETF -link:https://tools.ietf.org/html/draft-ietf-oauth-discovery-10[OAuth 2.0 Authorization Server Metadata] draft specification. - -Thus, any application running inside the cluster can issue a `GET` request -to *_\https://openshift.default.svc/.well-known/oauth-authorization-server_* -to fetch the following information: - ----- -{ - "issuer": "https://", <1> - "authorization_endpoint": "https:///oauth/authorize", <2> - "token_endpoint": "https:///oauth/token", <3> - "scopes_supported": [ <4> - "user:full", - "user:info", - "user:check-access", - "user:list-scoped-projects", - "user:list-projects" - ], - "response_types_supported": [ <5> - "code", - "token" - ], - "grant_types_supported": [ <6> - "authorization_code", - "implicit" - ], - "code_challenge_methods_supported": [ <7> - "plain", - "S256" - ] -} ----- -<1> The authorization server's issuer identifier, which is a URL that uses the -`https` scheme and has no query or fragment components. This is the location -where `.well-known` link:https://tools.ietf.org/html/rfc5785[RFC 5785] resources -containing information about the authorization server are published. -<2> URL of the authorization server's authorization endpoint. See -link:https://tools.ietf.org/html/rfc6749[RFC 6749]. -<3> URL of the authorization server's token endpoint. See -link:https://tools.ietf.org/html/rfc6749[RFC 6749]. -<4> JSON array containing a list of the OAuth 2.0 -link:https://tools.ietf.org/html/rfc6749[RFC 6749] scope values that this -authorization server supports. Note that not all supported scope values are -advertised. -<5> JSON array containing a list of the OAuth 2.0 `response_type` values that this -authorization server supports. The array values used are the same as those used -with the `response_types` parameter defined by "OAuth 2.0 Dynamic Client -Registration Protocol" in link:https://tools.ietf.org/html/rfc7591[RFC 7591]. -<6> JSON array containing a list of the OAuth 2.0 grant type values that this -authorization server supports. The array values used are the same as those used -with the `grant_types` parameter defined by -`OAuth 2.0 Dynamic Client Registration Protocol` in -link:https://tools.ietf.org/html/rfc7591[RFC 7591]. -<7> JSON array containing a list of PKCE -link:https://tools.ietf.org/html/rfc7636[RFC 7636] code challenge methods -supported by this authorization server. Code challenge method values are used in -the `code_challenge_method` parameter defined in -link:https://tools.ietf.org/html/rfc7636#section-4.3[Section 4.3 of RFC 7636]. -The valid code challenge method values are those registered in the IANA -`PKCE Code Challenge Methods` registry. See -link:http://www.iana.org/assignments/oauth-parameters[IANA OAuth Parameters]. 
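A minimal sketch of retrieving this metadata from a pod inside the cluster, assuming `curl` is available in the container image; `-k` skips TLS verification for brevity, whereas a production client would pass the cluster CA bundle instead:

[source,terminal]
----
# Fetch the OAuth 2.0 Authorization Server Metadata document described above
$ curl -sk https://openshift.default.svc/.well-known/oauth-authorization-server
----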
diff --git a/modules/oauth-server-overview.adoc b/modules/oauth-server-overview.adoc deleted file mode 100644 index ea50ca16e5e8..000000000000 --- a/modules/oauth-server-overview.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc -// * authentication/configuring-internal-oauth.adoc - - -[id="oauth-server-overview_{context}"] -= {product-title} OAuth server - -The {product-title} master includes a built-in OAuth server. Users obtain OAuth -access tokens to authenticate themselves to the API. - -When a person requests a new OAuth token, the OAuth server uses the configured -identity provider -to determine the identity of the person making the request. - -It then determines what user that identity maps to, creates an access token for -that user, and returns the token for use. diff --git a/modules/oauth-token-requests.adoc b/modules/oauth-token-requests.adoc deleted file mode 100644 index 491f34d3699a..000000000000 --- a/modules/oauth-token-requests.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/understanding-authentication.adoc - -[id="oauth-token-requests_{context}"] -== OAuth token requests - -Every request for an OAuth token must specify the OAuth client that will -receive and use the token. The following OAuth clients are automatically -created when starting the {product-title} API: - -[options="header"] -|=== - -|OAuth client |Usage - -|`openshift-browser-client` -|Requests tokens at `/oauth/token/request` with a user-agent that can handle interactive logins. ^[1]^ - -|`openshift-challenging-client` -|Requests tokens with a user-agent that can handle `WWW-Authenticate` challenges. - -|=== -[.small] --- -1. `` refers to the namespace route. This is found by -running the following command: -+ -[source,terminal] ----- -$ oc get route oauth-openshift -n openshift-authentication -o json | jq .spec.host ----- --- - -All requests for OAuth tokens involve a request to -`/oauth/authorize`. Most authentication integrations place an -authenticating proxy in front of this endpoint, or configure -{product-title} to validate credentials against a backing identity provider. -Requests to `/oauth/authorize` can come from user-agents that -cannot display interactive login pages, such as the CLI. Therefore, -{product-title} supports authenticating using a `WWW-Authenticate` -challenge in addition to interactive login flows. - -If an authenticating proxy is placed in front of the -`/oauth/authorize` endpoint, it sends unauthenticated, -non-browser user-agents `WWW-Authenticate` challenges rather than -displaying an interactive login page or redirecting to an interactive -login flow. - -[NOTE] -==== -To prevent cross-site request forgery (CSRF) attacks against browser -clients, only send Basic authentication challenges with if a -`X-CSRF-Token` header is on the request. Clients that expect -to receive Basic `WWW-Authenticate` challenges must set this header to a -non-empty value. - -If the authenticating proxy cannot support `WWW-Authenticate` challenges, -or if {product-title} is configured to use an identity provider that does -not support WWW-Authenticate challenges, you must use a browser to manually -obtain a token from -`/oauth/token/request`. 
-==== diff --git a/modules/oauth-troubleshooting-api-events.adoc b/modules/oauth-troubleshooting-api-events.adoc deleted file mode 100644 index f52b0ac2b9f1..000000000000 --- a/modules/oauth-troubleshooting-api-events.adoc +++ /dev/null @@ -1,78 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/configuring-internal-oauth.adoc - -[id="oauth-troubleshooting-api-events_{context}"] -= Troubleshooting OAuth API events - -In some cases the API server returns an `unexpected condition` error message -that is difficult to debug without direct access to the API master log. -The underlying reason for the error is purposely obscured in order -to avoid providing an unauthenticated user with information about the server's state. - -A subset of these errors is related to service account OAuth configuration issues. -These issues are captured in events that can be viewed by non-administrator users. When encountering -an `unexpected condition` server error during OAuth, run `oc get events` to view these events under `ServiceAccount`. - -The following example warns of a service account that is missing a proper OAuth redirect URI: - -[source,terminal] ----- -$ oc get events | grep ServiceAccount ----- - -.Example output -[source,terminal] ----- -1m 1m 1 proxy ServiceAccount Warning NoSAOAuthRedirectURIs service-account-oauth-client-getter system:serviceaccount:myproject:proxy has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.= ----- - -Running `oc describe sa/` reports any OAuth events associated with the given service account name. - -[source,terminal] ----- -$ oc describe sa/proxy | grep -A5 Events ----- - -.Example output -[source,terminal] ----- -Events: - FirstSeen LastSeen Count From SubObjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 3m 3m 1 service-account-oauth-client-getter Warning NoSAOAuthRedirectURIs system:serviceaccount:myproject:proxy has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.= ----- - -The following is a list of the possible event errors: - -**No redirect URI annotations or an invalid URI is specified** - -[source,terminal] ----- -Reason Message -NoSAOAuthRedirectURIs system:serviceaccount:myproject:proxy has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.= ----- - -**Invalid route specified** - -[source,terminal] ----- -Reason Message -NoSAOAuthRedirectURIs [routes.route.openshift.io "" not found, system:serviceaccount:myproject:proxy has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=] ----- - -**Invalid reference type specified** - -[source,terminal] ----- -Reason Message -NoSAOAuthRedirectURIs [no kind "" is registered for version "v1", system:serviceaccount:myproject:proxy has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=] ----- - -**Missing SA tokens** - -[source,terminal] ----- -Reason Message -NoSAOAuthTokens system:serviceaccount:myproject:proxy has no tokens ----- diff --git a/modules/oauth-view-details-tokens.adoc b/modules/oauth-view-details-tokens.adoc deleted 
file mode 100644 index 5c382f416cdb..000000000000 --- a/modules/oauth-view-details-tokens.adoc +++ /dev/null @@ -1,64 +0,0 @@ -// Module included in the following assemblies: -// -// * authentication/managing-oauth-access-tokens.adoc - -:_content-type: PROCEDURE -[id="oauth-view-details-tokens_{context}"] -= Viewing the details of a user-owned OAuth access token - -You can view the details of a user-owned OAuth access token. - -.Procedure - -* Describe the details of a user-owned OAuth access token: -+ -[source,terminal] ----- -$ oc describe useroauthaccesstokens ----- -+ -.Example output -[source,terminal] ----- -Name: <1> -Namespace: -Labels: -Annotations: -API Version: oauth.openshift.io/v1 -Authorize Token: sha256~Ksckkug-9Fg_RWn_AUysPoIg-_HqmFI9zUL_CgD8wr8 -Client Name: openshift-browser-client <2> -Expires In: 86400 <3> -Inactivity Timeout Seconds: 317 <4> -Kind: UserOAuthAccessToken -Metadata: - Creation Timestamp: 2021-01-11T19:27:06Z - Managed Fields: - API Version: oauth.openshift.io/v1 - Fields Type: FieldsV1 - fieldsV1: - f:authorizeToken: - f:clientName: - f:expiresIn: - f:redirectURI: - f:scopes: - f:userName: - f:userUID: - Manager: oauth-server - Operation: Update - Time: 2021-01-11T19:27:06Z - Resource Version: 30535 - Self Link: /apis/oauth.openshift.io/v1/useroauthaccesstokens/ - UID: f9d00b67-ab65-489b-8080-e427fa3c6181 -Redirect URI: https://oauth-openshift.apps.example.com/oauth/token/display -Scopes: - user:full <5> -User Name: <6> -User UID: 82356ab0-95f9-4fb3-9bc0-10f1d6a6a345 -Events: ----- -<1> The token name, which is the sha256 hash of the token. Token names are not sensitive and cannot be used to log in. -<2> The client name, which describes where the token originated from. -<3> The value in seconds from the creation time before this token expires. -<4> If there is a token inactivity timeout set for the OAuth server, this is the value in seconds from the creation time before this token can no longer be used. -<5> The scopes for this token. -<6> The user name associated with this token. 
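When you need only a single field rather than the full description, a JSONPath query against the same resource is a lighter-weight option; `<token_name>` below is a placeholder for the sha256 token name shown in the example output:

[source,terminal]
----
# Print only the token lifetime in seconds (the "Expires In" value from the example output)
$ oc get useroauthaccesstoken <token_name> -o jsonpath='{.expiresIn}'
----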
diff --git a/modules/oc-adm-by-example-content.adoc b/modules/oc-adm-by-example-content.adoc deleted file mode 100644 index f8e56acc9dc2..000000000000 --- a/modules/oc-adm-by-example-content.adoc +++ /dev/null @@ -1,787 +0,0 @@ -// NOTE: The contents of this file are auto-generated -// This template is for admin ('oc adm ...') commands -// Uses 'source,bash' for proper syntax highlighting for comments in examples - -:_content-type: REFERENCE -[id="openshift-cli-admin_{context}"] -= OpenShift CLI (oc) administrator commands - - - -== oc adm build-chain -Output the inputs and dependencies of your builds - -.Example usage -[source,bash,options="nowrap"] ----- - # Build the dependency tree for the 'latest' tag in - oc adm build-chain - - # Build the dependency tree for the 'v2' tag in dot format and visualize it via the dot utility - oc adm build-chain :v2 -o dot | dot -T svg -o deps.svg - - # Build the dependency tree across all namespaces for the specified image stream tag found in the 'test' namespace - oc adm build-chain -n test --all ----- - - - -== oc adm catalog mirror -Mirror an operator-registry catalog - -.Example usage -[source,bash,options="nowrap"] ----- - # Mirror an operator-registry image and its contents to a registry - oc adm catalog mirror quay.io/my/image:latest myregistry.com - - # Mirror an operator-registry image and its contents to a particular namespace in a registry - oc adm catalog mirror quay.io/my/image:latest myregistry.com/my-namespace - - # Mirror to an airgapped registry by first mirroring to files - oc adm catalog mirror quay.io/my/image:latest file:///local/index - oc adm catalog mirror file:///local/index/my/image:latest my-airgapped-registry.com - - # Configure a cluster to use a mirrored registry - oc apply -f manifests/imageContentSourcePolicy.yaml - - # Edit the mirroring mappings and mirror with "oc image mirror" manually - oc adm catalog mirror --manifests-only quay.io/my/image:latest myregistry.com - oc image mirror -f manifests/mapping.txt - - # Delete all ImageContentSourcePolicies generated by oc adm catalog mirror - oc delete imagecontentsourcepolicy -l operators.openshift.org/catalog=true ----- - - - -== oc adm certificate approve -Approve a certificate signing request - -.Example usage -[source,bash,options="nowrap"] ----- - # Approve CSR 'csr-sqgzp' - oc adm certificate approve csr-sqgzp ----- - - - -== oc adm certificate deny -Deny a certificate signing request - -.Example usage -[source,bash,options="nowrap"] ----- - # Deny CSR 'csr-sqgzp' - oc adm certificate deny csr-sqgzp ----- - - - -== oc adm cordon -Mark node as unschedulable - -.Example usage -[source,bash,options="nowrap"] ----- - # Mark node "foo" as unschedulable - oc adm cordon foo ----- - - - -== oc adm create-bootstrap-project-template -Create a bootstrap project template - -.Example usage -[source,bash,options="nowrap"] ----- - # Output a bootstrap project template in YAML format to stdout - oc adm create-bootstrap-project-template -o yaml ----- - - - -== oc adm create-error-template -Create an error page template - -.Example usage -[source,bash,options="nowrap"] ----- - # Output a template for the error page to stdout - oc adm create-error-template ----- - - - -== oc adm create-login-template -Create a login template - -.Example usage -[source,bash,options="nowrap"] ----- - # Output a template for the login page to stdout - oc adm create-login-template ----- - - - -== oc adm create-provider-selection-template -Create a provider selection template - -.Example usage 
-[source,bash,options="nowrap"] ----- - # Output a template for the provider selection page to stdout - oc adm create-provider-selection-template ----- - - - -== oc adm drain -Drain node in preparation for maintenance - -.Example usage -[source,bash,options="nowrap"] ----- - # Drain node "foo", even if there are pods not managed by a replication controller, replica set, job, daemon set or stateful set on it - oc adm drain foo --force - - # As above, but abort if there are pods not managed by a replication controller, replica set, job, daemon set or stateful set, and use a grace period of 15 minutes - oc adm drain foo --grace-period=900 ----- - - - -== oc adm groups add-users -Add users to a group - -.Example usage -[source,bash,options="nowrap"] ----- - # Add user1 and user2 to my-group - oc adm groups add-users my-group user1 user2 ----- - - - -== oc adm groups new -Create a new group - -.Example usage -[source,bash,options="nowrap"] ----- - # Add a group with no users - oc adm groups new my-group - - # Add a group with two users - oc adm groups new my-group user1 user2 - - # Add a group with one user and shorter output - oc adm groups new my-group user1 -o name ----- - - - -== oc adm groups prune -Remove old OpenShift groups referencing missing records from an external provider - -.Example usage -[source,bash,options="nowrap"] ----- - # Prune all orphaned groups - oc adm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups except the ones from the blacklist file - oc adm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups from a list of specific groups specified in a whitelist file - oc adm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups from a list of specific groups specified in a whitelist - oc adm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm ----- - - - -== oc adm groups remove-users -Remove users from a group - -.Example usage -[source,bash,options="nowrap"] ----- - # Remove user1 and user2 from my-group - oc adm groups remove-users my-group user1 user2 ----- - - - -== oc adm groups sync -Sync OpenShift groups with records from an external provider - -.Example usage -[source,bash,options="nowrap"] ----- - # Sync all groups with an LDAP server - oc adm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync all groups except the ones from the blacklist file with an LDAP server - oc adm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync specific groups specified in a whitelist file with an LDAP server - oc adm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm - - # Sync all OpenShift groups that have been synced previously with an LDAP server - oc adm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Sync specific OpenShift groups if they have been synced previously with an LDAP server - oc adm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm ----- - - - -== oc adm inspect -Collect debugging data for a given resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Collect debugging data for the "openshift-apiserver" clusteroperator - oc adm inspect 
clusteroperator/openshift-apiserver - - # Collect debugging data for the "openshift-apiserver" and "kube-apiserver" clusteroperators - oc adm inspect clusteroperator/openshift-apiserver clusteroperator/kube-apiserver - - # Collect debugging data for all clusteroperators - oc adm inspect clusteroperator - - # Collect debugging data for all clusteroperators and clusterversions - oc adm inspect clusteroperators,clusterversions ----- - - - -== oc adm migrate icsp -Update imagecontentsourcepolicy file(s) to imagedigestmirrorset file(s). - -.Example usage -[source,bash,options="nowrap"] ----- - # update the imagecontentsourcepolicy.yaml to new imagedigestmirrorset file under directory mydir - oc adm migrate icsp imagecontentsourcepolicy.yaml --dest-dir mydir ----- - - - -== oc adm migrate template-instances -Update template instances to point to the latest group-version-kinds - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a dry-run of updating all objects - oc adm migrate template-instances - - # To actually perform the update, the confirm flag must be appended - oc adm migrate template-instances --confirm ----- - - - -== oc adm must-gather -Launch a new instance of a pod for gathering debug information - -.Example usage -[source,bash,options="nowrap"] ----- - # Gather information using the default plug-in image and command, writing into ./must-gather.local. - oc adm must-gather - - # Gather information with a specific local folder to copy to - oc adm must-gather --dest-dir=/local/directory - - # Gather audit information - oc adm must-gather -- /usr/bin/gather_audit_logs - - # Gather information using multiple plug-in images - oc adm must-gather --image=quay.io/kubevirt/must-gather --image=quay.io/openshift/origin-must-gather - - # Gather information using a specific image stream plug-in - oc adm must-gather --image-stream=openshift/must-gather:latest - - # Gather information using a specific image, command, and pod-dir - oc adm must-gather --image=my/image:tag --source-dir=/pod/directory -- myspecial-command.sh ----- - - - -== oc adm new-project -Create a new project - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new project using a node selector - oc adm new-project myproject --node-selector='type=user-node,region=east' ----- - - - -== oc adm node-logs -Display and filter node logs - -.Example usage -[source,bash,options="nowrap"] ----- - # Show kubelet logs from all masters - oc adm node-logs --role master -u kubelet - - # See what logs are available in masters in /var/logs - oc adm node-logs --role master --path=/ - - # Display cron log file from all masters - oc adm node-logs --role master --path=cron ----- - - - -== oc adm pod-network isolate-projects -Isolate project network - -.Example usage -[source,bash,options="nowrap"] ----- - # Provide isolation for project p1 - oc adm pod-network isolate-projects - - # Allow all projects with label name=top-secret to have their own isolated project network - oc adm pod-network isolate-projects --selector='name=top-secret' ----- - - - -== oc adm pod-network join-projects -Join project network - -.Example usage -[source,bash,options="nowrap"] ----- - # Allow project p2 to use project p1 network - oc adm pod-network join-projects --to= - - # Allow all projects with label name=top-secret to use project p1 network - oc adm pod-network join-projects --to= --selector='name=top-secret' ----- - - - -== oc adm pod-network make-projects-global -Make project network global - -.Example usage 
-[source,bash,options="nowrap"] ----- - # Allow project p1 to access all pods in the cluster and vice versa - oc adm pod-network make-projects-global - - # Allow all projects with label name=share to access all pods in the cluster and vice versa - oc adm pod-network make-projects-global --selector='name=share' ----- - - - -== oc adm policy add-role-to-user -Add a role to users or service accounts for the current project - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'view' role to user1 for the current project - oc adm policy add-role-to-user view user1 - - # Add the 'edit' role to serviceaccount1 for the current project - oc adm policy add-role-to-user edit -z serviceaccount1 ----- - - - -== oc adm policy add-scc-to-group -Add a security context constraint to groups - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'restricted' security context constraint to group1 and group2 - oc adm policy add-scc-to-group restricted group1 group2 ----- - - - -== oc adm policy add-scc-to-user -Add a security context constraint to users or a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'restricted' security context constraint to user1 and user2 - oc adm policy add-scc-to-user restricted user1 user2 - - # Add the 'privileged' security context constraint to serviceaccount1 in the current namespace - oc adm policy add-scc-to-user privileged -z serviceaccount1 ----- - - - -== oc adm policy scc-review -Check which service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml - # Service Account specified in myresource.yaml file is ignored - oc adm policy scc-review -z sa1,sa2 -f my_resource.yaml - - # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml - oc adm policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - - # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod - oc adm policy scc-review -f my_resource_with_sa.yaml - - # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml - oc adm policy scc-review -f myresource_with_no_sa.yaml ----- - - - -== oc adm policy scc-subject-review -Check whether a user or a service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether user bob can create a pod specified in myresource.yaml - oc adm policy scc-subject-review -u bob -f myresource.yaml - - # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml - oc adm policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - - # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod - oc adm policy scc-subject-review -f myresourcewithsa.yaml ----- - - - -== oc adm prune builds -Remove old completed and failed builds - -.Example usage -[source,bash,options="nowrap"] ----- - # Dry run deleting older completed and failed builds and also including - # all builds whose associated build config no longer exists - oc adm prune builds --orphans - - # To actually perform the prune operation, the confirm flag must be appended - oc adm prune builds --orphans --confirm ----- - - - -== oc adm prune deployments -Remove old 
completed and failed deployment configs - -.Example usage -[source,bash,options="nowrap"] ----- - # Dry run deleting all but the last complete deployment for every deployment config - oc adm prune deployments --keep-complete=1 - - # To actually perform the prune operation, the confirm flag must be appended - oc adm prune deployments --keep-complete=1 --confirm ----- - - - -== oc adm prune groups -Remove old OpenShift groups referencing missing records from an external provider - -.Example usage -[source,bash,options="nowrap"] ----- - # Prune all orphaned groups - oc adm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups except the ones from the blacklist file - oc adm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups from a list of specific groups specified in a whitelist file - oc adm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - - # Prune all orphaned groups from a list of specific groups specified in a whitelist - oc adm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm ----- - - - -== oc adm prune images -Remove unreferenced images - -.Example usage -[source,bash,options="nowrap"] ----- - # See what the prune command would delete if only images and their referrers were more than an hour old - # and obsoleted by 3 newer revisions under the same tag were considered - oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m - - # To actually perform the prune operation, the confirm flag must be appended - oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm - - # See what the prune command would delete if we are interested in removing images - # exceeding currently set limit ranges ('openshift.io/Image') - oc adm prune images --prune-over-size-limit - - # To actually perform the prune operation, the confirm flag must be appended - oc adm prune images --prune-over-size-limit --confirm - - # Force the insecure http protocol with the particular registry host name - oc adm prune images --registry-url=http://registry.example.org --confirm - - # Force a secure connection with a custom certificate authority to the particular registry host name - oc adm prune images --registry-url=registry.example.org --certificate-authority=/path/to/custom/ca.crt --confirm ----- - - - -== oc adm release extract -Extract the contents of an update payload to disk - -.Example usage -[source,bash,options="nowrap"] ----- - # Use git to check out the source code for the current cluster release to DIR - oc adm release extract --git=DIR - - # Extract cloud credential requests for AWS - oc adm release extract --credentials-requests --cloud=aws - - # Use git to check out the source code for the current cluster release to DIR from linux/s390x image - # Note: Wildcard filter is not supported. 
Pass a single os/arch to extract - oc adm release extract --git=DIR quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x ----- - - - -== oc adm release info -Display information about a release - -.Example usage -[source,bash,options="nowrap"] ----- - # Show information about the cluster's current release - oc adm release info - - # Show the source code that comprises a release - oc adm release info 4.11.2 --commit-urls - - # Show the source code difference between two releases - oc adm release info 4.11.0 4.11.2 --commits - - # Show where the images referenced by the release are located - oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --pullspecs - - # Show information about linux/s390x image - # Note: Wildcard filter is not supported. Pass a single os/arch to extract - oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x ----- - - - -== oc adm release mirror -Mirror a release to a different image registry location - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a dry run showing what would be mirrored, including the mirror objects - oc adm release mirror 4.11.0 --to myregistry.local/openshift/release \ - --release-image-signature-to-dir /tmp/releases --dry-run - - # Mirror a release into the current directory - oc adm release mirror 4.11.0 --to file://openshift/release \ - --release-image-signature-to-dir /tmp/releases - - # Mirror a release to another directory in the default location - oc adm release mirror 4.11.0 --to-dir /tmp/releases - - # Upload a release from the current directory to another server - oc adm release mirror --from file://openshift/release --to myregistry.com/openshift/release \ - --release-image-signature-to-dir /tmp/releases - - # Mirror the 4.11.0 release to repository registry.example.com and apply signatures to connected cluster - oc adm release mirror --from=quay.io/openshift-release-dev/ocp-release:4.11.0-x86_64 \ - --to=registry.example.com/your/repository --apply-release-image-signature ----- - - - -== oc adm release new -Create a new OpenShift release - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a release from the latest origin images and push to a DockerHub repo - oc adm release new --from-image-stream=4.11 -n origin --to-image docker.io/mycompany/myrepo:latest - - # Create a new release with updated metadata from a previous release - oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 --name 4.11.1 \ - --previous 4.11.0 --metadata ... 
--to-image docker.io/mycompany/myrepo:latest - - # Create a new release and override a single image - oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 \ - cli=docker.io/mycompany/cli:latest --to-image docker.io/mycompany/myrepo:latest - - # Run a verification pass to ensure the release can be reproduced - oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 ----- - - - -== oc adm taint -Update the taints on one or more nodes - -.Example usage -[source,bash,options="nowrap"] ----- - # Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule' - # If a taint with that key and effect already exists, its value is replaced as specified - oc adm taint nodes foo dedicated=special-user:NoSchedule - - # Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists - oc adm taint nodes foo dedicated:NoSchedule- - - # Remove from node 'foo' all the taints with key 'dedicated' - oc adm taint nodes foo dedicated- - - # Add a taint with key 'dedicated' on nodes having label mylabel=X - oc adm taint node -l myLabel=X dedicated=foo:PreferNoSchedule - - # Add to node 'foo' a taint with key 'bar' and no value - oc adm taint nodes foo bar:NoSchedule ----- - - - -== oc adm top images -Show usage statistics for images - -.Example usage -[source,bash,options="nowrap"] ----- - # Show usage statistics for images - oc adm top images ----- - - - -== oc adm top imagestreams -Show usage statistics for image streams - -.Example usage -[source,bash,options="nowrap"] ----- - # Show usage statistics for image streams - oc adm top imagestreams ----- - - - -== oc adm top node -Display resource (CPU/memory) usage of nodes - -.Example usage -[source,bash,options="nowrap"] ----- - # Show metrics for all nodes - oc adm top node - - # Show metrics for a given node - oc adm top node NODE_NAME ----- - - - -== oc adm top pod -Display resource (CPU/memory) usage of pods - -.Example usage -[source,bash,options="nowrap"] ----- - # Show metrics for all pods in the default namespace - oc adm top pod - - # Show metrics for all pods in the given namespace - oc adm top pod --namespace=NAMESPACE - - # Show metrics for a given pod and its containers - oc adm top pod POD_NAME --containers - - # Show metrics for the pods defined by label name=myLabel - oc adm top pod -l name=myLabel ----- - - - -== oc adm uncordon -Mark node as schedulable - -.Example usage -[source,bash,options="nowrap"] ----- - # Mark node "foo" as schedulable - oc adm uncordon foo ----- - - - -== oc adm upgrade -Upgrade a cluster or adjust the upgrade channel - -.Example usage -[source,bash,options="nowrap"] ----- - # Review the available cluster updates - oc adm upgrade - - # Update to the latest version - oc adm upgrade --to-latest=true ----- - - - -== oc adm verify-image-signature -Verify the image identity contained in the image signature - -.Example usage -[source,bash,options="nowrap"] ----- - # Verify the image signature and identity using the local GPG keychain - oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ - --expected-identity=registry.local:5000/foo/bar:v1 - - # Verify the image signature and identity using the local GPG keychain and save the status - oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ - --expected-identity=registry.local:5000/foo/bar:v1 --save - - # Verify the image signature and identity via exposed 
registry route - oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ - --expected-identity=registry.local:5000/foo/bar:v1 \ - --registry-url=docker-registry.foo.com - - # Remove all signature verifications from the image - oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 --remove-all ----- - - diff --git a/modules/oc-by-example-content.adoc b/modules/oc-by-example-content.adoc deleted file mode 100644 index 21f2ef4f8bde..000000000000 --- a/modules/oc-by-example-content.adoc +++ /dev/null @@ -1,2937 +0,0 @@ -// NOTE: The contents of this file are auto-generated -// This template is for non-admin (not 'oc adm ...') commands -// Uses 'source,bash' for proper syntax highlighting for comments in examples - -:_content-type: REFERENCE -[id="openshift-cli-developer_{context}"] -= OpenShift CLI (oc) developer commands - - - -== oc annotate -Update the annotations on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the annotation 'description' and the value 'my frontend' - # If the same annotation is set multiple times, only the last value will be applied - oc annotate pods foo description='my frontend' - - # Update a pod identified by type and name in "pod.json" - oc annotate -f pod.json description='my frontend' - - # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value - oc annotate --overwrite pods foo description='my frontend running nginx' - - # Update all pods in the namespace - oc annotate pods --all description='my frontend running nginx' - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc annotate pods foo description='my frontend running nginx' --resource-version=1 - - # Update pod 'foo' by removing an annotation named 'description' if it exists - # Does not require the --overwrite flag - oc annotate pods foo description- ----- - - - -== oc api-resources -Print the supported API resources on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API resources - oc api-resources - - # Print the supported API resources with more information - oc api-resources -o wide - - # Print the supported API resources sorted by a column - oc api-resources --sort-by=name - - # Print the supported namespaced resources - oc api-resources --namespaced=true - - # Print the supported non-namespaced resources - oc api-resources --namespaced=false - - # Print the supported API resources with a specific APIGroup - oc api-resources --api-group=rbac.authorization.k8s.io ----- - - - -== oc api-versions -Print the supported API versions on the server, in the form of "group/version" - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API versions - oc api-versions ----- - - - -== oc apply -Apply a configuration to a resource by file name or stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Apply the configuration in pod.json to a pod - oc apply -f ./pod.json - - # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml - oc apply -k dir/ - - # Apply the JSON passed into stdin to a pod - cat pod.json | oc apply -f - - - # Apply the configuration from all files that end with '.json' - i.e. 
expand wildcard characters in file names - oc apply -f '*.json' - - # Note: --prune is still in Alpha - # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx - oc apply --prune -f manifest.yaml -l app=nginx - - # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file - oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap ----- - - - -== oc apply edit-last-applied -Edit latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the last-applied-configuration annotations by type/name in YAML - oc apply edit-last-applied deployment/nginx - - # Edit the last-applied-configuration annotations by file in JSON - oc apply edit-last-applied -f deploy.yaml -o json ----- - - - -== oc apply set-last-applied -Set the last-applied-configuration annotation on a live object to match the contents of a file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the last-applied-configuration of a resource to match the contents of a file - oc apply set-last-applied -f deploy.yaml - - # Execute set-last-applied against each configuration file in a directory - oc apply set-last-applied -f path/ - - # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist - oc apply set-last-applied -f deploy.yaml --create-annotation=true ----- - - - -== oc apply view-last-applied -View the latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # View the last-applied-configuration annotations by type/name in YAML - oc apply view-last-applied deployment/nginx - - # View the last-applied-configuration annotations by file in JSON - oc apply view-last-applied -f deploy.yaml -o json ----- - - - -== oc attach -Attach to a running container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation - # for selecting the container to be attached or the first container in the pod will be chosen - oc attach mypod - - # Get output from ruby-container from pod mypod - oc attach mypod -c ruby-container - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc attach mypod -c ruby-container -i -t - - # Get output from the first pod of a replica set named nginx - oc attach rs/nginx ----- - - - -== oc auth can-i -Check whether an action is allowed - -.Example usage -[source,bash,options="nowrap"] ----- - # Check to see if I can create pods in any namespace - oc auth can-i create pods --all-namespaces - - # Check to see if I can list deployments in my current namespace - oc auth can-i list deployments.apps - - # Check to see if I can do everything in my current namespace ("*" means all) - oc auth can-i '*' '*' - - # Check to see if I can get the job named "bar" in namespace "foo" - oc auth can-i list jobs.batch/bar -n foo - - # Check to see if I can read pod logs - oc auth can-i get pods --subresource=log - - # Check to see if I can access the URL /logs/ - oc auth can-i get /logs/ - - # List all allowed actions in namespace "foo" - oc auth can-i --list --namespace=foo ----- - - - -== oc auth reconcile -Reconciles rules for RBAC role, role binding, cluster role, 
and cluster role binding objects - -.Example usage -[source,bash,options="nowrap"] ----- - # Reconcile RBAC resources from a file - oc auth reconcile -f my-rbac-rules.yaml ----- - - - -== oc autoscale -Autoscale a deployment config, deployment, replica set, stateful set, or replication controller - -.Example usage -[source,bash,options="nowrap"] ----- - # Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used - oc autoscale deployment foo --min=2 --max=10 - - # Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80% - oc autoscale rc foo --max=5 --cpu-percent=80 ----- - - - -== oc cancel-build -Cancel running, pending, or new builds - -.Example usage -[source,bash,options="nowrap"] ----- - # Cancel the build with the given name - oc cancel-build ruby-build-2 - - # Cancel the named build and print the build logs - oc cancel-build ruby-build-2 --dump-logs - - # Cancel the named build and create a new one with the same parameters - oc cancel-build ruby-build-2 --restart - - # Cancel multiple builds - oc cancel-build ruby-build-1 ruby-build-2 ruby-build-3 - - # Cancel all builds created from the 'ruby-build' build config that are in the 'new' state - oc cancel-build bc/ruby-build --state=new ----- - - - -== oc cluster-info -Display cluster information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the address of the control plane and cluster services - oc cluster-info ----- - - - -== oc cluster-info dump -Dump relevant information for debugging and diagnosis - -.Example usage -[source,bash,options="nowrap"] ----- - # Dump current cluster state to stdout - oc cluster-info dump - - # Dump current cluster state to /path/to/cluster-state - oc cluster-info dump --output-directory=/path/to/cluster-state - - # Dump all namespaces to stdout - oc cluster-info dump --all-namespaces - - # Dump a set of namespaces to /path/to/cluster-state - oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state ----- - - - -== oc completion -Output shell completion code for the specified shell (bash, zsh, fish, or powershell) - -.Example usage -[source,bash,options="nowrap"] ----- - # Installing bash completion on macOS using homebrew - ## If running Bash 3.2 included with macOS - brew install bash-completion - ## or, if running Bash 4.1+ - brew install bash-completion@2 - ## If oc is installed via homebrew, this should start working immediately - ## If you've installed via other means, you may need add the completion to your completion directory - oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc - - - # Installing bash completion on Linux - ## If bash-completion is not installed on Linux, install the 'bash-completion' package - ## via your distribution's package manager. 
- ## Load the oc completion code for bash into the current shell - source <(oc completion bash) - ## Write bash completion code to a file and source it from .bash_profile - oc completion bash > ~/.kube/completion.bash.inc - printf " - # Kubectl shell completion - source '$HOME/.kube/completion.bash.inc' - " >> $HOME/.bash_profile - source $HOME/.bash_profile - - # Load the oc completion code for zsh[1] into the current shell - source <(oc completion zsh) - # Set the oc completion code for zsh[1] to autoload on startup - oc completion zsh > "${fpath[1]}/_oc" - - - # Load the oc completion code for fish[2] into the current shell - oc completion fish | source - # To load completions for each session, execute once: - oc completion fish > ~/.config/fish/completions/oc.fish - - # Load the oc completion code for powershell into the current shell - oc completion powershell | Out-String | Invoke-Expression - # Set oc completion code for powershell to run on startup - ## Save completion code to a script and execute in the profile - oc completion powershell > $HOME\.kube\completion.ps1 - Add-Content $PROFILE "$HOME\.kube\completion.ps1" - ## Execute completion code in the profile - Add-Content $PROFILE "if (Get-Command oc -ErrorAction SilentlyContinue) { - oc completion powershell | Out-String | Invoke-Expression - }" - ## Add completion code directly to the $PROFILE script - oc completion powershell >> $PROFILE ----- - - - -== oc config current-context -Display the current-context - -.Example usage -[source,bash,options="nowrap"] ----- - # Display the current-context - oc config current-context ----- - - - -== oc config delete-cluster -Delete the specified cluster from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube cluster - oc config delete-cluster minikube ----- - - - -== oc config delete-context -Delete the specified context from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the context for the minikube cluster - oc config delete-context minikube ----- - - - -== oc config delete-user -Delete the specified user from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube user - oc config delete-user minikube ----- - - - -== oc config get-clusters -Display clusters defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the clusters that oc knows about - oc config get-clusters ----- - - - -== oc config get-contexts -Describe one or many contexts - -.Example usage -[source,bash,options="nowrap"] ----- - # List all the contexts in your kubeconfig file - oc config get-contexts - - # Describe one context in your kubeconfig file - oc config get-contexts my-context ----- - - - -== oc config get-users -Display users defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the users that oc knows about - oc config get-users ----- - - - -== oc config rename-context -Rename a context from the kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Rename the context 'old-name' to 'new-name' in your kubeconfig file - oc config rename-context old-name new-name ----- - - - -== oc config set -Set an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the server field on the my-cluster cluster to https://1.2.3.4 - oc config set clusters.my-cluster.server https://1.2.3.4 - - # Set the certificate-authority-data field on the my-cluster cluster - oc config 
set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) - - # Set the cluster field in the my-context context to my-cluster - oc config set contexts.my-context.cluster my-cluster - - # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option - oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true ----- - - - -== oc config set-cluster -Set a cluster entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the server field on the e2e cluster entry without touching other values - oc config set-cluster e2e --server=https://1.2.3.4 - - # Embed certificate authority data for the e2e cluster entry - oc config set-cluster e2e --embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt - - # Disable cert checking for the e2e cluster entry - oc config set-cluster e2e --insecure-skip-tls-verify=true - - # Set custom TLS server name to use for validation for the e2e cluster entry - oc config set-cluster e2e --tls-server-name=my-cluster-name - - # Set proxy url for the e2e cluster entry - oc config set-cluster e2e --proxy-url=https://1.2.3.4 ----- - - - -== oc config set-context -Set a context entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the user field on the gce context entry without touching other values - oc config set-context gce --user=cluster-admin ----- - - - -== oc config set-credentials -Set a user entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the "client-key" field on the "cluster-admin" - # entry, without touching other values - oc config set-credentials cluster-admin --client-key=~/.kube/admin.key - - # Set basic auth for the "cluster-admin" entry - oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif - - # Embed client certificate data in the "cluster-admin" entry - oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true - - # Enable the Google Compute Platform auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=gcp - - # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar - - # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- - - # Enable new exec auth plugin for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 - - # Define new exec auth plugin args for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 - - # Create or update exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 - - # Remove exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=var-to-remove- ----- - - - -== oc config unset -Unset an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Unset the current-context - oc config unset current-context - - # Unset namespace in foo context - oc config unset 
contexts.foo.namespace ---- - - - -== oc config use-context -Set the current-context in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Use the context for the minikube cluster - oc config use-context minikube ----- - - - -== oc config view -Display merged kubeconfig settings or a specified kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Show merged kubeconfig settings - oc config view - - # Show merged kubeconfig settings and raw certificate data and exposed secrets - oc config view --raw - - # Get the password for the e2e user - oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' ----- - - - -== oc cp -Copy files and directories to and from containers - -.Example usage -[source,bash,options="nowrap"] ----- - # !!!Important Note!!! - # Requires that the 'tar' binary is present in your container - # image. If 'tar' is not present, 'oc cp' will fail. - # - # For advanced use cases, such as symlinks, wildcard expansion or - # file mode preservation, consider using 'oc exec'. - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> - tar cf - /tmp/foo | oc exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar xf - -C /tmp/bar - - # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace - oc cp /tmp/foo_dir <some-pod>:/tmp/bar_dir - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container - oc cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container> - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> - oc cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar ----- - - - -== oc create -Create a resource from a file or from stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod using the data in pod.json - oc create -f ./pod.json - - # Create a pod based on the JSON passed into stdin - cat pod.json | oc create -f - - - # Edit the data in registry.yaml in JSON then create the resource using the edited data - oc create -f registry.yaml --edit -o json ----- - - - -== oc create build -Create a new build - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new build - oc create build myapp ----- - - - -== oc create clusterresourcequota -Create a cluster resource quota - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster resource quota limited to 10 pods - oc create clusterresourcequota limit-bob --project-annotation-selector=openshift.io/requester=user-bob --hard=pods=10 ----- - - - -== oc create clusterrole -Create a cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create clusterrole pod-reader --verb=get,list,watch --resource=pods - - # Create a cluster role named "pod-reader" with ResourceName specified - oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a cluster role named "foo" with API Group specified - oc create clusterrole foo --verb=get,list,watch --resource=rs.apps - - # Create a cluster role named "foo" with SubResource specified - oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status - - # Create a cluster role name "foo" with NonResourceURL specified - oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* - - # Create a 
cluster role name "monitoring" with AggregationRule specified - oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ----- - - - -== oc create clusterrolebinding -Create a cluster role binding for a particular cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role - oc create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create configmap -Create a config map from a local file, directory or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new config map named my-config based on folder bar - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config with specified keys instead of file basenames on disk - oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt - - # Create a new config map named my-config with key1=config1 and key2=config2 - oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 - - # Create a new config map named my-config from the key=value pairs in the file - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config from an env file - oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create cronjob -Create a cron job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cron job - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" - - # Create a cron job with a command - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date ----- - - - -== oc create deployment -Create a deployment with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a deployment named my-dep that runs the busybox image - oc create deployment my-dep --image=busybox - - # Create a deployment with a command - oc create deployment my-dep --image=busybox -- date - - # Create a deployment named my-dep that runs the nginx image with 3 replicas - oc create deployment my-dep --image=nginx --replicas=3 - - # Create a deployment named my-dep that runs the busybox image and expose port 5701 - oc create deployment my-dep --image=busybox --port=5701 ----- - - - -== oc create deploymentconfig -Create a deployment config with default options that uses a given image - -.Example usage -[source,bash,options="nowrap"] ----- - # Create an nginx deployment config named my-nginx - oc create deploymentconfig my-nginx --image=nginx ----- - - - -== oc create identity -Manually create an identity (only needed if automatic creation is disabled) - -.Example usage -[source,bash,options="nowrap"] ----- - # Create an identity with identity provider "acme_ldap" and the identity provider username "adamjones" - oc create identity acme_ldap:adamjones ----- - - - -== oc create imagestream -Create a new empty image stream - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new image stream - oc create imagestream mysql ----- - - - -== oc create imagestreamtag -Create a new image stream tag - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new image stream tag based on an image in a remote registry - oc create imagestreamtag mysql:latest --from-image=myregistry.local/mysql/mysql:5.0 ----- - - - -== oc create 
ingress -Create an ingress with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc - # svc1:8080 with a tls secret "my-cert" - oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" - - # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" - oc create ingress catch-all --class=otheringress --rule="/path=svc:port" - - # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 - oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ - --annotation ingress.annotation1=foo \ - --annotation ingress.annotation2=bla - - # Create an ingress with the same host and multiple paths - oc create ingress multipath --class=default \ - --rule="foo.com/=svc:port" \ - --rule="foo.com/admin/=svcadmin:portadmin" - - # Create an ingress with multiple hosts and the pathType as Prefix - oc create ingress ingress1 --class=default \ - --rule="foo.com/path*=svc:8080" \ - --rule="bar.com/admin*=svc2:http" - - # Create an ingress with TLS enabled using the default ingress certificate and different path types - oc create ingress ingtls --class=default \ - --rule="foo.com/=svc:https,tls" \ - --rule="foo.com/path/subpath*=othersvc:8080" - - # Create an ingress with TLS enabled using a specific secret and pathType as Prefix - oc create ingress ingsecret --class=default \ - --rule="foo.com/*=svc:8080,tls=secret1" - - # Create an ingress with a default backend - oc create ingress ingdefault --class=default \ - --default-backend=defaultsvc:http \ - --rule="foo.com/*=svc:8080,tls=secret1" ----- - - - -== oc create job -Create a job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a job - oc create job my-job --image=busybox - - # Create a job with a command - oc create job my-job --image=busybox -- date - - # Create a job from a cron job named "a-cronjob" - oc create job test-job --from=cronjob/a-cronjob ----- - - - -== oc create namespace -Create a namespace with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new namespace named my-namespace - oc create namespace my-namespace ----- - - - -== oc create poddisruptionbudget -Create a pod disruption budget with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label - # and require at least one of them being available at any point in time - oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 - - # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label - # and require at least half of the pods selected to be available at any point in time - oc create pdb my-pdb --selector=app=nginx --min-available=50% ----- - - - -== oc create priorityclass -Create a priority class with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a priority class named high-priority - oc create priorityclass high-priority --value=1000 --description="high priority" - - # Create a priority class named default-priority that is considered as the global default priority - oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" - - # Create a priority class named high-priority that cannot preempt pods with lower priority - oc create 
priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" ----- - - - -== oc create quota -Create a quota with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new resource quota named my-quota - oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 - - # Create a new resource quota named best-effort - oc create quota best-effort --hard=pods=100 --scopes=BestEffort ----- - - - -== oc create role -Create a role with single rule - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods - - # Create a role named "pod-reader" with ResourceName specified - oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a role named "foo" with API Group specified - oc create role foo --verb=get,list,watch --resource=rs.apps - - # Create a role named "foo" with SubResource specified - oc create role foo --verb=get,list,watch --resource=pods,pods/status ----- - - - -== oc create rolebinding -Create a role binding for a particular role or cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role binding for user1, user2, and group1 using the admin cluster role - oc create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create route edge -Create a route that uses edge TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create an edge route named "my-route" that exposes the frontend service - oc create route edge my-route --service=frontend - - # Create an edge route that exposes the frontend service and specify a path - # If the route name is omitted, the service name will be used - oc create route edge --service=frontend --path /assets ----- - - - -== oc create route passthrough -Create a route that uses passthrough TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a passthrough route named "my-route" that exposes the frontend service - oc create route passthrough my-route --service=frontend - - # Create a passthrough route that exposes the frontend service and specify - # a host name. 
If the route name is omitted, the service name will be used - oc create route passthrough --service=frontend --hostname=www.example.com ----- - - - -== oc create route reencrypt -Create a route that uses reencrypt TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route named "my-route" that exposes the frontend service - oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert - - # Create a reencrypt route that exposes the frontend service, letting the - # route name default to the service name and the destination CA certificate - # default to the service CA - oc create route reencrypt --service=frontend ----- - - - -== oc create secret docker-registry -Create a secret for use with a Docker registry - -.Example usage -[source,bash,options="nowrap"] ----- - # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: - oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - - # Create a new secret named my-secret from ~/.docker/config.json - oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json ----- - - - -== oc create secret generic -Create a secret from a local file, directory, or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new secret named my-secret with keys for each file in folder bar - oc create secret generic my-secret --from-file=path/to/bar - - # Create a new secret named my-secret with specified keys instead of names on disk - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub - - # Create a new secret named my-secret with key1=supersecret and key2=topsecret - oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret - - # Create a new secret named my-secret using a combination of a file and a literal - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret - - # Create a new secret named my-secret from env files - oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create secret tls -Create a TLS secret - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new TLS secret named tls-secret with the given key pair - oc create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key ----- - - - -== oc create service clusterip -Create a ClusterIP service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ClusterIP service named my-cs - oc create service clusterip my-cs --tcp=5678:8080 - - # Create a new ClusterIP service named my-cs (in headless mode) - oc create service clusterip my-cs --clusterip="None" ----- - - - -== oc create service externalname -Create an ExternalName service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ExternalName service named my-ns - oc create service externalname my-ns --external-name bar.com ----- - - - -== oc create service loadbalancer -Create a LoadBalancer service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new LoadBalancer service named my-lbs - oc create service loadbalancer my-lbs --tcp=5678:8080 ----- - - - -== oc create service nodeport -Create a NodePort service - -.Example usage -[source,bash,options="nowrap"] ----- - 
# Create a new NodePort service named my-ns - oc create service nodeport my-ns --tcp=5678:8080 ----- - - - -== oc create serviceaccount -Create a service account with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new service account named my-service-account - oc create serviceaccount my-service-account ----- - - - -== oc create token -Request a service account token - -.Example usage -[source,bash,options="nowrap"] ----- - # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace - oc create token myapp - - # Request a token for a service account in a custom namespace - oc create token myapp --namespace myns - - # Request a token with a custom expiration - oc create token myapp --duration 10m - - # Request a token with a custom audience - oc create token myapp --audience https://example.com - - # Request a token bound to an instance of a Secret object - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret - - # Request a token bound to an instance of a Secret object with a specific uid - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc ----- - - - -== oc create user -Manually create a user (only needed if automatic creation is disabled) - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a user with the username "ajones" and the display name "Adam Jones" - oc create user ajones --full-name="Adam Jones" ----- - - - -== oc create useridentitymapping -Manually map an identity to a user - -.Example usage -[source,bash,options="nowrap"] ----- - # Map the identity "acme_ldap:adamjones" to the user "ajones" - oc create useridentitymapping acme_ldap:adamjones ajones ----- - - - -== oc debug -Launch a new instance of a pod for debugging - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a shell session into a pod using the OpenShift tools image - oc debug - - # Debug a currently running deployment by creating a new pod - oc debug deploy/test - - # Debug a node as an administrator - oc debug node/master-1 - - # Launch a shell in a pod using the provided image stream tag - oc debug istag/mysql:latest -n openshift - - # Test running a job as a non-root user - oc debug job/test --as-user=1000000 - - # Debug a specific failing container by running the env command in the 'second' container - oc debug daemonset/test -c second -- /bin/env - - # See the pod that would be created to debug - oc debug mypod-9xbc -o yaml - - # Debug a resource but launch the debug pod in another namespace - # Note: Not all resources can be debugged using --to-namespace without modification. For example, - # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition - # to disk. If necessary, edit the definition then run 'oc debug -f -' or run without --to-namespace - oc debug mypod-9xbc --to-namespace testns ----- - - - -== oc delete -Delete resources by file names, stdin, resources and names, or by resources and label selector - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete a pod using the type and name specified in pod.json - oc delete -f ./pod.json - - # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml - oc delete -k dir - - # Delete resources from all files that end with '.json' - i.e. 
expand wildcard characters in file names - oc delete -f '*.json' - - # Delete a pod based on the type and name in the JSON passed into stdin - cat pod.json | oc delete -f - - - # Delete pods and services with same names "baz" and "foo" - oc delete pod,service baz foo - - # Delete pods and services with label name=myLabel - oc delete pods,services -l name=myLabel - - # Delete a pod with minimal delay - oc delete pod foo --now - - # Force delete a pod on a dead node - oc delete pod foo --force - - # Delete all pods - oc delete pods --all ----- - - - -== oc describe -Show details of a specific resource or group of resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Describe a node - oc describe nodes kubernetes-node-emt8.c.myproject.internal - - # Describe a pod - oc describe pods/nginx - - # Describe a pod identified by type and name in "pod.json" - oc describe -f pod.json - - # Describe all pods - oc describe pods - - # Describe pods by label name=myLabel - oc describe po -l name=myLabel - - # Describe all pods managed by the 'frontend' replication controller - # (rc-created pods get the name of the rc as a prefix in the pod name) - oc describe pods frontend ----- - - - -== oc diff -Diff the live version against a would-be applied version - -.Example usage -[source,bash,options="nowrap"] ----- - # Diff resources included in pod.json - oc diff -f pod.json - - # Diff file read from stdin - cat service.yaml | oc diff -f - ----- - - - -== oc edit -Edit a resource on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the service named 'registry' - oc edit svc/registry - - # Use an alternative editor - KUBE_EDITOR="nano" oc edit svc/registry - - # Edit the job 'myjob' in JSON using the v1 API format - oc edit job.v1.batch/myjob -o json - - # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation - oc edit deployment/mydeployment -o yaml --save-config - - # Edit the deployment/mydeployment's status subresource - oc edit deployment mydeployment --subresource='status' ----- - - - -== oc events -List events - -.Example usage -[source,bash,options="nowrap"] ----- - # List recent events in the default namespace. - oc events - - # List recent events in all namespaces. - oc events --all-namespaces - - # List recent events for the specified pod, then wait for more events and list them as they arrive. - oc events --for pod/web-pod-13je7 --watch - - # List recent events in given format. Supported ones, apart from default, are json and yaml. - oc events -oyaml - - # List recent only events in given event types - oc events --types=Warning,Normal ----- - - - -== oc exec -Execute a command in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running the 'date' command from pod mypod, using the first container by default - oc exec mypod -- date - - # Get output from running the 'date' command in ruby-container from pod mypod - oc exec mypod -c ruby-container -- date - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc exec mypod -c ruby-container -i -t -- bash -il - - # List contents of /usr from the first container of pod mypod and sort by modification time - # If the command you want to execute in the pod has any flags in common (e.g. 
-i), - # you must use two dashes (--) to separate your command's flags/arguments - # Also note, do not surround your command and its flags/arguments with quotes - # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") - oc exec mypod -i -t -- ls -t /usr - - # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default - oc exec deploy/mydeployment -- date - - # Get output from running 'date' command from the first pod of the service myservice, using the first container by default - oc exec svc/myservice -- date ----- - - - -== oc explain -Get documentation for a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Get the documentation of the resource and its fields - oc explain pods - - # Get the documentation of a specific field of a resource - oc explain pods.spec.containers ----- - - - -== oc expose -Expose a replicated application as a service or route - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route based on service nginx. The new route will reuse nginx's labels - oc expose service nginx - - # Create a route and specify your own label and route name - oc expose service nginx -l name=myroute --name=fromdowntown - - # Create a route and specify a host name - oc expose service nginx --hostname=www.example.com - - # Create a route with a wildcard - oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain - # This would be equivalent to *.example.com. NOTE: only hosts are matched by the wildcard; subdomains would not be included - - # Expose a deployment configuration as a service and use the specified port - oc expose dc ruby-hello-world --port=8080 - - # Expose a service as a route in the specified path - oc expose service nginx --path=/nginx ----- - - - -== oc extract -Extract secrets or config maps to disk - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the secret "test" to the current directory - oc extract secret/test - - # Extract the config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp - - # Extract the config map "nginx" to STDOUT - oc extract configmap/nginx --to=- - - # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp --keys=nginx.conf ----- - - - -== oc get -Display one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # List all pods in ps output format - oc get pods - - # List all pods in ps output format with more information (such as node name) - oc get pods -o wide - - # List a single replication controller with specified NAME in ps output format - oc get replicationcontroller web - - # List deployments in JSON output format, in the "v1" version of the "apps" API group - oc get deployments.v1.apps -o json - - # List a single pod in JSON output format - oc get -o json pod web-pod-13je7 - - # List a pod identified by type and name specified in "pod.yaml" in JSON output format - oc get -f pod.yaml -o json - - # List resources from a directory with kustomization.yaml - e.g. 
dir/kustomization.yaml - oc get -k dir/ - - # Return only the phase value of the specified pod - oc get -o template pod/web-pod-13je7 --template={{.status.phase}} - - # List resource information in custom columns - oc get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image - - # List all replication controllers and services together in ps output format - oc get rc,services - - # List one or more resources by their type and names - oc get rc/web service/frontend pods/web-pod-13je7 - - # List status subresource for a single pod. - oc get pod web-pod-13je7 --subresource status ----- - - - -== oc idle -Idle scalable resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Idle the scalable controllers associated with the services listed in to-idle.txt - $ oc idle --resource-names-file to-idle.txt ----- - - - -== oc image append -Add layers to images and push them to a registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Remove the entrypoint on the mysql:latest image - oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' - - # Add a new layer to the image - oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to the image and store the result on disk - # This results in $(pwd)/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local layer.tar.gz - - # Add a new layer to the image and store the result on disk in a designated directory - # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz - - # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) - oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) - oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch - # Note: Wildcard filter is not supported with append. Pass a single os/arch to append - oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz ----- - - - -== oc image extract -Copy files from an image to the file system - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the busybox image into the current directory - oc image extract docker.io/library/busybox:latest - - # Extract the busybox image into a designated directory (must exist) - oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox - - # Extract the busybox image into the current directory for linux/s390x platform - # Note: Wildcard filter is not supported with extract. Pass a single os/arch to extract - oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x - - # Extract a single file from the image into the current directory - oc image extract docker.io/library/centos:7 --path /bin/bash:. - - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
- - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) - # This results in /tmp/yum.repos.d/*.repo on local system - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d - - # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) - # --confirm is required because the current directory is not empty - oc image extract file://busybox:local --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory - # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) - oc image extract file://busybox:local --dir busybox-mirror-dir --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) - oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox - - # Extract the last layer in the image - oc image extract docker.io/library/centos:7[-1] - - # Extract the first three layers of the image - oc image extract docker.io/library/centos:7[:3] - - # Extract the last three layers of the image - oc image extract docker.io/library/centos:7[-3:] ----- - - - -== oc image info -Display information about an image - -.Example usage -[source,bash,options="nowrap"] ----- - # Show information about an image - oc image info quay.io/openshift/cli:latest - - # Show information about images matching a wildcard - oc image info quay.io/openshift/cli:4.* - - # Show information about a file mirrored to disk under DIR - oc image info --dir=DIR file://library/busybox:latest - - # Select which image from a multi-OS image to show - oc image info library/busybox:latest --filter-by-os=linux/arm64 ----- - - - -== oc image mirror -Mirror images from one repository to another - -.Example usage -[source,bash,options="nowrap"] ----- - # Copy image to another tag - oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable - - # Copy image to another registry - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable - - # Copy all tags starting with mysql to the destination repository - oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage - - # Copy image to disk, creating a directory structure that can be served as a registry - oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest - - # Copy image to S3 (pull from <bucket>.s3.amazonaws.com/image:latest) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image:latest - - # Copy image to S3 without setting a tag (pull via @<digest>) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image - - # Copy image to multiple locations - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ - docker.io/myrepository/myimage:dev - - # Copy multiple images - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - myregistry.com/myimage:new=myregistry.com/other:target - - # Copy manifest list of a multi-architecture image, even if only a single image is found - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Copy specific os/arch manifest of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images - # Note that with multi-arch images, this results in a new manifest 
list digest that includes only - # the filtered manifests - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=os/arch - - # Copy all os/arch manifests of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Note the above command is equivalent to - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=.* ----- - - - -== oc import-image -Import images from a container image registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Import tag latest into a new image stream - oc import-image mystream --from=registry.io/repo/image:latest --confirm - - # Update imported data for tag latest in an already existing image stream - oc import-image mystream - - # Update imported data for tag stable in an already existing image stream - oc import-image mystream:stable - - # Update imported data for all tags in an existing image stream - oc import-image mystream --all - - # Update imported data for a tag which points to a manifest list to include the full manifest list - oc import-image mystream --import-mode=PreserveOriginal - - # Import all tags into a new image stream - oc import-image mystream --from=registry.io/repo/image --all --confirm - - # Import all tags into a new image stream using a custom timeout - oc --request-timeout=5m import-image mystream --from=registry.io/repo/image --all --confirm ----- - - - -== oc kustomize -Build a kustomization target from a directory or URL. - -.Example usage -[source,bash,options="nowrap"] ----- - # Build the current working directory - oc kustomize - - # Build some shared configuration directory - oc kustomize /home/config/production - - # Build from github - oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 ----- - - - -== oc label -Update the labels on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the label 'unhealthy' and the value 'true' - oc label pods foo unhealthy=true - - # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value - oc label --overwrite pods foo status=unhealthy - - # Update all pods in the namespace - oc label pods --all status=unhealthy - - # Update a pod identified by the type and name in "pod.json" - oc label -f pod.json status=unhealthy - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc label pods foo status=unhealthy --resource-version=1 - - # Update pod 'foo' by removing a label named 'bar' if it exists - # Does not require the --overwrite flag - oc label pods foo bar- ----- - - - -== oc login -Log in to a server - -.Example usage -[source,bash,options="nowrap"] ----- - # Log in interactively - oc login --username=myuser - - # Log in to the given server with the given certificate authority file - oc login localhost:8443 --certificate-authority=/path/to/cert.crt - - # Log in to the given server with the given credentials (will not prompt interactively) - oc login localhost:8443 --username=myuser --password=mypass ----- - - - -== oc logout -End the current server session - -.Example usage -[source,bash,options="nowrap"] ----- - # Log out - oc logout ----- - - - -== oc logs -Print the logs for a container in a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Start streaming the logs 
of the most recent build of the openldap build config - oc logs -f bc/openldap - - # Start streaming the logs of the latest deployment of the mysql deployment config - oc logs -f dc/mysql - - # Get the logs of the first deployment for the mysql deployment config. Note that logs - # from older deployments may not exist either because the deployment was successful - # or due to deployment pruning or manual deletion of the deployment - oc logs --version=1 dc/mysql - - # Return a snapshot of ruby-container logs from pod backend - oc logs backend -c ruby-container - - # Start streaming of ruby-container logs from pod backend - oc logs -f pod/backend -c ruby-container ----- - - - -== oc new-app -Create a new application - -.Example usage -[source,bash,options="nowrap"] ----- - # List all local templates and image streams that can be used to create an app - oc new-app --list - - # Create an application based on the source code in the current git repository (with a public remote) and a container image - oc new-app . --image=registry/repo/langimage - - # Create an application myapp with Docker based build strategy expecting binary input - oc new-app --strategy=docker --binary --name myapp - - # Create a Ruby application based on the provided [image]~[source code] combination - oc new-app centos/ruby-25-centos7~https://github.com/sclorg/ruby-ex.git - - # Use the public container registry MySQL image to create an app. Generated artifacts will be labeled with db=mysql - oc new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql - - # Use a MySQL image in a private registry to create an app and override application artifacts' names - oc new-app --image=myregistry.com/mycompany/mysql --name=private - - # Create an application from a remote repository using its beta4 branch - oc new-app https://github.com/openshift/ruby-hello-world#beta4 - - # Create an application based on a stored template, explicitly setting a parameter value - oc new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin - - # Create an application from a remote repository and specify a context directory - oc new-app https://github.com/youruser/yourgitrepo --context-dir=src/build - - # Create an application from a remote private repository and specify which existing secret to use - oc new-app https://github.com/youruser/yourgitrepo --source-secret=yoursecret - - # Create an application based on a template file, explicitly setting a parameter value - oc new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin - - # Search all templates, image streams, and container images for the ones that match "ruby" - oc new-app --search ruby - - # Search for "ruby", but only in stored templates (--template, --image-stream and --image - # can be used to filter search results) - oc new-app --search --template=ruby - - # Search for "ruby" in stored templates and print the output as YAML - oc new-app --search --template=ruby --output=yaml ----- - - - -== oc new-build -Create a new build configuration - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a build config based on the source code in the current git repository (with a public - # remote) and a container image - oc new-build . 
--image=repo/langimage - - # Create a NodeJS build config based on the provided [image]~[source code] combination - oc new-build centos/nodejs-8-centos7~https://github.com/sclorg/nodejs-ex.git - - # Create a build config from a remote repository using its beta2 branch - oc new-build https://github.com/openshift/ruby-hello-world#beta2 - - # Create a build config using a Dockerfile specified as an argument - oc new-build -D $'FROM centos:7\nRUN yum install -y httpd' - - # Create a build config from a remote repository and add custom environment variables - oc new-build https://github.com/openshift/ruby-hello-world -e RACK_ENV=development - - # Create a build config from a remote private repository and specify which existing secret to use - oc new-build https://github.com/youruser/yourgitrepo --source-secret=yoursecret - - # Create a build config from a remote repository and inject the npmrc into a build - oc new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc - - # Create a build config from a remote repository and inject environment data into a build - oc new-build https://github.com/openshift/ruby-hello-world --build-config-map env:config - - # Create a build config that gets its input from a remote repository and another container image - oc new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp ----- - - - -== oc new-project -Request a new project - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new project with minimal information - oc new-project web-team-dev - - # Create a new project with a display name and description - oc new-project web-team-dev --display-name="Web Team Development" --description="Development project for the web team." ----- - - - -== oc observe -Observe changes to resources and react to them (experimental) - -.Example usage -[source,bash,options="nowrap"] ----- - # Observe changes to services - oc observe services - - # Observe changes to services, including the clusterIP and invoke a script for each - oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh - - # Observe changes to services filtered by a label selector - oc observe namespaces -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh ----- - - - -== oc patch -Update fields of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Partially update a node using a strategic merge patch, specifying the patch as JSON - oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' - - # Partially update a node using a strategic merge patch, specifying the patch as YAML - oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' - - # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch - oc patch -f node.json -p '{"spec":{"unschedulable":true}}' - - # Update a container's image; spec.containers[*].name is required because it's a merge key - oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' - - # Update a container's image using a JSON patch with positional arrays - oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - - # Update a deployment's replicas through the scale subresource using a merge patch. 
- oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' ----- - - - -== oc plugin list -List all visible plugin executables on a user's PATH - -.Example usage -[source,bash,options="nowrap"] ----- - # List all available plugins - oc plugin list ----- - - - -== oc policy add-role-to-user -Add a role to users or service accounts for the current project - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'view' role to user1 for the current project - oc policy add-role-to-user view user1 - - # Add the 'edit' role to serviceaccount1 for the current project - oc policy add-role-to-user edit -z serviceaccount1 ----- - - - -== oc policy scc-review -Check which service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml - # Service Account specified in myresource.yaml file is ignored - oc policy scc-review -z sa1,sa2 -f my_resource.yaml - - # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml - oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - - # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod - oc policy scc-review -f my_resource_with_sa.yaml - - # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml - oc policy scc-review -f myresource_with_no_sa.yaml ----- - - - -== oc policy scc-subject-review -Check whether a user or a service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether user bob can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -f myresource.yaml - - # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - - # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod - oc policy scc-subject-review -f myresourcewithsa.yaml ----- - - - -== oc port-forward -Forward one or more local ports to a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod - oc port-forward pod/mypod 5000 6000 - - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment - oc port-forward deployment/mydeployment 5000 6000 - - # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service - oc port-forward service/myservice 8443:https - - # Listen on port 8888 locally, forwarding to 5000 in the pod - oc port-forward pod/mypod 8888:5000 - - # Listen on port 8888 on all addresses, forwarding to 5000 in the pod - oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 - - # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod - oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 - - # Listen on a random port locally, forwarding to 5000 in the pod - oc port-forward pod/mypod :5000 ----- - - - -== oc process -Process a template into list of resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Convert the template.json file into a 
resource list and pass to create - oc process -f template.json | oc create -f - - - # Process a file locally instead of contacting the server - oc process -f template.json --local -o yaml - - # Process template while passing a user-defined label - oc process -f template.json -l name=mytemplate - - # Convert a stored template into a resource list - oc process foo - - # Convert a stored template into a resource list by setting/overriding parameter values - oc process foo PARM1=VALUE1 PARM2=VALUE2 - - # Convert a template stored in different namespace into a resource list - oc process openshift//foo - - # Convert template.json into a resource list - cat template.json | oc process -f - ----- - - - -== oc project -Switch to another project - -.Example usage -[source,bash,options="nowrap"] ----- - # Switch to the 'myapp' project - oc project myapp - - # Display the project currently in use - oc project ----- - - - -== oc projects -Display existing projects - -.Example usage -[source,bash,options="nowrap"] ----- - # List all projects - oc projects ----- - - - -== oc proxy -Run a proxy to the Kubernetes API server - -.Example usage -[source,bash,options="nowrap"] ----- - # To proxy all of the Kubernetes API and nothing else - oc proxy --api-prefix=/ - - # To proxy only part of the Kubernetes API and also some static files - # You can get pods info with 'curl localhost:8001/api/v1/pods' - oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ - - # To proxy the entire Kubernetes API at a different root - # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' - oc proxy --api-prefix=/custom/ - - # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ - oc proxy --port=8011 --www=./local/www/ - - # Run a proxy to the Kubernetes API server on an arbitrary local port - # The chosen port for the server will be output to stdout - oc proxy --port=0 - - # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api - # This makes e.g. 
the pods API available at localhost:8001/k8s-api/v1/pods/ - oc proxy --api-prefix=/k8s-api ----- - - - -== oc registry info -Print information about the integrated registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Display information about the integrated registry - oc registry info ----- - - - -== oc registry login -Log in to the integrated registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Log in to the integrated registry - oc registry login - - # Log in to different registry using BASIC auth credentials - oc registry login --registry quay.io/myregistry --auth-basic=USER:PASS ----- - - - -== oc replace -Replace a resource by file name or stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Replace a pod using the data in pod.json - oc replace -f ./pod.json - - # Replace a pod based on the JSON passed into stdin - cat pod.json | oc replace -f - - - # Update a single-container pod's image version (tag) to v4 - oc get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | oc replace -f - - - # Force replace, delete and then re-create the resource - oc replace --force -f ./pod.json ----- - - - -== oc rollback -Revert part of an application back to a previous deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a rollback to the last successfully completed deployment for a deployment config - oc rollback frontend - - # See what a rollback to version 3 will look like, but do not perform the rollback - oc rollback frontend --to-version=3 --dry-run - - # Perform a rollback to a specific deployment - oc rollback frontend-2 - - # Perform the rollback manually by piping the JSON of the new config back to oc - oc rollback frontend -o json | oc replace dc/frontend -f - - - # Print the updated deployment configuration in JSON format instead of performing the rollback - oc rollback frontend -o json ----- - - - -== oc rollout cancel -Cancel the in-progress deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Cancel the in-progress deployment based on 'nginx' - oc rollout cancel dc/nginx ----- - - - -== oc rollout history -View rollout history - -.Example usage -[source,bash,options="nowrap"] ----- - # View the rollout history of a deployment - oc rollout history dc/nginx - - # View the details of deployment revision 3 - oc rollout history dc/nginx --revision=3 ----- - - - -== oc rollout latest -Start a new rollout for a deployment config with the latest state from its triggers - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a new rollout based on the latest images defined in the image change triggers - oc rollout latest dc/nginx - - # Print the rolled out deployment config - oc rollout latest dc/nginx -o json ----- - - - -== oc rollout pause -Mark the provided resource as paused - -.Example usage -[source,bash,options="nowrap"] ----- - # Mark the nginx deployment as paused. 
Any current state of - # the deployment will continue its function, new updates to the deployment will not - # have an effect as long as the deployment is paused - oc rollout pause dc/nginx ----- - - - -== oc rollout restart -Restart a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Restart a deployment - oc rollout restart deployment/nginx - - # Restart a daemon set - oc rollout restart daemonset/abc - - # Restart deployments with the app=nginx label - oc rollout restart deployment --selector=app=nginx ----- - - - -== oc rollout resume -Resume a paused resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Resume an already paused deployment - oc rollout resume dc/nginx ----- - - - -== oc rollout retry -Retry the latest failed rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Retry the latest failed deployment based on 'frontend' - # The deployer pod and any hook pods are deleted for the latest failed deployment - oc rollout retry dc/frontend ----- - - - -== oc rollout status -Show the status of the rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Watch the status of the latest rollout - oc rollout status dc/nginx ----- - - - -== oc rollout undo -Undo a previous rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Roll back to the previous deployment - oc rollout undo dc/nginx - - # Roll back to deployment revision 3. The replication controller for that version must exist - oc rollout undo dc/nginx --to-revision=3 ----- - - - -== oc rsh -Start a shell session in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Open a shell session on the first container in pod 'foo' - oc rsh foo - - # Open a shell session on the first container in pod 'foo' and namespace 'bar' - # (Note that oc client specific arguments must come before the resource name and its arguments) - oc rsh -n bar foo - - # Run the command 'cat /etc/resolv.conf' inside pod 'foo' - oc rsh foo cat /etc/resolv.conf - - # See the configuration of your internal registry - oc rsh dc/docker-registry cat config.yml - - # Open a shell session on the container named 'index' inside a pod of your job - oc rsh -c index job/sheduled ----- - - - -== oc rsync -Copy files between a local file system and a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Synchronize a local directory with a pod directory - oc rsync ./local/dir/ POD:/remote/dir - - # Synchronize a pod directory with a local directory - oc rsync POD:/remote/dir/ ./local/dir ----- - - - -== oc run -Run a particular image on the cluster - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a nginx pod - oc run nginx --image=nginx - - # Start a hazelcast pod and let the container expose port 5701 - oc run hazelcast --image=hazelcast/hazelcast --port=5701 - - # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container - oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" - - # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container - oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" - - # Dry run; print the corresponding API objects without creating them - oc run nginx --image=nginx --dry-run=client - - # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON - oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... 
} }' - - # Start a busybox pod and keep it in the foreground, don't restart it if it exits - oc run -i -t busybox --image=busybox --restart=Never - - # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command - oc run nginx --image=nginx -- ... - - # Start the nginx pod using a different command and custom arguments - oc run nginx --image=nginx --command -- ... ----- - - - -== oc scale -Set a new size for a deployment, replica set, or replication controller - -.Example usage -[source,bash,options="nowrap"] ----- - # Scale a replica set named 'foo' to 3 - oc scale --replicas=3 rs/foo - - # Scale a resource identified by type and name specified in "foo.yaml" to 3 - oc scale --replicas=3 -f foo.yaml - - # If the deployment named mysql's current size is 2, scale mysql to 3 - oc scale --current-replicas=2 --replicas=3 deployment/mysql - - # Scale multiple replication controllers - oc scale --replicas=5 rc/foo rc/bar rc/baz - - # Scale stateful set named 'web' to 3 - oc scale --replicas=3 statefulset/web ----- - - - -== oc secrets link -Link secrets to a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Add an image pull secret to a service account to automatically use it for pulling pod images - oc secrets link serviceaccount-name pull-secret --for=pull - - # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images - oc secrets link builder builder-image-secret --for=pull,mount ----- - - - -== oc secrets unlink -Detach secrets from a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Unlink a secret currently associated with a service account - oc secrets unlink serviceaccount-name secret-name another-secret-name ... 
----- - - - -== oc set build-hook -Update a build hook on a build config - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear post-commit hook on a build config - oc set build-hook bc/mybuild --post-commit --remove - - # Set the post-commit hook to execute a test suite using a new entrypoint - oc set build-hook bc/mybuild --post-commit --command -- /bin/bash -c /var/lib/test-image.sh - - # Set the post-commit hook to execute a shell script - oc set build-hook bc/mybuild --post-commit --script="/var/lib/test-image.sh param1 param2 && /var/lib/done.sh" ----- - - - -== oc set build-secret -Update a build secret on a build config - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear the push secret on a build config - oc set build-secret --push --remove bc/mybuild - - # Set the pull secret on a build config - oc set build-secret --pull bc/mybuild mysecret - - # Set the push and pull secret on a build config - oc set build-secret --push --pull bc/mybuild mysecret - - # Set the source secret on a set of build configs matching a selector - oc set build-secret --source -l app=myapp gitsecret ----- - - - -== oc set data -Update the data within a config map or secret - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the 'password' key of a secret - oc set data secret/foo password=this_is_secret - - # Remove the 'password' key from a secret - oc set data secret/foo password- - - # Update the 'haproxy.conf' key of a config map from a file on disk - oc set data configmap/bar --from-file=../haproxy.conf - - # Update a secret with the contents of a directory, one key per file - oc set data secret/foo --from-file=secret-dir ----- - - - -== oc set deployment-hook -Update a deployment hook on a deployment config - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear pre and post hooks on a deployment config - oc set deployment-hook dc/myapp --remove --pre --post - - # Set the pre deployment hook to execute a db migration command for an application - # using the data volume from the application - oc set deployment-hook dc/myapp --pre --volumes=data -- /var/lib/migrate-db.sh - - # Set a mid deployment hook along with additional environment variables - oc set deployment-hook dc/myapp --mid --volumes=data -e VAR1=value1 -e VAR2=value2 -- /var/lib/prepare-deploy.sh ----- - - - -== oc set env -Update environment variables on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Update deployment config 'myapp' with a new environment variable - oc set env dc/myapp STORAGE_DIR=/local - - # List the environment variables defined on a build config 'sample-build' - oc set env bc/sample-build --list - - # List the environment variables defined on all pods - oc set env pods --all --list - - # Output modified build config in YAML - oc set env bc/sample-build STORAGE_DIR=/data -o yaml - - # Update all containers in all replication controllers in the project to have ENV=prod - oc set env rc --all ENV=prod - - # Import environment from a secret - oc set env --from=secret/mysecret dc/myapp - - # Import environment from a config map with a prefix - oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp - - # Remove the environment variable ENV from container 'c1' in all deployment configs - oc set env dc --all --containers="c1" ENV- - - # Remove the environment variable ENV from a deployment config definition on disk and - # update the deployment config on the server - oc set env -f dc.json ENV- - - # Set some of the local shell environment into 
a deployment config on the server - oc set env | grep RAILS_ | oc env -e - dc/myapp ----- - - - -== oc set image -Update the image of a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployment configs's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. - oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 - - # Set a deployment configs's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'. - oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag - - # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' - oc set image deployments,rc nginx=nginx:1.9.1 --all - - # Update image of all containers of daemonset abc to 'nginx:1.9.1' - oc set image daemonset abc *=nginx:1.9.1 - - # Print result (in yaml format) of updating nginx container image from local file, without hitting the server - oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml ----- - - - -== oc set image-lookup -Change how images are resolved when deploying applications - -.Example usage -[source,bash,options="nowrap"] ----- - # Print all of the image streams and whether they resolve local names - oc set image-lookup - - # Use local name lookup on image stream mysql - oc set image-lookup mysql - - # Force a deployment to use local name lookup - oc set image-lookup deploy/mysql - - # Show the current status of the deployment lookup - oc set image-lookup deploy/mysql --list - - # Disable local name lookup on image stream mysql - oc set image-lookup mysql --enabled=false - - # Set local name lookup on all image streams - oc set image-lookup --all ----- - - - -== oc set probe -Update a probe on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear both readiness and liveness probes off all containers - oc set probe dc/myapp --remove --readiness --liveness - - # Set an exec action as a liveness probe to run 'echo ok' - oc set probe dc/myapp --liveness -- echo ok - - # Set a readiness probe to try to open a TCP socket on 3306 - oc set probe rc/mysql --readiness --open-tcp=3306 - - # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --startup --get-url=http://:8080/healthz - - # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --readiness --get-url=http://:8080/healthz - - # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod - oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats - - # Set only the initial-delay-seconds field on all deployments - oc set probe dc --all --readiness --initial-delay-seconds=30 ----- - - - -== oc set resources -Update resource requests/limits on objects with pod templates - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployments nginx container CPU limits to "200m and memory to 512Mi" - oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi - - # Set the resource request and limits for all containers in nginx - oc set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi - - # Remove the resource requests for resources on containers in nginx - oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 - - # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server - oc set resources -f path/to/file.yaml 
--limits=cpu=200m,memory=512Mi --local -o yaml ----- - - - -== oc set route-backends -Update the backends for a route - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the backends on the route 'web' - oc set route-backends web - - # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' - oc set route-backends web a=2 b=1 - - # Increase the traffic percentage going to b by 10%% relative to a - oc set route-backends web --adjust b=+10%% - - # Set traffic percentage going to b to 10%% of the traffic going to a - oc set route-backends web --adjust b=10%% - - # Set weight of b to 10 - oc set route-backends web --adjust b=10 - - # Set the weight to all backends to zero - oc set route-backends web --zero ----- - - - -== oc set selector -Set the selector on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the labels and selector before creating a deployment/service pair. - oc create service clusterip my-svc --clusterip="None" -o yaml --dry-run | oc set selector --local -f - 'environment=qa' -o yaml | oc create -f - - oc create deployment my-dep -o yaml --dry-run | oc label --local -f - environment=qa -o yaml | oc create -f - ----- - - - -== oc set serviceaccount -Update the service account of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set deployment nginx-deployment's service account to serviceaccount1 - oc set serviceaccount deployment nginx-deployment serviceaccount1 - - # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server - oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml ----- - - - -== oc set subject -Update the user, group, or service account in a role binding or cluster role binding - -.Example usage -[source,bash,options="nowrap"] ----- - # Update a cluster role binding for serviceaccount1 - oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 - - # Update a role binding for user1, user2, and group1 - oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 - - # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server - oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml ----- - - - -== oc set triggers -Update the triggers on one or more objects - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the triggers on the deployment config 'myapp' - oc set triggers dc/myapp - - # Set all triggers to manual - oc set triggers dc/myapp --manual - - # Enable all automatic triggers - oc set triggers dc/myapp --auto - - # Reset the GitHub webhook on a build to a new, generated secret - oc set triggers bc/webapp --from-github - oc set triggers bc/webapp --from-webhook - - # Remove all triggers - oc set triggers bc/webapp --remove-all - - # Stop triggering on config change - oc set triggers dc/myapp --from-config --remove - - # Add an image trigger to a build config - oc set triggers bc/webapp --from-image=namespace1/image:latest - - # Add an image trigger to a stateful set on the main container - oc set triggers statefulset/db --from-image=namespace1/image:latest -c main ----- - - - -== oc set volumes -Update volumes on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # List volumes defined on all deployment configs in the current project - oc set volume dc --all - - # Add a new empty dir volume to 
deployment config (dc) 'myapp' mounted under - # /var/lib/myapp - oc set volume dc/myapp --add --mount-path=/var/lib/myapp - - # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite - - # Remove volume 'v1' from deployment config 'myapp' - oc set volume dc/myapp --remove --name=v1 - - # Create a new persistent volume claim that overwrites an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite - - # Change the mount point for volume 'v1' to /data - oc set volume dc/myapp --add --name=v1 -m /data --overwrite - - # Modify the deployment config by removing volume mount "v1" from container "c1" - # (and by removing the volume "v1" if no other containers have volume mounts that reference it) - oc set volume dc/myapp --remove --name=v1 --containers=c1 - - # Add new volume based on a more complex volume source (AWS EBS, GCE PD, - # Ceph, Gluster, NFS, ISCSI, ...) - oc set volume dc/myapp --add -m /data --source= ----- - - - -== oc start-build -Start a new build - -.Example usage -[source,bash,options="nowrap"] ----- - # Starts build from build config "hello-world" - oc start-build hello-world - - # Starts build from a previous build "hello-world-1" - oc start-build --from-build=hello-world-1 - - # Use the contents of a directory as build input - oc start-build hello-world --from-dir=src/ - - # Send the contents of a Git repository to the server from tag 'v2' - oc start-build hello-world --from-repo=../hello-world --commit=v2 - - # Start a new build for build config "hello-world" and watch the logs until the build - # completes or fails - oc start-build hello-world --follow - - # Start a new build for build config "hello-world" and wait until the build completes. 
It - # exits with a non-zero return code if the build fails - oc start-build hello-world --wait ----- - - - -== oc status -Show an overview of the current project - -.Example usage -[source,bash,options="nowrap"] ----- - # See an overview of the current project - oc status - - # Export the overview of the current project in an svg file - oc status -o dot | dot -T svg -o project.svg - - # See an overview of the current project including details for any identified issues - oc status --suggest ----- - - - -== oc tag -Tag existing images into image streams - -.Example usage -[source,bash,options="nowrap"] ----- - # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby with tag 'tip' - oc tag openshift/ruby:2.0 yourproject/ruby:tip - - # Tag a specific image - oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip - - # Tag an external container image - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip - - # Tag an external container image and request pullthrough for it - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local - - # Tag an external container image and include the full manifest list - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal - - # Remove the specified spec tag from an image stream - oc tag openshift/origin-control-plane:latest -d ----- - - - -== oc version -Print the client and server version information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context - oc version - - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context - oc version --short - - # Print the OpenShift client version information for the current context - oc version --client ----- - - - -== oc wait -Experimental: Wait for a specific condition on one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Wait for the pod "busybox1" to contain the status condition of type "Ready" - oc wait --for=condition=Ready pod/busybox1 - - # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more general form of case-insensitivity): - oc wait --for=condition=Ready=false pod/busybox1 - - # Wait for the pod "busybox1" to contain the status phase to be "Running". 
- oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 - - # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command - oc delete pod/busybox1 - oc wait --for=delete pod/busybox1 --timeout=60s ----- - - - -== oc whoami -Return information about the current session - -.Example usage -[source,bash,options="nowrap"] ----- - # Display the currently authenticated user - oc whoami ----- - - diff --git a/modules/oc-compliance-fetching-compliance-remediation-details.adoc b/modules/oc-compliance-fetching-compliance-remediation-details.adoc deleted file mode 100644 index 128365ec5944..000000000000 --- a/modules/oc-compliance-fetching-compliance-remediation-details.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc - -:_content-type: PROCEDURE -[id="fetching-compliance-remediation-details_{context}"] -= Fetching compliance remediation details - -The Compliance Operator provides remediation objects that are used to automate the changes required to make the cluster compliant. The `fetch-fixes` subcommand can help you understand exactly which configuration remediations are used. Use the `fetch-fixes` subcommand to extract the remediation objects from a profile, rule, or `ComplianceRemediation` object into a directory to inspect. - -.Procedure - -. View the remediations for a profile: -+ -[source,terminal] ----- -$ oc compliance fetch-fixes profile ocp4-cis -o /tmp ----- -+ -.Example output -[source,terminal] ----- -No fixes to persist for rule 'ocp4-api-server-api-priority-flowschema-catch-all' <1> -No fixes to persist for rule 'ocp4-api-server-api-priority-gate-enabled' -No fixes to persist for rule 'ocp4-api-server-audit-log-maxbackup' -Persisted rule fix to /tmp/ocp4-api-server-audit-log-maxsize.yaml -No fixes to persist for rule 'ocp4-api-server-audit-log-path' -No fixes to persist for rule 'ocp4-api-server-auth-mode-no-aa' -No fixes to persist for rule 'ocp4-api-server-auth-mode-node' -No fixes to persist for rule 'ocp4-api-server-auth-mode-rbac' -No fixes to persist for rule 'ocp4-api-server-basic-auth' -No fixes to persist for rule 'ocp4-api-server-bind-address' -No fixes to persist for rule 'ocp4-api-server-client-ca' -Persisted rule fix to /tmp/ocp4-api-server-encryption-provider-cipher.yaml -Persisted rule fix to /tmp/ocp4-api-server-encryption-provider-config.yaml ----- -<1> The `No fixes to persist` warning is expected whenever there are rules in a profile that do not have a corresponding remediation, because either the rule cannot be remediated automatically or a remediation was not provided. - -. You can view a sample of the YAML file. The `head` command will show you the first 10 lines: -+ -[source,terminal] ----- -$ head /tmp/ocp4-api-server-audit-log-maxsize.yaml ----- -+ -.Example output -[source,terminal] ----- -apiVersion: config.openshift.io/v1 -kind: APIServer -metadata: - name: cluster -spec: - maximumFileSizeMegabytes: 100 ----- - -. 
View the remediation from a `ComplianceRemediation` object created after a scan:
-+
-[source,terminal]
-----
-$ oc get complianceremediations -n openshift-compliance
-----
-+
-.Example output
-[source,terminal]
-----
-NAME                                             STATE
-ocp4-cis-api-server-encryption-provider-cipher   NotApplied
-ocp4-cis-api-server-encryption-provider-config   NotApplied
-----
-+
-[source,terminal]
-----
-$ oc compliance fetch-fixes complianceremediations ocp4-cis-api-server-encryption-provider-cipher -o /tmp
-----
-+
-.Example output
-[source,terminal]
-----
-Persisted compliance remediation fix to /tmp/ocp4-cis-api-server-encryption-provider-cipher.yaml
-----
-
-. You can view a sample of the YAML file. The `head` command shows the first 10 lines:
-+
-[source,terminal]
-----
-$ head /tmp/ocp4-cis-api-server-encryption-provider-cipher.yaml
-----
-+
-.Example output
-[source,terminal]
-----
-apiVersion: config.openshift.io/v1
-kind: APIServer
-metadata:
-  name: cluster
-spec:
-  encryption:
-    type: aescbc
-----
-
-[WARNING]
-====
-Use caution before applying remediations directly. Some remediations might not be applicable in bulk, such as the usbguard rules in the moderate profile. In these cases, allow the Compliance Operator to apply the rules because it addresses the dependencies and ensures that the cluster remains in a good state.
-====
diff --git a/modules/oc-compliance-fetching-raw-results.adoc b/modules/oc-compliance-fetching-raw-results.adoc
deleted file mode 100644
index 7fa44f9e00e3..000000000000
--- a/modules/oc-compliance-fetching-raw-results.adoc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Module included in the following assemblies:
-//
-// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc
-
-:_content-type: PROCEDURE
-[id="fetching-raw-results_{context}"]
-= Fetching raw results
-
-When a compliance scan finishes, the results of the individual checks are listed in the resulting `ComplianceCheckResult` custom resource (CR). However, an administrator or auditor might require the complete details of the scan. The OpenSCAP tool creates an Advanced Recording Format (ARF) formatted file with the detailed results. This ARF file is too large to store in a config map or other standard Kubernetes resource, so a persistent volume (PV) is created to contain it.
-
-.Procedure
-
-* Fetching the results from the PV with the Compliance Operator is a four-step process. However, with the `oc-compliance` plugin, you can use a single command:
-+
-[source,terminal]
-----
-$ oc compliance fetch-raw <object-type> <object-name> -o <output-path>
-----
-+
-* `<object-type>` can be either `scansettingbinding`, `compliancescan`, or `compliancesuite`, depending on which of these objects the scans were launched with.
-* `<object-name>` is the name of the binding, suite, or scan object to gather the ARF file for, and `<output-path>` is the local directory to place the results.
-+
-For example:
-+
-[source,terminal]
-----
-$ oc compliance fetch-raw scansettingbindings my-binding -o /tmp/
-----
-+
-.Example output
-[source,terminal]
-----
-Fetching results for my-binding scans: ocp4-cis, ocp4-cis-node-worker, ocp4-cis-node-master
-Fetching raw compliance results for scan 'ocp4-cis'.......
-The raw compliance results are available in the following directory: /tmp/ocp4-cis
-Fetching raw compliance results for scan 'ocp4-cis-node-worker'...........
-The raw compliance results are available in the following directory: /tmp/ocp4-cis-node-worker
-Fetching raw compliance results for scan 'ocp4-cis-node-master'......
-The raw compliance results are available in the following directory: /tmp/ocp4-cis-node-master ----- - -View the list of files in the directory: - -[source,terminal] ----- -$ ls /tmp/ocp4-cis-node-master/ ----- - -.Example output -[source,terminal] ----- -ocp4-cis-node-master-ip-10-0-128-89.ec2.internal-pod.xml.bzip2 ocp4-cis-node-master-ip-10-0-150-5.ec2.internal-pod.xml.bzip2 ocp4-cis-node-master-ip-10-0-163-32.ec2.internal-pod.xml.bzip2 ----- - -Extract the results: - -[source,terminal] ----- -$ bunzip2 -c resultsdir/worker-scan/worker-scan-stage-459-tqkg7-compute-0-pod.xml.bzip2 > resultsdir/worker-scan/worker-scan-ip-10-0-170-231.us-east-2.compute.internal-pod.xml ----- - -View the results: -[source,terminal] ----- -$ ls resultsdir/worker-scan/ ----- - -.Example output -[source,terminal] ----- -worker-scan-ip-10-0-170-231.us-east-2.compute.internal-pod.xml -worker-scan-stage-459-tqkg7-compute-0-pod.xml.bzip2 -worker-scan-stage-459-tqkg7-compute-1-pod.xml.bzip2 ----- diff --git a/modules/oc-compliance-installing.adoc b/modules/oc-compliance-installing.adoc deleted file mode 100644 index 589a1e69e1b2..000000000000 --- a/modules/oc-compliance-installing.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc - -:_content-type: PROCEDURE -[id="installing-oc-compliance_{context}"] -= Installing the oc-compliance plugin - -.Procedure - -. Extract the `oc-compliance` image to get the `oc-compliance` binary: -+ -[source,terminal] ----- -$ podman run --rm -v ~/.local/bin:/mnt/out:Z registry.redhat.io/compliance/oc-compliance-rhel8:stable /bin/cp /usr/bin/oc-compliance /mnt/out/ ----- -+ -.Example output -+ -[source,terminal] ----- -W0611 20:35:46.486903 11354 manifest.go:440] Chose linux/amd64 manifest from the manifest list. ----- -+ -You can now run `oc-compliance`. diff --git a/modules/oc-compliance-printing-controls.adoc b/modules/oc-compliance-printing-controls.adoc deleted file mode 100644 index 418e9cec9e87..000000000000 --- a/modules/oc-compliance-printing-controls.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc - -:_content-type: PROCEDURE -[id="printing-controls_{context}"] -= Printing controls - -Compliance standards are generally organized into a hierarchy as follows: - -* A benchmark is the top-level definition of a set of controls for a particular standard. For example, FedRAMP Moderate or Center for Internet Security (CIS) v.1.6.0. -* A control describes a family of requirements that must be met in order to be in compliance with the benchmark. For example, FedRAMP AC-01 (access control policy and procedures). -* A rule is a single check that is specific for the system being brought into compliance, and one or more of these rules map to a control. -* The Compliance Operator handles the grouping of rules into a profile for a single benchmark. It can be difficult to determine which controls that the set of rules in a profile satisfy. 
-
-.Procedure
-
-* The `oc compliance controls` subcommand provides a report of the standards and controls that a given profile satisfies:
-+
-[source,terminal]
-----
-$ oc compliance controls profile ocp4-cis-node
-----
-+
-.Example output
-[source,terminal]
-----
-+-----------+----------+
-| FRAMEWORK | CONTROLS |
-+-----------+----------+
-| CIS-OCP   | 1.1.1    |
-+           +----------+
-|           | 1.1.10   |
-+           +----------+
-|           | 1.1.11   |
-+           +----------+
-...
-----
diff --git a/modules/oc-compliance-rerunning-scans.adoc b/modules/oc-compliance-rerunning-scans.adoc
deleted file mode 100644
index 96472183b5bf..000000000000
--- a/modules/oc-compliance-rerunning-scans.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Module included in the following assemblies:
-//
-// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc
-
-:_content-type: PROCEDURE
-[id="re-running-scans_{context}"]
-= Re-running scans
-
-Although it is possible to run scans as scheduled jobs, you might often need to re-run a scan on demand, particularly after remediations are applied or when other changes are made to the cluster.
-
-.Procedure
-
-* Rerunning a scan with the Compliance Operator requires the use of an annotation on the scan object. However, with the `oc-compliance` plugin you can rerun a scan with a single command. Enter the following command to rerun the scans for the `ScanSettingBinding` object named `my-binding`:
-+
-[source,terminal]
-----
-$ oc compliance rerun-now scansettingbindings my-binding
-----
-+
-.Example output
-[source,terminal]
-----
-Rerunning scans from 'my-binding': ocp4-cis
-Re-running scan 'openshift-compliance/ocp4-cis'
-----
diff --git a/modules/oc-compliance-using-scan-setting-bindings.adoc b/modules/oc-compliance-using-scan-setting-bindings.adoc
deleted file mode 100644
index edbb92e3761e..000000000000
--- a/modules/oc-compliance-using-scan-setting-bindings.adoc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Module included in the following assemblies:
-//
-// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc
-
-:_content-type: PROCEDURE
-[id="using-scan-setting-bindings_{context}"]
-= Using ScanSettingBinding custom resources
-
-When using the `ScanSetting` and `ScanSettingBinding` custom resources (CRs) that the Compliance Operator provides, it is possible to run scans for multiple profiles while using a common set of scan options, such as `schedule`, `machine roles`, `tolerations`, and so on. While that is easier than working with multiple `ComplianceSuite` or `ComplianceScan` objects, it can confuse new users.
-
-The `oc compliance bind` subcommand helps you create a `ScanSettingBinding` CR.
-
-.Procedure
-
-. Run:
-+
-[source,terminal]
-----
-$ oc compliance bind [--dry-run] -N <binding-name> [-S <scan-setting-name>] <object-type>/<object-name> [..<object-type>/<object-name>]
-----
-+
-* If you omit the `-S` flag, the `default` scan setting provided by the Compliance Operator is used.
-* The object type (`<object-type>`) is the Kubernetes object type, which can be `profile` or `tailoredprofile`. More than one object can be provided.
-* The object name (`<object-name>`) is the name of the Kubernetes resource, such as `.metadata.name`.
-* Add the `--dry-run` option to display the YAML file of the objects that are created.
-+ -For example, given the following profiles and scan settings: -+ -[source,terminal] ----- -$ oc get profile.compliance -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -ocp4-cis 9m54s -ocp4-cis-node 9m54s -ocp4-e8 9m54s -ocp4-moderate 9m54s -ocp4-ncp 9m54s -rhcos4-e8 9m54s -rhcos4-moderate 9m54s -rhcos4-ncp 9m54s -rhcos4-ospp 9m54s -rhcos4-stig 9m54s ----- -+ -[source,terminal] ----- -$ oc get scansettings -n openshift-compliance ----- -+ -.Example output -[source,terminal] ----- -NAME AGE -default 10m -default-auto-apply 10m ----- - -. To apply the `default` settings to the `ocp4-cis` and `ocp4-cis-node` profiles, run: -+ -[source,terminal] ----- -$ oc compliance bind -N my-binding profile/ocp4-cis profile/ocp4-cis-node ----- -+ -.Example output -[source,terminal] ----- -Creating ScanSettingBinding my-binding ----- -+ -Once the `ScanSettingBinding` CR is created, the bound profile begins scanning for both profiles with the related settings. Overall, this is the fastest way to begin scanning with the Compliance Operator. diff --git a/modules/oc-compliance-viewing-compliance-check-result-details.adoc b/modules/oc-compliance-viewing-compliance-check-result-details.adoc deleted file mode 100644 index 0f9b65c57a72..000000000000 --- a/modules/oc-compliance-viewing-compliance-check-result-details.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * security/oc_compliance_plug_in/oc-compliance-plug-in-using.adoc - -:_content-type: PROCEDURE -[id="viewing-compliance-remediation-details_{context}"] -= Viewing ComplianceCheckResult object details - -When scans are finished running, `ComplianceCheckResult` objects are created for the individual scan rules. The `view-result` subcommand provides a human-readable output of the `ComplianceCheckResult` object details. - -.Procedure - -* Run: -+ -[source,terminal] ----- -$ oc compliance view-result ocp4-cis-scheduler-no-bind-address ----- diff --git a/modules/oc-mirror-about.adoc b/modules/oc-mirror-about.adoc deleted file mode 100644 index 348c1ba13bbf..000000000000 --- a/modules/oc-mirror-about.adoc +++ /dev/null @@ -1,26 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: CONCEPT -[id="installation-oc-mirror-about_{context}"] -= About the oc-mirror plugin - -You can use the oc-mirror OpenShift CLI (`oc`) plugin to mirror all required {product-title} content and other images to your mirror registry by using a single tool. It provides the following features: - -* Provides a centralized method to mirror {product-title} releases, Operators, helm charts, and other images. -* Maintains update paths for {product-title} and Operators. -* Uses a declarative image set configuration file to include only the {product-title} releases, Operators, and images that your cluster needs. -* Performs incremental mirroring, which reduces the size of future image sets. -* Prunes images from the target mirror registry that were excluded from the image set configuration since the previous execution. -* Optionally generates supporting artifacts for OpenShift Update Service (OSUS) usage. - -When using the oc-mirror plugin, you specify which content to mirror in an image set configuration file. 
In this YAML file, you can fine-tune the configuration to only include the {product-title} releases and Operators that your cluster needs. This reduces the amount of data that you need to download and transfer. The oc-mirror plugin can also mirror arbitrary helm charts and additional container images to assist users in seamlessly synchronizing their workloads onto mirror registries. - -The first time you run the oc-mirror plugin, it populates your mirror registry with the required content to perform your disconnected cluster installation or update. In order for your disconnected cluster to continue receiving updates, you must keep your mirror registry updated. To update your mirror registry, you run the oc-mirror plugin using the same configuration as the first time you ran it. The oc-mirror plugin references the metadata from the storage backend and only downloads what has been released since the last time you ran the tool. This provides update paths for {product-title} and Operators and performs dependency resolution as required. - -[IMPORTANT] -==== -When using the oc-mirror CLI plugin to populate a mirror registry, any further updates to the mirror registry must be made using the oc-mirror tool. -==== diff --git a/modules/oc-mirror-command-reference.adoc b/modules/oc-mirror-command-reference.adoc deleted file mode 100644 index 228849fbb935..000000000000 --- a/modules/oc-mirror-command-reference.adoc +++ /dev/null @@ -1,118 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: REFERENCE -[id="oc-mirror-command-reference_{context}"] -= Command reference for oc-mirror - -The following tables describe the `oc mirror` subcommands and flags: - -.oc mirror subcommands -[cols="1,2",options="header"] -|=== -|Subcommand -|Description - -|`completion` -|Generate the autocompletion script for the specified shell. - -|`describe` -|Output the contents of an image set. - -|`help` -|Show help about any subcommand. - -|`init` -|Output an initial image set configuration template. - -|`list` -|List available platform and Operator content and their version. - -|`version` -|Output the oc-mirror version. - -|=== - -.oc mirror flags -[cols="1,2",options="header"] -|=== -|Flag -|Description - -|`-c`, `--config` `` -|Specify the path to an image set configuration file. - -|`--continue-on-error` -|If any non image-pull related error occurs, continue and attempt to mirror as much as possible. - -|`--dest-skip-tls` -|Disable TLS validation for the target registry. - -|`--dest-use-http` -|Use plain HTTP for the target registry. - -|`--dry-run` -|Print actions without mirroring images. Generates `mapping.txt` and `pruning-plan.json` files. - -|`--from ` -|Specify the path to an image set archive that was generated by an execution of oc-mirror to load into a target registry. - -|`-h`, `--help` -|Show the help. - -|`--ignore-history` -|Ignore past mirrors when downloading images and packing layers. Disables incremental mirroring and might download more data. - -|`--include-local-oci-catalogs` -|Enable mirroring for local OCI catalogs on disk to the target mirror registry. - -|`--manifests-only` -|Generate manifests for `ImageContentSourcePolicy` objects to configure a cluster to use the mirror registry, but do not actually mirror any images. To use this flag, you must pass in an image set archive with the `--from` flag. 
- -|`--max-nested-paths ` -|Specify the maximum number of nested paths for destination registries that limit nested paths. The default is `0`. - -|`--max-per-registry ` -|Specify the number of concurrent requests allowed per registry. The default is `6`. - -|`--oci-insecure-signature-policy` -|Do not push signatures when mirroring local OCI catalogs (with `--include-local-oci-catalogs`). - -|`--oci-registries-config` -|Provide a registries configuration file to specify an alternative registry location to copy from when mirroring local OCI catalogs (with `--include-local-oci-catalogs`). - -|`--skip-cleanup` -|Skip removal of artifact directories. - -|`--skip-image-pin` -|Do not replace image tags with digest pins in Operator catalogs. - -|`--skip-metadata-check` -|Skip metadata when publishing an image set. This is only recommended when the image set was created with `--ignore-history`. - -|`--skip-missing` -|If an image is not found, skip it instead of reporting an error and aborting execution. Does not apply to custom images explicitly specified in the image set configuration. - -|`--skip-pruning` -|Disable automatic pruning of images from the target mirror registry. - -|`--skip-verification` -|Skip digest verification. - -|`--source-skip-tls` -|Disable TLS validation for the source registry. - -|`--source-use-http` -|Use plain HTTP for the source registry. - -|`--use-oci-feature` -|Enable mirroring for local OCI catalogs on disk to the target mirror registry. - -The `--use-oci-feature` flag is deprecated. Use the `--include-local-oci-catalogs` flag instead. - -|`-v`, `--verbose` `` -|Specify the number for the log level verbosity. Valid values are `0` - `9`. The default is `0`. - -|=== diff --git a/modules/oc-mirror-creating-image-set-config.adoc b/modules/oc-mirror-creating-image-set-config.adoc deleted file mode 100644 index 3e744dbaa821..000000000000 --- a/modules/oc-mirror-creating-image-set-config.adoc +++ /dev/null @@ -1,74 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-creating-image-set-config_{context}"] -= Creating the image set configuration - -Before you can use the oc-mirror plugin to mirror image sets, you must create an image set configuration file. This image set configuration file defines which {product-title} releases, Operators, and other images to mirror, along with other configuration settings for the oc-mirror plugin. - -You must specify a storage backend in the image set configuration file. This storage backend can be a local directory or a registry that supports link:https://docs.docker.com/registry/spec/manifest-v2-2[Docker v2-2]. The oc-mirror plugin stores metadata in this storage backend during image set creation. - -[IMPORTANT] -==== -Do not delete or modify the metadata that is generated by the oc-mirror plugin. You must use the same storage backend every time you run the oc-mirror plugin for the same mirror registry. -==== - -.Prerequisites - -* You have created a container image registry credentials file. For instructions, see _Configuring credentials that allow images to be mirrored_. - -.Procedure - -. 
Use the `oc mirror init` command to create a template for the image set configuration and save it to a file called `imageset-config.yaml`: -+ -[source,terminal] ----- -$ oc mirror init --registry example.com/mirror/oc-mirror-metadata > imageset-config.yaml <1> ----- -<1> Replace `example.com/mirror/oc-mirror-metadata` with the location of your registry for the storage backend. - -. Edit the file and adjust the settings as necessary: -+ -[source,yaml] ----- -kind: ImageSetConfiguration -apiVersion: mirror.openshift.io/v1alpha2 -archiveSize: 4 <1> -storageConfig: <2> - registry: - imageURL: example.com/mirror/oc-mirror-metadata <3> - skipTLS: false -mirror: - platform: - channels: - - name: stable-4.13 <4> - type: ocp - graph: true <5> - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 <6> - packages: - - name: serverless-operator <7> - channels: - - name: stable <8> - additionalImages: - - name: registry.redhat.io/ubi9/ubi:latest <9> - helm: {} ----- -<1> Add `archiveSize` to set the maximum size, in GiB, of each file within the image set. -<2> Set the back-end location to save the image set metadata to. This location can be a registry or local directory. It is required to specify `storageConfig` values. -<3> Set the registry URL for the storage backend. -<4> Set the channel to retrieve the {product-title} images from. -<5> Add `graph: true` to build and push the graph-data image to the mirror registry. The graph-data image is required to create OpenShift Update Service (OSUS). The `graph: true` field also generates the `UpdateService` custom resource manifest. The `oc` command-line interface (CLI) can use the `UpdateService` custom resource manifest to create OSUS. For more information, see _About the OpenShift Update Service_. -<6> Set the Operator catalog to retrieve the {product-title} images from. -<7> Specify only certain Operator packages to include in the image set. Remove this field to retrieve all packages in the catalog. -<8> Specify only certain channels of the Operator packages to include in the image set. You must always include the default channel for the Operator package even if you do not use the bundles in that channel. You can find the default channel by running the following command: `oc mirror list operators --catalog= --package=`. -<9> Specify any additional images to include in image set. -+ -See _Image set configuration parameters_ for the full list of parameters and _Image set configuration examples_ for various mirroring use cases. - -. Save the updated file. -+ -This image set configuration file is required by the `oc mirror` command when mirroring content. diff --git a/modules/oc-mirror-differential-updates.adoc b/modules/oc-mirror-differential-updates.adoc deleted file mode 100644 index 2c11d7e404a4..000000000000 --- a/modules/oc-mirror-differential-updates.adoc +++ /dev/null @@ -1,36 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-differential-updates_{context}"] -= Updating your mirror registry content - -After you publish the initial image set to the mirror registry, you can use the oc-mirror plugin to keep your disconnected clusters updated. 
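For example, an update run typically reuses the same image set configuration file and target mirror registry as the initial run. The following is a minimal sketch, assuming the `imageset-config.yaml` file and `registry.example:5000` registry used in the other examples in this document:

[source,terminal]
----
$ oc mirror --config=./imageset-config.yaml \
  docker://registry.example:5000
----

Because the oc-mirror plugin reads the metadata that the previous run stored in the storage backend, only the images released since that run are downloaded and mirrored.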
- -Depending on your image set configuration, oc-mirror automatically detects newer releases of {product-title} and your selected Operators that have been released after you completed the initial mirror. It is recommended to run oc-mirror at regular intervals, for example in a nightly cron job, to receive product and security updates on a timely basis. - -.Prerequisites - -* You have used the oc-mirror plugin to mirror the initial image set to your mirror registry. -* You have access to the storage backend that was used for the initial execution of the oc-mirror plugin. -+ -[NOTE] -==== -You must use the same storage backend as the initial execution of oc-mirror for the same mirror registry. Do not delete or modify the metadata image that is generated by the oc-mirror plugin. -==== - -.Procedure - -. If necessary, update your image set configuration file to pick up new {product-title} and Operator versions. See _Image set configuration examples_ for example mirroring use cases. - -. Follow the same steps that you used to mirror your initial image set to the mirror registry. For instructions, see _Mirroring an image set in a partially disconnected environment_ or _Mirroring an image set in a fully disconnected environment_. -+ -[IMPORTANT] -==== -* You must provide the same storage backend so that only a differential image set is created and mirrored. -* If you specified a top-level namespace for the mirror registry during the initial image set creation, then you must use this same namespace every time you run the oc-mirror plugin for the same mirror registry. -==== - -. Configure your cluster to use the resources generated by oc-mirror. diff --git a/modules/oc-mirror-disk-to-mirror.adoc b/modules/oc-mirror-disk-to-mirror.adoc deleted file mode 100644 index 505f39ce8949..000000000000 --- a/modules/oc-mirror-disk-to-mirror.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-disk-to-mirror_{context}"] -= Mirroring from disk to mirror - -You can use the oc-mirror plugin to mirror the contents of a generated image set to the target mirror registry. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`) in the disconnected environment. -* You have installed the `oc-mirror` CLI plugin in the disconnected environment. -* You have generated the image set file by using the `oc mirror` command. -* You have transferred the image set file to the disconnected environment. -// TODO: Confirm prereq about not needing a cluster, but need pull secret misc - -.Procedure - -* Run the `oc mirror` command to process the image set file on disk and mirror the contents to a target mirror registry: -+ -[source,terminal] ----- -$ oc mirror --from=./mirror_seq1_000000.tar \// <1> - docker://registry.example:5000 <2> ----- -<1> Pass in the image set .tar file to mirror, named `mirror_seq1_000000.tar` in this example. If an `archiveSize` value was specified in the image set configuration file, the image set might be broken up into multiple .tar files. In this situation, you can pass in a directory that contains the image set .tar files. -<2> Specify the registry to mirror the image set file to. The registry must start with `docker://`. If you specify a top-level namespace for the mirror registry, you must also use this same namespace on subsequent executions.
-+ -This command updates the mirror registry with the image set and generates the `ImageContentSourcePolicy` and `CatalogSource` resources. - -.Verification - -. Navigate into the `oc-mirror-workspace/` directory that was generated. -. Navigate into the results directory, for example, `results-1639608409/`. -. Verify that YAML files are present for the `ImageContentSourcePolicy` and `CatalogSource` resources. -+ -// TODO: Test and get some better wording/example output. - -.Next steps - -* Configure your cluster to use the resources generated by oc-mirror. diff --git a/modules/oc-mirror-dry-run.adoc b/modules/oc-mirror-dry-run.adoc deleted file mode 100644 index af159838d3ef..000000000000 --- a/modules/oc-mirror-dry-run.adoc +++ /dev/null @@ -1,69 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-dry-run_{context}"] -= Performing a dry run - -You can use oc-mirror to perform a dry run, without actually mirroring any images. This allows you to review the list of images that would be mirrored, as well as any images that would be pruned from the mirror registry. It also allows you to catch any errors with your image set configuration early or use the generated list of images with other tools to carry out the mirroring operation. - -.Prerequisites - -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* You have installed the `oc-mirror` CLI plugin. -* You have created the image set configuration file. - -.Procedure - -. Run the `oc mirror` command with the `--dry-run` flag to perform a dry run: -+ -[source,terminal] ----- -$ oc mirror --config=./imageset-config.yaml \// <1> - docker://registry.example:5000 \// <2> - --dry-run <3> ----- -<1> Pass in the image set configuration file that was created. This procedure assumes that it is named `imageset-config.yaml`. -<2> Specify the mirror registry. Nothing is mirrored to this registry as long as you use the `--dry-run` flag. -<3> Use the `--dry-run` flag to generate the dry run artifacts and not an actual image set file. -+ -.Example output -[source,terminal] ----- -Checking push permissions for registry.example:5000 -Creating directory: oc-mirror-workspace/src/publish -Creating directory: oc-mirror-workspace/src/v2 -Creating directory: oc-mirror-workspace/src/charts -Creating directory: oc-mirror-workspace/src/release-signatures -No metadata detected, creating new workspace -wrote mirroring manifests to oc-mirror-workspace/operators.1658342351/manifests-redhat-operator-index - -... - -info: Planning completed in 31.48s -info: Dry run complete -Writing image mapping to oc-mirror-workspace/mapping.txt ----- - -. Navigate into the workspace directory that was generated: -+ -[source,terminal] ----- -$ cd oc-mirror-workspace/ ----- - -. Review the `mapping.txt` file that was generated. -+ -This file contains a list of all images that would be mirrored. - -. Review the `pruning-plan.json` file that was generated. -+ -This file contains a list of all images that would be pruned from the mirror registry when the image set is published. -+ -[NOTE] -==== -The `pruning-plan.json` file is only generated if your oc-mirror command points to your mirror registry and there are images to be pruned. 
-==== diff --git a/modules/oc-mirror-image-set-config-examples.adoc b/modules/oc-mirror-image-set-config-examples.adoc deleted file mode 100644 index 6193a193baf1..000000000000 --- a/modules/oc-mirror-image-set-config-examples.adoc +++ /dev/null @@ -1,246 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: REFERENCE -[id="oc-mirror-image-set-examples_{context}"] -= Image set configuration examples - -The following `ImageSetConfiguration` file examples show the configuration for various mirroring use cases. - -// Moved to first; unchanged -[discrete] -[id="oc-mirror-image-set-examples-shortest-upgrade-path_{context}"] -== Use case: Including the shortest {product-title} update path - -The following `ImageSetConfiguration` file uses a local storage backend and includes all {product-title} versions along the shortest update path from the minimum version of `4.11.37` to the maximum version of `4.12.15`. - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - local: - path: /home/user/metadata -mirror: - platform: - channels: - - name: stable-4.12 - minVersion: 4.11.37 - maxVersion: 4.12.15 - shortestPath: true ----- - -// Moved to second; unchanged -[discrete] -[id="oc-mirror-image-set-examples-minimum-to-latest_{context}"] -== Use case: Including all versions of {product-title} from a minimum to the latest - -The following `ImageSetConfiguration` file uses a registry storage backend and includes all {product-title} versions starting at a minimum version of `4.10.10` to the latest version in the channel. - -On every invocation of oc-mirror with this image set configuration, the latest release of the `stable-4.10` channel is evaluated, so running oc-mirror at regular intervals ensures that you automatically receive the latest releases of {product-title} images. - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - registry: - imageURL: example.com/mirror/oc-mirror-metadata - skipTLS: false -mirror: - platform: - channels: - - name: stable-4.10 - minVersion: 4.10.10 ----- - -// Updated: -// - Added a note below about the maxVersion -// - Added a note about not necessarily getting all versions in the range -[discrete] -[id="oc-mirror-image-set-examples-operator-versions_{context}"] -== Use case: Including Operator versions from a minimum to the latest - -The following `ImageSetConfiguration` file uses a local storage backend and includes only the Red Hat Advanced Cluster Security for Kubernetes Operator, versions starting at 4.0.1 and later in the `stable` channel. - -[NOTE] -==== -When you specify a minimum or maximum version range, you might not receive all Operator versions in that range. - -By default, oc-mirror excludes any versions that are skipped or replaced by a newer version in the Operator Lifecycle Manager (OLM) specification. Operator versions that are skipped might be affected by a CVE or contain bugs. Use a newer version instead. For more information on skipped and replaced versions, see link:https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/[Creating an update graph with OLM]. 
- -To receive all Operator versions in a specified range, you can set the `mirror.operators.full` field to `true`. -==== - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - local: - path: /home/user/metadata -mirror: - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - packages: - - name: rhacs-operator - channels: - - name: stable - minVersion: 4.0.1 ----- - -[NOTE] -==== -To specify a maximum version instead of the latest, set the `mirror.operators.packages.channels.maxVersion` field. -==== - -[discrete] -[id="oc-mirror-image-set-examples-nutanix-operator_{context}"] -== Use case: Including the Nutanix CSI Operator -The following `ImageSetConfiguration` file uses a local storage backend and includes the Nutanix CSI Operator, the OpenShift Update Service (OSUS) graph image, and an additional Red Hat Universal Base Image (UBI). - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -kind: ImageSetConfiguration -apiVersion: mirror.openshift.io/v1alpha2 -storageConfig: - registry: - imageURL: mylocalregistry/ocp-mirror/openshift4 - skipTLS: false -mirror: - platform: - channels: - - name: stable-4.11 - type: ocp - graph: true - operators: - - catalog: registry.redhat.io/redhat/certified-operator-index:v4.11 - packages: - - name: nutanixcsioperator - channels: - - name: stable - additionalImages: - - name: registry.redhat.io/ubi9/ubi:latest ----- - -// New example; including the default channel -[discrete] -[id="oc-mirror-image-set-examples-default-channel_{context}"] -== Use case: Including the default Operator channel - -The following `ImageSetConfiguration` file includes the `stable-5.7` and `stable` channels for the OpenShift Elasticsearch Operator. Even if only the packages from the `stable-5.7` channel are needed, the `stable` channel must also be included in the `ImageSetConfiguration` file, because it is the default channel for the Operator. You must always include the default channel for the Operator package even if you do not use the bundles in that channel. - -[TIP] -==== -You can find the default channel by running the following command: `oc mirror list operators --catalog= --package=`. -==== - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - registry: - imageURL: example.com/mirror/oc-mirror-metadata - skipTLS: false -mirror: - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - packages: - - name: elasticsearch-operator - channels: - - name: stable-5.7 - - name: stable ----- - -// New example; Entire catalog; all versions -[discrete] -[id="oc-mirror-image-set-examples-entire-catalog-full_{context}"] -== Use case: Including an entire catalog (all versions) - -The following `ImageSetConfiguration` file sets the `mirror.operators.full` field to `true` to include all versions for an entire Operator catalog. 
- -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - registry: - imageURL: example.com/mirror/oc-mirror-metadata - skipTLS: false -mirror: - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - full: true ----- - -// New example; Entire catalog; heads only -// - Included 'targetCatalog' in example -[discrete] -[id="oc-mirror-image-set-examples-entire-catalog-heads_{context}"] -== Use case: Including an entire catalog (channel heads only) - -The following `ImageSetConfiguration` file includes the channel heads for an entire Operator catalog. - -By default, for each Operator in the catalog, oc-mirror includes the latest Operator version (channel head) from the default channel. If you want to mirror all Operator versions, and not just the channel heads, you must set the `mirror.operators.full` field to `true`. - -This example also uses the `targetCatalog` field to specify an alternative namespace and name to mirror the catalog as. - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -storageConfig: - registry: - imageURL: example.com/mirror/oc-mirror-metadata - skipTLS: false -mirror: - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - targetCatalog: my-namespace/my-operator-catalog ----- - -// Moved to last; unchanged -[discrete] -[id="oc-mirror-image-set-examples-helm_{context}"] -== Use case: Including arbitrary images and helm charts - -The following `ImageSetConfiguration` file uses a registry storage backend and includes helm charts and an additional Red Hat Universal Base Image (UBI). - -.Example `ImageSetConfiguration` file -[source,yaml] ----- -apiVersion: mirror.openshift.io/v1alpha2 -kind: ImageSetConfiguration -archiveSize: 4 -storageConfig: - registry: - imageURL: example.com/mirror/oc-mirror-metadata - skipTLS: false -mirror: - platform: - architectures: - - "s390x" - channels: - - name: stable-4.13 - operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - helm: - repositories: - - name: redhat-helm-charts - url: https://raw.githubusercontent.com/redhat-developer/redhat-helm-charts/master - charts: - - name: ibm-mongodb-enterprise-helm - version: 0.2.0 - additionalImages: - - name: registry.redhat.io/ubi9/ubi:latest ----- diff --git a/modules/oc-mirror-imageset-config-params.adoc b/modules/oc-mirror-imageset-config-params.adoc deleted file mode 100644 index 63a91b2dfa06..000000000000 --- a/modules/oc-mirror-imageset-config-params.adoc +++ /dev/null @@ -1,271 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: REFERENCE -[id="oc-mirror-imageset-config-params_{context}"] -= Image set configuration parameters - -The oc-mirror plugin requires an image set configuration file that defines what images to mirror. The following table lists the available parameters for the `ImageSetConfiguration` resource. - -// TODO: Consider adding examples for the general "Object" params - -.`ImageSetConfiguration` parameters -[cols="2,2a,1a",options="header"] -|=== -|Parameter -|Description -|Values - -|`apiVersion` -|The API version for the `ImageSetConfiguration` content. -|String. For example: `mirror.openshift.io/v1alpha2`. 
- -|`archiveSize` -|The maximum size, in GiB, of each archive file within the image set. -|Integer. For example: `4` - -|`mirror` -|The configuration of the image set. -|Object - -|`mirror.additionalImages` -|The additional images configuration of the image set. -|Array of objects. For example: - -[source,yaml] ----- -additionalImages: - - name: registry.redhat.io/ubi8/ubi:latest ----- - -|`mirror.additionalImages.name` -|The tag or digest of the image to mirror. -|String. For example: `registry.redhat.io/ubi8/ubi:latest` - -|`mirror.blockedImages` -|The full tag, digest, or pattern of images to block from mirroring. -|Array of strings. For example: `docker.io/library/alpine` - -|`mirror.helm` -|The helm configuration of the image set. Note that the oc-mirror plugin supports only helm charts that do not require user input when rendered. -|Object - -|`mirror.helm.local` -|The local helm charts to mirror. -|Array of objects. For example: - -[source,yaml] ----- -local: - - name: podinfo - path: /test/podinfo-5.0.0.tar.gz ----- - -|`mirror.helm.local.name` -|The name of the local helm chart to mirror. -|String. For example: `podinfo`. - -|`mirror.helm.local.path` -|The path of the local helm chart to mirror. -|String. For example: `/test/podinfo-5.0.0.tar.gz`. - -|`mirror.helm.repositories` -|The remote helm repositories to mirror from. -|Array of objects. For example: - -[source,yaml] ----- -repositories: - - name: podinfo - url: https://example.github.io/podinfo - charts: - - name: podinfo - version: 5.0.0 ----- - -|`mirror.helm.repositories.name` -|The name of the helm repository to mirror from. -|String. For example: `podinfo`. - -|`mirror.helm.repositories.url` -|The URL of the helm repository to mirror from. -|String. For example: [x-]`https://example.github.io/podinfo`. - -|`mirror.helm.repositories.charts` -|The remote helm charts to mirror. -|Array of objects. - -|`mirror.helm.repositories.charts.name` -|The name of the helm chart to mirror. -|String. For example: `podinfo`. - -|`mirror.helm.repositories.charts.version` -|The version of the named helm chart to mirror. -|String. For example: `5.0.0`. - -|`mirror.operators` -|The Operators configuration of the image set. -|Array of objects. For example: - -[source,yaml] ----- -operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - packages: - - name: elasticsearch-operator - minVersion: '2.4.0' ----- - -|`mirror.operators.catalog` -|The Operator catalog to include in the image set. -|String. For example: `registry.redhat.io/redhat/redhat-operator-index:v4.13`. - -|`mirror.operators.full` -|When `true`, downloads the full catalog, Operator package, or Operator channel. -|Boolean. The default value is `false`. - -|`mirror.operators.packages` -|The Operator packages configuration. -|Array of objects. For example: - -[source,yaml] ----- -operators: - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 - packages: - - name: elasticsearch-operator - minVersion: '5.2.3-31' ----- - -|`mirror.operators.packages.name` -|The Operator package name to include in the image set -|String. For example: `elasticsearch-operator`. - -|`mirror.operators.packages.channels` -|The Operator package channel configuration. -|Object - -|`mirror.operators.packages.channels.name` -|The Operator channel name, unique within a package, to include in the image set. -|String. For example: `fast` or `stable-v4.13`. 
- -|`mirror.operators.packages.channels.maxVersion` -|The highest version of the Operator mirror across all channels in which it exists. -|String. For example: `5.2.3-31` - -|`mirror.operators.packages.channels.minBundle` -|The name of the minimum bundle to include, plus all bundles in the update graph to the channel head. Set this field only if the named bundle has no semantic version metadata. -|String. For example: `bundleName` - -|`mirror.operators.packages.channels.minVersion` -|The lowest version of the Operator to mirror across all channels in which it exists. -|String. For example: `5.2.3-31` - -|`mirror.operators.packages.maxVersion` -|The highest version of the Operator to mirror across all channels in which it exists. -|String. For example: `5.2.3-31`. - -|`mirror.operators.packages.minVersion` -|The lowest version of the Operator to mirror across all channels in which it exists. -|String. For example: `5.2.3-31`. - -|`mirror.operators.skipDependencies` -|If `true`, dependencies of bundles are not included. -|Boolean. The default value is `false`. - -|`mirror.operators.targetCatalog` -|An alternative name and optional namespace hierarchy to mirror the referenced catalog as. -|String. For example: `my-namespace/my-operator-catalog` - -|`mirror.operators.targetName` -|An alternative name to mirror the referenced catalog as. - -The `targetName` parameter is deprecated. Use the `targetCatalog` parameter instead. - -|String. For example: `my-operator-catalog` - -|`mirror.operators.targetTag` -|An alternative tag to append to the `targetName` or `targetCatalog`. -|String. For example: `v1` - -|`mirror.platform` -|The platform configuration of the image set. -|Object - -|`mirror.platform.architectures` -|The architecture of the platform release payload to mirror. -|Array of strings. For example: - -[source,yaml] ----- -architectures: - - amd64 - - arm64 ----- - -|`mirror.platform.channels` -|The platform channel configuration of the image set. -|Array of objects. For example: - -[source,yaml] ----- -channels: - - name: stable-4.10 - - name: stable-4.13 ----- - -|`mirror.platform.channels.full` -|When `true`, sets the `minVersion` to the first release in the channel and the `maxVersion` to the last release in the channel. -|Boolean. The default value is `false`. - -|`mirror.platform.channels.name` -|The name of the release channel. -|String. For example: `stable-4.13` - -|`mirror.platform.channels.minVersion` -|The minimum version of the referenced platform to be mirrored. -|String. For example: `4.12.6` - -|`mirror.platform.channels.maxVersion` -|The highest version of the referenced platform to be mirrored. -|String. For example: `4.13.1` - -|`mirror.platform.channels.shortestPath` -|Toggles shortest path mirroring or full range mirroring. -|Boolean. The default value is `false`. - -|`mirror.platform.channels.type` -|The type of the platform to be mirrored. -|String. For example: `ocp` or `okd`. The default is `ocp`. - -|`mirror.platform.graph` -|Indicates whether the OSUS graph is added to the image set and subsequently published to the mirror. -|Boolean. The default value is `false`. - -|`storageConfig` -|The back-end configuration of the image set. -|Object - -|`storageConfig.local` -|The local back-end configuration of the image set. -|Object - -|`storageConfig.local.path` -|The path of the directory to contain the image set metadata. -|String. For example: `./path/to/dir/`. - -|`storageConfig.registry` -|The registry back-end configuration of the image set. 
-|Object - -|`storageConfig.registry.imageURL` -|The back-end registry URI. Can optionally include a namespace reference in the URI. -|String. For example: `quay.io/myuser/imageset:metadata`. - -|`storageConfig.registry.skipTLS` -|Optionally skip TLS verification of the referenced back-end registry. -|Boolean. The default value is `false`. - -|=== diff --git a/modules/oc-mirror-installing-plugin.adoc b/modules/oc-mirror-installing-plugin.adoc deleted file mode 100644 index ba4cb13a7cc5..000000000000 --- a/modules/oc-mirror-installing-plugin.adoc +++ /dev/null @@ -1,57 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="installation-oc-mirror-installing-plugin_{context}"] -= Installing the oc-mirror OpenShift CLI plugin - -To use the oc-mirror OpenShift CLI plugin to mirror registry images, you must install the plugin. If you are mirroring image sets in a fully disconnected environment, ensure that you install the oc-mirror plugin on the host with internet access and the host in the disconnected environment with access to the mirror registry. - -.Prerequisites - -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -. Download the oc-mirror CLI plugin. - -.. Navigate to the link:https://console.redhat.com/openshift/downloads[Downloads] page of the {cluster-manager-url}. - -.. Under the *OpenShift disconnected installation tools* section, click *Download* for *OpenShift Client (oc) mirror plugin* and save the file. - -. Extract the archive: -+ -[source,terminal] ----- -$ tar xvzf oc-mirror.tar.gz ----- - -. If necessary, update the plugin file to be executable: -+ -[source,terminal] ----- -$ chmod +x oc-mirror ----- -+ -[NOTE] -==== -Do not rename the `oc-mirror` file. -==== - -. Install the oc-mirror CLI plugin by placing the file in your `PATH`, for example, `/usr/local/bin`: -+ -[source,terminal] ----- -$ sudo mv oc-mirror /usr/local/bin/. ----- - -.Verification - -* Run `oc mirror help` to verify that the plugin was successfully installed: -+ -[source,terminal] ----- -$ oc mirror help ----- diff --git a/modules/oc-mirror-mirror-to-disk.adoc b/modules/oc-mirror-mirror-to-disk.adoc deleted file mode 100644 index 0af7e2678620..000000000000 --- a/modules/oc-mirror-mirror-to-disk.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-mirror-to-disk_{context}"] -= Mirroring from mirror to disk - -You can use the oc-mirror plugin to generate an image set and save the contents to disk. The generated image set can then be transferred to the disconnected environment and mirrored to the target registry. - -[IMPORTANT] -==== -Depending on the configuration specified in the image set configuration file, using oc-mirror to mirror images might download several hundreds of gigabytes of data to disk. - -The initial image set download when you populate the mirror registry is often the largest. Because you only download the images that changed since the last time you ran the command, when you run the oc-mirror plugin again, the generated image set is often smaller. -==== - -You are required to specify a storage backend in the image set configuration file. 
This storage backend can be a local directory or a docker v2 registry. The oc-mirror plugin stores metadata in this storage backend during image set creation. - -[IMPORTANT] -==== -Do not delete or modify the metadata that is generated by the oc-mirror plugin. You must use the same storage backend every time you run the oc-mirror plugin for the same mirror registry. -==== - -.Prerequisites - -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* You have installed the `oc-mirror` CLI plugin. -* You have created the image set configuration file. -// TODO: Don't need a running cluster, but need some pull secrets. Sync w/ team on this - -.Procedure - -* Run the `oc mirror` command to mirror the images from the specified image set configuration to disk: -+ -[source,terminal] ----- -$ oc mirror --config=./imageset-config.yaml \// <1> - file:// <2> ----- -<1> Pass in the image set configuration file that was created. This procedure assumes that it is named `imageset-config.yaml`. -<2> Specify the target directory where you want to output the image set file. The target directory path must start with `file://`. - -.Verification - -. Navigate to your output directory: -+ -[source,terminal] ----- -$ cd ----- - -. Verify that an image set `.tar` file was created: -+ -[source,terminal] ----- -$ ls ----- -+ -.Example output -[source,text] ----- -mirror_seq1_000000.tar ----- - -.Next steps - -* Transfer the image set .tar file to the disconnected environment. diff --git a/modules/oc-mirror-mirror-to-mirror.adoc b/modules/oc-mirror-mirror-to-mirror.adoc deleted file mode 100644 index 7fea603a15a7..000000000000 --- a/modules/oc-mirror-mirror-to-mirror.adoc +++ /dev/null @@ -1,48 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-mirror-to-mirror_{context}"] -= Mirroring from mirror to mirror - -You can use the oc-mirror plugin to mirror an image set directly to a target mirror registry that is accessible during image set creation. - -You are required to specify a storage backend in the image set configuration file. This storage backend can be a local directory or a Docker v2 registry. The oc-mirror plugin stores metadata in this storage backend during image set creation. - -[IMPORTANT] -==== -Do not delete or modify the metadata that is generated by the oc-mirror plugin. You must use the same storage backend every time you run the oc-mirror plugin for the same mirror registry. -==== - -.Prerequisites - -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* You have installed the `oc-mirror` CLI plugin. -* You have created the image set configuration file. - -.Procedure - -* Run the `oc mirror` command to mirror the images from the specified image set configuration to a specified registry: -+ -[source,terminal] ----- -$ oc mirror --config=./imageset-config.yaml \// <1> - docker://registry.example:5000 <2> ----- -<1> Pass in the image set configuration file that was created. This procedure assumes that it is named `imageset-config.yaml`. -<2> Specify the registry to mirror the image set file to. The registry must start with `docker://`. 
If you specify a top-level namespace for the mirror registry, you must also use this same namespace on subsequent executions. - -.Verification - -. Navigate into the `oc-mirror-workspace/` directory that was generated. -. Navigate into the results directory, for example, `results-1639608409/`. -. Verify that YAML files are present for the `ImageContentSourcePolicy` and `CatalogSource` resources. -+ -// TODO: Test and get some better wording/example output. - -.Next steps - -* Configure your cluster to use the resources generated by oc-mirror. diff --git a/modules/oc-mirror-oci-format.adoc b/modules/oc-mirror-oci-format.adoc deleted file mode 100644 index 78b420a3d0ed..000000000000 --- a/modules/oc-mirror-oci-format.adoc +++ /dev/null @@ -1,105 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-oci-format_{context}"] -= Including local OCI Operator catalogs - -While mirroring {product-title} releases, Operator catalogs, and additional images from a registry to a partially disconnected cluster, you can include Operator catalog images from a local file-based catalog on disk. The local catalog must be in the Open Container Initiative (OCI) format. - -The local catalog and its contents are mirrored to your target mirror registry based on the filtering information in the image set configuration file. - -[IMPORTANT] -==== -When mirroring local OCI catalogs, any {product-title} releases or additional images that you want to mirror along with the local OCI-formatted catalog must be pulled from a registry. - -You cannot mirror OCI catalogs along with an oc-mirror image set file on disk. -==== - -One example use case for using the OCI feature is if you have a CI/CD system building an OCI catalog to a location on disk, and you want to mirror that OCI catalog along with an {product-title} release to your mirror registry. - -[NOTE] -==== -If you used the Technology Preview OCI local catalogs feature for the oc-mirror plugin for {product-title} 4.12, you can no longer use the OCI local catalogs feature of the oc-mirror plugin to copy a catalog locally and convert it to OCI format as a first step to mirroring to a fully disconnected cluster. -==== - -.Prerequisites - -* You have access to the internet to obtain the necessary container images. -* You have installed the OpenShift CLI (`oc`). -* You have installed the `oc-mirror` CLI plugin. - -.Procedure - -. Create the image set configuration file and adjust the settings as necessary. -+ -The following example image set configuration mirrors an OCI catalog on disk along with an {product-title} release and a UBI image from `registry.redhat.io`. -+ -[source,yaml] ----- -kind: ImageSetConfiguration -apiVersion: mirror.openshift.io/v1alpha2 -storageConfig: - local: - path: /home/user/metadata <1> -mirror: - platform: - channels: - - name: stable-4.13 <2> - type: ocp - graph: false - operators: - - catalog: oci:///home/user/oc-mirror/my-oci-catalog <3> - targetCatalog: my-namespace/redhat-operator-index <4> - packages: - - name: aws-load-balancer-operator - - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.13 <5> - packages: - - name: rhacs-operator - additionalImages: - - name: registry.redhat.io/ubi9/ubi:latest <6> ----- -<1> Set the back-end location to save the image set metadata to. This location can be a registry or local directory. 
It is required to specify `storageConfig` values. -<2> Optionally, include an {product-title} release to mirror from `registry.redhat.io`. -<3> Specify the absolute path to the location of the OCI catalog on disk. The path must start with `oci://` when using the OCI feature. -<4> Optionally, specify an alternative namespace and name to mirror the catalog as. -<5> Optionally, specify additional Operator catalogs to pull from a registry. -<6> Optionally, specify additional images to pull from a registry. - -. Run the `oc mirror` command to mirror the OCI catalog to a target mirror registry: -+ -[source,terminal] ----- -$ oc mirror --config=./imageset-config.yaml \ <1> - --include-local-oci-catalogs \ <2> - docker://registry.example:5000 <3> ----- -<1> Pass in the image set configuration file. This procedure assumes that it is named `imageset-config.yaml`. -<2> Use the `--include-local-oci-catalogs` flag to enable mirroring local OCI catalogs along with other remote content. -<3> Specify the registry to mirror the content to. The registry must start with `docker://`. If you specify a top-level namespace for the mirror registry, you must also use this same namespace on subsequent executions. -+ -Optionally, you can specify other flags to adjust the behavior of the OCI feature: -+ -`--oci-insecure-signature-policy`:: Do not push signatures to the target mirror registry. -+ -`--oci-registries-config`:: Specify the path to a TOML-formatted `registries.conf` file. You can use this to mirror from a different registry, such as a pre-production location for testing, without having to change the image set configuration file. This flag only affects local OCI catalogs, not any other mirrored content. -+ -.Example registries.conf file -[source,toml] ----- -[[registry]] - location = "registry.redhat.io:5000" - insecure = false - blocked = false - mirror-by-digest-only = true - prefix = "" - [[registry.mirror]] - location = "preprod-registry.example.com" - insecure = false ----- - -.Next steps - -* Configure your cluster to use the resources generated by oc-mirror. diff --git a/modules/oc-mirror-support.adoc b/modules/oc-mirror-support.adoc deleted file mode 100644 index a1588f3c080d..000000000000 --- a/modules/oc-mirror-support.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: CONCEPT -[id="oc-mirror-support_{context}"] -= oc-mirror compatibility and support - -The oc-mirror plugin supports mirroring {product-title} payload images and Operator catalogs for {product-title} versions 4.10 and later. - -Use the latest available version of the oc-mirror plugin regardless of which versions of {product-title} you need to mirror. - -// TODO: Remove this in 4.14 -[IMPORTANT] -==== -If you used the Technology Preview OCI local catalogs feature for the oc-mirror plugin for {product-title} 4.12, you can no longer use the OCI local catalogs feature of the oc-mirror plugin to copy a catalog locally and convert it to OCI format as a first step to mirroring to a fully disconnected cluster.
-==== diff --git a/modules/oc-mirror-updating-cluster-manifests.adoc b/modules/oc-mirror-updating-cluster-manifests.adoc deleted file mode 100644 index 0c8cf51eae86..000000000000 --- a/modules/oc-mirror-updating-cluster-manifests.adoc +++ /dev/null @@ -1,53 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-updating-cluster-manifests_{context}"] -= Configuring your cluster to use the resources generated by oc-mirror - -After you have mirrored your image set to the mirror registry, you must apply the generated `ImageContentSourcePolicy`, `CatalogSource`, and release image signature resources into the cluster. - -The `ImageContentSourcePolicy` resource associates the mirror registry with the source registry and redirects image pull requests from the online registries to the mirror registry. The `CatalogSource` resource is used by Operator Lifecycle Manager (OLM) to retrieve information about the available Operators in the mirror registry. The release image signatures are used to verify the mirrored release images. - -.Prerequisites - -* You have mirrored the image set to the registry mirror in the disconnected environment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the OpenShift CLI as a user with the `cluster-admin` role. - -. Apply the YAML files from the results directory to the cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f ./oc-mirror-workspace/results-1639608409/ ----- - -. Apply the release image signatures to the cluster by running the following command: -+ -[source,terminal] ----- -$ oc apply -f ./oc-mirror-workspace/results-1639608409/release-signatures/ ----- - -// TODO: Any example output to show? - -.Verification - -. Verify that the `ImageContentSourcePolicy` resources were successfully installed by running the following command: -+ -[source,terminal] ----- -$ oc get imagecontentsourcepolicy --all-namespaces ----- - -. Verify that the `CatalogSource` resources were successfully installed by running the following command: -+ -[source,terminal] ----- -$ oc get catalogsource --all-namespaces ----- diff --git a/modules/oc-mirror-updating-registry-about.adoc b/modules/oc-mirror-updating-registry-about.adoc deleted file mode 100644 index aee26dcb5126..000000000000 --- a/modules/oc-mirror-updating-registry-about.adoc +++ /dev/null @@ -1,38 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/disconnected_install/installing-mirroring-disconnected.adoc -// * updating/updating-restricted-network-cluster/mirroring-image-repository.adoc - -:_content-type: CONCEPT -[id="oc-mirror-updating-registry-about_{context}"] -= About updating your mirror registry content - -When you run the oc-mirror plugin again, it generates an image set that only contains new and updated images since the previous execution. Because it only pulls in the differences since the previous image set was created, the generated image set is often smaller and faster to process than the initial image set. - -[IMPORTANT] -==== -Generated image sets are sequential and must be pushed to the target mirror registry in order. You can derive the sequence number from the file name of the generated image set archive file. 
-==== - -[discrete] -== Adding new and updated images - -Depending on the settings in your image set configuration, future executions of oc-mirror can mirror additional new and updated images. Review the settings in your image set configuration to ensure that you are retrieving new versions as necessary. For example, you can set the minimum and maximum versions of Operators to mirror if you want to restrict to specific versions. Alternatively, you can set the minimum version as a starting point to mirror, but keep the version range open so you keep receiving new Operator versions on future executions of oc-mirror. Omitting any minimum or maximum version gives you the full version history of an Operator in a channel. Omitting explicitly named channels gives you all releases in all channels of the specified Operator. Omitting any named Operator gives you the entire catalog of all Operators and all their versions ever released. - -All these constraints and conditions are evaluated against the publicly released content by Red Hat on every invocation of oc-mirror. This way, it automatically picks up new releases and entirely new Operators. Constraints can be specified by only listing a desired set of Operators, which will not automatically add other newly released Operators into the mirror set. You can also specify a particular release channel, which limits mirroring to just this channel and not any new channels that have been added. This is important for Operator products, such as Red Hat Quay, that use different release channels for their minor releases. Lastly, you can specify a maximum version of a particular Operator, which causes the tool to only mirror the specified version range so that you do not automatically get any newer releases past the maximum version mirrored. In all these cases, you must update the image set configuration file to broaden the scope of the mirroring of Operators to get other Operators, new channels, and newer versions of Operators to be available in your target registry. - -It is recommended to align constraints like channel specification or version ranges with the release strategy that a particular Operator has chosen. For example, when the Operator uses a `stable` channel, you should restrict mirroring to that channel and potentially a minimum version to find the right balance between download volume and getting stable updates regularly. If the Operator chooses a release version channel scheme, for example `stable-3.7`, you should mirror all releases in that channel. This allows you to keep receiving patch versions of the Operator, for example `3.7.1`. You can also regularly adjust the image set configuration to add channels for new product releases, for example `stable-3.8`. - -[discrete] -== Pruning images - -Images are pruned automatically from the target mirror registry if they are no longer included in the latest image set that was generated and mirrored. This allows you to easily manage and clean up unneeded content and reclaim storage resources. - -If there are {product-title} releases or Operator versions that you no longer need, you can modify your image set configuration to exclude them, and they will be pruned from the mirror registry upon mirroring. This can be done by adjusting a minimum or maximum version range setting per Operator in the image set configuration file or by deleting the Operator from the list of Operators to mirror from the catalog. 
You can also remove entire Operator catalogs or entire {product-title} releases from the configuration file. - -[IMPORTANT] -==== -If there are no new or updated images to mirror, the excluded images are not pruned from the target mirror registry. Additionally, if an Operator publisher removes an Operator version from a channel, the removed versions are pruned from the target mirror registry. -==== - -To disable automatic pruning of images from the target mirror registry, pass the `--skip-pruning` flag to the `oc mirror` command. diff --git a/modules/oc-mirror-updating-restricted-cluster-manifests.adoc b/modules/oc-mirror-updating-restricted-cluster-manifests.adoc deleted file mode 100644 index 6deb4835beac..000000000000 --- a/modules/oc-mirror-updating-restricted-cluster-manifests.adoc +++ /dev/null @@ -1,46 +0,0 @@ -// Module included in the following assemblies: -// -// * installing/installing-restricted-networks-nutanix-installer-provisioned.adoc - -:_content-type: PROCEDURE -[id="oc-mirror-updating-cluster-manifests_{context}"] -= Installing the policy resources into the cluster - -Mirroring the {product-title} content using the oc-mirror OpenShift CLI (oc) plugin creates resources, which include `catalogSource-certified-operator-index.yaml` and `imageContentSourcePolicy.yaml`. - -* The `ImageContentSourcePolicy` resource associates the mirror registry with the source registry and redirects image pull requests from the online registries to the mirror registry. -* The `CatalogSource` resource is used by Operator Lifecycle Manager (OLM) to retrieve information about the available Operators in the mirror registry, which lets users discover and install Operators. - -After you install the cluster, you must install these resources into the cluster. - -.Prerequisites - -* You have mirrored the image set to the registry mirror in the disconnected environment. -* You have access to the cluster as a user with the `cluster-admin` role. - -.Procedure - -. Log in to the OpenShift CLI as a user with the `cluster-admin` role. - -. Apply the YAML files from the results directory to the cluster: -+ -[source,terminal] ----- -$ oc apply -f ./oc-mirror-workspace/results-/ ----- - -.Verification - -. Verify that the `ImageContentSourcePolicy` resources were successfully installed: -+ -[source,terminal] ----- -$ oc get imagecontentsourcepolicy --all-namespaces ----- - -. Verify that the `CatalogSource` resources were successfully installed: -+ -[source,terminal] ----- -$ oc get catalogsource --all-namespaces ----- diff --git a/modules/ocm-accesscontrol-tab.adoc b/modules/ocm-accesscontrol-tab.adoc deleted file mode 100644 index 50bf9e84328d..000000000000 --- a/modules/ocm-accesscontrol-tab.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -:_content-type: PROCEDURE -[id="ocm-accesscontrol-tab_{context}"] -= Access control tab - -The **Access control** tab allows the cluster owner to set up an identity provider, grant elevated permissions, and grant roles to other users. - -.Prerequisites - -* You must be the cluster owner or have the correct permissions to grant roles on the cluster. - -.Procedure - -. Select the **Grant role** button. -. Enter the Red Hat account login for the user that you wish to grant a role on the cluster. -. Select the **Grant role** button on the dialog box. -. The dialog box closes, and the selected user shows the "Cluster Editor" access. 
diff --git a/modules/ocm-accessing.adoc b/modules/ocm-accessing.adoc deleted file mode 100644 index 5f9709b9e18e..000000000000 --- a/modules/ocm-accessing.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -:_content-type: PROCEDURE -[id="accessing-ocm_{context}"] -= Accessing {cluster-manager-first} - -You can access {cluster-manager} with your configured OpenShift account. - -.Prerequisites - -* You have an account that is part of an OpenShift organization. -* If you are creating a cluster, your organization has specified quota. - -.Procedure - -* Log in to {cluster-manager-url} using your login credentials. diff --git a/modules/ocm-addons-tab.adoc b/modules/ocm-addons-tab.adoc deleted file mode 100644 index b4b998cd8031..000000000000 --- a/modules/ocm-addons-tab.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -[id="ocm-addons-tab_{context}"] -= Add-ons tab - -The **Add-ons** tab displays all of the optional add-ons that can be added to the cluster. Select the desired add-on, and then select **Install** below the description for the add-on that displays. diff --git a/modules/ocm-cluster-history.adoc b/modules/ocm-cluster-history.adoc deleted file mode 100644 index 00399d2b3ec9..000000000000 --- a/modules/ocm-cluster-history.adoc +++ /dev/null @@ -1,9 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -:_content-type: CONCEPT -[id="ocm-cluster-history-tab_{context}"] -= Cluster history tab - -The **Cluster history** tab shows all the history of the cluster including: changes made to the cluster, descriptions of changes, severity, dates, and who made the changes. You can also download the information using the **Download history** button. diff --git a/modules/ocm-disabling-autoscaling-nodes.adoc b/modules/ocm-disabling-autoscaling-nodes.adoc deleted file mode 100644 index 9965dba3f09c..000000000000 --- a/modules/ocm-disabling-autoscaling-nodes.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-nodes-about-autoscaling-nodes.adoc -// * nodes/nodes-about-autoscaling-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-nodes-about-autoscaling-nodes.adoc - -:_content-type: PROCEDURE -[id="ocm-disabling-autoscaling_{context}"] -= Disabling autoscaling nodes in an existing cluster using {cluster-manager-first} - -Disable autoscaling for worker nodes in the machine pool definition from {cluster-manager} console. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster with autoscaling that must be disabled. - -. On the selected cluster, select the *Machine pools* tab. - -. Click the Options menu {kebab} at the end of the machine pool with autoscaling and select *Scale*. - -. On the "Edit node count" dialog, deselect the *Enable autoscaling* checkbox. - -. Select *Apply* to save these changes and disable autoscaling from the cluster. 
diff --git a/modules/ocm-enabling-autoscaling-nodes.adoc b/modules/ocm-enabling-autoscaling-nodes.adoc deleted file mode 100644 index 3358b0a17144..000000000000 --- a/modules/ocm-enabling-autoscaling-nodes.adoc +++ /dev/null @@ -1,23 +0,0 @@ -// Module included in the following assemblies: -// -// * rosa_cluster_admin/rosa_nodes/rosa-nodes-about-autoscaling-nodes.adoc -// * nodes/nodes-about-autoscaling-nodes.adoc -// * osd_cluster_admin/osd_nodes/osd-nodes-about-autoscaling-nodes.adoc - -:_content-type: PROCEDURE -[id="ocm-enabling-autoscaling_{context}"] -= Enabling autoscaling nodes in an existing cluster using {cluster-manager-first} - -Enable autoscaling for worker nodes in the machine pool definition from {cluster-manager} console. - -.Procedure - -. From {cluster-manager-url}, navigate to the *Clusters* page and select the cluster that you want to enable autoscaling for. - -. On the selected cluster, select the *Machine pools* tab. - -. Click the Options menu {kebab} at the end of the machine pool that you want to enable autoscaling for and select *Scale*. - -. On the *Edit node count* dialog, select the *Enable autoscaling* checkbox. - -. Select *Apply* to save these changes and enable autoscaling for the cluster. diff --git a/modules/ocm-insightsadvisor-tab.adoc b/modules/ocm-insightsadvisor-tab.adoc deleted file mode 100644 index c98b05139bbc..000000000000 --- a/modules/ocm-insightsadvisor-tab.adoc +++ /dev/null @@ -1,8 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -[id="ocm-insightsadvisor-tab_{context}"] -= Insights Advisor tab - -The **Insights Advisor** tab uses the Remote Health functionality of the OpenShift Container Platform to identify and mitigate risks to security, performance, availability, and stability. See link:https://docs.openshift.com/container-platform/latest/support/getting-support.html[Using Insights to identify issues with your cluster] in the {OCP} documentation. diff --git a/modules/ocm-machinepools-tab.adoc b/modules/ocm-machinepools-tab.adoc deleted file mode 100644 index 90fdad5e69f4..000000000000 --- a/modules/ocm-machinepools-tab.adoc +++ /dev/null @@ -1,10 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -[id="ocm-machinepools-tab_{context}"] -= Machine pools tab - -The **Machine pools** tab allows the cluster owner to create new machine pools, if there is enough available quota, or edit an existing machine pool. - -Selecting the **More options** > **Scale** opens the "Edit node count" dialog. In this dialog, you can change the node count per availability zone. If autoscaling is enabled, you can also set the range for autoscaling. diff --git a/modules/ocm-networking-tab-adding-ingress.adoc b/modules/ocm-networking-tab-adding-ingress.adoc deleted file mode 100644 index 7bfc8aaeabf5..000000000000 --- a/modules/ocm-networking-tab-adding-ingress.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc -:_content-type: PROCEDURE -[id="ocm-networking-tab-adding-ingress_{context}"] -= Adding a network Ingress to your {product-title} cluster - -You can add a network Ingress to your cluster from the {cluster-manager-url} web UI. - -.Prerequisites - -* You have a Red Hat account. -* You have the required permissions to make changes to your cluster in {cluster-manager}. - -.Procedure - -. From the **Networking** tab in {cluster-manager}, click the **Additional application router** toggle to enable the Ingress. 
There are two options you can add to the additional router: -.. **Make router private**: This checkbox allows you to control cluster privacy. By default, your Ingress router is publicly exposed and allows anyone access. You can limit access to applications or websites you run on your cluster by selecting this checkbox. For example, if you only want internal employees to access this cluster, then using this option requires a private connection, such as a virtual private network (VPN) or virtual private cloud (VPC) peering connection. -.. **Label match for additional router**: This field provides a way to target the specific route you want exposed in this additional Ingress router. By default, the router exposes all routes. If you leave this field blank, all routes stay exposed. -+ -A commonly used setup has a private default router, which means any applications deployed require a VPN or VPC peering to access. You can create an additional public router with a label match of `route=external`. Then, if you add the `route=external` label to additional routes, the additional router matches this label and exposes those routes for public use. -. Click **Change settings** to confirm that you want to add the network Ingress. \ No newline at end of file diff --git a/modules/ocm-networking-tab-concept.adoc b/modules/ocm-networking-tab-concept.adoc deleted file mode 100644 index 81c8c03396d7..000000000000 --- a/modules/ocm-networking-tab-concept.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc -:_content-type: CONCEPT -[id="ocm-networking-tab_{context}"] -= Networking tab - -The **Networking** tab provides a control plane API endpoint as well as the default application router. Both the control plane API endpoint and the default application router can be made private by selecting the respective box below each of them. - -ifdef::openshift-rosa[] -[IMPORTANT] -==== -For Security Token Service (STS) installations, these options cannot be changed. STS installations also do not allow you to change the privacy settings or add an additional router. -==== -endif::openshift-rosa[] - -ifndef::openshift-rosa[] -[IMPORTANT] -==== -{cluster-manager-first} does not support the networking tab for a Google Cloud Platform (GCP), non-CCS cluster running in a Red Hat GCP project. -==== -endif::openshift-rosa[] diff --git a/modules/ocm-overview-tab.adoc b/modules/ocm-overview-tab.adoc deleted file mode 100644 index 3c7e22299050..000000000000 --- a/modules/ocm-overview-tab.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -:_content-type: CONCEPT -[id="ocm-overview-tab_{context}"] -= Overview tab - -The **Overview** tab provides information about how your cluster was configured: - -* **Cluster ID** is the unique identifier for the created cluster. This ID can be used when issuing commands to the cluster from the command line. -* **Type** shows the OpenShift version that the cluster is using. -* **Region** is the server region. -* **Provider** shows the cloud provider that the cluster was built on. -* **Availability** shows the type of availability zone that the cluster uses, either single or multizone. -* **Version** is the OpenShift version that is installed on the cluster. If there is an update available, you can update from this field. -* **Created at** shows the date and time that the cluster was created. -* **Owner** identifies who created the cluster and has owner rights.
-* **Subscription type** shows the subscription model that was selected on creation. -* **Infrastructure type** is the type of account that the cluster uses. -* **Status** displays the current status of the cluster. -* **Total vCPU** shows the total available virtual CPU for this cluster. -* **Total memory** shows the total available memory for this cluster. -* **Load balancers** -* **Persistent storage** displays the amount of storage that is available on this cluster. -* **Nodes** shows the actual and desired nodes on the cluster. These numbers might not match due to cluster scaling. -* **Network** shows the address and prefixes for network connectivity. -* The **Resource usage** section of the tab displays the resources in use with a graph. -* The **Advisor recommendations** section provides insights into security, performance, availability, and stability. This section requires the use of remote health functionality. See link:https://docs.openshift.com/container-platform/4.9/support/remote_health_monitoring/using-insights-to-identify-issues-with-your-cluster.html[Using Insights to identify issues with your cluster]. -* The **Cluster history** section shows everything that has been done with the cluster, including creation and when a new version is identified. diff --git a/modules/ocm-settings-tab.adoc b/modules/ocm-settings-tab.adoc deleted file mode 100644 index 680c935cd119..000000000000 --- a/modules/ocm-settings-tab.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -[id="ocm-settings-tab_{context}"] -= Settings tab - -The **Settings** tab provides a few options for the cluster owner: - -* **Monitoring**, which is enabled by default, allows for reporting on user-defined actions. See link:https://docs.openshift.com/rosa/monitoring/osd-understanding-the-monitoring-stack.html[Understanding the monitoring stack]. -* **Update strategy** allows you to determine if the cluster automatically updates on a certain day of the week at a specified time or if all updates are scheduled manually. -* **Node draining** sets the duration that protected workloads are respected during updates. When this duration has passed, the node is forcibly removed. -* **Update status** shows the current version and whether any updates are available. diff --git a/modules/ocm-support-tab.adoc b/modules/ocm-support-tab.adoc deleted file mode 100644 index 8bd59f757f3b..000000000000 --- a/modules/ocm-support-tab.adoc +++ /dev/null @@ -1,13 +0,0 @@ -// Module included in the following assemblies: -// -// ocm/ocm-overview.adoc - -[id="ocm-support-tab_{context}"] -= Support tab - -In the *Support* tab, you can add notification contacts for individuals who should receive cluster notifications. The username or email address that you provide must relate to a user account in the Red Hat organization where the cluster is deployed. -ifdef::openshift-dedicated,openshift-rosa[] -For the steps to add a notification contact, see _Adding cluster notification contacts_. -endif::openshift-dedicated,openshift-rosa[] - -Also from this tab, you can open a support case to request technical support for your cluster.
diff --git a/modules/odc-access-web-terminal.adoc b/modules/odc-access-web-terminal.adoc deleted file mode 100644 index 42635cffd274..000000000000 --- a/modules/odc-access-web-terminal.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web_terminal/odc-using-web-terminal.adoc - -:_content-type: PROCEDURE -[id="odc-access-web-terminal_{context}"] -= Accessing the web terminal - -After the {web-terminal-op} is installed, you can access the web terminal. -You can re-run commands by selecting them from the list of commands you have run in the terminal. These commands persist across multiple terminal sessions. -The web terminal remains open until you close it or until you close the browser window or tab. - -.Prerequisites - -* You have access to an {product-title} cluster and are logged into the web console. -* The {web-terminal-op} is installed on your cluster. - -.Procedure - -. To launch the web terminal, click the command line terminal icon (image:odc-wto-icon.png[title="wto icon"]) in the masthead of the console. A web terminal instance is displayed in the *Command line terminal* pane. This instance is automatically logged in with your credentials. - -. Select the project where the `DevWorkspace` CR must be created from the *Project* drop-down list. By default, the current project is selected. -+ -[NOTE] -==== -* The `DevWorkspace` CR is created only if it does not already exist. -* The `openshift-terminal` project is the default project used for cluster administrators. They do not have the option to choose another project. -==== - -. Click *Start* to initialize the web terminal using the selected project. After the web terminal is initialized, you can use the preinstalled CLI tools like `oc`, `kubectl`, `odo`, `kn`, `tkn`, `helm`, `kubens`, `subctl`, and `kubectx` in the web terminal. - -. Click *+* to open multiple tabs within the web terminal in the console. diff --git a/modules/odc-accessing-perspectives.adoc b/modules/odc-accessing-perspectives.adoc deleted file mode 100644 index 3b5b05b1e1ff..000000000000 --- a/modules/odc-accessing-perspectives.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// web_console/web-console-overview.adoc - -:_content-type: PROCEDURE -[id="accessing-perspectives_{context}"] -= Accessing the perspectives - - -You can access the *Administrator* and *Developer* perspectives from the web console as follows: - -.Prerequisites -To access a perspective, ensure that you have logged in to the web console. Your default perspective is automatically determined by the permissions of the user. The *Administrator* perspective is selected for users with access to all projects, while the *Developer* perspective is selected for users with limited access to their own projects. - -.Additional Resources -See link:https://docs.openshift.com/container-platform/4.13/web_console/adding-user-preferences.html[Adding User Preferences] for more information on changing perspectives. - - -.Procedure - -. Use the perspective switcher to switch to the *Administrator* or *Developer* perspective. - -. Select an existing project from the *Project* drop-down list. You can also create a new project from this drop-down list. - -[NOTE] -==== -You can use the perspective switcher only as `cluster-admin`.
-==== diff --git a/modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc b/modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc deleted file mode 100644 index ee12c3e10306..000000000000 --- a/modules/odc-adding-a-GitHub-repository-containing-pipelines.adoc +++ /dev/null @@ -1,58 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-adding-a-GitHub-repository-containing-pipelines_{context}"] - -= Adding a GitHub repository containing pipelines - -In the *Developer* perspective, you can add your GitHub repository containing pipelines to the {product-title} cluster. This allows you to run pipelines and tasks from your GitHub repository on the cluster when relevant Git events, such as push or pull requests, are triggered. - -[NOTE] -==== -You can add both public and private GitHub repositories. -==== - -.Prerequisites -* Ensure that your cluster administrator has configured the required GitHub applications in the administrator perspective. - -.Procedure -. In the *Developer* perspective, choose the namespace or project in which you want to add your GitHub repository. -. Navigate to *Pipelines* using the left navigation pane. -. Click *Create* -> *Repository* on the right side of the *Pipelines* page. -. Enter your *Git Repo URL* and the console automatically fetches the repository name. -. Click *Show configuration options*. By default, you see only one option *Setup a webhook*. If you have a GitHub application configured, you see two options: -* *Use GitHub App*: Select this option to install your GitHub application in your repository. -* *Setup a webhook*: Select this option to add a webhook to your GitHub application. -. Set up a webhook using one of the following options in the *Secret* section: -* Setup a webhook using *Git access token*: -+ -.. Enter your personal access token. -.. Click *Generate* corresponding to the *Webhook secret* field to generate a new webhook secret. -+ -image::Git-access-token.png[] -+ -[NOTE] -==== -You can click the link below the *Git access token* field if you do not have a personal access token and want to create a new one. -==== - -* Setup a webhook using *Git access token secret*: -** Select a secret in your namespace from the dropdown list. Depending on the secret you selected, a webhook secret is automatically generated. -+ -image::Git-access-token-secret.png[] - -. Add the webhook secret details to your GitHub repository: -.. Copy the *webhook URL* and navigate to your GitHub repository settings. -.. Click *Webhooks* -> *Add webhook*. -.. Copy the *Webhook URL* from the developer console and paste it in the *Payload URL* field of the GitHub repository settings. -.. Select the *Content type*. -.. Copy the *Webhook secret* from the developer console and paste it in the *Secret* field of the GitHub repository settings. -.. Select one of the *SSL verification* options. -.. Select the events to trigger this webhook. -.. Click *Add webhook*. -. Navigate back to the developer console and click *Add*. -. Read the details of the steps that you have to perform and click *Close*. -. View the details of the repository you just created. - -[NOTE] -==== -When importing an application using *Import from Git* and the Git repository has a `.tekton` directory, you can configure `pipelines-as-code` for your application. 
-==== diff --git a/modules/odc-adding-components-to-an-existing-project.adoc b/modules/odc-adding-components-to-an-existing-project.adoc deleted file mode 100644 index 6765825372ee..000000000000 --- a/modules/odc-adding-components-to-an-existing-project.adoc +++ /dev/null @@ -1,20 +0,0 @@ -// Module included in the following assemblies: -// -// applications/application_life_cycle_management/odc-viewing-application-composition-using-topology-view.adoc - -:_content-type: PROCEDURE -[id="odc-adding-components-to-an-existing-project_{context}"] -= Adding components to an existing project - -.Procedure - -. Click *Add to Project* (image:odc_add_to_project.png[title="Add to Project"]) next to the left navigation pane, or press kbd:[Ctrl+Space]. -. Search for the component and select *Create* or press kbd:[Enter] to add the component to the project and see it in the topology *Graph view*. - -.Adding a component via quick search -image::odc_quick_search.png[] - -Alternatively, you can use the *Import from Git*, *Container Image*, *Database*, *From Catalog*, *Operator Backed*, *Helm Charts*, *Samples*, or *Upload JAR file* options in the context menu by right-clicking in the topology *Graph view* to add a component to your project. - -.Context menu to add services
image::odc_context_project.png[] diff --git a/modules/odc-adding-health-checks.adoc b/modules/odc-adding-health-checks.adoc deleted file mode 100644 index 266cdbb3724a..000000000000 --- a/modules/odc-adding-health-checks.adoc +++ /dev/null @@ -1,35 +0,0 @@ -// Module included in the following assemblies: -// -// applications/application-health - -:_content-type: PROCEDURE -[id="odc-adding-health-checks"] -= Adding health checks using the Developer perspective - -You can use the *Topology* view to add health checks to your deployed application. - -.Prerequisites -* You have switched to the *Developer* perspective in the web console. -* You have created and deployed an application on {product-title} using the *Developer* perspective. - -.Procedure -. In the *Topology* view, click the application node to see the side panel. If the container does not have health checks added to ensure the smooth running of your application, a *Health Checks* notification is displayed with a link to add health checks. -. In the displayed notification, click the *Add Health Checks* link. -. Alternatively, you can click the *Actions* drop-down list and select *Add Health Checks*. Note that if the container already has health checks, you will see the *Edit Health Checks* option instead of the add option. -. In the *Add Health Checks* form, if you have deployed multiple containers, use the *Container* drop-down list to ensure that the appropriate container is selected. -. Click the required health probe links to add them to the container. Default data for the health checks is prepopulated. You can add the probes with the default data or further customize the values and then add them. For example, to add a Readiness probe that checks if your container is ready to handle requests (a YAML sketch of the resulting probe appears later in this section): -.. Click *Add Readiness Probe* to see a form containing the parameters for the probe. -.. Click the *Type* drop-down list to select the request type you want to add. For example, in this case, select *Container Command* to select the command that will be executed inside the container. -.. In the *Command* field, add the argument `cat`. Similarly, you can add multiple arguments for the check; for example, add another argument `/tmp/healthy`. -..
Retain or modify the default values for the other parameters as required. -+ -[NOTE] -==== -The `Timeout` value must be lower than the `Period` value. The `Timeout` default value is `1`. The `Period` default value is `10`. -==== -.. Click the check mark at the bottom of the form. The *Readiness Probe Added* message is displayed. - -. Click *Add* to add the health check. You are redirected to the *Topology* view and the container is restarted. -. In the side panel, verify that the probes have been added by clicking on the deployed pod under the *Pods* section. -. In the *Pod Details* page, click the listed container in the *Containers* section. -. In the *Container Details* page, verify that the Readiness probe - *Exec Command* `cat` `/tmp/healthy` has been added to the container. diff --git a/modules/odc-adding-services-to-application.adoc b/modules/odc-adding-services-to-application.adoc deleted file mode 100644 index c68cc38bafb5..000000000000 --- a/modules/odc-adding-services-to-application.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-adding-services-to-your-application_{context}"] -= Adding services to your application - -To add a service to your application use the *+Add* actions using the context menu in the topology *Graph view*. - -[NOTE] -==== -In addition to the context menu, you can add services by using the sidebar or hovering and dragging the dangling arrow from the application group. -==== - -.Procedure - -1. Right-click an application group in the topology *Graph view* to display the context menu. -+ -.Add resource context menu -image::odc_context_menu.png[] - -2. Use *Add to Application* to select a method for adding a service to the application group, such as *From Git*, *Container Image*, *From Dockerfile*, *From Devfile*, *Upload JAR file*, *Event Source*, *Channel*, or *Broker*. - -3. Complete the form for the method you choose and click *Create*. For example, to add a service based on the source code in your Git repository, choose the *From Git* method, fill in the *Import from Git* form, and click *Create*. diff --git a/modules/odc-configure-web-terminal-image-session.adoc b/modules/odc-configure-web-terminal-image-session.adoc deleted file mode 100644 index bd304d1851c6..000000000000 --- a/modules/odc-configure-web-terminal-image-session.adoc +++ /dev/null @@ -1,22 +0,0 @@ - -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE -[id="odc-configure-web-terminal-image-session_{context}"] -= Configuring the web terminal image for a session - -You can change the default image for the web terminal for your current session. - -.Prerequisites - -* You have access to an {product-title} cluster that has the {web-terminal-op} installed. -* You are logged into the web console. - -.Procedure - -. Click the web terminal icon (image:odc-wto-icon.png[title="web terminal icon"]). -. Click *Image* to display advanced configuration options for the web terminal image. -. Enter the URL of the image that you want to use. -. Click *Start* to begin a terminal instance using the specified image setting. 
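The readiness probe configured in _Adding health checks using the Developer perspective_ (a *Container Command* check that runs `cat /tmp/healthy`) is stored as an ordinary Kubernetes probe on the selected container. A minimal sketch of the resulting container snippet, assuming the default timing values mentioned in that procedure (`Timeout` 1, `Period` 10):

[source,yaml]
----
# Sketch of the container-level readiness probe produced by the health check
# form; the timing values shown are the documented defaults.
readinessProbe:
  exec:
    command:
      - cat
      - /tmp/healthy
  periodSeconds: 10
  timeoutSeconds: 1
----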
\ No newline at end of file diff --git a/modules/odc-configure-web-terminal-timeout-session.adoc b/modules/odc-configure-web-terminal-timeout-session.adoc deleted file mode 100644 index fb269bc3f07f..000000000000 --- a/modules/odc-configure-web-terminal-timeout-session.adoc +++ /dev/null @@ -1,22 +0,0 @@ - -// Module is included in the following assemblies: -// -// * web_console/web_terminal/configuring-web-terminal.adoc - -:_content-type: PROCEDURE -[id="odc-configure-web-terminal-timeout-session_{context}"] -= Configuring the web terminal timeout for a session - -You can change the default timeout period for the web terminal for your current session. - -.Prerequisites - -* You have access to an {product-title} cluster that has the {web-terminal-op} installed. -* You are logged into the web console. - -.Procedure - -. Click the web terminal icon (image:odc-wto-icon.png[title="web terminal icon"]). -. Click *Timeout* to display advanced configuration options for the web terminal timeout. -. Set a value for the timeout. From the drop-down list, select a time interval of *Seconds*, *Minutes*, *Hours*, or *Milliseconds*. -. Click *Start* to begin a terminal instance using the specified timeout setting. \ No newline at end of file diff --git a/modules/odc-creating-a-binding-connection-between-components.adoc b/modules/odc-creating-a-binding-connection-between-components.adoc deleted file mode 100644 index 4dc6e56bd084..000000000000 --- a/modules/odc-creating-a-binding-connection-between-components.adoc +++ /dev/null @@ -1,70 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-creating-a-binding-connection-between-components_{context}"] -= Creating a binding connection between components - -You can create a binding connection with Operator-backed components, as demonstrated in the following example, which uses a PostgreSQL Database service and a Spring PetClinic sample application. To create a binding connection with a service that the PostgreSQL Database Operator backs, you must first add the Red Hat-provided PostgreSQL Database Operator to the *OperatorHub*, and then install the Operator. The PostgreSQL Database Operator then creates and manages the Database resource, which exposes the binding data in secrets, config maps, status, and spec attributes. - -.Prerequisites - -* You created and deployed a Spring PetClinic sample application in the *Developer* perspective. -* You installed {servicebinding-title} from the *OperatorHub*. -* You installed the *Crunchy Postgres for Kubernetes* Operator from the OperatorHub in the `v5` *Update* channel. -* You created a *PostgresCluster* resource in the *Developer* perspective, which resulted in a Crunchy PostgreSQL database instance with the following components: `hippo-backup`, `hippo-instance`, `hippo-repo-host`, and `hippo-pgbouncer`. - -.Procedure - -. In the *Developer* perspective, switch to the relevant project, for example, `my-petclinic`. -. In the *Topology* view, hover over the Spring PetClinic sample application to see a dangling arrow on the node. -. Drag and drop the arrow onto the *hippo* database icon in the Postgres Cluster to make a binding connection with the Spring PetClinic sample application. - -. In the *Create Service Binding* dialog, keep the default name or add a different name for the service binding, and then click *Create*.
-+ -.Service Binding dialog -image::odc-sbc-modal.png[] -. Optional: If there is difficulty in making a binding connection using the Topology view, go to *+Add* -> *YAML* -> *Import YAML*. -. Optional: In the YAML editor, add the `ServiceBinding` resource: -+ -[source,YAML] ----- -apiVersion: binding.operators.coreos.com/v1alpha1 -kind: ServiceBinding -metadata: - name: spring-petclinic-pgcluster - namespace: my-petclinic -spec: - services: - - group: postgres-operator.crunchydata.com - version: v1beta1 - kind: PostgresCluster - name: hippo - application: - name: spring-petclinic - group: apps - version: v1 - resource: deployments ----- -+ -A service binding request is created and a binding connection is created through a `ServiceBinding` resource. When the database service connection request succeeds, the application is redeployed and the connection is established. -+ -.Binding connector -image::odc-binding-connector.png[] -+ -[TIP] -==== -You can also use the context menu by dragging the dangling arrow to add and create a binding connection to an operator-backed service. - -.Context menu to create binding connection -image::odc_context_operator.png[] -==== - -. In the navigation menu, click *Topology*. The spring-petclinic deployment in the Topology view includes an Open URL link to view its web page. - -. Click the *Open URL* link. - -You can now view the Spring PetClinic sample application remotely to confirm that the application is now connected to the database service and that the data has been successfully projected to the application from the Crunchy PostgreSQL database service. - -The Service Binding Operator has successfully created a working connection between the application and the database service. \ No newline at end of file diff --git a/modules/odc-creating-a-visual-connection-between-components.adoc b/modules/odc-creating-a-visual-connection-between-components.adoc deleted file mode 100644 index 3561578dfd62..000000000000 --- a/modules/odc-creating-a-visual-connection-between-components.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc -:_content-type: PROCEDURE -[id="odc-creating-a-visual-connection-between-components_{context}"] -= Creating a visual connection between components - -You can depict an intent to connect application components by using the visual connector. - -This procedure walks you through an example of creating a visual connection between a PostgreSQL Database service and a Spring PetClinic sample application. - -.Prerequisites - -* You have created and deployed a Spring PetClinic sample application by using the *Developer* perspective. -* You have created and deployed a Crunchy PostgreSQL database instance by using the *Developer* perspective. This instance has the following components: `hippo-backup`, `hippo-instance`, `hippo-repo-host`, and `hippo-pgbouncer`. - -.Procedure - -. Hover over the Spring PetClinic sample application to see a dangling arrow on the node. -+ -.Visual connector -image::odc_connector.png[] -. Click and drag the arrow towards the `hippo-pgbouncer` deployment to connect the Spring PetClinic sample application with it. -. Click the `spring-petclinic` deployment to see the *Overview* panel. 
Under the *Details* tab, click the edit icon in the *Annotations* section to see the *Key = `app.openshift.io/connects-to`* and *Value = `[{"apiVersion":"apps/v1","kind":"Deployment","name":"hippo-pgbouncer"}]`* annotation added to the deployment. - -. Optional: You can repeat these steps to establish visual connections between other applications and components you create. -+ -.Connecting multiple applications -image::odc_connecting_multiple_applications.png[] \ No newline at end of file diff --git a/modules/odc-creating-apiserversource.adoc b/modules/odc-creating-apiserversource.adoc deleted file mode 100644 index 0d7df2d8c78f..000000000000 --- a/modules/odc-creating-apiserversource.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/eventing/event-sources/serverless-apiserversource.adoc - -:_content-type: PROCEDURE -[id="odc-creating-apiserversource_{context}"] -= Creating an API server source by using the web console - -After Knative Eventing is installed on your cluster, you can create an API server source by using the web console. Using the {product-title} web console provides a streamlined and intuitive user interface to create an event source. - -.Prerequisites - -* You have logged in to the {product-title} web console. -* The {ServerlessOperatorName} and Knative Eventing are installed on the cluster. -* You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in {product-title}. -* You have installed the OpenShift CLI (`oc`). - -.Procedure - -include::snippets/serverless-service-account-apiserversource.adoc[] - -. In the *Developer* perspective, navigate to *+Add* → *Event Source*. The *Event Sources* page is displayed. -. Optional: If you have multiple providers for your event sources, select the required provider from the *Providers* list to filter the available event sources from the provider. -. Select *ApiServerSource* and then click *Create Event Source*. The *Create Event Source* page is displayed. -. Configure the *ApiServerSource* settings by using the *Form view* or *YAML view*: -+ -[NOTE] -==== -You can switch between the *Form view* and *YAML view*. The data is persisted when switching between the views. -==== -.. Enter `v1` as the *APIVERSION* and `Event` as the *KIND*. -// .. Select *Resource* as the *Mode*. *Mode* is the mode that the receive adapter controller runs in. `Ref` sends only the reference to the resource. `Resource` sends the full resource. -// TODO: clarify what this is used for. Out of scope for this PR since not required. -.. Select the *Service Account Name* for the service account that you created. -.. Select the *Sink* for the event source. A *Sink* can be either a *Resource*, such as a channel, broker, or service, or a *URI*. -. Click *Create*. - -.Verification - -* After you have created the API server source, you will see it connected to the service it is sinked to in the *Topology* view. -+ -image::toplogy-odc-apiserver.png[ApiServerSource Topology view] - -[NOTE] -==== -If a URI sink is used, modify the URI by right-clicking on *URI sink* -> *Edit URI*. -==== - -.Deleting the API server source - -. Navigate to the *Topology* view. -. Right-click the API server source and select *Delete ApiServerSource*. 
-+ -image::delete-apiserversource-odc.png[Delete the ApiServerSource] diff --git a/modules/odc-creating-helm-releases-using-developer-perspective.adoc b/modules/odc-creating-helm-releases-using-developer-perspective.adoc deleted file mode 100644 index e52d0e47bc48..000000000000 --- a/modules/odc-creating-helm-releases-using-developer-perspective.adoc +++ /dev/null @@ -1,38 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-creating-helm-releases-using-developer-perspective_{context}"] -= Creating Helm releases using the Developer perspective - -You can use either the *Developer* perspective in the web console or the CLI to select and create a release from the Helm charts listed in the *Developer Catalog*. You can create Helm releases by installing Helm charts and see them in the *Developer* perspective of the web console. - -.Prerequisites -* You have logged in to the web console and have switched to xref:../../web_console/web-console-overview.adoc#about-developer-perspective_web-console-overview[the *Developer* perspective]. - -.Procedure -To create Helm releases from the Helm charts provided in the *Developer Catalog*: - -. In the *Developer* perspective, navigate to the *+Add* view and select a project. Then click *Helm Chart* option to see all the Helm Charts in the *Developer Catalog*. -. Select a chart and read the description, README, and other details about the chart. -. Click *Create*. -+ -.Helm charts in developer catalog -image::odc_helm_chart_devcatalog_new.png[] -+ -. In the *Create Helm Release* page: -.. Enter a unique name for the release in the *Release Name* field. -.. Select the required chart version from the *Chart Version* drop-down list. -.. Configure your Helm chart by using the *Form View* or the *YAML View*. -+ -[NOTE] -==== -Where available, you can switch between the *YAML View* and *Form View*. The data is persisted when switching between the views. -==== -+ -.. Click *Create* to create a Helm release. The web console displays the new release in the *Topology* view. -+ -If a Helm chart has release notes, the web console displays them. -+ -If a Helm chart creates workloads, the web console displays them on the *Topology* or *Helm release details* page. The workloads are `DaemonSet`, `CronJob`, `Pod`, `Deployment`, and `DeploymentConfig`. - -.. View the newly created Helm release in the *Helm Releases* page. - -You can upgrade, rollback, or delete a Helm release by using the *Actions* button on the side panel or by right-clicking a Helm release. diff --git a/modules/odc-creating-projects-using-developer-perspective.adoc b/modules/odc-creating-projects-using-developer-perspective.adoc deleted file mode 100644 index 78715d61c548..000000000000 --- a/modules/odc-creating-projects-using-developer-perspective.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="odc-creating-projects-using-developer-perspective_{context}"] -= Creating a project using the Developer perspective in the web console - -You can use the *Developer* perspective in the {product-title} web console to create a project in your cluster. - -[NOTE] -==== -Projects starting with `openshift-` and `kube-` are considered critical by {product-title}. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the *Developer* perspective. Cluster administrators can create these projects using the `oc adm new-project` command. 
-==== - -[NOTE] -==== -You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services. -==== - -.Prerequisites - -* Ensure that you have the appropriate roles and permissions to create projects, applications, and other workloads in {product-title}. - -.Procedure -You can create a project using the *Developer* perspective, as follows: - -. Click the *Project* drop-down menu to see a list of all available projects. Select *Create Project*. -+ -.Create project -image::odc_create_project.png[] - -. In the *Create Project* dialog box, enter a unique name, such as `myproject`, in the *Name* field. -. Optional: Add the *Display Name* and *Description* details for the project. -. Click *Create*. -. Use the left navigation panel to navigate to the *Project* view and see the dashboard for your project. -. Optional: -+ -* Use the *Project* drop-down menu at the top of the screen and select *all projects* to list all of the projects in your cluster. -* Use the *Details* tab to see the project details. -* If you have adequate permissions for a project, you can use the *Project Access* tab to provide or revoke _administrator_, _edit_, and _view_ privileges for the project. diff --git a/modules/odc-creating-sample-applications.adoc b/modules/odc-creating-sample-applications.adoc deleted file mode 100644 index 8b16bdd63637..000000000000 --- a/modules/odc-creating-sample-applications.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-creating-sample-applications_{context}"] -= Creating Sample applications - -You can use the sample applications in the *+Add* flow of the *Developer* perspective to create, build, and deploy applications quickly. - -.Prerequisites - -* You have logged in to the {product-title} web console and are in the *Developer* perspective. - -.Procedure - -. In the *+Add* view, click on the *Samples* tile to see the *Samples* page. -. On the *Samples* page, select one of the available sample applications to see the *Create Sample Application* form. -. In the *Create Sample Application Form*: -* In the *Name* field, the deployment name is displayed by default. You can modify this name as required. -* In the *Builder Image Version*, a builder image is selected by default. You can modify this image version by using the *Builder Image Version* drop-down list. -* A sample Git repository URL is added by default. -. Click *Create* to create the sample application. The build status of the sample application is displayed on the *Topology* view. After the sample application is created, you can see the deployment added to the application. diff --git a/modules/odc-creating-vms.adoc b/modules/odc-creating-vms.adoc deleted file mode 100644 index fbb717996b9c..000000000000 --- a/modules/odc-creating-vms.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// * virt/virtual_machines/virt-create-vms.adoc - -:_content-type: PROCEDURE -[id="odc-creating-vms_{context}"] -= Using the Developer perspective to create a virtual machine - -You can create a virtual machine by using the *Developer* perspective of the {product-title} web console. - -.Prerequisites - -* A cluster administrator has installed the {VirtProductName} Operator in your cluster. -* You have logged in to the {product-title} web console and are in the *Developer* perspective. - -.Procedure - -. 
In the *+Add* view, click the *Virtual Machines* tile to see the *Create new VirtualMachine* page. - -. Select a virtual machine type from the *Template catalog*. - -. Click *Quick create VirtualMachine* to create a virtual machine with no further input, or click *Customize VirtualMachine* to see the *Customize Template parameters* page. -+ -[NOTE] -==== -For Microsoft Windows virtual machines, only the *Customize VirtualMachine* option is available. -==== - -. If you choose to customize your virtual machine, you can modify *Storage* and *Optional Parameters* settings on the *Customize Template parameters* page. When you have finished customizing your virtual machine settings, click *Next* to see the *Review and create VirtualMachine* page. Review your settings, then click *Create VirtualMachine*. - -. You can view virtual machines that you have created in the *Topology* view. -** Select your virtual machine in the graph view to see the details panel. -** Click the *List view* icon (image:odc_list_view_icon.png[title="List view icon"]) to see a list of all your virtual machines, and use the *Graph view* icon (image:odc_topology_view_icon.png[title="Topology view icon"]) to switch back to the graph view. \ No newline at end of file diff --git a/modules/odc-customizing-a-perspective-using-YAML-view.adoc b/modules/odc-customizing-a-perspective-using-YAML-view.adoc deleted file mode 100644 index fd589fb1ff6d..000000000000 --- a/modules/odc-customizing-a-perspective-using-YAML-view.adoc +++ /dev/null @@ -1,79 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="odc-customizing-a-perspective-using-YAML-view_{context}"] -= Customizing a perspective using YAML view - -.Prerequisites -* You must have administrator privileges. - -.Procedure -. In the *Administrator* perspective, navigate to *Administration* -> *Cluster Settings*. -. Select the *Configuration* tab and click the *Console (operator.openshift.io)* resource. -. Click the *YAML* tab and make your customization: -.. To enable or disable a perspective, insert the snippet for *Add user perspectives* and edit the YAML code as needed: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -spec: - customization: - perspectives: - - id: admin - visibility: - state: Enabled - - id: dev - visibility: - state: Enabled ----- -.. To hide a perspective based on RBAC permissions, insert the snippet for *Hide user perspectives* and edit the YAML code as needed: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -spec: - customization: - perspectives: - - id: admin - requiresAccessReview: - - group: rbac.authorization.k8s.io - resource: clusterroles - verb: list - - id: dev - state: Enabled ----- -.. To customize a perspective based on your needs, create your own YAML snippet: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -spec: - customization: - perspectives: - - id: admin - visibility: - state: AccessReview - accessReview: - missing: - - resource: deployment - verb: list - required: - - resource: namespaces - verb: list - - id: dev - visibility: - state: Enabled ----- - -. Click *Save*. 
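Returning to _Using the Developer perspective to create a virtual machine_: whether you use *Quick create VirtualMachine* or *Customize VirtualMachine*, the wizard ultimately creates a `VirtualMachine` resource in the selected project. A heavily simplified sketch, assuming the KubeVirt `kubevirt.io/v1` API that {VirtProductName} is built on; the name and container disk image are illustrative assumptions, not what the wizard generates verbatim:

[source,yaml]
----
# Simplified, illustrative VirtualMachine resource; a template-created VM
# includes many more fields than are shown here.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: example-vm
spec:
  running: false
  template:
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
        resources:
          requests:
            memory: 2Gi
      volumes:
        - name: containerdisk
          containerDisk:
            image: quay.io/containerdisks/fedora:latest
----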
diff --git a/modules/odc-customizing-a-perspective-using-form-view.adoc b/modules/odc-customizing-a-perspective-using-form-view.adoc deleted file mode 100644 index 1232b2d453d8..000000000000 --- a/modules/odc-customizing-a-perspective-using-form-view.adoc +++ /dev/null @@ -1,29 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="odc-customizing-a-perspective-using-form-view_{context}"] -= Customizing a perspective using form view - -.Prerequisites -* You must have administrator privileges. - -.Procedure -. In the *Administrator* perspective, navigate to *Administration* -> *Cluster Settings*. -. Select the *Configuration* tab and click the *Console (operator.openshift.io)* resource. -. Click *Actions* -> *Customize* on the right side of the page. -. In the *General* settings, customize the perspective by selecting one of the following options from the dropdown list: -* *Enabled*: Enables the perspective for all users -* *Only visible for privileged users*: Enables the perspective for users who can list all namespaces -* *Only visible for unprivileged users*: Enables the perspective for users who cannot list all namespaces -* *Disabled*: Disables the perspective for all users -+ -A notification opens to confirm that your changes are saved. -+ -image::customizing-user-perspective.png[] -+ -[NOTE] -==== -When you customize the user perspective, your changes are automatically saved and take effect after a browser refresh. -==== diff --git a/modules/odc-customizing-available-cluster-roles-using-developer-perspective.adoc b/modules/odc-customizing-available-cluster-roles-using-developer-perspective.adoc deleted file mode 100644 index cf24c177a2e1..000000000000 --- a/modules/odc-customizing-available-cluster-roles-using-developer-perspective.adoc +++ /dev/null @@ -1,33 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -[id="odc-customizing-available-cluster-roles-using-developer-perspective_{context}"] -= Customizing the available cluster roles using the Developer perspective - -The users of a project are assigned to a cluster role based on their access control. You can access these cluster roles by navigating to the *Project* -> *Project access* -> *Role*. By default, these roles are *Admin*, *Edit*, and *View*. - -To add or edit the cluster roles for a project, you can customize the YAML code of the cluster. - -.Procedure -To customize the different cluster roles of a project: - -. In the *Search* view, use the *Resources* drop-down list to search for `Console`. -. From the available options, select the *Console `operator.openshift.io/v1`*. -+ -.Searching Console resource -image::odc_cluster_console.png[] -. Select *cluster* under the *Name* list. -. Navigate to the *YAML* tab to view and edit the YAML code. -. 
In the YAML code under `spec`, add or edit the list of `availableClusterRoles` and save your changes: -+ -[source,yaml] ----- -spec: - customization: - projectAccess: - availableClusterRoles: - - admin - - edit - - view ----- diff --git a/modules/odc-customizing-user-perspectives.adoc b/modules/odc-customizing-user-perspectives.adoc deleted file mode 100644 index 4171ca8dcee4..000000000000 --- a/modules/odc-customizing-user-perspectives.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: CONCEPT -[id="odc-customizing-user-perspectives_{context}"] -= Customizing user perspectives - -The {product-title} web console provides two perspectives by default, *Administrator* and *Developer*. You might have more perspectives available depending on installed console plugins. As a cluster administrator, you can show or hide a perspective for all users or for a specific user role. Customizing perspectives ensures that users can view only the perspectives that are applicable to their role and tasks. For example, you can hide the *Administrator* perspective from unprivileged users so that they cannot manage cluster resources, users, and projects. Similarly, you can show the *Developer* perspective to users with the developer role so that they can create, deploy, and monitor applications. - -You can also customize the perspective visibility for users based on role-based access control (RBAC). For example, if you customize a perspective for monitoring purposes, which requires specific permissions, you can define that the perspective is visible only to users with required permissions. - -Each perspective includes the following mandatory parameters, which you can edit in the YAML view: - -* `id`: Defines the ID of the perspective to show or hide -* `visibility`: Defines the state of the perspective along with access review checks, if needed -* `state`: Defines whether the perspective is enabled, disabled, or needs an access review check - -[NOTE] -==== -By default, all perspectives are enabled. When you customize the user perspective, your changes are applicable to the entire cluster. -==== diff --git a/modules/odc-deleting-applications-using-developer-perspective.adoc b/modules/odc-deleting-applications-using-developer-perspective.adoc deleted file mode 100644 index e1c7021953ed..000000000000 --- a/modules/odc-deleting-applications-using-developer-perspective.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[id="odc-deleting-applications-using-developer-perspective_{context}"] -= Deleting applications using the Developer perspective - -You can delete an application and all of its associated components using the *Topology* view in the *Developer* perspective: - -. Click the application you want to delete to see the side panel with the resource details of the application. -. Click the *Actions* drop-down menu displayed on the upper right of the panel, and select *Delete Application* to see a confirmation dialog box. -. Enter the name of the application and click *Delete* to delete it. - -You can also right-click the application you want to delete and click *Delete Application* to delete it. 
diff --git a/modules/odc-deleting-helm-release.adoc b/modules/odc-deleting-helm-release.adoc deleted file mode 100644 index 9b6f21fe327e..000000000000 --- a/modules/odc-deleting-helm-release.adoc +++ /dev/null @@ -1,7 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-deleting-helm-release_{context}"] -= Deleting a Helm release - -.Procedure -. In the *Topology* view, right-click the Helm release and select *Delete Helm Release*. -. In the confirmation prompt, enter the name of the chart and click *Delete*. diff --git a/modules/odc-deploying-container-image.adoc b/modules/odc-deploying-container-image.adoc deleted file mode 100644 index 1f4ca9656d00..000000000000 --- a/modules/odc-deploying-container-image.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-deploying-container-image_{context}"] -= Creating applications by deploying container image - -You can use an external image registry or an image stream tag from an internal registry to deploy an application on your cluster. - -.Prerequisites - -* You have logged in to the {product-title} web console and are in the *Developer* perspective. - -.Procedure - -. In the *+Add* view, click *Container images* to view the *Deploy Images* page. -. In the *Image* section: -.. Select *Image name from external registry* to deploy an image from a public or a private registry, or select *Image stream tag from internal registry* to deploy an image from an internal registry. -.. Select an icon for your image in the *Runtime icon* tab. -. In the *General* section: -.. In the *Application name* field, enter a unique name for the application grouping. -.. In the *Name* field, enter a unique name to identify the resources created for this component. -. In the *Resource type* section, select the resource type to generate: -.. Select *Deployment* to enable declarative updates for `Pod` and `ReplicaSet` objects. -.. Select *DeploymentConfig* to define the template for a `Pod` object, and manage deploying new images and configuration sources. -.. Select *Serverless Deployment* to enable scaling to zero when idle. -. Click *Create*. You can view the build status of the application in the *Topology* view. diff --git a/modules/odc-deploying-java-applications.adoc b/modules/odc-deploying-java-applications.adoc deleted file mode 100644 index a8b18fc3d3aa..000000000000 --- a/modules/odc-deploying-java-applications.adoc +++ /dev/null @@ -1,47 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-deploying-java-applications_{context}"] -= Deploying a Java application by uploading a JAR file - -You can use the web console *Developer* perspective to upload a JAR file by using the following options: - -* Navigate to the *+Add* view of the *Developer* perspective, and click *Upload JAR file* in the *From Local Machine* tile. Browse and select your JAR file, or drag a JAR file to deploy your application. - -* Navigate to the *Topology* view and use the *Upload JAR file* option, or drag a JAR file to deploy your application. - -* Use the in-context menu in the *Topology* view, and then use the *Upload JAR file* option to upload your JAR file to deploy your application. 
- -.Prerequisites - -* The Cluster Samples Operator must be installed by a cluster administrator. -* You have access to the {product-title} web console and are in the *Developer* perspective. - -.Procedure - -. In the *Topology* view, right-click anywhere to view the *Add to Project* menu. - -. Hover over the *Add to Project* menu to see the menu options, and then select the *Upload JAR file* option to see the *Upload JAR file* form. Alternatively, you can drag the JAR file into the *Topology* view. - -. In the *JAR file* field, browse for the required JAR file on your local machine and upload it. Alternatively, you can drag the JAR file on to the field. A toast alert is displayed at the top right if an incompatible file type is dragged into the *Topology* view. A field error is displayed if an incompatible file type is dropped on the field in the upload form. - -. The runtime icon and builder image are selected by default. If a builder image is not auto-detected, select a builder image. If required, you can change the version using the *Builder Image Version* drop-down list. - -. Optional: In the *Application Name* field, enter a unique name for your application to use for resource labelling. - -. In the *Name* field, enter a unique component name for the associated resources. - -. Optional: Use the *Resource type* drop-down list to change the resource type. - -. In the *Advanced options* menu, click *Create a Route to the Application* to configure a public URL for your deployed application. - -. Click *Create* to deploy the application. A toast notification is shown to notify you that the JAR file is being uploaded. The toast notification also includes a link to view the build logs. - -[NOTE] -==== -If you attempt to close the browser tab while the build is running, a web alert is displayed. -==== - -After the JAR file is uploaded and the application is deployed, you can view the application in the *Topology* view. diff --git a/modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc b/modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc deleted file mode 100644 index 354d7216d9d6..000000000000 --- a/modules/odc-discovering-and-identifying-operator-backed-bindable-services.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc -:_content-type: PROCEDURE -[id="odc-discovering-and-identifying-operator-backed-bindable-services_{context}"] -= Discovering and identifying Operator-backed bindable services - -As a user, if you want to create a bindable service, you must know which services are bindable. Bindable services are services that the applications can consume easily because they expose their binding data such as credentials, connection details, volume mounts, secrets, and other binding data in a standard way. The *Developer* perspective helps you discover and identify such bindable services. - -.Procedure -* To discover and identify Operator-backed bindable services, consider the following alternative approaches: -+ -** Click *+Add* -> *Developer Catalog* -> *Operator Backed* to see the Operator-backed tiles. Operator-backed services that support service binding features have a *Bindable* badge on the tiles. -** On the left pane of the *Operator Backed* page, select the *Bindable* checkbox. 
-+ -[TIP] -==== -Click the help icon next to *Service binding* to see more information about bindable services. -==== - -** Click *+Add* -> *Add* and search for Operator-backed services. When you click the bindable service, you can view the *Bindable* badge in the side panel to the right. \ No newline at end of file diff --git a/modules/odc-editing-application-configuration-using-developer-perspective.adoc b/modules/odc-editing-application-configuration-using-developer-perspective.adoc deleted file mode 100644 index 68d14e84c2e5..000000000000 --- a/modules/odc-editing-application-configuration-using-developer-perspective.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-editing-application-configuration-using-developer-perspective_{context}"] -= Editing the application configuration using the Developer perspective - -You can use the *Topology* view in the *Developer* perspective to edit the configuration of your application. - -[NOTE] -==== -Currently, only configurations of applications created by using the *From Git*, *Container Image*, *From Catalog*, or *From Dockerfile* options in the *Add* workflow of the *Developer* perspective can be edited. Configurations of applications created by using the CLI or the *YAML* option from the *Add* workflow cannot be edited. -==== - -.Prerequisites -Ensure that you have created an application using the *From Git*, *Container Image*, *From Catalog*, or *From Dockerfile* options in the *Add* workflow. - -.Procedure - -. After you have created an application and it is displayed in the *Topology* view, right-click the application to see the edit options available. -+ -.Edit application -image::odc_edit_app.png[] -+ -. Click *Edit _application-name_* to see the *Add* workflow you used to create the application. The form is pre-populated with the values you had added while creating the application. -. Edit the necessary values for the application. -+ -[NOTE] -==== -You cannot edit the *Name* field in the *General* section, the CI/CD pipelines, or the *Create a route to the application* field in the *Advanced Options* section. -==== -+ -. Click *Save* to restart the build and deploy a new image. -+ -.Edit and redeploy application -image::odc_edit_redeploy.png[] diff --git a/modules/odc-editing-deployments.adoc b/modules/odc-editing-deployments.adoc deleted file mode 100644 index fd6ffb330dc3..000000000000 --- a/modules/odc-editing-deployments.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="odc-editing-deployments_{context}"] -= Editing a deployment by using the Developer perspective - -You can edit the deployment strategy, image settings, environment variables, and advanced options for your deployment by using the *Developer* perspective. - -.Prerequisites - -* You are in the *Developer* perspective of the web console. -* You have created an application. - -.Procedure - -. Navigate to the *Topology* view. Click on your application to see the *Details* panel. -. In the *Actions* drop-down menu, select *Edit Deployment* to view the *Edit Deployment* page. -. You can edit the following *Advanced options* for your deployment: -.. Optional: You can pause rollouts by clicking *Pause rollouts*, and then selecting the *Pause rollouts for this deployment* checkbox. -+ -By pausing rollouts, you can make changes to your application without triggering a rollout. You can resume rollouts at any time. -.. 
Optional: Click *Scaling* to change the number of instances of your image by modifying the number of *Replicas*. -. Click *Save*. \ No newline at end of file diff --git a/modules/odc-editing-health-checks.adoc b/modules/odc-editing-health-checks.adoc deleted file mode 100644 index 4eca4db4905a..000000000000 --- a/modules/odc-editing-health-checks.adoc +++ /dev/null @@ -1,40 +0,0 @@ -// Module included in the following assemblies: -// -// applications/application-health - -:_content-type: PROCEDURE -[id="odc-editing-health-checks"] -= Editing health checks using the Developer perspective - -You can use the *Topology* view to edit health checks added to your application, modify them, or add more health checks. - -.Prerequisites: -* You have switched to the *Developer* perspective in the web console. -* You have created and deployed an application on {product-title} using the *Developer* perspective. -* You have added health checks to your application. - -.Procedure -. In the *Topology* view, right-click your application and select *Edit Health Checks*. Alternatively, in the side panel, click the *Actions* drop-down list and select *Edit Health Checks*. -. In the *Edit Health Checks* page: - -* To remove a previously added health probe, click the *Remove* icon adjoining it. -* To edit the parameters of an existing probe: -+ -.. Click the *Edit Probe* link next to a previously added probe to see the parameters for the probe. -.. Modify the parameters as required, and click the check mark to save your changes. -+ -* To add a new health probe, in addition to existing health checks, click the add probe links. For example, to add a Liveness probe that checks if your container is running: -+ -.. Click *Add Liveness Probe*, to see a form containing the parameters for the probe. -.. Edit the probe parameters as required. -+ -[NOTE] -==== -The `Timeout` value must be lower than the `Period` value. The `Timeout` default value is `1`. The `Period` default value is `10`. -==== -.. Click the check mark at the bottom of the form. The *Liveness Probe Added* message is displayed. - -. Click *Save* to save your modifications and add the additional probes to your container. You are redirected to the *Topology* view. -. In the side panel, verify that the probes have been added by clicking on the deployed pod under the *Pods* section. -. In the *Pod Details* page, click the listed container in the *Containers* section. -. In the *Container Details* page, verify that the Liveness probe - `HTTP Get 10.129.4.65:8080/` has been added to the container, in addition to the earlier existing probes. diff --git a/modules/odc-editing-source-code-using-developer-perspective.adoc b/modules/odc-editing-source-code-using-developer-perspective.adoc deleted file mode 100644 index 803b939c4b27..000000000000 --- a/modules/odc-editing-source-code-using-developer-perspective.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-editing-source-code-using-developer-perspective_{context}"] -= Editing the source code of an application using the Developer perspective - -You can use the *Topology* view in the *Developer* perspective to edit the source code of your application. - -.Procedure - -* In the *Topology* view, click the *Edit Source code* icon, displayed at the bottom-right of the deployed application, to access your source code and modify it. -+ -[NOTE] -==== -This feature is available only when you create applications using the *From Git*, *From Catalog*, and the *From Dockerfile* options. 
-==== -+ -If the *Eclipse Che* Operator is installed in your cluster, a Che workspace (image:odc_che_workspace.png[title="Che Workspace"]) is created and you are directed to the workspace to edit your source code. If it is not installed, you will be directed to the Git repository (image:odc_git_repository.png[title="Git Repository"]) your source code is hosted in. diff --git a/modules/odc-grouping-multiple-components.adoc b/modules/odc-grouping-multiple-components.adoc deleted file mode 100644 index cb3d7c82d9e9..000000000000 --- a/modules/odc-grouping-multiple-components.adoc +++ /dev/null @@ -1,28 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-grouping-multiple-components_{context}"] -= Grouping multiple components within an application - -You can use the *+Add* view to add multiple components or services to your project and use the topology *Graph view* to group applications and resources within an application group. - -.Prerequisites - -* You have created and deployed minimum two or more components on {product-title} using the *Developer* perspective. - -.Procedure - -* To add a service to the existing application group, press kbd:[Shift]+ drag it to the existing application group. Dragging a component and adding it to an application group adds the required labels to the component. -+ -.Application grouping -image::odc_app_grouping_label.png[] - -Alternatively, you can also add the component to an application as follows: - -. Click the service pod to see the *Overview* panel to the right. - -. Click the *Actions* drop-down menu and select *Edit Application Grouping*. - -. In the *Edit Application Grouping* dialog box, click the *Application* drop-down list, and select an appropriate application group. - -. Click *Save* to add the service to the application group. - -You can remove a component from an application group by selecting the component and using kbd:[Shift]+ drag to drag it out of the application group. diff --git a/modules/odc-image-vulnerabilities-breakdown.adoc b/modules/odc-image-vulnerabilities-breakdown.adoc deleted file mode 100644 index 858384d85e81..000000000000 --- a/modules/odc-image-vulnerabilities-breakdown.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc - -:_content-type: CONCEPT -[id="odc-image-vulnerabilities-breakdown_{context}"] -= Image vulnerabilities breakdown - -In the developer perspective, the project dashboard shows the *Image Vulnerabilities* link in the *Status* section. Using this link, you can view the *Image Vulnerabilities breakdown* window, which includes details regarding vulnerable container images and fixable container images. The icon color indicates severity: - -* Red: High priority. Fix immediately. -* Orange: Medium priority. Can be fixed after high-priority vulnerabilities. -* Yellow: Low priority. Can be fixed after high and medium-priority vulnerabilities. - -Based on the severity level, you can prioritize vulnerabilities and fix them in an organized manner. 
- -.Viewing image vulnerabilities -image::odc_image_vulnerabilities.png[] diff --git a/modules/odc-importing-codebase-from-git-to-create-application.adoc b/modules/odc-importing-codebase-from-git-to-create-application.adoc deleted file mode 100644 index 59dcf963fef0..000000000000 --- a/modules/odc-importing-codebase-from-git-to-create-application.adoc +++ /dev/null @@ -1,112 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-importing-codebase-from-git-to-create-application_{context}"] -= Importing a codebase from Git to create an application - -[role="_abstract"] -You can use the *Developer* perspective to create, build, and deploy an application on {product-title} using an existing codebase in GitHub. - -The following procedure walks you through the *From Git* option in the *Developer* perspective to create an application. - -.Procedure - -. In the *+Add* view, click *From Git* in the *Git Repository* tile to see the *Import from git* form. -. In the *Git* section, enter the Git repository URL for the codebase you want to use to create an application. For example, enter the URL of this sample Node.js application `\https://github.com/sclorg/nodejs-ex`. The URL is then validated. -. Optional: You can click *Show Advanced Git Options* to add details such as: - -* *Git Reference* to point to code in a specific branch, tag, or commit to be used to build the application. -* *Context Dir* to specify the subdirectory for the application source code you want to use to build the application. -* *Source Secret* to create a *Secret Name* with credentials for pulling your source code from a private repository. - -. Optional: You can import a `Devfile`, a `Dockerfile`, a `Builder Image`, or a `Serverless Function` through your Git repository to further customize your deployment. -* If your Git repository contains a `Devfile`, a `Dockerfile`, a `Builder Image`, or a `func.yaml`, it is automatically detected and populated in the respective path fields. -* If a `Devfile`, a `Dockerfile`, or a `Builder Image` is detected in the same repository, the `Devfile` is selected by default. -* If `func.yaml` is detected in the Git repository, the *Import Strategy* changes to `Serverless Function`. -* Alternatively, you can create a serverless function by clicking *Create Serverless function* in the *+Add* view using the Git repository URL. -* To edit the file import type and select a different strategy, click the *Edit import strategy* option. -* If multiple `Devfiles`, `Dockerfiles`, or `Builder Images` are detected, to import a specific instance, specify the respective paths relative to the context directory. - - -. After the Git URL is validated, the recommended builder image is selected and marked with a star. If the builder image is not auto-detected, select a builder image. For the `https://github.com/sclorg/nodejs-ex` Git URL, the Node.js builder image is selected by default. -.. Optional: Use the *Builder Image Version* drop-down to specify a version. -.. Optional: Use the *Edit import strategy* option to select a different strategy. -.. Optional: For the Node.js builder image, use the **Run command** field to override the command to run the application. - -. In the *General* section: -.. In the *Application* field, enter a unique name for the application grouping, for example, `myapp`. Ensure that the application name is unique in a namespace. -..
The *Name* field, which identifies the resources created for this application, is automatically populated based on the Git repository URL if there are no existing applications. If there are existing applications, you can choose to deploy the component within an existing application, create a new application, or keep the component unassigned. -+ -[NOTE] -==== -The resource name must be unique in a namespace. Modify the resource name if you get an error. -==== - -. In the *Resources* section, select: - -* *Deployment*, to create an application in plain Kubernetes style. -* *Deployment Config*, to create an {product-title} style application. -* *Serverless Deployment*, to create a Knative service. -+ -[NOTE] -==== -To set the default resource preference for importing an application, go to *User Preferences* -> *Applications* -> *Resource type* field. The *Serverless Deployment* option is displayed in the *Import from Git* form only if the {ServerlessOperatorName} is installed in your cluster. The *Resources* section is not available while creating a serverless function. For further details, refer to the {ServerlessProductName} documentation. -==== - -. In the *Pipelines* section, select *Add Pipeline*, and then click *Show Pipeline Visualization* to see the pipeline for the application. A default pipeline is selected, but you can choose the pipeline you want from the list of available pipelines for the application. -+ -[NOTE] -==== -The *Add pipeline* checkbox is checked and *Configure PAC* is selected by default if the following criteria are fulfilled: - -* The Pipelines Operator is installed -* `pipelines-as-code` is enabled -* A `.tekton` directory is detected in the Git repository -==== - - -. Add a webhook to your repository. If *Configure PAC* is checked and the GitHub App is set up, you can see the *Use GitHub App* and *Setup a webhook* options. If the GitHub App is not set up, you can only see the *Setup a webhook* option: - -.. Go to *Settings* -> *Webhooks* and click *Add webhook*. -.. Set the *Payload URL* to the Pipelines as Code controller public URL. -.. Select the content type as *application/json*. -.. Add a webhook secret and note it in an alternate location. If `openssl` is installed on your local machine, you can use it to generate a random secret. -.. Click *Let me select individual events* and select these events: *Commit comments*, *Issue comments*, *Pull request*, and *Pushes*. -.. Click *Add webhook*. - -. Optional: In the *Advanced Options* section, the *Target port* and the *Create a route to the application* options are selected by default so that you can access your application using a publicly available URL. -+ -If your application does not expose its data on the default public port, 80, clear the check box and set the target port number that you want to expose. - -. Optional: You can use the following advanced options to further customize your application: - -include::snippets/routing-odc.adoc[] -include::snippets/serverless-domain-mapping-odc.adoc[] - -Health Checks:: -Click the *Health Checks* link to add Readiness, Liveness, and Startup probes to your application. All the probes have prepopulated default data; you can add the probes with the default data or customize it as required. -+ -To customize the health probes: -+ -* Click *Add Readiness Probe*, if required, modify the parameters to check if the container is ready to handle requests, and select the check mark to add the probe.
-* Click *Add Liveness Probe*, if required, modify the parameters to check if a container is still running, and select the check mark to add the probe. -* Click *Add Startup Probe*, if required, modify the parameters to check if the application within the container has started, and select the check mark to add the probe. -+ -For each of the probes, you can specify the request type - *HTTP GET*, *Container Command*, or *TCP Socket*, from the drop-down list. The form changes as per the selected request type. You can then modify the default values for the other parameters, such as the success and failure thresholds for the probe, number of seconds before performing the first probe after the container starts, frequency of the probe, and the timeout value. - -Build Configuration and Deployment:: -Click the *Build Configuration* and *Deployment* links to see the respective configuration options. Some options are selected by default; you can customize them further by adding the necessary triggers and environment variables. -+ -For serverless applications, the *Deployment* option is not displayed as the Knative configuration resource maintains the desired state for your deployment instead of a `DeploymentConfig` resource. - -include::snippets/scaling-odc.adoc[] - -Resource Limit:: -Click the *Resource Limit* link to set the amount of *CPU* and *Memory* resources a container is guaranteed or allowed to use when running. - -Labels:: -Click the *Labels* link to add custom labels to your application. - -. Click *Create* to create the application and a success notification is displayed. You can see the build status of the application in the *Topology* view. diff --git a/modules/odc-interacting-with-applications-and-components.adoc b/modules/odc-interacting-with-applications-and-components.adoc deleted file mode 100644 index 19c3f4f05d52..000000000000 --- a/modules/odc-interacting-with-applications-and-components.adoc +++ /dev/null @@ -1,44 +0,0 @@ -[id="odc-interacting-with-applications-and-components_{context}"] -= Interacting with applications and components - -The *Topology* view in the *Developer* perspective of the web console provides the following options to interact with applications and components: - -* Click *Open URL* (image:odc_open_url.png[title="Application Link"]) to see your application exposed by the route on a public URL. -* Click *Edit Source code* to access your source code and modify it. -+ -[NOTE] -==== -This feature is available only when you create applications using the *From Git*, *From Catalog*, and the *From Dockerfile* options. -==== -+ -* Hover your cursor over the lower left icon on the pod to see the name of the latest build and its status. The status of the application build is indicated as *New* (image:odc_build_new.png[title="New Build"]), *Pending* (image:odc_build_pending.png[title="Pending Build"]), *Running* (image:odc_build_running.png[title="Running Build"]), *Completed* (image:odc_build_completed.png[title="Completed Build"]), *Failed* (image:odc_build_failed.png[title="Failed Build"]), and *Canceled* (image:odc_build_canceled.png[title="Canceled Build"]). -* The status or phase of the pod is indicated by different colors and tooltips as: -** *Running* (image:odc_pod_running.png[title="Pod Running"]): The pod is bound to a node and all of the containers are created. At least one container is still running or is in the process of starting or restarting. 
-** *Not Ready* (image:odc_pod_not_ready.png[title="Pod Not Ready"]): The pod is running multiple containers, but not all of the containers are ready. -** *Warning* (image:odc_pod_warning.png[title="Pod Warning"]): Containers in the pod are being terminated; however, the termination did not succeed. Some containers might be in other states. -** *Failed* (image:odc_pod_failed.png[title="Pod Failed"]): All containers in the pod have terminated, but at least one container has terminated in failure. That is, the container either exited with a non-zero status or was terminated by the system. -** *Pending* (image:odc_pod_pending.png[title="Pod Pending"]): The pod is accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run. This includes the time a pod spends waiting to be scheduled as well as the time spent downloading container images over the network. -** *Succeeded* (image:odc_pod_succeeded.png[title="Pod Succeeded"]): All containers in the pod terminated successfully and will not be restarted. -** *Terminating* (image:odc_pod_terminating.png[title="Pod Terminating"]): When a pod is being deleted, it is shown as *Terminating* by some `kubectl` commands. The *Terminating* status is not one of the pod phases. A pod is granted a graceful termination period, which defaults to 30 seconds. -** *Unknown* (image:odc_pod_unknown.png[title="Pod Unknown"]): The state of the pod could not be obtained. This phase typically occurs due to an error in communicating with the node where the pod should be running. - -* After you create an application and an image is deployed, the status is shown as *Pending*. After the application is built, it is displayed as *Running*. -+ -.Application topology -image::odc_application_topology.png[] -+ -The application resource name is appended with indicators for the different types of resource objects as follows: -+ -** *CJ*: `CronJob` -** *D*: `Deployment` -** *DC*: `DeploymentConfig` -** *DS*: `DaemonSet` -** *J*: `Job` -** *P*: `Pod` -** *SS*: `StatefulSet` -** image:odc_serverless_app.png[title="Serverless Application"] (Knative): A serverless application -+ -[NOTE] -==== -Serverless applications take some time to load and display on the *Graph view*. When you deploy a serverless application, it first creates a service resource and then a revision. After that, it is deployed and displayed on the *Graph view*. If it is the only workload, you might be redirected to the *Add* page. After the revision is deployed, the serverless application is displayed on the *Graph view*. -==== diff --git a/modules/odc-labels-and-annotations-used-for-topology-view.adoc b/modules/odc-labels-and-annotations-used-for-topology-view.adoc deleted file mode 100644 index 9aac81e55fdd..000000000000 --- a/modules/odc-labels-and-annotations-used-for-topology-view.adoc +++ /dev/null @@ -1,11 +0,0 @@ -[id="odc-labels-and-annotations-used-for-topology-view_{context}"] -= Labels and annotations used for the Topology view - -The *Topology* view uses the following labels and annotations: - -Icon displayed in the node:: Icons in the node are defined by looking for matching icons using the `app.openshift.io/runtime` label, followed by the `app.kubernetes.io/name` label. This matching is done using a predefined set of icons. -Link to the source code editor or the source:: The `app.openshift.io/vcs-uri` annotation is used to create links to the source code editor. -Node Connector:: The `app.openshift.io/connects-to` annotation is used to connect the nodes.
-App grouping:: The `app.kubernetes.io/part-of=` label is used to group the applications, services, and components. - -For detailed information on the labels and annotations {product-title} applications must use, see link:https://github.com/redhat-developer/app-labels/blob/master/labels-annotation-for-openshift.adoc[Guidelines for labels and annotations for OpenShift applications]. diff --git a/modules/odc-monitoring-application-health-using-developer-perspective.adoc b/modules/odc-monitoring-application-health-using-developer-perspective.adoc deleted file mode 100644 index 643921638b08..000000000000 --- a/modules/odc-monitoring-application-health-using-developer-perspective.adoc +++ /dev/null @@ -1,14 +0,0 @@ -// Module included in the following assemblies: -// -// applications/application-health - -[id="odc-monitoring-application-health-using-developer-perspective"] -= Monitoring application health using the Developer perspective - -You can use the *Developer* perspective to add three types of health probes to your container to ensure that your application is healthy: - -* Use the Readiness probe to check if the container is ready to handle requests. -* Use the Liveness probe to check if the container is running. -* Use the Startup probe to check if the application within the container has started. - -You can add health checks either while creating and deploying an application, or after you have deployed an application. diff --git a/modules/odc-monitoring-health-checks.adoc b/modules/odc-monitoring-health-checks.adoc deleted file mode 100644 index bc8418043473..000000000000 --- a/modules/odc-monitoring-health-checks.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// applications/application-health - -:_content-type: PROCEDURE -[id="odc-monitoring-health-checks"] -= Monitoring health check failures using the Developer perspective - -In case an application health check fails, you can use the *Topology* view to monitor these health check violations. - -.Prerequisites: -* You have switched to the *Developer* perspective in the web console. -* You have created and deployed an application on {product-title} using the *Developer* perspective. -* You have added health checks to your application. - -.Procedure -. In the *Topology* view, click on the application node to see the side panel. -. Click the *Observe* tab to see the health check failures in the *Events (Warning)* section. -. Click the down arrow adjoining *Events (Warning)* to see the details of the health check failure. diff --git a/modules/odc-monitoring-your-app-vulnerabilities.adoc b/modules/odc-monitoring-your-app-vulnerabilities.adoc deleted file mode 100644 index cb9b1e104df5..000000000000 --- a/modules/odc-monitoring-your-app-vulnerabilities.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-monitoring-your-application-image-vulnerabilities-metrics_{context}"] -= Monitoring your application and image vulnerabilities metrics - -After you create applications in your project and deploy them, use the *Developer* perspective in the web console to see the metrics for your application dependency vulnerabilities across your cluster. 
The metrics help you to analyze the following image vulnerabilities in detail: - -* Total count of vulnerable images in a selected project -* Severity-based counts of all vulnerable images in a selected project -* Drilldown into severity to obtain the details, such as count of vulnerabilities, count of fixable vulnerabilities, and number of affected pods for each vulnerable image - -.Prerequisites -* You have installed the Red Hat Quay Container Security operator from the Operator Hub. -+ -[NOTE] -==== -The Red Hat Quay Container Security operator detects vulnerabilities by scanning the images that are in the quay registry. -==== - -.Procedure - -. For a general overview of the image vulnerabilities, on the navigation panel of the *Developer* perspective, click *Project* to see the project dashboard. - -. Click *Image Vulnerabilities* in the *Status* section. The window that opens displays details such as *Vulnerable Container Images* and *Fixable Container Images*. - -. For a detailed vulnerabilities overview, click the *Vulnerabilities* tab on the project dashboard. - -.. To get more detail about an image, click its name. - -.. View the default graph with all types of vulnerabilities in the *Details* tab. - -.. Optional: Click the toggle button to view a specific type of vulnerability. For example, click *App dependency* to see vulnerabilities specific to application dependency. - -.. Optional: You can filter the list of vulnerabilities based on their *Severity* and *Type* or sort them by *Severity*, *Package*, *Type*, *Source*, *Current Version*, and *Fixed in Version*. - -.. Click a *Vulnerability* to get its associated details: -+ -* *Base image* vulnerabilities display information from a Red Hat Security Advisory (RHSA). -* *App dependency* vulnerabilities display information from the Snyk security application. diff --git a/modules/odc-monitoring-your-application-metrics.adoc b/modules/odc-monitoring-your-application-metrics.adoc deleted file mode 100644 index 14d5741ae108..000000000000 --- a/modules/odc-monitoring-your-application-metrics.adoc +++ /dev/null @@ -1,27 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-monitoring-your-application-metrics_{context}"] -= Monitoring your application metrics - -After you create applications in your project and deploy them, you can use the *Topology* view in the *Developer* perspective to see the alerts and metrics for your application. Critical and warning alerts for your application are indicated on the workload node in the *Topology* view. - -.Procedure -To see the alerts for your workload: - -. In the *Topology* view, click the workload to see the workload details in the right panel. -. Click the *Observe* tab to see the critical and warning alerts for the application; graphs for metrics, such as CPU, memory, and bandwidth usage; and all the events for the application. -+ -[NOTE] -==== -Only critical and warning alerts in the *Firing* state are displayed in the *Topology* view. Alerts in the *Silenced*, *Pending* and *Not Firing* states are not displayed. -==== -+ -.Monitoring application metrics -image::odc_app_metrics.png[] -+ -.. Click the alert listed in the right panel to see the alert details in the *Alert Details* page. -.. Click any of the charts to go to the *Metrics* tab to see the detailed metrics for the application. -.. 
Click *View monitoring dashboard* to see the monitoring dashboard for that application. diff --git a/modules/odc-monitoring-your-project-metrics.adoc b/modules/odc-monitoring-your-project-metrics.adoc deleted file mode 100644 index d922cca20cf4..000000000000 --- a/modules/odc-monitoring-your-project-metrics.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-monitoring-your-project-metrics_{context}"] -= Monitoring your project metrics - -After you create applications in your project and deploy them, you can use the *Developer* perspective in the web console to see the metrics for your project. - -.Procedure - -. On the left navigation panel of the *Developer* perspective, click *Observe* to see the *Dashboard*, *Metrics*, *Alerts*, and *Events* for your project. -+ -. Optional: Use the *Dashboard* tab to see graphs depicting the following application metrics: -+ --- -* CPU usage -* Memory usage -* Bandwidth consumption -* Network-related information such as the rate of transmitted and received packets and the rate of dropped packets. --- -+ -In the *Dashboard* tab, you can access the Kubernetes compute resources dashboards. -+ -.Observe dashboard -image::odc_observe_dashboard.png[] -+ -[NOTE] -==== -In the *Dashboard* list, *Kubernetes / Compute Resources / Namespace (Pods)* dashboard is selected by default. -==== -+ -Use the following options to see further details: - -** Select a dashboard from the *Dashboard* list to see the filtered metrics. All dashboards produce additional sub-menus when selected, except *Kubernetes / Compute Resources / Namespace (Pods)*. -** Select an option from the *Time Range* list to determine the time frame for the data being captured. -** Set a custom time range by selecting *Custom time range* from the *Time Range* list. You can input or select the *From* and *To* dates and times. Click *Save* to save the custom time range. -** Select an option from the *Refresh Interval* list to determine the time period after which the data is refreshed. -** Hover your cursor over the graphs to see specific details for your pod. -** Click *Inspect* located in the upper-right corner of every graph to see any particular graph details. The graph details appear in the *Metrics* tab. - -. Optional: Use the *Metrics* tab to query for the required project metric. -+ -.Monitoring metrics -image::odc_project_metrics.png[] -+ -.. In the *Select Query* list, select an option to filter the required details for your project. The filtered metrics for all the application pods in your project are displayed in the graph. The pods in your project are also listed below. -.. From the list of pods, clear the colored square boxes to remove the metrics for specific pods to further filter your query result. -.. Click *Show PromQL* to see the Prometheus query. You can further modify this query with the help of prompts to customize the query and filter the metrics you want to see for that namespace. -.. Use the drop-down list to set a time range for the data being displayed. You can click *Reset Zoom* to reset it to the default time range. -.. Optional: In the *Select Query* list, select *Custom Query* to create a custom Prometheus query and filter relevant metrics. - -. Optional: Use the *Alerts* tab to do the following tasks: -+ --- -* See the rules that trigger alerts for the applications in your project. 
-* Identify the alerts firing in the project. -* Silence such alerts if required. --- -+ -.Monitoring alerts -image::odc_project_alerts.png[] -+ -Use the following options to see further details: - -** Use the *Filter* list to filter the alerts by their *Alert State* and *Severity*. - -** Click on an alert to go to the details page for that alert. In the *Alerts Details* page, you can click *View Metrics* to see the metrics for the alert. - -** Use the *Notifications* toggle adjoining an alert rule to silence all the alerts for that rule, and then select the duration for which the alerts will be silenced from the *Silence for* list. -You must have the permissions to edit alerts to see the *Notifications* toggle. - -** Use the *Options* menu {kebab} adjoining an alert rule to see the details of the alerting rule. - -. Optional: Use the *Events* tab to see the events for your project. -+ -.Monitoring events -image::odc_project_events.png[] -+ -You can filter the displayed events using the following options: - -** In the *Resources* list, select a resource to see events for that resource. -** In the *All Types* list, select a type of event to see events relevant to that type. -** Search for specific events using the *Filter events by names or messages* field. diff --git a/modules/odc-providing-project-permissions-using-developer-perspective.adoc b/modules/odc-providing-project-permissions-using-developer-perspective.adoc deleted file mode 100644 index b4c3b81e5bcc..000000000000 --- a/modules/odc-providing-project-permissions-using-developer-perspective.adoc +++ /dev/null @@ -1,31 +0,0 @@ -// Module included in the following assemblies: -// -// applications/projects/working-with-projects.adoc - -:_content-type: PROCEDURE -[id="odc-providing-project-permissions-using-developer-perspective_{context}"] -= Providing access permissions to your project using the Developer perspective - -You can use the *Project* view in the *Developer* perspective to grant or revoke access permissions to your project. - -.Procedure -To add users to your project and provide *Admin*, *Edit*, or *View* access to them: - -. In the *Developer* perspective, navigate to the *Project* view. -. In the *Project* page, select the *Project Access* tab. -. Click *Add Access* to add a new row of permissions to the default ones. -+ -.Project permissions -image::odc_project_permissions.png[] -. Enter the user name, click the *Select a role* drop-down list, and select an appropriate role. -. Click *Save* to add the new permissions. - -You can also use: - -* The *Select a role* drop-down list, to modify the access permissions of an existing user. -* The *Remove Access* icon, to completely remove the access permissions of an existing user to the project. - -[NOTE] -==== -Advanced role-based access control is managed in the *Roles* and *Roles Binding* views in the *Administrator* perspective. -==== diff --git a/modules/odc-removing-services-from-application.adoc b/modules/odc-removing-services-from-application.adoc deleted file mode 100644 index 278b08f12986..000000000000 --- a/modules/odc-removing-services-from-application.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-removing-services-from-your-application_{context}"] -= Removing services from your application - -In the topology *Graph view* remove a service from your application using the context menu. - -.Procedure - -1. Right-click on a service in an application group in the topology *Graph view* to display the context menu. - -2. 
Select *Delete Deployment* to delete the service. -+ -.Deleting deployment option -image::odc_deleting_deployment.png[] diff --git a/modules/odc-rolling-back-helm-release.adoc b/modules/odc-rolling-back-helm-release.adoc deleted file mode 100644 index 28d7dd3fc37e..000000000000 --- a/modules/odc-rolling-back-helm-release.adoc +++ /dev/null @@ -1,19 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-rolling-back-helm-release_{context}"] -= Rolling back a Helm release - -If a release fails, you can rollback the Helm release to a previous version. - -.Procedure -To rollback a release using the *Helm* view: - -. In the *Developer* perspective, navigate to the *Helm* view to see the *Helm Releases* in the namespace. -. Click the *Options* menu {kebab} adjoining the listed release, and select *Rollback*. -. In the *Rollback Helm Release* page, select the *Revision* you want to rollback to and click *Rollback*. -. In the *Helm Releases* page, click on the chart to see the details and resources for that release. -. Go to the *Revision History* tab to see all the revisions for the chart. -+ -.Helm revision history -image::odc_helm_revision_history.png[] -+ -. If required, you can further use the *Options* menu {kebab} adjoining a particular revision and select the revision to rollback to. diff --git a/modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc b/modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc deleted file mode 100644 index 755bbea5cabb..000000000000 --- a/modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc +++ /dev/null @@ -1,19 +0,0 @@ -[id="odc-scaling-application-pods-and-checking-builds-and-routes_{context}"] -= Scaling application pods and checking builds and routes - -The *Topology* view provides the details of the deployed components in the *Overview* panel. You can use the *Overview* and *Resources* tabs to scale the application pods, check build status, services, and routes as follows: - - -* Click on the component node to see the *Overview* panel to the right. Use the *Overview* tab to: - -** Scale your pods using the up and down arrows to increase or decrease the number of instances of the application manually. For serverless applications, the pods are automatically scaled down to zero when idle and scaled up depending on the channel traffic. -** Check the *Labels*, *Annotations*, and *Status* of the application. - -* Click the *Resources* tab to: - -** See the list of all the pods, view their status, access logs, and click on the pod to see the pod details. -** See the builds, their status, access logs, and start a new build if needed. -** See the services and routes used by the component. - -+ -For serverless applications, the *Resources* tab provides information on the revision, routes, and the configurations used for that component. diff --git a/modules/odc-setting-user-preferences.adoc b/modules/odc-setting-user-preferences.adoc deleted file mode 100644 index 6a34d7c0f093..000000000000 --- a/modules/odc-setting-user-preferences.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// *web_console/adding-user-preferences.adoc -:_content-type: PROCEDURE -[id="odc-setting-user-preferences_{context}"] -= Setting user preferences - -You can set the default user preferences for your cluster. - -.Procedure - -. Log in to the {product-title} web console using your login credentials. -. Use the masthead to access the user preferences under the user profile. -. 
In the *General* section: -.. In the *Perspective* field, you can set the default perspective you want to be logged in to. You can select the *Administrator* or the *Developer* perspective as required. If a perspective is not selected, you are logged into the perspective you last visited. -.. In the *Project* field, select a project you want to work in. The console will default to the project every time you log in. -.. In the *Topology* field, you can set the topology view to default to the graph or list view. If not selected, the console defaults to the last view you used. -.. In the *Create/Edit resource method* field, you can set a preference for creating or editing a resource. If both the form and YAML options are available, the console defaults to your selection. -. In the *Language* section, select *Default browser language* to use the default browser language settings. Otherwise, select the language that you want to use for the console. -. In the *Applications* section: -.. You can view the default *Resource type*. For example, if the {ServerlessOperatorName} is installed, the default resource type is *Serverless Deployment*. Otherwise, the default resource type is *Deployment*. -.. You can select another resource type to be the default resource type from the *Resource Type* field. diff --git a/modules/odc-splitting-traffic-between-revisions-using-developer-perspective.adoc b/modules/odc-splitting-traffic-between-revisions-using-developer-perspective.adoc deleted file mode 100644 index 54a747f172d1..000000000000 --- a/modules/odc-splitting-traffic-between-revisions-using-developer-perspective.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * serverless/develop/serverless-traffic-management.adoc - -:_content-type: PROCEDURE -[id="odc-splitting-traffic-between-revisions-using-developer-perspective_{context}"] -= Managing traffic between revisions by using the {product-title} web console - -.Prerequisites - -* The {ServerlessOperatorName} and Knative Serving are installed on your cluster. -* You have logged in to the {product-title} web console. - -.Procedure - -To split traffic between multiple revisions of an application in the *Topology* view: - -. Click the Knative service to see its overview in the side panel. -. Click the *Resources* tab, to see a list of *Revisions* and *Routes* for the service. -+ -.Serverless application -image::odc-serverless-app.png[] - -. Click the service, indicated by the *S* icon at the top of the side panel, to see an overview of the service details. -. Click the *YAML* tab and modify the service configuration in the YAML editor, and click *Save*. For example, change the `timeoutseconds` from 300 to 301 . This change in the configuration triggers a new revision. In the *Topology* view, the latest revision is displayed and the *Resources* tab for the service now displays the two revisions. -. In the *Resources* tab, click btn:[Set Traffic Distribution] to see the traffic distribution dialog box: -.. Add the split traffic percentage portion for the two revisions in the *Splits* field. -.. Add tags to create custom URLs for the two revisions. -.. Click *Save* to see two nodes representing the two revisions in the Topology view. 
-+ -.Serverless application revisions -image::odc-serverless-revisions.png[] diff --git a/modules/odc-starting-recreate-deployment.adoc b/modules/odc-starting-recreate-deployment.adoc deleted file mode 100644 index e43a1b4fa9de..000000000000 --- a/modules/odc-starting-recreate-deployment.adoc +++ /dev/null @@ -1,25 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="odc-starting-recreate-deployment_{context}"] -= Starting a recreate deployment using the Developer perspective - -You can switch the deployment strategy from the default rolling update to a recreate update using the *Developer* perspective in the web console. - -.Prerequisites -* Ensure that you are in the *Developer* perspective of the web console. -* Ensure that you have created an application using the *Add* view and see it deployed in the *Topology* view. - -.Procedure - -To switch to a recreate update strategy and to upgrade an application: - -. In the *Actions* drop-down menu, select *Edit Deployment Config* to see the deployment configuration details of the application. -. In the YAML editor, change the `spec.strategy.type` to `Recreate` and click *Save*. -. In the *Topology* view, select the node to see the *Overview* tab in the side panel. The *Update Strategy* is now set to *Recreate*. -. Use the *Actions* drop-down menu to select *Start Rollout* to start an update using the recreate strategy. The recreate strategy first terminates pods for the older version of the application and then spins up pods for the new version. -+ -.Recreate update -image::odc-recreate-update.png[] diff --git a/modules/odc-starting-rolling-deployment.adoc b/modules/odc-starting-rolling-deployment.adoc deleted file mode 100644 index 8aab6eee4d26..000000000000 --- a/modules/odc-starting-rolling-deployment.adoc +++ /dev/null @@ -1,22 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/deployments/deployment-strategies.adoc - -:_content-type: PROCEDURE -[id="odc-starting-rolling-deployment_{context}"] -= Starting a rolling deployment using the Developer perspective - -You can upgrade an application by starting a rolling deployment. - -.Prerequisites - -* You are in the *Developer* perspective of the web console. -* You have created an application. - -.Procedure - -. In the *Topology* view of the *Developer* perspective, click on the application node to see the *Overview* tab in the side panel. Note that the *Update Strategy* is set to the default *Rolling* strategy. -. In the *Actions* drop-down menu, select *Start Rollout* to start a rolling update. The rolling deployment spins up the new version of the application and then terminates the old one. -+ -.Rolling update -image::odc-rolling-update.png[] diff --git a/modules/odc-upgrading-helm-release.adoc b/modules/odc-upgrading-helm-release.adoc deleted file mode 100644 index 050569b6b4c0..000000000000 --- a/modules/odc-upgrading-helm-release.adoc +++ /dev/null @@ -1,11 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-upgrading-helm-release_{context}"] -= Upgrading a Helm release - -You can upgrade a Helm release to upgrade to a new chart version or update your release configuration. - -.Procedure - -. In the *Topology* view, select the Helm release to see the side panel. -. Click *Actions* -> *Upgrade Helm Release*. -. In the *Upgrade Helm Release* page, select the *Chart Version* you want to upgrade to, and then click *Upgrade* to create another Helm release. 
The *Helm Releases* page displays the two revisions. diff --git a/modules/odc-using-quickstarts.adoc b/modules/odc-using-quickstarts.adoc deleted file mode 100644 index 3689b4133438..000000000000 --- a/modules/odc-using-quickstarts.adoc +++ /dev/null @@ -1,19 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-using-quickstarts_{context}"] -= Creating applications using Quick Starts - -The *Quick Starts* page shows you how to create, import, and run applications on {product-title}, with step-by-step instructions and tasks. - -.Prerequisites - -* You have logged in to the {product-title} web console and are in the *Developer* perspective. - -.Procedure - -. In the *+Add* view, click the *View all quick starts* link to view the *Quick Starts* page. -. In the *Quick Starts* page, click the tile for the quick start that you want to use. -. Click *Start* to begin the quick start. \ No newline at end of file diff --git a/modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc b/modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc deleted file mode 100644 index c89608474865..000000000000 --- a/modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-using-the-developer-catalog-to-add-services-or-components_{context}"] -= Using the Developer Catalog to add services or components to your application - -You use the Developer Catalog to deploy applications and services based on Operator backed services such as Databases, Builder Images, and Helm Charts. The Developer Catalog contains a collection of application components, services, event sources, or source-to-image builders that you can add to your project. Cluster administrators can customize the content made available in the catalog. - -.Procedure - -. In the *Developer* perspective, navigate to the *+Add* view and from the *Developer Catalog* tile, click *All Services* to view all the available services in the *Developer Catalog*. -. Under *All Services*, select the kind of service or the component you need to add to your project. For this example, select *Databases* to list all the database services and then click *MariaDB* to see the details for the service. -+ -. Click *Instantiate Template* to see an automatically populated template with details for the *MariaDB* service, and then click *Create* to create and view the MariaDB service in the *Topology* view. -+ -.MariaDB in Topology -image::odc_devcatalog_toplogy.png[] diff --git a/modules/odc-using-the-devfile-registry.adoc b/modules/odc-using-the-devfile-registry.adoc deleted file mode 100644 index f1a3cce14efd..000000000000 --- a/modules/odc-using-the-devfile-registry.adoc +++ /dev/null @@ -1,15 +0,0 @@ -:_content-type: PROCEDURE -[id="odc-using-the-devfile-registry_{context}"] -= Using the Devfile registry to access devfiles - -You can use the devfiles in the *+Add* flow of the *Developer* perspective to create an application. The *+Add* flow provides a complete integration with the https://registry.devfile.io/viewer[devfile community registry]. A devfile is a portable YAML file that describes your development environment without needing to configure it from scratch. Using the *Devfile registry*, you can use a pre-configured devfile to create an application. - -.Procedure - -. 
Navigate to *Developer Perspective* -> *+Add* -> *Developer Catalog* -> *All Services*. A list of all the available services in the *Developer Catalog* is displayed. - -. Under *All Services*, select *Devfiles* to browse for devfiles that support a particular language or framework. Alternatively, you can use the keyword filter to search for a particular devfile using their name, tag, or description. - -. Click the devfile you want to use to create an application. The devfile tile displays the details of the devfile, including the name, description, provider, and the documentation of the devfile. - -. Click *Create* to create an application and view the application in the *Topology* view. diff --git a/modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc b/modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc deleted file mode 100644 index 54ea7ae5c5ed..000000000000 --- a/modules/odc-verifying-the-status-of-your-service-binding-from-the-topology-view.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc -:_content-type: PROCEDURE -[id="odc-verifying-the-status-of-your-service-binding-from-the-topology-view_{context}"] -= Verifying the status of your service binding from the Topology view - -The *Developer* perspective helps you verify the status of your service binding through the *Topology* view. - -.Procedure - -. If a service binding was successful, click the binding connector. A side panel appears displaying the *Connected* status under the *Details* tab. -+ -Optionally, you can view the *Connected* status on the following pages from the *Developer* perspective: -+ -** The *ServiceBindings* page. -** The *ServiceBinding details* page. In addition, the page title displays a *Connected* badge. -. If a service binding was unsuccessful, the binding connector shows a red arrowhead and a red cross in the middle of the connection. Click this connector to view the *Error* status in the side panel under the *Details* tab. Optionally, click the *Error* status to view specific information about the underlying problem. -+ -You can also view the *Error* status and a tooltip on the following pages from the *Developer* perspective: -+ -** The *ServiceBindings* page. -** The *ServiceBinding details* page. In addition, the page title displays an *Error* badge. - - -[TIP] -==== -In the *ServiceBindings* page, use the *Filter* dropdown to list the service bindings based on their status. -==== - diff --git a/modules/odc-viewing-application-topology.adoc b/modules/odc-viewing-application-topology.adoc deleted file mode 100644 index a0962e5a5f93..000000000000 --- a/modules/odc-viewing-application-topology.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/odc-viewing-application-composition-using-topology-view.adoc - -:_content-type: CONCEPT -[id="odc-viewing-application-topology_{context}"] - -= Viewing the topology of your application - -You can navigate to the *Topology* view using the left navigation panel in the *Developer* perspective. After you deploy an application, you are directed automatically to the *Graph view* where you can see the status of the application pods, quickly access the application on a public URL, access the source code to modify it, and see the status of your last build. 
You can zoom in and out to see more details for a particular application. - -The *Topology* view provides you the option to monitor your applications using the *List* view. Use the *List view* icon (image:odc_list_view_icon.png[title="List view icon"]) to see a list of all your applications and use the *Graph view* icon (image:odc_topology_view_icon.png[title="Topology view icon"]) to switch back to the graph view. - -You can customize the views as required using the following: - -* Use the *Find by name* field to find the required components. Search results may appear outside of the visible area; click *Fit to Screen* from the lower-left toolbar to resize the *Topology* view to show all components. -* Use the *Display Options* drop-down list to configure the *Topology* view of the various application groupings. The options are available depending on the types of components deployed in the project: - -** *Mode* (*Connectivity* or *Consumption*) -*** Connectivity: Select to show all the connections between the different nodes in the topology. -*** Consumption: Select to show the resource consumption for all nodes in the topology. -** *Expand* group -*** Virtual Machines: Toggle to show or hide the virtual machines. -*** Application Groupings: Clear to condense the application groups into cards with an overview of an application group and alerts associated with it. -*** Helm Releases: Clear to condense the components deployed as Helm Release into cards with an overview of a given release. -*** Knative Services: Clear to condense the Knative Service components into cards with an overview of a given component. -*** Operator Groupings: Clear to condense the components deployed with an Operator into cards with an overview of the given group. -** *Show* elements based on *Pod Count* or *Labels* -*** Pod Count: Select to show the number of pods of a component in the component icon. -*** Labels: Toggle to show or hide the component labels. - -The *Topology* view also provides you the *Export application* option to download your application in the ZIP file format. You can then import the downloaded application to another project or cluster. For more details, see _Exporting an application to another project or cluster_ in the _Additional resources_ section. diff --git a/modules/odc-visualizing-the-binding-connections-to-resources.adoc b/modules/odc-visualizing-the-binding-connections-to-resources.adoc deleted file mode 100644 index aac8acc11d34..000000000000 --- a/modules/odc-visualizing-the-binding-connections-to-resources.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc - -:_content-type: PROCEDURE -[id="odc-visualizing-the-binding-connections-to-resources_{context}"] -= Visualizing the binding connections to resources - -As a user, use *Label Selector* in the *Topology* view to visualize a service binding and simplify the process of binding applications to backing services. When creating `ServiceBinding` resources, specify labels by using *Label Selector* to find and connect applications instead of using the name of the application. The Service Binding Operator then consumes these `ServiceBinding` resources and specified labels to find the applications to create a service binding with. - - -[TIP] -==== -To navigate to a list of all connected resources, click the label selector associated with the `ServiceBinding` resource. 
-==== - -To view the *Label Selector*, consider the following approaches: - -** After you import a `ServiceBinding` resource, view the *Label Selector* associated with the service binding on the *ServiceBinding details* page. - -+ -.ServiceBinding details page -image::odc-label-selector-sb-details.png[] - -[NOTE] -==== -To use *Label Selector* and to create one or more connections at once, you must import the YAML file of the `ServiceBinding` resource. -==== - -** After the connection is established and when you click the binding connector, the service binding connector *Details* side panel appears. You can view the *Label Selector* associated with the service binding on this panel. - -+ -.Topology label selector side panel -image::odc-label-selector-topology-side-panel.png[] - -+ -[NOTE] -==== -When you delete a binding connector (a single connection within *Topology* along with a service binding), the action removes all connections that are tied to the deleted service binding. While deleting a binding connector, a confirmation dialog appears, which informs that all connectors will be deleted. - -.Delete ServiceBinding confirmation dialog -image::odc-delete-service-binding.png[] - -==== \ No newline at end of file diff --git a/modules/odc_con_customizing-a-developer-catalog-or-its-sub-catalogs.adoc b/modules/odc_con_customizing-a-developer-catalog-or-its-sub-catalogs.adoc deleted file mode 100644 index b58c402f6151..000000000000 --- a/modules/odc_con_customizing-a-developer-catalog-or-its-sub-catalogs.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: CONCEPT - -[id="odc_con_customizing-a-developer-catalog-or-its-sub-catalogs_{context}"] -= Developer catalog and sub-catalog customization - -As a cluster administrator, you have the ability to organize and manage the Developer catalog or its sub-catalogs. You can enable or disable the sub-catalog types or disable the entire developer catalog. - -The `developerCatalog.types` object includes the following parameters that you must define in a snippet to use them in the YAML view: - -* `state`: Defines if a list of developer catalog types should be enabled or disabled. -* `enabled`: Defines a list of developer catalog types (sub-catalogs) that are visible to users. -* `disabled`: Defines a list of developer catalog types (sub-catalogs) that are not visible to users. - -You can enable or disable the following developer catalog types (sub-catalogs) using the YAML view or the form view. - -* `Builder Images` -* `Templates` -* `Devfiles` -* `Samples` -* `Helm Charts` -* `Event Sources` -* `Event Sinks` -* `Operator Backed` - - - diff --git a/modules/odc_con_example-yaml-file-changes.adoc b/modules/odc_con_example-yaml-file-changes.adoc deleted file mode 100644 index dbb3d5323838..000000000000 --- a/modules/odc_con_example-yaml-file-changes.adoc +++ /dev/null @@ -1,63 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: CONCEPT - -[id="con_example-yaml-file-changes_{context}"] -= Example YAML file changes - -You can dynamically add the following snippets in the YAML editor for customizing a developer catalog. - -Use the following snippet to display all the sub-catalogs by setting the _state_ type to *Enabled*. -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -... 
-spec: - customization: - developerCatalog: - categories: - types: - state: Enabled ----- - -Use the following snippet to disable all sub-catalogs by setting the _state_ type to *Disabled*: -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -... -spec: - customization: - developerCatalog: - categories: - types: - state: Disabled ----- - -Use the following snippet when a cluster administrator defines a list of sub-catalogs, which are enabled in the Web Console. -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -... -spec: - customization: - developerCatalog: - categories: - types: - state: Enabled - enabled: - - BuilderImage - - Devfile - - HelmChart - - ... ----- \ No newline at end of file diff --git a/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-form-view.adoc b/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-form-view.adoc deleted file mode 100644 index d90c2e04a87c..000000000000 --- a/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-form-view.adoc +++ /dev/null @@ -1,37 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE -[id="odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-form-view_{context}"] -= Customizing a developer catalog or its sub-catalogs using the form view - -You can customize a developer catalog by using the form view in the Web Console. - -.Prerequisites - -* An OpenShift web console session with cluster administrator privileges. -* The Developer perspective is enabled. - -.Procedure - -. In the *Administrator* perspective, navigate to *Administration* -> *Cluster Settings*. -. Select the *Configuration* tab and click the *Console (operator.openshift.io)* resource. -. Click *Actions* -> *Customize*. -. Enable or disable items in the *Pre-pinned navigation items*, *Add page*, and *Developer Catalog* sections. -+ -.Verification -After you have customized the developer catalog, your changes are automatically saved in the system and take effect in the browser after a refresh. -+ -image::odc_customizing_developer_catalog.png[] - -[NOTE] -==== -As an administrator, you can define the navigation items that appear by default for all users. You can also reorder the navigation items. -==== - -[TIP] -==== -You can use a similar procedure to customize Web UI items such as Quick starts, Cluster roles, and Actions. -==== - diff --git a/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-yaml-view.adoc b/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-yaml-view.adoc deleted file mode 100644 index a7f20417246c..000000000000 --- a/modules/odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-yaml-view.adoc +++ /dev/null @@ -1,49 +0,0 @@ -// Module included in the following assembly: -// -// * web_console/customizing-the-web-console.adoc - -:_content-type: PROCEDURE - -[id="odc_customizing-a-developer-catalog-or-its-sub-catalogs-using-the-yaml-view_{context}"] -= Customizing a developer catalog or its sub-catalogs using the YAML view - -You can customize a developer catalog by editing the YAML content in the YAML view. - -.Prerequisites - -* An OpenShift web console session with cluster administrator privileges. - -.Procedure - -. In the *Administrator* perspective of the web console, navigate to *Administration* -> *Cluster Settings*. -. 
Select the *Configuration* tab, click the *Console (operator.openshift.io)* resource and view the *Details* page. -. Click the *YAML* tab to open the editor and edit the YAML content as needed. -+ -For example, to disable a developer catalog type, insert the following snippet that defines a list of disabled developer catalog resources: -+ -[source,yaml] ----- -apiVersion: operator.openshift.io/v1 -kind: Console -metadata: - name: cluster -... -spec: - customization: - developerCatalog: - categories: - types: - state: Disabled - disabled: - - BuilderImage - - Devfile - - HelmChart -... ----- - -. Click *Save*. - -[NOTE] -==== -By default, the developer catalog types are enabled in the Administrator view of the Web Console. -==== \ No newline at end of file diff --git a/modules/odo-core-concepts.adoc b/modules/odo-core-concepts.adoc deleted file mode 100644 index 7881342a7c2b..000000000000 --- a/modules/odo-core-concepts.adoc +++ /dev/null @@ -1,44 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/understanding-odo.adoc - -:_content-type: CONCEPT -[id="odo-core-concepts_{context}"] - -= odo core concepts - -`odo` abstracts Kubernetes concepts into terminology that is familiar to developers: - -Application:: -A typical application, developed with a link:https://www.redhat.com/en/topics/cloud-native-apps[cloud-native approach], that is used to perform a particular task. -+ -Examples of _applications_ include online video streaming, online shopping, and hotel reservation systems. - -Component:: -A set of Kubernetes resources that can run and be deployed separately. A cloud-native application is a collection of small, independent, loosely coupled _components_. -+ -Examples of _components_ include an API back-end, a web interface, and a payment back-end. - -Project:: -A single unit containing your source code, tests, and libraries. - -Context:: -A directory that contains the source code, tests, libraries, and `odo` config files for a single component. - -URL:: -A mechanism to expose a component for access from outside the cluster. - -Storage:: -Persistent storage in the cluster. It persists the data across restarts and component rebuilds. - -Service:: -An external application that provides additional functionality to a component. -+ -Examples of _services_ include PostgreSQL, MySQL, Redis, and RabbitMQ. -+ -In `{odo-title}`, services are provisioned from the OpenShift Service Catalog and must be enabled within your cluster. - -devfile:: -An open standard for defining containerized development environments that enables developer tools to simplify and accelerate workflows. For more information, see the documentation at link:https://devfile.io[]. -+ -You can connect to publicly available _devfile_ registries, or you can install a Secure Registry. 
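To make the _devfile_ concept more concrete, the following is a minimal sketch of a devfile for a Node.js component. The component name, container image, and commands are illustrative assumptions rather than the contents of any particular registry stack:

.Example devfile (illustrative sketch)
[source,yaml]
----
# Illustrative devfile sketch; the name, image, and commands are hypothetical
schemaVersion: 2.1.0
metadata:
  name: my-nodejs-component
  language: JavaScript
components:
  - name: runtime
    container:
      image: registry.access.redhat.com/ubi8/nodejs-16:latest # assumed image
      memoryLimit: 512Mi
      endpoints:
        - name: http-3000
          targetPort: 3000
commands:
  - id: install
    exec:
      component: runtime
      commandLine: npm install
      workingDir: ${PROJECT_SOURCE}
      group:
        kind: build
        isDefault: true
  - id: run
    exec:
      component: runtime
      commandLine: npm start
      workingDir: ${PROJECT_SOURCE}
      group:
        kind: run
        isDefault: true
----

`odo` reads the components and commands from a file like this to build and run the component; the `build` and `run` groups identify which commands are used by default.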
\ No newline at end of file diff --git a/modules/odo-key-features.adoc b/modules/odo-key-features.adoc deleted file mode 100644 index 37deac5bdfcb..000000000000 --- a/modules/odo-key-features.adoc +++ /dev/null @@ -1,18 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/understanding-odo.adoc - -:_content-type: CONCEPT -[id="odo-features_{context}"] -= odo key features - -`{odo-title}` is designed to be a developer-friendly interface to Kubernetes, with the ability to: - -* Quickly deploy applications on a Kubernetes cluster by creating a new manifest or using an existing one -* Use commands to easily create and update the manifest, without the need to understand and maintain Kubernetes configuration files -* Provide secure access to applications running on a Kubernetes cluster -* Add and remove additional storage for applications on a Kubernetes cluster -* Create Operator-backed services and link your application to them -* Create a link between multiple microservices that are deployed as `odo` components -* Remotely debug applications you deployed using `odo` in your IDE -* Easily test applications deployed on Kubernetes using `odo` diff --git a/modules/odo-listing-components.adoc b/modules/odo-listing-components.adoc deleted file mode 100644 index 658cc3329c20..000000000000 --- a/modules/odo-listing-components.adoc +++ /dev/null @@ -1,60 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/understanding-odo.adoc - -:_content-type: PROCEDURE -[id="odo-listing-components_{context}"] - -= Listing components in odo - -`odo` uses the portable _devfile_ format to describe components and their related URLs, storage, and services. `odo` can connect to various devfile registries to download devfiles for different languages and frameworks. See the documentation for the `odo registry` command for more information on how to manage the registries used by `odo` to retrieve devfile information. - - -You can list all the _devfiles_ available of the different registries with the `odo catalog list components` command. - -.Procedure - -. Log in to the cluster with `{odo-title}`: -+ -[source,terminal] ----- -$ odo login -u developer -p developer ----- - -. List the available `{odo-title}` components: -+ -[source,terminal] ----- -$ odo catalog list components ----- -+ -.Example output -[source,terminal] ----- -Odo Devfile Components: -NAME DESCRIPTION REGISTRY -dotnet50 Stack with .NET 5.0 DefaultDevfileRegistry -dotnet60 Stack with .NET 6.0 DefaultDevfileRegistry -dotnetcore31 Stack with .NET Core 3.1 DefaultDevfileRegistry -go Stack with the latest Go version DefaultDevfileRegistry -java-maven Upstream Maven and OpenJDK 11 DefaultDevfileRegistry -java-openliberty Java application Maven-built stack using the Open Liberty ru... DefaultDevfileRegistry -java-openliberty-gradle Java application Gradle-built stack using the Open Liberty r... DefaultDevfileRegistry -java-quarkus Quarkus with Java DefaultDevfileRegistry -java-springboot Spring Boot® using Java DefaultDevfileRegistry -java-vertx Upstream Vert.x using Java DefaultDevfileRegistry -java-websphereliberty Java application Maven-built stack using the WebSphere Liber... DefaultDevfileRegistry -java-websphereliberty-gradle Java application Gradle-built stack using the WebSphere Libe... DefaultDevfileRegistry -java-wildfly Upstream WildFly DefaultDevfileRegistry -java-wildfly-bootable-jar Java stack with WildFly in bootable Jar mode, OpenJDK 11 and... 
DefaultDevfileRegistry -nodejs Stack with Node.js 14 DefaultDevfileRegistry -nodejs-angular Stack with Angular 12 DefaultDevfileRegistry -nodejs-nextjs Stack with Next.js 11 DefaultDevfileRegistry -nodejs-nuxtjs Stack with Nuxt.js 2 DefaultDevfileRegistry -nodejs-react Stack with React 17 DefaultDevfileRegistry -nodejs-svelte Stack with Svelte 3 DefaultDevfileRegistry -nodejs-vue Stack with Vue 3 DefaultDevfileRegistry -php-laravel Stack with Laravel 8 DefaultDevfileRegistry -python Python Stack with Python 3.7 DefaultDevfileRegistry -python-django Python3.7 with Django DefaultDevfileRegistry ----- diff --git a/modules/odo-telemetry.adoc b/modules/odo-telemetry.adoc deleted file mode 100644 index 31a98dcc9404..000000000000 --- a/modules/odo-telemetry.adoc +++ /dev/null @@ -1,16 +0,0 @@ -// Module included in the following assemblies: -// -// * cli_reference/developer_cli_odo/understanding-odo.adoc - -:_content-type: CONCEPT -[id="odo-telemetry_{context}"] - -= Telemetry in odo - -`odo` collects information about how it is being used, including metrics on the operating system, RAM, CPU, number of cores, `odo` version, errors, success/failures, and how long `odo` commands take to complete. - -You can modify your telemetry consent by using the `odo preference` command: - -* `odo preference set ConsentTelemetry true` consents to telemetry. -* `odo preference unset ConsentTelemetry` disables telemetry. -* `odo preference view` shows the current preferences. \ No newline at end of file diff --git a/modules/olm-about-catalogs.adoc b/modules/olm-about-catalogs.adoc deleted file mode 100644 index 189387c32234..000000000000 --- a/modules/olm-about-catalogs.adoc +++ /dev/null @@ -1,30 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-rh-catalogs.adoc - -:_content-type: CONCEPT -[id="olm-about-catalogs_{context}"] -= About Operator catalogs - -An Operator catalog is a repository of metadata that Operator Lifecycle Manager (OLM) can query to discover and install Operators and their dependencies on a cluster. OLM always installs Operators from the latest version of a catalog. - -An index image, based on the Operator bundle format, is a containerized snapshot of a catalog. It is an immutable artifact that contains the database of pointers to a set of Operator manifest content. A catalog can reference an index image to source its content for OLM on the cluster. - -As catalogs are updated, the latest versions of Operators change, and older versions may be removed or altered. In addition, when OLM runs on an {product-title} cluster in a restricted network environment, it is unable to access the catalogs directly from the internet to pull the latest content. - -As a cluster administrator, you can create your own custom index image, either based on a Red Hat-provided catalog or from scratch, which can be used to source the catalog content on the cluster. Creating and updating your own index image provides a method for customizing the set of Operators available on the cluster, while also avoiding the aforementioned restricted network environment issues. - -[IMPORTANT] -==== -Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. As a result, Operators are unable to use removed APIs starting with the version of {product-title} that uses the Kubernetes version that removed the API. 
- -If your cluster is using custom catalogs, see xref:../../operators/operator_sdk/osdk-working-bundle-images#osdk-control-compat_osdk-working-bundle-images[Controlling Operator compatibility with {product-title} versions] for more details about how Operator authors can update their projects to help avoid workload issues and prevent incompatible upgrades. -==== - -[NOTE] -==== -Support for the legacy _package manifest format_ for Operators, including custom catalogs that were using the legacy format, is removed in {product-title} 4.8 and later. - -When creating custom catalog images, previous versions of {product-title} 4 required using the `oc adm catalog build` command, which was deprecated for several releases and is now removed. With the availability of Red Hat-provided index images starting in {product-title} 4.6, catalog builders must use the `opm index` command to manage index images. -==== -//Check on pulling this note during the 4.10 to 4.11 version scrub. diff --git a/modules/olm-about-opm.adoc b/modules/olm-about-opm.adoc deleted file mode 100644 index 994f5d8b2533..000000000000 --- a/modules/olm-about-opm.adoc +++ /dev/null @@ -1,12 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-packaging-format.adoc -// * cli_reference/opm/cli-opm-install.adoc - -:_content-type: CONCEPT -[id="olm-about-opm_{context}"] -= About the opm CLI - -The `opm` CLI tool is provided by the Operator Framework for use with the Operator bundle format. This tool allows you to create and maintain catalogs of Operators from a list of Operator bundles that are similar to software repositories. The result is a container image which can be stored in a container registry and then installed on a cluster. - -A catalog contains a database of pointers to Operator manifest content that can be queried through an included API that is served when the container image is run. On {product-title}, Operator Lifecycle Manager (OLM) can reference the image in a catalog source, defined by a `CatalogSource` object, which polls the image at regular intervals to enable frequent updates to installed Operators on the cluster. diff --git a/modules/olm-accessing-images-private-registries.adoc b/modules/olm-accessing-images-private-registries.adoc deleted file mode 100644 index dea921865fcd..000000000000 --- a/modules/olm-accessing-images-private-registries.adoc +++ /dev/null @@ -1,216 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/admin/managing-custom-catalogs.adoc - -:_content-type: PROCEDURE -[id="olm-accessing-images-private-registries_{context}"] -= Accessing images for Operators from private registries - -If certain images relevant to Operators managed by Operator Lifecycle Manager (OLM) are hosted in an authenticated container image registry, also known as a private registry, OLM and OperatorHub are unable to pull the images by default. To enable access, you can create a pull secret that contains the authentication credentials for the registry. By referencing one or more pull secrets in a catalog source, OLM can handle placing the secrets in the Operator and catalog namespace to allow installation. - -Other images required by an Operator or its Operands might require access to private registries as well. OLM does not handle placing the secrets in target tenant namespaces for this scenario, but authentication credentials can be added to the global cluster pull secret or individual namespace service accounts to enable the required access. 
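For orientation, the pull secret that a catalog source references is a standard `kubernetes.io/dockerconfigjson` secret in the `openshift-marketplace` namespace. The following is a minimal sketch with a hypothetical secret name and a placeholder credentials value; the procedure below creates an equivalent object with `oc create secret generic`:

.Example pull secret (illustrative sketch)
[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: my-registry-secret <1>
  namespace: openshift-marketplace
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: <base64_encoded_registry_credentials> <2>
----
<1> Hypothetical secret name. Use the same name that the `CatalogSource` object lists under `spec.secrets`.
<2> Base64-encoded contents of a registry credentials file, such as the `podman` or `docker` authentication file described in the procedure.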
- -The following types of images should be considered when determining whether Operators managed by OLM have appropriate pull access: - -Index images:: A `CatalogSource` object can reference an index image, which uses the Operator bundle format and is a catalog source packaged as a container image hosted in an image registry. If an index image is hosted in a private registry, a secret can be used to enable pull access. - -Bundle images:: Operator bundle images are metadata and manifests packaged as container images that represent a unique version of an Operator. If any bundle images referenced in a catalog source are hosted in one or more private registries, a secret can be used to enable pull access. - -Operator and Operand images:: If an Operator installed from a catalog source uses a private image, either for the Operator image itself or one of the Operand images it watches, the Operator will fail to install because the deployment will not have access to the required registry authentication. Referencing secrets in a catalog source does not enable OLM to place the secrets in target tenant namespaces in which Operands are installed. -+ -Instead, the authentication details can be added to the global cluster pull secret in the `openshift-config` namespace, which provides access to all namespaces on the cluster. Alternatively, if providing access to the entire cluster is not permissible, the pull secret can be added to the `default` service accounts of the target tenant namespaces. - -.Prerequisites - -* At least one of the following hosted in a private registry: -** An index image or catalog image. -** An Operator bundle image. -** An Operator or Operand image. - -.Procedure - -. Create a secret for each required private registry. - -.. Log in to the private registry to create or update your registry credentials file: -+ -[source,terminal] ----- -$ podman login : ----- -+ -[NOTE] -==== -The file path of your registry credentials can be different depending on the container tool used to log in to the registry. For the `podman` CLI, the default location is `${XDG_RUNTIME_DIR}/containers/auth.json`. For the `docker` CLI, the default location is `/root/.docker/config.json`. -==== - -.. It is recommended to include credentials for only one registry per secret, and manage credentials for multiple registries in separate secrets. Multiple secrets can be included in a `CatalogSource` object in later steps, and {product-title} will merge the secrets into a single virtual credentials file for use during an image pull. -+ -A registry credentials file can, by default, store details for more than one registry or for multiple repositories in one registry. Verify the current contents of your file. For example: -+ -.File storing credentials for multiple registries -[source,json] ----- -{ - "auths": { - "registry.redhat.io": { - "auth": "FrNHNydQXdzclNqdg==" - }, - "quay.io": { - "auth": "fegdsRib21iMQ==" - }, - "https://quay.io/my-namespace/my-user/my-image": { - "auth": "eWfjwsDdfsa221==" - }, - "https://quay.io/my-namespace/my-user": { - "auth": "feFweDdscw34rR==" - }, - "https://quay.io/my-namespace": { - "auth": "frwEews4fescyq==" - } - } -} ----- -+ -Because this file is used to create secrets in later steps, ensure that you are storing details for only one registry per file. This can be accomplished by using either of the following methods: -+ --- -* Use the `podman logout ` command to remove credentials for additional registries until only the one registry you want remains.
-* Edit your registry credentials file and separate the registry details to be stored in multiple files. For example: -+ -.File storing credentials for one registry -[source,json] ----- -{ - "auths": { - "registry.redhat.io": { - "auth": "FrNHNydQXdzclNqdg==" - } - } -} ----- -+ -.File storing credentials for another registry -[source,json] ----- -{ - "auths": { - "quay.io": { - "auth": "Xd2lhdsbnRib21iMQ==" - } - } -} ----- --- - -.. Create a secret in the `openshift-marketplace` namespace that contains the authentication credentials for a private registry: -+ -[source,terminal] ----- -$ oc create secret generic \ - -n openshift-marketplace \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- -+ -Repeat this step to create additional secrets for any other required private registries, updating the `--from-file` flag to specify another registry credentials file path. - -. Create or update an existing `CatalogSource` object to reference one or more secrets: -+ -[source,yaml] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: CatalogSource -metadata: - name: my-operator-catalog - namespace: openshift-marketplace -spec: - sourceType: grpc - secrets: <1> - - "" - - "" - grpcPodConfig: - securityContextConfig: <2> - image: ://: - displayName: My Operator Catalog - publisher: - updateStrategy: - registryPoll: - interval: 30m ----- -<1> Add a `spec.secrets` section and specify any required secrets. -<2> Specify the value of `legacy` or `restricted`. If the field is not set, the default value is `legacy`. In a future {product-title} release, it is planned that the default value will be `restricted`. If your catalog cannot run with `restricted` permissions, it is recommended that you manually set this field to `legacy`. - -. If any Operator or Operand images that are referenced by a subscribed Operator require access to a private registry, you can either provide access to all namespaces in the cluster, or individual target tenant namespaces. - -* To provide access to all namespaces in the cluster, add authentication details to the global cluster pull secret in the `openshift-config` namespace. -+ -[WARNING] -==== -Cluster resources must adjust to the new global pull secret, which can temporarily limit the usability of the cluster. -==== - -.. Extract the `.dockerconfigjson` file from the global pull secret: -+ -[source,terminal] ----- -$ oc extract secret/pull-secret -n openshift-config --confirm ----- - -.. Update the `.dockerconfigjson` file with your authentication credentials for the required private registry or registries and save it as a new file: -+ -[source,terminal] ----- -$ cat .dockerconfigjson | \ - jq --compact-output '.auths["://"] |= . + {"auth":""}' \//<1> - > new_dockerconfigjson ----- -<1> Replace `:/` with the private registry details and `` with your authentication credentials. - -.. Update the global pull secret with the new file: -+ -[source,terminal] ----- -$ oc set data secret/pull-secret -n openshift-config \ - --from-file=.dockerconfigjson=new_dockerconfigjson ----- - -* To update an individual namespace, add a pull secret to the service account for the Operator that requires access in the target tenant namespace. - -.. Recreate the secret that you created for the `openshift-marketplace` in the tenant namespace: -+ -[source,terminal] ----- -$ oc create secret generic \ - -n \ - --from-file=.dockerconfigjson= \ - --type=kubernetes.io/dockerconfigjson ----- - -.. 
Verify the name of the service account for the Operator by searching the tenant namespace: -+ -[source,terminal] ----- -$ oc get sa -n <1> ----- -<1> If the Operator was installed in an individual namespace, search that namespace. If the Operator was installed for all namespaces, search the `openshift-operators` namespace. -+ -.Example output -[source,terminal] ----- -NAME SECRETS AGE -builder 2 6m1s -default 2 6m1s -deployer 2 6m1s -etcd-operator 2 5m18s <1> ----- -<1> Service account for an installed etcd Operator. - -.. Link the secret to the service account for the Operator: -+ -[source,terminal] ----- -$ oc secrets link \ - -n \ - \ - --for=pull ----- diff --git a/modules/olm-adding-new-crd-version.adoc b/modules/olm-adding-new-crd-version.adoc deleted file mode 100644 index 1ec946661d9e..000000000000 --- a/modules/olm-adding-new-crd-version.adoc +++ /dev/null @@ -1,42 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -:_content-type: PROCEDURE -[id="olm-dependency-resolution-adding-new-crd-version_{context}"] -= Adding a new CRD version - -.Procedure - -To add a new version of a CRD to your Operator: - -. Add a new entry in the CRD resource under the `versions` section of your CSV. -+ -For example, if the current CRD has a version `v1alpha1` and you want to add a new version `v1beta1` and mark it as the new storage version, add a new entry for `v1beta1`: -+ -[source,yaml] ----- -versions: - - name: v1alpha1 - served: true - storage: false - - name: v1beta1 <1> - served: true - storage: true ----- -<1> New entry. - -. Ensure the referencing version of the CRD in the `owned` section of your CSV is updated if the CSV intends to use the new version: -+ -[source,yaml] ----- -customresourcedefinitions: - owned: - - name: cluster.example.com - version: v1beta1 <1> - kind: cluster - displayName: Cluster ----- -<1> Update the `version`. - -. Push the updated CRD and CSV to your bundle. diff --git a/modules/olm-approving-pending-upgrade.adoc b/modules/olm-approving-pending-upgrade.adoc deleted file mode 100644 index ca7c420ffcd1..000000000000 --- a/modules/olm-approving-pending-upgrade.adoc +++ /dev/null @@ -1,28 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/admin/olm-upgrading-operators.adoc -// * virt/updating/upgrading-virt.adoc - -:_content-type: PROCEDURE -[id="olm-approving-pending-upgrade_{context}"] -= Manually approving a pending Operator update - -If an installed Operator has the approval strategy in its subscription set to *Manual*, when new updates are released in its current update channel, the update must be manually approved before installation can begin. - -.Prerequisites - -* An Operator previously installed using Operator Lifecycle Manager (OLM). - -.Procedure - -. In the *Administrator* perspective of the {product-title} web console, navigate to *Operators -> Installed Operators*. - -. Operators that have a pending update display a status with *Upgrade available*. Click the name of the Operator you want to update. - -. Click the *Subscription* tab. Any update requiring approval are displayed next to *Upgrade Status*. For example, it might display *1 requires approval*. - -. Click *1 requires approval*, then click *Preview Install Plan*. - -. Review the resources that are listed as available for update. When satisfied, click *Approve*. - -. Navigate back to the *Operators -> Installed Operators* page to monitor the progress of the update. 
When complete, the status changes to *Succeeded* and *Up to date*. diff --git a/modules/olm-arch-catalog-operator.adoc b/modules/olm-arch-catalog-operator.adoc deleted file mode 100644 index aa47be10dafd..000000000000 --- a/modules/olm-arch-catalog-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-arch.adoc -// * operators/operator-reference.adoc - -[id="olm-arch-catalog-operator_{context}"] -= Catalog Operator - -The Catalog Operator is responsible for resolving and installing cluster service versions (CSVs) and the required resources they specify. It is also responsible for watching catalog sources for updates to packages in channels and upgrading them, automatically if desired, to the latest available versions. - -To track a package in a channel, you can create a `Subscription` object configuring the desired package, channel, and the `CatalogSource` object you want to use for pulling updates. When updates are found, an appropriate `InstallPlan` object is written into the namespace on behalf of the user. - -The Catalog Operator uses the following workflow: - -. Connect to each catalog source in the cluster. -. Watch for unresolved install plans created by a user, and if found: -.. Find the CSV matching the name requested and add the CSV as a resolved resource. -.. For each managed or required CRD, add the CRD as a resolved resource. -.. For each required CRD, find the CSV that manages it. -. Watch for resolved install plans and create all of the discovered resources for it, if approved by a user or automatically. -. Watch for catalog sources and subscriptions and create install plans based on them. diff --git a/modules/olm-arch-catalog-registry.adoc b/modules/olm-arch-catalog-registry.adoc deleted file mode 100644 index 1b3d72f338d7..000000000000 --- a/modules/olm-arch-catalog-registry.adoc +++ /dev/null @@ -1,11 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-arch.adoc -// * operators/operator-reference.adoc - -[id="olm-arch-catalog-registry_{context}"] -= Catalog Registry - -The Catalog Registry stores CSVs and CRDs for creation in a cluster and stores metadata about packages and channels. - -A _package manifest_ is an entry in the Catalog Registry that associates a package identity with sets of CSVs. Within a package, channels point to a particular CSV. Because CSVs explicitly reference the CSV that they replace, a package manifest provides the Catalog Operator with all of the information that is required to update a CSV to the latest version in a channel, stepping through each intermediate version. diff --git a/modules/olm-arch-olm-operator.adoc b/modules/olm-arch-olm-operator.adoc deleted file mode 100644 index a0c1520ac49c..000000000000 --- a/modules/olm-arch-olm-operator.adoc +++ /dev/null @@ -1,21 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-arch.adoc -// * operators/operator-reference.adoc - -[id="olm-arch-olm-operator_{context}"] -= OLM Operator - -The OLM Operator is responsible for deploying applications defined by CSV resources after the required resources specified in the CSV are present in the cluster. - -The OLM Operator is not concerned with the creation of the required resources; you can choose to manually create these resources using the CLI or using the Catalog Operator. 
This separation of concern allows users incremental buy-in in terms of how much of the OLM framework they choose to leverage for their application. - -The OLM Operator uses the following workflow: - -. Watch for cluster service versions (CSVs) in a namespace and check that requirements are met. -. If requirements are met, run the install strategy for the CSV. -+ -[NOTE] -==== -A CSV must be an active member of an Operator group for the install strategy to run. -==== diff --git a/modules/olm-arch-os-support.adoc b/modules/olm-arch-os-support.adoc deleted file mode 100644 index dc138b536ce3..000000000000 --- a/modules/olm-arch-os-support.adoc +++ /dev/null @@ -1,41 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/operator_sdk/osdk-generating-csvs.adoc - -[id="olm-arch-os-support_{context}"] -= Architecture and operating system support for Operators - -The following strings are supported in Operator Lifecycle Manager (OLM) on {product-title} when labeling or filtering Operators that support multiple architectures and operating systems: - -.Architectures supported on {product-title} -[options="header"] -|=== -|Architecture |String - -|AMD64 -|`amd64` - -|{ibmpowerProductName} -|`ppc64le` - -|{ibmzProductName} -|`s390x` -|=== - -.Operating systems supported on {product-title} -[options="header"] -|=== -|Operating system |String - -|Linux -|`linux` - -|z/OS -|`zos` -|=== - -[NOTE] -==== -Different versions of {product-title} and other Kubernetes-based distributions -might support a different set of architectures and operating systems. -==== diff --git a/modules/olm-architecture.adoc b/modules/olm-architecture.adoc deleted file mode 100644 index 1c5e75104ebe..000000000000 --- a/modules/olm-architecture.adoc +++ /dev/null @@ -1,66 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-understanding-olm.adoc -// * operators/operator-reference.adoc - -[id="olm-architecture_{context}"] -ifeval::["{context}" != "cluster-operators-ref"] -= Component responsibilities -endif::[] -ifeval::["{context}" == "cluster-operators-ref"] -= CRDs -endif::[] - -Operator Lifecycle Manager (OLM) is composed of two Operators: the OLM Operator and the Catalog Operator. - -Each of these Operators is responsible for managing the custom resource definitions (CRDs) that are the basis for the OLM framework: - -.CRDs managed by OLM and Catalog Operators -[cols="2a,1a,1a,8a",options="header"] -|=== -|Resource |Short name |Owner |Description - -|`ClusterServiceVersion` (CSV) -|`csv` -|OLM -|Application metadata: name, version, icon, required resources, installation, and so on. - -|`InstallPlan` -|`ip` -|Catalog -|Calculated list of resources to be created to automatically install or upgrade a CSV. - -|`CatalogSource` -|`catsrc` -|Catalog -|A repository of CSVs, CRDs, and packages that define an application. - -|`Subscription` -|`sub` -|Catalog -|Keeps CSVs up to date by tracking a channel in a package. - -|`OperatorGroup` -|`og` -|OLM -|Configures all Operators deployed in the same namespace as the `OperatorGroup` object to watch for their custom resource (CR) in a list of namespaces or cluster-wide. 
-|=== - -Each of these Operators is also responsible for creating the following resources: - -.Resources created by OLM and Catalog Operators -[options="header"] -|=== -|Resource |Owner - -|`Deployments` -.4+.^|OLM - -|`ServiceAccounts` -|`(Cluster)Roles` -|`(Cluster)RoleBindings` - -|`CustomResourceDefinitions` (CRDs) -.2+.^|Catalog -|`ClusterServiceVersions` -|=== diff --git a/modules/olm-bundle-format.adoc b/modules/olm-bundle-format.adoc deleted file mode 100644 index 2b5ed934c0c9..000000000000 --- a/modules/olm-bundle-format.adoc +++ /dev/null @@ -1,106 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm-packaging-format.adoc - -[id="olm-bundle-format_{context}"] -= Bundle format - -The _bundle format_ for Operators is a packaging format introduced by the Operator Framework. To improve scalability and to better enable upstream users hosting their own catalogs, the bundle format specification simplifies the distribution of Operator metadata. - -An Operator bundle represents a single version of an Operator. On-disk _bundle manifests_ are containerized and shipped as a _bundle image_, which is a non-runnable container image that stores the Kubernetes manifests and Operator metadata. Storage and distribution of the bundle image is then managed using existing container tools like `podman` and `docker` and container registries such as Quay. - -Operator metadata can include: - -* Information that identifies the Operator, for example its name and version. -* Additional information that drives the UI, for example its icon and some example custom resources (CRs). -* Required and provided APIs. -* Related images. - -When loading manifests into the Operator Registry database, the following requirements are validated: - -* The bundle must have at least one channel defined in the annotations. -* Every bundle has exactly one cluster service version (CSV). -* If a CSV owns a custom resource definition (CRD), that CRD must exist in the bundle. - -[id="olm-bundle-format-manifests_{context}"] -== Manifests - -Bundle manifests refer to a set of Kubernetes manifests that define the deployment and RBAC model of the Operator. - -A bundle includes one CSV per directory and typically the CRDs that define the owned APIs of the CSV in its `/manifests` directory. - -.Example bundle format layout -[source,terminal] ----- -etcd -├── manifests -│ ├── etcdcluster.crd.yaml -│ └── etcdoperator.clusterserviceversion.yaml -│ └── secret.yaml -│ └── configmap.yaml -└── metadata - └── annotations.yaml - └── dependencies.yaml ----- - -[discrete] -[id="olm-bundle-format-manifests-optional_{context}"] -=== Additionally supported objects - -The following object types can also be optionally included in the `/manifests` directory of a bundle: - -.Supported optional object types -[.small] -* `ClusterRole` -* `ClusterRoleBinding` -* `ConfigMap` -* `ConsoleCLIDownload` -* `ConsoleLink` -* `ConsoleQuickStart` -* `ConsoleYamlSample` -* `PodDisruptionBudget` -* `PriorityClass` -* `PrometheusRule` -* `Role` -* `RoleBinding` -* `Secret` -* `Service` -* `ServiceAccount` -* `ServiceMonitor` -* `VerticalPodAutoscaler` - -When these optional objects are included in a bundle, Operator Lifecycle Manager (OLM) can create them from the bundle and manage their lifecycle along with the CSV: - -.Lifecycle for optional objects -* When the CSV is deleted, OLM deletes the optional object. -* When the CSV is upgraded: -** If the name of the optional object is the same, OLM updates it in place. 
-** If the name of the optional object has changed between versions, OLM deletes and recreates it. - -[id="olm-bundle-format-annotations_{context}"] -== Annotations - -A bundle also includes an `annotations.yaml` file in its `/metadata` directory. This file defines higher level aggregate data that helps describe the format and package information about how the bundle should be added into an index of bundles: - -.Example `annotations.yaml` -[source,yaml] ----- -annotations: - operators.operatorframework.io.bundle.mediatype.v1: "registry+v1" <1> - operators.operatorframework.io.bundle.manifests.v1: "manifests/" <2> - operators.operatorframework.io.bundle.metadata.v1: "metadata/" <3> - operators.operatorframework.io.bundle.package.v1: "test-operator" <4> - operators.operatorframework.io.bundle.channels.v1: "beta,stable" <5> - operators.operatorframework.io.bundle.channel.default.v1: "stable" <6> ----- -<1> The media type or format of the Operator bundle. The `registry+v1` format means it contains a CSV and its associated Kubernetes objects. -<2> The path in the image to the directory that contains the Operator manifests. This label is reserved for future use and currently defaults to `manifests/`. The value `manifests.v1` implies that the bundle contains Operator manifests. -<3> The path in the image to the directory that contains metadata files about the bundle. This label is reserved for future use and currently defaults to `metadata/`. The value `metadata.v1` implies that this bundle has Operator metadata. -<4> The package name of the bundle. -<5> The list of channels the bundle is subscribing to when added into an Operator Registry. -<6> The default channel an Operator should be subscribed to when installed from a registry. - -[NOTE] -==== -In case of a mismatch, the `annotations.yaml` file is authoritative because the on-cluster Operator Registry that relies on these annotations only has access to this file. -==== diff --git a/modules/olm-catalog-source-and-psa.adoc b/modules/olm-catalog-source-and-psa.adoc deleted file mode 100644 index 27e505179598..000000000000 --- a/modules/olm-catalog-source-and-psa.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/admin/olm-managing-custom-catalogs.adoc - -:_content-type: CONCEPT -[id="olm-catalog-sources-and-psa_{context}"] -= Catalog sources and pod security admission - -_Pod security admission_ was introduced in {product-title} 4.11 to ensure pod security standards. Catalog sources built using the SQLite-based catalog format and a version of the `opm` CLI tool released before {product-title} 4.11 cannot run under restricted pod security enforcement. - -In {product-title} {product-version}, namespaces do not have restricted pod security enforcement by default and the default catalog source security mode is set to `legacy`. - -Default restricted enforcement for all namespaces is planned for inclusion in a future {product-title} release. When restricted enforcement occurs, the security context of the pod specification for catalog source pods must match the restricted pod security standard. If your catalog source image requires a different pod security standard, the pod security admissions label for the namespace must be explicitly set. - -[NOTE] -==== -If you do not want to run your SQLite-based catalog source pods as restricted, you do not need to update your catalog source in {product-title} {product-version}. 
- -However, it is recommended that you take action now to ensure your catalog sources run under restricted pod security enforcement. If you do not take action to ensure your catalog sources run under restricted pod security enforcement, your catalog sources might not run in future {product-title} releases. -==== - -As a catalog author, you can enable compatibility with restricted pod security enforcement by completing either of the following actions: - -* Migrate your catalog to the file-based catalog format. -* Update your catalog image with a version of the `opm` CLI tool released with {product-title} 4.11 or later. - -[NOTE] -==== -The SQLite database catalog format is deprecated, but still supported by Red Hat. In a future release, the SQLite database format will not be supported, and catalogs will need to migrate to the file-based catalog format. As of {product-title} 4.11, the default Red Hat-provided Operator catalog is released in the file-based catalog format. File-based catalogs are compatible with restricted pod security enforcement. -==== - -If you do not want to update your SQLite database catalog image or migrate your catalog to the file-based catalog format, you can configure your catalog to run with elevated permissions. diff --git a/modules/olm-catalogsource-image-template.adoc b/modules/olm-catalogsource-image-template.adoc deleted file mode 100644 index 5469c8ebd2f4..000000000000 --- a/modules/olm-catalogsource-image-template.adoc +++ /dev/null @@ -1,94 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-understanding-olm.adoc - -ifdef::openshift-origin[] -:global_ns: olm -endif::[] -ifndef::openshift-origin[] -:global_ns: openshift-marketplace -endif::[] - -[id="olm-catalogsource-image-template_{context}"] -= Image template for custom catalog sources - -Operator compatibility with the underlying cluster can be expressed by a catalog source in various ways. One way, which is used for the default Red Hat-provided catalog sources, is to identify image tags for index images that are specifically created for a particular platform release, for example {product-title} 4.13. - -During a cluster upgrade, the index image tag for the default Red Hat-provided catalog sources are updated automatically by the Cluster Version Operator (CVO) so that Operator Lifecycle Manager (OLM) pulls the updated version of the catalog. For example during an upgrade from {product-title} 4.12 to 4.13, the `spec.image` field in the `CatalogSource` object for the `redhat-operators` catalog is updated from: - -[source,terminal] ----- -registry.redhat.io/redhat/redhat-operator-index:v4.12 ----- - -to: - -[source,terminal] ----- -registry.redhat.io/redhat/redhat-operator-index:v4.13 ----- - -However, the CVO does not automatically update image tags for custom catalogs. To ensure users are left with a compatible and supported Operator installation after a cluster upgrade, custom catalogs should also be kept updated to reference an updated index image. - -Starting in {product-title} 4.9, cluster administrators can add the `olm.catalogImageTemplate` annotation in the `CatalogSource` object for custom catalogs to an image reference that includes a template. 
The following Kubernetes version variables are supported for use in the template: - -* `kube_major_version` -* `kube_minor_version` -* `kube_patch_version` - -[NOTE] -==== -You must specify the Kubernetes cluster version and not an {product-title} cluster version, as the latter is not currently available for templating. -==== - -Provided that you have created and pushed an index image with a tag specifying the updated Kubernetes version, setting this annotation enables the index image versions in custom catalogs to be automatically changed after a cluster upgrade. The annotation value is used to set or update the image reference in the `spec.image` field of the `CatalogSource` object. This helps avoid cluster upgrades leaving Operator installations in unsupported states or without a continued update path. - -[IMPORTANT] -==== -You must ensure that the index image with the updated tag, in whichever registry it is stored in, is accessible by the cluster at the time of the cluster upgrade. -==== - -.Example catalog source with an image template -[%collapsible] -==== -[source,yaml,subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: CatalogSource -metadata: - generation: 1 - name: example-catalog - namespace: openshift-marketplace - annotations: - olm.catalogImageTemplate: - "quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}" -spec: - displayName: Example Catalog - image: quay.io/example-org/example-catalog:v1.26 - priority: -400 - publisher: Example Org ----- -==== - -[NOTE] -==== -If the `spec.image` field and the `olm.catalogImageTemplate` annotation are both set, the `spec.image` field is overwritten by the resolved value from the annotation. If the annotation does not resolve to a usable pull spec, the catalog source falls back to the set `spec.image` value. - -If the `spec.image` field is not set and the annotation does not resolve to a usable pull spec, OLM stops reconciliation of the catalog source and sets it into a human-readable error condition. -==== - -For an {product-title} 4.13 cluster, which uses Kubernetes 1.26, the `olm.catalogImageTemplate` annotation in the preceding example resolves to the following image reference: - -[source,terminal] ----- -quay.io/example-org/example-catalog:v1.26 ----- - -For future releases of {product-title}, you can create updated index images for your custom catalogs that target the later Kubernetes version that is used by the later {product-title} version. With the `olm.catalogImageTemplate` annotation set before the upgrade, upgrading the cluster to the later {product-title} version would then automatically update the catalog's index image as well. - -ifdef::openshift-origin[] -:!global_ns: -endif::[] -ifndef::openshift-origin[] -:!global_ns: -endif::[] diff --git a/modules/olm-catalogsource.adoc b/modules/olm-catalogsource.adoc deleted file mode 100644 index 173cfdef948b..000000000000 --- a/modules/olm-catalogsource.adoc +++ /dev/null @@ -1,128 +0,0 @@ -// Module included in the following assemblies: -// -// * operators/understanding/olm/olm-understanding-olm.adoc - -ifdef::openshift-origin[] -:global_ns: olm -endif::[] -ifndef::openshift-origin[] -:global_ns: openshift-marketplace -endif::[] - -[id="olm-catalogsource_{context}"] -= Catalog source - -A _catalog source_ represents a store of metadata, typically by referencing an _index image_ stored in a container registry. Operator Lifecycle Manager (OLM) queries catalog sources to discover and install Operators and their dependencies. 
OperatorHub in the {product-title} web console also displays the Operators provided by catalog sources. - -[TIP] -==== -Cluster administrators can view the full list of Operators provided by an enabled catalog source on a cluster by using the *Administration* -> *Cluster Settings* -> *Configuration* -> *OperatorHub* page in the web console. -==== - -The `spec` of a `CatalogSource` object indicates how to construct a pod or how to communicate with a service that serves the Operator Registry gRPC API. - -.Example `CatalogSource` object -[%collapsible] -==== -[source,yaml,subs="attributes+"] ----- -apiVersion: operators.coreos.com/v1alpha1 -kind: CatalogSource -metadata: - generation: 1 - name: example-catalog <1> - namespace: {global_ns} <2> - annotations: - olm.catalogImageTemplate: <3> - "quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}.{kube_patch_version}" -spec: - displayName: Example Catalog <4> - image: quay.io/example-org/example-catalog:v1 <5> - priority: -400 <6> - publisher: Example Org - sourceType: grpc <7> - grpcPodConfig: - securityContextConfig: <8> - nodeSelector: <9> - custom_label: